CbC_gcc: gcc/simplify-rtx.c @ 0:a06113de4d67

first commit

author    kent <kent@cr.ie.u-ryukyu.ac.jp>
date      Fri, 17 Jul 2009 14:47:48 +0900
parents   (none; this is the repository's first revision)
children  855418dad1a3
/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))

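/* Editor's note (not in the original source): a minimal worked example of
   the macro above.  With a 32-bit HOST_WIDE_INT, the 64-bit value -2 is
   held as the pair (low = 0xfffffffe, high = -1).  Given only the low
   word, HWI_SIGN_EXTEND (0xfffffffe) sees the sign bit set and yields
   (HOST_WIDE_INT) -1 for the missing high word, while a non-negative
   low word such as 0x00000002 yields 0, matching two's-complement
   sign extension.  */
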
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
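
/* Editor's note (not in the original source): the truncation matters for
   the most negative value of a mode.  In SImode, INTVAL 0x80000000 is
   -2147483648; its arithmetic negation, +2147483648, does not fit in 32
   bits, so gen_int_mode truncates the result back into the mode,
   yielding 0x80000000 again.  */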

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
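
/* Editor's note (not in the original source): for SImode (width 32) the
   only value accepted is (const_int 0x80000000), i.e. the mask with just
   bit 31 set.  For a 64-bit mode on a host whose HOST_WIDE_INT is 32
   bits, the sign bit lives in the high word, hence the CONST_DOUBLE
   branch above, which additionally requires the low word to be zero.  */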

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
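
/* Editor's note (not in the original source): a sketch of typical use.
   Calling simplify_gen_binary (PLUS, SImode, gen_int_mode (4, SImode),
   reg) on some pseudo REG first tries constant folding (which fails,
   since REG is not constant), then canonicalizes the commutative PLUS
   so the constant comes second, returning (plus:SI reg (const_int 4))
   rather than the un-canonical (plus:SI (const_int 4) reg).  */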

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
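
/* Editor's note (not in the original source): for example, a double
   constant spilled to the pool typically appears as
   (mem/u (symbol_ref "*.LC0")) (the ".LC0" label name is illustrative,
   not guaranteed); this routine recovers the CONST_DOUBLE that the pool
   entry holds so later folding can see the value.  Reading at a nonzero
   offset, or in a mode other than the stored one, goes through
   simplify_subreg on the recovered constant instead.  */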

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
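
/* Editor's note (not in the original source): substituting a known
   constant for a register illustrates the simplify-as-you-rebuild
   behavior.  Replacing (reg:SI 60) with (const_int 7) inside
   (plus:SI (reg:SI 60) (const_int 1)) does not merely rewrite the
   tree; the recursive calls funnel through simplify_gen_binary, so
   the result folds directly to (const_int 8).  */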

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);


      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned) significand_size (GET_MODE (op))
                      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.
      */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned) significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>)  */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>)  */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>)  */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
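
/* Editor's note (not in the original source): two sample rewrites from
   the NOT case above.  (not (plus X (const_int -1))) becomes (neg X),
   and by De Morgan (not (and X Y)) becomes (ior (not X) (not Y)),
   a shape that pattern-matches the and-not/nand/nor style instructions
   some machines provide.  */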

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
        case SS_NEG:
        case US_NEG:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return NULL_RTX;
}
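
/* Editor's note (not in the original source): concrete foldings performed
   above, assuming 32-bit SImode.  (neg (const_int 5)) folds to
   (const_int -5); (popcount (const_int 0xff)) to (const_int 8); FIX of
   3.7 truncates toward zero to (const_int 3); and an out-of-range input
   such as 1e20 is clamped to the signed upper bound 0x7fffffff, per the
   saturating semantics this routine chooses for constant folding.  */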

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
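
/* Editor's note (not in the original source): e.g. with PLUS in SImode,
   (plus (plus reg (const_int 2)) (const_int 3)) reassociates so the two
   constants meet: the "a op (b op c)" attempt folds 2 + 3 and the
   result is (plus reg (const_int 5)).  */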


/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
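
/* Editor's note (not in the original source): the QImode example in the
   comment above is why the operand mode matters.  In 8 bits, 128 and
   -128 are the same bit pattern (0x80); whether a comparison holds
   depends on interpreting that pattern in QImode rather than in a
   host-sized word, which is why relational codes must go through
   simplify_relational_operation with an explicit comparison mode.  */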
1551 | |
1552 /* Subroutine of simplify_binary_operation. Simplify a binary operation | |
1553 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or | |
1554 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the | |
1555 actual constants. */ | |
1556 | |
1557 static rtx | |
1558 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, | |
1559 rtx op0, rtx op1, rtx trueop0, rtx trueop1) | |
1560 { | |
1561 rtx tem, reversed, opleft, opright; | |
1562 HOST_WIDE_INT val; | |
1563 unsigned int width = GET_MODE_BITSIZE (mode); | |
1564 | |
1565 /* Even if we can't compute a constant result, | |
1566 there are some cases worth simplifying. */ | |
1567 | |
1568 switch (code) | |
1569 { | |
1570 case PLUS: | |
1571 /* Maybe simplify x + 0 to x. The two expressions are equivalent | |
1572 when x is NaN, infinite, or finite and nonzero. They aren't | |
1573 when x is -0 and the rounding mode is not towards -infinity, | |
1574 since (-0) + 0 is then 0. */ | |
1575 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode)) | |
1576 return op0; | |
1577 | |
1578 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These | |
1579 transformations are safe even for IEEE. */ | |
1580 if (GET_CODE (op0) == NEG) | |
1581 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0)); | |
1582 else if (GET_CODE (op1) == NEG) | |
1583 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0)); | |
1584 | |
1585 /* (~a) + 1 -> -a */ | |
1586 if (INTEGRAL_MODE_P (mode) | |
1587 && GET_CODE (op0) == NOT | |
1588 && trueop1 == const1_rtx) | |
1589 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode); | |
1590 | |
1591 /* Handle both-operands-constant cases. We can only add | |
1592 CONST_INTs to constants since the sum of relocatable symbols | |
1593 can't be handled by most assemblers. Don't add CONST_INT | |
1594 to CONST_INT since overflow won't be computed properly if wider | |
1595 than HOST_BITS_PER_WIDE_INT. */ | |
1596 | |
1597 if ((GET_CODE (op0) == CONST | |
1598 || GET_CODE (op0) == SYMBOL_REF | |
1599 || GET_CODE (op0) == LABEL_REF) | |
1600 && GET_CODE (op1) == CONST_INT) | |
1601 return plus_constant (op0, INTVAL (op1)); | |
1602 else if ((GET_CODE (op1) == CONST | |
1603 || GET_CODE (op1) == SYMBOL_REF | |
1604 || GET_CODE (op1) == LABEL_REF) | |
1605 && GET_CODE (op0) == CONST_INT) | |
1606 return plus_constant (op1, INTVAL (op0)); | |
1607 | |
1608 /* See if this is something like X * C - X or vice versa or | |
1609 if the multiplication is written as a shift. If so, we can | |
1610 distribute and make a new multiply, shift, or maybe just | |
1611 have X (if C is 2 in the example above). But don't make | |
1612 something more expensive than we had before. */ | |
1613 | |
1614 if (SCALAR_INT_MODE_P (mode)) | |
1615 { | |
1616 HOST_WIDE_INT coeff0h = 0, coeff1h = 0; | |
1617 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1; | |
1618 rtx lhs = op0, rhs = op1; | |
1619 | |
1620 if (GET_CODE (lhs) == NEG) | |
1621 { | |
1622 coeff0l = -1; | |
1623 coeff0h = -1; | |
1624 lhs = XEXP (lhs, 0); | |
1625 } | |
1626 else if (GET_CODE (lhs) == MULT | |
1627 && GET_CODE (XEXP (lhs, 1)) == CONST_INT) | |
1628 { | |
1629 coeff0l = INTVAL (XEXP (lhs, 1)); | |
1630 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0; | |
1631 lhs = XEXP (lhs, 0); | |
1632 } | |
1633 else if (GET_CODE (lhs) == ASHIFT | |
1634 && GET_CODE (XEXP (lhs, 1)) == CONST_INT | |
1635 && INTVAL (XEXP (lhs, 1)) >= 0 | |
1636 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) | |
1637 { | |
1638 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1)); | |
1639 coeff0h = 0; | |
1640 lhs = XEXP (lhs, 0); | |
1641 } | |
1642 | |
1643 if (GET_CODE (rhs) == NEG) | |
1644 { | |
1645 coeff1l = -1; | |
1646 coeff1h = -1; | |
1647 rhs = XEXP (rhs, 0); | |
1648 } | |
1649 else if (GET_CODE (rhs) == MULT | |
1650 && GET_CODE (XEXP (rhs, 1)) == CONST_INT) | |
1651 { | |
1652 coeff1l = INTVAL (XEXP (rhs, 1)); | |
1653 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0; | |
1654 rhs = XEXP (rhs, 0); | |
1655 } | |
1656 else if (GET_CODE (rhs) == ASHIFT | |
1657 && GET_CODE (XEXP (rhs, 1)) == CONST_INT | |
1658 && INTVAL (XEXP (rhs, 1)) >= 0 | |
1659 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) | |
1660 { | |
1661 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)); | |
1662 coeff1h = 0; | |
1663 rhs = XEXP (rhs, 0); | |
1664 } | |
1665 | |
1666 if (rtx_equal_p (lhs, rhs)) | |
1667 { | |
1668 rtx orig = gen_rtx_PLUS (mode, op0, op1); | |
1669 rtx coeff; | |
1670 unsigned HOST_WIDE_INT l; | |
1671 HOST_WIDE_INT h; | |
1672 bool speed = optimize_function_for_speed_p (cfun); | |
1673 | |
1674 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h); | |
1675 coeff = immed_double_const (l, h, mode); | |
1676 | |
1677 tem = simplify_gen_binary (MULT, mode, lhs, coeff); | |
1678 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed) | |
1679 ? tem : 0; | |
1680 } | |
1681 } | |
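/* For illustration, with made-up operands: | |
   (plus (mult X 3) (ashift X 2)) is recognized as 3*X + 4*X and | |
   collapses to (mult X 7), but only if rtx_cost judges the new | |
   multiply to be no more expensive than the original form. */ | |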
1682 | |
1683 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */ | |
1684 if ((GET_CODE (op1) == CONST_INT | |
1685 || GET_CODE (op1) == CONST_DOUBLE) | |
1686 && GET_CODE (op0) == XOR | |
1687 && (GET_CODE (XEXP (op0, 1)) == CONST_INT | |
1688 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE) | |
1689 && mode_signbit_p (mode, op1)) | |
1690 return simplify_gen_binary (XOR, mode, XEXP (op0, 0), | |
1691 simplify_gen_binary (XOR, mode, op1, | |
1692 XEXP (op0, 1))); | |
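/* The rule works because adding the sign bit and XORing it in | |
   agree modulo the mode: the carry out of the top bit is simply | |
   discarded. E.g. in QImode, (plus (xor X 0x12) 0x80) becomes | |
   (xor X 0x92). */ | |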
1693 | |
1694 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */ | |
1695 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode) | |
1696 && GET_CODE (op0) == MULT | |
1697 && GET_CODE (XEXP (op0, 0)) == NEG) | |
1698 { | |
1699 rtx in1, in2; | |
1700 | |
1701 in1 = XEXP (XEXP (op0, 0), 0); | |
1702 in2 = XEXP (op0, 1); | |
1703 return simplify_gen_binary (MINUS, mode, op1, | |
1704 simplify_gen_binary (MULT, mode, | |
1705 in1, in2)); | |
1706 } | |
1707 | |
1708 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if | |
1709 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE | |
1710 is 1. */ | |
1711 if (COMPARISON_P (op0) | |
1712 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx) | |
1713 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx)) | |
1714 && (reversed = reversed_comparison (op0, mode))) | |
1715 return | |
1716 simplify_gen_unary (NEG, mode, reversed, mode); | |
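/* E.g. with STORE_FLAG_VALUE == 1, (plus (eq A B) (const_int -1)) | |
   evaluates to 0 when A == B and to -1 otherwise, which is | |
   exactly (neg (ne A B)). */ | |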
1717 | |
1718 /* If one of the operands is a PLUS or a MINUS, see if we can | |
1719 simplify this by the associative law. | |
1720 Don't use the associative law for floating point. | |
1721 The inaccuracy makes it nonassociative, | |
1722 and subtle programs can break if operations are associated. */ | |
1723 | |
1724 if (INTEGRAL_MODE_P (mode) | |
1725 && (plus_minus_operand_p (op0) | |
1726 || plus_minus_operand_p (op1)) | |
1727 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0) | |
1728 return tem; | |
1729 | |
1730 /* Reassociate floating point addition only when the user | |
1731 specifies associative math operations. */ | |
1732 if (FLOAT_MODE_P (mode) | |
1733 && flag_associative_math) | |
1734 { | |
1735 tem = simplify_associative_operation (code, mode, op0, op1); | |
1736 if (tem) | |
1737 return tem; | |
1738 } | |
1739 break; | |
1740 | |
1741 case COMPARE: | |
1742 #ifdef HAVE_cc0 | |
1743 /* Convert (compare FOO (const_int 0)) to FOO when cc0 is in use; | |
1744 otherwise we want to leave it as a COMPARE so we can | |
1745 distinguish it from a register-register copy. | |
1746 | |
1747 In IEEE floating point, x-0 is not the same as x. */ | |
1748 if (!(HONOR_SIGNED_ZEROS (mode) | |
1749 && HONOR_SIGN_DEPENDENT_ROUNDING (mode)) | |
1750 && trueop1 == CONST0_RTX (mode)) | |
1751 return op0; | |
1752 #endif | |
1753 | |
1754 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */ | |
1755 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT) | |
1756 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU)) | |
1757 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx) | |
1758 { | |
1759 rtx xop00 = XEXP (op0, 0); | |
1760 rtx xop10 = XEXP (op1, 0); | |
1761 | |
1762 #ifdef HAVE_cc0 | |
1763 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0) | |
1764 #else | |
1765 if (REG_P (xop00) && REG_P (xop10) | |
1766 && GET_MODE (xop00) == GET_MODE (xop10) | |
1767 && REGNO (xop00) == REGNO (xop10) | |
1768 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC | |
1769 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC) | |
1770 #endif | |
1771 return xop00; | |
1772 } | |
1773 break; | |
1774 | |
1775 case MINUS: | |
1776 /* We can't assume x-x is 0 even with non-IEEE floating point, | |
1777 but since it is zero except in very strange circumstances, we | |
1778 will treat it as zero with -ffinite-math-only. */ | |
1779 if (rtx_equal_p (trueop0, trueop1) | |
1780 && ! side_effects_p (op0) | |
1781 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode))) | |
1782 return CONST0_RTX (mode); | |
1783 | |
1784 /* Change subtraction from zero into negation. (0 - x) is the | |
1785 same as -x when x is NaN, infinite, or finite and nonzero. | |
1786 But if the mode has signed zeros, and does not round towards | |
1787 -infinity, then 0 - 0 is 0, not -0. */ | |
1788 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode)) | |
1789 return simplify_gen_unary (NEG, mode, op1, mode); | |
1790 | |
1791 /* (-1 - a) is ~a. */ | |
1792 if (trueop0 == constm1_rtx) | |
1793 return simplify_gen_unary (NOT, mode, op1, mode); | |
1794 | |
1795 /* Subtracting 0 has no effect unless the mode has signed zeros | |
1796 and supports rounding towards -infinity. In such a case, | |
1797 0 - 0 is -0. */ | |
1798 if (!(HONOR_SIGNED_ZEROS (mode) | |
1799 && HONOR_SIGN_DEPENDENT_ROUNDING (mode)) | |
1800 && trueop1 == CONST0_RTX (mode)) | |
1801 return op0; | |
1802 | |
1803 /* See if this is something like X * C - X or vice versa or | |
1804 if the multiplication is written as a shift. If so, we can | |
1805 distribute and make a new multiply, shift, or maybe just | |
1806 have X (if C is 2 in the example above). But don't make | |
1807 something more expensive than we had before. */ | |
1808 | |
1809 if (SCALAR_INT_MODE_P (mode)) | |
1810 { | |
1811 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1; | |
1812 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1; | |
1813 rtx lhs = op0, rhs = op1; | |
1814 | |
1815 if (GET_CODE (lhs) == NEG) | |
1816 { | |
1817 coeff0l = -1; | |
1818 coeff0h = -1; | |
1819 lhs = XEXP (lhs, 0); | |
1820 } | |
1821 else if (GET_CODE (lhs) == MULT | |
1822 && GET_CODE (XEXP (lhs, 1)) == CONST_INT) | |
1823 { | |
1824 coeff0l = INTVAL (XEXP (lhs, 1)); | |
1825 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0; | |
1826 lhs = XEXP (lhs, 0); | |
1827 } | |
1828 else if (GET_CODE (lhs) == ASHIFT | |
1829 && GET_CODE (XEXP (lhs, 1)) == CONST_INT | |
1830 && INTVAL (XEXP (lhs, 1)) >= 0 | |
1831 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) | |
1832 { | |
1833 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1)); | |
1834 coeff0h = 0; | |
1835 lhs = XEXP (lhs, 0); | |
1836 } | |
1837 | |
1838 if (GET_CODE (rhs) == NEG) | |
1839 { | |
1840 negcoeff1l = 1; | |
1841 negcoeff1h = 0; | |
1842 rhs = XEXP (rhs, 0); | |
1843 } | |
1844 else if (GET_CODE (rhs) == MULT | |
1845 && GET_CODE (XEXP (rhs, 1)) == CONST_INT) | |
1846 { | |
1847 negcoeff1l = -INTVAL (XEXP (rhs, 1)); | |
1848 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1; | |
1849 rhs = XEXP (rhs, 0); | |
1850 } | |
1851 else if (GET_CODE (rhs) == ASHIFT | |
1852 && GET_CODE (XEXP (rhs, 1)) == CONST_INT | |
1853 && INTVAL (XEXP (rhs, 1)) >= 0 | |
1854 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) | |
1855 { | |
1856 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1))); | |
1857 negcoeff1h = -1; | |
1858 rhs = XEXP (rhs, 0); | |
1859 } | |
1860 | |
1861 if (rtx_equal_p (lhs, rhs)) | |
1862 { | |
1863 rtx orig = gen_rtx_MINUS (mode, op0, op1); | |
1864 rtx coeff; | |
1865 unsigned HOST_WIDE_INT l; | |
1866 HOST_WIDE_INT h; | |
1867 bool speed = optimize_function_for_speed_p (cfun); | |
1868 | |
1869 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h); | |
1870 coeff = immed_double_const (l, h, mode); | |
1871 | |
1872 tem = simplify_gen_binary (MULT, mode, lhs, coeff); | |
1873 return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed) | |
1874 ? tem : 0; | |
1875 } | |
1876 } | |
1877 | |
1878 /* (a - (-b)) -> (a + b). True even for IEEE. */ | |
1879 if (GET_CODE (op1) == NEG) | |
1880 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0)); | |
1881 | |
1882 /* (-x - c) may be simplified as (-c - x). */ | |
1883 if (GET_CODE (op0) == NEG | |
1884 && (GET_CODE (op1) == CONST_INT | |
1885 || GET_CODE (op1) == CONST_DOUBLE)) | |
1886 { | |
1887 tem = simplify_unary_operation (NEG, mode, op1, mode); | |
1888 if (tem) | |
1889 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0)); | |
1890 } | |
1891 | |
1892 /* Don't let a relocatable value get a negative coeff. */ | |
1893 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode) | |
1894 return simplify_gen_binary (PLUS, mode, | |
1895 op0, | |
1896 neg_const_int (mode, op1)); | |
1897 | |
1898 /* (x - (x & y)) -> (x & ~y) */ | |
1899 if (GET_CODE (op1) == AND) | |
1900 { | |
1901 if (rtx_equal_p (op0, XEXP (op1, 0))) | |
1902 { | |
1903 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1), | |
1904 GET_MODE (XEXP (op1, 1))); | |
1905 return simplify_gen_binary (AND, mode, op0, tem); | |
1906 } | |
1907 if (rtx_equal_p (op0, XEXP (op1, 1))) | |
1908 { | |
1909 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0), | |
1910 GET_MODE (XEXP (op1, 0))); | |
1911 return simplify_gen_binary (AND, mode, op0, tem); | |
1912 } | |
1913 } | |
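/* The subtraction cannot borrow, since (x & y) only has bits that | |
   x also has. E.g. x == 0b1101, y == 0b1011: x - 0b1001 == 0b0100, | |
   which is x & ~y. */ | |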
1914 | |
1915 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done | |
1916 by reversing the comparison code if valid. */ | |
1917 if (STORE_FLAG_VALUE == 1 | |
1918 && trueop0 == const1_rtx | |
1919 && COMPARISON_P (op1) | |
1920 && (reversed = reversed_comparison (op1, mode))) | |
1921 return reversed; | |
1922 | |
1923 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */ | |
1924 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode) | |
1925 && GET_CODE (op1) == MULT | |
1926 && GET_CODE (XEXP (op1, 0)) == NEG) | |
1927 { | |
1928 rtx in1, in2; | |
1929 | |
1930 in1 = XEXP (XEXP (op1, 0), 0); | |
1931 in2 = XEXP (op1, 1); | |
1932 return simplify_gen_binary (PLUS, mode, | |
1933 simplify_gen_binary (MULT, mode, | |
1934 in1, in2), | |
1935 op0); | |
1936 } | |
1937 | |
1938 /* Canonicalize (minus (neg A) (mult B C)) to | |
1939 (minus (mult (neg B) C) A). */ | |
1940 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode) | |
1941 && GET_CODE (op1) == MULT | |
1942 && GET_CODE (op0) == NEG) | |
1943 { | |
1944 rtx in1, in2; | |
1945 | |
1946 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode); | |
1947 in2 = XEXP (op1, 1); | |
1948 return simplify_gen_binary (MINUS, mode, | |
1949 simplify_gen_binary (MULT, mode, | |
1950 in1, in2), | |
1951 XEXP (op0, 0)); | |
1952 } | |
1953 | |
1954 /* If one of the operands is a PLUS or a MINUS, see if we can | |
1955 simplify this by the associative law. This will, for example, | |
1956 canonicalize (minus A (plus B C)) to (minus (minus A B) C). | |
1957 Don't use the associative law for floating point. | |
1958 The inaccuracy makes it nonassociative, | |
1959 and subtle programs can break if operations are associated. */ | |
1960 | |
1961 if (INTEGRAL_MODE_P (mode) | |
1962 && (plus_minus_operand_p (op0) | |
1963 || plus_minus_operand_p (op1)) | |
1964 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0) | |
1965 return tem; | |
1966 break; | |
1967 | |
1968 case MULT: | |
1969 if (trueop1 == constm1_rtx) | |
1970 return simplify_gen_unary (NEG, mode, op0, mode); | |
1971 | |
1972 /* Maybe simplify x * 0 to 0. The reduction is not valid if | |
1973 x is NaN, since x * 0 is then also NaN. Nor is it valid | |
1974 when the mode has signed zeros, since multiplying a negative | |
1975 number by 0 will give -0, not 0. */ | |
1976 if (!HONOR_NANS (mode) | |
1977 && !HONOR_SIGNED_ZEROS (mode) | |
1978 && trueop1 == CONST0_RTX (mode) | |
1979 && ! side_effects_p (op0)) | |
1980 return op1; | |
1981 | |
1982 /* In IEEE floating point, x*1 is not equivalent to x for | |
1983 signalling NaNs. */ | |
1984 if (!HONOR_SNANS (mode) | |
1985 && trueop1 == CONST1_RTX (mode)) | |
1986 return op0; | |
1987 | |
1988 /* Convert multiply by constant power of two into shift unless | |
1989 we are still generating RTL. This test is a kludge. */ | |
1990 if (GET_CODE (trueop1) == CONST_INT | |
1991 && (val = exact_log2 (INTVAL (trueop1))) >= 0 | |
1992 /* If the mode is larger than the host word size, and the | |
1993 uppermost bit is set, then this isn't a power of two due | |
1994 to implicit sign extension. */ | |
1995 && (width <= HOST_BITS_PER_WIDE_INT | |
1996 || val != HOST_BITS_PER_WIDE_INT - 1)) | |
1997 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val)); | |
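/* E.g. (mult X (const_int 8)) becomes (ashift X 3). */ | |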
1998 | |
1999 /* Likewise for multipliers wider than a word. */ | |
2000 if (GET_CODE (trueop1) == CONST_DOUBLE | |
2001 && (GET_MODE (trueop1) == VOIDmode | |
2002 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT) | |
2003 && GET_MODE (op0) == mode | |
2004 && CONST_DOUBLE_LOW (trueop1) == 0 | |
2005 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0) | |
2006 return simplify_gen_binary (ASHIFT, mode, op0, | |
2007 GEN_INT (val + HOST_BITS_PER_WIDE_INT)); | |
2008 | |
2009 /* x*2 is x+x and x*(-1) is -x */ | |
2010 if (GET_CODE (trueop1) == CONST_DOUBLE | |
2011 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1)) | |
2012 && GET_MODE (op0) == mode) | |
2013 { | |
2014 REAL_VALUE_TYPE d; | |
2015 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1); | |
2016 | |
2017 if (REAL_VALUES_EQUAL (d, dconst2)) | |
2018 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0)); | |
2019 | |
2020 if (!HONOR_SNANS (mode) | |
2021 && REAL_VALUES_EQUAL (d, dconstm1)) | |
2022 return simplify_gen_unary (NEG, mode, op0, mode); | |
2023 } | |
2024 | |
2025 /* Optimize -x * -x as x * x. */ | |
2026 if (FLOAT_MODE_P (mode) | |
2027 && GET_CODE (op0) == NEG | |
2028 && GET_CODE (op1) == NEG | |
2029 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0)) | |
2030 && !side_effects_p (XEXP (op0, 0))) | |
2031 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0)); | |
2032 | |
2033 /* Likewise, optimize abs(x) * abs(x) as x * x. */ | |
2034 if (SCALAR_FLOAT_MODE_P (mode) | |
2035 && GET_CODE (op0) == ABS | |
2036 && GET_CODE (op1) == ABS | |
2037 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0)) | |
2038 && !side_effects_p (XEXP (op0, 0))) | |
2039 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0)); | |
2040 | |
2041 /* Reassociate multiplication, but for floating point MULTs | |
2042 only when the user specifies unsafe math optimizations. */ | |
2043 if (! FLOAT_MODE_P (mode) | |
2044 || flag_unsafe_math_optimizations) | |
2045 { | |
2046 tem = simplify_associative_operation (code, mode, op0, op1); | |
2047 if (tem) | |
2048 return tem; | |
2049 } | |
2050 break; | |
2051 | |
2052 case IOR: | |
2053 if (trueop1 == const0_rtx) | |
2054 return op0; | |
2055 if (GET_CODE (trueop1) == CONST_INT | |
2056 && ((INTVAL (trueop1) & GET_MODE_MASK (mode)) | |
2057 == GET_MODE_MASK (mode))) | |
2058 return op1; | |
2059 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) | |
2060 return op0; | |
2061 /* A | (~A) -> -1 */ | |
2062 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1)) | |
2063 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0))) | |
2064 && ! side_effects_p (op0) | |
2065 && SCALAR_INT_MODE_P (mode)) | |
2066 return constm1_rtx; | |
2067 | |
2068 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */ | |
2069 if (GET_CODE (op1) == CONST_INT | |
2070 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT | |
2071 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0) | |
2072 return op1; | |
2073 | |
2074 /* Canonicalize (X & C1) | C2. */ | |
2075 if (GET_CODE (op0) == AND | |
2076 && GET_CODE (trueop1) == CONST_INT | |
2077 && GET_CODE (XEXP (op0, 1)) == CONST_INT) | |
2078 { | |
2079 HOST_WIDE_INT mask = GET_MODE_MASK (mode); | |
2080 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1)); | |
2081 HOST_WIDE_INT c2 = INTVAL (trueop1); | |
2082 | |
2083 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */ | |
2084 if ((c1 & c2) == c1 | |
2085 && !side_effects_p (XEXP (op0, 0))) | |
2086 return trueop1; | |
2087 | |
2088 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */ | |
2089 if (((c1|c2) & mask) == mask) | |
2090 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1); | |
2091 | |
2092 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */ | |
2093 if (((c1 & ~c2) & mask) != (c1 & mask)) | |
2094 { | |
2095 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0), | |
2096 gen_int_mode (c1 & ~c2, mode)); | |
2097 return simplify_gen_binary (IOR, mode, tem, op1); | |
2098 } | |
2099 } | |
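/* For illustration with C1 == 0x0F and C2 == 0x05: neither special | |
   case above applies, so C1 shrinks to C1 & ~C2 == 0x0A and the | |
   result is (ior (and X 0x0A) 0x05), the same value with fewer | |
   bits set in the AND mask. */ | |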
2100 | |
2101 /* Convert (A & B) | A to A. */ | |
2102 if (GET_CODE (op0) == AND | |
2103 && (rtx_equal_p (XEXP (op0, 0), op1) | |
2104 || rtx_equal_p (XEXP (op0, 1), op1)) | |
2105 && ! side_effects_p (XEXP (op0, 0)) | |
2106 && ! side_effects_p (XEXP (op0, 1))) | |
2107 return op1; | |
2108 | |
2109 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the | |
2110 mode size to (rotate A CX). */ | |
2111 | |
2112 if (GET_CODE (op1) == ASHIFT | |
2113 || GET_CODE (op1) == SUBREG) | |
2114 { | |
2115 opleft = op1; | |
2116 opright = op0; | |
2117 } | |
2118 else | |
2119 { | |
2120 opright = op1; | |
2121 opleft = op0; | |
2122 } | |
2123 | |
2124 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT | |
2125 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0)) | |
2126 && GET_CODE (XEXP (opleft, 1)) == CONST_INT | |
2127 && GET_CODE (XEXP (opright, 1)) == CONST_INT | |
2128 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1)) | |
2129 == GET_MODE_BITSIZE (mode))) | |
2130 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1)); | |
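/* E.g. in SImode, (ior (ashift A 3) (lshiftrt A 29)) moves the top | |
   three bits of A to the bottom and everything else up by three, | |
   which is precisely (rotate A 3). */ | |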
2131 | |
2132 /* Same, but for ashift that has been "simplified" to a wider mode | |
2133 by simplify_shift_const. */ | |
2134 | |
2135 if (GET_CODE (opleft) == SUBREG | |
2136 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT | |
2137 && GET_CODE (opright) == LSHIFTRT | |
2138 && GET_CODE (XEXP (opright, 0)) == SUBREG | |
2139 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0)) | |
2140 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0)) | |
2141 && (GET_MODE_SIZE (GET_MODE (opleft)) | |
2142 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft)))) | |
2143 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0), | |
2144 SUBREG_REG (XEXP (opright, 0))) | |
2145 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT | |
2146 && GET_CODE (XEXP (opright, 1)) == CONST_INT | |
2147 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1)) | |
2148 == GET_MODE_BITSIZE (mode))) | |
2149 return gen_rtx_ROTATE (mode, XEXP (opright, 0), | |
2150 XEXP (SUBREG_REG (opleft), 1)); | |
2151 | |
2152 /* If we have (ior (and X C1) C2), simplify this by making | |
2153 C1 as small as possible if C1 actually changes. */ | |
2154 if (GET_CODE (op1) == CONST_INT | |
2155 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT | |
2156 || INTVAL (op1) > 0) | |
2157 && GET_CODE (op0) == AND | |
2158 && GET_CODE (XEXP (op0, 1)) == CONST_INT | |
2159 && GET_CODE (op1) == CONST_INT | |
2160 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0) | |
2161 return simplify_gen_binary (IOR, mode, | |
2162 simplify_gen_binary | |
2163 (AND, mode, XEXP (op0, 0), | |
2164 GEN_INT (INTVAL (XEXP (op0, 1)) | |
2165 & ~INTVAL (op1))), | |
2166 op1); | |
2167 | |
2168 /* If OP0 is (ashiftrt (plus ...) C), it might actually be | |
2169 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and | |
2170 the PLUS does not affect any of the bits in OP1: then we can do | |
2171 the IOR as a PLUS and we can associate. This is valid if OP1 | |
2172 can be safely shifted left C bits. */ | |
2173 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT | |
2174 && GET_CODE (XEXP (op0, 0)) == PLUS | |
2175 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT | |
2176 && GET_CODE (XEXP (op0, 1)) == CONST_INT | |
2177 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT) | |
2178 { | |
2179 int count = INTVAL (XEXP (op0, 1)); | |
2180 HOST_WIDE_INT mask = INTVAL (trueop1) << count; | |
2181 | |
2182 if (mask >> count == INTVAL (trueop1) | |
2183 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0) | |
2184 return simplify_gen_binary (ASHIFTRT, mode, | |
2185 plus_constant (XEXP (op0, 0), mask), | |
2186 XEXP (op0, 1)); | |
2187 } | |
2188 | |
2189 tem = simplify_associative_operation (code, mode, op0, op1); | |
2190 if (tem) | |
2191 return tem; | |
2192 break; | |
2193 | |
2194 case XOR: | |
2195 if (trueop1 == const0_rtx) | |
2196 return op0; | |
2197 if (GET_CODE (trueop1) == CONST_INT | |
2198 && ((INTVAL (trueop1) & GET_MODE_MASK (mode)) | |
2199 == GET_MODE_MASK (mode))) | |
2200 return simplify_gen_unary (NOT, mode, op0, mode); | |
2201 if (rtx_equal_p (trueop0, trueop1) | |
2202 && ! side_effects_p (op0) | |
2203 && GET_MODE_CLASS (mode) != MODE_CC) | |
2204 return CONST0_RTX (mode); | |
2205 | |
2206 /* Canonicalize XOR of the most significant bit to PLUS. */ | |
2207 if ((GET_CODE (op1) == CONST_INT | |
2208 || GET_CODE (op1) == CONST_DOUBLE) | |
2209 && mode_signbit_p (mode, op1)) | |
2210 return simplify_gen_binary (PLUS, mode, op0, op1); | |
2211 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */ | |
2212 if ((GET_CODE (op1) == CONST_INT | |
2213 || GET_CODE (op1) == CONST_DOUBLE) | |
2214 && GET_CODE (op0) == PLUS | |
2215 && (GET_CODE (XEXP (op0, 1)) == CONST_INT | |
2216 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE) | |
2217 && mode_signbit_p (mode, XEXP (op0, 1))) | |
2218 return simplify_gen_binary (XOR, mode, XEXP (op0, 0), | |
2219 simplify_gen_binary (XOR, mode, op1, | |
2220 XEXP (op0, 1))); | |
2221 | |
2222 /* If we are XORing two things that have no bits in common, | |
2223 convert them into an IOR. This helps to detect rotation encoded | |
2224 using those methods and possibly other simplifications. */ | |
2225 | |
2226 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT | |
2227 && (nonzero_bits (op0, mode) | |
2228 & nonzero_bits (op1, mode)) == 0) | |
2229 return (simplify_gen_binary (IOR, mode, op0, op1)); | |
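/* This is safe because a ^ b and a | b agree whenever a & b == 0, | |
   which nonzero_bits has just established for every bit position. */ | |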
2230 | |
2231 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y). | |
2232 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for | |
2233 (NOT y). */ | |
2234 { | |
2235 int num_negated = 0; | |
2236 | |
2237 if (GET_CODE (op0) == NOT) | |
2238 num_negated++, op0 = XEXP (op0, 0); | |
2239 if (GET_CODE (op1) == NOT) | |
2240 num_negated++, op1 = XEXP (op1, 0); | |
2241 | |
2242 if (num_negated == 2) | |
2243 return simplify_gen_binary (XOR, mode, op0, op1); | |
2244 else if (num_negated == 1) | |
2245 return simplify_gen_unary (NOT, mode, | |
2246 simplify_gen_binary (XOR, mode, op0, op1), | |
2247 mode); | |
2248 } | |
2249 | |
2250 /* Convert (xor (and A B) B) to (and (not A) B). The latter may | |
2251 correspond to a machine insn or result in further simplifications | |
2252 if B is a constant. */ | |
2253 | |
2254 if (GET_CODE (op0) == AND | |
2255 && rtx_equal_p (XEXP (op0, 1), op1) | |
2256 && ! side_effects_p (op1)) | |
2257 return simplify_gen_binary (AND, mode, | |
2258 simplify_gen_unary (NOT, mode, | |
2259 XEXP (op0, 0), mode), | |
2260 op1); | |
2261 | |
2262 else if (GET_CODE (op0) == AND | |
2263 && rtx_equal_p (XEXP (op0, 0), op1) | |
2264 && ! side_effects_p (op1)) | |
2265 return simplify_gen_binary (AND, mode, | |
2266 simplify_gen_unary (NOT, mode, | |
2267 XEXP (op0, 1), mode), | |
2268 op1); | |
2269 | |
2270 /* (xor (comparison foo bar) (const_int 1)) can become the reversed | |
2271 comparison if STORE_FLAG_VALUE is 1. */ | |
2272 if (STORE_FLAG_VALUE == 1 | |
2273 && trueop1 == const1_rtx | |
2274 && COMPARISON_P (op0) | |
2275 && (reversed = reversed_comparison (op0, mode))) | |
2276 return reversed; | |
2277 | |
2278 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1 | |
2279 is (lt foo (const_int 0)), so we can perform the above | |
2280 simplification if STORE_FLAG_VALUE is 1. */ | |
2281 | |
2282 if (STORE_FLAG_VALUE == 1 | |
2283 && trueop1 == const1_rtx | |
2284 && GET_CODE (op0) == LSHIFTRT | |
2285 && GET_CODE (XEXP (op0, 1)) == CONST_INT | |
2286 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1) | |
2287 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx); | |
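/* E.g. in SImode, (lshiftrt foo 31) extracts the sign bit of foo, | |
   i.e. (lt foo 0) when STORE_FLAG_VALUE == 1; XORing with 1 | |
   inverts that flag, giving (ge foo 0). */ | |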
2288 | |
2289 /* (xor (comparison foo bar) (const_int sign-bit)) can likewise be | |
2290 reversed when STORE_FLAG_VALUE is the sign bit. */ | |
2291 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT | |
2292 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode)) | |
2293 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)) | |
2294 && trueop1 == const_true_rtx | |
2295 && COMPARISON_P (op0) | |
2296 && (reversed = reversed_comparison (op0, mode))) | |
2297 return reversed; | |
2298 | |
2299 tem = simplify_associative_operation (code, mode, op0, op1); | |
2300 if (tem) | |
2301 return tem; | |
2302 break; | |
2303 | |
2304 case AND: | |
2305 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0)) | |
2306 return trueop1; | |
2307 if (GET_CODE (trueop1) == CONST_INT | |
2308 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) | |
2309 { | |
2310 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode); | |
2311 HOST_WIDE_INT val1 = INTVAL (trueop1); | |
2312 /* If we are turning off bits already known off in OP0, we need | |
2313 not do an AND. */ | |
2314 if ((nzop0 & ~val1) == 0) | |
2315 return op0; | |
2316 /* If we are clearing all the nonzero bits, the result is zero. */ | |
2317 if ((val1 & nzop0) == 0 && !side_effects_p (op0)) | |
2318 return CONST0_RTX (mode); | |
2319 } | |
2320 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0) | |
2321 && GET_MODE_CLASS (mode) != MODE_CC) | |
2322 return op0; | |
2323 /* A & (~A) -> 0 */ | |
2324 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1)) | |
2325 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0))) | |
2326 && ! side_effects_p (op0) | |
2327 && GET_MODE_CLASS (mode) != MODE_CC) | |
2328 return CONST0_RTX (mode); | |
2329 | |
2330 /* Transform (and (extend X) C) into (zero_extend (and X C)) if | |
2331 there are no nonzero bits of C outside of X's mode. */ | |
2332 if ((GET_CODE (op0) == SIGN_EXTEND | |
2333 || GET_CODE (op0) == ZERO_EXTEND) | |
2334 && GET_CODE (trueop1) == CONST_INT | |
2335 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT | |
2336 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0))) | |
2337 & INTVAL (trueop1)) == 0) | |
2338 { | |
2339 enum machine_mode imode = GET_MODE (XEXP (op0, 0)); | |
2340 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0), | |
2341 gen_int_mode (INTVAL (trueop1), | |
2342 imode)); | |
2343 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode); | |
2344 } | |
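/* E.g. (and (sign_extend:SI X:QI) (const_int 0x7C)): the mask only | |
   keeps bits determined by the QImode value, where sign- and | |
   zero-extension agree, so (zero_extend:SI (and:QI X 0x7C)) is | |
   equivalent. */ | |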
2345 | |
2346 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */ | |
2347 if (GET_CODE (op0) == IOR | |
2348 && GET_CODE (trueop1) == CONST_INT | |
2349 && GET_CODE (XEXP (op0, 1)) == CONST_INT) | |
2350 { | |
2351 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1)); | |
2352 return simplify_gen_binary (IOR, mode, | |
2353 simplify_gen_binary (AND, mode, | |
2354 XEXP (op0, 0), op1), | |
2355 gen_int_mode (tmp, mode)); | |
2356 } | |
2357 | |
2358 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single | |
2359 insn (and may simplify more). */ | |
2360 if (GET_CODE (op0) == XOR | |
2361 && rtx_equal_p (XEXP (op0, 0), op1) | |
2362 && ! side_effects_p (op1)) | |
2363 return simplify_gen_binary (AND, mode, | |
2364 simplify_gen_unary (NOT, mode, | |
2365 XEXP (op0, 1), mode), | |
2366 op1); | |
2367 | |
2368 if (GET_CODE (op0) == XOR | |
2369 && rtx_equal_p (XEXP (op0, 1), op1) | |
2370 && ! side_effects_p (op1)) | |
2371 return simplify_gen_binary (AND, mode, | |
2372 simplify_gen_unary (NOT, mode, | |
2373 XEXP (op0, 0), mode), | |
2374 op1); | |
2375 | |
2376 /* Similarly for (~(A ^ B)) & A. */ | |
2377 if (GET_CODE (op0) == NOT | |
2378 && GET_CODE (XEXP (op0, 0)) == XOR | |
2379 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1) | |
2380 && ! side_effects_p (op1)) | |
2381 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1); | |
2382 | |
2383 if (GET_CODE (op0) == NOT | |
2384 && GET_CODE (XEXP (op0, 0)) == XOR | |
2385 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1) | |
2386 && ! side_effects_p (op1)) | |
2387 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1); | |
2388 | |
2389 /* Convert (A | B) & A to A. */ | |
2390 if (GET_CODE (op0) == IOR | |
2391 && (rtx_equal_p (XEXP (op0, 0), op1) | |
2392 || rtx_equal_p (XEXP (op0, 1), op1)) | |
2393 && ! side_effects_p (XEXP (op0, 0)) | |
2394 && ! side_effects_p (XEXP (op0, 1))) | |
2395 return op1; | |
2396 | |
2397 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M, | |
2398 ((A & N) + B) & M -> (A + B) & M | |
2399 Similarly if (N & M) == 0, | |
2400 ((A | N) + B) & M -> (A + B) & M | |
2401 and for - instead of + and/or ^ instead of |. | |
2402 Also, if (N & M) == 0, then | |
2403 (A +- N) & M -> A & M. */ | |
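/* E.g. ((A + 0x100) & 0xFF) -> (A & 0xFF): since carries only | |
   propagate upwards, the added constant cannot disturb the low | |
   eight bits that survive the mask. */ | |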
2404 if (GET_CODE (trueop1) == CONST_INT | |
2405 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT | |
2406 && ~INTVAL (trueop1) | |
2407 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0 | |
2408 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS)) | |
2409 { | |
2410 rtx pmop[2]; | |
2411 int which; | |
2412 | |
2413 pmop[0] = XEXP (op0, 0); | |
2414 pmop[1] = XEXP (op0, 1); | |
2415 | |
2416 if (GET_CODE (pmop[1]) == CONST_INT | |
2417 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0) | |
2418 return simplify_gen_binary (AND, mode, pmop[0], op1); | |
2419 | |
2420 for (which = 0; which < 2; which++) | |
2421 { | |
2422 tem = pmop[which]; | |
2423 switch (GET_CODE (tem)) | |
2424 { | |
2425 case AND: | |
2426 if (GET_CODE (XEXP (tem, 1)) == CONST_INT | |
2427 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) | |
2428 == INTVAL (trueop1)) | |
2429 pmop[which] = XEXP (tem, 0); | |
2430 break; | |
2431 case IOR: | |
2432 case XOR: | |
2433 if (GET_CODE (XEXP (tem, 1)) == CONST_INT | |
2434 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0) | |
2435 pmop[which] = XEXP (tem, 0); | |
2436 break; | |
2437 default: | |
2438 break; | |
2439 } | |
2440 } | |
2441 | |
2442 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1)) | |
2443 { | |
2444 tem = simplify_gen_binary (GET_CODE (op0), mode, | |
2445 pmop[0], pmop[1]); | |
2446 return simplify_gen_binary (code, mode, tem, op1); | |
2447 } | |
2448 } | |
2449 | |
2450 /* (and X (ior (not X) Y)) -> (and X Y) */ | |
2451 if (GET_CODE (op1) == IOR | |
2452 && GET_CODE (XEXP (op1, 0)) == NOT | |
2453 && op0 == XEXP (XEXP (op1, 0), 0)) | |
2454 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1)); | |
2455 | |
2456 /* (and (ior (not X) Y) X) -> (and X Y) */ | |
2457 if (GET_CODE (op0) == IOR | |
2458 && GET_CODE (XEXP (op0, 0)) == NOT | |
2459 && op1 == XEXP (XEXP (op0, 0), 0)) | |
2460 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1)); | |
2461 | |
2462 tem = simplify_associative_operation (code, mode, op0, op1); | |
2463 if (tem) | |
2464 return tem; | |
2465 break; | |
2466 | |
2467 case UDIV: | |
2468 /* 0/x is 0 (or x&0 if x has side-effects). */ | |
2469 if (trueop0 == CONST0_RTX (mode)) | |
2470 { | |
2471 if (side_effects_p (op1)) | |
2472 return simplify_gen_binary (AND, mode, op1, trueop0); | |
2473 return trueop0; | |
2474 } | |
2475 /* x/1 is x. */ | |
2476 if (trueop1 == CONST1_RTX (mode)) | |
2477 return rtl_hooks.gen_lowpart_no_emit (mode, op0); | |
2478 /* Convert divide by power of two into shift. */ | |
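/* E.g. (udiv X (const_int 16)) becomes (lshiftrt X 4), since | |
   exact_log2 (16) == 4; this holds only because the division is | |
   unsigned. */ | |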
2479 if (GET_CODE (trueop1) == CONST_INT | |
2480 && (val = exact_log2 (INTVAL (trueop1))) > 0) | |
2481 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val)); | |
2482 break; | |
2483 | |
2484 case DIV: | |
2485 /* Handle floating point and integers separately. */ | |
2486 if (SCALAR_FLOAT_MODE_P (mode)) | |
2487 { | |
2488 /* Maybe change 0.0 / x to 0.0. This transformation isn't | |
2489 safe for modes with NaNs, since 0.0 / 0.0 will then be | |
2490 NaN rather than 0.0. Nor is it safe for modes with signed | |
2491 zeros, since dividing 0 by a negative number gives -0.0. */ | |
2492 if (trueop0 == CONST0_RTX (mode) | |
2493 && !HONOR_NANS (mode) | |
2494 && !HONOR_SIGNED_ZEROS (mode) | |
2495 && ! side_effects_p (op1)) | |
2496 return op0; | |
2497 /* x/1.0 is x. */ | |
2498 if (trueop1 == CONST1_RTX (mode) | |
2499 && !HONOR_SNANS (mode)) | |
2500 return op0; | |
2501 | |
2502 if (GET_CODE (trueop1) == CONST_DOUBLE | |
2503 && trueop1 != CONST0_RTX (mode)) | |
2504 { | |
2505 REAL_VALUE_TYPE d; | |
2506 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1); | |
2507 | |
2508 /* x/-1.0 is -x. */ | |
2509 if (REAL_VALUES_EQUAL (d, dconstm1) | |
2510 && !HONOR_SNANS (mode)) | |
2511 return simplify_gen_unary (NEG, mode, op0, mode); | |
2512 | |
2513 /* Change FP division by a constant into multiplication. | |
2514 Only do this with -freciprocal-math. */ | |
2515 if (flag_reciprocal_math | |
2516 && !REAL_VALUES_EQUAL (d, dconst0)) | |
2517 { | |
2518 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d); | |
2519 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode); | |
2520 return simplify_gen_binary (MULT, mode, op0, tem); | |
2521 } | |
2522 } | |
2523 } | |
2524 else | |
2525 { | |
2526 /* 0/x is 0 (or x&0 if x has side-effects). */ | |
2527 if (trueop0 == CONST0_RTX (mode)) | |
2528 { | |
2529 if (side_effects_p (op1)) | |
2530 return simplify_gen_binary (AND, mode, op1, trueop0); | |
2531 return trueop0; | |
2532 } | |
2533 /* x/1 is x. */ | |
2534 if (trueop1 == CONST1_RTX (mode)) | |
2535 return rtl_hooks.gen_lowpart_no_emit (mode, op0); | |
2536 /* x/-1 is -x. */ | |
2537 if (trueop1 == constm1_rtx) | |
2538 { | |
2539 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0); | |
2540 return simplify_gen_unary (NEG, mode, x, mode); | |
2541 } | |
2542 } | |
2543 break; | |
2544 | |
2545 case UMOD: | |
2546 /* 0%x is 0 (or x&0 if x has side-effects). */ | |
2547 if (trueop0 == CONST0_RTX (mode)) | |
2548 { | |
2549 if (side_effects_p (op1)) | |
2550 return simplify_gen_binary (AND, mode, op1, trueop0); | |
2551 return trueop0; | |
2552 } | |
2553 /* x%1 is 0 (or x&0 if x has side-effects). */ | |
2554 if (trueop1 == CONST1_RTX (mode)) | |
2555 { | |
2556 if (side_effects_p (op0)) | |
2557 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode)); | |
2558 return CONST0_RTX (mode); | |
2559 } | |
2560 /* Implement modulus by power of two as AND. */ | |
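/* E.g. (umod X (const_int 8)) becomes (and X 7): for a power of | |
   two the remainder is just the low-order bits. */ | |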
2561 if (GET_CODE (trueop1) == CONST_INT | |
2562 && exact_log2 (INTVAL (trueop1)) > 0) | |
2563 return simplify_gen_binary (AND, mode, op0, | |
2564 GEN_INT (INTVAL (op1) - 1)); | |
2565 break; | |
2566 | |
2567 case MOD: | |
2568 /* 0%x is 0 (or x&0 if x has side-effects). */ | |
2569 if (trueop0 == CONST0_RTX (mode)) | |
2570 { | |
2571 if (side_effects_p (op1)) | |
2572 return simplify_gen_binary (AND, mode, op1, trueop0); | |
2573 return trueop0; | |
2574 } | |
2575 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */ | |
2576 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx) | |
2577 { | |
2578 if (side_effects_p (op0)) | |
2579 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode)); | |
2580 return CONST0_RTX (mode); | |
2581 } | |
2582 break; | |
2583 | |
2584 case ROTATERT: | |
2585 case ROTATE: | |
2586 case ASHIFTRT: | |
2587 if (trueop1 == CONST0_RTX (mode)) | |
2588 return op0; | |
2589 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1)) | |
2590 return op0; | |
2591 /* Rotating ~0 always results in ~0. */ | |
2592 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT | |
2593 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode) | |
2594 && ! side_effects_p (op1)) | |
2595 return op0; | |
2596 canonicalize_shift: | |
2597 if (SHIFT_COUNT_TRUNCATED && GET_CODE (op1) == CONST_INT) | |
2598 { | |
2599 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1); | |
2600 if (val != INTVAL (op1)) | |
2601 return simplify_gen_binary (code, mode, op0, GEN_INT (val)); | |
2602 } | |
2603 break; | |
2604 | |
2605 case ASHIFT: | |
2606 case SS_ASHIFT: | |
2607 case US_ASHIFT: | |
2608 if (trueop1 == CONST0_RTX (mode)) | |
2609 return op0; | |
2610 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1)) | |
2611 return op0; | |
2612 goto canonicalize_shift; | |
2613 | |
2614 case LSHIFTRT: | |
2615 if (trueop1 == CONST0_RTX (mode)) | |
2616 return op0; | |
2617 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1)) | |
2618 return op0; | |
2619 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */ | |
2620 if (GET_CODE (op0) == CLZ | |
2621 && GET_CODE (trueop1) == CONST_INT | |
2622 && STORE_FLAG_VALUE == 1 | |
2623 && INTVAL (trueop1) < (HOST_WIDE_INT) width) | |
2624 { | |
2625 enum machine_mode imode = GET_MODE (XEXP (op0, 0)); | |
2626 unsigned HOST_WIDE_INT zero_val = 0; | |
2627 | |
2628 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val) | |
2629 && zero_val == GET_MODE_BITSIZE (imode) | |
2630 && INTVAL (trueop1) == exact_log2 (zero_val)) | |
2631 return simplify_gen_relational (EQ, mode, imode, | |
2632 XEXP (op0, 0), const0_rtx); | |
2633 } | |
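/* Rationale: CLZ yields values in 0 .. BITSIZE, and shifting right | |
   by log2 (BITSIZE) leaves 1 only for the value BITSIZE itself, | |
   which CLZ_DEFINED_VALUE_AT_ZERO guarantees occurs exactly when | |
   the operand is zero. */ | |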
2634 goto canonicalize_shift; | |
2635 | |
2636 case SMIN: | |
2637 if (width <= HOST_BITS_PER_WIDE_INT | |
2638 && GET_CODE (trueop1) == CONST_INT | |
2639 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1) | |
2640 && ! side_effects_p (op0)) | |
2641 return op1; | |
2642 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) | |
2643 return op0; | |
2644 tem = simplify_associative_operation (code, mode, op0, op1); | |
2645 if (tem) | |
2646 return tem; | |
2647 break; | |
2648 | |
2649 case SMAX: | |
2650 if (width <= HOST_BITS_PER_WIDE_INT | |
2651 && GET_CODE (trueop1) == CONST_INT | |
2652 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1) | |
2653 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1) | |
2654 && ! side_effects_p (op0)) | |
2655 return op1; | |
2656 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) | |
2657 return op0; | |
2658 tem = simplify_associative_operation (code, mode, op0, op1); | |
2659 if (tem) | |
2660 return tem; | |
2661 break; | |
2662 | |
2663 case UMIN: | |
2664 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0)) | |
2665 return op1; | |
2666 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) | |
2667 return op0; | |
2668 tem = simplify_associative_operation (code, mode, op0, op1); | |
2669 if (tem) | |
2670 return tem; | |
2671 break; | |
2672 | |
2673 case UMAX: | |
2674 if (trueop1 == constm1_rtx && ! side_effects_p (op0)) | |
2675 return op1; | |
2676 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) | |
2677 return op0; | |
2678 tem = simplify_associative_operation (code, mode, op0, op1); | |
2679 if (tem) | |
2680 return tem; | |
2681 break; | |
2682 | |
2683 case SS_PLUS: | |
2684 case US_PLUS: | |
2685 case SS_MINUS: | |
2686 case US_MINUS: | |
2687 case SS_MULT: | |
2688 case US_MULT: | |
2689 case SS_DIV: | |
2690 case US_DIV: | |
2691 /* ??? There are simplifications that can be done. */ | |
2692 return 0; | |
2693 | |
2694 case VEC_SELECT: | |
2695 if (!VECTOR_MODE_P (mode)) | |
2696 { | |
2697 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0))); | |
2698 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0))); | |
2699 gcc_assert (GET_CODE (trueop1) == PARALLEL); | |
2700 gcc_assert (XVECLEN (trueop1, 0) == 1); | |
2701 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT); | |
2702 | |
2703 if (GET_CODE (trueop0) == CONST_VECTOR) | |
2704 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP | |
2705 (trueop1, 0, 0))); | |
2706 | |
2707 /* Extract a scalar element from a nested VEC_SELECT expression | |
2708 (with optional nested VEC_CONCAT expression). Some targets | |
2709 (i386) extract a scalar element from a vector using a chain of | |
2710 nested VEC_SELECT expressions. When the input operand is a | |
2711 memory operand, this operation can be simplified to a simple | |
2712 scalar load from a suitably offset memory address. */ | |
2713 if (GET_CODE (trueop0) == VEC_SELECT) | |
2714 { | |
2715 rtx op0 = XEXP (trueop0, 0); | |
2716 rtx op1 = XEXP (trueop0, 1); | |
2717 | |
2718 enum machine_mode opmode = GET_MODE (op0); | |
2719 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode)); | |
2720 int n_elts = GET_MODE_SIZE (opmode) / elt_size; | |
2721 | |
2722 int i = INTVAL (XVECEXP (trueop1, 0, 0)); | |
2723 int elem; | |
2724 | |
2725 rtvec vec; | |
2726 rtx tmp_op, tmp; | |
2727 | |
2728 gcc_assert (GET_CODE (op1) == PARALLEL); | |
2729 gcc_assert (i < n_elts); | |
2730 | |
2731 /* Select the element pointed to by the nested selector. */ | |
2732 elem = INTVAL (XVECEXP (op1, 0, i)); | |
2733 | |
2734 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */ | |
2735 if (GET_CODE (op0) == VEC_CONCAT) | |
2736 { | |
2737 rtx op00 = XEXP (op0, 0); | |
2738 rtx op01 = XEXP (op0, 1); | |
2739 | |
2740 enum machine_mode mode00, mode01; | |
2741 int n_elts00, n_elts01; | |
2742 | |
2743 mode00 = GET_MODE (op00); | |
2744 mode01 = GET_MODE (op01); | |
2745 | |
2746 /* Find out the number of elements of each operand. */ | |
2747 if (VECTOR_MODE_P (mode00)) | |
2748 { | |
2749 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00)); | |
2750 n_elts00 = GET_MODE_SIZE (mode00) / elt_size; | |
2751 } | |
2752 else | |
2753 n_elts00 = 1; | |
2754 | |
2755 if (VECTOR_MODE_P (mode01)) | |
2756 { | |
2757 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01)); | |
2758 n_elts01 = GET_MODE_SIZE (mode01) / elt_size; | |
2759 } | |
2760 else | |
2761 n_elts01 = 1; | |
2762 | |
2763 gcc_assert (n_elts == n_elts00 + n_elts01); | |
2764 | |
2765 /* Select the correct operand of VEC_CONCAT and adjust the | |
2766 selector; elements 0 .. n_elts00-1 come from op00. */ | |
2767 if (elem < n_elts00) | |
2768 tmp_op = op00; | |
2769 else | |
2770 { | |
2771 tmp_op = op01; | |
2772 elem -= n_elts00; | |
2773 } | |
2774 } | |
2775 else | |
2776 tmp_op = op0; | |
2777 | |
2778 vec = rtvec_alloc (1); | |
2779 RTVEC_ELT (vec, 0) = GEN_INT (elem); | |
2780 | |
2781 tmp = gen_rtx_fmt_ee (code, mode, | |
2782 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec)); | |
2783 return tmp; | |
2784 } | |
2785 } | |
2786 else | |
2787 { | |
2788 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0))); | |
2789 gcc_assert (GET_MODE_INNER (mode) | |
2790 == GET_MODE_INNER (GET_MODE (trueop0))); | |
2791 gcc_assert (GET_CODE (trueop1) == PARALLEL); | |
2792 | |
2793 if (GET_CODE (trueop0) == CONST_VECTOR) | |
2794 { | |
2795 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); | |
2796 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); | |
2797 rtvec v = rtvec_alloc (n_elts); | |
2798 unsigned int i; | |
2799 | |
2800 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts); | |
2801 for (i = 0; i < n_elts; i++) | |
2802 { | |
2803 rtx x = XVECEXP (trueop1, 0, i); | |
2804 | |
2805 gcc_assert (GET_CODE (x) == CONST_INT); | |
2806 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, | |
2807 INTVAL (x)); | |
2808 } | |
2809 | |
2810 return gen_rtx_CONST_VECTOR (mode, v); | |
2811 } | |
2812 } | |
2813 | |
2814 if (XVECLEN (trueop1, 0) == 1 | |
2815 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT | |
2816 && GET_CODE (trueop0) == VEC_CONCAT) | |
2817 { | |
2818 rtx vec = trueop0; | |
2819 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode); | |
2820 | |
2821 /* Try to find the element in the VEC_CONCAT. */ | |
2822 while (GET_MODE (vec) != mode | |
2823 && GET_CODE (vec) == VEC_CONCAT) | |
2824 { | |
2825 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0))); | |
2826 if (offset < vec_size) | |
2827 vec = XEXP (vec, 0); | |
2828 else | |
2829 { | |
2830 offset -= vec_size; | |
2831 vec = XEXP (vec, 1); | |
2832 } | |
2833 vec = avoid_constant_pool_reference (vec); | |
2834 } | |
2835 | |
2836 if (GET_MODE (vec) == mode) | |
2837 return vec; | |
2838 } | |
2839 | |
2840 return 0; | |
2841 case VEC_CONCAT: | |
2842 { | |
2843 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode | |
2844 ? GET_MODE (trueop0) | |
2845 : GET_MODE_INNER (mode)); | |
2846 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode | |
2847 ? GET_MODE (trueop1) | |
2848 : GET_MODE_INNER (mode)); | |
2849 | |
2850 gcc_assert (VECTOR_MODE_P (mode)); | |
2851 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode) | |
2852 == GET_MODE_SIZE (mode)); | |
2853 | |
2854 if (VECTOR_MODE_P (op0_mode)) | |
2855 gcc_assert (GET_MODE_INNER (mode) | |
2856 == GET_MODE_INNER (op0_mode)); | |
2857 else | |
2858 gcc_assert (GET_MODE_INNER (mode) == op0_mode); | |
2859 | |
2860 if (VECTOR_MODE_P (op1_mode)) | |
2861 gcc_assert (GET_MODE_INNER (mode) | |
2862 == GET_MODE_INNER (op1_mode)); | |
2863 else | |
2864 gcc_assert (GET_MODE_INNER (mode) == op1_mode); | |
2865 | |
2866 if ((GET_CODE (trueop0) == CONST_VECTOR | |
2867 || GET_CODE (trueop0) == CONST_INT | |
2868 || GET_CODE (trueop0) == CONST_DOUBLE) | |
2869 && (GET_CODE (trueop1) == CONST_VECTOR | |
2870 || GET_CODE (trueop1) == CONST_INT | |
2871 || GET_CODE (trueop1) == CONST_DOUBLE)) | |
2872 { | |
2873 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); | |
2874 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); | |
2875 rtvec v = rtvec_alloc (n_elts); | |
2876 unsigned int i; | |
2877 unsigned in_n_elts = 1; | |
2878 | |
2879 if (VECTOR_MODE_P (op0_mode)) | |
2880 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size); | |
2881 for (i = 0; i < n_elts; i++) | |
2882 { | |
2883 if (i < in_n_elts) | |
2884 { | |
2885 if (!VECTOR_MODE_P (op0_mode)) | |
2886 RTVEC_ELT (v, i) = trueop0; | |
2887 else | |
2888 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i); | |
2889 } | |
2890 else | |
2891 { | |
2892 if (!VECTOR_MODE_P (op1_mode)) | |
2893 RTVEC_ELT (v, i) = trueop1; | |
2894 else | |
2895 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1, | |
2896 i - in_n_elts); | |
2897 } | |
2898 } | |
2899 | |
2900 return gen_rtx_CONST_VECTOR (mode, v); | |
2901 } | |
2902 } | |
2903 return 0; | |
2904 | |
2905 default: | |
2906 gcc_unreachable (); | |
2907 } | |
2908 | |
2909 return 0; | |
2910 } | |
2911 | |
2912 rtx | |
2913 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, | |
2914 rtx op0, rtx op1) | |
2915 { | |
2916 HOST_WIDE_INT arg0, arg1, arg0s, arg1s; | |
2917 HOST_WIDE_INT val; | |
2918 unsigned int width = GET_MODE_BITSIZE (mode); | |
2919 | |
2920 if (VECTOR_MODE_P (mode) | |
2921 && code != VEC_CONCAT | |
2922 && GET_CODE (op0) == CONST_VECTOR | |
2923 && GET_CODE (op1) == CONST_VECTOR) | |
2924 { | |
2925 unsigned n_elts = GET_MODE_NUNITS (mode); | |
2926 enum machine_mode op0mode = GET_MODE (op0); | |
2927 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode); | |
2928 enum machine_mode op1mode = GET_MODE (op1); | |
2929 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode); | |
2930 rtvec v = rtvec_alloc (n_elts); | |
2931 unsigned int i; | |
2932 | |
2933 gcc_assert (op0_n_elts == n_elts); | |
2934 gcc_assert (op1_n_elts == n_elts); | |
2935 for (i = 0; i < n_elts; i++) | |
2936 { | |
2937 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode), | |
2938 CONST_VECTOR_ELT (op0, i), | |
2939 CONST_VECTOR_ELT (op1, i)); | |
2940 if (!x) | |
2941 return 0; | |
2942 RTVEC_ELT (v, i) = x; | |
2943 } | |
2944 | |
2945 return gen_rtx_CONST_VECTOR (mode, v); | |
2946 } | |
2947 | |
2948 if (VECTOR_MODE_P (mode) | |
2949 && code == VEC_CONCAT | |
2950 && (CONST_INT_P (op0) | |
2951 || GET_CODE (op0) == CONST_DOUBLE | |
2952 || GET_CODE (op0) == CONST_FIXED) | |
2953 && (CONST_INT_P (op1) | |
2954 || GET_CODE (op1) == CONST_DOUBLE | |
2955 || GET_CODE (op1) == CONST_FIXED)) | |
2956 { | |
2957 unsigned n_elts = GET_MODE_NUNITS (mode); | |
2958 rtvec v = rtvec_alloc (n_elts); | |
2959 | |
2960 gcc_assert (n_elts >= 2); | |
2961 if (n_elts == 2) | |
2962 { | |
2963 gcc_assert (GET_CODE (op0) != CONST_VECTOR); | |
2964 gcc_assert (GET_CODE (op1) != CONST_VECTOR); | |
2965 | |
2966 RTVEC_ELT (v, 0) = op0; | |
2967 RTVEC_ELT (v, 1) = op1; | |
2968 } | |
2969 else | |
2970 { | |
2971 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)); | |
2972 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)); | |
2973 unsigned i; | |
2974 | |
2975 gcc_assert (GET_CODE (op0) == CONST_VECTOR); | |
2976 gcc_assert (GET_CODE (op1) == CONST_VECTOR); | |
2977 gcc_assert (op0_n_elts + op1_n_elts == n_elts); | |
2978 | |
2979 for (i = 0; i < op0_n_elts; ++i) | |
2980 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i); | |
2981 for (i = 0; i < op1_n_elts; ++i) | |
2982 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i); | |
2983 } | |
2984 | |
2985 return gen_rtx_CONST_VECTOR (mode, v); | |
2986 } | |
2987 | |
2988 if (SCALAR_FLOAT_MODE_P (mode) | |
2989 && GET_CODE (op0) == CONST_DOUBLE | |
2990 && GET_CODE (op1) == CONST_DOUBLE | |
2991 && mode == GET_MODE (op0) && mode == GET_MODE (op1)) | |
2992 { | |
2993 if (code == AND | |
2994 || code == IOR | |
2995 || code == XOR) | |
2996 { | |
2997 long tmp0[4]; | |
2998 long tmp1[4]; | |
2999 REAL_VALUE_TYPE r; | |
3000 int i; | |
3001 | |
3002 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0), | |
3003 GET_MODE (op0)); | |
3004 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1), | |
3005 GET_MODE (op1)); | |
3006 for (i = 0; i < 4; i++) | |
3007 { | |
3008 switch (code) | |
3009 { | |
3010 case AND: | |
3011 tmp0[i] &= tmp1[i]; | |
3012 break; | |
3013 case IOR: | |
3014 tmp0[i] |= tmp1[i]; | |
3015 break; | |
3016 case XOR: | |
3017 tmp0[i] ^= tmp1[i]; | |
3018 break; | |
3019 default: | |
3020 gcc_unreachable (); | |
3021 } | |
3022 } | |
3023 real_from_target (&r, tmp0, mode); | |
3024 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode); | |
3025 } | |
3026 else | |
3027 { | |
3028 REAL_VALUE_TYPE f0, f1, value, result; | |
3029 bool inexact; | |
3030 | |
3031 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0); | |
3032 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1); | |
3033 real_convert (&f0, mode, &f0); | |
3034 real_convert (&f1, mode, &f1); | |
3035 | |
3036 if (HONOR_SNANS (mode) | |
3037 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1))) | |
3038 return 0; | |
3039 | |
3040 if (code == DIV | |
3041 && REAL_VALUES_EQUAL (f1, dconst0) | |
3042 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode))) | |
3043 return 0; | |
3044 | |
3045 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode) | |
3046 && flag_trapping_math | |
3047 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1)) | |
3048 { | |
3049 int s0 = REAL_VALUE_NEGATIVE (f0); | |
3050 int s1 = REAL_VALUE_NEGATIVE (f1); | |
3051 | |
3052 switch (code) | |
3053 { | |
3054 case PLUS: | |
3055 /* Inf + -Inf = NaN plus exception. */ | |
3056 if (s0 != s1) | |
3057 return 0; | |
3058 break; | |
3059 case MINUS: | |
3060 /* Inf - Inf = NaN plus exception. */ | |
3061 if (s0 == s1) | |
3062 return 0; | |
3063 break; | |
3064 case DIV: | |
3065 /* Inf / Inf = NaN plus exception. */ | |
3066 return 0; | |
3067 default: | |
3068 break; | |
3069 } | |
3070 } | |
3071 | |
3072 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode) | |
3073 && flag_trapping_math | |
3074 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0)) | |
3075 || (REAL_VALUE_ISINF (f1) | |
3076 && REAL_VALUES_EQUAL (f0, dconst0)))) | |
3077 /* Inf * 0 = NaN plus exception. */ | |
3078 return 0; | |
3079 | |
3080 inexact = real_arithmetic (&value, rtx_to_tree_code (code), | |
3081 &f0, &f1); | |
3082 real_convert (&result, mode, &value); | |
3083 | |
3084 /* Don't constant fold this floating point operation if | |
3085 the result has overflowed and flag_trapping_math is set. */ | |
3086 | |
3087 if (flag_trapping_math | |
3088 && MODE_HAS_INFINITIES (mode) | |
3089 && REAL_VALUE_ISINF (result) | |
3090 && !REAL_VALUE_ISINF (f0) | |
3091 && !REAL_VALUE_ISINF (f1)) | |
3092 /* Overflow plus exception. */ | |
3093 return 0; | |
3094 | |
3095 /* Don't constant fold this floating point operation if the | |
3096 result may depend upon the run-time rounding mode and | |
3097 flag_rounding_math is set, or if GCC's software emulation | |
3098 is unable to accurately represent the result. */ | |
3099 | |
3100 if ((flag_rounding_math | |
3101 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations)) | |
3102 && (inexact || !real_identical (&result, &value))) | |
3103 return NULL_RTX; | |
3104 | |
3105 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode); | |
3106 } | |
3107 } | |
3108 | |
3109 /* We can fold some multi-word operations. */ | |
3110 if (GET_MODE_CLASS (mode) == MODE_INT | |
3111 && width == HOST_BITS_PER_WIDE_INT * 2 | |
3112 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT) | |
3113 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT)) | |
3114 { | |
3115 unsigned HOST_WIDE_INT l1, l2, lv, lt; | |
3116 HOST_WIDE_INT h1, h2, hv, ht; | |
3117 | |
3118 if (GET_CODE (op0) == CONST_DOUBLE) | |
3119 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0); | |
3120 else | |
3121 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1); | |
3122 | |
3123 if (GET_CODE (op1) == CONST_DOUBLE) | |
3124 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1); | |
3125 else | |
3126 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2); | |
3127 | |
3128 switch (code) | |
3129 { | |
3130 case MINUS: | |
3131 /* A - B == A + (-B). */ | |
3132 neg_double (l2, h2, &lv, &hv); | |
3133 l2 = lv, h2 = hv; | |
3134 | |
3135 /* Fall through.... */ | |
3136 | |
3137 case PLUS: | |
3138 add_double (l1, h1, l2, h2, &lv, &hv); | |
3139 break; | |
3140 | |
3141 case MULT: | |
3142 mul_double (l1, h1, l2, h2, &lv, &hv); | |
3143 break; | |
3144 | |
3145 case DIV: | |
3146 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2, | |
3147 &lv, &hv, <, &ht)) | |
3148 return 0; | |
3149 break; | |
3150 | |
3151 case MOD: | |
3152 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2, | |
3153 <, &ht, &lv, &hv)) | |
3154 return 0; | |
3155 break; | |
3156 | |
3157 case UDIV: | |
3158 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2, | |
3159 &lv, &hv, <, &ht)) | |
3160 return 0; | |
3161 break; | |
3162 | |
3163 case UMOD: | |
3164 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2, | |
3165 <, &ht, &lv, &hv)) | |
3166 return 0; | |
3167 break; | |
3168 | |
3169 case AND: | |
3170 lv = l1 & l2, hv = h1 & h2; | |
3171 break; | |
3172 | |
3173 case IOR: | |
3174 lv = l1 | l2, hv = h1 | h2; | |
3175 break; | |
3176 | |
3177 case XOR: | |
3178 lv = l1 ^ l2, hv = h1 ^ h2; | |
3179 break; | |
3180 | |
3181 case SMIN: | |
3182 if (h1 < h2 | |
3183 || (h1 == h2 | |
3184 && ((unsigned HOST_WIDE_INT) l1 | |
3185 < (unsigned HOST_WIDE_INT) l2))) | |
3186 lv = l1, hv = h1; | |
3187 else | |
3188 lv = l2, hv = h2; | |
3189 break; | |
3190 | |
3191 case SMAX: | |
3192 if (h1 > h2 | |
3193 || (h1 == h2 | |
3194 && ((unsigned HOST_WIDE_INT) l1 | |
3195 > (unsigned HOST_WIDE_INT) l2))) | |
3196 lv = l1, hv = h1; | |
3197 else | |
3198 lv = l2, hv = h2; | |
3199 break; | |
3200 | |
3201 case UMIN: | |
3202 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2 | |
3203 || (h1 == h2 | |
3204 && ((unsigned HOST_WIDE_INT) l1 | |
3205 < (unsigned HOST_WIDE_INT) l2))) | |
3206 lv = l1, hv = h1; | |
3207 else | |
3208 lv = l2, hv = h2; | |
3209 break; | |
3210 | |
3211 case UMAX: | |
3212 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2 | |
3213 || (h1 == h2 | |
3214 && ((unsigned HOST_WIDE_INT) l1 | |
3215 > (unsigned HOST_WIDE_INT) l2))) | |
3216 lv = l1, hv = h1; | |
3217 else | |
3218 lv = l2, hv = h2; | |
3219 break; | |
3220 | |
3221 case LSHIFTRT: case ASHIFTRT: | |
3222 case ASHIFT: | |
3223 case ROTATE: case ROTATERT: | |
3224 if (SHIFT_COUNT_TRUNCATED) | |
3225 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0; | |
3226 | |
3227 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode)) | |
3228 return 0; | |
3229 | |
3230 if (code == LSHIFTRT || code == ASHIFTRT) | |
3231 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, | |
3232 code == ASHIFTRT); | |
3233 else if (code == ASHIFT) | |
3234 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1); | |
3235 else if (code == ROTATE) | |
3236 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv); | |
3237 else /* code == ROTATERT */ | |
3238 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv); | |
3239 break; | |
3240 | |
3241 default: | |
3242 return 0; | |
3243 } | |
3244 | |
3245 return immed_double_const (lv, hv, mode); | |
3246 } | |
3247 | |
3248 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT | |
3249 && width <= HOST_BITS_PER_WIDE_INT && width != 0) | |
3250 { | |
3251 /* Get the integer argument values in two forms: | |
3252 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */ | |
3253 | |
3254 arg0 = INTVAL (op0); | |
3255 arg1 = INTVAL (op1); | |
3256 | |
3257 if (width < HOST_BITS_PER_WIDE_INT) | |
3258 { | |
3259 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1; | |
3260 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1; | |
3261 | |
3262 arg0s = arg0; | |
3263 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1))) | |
3264 arg0s |= ((HOST_WIDE_INT) (-1) << width); | |
3265 | |
3266 arg1s = arg1; | |
3267 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1))) | |
3268 arg1s |= ((HOST_WIDE_INT) (-1) << width); | |
3269 } | |
3270 else | |
3271 { | |
3272 arg0s = arg0; | |
3273 arg1s = arg1; | |
3274 } | |
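/* A worked illustration (values invented for this note, not from the
   source): in QImode, where WIDTH is 8, an operand whose low eight bits
   are all ones gives arg0 == 255 in the zero-extended form and
   arg0s == -1 in the sign-extended form.  The signed cases below (DIV,
   SMIN, ...) use the S-suffixed values; the unsigned ones (UDIV,
   UMIN, ...) use the plain values.  */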
3275 | |
3276 /* Compute the value of the arithmetic. */ | |
3277 | |
3278 switch (code) | |
3279 { | |
3280 case PLUS: | |
3281 val = arg0s + arg1s; | |
3282 break; | |
3283 | |
3284 case MINUS: | |
3285 val = arg0s - arg1s; | |
3286 break; | |
3287 | |
3288 case MULT: | |
3289 val = arg0s * arg1s; | |
3290 break; | |
3291 | |
3292 case DIV: | |
3293 if (arg1s == 0 | |
3294 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
3295 && arg1s == -1)) | |
3296 return 0; | |
3297 val = arg0s / arg1s; | |
3298 break; | |
3299 | |
3300 case MOD: | |
3301 if (arg1s == 0 | |
3302 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
3303 && arg1s == -1)) | |
3304 return 0; | |
3305 val = arg0s % arg1s; | |
3306 break; | |
3307 | |
3308 case UDIV: | |
3309 if (arg1 == 0 | |
3310 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
3311 && arg1s == -1)) | |
3312 return 0; | |
3313 val = (unsigned HOST_WIDE_INT) arg0 / arg1; | |
3314 break; | |
3315 | |
3316 case UMOD: | |
3317 if (arg1 == 0 | |
3318 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) | |
3319 && arg1s == -1)) | |
3320 return 0; | |
3321 val = (unsigned HOST_WIDE_INT) arg0 % arg1; | |
3322 break; | |
3323 | |
3324 case AND: | |
3325 val = arg0 & arg1; | |
3326 break; | |
3327 | |
3328 case IOR: | |
3329 val = arg0 | arg1; | |
3330 break; | |
3331 | |
3332 case XOR: | |
3333 val = arg0 ^ arg1; | |
3334 break; | |
3335 | |
3336 case LSHIFTRT: | |
3337 case ASHIFT: | |
3338 case ASHIFTRT: | |
3339 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure | |
3340 the value is in range. We can't return any old value for | |
3341 out-of-range arguments because either the middle-end (via | |
3342 shift_truncation_mask) or the back-end might be relying on | |
3343 target-specific knowledge. Nor can we rely on | |
3344 shift_truncation_mask, since the shift might not be part of an | |
3345 ashlM3, lshrM3 or ashrM3 instruction. */ | |
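/* Concretely (an illustrative case, not from the source): with
   SHIFT_COUNT_TRUNCATED on a target where SImode is 32 bits wide, a
   shift count of 33 is reduced to 33 % 32 == 1; without it, the
   out-of-range count makes us give up and return 0.  */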
3346 if (SHIFT_COUNT_TRUNCATED) | |
3347 arg1 = (unsigned HOST_WIDE_INT) arg1 % width; | |
3348 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode)) | |
3349 return 0; | |
3350 | |
3351 val = (code == ASHIFT | |
3352 ? ((unsigned HOST_WIDE_INT) arg0) << arg1 | |
3353 : ((unsigned HOST_WIDE_INT) arg0) >> arg1); | |
3354 | |
3355 /* Sign-extend the result for arithmetic right shifts. */ | |
3356 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0) | |
3357 val |= ((HOST_WIDE_INT) -1) << (width - arg1); | |
3358 break; | |
3359 | |
3360 case ROTATERT: | |
3361 if (arg1 < 0) | |
3362 return 0; | |
3363 | |
3364 arg1 %= width; | |
3365 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1)) | |
3366 | (((unsigned HOST_WIDE_INT) arg0) >> arg1)); | |
3367 break; | |
3368 | |
3369 case ROTATE: | |
3370 if (arg1 < 0) | |
3371 return 0; | |
3372 | |
3373 arg1 %= width; | |
3374 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1) | |
3375 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1))); | |
3376 break; | |
3377 | |
3378 case COMPARE: | |
3379 /* Do nothing here. */ | |
3380 return 0; | |
3381 | |
3382 case SMIN: | |
3383 val = arg0s <= arg1s ? arg0s : arg1s; | |
3384 break; | |
3385 | |
3386 case UMIN: | |
3387 val = ((unsigned HOST_WIDE_INT) arg0 | |
3388 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); | |
3389 break; | |
3390 | |
3391 case SMAX: | |
3392 val = arg0s > arg1s ? arg0s : arg1s; | |
3393 break; | |
3394 | |
3395 case UMAX: | |
3396 val = ((unsigned HOST_WIDE_INT) arg0 | |
3397 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); | |
3398 break; | |
3399 | |
3400 case SS_PLUS: | |
3401 case US_PLUS: | |
3402 case SS_MINUS: | |
3403 case US_MINUS: | |
3404 case SS_MULT: | |
3405 case US_MULT: | |
3406 case SS_DIV: | |
3407 case US_DIV: | |
3408 case SS_ASHIFT: | |
3409 case US_ASHIFT: | |
3410 /* ??? There are simplifications that can be done. */ | |
3411 return 0; | |
3412 | |
3413 default: | |
3414 gcc_unreachable (); | |
3415 } | |
3416 | |
3417 return gen_int_mode (val, mode); | |
3418 } | |
3419 | |
3420 return NULL_RTX; | |
3421 } | |
3422 | |
3423 | |
3424 | |
3425 /* Simplify a PLUS or MINUS, at least one of whose operands may be another | |
3426 PLUS or MINUS. | |
3427 | |
3428 Rather than testing for specific cases, we use a brute-force method: | |
3429 apply every possible simplification until no more changes occur, then | |
3430 rebuild the operation. */ | |
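/* For example (a hypothetical input, used only to illustrate the idea):
   (minus (plus (reg A) (reg B)) (neg (reg C))) is first flattened into
   the operand list {+A, +B, +C}; pairs of operands are then combined
   where possible, and the survivors are rebuilt into a chain of
   PLUS/MINUS expressions.  */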
3431 | |
3432 struct simplify_plus_minus_op_data | |
3433 { | |
3434 rtx op; | |
3435 short neg; | |
3436 }; | |
3437 | |
3438 static bool | |
3439 simplify_plus_minus_op_data_cmp (rtx x, rtx y) | |
3440 { | |
3441 int result; | |
3442 | |
3443 result = (commutative_operand_precedence (y) | |
3444 - commutative_operand_precedence (x)); | |
3445 if (result) | |
3446 return result > 0; | |
3447 | |
3448 /* Group together equal REGs to do more simplification. */ | |
3449 if (REG_P (x) && REG_P (y)) | |
3450 return REGNO (x) > REGNO (y); | |
3451 else | |
3452 return false; | |
3453 } | |
3454 | |
3455 static rtx | |
3456 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, | |
3457 rtx op1) | |
3458 { | |
3459 struct simplify_plus_minus_op_data ops[8]; | |
3460 rtx result, tem; | |
3461 int n_ops = 2, input_ops = 2; | |
3462 int changed, n_constants = 0, canonicalized = 0; | |
3463 int i, j; | |
3464 | |
3465 memset (ops, 0, sizeof ops); | |
3466 | |
3467 /* Set up the two operands and then expand them until nothing has been | |
3468 changed. If we run out of room in our array, give up; this should | |
3469 almost never happen. */ | |
3470 | |
3471 ops[0].op = op0; | |
3472 ops[0].neg = 0; | |
3473 ops[1].op = op1; | |
3474 ops[1].neg = (code == MINUS); | |
3475 | |
3476 do | |
3477 { | |
3478 changed = 0; | |
3479 | |
3480 for (i = 0; i < n_ops; i++) | |
3481 { | |
3482 rtx this_op = ops[i].op; | |
3483 int this_neg = ops[i].neg; | |
3484 enum rtx_code this_code = GET_CODE (this_op); | |
3485 | |
3486 switch (this_code) | |
3487 { | |
3488 case PLUS: | |
3489 case MINUS: | |
3490 if (n_ops == 7) | |
3491 return NULL_RTX; | |
3492 | |
3493 ops[n_ops].op = XEXP (this_op, 1); | |
3494 ops[n_ops].neg = (this_code == MINUS) ^ this_neg; | |
3495 n_ops++; | |
3496 | |
3497 ops[i].op = XEXP (this_op, 0); | |
3498 input_ops++; | |
3499 changed = 1; | |
3500 canonicalized |= this_neg; | |
3501 break; | |
3502 | |
3503 case NEG: | |
3504 ops[i].op = XEXP (this_op, 0); | |
3505 ops[i].neg = ! this_neg; | |
3506 changed = 1; | |
3507 canonicalized = 1; | |
3508 break; | |
3509 | |
3510 case CONST: | |
3511 if (n_ops < 7 | |
3512 && GET_CODE (XEXP (this_op, 0)) == PLUS | |
3513 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0)) | |
3514 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1))) | |
3515 { | |
3516 ops[i].op = XEXP (XEXP (this_op, 0), 0); | |
3517 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1); | |
3518 ops[n_ops].neg = this_neg; | |
3519 n_ops++; | |
3520 changed = 1; | |
3521 canonicalized = 1; | |
3522 } | |
3523 break; | |
3524 | |
3525 case NOT: | |
3526 /* ~a -> (-a - 1) */ | |
3527 if (n_ops != 7) | |
3528 { | |
3529 ops[n_ops].op = constm1_rtx; | |
3530 ops[n_ops++].neg = this_neg; | |
3531 ops[i].op = XEXP (this_op, 0); | |
3532 ops[i].neg = !this_neg; | |
3533 changed = 1; | |
3534 canonicalized = 1; | |
3535 } | |
3536 break; | |
3537 | |
3538 case CONST_INT: | |
3539 n_constants++; | |
3540 if (this_neg) | |
3541 { | |
3542 ops[i].op = neg_const_int (mode, this_op); | |
3543 ops[i].neg = 0; | |
3544 changed = 1; | |
3545 canonicalized = 1; | |
3546 } | |
3547 break; | |
3548 | |
3549 default: | |
3550 break; | |
3551 } | |
3552 } | |
3553 } | |
3554 while (changed); | |
3555 | |
3556 if (n_constants > 1) | |
3557 canonicalized = 1; | |
3558 | |
3559 gcc_assert (n_ops >= 2); | |
3560 | |
3561 /* If we only have two operands, we can avoid the loops. */ | |
3562 if (n_ops == 2) | |
3563 { | |
3564 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS; | |
3565 rtx lhs, rhs; | |
3566 | |
3567 /* Get the two operands. Be careful with the order, especially for | |
3568 the cases where code == MINUS. */ | |
3569 if (ops[0].neg && ops[1].neg) | |
3570 { | |
3571 lhs = gen_rtx_NEG (mode, ops[0].op); | |
3572 rhs = ops[1].op; | |
3573 } | |
3574 else if (ops[0].neg) | |
3575 { | |
3576 lhs = ops[1].op; | |
3577 rhs = ops[0].op; | |
3578 } | |
3579 else | |
3580 { | |
3581 lhs = ops[0].op; | |
3582 rhs = ops[1].op; | |
3583 } | |
3584 | |
3585 return simplify_const_binary_operation (code, mode, lhs, rhs); | |
3586 } | |
3587 | |
3588 /* Now simplify each pair of operands until nothing changes. */ | |
3589 do | |
3590 { | |
3591 /* Insertion sort is good enough for an eight-element array. */ | |
3592 for (i = 1; i < n_ops; i++) | |
3593 { | |
3594 struct simplify_plus_minus_op_data save; | |
3595 j = i - 1; | |
3596 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op)) | |
3597 continue; | |
3598 | |
3599 canonicalized = 1; | |
3600 save = ops[i]; | |
3601 do | |
3602 ops[j + 1] = ops[j]; | |
3603 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op)); | |
3604 ops[j + 1] = save; | |
3605 } | |
3606 | |
3607 changed = 0; | |
3608 for (i = n_ops - 1; i > 0; i--) | |
3609 for (j = i - 1; j >= 0; j--) | |
3610 { | |
3611 rtx lhs = ops[j].op, rhs = ops[i].op; | |
3612 int lneg = ops[j].neg, rneg = ops[i].neg; | |
3613 | |
3614 if (lhs != 0 && rhs != 0) | |
3615 { | |
3616 enum rtx_code ncode = PLUS; | |
3617 | |
3618 if (lneg != rneg) | |
3619 { | |
3620 ncode = MINUS; | |
3621 if (lneg) | |
3622 tem = lhs, lhs = rhs, rhs = tem; | |
3623 } | |
3624 else if (swap_commutative_operands_p (lhs, rhs)) | |
3625 tem = lhs, lhs = rhs, rhs = tem; | |
3626 | |
3627 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT) | |
3628 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT)) | |
3629 { | |
3630 rtx tem_lhs, tem_rhs; | |
3631 | |
3632 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs; | |
3633 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs; | |
3634 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs); | |
3635 | |
3636 if (tem && !CONSTANT_P (tem)) | |
3637 tem = gen_rtx_CONST (GET_MODE (tem), tem); | |
3638 } | |
3639 else | |
3640 tem = simplify_binary_operation (ncode, mode, lhs, rhs); | |
3641 | |
3642 /* Reject "simplifications" that just wrap the two | |
3643 arguments in a CONST. Failure to do so can result | |
3644 in infinite recursion with simplify_binary_operation | |
3645 when it calls us to simplify CONST operations. */ | |
3646 if (tem | |
3647 && ! (GET_CODE (tem) == CONST | |
3648 && GET_CODE (XEXP (tem, 0)) == ncode | |
3649 && XEXP (XEXP (tem, 0), 0) == lhs | |
3650 && XEXP (XEXP (tem, 0), 1) == rhs)) | |
3651 { | |
3652 lneg &= rneg; | |
3653 if (GET_CODE (tem) == NEG) | |
3654 tem = XEXP (tem, 0), lneg = !lneg; | |
3655 if (GET_CODE (tem) == CONST_INT && lneg) | |
3656 tem = neg_const_int (mode, tem), lneg = 0; | |
3657 | |
3658 ops[i].op = tem; | |
3659 ops[i].neg = lneg; | |
3660 ops[j].op = NULL_RTX; | |
3661 changed = 1; | |
3662 canonicalized = 1; | |
3663 } | |
3664 } | |
3665 } | |
3666 | |
3667 /* If nothing changed, fail. */ | |
3668 if (!canonicalized) | |
3669 return NULL_RTX; | |
3670 | |
3671 /* Pack all the operands to the lower-numbered entries. */ | |
3672 for (i = 0, j = 0; j < n_ops; j++) | |
3673 if (ops[j].op) | |
3674 { | |
3675 ops[i] = ops[j]; | |
3676 i++; | |
3677 } | |
3678 n_ops = i; | |
3679 } | |
3680 while (changed); | |
3681 | |
3682 /* Create (minus -C X) instead of (neg (const (plus X C))). */ | |
3683 if (n_ops == 2 | |
3684 && GET_CODE (ops[1].op) == CONST_INT | |
3685 && CONSTANT_P (ops[0].op) | |
3686 && ops[0].neg) | |
3687 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op); | |
3688 | |
3689 /* We suppressed creation of trivial CONST expressions in the | |
3690 combination loop to avoid recursion. Create one manually now. | |
3691 The combination loop should have ensured that there is exactly | |
3692 one CONST_INT, and the sort will have ensured that it is last | |
3693 in the array and that any other constant will be next-to-last. */ | |
3694 | |
3695 if (n_ops > 1 | |
3696 && GET_CODE (ops[n_ops - 1].op) == CONST_INT | |
3697 && CONSTANT_P (ops[n_ops - 2].op)) | |
3698 { | |
3699 rtx value = ops[n_ops - 1].op; | |
3700 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg) | |
3701 value = neg_const_int (mode, value); | |
3702 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value)); | |
3703 n_ops--; | |
3704 } | |
3705 | |
3706 /* Put a non-negated operand first, if possible. */ | |
3707 | |
3708 for (i = 0; i < n_ops && ops[i].neg; i++) | |
3709 continue; | |
3710 if (i == n_ops) | |
3711 ops[0].op = gen_rtx_NEG (mode, ops[0].op); | |
3712 else if (i != 0) | |
3713 { | |
3714 tem = ops[0].op; | |
3715 ops[0] = ops[i]; | |
3716 ops[i].op = tem; | |
3717 ops[i].neg = 1; | |
3718 } | |
3719 | |
3720 /* Now make the result by performing the requested operations. */ | |
3721 result = ops[0].op; | |
3722 for (i = 1; i < n_ops; i++) | |
3723 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS, | |
3724 mode, result, ops[i].op); | |
3725 | |
3726 return result; | |
3727 } | |
3728 | |
3729 /* Check whether an operand is suitable for calling simplify_plus_minus. */ | |
3730 static bool | |
3731 plus_minus_operand_p (const_rtx x) | |
3732 { | |
3733 return GET_CODE (x) == PLUS | |
3734 || GET_CODE (x) == MINUS | |
3735 || (GET_CODE (x) == CONST | |
3736 && GET_CODE (XEXP (x, 0)) == PLUS | |
3737 && CONSTANT_P (XEXP (XEXP (x, 0), 0)) | |
3738 && CONSTANT_P (XEXP (XEXP (x, 0), 1))); | |
3739 } | |
3740 | |
3741 /* Like simplify_binary_operation except used for relational operators. | |
3742 MODE is the mode of the result. If MODE is VOIDmode, the operands must | |
3743 not both be VOIDmode. | |
3744 | |
3745 CMP_MODE specifies the mode in which the comparison is done, so it is | |
3746 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from | |
3747 the operands or, if both are VOIDmode, the operands are compared in | |
3748 "infinite precision". */ | |
3749 rtx | |
3750 simplify_relational_operation (enum rtx_code code, enum machine_mode mode, | |
3751 enum machine_mode cmp_mode, rtx op0, rtx op1) | |
3752 { | |
3753 rtx tem, trueop0, trueop1; | |
3754 | |
3755 if (cmp_mode == VOIDmode) | |
3756 cmp_mode = GET_MODE (op0); | |
3757 if (cmp_mode == VOIDmode) | |
3758 cmp_mode = GET_MODE (op1); | |
3759 | |
3760 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1); | |
3761 if (tem) | |
3762 { | |
3763 if (SCALAR_FLOAT_MODE_P (mode)) | |
3764 { | |
3765 if (tem == const0_rtx) | |
3766 return CONST0_RTX (mode); | |
3767 #ifdef FLOAT_STORE_FLAG_VALUE | |
3768 { | |
3769 REAL_VALUE_TYPE val; | |
3770 val = FLOAT_STORE_FLAG_VALUE (mode); | |
3771 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode); | |
3772 } | |
3773 #else | |
3774 return NULL_RTX; | |
3775 #endif | |
3776 } | |
3777 if (VECTOR_MODE_P (mode)) | |
3778 { | |
3779 if (tem == const0_rtx) | |
3780 return CONST0_RTX (mode); | |
3781 #ifdef VECTOR_STORE_FLAG_VALUE | |
3782 { | |
3783 int i, units; | |
3784 rtvec v; | |
3785 | |
3786 rtx val = VECTOR_STORE_FLAG_VALUE (mode); | |
3787 if (val == NULL_RTX) | |
3788 return NULL_RTX; | |
3789 if (val == const1_rtx) | |
3790 return CONST1_RTX (mode); | |
3791 | |
3792 units = GET_MODE_NUNITS (mode); | |
3793 v = rtvec_alloc (units); | |
3794 for (i = 0; i < units; i++) | |
3795 RTVEC_ELT (v, i) = val; | |
3796 return gen_rtx_raw_CONST_VECTOR (mode, v); | |
3797 } | |
3798 #else | |
3799 return NULL_RTX; | |
3800 #endif | |
3801 } | |
3802 | |
3803 return tem; | |
3804 } | |
3805 | |
3806 /* For the following tests, ensure const0_rtx is op1. */ | |
3807 if (swap_commutative_operands_p (op0, op1) | |
3808 || (op0 == const0_rtx && op1 != const0_rtx)) | |
3809 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code); | |
3810 | |
3811 /* If op0 is a compare, extract the comparison arguments from it. */ | |
3812 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx) | |
3813 return simplify_relational_operation (code, mode, VOIDmode, | |
3814 XEXP (op0, 0), XEXP (op0, 1)); | |
3815 | |
3816 if (GET_MODE_CLASS (cmp_mode) == MODE_CC | |
3817 || CC0_P (op0)) | |
3818 return NULL_RTX; | |
3819 | |
3820 trueop0 = avoid_constant_pool_reference (op0); | |
3821 trueop1 = avoid_constant_pool_reference (op1); | |
3822 return simplify_relational_operation_1 (code, mode, cmp_mode, | |
3823 trueop0, trueop1); | |
3824 } | |
3825 | |
3826 /* This part of simplify_relational_operation is only used when CMP_MODE | |
3827 is not in class MODE_CC (i.e. it is a real comparison). | |
3828 | |
3829 MODE is the mode of the result, while CMP_MODE specifies the mode | |
3830 in which the comparison is done, so it is the mode of the operands. */ | |
3831 | |
3832 static rtx | |
3833 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode, | |
3834 enum machine_mode cmp_mode, rtx op0, rtx op1) | |
3835 { | |
3836 enum rtx_code op0code = GET_CODE (op0); | |
3837 | |
3838 if (op1 == const0_rtx && COMPARISON_P (op0)) | |
3839 { | |
3840 /* If op0 is a comparison, extract the comparison arguments | |
3841 from it. */ | |
3842 if (code == NE) | |
3843 { | |
3844 if (GET_MODE (op0) == mode) | |
3845 return simplify_rtx (op0); | |
3846 else | |
3847 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode, | |
3848 XEXP (op0, 0), XEXP (op0, 1)); | |
3849 } | |
3850 else if (code == EQ) | |
3851 { | |
3852 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX); | |
3853 if (new_code != UNKNOWN) | |
3854 return simplify_gen_relational (new_code, mode, VOIDmode, | |
3855 XEXP (op0, 0), XEXP (op0, 1)); | |
3856 } | |
3857 } | |
3858 | |
3859 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */ | |
3860 if ((code == LTU || code == GEU) | |
3861 && GET_CODE (op0) == PLUS | |
3862 && rtx_equal_p (op1, XEXP (op0, 1)) | |
3863 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */ | |
3864 && !rtx_equal_p (op1, XEXP (op0, 0))) | |
3865 return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0)); | |
3866 | |
3867 if (op1 == const0_rtx) | |
3868 { | |
3869 /* Canonicalize (GTU x 0) as (NE x 0). */ | |
3870 if (code == GTU) | |
3871 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1); | |
3872 /* Canonicalize (LEU x 0) as (EQ x 0). */ | |
3873 if (code == LEU) | |
3874 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1); | |
3875 } | |
3876 else if (op1 == const1_rtx) | |
3877 { | |
3878 switch (code) | |
3879 { | |
3880 case GE: | |
3881 /* Canonicalize (GE x 1) as (GT x 0). */ | |
3882 return simplify_gen_relational (GT, mode, cmp_mode, | |
3883 op0, const0_rtx); | |
3884 case GEU: | |
3885 /* Canonicalize (GEU x 1) as (NE x 0). */ | |
3886 return simplify_gen_relational (NE, mode, cmp_mode, | |
3887 op0, const0_rtx); | |
3888 case LT: | |
3889 /* Canonicalize (LT x 1) as (LE x 0). */ | |
3890 return simplify_gen_relational (LE, mode, cmp_mode, | |
3891 op0, const0_rtx); | |
3892 case LTU: | |
3893 /* Canonicalize (LTU x 1) as (EQ x 0). */ | |
3894 return simplify_gen_relational (EQ, mode, cmp_mode, | |
3895 op0, const0_rtx); | |
3896 default: | |
3897 break; | |
3898 } | |
3899 } | |
3900 else if (op1 == constm1_rtx) | |
3901 { | |
3902 /* Canonicalize (LE x -1) as (LT x 0). */ | |
3903 if (code == LE) | |
3904 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx); | |
3905 /* Canonicalize (GT x -1) as (GE x 0). */ | |
3906 if (code == GT) | |
3907 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx); | |
3908 } | |
3909 | |
3910 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */ | |
3911 if ((code == EQ || code == NE) | |
3912 && (op0code == PLUS || op0code == MINUS) | |
3913 && CONSTANT_P (op1) | |
3914 && CONSTANT_P (XEXP (op0, 1)) | |
3915 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations)) | |
3916 { | |
3917 rtx x = XEXP (op0, 0); | |
3918 rtx c = XEXP (op0, 1); | |
3919 | |
3920 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS, | |
3921 cmp_mode, op1, c); | |
3922 return simplify_gen_relational (code, mode, cmp_mode, x, c); | |
3923 } | |
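/* E.g. (eq (plus x (const_int 3)) (const_int 7)) becomes
   (eq x (const_int 4)) under this rule (values invented for
   illustration).  */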
3924 | |
3925 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is | |
3926 the same as (zero_extract:SI FOO (const_int 1) BAR). */ | |
3927 if (code == NE | |
3928 && op1 == const0_rtx | |
3929 && GET_MODE_CLASS (mode) == MODE_INT | |
3930 && cmp_mode != VOIDmode | |
3931 /* ??? Work-around BImode bugs in the ia64 backend. */ | |
3932 && mode != BImode | |
3933 && cmp_mode != BImode | |
3934 && nonzero_bits (op0, cmp_mode) == 1 | |
3935 && STORE_FLAG_VALUE == 1) | |
3936 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode) | |
3937 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode) | |
3938 : lowpart_subreg (mode, op0, cmp_mode); | |
3939 | |
3940 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */ | |
3941 if ((code == EQ || code == NE) | |
3942 && op1 == const0_rtx | |
3943 && op0code == XOR) | |
3944 return simplify_gen_relational (code, mode, cmp_mode, | |
3945 XEXP (op0, 0), XEXP (op0, 1)); | |
3946 | |
3947 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */ | |
3948 if ((code == EQ || code == NE) | |
3949 && op0code == XOR | |
3950 && rtx_equal_p (XEXP (op0, 0), op1) | |
3951 && !side_effects_p (XEXP (op0, 0))) | |
3952 return simplify_gen_relational (code, mode, cmp_mode, | |
3953 XEXP (op0, 1), const0_rtx); | |
3954 | |
3955 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */ | |
3956 if ((code == EQ || code == NE) | |
3957 && op0code == XOR | |
3958 && rtx_equal_p (XEXP (op0, 1), op1) | |
3959 && !side_effects_p (XEXP (op0, 1))) | |
3960 return simplify_gen_relational (code, mode, cmp_mode, | |
3961 XEXP (op0, 0), const0_rtx); | |
3962 | |
3963 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */ | |
3964 if ((code == EQ || code == NE) | |
3965 && op0code == XOR | |
3966 && (GET_CODE (op1) == CONST_INT | |
3967 || GET_CODE (op1) == CONST_DOUBLE) | |
3968 && (GET_CODE (XEXP (op0, 1)) == CONST_INT | |
3969 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)) | |
3970 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0), | |
3971 simplify_gen_binary (XOR, cmp_mode, | |
3972 XEXP (op0, 1), op1)); | |
3973 | |
3974 if (op0code == POPCOUNT && op1 == const0_rtx) | |
3975 switch (code) | |
3976 { | |
3977 case EQ: | |
3978 case LE: | |
3979 case LEU: | |
3980 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */ | |
3981 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)), | |
3982 XEXP (op0, 0), const0_rtx); | |
3983 | |
3984 case NE: | |
3985 case GT: | |
3986 case GTU: | |
3987 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */ | |
3988 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)), | |
3989 XEXP (op0, 0), const0_rtx); | |
3990 | |
3991 default: | |
3992 break; | |
3993 } | |
3994 | |
3995 return NULL_RTX; | |
3996 } | |
3997 | |
3998 enum | |
3999 { | |
4000 CMP_EQ = 1, | |
4001 CMP_LT = 2, | |
4002 CMP_GT = 4, | |
4003 CMP_LTU = 8, | |
4004 CMP_GTU = 16 | |
4005 }; | |
4006 | |
4007 | |
4008 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in | |
4009 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE. | |
4010 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the | |
4011 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU). | |
4012 For floating-point comparisons, assume that the operands were ordered. */ | |
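/* E.g. comparison_result (LE, CMP_GT) is const0_rtx: when the operands
   are known to compare greater, "less than or equal" must be false
   (an invented instance for illustration).  */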
4013 | |
4014 static rtx | |
4015 comparison_result (enum rtx_code code, int known_results) | |
4016 { | |
4017 switch (code) | |
4018 { | |
4019 case EQ: | |
4020 case UNEQ: | |
4021 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx; | |
4022 case NE: | |
4023 case LTGT: | |
4024 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx; | |
4025 | |
4026 case LT: | |
4027 case UNLT: | |
4028 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx; | |
4029 case GE: | |
4030 case UNGE: | |
4031 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx; | |
4032 | |
4033 case GT: | |
4034 case UNGT: | |
4035 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx; | |
4036 case LE: | |
4037 case UNLE: | |
4038 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx; | |
4039 | |
4040 case LTU: | |
4041 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx; | |
4042 case GEU: | |
4043 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx; | |
4044 | |
4045 case GTU: | |
4046 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx; | |
4047 case LEU: | |
4048 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx; | |
4049 | |
4050 case ORDERED: | |
4051 return const_true_rtx; | |
4052 case UNORDERED: | |
4053 return const0_rtx; | |
4054 default: | |
4055 gcc_unreachable (); | |
4056 } | |
4057 } | |
4058 | |
4059 /* Check if the given comparison (done in the given MODE) is actually a | |
4060 tautology or a contradiction. | |
4061 If no simplification is possible, this function returns zero. | |
4062 Otherwise, it returns either const_true_rtx or const0_rtx. */ | |
4063 | |
4064 rtx | |
4065 simplify_const_relational_operation (enum rtx_code code, | |
4066 enum machine_mode mode, | |
4067 rtx op0, rtx op1) | |
4068 { | |
4069 rtx tem; | |
4070 rtx trueop0; | |
4071 rtx trueop1; | |
4072 | |
4073 gcc_assert (mode != VOIDmode | |
4074 || (GET_MODE (op0) == VOIDmode | |
4075 && GET_MODE (op1) == VOIDmode)); | |
4076 | |
4077 /* If op0 is a compare, extract the comparison arguments from it. */ | |
4078 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx) | |
4079 { | |
4080 op1 = XEXP (op0, 1); | |
4081 op0 = XEXP (op0, 0); | |
4082 | |
4083 if (GET_MODE (op0) != VOIDmode) | |
4084 mode = GET_MODE (op0); | |
4085 else if (GET_MODE (op1) != VOIDmode) | |
4086 mode = GET_MODE (op1); | |
4087 else | |
4088 return 0; | |
4089 } | |
4090 | |
4091 /* We can't simplify MODE_CC values since we don't know what the | |
4092 actual comparison is. */ | |
4093 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0)) | |
4094 return 0; | |
4095 | |
4096 /* Make sure the constant is second. */ | |
4097 if (swap_commutative_operands_p (op0, op1)) | |
4098 { | |
4099 tem = op0, op0 = op1, op1 = tem; | |
4100 code = swap_condition (code); | |
4101 } | |
4102 | |
4103 trueop0 = avoid_constant_pool_reference (op0); | |
4104 trueop1 = avoid_constant_pool_reference (op1); | |
4105 | |
4106 /* For integer comparisons of A and B maybe we can simplify A - B and can | |
4107 then simplify a comparison of that with zero. If A and B are both either | |
4108 a register or a CONST_INT, this can't help; testing for these cases will | |
4109 prevent infinite recursion here and speed things up. | |
4110 | |
4111 We can only do this for EQ and NE comparisons as otherwise we may | |
4112 lose or introduce overflow which we cannot disregard as undefined as | |
4113 we do not know the signedness of the operation on either the left or | |
4114 the right hand side of the comparison. */ | |
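/* A sketch of the idea (hypothetical operands): comparing
   (plus (reg x) (const_int 1)) against (reg x) with EQ lets us fold the
   difference to (const_int 1), so the test becomes
   (eq (const_int 1) (const_int 0)), i.e. const0_rtx.  */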
4115 | |
4116 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx | |
4117 && (code == EQ || code == NE) | |
4118 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT) | |
4119 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT)) | |
4120 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1)) | |
4121 /* We cannot do this if tem is a nonzero address. */ | |
4122 && ! nonzero_address_p (tem)) | |
4123 return simplify_const_relational_operation (signed_condition (code), | |
4124 mode, tem, const0_rtx); | |
4125 | |
4126 if (! HONOR_NANS (mode) && code == ORDERED) | |
4127 return const_true_rtx; | |
4128 | |
4129 if (! HONOR_NANS (mode) && code == UNORDERED) | |
4130 return const0_rtx; | |
4131 | |
4132 /* For modes without NaNs, if the two operands are equal, we know the | |
4133 result except if they have side-effects. Even with NaNs we know | |
4134 the result of unordered comparisons and, if signaling NaNs are | |
4135 irrelevant, also the result of LT/GT/LTGT. */ | |
4136 if ((! HONOR_NANS (GET_MODE (trueop0)) | |
4137 || code == UNEQ || code == UNLE || code == UNGE | |
4138 || ((code == LT || code == GT || code == LTGT) | |
4139 && ! HONOR_SNANS (GET_MODE (trueop0)))) | |
4140 && rtx_equal_p (trueop0, trueop1) | |
4141 && ! side_effects_p (trueop0)) | |
4142 return comparison_result (code, CMP_EQ); | |
4143 | |
4144 /* If the operands are floating-point constants, see if we can fold | |
4145 the result. */ | |
4146 if (GET_CODE (trueop0) == CONST_DOUBLE | |
4147 && GET_CODE (trueop1) == CONST_DOUBLE | |
4148 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0))) | |
4149 { | |
4150 REAL_VALUE_TYPE d0, d1; | |
4151 | |
4152 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0); | |
4153 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1); | |
4154 | |
4155 /* Comparisons are unordered iff at least one of the values is NaN. */ | |
4156 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1)) | |
4157 switch (code) | |
4158 { | |
4159 case UNEQ: | |
4160 case UNLT: | |
4161 case UNGT: | |
4162 case UNLE: | |
4163 case UNGE: | |
4164 case NE: | |
4165 case UNORDERED: | |
4166 return const_true_rtx; | |
4167 case EQ: | |
4168 case LT: | |
4169 case GT: | |
4170 case LE: | |
4171 case GE: | |
4172 case LTGT: | |
4173 case ORDERED: | |
4174 return const0_rtx; | |
4175 default: | |
4176 return 0; | |
4177 } | |
4178 | |
4179 return comparison_result (code, | |
4180 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ : | |
4181 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT)); | |
4182 } | |
4183 | |
4184 /* Otherwise, see if the operands are both integers. */ | |
4185 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode) | |
4186 && (GET_CODE (trueop0) == CONST_DOUBLE | |
4187 || GET_CODE (trueop0) == CONST_INT) | |
4188 && (GET_CODE (trueop1) == CONST_DOUBLE | |
4189 || GET_CODE (trueop1) == CONST_INT)) | |
4190 { | |
4191 int width = GET_MODE_BITSIZE (mode); | |
4192 HOST_WIDE_INT l0s, h0s, l1s, h1s; | |
4193 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u; | |
4194 | |
4195 /* Get the two words comprising each integer constant. */ | |
4196 if (GET_CODE (trueop0) == CONST_DOUBLE) | |
4197 { | |
4198 l0u = l0s = CONST_DOUBLE_LOW (trueop0); | |
4199 h0u = h0s = CONST_DOUBLE_HIGH (trueop0); | |
4200 } | |
4201 else | |
4202 { | |
4203 l0u = l0s = INTVAL (trueop0); | |
4204 h0u = h0s = HWI_SIGN_EXTEND (l0s); | |
4205 } | |
4206 | |
4207 if (GET_CODE (trueop1) == CONST_DOUBLE) | |
4208 { | |
4209 l1u = l1s = CONST_DOUBLE_LOW (trueop1); | |
4210 h1u = h1s = CONST_DOUBLE_HIGH (trueop1); | |
4211 } | |
4212 else | |
4213 { | |
4214 l1u = l1s = INTVAL (trueop1); | |
4215 h1u = h1s = HWI_SIGN_EXTEND (l1s); | |
4216 } | |
4217 | |
4218 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT, | |
4219 we have to sign or zero-extend the values. */ | |
4220 if (width != 0 && width < HOST_BITS_PER_WIDE_INT) | |
4221 { | |
4222 l0u &= ((HOST_WIDE_INT) 1 << width) - 1; | |
4223 l1u &= ((HOST_WIDE_INT) 1 << width) - 1; | |
4224 | |
4225 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1))) | |
4226 l0s |= ((HOST_WIDE_INT) (-1) << width); | |
4227 | |
4228 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1))) | |
4229 l1s |= ((HOST_WIDE_INT) (-1) << width); | |
4230 } | |
4231 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT) | |
4232 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s); | |
4233 | |
4234 if (h0u == h1u && l0u == l1u) | |
4235 return comparison_result (code, CMP_EQ); | |
4236 else | |
4237 { | |
4238 int cr; | |
4239 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT; | |
4240 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU; | |
4241 return comparison_result (code, cr); | |
4242 } | |
4243 } | |
4244 | |
4245 /* Optimize comparisons with upper and lower bounds. */ | |
4246 if (SCALAR_INT_MODE_P (mode) | |
4247 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT | |
4248 && GET_CODE (trueop1) == CONST_INT) | |
4249 { | |
4250 int sign; | |
4251 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode); | |
4252 HOST_WIDE_INT val = INTVAL (trueop1); | |
4253 HOST_WIDE_INT mmin, mmax; | |
4254 | |
4255 if (code == GEU | |
4256 || code == LEU | |
4257 || code == GTU | |
4258 || code == LTU) | |
4259 sign = 0; | |
4260 else | |
4261 sign = 1; | |
4262 | |
4263 /* Get a reduced range if the sign bit is zero. */ | |
4264 if (nonzero <= (GET_MODE_MASK (mode) >> 1)) | |
4265 { | |
4266 mmin = 0; | |
4267 mmax = nonzero; | |
4268 } | |
4269 else | |
4270 { | |
4271 rtx mmin_rtx, mmax_rtx; | |
4272 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx); | |
4273 | |
4274 mmin = INTVAL (mmin_rtx); | |
4275 mmax = INTVAL (mmax_rtx); | |
4276 if (sign) | |
4277 { | |
4278 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode); | |
4279 | |
4280 mmin >>= (sign_copies - 1); | |
4281 mmax >>= (sign_copies - 1); | |
4282 } | |
4283 } | |
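/* For instance (an invented figure): in a signed SImode comparison
   where num_sign_bit_copies reports 25 known copies of the sign bit,
   the shifts above narrow the bounds from [-2^31, 2^31 - 1] down to
   [-128, 127], tightening the range checks below.  */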
4284 | |
4285 switch (code) | |
4286 { | |
4287 /* x >= y is always true for y <= mmin, always false for y > mmax. */ | |
4288 case GEU: | |
4289 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin) | |
4290 return const_true_rtx; | |
4291 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax) | |
4292 return const0_rtx; | |
4293 break; | |
4294 case GE: | |
4295 if (val <= mmin) | |
4296 return const_true_rtx; | |
4297 if (val > mmax) | |
4298 return const0_rtx; | |
4299 break; | |
4300 | |
4301 /* x <= y is always true for y >= mmax, always false for y < mmin. */ | |
4302 case LEU: | |
4303 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax) | |
4304 return const_true_rtx; | |
4305 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin) | |
4306 return const0_rtx; | |
4307 break; | |
4308 case LE: | |
4309 if (val >= mmax) | |
4310 return const_true_rtx; | |
4311 if (val < mmin) | |
4312 return const0_rtx; | |
4313 break; | |
4314 | |
4315 case EQ: | |
4316 /* x == y is always false for y out of range. */ | |
4317 if (val < mmin || val > mmax) | |
4318 return const0_rtx; | |
4319 break; | |
4320 | |
4321 /* x > y is always false for y >= mmax, always true for y < mmin. */ | |
4322 case GTU: | |
4323 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax) | |
4324 return const0_rtx; | |
4325 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin) | |
4326 return const_true_rtx; | |
4327 break; | |
4328 case GT: | |
4329 if (val >= mmax) | |
4330 return const0_rtx; | |
4331 if (val < mmin) | |
4332 return const_true_rtx; | |
4333 break; | |
4334 | |
4335 /* x < y is always false for y <= mmin, always true for y > mmax. */ | |
4336 case LTU: | |
4337 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin) | |
4338 return const0_rtx; | |
4339 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax) | |
4340 return const_true_rtx; | |
4341 break; | |
4342 case LT: | |
4343 if (val <= mmin) | |
4344 return const0_rtx; | |
4345 if (val > mmax) | |
4346 return const_true_rtx; | |
4347 break; | |
4348 | |
4349 case NE: | |
4350 /* x != y is always true for y out of range. */ | |
4351 if (val < mmin || val > mmax) | |
4352 return const_true_rtx; | |
4353 break; | |
4354 | |
4355 default: | |
4356 break; | |
4357 } | |
4358 } | |
4359 | |
4360 /* Optimize integer comparisons with zero. */ | |
4361 if (trueop1 == const0_rtx) | |
4362 { | |
4363 /* Some addresses are known to be nonzero. We don't know | |
4364 their sign, but equality comparisons are known. */ | |
4365 if (nonzero_address_p (trueop0)) | |
4366 { | |
4367 if (code == EQ || code == LEU) | |
4368 return const0_rtx; | |
4369 if (code == NE || code == GTU) | |
4370 return const_true_rtx; | |
4371 } | |
4372 | |
4373 /* See if the first operand is an IOR with a constant. If so, we | |
4374 may be able to determine the result of this comparison. */ | |
4375 if (GET_CODE (op0) == IOR) | |
4376 { | |
4377 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1)); | |
4378 if (GET_CODE (inner_const) == CONST_INT && inner_const != const0_rtx) | |
4379 { | |
4380 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1; | |
4381 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum | |
4382 && (INTVAL (inner_const) | |
4383 & ((HOST_WIDE_INT) 1 << sign_bitnum))); | |
4384 | |
4385 switch (code) | |
4386 { | |
4387 case EQ: | |
4388 case LEU: | |
4389 return const0_rtx; | |
4390 case NE: | |
4391 case GTU: | |
4392 return const_true_rtx; | |
4393 case LT: | |
4394 case LE: | |
4395 if (has_sign) | |
4396 return const_true_rtx; | |
4397 break; | |
4398 case GT: | |
4399 case GE: | |
4400 if (has_sign) | |
4401 return const0_rtx; | |
4402 break; | |
4403 default: | |
4404 break; | |
4405 } | |
4406 } | |
4407 } | |
4408 } | |
4409 | |
4410 /* Optimize comparison of ABS with zero. */ | |
4411 if (trueop1 == CONST0_RTX (mode) | |
4412 && (GET_CODE (trueop0) == ABS | |
4413 || (GET_CODE (trueop0) == FLOAT_EXTEND | |
4414 && GET_CODE (XEXP (trueop0, 0)) == ABS))) | |
4415 { | |
4416 switch (code) | |
4417 { | |
4418 case LT: | |
4419 /* Optimize abs(x) < 0.0. */ | |
4420 if (!HONOR_SNANS (mode) | |
4421 && (!INTEGRAL_MODE_P (mode) | |
4422 || (!flag_wrapv && !flag_trapv && flag_strict_overflow))) | |
4423 { | |
4424 if (INTEGRAL_MODE_P (mode) | |
4425 && (issue_strict_overflow_warning | |
4426 (WARN_STRICT_OVERFLOW_CONDITIONAL))) | |
4427 warning (OPT_Wstrict_overflow, | |
4428 ("assuming signed overflow does not occur when " | |
4429 "assuming abs (x) < 0 is false")); | |
4430 return const0_rtx; | |
4431 } | |
4432 break; | |
4433 | |
4434 case GE: | |
4435 /* Optimize abs(x) >= 0.0. */ | |
4436 if (!HONOR_NANS (mode) | |
4437 && (!INTEGRAL_MODE_P (mode) | |
4438 || (!flag_wrapv && !flag_trapv && flag_strict_overflow))) | |
4439 { | |
4440 if (INTEGRAL_MODE_P (mode) | |
4441 && (issue_strict_overflow_warning | |
4442 (WARN_STRICT_OVERFLOW_CONDITIONAL))) | |
4443 warning (OPT_Wstrict_overflow, | |
4444 ("assuming signed overflow does not occur when " | |
4445 "assuming abs (x) >= 0 is true")); | |
4446 return const_true_rtx; | |
4447 } | |
4448 break; | |
4449 | |
4450 case UNGE: | |
4451 /* Optimize ! (abs(x) < 0.0). */ | |
4452 return const_true_rtx; | |
4453 | |
4454 default: | |
4455 break; | |
4456 } | |
4457 } | |
4458 | |
4459 return 0; | |
4460 } | |
4461 | |
4462 /* Simplify CODE, an operation with result mode MODE and three operands, | |
4463 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became | |
4464 a constant. Return 0 if no simplification is possible. */ | |
4465 | |
4466 rtx | |
4467 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode, | |
4468 enum machine_mode op0_mode, rtx op0, rtx op1, | |
4469 rtx op2) | |
4470 { | |
4471 unsigned int width = GET_MODE_BITSIZE (mode); | |
4472 | |
4473 /* VOIDmode means "infinite" precision. */ | |
4474 if (width == 0) | |
4475 width = HOST_BITS_PER_WIDE_INT; | |
4476 | |
4477 switch (code) | |
4478 { | |
4479 case SIGN_EXTRACT: | |
4480 case ZERO_EXTRACT: | |
4481 if (GET_CODE (op0) == CONST_INT | |
4482 && GET_CODE (op1) == CONST_INT | |
4483 && GET_CODE (op2) == CONST_INT | |
4484 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width) | |
4485 && width <= (unsigned) HOST_BITS_PER_WIDE_INT) | |
4486 { | |
4487 /* Extracting a bit-field from a constant.  */ | |
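/* E.g. (sign_extract (const_int 0xf0) (const_int 4) (const_int 4)) on
   a !BITS_BIG_ENDIAN target extracts the four bits 0xf and
   sign-extends them to -1 (a made-up instance for illustration).  */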
4488 HOST_WIDE_INT val = INTVAL (op0); | |
4489 | |
4490 if (BITS_BIG_ENDIAN) | |
4491 val >>= (GET_MODE_BITSIZE (op0_mode) | |
4492 - INTVAL (op2) - INTVAL (op1)); | |
4493 else | |
4494 val >>= INTVAL (op2); | |
4495 | |
4496 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1)) | |
4497 { | |
4498 /* First zero-extend. */ | |
4499 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1; | |
4500 /* If desired, propagate sign bit. */ | |
4501 if (code == SIGN_EXTRACT | |
4502 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1)))) | |
4503 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1); | |
4504 } | |
4505 | |
4506 /* Clear the bits that don't belong in our mode, | |
4507 unless they and our sign bit are all one. | |
4508 So we get either a reasonable negative value or a reasonable | |
4509 unsigned value for this mode. */ | |
4510 if (width < HOST_BITS_PER_WIDE_INT | |
4511 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1))) | |
4512 != ((HOST_WIDE_INT) (-1) << (width - 1)))) | |
4513 val &= ((HOST_WIDE_INT) 1 << width) - 1; | |
4514 | |
4515 return gen_int_mode (val, mode); | |
4516 } | |
4517 break; | |
4518 | |
4519 case IF_THEN_ELSE: | |
4520 if (GET_CODE (op0) == CONST_INT) | |
4521 return op0 != const0_rtx ? op1 : op2; | |
4522 | |
4523 /* Convert c ? a : a into "a". */ | |
4524 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0)) | |
4525 return op1; | |
4526 | |
4527 /* Convert a != b ? a : b into "a". */ | |
4528 if (GET_CODE (op0) == NE | |
4529 && ! side_effects_p (op0) | |
4530 && ! HONOR_NANS (mode) | |
4531 && ! HONOR_SIGNED_ZEROS (mode) | |
4532 && ((rtx_equal_p (XEXP (op0, 0), op1) | |
4533 && rtx_equal_p (XEXP (op0, 1), op2)) | |
4534 || (rtx_equal_p (XEXP (op0, 0), op2) | |
4535 && rtx_equal_p (XEXP (op0, 1), op1)))) | |
4536 return op1; | |
4537 | |
4538 /* Convert a == b ? a : b into "b". */ | |
4539 if (GET_CODE (op0) == EQ | |
4540 && ! side_effects_p (op0) | |
4541 && ! HONOR_NANS (mode) | |
4542 && ! HONOR_SIGNED_ZEROS (mode) | |
4543 && ((rtx_equal_p (XEXP (op0, 0), op1) | |
4544 && rtx_equal_p (XEXP (op0, 1), op2)) | |
4545 || (rtx_equal_p (XEXP (op0, 0), op2) | |
4546 && rtx_equal_p (XEXP (op0, 1), op1)))) | |
4547 return op2; | |
4548 | |
4549 if (COMPARISON_P (op0) && ! side_effects_p (op0)) | |
4550 { | |
4551 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode | |
4552 ? GET_MODE (XEXP (op0, 1)) | |
4553 : GET_MODE (XEXP (op0, 0))); | |
4554 rtx temp; | |
4555 | |
4556 /* Look for STORE_FLAG_VALUE and zero constants in op1 and op2. */ | |
4557 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT) | |
4558 { | |
4559 HOST_WIDE_INT t = INTVAL (op1); | |
4560 HOST_WIDE_INT f = INTVAL (op2); | |
4561 | |
4562 if (t == STORE_FLAG_VALUE && f == 0) | |
4563 code = GET_CODE (op0); | |
4564 else if (t == 0 && f == STORE_FLAG_VALUE) | |
4565 { | |
4566 enum rtx_code tmp; | |
4567 tmp = reversed_comparison_code (op0, NULL_RTX); | |
4568 if (tmp == UNKNOWN) | |
4569 break; | |
4570 code = tmp; | |
4571 } | |
4572 else | |
4573 break; | |
4574 | |
4575 return simplify_gen_relational (code, mode, cmp_mode, | |
4576 XEXP (op0, 0), XEXP (op0, 1)); | |
4577 } | |
4578 | |
4579 if (cmp_mode == VOIDmode) | |
4580 cmp_mode = op0_mode; | |
4581 temp = simplify_relational_operation (GET_CODE (op0), op0_mode, | |
4582 cmp_mode, XEXP (op0, 0), | |
4583 XEXP (op0, 1)); | |
4584 | |
4585 /* See if any simplifications were possible. */ | |
4586 if (temp) | |
4587 { | |
4588 if (GET_CODE (temp) == CONST_INT) | |
4589 return temp == const0_rtx ? op2 : op1; | |
4590 else if (temp) | |
4591 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2); | |
4592 } | |
4593 } | |
4594 break; | |
4595 | |
4596 case VEC_MERGE: | |
4597 gcc_assert (GET_MODE (op0) == mode); | |
4598 gcc_assert (GET_MODE (op1) == mode); | |
4599 gcc_assert (VECTOR_MODE_P (mode)); | |
4600 op2 = avoid_constant_pool_reference (op2); | |
4601 if (GET_CODE (op2) == CONST_INT) | |
4602 { | |
4603 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); | |
4604 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); | |
4605 int mask = (1 << n_elts) - 1; | |
4606 | |
4607 if (!(INTVAL (op2) & mask)) | |
4608 return op1; | |
4609 if ((INTVAL (op2) & mask) == mask) | |
4610 return op0; | |
4611 | |
4612 op0 = avoid_constant_pool_reference (op0); | |
4613 op1 = avoid_constant_pool_reference (op1); | |
4614 if (GET_CODE (op0) == CONST_VECTOR | |
4615 && GET_CODE (op1) == CONST_VECTOR) | |
4616 { | |
4617 rtvec v = rtvec_alloc (n_elts); | |
4618 unsigned int i; | |
4619 | |
4620 for (i = 0; i < n_elts; i++) | |
4621 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i) | |
4622 ? CONST_VECTOR_ELT (op0, i) | |
4623 : CONST_VECTOR_ELT (op1, i)); | |
4624 return gen_rtx_CONST_VECTOR (mode, v); | |
4625 } | |
4626 } | |
4627 break; | |
4628 | |
4629 default: | |
4630 gcc_unreachable (); | |
4631 } | |
4632 | |
4633 return 0; | |
4634 } | |
4635 | |
4636 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED | |
4637 or CONST_VECTOR, | |
4638 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR. | |
4639 | |
4640 Works by unpacking OP into a collection of 8-bit values | |
4641 represented as a little-endian array of 'unsigned char', selecting by BYTE, | |
4642 and then repacking them again for OUTERMODE. */ | |
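/* E.g. (subreg:QI (const_int 0x1234) 0) with HImode as INNERMODE
   unpacks into the byte array {0x34, 0x12} and repacks to
   (const_int 0x34) on a little-endian target, or (const_int 0x12) on a
   big-endian one (a small invented case; the machinery below handles
   any mode up to 512 bits).  */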
4643 | |
4644 static rtx | |
4645 simplify_immed_subreg (enum machine_mode outermode, rtx op, | |
4646 enum machine_mode innermode, unsigned int byte) | |
4647 { | |
4648 /* We support up to 512-bit values (for V8DFmode). */ | |
4649 enum { | |
4650 max_bitsize = 512, | |
4651 value_bit = 8, | |
4652 value_mask = (1 << value_bit) - 1 | |
4653 }; | |
4654 unsigned char value[max_bitsize / value_bit]; | |
4655 int value_start; | |
4656 int i; | |
4657 int elem; | |
4658 | |
4659 int num_elem; | |
4660 rtx * elems; | |
4661 int elem_bitsize; | |
4662 rtx result_s; | |
4663 rtvec result_v = NULL; | |
4664 enum mode_class outer_class; | |
4665 enum machine_mode outer_submode; | |
4666 | |
4667 /* Some ports misuse CCmode. */ | |
4668 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT) | |
4669 return op; | |
4670 | |
4671 /* We have no way to represent a complex constant at the rtl level. */ | |
4672 if (COMPLEX_MODE_P (outermode)) | |
4673 return NULL_RTX; | |
4674 | |
4675 /* Unpack the value. */ | |
4676 | |
4677 if (GET_CODE (op) == CONST_VECTOR) | |
4678 { | |
4679 num_elem = CONST_VECTOR_NUNITS (op); | |
4680 elems = &CONST_VECTOR_ELT (op, 0); | |
4681 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode)); | |
4682 } | |
4683 else | |
4684 { | |
4685 num_elem = 1; | |
4686 elems = &op; | |
4687 elem_bitsize = max_bitsize; | |
4688 } | |
4689 /* If this asserts, it is too complicated; reducing value_bit may help. */ | |
4690 gcc_assert (BITS_PER_UNIT % value_bit == 0); | |
4691 /* I don't know how to handle endianness of sub-units. */ | |
4692 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0); | |
4693 | |
4694 for (elem = 0; elem < num_elem; elem++) | |
4695 { | |
4696 unsigned char * vp; | |
4697 rtx el = elems[elem]; | |
4698 | |
4699 /* Vectors are kept in target memory order. (This is probably | |
4700 a mistake.) */ | |
4701 { | |
4702 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT; | |
4703 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize) | |
4704 / BITS_PER_UNIT); | |
4705 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte; | |
4706 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte; | |
4707 unsigned bytele = (subword_byte % UNITS_PER_WORD | |
4708 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD); | |
4709 vp = value + (bytele * BITS_PER_UNIT) / value_bit; | |
4710 } | |
4711 | |
4712 switch (GET_CODE (el)) | |
4713 { | |
4714 case CONST_INT: | |
4715 for (i = 0; | |
4716 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize; | |
4717 i += value_bit) | |
4718 *vp++ = INTVAL (el) >> i; | |
4719 /* CONST_INTs are always logically sign-extended. */ | |
4720 for (; i < elem_bitsize; i += value_bit) | |
4721 *vp++ = INTVAL (el) < 0 ? -1 : 0; | |
4722 break; | |
4723 | |
4724 case CONST_DOUBLE: | |
4725 if (GET_MODE (el) == VOIDmode) | |
4726 { | |
4727 /* If this triggers, someone should have generated a | |
4728 CONST_INT instead. */ | |
4729 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT); | |
4730 | |
4731 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit) | |
4732 *vp++ = CONST_DOUBLE_LOW (el) >> i; | |
4733 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize) | |
4734 { | |
4735 *vp++ | |
4736 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT); | |
4737 i += value_bit; | |
4738 } | |
4739 /* It shouldn't matter what's done here, so fill it with | |
4740 zero. */ | |
4741 for (; i < elem_bitsize; i += value_bit) | |
4742 *vp++ = 0; | |
4743 } | |
4744 else | |
4745 { | |
4746 long tmp[max_bitsize / 32]; | |
4747 int bitsize = GET_MODE_BITSIZE (GET_MODE (el)); | |
4748 | |
4749 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el))); | |
4750 gcc_assert (bitsize <= elem_bitsize); | |
4751 gcc_assert (bitsize % value_bit == 0); | |
4752 | |
4753 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el), | |
4754 GET_MODE (el)); | |
4755 | |
4756 /* real_to_target produces its result in words affected by | |
4757 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this, | |
4758 and use WORDS_BIG_ENDIAN instead; see the documentation | |
4759 of SUBREG in rtl.texi. */ | |
4760 for (i = 0; i < bitsize; i += value_bit) | |
4761 { | |
4762 int ibase; | |
4763 if (WORDS_BIG_ENDIAN) | |
4764 ibase = bitsize - 1 - i; | |
4765 else | |
4766 ibase = i; | |
4767 *vp++ = tmp[ibase / 32] >> i % 32; | |
4768 } | |
4769 | |
4770 /* It shouldn't matter what's done here, so fill it with | |
4771 zero. */ | |
4772 for (; i < elem_bitsize; i += value_bit) | |
4773 *vp++ = 0; | |
4774 } | |
4775 break; | |
4776 | |
4777 case CONST_FIXED: | |
4778 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT) | |
4779 { | |
4780 for (i = 0; i < elem_bitsize; i += value_bit) | |
4781 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i; | |
4782 } | |
4783 else | |
4784 { | |
4785 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit) | |
4786 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i; | |
4787 for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize; | |
4788 i += value_bit) | |
4789 *vp++ = CONST_FIXED_VALUE_HIGH (el) | |
4790 >> (i - HOST_BITS_PER_WIDE_INT); | |
4791 for (; i < elem_bitsize; i += value_bit) | |
4792 *vp++ = 0; | |
4793 } | |
4794 break; | |
4795 | |
4796 default: | |
4797 gcc_unreachable (); | |
4798 } | |
4799 } | |
4800 | |
4801 /* Now, pick the right byte to start with. */ | |
4802 /* Renumber BYTE so that the least-significant byte is byte 0. A special | |
4803 case is paradoxical SUBREGs, which shouldn't be adjusted since they | |
4804 will already have offset 0. */ | |
4805 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode)) | |
4806 { | |
4807 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode) | |
4808 - byte); | |
4809 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte; | |
4810 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte; | |
4811 byte = (subword_byte % UNITS_PER_WORD | |
4812 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD); | |
4813 } | |
4814 | |
4815 /* BYTE should still be inside OP. (Note that BYTE is unsigned, | |
4816 so if it's become negative it will instead be very large.) */ | |
4817 gcc_assert (byte < GET_MODE_SIZE (innermode)); | |
4818 | |
4819 /* Convert from bytes to chunks of size value_bit. */ | |
4820 value_start = byte * (BITS_PER_UNIT / value_bit); | |
4821 | |
4822 /* Re-pack the value. */ | |
4823 | |
4824 if (VECTOR_MODE_P (outermode)) | |
4825 { | |
4826 num_elem = GET_MODE_NUNITS (outermode); | |
4827 result_v = rtvec_alloc (num_elem); | |
4828 elems = &RTVEC_ELT (result_v, 0); | |
4829 outer_submode = GET_MODE_INNER (outermode); | |
4830 } | |
4831 else | |
4832 { | |
4833 num_elem = 1; | |
4834 elems = &result_s; | |
4835 outer_submode = outermode; | |
4836 } | |
4837 | |
4838 outer_class = GET_MODE_CLASS (outer_submode); | |
4839 elem_bitsize = GET_MODE_BITSIZE (outer_submode); | |
4840 | |
4841 gcc_assert (elem_bitsize % value_bit == 0); | |
4842 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize); | |
4843 | |
4844 for (elem = 0; elem < num_elem; elem++) | |
4845 { | |
4846 unsigned char *vp; | |
4847 | |
4848 /* Vectors are stored in target memory order. (This is probably | |
4849 a mistake.) */ | |
4850 { | |
4851 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT; | |
4852 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize) | |
4853 / BITS_PER_UNIT); | |
4854 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte; | |
4855 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte; | |
4856 unsigned bytele = (subword_byte % UNITS_PER_WORD | |
4857 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD); | |
4858 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit; | |
4859 } | |
4860 | |
4861 switch (outer_class) | |
4862 { | |
4863 case MODE_INT: | |
4864 case MODE_PARTIAL_INT: | |
4865 { | |
4866 unsigned HOST_WIDE_INT hi = 0, lo = 0; | |
4867 | |
4868 for (i = 0; | |
4869 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize; | |
4870 i += value_bit) | |
4871 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i; | |
4872 for (; i < elem_bitsize; i += value_bit) | |
4873 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask) | |
4874 << (i - HOST_BITS_PER_WIDE_INT)); | |
4875 | |
4876 /* immed_double_const doesn't call trunc_int_for_mode. I don't | |
4877 know why. */ | |
4878 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT) | |
4879 elems[elem] = gen_int_mode (lo, outer_submode); | |
4880 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT) | |
4881 elems[elem] = immed_double_const (lo, hi, outer_submode); | |
4882 else | |
4883 return NULL_RTX; | |
4884 } | |
4885 break; | |
4886 | |
4887 case MODE_FLOAT: | |
4888 case MODE_DECIMAL_FLOAT: | |
4889 { | |
4890 REAL_VALUE_TYPE r; | |
4891 long tmp[max_bitsize / 32]; | |
4892 | |
4893 /* real_from_target wants its input in words affected by | |
4894 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this, | |
4895 and use WORDS_BIG_ENDIAN instead; see the documentation | |
4896 of SUBREG in rtl.texi. */ | |
4897 for (i = 0; i < max_bitsize / 32; i++) | |
4898 tmp[i] = 0; | |
4899 for (i = 0; i < elem_bitsize; i += value_bit) | |
4900 { | |
4901 int ibase; | |
4902 if (WORDS_BIG_ENDIAN) | |
4903 ibase = elem_bitsize - 1 - i; | |
4904 else | |
4905 ibase = i; | |
4906 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32; | |
4907 } | |
4908 | |
4909 real_from_target (&r, tmp, outer_submode); | |
4910 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode); | |
4911 } | |
4912 break; | |
4913 | |
4914 case MODE_FRACT: | |
4915 case MODE_UFRACT: | |
4916 case MODE_ACCUM: | |
4917 case MODE_UACCUM: | |
4918 { | |
4919 FIXED_VALUE_TYPE f; | |
4920 f.data.low = 0; | |
4921 f.data.high = 0; | |
4922 f.mode = outer_submode; | |
4923 | |
4924 for (i = 0; | |
4925 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize; | |
4926 i += value_bit) | |
4927 f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i; | |
4928 for (; i < elem_bitsize; i += value_bit) | |
4929 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask) | |
4930 << (i - HOST_BITS_PER_WIDE_INT)); | |
4931 | |
4932 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode); | |
4933 } | |
4934 break; | |
4935 | |
4936 default: | |
4937 gcc_unreachable (); | |
4938 } | |
4939 } | |
4940 if (VECTOR_MODE_P (outermode)) | |
4941 return gen_rtx_CONST_VECTOR (outermode, result_v); | |
4942 else | |
4943 return result_s; | |
4944 } | |
4945 | |
4946 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE) | |
4947 Return 0 if no simplifications are possible. */ | |
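/* For instance, (subreg:QI (subreg:HI (reg:SI r) 0) 0) collapses to
   (subreg:QI (reg:SI r) 0) through the SUBREG-of-SUBREG case below
   (modes and offsets invented for the example and assuming a
   little-endian layout).  */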
4948 rtx | |
4949 simplify_subreg (enum machine_mode outermode, rtx op, | |
4950 enum machine_mode innermode, unsigned int byte) | |
4951 { | |
4952 /* Little bit of sanity checking. */ | |
4953 gcc_assert (innermode != VOIDmode); | |
4954 gcc_assert (outermode != VOIDmode); | |
4955 gcc_assert (innermode != BLKmode); | |
4956 gcc_assert (outermode != BLKmode); | |
4957 | |
4958 gcc_assert (GET_MODE (op) == innermode | |
4959 || GET_MODE (op) == VOIDmode); | |
4960 | |
4961 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0); | |
4962 gcc_assert (byte < GET_MODE_SIZE (innermode)); | |
4963 | |
4964 if (outermode == innermode && !byte) | |
4965 return op; | |
4966 | |
4967 if (GET_CODE (op) == CONST_INT | |
4968 || GET_CODE (op) == CONST_DOUBLE | |
4969 || GET_CODE (op) == CONST_FIXED | |
4970 || GET_CODE (op) == CONST_VECTOR) | |
4971 return simplify_immed_subreg (outermode, op, innermode, byte); | |
4972 | |
4973 /* Changing mode twice with SUBREG => just change it once, | |
4974 or not at all if changing back to op's starting mode. */ | |
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* SUBREG_BYTE represents the offset, as if the value were stored
         in memory.  An irritating exception is a paradoxical subreg,
         where we define SUBREG_BYTE to be 0; on big-endian machines
         this value would otherwise be negative.  For a moment, undo
         this exception.  */
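      /* Worked example (illustrative, assuming a big-endian target
         with 4-byte words): for (subreg:DI (reg:SI r) 0), the
         difference below is 4 - 8 = -4, so final_offset is adjusted
         by -4, recovering the "real" negative byte offset of the
         paradoxical subreg.  */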
      if (byte == 0
          && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case the resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* For a paradoxical subreg, see if we are still looking at
             the lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_UNSIGNED_SET
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
            }
          return newx;
        }
      return NULL_RTX;
    }

  /* Merge implicit and explicit truncations.  */
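  /* For instance (an illustrative case added here, not in the
     original comments): on a little-endian target,
     (subreg:QI (truncate:HI (reg:SI r)) 0) folds to
     (truncate:QI (reg:SI r)), merging the two narrowing steps.  */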

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */
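  /* Illustrative example (register numbers are hypothetical): on a
     target where a DImode value occupies two consecutive word-sized
     hard registers, (subreg:SI (reg:DI 0) 4) can simplify to
     (reg:SI 1) when simplify_subreg_regno deems that register valid
     in SImode.  */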

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust the offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate the original regno.  We don't have any way to
             specify the offset inside the original regno, so do so
             only for the lowpart.  The information is used only by
             alias analysis, which cannot grok partial registers
             anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent
     address or if we would be widening it.  */
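  /* For example (an illustrative case): on a little-endian target,
     (subreg:QI (mem:SI addr) 3) becomes, roughly, a QImode MEM at
     addr + 3 via adjust_address_nv below.  */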

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as a CONCAT
     of real and imaginary part.  */
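  /* For example (illustrative): with op = (concat:SC re:SF im:SF),
     part_size is 4, so a request for byte 4 in SFmode selects the
     imaginary part im at final offset 0.  */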
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* Optimize SUBREG truncations of zero- and sign-extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
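      /* Concretely (an illustrative example): with op =
         (zero_extend:SI (reg:QI r)) and bitpos == 0, requesting QImode
         returns r itself, while requesting HImode returns
         (zero_extend:HI (reg:QI r)), since HImode is wider than the
         QImode source.  */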
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }

  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that OUTERMODE is at least twice as wide as the
         INNERMODE to avoid the possibility that an outer LSHIFTRT
         shifts by more than the sign extension's sign_bit_copies and
         introduces zeros into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI))
     C), 0) into (lshiftrt:QI (x:QI) C), where C is a suitable small
     constant and the outer subreg is effectively a truncation to the
     original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI))
     C), 0) into (ashift:QI (x:QI) C), where C is a suitable small
     constant and the outer subreg is effectively a truncation to the
     original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
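  /* Illustrative case (assuming 32-bit words and a little-endian
     target): (subreg:SI (lshiftrt:DI (reg:DI r) (const_int 32)) 0)
     extracts the high word of r and becomes (subreg:SI (reg:DI r) 4)
     below.  */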
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
      && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL
   simplification code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any
           simplifications which do not depend on pass-specific state
           into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
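
/* Usage note (an illustrative example, not part of the original
   comments): simplify_rtx applied to (minus:SI (reg:SI r) (reg:SI r))
   dispatches to simplify_binary_operation, which folds it to
   (const_int 0).  */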
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}