/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
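/* Usage sketch: widening a CONST_INT value LV into a (low, high) pair
   is written as

     lv = INTVAL (op);
     hv = HWI_SIGN_EXTEND (lv);

   so a negative LV gets an all-ones high word and a nonnegative LV a
   zero high word, exactly as the constant-folding code below does.  */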
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int, unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
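/* For example, with 32-bit SImode this accepts (const_int -2147483648),
   whose only set bit within the mode mask is bit 31; any other constant,
   or a non-integer mode, yields false (an illustrative sketch).  */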
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
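/* Usage sketch (hypothetical caller):

     rtx x = simplify_gen_binary (PLUS, SImode, reg, const0_rtx);

   folds to REG itself for integer modes; a fresh (plus:SI reg
   (const_int 0)) rtx is only built when nothing simplifies.  */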
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
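/* E.g. a MEM whose address is a CONSTANT_POOL_ADDRESS_P SYMBOL_REF
   holding a DFmode constant comes back as the CONST_DOUBLE itself, so
   later folding can treat it as a plain constant (a sketch of the
   intent, not an exhaustive description).  */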
/* Return true if X is a MEM referencing the constant pool.  */

bool
constant_pool_reference_p (rtx x)
{
  return avoid_constant_pool_reference (x) != x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (rtx_equal_p (x, old_rtx))
	    return new_rtx;
	}
      break;

    default:
      break;
    }
  return x;
}
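/* Usage sketch (hypothetical caller): substituting (const_int 4) for
   REG in (plus:SI REG (const_int 1)) returns (const_int 5), because
   each rebuilt subexpression is re-simplified on the way up:

     tem = simplify_replace_rtx (x, reg, GEN_INT (4));
 */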
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));
      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;
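      /* So, e.g., (not (and X Y)) becomes (ior (not X) (not Y)) here,
	 and (not (ior X Y)) becomes (and (not X) (not Y)).  */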
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (GET_CODE (XEXP (op, 1)) == CONST_INT
	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult (neg A) B).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_BITSIZE (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;
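      /* E.g. with STORE_FLAG_VALUE == 1 and X in SImode,
	 (neg:SI (lt:SI X (const_int 0))) becomes
	 (ashiftrt:SI X (const_int 31)), replicating the sign bit across
	 the word (an illustrative sketch).  */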
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
	   || GET_CODE (op) == ZERO_EXTEND)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
	 (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
	 (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
	  && subreg_lowpart_p (op))
	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
				   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  Note that this is also
	 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
	 modes we just have to apply a different definition for
	 truncation.  But don't do this for an (LSHIFTRT (MULT ...))
	 since this will cause problems with the umulXi3_highpart
	 patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op)))
	   ? (num_sign_bit_copies (op, GET_MODE (op))
	      > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
				- GET_MODE_BITSIZE (mode)))
	   : truncated_to_mode (mode, op))
	  && ! (GET_CODE (op) == LSHIFTRT
		&& GET_CODE (XEXP (op, 0)) == MULT))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && COMPARISON_P (op)
	  && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
								  0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x) */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.
	 */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || ((GET_MODE_BITSIZE (GET_MODE (op))
	       <= HOST_BITS_PER_WIDE_INT)
	      && ((nonzero_bits (op, GET_MODE (op))
		   & ((HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
		  == 0)))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
	return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
    case PARITY:
      /* (pop* (zero_extend <X>)) = (pop* <X>) */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (code, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
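/* Usage sketch (hypothetical caller):

     tem = simplify_unary_operation (NOT, SImode,
				     gen_rtx_NOT (SImode, x), SImode);

   returns X via the (not (not X)) rule above; a NULL return means no
   simplification applied and the caller keeps its expression.  */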
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
	}
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
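/* E.g. simplify_const_unary_operation (ZERO_EXTEND, SImode, GEN_INT (-1),
   QImode) yields (const_int 255), and FIX applied to an out-of-range
   CONST_DOUBLE saturates to the mode's bounds as implemented above
   (a usage sketch).  */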
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
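/* E.g. (plus (plus X (const_int 4)) (const_int 8)) is reassociated here
   so the two constants meet and fold, giving (plus X (const_int 12))
   (an illustrative sketch).  */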
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
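/* Usage sketch (hypothetical caller):

     tem = simplify_binary_operation (PLUS, SImode, GEN_INT (2),
				      GEN_INT (3));

   yields (const_int 5); two distinct registers, by contrast, return
   NULL and the caller keeps its original PLUS.  */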
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */
  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	  && GET_CODE (op1) == CONST_INT)
	return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
	       && GET_CODE (op0) == CONST_INT)
	return plus_constant (op1, INTVAL (op0));
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
	  unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
	  rtx lhs = op0, rhs = op1;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0l = -1;
	      coeff0h = -1;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
	    {
	      coeff0l = INTVAL (XEXP (lhs, 1));
	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
	      coeff0h = 0;
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1l = -1;
	      coeff1h = -1;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
	    {
	      coeff1l = INTVAL (XEXP (rhs, 1));
	      coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
	      coeff1h = 0;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      unsigned HOST_WIDE_INT l;
	      HOST_WIDE_INT h;

	      add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
	      coeff = immed_double_const (l, h, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
		? tem : 0;
	    }
	}
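      /* E.g. (plus (mult X (const_int 4)) X) collapses here to
	 (mult X (const_int 5)), provided rtx_cost does not rate the new
	 multiply as more expensive than the original (a sketch).  */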
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == XOR
	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case COMPARE:
#ifdef HAVE_cc0
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	 using cc0, in which case we want to leave it as a COMPARE
	 so we can distinguish it from a register-register-copy.

	 In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	   || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && trueop1 == CONST0_RTX (mode))
	return op0;
#endif

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
	  unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
	  rtx lhs = op0, rhs = op1;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0l = -1;
	      coeff0h = -1;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
	    {
	      coeff0l = INTVAL (XEXP (lhs, 1));
	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
	      coeff0h = 0;
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1l = 1;
	      negcoeff1h = 0;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
	    {
	      negcoeff1l = -INTVAL (XEXP (rhs, 1));
	      negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
	      negcoeff1h = -1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      unsigned HOST_WIDE_INT l;
	      HOST_WIDE_INT h;

	      add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
	      coeff = immed_double_const (l, h, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
		? tem : 0;
	    }
	}
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (GET_CODE (op1) == CONST_INT
	      || GET_CODE (op1) == CONST_DOUBLE))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift unless
	 we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && (val = exact_log2 (INTVAL (trueop1))) >= 0
	  /* If the mode is larger than the host word size, and the
	     uppermost bit is set, then this isn't a power of two due
	     to implicit sign extension.  */
	  && (width <= HOST_BITS_PER_WIDE_INT
	      || val != HOST_BITS_PER_WIDE_INT - 1))
	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && (GET_MODE (trueop1) == VOIDmode
	      || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
	  && GET_MODE (op0) == mode
	  && CONST_DOUBLE_LOW (trueop1) == 0
	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
	return simplify_gen_binary (ASHIFT, mode, op0,
				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
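      /* E.g. (mult X (const_int 8)) becomes (ashift X (const_int 3))
	 here; the CONST_DOUBLE variant handles powers of two whose low
	 word is zero, shifting by HOST_BITS_PER_WIDE_INT plus the high
	 word's log2 (an illustrative sketch).  */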
      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case IOR:
      if (trueop1 == const0_rtx)
	return op0;
      if (GET_CODE (trueop1) == CONST_INT
	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (GET_CODE (op1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
	return op1;

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && GET_CODE (XEXP (opleft, 1)) == CONST_INT
	  && GET_CODE (XEXP (opright, 1)) == CONST_INT
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
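      /* E.g. in SImode, (ior (ashift X (const_int 24))
	 (lshiftrt X (const_int 8))) is recognized as
	 (rotate X (const_int 24)) because 24 + 8 equals the 32-bit mode
	 size (an illustrative sketch).  */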
      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
	  && GET_CODE (XEXP (opright, 1)) == CONST_INT
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (GET_CODE (op1) == CONST_INT
	  && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
	return simplify_gen_binary (IOR, mode,
				    simplify_gen_binary
					  (AND, mode, XEXP (op0, 0),
					   GEN_INT (INTVAL (XEXP (op0, 1))
						    & ~INTVAL (op1))),
				    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (XEXP (op0, 0), mask),
					XEXP (op0, 1));
	}

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
2085 if (trueop1 == const0_rtx)
2087 if (GET_CODE (trueop1) == CONST_INT
2088 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2089 == GET_MODE_MASK (mode)))
2090 return simplify_gen_unary (NOT, mode, op0, mode);
2091 if (rtx_equal_p (trueop0, trueop1)
2092 && ! side_effects_p (op0)
2093 && GET_MODE_CLASS (mode) != MODE_CC)
2094 return CONST0_RTX (mode);
2096 /* Canonicalize XOR of the most significant bit to PLUS. */
2097 if ((GET_CODE (op1) == CONST_INT
2098 || GET_CODE (op1) == CONST_DOUBLE)
2099 && mode_signbit_p (mode, op1))
2100 return simplify_gen_binary (PLUS, mode, op0, op1);
2101 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2102 if ((GET_CODE (op1) == CONST_INT
2103 || GET_CODE (op1) == CONST_DOUBLE)
2104 && GET_CODE (op0) == PLUS
2105 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2106 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2107 && mode_signbit_p (mode, XEXP (op0, 1)))
2108 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2109 simplify_gen_binary (XOR, mode, op1,
2112 /* If we are XORing two things that have no bits in common,
2113 convert them into an IOR. This helps to detect rotation encoded
2114 using those methods and possibly other simplifications. */
2116 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2117 && (nonzero_bits (op0, mode)
2118 & nonzero_bits (op1, mode)) == 0)
2119 return (simplify_gen_binary (IOR, mode, op0, op1));
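/* For illustration (assuming 8-bit values): if nonzero_bits allows op0
   only the bits 0x0f and op1 only the bits 0xf0, then no bit position
   can be set in both operands, so a ^ b == a | b for all such values;
   e.g. 0x0a ^ 0x50 == 0x5a == 0x0a | 0x50.  */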
2121 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2122 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for (NOT y).  */
2125 int num_negated = 0;
2127 if (GET_CODE (op0) == NOT)
2128 num_negated++, op0 = XEXP (op0, 0);
2129 if (GET_CODE (op1) == NOT)
2130 num_negated++, op1 = XEXP (op1, 0);
2132 if (num_negated == 2)
2133 return simplify_gen_binary (XOR, mode, op0, op1);
2134 else if (num_negated == 1)
2135 return simplify_gen_unary (NOT, mode,
2136 simplify_gen_binary (XOR, mode, op0, op1),
2140 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2141 correspond to a machine insn or result in further simplifications
2142 if B is a constant. */
2144 if (GET_CODE (op0) == AND
2145 && rtx_equal_p (XEXP (op0, 1), op1)
2146 && ! side_effects_p (op1))
2147 return simplify_gen_binary (AND, mode,
2148 simplify_gen_unary (NOT, mode,
2149 XEXP (op0, 0), mode),
2152 else if (GET_CODE (op0) == AND
2153 && rtx_equal_p (XEXP (op0, 0), op1)
2154 && ! side_effects_p (op1))
2155 return simplify_gen_binary (AND, mode,
2156 simplify_gen_unary (NOT, mode,
2157 XEXP (op0, 1), mode),
2160 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2161 comparison if STORE_FLAG_VALUE is 1. */
2162 if (STORE_FLAG_VALUE == 1
2163 && trueop1 == const1_rtx
2164 && COMPARISON_P (op0)
2165 && (reversed = reversed_comparison (op0, mode)))
2168 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2169 is (lt foo (const_int 0)), so we can perform the above
2170 simplification if STORE_FLAG_VALUE is 1. */
2172 if (STORE_FLAG_VALUE == 1
2173 && trueop1 == const1_rtx
2174 && GET_CODE (op0) == LSHIFTRT
2175 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2176 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2177 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
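/* A small worked instance (illustrative; assumes 32-bit SImode):
   (lshiftrt x 31) isolates the sign bit, so it is 1 exactly when x < 0.
   XORing that 0/1 flag with 1 therefore tests the opposite condition,
   which is the (ge x 0) built above.  */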
2179 /* (xor (comparison foo bar) (const_int sign-bit))
2180 when STORE_FLAG_VALUE is the sign bit. */
2181 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2182 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2183 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2184 && trueop1 == const_true_rtx
2185 && COMPARISON_P (op0)
2186 && (reversed = reversed_comparison (op0, mode)))
2191 tem = simplify_associative_operation (code, mode, op0, op1);
2197 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2199 /* If we are turning off bits already known off in OP0, we need not do an AND.  */
2201 if (GET_CODE (trueop1) == CONST_INT
2202 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2203 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2205 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2206 && GET_MODE_CLASS (mode) != MODE_CC)
2209 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2210 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2211 && ! side_effects_p (op0)
2212 && GET_MODE_CLASS (mode) != MODE_CC)
2213 return CONST0_RTX (mode);
2215 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2216 there are no nonzero bits of C outside of X's mode. */
2217 if ((GET_CODE (op0) == SIGN_EXTEND
2218 || GET_CODE (op0) == ZERO_EXTEND)
2219 && GET_CODE (trueop1) == CONST_INT
2220 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2221 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2222 & INTVAL (trueop1)) == 0)
2224 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2225 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2226 gen_int_mode (INTVAL (trueop1),
2228 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
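/* Hypothetical instance of the rewrite above: with op0 ==
   (sign_extend:SI x:QI) and C == 0x7f, no bit of C lies outside QImode,
   so (and:SI (sign_extend:SI x) (const_int 0x7f)) becomes
   (zero_extend:SI (and:QI x (const_int 0x7f))) -- the AND discards the
   copies of the sign bit, which is all the extension added.  */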
2231 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2232 insn (and may simplify more). */
2233 if (GET_CODE (op0) == XOR
2234 && rtx_equal_p (XEXP (op0, 0), op1)
2235 && ! side_effects_p (op1))
2236 return simplify_gen_binary (AND, mode,
2237 simplify_gen_unary (NOT, mode,
2238 XEXP (op0, 1), mode),
2241 if (GET_CODE (op0) == XOR
2242 && rtx_equal_p (XEXP (op0, 1), op1)
2243 && ! side_effects_p (op1))
2244 return simplify_gen_binary (AND, mode,
2245 simplify_gen_unary (NOT, mode,
2246 XEXP (op0, 0), mode),
2249 /* Similarly for (~(A ^ B)) & A. */
2250 if (GET_CODE (op0) == NOT
2251 && GET_CODE (XEXP (op0, 0)) == XOR
2252 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2253 && ! side_effects_p (op1))
2254 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2256 if (GET_CODE (op0) == NOT
2257 && GET_CODE (XEXP (op0, 0)) == XOR
2258 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2259 && ! side_effects_p (op1))
2260 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2262 /* Convert (A | B) & A to A. */
2263 if (GET_CODE (op0) == IOR
2264 && (rtx_equal_p (XEXP (op0, 0), op1)
2265 || rtx_equal_p (XEXP (op0, 1), op1))
2266 && ! side_effects_p (XEXP (op0, 0))
2267 && ! side_effects_p (XEXP (op0, 1)))
2270 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2271 ((A & N) + B) & M -> (A + B) & M
2272 Similarly if (N & M) == 0,
2273 ((A | N) + B) & M -> (A + B) & M
2274 and for - instead of + and/or ^ instead of |. */
2275 if (GET_CODE (trueop1) == CONST_INT
2276 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2277 && ~INTVAL (trueop1)
2278 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2279 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2284 pmop[0] = XEXP (op0, 0);
2285 pmop[1] = XEXP (op0, 1);
2287 for (which = 0; which < 2; which++)
2290 switch (GET_CODE (tem))
2293 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2294 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2295 == INTVAL (trueop1))
2296 pmop[which] = XEXP (tem, 0);
2300 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2301 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2302 pmop[which] = XEXP (tem, 0);
2309 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2311 tem = simplify_gen_binary (GET_CODE (op0), mode,
2313 return simplify_gen_binary (code, mode, tem, op1);
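/* A concrete instance (illustration only): take M == 0xff, so
   M & (M + 1) == 0, and N == 0x1ff, so (N & M) == M.  Then
   ((a & 0x1ff) + b) & 0xff == (a + b) & 0xff for all a and b, because
   bits of a at or above bit 8 can only influence bits at or above
   bit 8 of the sum, and the outer mask discards those.  */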
2316 tem = simplify_associative_operation (code, mode, op0, op1);
2322 /* 0/x is 0 (or x&0 if x has side-effects). */
2323 if (trueop0 == CONST0_RTX (mode))
2325 if (side_effects_p (op1))
2326 return simplify_gen_binary (AND, mode, op1, trueop0);
2330 if (trueop1 == CONST1_RTX (mode))
2331 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2332 /* Convert divide by power of two into shift. */
2333 if (GET_CODE (trueop1) == CONST_INT
2334 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2335 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
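/* Worked example (illustrative): for unsigned x, x / 8 == x >> 3, since
   exact_log2 (8) == 3.  The "> 0" test leaves x / 1 alone; that case
   was already handled above.  */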
2339 /* Handle floating point and integers separately. */
2340 if (SCALAR_FLOAT_MODE_P (mode))
2342 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2343 safe for modes with NaNs, since 0.0 / 0.0 will then be
2344 NaN rather than 0.0. Nor is it safe for modes with signed
2345 zeros, since dividing 0 by a negative number gives -0.0.  */
2346 if (trueop0 == CONST0_RTX (mode)
2347 && !HONOR_NANS (mode)
2348 && !HONOR_SIGNED_ZEROS (mode)
2349 && ! side_effects_p (op1))
2352 if (trueop1 == CONST1_RTX (mode)
2353 && !HONOR_SNANS (mode))
2356 if (GET_CODE (trueop1) == CONST_DOUBLE
2357 && trueop1 != CONST0_RTX (mode))
2360 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2363 if (REAL_VALUES_EQUAL (d, dconstm1)
2364 && !HONOR_SNANS (mode))
2365 return simplify_gen_unary (NEG, mode, op0, mode);
2367 /* Change FP division by a constant into multiplication.
2368 Only do this with -funsafe-math-optimizations. */
2369 if (flag_unsafe_math_optimizations
2370 && !REAL_VALUES_EQUAL (d, dconst0))
2372 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2373 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2374 return simplify_gen_binary (MULT, mode, op0, tem);
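/* Sketch of the effect (not authoritative): x / 4.0 becomes x * 0.25,
   and 1/4 is exactly representable.  For a divisor such as 3.0 the
   reciprocal is rounded, which is why this rewrite is gated on
   flag_unsafe_math_optimizations.  */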
2380 /* 0/x is 0 (or x&0 if x has side-effects). */
2381 if (trueop0 == CONST0_RTX (mode))
2383 if (side_effects_p (op1))
2384 return simplify_gen_binary (AND, mode, op1, trueop0);
2388 if (trueop1 == CONST1_RTX (mode))
2389 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2391 if (trueop1 == constm1_rtx)
2393 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2394 return simplify_gen_unary (NEG, mode, x, mode);
2400 /* 0%x is 0 (or x&0 if x has side-effects). */
2401 if (trueop0 == CONST0_RTX (mode))
2403 if (side_effects_p (op1))
2404 return simplify_gen_binary (AND, mode, op1, trueop0);
2407 /* x%1 is 0 (or x&0 if x has side-effects).  */
2408 if (trueop1 == CONST1_RTX (mode))
2410 if (side_effects_p (op0))
2411 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2412 return CONST0_RTX (mode);
2414 /* Implement modulus by power of two as AND. */
2415 if (GET_CODE (trueop1) == CONST_INT
2416 && exact_log2 (INTVAL (trueop1)) > 0)
2417 return simplify_gen_binary (AND, mode, op0,
2418 GEN_INT (INTVAL (op1) - 1));
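/* For example (illustrative): for unsigned x, x % 16 == x & 15, because
   the remainder modulo a power of two is just the low log2 bits;
   exact_log2 (16) == 4 > 0 accepts the divisor and GEN_INT (16 - 1)
   builds the mask.  */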
2422 /* 0%x is 0 (or x&0 if x has side-effects). */
2423 if (trueop0 == CONST0_RTX (mode))
2425 if (side_effects_p (op1))
2426 return simplify_gen_binary (AND, mode, op1, trueop0);
2429 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
2430 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2432 if (side_effects_p (op0))
2433 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2434 return CONST0_RTX (mode);
2441 if (trueop1 == CONST0_RTX (mode))
2443 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2445 /* Rotating ~0 always results in ~0. */
2446 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2447 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2448 && ! side_effects_p (op1))
2454 if (trueop1 == CONST0_RTX (mode))
2456 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2461 if (trueop1 == CONST0_RTX (mode))
2463 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2465 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2466 if (GET_CODE (op0) == CLZ
2467 && GET_CODE (trueop1) == CONST_INT
2468 && STORE_FLAG_VALUE == 1
2469 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2471 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2472 unsigned HOST_WIDE_INT zero_val = 0;
2474 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2475 && zero_val == GET_MODE_BITSIZE (imode)
2476 && INTVAL (trueop1) == exact_log2 (zero_val))
2477 return simplify_gen_relational (EQ, mode, imode,
2478 XEXP (op0, 0), const0_rtx);
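/* Worked instance (assumes a 32-bit inner mode where
   CLZ_DEFINED_VALUE_AT_ZERO stores 32): clz yields 0..31 for nonzero
   inputs and 32 only for zero, so (lshiftrt (clz x) 5) is 1 exactly
   when clz (x) == 32, i.e. when x == 0 -- hence the (eq x 0) built
   above; 5 == exact_log2 (32).  */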
2483 if (width <= HOST_BITS_PER_WIDE_INT
2484 && GET_CODE (trueop1) == CONST_INT
2485 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2486 && ! side_effects_p (op0))
2488 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2490 tem = simplify_associative_operation (code, mode, op0, op1);
2496 if (width <= HOST_BITS_PER_WIDE_INT
2497 && GET_CODE (trueop1) == CONST_INT
2498 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2499 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2500 && ! side_effects_p (op0))
2502 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2504 tem = simplify_associative_operation (code, mode, op0, op1);
2510 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2512 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2514 tem = simplify_associative_operation (code, mode, op0, op1);
2520 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2522 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2524 tem = simplify_associative_operation (code, mode, op0, op1);
2533 /* ??? There are simplifications that can be done. */
2537 if (!VECTOR_MODE_P (mode))
2539 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2540 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2541 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2542 gcc_assert (XVECLEN (trueop1, 0) == 1);
2543 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2545 if (GET_CODE (trueop0) == CONST_VECTOR)
2546 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2551 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2552 gcc_assert (GET_MODE_INNER (mode)
2553 == GET_MODE_INNER (GET_MODE (trueop0)));
2554 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2556 if (GET_CODE (trueop0) == CONST_VECTOR)
2558 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2559 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2560 rtvec v = rtvec_alloc (n_elts);
2563 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2564 for (i = 0; i < n_elts; i++)
2566 rtx x = XVECEXP (trueop1, 0, i);
2568 gcc_assert (GET_CODE (x) == CONST_INT);
2569 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2573 return gen_rtx_CONST_VECTOR (mode, v);
2577 if (XVECLEN (trueop1, 0) == 1
2578 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2579 && GET_CODE (trueop0) == VEC_CONCAT)
2582 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2584 /* Try to find the element in the VEC_CONCAT. */
2585 while (GET_MODE (vec) != mode
2586 && GET_CODE (vec) == VEC_CONCAT)
2588 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2589 if (offset < vec_size)
2590 vec = XEXP (vec, 0);
2594 vec = XEXP (vec, 1);
2596 vec = avoid_constant_pool_reference (vec);
2599 if (GET_MODE (vec) == mode)
2606 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2607 ? GET_MODE (trueop0)
2608 : GET_MODE_INNER (mode));
2609 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2610 ? GET_MODE (trueop1)
2611 : GET_MODE_INNER (mode));
2613 gcc_assert (VECTOR_MODE_P (mode));
2614 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2615 == GET_MODE_SIZE (mode));
2617 if (VECTOR_MODE_P (op0_mode))
2618 gcc_assert (GET_MODE_INNER (mode)
2619 == GET_MODE_INNER (op0_mode));
2621 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2623 if (VECTOR_MODE_P (op1_mode))
2624 gcc_assert (GET_MODE_INNER (mode)
2625 == GET_MODE_INNER (op1_mode));
2627 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2629 if ((GET_CODE (trueop0) == CONST_VECTOR
2630 || GET_CODE (trueop0) == CONST_INT
2631 || GET_CODE (trueop0) == CONST_DOUBLE)
2632 && (GET_CODE (trueop1) == CONST_VECTOR
2633 || GET_CODE (trueop1) == CONST_INT
2634 || GET_CODE (trueop1) == CONST_DOUBLE))
2636 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2637 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2638 rtvec v = rtvec_alloc (n_elts);
2640 unsigned in_n_elts = 1;
2642 if (VECTOR_MODE_P (op0_mode))
2643 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2644 for (i = 0; i < n_elts; i++)
2648 if (!VECTOR_MODE_P (op0_mode))
2649 RTVEC_ELT (v, i) = trueop0;
2651 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2655 if (!VECTOR_MODE_P (op1_mode))
2656 RTVEC_ELT (v, i) = trueop1;
2658 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2663 return gen_rtx_CONST_VECTOR (mode, v);
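/* A sketch of the scalar case (illustration only): with mode == V2SI
   and two CONST_INT operands, in_n_elts == 1, so element 0 of v is
   taken from trueop0 and element 1 from trueop1, giving
   (const_vector:V2SI [op0 op1]).  */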
2676 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2679 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2681 unsigned int width = GET_MODE_BITSIZE (mode);
2683 if (VECTOR_MODE_P (mode)
2684 && code != VEC_CONCAT
2685 && GET_CODE (op0) == CONST_VECTOR
2686 && GET_CODE (op1) == CONST_VECTOR)
2688 unsigned n_elts = GET_MODE_NUNITS (mode);
2689 enum machine_mode op0mode = GET_MODE (op0);
2690 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2691 enum machine_mode op1mode = GET_MODE (op1);
2692 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2693 rtvec v = rtvec_alloc (n_elts);
2696 gcc_assert (op0_n_elts == n_elts);
2697 gcc_assert (op1_n_elts == n_elts);
2698 for (i = 0; i < n_elts; i++)
2700 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2701 CONST_VECTOR_ELT (op0, i),
2702 CONST_VECTOR_ELT (op1, i));
2705 RTVEC_ELT (v, i) = x;
2708 return gen_rtx_CONST_VECTOR (mode, v);
2711 if (VECTOR_MODE_P (mode)
2712 && code == VEC_CONCAT
2713 && CONSTANT_P (op0) && CONSTANT_P (op1))
2715 unsigned n_elts = GET_MODE_NUNITS (mode);
2716 rtvec v = rtvec_alloc (n_elts);
2718 gcc_assert (n_elts >= 2);
2721 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2722 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2724 RTVEC_ELT (v, 0) = op0;
2725 RTVEC_ELT (v, 1) = op1;
2729 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2730 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2733 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2734 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2735 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2737 for (i = 0; i < op0_n_elts; ++i)
2738 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2739 for (i = 0; i < op1_n_elts; ++i)
2740 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2743 return gen_rtx_CONST_VECTOR (mode, v);
2746 if (SCALAR_FLOAT_MODE_P (mode)
2747 && GET_CODE (op0) == CONST_DOUBLE
2748 && GET_CODE (op1) == CONST_DOUBLE
2749 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2760 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2762 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2764 for (i = 0; i < 4; i++)
2781 real_from_target (&r, tmp0, mode);
2782 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2786 REAL_VALUE_TYPE f0, f1, value, result;
2789 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2790 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2791 real_convert (&f0, mode, &f0);
2792 real_convert (&f1, mode, &f1);
2794 if (HONOR_SNANS (mode)
2795 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2799 && REAL_VALUES_EQUAL (f1, dconst0)
2800 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2803 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2804 && flag_trapping_math
2805 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2807 int s0 = REAL_VALUE_NEGATIVE (f0);
2808 int s1 = REAL_VALUE_NEGATIVE (f1);
2813 /* Inf + -Inf = NaN plus exception. */
2818 /* Inf - Inf = NaN plus exception. */
2823 /* Inf / Inf = NaN plus exception. */
2830 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2831 && flag_trapping_math
2832 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2833 || (REAL_VALUE_ISINF (f1)
2834 && REAL_VALUES_EQUAL (f0, dconst0))))
2835 /* Inf * 0 = NaN plus exception. */
2838 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2840 real_convert (&result, mode, &value);
2842 /* Don't constant fold this floating point operation if
2843 the result has overflowed and flag_trapping_math is set.  */
2845 if (flag_trapping_math
2846 && MODE_HAS_INFINITIES (mode)
2847 && REAL_VALUE_ISINF (result)
2848 && !REAL_VALUE_ISINF (f0)
2849 && !REAL_VALUE_ISINF (f1))
2850 /* Overflow plus exception. */
2853 /* Don't constant fold this floating point operation if the
2854 result may depend upon the run-time rounding mode and
2855 flag_rounding_math is set, or if GCC's software emulation
2856 is unable to accurately represent the result. */
2858 if ((flag_rounding_math
2859 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2860 && !flag_unsafe_math_optimizations))
2861 && (inexact || !real_identical (&result, &value)))
2864 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2868 /* We can fold some multi-word operations. */
2869 if (GET_MODE_CLASS (mode) == MODE_INT
2870 && width == HOST_BITS_PER_WIDE_INT * 2
2871 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2872 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2874 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2875 HOST_WIDE_INT h1, h2, hv, ht;
2877 if (GET_CODE (op0) == CONST_DOUBLE)
2878 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2880 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2882 if (GET_CODE (op1) == CONST_DOUBLE)
2883 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2885 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2890 /* A - B == A + (-B). */
2891 neg_double (l2, h2, &lv, &hv);
2894 /* Fall through.... */
2897 add_double (l1, h1, l2, h2, &lv, &hv);
2901 mul_double (l1, h1, l2, h2, &lv, &hv);
2905 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2906 &lv, &hv, <, &ht))
2911 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2912 <, &ht, &lv, &hv))
2917 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2918 &lv, &hv, <, &ht))
2923 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2924 <, &ht, &lv, &hv))
2929 lv = l1 & l2, hv = h1 & h2;
2933 lv = l1 | l2, hv = h1 | h2;
2937 lv = l1 ^ l2, hv = h1 ^ h2;
2943 && ((unsigned HOST_WIDE_INT) l1
2944 < (unsigned HOST_WIDE_INT) l2)))
2953 && ((unsigned HOST_WIDE_INT) l1
2954 > (unsigned HOST_WIDE_INT) l2)))
2961 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2963 && ((unsigned HOST_WIDE_INT) l1
2964 < (unsigned HOST_WIDE_INT) l2)))
2971 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2973 && ((unsigned HOST_WIDE_INT) l1
2974 > (unsigned HOST_WIDE_INT) l2)))
2980 case LSHIFTRT: case ASHIFTRT:
2982 case ROTATE: case ROTATERT:
2983 if (SHIFT_COUNT_TRUNCATED)
2984 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2986 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2989 if (code == LSHIFTRT || code == ASHIFTRT)
2990 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2992 else if (code == ASHIFT)
2993 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2994 else if (code == ROTATE)
2995 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2996 else /* code == ROTATERT */
2997 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3004 return immed_double_const (lv, hv, mode);
3007 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3008 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3010 /* Get the integer argument values in two forms:
3011 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3013 arg0 = INTVAL (op0);
3014 arg1 = INTVAL (op1);
3016 if (width < HOST_BITS_PER_WIDE_INT)
3018 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3019 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3022 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3023 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3026 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3027 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3035 /* Compute the value of the arithmetic. */
3040 val = arg0s + arg1s;
3044 val = arg0s - arg1s;
3048 val = arg0s * arg1s;
3053 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3056 val = arg0s / arg1s;
3061 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3064 val = arg0s % arg1s;
3069 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3072 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3077 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3080 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3098 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3099 the value is in range. We can't return any old value for
3100 out-of-range arguments because either the middle-end (via
3101 shift_truncation_mask) or the back-end might be relying on
3102 target-specific knowledge. Nor can we rely on
3103 shift_truncation_mask, since the shift might not be part of an
3104 ashlM3, lshrM3 or ashrM3 instruction. */
3105 if (SHIFT_COUNT_TRUNCATED)
3106 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3107 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3110 val = (code == ASHIFT
3111 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3112 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3114 /* Sign-extend the result for arithmetic right shifts. */
3115 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3116 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3124 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3125 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3133 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3134 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3138 /* Do nothing here. */
3142 val = arg0s <= arg1s ? arg0s : arg1s;
3146 val = ((unsigned HOST_WIDE_INT) arg0
3147 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3151 val = arg0s > arg1s ? arg0s : arg1s;
3155 val = ((unsigned HOST_WIDE_INT) arg0
3156 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3164 /* ??? There are simplifications that can be done. */
3171 return gen_int_mode (val, mode);
3179 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3182 Rather than test for specific cases, we do this by a brute-force method
3183 and do all possible simplifications until no more changes occur. Then
3184 we rebuild the operation. */
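/* An illustrative run (a sketch, not drawn from the code below): given
   (plus (minus a b) (minus b c)), the expansion loop flattens the tree
   into the signed multiset {+a, -b, +b, -c}; the pairwise
   simplification pass then cancels -b against +b, and the result is
   rebuilt as (minus a c).  */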
3186 struct simplify_plus_minus_op_data
3193 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3195 const struct simplify_plus_minus_op_data *d1 = p1;
3196 const struct simplify_plus_minus_op_data *d2 = p2;
3199 result = (commutative_operand_precedence (d2->op)
3200 - commutative_operand_precedence (d1->op));
3204 /* Group together equal REGs to do more simplification. */
3205 if (REG_P (d1->op) && REG_P (d2->op))
3206 return REGNO (d1->op) - REGNO (d2->op);
3212 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3215 struct simplify_plus_minus_op_data ops[8];
3217 int n_ops = 2, input_ops = 2;
3218 int changed, n_constants = 0, canonicalized = 0;
3221 memset (ops, 0, sizeof ops);
3223 /* Set up the two operands and then expand them until nothing has been
3224 changed. If we run out of room in our array, give up; this should
3225 almost never happen. */
3230 ops[1].neg = (code == MINUS);
3236 for (i = 0; i < n_ops; i++)
3238 rtx this_op = ops[i].op;
3239 int this_neg = ops[i].neg;
3240 enum rtx_code this_code = GET_CODE (this_op);
3249 ops[n_ops].op = XEXP (this_op, 1);
3250 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3253 ops[i].op = XEXP (this_op, 0);
3256 canonicalized |= this_neg;
3260 ops[i].op = XEXP (this_op, 0);
3261 ops[i].neg = ! this_neg;
3268 && GET_CODE (XEXP (this_op, 0)) == PLUS
3269 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3270 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3272 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3273 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3274 ops[n_ops].neg = this_neg;
3282 /* ~a -> (-a - 1) */
3285 ops[n_ops].op = constm1_rtx;
3286 ops[n_ops++].neg = this_neg;
3287 ops[i].op = XEXP (this_op, 0);
3288 ops[i].neg = !this_neg;
3298 ops[i].op = neg_const_int (mode, this_op);
3312 if (n_constants > 1)
3315 gcc_assert (n_ops >= 2);
3317 /* If we only have two operands, we can avoid the loops. */
3320 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3323 /* Get the two operands. Be careful with the order, especially for
3324 the cases where code == MINUS. */
3325 if (ops[0].neg && ops[1].neg)
3327 lhs = gen_rtx_NEG (mode, ops[0].op);
3330 else if (ops[0].neg)
3341 return simplify_const_binary_operation (code, mode, lhs, rhs);
3344 /* Now simplify each pair of operands until nothing changes. */
3347 /* Insertion sort is good enough for an eight-element array. */
3348 for (i = 1; i < n_ops; i++)
3350 struct simplify_plus_minus_op_data save;
3352 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3358 ops[j + 1] = ops[j];
3359 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3363 /* This is only useful the first time through. */
3368 for (i = n_ops - 1; i > 0; i--)
3369 for (j = i - 1; j >= 0; j--)
3371 rtx lhs = ops[j].op, rhs = ops[i].op;
3372 int lneg = ops[j].neg, rneg = ops[i].neg;
3374 if (lhs != 0 && rhs != 0)
3376 enum rtx_code ncode = PLUS;
3382 tem = lhs, lhs = rhs, rhs = tem;
3384 else if (swap_commutative_operands_p (lhs, rhs))
3385 tem = lhs, lhs = rhs, rhs = tem;
3387 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3388 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3390 rtx tem_lhs, tem_rhs;
3392 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3393 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3394 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3396 if (tem && !CONSTANT_P (tem))
3397 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3400 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3402 /* Reject "simplifications" that just wrap the two
3403 arguments in a CONST. Failure to do so can result
3404 in infinite recursion with simplify_binary_operation
3405 when it calls us to simplify CONST operations. */
3407 && ! (GET_CODE (tem) == CONST
3408 && GET_CODE (XEXP (tem, 0)) == ncode
3409 && XEXP (XEXP (tem, 0), 0) == lhs
3410 && XEXP (XEXP (tem, 0), 1) == rhs))
3413 if (GET_CODE (tem) == NEG)
3414 tem = XEXP (tem, 0), lneg = !lneg;
3415 if (GET_CODE (tem) == CONST_INT && lneg)
3416 tem = neg_const_int (mode, tem), lneg = 0;
3420 ops[j].op = NULL_RTX;
3426 /* Pack all the operands to the lower-numbered entries. */
3427 for (i = 0, j = 0; j < n_ops; j++)
3437 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3439 && GET_CODE (ops[1].op) == CONST_INT
3440 && CONSTANT_P (ops[0].op)
3442 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3444 /* We suppressed creation of trivial CONST expressions in the
3445 combination loop to avoid recursion. Create one manually now.
3446 The combination loop should have ensured that there is exactly
3447 one CONST_INT, and the sort will have ensured that it is last
3448 in the array and that any other constant will be next-to-last. */
3451 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3452 && CONSTANT_P (ops[n_ops - 2].op))
3454 rtx value = ops[n_ops - 1].op;
3455 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3456 value = neg_const_int (mode, value);
3457 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3461 /* Put a non-negated operand first, if possible. */
3463 for (i = 0; i < n_ops && ops[i].neg; i++)
3466 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3475 /* Now make the result by performing the requested operations. */
3477 for (i = 1; i < n_ops; i++)
3478 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3479 mode, result, ops[i].op);
3484 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3486 plus_minus_operand_p (rtx x)
3488 return GET_CODE (x) == PLUS
3489 || GET_CODE (x) == MINUS
3490 || (GET_CODE (x) == CONST
3491 && GET_CODE (XEXP (x, 0)) == PLUS
3492 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3493 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3496 /* Like simplify_binary_operation except used for relational operators.
3497 MODE is the mode of the result.  If MODE is VOIDmode, the operands
3498 must not both be VOIDmode.
3500 CMP_MODE specifies the mode in which the comparison is done, so it is
3501 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3502 the operands or, if both are VOIDmode, the operands are compared in
3503 "infinite precision". */
3505 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3506 enum machine_mode cmp_mode, rtx op0, rtx op1)
3508 rtx tem, trueop0, trueop1;
3510 if (cmp_mode == VOIDmode)
3511 cmp_mode = GET_MODE (op0);
3512 if (cmp_mode == VOIDmode)
3513 cmp_mode = GET_MODE (op1);
3515 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3518 if (SCALAR_FLOAT_MODE_P (mode))
3520 if (tem == const0_rtx)
3521 return CONST0_RTX (mode);
3522 #ifdef FLOAT_STORE_FLAG_VALUE
3524 REAL_VALUE_TYPE val;
3525 val = FLOAT_STORE_FLAG_VALUE (mode);
3526 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3532 if (VECTOR_MODE_P (mode))
3534 if (tem == const0_rtx)
3535 return CONST0_RTX (mode);
3536 #ifdef VECTOR_STORE_FLAG_VALUE
3541 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3542 if (val == NULL_RTX)
3544 if (val == const1_rtx)
3545 return CONST1_RTX (mode);
3547 units = GET_MODE_NUNITS (mode);
3548 v = rtvec_alloc (units);
3549 for (i = 0; i < units; i++)
3550 RTVEC_ELT (v, i) = val;
3551 return gen_rtx_raw_CONST_VECTOR (mode, v);
3561 /* For the following tests, ensure const0_rtx is op1. */
3562 if (swap_commutative_operands_p (op0, op1)
3563 || (op0 == const0_rtx && op1 != const0_rtx))
3564 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3566 /* If op0 is a compare, extract the comparison arguments from it. */
3567 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3568 return simplify_relational_operation (code, mode, VOIDmode,
3569 XEXP (op0, 0), XEXP (op0, 1));
3571 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3575 trueop0 = avoid_constant_pool_reference (op0);
3576 trueop1 = avoid_constant_pool_reference (op1);
3577 return simplify_relational_operation_1 (code, mode, cmp_mode,
3581 /* This part of simplify_relational_operation is only used when CMP_MODE
3582 is not in class MODE_CC (i.e. it is a real comparison).
3584 MODE is the mode of the result, while CMP_MODE specifies the mode in
3585 which the comparison is done, so it is the mode of the operands.  */
3588 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3589 enum machine_mode cmp_mode, rtx op0, rtx op1)
3591 enum rtx_code op0code = GET_CODE (op0);
3593 if (GET_CODE (op1) == CONST_INT)
3595 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3597 /* If op0 is a comparison, extract the comparison arguments from it.  */
3601 if (GET_MODE (op0) == mode)
3602 return simplify_rtx (op0);
3604 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3605 XEXP (op0, 0), XEXP (op0, 1));
3607 else if (code == EQ)
3609 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3610 if (new_code != UNKNOWN)
3611 return simplify_gen_relational (new_code, mode, VOIDmode,
3612 XEXP (op0, 0), XEXP (op0, 1));
3617 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3618 if ((code == EQ || code == NE)
3619 && (op0code == PLUS || op0code == MINUS)
3621 && CONSTANT_P (XEXP (op0, 1))
3622 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3624 rtx x = XEXP (op0, 0);
3625 rtx c = XEXP (op0, 1);
3627 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3629 return simplify_gen_relational (code, mode, cmp_mode, x, c);
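/* For example (illustrative): (eq (plus x (const_int 3)) (const_int 10))
   becomes (eq x (const_int 7)), computing c as 10 - 3.  For integers
   this is always safe, since adding a constant is invertible modulo
   2^n; for floats it requires flag_unsafe_math_optimizations.  */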
3632 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3633 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3635 && op1 == const0_rtx
3636 && GET_MODE_CLASS (mode) == MODE_INT
3637 && cmp_mode != VOIDmode
3638 /* ??? Work-around BImode bugs in the ia64 backend. */
3640 && cmp_mode != BImode
3641 && nonzero_bits (op0, cmp_mode) == 1
3642 && STORE_FLAG_VALUE == 1)
3643 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3644 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3645 : lowpart_subreg (mode, op0, cmp_mode);
3647 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3648 if ((code == EQ || code == NE)
3649 && op1 == const0_rtx
3651 return simplify_gen_relational (code, mode, cmp_mode,
3652 XEXP (op0, 0), XEXP (op0, 1));
3654 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3655 if ((code == EQ || code == NE)
3657 && rtx_equal_p (XEXP (op0, 0), op1)
3658 && !side_effects_p (XEXP (op0, 0)))
3659 return simplify_gen_relational (code, mode, cmp_mode,
3660 XEXP (op0, 1), const0_rtx);
3662 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3663 if ((code == EQ || code == NE)
3665 && rtx_equal_p (XEXP (op0, 1), op1)
3666 && !side_effects_p (XEXP (op0, 1)))
3667 return simplify_gen_relational (code, mode, cmp_mode,
3668 XEXP (op0, 0), const0_rtx);
3670 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3671 if ((code == EQ || code == NE)
3673 && (GET_CODE (op1) == CONST_INT
3674 || GET_CODE (op1) == CONST_DOUBLE)
3675 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3676 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3677 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3678 simplify_gen_binary (XOR, cmp_mode,
3679 XEXP (op0, 1), op1));
3684 /* Check if the given comparison (done in the given MODE) is actually a
3685 tautology or a contradiction.
3686 If no simplification is possible, this function returns zero.
3687 Otherwise, it returns either const_true_rtx or const0_rtx. */
3690 simplify_const_relational_operation (enum rtx_code code,
3691 enum machine_mode mode,
3694 int equal, op0lt, op0ltu, op1lt, op1ltu;
3699 gcc_assert (mode != VOIDmode
3700 || (GET_MODE (op0) == VOIDmode
3701 && GET_MODE (op1) == VOIDmode));
3703 /* If op0 is a compare, extract the comparison arguments from it. */
3704 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3706 op1 = XEXP (op0, 1);
3707 op0 = XEXP (op0, 0);
3709 if (GET_MODE (op0) != VOIDmode)
3710 mode = GET_MODE (op0);
3711 else if (GET_MODE (op1) != VOIDmode)
3712 mode = GET_MODE (op1);
3717 /* We can't simplify MODE_CC values since we don't know what the
3718 actual comparison is. */
3719 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3722 /* Make sure the constant is second. */
3723 if (swap_commutative_operands_p (op0, op1))
3725 tem = op0, op0 = op1, op1 = tem;
3726 code = swap_condition (code);
3729 trueop0 = avoid_constant_pool_reference (op0);
3730 trueop1 = avoid_constant_pool_reference (op1);
3732 /* For integer comparisons of A and B maybe we can simplify A - B and can
3733 then simplify a comparison of that with zero. If A and B are both either
3734 a register or a CONST_INT, this can't help; testing for these cases will
3735 prevent infinite recursion here and speed things up.
3737 We can only do this for EQ and NE comparisons; otherwise we may
3738 lose or introduce overflow that we cannot disregard as undefined,
3739 since we do not know the signedness of the operation on either the
3740 left or the right hand side of the comparison.  */
3742 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3743 && (code == EQ || code == NE)
3744 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3745 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3746 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3747 /* We cannot do this if tem is a nonzero address. */
3748 && ! nonzero_address_p (tem))
3749 return simplify_const_relational_operation (signed_condition (code),
3750 mode, tem, const0_rtx);
3752 if (! HONOR_NANS (mode) && code == ORDERED)
3753 return const_true_rtx;
3755 if (! HONOR_NANS (mode) && code == UNORDERED)
3758 /* For modes without NaNs, if the two operands are equal, we know the
3759 result except if they have side-effects. */
3760 if (! HONOR_NANS (GET_MODE (trueop0))
3761 && rtx_equal_p (trueop0, trueop1)
3762 && ! side_effects_p (trueop0))
3763 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3765 /* If the operands are floating-point constants, see if we can fold
3767 else if (GET_CODE (trueop0) == CONST_DOUBLE
3768 && GET_CODE (trueop1) == CONST_DOUBLE
3769 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3771 REAL_VALUE_TYPE d0, d1;
3773 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3774 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3776 /* Comparisons are unordered iff at least one of the values is NaN. */
3777 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3787 return const_true_rtx;
3800 equal = REAL_VALUES_EQUAL (d0, d1);
3801 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3802 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3805 /* Otherwise, see if the operands are both integers. */
3806 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3807 && (GET_CODE (trueop0) == CONST_DOUBLE
3808 || GET_CODE (trueop0) == CONST_INT)
3809 && (GET_CODE (trueop1) == CONST_DOUBLE
3810 || GET_CODE (trueop1) == CONST_INT))
3812 int width = GET_MODE_BITSIZE (mode);
3813 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3814 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3816 /* Get the two words comprising each integer constant. */
3817 if (GET_CODE (trueop0) == CONST_DOUBLE)
3819 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3820 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3824 l0u = l0s = INTVAL (trueop0);
3825 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3828 if (GET_CODE (trueop1) == CONST_DOUBLE)
3830 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3831 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3835 l1u = l1s = INTVAL (trueop1);
3836 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3839 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3840 we have to sign or zero-extend the values. */
3841 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3843 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3844 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3846 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3847 l0s |= ((HOST_WIDE_INT) (-1) << width);
3849 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3850 l1s |= ((HOST_WIDE_INT) (-1) << width);
3852 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3853 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3855 equal = (h0u == h1u && l0u == l1u);
3856 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3857 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3858 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3859 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
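/* Illustration (hypothetical two-word values): with
   A == (h0 = -1, l0 = 5) and B == (h1 = 0, l1 = 2), A < B in the signed
   sense because h0s < h1s, while B < A unsigned because h1u < h0u once
   -1 is viewed as all-ones -- the high words decide and the low words
   only break ties.  */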
3862 /* Otherwise, there are some code-specific tests we can make. */
3865 /* Optimize comparisons with upper and lower bounds. */
3866 if (SCALAR_INT_MODE_P (mode)
3867 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3880 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3887 /* x >= min is always true. */
3888 if (rtx_equal_p (trueop1, mmin))
3889 tem = const_true_rtx;
3895 /* x <= max is always true. */
3896 if (rtx_equal_p (trueop1, mmax))
3897 tem = const_true_rtx;
3902 /* x > max is always false. */
3903 if (rtx_equal_p (trueop1, mmax))
3909 /* x < min is always false. */
3910 if (rtx_equal_p (trueop1, mmin))
3917 if (tem == const0_rtx
3918 || tem == const_true_rtx)
3925 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3930 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3931 return const_true_rtx;
3935 /* Optimize abs(x) < 0.0. */
3936 if (trueop1 == CONST0_RTX (mode)
3937 && !HONOR_SNANS (mode)
3938 && (!INTEGRAL_MODE_P (mode)
3939 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
3941 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3943 if (GET_CODE (tem) == ABS)
3945 if (INTEGRAL_MODE_P (mode)
3946 && (issue_strict_overflow_warning
3947 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
3948 warning (OPT_Wstrict_overflow,
3949 ("assuming signed overflow does not occur when "
3950 "assuming abs (x) < 0 is false"));
3957 /* Optimize abs(x) >= 0.0. */
3958 if (trueop1 == CONST0_RTX (mode)
3959 && !HONOR_NANS (mode)
3960 && (!INTEGRAL_MODE_P (mode)
3961 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
3963 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3965 if (GET_CODE (tem) == ABS)
3967 if (INTEGRAL_MODE_P (mode)
3968 && (issue_strict_overflow_warning
3969 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
3970 warning (OPT_Wstrict_overflow,
3971 ("assuming signed overflow does not occur when "
3972 "assuming abs (x) >= 0 is true"));
3973 return const_true_rtx;
3979 /* Optimize ! (abs(x) < 0.0). */
3980 if (trueop1 == CONST0_RTX (mode))
3982 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3984 if (GET_CODE (tem) == ABS)
3985 return const_true_rtx;
3996 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set as appropriate.  */
4002 return equal ? const_true_rtx : const0_rtx;
4005 return ! equal ? const_true_rtx : const0_rtx;
4008 return op0lt ? const_true_rtx : const0_rtx;
4011 return op1lt ? const_true_rtx : const0_rtx;
4013 return op0ltu ? const_true_rtx : const0_rtx;
4015 return op1ltu ? const_true_rtx : const0_rtx;
4018 return equal || op0lt ? const_true_rtx : const0_rtx;
4021 return equal || op1lt ? const_true_rtx : const0_rtx;
4023 return equal || op0ltu ? const_true_rtx : const0_rtx;
4025 return equal || op1ltu ? const_true_rtx : const0_rtx;
4027 return const_true_rtx;
4035 /* Simplify CODE, an operation with result mode MODE and three operands,
4036 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4037 a constant.  Return 0 if no simplification is possible.  */
4040 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4041 enum machine_mode op0_mode, rtx op0, rtx op1,
4044 unsigned int width = GET_MODE_BITSIZE (mode);
4046 /* VOIDmode means "infinite" precision. */
4048 width = HOST_BITS_PER_WIDE_INT;
4054 if (GET_CODE (op0) == CONST_INT
4055 && GET_CODE (op1) == CONST_INT
4056 && GET_CODE (op2) == CONST_INT
4057 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4058 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4060 /* Extracting a bit-field from a constant */
4061 HOST_WIDE_INT val = INTVAL (op0);
4063 if (BITS_BIG_ENDIAN)
4064 val >>= (GET_MODE_BITSIZE (op0_mode)
4065 - INTVAL (op2) - INTVAL (op1));
4067 val >>= INTVAL (op2);
4069 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4071 /* First zero-extend. */
4072 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4073 /* If desired, propagate sign bit. */
4074 if (code == SIGN_EXTRACT
4075 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4076 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4079 /* Clear the bits that don't belong in our mode,
4080 unless they and our sign bit are all one.
4081 So we get either a reasonable negative value or a reasonable
4082 unsigned value for this mode. */
4083 if (width < HOST_BITS_PER_WIDE_INT
4084 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4085 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4086 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4088 return gen_int_mode (val, mode);
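/* Worked example (illustrative; assumes !BITS_BIG_ENDIAN): extracting
   op1 == 4 bits at position op2 == 4 from op0 == 0xab first shifts,
   0xab >> 4 == 0xa, then masks with (1 << 4) - 1.  ZERO_EXTRACT yields
   0xa; SIGN_EXTRACT sees bit 3 of the field set and sign-fills,
   yielding -6.  */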
4093 if (GET_CODE (op0) == CONST_INT)
4094 return op0 != const0_rtx ? op1 : op2;
4096 /* Convert c ? a : a into "a". */
4097 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4100 /* Convert a != b ? a : b into "a". */
4101 if (GET_CODE (op0) == NE
4102 && ! side_effects_p (op0)
4103 && ! HONOR_NANS (mode)
4104 && ! HONOR_SIGNED_ZEROS (mode)
4105 && ((rtx_equal_p (XEXP (op0, 0), op1)
4106 && rtx_equal_p (XEXP (op0, 1), op2))
4107 || (rtx_equal_p (XEXP (op0, 0), op2)
4108 && rtx_equal_p (XEXP (op0, 1), op1))))
4111 /* Convert a == b ? a : b into "b". */
4112 if (GET_CODE (op0) == EQ
4113 && ! side_effects_p (op0)
4114 && ! HONOR_NANS (mode)
4115 && ! HONOR_SIGNED_ZEROS (mode)
4116 && ((rtx_equal_p (XEXP (op0, 0), op1)
4117 && rtx_equal_p (XEXP (op0, 1), op2))
4118 || (rtx_equal_p (XEXP (op0, 0), op2)
4119 && rtx_equal_p (XEXP (op0, 1), op1))))
4122 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4124 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4125 ? GET_MODE (XEXP (op0, 1))
4126 : GET_MODE (XEXP (op0, 0)));
4129 /* Look for happy constants in op1 and op2. */
4130 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4132 HOST_WIDE_INT t = INTVAL (op1);
4133 HOST_WIDE_INT f = INTVAL (op2);
4135 if (t == STORE_FLAG_VALUE && f == 0)
4136 code = GET_CODE (op0);
4137 else if (t == 0 && f == STORE_FLAG_VALUE)
4140 tmp = reversed_comparison_code (op0, NULL_RTX);
4148 return simplify_gen_relational (code, mode, cmp_mode,
4149 XEXP (op0, 0), XEXP (op0, 1));
4152 if (cmp_mode == VOIDmode)
4153 cmp_mode = op0_mode;
4154 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4155 cmp_mode, XEXP (op0, 0),
4158 /* See if any simplifications were possible. */
4161 if (GET_CODE (temp) == CONST_INT)
4162 return temp == const0_rtx ? op2 : op1;
4164 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4170 gcc_assert (GET_MODE (op0) == mode);
4171 gcc_assert (GET_MODE (op1) == mode);
4172 gcc_assert (VECTOR_MODE_P (mode));
4173 op2 = avoid_constant_pool_reference (op2);
4174 if (GET_CODE (op2) == CONST_INT)
4176 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4177 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4178 int mask = (1 << n_elts) - 1;
4180 if (!(INTVAL (op2) & mask))
4182 if ((INTVAL (op2) & mask) == mask)
4185 op0 = avoid_constant_pool_reference (op0);
4186 op1 = avoid_constant_pool_reference (op1);
4187 if (GET_CODE (op0) == CONST_VECTOR
4188 && GET_CODE (op1) == CONST_VECTOR)
4190 rtvec v = rtvec_alloc (n_elts);
4193 for (i = 0; i < n_elts; i++)
4194 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4195 ? CONST_VECTOR_ELT (op0, i)
4196 : CONST_VECTOR_ELT (op1, i));
4197 return gen_rtx_CONST_VECTOR (mode, v);
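/* For instance (illustrative): merging two V4SI constant vectors with
   INTVAL (op2) == 5 (binary 0101) takes elements 0 and 2 from op0 and
   elements 1 and 3 from op1, mirroring the (INTVAL (op2) & (1 << i))
   test above.  */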
4209 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4210 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4212 Works by unpacking OP into a collection of 8-bit values
4213 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4214 and then repacking them again for OUTERMODE. */
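/* A concrete trace (hypothetical; little-endian conventions assumed
   throughout): simplifying (subreg:QI (const_int 0x1234) 0) with
   innermode == HImode unpacks 0x1234 into the byte array {0x34, 0x12},
   selects byte 0, and repacks it as (const_int 0x34); with BYTE == 1
   the result would be (const_int 0x12).  */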
4217 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4218 enum machine_mode innermode, unsigned int byte)
4220 /* We support up to 512-bit values (for V8DFmode). */
4224 enum { max_bitsize = 512, value_bit = 8, value_mask = (1 << value_bit) - 1 };
4226 unsigned char value[max_bitsize / value_bit];
4235 rtvec result_v = NULL;
4236 enum mode_class outer_class;
4237 enum machine_mode outer_submode;
4239 /* Some ports misuse CCmode. */
4240 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4243 /* We have no way to represent a complex constant at the rtl level. */
4244 if (COMPLEX_MODE_P (outermode))
4247 /* Unpack the value. */
4249 if (GET_CODE (op) == CONST_VECTOR)
4251 num_elem = CONST_VECTOR_NUNITS (op);
4252 elems = &CONST_VECTOR_ELT (op, 0);
4253 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4259 elem_bitsize = max_bitsize;
4261 /* If this asserts, it is too complicated; reducing value_bit may help. */
4262 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4263 /* I don't know how to handle endianness of sub-units. */
4264 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4266 for (elem = 0; elem < num_elem; elem++)
4269 rtx el = elems[elem];
4271 /* Vectors are kept in target memory order.  (This is probably a mistake.)  */
4274 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4275 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4277 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4278 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4279 unsigned bytele = (subword_byte % UNITS_PER_WORD
4280 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4281 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4284 switch (GET_CODE (el))
4288 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4290 *vp++ = INTVAL (el) >> i;
4291 /* CONST_INTs are always logically sign-extended. */
4292 for (; i < elem_bitsize; i += value_bit)
4293 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4297 if (GET_MODE (el) == VOIDmode)
4299 /* If this triggers, someone should have generated a
4300 CONST_INT instead. */
4301 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4303 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4304 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4305 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4308 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4311 /* It shouldn't matter what's done here, so fill it with zero.  */
4313 for (; i < elem_bitsize; i += value_bit)
4318 long tmp[max_bitsize / 32];
4319 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4321 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4322 gcc_assert (bitsize <= elem_bitsize);
4323 gcc_assert (bitsize % value_bit == 0);
4325 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4328 /* real_to_target produces its result in words affected by
4329 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4330 and use WORDS_BIG_ENDIAN instead; see the documentation
4331 of SUBREG in rtl.texi. */
4332 for (i = 0; i < bitsize; i += value_bit)
4335 if (WORDS_BIG_ENDIAN)
4336 ibase = bitsize - 1 - i;
4339 *vp++ = tmp[ibase / 32] >> i % 32;
4342 /* It shouldn't matter what's done here, so fill it with zero.  */
4344 for (; i < elem_bitsize; i += value_bit)
4354 /* Now, pick the right byte to start with. */
4355 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4356 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4357 will already have offset 0. */
4358 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4360 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4362 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4363 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4364 byte = (subword_byte % UNITS_PER_WORD
4365 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4368 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4369 so if it's become negative it will instead be very large.) */
4370 gcc_assert (byte < GET_MODE_SIZE (innermode));
4372 /* Convert from bytes to chunks of size value_bit. */
4373 value_start = byte * (BITS_PER_UNIT / value_bit);
4375 /* Re-pack the value. */
4377 if (VECTOR_MODE_P (outermode))
4379 num_elem = GET_MODE_NUNITS (outermode);
4380 result_v = rtvec_alloc (num_elem);
4381 elems = &RTVEC_ELT (result_v, 0);
4382 outer_submode = GET_MODE_INNER (outermode);
4388 outer_submode = outermode;
4391 outer_class = GET_MODE_CLASS (outer_submode);
4392 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4394 gcc_assert (elem_bitsize % value_bit == 0);
4395 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4397 for (elem = 0; elem < num_elem; elem++)
4401 /* Vectors are stored in target memory order.  (This is probably a mistake.)  */
4404 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4405 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4407 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4408 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4409 unsigned bytele = (subword_byte % UNITS_PER_WORD
4410 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4411 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4414 switch (outer_class)
4417 case MODE_PARTIAL_INT:
4419 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4422 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4424 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4425 for (; i < elem_bitsize; i += value_bit)
4426 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4427 << (i - HOST_BITS_PER_WIDE_INT));
4429 /* immed_double_const doesn't call trunc_int_for_mode.  I don't know why.  */
4431 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4432 elems[elem] = gen_int_mode (lo, outer_submode);
4433 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4434 elems[elem] = immed_double_const (lo, hi, outer_submode);
4441 case MODE_DECIMAL_FLOAT:
4444 long tmp[max_bitsize / 32];
4446 /* real_from_target wants its input in words affected by
4447 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4448 and use WORDS_BIG_ENDIAN instead; see the documentation
4449 of SUBREG in rtl.texi. */
4450 for (i = 0; i < max_bitsize / 32; i++)
4452 for (i = 0; i < elem_bitsize; i += value_bit)
4455 if (WORDS_BIG_ENDIAN)
4456 ibase = elem_bitsize - 1 - i;
4459 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4462 real_from_target (&r, tmp, outer_submode);
4463 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4471 if (VECTOR_MODE_P (outermode))
4472 return gen_rtx_CONST_VECTOR (outermode, result_v);
4477 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4478 Return 0 if no simplifications are possible. */
4480 simplify_subreg (enum machine_mode outermode, rtx op,
4481 enum machine_mode innermode, unsigned int byte)
4483 /* Little bit of sanity checking. */
4484 gcc_assert (innermode != VOIDmode);
4485 gcc_assert (outermode != VOIDmode);
4486 gcc_assert (innermode != BLKmode);
4487 gcc_assert (outermode != BLKmode);
4489 gcc_assert (GET_MODE (op) == innermode
4490 || GET_MODE (op) == VOIDmode);
4492 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4493 gcc_assert (byte < GET_MODE_SIZE (innermode));
4495 if (outermode == innermode && !byte)
4498 if (GET_CODE (op) == CONST_INT
4499 || GET_CODE (op) == CONST_DOUBLE
4500 || GET_CODE (op) == CONST_VECTOR)
4501 return simplify_immed_subreg (outermode, op, innermode, byte);
4503 /* Changing mode twice with SUBREG => just change it once,
4504 or not at all if changing back to op's starting mode.  */
4505 if (GET_CODE (op) == SUBREG)
4507 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4508 int final_offset = byte + SUBREG_BYTE (op);
4511 if (outermode == innermostmode
4512 && byte == 0 && SUBREG_BYTE (op) == 0)
4513 return SUBREG_REG (op);
4515 /* The SUBREG_BYTE represents the offset, as if the value were stored
4516 in memory.  The irritating exception is the paradoxical subreg, where
4517 we define SUBREG_BYTE to be 0; on big-endian machines this value
4518 should really be negative.  For a moment, undo this exception.  */
4519 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4521 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4522 if (WORDS_BIG_ENDIAN)
4523 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4524 if (BYTES_BIG_ENDIAN)
4525 final_offset += difference % UNITS_PER_WORD;
4527 if (SUBREG_BYTE (op) == 0
4528 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4530 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4531 if (WORDS_BIG_ENDIAN)
4532 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4533 if (BYTES_BIG_ENDIAN)
4534 final_offset += difference % UNITS_PER_WORD;
4537 /* See whether the resulting subreg will be paradoxical.  */
4538 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4540 /* In nonparadoxical subregs we can't handle negative offsets. */
4541 if (final_offset < 0)
4543 /* Bail out in case resulting subreg would be incorrect. */
4544 if (final_offset % GET_MODE_SIZE (outermode)
4545 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4551 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4553 /* In a paradoxical subreg, see if we are still looking at the lower
4554 part.  If so, our SUBREG_BYTE will be 0.  */
4555 if (WORDS_BIG_ENDIAN)
4556 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4557 if (BYTES_BIG_ENDIAN)
4558 offset += difference % UNITS_PER_WORD;
4559 if (offset == final_offset)
4565 /* Recurse for further possible simplifications. */
4566 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4570 if (validate_subreg (outermode, innermostmode,
4571 SUBREG_REG (op), final_offset))
4572 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4576 /* Merge implicit and explicit truncations. */
4578 if (GET_CODE (op) == TRUNCATE
4579 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4580 && subreg_lowpart_offset (outermode, innermode) == byte)
4581 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4582 GET_MODE (XEXP (op, 0)));
4584 /* SUBREG of a hard register => just change the register number
4585 and/or mode. If the hard register is not valid in that mode,
4586 suppress this simplification. If the hard register is the stack,
4587 frame, or argument pointer, leave this as a SUBREG. */
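  /* Illustrative example (assuming a target where a DImode value occupies
     two consecutive SImode hard registers): (subreg:SI (reg:DI 10) 4) can
     become (reg:SI 11) on a little-endian target, provided register 11 is
     valid for SImode.  */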
4590 && REGNO (op) < FIRST_PSEUDO_REGISTER
4591 #ifdef CANNOT_CHANGE_MODE_CLASS
4592 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4593 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4594 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4596 && ((reload_completed && !frame_pointer_needed)
4597 || (REGNO (op) != FRAME_POINTER_REGNUM
4598 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4599 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4602 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4603 && REGNO (op) != ARG_POINTER_REGNUM
4605 && REGNO (op) != STACK_POINTER_REGNUM
4606 && subreg_offset_representable_p (REGNO (op), innermode,
4609 unsigned int regno = REGNO (op);
4610 unsigned int final_regno
4611 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4613 /* ??? We do allow it if the current REG is not valid for
4614 its mode. This is a kludge to work around how float/complex
4615 arguments are passed on 32-bit SPARC and should be fixed. */
4616 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4617 || ! HARD_REGNO_MODE_OK (regno, innermode))
4620 int final_offset = byte;
4622 /* Adjust offset for paradoxical subregs. */
4624 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4626 int difference = (GET_MODE_SIZE (innermode)
4627 - GET_MODE_SIZE (outermode));
4628 if (WORDS_BIG_ENDIAN)
4629 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4630 if (BYTES_BIG_ENDIAN)
4631 final_offset += difference % UNITS_PER_WORD;
4634 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
      /* Propagate the original regno.  We don't have any way to specify
	 the offset inside the original regno, so do so only for the
	 lowpart.  The information is used only by alias analysis, which
	 cannot grok partial registers anyway.  */
4641 if (subreg_lowpart_offset (outermode, innermode) == byte)
4642 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4647 /* If we have a SUBREG of a register that we are replacing and we are
4648 replacing it with a MEM, make a new MEM and try replacing the
4649 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4650 or if we would be widening it. */
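  /* For example (illustrative; the exact form depends on the target's
     addressing modes): (subreg:SI (mem:DI addr) 4) can be rewritten by
     adjust_address_nv as (mem:SI (plus addr (const_int 4))), narrowing
     the memory reference instead of keeping the SUBREG.  */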
4653 && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
4656 && (! MEM_VOLATILE_P (op)
4657 || ! have_insn_for (SET, innermode))
4658 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4659 return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as a CONCAT
     of real and imaginary parts.  */
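  /* Illustrative example: with op = (concat:SC (reg:SF r) (reg:SF i)),
     GET_MODE_UNIT_SIZE is 4, so (subreg:SF op 0) selects the real part
     (reg:SF r) and (subreg:SF op 4) selects the imaginary part
     (reg:SF i), each with a final_offset of 0.  */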
4663 if (GET_CODE (op) == CONCAT)
4665 unsigned int inner_size, final_offset;
4668 inner_size = GET_MODE_UNIT_SIZE (innermode);
4669 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4670 final_offset = byte % inner_size;
4671 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4674 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4677 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4678 return gen_rtx_SUBREG (outermode, part, final_offset);
4682 /* Optimize SUBREG truncations of zero and sign extended values. */
4683 if ((GET_CODE (op) == ZERO_EXTEND
4684 || GET_CODE (op) == SIGN_EXTEND)
4685 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4687 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4689 /* If we're requesting the lowpart of a zero or sign extension,
4690 there are three possibilities. If the outermode is the same
4691 as the origmode, we can omit both the extension and the subreg.
4692 If the outermode is not larger than the origmode, we can apply
4693 the truncation without the extension. Finally, if the outermode
4694 is larger than the origmode, but both are integer modes, we
4695 can just extend to the appropriate mode. */
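      /* Illustrative instances of the three cases (little-endian lowpart,
	 hand-worked):
	   (subreg:QI (zero_extend:SI (reg:QI x)) 0) -> (reg:QI x)
	   (subreg:QI (zero_extend:SI (reg:HI x)) 0)
	     -> (subreg:QI (reg:HI x) 0)
	   (subreg:HI (zero_extend:SI (reg:QI x)) 0)
	     -> (zero_extend:HI (reg:QI x))  */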
4698 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4699 if (outermode == origmode)
4700 return XEXP (op, 0);
4701 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4702 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4703 subreg_lowpart_offset (outermode,
4705 if (SCALAR_INT_MODE_P (outermode))
4706 return simplify_gen_unary (GET_CODE (op), outermode,
4707 XEXP (op, 0), origmode);
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
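  /* E.g. on a little-endian target, (subreg:SI (zero_extend:DI (reg:SI x)) 4)
     reads bits 32-63 of the DImode value; bitpos is 32 and the source is
     only 32 bits wide, so every extracted bit is a zero supplied by the
     extension and the whole subreg folds to zero.  */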
4712 if (GET_CODE (op) == ZERO_EXTEND
4713 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4714 return CONST0_RTX (outermode);
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
4720 if ((GET_CODE (op) == LSHIFTRT
4721 || GET_CODE (op) == ASHIFTRT)
4722 && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
4727 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4728 && GET_CODE (XEXP (op, 1)) == CONST_INT
4729 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4730 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4731 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4732 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4733 return simplify_gen_binary (ASHIFTRT, outermode,
4734 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
4739 if ((GET_CODE (op) == LSHIFTRT
4740 || GET_CODE (op) == ASHIFTRT)
4741 && SCALAR_INT_MODE_P (outermode)
4742 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4743 && GET_CODE (XEXP (op, 1)) == CONST_INT
4744 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4745 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4746 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4747 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4748 return simplify_gen_binary (LSHIFTRT, outermode,
4749 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
4754 if (GET_CODE (op) == ASHIFT
4755 && SCALAR_INT_MODE_P (outermode)
4756 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4757 && GET_CODE (XEXP (op, 1)) == CONST_INT
4758 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4759 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4760 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4761 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4762 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4763 return simplify_gen_binary (ASHIFT, outermode,
4764 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4769 /* Make a SUBREG operation or equivalent if it folds. */
4772 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4773 enum machine_mode innermode, unsigned int byte)
4777 newx = simplify_subreg (outermode, op, innermode, byte);
4781 if (GET_CODE (op) == SUBREG
4782 || GET_CODE (op) == CONCAT
4783 || GET_MODE (op) == VOIDmode)
4786 if (validate_subreg (outermode, innermode, op, byte))
4787 return gen_rtx_SUBREG (outermode, op, byte);
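/* A minimal usage sketch (hypothetical caller; extracts the lowpart word):

     rtx lo = simplify_gen_subreg (SImode, x, DImode,
				   subreg_lowpart_offset (SImode, DImode));

   When no fold applies, this constructs a fresh SUBREG if the operand and
   offset are representable, and returns NULL for operands such as nested
   SUBREGs or CONCATs that cannot be wrapped directly.  */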
4792 /* Simplify X, an rtx expression.
   Return the simplified expression or NULL if no simplifications can be made.
4797 This is the preferred entry point into the simplification routines;
4798 however, we still allow passes to call the more specific routines.
4800 Right now GCC has three (yes, three) major bodies of RTL simplification
4801 code that need to be unified.
4803 1. fold_rtx in cse.c. This code uses various CSE specific
4804 information to aid in RTL simplification.
4806 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
      it uses combine specific information to aid in RTL simplification.
4810 3. The routines in this file.
   Long term we want to have only one body of simplification code; to
4814 get to that state I recommend the following steps:
   1. Pore over fold_rtx & simplify_rtx and move any simplifications
      which do not depend on pass-specific state into these routines.
4819 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4820 use this routine whenever possible.
4822 3. Allow for pass dependent state to be provided to these
4823 routines and add simplifications based on the pass dependent
      state.  Remove code from cse.c & combine.c that becomes redundant.
4827 It will take time, but ultimately the compiler will be easier to
4828 maintain and improve. It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
4833 simplify_rtx (rtx x)
4835 enum rtx_code code = GET_CODE (x);
4836 enum machine_mode mode = GET_MODE (x);
4838 switch (GET_RTX_CLASS (code))
4841 return simplify_unary_operation (code, mode,
4842 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4843 case RTX_COMM_ARITH:
4844 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4845 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4847 /* Fall through.... */
4850 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4853 case RTX_BITFIELD_OPS:
4854 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4855 XEXP (x, 0), XEXP (x, 1),
4859 case RTX_COMM_COMPARE:
4860 return simplify_relational_operation (code, mode,
4861 ((GET_MODE (XEXP (x, 0))
4863 ? GET_MODE (XEXP (x, 0))
4864 : GET_MODE (XEXP (x, 1))),
4870 return simplify_gen_subreg (mode, SUBREG_REG (x),
4871 GET_MODE (SUBREG_REG (x)),
4878 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4879 if (GET_CODE (XEXP (x, 0)) == HIGH
4880 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))