1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
29 #include "hard-reg-set.h"
32 #include "insn-config.h"
40 /* Simplification and canonicalization of RTL. */
42 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
43 virtual regs here because the simplify_*_operation routines are called
44 by integrate.c, which is called before virtual register instantiation.
46 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
47 a header file so that their definitions can be shared with the
48 simplification routines in simplify-rtx.c. Until then, do not
49 change these macros without also changing the copy in simplify-rtx.c. */
/* Nonzero if X is a fixed base register (frame, hard frame, fixed arg
   pointer, or the virtual equivalents), possibly offset by a CONST_INT.

   Bug fix (review): in the PLUS arm the arg-pointer clause tested the
   whole expression ((X) == arg_pointer_rtx), which can never hold when
   GET_CODE (X) == PLUS; it must inspect the base operand XEXP (X, 0),
   matching the neighboring frame/virtual-register tests.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
65 /* Similar, but also allows reference to the stack pointer.
67 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
68 arg_pointer_rtx by itself is nonzero, because on at least one machine,
69 the i960, the arg pointer is zero when it is unused. */
/* Nonzero if X is a base register known to be nonzero, possibly offset
   by a CONST_INT.  Like FIXED_BASE_PLUS_P but also accepts the stack
   pointer and outgoing/dynamic virtual registers; a bare arg pointer is
   deliberately excluded (it can be zero when unused on some targets).

   Bug fix (review): as in FIXED_BASE_PLUS_P, the arg-pointer clause
   inside the first PLUS arm tested (X) == arg_pointer_rtx, which is
   unreachable when GET_CODE (X) == PLUS; it now tests XEXP (X, 0).  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.

   Macro-hygiene fix (review): the argument is now parenthesized so an
   expression argument (e.g. a + b) casts and compares correctly.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
/* Forward declarations of file-local helpers (old-style PARAMS macro).
   NOTE(review): this chunk is a partial extraction -- several of the
   prototypes below are visibly truncated mid-declaration, and the
   closing #endif of the REAL_IS_NOT_DOUBLE block is not visible.  */
98 static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
99 static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
101 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
102 enum machine_mode, rtx,
104 static void check_fold_consts PARAMS ((PTR));
105 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
106 static void simplify_unary_real PARAMS ((PTR));
107 static void simplify_binary_real PARAMS ((PTR));
109 static void simplify_binary_is2orm1 PARAMS ((PTR));
112 /* Negate a CONST_INT rtx, truncating (because a conversion from a
113 maximally negative number can overflow). */
115 neg_const_int (mode, i)
116 enum machine_mode mode;
119 return GEN_INT (trunc_int_for_mode (- INTVAL (i), mode));
123 /* Make a binary operation by properly ordering the operands and
124 seeing if the expression folds. */
127 simplify_gen_binary (code, mode, op0, op1)
129 enum machine_mode mode;
134 /* Put complex operands first and constants second if commutative. */
135 if (GET_RTX_CLASS (code) == 'c'
136 && swap_commutative_operands_p (op0, op1))
137 tem = op0, op0 = op1, op1 = tem;
139 /* If this simplifies, do it. */
140 tem = simplify_binary_operation (code, mode, op0, op1);
144 /* Handle addition and subtraction specially. Otherwise, just form
147 if (code == PLUS || code == MINUS)
149 tem = simplify_plus_minus (code, mode, op0, op1, 1);
154 return gen_rtx_fmt_ee (code, mode, op0, op1);
157 /* If X is a MEM referencing the constant pool, return the real value.
158 Otherwise return X. */
160 avoid_constant_pool_reference (x)
164 enum machine_mode cmode;
166 if (GET_CODE (x) != MEM)
170 if (GET_CODE (addr) != SYMBOL_REF
171 || ! CONSTANT_POOL_ADDRESS_P (addr))
174 c = get_pool_constant (addr);
175 cmode = get_pool_mode (addr);
177 /* If we're accessing the constant in a different mode than it was
178 originally stored, attempt to fix that up via subreg simplifications.
179 If that fails we have no choice but to return the original memory. */
180 if (cmode != GET_MODE (x))
182 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
189 /* Make a unary operation by first seeing if it folds and otherwise making
190 the specified operation. */
193 simplify_gen_unary (code, mode, op, op_mode)
195 enum machine_mode mode;
197 enum machine_mode op_mode;
201 /* If this simplifies, use it. */
202 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
205 return gen_rtx_fmt_e (code, mode, op);
208 /* Likewise for ternary operations. */
211 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
213 enum machine_mode mode, op0_mode;
218 /* If this simplifies, use it. */
219 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
223 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
226 /* Likewise, for relational operations.
227 CMP_MODE specifies mode comparison is done in.
231 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
233 enum machine_mode mode;
234 enum machine_mode cmp_mode;
239 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
242 /* Put complex operands first and constants second. */
243 if (swap_commutative_operands_p (op0, op1))
244 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
246 return gen_rtx_fmt_ee (code, mode, op0, op1);
249 /* Replace all occurrences of OLD in X with NEW and try to simplify the
250 resulting RTX. Return a new RTX which is as simplified as possible. */
/* NOTE(review): many interior lines of this function (case labels,
   braces, fallback returns) are missing from this extraction; the
   fragments below are kept byte-identical.  */
253 simplify_replace_rtx (x, old, new)
258 enum rtx_code code = GET_CODE (x);
259 enum machine_mode mode = GET_MODE (x);
261 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
262 to build a new expression substituting recursively. If we can't do
263 anything, return our input. */
/* Dispatch on the rtx class: unary, binary, relational, ternary,
   then special-cased SUBREG and MEM below.  */
268 switch (GET_RTX_CLASS (code))
272 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
273 rtx op = (XEXP (x, 0) == old
274 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
276 return simplify_gen_unary (code, mode, op, op_mode);
282 simplify_gen_binary (code, mode,
283 simplify_replace_rtx (XEXP (x, 0), old, new),
284 simplify_replace_rtx (XEXP (x, 1), old, new));
/* Relational case: pick a comparison mode from whichever operand has
   a non-VOID mode.  The ternary selecting the final cmp_mode argument
   (lines 295-299 of the original) is partially missing here.  */
287 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
288 ? GET_MODE (XEXP (x, 0))
289 : GET_MODE (XEXP (x, 1)));
290 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
291 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
294 simplify_gen_relational (code, mode,
297 : GET_MODE (op0) != VOIDmode
306 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
307 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
310 simplify_gen_ternary (code, mode,
315 simplify_replace_rtx (XEXP (x, 1), old, new),
316 simplify_replace_rtx (XEXP (x, 2), old, new));
320 /* The only case we try to handle is a SUBREG. */
324 exp = simplify_gen_subreg (GET_MODE (x),
325 simplify_replace_rtx (SUBREG_REG (x),
327 GET_MODE (SUBREG_REG (x)),
/* MEM case: substitute inside the address without validating it.  */
335 if (GET_CODE (x) == MEM)
337 replace_equiv_address_nv (x,
338 simplify_replace_rtx (XEXP (x, 0),
346 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
347 /* Subroutine of simplify_unary_operation, called via do_float_handler.
348 Handles simplification of unary ops on floating point values. */
/* Argument block passed through do_float_handler's PTR parameter.
   NOTE(review): the struct's code/operand/result/want_integer fields
   are among the lines missing from this extraction.  */
349 struct simplify_unary_real_args
353 enum machine_mode mode;
357 #define REAL_VALUE_ABS(d_) \
358 (REAL_VALUE_NEGATIVE (d_) ? REAL_VALUE_NEGATE (d_) : (d_))
361 simplify_unary_real (p)
366 struct simplify_unary_real_args *args =
367 (struct simplify_unary_real_args *) p;
369 REAL_VALUE_FROM_CONST_DOUBLE (d, args->operand);
/* Integer-result path: FIX/UNSIGNED_FIX produce a CONST_INT.  */
371 if (args->want_integer)
377 case FIX: i = REAL_VALUE_FIX (d); break;
378 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
382 args->result = GEN_INT (trunc_int_for_mode (i, args->mode));
/* Floating-result path: compute d in place, then box it below.  */
389 /* We don't attempt to optimize this. */
393 case ABS: d = REAL_VALUE_ABS (d); break;
394 case NEG: d = REAL_VALUE_NEGATE (d); break;
395 case FLOAT_TRUNCATE: d = real_value_truncate (args->mode, d); break;
396 case FLOAT_EXTEND: /* All this does is change the mode. */ break;
397 case FIX: d = REAL_VALUE_RNDZINT (d); break;
398 case UNSIGNED_FIX: d = REAL_VALUE_UNSIGNED_RNDZINT (d); break;
402 args->result = CONST_DOUBLE_FROM_REAL_VALUE (d, args->mode);
407 /* Try to simplify a unary operation CODE whose output mode is to be
408 MODE with input operand OP whose mode was originally OP_MODE.
409 Return zero if no simplification can be made. */
/* NOTE(review): this function is heavily truncated in this extraction --
   most case labels, braces and fall-through returns are missing.  All
   original lines are kept byte-identical; only comments are added.  */
411 simplify_unary_operation (code, mode, op, op_mode)
413 enum machine_mode mode;
415 enum machine_mode op_mode;
417 unsigned int width = GET_MODE_BITSIZE (mode);
418 rtx trueop = avoid_constant_pool_reference (op);
420 /* The order of these tests is critical so that, for example, we don't
421 check the wrong mode (input vs. output) for a conversion operation,
422 such as FIX. At some point, this should be simplified. */
424 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
/* Case 1: FLOAT of a compile-time integer constant.  */
426 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
427 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
429 HOST_WIDE_INT hv, lv;
432 if (GET_CODE (trueop) == CONST_INT)
433 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
435 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
437 #ifdef REAL_ARITHMETIC
438 REAL_VALUE_FROM_INT (d, lv, hv, mode);
/* Non-REAL_ARITHMETIC fallback builds the double by scaling the high
   word by 2^HOST_BITS_PER_WIDE_INT in two half-width steps.  */
443 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
444 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
445 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
451 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
452 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
453 d += (double) (unsigned HOST_WIDE_INT) lv;
455 #endif /* REAL_ARITHMETIC */
456 d = real_value_truncate (mode, d);
457 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Case 2: UNSIGNED_FLOAT of a compile-time integer constant.  */
459 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
460 && (GET_CODE (trueop) == CONST_DOUBLE
461 || GET_CODE (trueop) == CONST_INT))
463 HOST_WIDE_INT hv, lv;
466 if (GET_CODE (trueop) == CONST_INT)
467 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
469 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
471 if (op_mode == VOIDmode)
473 /* We don't know how to interpret negative-looking numbers in
474 this case, so don't try to fold those. */
478 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
481 hv = 0, lv &= GET_MODE_MASK (op_mode);
483 #ifdef REAL_ARITHMETIC
484 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
487 d = (double) (unsigned HOST_WIDE_INT) hv;
488 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
489 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
490 d += (double) (unsigned HOST_WIDE_INT) lv;
491 #endif /* REAL_ARITHMETIC */
492 d = real_value_truncate (mode, d);
493 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Case 3: unary ops on a CONST_INT that fits in a host word; the
   switch on CODE (NOT/NEG/ABS/FFS/extensions/...) is mostly missing
   here, only selected arms survive.  */
497 if (GET_CODE (trueop) == CONST_INT
498 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
500 HOST_WIDE_INT arg0 = INTVAL (trueop);
514 val = (arg0 >= 0 ? arg0 : - arg0);
518 /* Don't use ffs here. Instead, get low order bit and then its
519 number. If arg0 is zero, this will return 0, as desired. */
520 arg0 &= GET_MODE_MASK (mode);
521 val = exact_log2 (arg0 & (- arg0)) + 1;
529 /* When zero-extending a CONST_INT, we need to know its
531 if (op_mode == VOIDmode)
533 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
535 /* If we were really extending the mode,
536 we would have to distinguish between zero-extension
537 and sign-extension. */
538 if (width != GET_MODE_BITSIZE (op_mode))
542 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
543 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
/* SIGN_EXTEND arm: mask to op_mode, then subtract 2^bits if the sign
   bit of op_mode is set.  */
549 if (op_mode == VOIDmode)
551 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
553 /* If we were really extending the mode,
554 we would have to distinguish between zero-extension
555 and sign-extension. */
556 if (width != GET_MODE_BITSIZE (op_mode))
560 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
563 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
565 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
566 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
583 val = trunc_int_for_mode (val, mode);
585 return GEN_INT (val);
588 /* We can do some operations on integer CONST_DOUBLEs. Also allow
589 for a DImode operation on a CONST_INT. */
590 else if (GET_MODE (trueop) == VOIDmode
591 && width <= HOST_BITS_PER_WIDE_INT * 2
592 && (GET_CODE (trueop) == CONST_DOUBLE
593 || GET_CODE (trueop) == CONST_INT))
595 unsigned HOST_WIDE_INT l1, lv;
596 HOST_WIDE_INT h1, hv;
598 if (GET_CODE (trueop) == CONST_DOUBLE)
599 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
601 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
611 neg_double (l1, h1, &lv, &hv);
616 neg_double (l1, h1, &lv, &hv);
624 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
626 lv = exact_log2 (l1 & (-l1)) + 1;
630 /* This is just a change-of-mode, so do nothing. */
635 if (op_mode == VOIDmode)
638 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
642 lv = l1 & GET_MODE_MASK (op_mode);
646 if (op_mode == VOIDmode
647 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
651 lv = l1 & GET_MODE_MASK (op_mode);
652 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
653 && (lv & ((HOST_WIDE_INT) 1
654 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
655 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
657 hv = HWI_SIGN_EXTEND (lv);
668 return immed_double_const (lv, hv, mode);
671 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* Case 4: unary ops on floating CONST_DOUBLE, delegated to
   simplify_unary_real via do_float_handler (traps FP exceptions).  */
672 else if (GET_CODE (trueop) == CONST_DOUBLE
673 && GET_MODE_CLASS (mode) == MODE_FLOAT
675 struct simplify_unary_real_args args;
676 args.operand = trueop;
679 args.want_integer = false;
681 if (do_float_handler (simplify_unary_real, (PTR) &args))
687 else if (GET_CODE (trueop) == CONST_DOUBLE
688 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
689 && GET_MODE_CLASS (mode) == MODE_INT
690 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
692 struct simplify_unary_real_args args;
693 args.operand = trueop;
696 args.want_integer = true;
698 if (do_float_handler (simplify_unary_real, (PTR) &args))
/* Case 5: operand is not constant -- algebraic identities.  */
704 /* This was formerly used only for non-IEEE float.
705 eggert@twinsun.com says it is safe for IEEE also. */
708 enum rtx_code reversed;
709 /* There are some simplifications we can do even if the operands
714 /* (not (not X)) == X. */
715 if (GET_CODE (op) == NOT)
718 /* (not (eq X Y)) == (ne X Y), etc. */
719 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
720 && ((reversed = reversed_comparison_code (op, NULL_RTX))
722 return gen_rtx_fmt_ee (reversed,
723 op_mode, XEXP (op, 0), XEXP (op, 1));
727 /* (neg (neg X)) == X. */
728 if (GET_CODE (op) == NEG)
733 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
734 becomes just the MINUS if its mode is MODE. This allows
735 folding switch statements on machines using casesi (such as
737 if (GET_CODE (op) == TRUNCATE
738 && GET_MODE (XEXP (op, 0)) == mode
739 && GET_CODE (XEXP (op, 0)) == MINUS
740 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
741 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
/* Pointer-extension folds: signed variant (SIGN_EXTEND arm) ...  */
744 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
745 if (! POINTERS_EXTEND_UNSIGNED
746 && mode == Pmode && GET_MODE (op) == ptr_mode
748 || (GET_CODE (op) == SUBREG
749 && GET_CODE (SUBREG_REG (op)) == REG
750 && REG_POINTER (SUBREG_REG (op))
751 && GET_MODE (SUBREG_REG (op)) == Pmode)))
752 return convert_memory_address (Pmode, op);
/* ... and unsigned variant (ZERO_EXTEND arm).  */
756 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
758 if (POINTERS_EXTEND_UNSIGNED > 0
759 && mode == Pmode && GET_MODE (op) == ptr_mode
761 || (GET_CODE (op) == SUBREG
762 && GET_CODE (SUBREG_REG (op)) == REG
763 && REG_POINTER (SUBREG_REG (op))
764 && GET_MODE (SUBREG_REG (op)) == Pmode)))
765 return convert_memory_address (Pmode, op);
777 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
778 /* Subroutine of simplify_binary_operation, called via do_float_handler.
779 Handles simplification of binary ops on floating point values. */
/* Argument block for the do_float_handler callback.  NOTE(review):
   the code/result fields and several lines of the struct and function
   are missing from this extraction; remaining lines are untouched.  */
780 struct simplify_binary_real_args
782 rtx trueop0, trueop1;
785 enum machine_mode mode;
789 simplify_binary_real (p)
792 REAL_VALUE_TYPE f0, f1, value;
793 struct simplify_binary_real_args *args =
794 (struct simplify_binary_real_args *) p;
796 REAL_VALUE_FROM_CONST_DOUBLE (f0, args->trueop0);
797 REAL_VALUE_FROM_CONST_DOUBLE (f1, args->trueop1);
798 f0 = real_value_truncate (args->mode, f0);
799 f1 = real_value_truncate (args->mode, f1);
801 #ifdef REAL_ARITHMETIC
802 #ifndef REAL_INFINITY
/* Without REAL_INFINITY we cannot represent x/0; bail out.  */
803 if (args->code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
809 REAL_ARITHMETIC (value, rtx_to_tree_code (args->code), f0, f1);
823 #ifndef REAL_INFINITY
830 value = MIN (f0, f1);
833 value = MAX (f0, f1);
840 value = real_value_truncate (args->mode, value);
841 args->result = CONST_DOUBLE_FROM_REAL_VALUE (value, args->mode);
845 /* Another subroutine called via do_float_handler. This one tests
846 the floating point value given against 2. and -1. */
/* NOTE(review): the struct's value/is_2/is_m1 field declarations and
   the function's braces are missing from this extraction.  The two
   flags are presumably ints used as booleans -- confirm upstream.  */
847 struct simplify_binary_is2orm1_args
855 simplify_binary_is2orm1 (p)
859 struct simplify_binary_is2orm1_args *args =
860 (struct simplify_binary_is2orm1_args *) p;
862 REAL_VALUE_FROM_CONST_DOUBLE (d, args->value);
863 args->is_2 = REAL_VALUES_EQUAL (d, dconst2);
864 args->is_m1 = REAL_VALUES_EQUAL (d, dconstm1);
867 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
868 and OP1. Return 0 if no simplification is possible.
870 Don't use this for relational operations such as EQ or LT.
871 Use simplify_relational_operation instead. */
873 simplify_binary_operation (code, mode, op0, op1)
875 enum machine_mode mode;
878 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
880 unsigned int width = GET_MODE_BITSIZE (mode);
882 rtx trueop0 = avoid_constant_pool_reference (op0);
883 rtx trueop1 = avoid_constant_pool_reference (op1);
885 /* Relational operations don't work here. We must know the mode
886 of the operands in order to do the comparison correctly.
887 Assuming a full word can give incorrect results.
888 Consider comparing 128 with -128 in QImode. */
890 if (GET_RTX_CLASS (code) == '<')
893 /* Make sure the constant is second. */
894 if (GET_RTX_CLASS (code) == 'c'
895 && swap_commutative_operands_p (trueop0, trueop1))
897 tem = op0, op0 = op1, op1 = tem;
898 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
901 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
902 if (GET_MODE_CLASS (mode) == MODE_FLOAT
903 && GET_CODE (trueop0) == CONST_DOUBLE
904 && GET_CODE (trueop1) == CONST_DOUBLE
905 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
907 struct simplify_binary_real_args args;
908 args.trueop0 = trueop0;
909 args.trueop1 = trueop1;
913 if (do_float_handler (simplify_binary_real, (PTR) &args))
917 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
919 /* We can fold some multi-word operations. */
920 if (GET_MODE_CLASS (mode) == MODE_INT
921 && width == HOST_BITS_PER_WIDE_INT * 2
922 && (GET_CODE (trueop0) == CONST_DOUBLE
923 || GET_CODE (trueop0) == CONST_INT)
924 && (GET_CODE (trueop1) == CONST_DOUBLE
925 || GET_CODE (trueop1) == CONST_INT))
927 unsigned HOST_WIDE_INT l1, l2, lv;
928 HOST_WIDE_INT h1, h2, hv;
930 if (GET_CODE (trueop0) == CONST_DOUBLE)
931 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
933 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
935 if (GET_CODE (trueop1) == CONST_DOUBLE)
936 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
938 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
943 /* A - B == A + (-B). */
944 neg_double (l2, h2, &lv, &hv);
947 /* .. fall through ... */
950 add_double (l1, h1, l2, h2, &lv, &hv);
954 mul_double (l1, h1, l2, h2, &lv, &hv);
957 case DIV: case MOD: case UDIV: case UMOD:
958 /* We'd need to include tree.h to do this and it doesn't seem worth
963 lv = l1 & l2, hv = h1 & h2;
967 lv = l1 | l2, hv = h1 | h2;
971 lv = l1 ^ l2, hv = h1 ^ h2;
977 && ((unsigned HOST_WIDE_INT) l1
978 < (unsigned HOST_WIDE_INT) l2)))
987 && ((unsigned HOST_WIDE_INT) l1
988 > (unsigned HOST_WIDE_INT) l2)))
995 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
997 && ((unsigned HOST_WIDE_INT) l1
998 < (unsigned HOST_WIDE_INT) l2)))
1005 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1007 && ((unsigned HOST_WIDE_INT) l1
1008 > (unsigned HOST_WIDE_INT) l2)))
1014 case LSHIFTRT: case ASHIFTRT:
1016 case ROTATE: case ROTATERT:
1017 #ifdef SHIFT_COUNT_TRUNCATED
1018 if (SHIFT_COUNT_TRUNCATED)
1019 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1022 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1025 if (code == LSHIFTRT || code == ASHIFTRT)
1026 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1028 else if (code == ASHIFT)
1029 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1030 else if (code == ROTATE)
1031 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1032 else /* code == ROTATERT */
1033 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1040 return immed_double_const (lv, hv, mode);
1043 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1044 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1046 /* Even if we can't compute a constant result,
1047 there are some cases worth simplifying. */
1052 /* In IEEE floating point, x+0 is not the same as x. Similarly
1053 for the other optimizations below. */
1054 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1055 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
1058 if (trueop1 == CONST0_RTX (mode))
1061 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
1062 if (GET_CODE (op0) == NEG)
1063 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1064 else if (GET_CODE (op1) == NEG)
1065 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1067 /* (~a) + 1 -> -a */
1068 if (INTEGRAL_MODE_P (mode)
1069 && GET_CODE (op0) == NOT
1070 && trueop1 == const1_rtx)
1071 return gen_rtx_NEG (mode, XEXP (op0, 0));
1073 /* Handle both-operands-constant cases. We can only add
1074 CONST_INTs to constants since the sum of relocatable symbols
1075 can't be handled by most assemblers. Don't add CONST_INT
1076 to CONST_INT since overflow won't be computed properly if wider
1077 than HOST_BITS_PER_WIDE_INT. */
1079 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1080 && GET_CODE (op1) == CONST_INT)
1081 return plus_constant (op0, INTVAL (op1));
1082 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1083 && GET_CODE (op0) == CONST_INT)
1084 return plus_constant (op1, INTVAL (op0));
1086 /* See if this is something like X * C - X or vice versa or
1087 if the multiplication is written as a shift. If so, we can
1088 distribute and make a new multiply, shift, or maybe just
1089 have X (if C is 2 in the example above). But don't make
1090 real multiply if we didn't have one before. */
1092 if (! FLOAT_MODE_P (mode))
1094 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1095 rtx lhs = op0, rhs = op1;
1098 if (GET_CODE (lhs) == NEG)
1099 coeff0 = -1, lhs = XEXP (lhs, 0);
1100 else if (GET_CODE (lhs) == MULT
1101 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1103 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1106 else if (GET_CODE (lhs) == ASHIFT
1107 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1108 && INTVAL (XEXP (lhs, 1)) >= 0
1109 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1111 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1112 lhs = XEXP (lhs, 0);
1115 if (GET_CODE (rhs) == NEG)
1116 coeff1 = -1, rhs = XEXP (rhs, 0);
1117 else if (GET_CODE (rhs) == MULT
1118 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1120 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1123 else if (GET_CODE (rhs) == ASHIFT
1124 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1125 && INTVAL (XEXP (rhs, 1)) >= 0
1126 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1128 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1129 rhs = XEXP (rhs, 0);
1132 if (rtx_equal_p (lhs, rhs))
1134 tem = simplify_gen_binary (MULT, mode, lhs,
1135 GEN_INT (coeff0 + coeff1));
1136 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1140 /* If one of the operands is a PLUS or a MINUS, see if we can
1141 simplify this by the associative law.
1142 Don't use the associative law for floating point.
1143 The inaccuracy makes it nonassociative,
1144 and subtle programs can break if operations are associated. */
1146 if (INTEGRAL_MODE_P (mode)
1147 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1148 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1149 || (GET_CODE (op0) == CONST
1150 && GET_CODE (XEXP (op0, 0)) == PLUS)
1151 || (GET_CODE (op1) == CONST
1152 && GET_CODE (XEXP (op1, 0)) == PLUS))
1153 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1159 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1160 using cc0, in which case we want to leave it as a COMPARE
1161 so we can distinguish it from a register-register-copy.
1163 In IEEE floating point, x-0 is not the same as x. */
1165 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1166 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1167 && trueop1 == CONST0_RTX (mode))
1171 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1172 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1173 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1174 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1176 rtx xop00 = XEXP (op0, 0);
1177 rtx xop10 = XEXP (op1, 0);
1180 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1182 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1183 && GET_MODE (xop00) == GET_MODE (xop10)
1184 && REGNO (xop00) == REGNO (xop10)
1185 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1186 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1193 /* None of these optimizations can be done for IEEE
1195 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1196 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
1199 /* We can't assume x-x is 0 even with non-IEEE floating point,
1200 but since it is zero except in very strange circumstances, we
1201 will treat it as zero with -funsafe-math-optimizations. */
1202 if (rtx_equal_p (trueop0, trueop1)
1203 && ! side_effects_p (op0)
1204 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1205 return CONST0_RTX (mode);
1207 /* Change subtraction from zero into negation. */
1208 if (trueop0 == CONST0_RTX (mode))
1209 return gen_rtx_NEG (mode, op1);
1211 /* (-1 - a) is ~a. */
1212 if (trueop0 == constm1_rtx)
1213 return gen_rtx_NOT (mode, op1);
1215 /* Subtracting 0 has no effect. */
1216 if (trueop1 == CONST0_RTX (mode))
1219 /* See if this is something like X * C - X or vice versa or
1220 if the multiplication is written as a shift. If so, we can
1221 distribute and make a new multiply, shift, or maybe just
1222 have X (if C is 2 in the example above). But don't make
1223 real multiply if we didn't have one before. */
1225 if (! FLOAT_MODE_P (mode))
1227 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1228 rtx lhs = op0, rhs = op1;
1231 if (GET_CODE (lhs) == NEG)
1232 coeff0 = -1, lhs = XEXP (lhs, 0);
1233 else if (GET_CODE (lhs) == MULT
1234 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1236 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1239 else if (GET_CODE (lhs) == ASHIFT
1240 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1241 && INTVAL (XEXP (lhs, 1)) >= 0
1242 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1244 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1245 lhs = XEXP (lhs, 0);
1248 if (GET_CODE (rhs) == NEG)
1249 coeff1 = - 1, rhs = XEXP (rhs, 0);
1250 else if (GET_CODE (rhs) == MULT
1251 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1253 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1256 else if (GET_CODE (rhs) == ASHIFT
1257 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1258 && INTVAL (XEXP (rhs, 1)) >= 0
1259 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1261 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1262 rhs = XEXP (rhs, 0);
1265 if (rtx_equal_p (lhs, rhs))
1267 tem = simplify_gen_binary (MULT, mode, lhs,
1268 GEN_INT (coeff0 - coeff1));
1269 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1273 /* (a - (-b)) -> (a + b). */
1274 if (GET_CODE (op1) == NEG)
1275 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1277 /* If one of the operands is a PLUS or a MINUS, see if we can
1278 simplify this by the associative law.
1279 Don't use the associative law for floating point.
1280 The inaccuracy makes it nonassociative,
1281 and subtle programs can break if operations are associated. */
1283 if (INTEGRAL_MODE_P (mode)
1284 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1285 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1286 || (GET_CODE (op0) == CONST
1287 && GET_CODE (XEXP (op0, 0)) == PLUS)
1288 || (GET_CODE (op1) == CONST
1289 && GET_CODE (XEXP (op1, 0)) == PLUS))
1290 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1293 /* Don't let a relocatable value get a negative coeff. */
1294 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1295 return simplify_gen_binary (PLUS, mode,
1297 neg_const_int (mode, op1));
1299 /* (x - (x & y)) -> (x & ~y) */
1300 if (GET_CODE (op1) == AND)
1302 if (rtx_equal_p (op0, XEXP (op1, 0)))
1303 return simplify_gen_binary (AND, mode, op0,
1304 gen_rtx_NOT (mode, XEXP (op1, 1)));
1305 if (rtx_equal_p (op0, XEXP (op1, 1)))
1306 return simplify_gen_binary (AND, mode, op0,
1307 gen_rtx_NOT (mode, XEXP (op1, 0)));
1312 if (trueop1 == constm1_rtx)
1314 tem = simplify_unary_operation (NEG, mode, op0, mode);
1316 return tem ? tem : gen_rtx_NEG (mode, op0);
1319 /* In IEEE floating point, x*0 is not always 0. */
1320 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1321 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1322 && trueop1 == CONST0_RTX (mode)
1323 && ! side_effects_p (op0))
1326 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1327 However, ANSI says we can drop signals,
1328 so we can do this anyway. */
1329 if (trueop1 == CONST1_RTX (mode))
1332 /* Convert multiply by constant power of two into shift unless
1333 we are still generating RTL. This test is a kludge. */
1334 if (GET_CODE (trueop1) == CONST_INT
1335 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1336 /* If the mode is larger than the host word size, and the
1337 uppermost bit is set, then this isn't a power of two due
1338 to implicit sign extension. */
1339 && (width <= HOST_BITS_PER_WIDE_INT
1340 || val != HOST_BITS_PER_WIDE_INT - 1)
1341 && ! rtx_equal_function_value_matters)
1342 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1344 if (GET_CODE (trueop1) == CONST_DOUBLE
1345 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT)
1347 struct simplify_binary_is2orm1_args args;
1349 args.value = trueop1;
1350 if (! do_float_handler (simplify_binary_is2orm1, (PTR) &args))
1353 /* x*2 is x+x and x*(-1) is -x */
1354 if (args.is_2 && GET_MODE (op0) == mode)
1355 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1357 else if (args.is_m1 && GET_MODE (op0) == mode)
1358 return gen_rtx_NEG (mode, op0);
1363 if (trueop1 == const0_rtx)
1365 if (GET_CODE (trueop1) == CONST_INT
1366 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1367 == GET_MODE_MASK (mode)))
1369 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1371 /* A | (~A) -> -1 */
1372 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1373 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1374 && ! side_effects_p (op0)
1375 && GET_MODE_CLASS (mode) != MODE_CC)
1380 if (trueop1 == const0_rtx)
1382 if (GET_CODE (trueop1) == CONST_INT
1383 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1384 == GET_MODE_MASK (mode)))
1385 return gen_rtx_NOT (mode, op0);
1386 if (trueop0 == trueop1 && ! side_effects_p (op0)
1387 && GET_MODE_CLASS (mode) != MODE_CC)
1392 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1394 if (GET_CODE (trueop1) == CONST_INT
1395 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1396 == GET_MODE_MASK (mode)))
1398 if (trueop0 == trueop1 && ! side_effects_p (op0)
1399 && GET_MODE_CLASS (mode) != MODE_CC)
1402 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1403 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1404 && ! side_effects_p (op0)
1405 && GET_MODE_CLASS (mode) != MODE_CC)
1410 /* Convert divide by power of two into shift (divide by 1 handled
1412 if (GET_CODE (trueop1) == CONST_INT
1413 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1414 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1416 /* ... fall through ... */
1419 if (trueop1 == CONST1_RTX (mode))
1421 /* On some platforms DIV uses narrower mode than its
1423 rtx x = gen_lowpart_common (mode, op0);
1426 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1427 return gen_lowpart_SUBREG (mode, op0);
1432 /* In IEEE floating point, 0/x is not always 0. */
1433 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1434 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1435 && trueop0 == CONST0_RTX (mode)
1436 && ! side_effects_p (op1))
1439 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1440 /* Change division by a constant into multiplication. Only do
1441 this with -funsafe-math-optimizations. */
1442 else if (GET_CODE (trueop1) == CONST_DOUBLE
1443 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1444 && trueop1 != CONST0_RTX (mode)
1445 && flag_unsafe_math_optimizations)
1448 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1450 if (! REAL_VALUES_EQUAL (d, dconst0))
1452 #if defined (REAL_ARITHMETIC)
1453 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1454 return gen_rtx_MULT (mode, op0,
1455 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1458 gen_rtx_MULT (mode, op0,
1459 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1467 /* Handle modulus by power of two (mod with 1 handled below). */
1468 if (GET_CODE (trueop1) == CONST_INT
1469 && exact_log2 (INTVAL (trueop1)) > 0)
1470 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1472 /* ... fall through ... */
1475 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1476 && ! side_effects_p (op0) && ! side_effects_p (op1))
1482 /* Rotating ~0 always results in ~0. */
1483 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1484 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1485 && ! side_effects_p (op1))
1488 /* ... fall through ... */
1493 if (trueop1 == const0_rtx)
1495 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1500 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1501 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1502 && ! side_effects_p (op0))
1504 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1509 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1510 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1511 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1512 && ! side_effects_p (op0))
1514 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1519 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1521 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1526 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1528 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1536 /* ??? There are simplifications that can be done. */
1546 /* Get the integer argument values in two forms:
1547 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1549 arg0 = INTVAL (trueop0);
1550 arg1 = INTVAL (trueop1);
1552 if (width < HOST_BITS_PER_WIDE_INT)
1554 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1555 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1558 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1559 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1562 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1563 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1571 /* Compute the value of the arithmetic. */
1576 val = arg0s + arg1s;
1580 val = arg0s - arg1s;
1584 val = arg0s * arg1s;
1589 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1592 val = arg0s / arg1s;
1597 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1600 val = arg0s % arg1s;
1605 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1608 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1613 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1616 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1632 /* If shift count is undefined, don't fold it; let the machine do
1633 what it wants. But truncate it if the machine will do that. */
1637 #ifdef SHIFT_COUNT_TRUNCATED
1638 if (SHIFT_COUNT_TRUNCATED)
1642 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1649 #ifdef SHIFT_COUNT_TRUNCATED
1650 if (SHIFT_COUNT_TRUNCATED)
1654 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1661 #ifdef SHIFT_COUNT_TRUNCATED
1662 if (SHIFT_COUNT_TRUNCATED)
1666 val = arg0s >> arg1;
1668 /* Bootstrap compiler may not have sign extended the right shift.
1669 Manually extend the sign to insure bootstrap cc matches gcc. */
1670 if (arg0s < 0 && arg1 > 0)
1671 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1680 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1681 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1689 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1690 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1694 /* Do nothing here. */
1698 val = arg0s <= arg1s ? arg0s : arg1s;
1702 val = ((unsigned HOST_WIDE_INT) arg0
1703 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1707 val = arg0s > arg1s ? arg0s : arg1s;
1711 val = ((unsigned HOST_WIDE_INT) arg0
1712 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1719 val = trunc_int_for_mode (val, mode);
1721 return GEN_INT (val);
1724 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1727 Rather than test for specific cases, we do this by a brute-force method
1728 and do all possible simplifications until no more changes occur. Then
1729 we rebuild the operation.
1731 If FORCE is true, then always generate the rtx. This is used to
1732 canonicalize stuff emitted from simplify_gen_binary. Note that this
1733 can still fail if the rtx is too complex. It won't fail just because
1734 the result is not 'simpler' than the input, however. */
1736 struct simplify_plus_minus_op_data
/* NOTE(review): the struct's field list is elided in this excerpt; the
   code below uses members OP (an rtx term of the sum) and NEG (nonzero
   when the term is subtracted) -- confirm against the full source.  */

/* qsort comparison callback for simplify_plus_minus: orders operand
   entries by decreasing commutative_operand_precedence of their OP,
   so higher-precedence operands sort first.  P1 and P2 point to
   struct simplify_plus_minus_op_data entries.  */
1743 simplify_plus_minus_op_data_cmp (p1, p2)
1747 const struct simplify_plus_minus_op_data *d1 = p1;
1748 const struct simplify_plus_minus_op_data *d2 = p2;
1750 return (commutative_operand_precedence (d2->op)
1751 - commutative_operand_precedence (d1->op));
/* Simplify a sum of terms.  CODE is PLUS or MINUS, MODE the mode of the
   operation, OP0 and OP1 its operands, at least one of which may itself
   be a PLUS, MINUS, NEG, NOT or CONST.  The operands are flattened into
   the OPS array, pairwise-combined until nothing changes, then rebuilt
   into a canonical sum.  If FORCE is nonzero, the result is built even
   when no net reduction in operand count occurred.  Returns the
   simplified rtx, or 0 on failure.  (Several lines of this function are
   elided in this excerpt; comments describe only the visible code.)  */
1755 simplify_plus_minus (code, mode, op0, op1, force)
1757 enum machine_mode mode;
1761 struct simplify_plus_minus_op_data ops[8];
1763 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1764 int first, negate, changed;
1767 memset ((char *) ops, 0, sizeof ops);
1769 /* Set up the two operands and then expand them until nothing has been
1770 changed. If we run out of room in our array, give up; this should
1771 almost never happen. */
/* A top-level MINUS is recorded by marking the second operand negated.  */
1776 ops[1].neg = (code == MINUS);
/* Expansion pass: decompose each entry that is a PLUS, MINUS, NEG, NOT,
   CONST-of-(PLUS const const) or negated CONST_INT into simpler entries.  */
1782 for (i = 0; i < n_ops; i++)
1784 rtx this_op = ops[i].op;
1785 int this_neg = ops[i].neg;
1786 enum rtx_code this_code = GET_CODE (this_op);
/* PLUS/MINUS: split into both suboperands; the second inherits a
   flipped sign when the code is MINUS.  */
1795 ops[n_ops].op = XEXP (this_op, 1);
1796 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1799 ops[i].op = XEXP (this_op, 0);
/* NEG: keep the operand, flip its sign flag.  */
1805 ops[i].op = XEXP (this_op, 0);
1806 ops[i].neg = ! this_neg;
/* CONST wrapping (PLUS constant constant): unwrap and split.  */
1812 && GET_CODE (XEXP (this_op, 0)) == PLUS
1813 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
1814 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
1816 ops[i].op = XEXP (XEXP (this_op, 0), 0);
1817 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
1818 ops[n_ops].neg = this_neg;
1826 /* ~a -> (-a - 1) */
1829 ops[n_ops].op = constm1_rtx;
1830 ops[n_ops++].neg = this_neg;
1831 ops[i].op = XEXP (this_op, 0);
1832 ops[i].neg = !this_neg;
/* A negated CONST_INT is folded into the constant itself.  */
1840 ops[i].op = neg_const_int (mode, this_op);
1853 /* If we only have two operands, we can't do anything. */
1854 if (n_ops <= 2 && !force)
1857 /* Count the number of CONSTs we didn't split above. */
1858 for (i = 0; i < n_ops; i++)
1859 if (GET_CODE (ops[i].op) == CONST)
1862 /* Now simplify each pair of operands until nothing changes. The first
1863 time through just simplify constants against each other. */
1870 for (i = 0; i < n_ops - 1; i++)
1871 for (j = i + 1; j < n_ops; j++)
1873 rtx lhs = ops[i].op, rhs = ops[j].op;
1874 int lneg = ops[i].neg, rneg = ops[j].neg;
1876 if (lhs != 0 && rhs != 0
1877 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
1879 enum rtx_code ncode = PLUS;
/* Canonicalize operand order before attempting to combine.  */
1885 tem = lhs, lhs = rhs, rhs = tem;
1887 else if (swap_commutative_operands_p (lhs, rhs))
1888 tem = lhs, lhs = rhs, rhs = tem;
1890 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1892 /* Reject "simplifications" that just wrap the two
1893 arguments in a CONST. Failure to do so can result
1894 in infinite recursion with simplify_binary_operation
1895 when it calls us to simplify CONST operations. */
1897 && ! (GET_CODE (tem) == CONST
1898 && GET_CODE (XEXP (tem, 0)) == ncode
1899 && XEXP (XEXP (tem, 0), 0) == lhs
1900 && XEXP (XEXP (tem, 0), 1) == rhs)
1901 /* Don't allow -x + -1 -> ~x simplifications in the
1902 first pass. This allows us the chance to combine
1903 the -1 with other constants. */
1905 && GET_CODE (tem) == NOT
1906 && XEXP (tem, 0) == rhs))
/* Strip an outer NEG into the LNEG flag; a negated CONST_INT is
   folded immediately.  */
1909 if (GET_CODE (tem) == NEG)
1910 tem = XEXP (tem, 0), lneg = !lneg;
1911 if (GET_CODE (tem) == CONST_INT && lneg)
1912 tem = neg_const_int (mode, tem), lneg = 0;
/* The combined term replaces entry I; entry J becomes empty.  */
1916 ops[j].op = NULL_RTX;
1926 /* Pack all the operands to the lower-numbered entries. */
1927 for (i = 0, j = 0; j < n_ops; j++)
1932 /* Sort the operations based on swap_commutative_operands_p. */
1933 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
1935 /* We suppressed creation of trivial CONST expressions in the
1936 combination loop to avoid recursion. Create one manually now.
1937 The combination loop should have ensured that there is exactly
1938 one CONST_INT, and the sort will have ensured that it is last
1939 in the array and that any other constant will be next-to-last. */
1942 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
1943 && CONSTANT_P (ops[n_ops - 2].op))
1945 rtx value = ops[n_ops - 1].op;
1946 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
1947 value = neg_const_int (mode, value);
1948 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
1952 /* Count the number of CONSTs that we generated. */
1954 for (i = 0; i < n_ops; i++)
1955 if (GET_CODE (ops[i].op) == CONST)
1958 /* Give up if we didn't reduce the number of operands we had. Make
1959 sure we count a CONST as two operands. If we have the same
1960 number of operands, but have made more CONSTs than before, this
1961 is also an improvement, so accept it. */
1963 && (n_ops + n_consts > input_ops
1964 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
1967 /* Put a non-negated operand first. If there aren't any, make all
1968 operands positive and negate the whole thing later. */
1971 for (i = 0; i < n_ops && ops[i].neg; i++)
1975 for (i = 0; i < n_ops; i++)
1987 /* Now make the result by performing the requested operations. */
1989 for (i = 1; i < n_ops; i++)
1990 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
1991 mode, result, ops[i].op);
/* If every remaining operand was negated, negate the whole sum once.  */
1993 return negate ? gen_rtx_NEG (mode, result) : result;
/* Argument block for check_fold_consts, passed through do_float_handler
   so that floating-point traps raised while reading the constants are
   caught.  (The struct's opening line and any other fields are elided
   in this excerpt.)  */
1998 rtx op0, op1; /* Input */
1999 int equal, op0lt, op1lt; /* Output */

/* Compare the CONST_DOUBLE operands in *DATA (really a struct cfc_args *)
   as REAL_VALUE_TYPEs, recording equality and both orderings in the
   output fields.  If either value is a NaN, the block is left flagged
   as unordered and the outputs are not written.  Called only via
   do_float_handler.  */
2004 check_fold_consts (data)
2007 struct cfc_args *args = (struct cfc_args *) data;
2008 REAL_VALUE_TYPE d0, d1;
2010 /* We may possibly raise an exception while reading the value. */
2011 args->unordered = 1;
2012 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
2013 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
2015 /* Comparisons of Inf versus Inf are ordered. */
2016 if (REAL_VALUE_ISNAN (d0)
2017 || REAL_VALUE_ISNAN (d1))
/* NOTE(review): the statement guarded here (presumably an early return,
   leaving args->unordered set) is elided in this excerpt.  */
2019 args->equal = REAL_VALUES_EQUAL (d0, d1);
2020 args->op0lt = REAL_VALUES_LESS (d0, d1);
2021 args->op1lt = REAL_VALUES_LESS (d1, d0);
2022 args->unordered = 0;
2025 /* Like simplify_binary_operation except used for relational operators.
2026 MODE is the mode of the operands, not that of the result. If MODE
2027 is VOIDmode, both operands must also be VOIDmode and we compare the
2028 operands in "infinite precision".
2030 If no simplification is possible, this function returns zero. Otherwise,
2031 it returns either const_true_rtx or const0_rtx. */
/* (Interface documented in the comment block just above.  Several lines,
   including case labels of the final switches, are elided in this
   excerpt; comments below describe only the visible code.)  */
2034 simplify_relational_operation (code, mode, op0, op1)
2036 enum machine_mode mode;
2039 int equal, op0lt, op0ltu, op1lt, op1ltu;
/* A VOIDmode comparison is only meaningful if both operands are
   VOIDmode constants.  */
2044 if (mode == VOIDmode
2045 && (GET_MODE (op0) != VOIDmode
2046 || GET_MODE (op1) != VOIDmode))
2049 /* If op0 is a compare, extract the comparison arguments from it. */
2050 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2051 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
/* Look through constant-pool references to get at constant values.  */
2053 trueop0 = avoid_constant_pool_reference (op0);
2054 trueop1 = avoid_constant_pool_reference (op1);
2056 /* We can't simplify MODE_CC values since we don't know what the
2057 actual comparison is. */
2058 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
2065 /* Make sure the constant is second. */
2066 if (swap_commutative_operands_p (trueop0, trueop1))
2068 tem = op0, op0 = op1, op1 = tem;
2069 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2070 code = swap_condition (code);
2073 /* For integer comparisons of A and B maybe we can simplify A - B and can
2074 then simplify a comparison of that with zero. If A and B are both either
2075 a register or a CONST_INT, this can't help; testing for these cases will
2076 prevent infinite recursion here and speed things up.
2078 If CODE is an unsigned comparison, then we can never do this optimization,
2079 because it gives an incorrect result if the subtraction wraps around zero.
2080 ANSI C defines unsigned operations such that they never overflow, and
2081 thus such cases can not be ignored. */
2083 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2084 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2085 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2086 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2087 && code != GTU && code != GEU && code != LTU && code != LEU)
2088 return simplify_relational_operation (signed_condition (code),
2089 mode, tem, const0_rtx);
/* With -funsafe-math-optimizations we may assume no NaNs, so every
   comparison is ordered.  */
2091 if (flag_unsafe_math_optimizations && code == ORDERED)
2092 return const_true_rtx;
2094 if (flag_unsafe_math_optimizations && code == UNORDERED)
2097 /* For non-IEEE floating-point, if the two operands are equal, we know the
2099 if (rtx_equal_p (trueop0, trueop1)
2100 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
2101 || ! FLOAT_MODE_P (GET_MODE (trueop0))
2102 || flag_unsafe_math_optimizations))
2103 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2105 /* If the operands are floating-point constants, see if we can fold
2107 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
2108 else if (GET_CODE (trueop0) == CONST_DOUBLE
2109 && GET_CODE (trueop1) == CONST_DOUBLE
2110 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2112 struct cfc_args args;
2114 /* Setup input for check_fold_consts() */
2119 if (!do_float_handler (check_fold_consts, (PTR) &args))
/* NOTE(review): code handling the trap / unordered-result cases is
   elided in this excerpt.  */
2132 return const_true_rtx;
2145 /* Receive output from check_fold_consts() */
2147 op0lt = op0ltu = args.op0lt;
2148 op1lt = op1ltu = args.op1lt;
2150 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
2152 /* Otherwise, see if the operands are both integers. */
2153 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2154 && (GET_CODE (trueop0) == CONST_DOUBLE
2155 || GET_CODE (trueop0) == CONST_INT)
2156 && (GET_CODE (trueop1) == CONST_DOUBLE
2157 || GET_CODE (trueop1) == CONST_INT))
2159 int width = GET_MODE_BITSIZE (mode);
2160 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2161 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2163 /* Get the two words comprising each integer constant. */
2164 if (GET_CODE (trueop0) == CONST_DOUBLE)
2166 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2167 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2171 l0u = l0s = INTVAL (trueop0);
2172 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2175 if (GET_CODE (trueop1) == CONST_DOUBLE)
2177 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2178 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2182 l1u = l1s = INTVAL (trueop1);
2183 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2186 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2187 we have to sign or zero-extend the values. */
2188 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2190 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2191 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2193 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2194 l0s |= ((HOST_WIDE_INT) (-1) << width);
2196 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2197 l1s |= ((HOST_WIDE_INT) (-1) << width);
2199 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2200 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
/* Double-word comparison: equality on the unsigned words, ordering
   decided by the high word first, then the low word.  */
2202 equal = (h0u == h1u && l0u == l1u);
2203 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2204 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2205 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2206 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2209 /* Otherwise, there are some code-specific tests we can make. */
2215 /* References to the frame plus a constant or labels cannot
2216 be zero, but a SYMBOL_REF can due to #pragma weak. */
2217 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2218 || GET_CODE (trueop0) == LABEL_REF)
2219 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2220 /* On some machines, the ap reg can be 0 sometimes. */
2221 && op0 != arg_pointer_rtx
2228 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2229 || GET_CODE (trueop0) == LABEL_REF)
2230 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2231 && op0 != arg_pointer_rtx
2234 return const_true_rtx;
2238 /* Unsigned values are never negative. */
2239 if (trueop1 == const0_rtx)
2240 return const_true_rtx;
2244 if (trueop1 == const0_rtx)
2249 /* Unsigned values are never greater than the largest
2251 if (GET_CODE (trueop1) == CONST_INT
2252 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2253 && INTEGRAL_MODE_P (mode))
2254 return const_true_rtx;
2258 if (GET_CODE (trueop1) == CONST_INT
2259 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2260 && INTEGRAL_MODE_P (mode))
2271 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
/* Dispatch on the comparison code using the flags computed above
   (the case labels of this switch are elided in this excerpt).  */
2277 return equal ? const_true_rtx : const0_rtx;
2280 return ! equal ? const_true_rtx : const0_rtx;
2283 return op0lt ? const_true_rtx : const0_rtx;
2286 return op1lt ? const_true_rtx : const0_rtx;
2288 return op0ltu ? const_true_rtx : const0_rtx;
2290 return op1ltu ? const_true_rtx : const0_rtx;
2293 return equal || op0lt ? const_true_rtx : const0_rtx;
2296 return equal || op1lt ? const_true_rtx : const0_rtx;
2298 return equal || op0ltu ? const_true_rtx : const0_rtx;
2300 return equal || op1ltu ? const_true_rtx : const0_rtx;
2302 return const_true_rtx;
2310 /* Simplify CODE, an operation with result mode MODE and three operands,
2311 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2312 a constant. Return 0 if no simplification is possible. */
/* (Interface documented in the comment block just above.  The switch's
   case labels and some statements are elided in this excerpt; comments
   below describe only the visible code.)  */
2315 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2317 enum machine_mode mode, op0_mode;
2320 unsigned int width = GET_MODE_BITSIZE (mode);
2322 /* VOIDmode means "infinite" precision. */
2324 width = HOST_BITS_PER_WIDE_INT;
/* SIGN_EXTRACT / ZERO_EXTRACT of constant operands: OP1 is the field
   width in bits, OP2 the bit position.  */
2330 if (GET_CODE (op0) == CONST_INT
2331 && GET_CODE (op1) == CONST_INT
2332 && GET_CODE (op2) == CONST_INT
2333 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2334 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2336 /* Extracting a bit-field from a constant */
2337 HOST_WIDE_INT val = INTVAL (op0);
2339 if (BITS_BIG_ENDIAN)
2340 val >>= (GET_MODE_BITSIZE (op0_mode)
2341 - INTVAL (op2) - INTVAL (op1));
2343 val >>= INTVAL (op2);
2345 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2347 /* First zero-extend. */
2348 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2349 /* If desired, propagate sign bit. */
2350 if (code == SIGN_EXTRACT
2351 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2352 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2355 /* Clear the bits that don't belong in our mode,
2356 unless they and our sign bit are all one.
2357 So we get either a reasonable negative value or a reasonable
2358 unsigned value for this mode. */
2359 if (width < HOST_BITS_PER_WIDE_INT
2360 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2361 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2362 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2364 return GEN_INT (val);
/* IF_THEN_ELSE with a constant condition selects one arm outright.  */
2369 if (GET_CODE (op0) == CONST_INT)
2370 return op0 != const0_rtx ? op1 : op2;
2372 /* Convert a == b ? b : a to "a". */
2373 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2374 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2375 && rtx_equal_p (XEXP (op0, 0), op1)
2376 && rtx_equal_p (XEXP (op0, 1), op2))
2378 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2379 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2380 && rtx_equal_p (XEXP (op0, 1), op1)
2381 && rtx_equal_p (XEXP (op0, 0), op2))
/* Condition is a comparison: try folding the comparison itself.  */
2383 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2385 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2386 ? GET_MODE (XEXP (op0, 1))
2387 : GET_MODE (XEXP (op0, 0)));
2389 if (cmp_mode == VOIDmode)
2390 cmp_mode = op0_mode;
2391 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2392 XEXP (op0, 0), XEXP (op0, 1));
2394 /* See if any simplifications were possible. */
2395 if (temp == const0_rtx)
2397 else if (temp == const1_rtx)
2402 /* Look for happy constants in op1 and op2. */
2403 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2405 HOST_WIDE_INT t = INTVAL (op1);
2406 HOST_WIDE_INT f = INTVAL (op2);
/* (cond ? STORE_FLAG_VALUE : 0) is just the comparison itself;
   (cond ? 0 : STORE_FLAG_VALUE) is its reverse, if one exists.  */
2408 if (t == STORE_FLAG_VALUE && f == 0)
2409 code = GET_CODE (op0);
2410 else if (t == 0 && f == STORE_FLAG_VALUE)
2413 tmp = reversed_comparison_code (op0, NULL_RTX);
2421 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2433 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2434 Return 0 if no simplification is possible. */
/* (Interface documented in the comment just above.  Several lines are
   elided in this excerpt; comments below describe only the visible
   code.)  */
2436 simplify_subreg (outermode, op, innermode, byte)
2439 enum machine_mode outermode, innermode;
2441 /* Little bit of sanity checking. */
2442 if (innermode == VOIDmode || outermode == VOIDmode
2443 || innermode == BLKmode || outermode == BLKmode)
2446 if (GET_MODE (op) != innermode
2447 && GET_MODE (op) != VOIDmode)
2450 if (byte % GET_MODE_SIZE (outermode)
2451 || byte >= GET_MODE_SIZE (innermode))
/* A same-mode, zero-offset subreg is a no-op.  */
2454 if (outermode == innermode && !byte)
2457 /* Attempt to simplify constant to non-SUBREG expression. */
2458 if (CONSTANT_P (op))
2461 unsigned HOST_WIDE_INT val = 0;
2463 /* ??? This code is partly redundant with code below, but can handle
2464 the subregs of floats and similar corner cases.
2465 Later we should move all simplification code here and rewrite
2466 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2467 using SIMPLIFY_SUBREG. */
2468 if (subreg_lowpart_offset (outermode, innermode) == byte)
2470 rtx new = gen_lowpart_if_possible (outermode, op);
2475 /* Similar comment as above apply here. */
2476 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2477 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2478 && GET_MODE_CLASS (outermode) == MODE_INT)
2480 rtx new = constant_subword (op,
2481 (byte / UNITS_PER_WORD),
/* Otherwise extract the requested bits of the constant by hand.  */
2487 offset = byte * BITS_PER_UNIT;
2488 switch (GET_CODE (op))
2491 if (GET_MODE (op) != VOIDmode)
2494 /* We can't handle this case yet. */
2495 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
/* Pick the high or low word of the CONST_DOUBLE according to
   endianness, then treat it as a plain integer below.  */
2498 part = offset >= HOST_BITS_PER_WIDE_INT;
2499 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2500 && BYTES_BIG_ENDIAN)
2501 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2502 && WORDS_BIG_ENDIAN))
2504 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2505 offset %= HOST_BITS_PER_WIDE_INT;
2507 /* We've already picked the word we want from a double, so
2508 pretend this is actually an integer. */
2509 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2513 if (GET_CODE (op) == CONST_INT)
2516 /* We don't handle synthesizing of non-integral constants yet. */
2517 if (GET_MODE_CLASS (outermode) != MODE_INT)
/* Adjust the bit offset for big-endian byte/word ordering.  */
2520 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2522 if (WORDS_BIG_ENDIAN)
2523 offset = (GET_MODE_BITSIZE (innermode)
2524 - GET_MODE_BITSIZE (outermode) - offset);
2525 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2526 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2527 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2528 - 2 * (offset % BITS_PER_WORD));
/* Bits beyond the host word are copies of the sign bit.  */
2531 if (offset >= HOST_BITS_PER_WIDE_INT)
2532 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2536 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2537 val = trunc_int_for_mode (val, outermode);
2538 return GEN_INT (val);
2545 /* Changing mode twice with SUBREG => just change it once,
2546 or not at all if changing back op starting mode. */
2547 if (GET_CODE (op) == SUBREG)
2549 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2550 int final_offset = byte + SUBREG_BYTE (op);
2553 if (outermode == innermostmode
2554 && byte == 0 && SUBREG_BYTE (op) == 0)
2555 return SUBREG_REG (op);
2557 /* The SUBREG_BYTE represents offset, as if the value were stored
2558 in memory. Irritating exception is paradoxical subreg, where
2559 we define SUBREG_BYTE to be 0. On big endian machines, this
2560 value should be negative. For a moment, undo this exception. */
2561 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2563 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2564 if (WORDS_BIG_ENDIAN)
2565 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2566 if (BYTES_BIG_ENDIAN)
2567 final_offset += difference % UNITS_PER_WORD;
2569 if (SUBREG_BYTE (op) == 0
2570 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2572 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2573 if (WORDS_BIG_ENDIAN)
2574 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2575 if (BYTES_BIG_ENDIAN)
2576 final_offset += difference % UNITS_PER_WORD;
2579 /* See whether resulting subreg will be paradoxical. */
2580 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2582 /* In nonparadoxical subregs we can't handle negative offsets. */
2583 if (final_offset < 0)
2585 /* Bail out in case resulting subreg would be incorrect. */
2586 if (final_offset % GET_MODE_SIZE (outermode)
2587 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2593 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2595 /* In paradoxical subreg, see if we are still looking on lower part.
2596 If so, our SUBREG_BYTE will be 0. */
2597 if (WORDS_BIG_ENDIAN)
2598 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2599 if (BYTES_BIG_ENDIAN)
2600 offset += difference % UNITS_PER_WORD;
2601 if (offset == final_offset)
2607 /* Recurse for further possible simplifications. */
2608 new = simplify_subreg (outermode, SUBREG_REG (op),
2609 GET_MODE (SUBREG_REG (op)),
2613 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2616 /* SUBREG of a hard register => just change the register number
2617 and/or mode. If the hard register is not valid in that mode,
2618 suppress this simplification. If the hard register is the stack,
2619 frame, or argument pointer, leave this as a SUBREG. */
2622 && (! REG_FUNCTION_VALUE_P (op)
2623 || ! rtx_equal_function_value_matters)
2624 #ifdef CLASS_CANNOT_CHANGE_MODE
2625 && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
2626 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2627 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
2628 && (TEST_HARD_REG_BIT
2629 (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
2632 && REGNO (op) < FIRST_PSEUDO_REGISTER
2633 && ((reload_completed && !frame_pointer_needed)
2634 || (REGNO (op) != FRAME_POINTER_REGNUM
2635 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2636 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2639 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2640 && REGNO (op) != ARG_POINTER_REGNUM
2642 && REGNO (op) != STACK_POINTER_REGNUM)
2644 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2647 /* ??? We do allow it if the current REG is not valid for
2648 its mode. This is a kludge to work around how float/complex
2649 arguments are passed on 32-bit Sparc and should be fixed. */
2650 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2651 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2653 rtx x = gen_rtx_REG (outermode, final_regno);
2655 /* Propagate original regno. We don't have any way to specify
2656 the offset inside original regno, so do so only for lowpart.
2657 The information is used only by alias analysis that can not
2658 grok partial register anyway. */
2660 if (subreg_lowpart_offset (outermode, innermode) == byte)
2661 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2666 /* If we have a SUBREG of a register that we are replacing and we are
2667 replacing it with a MEM, make a new MEM and try replacing the
2668 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2669 or if we would be widening it. */
2671 if (GET_CODE (op) == MEM
2672 && ! mode_dependent_address_p (XEXP (op, 0))
2673 /* Allow splitting of volatile memory references in case we don't
2674 have instruction to move the whole thing. */
2675 && (! MEM_VOLATILE_P (op)
2676 || ! have_insn_for (SET, innermode))
2677 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2678 return adjust_address_nv (op, outermode, byte);
2680 /* Handle complex values represented as CONCAT
2681 of real and imaginary part. */
2682 if (GET_CODE (op) == CONCAT)
2684 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2685 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2686 unsigned int final_offset;
2689 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2690 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2693 /* We can at least simplify it by referring directly to the relevant part. */
2694 return gen_rtx_SUBREG (outermode, part, final_offset);
2699 /* Make a SUBREG operation or equivalent if it folds. */
/* Like simplify_subreg, but always produce some result: if no
   simplification applies, fall back to generating a SUBREG rtx,
   guarding against operands for which a plain SUBREG would be
   invalid.  (Some lines are elided in this excerpt.)  */
2702 simplify_gen_subreg (outermode, op, innermode, byte)
2705 enum machine_mode outermode, innermode;
2708 /* Little bit of sanity checking. */
2709 if (innermode == VOIDmode || outermode == VOIDmode
2710 || innermode == BLKmode || outermode == BLKmode)
2713 if (GET_MODE (op) != innermode
2714 && GET_MODE (op) != VOIDmode)
2717 if (byte % GET_MODE_SIZE (outermode)
2718 || byte >= GET_MODE_SIZE (innermode))
/* QUEUED rtxes (pending autoincrements) cannot be wrapped in SUBREG.  */
2721 if (GET_CODE (op) == QUEUED)
2724 new = simplify_subreg (outermode, op, innermode, byte);
/* NOTE(review): the consequent of this guard (for SUBREG-of-SUBREG or
   VOIDmode operands) is elided in this excerpt.  */
2728 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2731 return gen_rtx_SUBREG (outermode, op, byte);
2733 /* Simplify X, an rtx expression.
2735 Return the simplified expression or NULL if no simplifications
2738 This is the preferred entry point into the simplification routines;
2739 however, we still allow passes to call the more specific routines.
2741 Right now GCC has three (yes, three) major bodies of RTL simplification
2742 code that need to be unified.
2744 1. fold_rtx in cse.c. This code uses various CSE specific
2745 information to aid in RTL simplification.
2747 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2748 it uses combine specific information to aid in RTL
2751 3. The routines in this file.
2754 Long term we want to only have one body of simplification code; to
2755 get to that state I recommend the following steps:
2757 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2758 which are not pass dependent state into these routines.
2760 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2761 use this routine whenever possible.
2763 3. Allow for pass dependent state to be provided to these
2764 routines and add simplifications based on the pass dependent
2765 state. Remove code from cse.c & combine.c that becomes
2768 It will take time, but ultimately the compiler will be easier to
2769 maintain and improve. It's totally silly that when we add a
2770 simplification that it needs to be added to 4 places (3 for RTL
2771 simplification and 1 for tree simplification). */
2777 enum rtx_code code = GET_CODE (x);
2778 enum machine_mode mode = GET_MODE (x);
2780 switch (GET_RTX_CLASS (code))
2783 return simplify_unary_operation (code, mode,
2784 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2786 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2791 XEXP (x, 0) = XEXP (x, 1);
2793 return simplify_binary_operation (code, mode,
2794 XEXP (x, 0), XEXP (x, 1));
2798 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2802 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2803 XEXP (x, 0), XEXP (x, 1),
2807 return simplify_relational_operation (code,
2808 ((GET_MODE (XEXP (x, 0))
2810 ? GET_MODE (XEXP (x, 0))
2811 : GET_MODE (XEXP (x, 1))),
2812 XEXP (x, 0), XEXP (x, 1));
2814 /* The only case we try to handle is a SUBREG. */
2816 return simplify_gen_subreg (mode, SUBREG_REG (x),
2817 GET_MODE (SUBREG_REG (x)),