1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
23 Boston, MA 02110-1301, USA. */
27 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
38 #include "insn-attr.h"
49 #include "integrate.h"
52 #include "target-def.h"
54 #include "langhooks.h"
56 /* Forward definitions of types. */
57 typedef struct minipool_node Mnode;
58 typedef struct minipool_fixup Mfix;
60 const struct attribute_spec arm_attribute_table[];
62 /* Forward function declarations. */
63 static arm_stack_offsets *arm_get_frame_offsets (void);
64 static void arm_add_gc_roots (void);
65 static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
66 HOST_WIDE_INT, rtx, rtx, int, int);
67 static unsigned bit_count (unsigned long);
68 static int arm_address_register_rtx_p (rtx, int);
69 static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
70 static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
71 inline static int thumb_index_register_rtx_p (rtx, int);
72 static int thumb_far_jump_used_p (void);
73 static bool thumb_force_lr_save (void);
74 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
75 static rtx emit_sfm (int, int);
76 static int arm_size_return_regs (void);
78 static bool arm_assemble_integer (rtx, unsigned int, int);
80 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
81 static arm_cc get_arm_condition_code (rtx);
82 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
83 static rtx is_jump_table (rtx);
84 static const char *output_multi_immediate (rtx *, const char *, const char *,
86 static const char *shift_op (rtx, HOST_WIDE_INT *);
87 static struct machine_function *arm_init_machine_status (void);
88 static void thumb_exit (FILE *, int);
89 static rtx is_jump_table (rtx);
90 static HOST_WIDE_INT get_jump_table_size (rtx);
91 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
92 static Mnode *add_minipool_forward_ref (Mfix *);
93 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
94 static Mnode *add_minipool_backward_ref (Mfix *);
95 static void assign_minipool_offsets (Mfix *);
96 static void arm_print_value (FILE *, rtx);
97 static void dump_minipool (rtx);
98 static int arm_barrier_cost (rtx);
99 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
100 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
101 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
103 static void arm_reorg (void);
104 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
105 static int current_file_function_operand (rtx);
106 static unsigned long arm_compute_save_reg0_reg12_mask (void);
107 static unsigned long arm_compute_save_reg_mask (void);
108 static unsigned long arm_isr_value (tree);
109 static unsigned long arm_compute_func_type (void);
110 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
111 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
112 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
113 static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
115 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
116 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
117 static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
118 static int arm_comp_type_attributes (tree, tree);
119 static void arm_set_default_type_attributes (tree);
120 static int arm_adjust_cost (rtx, rtx, rtx, int);
121 static int count_insns_for_constant (HOST_WIDE_INT, int);
122 static int arm_get_strip_length (int);
123 static bool arm_function_ok_for_sibcall (tree, tree);
124 static void arm_internal_label (FILE *, const char *, unsigned long);
125 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
127 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
128 static bool arm_size_rtx_costs (rtx, int, int, int *);
129 static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
130 static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
131 static bool arm_xscale_rtx_costs (rtx, int, int, int *);
132 static bool arm_9e_rtx_costs (rtx, int, int, int *);
133 static int arm_address_cost (rtx);
134 static bool arm_memory_load_p (rtx);
135 static bool arm_cirrus_insn_p (rtx);
136 static void cirrus_reorg (rtx);
137 static void arm_init_builtins (void);
138 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
139 static void arm_init_iwmmxt_builtins (void);
140 static rtx safe_vector_operand (rtx, enum machine_mode);
141 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
142 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
143 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
144 static void emit_constant_insn (rtx cond, rtx pattern);
145 static rtx emit_set_insn (rtx, rtx);
146 static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
149 #ifdef OBJECT_FORMAT_ELF
150 static void arm_elf_asm_constructor (rtx, int);
153 static void arm_encode_section_info (tree, rtx, int);
156 static void arm_file_end (void);
157 static void arm_file_start (void);
160 static void aof_globalize_label (FILE *, const char *);
161 static void aof_dump_imports (FILE *);
162 static void aof_dump_pic_table (FILE *);
163 static void aof_file_start (void);
164 static void aof_file_end (void);
165 static void aof_asm_init_sections (void);
167 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
169 static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
170 enum machine_mode, tree, bool);
171 static bool arm_promote_prototypes (tree);
172 static bool arm_default_short_enums (void);
173 static bool arm_align_anon_bitfield (void);
174 static bool arm_return_in_msb (tree);
175 static bool arm_must_pass_in_stack (enum machine_mode, tree);
176 #ifdef TARGET_UNWIND_INFO
177 static void arm_unwind_emit (FILE *, rtx);
178 static bool arm_output_ttype (rtx);
181 static tree arm_cxx_guard_type (void);
182 static bool arm_cxx_guard_mask_bit (void);
183 static tree arm_get_cookie_size (tree);
184 static bool arm_cookie_has_size (void);
185 static bool arm_cxx_cdtor_returns_this (void);
186 static bool arm_cxx_key_method_may_be_inline (void);
187 static void arm_cxx_determine_class_data_visibility (tree);
188 static bool arm_cxx_class_data_always_comdat (void);
189 static bool arm_cxx_use_aeabi_atexit (void);
190 static void arm_init_libfuncs (void);
191 static bool arm_handle_option (size_t, const char *, int);
192 static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
193 static bool arm_cannot_copy_insn_p (rtx);
194 static bool arm_tls_symbol_p (rtx x);
197 /* Initialize the GCC target structure. */
198 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
199 #undef TARGET_MERGE_DECL_ATTRIBUTES
200 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
203 #undef TARGET_ATTRIBUTE_TABLE
204 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
206 #undef TARGET_ASM_FILE_START
207 #define TARGET_ASM_FILE_START arm_file_start
209 #undef TARGET_ASM_FILE_END
210 #define TARGET_ASM_FILE_END arm_file_end
213 #undef TARGET_ASM_BYTE_OP
214 #define TARGET_ASM_BYTE_OP "\tDCB\t"
215 #undef TARGET_ASM_ALIGNED_HI_OP
216 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
217 #undef TARGET_ASM_ALIGNED_SI_OP
218 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
219 #undef TARGET_ASM_GLOBALIZE_LABEL
220 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
221 #undef TARGET_ASM_FILE_START
222 #define TARGET_ASM_FILE_START aof_file_start
223 #undef TARGET_ASM_FILE_END
224 #define TARGET_ASM_FILE_END aof_file_end
226 #undef TARGET_ASM_ALIGNED_SI_OP
227 #define TARGET_ASM_ALIGNED_SI_OP NULL
228 #undef TARGET_ASM_INTEGER
229 #define TARGET_ASM_INTEGER arm_assemble_integer
232 #undef TARGET_ASM_FUNCTION_PROLOGUE
233 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
235 #undef TARGET_ASM_FUNCTION_EPILOGUE
236 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
238 #undef TARGET_DEFAULT_TARGET_FLAGS
239 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
240 #undef TARGET_HANDLE_OPTION
241 #define TARGET_HANDLE_OPTION arm_handle_option
243 #undef TARGET_COMP_TYPE_ATTRIBUTES
244 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
246 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
247 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
249 #undef TARGET_SCHED_ADJUST_COST
250 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
252 #undef TARGET_ENCODE_SECTION_INFO
254 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
256 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
259 #undef TARGET_STRIP_NAME_ENCODING
260 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
262 #undef TARGET_ASM_INTERNAL_LABEL
263 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
265 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
266 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
268 #undef TARGET_ASM_OUTPUT_MI_THUNK
269 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
270 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
271 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
273 /* This will be overridden in arm_override_options. */
274 #undef TARGET_RTX_COSTS
275 #define TARGET_RTX_COSTS arm_slowmul_rtx_costs
276 #undef TARGET_ADDRESS_COST
277 #define TARGET_ADDRESS_COST arm_address_cost
279 #undef TARGET_SHIFT_TRUNCATION_MASK
280 #define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
281 #undef TARGET_VECTOR_MODE_SUPPORTED_P
282 #define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
284 #undef TARGET_MACHINE_DEPENDENT_REORG
285 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
287 #undef TARGET_INIT_BUILTINS
288 #define TARGET_INIT_BUILTINS arm_init_builtins
289 #undef TARGET_EXPAND_BUILTIN
290 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
292 #undef TARGET_INIT_LIBFUNCS
293 #define TARGET_INIT_LIBFUNCS arm_init_libfuncs
295 #undef TARGET_PROMOTE_FUNCTION_ARGS
296 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
297 #undef TARGET_PROMOTE_FUNCTION_RETURN
298 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
299 #undef TARGET_PROMOTE_PROTOTYPES
300 #define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
301 #undef TARGET_PASS_BY_REFERENCE
302 #define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
303 #undef TARGET_ARG_PARTIAL_BYTES
304 #define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes
306 #undef TARGET_SETUP_INCOMING_VARARGS
307 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
309 #undef TARGET_DEFAULT_SHORT_ENUMS
310 #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
312 #undef TARGET_ALIGN_ANON_BITFIELD
313 #define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield
315 #undef TARGET_NARROW_VOLATILE_BITFIELD
316 #define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
318 #undef TARGET_CXX_GUARD_TYPE
319 #define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type
321 #undef TARGET_CXX_GUARD_MASK_BIT
322 #define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit
324 #undef TARGET_CXX_GET_COOKIE_SIZE
325 #define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size
327 #undef TARGET_CXX_COOKIE_HAS_SIZE
328 #define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size
330 #undef TARGET_CXX_CDTOR_RETURNS_THIS
331 #define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this
333 #undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
334 #define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline
336 #undef TARGET_CXX_USE_AEABI_ATEXIT
337 #define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit
339 #undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
340 #define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
341 arm_cxx_determine_class_data_visibility
343 #undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
344 #define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat
346 #undef TARGET_RETURN_IN_MSB
347 #define TARGET_RETURN_IN_MSB arm_return_in_msb
349 #undef TARGET_MUST_PASS_IN_STACK
350 #define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack
352 #ifdef TARGET_UNWIND_INFO
353 #undef TARGET_UNWIND_EMIT
354 #define TARGET_UNWIND_EMIT arm_unwind_emit
356 /* EABI unwinding tables use a different format for the typeinfo tables. */
357 #undef TARGET_ASM_TTYPE
358 #define TARGET_ASM_TTYPE arm_output_ttype
360 #undef TARGET_ARM_EABI_UNWINDER
361 #define TARGET_ARM_EABI_UNWINDER true
362 #endif /* TARGET_UNWIND_INFO */
364 #undef TARGET_CANNOT_COPY_INSN_P
365 #define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p
368 #undef TARGET_HAVE_TLS
369 #define TARGET_HAVE_TLS true
372 #undef TARGET_CANNOT_FORCE_CONST_MEM
373 #define TARGET_CANNOT_FORCE_CONST_MEM arm_tls_referenced_p
375 struct gcc_target targetm = TARGET_INITIALIZER;
377 /* Obstack for minipool constant handling. */
378 static struct obstack minipool_obstack;
379 static char * minipool_startobj;
381 /* The maximum number of insns skipped which
382 will be conditionalised if possible. */
383 static int max_insns_skipped = 5;
385 extern FILE * asm_out_file;
387 /* True if we are currently building a constant table. */
388 int making_const_table;
390 /* Define the information needed to generate branch insns. This is
391 stored from the compare operation. */
392 rtx arm_compare_op0, arm_compare_op1;
394 /* The processor for which instructions should be scheduled. */
395 enum processor_type arm_tune = arm_none;
397 /* The default processor used if not overriden by commandline. */
398 static enum processor_type arm_default_cpu = arm_none;
400 /* Which floating point model to use. */
401 enum arm_fp_model arm_fp_model;
403 /* Which floating point hardware is available. */
404 enum fputype arm_fpu_arch;
406 /* Which floating point hardware to schedule for. */
407 enum fputype arm_fpu_tune;
409 /* Whether to use floating point hardware. */
410 enum float_abi_type arm_float_abi;
412 /* Which ABI to use. */
413 enum arm_abi_type arm_abi;
415 /* Which thread pointer model to use. */
416 enum arm_tp_type target_thread_pointer = TP_AUTO;
418 /* Used to parse -mstructure_size_boundary command line option. */
419 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
421 /* Used for Thumb call_via trampolines. */
422 rtx thumb_call_via_label[14];
423 static int thumb_call_reg_needed;
425 /* Bit values used to identify processor capabilities. */
426 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
427 #define FL_ARCH3M (1 << 1) /* Extended multiply */
428 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
429 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
430 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
431 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
432 #define FL_THUMB (1 << 6) /* Thumb aware */
433 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
434 #define FL_STRONG (1 << 8) /* StrongARM */
435 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
436 #define FL_XSCALE (1 << 10) /* XScale */
437 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
438 #define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
439 media instructions. */
440 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
441 #define FL_WBUF (1 << 14) /* Schedule for write buffer ops.
442 Note: ARM6 & 7 derivatives only. */
443 #define FL_ARCH6K (1 << 15) /* Architecture rel 6 K extensions. */
445 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
447 #define FL_FOR_ARCH2 0
448 #define FL_FOR_ARCH3 FL_MODE32
449 #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
450 #define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
451 #define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
452 #define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
453 #define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
454 #define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
455 #define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
456 #define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
457 #define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
458 #define FL_FOR_ARCH6J FL_FOR_ARCH6
459 #define FL_FOR_ARCH6K (FL_FOR_ARCH6 | FL_ARCH6K)
460 #define FL_FOR_ARCH6Z FL_FOR_ARCH6
461 #define FL_FOR_ARCH6ZK FL_FOR_ARCH6K
463 /* The bits in this mask specify which
464 instructions we are allowed to generate. */
465 static unsigned long insn_flags = 0;
467 /* The bits in this mask specify which instruction scheduling options should
469 static unsigned long tune_flags = 0;
471 /* The following are used in the arm.md file as equivalents to bits
472 in the above two flag variables. */
474 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
477 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
480 /* Nonzero if this chip supports the ARM Architecture 4t extensions. */
483 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
486 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
489 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
492 /* Nonzero if this chip supports the ARM 6K extensions. */
495 /* Nonzero if this chip can benefit from load scheduling. */
496 int arm_ld_sched = 0;
498 /* Nonzero if this chip is a StrongARM. */
499 int arm_tune_strongarm = 0;
501 /* Nonzero if this chip is a Cirrus variant. */
502 int arm_arch_cirrus = 0;
504 /* Nonzero if this chip supports Intel Wireless MMX technology. */
505 int arm_arch_iwmmxt = 0;
507 /* Nonzero if this chip is an XScale. */
508 int arm_arch_xscale = 0;
510 /* Nonzero if tuning for XScale */
511 int arm_tune_xscale = 0;
513 /* Nonzero if we want to tune for stores that access the write-buffer.
514 This typically means an ARM6 or ARM7 with MMU or MPU. */
515 int arm_tune_wbuf = 0;
517 /* Nonzero if generating Thumb instructions. */
520 /* Nonzero if we should define __THUMB_INTERWORK__ in the
522 XXX This is a bit of a hack, it's intended to help work around
523 problems in GLD which doesn't understand that armv5t code is
524 interworking clean. */
525 int arm_cpp_interwork = 0;
527 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
528 must report the mode of the memory reference from PRINT_OPERAND to
529 PRINT_OPERAND_ADDRESS. */
530 enum machine_mode output_memory_reference_mode;
532 /* The register number to be used for the PIC offset register. */
533 unsigned arm_pic_register = INVALID_REGNUM;
535 /* Set to 1 when a return insn is output, this means that the epilogue
537 int return_used_this_function;
539 /* Set to 1 after arm_reorg has started. Reset to start at the start of
540 the next function. */
541 static int after_arm_reorg = 0;
543 /* The maximum number of insns to be used when loading a constant. */
544 static int arm_constant_limit = 3;
546 /* For an explanation of these variables, see final_prescan_insn below. */
548 enum arm_cond_code arm_current_cc;
550 int arm_target_label;
552 /* The condition codes of the ARM, and the inverse function. */
553 static const char * const arm_condition_codes[] =
555 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
556 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
559 #define streq(string1, string2) (strcmp (string1, string2) == 0)
561 /* Initialization code. */
565 const char *const name;
566 enum processor_type core;
568 const unsigned long flags;
569 bool (* rtx_costs) (rtx, int, int, int *);
572 /* Not all of these give usefully different compilation alternatives,
573 but there is no simple way of generalizing them. */
574 static const struct processors all_cores[] =
577 #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
578 {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
579 #include "arm-cores.def"
581 {NULL, arm_none, NULL, 0, NULL}
584 static const struct processors all_architectures[] =
586 /* ARM Architectures */
587 /* We don't specify rtx_costs here as it will be figured out
590 {"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
591 {"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
592 {"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
593 {"armv3m", arm7m, "3M", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
594 {"armv4", arm7tdmi, "4", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
595 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
596 implementations that support it, so we will leave it out for now. */
597 {"armv4t", arm7tdmi, "4T", FL_CO_PROC | FL_FOR_ARCH4T, NULL},
598 {"armv5", arm10tdmi, "5", FL_CO_PROC | FL_FOR_ARCH5, NULL},
599 {"armv5t", arm10tdmi, "5T", FL_CO_PROC | FL_FOR_ARCH5T, NULL},
600 {"armv5e", arm1026ejs, "5E", FL_CO_PROC | FL_FOR_ARCH5E, NULL},
601 {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
602 {"armv6", arm1136js, "6", FL_CO_PROC | FL_FOR_ARCH6, NULL},
603 {"armv6j", arm1136js, "6J", FL_CO_PROC | FL_FOR_ARCH6J, NULL},
604 {"armv6k", mpcore, "6K", FL_CO_PROC | FL_FOR_ARCH6K, NULL},
605 {"armv6z", arm1176jzs, "6Z", FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
606 {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
607 {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
608 {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
609 {NULL, arm_none, NULL, 0 , NULL}
612 struct arm_cpu_select
616 const struct processors * processors;
619 /* This is a magic structure. The 'string' field is magically filled in
620 with a pointer to the value specified by the user on the command line
621 assuming that the user has specified such a value. */
623 static struct arm_cpu_select arm_select[] =
625 /* string name processors */
626 { NULL, "-mcpu=", all_cores },
627 { NULL, "-march=", all_architectures },
628 { NULL, "-mtune=", all_cores }
631 /* Defines representing the indexes into the above table. */
632 #define ARM_OPT_SET_CPU 0
633 #define ARM_OPT_SET_ARCH 1
634 #define ARM_OPT_SET_TUNE 2
636 /* The name of the preprocessor macro to define for this architecture. */
638 char arm_arch_name[] = "__ARM_ARCH_0UNK__";
647 /* Available values for -mfpu=. */
649 static const struct fpu_desc all_fpus[] =
651 {"fpa", FPUTYPE_FPA},
652 {"fpe2", FPUTYPE_FPA_EMU2},
653 {"fpe3", FPUTYPE_FPA_EMU2},
654 {"maverick", FPUTYPE_MAVERICK},
659 /* Floating point models used by the different hardware.
660 See fputype in arm.h. */
662 static const enum fputype fp_model_for_fpu[] =
664 /* No FP hardware. */
665 ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
666 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
667 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
668 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
669 ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
670 ARM_FP_MODEL_VFP /* FPUTYPE_VFP */
677 enum float_abi_type abi_type;
681 /* Available values for -mfloat-abi=. */
683 static const struct float_abi all_float_abis[] =
685 {"soft", ARM_FLOAT_ABI_SOFT},
686 {"softfp", ARM_FLOAT_ABI_SOFTFP},
687 {"hard", ARM_FLOAT_ABI_HARD}
694 enum arm_abi_type abi_type;
698 /* Available values for -mabi=. */
700 static const struct abi_name arm_all_abis[] =
702 {"apcs-gnu", ARM_ABI_APCS},
703 {"atpcs", ARM_ABI_ATPCS},
704 {"aapcs", ARM_ABI_AAPCS},
705 {"iwmmxt", ARM_ABI_IWMMXT},
706 {"aapcs-linux", ARM_ABI_AAPCS_LINUX}
709 /* Supported TLS relocations. */
719 /* Emit an insn that's a simple single-set. Both the operands must be known
722 emit_set_insn (rtx x, rtx y)
724 return emit_insn (gen_rtx_SET (VOIDmode, x, y));
727 /* Return the number of bits set in VALUE. */
729 bit_count (unsigned long value)
731 unsigned long count = 0;
736 value &= value - 1; /* Clear the least-significant set bit. */
742 /* Set up library functions unique to ARM. */
745 arm_init_libfuncs (void)
747 /* There are no special library functions unless we are using the
752 /* The functions below are described in Section 4 of the "Run-Time
753 ABI for the ARM architecture", Version 1.0. */
755 /* Double-precision floating-point arithmetic. Table 2. */
756 set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
757 set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
758 set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
759 set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
760 set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");
762 /* Double-precision comparisons. Table 3. */
763 set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
764 set_optab_libfunc (ne_optab, DFmode, NULL);
765 set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
766 set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
767 set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
768 set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
769 set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");
771 /* Single-precision floating-point arithmetic. Table 4. */
772 set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
773 set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
774 set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
775 set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
776 set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
778 /* Single-precision comparisons. Table 5. */
779 set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
780 set_optab_libfunc (ne_optab, SFmode, NULL);
781 set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
782 set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
783 set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
784 set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
785 set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");
787 /* Floating-point to integer conversions. Table 6. */
788 set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
789 set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
790 set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
791 set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
792 set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
793 set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
794 set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
795 set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");
797 /* Conversions between floating types. Table 7. */
798 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
799 set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");
801 /* Integer to floating-point conversions. Table 8. */
802 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
803 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
804 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
805 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
806 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
807 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
808 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
809 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");
811 /* Long long. Table 9. */
812 set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
813 set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
814 set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
815 set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
816 set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
817 set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
818 set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
819 set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");
821 /* Integer (32/32->32) division. \S 4.3.1. */
822 set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
823 set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
825 /* The divmod functions are designed so that they can be used for
826 plain division, even though they return both the quotient and the
827 remainder. The quotient is returned in the usual location (i.e.,
828 r0 for SImode, {r0, r1} for DImode), just as would be expected
829 for an ordinary division routine. Because the AAPCS calling
830 conventions specify that all of { r0, r1, r2, r3 } are
831 callee-saved registers, there is no need to tell the compiler
832 explicitly that those registers are clobbered by these
834 set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
835 set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
837 /* For SImode division the ABI provides div-without-mod routines,
839 set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");
840 set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidiv");
842 /* We don't have mod libcalls. Fortunately gcc knows how to use the
843 divmod libcalls instead. */
844 set_optab_libfunc (smod_optab, DImode, NULL);
845 set_optab_libfunc (umod_optab, DImode, NULL);
846 set_optab_libfunc (smod_optab, SImode, NULL);
847 set_optab_libfunc (umod_optab, SImode, NULL);
850 /* Implement TARGET_HANDLE_OPTION. */
853 arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
858 arm_select[1].string = arg;
862 arm_select[0].string = arg;
865 case OPT_mhard_float:
866 target_float_abi_name = "hard";
869 case OPT_msoft_float:
870 target_float_abi_name = "soft";
874 arm_select[2].string = arg;
882 /* Fix up any incompatible options that the user has specified.
883 This has now turned into a maze. */
885 arm_override_options (void)
888 enum processor_type target_arch_cpu = arm_none;
890 /* Set up the flags based on the cpu/architecture selected by the user. */
891 for (i = ARRAY_SIZE (arm_select); i--;)
893 struct arm_cpu_select * ptr = arm_select + i;
895 if (ptr->string != NULL && ptr->string[0] != '\0')
897 const struct processors * sel;
899 for (sel = ptr->processors; sel->name != NULL; sel++)
900 if (streq (ptr->string, sel->name))
902 /* Set the architecture define. */
903 if (i != ARM_OPT_SET_TUNE)
904 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
906 /* Determine the processor core for which we should
907 tune code-generation. */
908 if (/* -mcpu= is a sensible default. */
910 /* -mtune= overrides -mcpu= and -march=. */
911 || i == ARM_OPT_SET_TUNE)
912 arm_tune = (enum processor_type) (sel - ptr->processors);
914 /* Remember the CPU associated with this architecture.
915 If no other option is used to set the CPU type,
916 we'll use this to guess the most suitable tuning
918 if (i == ARM_OPT_SET_ARCH)
919 target_arch_cpu = sel->core;
921 if (i != ARM_OPT_SET_TUNE)
923 /* If we have been given an architecture and a processor
924 make sure that they are compatible. We only generate
925 a warning though, and we prefer the CPU over the
927 if (insn_flags != 0 && (insn_flags ^ sel->flags))
928 warning (0, "switch -mcpu=%s conflicts with -march= switch",
931 insn_flags = sel->flags;
937 if (sel->name == NULL)
938 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
942 /* Guess the tuning options from the architecture if necessary. */
943 if (arm_tune == arm_none)
944 arm_tune = target_arch_cpu;
946 /* If the user did not specify a processor, choose one for them. */
949 const struct processors * sel;
951 enum processor_type cpu;
953 cpu = TARGET_CPU_DEFAULT;
956 #ifdef SUBTARGET_CPU_DEFAULT
957 /* Use the subtarget default CPU if none was specified by
959 cpu = SUBTARGET_CPU_DEFAULT;
961 /* Default to ARM6. */
965 sel = &all_cores[cpu];
967 insn_flags = sel->flags;
969 /* Now check to see if the user has specified some command line
970 switch that require certain abilities from the cpu. */
973 if (TARGET_INTERWORK || TARGET_THUMB)
975 sought |= (FL_THUMB | FL_MODE32);
977 /* There are no ARM processors that support both APCS-26 and
978 interworking. Therefore we force FL_MODE26 to be removed
979 from insn_flags here (if it was set), so that the search
980 below will always be able to find a compatible processor. */
981 insn_flags &= ~FL_MODE26;
984 if (sought != 0 && ((sought & insn_flags) != sought))
986 /* Try to locate a CPU type that supports all of the abilities
987 of the default CPU, plus the extra abilities requested by
989 for (sel = all_cores; sel->name != NULL; sel++)
990 if ((sel->flags & sought) == (sought | insn_flags))
993 if (sel->name == NULL)
995 unsigned current_bit_count = 0;
996 const struct processors * best_fit = NULL;
998 /* Ideally we would like to issue an error message here
999 saying that it was not possible to find a CPU compatible
1000 with the default CPU, but which also supports the command
1001 line options specified by the programmer, and so they
1002 ought to use the -mcpu=<name> command line option to
1003 override the default CPU type.
1005 If we cannot find a cpu that has both the
1006 characteristics of the default cpu and the given
1007 command line options we scan the array again looking
1008 for a best match. */
1009 for (sel = all_cores; sel->name != NULL; sel++)
1010 if ((sel->flags & sought) == sought)
1014 count = bit_count (sel->flags & insn_flags);
1016 if (count >= current_bit_count)
1019 current_bit_count = count;
1023 gcc_assert (best_fit);
1027 insn_flags = sel->flags;
1029 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
1030 arm_default_cpu = (enum processor_type) (sel - all_cores);
1031 if (arm_tune == arm_none)
1032 arm_tune = arm_default_cpu;
1035 /* The processor for which we should tune should now have been
1037 gcc_assert (arm_tune != arm_none);
1039 tune_flags = all_cores[(int)arm_tune].flags;
1041 targetm.rtx_costs = arm_size_rtx_costs;
1043 targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
1045 /* Make sure that the processor choice does not conflict with any of the
1046 other command line choices. */
1047 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
1049 warning (0, "target CPU does not support interworking" );
1050 target_flags &= ~MASK_INTERWORK;
1053 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
1055 warning (0, "target CPU does not support THUMB instructions");
1056 target_flags &= ~MASK_THUMB;
1059 if (TARGET_APCS_FRAME && TARGET_THUMB)
1061 /* warning (0, "ignoring -mapcs-frame because -mthumb was used"); */
1062 target_flags &= ~MASK_APCS_FRAME;
1065 /* Callee super interworking implies thumb interworking. Adding
1066 this to the flags here simplifies the logic elsewhere. */
1067 if (TARGET_THUMB && TARGET_CALLEE_INTERWORKING)
1068 target_flags |= MASK_INTERWORK;
1070 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
1071 from here where no function is being compiled currently. */
1072 if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
1073 warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");
1075 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
1076 warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");
1078 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
1079 warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");
1081 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
1083 warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
1084 target_flags |= MASK_APCS_FRAME;
1087 if (TARGET_POKE_FUNCTION_NAME)
1088 target_flags |= MASK_APCS_FRAME;
1090 if (TARGET_APCS_REENT && flag_pic)
1091 error ("-fpic and -mapcs-reent are incompatible");
1093 if (TARGET_APCS_REENT)
1094 warning (0, "APCS reentrant code not supported. Ignored");
1096 /* If this target is normally configured to use APCS frames, warn if they
1097 are turned off and debugging is turned on. */
1099 && write_symbols != NO_DEBUG
1100 && !TARGET_APCS_FRAME
1101 && (TARGET_DEFAULT & MASK_APCS_FRAME))
1102 warning (0, "-g with -mno-apcs-frame may not give sensible debugging");
1104 /* If stack checking is disabled, we can use r10 as the PIC register,
1105 which keeps r9 available. */
1106 if (flag_pic && TARGET_SINGLE_PIC_BASE)
1107 arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
1109 if (TARGET_APCS_FLOAT)
1110 warning (0, "passing floating point arguments in fp regs not yet supported");
1112 /* Initialize boolean versions of the flags, for use in the arm.md file. */
1113 arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
1114 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
1115 arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
1116 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
1117 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
1118 arm_arch6 = (insn_flags & FL_ARCH6) != 0;
1119 arm_arch6k = (insn_flags & FL_ARCH6K) != 0;
1120 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
1121 arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;
1123 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
1124 arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
1125 thumb_code = (TARGET_ARM == 0);
1126 arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
1127 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
1128 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
1130 /* V5 code we generate is completely interworking capable, so we turn off
1131 TARGET_INTERWORK here to avoid many tests later on. */
1133 /* XXX However, we must pass the right pre-processor defines to CPP
1134 or GLD can get confused. This is a hack. */
1135 if (TARGET_INTERWORK)
1136 arm_cpp_interwork = 1;
1139 target_flags &= ~MASK_INTERWORK;
1141 if (target_abi_name)
1143 for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
1145 if (streq (arm_all_abis[i].name, target_abi_name))
1147 arm_abi = arm_all_abis[i].abi_type;
1151 if (i == ARRAY_SIZE (arm_all_abis))
1152 error ("invalid ABI option: -mabi=%s", target_abi_name);
1155 arm_abi = ARM_DEFAULT_ABI;
1157 if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
1158 error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
1160 if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
1161 error ("iwmmxt abi requires an iwmmxt capable cpu");
1163 arm_fp_model = ARM_FP_MODEL_UNKNOWN;
1164 if (target_fpu_name == NULL && target_fpe_name != NULL)
1166 if (streq (target_fpe_name, "2"))
1167 target_fpu_name = "fpe2";
1168 else if (streq (target_fpe_name, "3"))
1169 target_fpu_name = "fpe3";
1171 error ("invalid floating point emulation option: -mfpe=%s",
1174 if (target_fpu_name != NULL)
1176 /* The user specified a FPU. */
1177 for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
1179 if (streq (all_fpus[i].name, target_fpu_name))
1181 arm_fpu_arch = all_fpus[i].fpu;
1182 arm_fpu_tune = arm_fpu_arch;
1183 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1187 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1188 error ("invalid floating point option: -mfpu=%s", target_fpu_name);
1192 #ifdef FPUTYPE_DEFAULT
1193 /* Use the default if it is specified for this platform. */
1194 arm_fpu_arch = FPUTYPE_DEFAULT;
1195 arm_fpu_tune = FPUTYPE_DEFAULT;
1197 /* Pick one based on CPU type. */
1198 /* ??? Some targets assume FPA is the default.
1199 if ((insn_flags & FL_VFP) != 0)
1200 arm_fpu_arch = FPUTYPE_VFP;
1203 if (arm_arch_cirrus)
1204 arm_fpu_arch = FPUTYPE_MAVERICK;
1206 arm_fpu_arch = FPUTYPE_FPA_EMU2;
1208 if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
1209 arm_fpu_tune = FPUTYPE_FPA;
1211 arm_fpu_tune = arm_fpu_arch;
1212 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1213 gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
1216 if (target_float_abi_name != NULL)
1218 /* The user specified a FP ABI. */
1219 for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
1221 if (streq (all_float_abis[i].name, target_float_abi_name))
1223 arm_float_abi = all_float_abis[i].abi_type;
1227 if (i == ARRAY_SIZE (all_float_abis))
1228 error ("invalid floating point abi: -mfloat-abi=%s",
1229 target_float_abi_name);
1232 arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;
1234 if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
1235 sorry ("-mfloat-abi=hard and VFP");
1237 /* FPA and iWMMXt are incompatible because the insn encodings overlap.
1238 VFP and iWMMXt can theoretically coexist, but it's unlikely such silicon
1239 will ever exist. GCC makes no attempt to support this combination. */
1240 if (TARGET_IWMMXT && !TARGET_SOFT_FLOAT)
1241 sorry ("iWMMXt and hardware floating point");
1243 /* If soft-float is specified then don't use FPU. */
1244 if (TARGET_SOFT_FLOAT)
1245 arm_fpu_arch = FPUTYPE_NONE;
1247 /* For arm2/3 there is no need to do any scheduling if there is only
1248 a floating point emulator, or we are doing software floating-point. */
1249 if ((TARGET_SOFT_FLOAT
1250 || arm_fpu_tune == FPUTYPE_FPA_EMU2
1251 || arm_fpu_tune == FPUTYPE_FPA_EMU3)
1252 && (tune_flags & FL_MODE32) == 0)
1253 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
1255 if (target_thread_switch)
1257 if (strcmp (target_thread_switch, "soft") == 0)
1258 target_thread_pointer = TP_SOFT;
1259 else if (strcmp (target_thread_switch, "auto") == 0)
1260 target_thread_pointer = TP_AUTO;
1261 else if (strcmp (target_thread_switch, "cp15") == 0)
1262 target_thread_pointer = TP_CP15;
1264 error ("invalid thread pointer option: -mtp=%s", target_thread_switch);
1267 /* Use the cp15 method if it is available. */
1268 if (target_thread_pointer == TP_AUTO)
1270 if (arm_arch6k && !TARGET_THUMB)
1271 target_thread_pointer = TP_CP15;
1273 target_thread_pointer = TP_SOFT;
1276 if (TARGET_HARD_TP && TARGET_THUMB)
1277 error ("can not use -mtp=cp15 with -mthumb");
1279 /* Override the default structure alignment for AAPCS ABI. */
1280 if (TARGET_AAPCS_BASED)
1281 arm_structure_size_boundary = 8;
1283 if (structure_size_string != NULL)
1285 int size = strtol (structure_size_string, NULL, 0);
1287 if (size == 8 || size == 32
1288 || (ARM_DOUBLEWORD_ALIGN && size == 64))
1289 arm_structure_size_boundary = size;
1291 warning (0, "structure size boundary can only be set to %s",
1292 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
1295 if (arm_pic_register_string != NULL)
1297 int pic_register = decode_reg_name (arm_pic_register_string);
1300 warning (0, "-mpic-register= is useless without -fpic");
1302 /* Prevent the user from choosing an obviously stupid PIC register. */
1303 else if (pic_register < 0 || call_used_regs[pic_register]
1304 || pic_register == HARD_FRAME_POINTER_REGNUM
1305 || pic_register == STACK_POINTER_REGNUM
1306 || pic_register >= PC_REGNUM)
1307 error ("unable to use '%s' for PIC register", arm_pic_register_string);
1309 arm_pic_register = pic_register;
1312 if (TARGET_THUMB && flag_schedule_insns)
1314 /* Don't warn since it's on by default in -O2. */
1315 flag_schedule_insns = 0;
1320 arm_constant_limit = 1;
1322 /* If optimizing for size, bump the number of instructions that we
1323 are prepared to conditionally execute (even on a StrongARM). */
1324 max_insns_skipped = 6;
1328 /* For processors with load scheduling, it never costs more than
1329 2 cycles to load a constant, and the load scheduler may well
1330 reduce that to 1. */
1332 arm_constant_limit = 1;
1334 /* On XScale the longer latency of a load makes it more difficult
1335 to achieve a good schedule, so it's faster to synthesize
1336 constants that can be done in two insns. */
1337 if (arm_tune_xscale)
1338 arm_constant_limit = 2;
1340 /* StrongARM has early execution of branches, so a sequence
1341 that is worth skipping is shorter. */
1342 if (arm_tune_strongarm)
1343 max_insns_skipped = 3;
1346 /* Register global variables with the garbage collector. */
1347 arm_add_gc_roots ();
1351 arm_add_gc_roots (void)
1353 gcc_obstack_init(&minipool_obstack);
1354 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
1357 /* A table of known ARM exception types.
1358 For use with the interrupt function attribute. */
1362 const char *const arg;
1363 const unsigned long return_value;
1367 static const isr_attribute_arg isr_attribute_args [] =
1369 { "IRQ", ARM_FT_ISR },
1370 { "irq", ARM_FT_ISR },
1371 { "FIQ", ARM_FT_FIQ },
1372 { "fiq", ARM_FT_FIQ },
1373 { "ABORT", ARM_FT_ISR },
1374 { "abort", ARM_FT_ISR },
1375 { "ABORT", ARM_FT_ISR },
1376 { "abort", ARM_FT_ISR },
1377 { "UNDEF", ARM_FT_EXCEPTION },
1378 { "undef", ARM_FT_EXCEPTION },
1379 { "SWI", ARM_FT_EXCEPTION },
1380 { "swi", ARM_FT_EXCEPTION },
1381 { NULL, ARM_FT_NORMAL }
1384 /* Returns the (interrupt) function type of the current
1385 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
1387 static unsigned long
1388 arm_isr_value (tree argument)
1390 const isr_attribute_arg * ptr;
1393 /* No argument - default to IRQ. */
1394 if (argument == NULL_TREE)
1397 /* Get the value of the argument. */
1398 if (TREE_VALUE (argument) == NULL_TREE
1399 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
1400 return ARM_FT_UNKNOWN;
1402 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
1404 /* Check it against the list of known arguments. */
1405 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
1406 if (streq (arg, ptr->arg))
1407 return ptr->return_value;
1409 /* An unrecognized interrupt type. */
1410 return ARM_FT_UNKNOWN;
1413 /* Computes the type of the current function. */
1415 static unsigned long
1416 arm_compute_func_type (void)
1418 unsigned long type = ARM_FT_UNKNOWN;
1422 gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);
1424 /* Decide if the current function is volatile. Such functions
1425 never return, and many memory cycles can be saved by not storing
1426 register values that will never be needed again. This optimization
1427 was added to speed up context switching in a kernel application. */
1429 && (TREE_NOTHROW (current_function_decl)
1430 || !(flag_unwind_tables
1431 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
1432 && TREE_THIS_VOLATILE (current_function_decl))
1433 type |= ARM_FT_VOLATILE;
1435 if (cfun->static_chain_decl != NULL)
1436 type |= ARM_FT_NESTED;
1438 attr = DECL_ATTRIBUTES (current_function_decl);
1440 a = lookup_attribute ("naked", attr);
1442 type |= ARM_FT_NAKED;
1444 a = lookup_attribute ("isr", attr);
1446 a = lookup_attribute ("interrupt", attr);
1449 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
1451 type |= arm_isr_value (TREE_VALUE (a));
1456 /* Returns the type of the current function. */
1459 arm_current_func_type (void)
1461 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
1462 cfun->machine->func_type = arm_compute_func_type ();
1464 return cfun->machine->func_type;
1467 /* Return 1 if it is possible to return using a single instruction.
1468 If SIBLING is non-null, this is a test for a return before a sibling
1469 call. SIBLING is the call insn, so we can examine its register usage. */
1472 use_return_insn (int iscond, rtx sibling)
1475 unsigned int func_type;
1476 unsigned long saved_int_regs;
1477 unsigned HOST_WIDE_INT stack_adjust;
1478 arm_stack_offsets *offsets;
1480 /* Never use a return instruction before reload has run. */
1481 if (!reload_completed)
1484 func_type = arm_current_func_type ();
1486 /* Naked functions and volatile functions need special
1488 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
1491 /* So do interrupt functions that use the frame pointer. */
1492 if (IS_INTERRUPT (func_type) && frame_pointer_needed)
1495 offsets = arm_get_frame_offsets ();
1496 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
1498 /* As do variadic functions. */
1499 if (current_function_pretend_args_size
1500 || cfun->machine->uses_anonymous_args
1501 /* Or if the function calls __builtin_eh_return () */
1502 || current_function_calls_eh_return
1503 /* Or if the function calls alloca */
1504 || current_function_calls_alloca
1505 /* Or if there is a stack adjustment. However, if the stack pointer
1506 is saved on the stack, we can use a pre-incrementing stack load. */
1507 || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
1510 saved_int_regs = arm_compute_save_reg_mask ();
1512 /* Unfortunately, the insn
1514 ldmib sp, {..., sp, ...}
1516 triggers a bug on most SA-110 based devices, such that the stack
1517 pointer won't be correctly restored if the instruction takes a
1518 page fault. We work around this problem by popping r3 along with
1519 the other registers, since that is never slower than executing
1520 another instruction.
1522 We test for !arm_arch5 here, because code for any architecture
1523 less than this could potentially be run on one of the buggy
1525 if (stack_adjust == 4 && !arm_arch5)
1527 /* Validate that r3 is a call-clobbered register (always true in
1528 the default abi) ... */
1529 if (!call_used_regs[3])
1532 /* ... that it isn't being used for a return value ... */
1533 if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
1536 /* ... or for a tail-call argument ... */
1539 gcc_assert (GET_CODE (sibling) == CALL_INSN);
1541 if (find_regno_fusage (sibling, USE, 3))
1545 /* ... and that there are no call-saved registers in r0-r2
1546 (always true in the default ABI). */
1547 if (saved_int_regs & 0x7)
1551 /* Can't be done if interworking with Thumb, and any registers have been
1553 if (TARGET_INTERWORK && saved_int_regs != 0)
1556 /* On StrongARM, conditional returns are expensive if they aren't
1557 taken and multiple registers have been stacked. */
1558 if (iscond && arm_tune_strongarm)
1560 /* Conditional return when just the LR is stored is a simple
1561 conditional-load instruction, that's not expensive. */
1562 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
1566 && arm_pic_register != INVALID_REGNUM
1567 && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
1571 /* If there are saved registers but the LR isn't saved, then we need
1572 two instructions for the return. */
1573 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
1576 /* Can't be done if any of the FPA regs are pushed,
1577 since this also requires an insn. */
1578 if (TARGET_HARD_FLOAT && TARGET_FPA)
1579 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
1580 if (regs_ever_live[regno] && !call_used_regs[regno])
1583 /* Likewise VFP regs. */
1584 if (TARGET_HARD_FLOAT && TARGET_VFP)
1585 for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
1586 if (regs_ever_live[regno] && !call_used_regs[regno])
1589 if (TARGET_REALLY_IWMMXT)
1590 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
1591 if (regs_ever_live[regno] && ! call_used_regs [regno])
1597 /* Return TRUE if int I is a valid immediate ARM constant. */
1600 const_ok_for_arm (HOST_WIDE_INT i)
1604 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1605 be all zero, or all one. */
1606 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
1607 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
1608 != ((~(unsigned HOST_WIDE_INT) 0)
1609 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
1612 i &= (unsigned HOST_WIDE_INT) 0xffffffff;
1614 /* Fast return for 0 and small values. We must do this for zero, since
1615 the code below can't handle that one case. */
1616 if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
1619 /* Get the number of trailing zeros, rounded down to the nearest even
1621 lowbit = (ffs ((int) i) - 1) & ~1;
1623 if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
1625 else if (lowbit <= 4
1626 && ((i & ~0xc000003f) == 0
1627 || (i & ~0xf000000f) == 0
1628 || (i & ~0xfc000003) == 0))
1634 /* Return true if I is a valid constant for the operation CODE. */
1636 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
1638 if (const_ok_for_arm (i))
1644 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
1646 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
1652 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1659 /* Emit a sequence of insns to handle a large constant.
1660 CODE is the code of the operation required, it can be any of SET, PLUS,
1661 IOR, AND, XOR, MINUS;
1662 MODE is the mode in which the operation is being performed;
1663 VAL is the integer to operate on;
1664 SOURCE is the other operand (a register, or a null-pointer for SET);
1665 SUBTARGETS means it is safe to create scratch registers if that will
1666 either produce a simpler sequence, or we will want to cse the values.
1667 Return value is the number of insns emitted. */
1670 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1671 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1675 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1676 cond = COND_EXEC_TEST (PATTERN (insn));
1680 if (subtargets || code == SET
1681 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1682 && REGNO (target) != REGNO (source)))
1684 /* After arm_reorg has been called, we can't fix up expensive
1685 constants by pushing them into memory so we must synthesize
1686 them in-line, regardless of the cost. This is only likely to
1687 be more costly on chips that have load delay slots and we are
1688 compiling without running the scheduler (so no splitting
1689 occurred before the final instruction emission).
1691 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1693 if (!after_arm_reorg
1695 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1697 > arm_constant_limit + (code != SET)))
1701 /* Currently SET is the only monadic value for CODE, all
1702 the rest are diadic. */
1703 emit_set_insn (target, GEN_INT (val));
1708 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1710 emit_set_insn (temp, GEN_INT (val));
1711 /* For MINUS, the value is subtracted from, since we never
1712 have subtraction of a constant. */
1714 emit_set_insn (target, gen_rtx_MINUS (mode, temp, source));
1716 emit_set_insn (target,
1717 gen_rtx_fmt_ee (code, mode, source, temp));
1723 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1728 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1730 HOST_WIDE_INT temp1;
1738 if (remainder & (3 << (i - 2)))
1743 temp1 = remainder & ((0x0ff << end)
1744 | ((i < end) ? (0xff >> (32 - end)) : 0));
1745 remainder &= ~temp1;
1750 } while (remainder);
1754 /* Emit an instruction with the indicated PATTERN. If COND is
1755 non-NULL, conditionalize the execution of the instruction on COND
1759 emit_constant_insn (rtx cond, rtx pattern)
1762 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1763 emit_insn (pattern);
1766 /* As above, but extra parameter GENERATE which, if clear, suppresses
1770 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1771 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1776 int can_negate_initial = 0;
1779 int num_bits_set = 0;
1780 int set_sign_bit_copies = 0;
1781 int clear_sign_bit_copies = 0;
1782 int clear_zero_bit_copies = 0;
1783 int set_zero_bit_copies = 0;
1785 unsigned HOST_WIDE_INT temp1, temp2;
1786 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1788 /* Find out which operations are safe for a given CODE. Also do a quick
1789 check for degenerate cases; these can occur when DImode operations
1801 can_negate_initial = 1;
1805 if (remainder == 0xffffffff)
1808 emit_constant_insn (cond,
1809 gen_rtx_SET (VOIDmode, target,
1810 GEN_INT (ARM_SIGN_EXTEND (val))));
1815 if (reload_completed && rtx_equal_p (target, source))
1818 emit_constant_insn (cond,
1819 gen_rtx_SET (VOIDmode, target, source));
1828 emit_constant_insn (cond,
1829 gen_rtx_SET (VOIDmode, target, const0_rtx));
1832 if (remainder == 0xffffffff)
1834 if (reload_completed && rtx_equal_p (target, source))
1837 emit_constant_insn (cond,
1838 gen_rtx_SET (VOIDmode, target, source));
1847 if (reload_completed && rtx_equal_p (target, source))
1850 emit_constant_insn (cond,
1851 gen_rtx_SET (VOIDmode, target, source));
1855 /* We don't know how to handle other cases yet. */
1856 gcc_assert (remainder == 0xffffffff);
1859 emit_constant_insn (cond,
1860 gen_rtx_SET (VOIDmode, target,
1861 gen_rtx_NOT (mode, source)));
1865 /* We treat MINUS as (val - source), since (source - val) is always
1866 passed as (source + (-val)). */
1870 emit_constant_insn (cond,
1871 gen_rtx_SET (VOIDmode, target,
1872 gen_rtx_NEG (mode, source)));
1875 if (const_ok_for_arm (val))
1878 emit_constant_insn (cond,
1879 gen_rtx_SET (VOIDmode, target,
1880 gen_rtx_MINUS (mode, GEN_INT (val),
1892 /* If we can do it in one insn get out quickly. */
1893 if (const_ok_for_arm (val)
1894 || (can_negate_initial && const_ok_for_arm (-val))
1895 || (can_invert && const_ok_for_arm (~val)))
1898 emit_constant_insn (cond,
1899 gen_rtx_SET (VOIDmode, target,
1901 ? gen_rtx_fmt_ee (code, mode, source,
1907 /* Calculate a few attributes that may be useful for specific
1909 for (i = 31; i >= 0; i--)
1911 if ((remainder & (1 << i)) == 0)
1912 clear_sign_bit_copies++;
1917 for (i = 31; i >= 0; i--)
1919 if ((remainder & (1 << i)) != 0)
1920 set_sign_bit_copies++;
1925 for (i = 0; i <= 31; i++)
1927 if ((remainder & (1 << i)) == 0)
1928 clear_zero_bit_copies++;
1933 for (i = 0; i <= 31; i++)
1935 if ((remainder & (1 << i)) != 0)
1936 set_zero_bit_copies++;
1944 /* See if we can do this by sign_extending a constant that is known
1945 to be negative. This is a good, way of doing it, since the shift
1946 may well merge into a subsequent insn. */
1947 if (set_sign_bit_copies > 1)
1949 if (const_ok_for_arm
1950 (temp1 = ARM_SIGN_EXTEND (remainder
1951 << (set_sign_bit_copies - 1))))
1955 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1956 emit_constant_insn (cond,
1957 gen_rtx_SET (VOIDmode, new_src,
1959 emit_constant_insn (cond,
1960 gen_ashrsi3 (target, new_src,
1961 GEN_INT (set_sign_bit_copies - 1)));
1965 /* For an inverted constant, we will need to set the low bits,
1966 these will be shifted out of harm's way. */
1967 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1968 if (const_ok_for_arm (~temp1))
1972 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1973 emit_constant_insn (cond,
1974 gen_rtx_SET (VOIDmode, new_src,
1976 emit_constant_insn (cond,
1977 gen_ashrsi3 (target, new_src,
1978 GEN_INT (set_sign_bit_copies - 1)));
1984 /* See if we can calculate the value as the difference between two
1985 valid immediates. */
1986 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
1988 int topshift = clear_sign_bit_copies & ~1;
1990 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
1991 & (0xff000000 >> topshift));
1993 /* If temp1 is zero, then that means the 9 most significant
1994 bits of remainder were 1 and we've caused it to overflow.
1995 When topshift is 0 we don't need to do anything since we
1996 can borrow from 'bit 32'. */
1997 if (temp1 == 0 && topshift != 0)
1998 temp1 = 0x80000000 >> (topshift - 1);
2000 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
2002 if (const_ok_for_arm (temp2))
2006 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2007 emit_constant_insn (cond,
2008 gen_rtx_SET (VOIDmode, new_src,
2010 emit_constant_insn (cond,
2011 gen_addsi3 (target, new_src,
2019 /* See if we can generate this by setting the bottom (or the top)
2020 16 bits, and then shifting these into the other half of the
2021 word. We only look for the simplest cases, to do more would cost
2022 too much. Be careful, however, not to generate this when the
2023 alternative would take fewer insns. */
2024 if (val & 0xffff0000)
2026 temp1 = remainder & 0xffff0000;
2027 temp2 = remainder & 0x0000ffff;
2029 /* Overlaps outside this range are best done using other methods. */
2030 for (i = 9; i < 24; i++)
2032 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
2033 && !const_ok_for_arm (temp2))
2035 rtx new_src = (subtargets
2036 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2038 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
2039 source, subtargets, generate);
2047 gen_rtx_ASHIFT (mode, source,
2054 /* Don't duplicate cases already considered. */
2055 for (i = 17; i < 24; i++)
2057 if (((temp1 | (temp1 >> i)) == remainder)
2058 && !const_ok_for_arm (temp1))
2060 rtx new_src = (subtargets
2061 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2063 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
2064 source, subtargets, generate);
2069 gen_rtx_SET (VOIDmode, target,
2072 gen_rtx_LSHIFTRT (mode, source,
2083 /* If we have IOR or XOR, and the constant can be loaded in a
2084 single instruction, and we can find a temporary to put it in,
2085 then this can be done in two instructions instead of 3-4. */
2087 /* TARGET can't be NULL if SUBTARGETS is 0 */
2088 || (reload_completed && !reg_mentioned_p (target, source)))
2090 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
2094 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2096 emit_constant_insn (cond,
2097 gen_rtx_SET (VOIDmode, sub,
2099 emit_constant_insn (cond,
2100 gen_rtx_SET (VOIDmode, target,
2101 gen_rtx_fmt_ee (code, mode,
2111 if (set_sign_bit_copies > 8
2112 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2116 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2117 rtx shift = GEN_INT (set_sign_bit_copies);
2121 gen_rtx_SET (VOIDmode, sub,
2123 gen_rtx_ASHIFT (mode,
2128 gen_rtx_SET (VOIDmode, target,
2130 gen_rtx_LSHIFTRT (mode, sub,
2136 if (set_zero_bit_copies > 8
2137 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2141 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2142 rtx shift = GEN_INT (set_zero_bit_copies);
2146 gen_rtx_SET (VOIDmode, sub,
2148 gen_rtx_LSHIFTRT (mode,
2153 gen_rtx_SET (VOIDmode, target,
2155 gen_rtx_ASHIFT (mode, sub,
2161 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2165 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2166 emit_constant_insn (cond,
2167 gen_rtx_SET (VOIDmode, sub,
2168 gen_rtx_NOT (mode, source)));
2171 sub = gen_reg_rtx (mode);
2172 emit_constant_insn (cond,
2173 gen_rtx_SET (VOIDmode, sub,
2174 gen_rtx_AND (mode, source,
2176 emit_constant_insn (cond,
2177 gen_rtx_SET (VOIDmode, target,
2178 gen_rtx_NOT (mode, sub)));
2185 /* See if two shifts will do 2 or more insn's worth of work. */
2186 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2188 HOST_WIDE_INT shift_mask = ((0xffffffff
2189 << (32 - clear_sign_bit_copies))
2192 if ((remainder | shift_mask) != 0xffffffff)
2196 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2197 insns = arm_gen_constant (AND, mode, cond,
2198 remainder | shift_mask,
2199 new_src, source, subtargets, 1);
2204 rtx targ = subtargets ? NULL_RTX : target;
2205 insns = arm_gen_constant (AND, mode, cond,
2206 remainder | shift_mask,
2207 targ, source, subtargets, 0);
2213 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2214 rtx shift = GEN_INT (clear_sign_bit_copies);
2216 emit_insn (gen_ashlsi3 (new_src, source, shift));
2217 emit_insn (gen_lshrsi3 (target, new_src, shift));
2223 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2225 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2227 if ((remainder | shift_mask) != 0xffffffff)
2231 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2233 insns = arm_gen_constant (AND, mode, cond,
2234 remainder | shift_mask,
2235 new_src, source, subtargets, 1);
2240 rtx targ = subtargets ? NULL_RTX : target;
2242 insns = arm_gen_constant (AND, mode, cond,
2243 remainder | shift_mask,
2244 targ, source, subtargets, 0);
2250 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2251 rtx shift = GEN_INT (clear_zero_bit_copies);
2253 emit_insn (gen_lshrsi3 (new_src, source, shift));
2254 emit_insn (gen_ashlsi3 (target, new_src, shift));
2266 for (i = 0; i < 32; i++)
2267 if (remainder & (1 << i))
2270 if (code == AND || (can_invert && num_bits_set > 16))
2271 remainder = (~remainder) & 0xffffffff;
2272 else if (code == PLUS && num_bits_set > 16)
2273 remainder = (-remainder) & 0xffffffff;
2280 /* Now try and find a way of doing the job in either two or three
2282 We start by looking for the largest block of zeros that are aligned on
2283 a 2-bit boundary, we then fill up the temps, wrapping around to the
2284 top of the word when we drop off the bottom.
2285 In the worst case this code should produce no more than four insns. */
2288 int best_consecutive_zeros = 0;
2290 for (i = 0; i < 32; i += 2)
2292 int consecutive_zeros = 0;
2294 if (!(remainder & (3 << i)))
2296 while ((i < 32) && !(remainder & (3 << i)))
2298 consecutive_zeros += 2;
2301 if (consecutive_zeros > best_consecutive_zeros)
2303 best_consecutive_zeros = consecutive_zeros;
2304 best_start = i - consecutive_zeros;
2310 /* So long as it won't require any more insns to do so, it's
2311 desirable to emit a small constant (in bits 0...9) in the last
2312 insn. This way there is more chance that it can be combined with
2313 a later addressing insn to form a pre-indexed load or store
2314 operation. Consider:
2316 *((volatile int *)0xe0000100) = 1;
2317 *((volatile int *)0xe0000110) = 2;
2319 We want this to wind up as:
2323 str rB, [rA, #0x100]
2325 str rB, [rA, #0x110]
2327 rather than having to synthesize both large constants from scratch.
2329 Therefore, we calculate how many insns would be required to emit
2330 the constant starting from `best_start', and also starting from
2331 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2332 yield a shorter sequence, we may as well use zero. */
2334 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2335 && (count_insns_for_constant (remainder, 0) <=
2336 count_insns_for_constant (remainder, best_start)))
2339 /* Now start emitting the insns. */
2347 if (remainder & (3 << (i - 2)))
2352 temp1 = remainder & ((0x0ff << end)
2353 | ((i < end) ? (0xff >> (32 - end)) : 0));
2354 remainder &= ~temp1;
2358 rtx new_src, temp1_rtx;
2360 if (code == SET || code == MINUS)
2362 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2363 if (can_invert && code != MINUS)
2368 if (remainder && subtargets)
2369 new_src = gen_reg_rtx (mode);
2374 else if (can_negate)
2378 temp1 = trunc_int_for_mode (temp1, mode);
2379 temp1_rtx = GEN_INT (temp1);
2383 else if (code == MINUS)
2384 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2386 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2388 emit_constant_insn (cond,
2389 gen_rtx_SET (VOIDmode, new_src,
2399 else if (code == MINUS)
2413 /* Canonicalize a comparison so that we are more likely to recognize it.
2414 This can be done for a few constant compares, where we can make the
2415 immediate value easier to load. */
2418 arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
2421 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2422 unsigned HOST_WIDE_INT maxval;
2423 maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;
2434 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2436 *op1 = GEN_INT (i + 1);
2437 return code == GT ? GE : LT;
2444 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2446 *op1 = GEN_INT (i - 1);
2447 return code == GE ? GT : LE;
2453 if (i != ~((unsigned HOST_WIDE_INT) 0)
2454 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2456 *op1 = GEN_INT (i + 1);
2457 return code == GTU ? GEU : LTU;
2464 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2466 *op1 = GEN_INT (i - 1);
2467 return code == GEU ? GTU : LEU;
2479 /* Define how to find the value returned by a function. */
2482 arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2484 enum machine_mode mode;
2485 int unsignedp ATTRIBUTE_UNUSED;
2486 rtx r ATTRIBUTE_UNUSED;
2488 mode = TYPE_MODE (type);
2489 /* Promote integer types. */
2490 if (INTEGRAL_TYPE_P (type))
2491 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2493 /* Promotes small structs returned in a register to full-word size
2494 for big-endian AAPCS. */
2495 if (arm_return_in_msb (type))
2497 HOST_WIDE_INT size = int_size_in_bytes (type);
2498 if (size % UNITS_PER_WORD != 0)
2500 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2501 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2505 return LIBCALL_VALUE(mode);
2508 /* Determine the amount of memory needed to store the possible return
2509 registers of an untyped call. */
2511 arm_apply_result_size (void)
2517 if (TARGET_HARD_FLOAT_ABI)
2521 if (TARGET_MAVERICK)
2524 if (TARGET_IWMMXT_ABI)
2531 /* Decide whether a type should be returned in memory (true)
2532 or in a register (false). This is called by the macro
2533 RETURN_IN_MEMORY. */
2535 arm_return_in_memory (tree type)
2539 if (!AGGREGATE_TYPE_P (type) &&
2540 (TREE_CODE (type) != VECTOR_TYPE) &&
2541 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2542 /* All simple types are returned in registers.
2543 For AAPCS, complex types are treated the same as aggregates. */
2546 size = int_size_in_bytes (type);
2548 if (arm_abi != ARM_ABI_APCS)
2550 /* ATPCS and later return aggregate types in memory only if they are
2551 larger than a word (or are variable size). */
2552 return (size < 0 || size > UNITS_PER_WORD);
2555 /* To maximize backwards compatibility with previous versions of gcc,
2556 return vectors up to 4 words in registers. */
2557 if (TREE_CODE (type) == VECTOR_TYPE)
2558 return (size < 0 || size > (4 * UNITS_PER_WORD));
2560 /* For the arm-wince targets we choose to be compatible with Microsoft's
2561 ARM and Thumb compilers, which always return aggregates in memory. */
2563 /* All structures/unions bigger than one word are returned in memory.
2564 Also catch the case where int_size_in_bytes returns -1. In this case
2565 the aggregate is either huge or of variable size, and in either case
2566 we will want to return it via memory and not in a register. */
2567 if (size < 0 || size > UNITS_PER_WORD)
2570 if (TREE_CODE (type) == RECORD_TYPE)
2574 /* For a struct the APCS says that we only return in a register
2575 if the type is 'integer like' and every addressable element
2576 has an offset of zero. For practical purposes this means
2577 that the structure can have at most one non bit-field element
2578 and that this element must be the first one in the structure. */
2580 /* Find the first field, ignoring non FIELD_DECL things which will
2581 have been created by C++. */
2582 for (field = TYPE_FIELDS (type);
2583 field && TREE_CODE (field) != FIELD_DECL;
2584 field = TREE_CHAIN (field))
2588 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2590 /* Check that the first field is valid for returning in a register. */
2592 /* ... Floats are not allowed */
2593 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2596 /* ... Aggregates that are not themselves valid for returning in
2597 a register are not allowed. */
2598 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2601 /* Now check the remaining fields, if any. Only bitfields are allowed,
2602 since they are not addressable. */
2603 for (field = TREE_CHAIN (field);
2605 field = TREE_CHAIN (field))
2607 if (TREE_CODE (field) != FIELD_DECL)
2610 if (!DECL_BIT_FIELD_TYPE (field))
2617 if (TREE_CODE (type) == UNION_TYPE)
2621 /* Unions can be returned in registers if every element is
2622 integral, or can be returned in an integer register. */
2623 for (field = TYPE_FIELDS (type);
2625 field = TREE_CHAIN (field))
2627 if (TREE_CODE (field) != FIELD_DECL)
2630 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2633 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2639 #endif /* not ARM_WINCE */
2641 /* Return all other types in memory. */
2645 /* Indicate whether or not words of a double are in big-endian order. */
2648 arm_float_words_big_endian (void)
2650 if (TARGET_MAVERICK)
2653 /* For FPA, float words are always big-endian. For VFP, floats words
2654 follow the memory system mode. */
2662 return (TARGET_BIG_END ? 1 : 0);
2667 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2668 for a call to a function whose data type is FNTYPE.
2669 For a library call, FNTYPE is NULL. */
2671 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2672 rtx libname ATTRIBUTE_UNUSED,
2673 tree fndecl ATTRIBUTE_UNUSED)
2675 /* On the ARM, the offset starts at 0. */
2677 pcum->iwmmxt_nregs = 0;
2678 pcum->can_split = true;
2680 pcum->call_cookie = CALL_NORMAL;
2682 if (TARGET_LONG_CALLS)
2683 pcum->call_cookie = CALL_LONG;
2685 /* Check for long call/short call attributes. The attributes
2686 override any command line option. */
2689 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2690 pcum->call_cookie = CALL_SHORT;
2691 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2692 pcum->call_cookie = CALL_LONG;
2695 /* Varargs vectors are treated the same as long long.
2696 named_count avoids having to change the way arm handles 'named' */
2697 pcum->named_count = 0;
2700 if (TARGET_REALLY_IWMMXT && fntype)
2704 for (fn_arg = TYPE_ARG_TYPES (fntype);
2706 fn_arg = TREE_CHAIN (fn_arg))
2707 pcum->named_count += 1;
2709 if (! pcum->named_count)
2710 pcum->named_count = INT_MAX;
2715 /* Return true if mode/type need doubleword alignment. */
2717 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2719 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2720 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2724 /* Determine where to put an argument to a function.
2725 Value is zero to push the argument on the stack,
2726 or a hard register in which to store the argument.
2728 MODE is the argument's machine mode.
2729 TYPE is the data type of the argument (as a tree).
2730 This is null for libcalls where that information may
2732 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2733 the preceding args and about the function being called.
2734 NAMED is nonzero if this argument is a named parameter
2735 (otherwise it is an extra parameter matching an ellipsis). */
2738 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2739 tree type, int named)
2743 /* Varargs vectors are treated the same as long long.
2744 named_count avoids having to change the way arm handles 'named' */
2745 if (TARGET_IWMMXT_ABI
2746 && arm_vector_mode_supported_p (mode)
2747 && pcum->named_count > pcum->nargs + 1)
2749 if (pcum->iwmmxt_nregs <= 9)
2750 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2753 pcum->can_split = false;
2758 /* Put doubleword aligned quantities in even register pairs. */
2760 && ARM_DOUBLEWORD_ALIGN
2761 && arm_needs_doubleword_align (mode, type))
2764 if (mode == VOIDmode)
2765 /* Compute operand 2 of the call insn. */
2766 return GEN_INT (pcum->call_cookie);
2768 /* Only allow splitting an arg between regs and memory if all preceding
2769 args were allocated to regs. For args passed by reference we only count
2770 the reference pointer. */
2771 if (pcum->can_split)
2774 nregs = ARM_NUM_REGS2 (mode, type);
2776 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2779 return gen_rtx_REG (mode, pcum->nregs);
2783 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2784 tree type, bool named ATTRIBUTE_UNUSED)
2786 int nregs = pcum->nregs;
2788 if (arm_vector_mode_supported_p (mode))
2791 if (NUM_ARG_REGS > nregs
2792 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2794 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2799 /* Variable sized types are passed by reference. This is a GCC
2800 extension to the ARM ABI. */
2803 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2804 enum machine_mode mode ATTRIBUTE_UNUSED,
2805 tree type, bool named ATTRIBUTE_UNUSED)
2807 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2810 /* Encode the current state of the #pragma [no_]long_calls. */
2813 OFF, /* No #pragma [no_]long_calls is in effect. */
2814 LONG, /* #pragma long_calls is in effect. */
2815 SHORT /* #pragma no_long_calls is in effect. */
2818 static arm_pragma_enum arm_pragma_long_calls = OFF;
2821 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2823 arm_pragma_long_calls = LONG;
2827 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2829 arm_pragma_long_calls = SHORT;
2833 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2835 arm_pragma_long_calls = OFF;
2838 /* Table of machine attributes. */
2839 const struct attribute_spec arm_attribute_table[] =
2841 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2842 /* Function calls made to this symbol must be done indirectly, because
2843 it may lie outside of the 26 bit addressing range of a normal function
2845 { "long_call", 0, 0, false, true, true, NULL },
2846 /* Whereas these functions are always known to reside within the 26 bit
2847 addressing range. */
2848 { "short_call", 0, 0, false, true, true, NULL },
2849 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2850 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2851 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2852 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2854 /* ARM/PE has three new attributes:
2856 dllexport - for exporting a function/variable that will live in a dll
2857 dllimport - for importing a function/variable from a dll
2859 Microsoft allows multiple declspecs in one __declspec, separating
2860 them with spaces. We do NOT support this. Instead, use __declspec
2863 { "dllimport", 0, 0, true, false, false, NULL },
2864 { "dllexport", 0, 0, true, false, false, NULL },
2865 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2866 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2867 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2868 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2869 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2871 { NULL, 0, 0, false, false, false, NULL }
2874 /* Handle an attribute requiring a FUNCTION_DECL;
2875 arguments as in struct attribute_spec.handler. */
2877 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2878 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2880 if (TREE_CODE (*node) != FUNCTION_DECL)
2882 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2883 IDENTIFIER_POINTER (name));
2884 *no_add_attrs = true;
2890 /* Handle an "interrupt" or "isr" attribute;
2891 arguments as in struct attribute_spec.handler. */
2893 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2898 if (TREE_CODE (*node) != FUNCTION_DECL)
2900 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2901 IDENTIFIER_POINTER (name));
2902 *no_add_attrs = true;
2904 /* FIXME: the argument if any is checked for type attributes;
2905 should it be checked for decl ones? */
2909 if (TREE_CODE (*node) == FUNCTION_TYPE
2910 || TREE_CODE (*node) == METHOD_TYPE)
2912 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2914 warning (OPT_Wattributes, "%qs attribute ignored",
2915 IDENTIFIER_POINTER (name));
2916 *no_add_attrs = true;
2919 else if (TREE_CODE (*node) == POINTER_TYPE
2920 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2921 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2922 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2924 *node = build_variant_type_copy (*node);
2925 TREE_TYPE (*node) = build_type_attribute_variant
2927 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2928 *no_add_attrs = true;
2932 /* Possibly pass this attribute on from the type to a decl. */
2933 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2934 | (int) ATTR_FLAG_FUNCTION_NEXT
2935 | (int) ATTR_FLAG_ARRAY_NEXT))
2937 *no_add_attrs = true;
2938 return tree_cons (name, args, NULL_TREE);
2942 warning (OPT_Wattributes, "%qs attribute ignored",
2943 IDENTIFIER_POINTER (name));
2951 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2952 /* Handle the "notshared" attribute. This attribute is another way of
2953 requesting hidden visibility. ARM's compiler supports
2954 "__declspec(notshared)"; we support the same thing via an
2958 arm_handle_notshared_attribute (tree *node,
2959 tree name ATTRIBUTE_UNUSED,
2960 tree args ATTRIBUTE_UNUSED,
2961 int flags ATTRIBUTE_UNUSED,
2964 tree decl = TYPE_NAME (*node);
2968 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2969 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2970 *no_add_attrs = false;
2976 /* Return 0 if the attributes for two types are incompatible, 1 if they
2977 are compatible, and 2 if they are nearly compatible (which causes a
2978 warning to be generated). */
2980 arm_comp_type_attributes (tree type1, tree type2)
2984 /* Check for mismatch of non-default calling convention. */
2985 if (TREE_CODE (type1) != FUNCTION_TYPE)
2988 /* Check for mismatched call attributes. */
2989 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2990 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2991 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2992 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2994 /* Only bother to check if an attribute is defined. */
2995 if (l1 | l2 | s1 | s2)
2997 /* If one type has an attribute, the other must have the same attribute. */
2998 if ((l1 != l2) || (s1 != s2))
3001 /* Disallow mixed attributes. */
3002 if ((l1 & s2) || (l2 & s1))
3006 /* Check for mismatched ISR attribute. */
3007 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
3009 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
3010 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
3012 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
3019 /* Encode long_call or short_call attribute by prefixing
3020 symbol name in DECL with a special character FLAG. */
3022 arm_encode_call_attribute (tree decl, int flag)
3024 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
3025 int len = strlen (str);
3028 /* Do not allow weak functions to be treated as short call. */
3029 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
3032 newstr = alloca (len + 2);
3034 strcpy (newstr + 1, str);
3036 newstr = (char *) ggc_alloc_string (newstr, len + 1);
3037 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
3040 /* Assigns default attributes to newly defined type. This is used to
3041 set short_call/long_call attributes for function types of
3042 functions defined inside corresponding #pragma scopes. */
3044 arm_set_default_type_attributes (tree type)
3046 /* Add __attribute__ ((long_call)) to all functions, when
3047 inside #pragma long_calls or __attribute__ ((short_call)),
3048 when inside #pragma no_long_calls. */
3049 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
3051 tree type_attr_list, attr_name;
3052 type_attr_list = TYPE_ATTRIBUTES (type);
3054 if (arm_pragma_long_calls == LONG)
3055 attr_name = get_identifier ("long_call");
3056 else if (arm_pragma_long_calls == SHORT)
3057 attr_name = get_identifier ("short_call");
3061 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
3062 TYPE_ATTRIBUTES (type) = type_attr_list;
3066 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
3067 defined within the current compilation unit. If this cannot be
3068 determined, then 0 is returned. */
3070 current_file_function_operand (rtx sym_ref)
3072 /* This is a bit of a fib. A function will have a short call flag
3073 applied to its name if it has the short call attribute, or it has
3074 already been defined within the current compilation unit. */
3075 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
3078 /* The current function is always defined within the current compilation
3079 unit. If it s a weak definition however, then this may not be the real
3080 definition of the function, and so we have to say no. */
3081 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
3082 && !DECL_WEAK (current_function_decl))
3085 /* We cannot make the determination - default to returning 0. */
3089 /* Return nonzero if a 32 bit "long_call" should be generated for
3090 this call. We generate a long_call if the function:
3092 a. has an __attribute__((long call))
3093 or b. is within the scope of a #pragma long_calls
3094 or c. the -mlong-calls command line switch has been specified
3096 1. -ffunction-sections is in effect
3097 or 2. the current function has __attribute__ ((section))
3098 or 3. the target function has __attribute__ ((section))
3100 However we do not generate a long call if the function:
3102 d. has an __attribute__ ((short_call))
3103 or e. is inside the scope of a #pragma no_long_calls
3104 or f. is defined within the current compilation unit.
3106 This function will be called by C fragments contained in the machine
3107 description file. SYM_REF and CALL_COOKIE correspond to the matched
3108 rtl operands. CALL_SYMBOL is used to distinguish between
3109 two different callers of the function. It is set to 1 in the
3110 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3111 and "call_value" patterns. This is because of the difference in the
3112 SYM_REFs passed by these patterns. */
3114 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
3118 if (GET_CODE (sym_ref) != MEM)
3121 sym_ref = XEXP (sym_ref, 0);
3124 if (GET_CODE (sym_ref) != SYMBOL_REF)
3127 if (call_cookie & CALL_SHORT)
3130 if (TARGET_LONG_CALLS)
3132 if (flag_function_sections
3133 || DECL_SECTION_NAME (current_function_decl))
3134 /* c.3 is handled by the definition of the
3135 ARM_DECLARE_FUNCTION_SIZE macro. */
3139 if (current_file_function_operand (sym_ref))
3142 return (call_cookie & CALL_LONG)
3143 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
3144 || TARGET_LONG_CALLS;
3147 /* Return nonzero if it is ok to make a tail-call to DECL. */
3149 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3151 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
3153 if (cfun->machine->sibcall_blocked)
3156 /* Never tailcall something for which we have no decl, or if we
3157 are in Thumb mode. */
3158 if (decl == NULL || TARGET_THUMB)
3161 /* Get the calling method. */
3162 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3163 call_type = CALL_SHORT;
3164 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3165 call_type = CALL_LONG;
3167 /* Cannot tail-call to long calls, since these are out of range of
3168 a branch instruction. However, if not compiling PIC, we know
3169 we can reach the symbol if it is in this compilation unit. */
3170 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
3173 /* If we are interworking and the function is not declared static
3174 then we can't tail-call it unless we know that it exists in this
3175 compilation unit (since it might be a Thumb routine). */
3176 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3179 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3180 if (IS_INTERRUPT (arm_current_func_type ()))
3183 /* Everything else is ok. */
3188 /* Addressing mode support functions. */
3190 /* Return nonzero if X is a legitimate immediate operand when compiling
3191 for PIC. We know that X satisfies CONSTANT_P and flag_pic is true. */
3193 legitimate_pic_operand_p (rtx x)
3195 if (GET_CODE (x) == SYMBOL_REF
3196 || (GET_CODE (x) == CONST
3197 && GET_CODE (XEXP (x, 0)) == PLUS
3198 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
3205 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3207 if (GET_CODE (orig) == SYMBOL_REF
3208 || GET_CODE (orig) == LABEL_REF)
3210 #ifndef AOF_ASSEMBLER
3211 rtx pic_ref, address;
3216 /* If this function doesn't have a pic register, create one now.
3217 A lot of the logic here is made obscure by the fact that this
3218 routine gets called as part of the rtx cost estimation
3219 process. We don't want those calls to affect any assumptions
3220 about the real function; and further, we can't call
3221 entry_of_function() until we start the real expansion
3223 if (!current_function_uses_pic_offset_table)
3225 gcc_assert (!no_new_pseudos);
3226 if (arm_pic_register != INVALID_REGNUM)
3228 if (!cfun->machine->pic_reg)
3229 cfun->machine->pic_reg = gen_rtx_REG (Pmode, arm_pic_register);
3231 /* Play games to avoid marking the function as needing pic
3232 if we are being called as part of the cost-estimation
3235 current_function_uses_pic_offset_table = 1;
3241 if (!cfun->machine->pic_reg)
3242 cfun->machine->pic_reg = gen_reg_rtx (Pmode);
3244 /* Play games to avoid marking the function as needing pic
3245 if we are being called as part of the cost-estimation
3249 current_function_uses_pic_offset_table = 1;
3252 arm_load_pic_register (0UL);
3256 emit_insn_after (seq, entry_of_function ());
3263 gcc_assert (!no_new_pseudos);
3264 reg = gen_reg_rtx (Pmode);
3269 #ifdef AOF_ASSEMBLER
3270 /* The AOF assembler can generate relocations for these directly, and
3271 understands that the PIC register has to be added into the offset. */
3272 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3275 address = gen_reg_rtx (Pmode);
3280 emit_insn (gen_pic_load_addr_arm (address, orig));
3282 emit_insn (gen_pic_load_addr_thumb (address, orig));
3284 if ((GET_CODE (orig) == LABEL_REF
3285 || (GET_CODE (orig) == SYMBOL_REF &&
3286 SYMBOL_REF_LOCAL_P (orig)))
3288 pic_ref = gen_rtx_PLUS (Pmode, cfun->machine->pic_reg, address);
3291 pic_ref = gen_const_mem (Pmode,
3292 gen_rtx_PLUS (Pmode, cfun->machine->pic_reg,
3296 insn = emit_move_insn (reg, pic_ref);
3298 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3300 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3304 else if (GET_CODE (orig) == CONST)
3308 if (GET_CODE (XEXP (orig, 0)) == PLUS
3309 && XEXP (XEXP (orig, 0), 0) == cfun->machine->pic_reg)
3312 if (GET_CODE (XEXP (orig, 0)) == UNSPEC
3313 && XINT (XEXP (orig, 0), 1) == UNSPEC_TLS)
3318 gcc_assert (!no_new_pseudos);
3319 reg = gen_reg_rtx (Pmode);
3322 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3324 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3325 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3326 base == reg ? 0 : reg);
3328 if (GET_CODE (offset) == CONST_INT)
3330 /* The base register doesn't really matter, we only want to
3331 test the index for the appropriate mode. */
3332 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3334 gcc_assert (!no_new_pseudos);
3335 offset = force_reg (Pmode, offset);
3338 if (GET_CODE (offset) == CONST_INT)
3339 return plus_constant (base, INTVAL (offset));
3342 if (GET_MODE_SIZE (mode) > 4
3343 && (GET_MODE_CLASS (mode) == MODE_INT
3344 || TARGET_SOFT_FLOAT))
3346 emit_insn (gen_addsi3 (reg, base, offset));
3350 return gen_rtx_PLUS (Pmode, base, offset);
3357 /* Find a spare low register to use during the prolog of a function. */
3360 thumb_find_work_register (unsigned long pushed_regs_mask)
3364 /* Check the argument registers first as these are call-used. The
3365 register allocation order means that sometimes r3 might be used
3366 but earlier argument registers might not, so check them all. */
3367 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3368 if (!regs_ever_live[reg])
3371 /* Before going on to check the call-saved registers we can try a couple
3372 more ways of deducing that r3 is available. The first is when we are
3373 pushing anonymous arguments onto the stack and we have less than 4
3374 registers worth of fixed arguments(*). In this case r3 will be part of
3375 the variable argument list and so we can be sure that it will be
3376 pushed right at the start of the function. Hence it will be available
3377 for the rest of the prologue.
3378 (*): ie current_function_pretend_args_size is greater than 0. */
3379 if (cfun->machine->uses_anonymous_args
3380 && current_function_pretend_args_size > 0)
3381 return LAST_ARG_REGNUM;
3383 /* The other case is when we have fixed arguments but less than 4 registers
3384 worth. In this case r3 might be used in the body of the function, but
3385 it is not being used to convey an argument into the function. In theory
3386 we could just check current_function_args_size to see how many bytes are
3387 being passed in argument registers, but it seems that it is unreliable.
3388 Sometimes it will have the value 0 when in fact arguments are being
3389 passed. (See testcase execute/20021111-1.c for an example). So we also
3390 check the args_info.nregs field as well. The problem with this field is
3391 that it makes no allowances for arguments that are passed to the
3392 function but which are not used. Hence we could miss an opportunity
3393 when a function has an unused argument in r3. But it is better to be
3394 safe than to be sorry. */
3395 if (! cfun->machine->uses_anonymous_args
3396 && current_function_args_size >= 0
3397 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3398 && cfun->args_info.nregs < 4)
3399 return LAST_ARG_REGNUM;
3401 /* Otherwise look for a call-saved register that is going to be pushed. */
3402 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3403 if (pushed_regs_mask & (1 << reg))
3406 /* Something went wrong - thumb_compute_save_reg_mask()
3407 should have arranged for a suitable register to be pushed. */
3411 static GTY(()) int pic_labelno;
3413 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3417 arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
3419 #ifndef AOF_ASSEMBLER
3420 rtx l1, labelno, pic_tmp, pic_tmp2, pic_rtx;
3421 rtx global_offset_table;
3423 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3426 gcc_assert (flag_pic);
3428 /* We use an UNSPEC rather than a LABEL_REF because this label never appears
3429 in the code stream. */
3431 labelno = GEN_INT (pic_labelno++);
3432 l1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3433 l1 = gen_rtx_CONST (VOIDmode, l1);
3435 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3436 /* On the ARM the PC register contains 'dot + 8' at the time of the
3437 addition, on the Thumb it is 'dot + 4'. */
3438 pic_tmp = plus_constant (l1, TARGET_ARM ? 8 : 4);
3440 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3441 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3443 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3445 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3449 emit_insn (gen_pic_load_addr_arm (cfun->machine->pic_reg, pic_rtx));
3450 emit_insn (gen_pic_add_dot_plus_eight (cfun->machine->pic_reg,
3451 cfun->machine->pic_reg, labelno));
3455 if (arm_pic_register != INVALID_REGNUM
3456 && REGNO (cfun->machine->pic_reg) > LAST_LO_REGNUM)
3458 /* We will have pushed the pic register, so we should always be
3459 able to find a work register. */
3460 pic_tmp = gen_rtx_REG (SImode,
3461 thumb_find_work_register (saved_regs));
3462 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3463 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3466 emit_insn (gen_pic_load_addr_thumb (cfun->machine->pic_reg, pic_rtx));
3467 emit_insn (gen_pic_add_dot_plus_four (cfun->machine->pic_reg,
3468 cfun->machine->pic_reg, labelno));
3471 /* Need to emit this whether or not we obey regdecls,
3472 since setjmp/longjmp can cause life info to screw up. */
3473 emit_insn (gen_rtx_USE (VOIDmode, cfun->machine->pic_reg));
3474 #endif /* AOF_ASSEMBLER */
3478 /* Return nonzero if X is valid as an ARM state addressing register. */
3480 arm_address_register_rtx_p (rtx x, int strict_p)
3484 if (GET_CODE (x) != REG)
3490 return ARM_REGNO_OK_FOR_BASE_P (regno);
3492 return (regno <= LAST_ARM_REGNUM
3493 || regno >= FIRST_PSEUDO_REGISTER
3494 || regno == FRAME_POINTER_REGNUM
3495 || regno == ARG_POINTER_REGNUM);
3498 /* Return TRUE if this rtx is the difference of a symbol and a label,
3499 and will reduce to a PC-relative relocation in the object file.
3500 Expressions like this can be left alone when generating PIC, rather
3501 than forced through the GOT. */
3503 pcrel_constant_p (rtx x)
3505 if (GET_CODE (x) == MINUS)
3506 return symbol_mentioned_p (XEXP (x, 0)) && label_mentioned_p (XEXP (x, 1));
3511 /* Return nonzero if X is a valid ARM state address operand. */
3513 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3517 enum rtx_code code = GET_CODE (x);
3519 if (arm_address_register_rtx_p (x, strict_p))
3522 use_ldrd = (TARGET_LDRD
3524 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3526 if (code == POST_INC || code == PRE_DEC
3527 || ((code == PRE_INC || code == POST_DEC)
3528 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3529 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3531 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3532 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3533 && GET_CODE (XEXP (x, 1)) == PLUS
3534 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3536 rtx addend = XEXP (XEXP (x, 1), 1);
3538 /* Don't allow ldrd post increment by register because it's hard
3539 to fixup invalid register choices. */
3541 && GET_CODE (x) == POST_MODIFY
3542 && GET_CODE (addend) == REG)
3545 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3546 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3549 /* After reload constants split into minipools will have addresses
3550 from a LABEL_REF. */
3551 else if (reload_completed
3552 && (code == LABEL_REF
3554 && GET_CODE (XEXP (x, 0)) == PLUS
3555 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3556 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3559 else if (mode == TImode)
3562 else if (code == PLUS)
3564 rtx xop0 = XEXP (x, 0);
3565 rtx xop1 = XEXP (x, 1);
3567 return ((arm_address_register_rtx_p (xop0, strict_p)
3568 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3569 || (arm_address_register_rtx_p (xop1, strict_p)
3570 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3574 /* Reload currently can't handle MINUS, so disable this for now */
3575 else if (GET_CODE (x) == MINUS)
3577 rtx xop0 = XEXP (x, 0);
3578 rtx xop1 = XEXP (x, 1);
3580 return (arm_address_register_rtx_p (xop0, strict_p)
3581 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3585 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3586 && code == SYMBOL_REF
3587 && CONSTANT_POOL_ADDRESS_P (x)
3589 && symbol_mentioned_p (get_pool_constant (x))
3590 && ! pcrel_constant_p (get_pool_constant (x))))
3596 /* Return nonzero if INDEX is valid for an address index operand in
3599 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3602 HOST_WIDE_INT range;
3603 enum rtx_code code = GET_CODE (index);
3605 /* Standard coprocessor addressing modes. */
3606 if (TARGET_HARD_FLOAT
3607 && (TARGET_FPA || TARGET_MAVERICK)
3608 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3609 || (TARGET_MAVERICK && mode == DImode)))
3610 return (code == CONST_INT && INTVAL (index) < 1024
3611 && INTVAL (index) > -1024
3612 && (INTVAL (index) & 3) == 0);
3614 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3616 /* For DImode assume values will usually live in core regs
3617 and only allow LDRD addressing modes. */
3618 if (!TARGET_LDRD || mode != DImode)
3619 return (code == CONST_INT
3620 && INTVAL (index) < 1024
3621 && INTVAL (index) > -1024
3622 && (INTVAL (index) & 3) == 0);
3625 if (arm_address_register_rtx_p (index, strict_p)
3626 && (GET_MODE_SIZE (mode) <= 4))
3629 if (mode == DImode || mode == DFmode)
3631 if (code == CONST_INT)
3633 HOST_WIDE_INT val = INTVAL (index);
3636 return val > -256 && val < 256;
3638 return val > -4096 && val < 4092;
3641 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3644 if (GET_MODE_SIZE (mode) <= 4
3647 || (mode == QImode && outer == SIGN_EXTEND))))
3651 rtx xiop0 = XEXP (index, 0);
3652 rtx xiop1 = XEXP (index, 1);
3654 return ((arm_address_register_rtx_p (xiop0, strict_p)
3655 && power_of_two_operand (xiop1, SImode))
3656 || (arm_address_register_rtx_p (xiop1, strict_p)
3657 && power_of_two_operand (xiop0, SImode)));
3659 else if (code == LSHIFTRT || code == ASHIFTRT
3660 || code == ASHIFT || code == ROTATERT)
3662 rtx op = XEXP (index, 1);
3664 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3665 && GET_CODE (op) == CONST_INT
3667 && INTVAL (op) <= 31);
3671 /* For ARM v4 we may be doing a sign-extend operation during the
3675 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3681 range = (mode == HImode) ? 4095 : 4096;
3683 return (code == CONST_INT
3684 && INTVAL (index) < range
3685 && INTVAL (index) > -range);
3688 /* Return nonzero if X is valid as a Thumb state base register. */
3690 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3694 if (GET_CODE (x) != REG)
3700 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3702 return (regno <= LAST_LO_REGNUM
3703 || regno > LAST_VIRTUAL_REGISTER
3704 || regno == FRAME_POINTER_REGNUM
3705 || (GET_MODE_SIZE (mode) >= 4
3706 && (regno == STACK_POINTER_REGNUM
3707 || regno >= FIRST_PSEUDO_REGISTER
3708 || x == hard_frame_pointer_rtx
3709 || x == arg_pointer_rtx)));
3712 /* Return nonzero if x is a legitimate index register. This is the case
3713 for any base register that can access a QImode object. */
3715 thumb_index_register_rtx_p (rtx x, int strict_p)
3717 return thumb_base_register_rtx_p (x, QImode, strict_p);
3720 /* Return nonzero if x is a legitimate Thumb-state address.
3722 The AP may be eliminated to either the SP or the FP, so we use the
3723 least common denominator, e.g. SImode, and offsets from 0 to 64.
3725 ??? Verify whether the above is the right approach.
3727 ??? Also, the FP may be eliminated to the SP, so perhaps that
3728 needs special handling also.
3730 ??? Look at how the mips16 port solves this problem. It probably uses
3731 better ways to solve some of these problems.
3733 Although it is not incorrect, we don't accept QImode and HImode
3734 addresses based on the frame pointer or arg pointer until the
3735 reload pass starts. This is so that eliminating such addresses
3736 into stack based ones won't produce impossible code. */
3738 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3740 /* ??? Not clear if this is right. Experiment. */
3741 if (GET_MODE_SIZE (mode) < 4
3742 && !(reload_in_progress || reload_completed)
3743 && (reg_mentioned_p (frame_pointer_rtx, x)
3744 || reg_mentioned_p (arg_pointer_rtx, x)
3745 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3746 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3747 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3748 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3751 /* Accept any base register. SP only in SImode or larger. */
3752 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3755 /* This is PC relative data before arm_reorg runs. */
3756 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3757 && GET_CODE (x) == SYMBOL_REF
3758 && CONSTANT_POOL_ADDRESS_P (x) && !flag_pic)
3761 /* This is PC relative data after arm_reorg runs. */
3762 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3763 && (GET_CODE (x) == LABEL_REF
3764 || (GET_CODE (x) == CONST
3765 && GET_CODE (XEXP (x, 0)) == PLUS
3766 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3767 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3770 /* Post-inc indexing only supported for SImode and larger. */
3771 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3772 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3775 else if (GET_CODE (x) == PLUS)
3777 /* REG+REG address can be any two index registers. */
3778 /* We disallow FRAME+REG addressing since we know that FRAME
3779 will be replaced with STACK, and SP relative addressing only
3780 permits SP+OFFSET. */
3781 if (GET_MODE_SIZE (mode) <= 4
3782 && XEXP (x, 0) != frame_pointer_rtx
3783 && XEXP (x, 1) != frame_pointer_rtx
3784 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3785 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3788 /* REG+const has 5-7 bit offset for non-SP registers. */
3789 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3790 || XEXP (x, 0) == arg_pointer_rtx)
3791 && GET_CODE (XEXP (x, 1)) == CONST_INT
3792 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3795 /* REG+const has 10 bit offset for SP, but only SImode and
3796 larger is supported. */
3797 /* ??? Should probably check for DI/DFmode overflow here
3798 just like GO_IF_LEGITIMATE_OFFSET does. */
3799 else if (GET_CODE (XEXP (x, 0)) == REG
3800 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3801 && GET_MODE_SIZE (mode) >= 4
3802 && GET_CODE (XEXP (x, 1)) == CONST_INT
3803 && INTVAL (XEXP (x, 1)) >= 0
3804 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3805 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3808 else if (GET_CODE (XEXP (x, 0)) == REG
3809 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3810 && GET_MODE_SIZE (mode) >= 4
3811 && GET_CODE (XEXP (x, 1)) == CONST_INT
3812 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3816 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3817 && GET_MODE_SIZE (mode) == 4
3818 && GET_CODE (x) == SYMBOL_REF
3819 && CONSTANT_POOL_ADDRESS_P (x)
3821 && symbol_mentioned_p (get_pool_constant (x))
3822 && ! pcrel_constant_p (get_pool_constant (x))))
3828 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3829 instruction of mode MODE. */
3831 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3833 switch (GET_MODE_SIZE (mode))
3836 return val >= 0 && val < 32;
3839 return val >= 0 && val < 64 && (val & 1) == 0;
3843 && (val + GET_MODE_SIZE (mode)) <= 128
3848 /* Build the SYMBOL_REF for __tls_get_addr. */
3850 static GTY(()) rtx tls_get_addr_libfunc;
3853 get_tls_get_addr (void)
3855 if (!tls_get_addr_libfunc)
3856 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
3857 return tls_get_addr_libfunc;
3861 arm_load_tp (rtx target)
3864 target = gen_reg_rtx (SImode);
3868 /* Can return in any reg. */
3869 emit_insn (gen_load_tp_hard (target));
3873 /* Always returned in r0. Immediately copy the result into a pseudo,
3874 otherwise other uses of r0 (e.g. setting up function arguments) may
3875 clobber the value. */
3879 emit_insn (gen_load_tp_soft ());
3881 tmp = gen_rtx_REG (SImode, 0);
3882 emit_move_insn (target, tmp);
3888 load_tls_operand (rtx x, rtx reg)
3892 if (reg == NULL_RTX)
3893 reg = gen_reg_rtx (SImode);
3895 tmp = gen_rtx_CONST (SImode, x);
3897 emit_move_insn (reg, tmp);
3903 arm_call_tls_get_addr (rtx x, rtx reg, rtx *valuep, int reloc)
3905 rtx insns, label, labelno, sum;
3909 labelno = GEN_INT (pic_labelno++);
3910 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3911 label = gen_rtx_CONST (VOIDmode, label);
3913 sum = gen_rtx_UNSPEC (Pmode,
3914 gen_rtvec (4, x, GEN_INT (reloc), label,
3915 GEN_INT (TARGET_ARM ? 8 : 4)),
3917 reg = load_tls_operand (sum, reg);
3920 emit_insn (gen_pic_add_dot_plus_eight (reg, reg, labelno));
3922 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
3924 *valuep = emit_library_call_value (get_tls_get_addr (), NULL_RTX, LCT_PURE, /* LCT_CONST? */
3925 Pmode, 1, reg, Pmode);
3927 insns = get_insns ();
3934 legitimize_tls_address (rtx x, rtx reg)
3936 rtx dest, tp, label, labelno, sum, insns, ret, eqv, addend;
3937 unsigned int model = SYMBOL_REF_TLS_MODEL (x);
3941 case TLS_MODEL_GLOBAL_DYNAMIC:
3942 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_GD32);
3943 dest = gen_reg_rtx (Pmode);
3944 emit_libcall_block (insns, dest, ret, x);
3947 case TLS_MODEL_LOCAL_DYNAMIC:
3948 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_LDM32);
3950 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
3951 share the LDM result with other LD model accesses. */
3952 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx),
3954 dest = gen_reg_rtx (Pmode);
3955 emit_libcall_block (insns, dest, ret, eqv);
3957 /* Load the addend. */
3958 addend = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (TLS_LDO32)),
3960 addend = force_reg (SImode, gen_rtx_CONST (SImode, addend));
3961 return gen_rtx_PLUS (Pmode, dest, addend);
3963 case TLS_MODEL_INITIAL_EXEC:
3964 labelno = GEN_INT (pic_labelno++);
3965 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3966 label = gen_rtx_CONST (VOIDmode, label);
3967 sum = gen_rtx_UNSPEC (Pmode,
3968 gen_rtvec (4, x, GEN_INT (TLS_IE32), label,
3969 GEN_INT (TARGET_ARM ? 8 : 4)),
3971 reg = load_tls_operand (sum, reg);
3974 emit_insn (gen_tls_load_dot_plus_eight (reg, reg, labelno));
3977 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
3978 emit_move_insn (reg, gen_const_mem (SImode, reg));
3981 tp = arm_load_tp (NULL_RTX);
3983 return gen_rtx_PLUS (Pmode, tp, reg);
3985 case TLS_MODEL_LOCAL_EXEC:
3986 tp = arm_load_tp (NULL_RTX);
3988 reg = gen_rtx_UNSPEC (Pmode,
3989 gen_rtvec (2, x, GEN_INT (TLS_LE32)),
3991 reg = force_reg (SImode, gen_rtx_CONST (SImode, reg));
3993 return gen_rtx_PLUS (Pmode, tp, reg);
4000 /* Try machine-dependent ways of modifying an illegitimate address
4001 to be legitimate. If we find one, return the new, valid address. */
4003 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
4005 if (arm_tls_symbol_p (x))
4006 return legitimize_tls_address (x, NULL_RTX);
4008 if (GET_CODE (x) == PLUS)
4010 rtx xop0 = XEXP (x, 0);
4011 rtx xop1 = XEXP (x, 1);
4013 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
4014 xop0 = force_reg (SImode, xop0);
4016 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
4017 xop1 = force_reg (SImode, xop1);
4019 if (ARM_BASE_REGISTER_RTX_P (xop0)
4020 && GET_CODE (xop1) == CONST_INT)
4022 HOST_WIDE_INT n, low_n;
4026 /* VFP addressing modes actually allow greater offsets, but for
4027 now we just stick with the lowest common denominator. */
4029 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
4041 low_n = ((mode) == TImode ? 0
4042 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
4046 base_reg = gen_reg_rtx (SImode);
4047 val = force_operand (plus_constant (xop0, n), NULL_RTX);
4048 emit_move_insn (base_reg, val);
4049 x = plus_constant (base_reg, low_n);
4051 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4052 x = gen_rtx_PLUS (SImode, xop0, xop1);
4055 /* XXX We don't allow MINUS any more -- see comment in
4056 arm_legitimate_address_p (). */
4057 else if (GET_CODE (x) == MINUS)
4059 rtx xop0 = XEXP (x, 0);
4060 rtx xop1 = XEXP (x, 1);
4062 if (CONSTANT_P (xop0))
4063 xop0 = force_reg (SImode, xop0);
4065 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
4066 xop1 = force_reg (SImode, xop1);
4068 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4069 x = gen_rtx_MINUS (SImode, xop0, xop1);
4072 /* Make sure to take full advantage of the pre-indexed addressing mode
4073 with absolute addresses which often allows for the base register to
4074 be factorized for multiple adjacent memory references, and it might
4075 even allows for the mini pool to be avoided entirely. */
4076 else if (GET_CODE (x) == CONST_INT && optimize > 0)
4079 HOST_WIDE_INT mask, base, index;
4082 /* ldr and ldrb can use a 12 bit index, ldrsb and the rest can only
4083 use a 8 bit index. So let's use a 12 bit index for SImode only and
4084 hope that arm_gen_constant will enable ldrb to use more bits. */
4085 bits = (mode == SImode) ? 12 : 8;
4086 mask = (1 << bits) - 1;
4087 base = INTVAL (x) & ~mask;
4088 index = INTVAL (x) & mask;
4089 if (bit_count (base & 0xffffffff) > (32 - bits)/2)
4091 /* It'll most probably be more efficient to generate the base
4092 with more bits set and use a negative index instead. */
4096 base_reg = force_reg (SImode, GEN_INT (base));
4097 x = plus_constant (base_reg, index);
4102 /* We need to find and carefully transform any SYMBOL and LABEL
4103 references; so go back to the original address expression. */
4104 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4106 if (new_x != orig_x)
4114 /* Try machine-dependent ways of modifying an illegitimate Thumb address
4115 to be legitimate. If we find one, return the new, valid address. */
4117 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
4119 if (arm_tls_symbol_p (x))
4120 return legitimize_tls_address (x, NULL_RTX);
4122 if (GET_CODE (x) == PLUS
4123 && GET_CODE (XEXP (x, 1)) == CONST_INT
4124 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
4125 || INTVAL (XEXP (x, 1)) < 0))
4127 rtx xop0 = XEXP (x, 0);
4128 rtx xop1 = XEXP (x, 1);
4129 HOST_WIDE_INT offset = INTVAL (xop1);
4131 /* Try and fold the offset into a biasing of the base register and
4132 then offsetting that. Don't do this when optimizing for space
4133 since it can cause too many CSEs. */
4134 if (optimize_size && offset >= 0
4135 && offset < 256 + 31 * GET_MODE_SIZE (mode))
4137 HOST_WIDE_INT delta;
4140 delta = offset - (256 - GET_MODE_SIZE (mode));
4141 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
4142 delta = 31 * GET_MODE_SIZE (mode);
4144 delta = offset & (~31 * GET_MODE_SIZE (mode));
4146 xop0 = force_operand (plus_constant (xop0, offset - delta),
4148 x = plus_constant (xop0, delta);
4150 else if (offset < 0 && offset > -256)
4151 /* Small negative offsets are best done with a subtract before the
4152 dereference, forcing these into a register normally takes two
4154 x = force_operand (x, NULL_RTX);
4157 /* For the remaining cases, force the constant into a register. */
4158 xop1 = force_reg (SImode, xop1);
4159 x = gen_rtx_PLUS (SImode, xop0, xop1);
4162 else if (GET_CODE (x) == PLUS
4163 && s_register_operand (XEXP (x, 1), SImode)
4164 && !s_register_operand (XEXP (x, 0), SImode))
4166 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
4168 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
4173 /* We need to find and carefully transform any SYMBOL and LABEL
4174 references; so go back to the original address expression. */
4175 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4177 if (new_x != orig_x)
4185 thumb_legitimize_reload_address (rtx *x_p,
4186 enum machine_mode mode,
4187 int opnum, int type,
4188 int ind_levels ATTRIBUTE_UNUSED)
4192 if (GET_CODE (x) == PLUS
4193 && GET_MODE_SIZE (mode) < 4
4194 && REG_P (XEXP (x, 0))
4195 && XEXP (x, 0) == stack_pointer_rtx
4196 && GET_CODE (XEXP (x, 1)) == CONST_INT
4197 && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
4202 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4203 Pmode, VOIDmode, 0, 0, opnum, type);
4207 /* If both registers are hi-regs, then it's better to reload the
4208 entire expression rather than each register individually. That
4209 only requires one reload register rather than two. */
4210 if (GET_CODE (x) == PLUS
4211 && REG_P (XEXP (x, 0))
4212 && REG_P (XEXP (x, 1))
4213 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
4214 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
4219 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4220 Pmode, VOIDmode, 0, 0, opnum, type);
4227 /* Test for various thread-local symbols. */
4229 /* Return TRUE if X is a thread-local symbol. */
4232 arm_tls_symbol_p (rtx x)
4234 if (! TARGET_HAVE_TLS)
4237 if (GET_CODE (x) != SYMBOL_REF)
4240 return SYMBOL_REF_TLS_MODEL (x) != 0;
4243 /* Helper for arm_tls_referenced_p. */
4246 arm_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
4248 if (GET_CODE (*x) == SYMBOL_REF)
4249 return SYMBOL_REF_TLS_MODEL (*x) != 0;
4251 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
4252 TLS offsets, not real symbol references. */
4253 if (GET_CODE (*x) == UNSPEC
4254 && XINT (*x, 1) == UNSPEC_TLS)
4260 /* Return TRUE if X contains any TLS symbol references. */
4263 arm_tls_referenced_p (rtx x)
4265 if (! TARGET_HAVE_TLS)
4268 return for_each_rtx (&x, arm_tls_operand_p_1, NULL);
4271 #define REG_OR_SUBREG_REG(X) \
4272 (GET_CODE (X) == REG \
4273 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
4275 #define REG_OR_SUBREG_RTX(X) \
4276 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
4278 #ifndef COSTS_N_INSNS
4279 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
4282 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
4284 enum machine_mode mode = GET_MODE (x);
4297 return COSTS_N_INSNS (1);
4300 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4303 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
4310 return COSTS_N_INSNS (2) + cycles;
4312 return COSTS_N_INSNS (1) + 16;
4315 return (COSTS_N_INSNS (1)
4316 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
4317 + GET_CODE (SET_DEST (x)) == MEM));
4322 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
4324 if (thumb_shiftable_const (INTVAL (x)))
4325 return COSTS_N_INSNS (2);
4326 return COSTS_N_INSNS (3);
4328 else if ((outer == PLUS || outer == COMPARE)
4329 && INTVAL (x) < 256 && INTVAL (x) > -256)
4331 else if (outer == AND
4332 && INTVAL (x) < 256 && INTVAL (x) >= -256)
4333 return COSTS_N_INSNS (1);
4334 else if (outer == ASHIFT || outer == ASHIFTRT
4335 || outer == LSHIFTRT)
4337 return COSTS_N_INSNS (2);
4343 return COSTS_N_INSNS (3);
4361 /* XXX another guess. */
4362 /* Memory costs quite a lot for the first word, but subsequent words
4363 load at the equivalent of a single insn each. */
4364 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4365 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
4370 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4375 /* XXX still guessing. */
4376 switch (GET_MODE (XEXP (x, 0)))
4379 return (1 + (mode == DImode ? 4 : 0)
4380 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4383 return (4 + (mode == DImode ? 4 : 0)
4384 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4387 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4399 /* Worker routine for arm_rtx_costs. */
4401 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
4403 enum machine_mode mode = GET_MODE (x);
4404 enum rtx_code subcode;
4410 /* Memory costs quite a lot for the first word, but subsequent words
4411 load at the equivalent of a single insn each. */
4412 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4413 + (GET_CODE (x) == SYMBOL_REF
4414 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
4420 return optimize_size ? COSTS_N_INSNS (2) : 100;
4423 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4430 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
4432 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
4433 + ((GET_CODE (XEXP (x, 0)) == REG
4434 || (GET_CODE (XEXP (x, 0)) == SUBREG
4435 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4437 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
4438 || (GET_CODE (XEXP (x, 0)) == SUBREG
4439 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4441 + ((GET_CODE (XEXP (x, 1)) == REG
4442 || (GET_CODE (XEXP (x, 1)) == SUBREG
4443 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
4444 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
4449 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
4450 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4451 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4452 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4455 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4456 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4457 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4458 && arm_const_double_rtx (XEXP (x, 1))))
4460 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4461 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4462 && arm_const_double_rtx (XEXP (x, 0))))
4465 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4466 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4467 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4468 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4469 || subcode == ASHIFTRT || subcode == LSHIFTRT
4470 || subcode == ROTATE || subcode == ROTATERT
4472 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4473 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4474 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4475 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4476 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4477 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4478 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4483 if (GET_CODE (XEXP (x, 0)) == MULT)
4485 extra_cost = rtx_cost (XEXP (x, 0), code);
4486 if (!REG_OR_SUBREG_REG (XEXP (x, 1)))
4487 extra_cost += 4 * ARM_NUM_REGS (mode);
4491 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4492 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4493 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4494 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4495 && arm_const_double_rtx (XEXP (x, 1))))
4499 case AND: case XOR: case IOR:
4502 /* Normally the frame registers will be spilt into reg+const during
4503 reload, so it is a bad idea to combine them with other instructions,
4504 since then they might not be moved outside of loops. As a compromise
4505 we allow integration with ops that have a constant as their second
4507 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4508 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4509 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4510 || (REG_OR_SUBREG_REG (XEXP (x, 0))
4511 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
4515 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4516 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4517 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4518 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4521 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4522 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4523 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4524 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4525 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4528 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4529 return (1 + extra_cost
4530 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4531 || subcode == LSHIFTRT || subcode == ASHIFTRT
4532 || subcode == ROTATE || subcode == ROTATERT
4534 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4535 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4536 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4537 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4538 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4539 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4545 /* This should have been handled by the CPU specific routines. */
4549 if (arm_arch3m && mode == SImode
4550 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4551 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4552 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4553 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4554 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4555 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4560 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4561 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4565 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4567 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4570 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4578 return 4 + (mode == DImode ? 4 : 0);
4581 if (GET_MODE (XEXP (x, 0)) == QImode)
4582 return (4 + (mode == DImode ? 4 : 0)
4583 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4586 switch (GET_MODE (XEXP (x, 0)))
4589 return (1 + (mode == DImode ? 4 : 0)
4590 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4593 return (4 + (mode == DImode ? 4 : 0)
4594 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4597 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4612 if (const_ok_for_arm (INTVAL (x)))
4613 return outer == SET ? 2 : -1;
4614 else if (outer == AND
4615 && const_ok_for_arm (~INTVAL (x)))
4617 else if ((outer == COMPARE
4618 || outer == PLUS || outer == MINUS)
4619 && const_ok_for_arm (-INTVAL (x)))
4630 if (arm_const_double_rtx (x))
4631 return outer == SET ? 2 : -1;
4632 else if ((outer == COMPARE || outer == PLUS)
4633 && neg_const_double_rtx_ok_for_fpa (x))
4642 /* RTX costs when optimizing for size. */
4644 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4646 enum machine_mode mode = GET_MODE (x);
4650 /* XXX TBD. For now, use the standard costs. */
4651 *total = thumb_rtx_costs (x, code, outer_code);
4658 /* A memory access costs 1 insn if the mode is small, or the address is
4659 a single register, otherwise it costs one insn per word. */
4660 if (REG_P (XEXP (x, 0)))
4661 *total = COSTS_N_INSNS (1);
4663 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4670 /* Needs a libcall, so it costs about this. */
4671 *total = COSTS_N_INSNS (2);
4675 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4677 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4685 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4687 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4690 else if (mode == SImode)
4692 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4693 /* Slightly disparage register shifts, but not by much. */
4694 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4695 *total += 1 + rtx_cost (XEXP (x, 1), code);
4699 /* Needs a libcall. */
4700 *total = COSTS_N_INSNS (2);
4704 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4706 *total = COSTS_N_INSNS (1);
4712 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4713 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4715 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4716 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4717 || subcode1 == ROTATE || subcode1 == ROTATERT
4718 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4719 || subcode1 == ASHIFTRT)
4721 /* It's just the cost of the two operands. */
4726 *total = COSTS_N_INSNS (1);
4730 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4734 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4736 *total = COSTS_N_INSNS (1);
4741 case AND: case XOR: case IOR:
4744 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4746 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4747 || subcode == LSHIFTRT || subcode == ASHIFTRT
4748 || (code == AND && subcode == NOT))
4750 /* It's just the cost of the two operands. */
4756 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4760 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4764 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4765 *total = COSTS_N_INSNS (1);
4768 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4777 if (cc_register (XEXP (x, 0), VOIDmode))
4780 *total = COSTS_N_INSNS (1);
4784 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4785 *total = COSTS_N_INSNS (1);
4787 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4792 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4794 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4795 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4798 *total += COSTS_N_INSNS (1);
4803 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4805 switch (GET_MODE (XEXP (x, 0)))
4808 *total += COSTS_N_INSNS (1);
4812 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4818 *total += COSTS_N_INSNS (2);
4823 *total += COSTS_N_INSNS (1);
4828 if (const_ok_for_arm (INTVAL (x)))
4829 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4830 else if (const_ok_for_arm (~INTVAL (x)))
4831 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4832 else if (const_ok_for_arm (-INTVAL (x)))
4834 if (outer_code == COMPARE || outer_code == PLUS
4835 || outer_code == MINUS)
4838 *total = COSTS_N_INSNS (1);
4841 *total = COSTS_N_INSNS (2);
4847 *total = COSTS_N_INSNS (2);
4851 *total = COSTS_N_INSNS (4);
4855 if (mode != VOIDmode)
4856 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4858 *total = COSTS_N_INSNS (4); /* How knows? */
4863 /* RTX costs for cores with a slow MUL implementation. */
4866 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4868 enum machine_mode mode = GET_MODE (x);
4872 *total = thumb_rtx_costs (x, code, outer_code);
4879 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4886 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4888 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4889 & (unsigned HOST_WIDE_INT) 0xffffffff);
4890 int cost, const_ok = const_ok_for_arm (i);
4891 int j, booth_unit_size;
4893 /* Tune as appropriate. */
4894 cost = const_ok ? 4 : 8;
4895 booth_unit_size = 2;
4896 for (j = 0; i && j < 32; j += booth_unit_size)
4898 i >>= booth_unit_size;
4906 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4907 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4911 *total = arm_rtx_costs_1 (x, code, outer_code);
4917 /* RTX cost for cores with a fast multiply unit (M variants). */
4920 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4922 enum machine_mode mode = GET_MODE (x);
4926 *total = thumb_rtx_costs (x, code, outer_code);
4933 /* There is no point basing this on the tuning, since it is always the
4934 fast variant if it exists at all. */
4936 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4937 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4938 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4945 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4952 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4954 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4955 & (unsigned HOST_WIDE_INT) 0xffffffff);
4956 int cost, const_ok = const_ok_for_arm (i);
4957 int j, booth_unit_size;
4959 /* Tune as appropriate. */
4960 cost = const_ok ? 4 : 8;
4961 booth_unit_size = 8;
4962 for (j = 0; i && j < 32; j += booth_unit_size)
4964 i >>= booth_unit_size;
4972 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4973 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4977 *total = arm_rtx_costs_1 (x, code, outer_code);
4983 /* RTX cost for XScale CPUs. */
4986 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4988 enum machine_mode mode = GET_MODE (x);
4992 *total = thumb_rtx_costs (x, code, outer_code);
4999 /* There is no point basing this on the tuning, since it is always the
5000 fast variant if it exists at all. */
5002 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5003 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5004 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5011 if (GET_MODE_CLASS (mode) == MODE_FLOAT
5018 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5020 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
5021 & (unsigned HOST_WIDE_INT) 0xffffffff);
5022 int cost, const_ok = const_ok_for_arm (i);
5023 unsigned HOST_WIDE_INT masked_const;
5025 /* The cost will be related to two insns.
5026 First a load of the constant (MOV or LDR), then a multiply. */
5029 cost += 1; /* LDR is probably more expensive because
5030 of longer result latency. */
5031 masked_const = i & 0xffff8000;
5032 if (masked_const != 0 && masked_const != 0xffff8000)
5034 masked_const = i & 0xf8000000;
5035 if (masked_const == 0 || masked_const == 0xf8000000)
5044 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
5045 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
5049 /* A COMPARE of a MULT is slow on XScale; the muls instruction
5050 will stall until the multiplication is complete. */
5051 if (GET_CODE (XEXP (x, 0)) == MULT)
5052 *total = 4 + rtx_cost (XEXP (x, 0), code);
5054 *total = arm_rtx_costs_1 (x, code, outer_code);
5058 *total = arm_rtx_costs_1 (x, code, outer_code);
5064 /* RTX costs for 9e (and later) cores. */
5067 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
5069 enum machine_mode mode = GET_MODE (x);
5078 *total = COSTS_N_INSNS (3);
5082 *total = thumb_rtx_costs (x, code, outer_code);
5090 /* There is no point basing this on the tuning, since it is always the
5091 fast variant if it exists at all. */
5093 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5094 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5095 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5102 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5119 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
5120 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
5124 *total = arm_rtx_costs_1 (x, code, outer_code);
5128 /* All address computations that can be done are free, but rtx cost returns
5129 the same for practically all of them. So we weight the different types
5130 of address here in the order (most pref first):
5131 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
5133 arm_arm_address_cost (rtx x)
5135 enum rtx_code c = GET_CODE (x);
5137 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
5139 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
5142 if (c == PLUS || c == MINUS)
5144 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5147 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
5157 arm_thumb_address_cost (rtx x)
5159 enum rtx_code c = GET_CODE (x);
5164 && GET_CODE (XEXP (x, 0)) == REG
5165 && GET_CODE (XEXP (x, 1)) == CONST_INT)
5172 arm_address_cost (rtx x)
5174 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
5178 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
5182 /* Some true dependencies can have a higher cost depending
5183 on precisely how certain input operands are used. */
5185 && REG_NOTE_KIND (link) == 0
5186 && recog_memoized (insn) >= 0
5187 && recog_memoized (dep) >= 0)
5189 int shift_opnum = get_attr_shift (insn);
5190 enum attr_type attr_type = get_attr_type (dep);
5192 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
5193 operand for INSN. If we have a shifted input operand and the
5194 instruction we depend on is another ALU instruction, then we may
5195 have to account for an additional stall. */
5196 if (shift_opnum != 0
5197 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
5199 rtx shifted_operand;
5202 /* Get the shifted operand. */
5203 extract_insn (insn);
5204 shifted_operand = recog_data.operand[shift_opnum];
5206 /* Iterate over all the operands in DEP. If we write an operand
5207 that overlaps with SHIFTED_OPERAND, then we have increase the
5208 cost of this dependency. */
5210 preprocess_constraints ();
5211 for (opno = 0; opno < recog_data.n_operands; opno++)
5213 /* We can ignore strict inputs. */
5214 if (recog_data.operand_type[opno] == OP_IN)
5217 if (reg_overlap_mentioned_p (recog_data.operand[opno],
5224 /* XXX This is not strictly true for the FPA. */
5225 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
5226 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
5229 /* Call insns don't incur a stall, even if they follow a load. */
5230 if (REG_NOTE_KIND (link) == 0
5231 && GET_CODE (insn) == CALL_INSN)
5234 if ((i_pat = single_set (insn)) != NULL
5235 && GET_CODE (SET_SRC (i_pat)) == MEM
5236 && (d_pat = single_set (dep)) != NULL
5237 && GET_CODE (SET_DEST (d_pat)) == MEM)
5239 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
5240 /* This is a load after a store, there is no conflict if the load reads
5241 from a cached area. Assume that loads from the stack, and from the
5242 constant pool are cached, and that others will miss. This is a
5245 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
5246 || reg_mentioned_p (stack_pointer_rtx, src_mem)
5247 || reg_mentioned_p (frame_pointer_rtx, src_mem)
5248 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
5255 static int fp_consts_inited = 0;
5257 /* Only zero is valid for VFP. Other values are also valid for FPA. */
5258 static const char * const strings_fp[8] =
5261 "4", "5", "0.5", "10"
5264 static REAL_VALUE_TYPE values_fp[8];
5267 init_fp_table (void)
5273 fp_consts_inited = 1;
5275 fp_consts_inited = 8;
5277 for (i = 0; i < fp_consts_inited; i++)
5279 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
5284 /* Return TRUE if rtx X is a valid immediate FP constant. */
5286 arm_const_double_rtx (rtx x)
5291 if (!fp_consts_inited)
5294 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5295 if (REAL_VALUE_MINUS_ZERO (r))
5298 for (i = 0; i < fp_consts_inited; i++)
5299 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5305 /* Return TRUE if rtx X is a valid immediate FPA constant. */
5307 neg_const_double_rtx_ok_for_fpa (rtx x)
5312 if (!fp_consts_inited)
5315 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5316 r = REAL_VALUE_NEGATE (r);
5317 if (REAL_VALUE_MINUS_ZERO (r))
5320 for (i = 0; i < 8; i++)
5321 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5327 /* Predicates for `match_operand' and `match_operator'. */
5329 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
5331 cirrus_memory_offset (rtx op)
5333 /* Reject eliminable registers. */
5334 if (! (reload_in_progress || reload_completed)
5335 && ( reg_mentioned_p (frame_pointer_rtx, op)
5336 || reg_mentioned_p (arg_pointer_rtx, op)
5337 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5338 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5339 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5340 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5343 if (GET_CODE (op) == MEM)
5349 /* Match: (mem (reg)). */
5350 if (GET_CODE (ind) == REG)
5356 if (GET_CODE (ind) == PLUS
5357 && GET_CODE (XEXP (ind, 0)) == REG
5358 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5359 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
5366 /* Return TRUE if OP is a valid coprocessor memory address pattern.
5367 WB if true if writeback address modes are allowed. */
5370 arm_coproc_mem_operand (rtx op, bool wb)
5374 /* Reject eliminable registers. */
5375 if (! (reload_in_progress || reload_completed)
5376 && ( reg_mentioned_p (frame_pointer_rtx, op)
5377 || reg_mentioned_p (arg_pointer_rtx, op)
5378 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5379 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5380 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5381 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5384 /* Constants are converted into offsets from labels. */
5385 if (GET_CODE (op) != MEM)
5390 if (reload_completed
5391 && (GET_CODE (ind) == LABEL_REF
5392 || (GET_CODE (ind) == CONST
5393 && GET_CODE (XEXP (ind, 0)) == PLUS
5394 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
5395 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
5398 /* Match: (mem (reg)). */
5399 if (GET_CODE (ind) == REG)
5400 return arm_address_register_rtx_p (ind, 0);
5402 /* Autoincremment addressing modes. */
5404 && (GET_CODE (ind) == PRE_INC
5405 || GET_CODE (ind) == POST_INC
5406 || GET_CODE (ind) == PRE_DEC
5407 || GET_CODE (ind) == POST_DEC))
5408 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
5411 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
5412 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
5413 && GET_CODE (XEXP (ind, 1)) == PLUS
5414 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
5415 ind = XEXP (ind, 1);
5420 if (GET_CODE (ind) == PLUS
5421 && GET_CODE (XEXP (ind, 0)) == REG
5422 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5423 && GET_CODE (XEXP (ind, 1)) == CONST_INT
5424 && INTVAL (XEXP (ind, 1)) > -1024
5425 && INTVAL (XEXP (ind, 1)) < 1024
5426 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
5432 /* Return true if X is a register that will be eliminated later on. */
5434 arm_eliminable_register (rtx x)
5436 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
5437 || REGNO (x) == ARG_POINTER_REGNUM
5438 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
5439 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
5442 /* Return GENERAL_REGS if a scratch register required to reload x to/from
5443 coprocessor registers. Otherwise return NO_REGS. */
5446 coproc_secondary_reload_class (enum machine_mode mode, rtx x, bool wb)
5448 if (arm_coproc_mem_operand (x, wb) || s_register_operand (x, mode))
5451 return GENERAL_REGS;
5454 /* Values which must be returned in the most-significant end of the return
5458 arm_return_in_msb (tree valtype)
5460 return (TARGET_AAPCS_BASED
5462 && (AGGREGATE_TYPE_P (valtype)
5463 || TREE_CODE (valtype) == COMPLEX_TYPE));
5466 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5467 Use by the Cirrus Maverick code which has to workaround
5468 a hardware bug triggered by such instructions. */
5470 arm_memory_load_p (rtx insn)
5472 rtx body, lhs, rhs;;
5474 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5477 body = PATTERN (insn);
5479 if (GET_CODE (body) != SET)
5482 lhs = XEXP (body, 0);
5483 rhs = XEXP (body, 1);
5485 lhs = REG_OR_SUBREG_RTX (lhs);
5487 /* If the destination is not a general purpose
5488 register we do not have to worry. */
5489 if (GET_CODE (lhs) != REG
5490 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5493 /* As well as loads from memory we also have to react
5494 to loads of invalid constants which will be turned
5495 into loads from the minipool. */
5496 return (GET_CODE (rhs) == MEM
5497 || GET_CODE (rhs) == SYMBOL_REF
5498 || note_invalid_constants (insn, -1, false));
5501 /* Return TRUE if INSN is a Cirrus instruction. */
5503 arm_cirrus_insn_p (rtx insn)
5505 enum attr_cirrus attr;
5507 /* get_attr cannot accept USE or CLOBBER. */
5509 || GET_CODE (insn) != INSN
5510 || GET_CODE (PATTERN (insn)) == USE
5511 || GET_CODE (PATTERN (insn)) == CLOBBER)
5514 attr = get_attr_cirrus (insn);
5516 return attr != CIRRUS_NOT;
5519 /* Cirrus reorg for invalid instruction combinations. */
5521 cirrus_reorg (rtx first)
5523 enum attr_cirrus attr;
5524 rtx body = PATTERN (first);
5528 /* Any branch must be followed by 2 non Cirrus instructions. */
5529 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5532 t = next_nonnote_insn (first);
5534 if (arm_cirrus_insn_p (t))
5537 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5541 emit_insn_after (gen_nop (), first);
5546 /* (float (blah)) is in parallel with a clobber. */
5547 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5548 body = XVECEXP (body, 0, 0);
5550 if (GET_CODE (body) == SET)
5552 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5554 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5555 be followed by a non Cirrus insn. */
5556 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5558 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5559 emit_insn_after (gen_nop (), first);
5563 else if (arm_memory_load_p (first))
5565 unsigned int arm_regno;
5567 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5568 ldr/cfmv64hr combination where the Rd field is the same
5569 in both instructions must be split with a non Cirrus
5576 /* Get Arm register number for ldr insn. */
5577 if (GET_CODE (lhs) == REG)
5578 arm_regno = REGNO (lhs);
5581 gcc_assert (GET_CODE (rhs) == REG);
5582 arm_regno = REGNO (rhs);
5586 first = next_nonnote_insn (first);
5588 if (! arm_cirrus_insn_p (first))
5591 body = PATTERN (first);
5593 /* (float (blah)) is in parallel with a clobber. */
5594 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5595 body = XVECEXP (body, 0, 0);
5597 if (GET_CODE (body) == FLOAT)
5598 body = XEXP (body, 0);
5600 if (get_attr_cirrus (first) == CIRRUS_MOVE
5601 && GET_CODE (XEXP (body, 1)) == REG
5602 && arm_regno == REGNO (XEXP (body, 1)))
5603 emit_insn_after (gen_nop (), first);
5609 /* get_attr cannot accept USE or CLOBBER. */
5611 || GET_CODE (first) != INSN
5612 || GET_CODE (PATTERN (first)) == USE
5613 || GET_CODE (PATTERN (first)) == CLOBBER)
5616 attr = get_attr_cirrus (first);
5618 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5619 must be followed by a non-coprocessor instruction. */
5620 if (attr == CIRRUS_COMPARE)
5624 t = next_nonnote_insn (first);
5626 if (arm_cirrus_insn_p (t))
5629 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5633 emit_insn_after (gen_nop (), first);
5639 /* Return TRUE if X references a SYMBOL_REF. */
5641 symbol_mentioned_p (rtx x)
5646 if (GET_CODE (x) == SYMBOL_REF)
5649 /* UNSPEC_TLS entries for a symbol include the SYMBOL_REF, but they
5650 are constant offsets, not symbols. */
5651 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5654 fmt = GET_RTX_FORMAT (GET_CODE (x));
5656 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5662 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5663 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5666 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5673 /* Return TRUE if X references a LABEL_REF. */
5675 label_mentioned_p (rtx x)
5680 if (GET_CODE (x) == LABEL_REF)
5683 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the referencing
5684 instruction, but they are constant offsets, not symbols. */
5685 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5688 fmt = GET_RTX_FORMAT (GET_CODE (x));
5689 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5695 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5696 if (label_mentioned_p (XVECEXP (x, i, j)))
5699 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5707 tls_mentioned_p (rtx x)
5709 switch (GET_CODE (x))
5712 return tls_mentioned_p (XEXP (x, 0));
5715 if (XINT (x, 1) == UNSPEC_TLS)
5723 /* Must not copy a SET whose source operand is PC-relative. */
5726 arm_cannot_copy_insn_p (rtx insn)
5728 rtx pat = PATTERN (insn);
5730 if (GET_CODE (pat) == PARALLEL
5731 && GET_CODE (XVECEXP (pat, 0, 0)) == SET)
5733 rtx rhs = SET_SRC (XVECEXP (pat, 0, 0));
5735 if (GET_CODE (rhs) == UNSPEC
5736 && XINT (rhs, 1) == UNSPEC_PIC_BASE)
5739 if (GET_CODE (rhs) == MEM
5740 && GET_CODE (XEXP (rhs, 0)) == UNSPEC
5741 && XINT (XEXP (rhs, 0), 1) == UNSPEC_PIC_BASE)
5751 enum rtx_code code = GET_CODE (x);
5768 /* Return 1 if memory locations are adjacent. */
5770 adjacent_mem_locations (rtx a, rtx b)
5772 /* We don't guarantee to preserve the order of these memory refs. */
5773 if (volatile_refs_p (a) || volatile_refs_p (b))
5776 if ((GET_CODE (XEXP (a, 0)) == REG
5777 || (GET_CODE (XEXP (a, 0)) == PLUS
5778 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5779 && (GET_CODE (XEXP (b, 0)) == REG
5780 || (GET_CODE (XEXP (b, 0)) == PLUS
5781 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5783 HOST_WIDE_INT val0 = 0, val1 = 0;
5787 if (GET_CODE (XEXP (a, 0)) == PLUS)
5789 reg0 = XEXP (XEXP (a, 0), 0);
5790 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5795 if (GET_CODE (XEXP (b, 0)) == PLUS)
5797 reg1 = XEXP (XEXP (b, 0), 0);
5798 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5803 /* Don't accept any offset that will require multiple
5804 instructions to handle, since this would cause the
5805 arith_adjacentmem pattern to output an overlong sequence. */
5806 if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
5809 /* Don't allow an eliminable register: register elimination can make
5810 the offset too large. */
5811 if (arm_eliminable_register (reg0))
5814 val_diff = val1 - val0;
5818 /* If the target has load delay slots, then there's no benefit
5819 to using an ldm instruction unless the offset is zero and
5820 we are optimizing for size. */
5821 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5822 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5823 && (val_diff == 4 || val_diff == -4));
5826 return ((REGNO (reg0) == REGNO (reg1))
5827 && (val_diff == 4 || val_diff == -4));
5834 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5835 HOST_WIDE_INT *load_offset)
5837 int unsorted_regs[4];
5838 HOST_WIDE_INT unsorted_offsets[4];
5843 /* Can only handle 2, 3, or 4 insns at present,
5844 though could be easily extended if required. */
5845 gcc_assert (nops >= 2 && nops <= 4);
5847 /* Loop over the operands and check that the memory references are
5848 suitable (i.e. immediate offsets from the same base register). At
5849 the same time, extract the target register, and the memory
5851 for (i = 0; i < nops; i++)
5856 /* Convert a subreg of a mem into the mem itself. */
5857 if (GET_CODE (operands[nops + i]) == SUBREG)
5858 operands[nops + i] = alter_subreg (operands + (nops + i));
5860 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5862 /* Don't reorder volatile memory references; it doesn't seem worth
5863 looking for the case where the order is ok anyway. */
5864 if (MEM_VOLATILE_P (operands[nops + i]))
5867 offset = const0_rtx;
5869 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5870 || (GET_CODE (reg) == SUBREG
5871 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5872 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5873 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5875 || (GET_CODE (reg) == SUBREG
5876 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5877 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5882 base_reg = REGNO (reg);
5883 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5884 ? REGNO (operands[i])
5885 : REGNO (SUBREG_REG (operands[i])));
5890 if (base_reg != (int) REGNO (reg))
5891 /* Not addressed from the same base register. */
5894 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5895 ? REGNO (operands[i])
5896 : REGNO (SUBREG_REG (operands[i])));
5897 if (unsorted_regs[i] < unsorted_regs[order[0]])
5901 /* If it isn't an integer register, or if it overwrites the
5902 base register but isn't the last insn in the list, then
5903 we can't do this. */
5904 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5905 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5908 unsorted_offsets[i] = INTVAL (offset);
5911 /* Not a suitable memory address. */
5915 /* All the useful information has now been extracted from the
5916 operands into unsorted_regs and unsorted_offsets; additionally,
5917 order[0] has been set to the lowest numbered register in the
5918 list. Sort the registers into order, and check that the memory
5919 offsets are ascending and adjacent. */
5921 for (i = 1; i < nops; i++)
5925 order[i] = order[i - 1];
5926 for (j = 0; j < nops; j++)
5927 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5928 && (order[i] == order[i - 1]
5929 || unsorted_regs[j] < unsorted_regs[order[i]]))
5932 /* Have we found a suitable register? if not, one must be used more
5934 if (order[i] == order[i - 1])
5937 /* Is the memory address adjacent and ascending? */
5938 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5946 for (i = 0; i < nops; i++)
5947 regs[i] = unsorted_regs[order[i]];
5949 *load_offset = unsorted_offsets[order[0]];
5952 if (unsorted_offsets[order[0]] == 0)
5953 return 1; /* ldmia */
5955 if (unsorted_offsets[order[0]] == 4)
5956 return 2; /* ldmib */
5958 if (unsorted_offsets[order[nops - 1]] == 0)
5959 return 3; /* ldmda */
5961 if (unsorted_offsets[order[nops - 1]] == -4)
5962 return 4; /* ldmdb */
5964 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5965 if the offset isn't small enough. The reason 2 ldrs are faster
5966 is because these ARMs are able to do more than one cache access
5967 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5968 whilst the ARM8 has a double bandwidth cache. This means that
5969 these cores can do both an instruction fetch and a data fetch in
5970 a single cycle, so the trick of calculating the address into a
5971 scratch register (one of the result regs) and then doing a load
5972 multiple actually becomes slower (and no smaller in code size).
5973 That is the transformation
5975 ldr rd1, [rbase + offset]
5976 ldr rd2, [rbase + offset + 4]
5980 add rd1, rbase, offset
5981 ldmia rd1, {rd1, rd2}
5983 produces worse code -- '3 cycles + any stalls on rd2' instead of
5984 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5985 access per cycle, the first sequence could never complete in less
5986 than 6 cycles, whereas the ldm sequence would only take 5 and
5987 would make better use of sequential accesses if not hitting the
5990 We cheat here and test 'arm_ld_sched' which we currently know to
5991 only be true for the ARM8, ARM9 and StrongARM. If this ever
5992 changes, then the test below needs to be reworked. */
5993 if (nops == 2 && arm_ld_sched)
5996 /* Can't do it without setting up the offset, only do this if it takes
5997 no more than one insn. */
5998 return (const_ok_for_arm (unsorted_offsets[order[0]])
5999 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
6003 emit_ldm_seq (rtx *operands, int nops)
6007 HOST_WIDE_INT offset;
6011 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
6014 strcpy (buf, "ldm%?ia\t");
6018 strcpy (buf, "ldm%?ib\t");
6022 strcpy (buf, "ldm%?da\t");
6026 strcpy (buf, "ldm%?db\t");
6031 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6032 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6035 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6036 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6038 output_asm_insn (buf, operands);
6040 strcpy (buf, "ldm%?ia\t");
6047 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6048 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6050 for (i = 1; i < nops; i++)
6051 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6052 reg_names[regs[i]]);
6054 strcat (buf, "}\t%@ phole ldm");
6056 output_asm_insn (buf, operands);
6061 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
6062 HOST_WIDE_INT * load_offset)
6064 int unsorted_regs[4];
6065 HOST_WIDE_INT unsorted_offsets[4];
6070 /* Can only handle 2, 3, or 4 insns at present, though could be easily
6071 extended if required. */
6072 gcc_assert (nops >= 2 && nops <= 4);
6074 /* Loop over the operands and check that the memory references are
6075 suitable (i.e. immediate offsets from the same base register). At
6076 the same time, extract the target register, and the memory
6078 for (i = 0; i < nops; i++)
6083 /* Convert a subreg of a mem into the mem itself. */
6084 if (GET_CODE (operands[nops + i]) == SUBREG)
6085 operands[nops + i] = alter_subreg (operands + (nops + i));
6087 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
6089 /* Don't reorder volatile memory references; it doesn't seem worth
6090 looking for the case where the order is ok anyway. */
6091 if (MEM_VOLATILE_P (operands[nops + i]))
6094 offset = const0_rtx;
6096 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
6097 || (GET_CODE (reg) == SUBREG
6098 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6099 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
6100 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
6102 || (GET_CODE (reg) == SUBREG
6103 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6104 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
6109 base_reg = REGNO (reg);
6110 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
6111 ? REGNO (operands[i])
6112 : REGNO (SUBREG_REG (operands[i])));
6117 if (base_reg != (int) REGNO (reg))
6118 /* Not addressed from the same base register. */
6121 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
6122 ? REGNO (operands[i])
6123 : REGNO (SUBREG_REG (operands[i])));
6124 if (unsorted_regs[i] < unsorted_regs[order[0]])
6128 /* If it isn't an integer register, then we can't do this. */
6129 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
6132 unsorted_offsets[i] = INTVAL (offset);
6135 /* Not a suitable memory address. */
6139 /* All the useful information has now been extracted from the
6140 operands into unsorted_regs and unsorted_offsets; additionally,
6141 order[0] has been set to the lowest numbered register in the
6142 list. Sort the registers into order, and check that the memory
6143 offsets are ascending and adjacent. */
6145 for (i = 1; i < nops; i++)
6149 order[i] = order[i - 1];
6150 for (j = 0; j < nops; j++)
6151 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
6152 && (order[i] == order[i - 1]
6153 || unsorted_regs[j] < unsorted_regs[order[i]]))
6156 /* Have we found a suitable register? if not, one must be used more
6158 if (order[i] == order[i - 1])
6161 /* Is the memory address adjacent and ascending? */
6162 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
6170 for (i = 0; i < nops; i++)
6171 regs[i] = unsorted_regs[order[i]];
6173 *load_offset = unsorted_offsets[order[0]];
6176 if (unsorted_offsets[order[0]] == 0)
6177 return 1; /* stmia */
6179 if (unsorted_offsets[order[0]] == 4)
6180 return 2; /* stmib */
6182 if (unsorted_offsets[order[nops - 1]] == 0)
6183 return 3; /* stmda */
6185 if (unsorted_offsets[order[nops - 1]] == -4)
6186 return 4; /* stmdb */
6192 emit_stm_seq (rtx *operands, int nops)
6196 HOST_WIDE_INT offset;
6200 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
6203 strcpy (buf, "stm%?ia\t");
6207 strcpy (buf, "stm%?ib\t");
6211 strcpy (buf, "stm%?da\t");
6215 strcpy (buf, "stm%?db\t");
6222 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6223 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6225 for (i = 1; i < nops; i++)
6226 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6227 reg_names[regs[i]]);
6229 strcat (buf, "}\t%@ phole stm");
6231 output_asm_insn (buf, operands);
6235 /* Routines for use in generating RTL. */
6238 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
6239 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6241 HOST_WIDE_INT offset = *offsetp;
6244 int sign = up ? 1 : -1;
6247 /* XScale has load-store double instructions, but they have stricter
6248 alignment requirements than load-store multiple, so we cannot
6251 For XScale ldm requires 2 + NREGS cycles to complete and blocks
6252 the pipeline until completion.
6260 An ldr instruction takes 1-3 cycles, but does not block the
6269 Best case ldr will always win. However, the more ldr instructions
6270 we issue, the less likely we are to be able to schedule them well.
6271 Using ldr instructions also increases code size.
6273 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
6274 for counts of 3 or 4 regs. */
6275 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6281 for (i = 0; i < count; i++)
6283 addr = plus_constant (from, i * 4 * sign);
6284 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6285 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
6291 emit_move_insn (from, plus_constant (from, count * 4 * sign));
6301 result = gen_rtx_PARALLEL (VOIDmode,
6302 rtvec_alloc (count + (write_back ? 1 : 0)));
6305 XVECEXP (result, 0, 0)
6306 = gen_rtx_SET (VOIDmode, from, plus_constant (from, count * 4 * sign));
6311 for (j = 0; i < count; i++, j++)
6313 addr = plus_constant (from, j * 4 * sign);
6314 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6315 XVECEXP (result, 0, i)
6316 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
6327 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
6328 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6330 HOST_WIDE_INT offset = *offsetp;
6333 int sign = up ? 1 : -1;
6336 /* See arm_gen_load_multiple for discussion of
6337 the pros/cons of ldm/stm usage for XScale. */
6338 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6344 for (i = 0; i < count; i++)
6346 addr = plus_constant (to, i * 4 * sign);
6347 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6348 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
6354 emit_move_insn (to, plus_constant (to, count * 4 * sign));
6364 result = gen_rtx_PARALLEL (VOIDmode,
6365 rtvec_alloc (count + (write_back ? 1 : 0)));
6368 XVECEXP (result, 0, 0)
6369 = gen_rtx_SET (VOIDmode, to,
6370 plus_constant (to, count * 4 * sign));
6375 for (j = 0; i < count; i++, j++)
6377 addr = plus_constant (to, j * 4 * sign);
6378 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6379 XVECEXP (result, 0, i)
6380 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
6391 arm_gen_movmemqi (rtx *operands)
6393 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
6394 HOST_WIDE_INT srcoffset, dstoffset;
6396 rtx src, dst, srcbase, dstbase;
6397 rtx part_bytes_reg = NULL;
6400 if (GET_CODE (operands[2]) != CONST_INT
6401 || GET_CODE (operands[3]) != CONST_INT
6402 || INTVAL (operands[2]) > 64
6403 || INTVAL (operands[3]) & 3)
6406 dstbase = operands[0];
6407 srcbase = operands[1];
6409 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
6410 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
6412 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
6413 out_words_to_go = INTVAL (operands[2]) / 4;
6414 last_bytes = INTVAL (operands[2]) & 3;
6415 dstoffset = srcoffset = 0;
6417 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
6418 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
6420 for (i = 0; in_words_to_go >= 2; i+=4)
6422 if (in_words_to_go > 4)
6423 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
6424 srcbase, &srcoffset));
6426 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
6427 FALSE, srcbase, &srcoffset));
6429 if (out_words_to_go)
6431 if (out_words_to_go > 4)
6432 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
6433 dstbase, &dstoffset));
6434 else if (out_words_to_go != 1)
6435 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
6439 dstbase, &dstoffset));
6442 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6443 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
6444 if (last_bytes != 0)
6446 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6452 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6453 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6456 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6457 if (out_words_to_go)
6461 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6462 sreg = copy_to_reg (mem);
6464 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6465 emit_move_insn (mem, sreg);
6468 gcc_assert (!in_words_to_go); /* Sanity check */
6473 gcc_assert (in_words_to_go > 0);
6475 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6476 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6479 gcc_assert (!last_bytes || part_bytes_reg);
6481 if (BYTES_BIG_ENDIAN && last_bytes)
6483 rtx tmp = gen_reg_rtx (SImode);
6485 /* The bytes we want are in the top end of the word. */
6486 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6487 GEN_INT (8 * (4 - last_bytes))));
6488 part_bytes_reg = tmp;
6492 mem = adjust_automodify_address (dstbase, QImode,
6493 plus_constant (dst, last_bytes - 1),
6494 dstoffset + last_bytes - 1);
6495 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6499 tmp = gen_reg_rtx (SImode);
6500 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6501 part_bytes_reg = tmp;
6510 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6511 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6515 rtx tmp = gen_reg_rtx (SImode);
6516 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6517 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6518 part_bytes_reg = tmp;
6525 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6526 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6533 /* Select a dominance comparison mode if possible for a test of the general
6534 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6535 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6536 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6537 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6538 In all cases OP will be either EQ or NE, but we don't need to know which
6539 here. If we are unable to support a dominance comparison we return
6540 CC mode. This will then fail to match for the RTL expressions that
6541 generate this call. */
6543 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6545 enum rtx_code cond1, cond2;
6548 /* Currently we will probably get the wrong result if the individual
6549 comparisons are not simple. This also ensures that it is safe to
6550 reverse a comparison if necessary. */
6551 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6553 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6557 /* The if_then_else variant of this tests the second condition if the
6558 first passes, but is true if the first fails. Reverse the first
6559 condition to get a true "inclusive-or" expression. */
6560 if (cond_or == DOM_CC_NX_OR_Y)
6561 cond1 = reverse_condition (cond1);
6563 /* If the comparisons are not equal, and one doesn't dominate the other,
6564 then we can't do this. */
6566 && !comparison_dominates_p (cond1, cond2)
6567 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6572 enum rtx_code temp = cond1;
6580 if (cond_or == DOM_CC_X_AND_Y)
6585 case EQ: return CC_DEQmode;
6586 case LE: return CC_DLEmode;
6587 case LEU: return CC_DLEUmode;
6588 case GE: return CC_DGEmode;
6589 case GEU: return CC_DGEUmode;
6590 default: gcc_unreachable ();
6594 if (cond_or == DOM_CC_X_AND_Y)
6610 if (cond_or == DOM_CC_X_AND_Y)
6626 if (cond_or == DOM_CC_X_AND_Y)
6642 if (cond_or == DOM_CC_X_AND_Y)
6657 /* The remaining cases only occur when both comparisons are the
6660 gcc_assert (cond1 == cond2);
6664 gcc_assert (cond1 == cond2);
6668 gcc_assert (cond1 == cond2);
6672 gcc_assert (cond1 == cond2);
6676 gcc_assert (cond1 == cond2);
6685 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6687 /* All floating point compares return CCFP if it is an equality
6688 comparison, and CCFPE otherwise. */
6689 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6709 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6718 /* A compare with a shifted operand. Because of canonicalization, the
6719 comparison will have to be swapped when we emit the assembler. */
6720 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6721 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6722 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6723 || GET_CODE (x) == ROTATERT))
6726 /* This operation is performed swapped, but since we only rely on the Z
6727 flag we don't need an additional mode. */
6728 if (GET_MODE (y) == SImode && REG_P (y)
6729 && GET_CODE (x) == NEG
6730 && (op == EQ || op == NE))
6733 /* This is a special case that is used by combine to allow a
6734 comparison of a shifted byte load to be split into a zero-extend
6735 followed by a comparison of the shifted integer (only valid for
6736 equalities and unsigned inequalities). */
6737 if (GET_MODE (x) == SImode
6738 && GET_CODE (x) == ASHIFT
6739 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6740 && GET_CODE (XEXP (x, 0)) == SUBREG
6741 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6742 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6743 && (op == EQ || op == NE
6744 || op == GEU || op == GTU || op == LTU || op == LEU)
6745 && GET_CODE (y) == CONST_INT)
6748 /* A construct for a conditional compare, if the false arm contains
6749 0, then both conditions must be true, otherwise either condition
6750 must be true. Not all conditions are possible, so CCmode is
6751 returned if it can't be done. */
6752 if (GET_CODE (x) == IF_THEN_ELSE
6753 && (XEXP (x, 2) == const0_rtx
6754 || XEXP (x, 2) == const1_rtx)
6755 && COMPARISON_P (XEXP (x, 0))
6756 && COMPARISON_P (XEXP (x, 1)))
6757 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6758 INTVAL (XEXP (x, 2)));
6760 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6761 if (GET_CODE (x) == AND
6762 && COMPARISON_P (XEXP (x, 0))
6763 && COMPARISON_P (XEXP (x, 1)))
6764 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6767 if (GET_CODE (x) == IOR
6768 && COMPARISON_P (XEXP (x, 0))
6769 && COMPARISON_P (XEXP (x, 1)))
6770 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6773 /* An operation (on Thumb) where we want to test for a single bit.
6774 This is done by shifting that bit up into the top bit of a
6775 scratch register; we can then branch on the sign bit. */
6777 && GET_MODE (x) == SImode
6778 && (op == EQ || op == NE)
6779 && GET_CODE (x) == ZERO_EXTRACT
6780 && XEXP (x, 1) == const1_rtx)
6783 /* An operation that sets the condition codes as a side-effect, the
6784 V flag is not set correctly, so we can only use comparisons where
6785 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6787 if (GET_MODE (x) == SImode
6789 && (op == EQ || op == NE || op == LT || op == GE)
6790 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6791 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6792 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6793 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6794 || GET_CODE (x) == LSHIFTRT
6795 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6796 || GET_CODE (x) == ROTATERT
6797 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6800 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6803 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6804 && GET_CODE (x) == PLUS
6805 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6811 /* X and Y are two things to compare using CODE. Emit the compare insn and
6812 return the rtx for register 0 in the proper mode. FP means this is a
6813 floating point compare: I don't think that it is needed on the arm. */
6815 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6817 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6818 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6820 emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
6825 /* Generate a sequence of insns that will generate the correct return
6826 address mask depending on the physical architecture that the program
6829 arm_gen_return_addr_mask (void)
6831 rtx reg = gen_reg_rtx (Pmode);
6833 emit_insn (gen_return_addr_mask (reg));
6838 arm_reload_in_hi (rtx *operands)
6840 rtx ref = operands[1];
6842 HOST_WIDE_INT offset = 0;
6844 if (GET_CODE (ref) == SUBREG)
6846 offset = SUBREG_BYTE (ref);
6847 ref = SUBREG_REG (ref);
6850 if (GET_CODE (ref) == REG)
6852 /* We have a pseudo which has been spilt onto the stack; there
6853 are two cases here: the first where there is a simple
6854 stack-slot replacement and a second where the stack-slot is
6855 out of range, or is used as a subreg. */
6856 if (reg_equiv_mem[REGNO (ref)])
6858 ref = reg_equiv_mem[REGNO (ref)];
6859 base = find_replacement (&XEXP (ref, 0));
6862 /* The slot is out of range, or was dressed up in a SUBREG. */
6863 base = reg_equiv_address[REGNO (ref)];
6866 base = find_replacement (&XEXP (ref, 0));
6868 /* Handle the case where the address is too complex to be offset by 1. */
6869 if (GET_CODE (base) == MINUS
6870 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6872 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6874 emit_set_insn (base_plus, base);
6877 else if (GET_CODE (base) == PLUS)
6879 /* The addend must be CONST_INT, or we would have dealt with it above. */
6880 HOST_WIDE_INT hi, lo;
6882 offset += INTVAL (XEXP (base, 1));
6883 base = XEXP (base, 0);
6885 /* Rework the address into a legal sequence of insns. */
6886 /* Valid range for lo is -4095 -> 4095 */
6889 : -((-offset) & 0xfff));
6891 /* Corner case, if lo is the max offset then we would be out of range
6892 once we have added the additional 1 below, so bump the msb into the
6893 pre-loading insn(s). */
6897 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6898 ^ (HOST_WIDE_INT) 0x80000000)
6899 - (HOST_WIDE_INT) 0x80000000);
6901 gcc_assert (hi + lo == offset);
6905 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6907 /* Get the base address; addsi3 knows how to handle constants
6908 that require more than one insn. */
6909 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6915 /* Operands[2] may overlap operands[0] (though it won't overlap
6916 operands[1]), that's why we asked for a DImode reg -- so we can
6917 use the bit that does not overlap. */
6918 if (REGNO (operands[2]) == REGNO (operands[0]))
6919 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6921 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6923 emit_insn (gen_zero_extendqisi2 (scratch,
6924 gen_rtx_MEM (QImode,
6925 plus_constant (base,
6927 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6928 gen_rtx_MEM (QImode,
6929 plus_constant (base,
6931 if (!BYTES_BIG_ENDIAN)
6932 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
6933 gen_rtx_IOR (SImode,
6936 gen_rtx_SUBREG (SImode, operands[0], 0),
6940 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
6941 gen_rtx_IOR (SImode,
6942 gen_rtx_ASHIFT (SImode, scratch,
6944 gen_rtx_SUBREG (SImode, operands[0], 0)));
6947 /* Handle storing a half-word to memory during reload by synthesizing as two
6948 byte stores. Take care not to clobber the input values until after we
6949 have moved them somewhere safe. This code assumes that if the DImode
6950 scratch in operands[2] overlaps either the input value or output address
6951 in some way, then that value must die in this insn (we absolutely need
6952 two scratch registers for some corner cases). */
6954 arm_reload_out_hi (rtx *operands)
6956 rtx ref = operands[0];
6957 rtx outval = operands[1];
6959 HOST_WIDE_INT offset = 0;
6961 if (GET_CODE (ref) == SUBREG)
6963 offset = SUBREG_BYTE (ref);
6964 ref = SUBREG_REG (ref);
6967 if (GET_CODE (ref) == REG)
6969 /* We have a pseudo which has been spilt onto the stack; there
6970 are two cases here: the first where there is a simple
6971 stack-slot replacement and a second where the stack-slot is
6972 out of range, or is used as a subreg. */
6973 if (reg_equiv_mem[REGNO (ref)])
6975 ref = reg_equiv_mem[REGNO (ref)];
6976 base = find_replacement (&XEXP (ref, 0));
6979 /* The slot is out of range, or was dressed up in a SUBREG. */
6980 base = reg_equiv_address[REGNO (ref)];
6983 base = find_replacement (&XEXP (ref, 0));
6985 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6987 /* Handle the case where the address is too complex to be offset by 1. */
6988 if (GET_CODE (base) == MINUS
6989 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6991 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6993 /* Be careful not to destroy OUTVAL. */
6994 if (reg_overlap_mentioned_p (base_plus, outval))
6996 /* Updating base_plus might destroy outval, see if we can
6997 swap the scratch and base_plus. */
6998 if (!reg_overlap_mentioned_p (scratch, outval))
7001 scratch = base_plus;
7006 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
7008 /* Be conservative and copy OUTVAL into the scratch now,
7009 this should only be necessary if outval is a subreg
7010 of something larger than a word. */
7011 /* XXX Might this clobber base? I can't see how it can,
7012 since scratch is known to overlap with OUTVAL, and
7013 must be wider than a word. */
7014 emit_insn (gen_movhi (scratch_hi, outval));
7015 outval = scratch_hi;
7019 emit_set_insn (base_plus, base);
7022 else if (GET_CODE (base) == PLUS)
7024 /* The addend must be CONST_INT, or we would have dealt with it above. */
7025 HOST_WIDE_INT hi, lo;
7027 offset += INTVAL (XEXP (base, 1));
7028 base = XEXP (base, 0);
7030 /* Rework the address into a legal sequence of insns. */
7031 /* Valid range for lo is -4095 -> 4095 */
7034 : -((-offset) & 0xfff));
7036 /* Corner case, if lo is the max offset then we would be out of range
7037 once we have added the additional 1 below, so bump the msb into the
7038 pre-loading insn(s). */
7042 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
7043 ^ (HOST_WIDE_INT) 0x80000000)
7044 - (HOST_WIDE_INT) 0x80000000);
7046 gcc_assert (hi + lo == offset);
7050 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7052 /* Be careful not to destroy OUTVAL. */
7053 if (reg_overlap_mentioned_p (base_plus, outval))
7055 /* Updating base_plus might destroy outval, see if we
7056 can swap the scratch and base_plus. */
7057 if (!reg_overlap_mentioned_p (scratch, outval))
7060 scratch = base_plus;
7065 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
7067 /* Be conservative and copy outval into scratch now,
7068 this should only be necessary if outval is a
7069 subreg of something larger than a word. */
7070 /* XXX Might this clobber base? I can't see how it
7071 can, since scratch is known to overlap with
7073 emit_insn (gen_movhi (scratch_hi, outval));
7074 outval = scratch_hi;
7078 /* Get the base address; addsi3 knows how to handle constants
7079 that require more than one insn. */
7080 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
7086 if (BYTES_BIG_ENDIAN)
7088 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7089 plus_constant (base, offset + 1)),
7090 gen_lowpart (QImode, outval)));
7091 emit_insn (gen_lshrsi3 (scratch,
7092 gen_rtx_SUBREG (SImode, outval, 0),
7094 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7095 gen_lowpart (QImode, scratch)));
7099 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7100 gen_lowpart (QImode, outval)));
7101 emit_insn (gen_lshrsi3 (scratch,
7102 gen_rtx_SUBREG (SImode, outval, 0),
7104 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7105 plus_constant (base, offset + 1)),
7106 gen_lowpart (QImode, scratch)));
7110 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
7111 (padded to the size of a word) should be passed in a register. */
7114 arm_must_pass_in_stack (enum machine_mode mode, tree type)
7116 if (TARGET_AAPCS_BASED)
7117 return must_pass_in_stack_var_size (mode, type);
7119 return must_pass_in_stack_var_size_or_pad (mode, type);
7123 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
7124 Return true if an argument passed on the stack should be padded upwards,
7125 i.e. if the least-significant byte has useful data.
7126 For legacy APCS ABIs we use the default. For AAPCS based ABIs small
7127 aggregate types are placed in the lowest memory address. */
7130 arm_pad_arg_upward (enum machine_mode mode, tree type)
7132 if (!TARGET_AAPCS_BASED)
7133 return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;
7135 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
7142 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
7143 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
7144 byte of the register has useful data, and return the opposite if the
7145 most significant byte does.
7146 For AAPCS, small aggregates and small complex types are always padded
7150 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
7151 tree type, int first ATTRIBUTE_UNUSED)
7153 if (TARGET_AAPCS_BASED
7155 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
7156 && int_size_in_bytes (type) <= 4)
7159 /* Otherwise, use default padding. */
7160 return !BYTES_BIG_ENDIAN;
7164 /* Print a symbolic form of X to the debug file, F. */
7166 arm_print_value (FILE *f, rtx x)
7168 switch (GET_CODE (x))
7171 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
7175 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
7183 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
7185 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
7186 if (i < (CONST_VECTOR_NUNITS (x) - 1))
7194 fprintf (f, "\"%s\"", XSTR (x, 0));
7198 fprintf (f, "`%s'", XSTR (x, 0));
7202 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
7206 arm_print_value (f, XEXP (x, 0));
7210 arm_print_value (f, XEXP (x, 0));
7212 arm_print_value (f, XEXP (x, 1));
7220 fprintf (f, "????");
7225 /* Routines for manipulation of the constant pool. */
7227 /* Arm instructions cannot load a large constant directly into a
7228 register; they have to come from a pc relative load. The constant
7229 must therefore be placed in the addressable range of the pc
7230 relative load. Depending on the precise pc relative load
7231 instruction the range is somewhere between 256 bytes and 4k. This
7232 means that we often have to dump a constant inside a function, and
7233 generate code to branch around it.
7235 It is important to minimize this, since the branches will slow
7236 things down and make the code larger.
7238 Normally we can hide the table after an existing unconditional
7239 branch so that there is no interruption of the flow, but in the
7240 worst case the code looks like this:
7258 We fix this by performing a scan after scheduling, which notices
7259 which instructions need to have their operands fetched from the
7260 constant table and builds the table.
7262 The algorithm starts by building a table of all the constants that
7263 need fixing up and all the natural barriers in the function (places
7264 where a constant table can be dropped without breaking the flow).
7265 For each fixup we note how far the pc-relative replacement will be
7266 able to reach and the offset of the instruction into the function.
7268 Having built the table we then group the fixes together to form
7269 tables that are as large as possible (subject to addressing
7270 constraints) and emit each table of constants after the last
7271 barrier that is within range of all the instructions in the group.
7272 If a group does not contain a barrier, then we forcibly create one
7273 by inserting a jump instruction into the flow. Once the table has
7274 been inserted, the insns are then modified to reference the
7275 relevant entry in the pool.
7277 Possible enhancements to the algorithm (not implemented) are:
7279 1) For some processors and object formats, there may be benefit in
7280 aligning the pools to the start of cache lines; this alignment
7281 would need to be taken into account when calculating addressability
7284 /* These typedefs are located at the start of this file, so that
7285 they can be used in the prototypes there. This comment is to
7286 remind readers of that fact so that the following structures
7287 can be understood more easily.
7289 typedef struct minipool_node Mnode;
7290 typedef struct minipool_fixup Mfix; */
7292 struct minipool_node
7294 /* Doubly linked chain of entries. */
7297 /* The maximum offset into the code that this entry can be placed. While
7298 pushing fixes for forward references, all entries are sorted in order
7299 of increasing max_address. */
7300 HOST_WIDE_INT max_address;
7301 /* Similarly for an entry inserted for a backwards ref. */
7302 HOST_WIDE_INT min_address;
7303 /* The number of fixes referencing this entry. This can become zero
7304 if we "unpush" an entry. In this case we ignore the entry when we
7305 come to emit the code. */
7307 /* The offset from the start of the minipool. */
7308 HOST_WIDE_INT offset;
7309 /* The value in table. */
7311 /* The mode of value. */
7312 enum machine_mode mode;
7313 /* The size of the value. With iWMMXt enabled
7314 sizes > 4 also imply an alignment of 8-bytes. */
7318 struct minipool_fixup
7322 HOST_WIDE_INT address;
7324 enum machine_mode mode;
7328 HOST_WIDE_INT forwards;
7329 HOST_WIDE_INT backwards;
7332 /* Fixes less than a word need padding out to a word boundary. */
7333 #define MINIPOOL_FIX_SIZE(mode) \
7334 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
7336 static Mnode * minipool_vector_head;
7337 static Mnode * minipool_vector_tail;
7338 static rtx minipool_vector_label;
7339 static int minipool_pad;
7341 /* The linked list of all minipool fixes required for this function. */
7342 Mfix * minipool_fix_head;
7343 Mfix * minipool_fix_tail;
7344 /* The fix entry for the current minipool, once it has been placed. */
7345 Mfix * minipool_barrier;
7347 /* Determines if INSN is the start of a jump table. Returns the end
7348 of the TABLE or NULL_RTX. */
7350 is_jump_table (rtx insn)
7354 if (GET_CODE (insn) == JUMP_INSN
7355 && JUMP_LABEL (insn) != NULL
7356 && ((table = next_real_insn (JUMP_LABEL (insn)))
7357 == next_real_insn (insn))
7359 && GET_CODE (table) == JUMP_INSN
7360 && (GET_CODE (PATTERN (table)) == ADDR_VEC
7361 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
7367 #ifndef JUMP_TABLES_IN_TEXT_SECTION
7368 #define JUMP_TABLES_IN_TEXT_SECTION 0
7371 static HOST_WIDE_INT
7372 get_jump_table_size (rtx insn)
7374 /* ADDR_VECs only take room if read-only data does into the text
7376 if (JUMP_TABLES_IN_TEXT_SECTION || readonly_data_section == text_section)
7378 rtx body = PATTERN (insn);
7379 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
7381 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
7387 /* Move a minipool fix MP from its current location to before MAX_MP.
7388 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
7389 constraints may need updating. */
7391 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
7392 HOST_WIDE_INT max_address)
7394 /* The code below assumes these are different. */
7395 gcc_assert (mp != max_mp);
7399 if (max_address < mp->max_address)
7400 mp->max_address = max_address;
7404 if (max_address > max_mp->max_address - mp->fix_size)
7405 mp->max_address = max_mp->max_address - mp->fix_size;
7407 mp->max_address = max_address;
7409 /* Unlink MP from its current position. Since max_mp is non-null,
7410 mp->prev must be non-null. */
7411 mp->prev->next = mp->next;
7412 if (mp->next != NULL)
7413 mp->next->prev = mp->prev;
7415 minipool_vector_tail = mp->prev;
7417 /* Re-insert it before MAX_MP. */
7419 mp->prev = max_mp->prev;
7422 if (mp->prev != NULL)
7423 mp->prev->next = mp;
7425 minipool_vector_head = mp;
7428 /* Save the new entry. */
7431 /* Scan over the preceding entries and adjust their addresses as
7433 while (mp->prev != NULL
7434 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7436 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7443 /* Add a constant to the minipool for a forward reference. Returns the
7444 node added or NULL if the constant will not fit in this pool. */
7446 add_minipool_forward_ref (Mfix *fix)
7448 /* If set, max_mp is the first pool_entry that has a lower
7449 constraint than the one we are trying to add. */
7450 Mnode * max_mp = NULL;
7451 HOST_WIDE_INT max_address = fix->address + fix->forwards - minipool_pad;
7454 /* If the minipool starts before the end of FIX->INSN then this FIX
7455 can not be placed into the current pool. Furthermore, adding the
7456 new constant pool entry may cause the pool to start FIX_SIZE bytes
7458 if (minipool_vector_head &&
7459 (fix->address + get_attr_length (fix->insn)
7460 >= minipool_vector_head->max_address - fix->fix_size))
7463 /* Scan the pool to see if a constant with the same value has
7464 already been added. While we are doing this, also note the
7465 location where we must insert the constant if it doesn't already
7467 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7469 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7470 && fix->mode == mp->mode
7471 && (GET_CODE (fix->value) != CODE_LABEL
7472 || (CODE_LABEL_NUMBER (fix->value)
7473 == CODE_LABEL_NUMBER (mp->value)))
7474 && rtx_equal_p (fix->value, mp->value))
7476 /* More than one fix references this entry. */
7478 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7481 /* Note the insertion point if necessary. */
7483 && mp->max_address > max_address)
7486 /* If we are inserting an 8-bytes aligned quantity and
7487 we have not already found an insertion point, then
7488 make sure that all such 8-byte aligned quantities are
7489 placed at the start of the pool. */
7490 if (ARM_DOUBLEWORD_ALIGN
7492 && fix->fix_size == 8
7493 && mp->fix_size != 8)
7496 max_address = mp->max_address;
7500 /* The value is not currently in the minipool, so we need to create
7501 a new entry for it. If MAX_MP is NULL, the entry will be put on
7502 the end of the list since the placement is less constrained than
7503 any existing entry. Otherwise, we insert the new fix before
7504 MAX_MP and, if necessary, adjust the constraints on the other
7507 mp->fix_size = fix->fix_size;
7508 mp->mode = fix->mode;
7509 mp->value = fix->value;
7511 /* Not yet required for a backwards ref. */
7512 mp->min_address = -65536;
7516 mp->max_address = max_address;
7518 mp->prev = minipool_vector_tail;
7520 if (mp->prev == NULL)
7522 minipool_vector_head = mp;
7523 minipool_vector_label = gen_label_rtx ();
7526 mp->prev->next = mp;
7528 minipool_vector_tail = mp;
7532 if (max_address > max_mp->max_address - mp->fix_size)
7533 mp->max_address = max_mp->max_address - mp->fix_size;
7535 mp->max_address = max_address;
7538 mp->prev = max_mp->prev;
7540 if (mp->prev != NULL)
7541 mp->prev->next = mp;
7543 minipool_vector_head = mp;
7546 /* Save the new entry. */
7549 /* Scan over the preceding entries and adjust their addresses as
7551 while (mp->prev != NULL
7552 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7554 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7562 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7563 HOST_WIDE_INT min_address)
7565 HOST_WIDE_INT offset;
7567 /* The code below assumes these are different. */
7568 gcc_assert (mp != min_mp);
7572 if (min_address > mp->min_address)
7573 mp->min_address = min_address;
7577 /* We will adjust this below if it is too loose. */
7578 mp->min_address = min_address;
7580 /* Unlink MP from its current position. Since min_mp is non-null,
7581 mp->next must be non-null. */
7582 mp->next->prev = mp->prev;
7583 if (mp->prev != NULL)
7584 mp->prev->next = mp->next;
7586 minipool_vector_head = mp->next;
7588 /* Reinsert it after MIN_MP. */
7590 mp->next = min_mp->next;
7592 if (mp->next != NULL)
7593 mp->next->prev = mp;
7595 minipool_vector_tail = mp;
7601 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7603 mp->offset = offset;
7604 if (mp->refcount > 0)
7605 offset += mp->fix_size;
7607 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7608 mp->next->min_address = mp->min_address + mp->fix_size;
7614 /* Add a constant to the minipool for a backward reference. Returns the
7615 node added or NULL if the constant will not fit in this pool.
7617 Note that the code for insertion for a backwards reference can be
7618 somewhat confusing because the calculated offsets for each fix do
7619 not take into account the size of the pool (which is still under
7622 add_minipool_backward_ref (Mfix *fix)
7624 /* If set, min_mp is the last pool_entry that has a lower constraint
7625 than the one we are trying to add. */
7626 Mnode *min_mp = NULL;
7627 /* This can be negative, since it is only a constraint. */
7628 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7631 /* If we can't reach the current pool from this insn, or if we can't
7632 insert this entry at the end of the pool without pushing other
7633 fixes out of range, then we don't try. This ensures that we
7634 can't fail later on. */
7635 if (min_address >= minipool_barrier->address
7636 || (minipool_vector_tail->min_address + fix->fix_size
7637 >= minipool_barrier->address))
7640 /* Scan the pool to see if a constant with the same value has
7641 already been added. While we are doing this, also note the
7642 location where we must insert the constant if it doesn't already
7644 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7646 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7647 && fix->mode == mp->mode
7648 && (GET_CODE (fix->value) != CODE_LABEL
7649 || (CODE_LABEL_NUMBER (fix->value)
7650 == CODE_LABEL_NUMBER (mp->value)))
7651 && rtx_equal_p (fix->value, mp->value)
7652 /* Check that there is enough slack to move this entry to the
7653 end of the table (this is conservative). */
7655 > (minipool_barrier->address
7656 + minipool_vector_tail->offset
7657 + minipool_vector_tail->fix_size)))
7660 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7664 mp->min_address += fix->fix_size;
7667 /* Note the insertion point if necessary. */
7668 if (mp->min_address < min_address)
7670 /* For now, we do not allow the insertion of 8-byte alignment
7671 requiring nodes anywhere but at the start of the pool. */
7672 if (ARM_DOUBLEWORD_ALIGN
7673 && fix->fix_size == 8 && mp->fix_size != 8)
7678 else if (mp->max_address
7679 < minipool_barrier->address + mp->offset + fix->fix_size)
7681 /* Inserting before this entry would push the fix beyond
7682 its maximum address (which can happen if we have
7683 re-located a forwards fix); force the new fix to come
7686 min_address = mp->min_address + fix->fix_size;
7688 /* If we are inserting an 8-bytes aligned quantity and
7689 we have not already found an insertion point, then
7690 make sure that all such 8-byte aligned quantities are
7691 placed at the start of the pool. */
7692 else if (ARM_DOUBLEWORD_ALIGN
7694 && fix->fix_size == 8
7695 && mp->fix_size < 8)
7698 min_address = mp->min_address + fix->fix_size;
7703 /* We need to create a new entry. */
7705 mp->fix_size = fix->fix_size;
7706 mp->mode = fix->mode;
7707 mp->value = fix->value;
7709 mp->max_address = minipool_barrier->address + 65536;
7711 mp->min_address = min_address;
7716 mp->next = minipool_vector_head;
7718 if (mp->next == NULL)
7720 minipool_vector_tail = mp;
7721 minipool_vector_label = gen_label_rtx ();
7724 mp->next->prev = mp;
7726 minipool_vector_head = mp;
7730 mp->next = min_mp->next;
7734 if (mp->next != NULL)
7735 mp->next->prev = mp;
7737 minipool_vector_tail = mp;
7740 /* Save the new entry. */
7748 /* Scan over the following entries and adjust their offsets. */
7749 while (mp->next != NULL)
7751 if (mp->next->min_address < mp->min_address + mp->fix_size)
7752 mp->next->min_address = mp->min_address + mp->fix_size;
7755 mp->next->offset = mp->offset + mp->fix_size;
7757 mp->next->offset = mp->offset;
7766 assign_minipool_offsets (Mfix *barrier)
7768 HOST_WIDE_INT offset = 0;
7771 minipool_barrier = barrier;
7773 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7775 mp->offset = offset;
7777 if (mp->refcount > 0)
7778 offset += mp->fix_size;
7782 /* Output the literal table */
7784 dump_minipool (rtx scan)
7790 if (ARM_DOUBLEWORD_ALIGN)
7791 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7792 if (mp->refcount > 0 && mp->fix_size == 8)
7800 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7801 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7803 scan = emit_label_after (gen_label_rtx (), scan);
7804 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7805 scan = emit_label_after (minipool_vector_label, scan);
7807 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7809 if (mp->refcount > 0)
7814 ";; Offset %u, min %ld, max %ld ",
7815 (unsigned) mp->offset, (unsigned long) mp->min_address,
7816 (unsigned long) mp->max_address);
7817 arm_print_value (dump_file, mp->value);
7818 fputc ('\n', dump_file);
7821 switch (mp->fix_size)
7823 #ifdef HAVE_consttable_1
7825 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7829 #ifdef HAVE_consttable_2
7831 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7835 #ifdef HAVE_consttable_4
7837 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7841 #ifdef HAVE_consttable_8
7843 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7856 minipool_vector_head = minipool_vector_tail = NULL;
7857 scan = emit_insn_after (gen_consttable_end (), scan);
7858 scan = emit_barrier_after (scan);
7861 /* Return the cost of forcibly inserting a barrier after INSN. */
7863 arm_barrier_cost (rtx insn)
7865 /* Basing the location of the pool on the loop depth is preferable,
7866 but at the moment, the basic block information seems to be
7867 corrupt by this stage of the compilation. */
7869 rtx next = next_nonnote_insn (insn);
7871 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7874 switch (GET_CODE (insn))
7877 /* It will always be better to place the table before the label, rather
7886 return base_cost - 10;
7889 return base_cost + 10;
7893 /* Find the best place in the insn stream in the range
7894 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7895 Create the barrier by inserting a jump and add a new fix entry for
7898 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7900 HOST_WIDE_INT count = 0;
7902 rtx from = fix->insn;
7903 /* The instruction after which we will insert the jump. */
7904 rtx selected = NULL;
7906 /* The address at which the jump instruction will be placed. */
7907 HOST_WIDE_INT selected_address;
7909 HOST_WIDE_INT max_count = max_address - fix->address;
7910 rtx label = gen_label_rtx ();
7912 selected_cost = arm_barrier_cost (from);
7913 selected_address = fix->address;
7915 while (from && count < max_count)
7920 /* This code shouldn't have been called if there was a natural barrier
7922 gcc_assert (GET_CODE (from) != BARRIER);
7924 /* Count the length of this insn. */
7925 count += get_attr_length (from);
7927 /* If there is a jump table, add its length. */
7928 tmp = is_jump_table (from);
7931 count += get_jump_table_size (tmp);
7933 /* Jump tables aren't in a basic block, so base the cost on
7934 the dispatch insn. If we select this location, we will
7935 still put the pool after the table. */
7936 new_cost = arm_barrier_cost (from);
7938 if (count < max_count
7939 && (!selected || new_cost <= selected_cost))
7942 selected_cost = new_cost;
7943 selected_address = fix->address + count;
7946 /* Continue after the dispatch table. */
7947 from = NEXT_INSN (tmp);
7951 new_cost = arm_barrier_cost (from);
7953 if (count < max_count
7954 && (!selected || new_cost <= selected_cost))
7957 selected_cost = new_cost;
7958 selected_address = fix->address + count;
7961 from = NEXT_INSN (from);
7964 /* Make sure that we found a place to insert the jump. */
7965 gcc_assert (selected);
7967 /* Create a new JUMP_INSN that branches around a barrier. */
7968 from = emit_jump_insn_after (gen_jump (label), selected);
7969 JUMP_LABEL (from) = label;
7970 barrier = emit_barrier_after (from);
7971 emit_label_after (label, barrier);
7973 /* Create a minipool barrier entry for the new barrier. */
7974 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7975 new_fix->insn = barrier;
7976 new_fix->address = selected_address;
7977 new_fix->next = fix->next;
7978 fix->next = new_fix;
7983 /* Record that there is a natural barrier in the insn stream at
7986 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7988 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7991 fix->address = address;
7994 if (minipool_fix_head != NULL)
7995 minipool_fix_tail->next = fix;
7997 minipool_fix_head = fix;
7999 minipool_fix_tail = fix;
8002 /* Record INSN, which will need fixing up to load a value from the
8003 minipool. ADDRESS is the offset of the insn since the start of the
8004 function; LOC is a pointer to the part of the insn which requires
8005 fixing; VALUE is the constant that must be loaded, which is of type
8008 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
8009 enum machine_mode mode, rtx value)
8011 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
8013 #ifdef AOF_ASSEMBLER
8014 /* PIC symbol references need to be converted into offsets into the
8016 /* XXX This shouldn't be done here. */
8017 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
8018 value = aof_pic_entry (value);
8019 #endif /* AOF_ASSEMBLER */
8022 fix->address = address;
8025 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
8027 fix->forwards = get_attr_pool_range (insn);
8028 fix->backwards = get_attr_neg_pool_range (insn);
8029 fix->minipool = NULL;
8031 /* If an insn doesn't have a range defined for it, then it isn't
8032 expecting to be reworked by this code. Better to stop now than
8033 to generate duff assembly code. */
8034 gcc_assert (fix->forwards || fix->backwards);
8036 /* If an entry requires 8-byte alignment then assume all constant pools
8037 require 4 bytes of padding. Trying to do this later on a per-pool
8038 basis is awkward because existing pool entries have to be modified. */
8039 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
8045 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
8046 GET_MODE_NAME (mode),
8047 INSN_UID (insn), (unsigned long) address,
8048 -1 * (long)fix->backwards, (long)fix->forwards);
8049 arm_print_value (dump_file, fix->value);
8050 fprintf (dump_file, "\n");
8053 /* Add it to the chain of fixes. */
8056 if (minipool_fix_head != NULL)
8057 minipool_fix_tail->next = fix;
8059 minipool_fix_head = fix;
8061 minipool_fix_tail = fix;
8064 /* Return the cost of synthesizing a 64-bit constant VAL inline.
8065 Returns the number of insns needed, or 99 if we don't know how to
8068 arm_const_double_inline_cost (rtx val)
8070 rtx lowpart, highpart;
8071 enum machine_mode mode;
8073 mode = GET_MODE (val);
8075 if (mode == VOIDmode)
8078 gcc_assert (GET_MODE_SIZE (mode) == 8);
8080 lowpart = gen_lowpart (SImode, val);
8081 highpart = gen_highpart_mode (SImode, mode, val);
8083 gcc_assert (GET_CODE (lowpart) == CONST_INT);
8084 gcc_assert (GET_CODE (highpart) == CONST_INT);
8086 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
8087 NULL_RTX, NULL_RTX, 0, 0)
8088 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
8089 NULL_RTX, NULL_RTX, 0, 0));
8092 /* Return true if it is worthwhile to split a 64-bit constant into two
8093 32-bit operations. This is the case if optimizing for size, or
8094 if we have load delay slots, or if one 32-bit part can be done with
8095 a single data operation. */
8097 arm_const_double_by_parts (rtx val)
8099 enum machine_mode mode = GET_MODE (val);
8102 if (optimize_size || arm_ld_sched)
8105 if (mode == VOIDmode)
8108 part = gen_highpart_mode (SImode, mode, val);
8110 gcc_assert (GET_CODE (part) == CONST_INT);
8112 if (const_ok_for_arm (INTVAL (part))
8113 || const_ok_for_arm (~INTVAL (part)))
8116 part = gen_lowpart (SImode, val);
8118 gcc_assert (GET_CODE (part) == CONST_INT);
8120 if (const_ok_for_arm (INTVAL (part))
8121 || const_ok_for_arm (~INTVAL (part)))
8127 /* Scan INSN and note any of its operands that need fixing.
8128 If DO_PUSHES is false we do not actually push any of the fixups
8129 needed. The function returns TRUE if any fixups were needed/pushed.
8130 This is used by arm_memory_load_p() which needs to know about loads
8131 of constants that will be converted into minipool loads. */
8133 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
8135 bool result = false;
8138 extract_insn (insn);
8140 if (!constrain_operands (1))
8141 fatal_insn_not_found (insn);
8143 if (recog_data.n_alternatives == 0)
8146 /* Fill in recog_op_alt with information about the constraints of
8148 preprocess_constraints ();
8150 for (opno = 0; opno < recog_data.n_operands; opno++)
8152 /* Things we need to fix can only occur in inputs. */
8153 if (recog_data.operand_type[opno] != OP_IN)
8156 /* If this alternative is a memory reference, then any mention
8157 of constants in this alternative is really to fool reload
8158 into allowing us to accept one there. We need to fix them up
8159 now so that we output the right code. */
8160 if (recog_op_alt[opno][which_alternative].memory_ok)
8162 rtx op = recog_data.operand[opno];
8164 if (CONSTANT_P (op))
8167 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
8168 recog_data.operand_mode[opno], op);
8171 else if (GET_CODE (op) == MEM
8172 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
8173 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
8177 rtx cop = avoid_constant_pool_reference (op);
8179 /* Casting the address of something to a mode narrower
8180 than a word can cause avoid_constant_pool_reference()
8181 to return the pool reference itself. That's no good to
8182 us here. Lets just hope that we can use the
8183 constant pool value directly. */
8185 cop = get_pool_constant (XEXP (op, 0));
8187 push_minipool_fix (insn, address,
8188 recog_data.operand_loc[opno],
8189 recog_data.operand_mode[opno], cop);
8200 /* Gcc puts the pool in the wrong place for ARM, since we can only
8201 load addresses a limited distance around the pc. We do some
8202 special munging to move the constant pool values to the correct
8203 point in the code. */
8208 HOST_WIDE_INT address = 0;
8211 minipool_fix_head = minipool_fix_tail = NULL;
8213 /* The first insn must always be a note, or the code below won't
8214 scan it properly. */
8215 insn = get_insns ();
8216 gcc_assert (GET_CODE (insn) == NOTE);
8219 /* Scan all the insns and record the operands that will need fixing. */
8220 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
8222 if (TARGET_CIRRUS_FIX_INVALID_INSNS
8223 && (arm_cirrus_insn_p (insn)
8224 || GET_CODE (insn) == JUMP_INSN
8225 || arm_memory_load_p (insn)))
8226 cirrus_reorg (insn);
8228 if (GET_CODE (insn) == BARRIER)
8229 push_minipool_barrier (insn, address);
8230 else if (INSN_P (insn))
8234 note_invalid_constants (insn, address, true);
8235 address += get_attr_length (insn);
8237 /* If the insn is a vector jump, add the size of the table
8238 and skip the table. */
8239 if ((table = is_jump_table (insn)) != NULL)
8241 address += get_jump_table_size (table);
8247 fix = minipool_fix_head;
8249 /* Now scan the fixups and perform the required changes. */
8254 Mfix * last_added_fix;
8255 Mfix * last_barrier = NULL;
8258 /* Skip any further barriers before the next fix. */
8259 while (fix && GET_CODE (fix->insn) == BARRIER)
8262 /* No more fixes. */
8266 last_added_fix = NULL;
8268 for (ftmp = fix; ftmp; ftmp = ftmp->next)
8270 if (GET_CODE (ftmp->insn) == BARRIER)
8272 if (ftmp->address >= minipool_vector_head->max_address)
8275 last_barrier = ftmp;
8277 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
8280 last_added_fix = ftmp; /* Keep track of the last fix added. */
8283 /* If we found a barrier, drop back to that; any fixes that we
8284 could have reached but come after the barrier will now go in
8285 the next mini-pool. */
8286 if (last_barrier != NULL)
8288 /* Reduce the refcount for those fixes that won't go into this
8290 for (fdel = last_barrier->next;
8291 fdel && fdel != ftmp;
8294 fdel->minipool->refcount--;
8295 fdel->minipool = NULL;
8298 ftmp = last_barrier;
8302 /* ftmp is first fix that we can't fit into this pool and
8303 there no natural barriers that we could use. Insert a
8304 new barrier in the code somewhere between the previous
8305 fix and this one, and arrange to jump around it. */
8306 HOST_WIDE_INT max_address;
8308 /* The last item on the list of fixes must be a barrier, so
8309 we can never run off the end of the list of fixes without
8310 last_barrier being set. */
8313 max_address = minipool_vector_head->max_address;
8314 /* Check that there isn't another fix that is in range that
8315 we couldn't fit into this pool because the pool was
8316 already too large: we need to put the pool before such an
8317 instruction. The pool itself may come just after the
8318 fix because create_fix_barrier also allows space for a
8319 jump instruction. */
8320 if (ftmp->address < max_address)
8321 max_address = ftmp->address + 1;
8323 last_barrier = create_fix_barrier (last_added_fix, max_address);
8326 assign_minipool_offsets (last_barrier);
8330 if (GET_CODE (ftmp->insn) != BARRIER
8331 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
8338 /* Scan over the fixes we have identified for this pool, fixing them
8339 up and adding the constants to the pool itself. */
8340 for (this_fix = fix; this_fix && ftmp != this_fix;
8341 this_fix = this_fix->next)
8342 if (GET_CODE (this_fix->insn) != BARRIER)
8345 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
8346 minipool_vector_label),
8347 this_fix->minipool->offset);
8348 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
8351 dump_minipool (last_barrier->insn);
8355 /* From now on we must synthesize any constants that we can't handle
8356 directly. This can happen if the RTL gets split during final
8357 instruction generation. */
8358 after_arm_reorg = 1;
8360 /* Free the minipool memory. */
8361 obstack_free (&minipool_obstack, minipool_startobj);
8364 /* Routines to output assembly language. */
8366 /* If the rtx is the correct value then return the string of the number.
8367 In this way we can ensure that valid double constants are generated even
8368 when cross compiling. */
8370 fp_immediate_constant (rtx x)
8375 if (!fp_consts_inited)
8378 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8379 for (i = 0; i < 8; i++)
8380 if (REAL_VALUES_EQUAL (r, values_fp[i]))
8381 return strings_fp[i];
8386 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
8388 fp_const_from_val (REAL_VALUE_TYPE *r)
8392 if (!fp_consts_inited)
8395 for (i = 0; i < 8; i++)
8396 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
8397 return strings_fp[i];
8402 /* Output the operands of a LDM/STM instruction to STREAM.
8403 MASK is the ARM register set mask of which only bits 0-15 are important.
8404 REG is the base register, either the frame pointer or the stack pointer,
8405 INSTR is the possibly suffixed load or store instruction. */
8408 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
8412 bool not_first = FALSE;
8414 fputc ('\t', stream);
8415 asm_fprintf (stream, instr, reg);
8416 fputs (", {", stream);
8418 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8419 if (mask & (1 << i))
8422 fprintf (stream, ", ");
8424 asm_fprintf (stream, "%r", i);
8428 fprintf (stream, "}\n");
8432 /* Output a FLDMX instruction to STREAM.
8433 BASE if the register containing the address.
8434 REG and COUNT specify the register range.
8435 Extra registers may be added to avoid hardware bugs. */
8438 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
8442 /* Workaround ARM10 VFPr1 bug. */
8443 if (count == 2 && !arm_arch6)
8450 fputc ('\t', stream);
8451 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
8453 for (i = reg; i < reg + count; i++)
8456 fputs (", ", stream);
8457 asm_fprintf (stream, "d%d", i);
8459 fputs ("}\n", stream);
8464 /* Output the assembly for a store multiple. */
8467 vfp_output_fstmx (rtx * operands)
8474 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
8475 p = strlen (pattern);
8477 gcc_assert (GET_CODE (operands[1]) == REG);
8479 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8480 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8482 p += sprintf (&pattern[p], ", d%d", base + i);
8484 strcpy (&pattern[p], "}");
8486 output_asm_insn (pattern, operands);
8491 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
8492 number of bytes pushed. */
8495 vfp_emit_fstmx (int base_reg, int count)
8502 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8503 register pairs are stored by a store multiple insn. We avoid this
8504 by pushing an extra pair. */
8505 if (count == 2 && !arm_arch6)
8507 if (base_reg == LAST_VFP_REGNUM - 3)
8512 /* ??? The frame layout is implementation defined. We describe
8513 standard format 1 (equivalent to a FSTMD insn and unused pad word).
8514 We really need some way of representing the whole block so that the
8515 unwinder can figure it out at runtime. */
8516 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8517 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8519 reg = gen_rtx_REG (DFmode, base_reg);
8523 = gen_rtx_SET (VOIDmode,
8524 gen_frame_mem (BLKmode,
8525 gen_rtx_PRE_DEC (BLKmode,
8526 stack_pointer_rtx)),
8527 gen_rtx_UNSPEC (BLKmode,
8531 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8532 plus_constant (stack_pointer_rtx, -(count * 8 + 4)));
8533 RTX_FRAME_RELATED_P (tmp) = 1;
8534 XVECEXP (dwarf, 0, 0) = tmp;
8536 tmp = gen_rtx_SET (VOIDmode,
8537 gen_frame_mem (DFmode, stack_pointer_rtx),
8539 RTX_FRAME_RELATED_P (tmp) = 1;
8540 XVECEXP (dwarf, 0, 1) = tmp;
8542 for (i = 1; i < count; i++)
8544 reg = gen_rtx_REG (DFmode, base_reg);
8546 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8548 tmp = gen_rtx_SET (VOIDmode,
8549 gen_frame_mem (DFmode,
8550 plus_constant (stack_pointer_rtx,
8553 RTX_FRAME_RELATED_P (tmp) = 1;
8554 XVECEXP (dwarf, 0, i + 1) = tmp;
8557 par = emit_insn (par);
8558 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8560 RTX_FRAME_RELATED_P (par) = 1;
8562 return count * 8 + 4;
8566 /* Output a 'call' insn. */
8568 output_call (rtx *operands)
8570 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8572 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8573 if (REGNO (operands[0]) == LR_REGNUM)
8575 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8576 output_asm_insn ("mov%?\t%0, %|lr", operands);
8579 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8581 if (TARGET_INTERWORK || arm_arch4t)
8582 output_asm_insn ("bx%?\t%0", operands);
8584 output_asm_insn ("mov%?\t%|pc, %0", operands);
8589 /* Output a 'call' insn that is a reference in memory. */
8591 output_call_mem (rtx *operands)
8593 if (TARGET_INTERWORK && !arm_arch5)
8595 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8596 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8597 output_asm_insn ("bx%?\t%|ip", operands);
8599 else if (regno_use_in (LR_REGNUM, operands[0]))
8601 /* LR is used in the memory address. We load the address in the
8602 first instruction. It's safe to use IP as the target of the
8603 load since the call will kill it anyway. */
8604 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8606 output_asm_insn ("blx%?\t%|ip", operands);
8609 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8611 output_asm_insn ("bx%?\t%|ip", operands);
8613 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8618 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8619 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8626 /* Output a move from arm registers to an fpa registers.
8627 OPERANDS[0] is an fpa register.
8628 OPERANDS[1] is the first registers of an arm register pair. */
8630 output_mov_long_double_fpa_from_arm (rtx *operands)
8632 int arm_reg0 = REGNO (operands[1]);
8635 gcc_assert (arm_reg0 != IP_REGNUM);
8637 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8638 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8639 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8641 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8642 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8647 /* Output a move from an fpa register to arm registers.
8648 OPERANDS[0] is the first registers of an arm register pair.
8649 OPERANDS[1] is an fpa register. */
8651 output_mov_long_double_arm_from_fpa (rtx *operands)
8653 int arm_reg0 = REGNO (operands[0]);
8656 gcc_assert (arm_reg0 != IP_REGNUM);
8658 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8659 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8660 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8662 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8663 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8667 /* Output a move from arm registers to arm registers of a long double
8668 OPERANDS[0] is the destination.
8669 OPERANDS[1] is the source. */
8671 output_mov_long_double_arm_from_arm (rtx *operands)
8673 /* We have to be careful here because the two might overlap. */
8674 int dest_start = REGNO (operands[0]);
8675 int src_start = REGNO (operands[1]);
8679 if (dest_start < src_start)
8681 for (i = 0; i < 3; i++)
8683 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8684 ops[1] = gen_rtx_REG (SImode, src_start + i);
8685 output_asm_insn ("mov%?\t%0, %1", ops);
8690 for (i = 2; i >= 0; i--)
8692 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8693 ops[1] = gen_rtx_REG (SImode, src_start + i);
8694 output_asm_insn ("mov%?\t%0, %1", ops);
8702 /* Output a move from arm registers to an fpa registers.
8703 OPERANDS[0] is an fpa register.
8704 OPERANDS[1] is the first registers of an arm register pair. */
8706 output_mov_double_fpa_from_arm (rtx *operands)
8708 int arm_reg0 = REGNO (operands[1]);
8711 gcc_assert (arm_reg0 != IP_REGNUM);
8713 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8714 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8715 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8716 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8720 /* Output a move from an fpa register to arm registers.
8721 OPERANDS[0] is the first registers of an arm register pair.
8722 OPERANDS[1] is an fpa register. */
8724 output_mov_double_arm_from_fpa (rtx *operands)
8726 int arm_reg0 = REGNO (operands[0]);
8729 gcc_assert (arm_reg0 != IP_REGNUM);
8731 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8732 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8733 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8734 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8738 /* Output a move between double words.
8739 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8740 or MEM<-REG and all MEMs must be offsettable addresses. */
8742 output_move_double (rtx *operands)
8744 enum rtx_code code0 = GET_CODE (operands[0]);
8745 enum rtx_code code1 = GET_CODE (operands[1]);
8750 int reg0 = REGNO (operands[0]);
8752 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8754 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
8756 switch (GET_CODE (XEXP (operands[1], 0)))
8759 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8763 gcc_assert (TARGET_LDRD);
8764 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8768 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8772 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8776 gcc_assert (TARGET_LDRD);
8777 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8782 otherops[0] = operands[0];
8783 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8784 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8786 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8788 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8790 /* Registers overlap so split out the increment. */
8791 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8792 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8796 /* IWMMXT allows offsets larger than ldrd can handle,
8797 fix these up with a pair of ldr. */
8798 if (GET_CODE (otherops[2]) == CONST_INT
8799 && (INTVAL(otherops[2]) <= -256
8800 || INTVAL(otherops[2]) >= 256))
8802 output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
8803 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8804 output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
8807 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8812 /* IWMMXT allows offsets larger than ldrd can handle,
8813 fix these up with a pair of ldr. */
8814 if (GET_CODE (otherops[2]) == CONST_INT
8815 && (INTVAL(otherops[2]) <= -256
8816 || INTVAL(otherops[2]) >= 256))
8818 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8819 output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
8820 otherops[0] = operands[0];
8821 output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
8824 /* We only allow constant increments, so this is safe. */
8825 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8831 output_asm_insn ("adr%?\t%0, %1", operands);
8832 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8836 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8837 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8839 otherops[0] = operands[0];
8840 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8841 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8843 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8845 if (GET_CODE (otherops[2]) == CONST_INT)
8847 switch ((int) INTVAL (otherops[2]))
8850 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8853 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8856 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8861 && (GET_CODE (otherops[2]) == REG
8862 || (GET_CODE (otherops[2]) == CONST_INT
8863 && INTVAL (otherops[2]) > -256
8864 && INTVAL (otherops[2]) < 256)))
8866 if (reg_overlap_mentioned_p (otherops[0],
8869 /* Swap base and index registers over to
8870 avoid a conflict. */
8871 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8872 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8874 /* If both registers conflict, it will usually
8875 have been fixed by a splitter. */
8876 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8878 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8879 output_asm_insn ("ldr%?d\t%0, [%1]",
8883 output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
8887 if (GET_CODE (otherops[2]) == CONST_INT)
8889 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8890 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8892 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8895 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8898 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8900 return "ldm%?ia\t%0, %M0";
8904 otherops[1] = adjust_address (operands[1], SImode, 4);
8905 /* Take care of overlapping base/data reg. */
8906 if (reg_mentioned_p (operands[0], operands[1]))
8908 output_asm_insn ("ldr%?\t%0, %1", otherops);
8909 output_asm_insn ("ldr%?\t%0, %1", operands);
8913 output_asm_insn ("ldr%?\t%0, %1", operands);
8914 output_asm_insn ("ldr%?\t%0, %1", otherops);
8921 /* Constraints should ensure this. */
8922 gcc_assert (code0 == MEM && code1 == REG);
8923 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
8925 switch (GET_CODE (XEXP (operands[0], 0)))
8928 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8932 gcc_assert (TARGET_LDRD);
8933 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8937 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8941 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8945 gcc_assert (TARGET_LDRD);
8946 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8951 otherops[0] = operands[1];
8952 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8953 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8955 /* IWMMXT allows offsets larger than ldrd can handle,
8956 fix these up with a pair of ldr. */
8957 if (GET_CODE (otherops[2]) == CONST_INT
8958 && (INTVAL(otherops[2]) <= -256
8959 || INTVAL(otherops[2]) >= 256))
8962 reg1 = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8963 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8965 output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
8967 output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
8972 output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
8973 otherops[0] = operands[1];
8974 output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
8977 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8978 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8980 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8984 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8985 if (GET_CODE (otherops[2]) == CONST_INT)
8987 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8990 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8994 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8998 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
9003 && (GET_CODE (otherops[2]) == REG
9004 || (GET_CODE (otherops[2]) == CONST_INT
9005 && INTVAL (otherops[2]) > -256
9006 && INTVAL (otherops[2]) < 256)))
9008 otherops[0] = operands[1];
9009 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
9010 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
9016 otherops[0] = adjust_address (operands[0], SImode, 4);
9017 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
9018 output_asm_insn ("str%?\t%1, %0", operands);
9019 output_asm_insn ("str%?\t%1, %0", otherops);
9026 /* Output an ADD r, s, #n where n may be too big for one instruction.
9027 If adding zero to one register, output nothing. */
9029 output_add_immediate (rtx *operands)
9031 HOST_WIDE_INT n = INTVAL (operands[2]);
9033 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
9036 output_multi_immediate (operands,
9037 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
9040 output_multi_immediate (operands,
9041 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
9048 /* Output a multiple immediate operation.
9049 OPERANDS is the vector of operands referred to in the output patterns.
9050 INSTR1 is the output pattern to use for the first constant.
9051 INSTR2 is the output pattern to use for subsequent constants.
9052 IMMED_OP is the index of the constant slot in OPERANDS.
9053 N is the constant value. */
9055 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
9056 int immed_op, HOST_WIDE_INT n)
9058 #if HOST_BITS_PER_WIDE_INT > 32
9064 /* Quick and easy output. */
9065 operands[immed_op] = const0_rtx;
9066 output_asm_insn (instr1, operands);
9071 const char * instr = instr1;
9073 /* Note that n is never zero here (which would give no output). */
9074 for (i = 0; i < 32; i += 2)
9078 operands[immed_op] = GEN_INT (n & (255 << i));
9079 output_asm_insn (instr, operands);
9089 /* Return the appropriate ARM instruction for the operation code.
9090 The returned result should not be overwritten. OP is the rtx of the
9091 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
9094 arithmetic_instr (rtx op, int shift_first_arg)
9096 switch (GET_CODE (op))
9102 return shift_first_arg ? "rsb" : "sub";
9118 /* Ensure valid constant shifts and return the appropriate shift mnemonic
9119 for the operation code. The returned result should not be overwritten.
9120 OP is the rtx code of the shift.
9121 On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
9124 shift_op (rtx op, HOST_WIDE_INT *amountp)
9127 enum rtx_code code = GET_CODE (op);
9129 switch (GET_CODE (XEXP (op, 1)))
9137 *amountp = INTVAL (XEXP (op, 1));
9159 gcc_assert (*amountp != -1);
9160 *amountp = 32 - *amountp;
9169 /* We never have to worry about the amount being other than a
9170 power of 2, since this case can never be reloaded from a reg. */
9171 gcc_assert (*amountp != -1);
9172 *amountp = int_log2 (*amountp);
9181 /* This is not 100% correct, but follows from the desire to merge
9182 multiplication by a power of 2 with the recognizer for a
9183 shift. >=32 is not a valid shift for "asl", so we must try and
9184 output a shift that produces the correct arithmetical result.
9185 Using lsr #32 is identical except for the fact that the carry bit
9186 is not set correctly if we set the flags; but we never use the
9187 carry bit from such an operation, so we can ignore that. */
9188 if (code == ROTATERT)
9189 /* Rotate is just modulo 32. */
9191 else if (*amountp != (*amountp & 31))
9198 /* Shifts of 0 are no-ops. */
9206 /* Obtain the shift from the POWER of two. */
9208 static HOST_WIDE_INT
9209 int_log2 (HOST_WIDE_INT power)
9211 HOST_WIDE_INT shift = 0;
9213 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
9215 gcc_assert (shift <= 31);
9222 /* Output a .ascii pseudo-op, keeping track of lengths. This is
9223 because /bin/as is horribly restrictive. The judgement about
9224 whether or not each character is 'printable' (and can be output as
9225 is) or not (and must be printed with an octal escape) must be made
9226 with reference to the *host* character set -- the situation is
9227 similar to that discussed in the comments above pp_c_char in
9228 c-pretty-print.c. */
9230 #define MAX_ASCII_LEN 51
9233 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
9238 fputs ("\t.ascii\t\"", stream);
9240 for (i = 0; i < len; i++)
9244 if (len_so_far >= MAX_ASCII_LEN)
9246 fputs ("\"\n\t.ascii\t\"", stream);
9252 if (c == '\\' || c == '\"')
9254 putc ('\\', stream);
9262 fprintf (stream, "\\%03o", c);
9267 fputs ("\"\n", stream);
9270 /* Compute the register save mask for registers 0 through 12
9271 inclusive. This code is used by arm_compute_save_reg_mask. */
9273 static unsigned long
9274 arm_compute_save_reg0_reg12_mask (void)
9276 unsigned long func_type = arm_current_func_type ();
9277 unsigned long save_reg_mask = 0;
9280 if (IS_INTERRUPT (func_type))
9282 unsigned int max_reg;
9283 /* Interrupt functions must not corrupt any registers,
9284 even call clobbered ones. If this is a leaf function
9285 we can just examine the registers used by the RTL, but
9286 otherwise we have to assume that whatever function is
9287 called might clobber anything, and so we have to save
9288 all the call-clobbered registers as well. */
9289 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
9290 /* FIQ handlers have registers r8 - r12 banked, so
9291 we only need to check r0 - r7, Normal ISRs only
9292 bank r14 and r15, so we must check up to r12.
9293 r13 is the stack pointer which is always preserved,
9294 so we do not need to consider it here. */
9299 for (reg = 0; reg <= max_reg; reg++)
9300 if (regs_ever_live[reg]
9301 || (! current_function_is_leaf && call_used_regs [reg]))
9302 save_reg_mask |= (1 << reg);
9304 /* Also save the pic base register if necessary. */
9306 && !TARGET_SINGLE_PIC_BASE
9307 && arm_pic_register != INVALID_REGNUM
9308 && current_function_uses_pic_offset_table)
9309 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9313 /* In the normal case we only need to save those registers
9314 which are call saved and which are used by this function. */
9315 for (reg = 0; reg <= 10; reg++)
9316 if (regs_ever_live[reg] && ! call_used_regs [reg])
9317 save_reg_mask |= (1 << reg);
9319 /* Handle the frame pointer as a special case. */
9320 if (! TARGET_APCS_FRAME
9321 && ! frame_pointer_needed
9322 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
9323 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
9324 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
9326 /* If we aren't loading the PIC register,
9327 don't stack it even though it may be live. */
9329 && !TARGET_SINGLE_PIC_BASE
9330 && arm_pic_register != INVALID_REGNUM
9331 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
9332 || current_function_uses_pic_offset_table))
9333 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9336 /* Save registers so the exception handler can modify them. */
9337 if (current_function_calls_eh_return)
9343 reg = EH_RETURN_DATA_REGNO (i);
9344 if (reg == INVALID_REGNUM)
9346 save_reg_mask |= 1 << reg;
9350 return save_reg_mask;
9353 /* Compute a bit mask of which registers need to be
9354 saved on the stack for the current function. */
9356 static unsigned long
9357 arm_compute_save_reg_mask (void)
9359 unsigned int save_reg_mask = 0;
9360 unsigned long func_type = arm_current_func_type ();
9362 if (IS_NAKED (func_type))
9363 /* This should never really happen. */
9366 /* If we are creating a stack frame, then we must save the frame pointer,
9367 IP (which will hold the old stack pointer), LR and the PC. */
9368 if (frame_pointer_needed)
9370 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
9375 /* Volatile functions do not return, so there
9376 is no need to save any other registers. */
9377 if (IS_VOLATILE (func_type))
9378 return save_reg_mask;
9380 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
9382 /* Decide if we need to save the link register.
9383 Interrupt routines have their own banked link register,
9384 so they never need to save it.
9385 Otherwise if we do not use the link register we do not need to save
9386 it. If we are pushing other registers onto the stack however, we
9387 can save an instruction in the epilogue by pushing the link register
9388 now and then popping it back into the PC. This incurs extra memory
9389 accesses though, so we only do it when optimizing for size, and only
9390 if we know that we will not need a fancy return sequence. */
9391 if (regs_ever_live [LR_REGNUM]
9394 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9395 && !current_function_calls_eh_return))
9396 save_reg_mask |= 1 << LR_REGNUM;
9398 if (cfun->machine->lr_save_eliminated)
9399 save_reg_mask &= ~ (1 << LR_REGNUM);
9401 if (TARGET_REALLY_IWMMXT
9402 && ((bit_count (save_reg_mask)
9403 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
9407 /* The total number of registers that are going to be pushed
9408 onto the stack is odd. We need to ensure that the stack
9409 is 64-bit aligned before we start to save iWMMXt registers,
9410 and also before we start to create locals. (A local variable
9411 might be a double or long long which we will load/store using
9412 an iWMMXt instruction). Therefore we need to push another
9413 ARM register, so that the stack will be 64-bit aligned. We
9414 try to avoid using the arg registers (r0 -r3) as they might be
9415 used to pass values in a tail call. */
9416 for (reg = 4; reg <= 12; reg++)
9417 if ((save_reg_mask & (1 << reg)) == 0)
9421 save_reg_mask |= (1 << reg);
9424 cfun->machine->sibcall_blocked = 1;
9425 save_reg_mask |= (1 << 3);
9429 return save_reg_mask;
9433 /* Compute a bit mask of which registers need to be
9434 saved on the stack for the current function. */
9435 static unsigned long
9436 thumb_compute_save_reg_mask (void)
9442 for (reg = 0; reg < 12; reg ++)
9443 if (regs_ever_live[reg] && !call_used_regs[reg])
9447 && !TARGET_SINGLE_PIC_BASE
9448 && arm_pic_register != INVALID_REGNUM
9449 && current_function_uses_pic_offset_table)
9450 mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9452 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
9453 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
9454 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
9456 /* LR will also be pushed if any lo regs are pushed. */
9457 if (mask & 0xff || thumb_force_lr_save ())
9458 mask |= (1 << LR_REGNUM);
9460 /* Make sure we have a low work register if we need one.
9461 We will need one if we are going to push a high register,
9462 but we are not currently intending to push a low register. */
9463 if ((mask & 0xff) == 0
9464 && ((mask & 0x0f00) || TARGET_BACKTRACE))
9466 /* Use thumb_find_work_register to choose which register
9467 we will use. If the register is live then we will
9468 have to push it. Use LAST_LO_REGNUM as our fallback
9469 choice for the register to select. */
9470 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
9472 if (! call_used_regs[reg])
9480 /* Return the number of bytes required to save VFP registers. */
9482 arm_get_vfp_saved_size (void)
9489 /* Space for saved VFP registers. */
9490 if (TARGET_HARD_FLOAT && TARGET_VFP)
9493 for (regno = FIRST_VFP_REGNUM;
9494 regno < LAST_VFP_REGNUM;
9497 if ((!regs_ever_live[regno] || call_used_regs[regno])
9498 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
9502 /* Workaround ARM10 VFPr1 bug. */
9503 if (count == 2 && !arm_arch6)
9505 saved += count * 8 + 4;
9514 if (count == 2 && !arm_arch6)
9516 saved += count * 8 + 4;
9523 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9524 everything bar the final return instruction. */
9526 output_return_instruction (rtx operand, int really_return, int reverse)
9528 char conditional[10];
9531 unsigned long live_regs_mask;
9532 unsigned long func_type;
9533 arm_stack_offsets *offsets;
9535 func_type = arm_current_func_type ();
9537 if (IS_NAKED (func_type))
9540 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9542 /* If this function was declared non-returning, and we have
9543 found a tail call, then we have to trust that the called
9544 function won't return. */
9549 /* Otherwise, trap an attempted return by aborting. */
9551 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9553 assemble_external_libcall (ops[1]);
9554 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9560 gcc_assert (!current_function_calls_alloca || really_return);
9562 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9564 return_used_this_function = 1;
9566 live_regs_mask = arm_compute_save_reg_mask ();
9570 const char * return_reg;
9572 /* If we do not have any special requirements for function exit
9573 (e.g. interworking, or ISR) then we can load the return address
9574 directly into the PC. Otherwise we must load it into LR. */
9576 && ! TARGET_INTERWORK)
9577 return_reg = reg_names[PC_REGNUM];
9579 return_reg = reg_names[LR_REGNUM];
9581 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9583 /* There are three possible reasons for the IP register
9584 being saved. 1) a stack frame was created, in which case
9585 IP contains the old stack pointer, or 2) an ISR routine
9586 corrupted it, or 3) it was saved to align the stack on
9587 iWMMXt. In case 1, restore IP into SP, otherwise just
9589 if (frame_pointer_needed)
9591 live_regs_mask &= ~ (1 << IP_REGNUM);
9592 live_regs_mask |= (1 << SP_REGNUM);
9595 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
9598 /* On some ARM architectures it is faster to use LDR rather than
9599 LDM to load a single register. On other architectures, the
9600 cost is the same. In 26 bit mode, or for exception handlers,
9601 we have to use LDM to load the PC so that the CPSR is also
9603 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9604 if (live_regs_mask == (1U << reg))
9607 if (reg <= LAST_ARM_REGNUM
9608 && (reg != LR_REGNUM
9610 || ! IS_INTERRUPT (func_type)))
9612 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9613 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9620 /* Generate the load multiple instruction to restore the
9621 registers. Note we can get here, even if
9622 frame_pointer_needed is true, but only if sp already
9623 points to the base of the saved core registers. */
9624 if (live_regs_mask & (1 << SP_REGNUM))
9626 unsigned HOST_WIDE_INT stack_adjust;
9628 offsets = arm_get_frame_offsets ();
9629 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9630 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
9632 if (stack_adjust && arm_arch5)
9633 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9636 /* If we can't use ldmib (SA110 bug),
9637 then try to pop r3 instead. */
9639 live_regs_mask |= 1 << 3;
9640 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9644 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9646 p = instr + strlen (instr);
9648 for (reg = 0; reg <= SP_REGNUM; reg++)
9649 if (live_regs_mask & (1 << reg))
9651 int l = strlen (reg_names[reg]);
9657 memcpy (p, ", ", 2);
9661 memcpy (p, "%|", 2);
9662 memcpy (p + 2, reg_names[reg], l);
9666 if (live_regs_mask & (1 << LR_REGNUM))
9668 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9669 /* If returning from an interrupt, restore the CPSR. */
9670 if (IS_INTERRUPT (func_type))
9677 output_asm_insn (instr, & operand);
9679 /* See if we need to generate an extra instruction to
9680 perform the actual function return. */
9682 && func_type != ARM_FT_INTERWORKED
9683 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9685 /* The return has already been handled
9686 by loading the LR into the PC. */
9693 switch ((int) ARM_FUNC_TYPE (func_type))
9697 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9700 case ARM_FT_INTERWORKED:
9701 sprintf (instr, "bx%s\t%%|lr", conditional);
9704 case ARM_FT_EXCEPTION:
9705 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9709 /* Use bx if it's available. */
9710 if (arm_arch5 || arm_arch4t)
9711 sprintf (instr, "bx%s\t%%|lr", conditional);
9713 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9717 output_asm_insn (instr, & operand);
9723 /* Write the function name into the code section, directly preceding
9724 the function prologue.
9726 Code will be output similar to this:
9728 .ascii "arm_poke_function_name", 0
9731 .word 0xff000000 + (t1 - t0)
9732 arm_poke_function_name
9734 stmfd sp!, {fp, ip, lr, pc}
9737 When performing a stack backtrace, code can inspect the value
9738 of 'pc' stored at 'fp' + 0. If the trace function then looks
9739 at location pc - 12 and the top 8 bits are set, then we know
9740 that there is a function name embedded immediately preceding this
9741 location and has length ((pc[-3]) & 0xff000000).
9743 We assume that pc is declared as a pointer to an unsigned long.
9745 It is of no benefit to output the function name if we are assembling
9746 a leaf function. These function types will not contain a stack
9747 backtrace structure, therefore it is not possible to determine the
9750 arm_poke_function_name (FILE *stream, const char *name)
9752 unsigned long alignlength;
9753 unsigned long length;
9756 length = strlen (name) + 1;
9757 alignlength = ROUND_UP_WORD (length);
9759 ASM_OUTPUT_ASCII (stream, name, length);
9760 ASM_OUTPUT_ALIGN (stream, 2);
9761 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9762 assemble_aligned_integer (UNITS_PER_WORD, x);
9765 /* Place some comments into the assembler stream
9766 describing the current function. */
9768 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9770 unsigned long func_type;
9774 thumb_output_function_prologue (f, frame_size);
9779 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
9781 func_type = arm_current_func_type ();
9783 switch ((int) ARM_FUNC_TYPE (func_type))
9788 case ARM_FT_INTERWORKED:
9789 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9792 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9795 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9797 case ARM_FT_EXCEPTION:
9798 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9802 if (IS_NAKED (func_type))
9803 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9805 if (IS_VOLATILE (func_type))
9806 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9808 if (IS_NESTED (func_type))
9809 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9811 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9812 current_function_args_size,
9813 current_function_pretend_args_size, frame_size);
9815 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9816 frame_pointer_needed,
9817 cfun->machine->uses_anonymous_args);
9819 if (cfun->machine->lr_save_eliminated)
9820 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9822 if (current_function_calls_eh_return)
9823 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9825 #ifdef AOF_ASSEMBLER
9827 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9830 return_used_this_function = 0;
9834 arm_output_epilogue (rtx sibling)
9837 unsigned long saved_regs_mask;
9838 unsigned long func_type;
9839 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9840 frame that is $fp + 4 for a non-variadic function. */
9841 int floats_offset = 0;
9843 FILE * f = asm_out_file;
9844 unsigned int lrm_count = 0;
9845 int really_return = (sibling == NULL);
9847 arm_stack_offsets *offsets;
9849 /* If we have already generated the return instruction
9850 then it is futile to generate anything else. */
9851 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9854 func_type = arm_current_func_type ();
9856 if (IS_NAKED (func_type))
9857 /* Naked functions don't have epilogues. */
9860 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9864 /* A volatile function should never return. Call abort. */
9865 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9866 assemble_external_libcall (op);
9867 output_asm_insn ("bl\t%a0", &op);
9872 /* If we are throwing an exception, then we really must be doing a
9873 return, so we can't tail-call. */
9874 gcc_assert (!current_function_calls_eh_return || really_return);
9876 offsets = arm_get_frame_offsets ();
9877 saved_regs_mask = arm_compute_save_reg_mask ();
9880 lrm_count = bit_count (saved_regs_mask);
9882 floats_offset = offsets->saved_args;
9883 /* Compute how far away the floats will be. */
9884 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9885 if (saved_regs_mask & (1 << reg))
9888 if (frame_pointer_needed)
9890 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9891 int vfp_offset = offsets->frame;
9893 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9895 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9896 if (regs_ever_live[reg] && !call_used_regs[reg])
9898 floats_offset += 12;
9899 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9900 reg, FP_REGNUM, floats_offset - vfp_offset);
9905 start_reg = LAST_FPA_REGNUM;
9907 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9909 if (regs_ever_live[reg] && !call_used_regs[reg])
9911 floats_offset += 12;
9913 /* We can't unstack more than four registers at once. */
9914 if (start_reg - reg == 3)
9916 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9917 reg, FP_REGNUM, floats_offset - vfp_offset);
9918 start_reg = reg - 1;
9923 if (reg != start_reg)
9924 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9925 reg + 1, start_reg - reg,
9926 FP_REGNUM, floats_offset - vfp_offset);
9927 start_reg = reg - 1;
9931 /* Just in case the last register checked also needs unstacking. */
9932 if (reg != start_reg)
9933 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9934 reg + 1, start_reg - reg,
9935 FP_REGNUM, floats_offset - vfp_offset);
9938 if (TARGET_HARD_FLOAT && TARGET_VFP)
9942 /* The fldmx insn does not have base+offset addressing modes,
9943 so we use IP to hold the address. */
9944 saved_size = arm_get_vfp_saved_size ();
9948 floats_offset += saved_size;
9949 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9950 FP_REGNUM, floats_offset - vfp_offset);
9952 start_reg = FIRST_VFP_REGNUM;
9953 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9955 if ((!regs_ever_live[reg] || call_used_regs[reg])
9956 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9958 if (start_reg != reg)
9959 arm_output_fldmx (f, IP_REGNUM,
9960 (start_reg - FIRST_VFP_REGNUM) / 2,
9961 (reg - start_reg) / 2);
9962 start_reg = reg + 2;
9965 if (start_reg != reg)
9966 arm_output_fldmx (f, IP_REGNUM,
9967 (start_reg - FIRST_VFP_REGNUM) / 2,
9968 (reg - start_reg) / 2);
9973 /* The frame pointer is guaranteed to be non-double-word aligned.
9974 This is because it is set to (old_stack_pointer - 4) and the
9975 old_stack_pointer was double word aligned. Thus the offset to
9976 the iWMMXt registers to be loaded must also be non-double-word
9977 sized, so that the resultant address *is* double-word aligned.
9978 We can ignore floats_offset since that was already included in
9979 the live_regs_mask. */
9980 lrm_count += (lrm_count % 2 ? 2 : 1);
9982 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9983 if (regs_ever_live[reg] && !call_used_regs[reg])
9985 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9986 reg, FP_REGNUM, lrm_count * 4);
9991 /* saved_regs_mask should contain the IP, which at the time of stack
9992 frame generation actually contains the old stack pointer. So a
9993 quick way to unwind the stack is just pop the IP register directly
9994 into the stack pointer. */
9995 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
9996 saved_regs_mask &= ~ (1 << IP_REGNUM);
9997 saved_regs_mask |= (1 << SP_REGNUM);
9999 /* There are two registers left in saved_regs_mask - LR and PC. We
10000 only need to restore the LR register (the return address), but to
10001 save time we can load it directly into the PC, unless we need a
10002 special function exit sequence, or we are not really returning. */
10004 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
10005 && !current_function_calls_eh_return)
10006 /* Delete the LR from the register mask, so that the LR on
10007 the stack is loaded into the PC in the register mask. */
10008 saved_regs_mask &= ~ (1 << LR_REGNUM);
10010 saved_regs_mask &= ~ (1 << PC_REGNUM);
10012 /* We must use SP as the base register, because SP is one of the
10013 registers being restored. If an interrupt or page fault
10014 happens in the ldm instruction, the SP might or might not
10015 have been restored. That would be bad, as then SP will no
10016 longer indicate the safe area of stack, and we can get stack
10017 corruption. Using SP as the base register means that it will
10018 be reset correctly to the original value, should an interrupt
10019 occur. If the stack pointer already points at the right
10020 place, then omit the subtraction. */
10021 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
10022 || current_function_calls_alloca)
10023 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
10024 4 * bit_count (saved_regs_mask));
10025 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
10027 if (IS_INTERRUPT (func_type))
10028 /* Interrupt handlers will have pushed the
10029 IP onto the stack, so restore it now. */
10030 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
10034 /* Restore stack pointer if necessary. */
10035 if (offsets->outgoing_args != offsets->saved_regs)
10037 operands[0] = operands[1] = stack_pointer_rtx;
10038 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
10039 output_add_immediate (operands);
10042 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10044 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
10045 if (regs_ever_live[reg] && !call_used_regs[reg])
10046 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
10051 start_reg = FIRST_FPA_REGNUM;
10053 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
10055 if (regs_ever_live[reg] && !call_used_regs[reg])
10057 if (reg - start_reg == 3)
10059 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
10060 start_reg, SP_REGNUM);
10061 start_reg = reg + 1;
10066 if (reg != start_reg)
10067 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
10068 start_reg, reg - start_reg,
10071 start_reg = reg + 1;
10075 /* Just in case the last register checked also needs unstacking. */
10076 if (reg != start_reg)
10077 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
10078 start_reg, reg - start_reg, SP_REGNUM);
10081 if (TARGET_HARD_FLOAT && TARGET_VFP)
10083 start_reg = FIRST_VFP_REGNUM;
10084 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10086 if ((!regs_ever_live[reg] || call_used_regs[reg])
10087 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10089 if (start_reg != reg)
10090 arm_output_fldmx (f, SP_REGNUM,
10091 (start_reg - FIRST_VFP_REGNUM) / 2,
10092 (reg - start_reg) / 2);
10093 start_reg = reg + 2;
10096 if (start_reg != reg)
10097 arm_output_fldmx (f, SP_REGNUM,
10098 (start_reg - FIRST_VFP_REGNUM) / 2,
10099 (reg - start_reg) / 2);
10102 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
10103 if (regs_ever_live[reg] && !call_used_regs[reg])
10104 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
10106 /* If we can, restore the LR into the PC. */
10107 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
10109 && current_function_pretend_args_size == 0
10110 && saved_regs_mask & (1 << LR_REGNUM)
10111 && !current_function_calls_eh_return)
10113 saved_regs_mask &= ~ (1 << LR_REGNUM);
10114 saved_regs_mask |= (1 << PC_REGNUM);
10117 /* Load the registers off the stack. If we only have one register
10118 to load use the LDR instruction - it is faster. */
10119 if (saved_regs_mask == (1 << LR_REGNUM))
10121 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
10123 else if (saved_regs_mask)
10125 if (saved_regs_mask & (1 << SP_REGNUM))
10126 /* Note - write back to the stack register is not enabled
10127 (i.e. "ldmfd sp!..."). We know that the stack pointer is
10128 in the list of registers and if we add writeback the
10129 instruction becomes UNPREDICTABLE. */
10130 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
10132 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
10135 if (current_function_pretend_args_size)
10137 /* Unwind the pre-pushed regs. */
10138 operands[0] = operands[1] = stack_pointer_rtx;
10139 operands[2] = GEN_INT (current_function_pretend_args_size);
10140 output_add_immediate (operands);
10144 /* We may have already restored PC directly from the stack. */
10145 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
10148 /* Stack adjustment for exception handler. */
10149 if (current_function_calls_eh_return)
10150 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
10151 ARM_EH_STACKADJ_REGNUM);
10153 /* Generate the return instruction. */
10154 switch ((int) ARM_FUNC_TYPE (func_type))
10158 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
10161 case ARM_FT_EXCEPTION:
10162 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10165 case ARM_FT_INTERWORKED:
10166 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10170 if (arm_arch5 || arm_arch4t)
10171 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10173 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10181 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
10182 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
10184 arm_stack_offsets *offsets;
10190 /* Emit any call-via-reg trampolines that are needed for v4t support
10191 of call_reg and call_value_reg type insns. */
10192 for (regno = 0; regno < LR_REGNUM; regno++)
10194 rtx label = cfun->machine->call_via[regno];
10198 switch_to_section (function_section (current_function_decl));
10199 targetm.asm_out.internal_label (asm_out_file, "L",
10200 CODE_LABEL_NUMBER (label));
10201 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
10205 /* ??? Probably not safe to set this here, since it assumes that a
10206 function will be emitted as assembly immediately after we generate
10207 RTL for it. This does not happen for inline functions. */
10208 return_used_this_function = 0;
10212 /* We need to take into account any stack-frame rounding. */
10213 offsets = arm_get_frame_offsets ();
10215 gcc_assert (!use_return_insn (FALSE, NULL)
10216 || !return_used_this_function
10217 || offsets->saved_regs == offsets->outgoing_args
10218 || frame_pointer_needed);
10220 /* Reset the ARM-specific per-function variables. */
10221 after_arm_reorg = 0;
10225 /* Generate and emit an insn that we will recognize as a push_multi.
10226 Unfortunately, since this insn does not reflect very well the actual
10227 semantics of the operation, we need to annotate the insn for the benefit
10228 of DWARF2 frame unwind information. */
10230 emit_multi_reg_push (unsigned long mask)
10233 int num_dwarf_regs;
10237 int dwarf_par_index;
10240 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10241 if (mask & (1 << i))
10244 gcc_assert (num_regs && num_regs <= 16);
10246 /* We don't record the PC in the dwarf frame information. */
10247 num_dwarf_regs = num_regs;
10248 if (mask & (1 << PC_REGNUM))
10251 /* For the body of the insn we are going to generate an UNSPEC in
10252 parallel with several USEs. This allows the insn to be recognized
10253 by the push_multi pattern in the arm.md file. The insn looks
10254 something like this:
10257 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
10258 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
10259 (use (reg:SI 11 fp))
10260 (use (reg:SI 12 ip))
10261 (use (reg:SI 14 lr))
10262 (use (reg:SI 15 pc))
10265 For the frame note however, we try to be more explicit and actually
10266 show each register being stored into the stack frame, plus a (single)
10267 decrement of the stack pointer. We do it this way in order to be
10268 friendly to the stack unwinding code, which only wants to see a single
10269 stack decrement per instruction. The RTL we generate for the note looks
10270 something like this:
10273 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
10274 (set (mem:SI (reg:SI sp)) (reg:SI r4))
10275 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
10276 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
10277 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
10280 This sequence is used both by the code to support stack unwinding for
10281 exceptions handlers and the code to generate dwarf2 frame debugging. */
10283 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
10284 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
10285 dwarf_par_index = 1;
10287 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10289 if (mask & (1 << i))
10291 reg = gen_rtx_REG (SImode, i);
10293 XVECEXP (par, 0, 0)
10294 = gen_rtx_SET (VOIDmode,
10295 gen_frame_mem (BLKmode,
10296 gen_rtx_PRE_DEC (BLKmode,
10297 stack_pointer_rtx)),
10298 gen_rtx_UNSPEC (BLKmode,
10299 gen_rtvec (1, reg),
10300 UNSPEC_PUSH_MULT));
10302 if (i != PC_REGNUM)
10304 tmp = gen_rtx_SET (VOIDmode,
10305 gen_frame_mem (SImode, stack_pointer_rtx),
10307 RTX_FRAME_RELATED_P (tmp) = 1;
10308 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
10316 for (j = 1, i++; j < num_regs; i++)
10318 if (mask & (1 << i))
10320 reg = gen_rtx_REG (SImode, i);
10322 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
10324 if (i != PC_REGNUM)
10327 = gen_rtx_SET (VOIDmode,
10328 gen_frame_mem (SImode,
10329 plus_constant (stack_pointer_rtx,
10332 RTX_FRAME_RELATED_P (tmp) = 1;
10333 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
10340 par = emit_insn (par);
10342 tmp = gen_rtx_SET (VOIDmode,
10344 plus_constant (stack_pointer_rtx, -4 * num_regs));
10345 RTX_FRAME_RELATED_P (tmp) = 1;
10346 XVECEXP (dwarf, 0, 0) = tmp;
10348 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10353 /* Calculate the size of the return value that is passed in registers. */
10355 arm_size_return_regs (void)
10357 enum machine_mode mode;
10359 if (current_function_return_rtx != 0)
10360 mode = GET_MODE (current_function_return_rtx);
10362 mode = DECL_MODE (DECL_RESULT (current_function_decl));
10364 return GET_MODE_SIZE (mode);
10368 emit_sfm (int base_reg, int count)
10375 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
10376 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
10378 reg = gen_rtx_REG (XFmode, base_reg++);
10380 XVECEXP (par, 0, 0)
10381 = gen_rtx_SET (VOIDmode,
10382 gen_frame_mem (BLKmode,
10383 gen_rtx_PRE_DEC (BLKmode,
10384 stack_pointer_rtx)),
10385 gen_rtx_UNSPEC (BLKmode,
10386 gen_rtvec (1, reg),
10387 UNSPEC_PUSH_MULT));
10388 tmp = gen_rtx_SET (VOIDmode,
10389 gen_frame_mem (XFmode, stack_pointer_rtx), reg);
10390 RTX_FRAME_RELATED_P (tmp) = 1;
10391 XVECEXP (dwarf, 0, 1) = tmp;
10393 for (i = 1; i < count; i++)
10395 reg = gen_rtx_REG (XFmode, base_reg++);
10396 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
10398 tmp = gen_rtx_SET (VOIDmode,
10399 gen_frame_mem (XFmode,
10400 plus_constant (stack_pointer_rtx,
10403 RTX_FRAME_RELATED_P (tmp) = 1;
10404 XVECEXP (dwarf, 0, i + 1) = tmp;
10407 tmp = gen_rtx_SET (VOIDmode,
10409 plus_constant (stack_pointer_rtx, -12 * count));
10411 RTX_FRAME_RELATED_P (tmp) = 1;
10412 XVECEXP (dwarf, 0, 0) = tmp;
10414 par = emit_insn (par);
10415 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10421 /* Return true if the current function needs to save/restore LR. */
10424 thumb_force_lr_save (void)
10426 return !cfun->machine->lr_save_eliminated
10427 && (!leaf_function_p ()
10428 || thumb_far_jump_used_p ()
10429 || regs_ever_live [LR_REGNUM]);
10433 /* Compute the distance from register FROM to register TO.
10434 These can be the arg pointer (26), the soft frame pointer (25),
10435 the stack pointer (13) or the hard frame pointer (11).
10436 In thumb mode r7 is used as the soft frame pointer, if needed.
10437 Typical stack layout looks like this:
10439 old stack pointer -> | |
10442 | | saved arguments for
10443 | | vararg functions
10446 hard FP & arg pointer -> | | \
10454 soft frame pointer -> | | /
10459 locals base pointer -> | | /
10464 current stack pointer -> | | /
10467 For a given function some or all of these stack components
10468 may not be needed, giving rise to the possibility of
10469 eliminating some of the registers.
10471 The values returned by this function must reflect the behavior
10472 of arm_expand_prologue() and arm_compute_save_reg_mask().
10474 The sign of the number returned reflects the direction of stack
10475 growth, so the values are positive for all eliminations except
10476 from the soft frame pointer to the hard frame pointer.
10478 SFP may point just inside the local variables block to ensure correct
10482 /* Calculate stack offsets. These are used to calculate register elimination
10483 offsets and in prologue/epilogue code. */
10485 static arm_stack_offsets *
10486 arm_get_frame_offsets (void)
10488 struct arm_stack_offsets *offsets;
10489 unsigned long func_type;
10492 HOST_WIDE_INT frame_size;
10494 offsets = &cfun->machine->stack_offsets;
10496 /* We need to know if we are a leaf function. Unfortunately, it
10497 is possible to be called after start_sequence has been called,
10498 which causes get_insns to return the insns for the sequence,
10499 not the function, which will cause leaf_function_p to return
10500 the incorrect result.
10502 to know about leaf functions once reload has completed, and the
10503 frame size cannot be changed after that time, so we can safely
10504 use the cached value. */
10506 if (reload_completed)
10509 /* Initially this is the size of the local variables. It will translated
10510 into an offset once we have determined the size of preceding data. */
10511 frame_size = ROUND_UP_WORD (get_frame_size ());
10513 leaf = leaf_function_p ();
10515 /* Space for variadic functions. */
10516 offsets->saved_args = current_function_pretend_args_size;
10518 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10522 unsigned int regno;
10524 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10526 /* We know that SP will be doubleword aligned on entry, and we must
10527 preserve that condition at any subroutine call. We also require the
10528 soft frame pointer to be doubleword aligned. */
10530 if (TARGET_REALLY_IWMMXT)
10532 /* Check for the call-saved iWMMXt registers. */
10533 for (regno = FIRST_IWMMXT_REGNUM;
10534 regno <= LAST_IWMMXT_REGNUM;
10536 if (regs_ever_live [regno] && ! call_used_regs [regno])
10540 func_type = arm_current_func_type ();
10541 if (! IS_VOLATILE (func_type))
10543 /* Space for saved FPA registers. */
10544 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10545 if (regs_ever_live[regno] && ! call_used_regs[regno])
10548 /* Space for saved VFP registers. */
10549 if (TARGET_HARD_FLOAT && TARGET_VFP)
10550 saved += arm_get_vfp_saved_size ();
10553 else /* TARGET_THUMB */
10555 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10556 if (TARGET_BACKTRACE)
10560 /* Saved registers include the stack frame. */
10561 offsets->saved_regs = offsets->saved_args + saved;
10562 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10563 /* A leaf function does not need any stack alignment if it has nothing
10565 if (leaf && frame_size == 0)
10567 offsets->outgoing_args = offsets->soft_frame;
10568 offsets->locals_base = offsets->soft_frame;
10572 /* Ensure SFP has the correct alignment. */
10573 if (ARM_DOUBLEWORD_ALIGN
10574 && (offsets->soft_frame & 7))
10575 offsets->soft_frame += 4;
10577 offsets->locals_base = offsets->soft_frame + frame_size;
10578 offsets->outgoing_args = (offsets->locals_base
10579 + current_function_outgoing_args_size);
10581 if (ARM_DOUBLEWORD_ALIGN)
10583 /* Ensure SP remains doubleword aligned. */
10584 if (offsets->outgoing_args & 7)
10585 offsets->outgoing_args += 4;
10586 gcc_assert (!(offsets->outgoing_args & 7));
10593 /* Calculate the relative offsets for the different stack pointers. Positive
10594 offsets are in the direction of stack growth. */
10597 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10599 arm_stack_offsets *offsets;
10601 offsets = arm_get_frame_offsets ();
10603 /* OK, now we have enough information to compute the distances.
10604 There must be an entry in these switch tables for each pair
10605 of registers in ELIMINABLE_REGS, even if some of the entries
10606 seem to be redundant or useless. */
10609 case ARG_POINTER_REGNUM:
10612 case THUMB_HARD_FRAME_POINTER_REGNUM:
10615 case FRAME_POINTER_REGNUM:
10616 /* This is the reverse of the soft frame pointer
10617 to hard frame pointer elimination below. */
10618 return offsets->soft_frame - offsets->saved_args;
10620 case ARM_HARD_FRAME_POINTER_REGNUM:
10621 /* If there is no stack frame then the hard
10622 frame pointer and the arg pointer coincide. */
10623 if (offsets->frame == offsets->saved_regs)
10625 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10626 return (frame_pointer_needed
10627 && cfun->static_chain_decl != NULL
10628 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10630 case STACK_POINTER_REGNUM:
10631 /* If nothing has been pushed on the stack at all
10632 then this will return -4. This *is* correct! */
10633 return offsets->outgoing_args - (offsets->saved_args + 4);
10636 gcc_unreachable ();
10638 gcc_unreachable ();
10640 case FRAME_POINTER_REGNUM:
10643 case THUMB_HARD_FRAME_POINTER_REGNUM:
10646 case ARM_HARD_FRAME_POINTER_REGNUM:
10647 /* The hard frame pointer points to the top entry in the
10648 stack frame. The soft frame pointer to the bottom entry
10649 in the stack frame. If there is no stack frame at all,
10650 then they are identical. */
10652 return offsets->frame - offsets->soft_frame;
10654 case STACK_POINTER_REGNUM:
10655 return offsets->outgoing_args - offsets->soft_frame;
10658 gcc_unreachable ();
10660 gcc_unreachable ();
10663 /* You cannot eliminate from the stack pointer.
10664 In theory you could eliminate from the hard frame
10665 pointer to the stack pointer, but this will never
10666 happen, since if a stack frame is not needed the
10667 hard frame pointer will never be used. */
10668 gcc_unreachable ();
10673 /* Generate the prologue instructions for entry into an ARM function. */
10675 arm_expand_prologue (void)
10681 unsigned long live_regs_mask;
10682 unsigned long func_type;
10684 int saved_pretend_args = 0;
10685 int saved_regs = 0;
10686 unsigned HOST_WIDE_INT args_to_push;
10687 arm_stack_offsets *offsets;
10689 func_type = arm_current_func_type ();
10691 /* Naked functions don't have prologues. */
10692 if (IS_NAKED (func_type))
10695 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10696 args_to_push = current_function_pretend_args_size;
10698 /* Compute which register we will have to save onto the stack. */
10699 live_regs_mask = arm_compute_save_reg_mask ();
10701 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10703 if (frame_pointer_needed)
10705 if (IS_INTERRUPT (func_type))
10707 /* Interrupt functions must not corrupt any registers.
10708 Creating a frame pointer however, corrupts the IP
10709 register, so we must push it first. */
10710 insn = emit_multi_reg_push (1 << IP_REGNUM);
10712 /* Do not set RTX_FRAME_RELATED_P on this insn.
10713 The dwarf stack unwinding code only wants to see one
10714 stack decrement per function, and this is not it. If
10715 this instruction is labeled as being part of the frame
10716 creation sequence then dwarf2out_frame_debug_expr will
10717 die when it encounters the assignment of IP to FP
10718 later on, since the use of SP here establishes SP as
10719 the CFA register and not IP.
10721 Anyway this instruction is not really part of the stack
10722 frame creation although it is part of the prologue. */
10724 else if (IS_NESTED (func_type))
10726 /* The Static chain register is the same as the IP register
10727 used as a scratch register during stack frame creation.
10728 To get around this need to find somewhere to store IP
10729 whilst the frame is being created. We try the following
10732 1. The last argument register.
10733 2. A slot on the stack above the frame. (This only
10734 works if the function is not a varargs function).
10735 3. Register r3, after pushing the argument registers
10738 Note - we only need to tell the dwarf2 backend about the SP
10739 adjustment in the second variant; the static chain register
10740 doesn't need to be unwound, as it doesn't contain a value
10741 inherited from the caller. */
10743 if (regs_ever_live[3] == 0)
10744 insn = emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
10745 else if (args_to_push == 0)
10749 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10750 insn = emit_set_insn (gen_frame_mem (SImode, insn), ip_rtx);
10753 /* Just tell the dwarf backend that we adjusted SP. */
10754 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10755 plus_constant (stack_pointer_rtx,
10757 RTX_FRAME_RELATED_P (insn) = 1;
10758 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10759 dwarf, REG_NOTES (insn));
10763 /* Store the args on the stack. */
10764 if (cfun->machine->uses_anonymous_args)
10765 insn = emit_multi_reg_push
10766 ((0xf0 >> (args_to_push / 4)) & 0xf);
10769 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10770 GEN_INT (- args_to_push)));
10772 RTX_FRAME_RELATED_P (insn) = 1;
10774 saved_pretend_args = 1;
10775 fp_offset = args_to_push;
10778 /* Now reuse r3 to preserve IP. */
10779 emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
10783 insn = emit_set_insn (ip_rtx,
10784 plus_constant (stack_pointer_rtx, fp_offset));
10785 RTX_FRAME_RELATED_P (insn) = 1;
10790 /* Push the argument registers, or reserve space for them. */
10791 if (cfun->machine->uses_anonymous_args)
10792 insn = emit_multi_reg_push
10793 ((0xf0 >> (args_to_push / 4)) & 0xf);
10796 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10797 GEN_INT (- args_to_push)));
10798 RTX_FRAME_RELATED_P (insn) = 1;
10801 /* If this is an interrupt service routine, and the link register
10802 is going to be pushed, and we are not creating a stack frame,
10803 (which would involve an extra push of IP and a pop in the epilogue)
10804 subtracting four from LR now will mean that the function return
10805 can be done with a single instruction. */
10806 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10807 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10808 && ! frame_pointer_needed)
10810 rtx lr = gen_rtx_REG (SImode, LR_REGNUM);
10812 emit_set_insn (lr, plus_constant (lr, -4));
10815 if (live_regs_mask)
10817 insn = emit_multi_reg_push (live_regs_mask);
10818 saved_regs += bit_count (live_regs_mask) * 4;
10819 RTX_FRAME_RELATED_P (insn) = 1;
10823 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10824 if (regs_ever_live[reg] && ! call_used_regs [reg])
10826 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10827 insn = gen_frame_mem (V2SImode, insn);
10828 insn = emit_set_insn (insn, gen_rtx_REG (V2SImode, reg));
10829 RTX_FRAME_RELATED_P (insn) = 1;
10833 if (! IS_VOLATILE (func_type))
10837 /* Save any floating point call-saved registers used by this
10839 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10841 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10842 if (regs_ever_live[reg] && !call_used_regs[reg])
10844 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10845 insn = gen_frame_mem (XFmode, insn);
10846 insn = emit_set_insn (insn, gen_rtx_REG (XFmode, reg));
10847 RTX_FRAME_RELATED_P (insn) = 1;
10853 start_reg = LAST_FPA_REGNUM;
10855 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10857 if (regs_ever_live[reg] && !call_used_regs[reg])
10859 if (start_reg - reg == 3)
10861 insn = emit_sfm (reg, 4);
10862 RTX_FRAME_RELATED_P (insn) = 1;
10864 start_reg = reg - 1;
10869 if (start_reg != reg)
10871 insn = emit_sfm (reg + 1, start_reg - reg);
10872 RTX_FRAME_RELATED_P (insn) = 1;
10873 saved_regs += (start_reg - reg) * 12;
10875 start_reg = reg - 1;
10879 if (start_reg != reg)
10881 insn = emit_sfm (reg + 1, start_reg - reg);
10882 saved_regs += (start_reg - reg) * 12;
10883 RTX_FRAME_RELATED_P (insn) = 1;
10886 if (TARGET_HARD_FLOAT && TARGET_VFP)
10888 start_reg = FIRST_VFP_REGNUM;
10890 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10892 if ((!regs_ever_live[reg] || call_used_regs[reg])
10893 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10895 if (start_reg != reg)
10896 saved_regs += vfp_emit_fstmx (start_reg,
10897 (reg - start_reg) / 2);
10898 start_reg = reg + 2;
10901 if (start_reg != reg)
10902 saved_regs += vfp_emit_fstmx (start_reg,
10903 (reg - start_reg) / 2);
10907 if (frame_pointer_needed)
10909 /* Create the new frame pointer. */
10910 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10911 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10912 RTX_FRAME_RELATED_P (insn) = 1;
10914 if (IS_NESTED (func_type))
10916 /* Recover the static chain register. */
10917 if (regs_ever_live [3] == 0
10918 || saved_pretend_args)
10919 insn = gen_rtx_REG (SImode, 3);
10920 else /* if (current_function_pretend_args_size == 0) */
10922 insn = plus_constant (hard_frame_pointer_rtx, 4);
10923 insn = gen_frame_mem (SImode, insn);
10926 emit_set_insn (ip_rtx, insn);
10927 /* Add a USE to stop propagate_one_insn() from barfing. */
10928 emit_insn (gen_prologue_use (ip_rtx));
10932 offsets = arm_get_frame_offsets ();
10933 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10935 /* This add can produce multiple insns for a large constant, so we
10936 need to get tricky. */
10937 rtx last = get_last_insn ();
10939 amount = GEN_INT (offsets->saved_args + saved_regs
10940 - offsets->outgoing_args);
10942 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10946 last = last ? NEXT_INSN (last) : get_insns ();
10947 RTX_FRAME_RELATED_P (last) = 1;
10949 while (last != insn);
10951 /* If the frame pointer is needed, emit a special barrier that
10952 will prevent the scheduler from moving stores to the frame
10953 before the stack adjustment. */
10954 if (frame_pointer_needed)
10955 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10956 hard_frame_pointer_rtx));
10960 if (flag_pic && arm_pic_register != INVALID_REGNUM)
10961 arm_load_pic_register (0UL);
10963 /* If we are profiling, make sure no instructions are scheduled before
10964 the call to mcount. Similarly if the user has requested no
10965 scheduling in the prolog. Similarly if we want non-call exceptions
10966 using the EABI unwinder, to prevent faulting instructions from being
10967 swapped with a stack adjustment. */
10968 if (current_function_profile || !TARGET_SCHED_PROLOG
10969 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
10970 emit_insn (gen_blockage ());
10972 /* If the link register is being kept alive, with the return address in it,
10973 then make sure that it does not get reused by the ce2 pass. */
10974 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10976 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10977 cfun->machine->lr_save_eliminated = 1;
10981 /* If CODE is 'd', then the X is a condition operand and the instruction
10982 should only be executed if the condition is true.
10983 if CODE is 'D', then the X is a condition operand and the instruction
10984 should only be executed if the condition is false: however, if the mode
10985 of the comparison is CCFPEmode, then always execute the instruction -- we
10986 do this because in these circumstances !GE does not necessarily imply LT;
10987 in these cases the instruction pattern will take care to make sure that
10988 an instruction containing %d will follow, thereby undoing the effects of
10989 doing this instruction unconditionally.
10990 If CODE is 'N' then X is a floating point operand that must be negated
10992 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10993 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
10995 arm_print_operand (FILE *stream, rtx x, int code)
11000 fputs (ASM_COMMENT_START, stream);
11004 fputs (user_label_prefix, stream);
11008 fputs (REGISTER_PREFIX, stream);
11012 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
11016 output_operand_lossage ("predicated Thumb instruction");
11019 if (current_insn_predicate != NULL)
11021 output_operand_lossage
11022 ("predicated instruction in conditional sequence");
11026 fputs (arm_condition_codes[arm_current_cc], stream);
11028 else if (current_insn_predicate)
11030 enum arm_cond_code code;
11034 output_operand_lossage ("predicated Thumb instruction");
11038 code = get_arm_condition_code (current_insn_predicate);
11039 fputs (arm_condition_codes[code], stream);
11046 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
11047 r = REAL_VALUE_NEGATE (r);
11048 fprintf (stream, "%s", fp_const_from_val (&r));
11053 if (GET_CODE (x) == CONST_INT)
11056 val = ARM_SIGN_EXTEND (~INTVAL (x));
11057 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
11061 putc ('~', stream);
11062 output_addr_const (stream, x);
11067 fprintf (stream, "%s", arithmetic_instr (x, 1));
11070 /* Truncate Cirrus shift counts. */
11072 if (GET_CODE (x) == CONST_INT)
11074 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
11077 arm_print_operand (stream, x, 0);
11081 fprintf (stream, "%s", arithmetic_instr (x, 0));
11089 if (!shift_operator (x, SImode))
11091 output_operand_lossage ("invalid shift operand");
11095 shift = shift_op (x, &val);
11099 fprintf (stream, ", %s ", shift);
11101 arm_print_operand (stream, XEXP (x, 1), 0);
11103 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
11108 /* An explanation of the 'Q', 'R' and 'H' register operands:
11110 In a pair of registers containing a DI or DF value the 'Q'
11111 operand returns the register number of the register containing
11112 the least significant part of the value. The 'R' operand returns
11113 the register number of the register containing the most
11114 significant part of the value.
11116 The 'H' operand returns the higher of the two register numbers.
11117 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
11118 same as the 'Q' operand, since the most significant part of the
11119 value is held in the lower number register. The reverse is true
11120 on systems where WORDS_BIG_ENDIAN is false.
11122 The purpose of these operands is to distinguish between cases
11123 where the endian-ness of the values is important (for example
11124 when they are added together), and cases where the endian-ness
11125 is irrelevant, but the order of register operations is important.
11126 For example when loading a value from memory into a register
11127 pair, the endian-ness does not matter. Provided that the value
11128 from the lower memory address is put into the lower numbered
11129 register, and the value from the higher address is put into the
11130 higher numbered register, the load will work regardless of whether
11131 the value being loaded is big-wordian or little-wordian. The
11132 order of the two register loads can matter however, if the address
11133 of the memory location is actually held in one of the registers
11134 being overwritten by the load. */
11136 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11138 output_operand_lossage ("invalid operand for code '%c'", code);
11142 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
11146 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11148 output_operand_lossage ("invalid operand for code '%c'", code);
11152 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
11156 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11158 output_operand_lossage ("invalid operand for code '%c'", code);
11162 asm_fprintf (stream, "%r", REGNO (x) + 1);
11166 asm_fprintf (stream, "%r",
11167 GET_CODE (XEXP (x, 0)) == REG
11168 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
11172 asm_fprintf (stream, "{%r-%r}",
11174 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
11178 /* CONST_TRUE_RTX means always -- that's the default. */
11179 if (x == const_true_rtx)
11182 if (!COMPARISON_P (x))
11184 output_operand_lossage ("invalid operand for code '%c'", code);
11188 fputs (arm_condition_codes[get_arm_condition_code (x)],
11193 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
11194 want to do that. */
11195 if (x == const_true_rtx)
11197 output_operand_lossage ("instruction never exectued");
11200 if (!COMPARISON_P (x))
11202 output_operand_lossage ("invalid operand for code '%c'", code);
11206 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
11207 (get_arm_condition_code (x))],
11211 /* Cirrus registers can be accessed in a variety of ways:
11212 single floating point (f)
11213 double floating point (d)
11215 64bit integer (dx). */
11216 case 'W': /* Cirrus register in F mode. */
11217 case 'X': /* Cirrus register in D mode. */
11218 case 'Y': /* Cirrus register in FX mode. */
11219 case 'Z': /* Cirrus register in DX mode. */
11220 gcc_assert (GET_CODE (x) == REG
11221 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
11223 fprintf (stream, "mv%s%s",
11225 : code == 'X' ? "d"
11226 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
11230 /* Print cirrus register in the mode specified by the register's mode. */
11233 int mode = GET_MODE (x);
11235 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
11237 output_operand_lossage ("invalid operand for code '%c'", code);
11241 fprintf (stream, "mv%s%s",
11242 mode == DFmode ? "d"
11243 : mode == SImode ? "fx"
11244 : mode == DImode ? "dx"
11245 : "f", reg_names[REGNO (x)] + 2);
11251 if (GET_CODE (x) != REG
11252 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
11253 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
11254 /* Bad value for wCG register number. */
11256 output_operand_lossage ("invalid operand for code '%c'", code);
11261 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
11264 /* Print an iWMMXt control register name. */
11266 if (GET_CODE (x) != CONST_INT
11268 || INTVAL (x) >= 16)
11269 /* Bad value for wC register number. */
11271 output_operand_lossage ("invalid operand for code '%c'", code);
11277 static const char * wc_reg_names [16] =
11279 "wCID", "wCon", "wCSSF", "wCASF",
11280 "wC4", "wC5", "wC6", "wC7",
11281 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
11282 "wC12", "wC13", "wC14", "wC15"
11285 fprintf (stream, wc_reg_names [INTVAL (x)]);
11289 /* Print a VFP double precision register name. */
11292 int mode = GET_MODE (x);
11295 if (mode != DImode && mode != DFmode)
11297 output_operand_lossage ("invalid operand for code '%c'", code);
11301 if (GET_CODE (x) != REG
11302 || !IS_VFP_REGNUM (REGNO (x)))
11304 output_operand_lossage ("invalid operand for code '%c'", code);
11308 num = REGNO(x) - FIRST_VFP_REGNUM;
11311 output_operand_lossage ("invalid operand for code '%c'", code);
11315 fprintf (stream, "d%d", num >> 1);
11322 output_operand_lossage ("missing operand");
11326 switch (GET_CODE (x))
11329 asm_fprintf (stream, "%r", REGNO (x));
11333 output_memory_reference_mode = GET_MODE (x);
11334 output_address (XEXP (x, 0));
11338 fprintf (stream, "#%s", fp_immediate_constant (x));
11342 gcc_assert (GET_CODE (x) != NEG);
11343 fputc ('#', stream);
11344 output_addr_const (stream, x);
11350 #ifndef AOF_ASSEMBLER
11351 /* Target hook for assembling integer objects. The ARM version needs to
11352 handle word-sized values specially. */
11354 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
11356 if (size == UNITS_PER_WORD && aligned_p)
11358 fputs ("\t.word\t", asm_out_file);
11359 output_addr_const (asm_out_file, x);
11361 /* Mark symbols as position independent. We only do this in the
11362 .text segment, not in the .data segment. */
11363 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
11364 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
11366 if (GET_CODE (x) == SYMBOL_REF
11367 && (CONSTANT_POOL_ADDRESS_P (x)
11368 || SYMBOL_REF_LOCAL_P (x)))
11369 fputs ("(GOTOFF)", asm_out_file);
11370 else if (GET_CODE (x) == LABEL_REF)
11371 fputs ("(GOTOFF)", asm_out_file);
11373 fputs ("(GOT)", asm_out_file);
11375 fputc ('\n', asm_out_file);
11379 if (arm_vector_mode_supported_p (GET_MODE (x)))
11383 gcc_assert (GET_CODE (x) == CONST_VECTOR);
11385 units = CONST_VECTOR_NUNITS (x);
11387 switch (GET_MODE (x))
11389 case V2SImode: size = 4; break;
11390 case V4HImode: size = 2; break;
11391 case V8QImode: size = 1; break;
11393 gcc_unreachable ();
11396 for (i = 0; i < units; i++)
11400 elt = CONST_VECTOR_ELT (x, i);
11402 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
11408 return default_assemble_integer (x, size, aligned_p);
11412 /* Add a function to the list of static constructors. */
11415 arm_elf_asm_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
11417 if (!TARGET_AAPCS_BASED)
11419 default_named_section_asm_out_constructor (symbol, priority);
11423 /* Put these in the .init_array section, using a special relocation. */
11424 switch_to_section (ctors_section);
11425 assemble_align (POINTER_SIZE);
11426 fputs ("\t.word\t", asm_out_file);
11427 output_addr_const (asm_out_file, symbol);
11428 fputs ("(target1)\n", asm_out_file);
11432 /* A finite state machine takes care of noticing whether or not instructions
11433 can be conditionally executed, and thus decrease execution time and code
11434 size by deleting branch instructions. The fsm is controlled by
11435 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
11437 /* The state of the fsm controlling condition codes are:
11438 0: normal, do nothing special
11439 1: make ASM_OUTPUT_OPCODE not output this instruction
11440 2: make ASM_OUTPUT_OPCODE not output this instruction
11441 3: make instructions conditional
11442 4: make instructions conditional
11444 State transitions (state->state by whom under condition):
11445 0 -> 1 final_prescan_insn if the `target' is a label
11446 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
11447 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
11448 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
11449 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
11450 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
11451 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
11452 (the target insn is arm_target_insn).
11454 If the jump clobbers the conditions then we use states 2 and 4.
11456 A similar thing can be done with conditional return insns.
11458 XXX In case the `target' is an unconditional branch, this conditionalising
11459 of the instructions always reduces code size, but not always execution
11460 time. But then, I want to reduce the code size to somewhere near what
11461 /bin/cc produces. */
11463 /* Returns the index of the ARM condition code string in
11464 `arm_condition_codes'. COMPARISON should be an rtx like
11465 `(eq (...) (...))'. */
11466 static enum arm_cond_code
11467 get_arm_condition_code (rtx comparison)
11469 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
11471 enum rtx_code comp_code = GET_CODE (comparison);
11473 if (GET_MODE_CLASS (mode) != MODE_CC)
11474 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
11475 XEXP (comparison, 1));
11479 case CC_DNEmode: code = ARM_NE; goto dominance;
11480 case CC_DEQmode: code = ARM_EQ; goto dominance;
11481 case CC_DGEmode: code = ARM_GE; goto dominance;
11482 case CC_DGTmode: code = ARM_GT; goto dominance;
11483 case CC_DLEmode: code = ARM_LE; goto dominance;
11484 case CC_DLTmode: code = ARM_LT; goto dominance;
11485 case CC_DGEUmode: code = ARM_CS; goto dominance;
11486 case CC_DGTUmode: code = ARM_HI; goto dominance;
11487 case CC_DLEUmode: code = ARM_LS; goto dominance;
11488 case CC_DLTUmode: code = ARM_CC;
11491 gcc_assert (comp_code == EQ || comp_code == NE);
11493 if (comp_code == EQ)
11494 return ARM_INVERSE_CONDITION_CODE (code);
11500 case NE: return ARM_NE;
11501 case EQ: return ARM_EQ;
11502 case GE: return ARM_PL;
11503 case LT: return ARM_MI;
11504 default: gcc_unreachable ();
11510 case NE: return ARM_NE;
11511 case EQ: return ARM_EQ;
11512 default: gcc_unreachable ();
11518 case NE: return ARM_MI;
11519 case EQ: return ARM_PL;
11520 default: gcc_unreachable ();
11525 /* These encodings assume that AC=1 in the FPA system control
11526 byte. This allows us to handle all cases except UNEQ and
11530 case GE: return ARM_GE;
11531 case GT: return ARM_GT;
11532 case LE: return ARM_LS;
11533 case LT: return ARM_MI;
11534 case NE: return ARM_NE;
11535 case EQ: return ARM_EQ;
11536 case ORDERED: return ARM_VC;
11537 case UNORDERED: return ARM_VS;
11538 case UNLT: return ARM_LT;
11539 case UNLE: return ARM_LE;
11540 case UNGT: return ARM_HI;
11541 case UNGE: return ARM_PL;
11542 /* UNEQ and LTGT do not have a representation. */
11543 case UNEQ: /* Fall through. */
11544 case LTGT: /* Fall through. */
11545 default: gcc_unreachable ();
11551 case NE: return ARM_NE;
11552 case EQ: return ARM_EQ;
11553 case GE: return ARM_LE;
11554 case GT: return ARM_LT;
11555 case LE: return ARM_GE;
11556 case LT: return ARM_GT;
11557 case GEU: return ARM_LS;
11558 case GTU: return ARM_CC;
11559 case LEU: return ARM_CS;
11560 case LTU: return ARM_HI;
11561 default: gcc_unreachable ();
11567 case LTU: return ARM_CS;
11568 case GEU: return ARM_CC;
11569 default: gcc_unreachable ();
11575 case NE: return ARM_NE;
11576 case EQ: return ARM_EQ;
11577 case GE: return ARM_GE;
11578 case GT: return ARM_GT;
11579 case LE: return ARM_LE;
11580 case LT: return ARM_LT;
11581 case GEU: return ARM_CS;
11582 case GTU: return ARM_HI;
11583 case LEU: return ARM_LS;
11584 case LTU: return ARM_CC;
11585 default: gcc_unreachable ();
11588 default: gcc_unreachable ();
11593 arm_final_prescan_insn (rtx insn)
11595 /* BODY will hold the body of INSN. */
11596 rtx body = PATTERN (insn);
11598 /* This will be 1 if trying to repeat the trick, and things need to be
11599 reversed if it appears to fail. */
11602 /* JUMP_CLOBBERS will be one implies that the conditions if a branch is
11603 taken are clobbered, even if the rtl suggests otherwise. It also
11604 means that we have to grub around within the jump expression to find
11605 out what the conditions are when the jump isn't taken. */
11606 int jump_clobbers = 0;
11608 /* If we start with a return insn, we only succeed if we find another one. */
11609 int seeking_return = 0;
11611 /* START_INSN will hold the insn from where we start looking. This is the
11612 first insn after the following code_label if REVERSE is true. */
11613 rtx start_insn = insn;
11615 /* If in state 4, check if the target branch is reached, in order to
11616 change back to state 0. */
11617 if (arm_ccfsm_state == 4)
11619 if (insn == arm_target_insn)
11621 arm_target_insn = NULL;
11622 arm_ccfsm_state = 0;
11627 /* If in state 3, it is possible to repeat the trick, if this insn is an
11628 unconditional branch to a label, and immediately following this branch
11629 is the previous target label which is only used once, and the label this
11630 branch jumps to is not too far off. */
11631 if (arm_ccfsm_state == 3)
11633 if (simplejump_p (insn))
11635 start_insn = next_nonnote_insn (start_insn);
11636 if (GET_CODE (start_insn) == BARRIER)
11638 /* XXX Isn't this always a barrier? */
11639 start_insn = next_nonnote_insn (start_insn);
11641 if (GET_CODE (start_insn) == CODE_LABEL
11642 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11643 && LABEL_NUSES (start_insn) == 1)
11648 else if (GET_CODE (body) == RETURN)
11650 start_insn = next_nonnote_insn (start_insn);
11651 if (GET_CODE (start_insn) == BARRIER)
11652 start_insn = next_nonnote_insn (start_insn);
11653 if (GET_CODE (start_insn) == CODE_LABEL
11654 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11655 && LABEL_NUSES (start_insn) == 1)
11658 seeking_return = 1;
11667 gcc_assert (!arm_ccfsm_state || reverse);
11668 if (GET_CODE (insn) != JUMP_INSN)
11671 /* This jump might be paralleled with a clobber of the condition codes
11672 the jump should always come first */
11673 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11674 body = XVECEXP (body, 0, 0);
11677 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11678 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11681 int fail = FALSE, succeed = FALSE;
11682 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11683 int then_not_else = TRUE;
11684 rtx this_insn = start_insn, label = 0;
11686 /* If the jump cannot be done with one instruction, we cannot
11687 conditionally execute the instruction in the inverse case. */
11688 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11694 /* Register the insn jumped to. */
11697 if (!seeking_return)
11698 label = XEXP (SET_SRC (body), 0);
11700 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11701 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11702 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11704 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11705 then_not_else = FALSE;
11707 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11708 seeking_return = 1;
11709 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11711 seeking_return = 1;
11712 then_not_else = FALSE;
11715 gcc_unreachable ();
11717 /* See how many insns this branch skips, and what kind of insns. If all
11718 insns are okay, and the label or unconditional branch to the same
11719 label is not too far away, succeed. */
11720 for (insns_skipped = 0;
11721 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11725 this_insn = next_nonnote_insn (this_insn);
11729 switch (GET_CODE (this_insn))
11732 /* Succeed if it is the target label, otherwise fail since
11733 control falls in from somewhere else. */
11734 if (this_insn == label)
11738 arm_ccfsm_state = 2;
11739 this_insn = next_nonnote_insn (this_insn);
11742 arm_ccfsm_state = 1;
11750 /* Succeed if the following insn is the target label.
11752 If return insns are used then the last insn in a function
11753 will be a barrier. */
11754 this_insn = next_nonnote_insn (this_insn);
11755 if (this_insn && this_insn == label)
11759 arm_ccfsm_state = 2;
11760 this_insn = next_nonnote_insn (this_insn);
11763 arm_ccfsm_state = 1;
11771 /* The AAPCS says that conditional calls should not be
11772 used since they make interworking inefficient (the
11773 linker can't transform BL<cond> into BLX). That's
11774 only a problem if the machine has BLX. */
11781 /* Succeed if the following insn is the target label, or
11782 if the following two insns are a barrier and the
11784 this_insn = next_nonnote_insn (this_insn);
11785 if (this_insn && GET_CODE (this_insn) == BARRIER)
11786 this_insn = next_nonnote_insn (this_insn);
11788 if (this_insn && this_insn == label
11789 && insns_skipped < max_insns_skipped)
11793 arm_ccfsm_state = 2;
11794 this_insn = next_nonnote_insn (this_insn);
11797 arm_ccfsm_state = 1;
11805 /* If this is an unconditional branch to the same label, succeed.
11806 If it is to another label, do nothing. If it is conditional,
11808 /* XXX Probably, the tests for SET and the PC are
11811 scanbody = PATTERN (this_insn);
11812 if (GET_CODE (scanbody) == SET
11813 && GET_CODE (SET_DEST (scanbody)) == PC)
11815 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11816 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11818 arm_ccfsm_state = 2;
11821 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11824 /* Fail if a conditional return is undesirable (e.g. on a
11825 StrongARM), but still allow this if optimizing for size. */
11826 else if (GET_CODE (scanbody) == RETURN
11827 && !use_return_insn (TRUE, NULL)
11830 else if (GET_CODE (scanbody) == RETURN
11833 arm_ccfsm_state = 2;
11836 else if (GET_CODE (scanbody) == PARALLEL)
11838 switch (get_attr_conds (this_insn))
11848 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11853 /* Instructions using or affecting the condition codes make it
11855 scanbody = PATTERN (this_insn);
11856 if (!(GET_CODE (scanbody) == SET
11857 || GET_CODE (scanbody) == PARALLEL)
11858 || get_attr_conds (this_insn) != CONDS_NOCOND)
11861 /* A conditional cirrus instruction must be followed by
11862 a non Cirrus instruction. However, since we
11863 conditionalize instructions in this function and by
11864 the time we get here we can't add instructions
11865 (nops), because shorten_branches() has already been
11866 called, we will disable conditionalizing Cirrus
11867 instructions to be safe. */
11868 if (GET_CODE (scanbody) != USE
11869 && GET_CODE (scanbody) != CLOBBER
11870 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11880 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11881 arm_target_label = CODE_LABEL_NUMBER (label);
11884 gcc_assert (seeking_return || arm_ccfsm_state == 2);
11886 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11888 this_insn = next_nonnote_insn (this_insn);
11889 gcc_assert (!this_insn
11890 || (GET_CODE (this_insn) != BARRIER
11891 && GET_CODE (this_insn) != CODE_LABEL));
11895 /* Oh, dear! we ran off the end.. give up. */
11896 recog (PATTERN (insn), insn, NULL);
11897 arm_ccfsm_state = 0;
11898 arm_target_insn = NULL;
11901 arm_target_insn = this_insn;
11905 gcc_assert (!reverse);
11907 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11909 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11910 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11911 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11912 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11916 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11919 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11923 if (reverse || then_not_else)
11924 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11927 /* Restore recog_data (getting the attributes of other insns can
11928 destroy this array, but final.c assumes that it remains intact
11929 across this call; since the insn has been recognized already we
11930 call recog direct). */
11931 recog (PATTERN (insn), insn, NULL);
11935 /* Returns true if REGNO is a valid register
11936 for holding a quantity of type MODE. */
11938 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11940 if (GET_MODE_CLASS (mode) == MODE_CC)
11941 return (regno == CC_REGNUM
11942 || (TARGET_HARD_FLOAT && TARGET_VFP
11943 && regno == VFPCC_REGNUM));
11946 /* For the Thumb we only allow values bigger than SImode in
11947 registers 0 - 6, so that there is always a second low
11948 register available to hold the upper part of the value.
11949 We probably we ought to ensure that the register is the
11950 start of an even numbered register pair. */
11951 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11953 if (TARGET_HARD_FLOAT && TARGET_MAVERICK
11954 && IS_CIRRUS_REGNUM (regno))
11955 /* We have outlawed SI values in Cirrus registers because they
11956 reside in the lower 32 bits, but SF values reside in the
11957 upper 32 bits. This causes gcc all sorts of grief. We can't
11958 even split the registers into pairs because Cirrus SI values
11959 get sign extended to 64bits-- aldyh. */
11960 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11962 if (TARGET_HARD_FLOAT && TARGET_VFP
11963 && IS_VFP_REGNUM (regno))
11965 if (mode == SFmode || mode == SImode)
11968 /* DFmode values are only valid in even register pairs. */
11969 if (mode == DFmode)
11970 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11974 if (TARGET_REALLY_IWMMXT)
11976 if (IS_IWMMXT_GR_REGNUM (regno))
11977 return mode == SImode;
11979 if (IS_IWMMXT_REGNUM (regno))
11980 return VALID_IWMMXT_REG_MODE (mode);
11983 /* We allow any value to be stored in the general registers.
11984 Restrict doubleword quantities to even register pairs so that we can
11986 if (regno <= LAST_ARM_REGNUM)
11987 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11989 if (regno == FRAME_POINTER_REGNUM
11990 || regno == ARG_POINTER_REGNUM)
11991 /* We only allow integers in the fake hard registers. */
11992 return GET_MODE_CLASS (mode) == MODE_INT;
11994 /* The only registers left are the FPA registers
11995 which we only allow to hold FP values. */
11996 return (TARGET_HARD_FLOAT && TARGET_FPA
11997 && GET_MODE_CLASS (mode) == MODE_FLOAT
11998 && regno >= FIRST_FPA_REGNUM
11999 && regno <= LAST_FPA_REGNUM);
12003 arm_regno_class (int regno)
12007 if (regno == STACK_POINTER_REGNUM)
12009 if (regno == CC_REGNUM)
12016 if ( regno <= LAST_ARM_REGNUM
12017 || regno == FRAME_POINTER_REGNUM
12018 || regno == ARG_POINTER_REGNUM)
12019 return GENERAL_REGS;
12021 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
12024 if (IS_CIRRUS_REGNUM (regno))
12025 return CIRRUS_REGS;
12027 if (IS_VFP_REGNUM (regno))
12030 if (IS_IWMMXT_REGNUM (regno))
12031 return IWMMXT_REGS;
12033 if (IS_IWMMXT_GR_REGNUM (regno))
12034 return IWMMXT_GR_REGS;
12039 /* Handle a special case when computing the offset
12040 of an argument from the frame pointer. */
12042 arm_debugger_arg_offset (int value, rtx addr)
12046 /* We are only interested if dbxout_parms() failed to compute the offset. */
12050 /* We can only cope with the case where the address is held in a register. */
12051 if (GET_CODE (addr) != REG)
12054 /* If we are using the frame pointer to point at the argument, then
12055 an offset of 0 is correct. */
12056 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
12059 /* If we are using the stack pointer to point at the
12060 argument, then an offset of 0 is correct. */
12061 if ((TARGET_THUMB || !frame_pointer_needed)
12062 && REGNO (addr) == SP_REGNUM)
12065 /* Oh dear. The argument is pointed to by a register rather
12066 than being held in a register, or being stored at a known
12067 offset from the frame pointer. Since GDB only understands
12068 those two kinds of argument we must translate the address
12069 held in the register into an offset from the frame pointer.
12070 We do this by searching through the insns for the function
12071 looking to see where this register gets its value. If the
12072 register is initialized from the frame pointer plus an offset
12073 then we are in luck and we can continue, otherwise we give up.
12075 This code is exercised by producing debugging information
12076 for a function with arguments like this:
12078 double func (double a, double b, int c, double d) {return d;}
12080 Without this code the stab for parameter 'd' will be set to
12081 an offset of 0 from the frame pointer, rather than 8. */
12083 /* The if() statement says:
12085 If the insn is a normal instruction
12086 and if the insn is setting the value in a register
12087 and if the register being set is the register holding the address of the argument
12088 and if the address is computing by an addition
12089 that involves adding to a register
12090 which is the frame pointer
12095 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12097 if ( GET_CODE (insn) == INSN
12098 && GET_CODE (PATTERN (insn)) == SET
12099 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
12100 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
12101 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
12102 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
12103 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
12106 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
12115 warning (0, "unable to compute real location of stacked parameter");
12116 value = 8; /* XXX magic hack */
12122 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
12125 if ((MASK) & insn_flags) \
12126 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
12127 BUILT_IN_MD, NULL, NULL_TREE); \
12131 struct builtin_description
12133 const unsigned int mask;
12134 const enum insn_code icode;
12135 const char * const name;
12136 const enum arm_builtins code;
12137 const enum rtx_code comparison;
12138 const unsigned int flag;
12141 static const struct builtin_description bdesc_2arg[] =
12143 #define IWMMXT_BUILTIN(code, string, builtin) \
12144 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
12145 ARM_BUILTIN_##builtin, 0, 0 },
12147 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
12148 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
12149 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
12150 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
12151 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
12152 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
12153 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
12154 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
12155 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
12156 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
12157 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
12158 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
12159 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
12160 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
12161 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
12162 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
12163 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
12164 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
12165 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
12166 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
12167 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
12168 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
12169 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
12170 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
12171 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
12172 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
12173 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
12174 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
12175 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
12176 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
12177 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
12178 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
12179 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
12180 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
12181 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
12182 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
12183 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
12184 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
12185 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
12186 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
12187 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
12188 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
12189 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
12190 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
12191 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
12192 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
12193 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
12194 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
12195 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
12196 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
12197 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
12198 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
12199 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
12200 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
12201 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
12202 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
12203 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
12204 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
12206 #define IWMMXT_BUILTIN2(code, builtin) \
12207 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
12209 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
12210 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
12211 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
12212 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
12213 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
12214 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
12215 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
12216 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
12217 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
12218 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
12219 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
12220 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
12221 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
12222 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
12223 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
12224 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
12225 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
12226 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
12227 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
12228 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
12229 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
12230 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
12231 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
12232 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
12233 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
12234 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
12235 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
12236 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
12237 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
12238 IWMMXT_BUILTIN2 (rordi3, WRORDI)
12239 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
12240 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
12243 static const struct builtin_description bdesc_1arg[] =
12245 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
12246 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
12247 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
12248 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
12249 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
12250 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
12251 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
12252 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
12253 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
12254 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
12255 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
12256 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
12257 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
12258 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
12259 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
12260 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
12261 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
12262 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
12265 /* Set up all the iWMMXt builtins. This is
12266 not called if TARGET_IWMMXT is zero. */
12269 arm_init_iwmmxt_builtins (void)
12271 const struct builtin_description * d;
12273 tree endlink = void_list_node;
12275 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
12276 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
12277 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
12280 = build_function_type (integer_type_node,
12281 tree_cons (NULL_TREE, integer_type_node, endlink));
12282 tree v8qi_ftype_v8qi_v8qi_int
12283 = build_function_type (V8QI_type_node,
12284 tree_cons (NULL_TREE, V8QI_type_node,
12285 tree_cons (NULL_TREE, V8QI_type_node,
12286 tree_cons (NULL_TREE,
12289 tree v4hi_ftype_v4hi_int
12290 = build_function_type (V4HI_type_node,
12291 tree_cons (NULL_TREE, V4HI_type_node,
12292 tree_cons (NULL_TREE, integer_type_node,
12294 tree v2si_ftype_v2si_int
12295 = build_function_type (V2SI_type_node,
12296 tree_cons (NULL_TREE, V2SI_type_node,
12297 tree_cons (NULL_TREE, integer_type_node,
12299 tree v2si_ftype_di_di
12300 = build_function_type (V2SI_type_node,
12301 tree_cons (NULL_TREE, long_long_integer_type_node,
12302 tree_cons (NULL_TREE, long_long_integer_type_node,
12304 tree di_ftype_di_int
12305 = build_function_type (long_long_integer_type_node,
12306 tree_cons (NULL_TREE, long_long_integer_type_node,
12307 tree_cons (NULL_TREE, integer_type_node,
12309 tree di_ftype_di_int_int
12310 = build_function_type (long_long_integer_type_node,
12311 tree_cons (NULL_TREE, long_long_integer_type_node,
12312 tree_cons (NULL_TREE, integer_type_node,
12313 tree_cons (NULL_TREE,
12316 tree int_ftype_v8qi
12317 = build_function_type (integer_type_node,
12318 tree_cons (NULL_TREE, V8QI_type_node,
12320 tree int_ftype_v4hi
12321 = build_function_type (integer_type_node,
12322 tree_cons (NULL_TREE, V4HI_type_node,
12324 tree int_ftype_v2si
12325 = build_function_type (integer_type_node,
12326 tree_cons (NULL_TREE, V2SI_type_node,
12328 tree int_ftype_v8qi_int
12329 = build_function_type (integer_type_node,
12330 tree_cons (NULL_TREE, V8QI_type_node,
12331 tree_cons (NULL_TREE, integer_type_node,
12333 tree int_ftype_v4hi_int
12334 = build_function_type (integer_type_node,
12335 tree_cons (NULL_TREE, V4HI_type_node,
12336 tree_cons (NULL_TREE, integer_type_node,
12338 tree int_ftype_v2si_int
12339 = build_function_type (integer_type_node,
12340 tree_cons (NULL_TREE, V2SI_type_node,
12341 tree_cons (NULL_TREE, integer_type_node,
12343 tree v8qi_ftype_v8qi_int_int
12344 = build_function_type (V8QI_type_node,
12345 tree_cons (NULL_TREE, V8QI_type_node,
12346 tree_cons (NULL_TREE, integer_type_node,
12347 tree_cons (NULL_TREE,
12350 tree v4hi_ftype_v4hi_int_int
12351 = build_function_type (V4HI_type_node,
12352 tree_cons (NULL_TREE, V4HI_type_node,
12353 tree_cons (NULL_TREE, integer_type_node,
12354 tree_cons (NULL_TREE,
12357 tree v2si_ftype_v2si_int_int
12358 = build_function_type (V2SI_type_node,
12359 tree_cons (NULL_TREE, V2SI_type_node,
12360 tree_cons (NULL_TREE, integer_type_node,
12361 tree_cons (NULL_TREE,
12364 /* Miscellaneous. */
12365 tree v8qi_ftype_v4hi_v4hi
12366 = build_function_type (V8QI_type_node,
12367 tree_cons (NULL_TREE, V4HI_type_node,
12368 tree_cons (NULL_TREE, V4HI_type_node,
12370 tree v4hi_ftype_v2si_v2si
12371 = build_function_type (V4HI_type_node,
12372 tree_cons (NULL_TREE, V2SI_type_node,
12373 tree_cons (NULL_TREE, V2SI_type_node,
12375 tree v2si_ftype_v4hi_v4hi
12376 = build_function_type (V2SI_type_node,
12377 tree_cons (NULL_TREE, V4HI_type_node,
12378 tree_cons (NULL_TREE, V4HI_type_node,
12380 tree v2si_ftype_v8qi_v8qi
12381 = build_function_type (V2SI_type_node,
12382 tree_cons (NULL_TREE, V8QI_type_node,
12383 tree_cons (NULL_TREE, V8QI_type_node,
12385 tree v4hi_ftype_v4hi_di
12386 = build_function_type (V4HI_type_node,
12387 tree_cons (NULL_TREE, V4HI_type_node,
12388 tree_cons (NULL_TREE,
12389 long_long_integer_type_node,
12391 tree v2si_ftype_v2si_di
12392 = build_function_type (V2SI_type_node,
12393 tree_cons (NULL_TREE, V2SI_type_node,
12394 tree_cons (NULL_TREE,
12395 long_long_integer_type_node,
12397 tree void_ftype_int_int
12398 = build_function_type (void_type_node,
12399 tree_cons (NULL_TREE, integer_type_node,
12400 tree_cons (NULL_TREE, integer_type_node,
12403 = build_function_type (long_long_unsigned_type_node, endlink);
12405 = build_function_type (long_long_integer_type_node,
12406 tree_cons (NULL_TREE, V8QI_type_node,
12409 = build_function_type (long_long_integer_type_node,
12410 tree_cons (NULL_TREE, V4HI_type_node,
12413 = build_function_type (long_long_integer_type_node,
12414 tree_cons (NULL_TREE, V2SI_type_node,
12416 tree v2si_ftype_v4hi
12417 = build_function_type (V2SI_type_node,
12418 tree_cons (NULL_TREE, V4HI_type_node,
12420 tree v4hi_ftype_v8qi
12421 = build_function_type (V4HI_type_node,
12422 tree_cons (NULL_TREE, V8QI_type_node,
12425 tree di_ftype_di_v4hi_v4hi
12426 = build_function_type (long_long_unsigned_type_node,
12427 tree_cons (NULL_TREE,
12428 long_long_unsigned_type_node,
12429 tree_cons (NULL_TREE, V4HI_type_node,
12430 tree_cons (NULL_TREE,
12434 tree di_ftype_v4hi_v4hi
12435 = build_function_type (long_long_unsigned_type_node,
12436 tree_cons (NULL_TREE, V4HI_type_node,
12437 tree_cons (NULL_TREE, V4HI_type_node,
12440 /* Normal vector binops. */
12441 tree v8qi_ftype_v8qi_v8qi
12442 = build_function_type (V8QI_type_node,
12443 tree_cons (NULL_TREE, V8QI_type_node,
12444 tree_cons (NULL_TREE, V8QI_type_node,
12446 tree v4hi_ftype_v4hi_v4hi
12447 = build_function_type (V4HI_type_node,
12448 tree_cons (NULL_TREE, V4HI_type_node,
12449 tree_cons (NULL_TREE, V4HI_type_node,
12451 tree v2si_ftype_v2si_v2si
12452 = build_function_type (V2SI_type_node,
12453 tree_cons (NULL_TREE, V2SI_type_node,
12454 tree_cons (NULL_TREE, V2SI_type_node,
12456 tree di_ftype_di_di
12457 = build_function_type (long_long_unsigned_type_node,
12458 tree_cons (NULL_TREE, long_long_unsigned_type_node,
12459 tree_cons (NULL_TREE,
12460 long_long_unsigned_type_node,
12463 /* Add all builtins that are more or less simple operations on two
12465 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12467 /* Use one of the operands; the target can have a different mode for
12468 mask-generating compares. */
12469 enum machine_mode mode;
12475 mode = insn_data[d->icode].operand[1].mode;
12480 type = v8qi_ftype_v8qi_v8qi;
12483 type = v4hi_ftype_v4hi_v4hi;
12486 type = v2si_ftype_v2si_v2si;
12489 type = di_ftype_di_di;
12493 gcc_unreachable ();
12496 def_mbuiltin (d->mask, d->name, type, d->code);
12499 /* Add the remaining MMX insns with somewhat more complicated types. */
12500 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
12501 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
12502 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
12504 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
12505 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
12506 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
12507 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
12508 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
12509 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
12511 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
12512 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
12513 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
12514 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
12515 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
12516 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
12518 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
12519 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
12520 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
12521 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
12522 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
12523 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
12525 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
12526 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
12527 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
12528 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
12529 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
12530 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
12532 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
12534 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12535 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12536 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12537 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12539 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12540 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12541 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12542 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12543 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12544 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12545 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12546 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12547 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12549 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12550 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12551 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12553 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12554 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12555 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12557 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12558 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12559 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12560 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12561 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12562 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12564 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12565 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12566 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12567 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12568 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12569 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12570 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12571 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12572 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12573 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12574 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12575 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12577 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12578 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12579 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12580 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12582 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12583 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12584 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12585 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12586 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12587 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12588 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
12592 arm_init_tls_builtins (void)
12595 tree nothrow = tree_cons (get_identifier ("nothrow"), NULL, NULL);
12596 tree const_nothrow = tree_cons (get_identifier ("const"), NULL, nothrow);
12598 ftype = build_function_type (ptr_type_node, void_list_node);
12599 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
12600 ARM_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
12601 NULL, const_nothrow);
12605 arm_init_builtins (void)
12607 arm_init_tls_builtins ();
12609 if (TARGET_REALLY_IWMMXT)
12610 arm_init_iwmmxt_builtins ();
12613 /* Errors in the source file can cause expand_expr to return const0_rtx
12614 where we expect a vector. To avoid crashing, use one of the vector
12615 clear instructions. */
12618 safe_vector_operand (rtx x, enum machine_mode mode)
12620 if (x != const0_rtx)
12622 x = gen_reg_rtx (mode);
12624 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12625 : gen_rtx_SUBREG (DImode, x, 0)));
12629 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12632 arm_expand_binop_builtin (enum insn_code icode,
12633 tree arglist, rtx target)
12636 tree arg0 = TREE_VALUE (arglist);
12637 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12638 rtx op0 = expand_normal (arg0);
12639 rtx op1 = expand_normal (arg1);
12640 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12641 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12642 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12644 if (VECTOR_MODE_P (mode0))
12645 op0 = safe_vector_operand (op0, mode0);
12646 if (VECTOR_MODE_P (mode1))
12647 op1 = safe_vector_operand (op1, mode1);
12650 || GET_MODE (target) != tmode
12651 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12652 target = gen_reg_rtx (tmode);
12654 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
12656 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12657 op0 = copy_to_mode_reg (mode0, op0);
12658 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12659 op1 = copy_to_mode_reg (mode1, op1);
12661 pat = GEN_FCN (icode) (target, op0, op1);
12668 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12671 arm_expand_unop_builtin (enum insn_code icode,
12672 tree arglist, rtx target, int do_load)
12675 tree arg0 = TREE_VALUE (arglist);
12676 rtx op0 = expand_normal (arg0);
12677 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12678 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12681 || GET_MODE (target) != tmode
12682 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12683 target = gen_reg_rtx (tmode);
12685 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12688 if (VECTOR_MODE_P (mode0))
12689 op0 = safe_vector_operand (op0, mode0);
12691 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12692 op0 = copy_to_mode_reg (mode0, op0);
12695 pat = GEN_FCN (icode) (target, op0);
12702 /* Expand an expression EXP that calls a built-in function,
12703 with result going to TARGET if that's convenient
12704 (and in mode MODE if that's convenient).
12705 SUBTARGET may be used as the target for computing one of EXP's operands.
12706 IGNORE is nonzero if the value is to be ignored. */
12709 arm_expand_builtin (tree exp,
12711 rtx subtarget ATTRIBUTE_UNUSED,
12712 enum machine_mode mode ATTRIBUTE_UNUSED,
12713 int ignore ATTRIBUTE_UNUSED)
12715 const struct builtin_description * d;
12716 enum insn_code icode;
12717 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12718 tree arglist = TREE_OPERAND (exp, 1);
12726 int fcode = DECL_FUNCTION_CODE (fndecl);
12728 enum machine_mode tmode;
12729 enum machine_mode mode0;
12730 enum machine_mode mode1;
12731 enum machine_mode mode2;
12735 case ARM_BUILTIN_TEXTRMSB:
12736 case ARM_BUILTIN_TEXTRMUB:
12737 case ARM_BUILTIN_TEXTRMSH:
12738 case ARM_BUILTIN_TEXTRMUH:
12739 case ARM_BUILTIN_TEXTRMSW:
12740 case ARM_BUILTIN_TEXTRMUW:
12741 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12742 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12743 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12744 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12745 : CODE_FOR_iwmmxt_textrmw);
12747 arg0 = TREE_VALUE (arglist);
12748 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12749 op0 = expand_normal (arg0);
12750 op1 = expand_normal (arg1);
12751 tmode = insn_data[icode].operand[0].mode;
12752 mode0 = insn_data[icode].operand[1].mode;
12753 mode1 = insn_data[icode].operand[2].mode;
12755 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12756 op0 = copy_to_mode_reg (mode0, op0);
12757 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12759 /* @@@ better error message */
12760 error ("selector must be an immediate");
12761 return gen_reg_rtx (tmode);
12764 || GET_MODE (target) != tmode
12765 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12766 target = gen_reg_rtx (tmode);
12767 pat = GEN_FCN (icode) (target, op0, op1);
12773 case ARM_BUILTIN_TINSRB:
12774 case ARM_BUILTIN_TINSRH:
12775 case ARM_BUILTIN_TINSRW:
12776 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12777 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12778 : CODE_FOR_iwmmxt_tinsrw);
12779 arg0 = TREE_VALUE (arglist);
12780 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12781 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12782 op0 = expand_normal (arg0);
12783 op1 = expand_normal (arg1);
12784 op2 = expand_normal (arg2);
12785 tmode = insn_data[icode].operand[0].mode;
12786 mode0 = insn_data[icode].operand[1].mode;
12787 mode1 = insn_data[icode].operand[2].mode;
12788 mode2 = insn_data[icode].operand[3].mode;
12790 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12791 op0 = copy_to_mode_reg (mode0, op0);
12792 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12793 op1 = copy_to_mode_reg (mode1, op1);
12794 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12796 /* @@@ better error message */
12797 error ("selector must be an immediate");
12801 || GET_MODE (target) != tmode
12802 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12803 target = gen_reg_rtx (tmode);
12804 pat = GEN_FCN (icode) (target, op0, op1, op2);
12810 case ARM_BUILTIN_SETWCX:
12811 arg0 = TREE_VALUE (arglist);
12812 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12813 op0 = force_reg (SImode, expand_normal (arg0));
12814 op1 = expand_normal (arg1);
12815 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12818 case ARM_BUILTIN_GETWCX:
12819 arg0 = TREE_VALUE (arglist);
12820 op0 = expand_normal (arg0);
12821 target = gen_reg_rtx (SImode);
12822 emit_insn (gen_iwmmxt_tmrc (target, op0));
12825 case ARM_BUILTIN_WSHUFH:
12826 icode = CODE_FOR_iwmmxt_wshufh;
12827 arg0 = TREE_VALUE (arglist);
12828 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12829 op0 = expand_normal (arg0);
12830 op1 = expand_normal (arg1);
12831 tmode = insn_data[icode].operand[0].mode;
12832 mode1 = insn_data[icode].operand[1].mode;
12833 mode2 = insn_data[icode].operand[2].mode;
12835 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12836 op0 = copy_to_mode_reg (mode1, op0);
12837 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12839 /* @@@ better error message */
12840 error ("mask must be an immediate");
12844 || GET_MODE (target) != tmode
12845 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12846 target = gen_reg_rtx (tmode);
12847 pat = GEN_FCN (icode) (target, op0, op1);
12853 case ARM_BUILTIN_WSADB:
12854 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12855 case ARM_BUILTIN_WSADH:
12856 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12857 case ARM_BUILTIN_WSADBZ:
12858 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12859 case ARM_BUILTIN_WSADHZ:
12860 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12862 /* Several three-argument builtins. */
12863 case ARM_BUILTIN_WMACS:
12864 case ARM_BUILTIN_WMACU:
12865 case ARM_BUILTIN_WALIGN:
12866 case ARM_BUILTIN_TMIA:
12867 case ARM_BUILTIN_TMIAPH:
12868 case ARM_BUILTIN_TMIATT:
12869 case ARM_BUILTIN_TMIATB:
12870 case ARM_BUILTIN_TMIABT:
12871 case ARM_BUILTIN_TMIABB:
12872 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12873 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12874 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12875 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12876 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12877 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12878 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12879 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12880 : CODE_FOR_iwmmxt_walign);
12881 arg0 = TREE_VALUE (arglist);
12882 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12883 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12884 op0 = expand_normal (arg0);
12885 op1 = expand_normal (arg1);
12886 op2 = expand_normal (arg2);
12887 tmode = insn_data[icode].operand[0].mode;
12888 mode0 = insn_data[icode].operand[1].mode;
12889 mode1 = insn_data[icode].operand[2].mode;
12890 mode2 = insn_data[icode].operand[3].mode;
12892 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12893 op0 = copy_to_mode_reg (mode0, op0);
12894 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12895 op1 = copy_to_mode_reg (mode1, op1);
12896 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12897 op2 = copy_to_mode_reg (mode2, op2);
12899 || GET_MODE (target) != tmode
12900 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12901 target = gen_reg_rtx (tmode);
12902 pat = GEN_FCN (icode) (target, op0, op1, op2);
12908 case ARM_BUILTIN_WZERO:
12909 target = gen_reg_rtx (DImode);
12910 emit_insn (gen_iwmmxt_clrdi (target));
12913 case ARM_BUILTIN_THREAD_POINTER:
12914 return arm_load_tp (target);
12920 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12921 if (d->code == (const enum arm_builtins) fcode)
12922 return arm_expand_binop_builtin (d->icode, arglist, target);
12924 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12925 if (d->code == (const enum arm_builtins) fcode)
12926 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12928 /* @@@ Should really do something sensible here. */
12932 /* Return the number (counting from 0) of
12933 the least significant set bit in MASK. */
12936 number_of_first_bit_set (unsigned mask)
12941 (mask & (1 << bit)) == 0;
12948 /* Emit code to push or pop registers to or from the stack. F is the
12949 assembly file. MASK is the registers to push or pop. PUSH is
12950 nonzero if we should push, and zero if we should pop. For debugging
12951 output, if pushing, adjust CFA_OFFSET by the amount of space added
12952 to the stack. REAL_REGS should have the same number of bits set as
12953 MASK, and will be used instead (in the same order) to describe which
12954 registers were saved - this is used to mark the save slots when we
12955 push high registers after moving them to low registers. */
12957 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12958 unsigned long real_regs)
12961 int lo_mask = mask & 0xFF;
12962 int pushed_words = 0;
12966 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12968 /* Special case. Do not generate a POP PC statement here, do it in
12970 thumb_exit (f, -1);
12974 if (ARM_EABI_UNWIND_TABLES && push)
12976 fprintf (f, "\t.save\t{");
12977 for (regno = 0; regno < 15; regno++)
12979 if (real_regs & (1 << regno))
12981 if (real_regs & ((1 << regno) -1))
12983 asm_fprintf (f, "%r", regno);
12986 fprintf (f, "}\n");
12989 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12991 /* Look at the low registers first. */
12992 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12996 asm_fprintf (f, "%r", regno);
12998 if ((lo_mask & ~1) != 0)
13005 if (push && (mask & (1 << LR_REGNUM)))
13007 /* Catch pushing the LR. */
13011 asm_fprintf (f, "%r", LR_REGNUM);
13015 else if (!push && (mask & (1 << PC_REGNUM)))
13017 /* Catch popping the PC. */
13018 if (TARGET_INTERWORK || TARGET_BACKTRACE
13019 || current_function_calls_eh_return)
13021 /* The PC is never poped directly, instead
13022 it is popped into r3 and then BX is used. */
13023 fprintf (f, "}\n");
13025 thumb_exit (f, -1);
13034 asm_fprintf (f, "%r", PC_REGNUM);
13038 fprintf (f, "}\n");
13040 if (push && pushed_words && dwarf2out_do_frame ())
13042 char *l = dwarf2out_cfi_label ();
13043 int pushed_mask = real_regs;
13045 *cfa_offset += pushed_words * 4;
13046 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
13049 pushed_mask = real_regs;
13050 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
13052 if (pushed_mask & 1)
13053 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
13058 /* Generate code to return from a thumb function.
13059 If 'reg_containing_return_addr' is -1, then the return address is
13060 actually on the stack, at the stack pointer. */
13062 thumb_exit (FILE *f, int reg_containing_return_addr)
13064 unsigned regs_available_for_popping;
13065 unsigned regs_to_pop;
13067 unsigned available;
13071 int restore_a4 = FALSE;
13073 /* Compute the registers we need to pop. */
13077 if (reg_containing_return_addr == -1)
13079 regs_to_pop |= 1 << LR_REGNUM;
13083 if (TARGET_BACKTRACE)
13085 /* Restore the (ARM) frame pointer and stack pointer. */
13086 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
13090 /* If there is nothing to pop then just emit the BX instruction and
13092 if (pops_needed == 0)
13094 if (current_function_calls_eh_return)
13095 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
13097 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
13100 /* Otherwise if we are not supporting interworking and we have not created
13101 a backtrace structure and the function was not entered in ARM mode then
13102 just pop the return address straight into the PC. */
13103 else if (!TARGET_INTERWORK
13104 && !TARGET_BACKTRACE
13105 && !is_called_in_ARM_mode (current_function_decl)
13106 && !current_function_calls_eh_return)
13108 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
13112 /* Find out how many of the (return) argument registers we can corrupt. */
13113 regs_available_for_popping = 0;
13115 /* If returning via __builtin_eh_return, the bottom three registers
13116 all contain information needed for the return. */
13117 if (current_function_calls_eh_return)
13121 /* If we can deduce the registers used from the function's
13122 return value. This is more reliable that examining
13123 regs_ever_live[] because that will be set if the register is
13124 ever used in the function, not just if the register is used
13125 to hold a return value. */
13127 if (current_function_return_rtx != 0)
13128 mode = GET_MODE (current_function_return_rtx);
13130 mode = DECL_MODE (DECL_RESULT (current_function_decl));
13132 size = GET_MODE_SIZE (mode);
13136 /* In a void function we can use any argument register.
13137 In a function that returns a structure on the stack
13138 we can use the second and third argument registers. */
13139 if (mode == VOIDmode)
13140 regs_available_for_popping =
13141 (1 << ARG_REGISTER (1))
13142 | (1 << ARG_REGISTER (2))
13143 | (1 << ARG_REGISTER (3));
13145 regs_available_for_popping =
13146 (1 << ARG_REGISTER (2))
13147 | (1 << ARG_REGISTER (3));
13149 else if (size <= 4)
13150 regs_available_for_popping =
13151 (1 << ARG_REGISTER (2))
13152 | (1 << ARG_REGISTER (3));
13153 else if (size <= 8)
13154 regs_available_for_popping =
13155 (1 << ARG_REGISTER (3));
13158 /* Match registers to be popped with registers into which we pop them. */
13159 for (available = regs_available_for_popping,
13160 required = regs_to_pop;
13161 required != 0 && available != 0;
13162 available &= ~(available & - available),
13163 required &= ~(required & - required))
13166 /* If we have any popping registers left over, remove them. */
13168 regs_available_for_popping &= ~available;
13170 /* Otherwise if we need another popping register we can use
13171 the fourth argument register. */
13172 else if (pops_needed)
13174 /* If we have not found any free argument registers and
13175 reg a4 contains the return address, we must move it. */
13176 if (regs_available_for_popping == 0
13177 && reg_containing_return_addr == LAST_ARG_REGNUM)
13179 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13180 reg_containing_return_addr = LR_REGNUM;
13182 else if (size > 12)
13184 /* Register a4 is being used to hold part of the return value,
13185 but we have dire need of a free, low register. */
13188 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
13191 if (reg_containing_return_addr != LAST_ARG_REGNUM)
13193 /* The fourth argument register is available. */
13194 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
13200 /* Pop as many registers as we can. */
13201 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13202 regs_available_for_popping);
13204 /* Process the registers we popped. */
13205 if (reg_containing_return_addr == -1)
13207 /* The return address was popped into the lowest numbered register. */
13208 regs_to_pop &= ~(1 << LR_REGNUM);
13210 reg_containing_return_addr =
13211 number_of_first_bit_set (regs_available_for_popping);
13213 /* Remove this register for the mask of available registers, so that
13214 the return address will not be corrupted by further pops. */
13215 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
13218 /* If we popped other registers then handle them here. */
13219 if (regs_available_for_popping)
13223 /* Work out which register currently contains the frame pointer. */
13224 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
13226 /* Move it into the correct place. */
13227 asm_fprintf (f, "\tmov\t%r, %r\n",
13228 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
13230 /* (Temporarily) remove it from the mask of popped registers. */
13231 regs_available_for_popping &= ~(1 << frame_pointer);
13232 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
13234 if (regs_available_for_popping)
13238 /* We popped the stack pointer as well,
13239 find the register that contains it. */
13240 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
13242 /* Move it into the stack register. */
13243 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
13245 /* At this point we have popped all necessary registers, so
13246 do not worry about restoring regs_available_for_popping
13247 to its correct value:
13249 assert (pops_needed == 0)
13250 assert (regs_available_for_popping == (1 << frame_pointer))
13251 assert (regs_to_pop == (1 << STACK_POINTER)) */
13255 /* Since we have just move the popped value into the frame
13256 pointer, the popping register is available for reuse, and
13257 we know that we still have the stack pointer left to pop. */
13258 regs_available_for_popping |= (1 << frame_pointer);
13262 /* If we still have registers left on the stack, but we no longer have
13263 any registers into which we can pop them, then we must move the return
13264 address into the link register and make available the register that
13266 if (regs_available_for_popping == 0 && pops_needed > 0)
13268 regs_available_for_popping |= 1 << reg_containing_return_addr;
13270 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
13271 reg_containing_return_addr);
13273 reg_containing_return_addr = LR_REGNUM;
13276 /* If we have registers left on the stack then pop some more.
13277 We know that at most we will want to pop FP and SP. */
13278 if (pops_needed > 0)
13283 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13284 regs_available_for_popping);
13286 /* We have popped either FP or SP.
13287 Move whichever one it is into the correct register. */
13288 popped_into = number_of_first_bit_set (regs_available_for_popping);
13289 move_to = number_of_first_bit_set (regs_to_pop);
13291 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
13293 regs_to_pop &= ~(1 << move_to);
13298 /* If we still have not popped everything then we must have only
13299 had one register available to us and we are now popping the SP. */
13300 if (pops_needed > 0)
13304 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13305 regs_available_for_popping);
13307 popped_into = number_of_first_bit_set (regs_available_for_popping);
13309 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
13311 assert (regs_to_pop == (1 << STACK_POINTER))
13312 assert (pops_needed == 1)
13316 /* If necessary restore the a4 register. */
13319 if (reg_containing_return_addr != LR_REGNUM)
13321 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13322 reg_containing_return_addr = LR_REGNUM;
13325 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
13328 if (current_function_calls_eh_return)
13329 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
13331 /* Return to caller. */
13332 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
13337 thumb_final_prescan_insn (rtx insn)
13339 if (flag_print_asm_name)
13340 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
13341 INSN_ADDRESSES (INSN_UID (insn)));
13345 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
13347 unsigned HOST_WIDE_INT mask = 0xff;
13350 if (val == 0) /* XXX */
13353 for (i = 0; i < 25; i++)
13354 if ((val & (mask << i)) == val)
13360 /* Returns nonzero if the current function contains,
13361 or might contain a far jump. */
13363 thumb_far_jump_used_p (void)
13367 /* This test is only important for leaf functions. */
13368 /* assert (!leaf_function_p ()); */
13370 /* If we have already decided that far jumps may be used,
13371 do not bother checking again, and always return true even if
13372 it turns out that they are not being used. Once we have made
13373 the decision that far jumps are present (and that hence the link
13374 register will be pushed onto the stack) we cannot go back on it. */
13375 if (cfun->machine->far_jump_used)
13378 /* If this function is not being called from the prologue/epilogue
13379 generation code then it must be being called from the
13380 INITIAL_ELIMINATION_OFFSET macro. */
13381 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
13383 /* In this case we know that we are being asked about the elimination
13384 of the arg pointer register. If that register is not being used,
13385 then there are no arguments on the stack, and we do not have to
13386 worry that a far jump might force the prologue to push the link
13387 register, changing the stack offsets. In this case we can just
13388 return false, since the presence of far jumps in the function will
13389 not affect stack offsets.
13391 If the arg pointer is live (or if it was live, but has now been
13392 eliminated and so set to dead) then we do have to test to see if
13393 the function might contain a far jump. This test can lead to some
13394 false negatives, since before reload is completed, then length of
13395 branch instructions is not known, so gcc defaults to returning their
13396 longest length, which in turn sets the far jump attribute to true.
13398 A false negative will not result in bad code being generated, but it
13399 will result in a needless push and pop of the link register. We
13400 hope that this does not occur too often.
13402 If we need doubleword stack alignment this could affect the other
13403 elimination offsets so we can't risk getting it wrong. */
13404 if (regs_ever_live [ARG_POINTER_REGNUM])
13405 cfun->machine->arg_pointer_live = 1;
13406 else if (!cfun->machine->arg_pointer_live)
13410 /* Check to see if the function contains a branch
13411 insn with the far jump attribute set. */
13412 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13414 if (GET_CODE (insn) == JUMP_INSN
13415 /* Ignore tablejump patterns. */
13416 && GET_CODE (PATTERN (insn)) != ADDR_VEC
13417 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
13418 && get_attr_far_jump (insn) == FAR_JUMP_YES
13421 /* Record the fact that we have decided that
13422 the function does use far jumps. */
13423 cfun->machine->far_jump_used = 1;
13431 /* Return nonzero if FUNC must be entered in ARM mode. */
13433 is_called_in_ARM_mode (tree func)
13435 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
13437 /* Ignore the problem about functions whose address is taken. */
13438 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
13442 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
13448 /* The bits which aren't usefully expanded as rtl. */
13450 thumb_unexpanded_epilogue (void)
13453 unsigned long live_regs_mask = 0;
13454 int high_regs_pushed = 0;
13455 int had_to_push_lr;
13458 if (return_used_this_function)
13461 if (IS_NAKED (arm_current_func_type ()))
13464 live_regs_mask = thumb_compute_save_reg_mask ();
13465 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13467 /* If we can deduce the registers used from the function's return value.
13468 This is more reliable that examining regs_ever_live[] because that
13469 will be set if the register is ever used in the function, not just if
13470 the register is used to hold a return value. */
13471 size = arm_size_return_regs ();
13473 /* The prolog may have pushed some high registers to use as
13474 work registers. e.g. the testsuite file:
13475 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
13476 compiles to produce:
13477 push {r4, r5, r6, r7, lr}
13481 as part of the prolog. We have to undo that pushing here. */
13483 if (high_regs_pushed)
13485 unsigned long mask = live_regs_mask & 0xff;
13488 /* The available low registers depend on the size of the value we are
13496 /* Oh dear! We have no low registers into which we can pop
13499 ("no low registers available for popping high registers");
13501 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
13502 if (live_regs_mask & (1 << next_hi_reg))
13505 while (high_regs_pushed)
13507 /* Find lo register(s) into which the high register(s) can
13509 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13511 if (mask & (1 << regno))
13512 high_regs_pushed--;
13513 if (high_regs_pushed == 0)
13517 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
13519 /* Pop the values into the low register(s). */
13520 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
13522 /* Move the value(s) into the high registers. */
13523 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13525 if (mask & (1 << regno))
13527 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
13530 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
13531 if (live_regs_mask & (1 << next_hi_reg))
13536 live_regs_mask &= ~0x0f00;
13539 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
13540 live_regs_mask &= 0xff;
13542 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
13544 /* Pop the return address into the PC. */
13545 if (had_to_push_lr)
13546 live_regs_mask |= 1 << PC_REGNUM;
13548 /* Either no argument registers were pushed or a backtrace
13549 structure was created which includes an adjusted stack
13550 pointer, so just pop everything. */
13551 if (live_regs_mask)
13552 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13555 /* We have either just popped the return address into the
13556 PC or it is was kept in LR for the entire function. */
13557 if (!had_to_push_lr)
13558 thumb_exit (asm_out_file, LR_REGNUM);
13562 /* Pop everything but the return address. */
13563 if (live_regs_mask)
13564 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13567 if (had_to_push_lr)
13571 /* We have no free low regs, so save one. */
13572 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13576 /* Get the return address into a temporary register. */
13577 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13578 1 << LAST_ARG_REGNUM);
13582 /* Move the return address to lr. */
13583 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13585 /* Restore the low register. */
13586 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13591 regno = LAST_ARG_REGNUM;
13596 /* Remove the argument registers that were pushed onto the stack. */
13597 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13598 SP_REGNUM, SP_REGNUM,
13599 current_function_pretend_args_size);
13601 thumb_exit (asm_out_file, regno);
13607 /* Functions to save and restore machine-specific function data. */
13608 static struct machine_function *
13609 arm_init_machine_status (void)
13611 struct machine_function *machine;
13612 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13614 #if ARM_FT_UNKNOWN != 0
13615 machine->func_type = ARM_FT_UNKNOWN;
13620 /* Return an RTX indicating where the return address to the
13621 calling function can be found. */
13623 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13628 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13631 /* Do anything needed before RTL is emitted for each function. */
13633 arm_init_expanders (void)
13635 /* Arrange to initialize and mark the machine per-function status. */
13636 init_machine_status = arm_init_machine_status;
13638 /* This is to stop the combine pass optimizing away the alignment
13639 adjustment of va_arg. */
13640 /* ??? It is claimed that this should not be necessary. */
13642 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13646 /* Like arm_compute_initial_elimination offset. Simpler because there
13647 isn't an ABI specified frame pointer for Thumb. Instead, we set it
13648 to point at the base of the local variables after static stack
13649 space for a function has been allocated. */
13652 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13654 arm_stack_offsets *offsets;
13656 offsets = arm_get_frame_offsets ();
13660 case ARG_POINTER_REGNUM:
13663 case STACK_POINTER_REGNUM:
13664 return offsets->outgoing_args - offsets->saved_args;
13666 case FRAME_POINTER_REGNUM:
13667 return offsets->soft_frame - offsets->saved_args;
13669 case ARM_HARD_FRAME_POINTER_REGNUM:
13670 return offsets->saved_regs - offsets->saved_args;
13672 case THUMB_HARD_FRAME_POINTER_REGNUM:
13673 return offsets->locals_base - offsets->saved_args;
13676 gcc_unreachable ();
13680 case FRAME_POINTER_REGNUM:
13683 case STACK_POINTER_REGNUM:
13684 return offsets->outgoing_args - offsets->soft_frame;
13686 case ARM_HARD_FRAME_POINTER_REGNUM:
13687 return offsets->saved_regs - offsets->soft_frame;
13689 case THUMB_HARD_FRAME_POINTER_REGNUM:
13690 return offsets->locals_base - offsets->soft_frame;
13693 gcc_unreachable ();
13698 gcc_unreachable ();
13703 /* Generate the rest of a function's prologue. */
13705 thumb_expand_prologue (void)
13709 HOST_WIDE_INT amount;
13710 arm_stack_offsets *offsets;
13711 unsigned long func_type;
13713 unsigned long live_regs_mask;
13715 func_type = arm_current_func_type ();
13717 /* Naked functions don't have prologues. */
13718 if (IS_NAKED (func_type))
13721 if (IS_INTERRUPT (func_type))
13723 error ("interrupt Service Routines cannot be coded in Thumb mode");
13727 live_regs_mask = thumb_compute_save_reg_mask ();
13728 /* Load the pic register before setting the frame pointer,
13729 so we can use r7 as a temporary work register. */
13730 if (flag_pic && arm_pic_register != INVALID_REGNUM)
13731 arm_load_pic_register (live_regs_mask);
13733 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
13734 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13735 stack_pointer_rtx);
13737 offsets = arm_get_frame_offsets ();
13738 amount = offsets->outgoing_args - offsets->saved_regs;
13743 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13744 GEN_INT (- amount)));
13745 RTX_FRAME_RELATED_P (insn) = 1;
13751 /* The stack decrement is too big for an immediate value in a single
13752 insn. In theory we could issue multiple subtracts, but after
13753 three of them it becomes more space efficient to place the full
13754 value in the constant pool and load into a register. (Also the
13755 ARM debugger really likes to see only one stack decrement per
13756 function). So instead we look for a scratch register into which
13757 we can load the decrement, and then we subtract this from the
13758 stack pointer. Unfortunately on the thumb the only available
13759 scratch registers are the argument registers, and we cannot use
13760 these as they may hold arguments to the function. Instead we
13761 attempt to locate a call preserved register which is used by this
13762 function. If we can find one, then we know that it will have
13763 been pushed at the start of the prologue and so we can corrupt
13765 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13766 if (live_regs_mask & (1 << regno)
13767 && !(frame_pointer_needed
13768 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13771 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13773 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13775 /* Choose an arbitrary, non-argument low register. */
13776 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13778 /* Save it by copying it into a high, scratch register. */
13779 emit_insn (gen_movsi (spare, reg));
13780 /* Add a USE to stop propagate_one_insn() from barfing. */
13781 emit_insn (gen_prologue_use (spare));
13783 /* Decrement the stack. */
13784 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13785 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13786 stack_pointer_rtx, reg));
13787 RTX_FRAME_RELATED_P (insn) = 1;
13788 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13789 plus_constant (stack_pointer_rtx,
13791 RTX_FRAME_RELATED_P (dwarf) = 1;
13793 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13796 /* Restore the low register's original value. */
13797 emit_insn (gen_movsi (reg, spare));
13799 /* Emit a USE of the restored scratch register, so that flow
13800 analysis will not consider the restore redundant. The
13801 register won't be used again in this function and isn't
13802 restored by the epilogue. */
13803 emit_insn (gen_prologue_use (reg));
13807 reg = gen_rtx_REG (SImode, regno);
13809 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13811 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13812 stack_pointer_rtx, reg));
13813 RTX_FRAME_RELATED_P (insn) = 1;
13814 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13815 plus_constant (stack_pointer_rtx,
13817 RTX_FRAME_RELATED_P (dwarf) = 1;
13819 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13825 if (frame_pointer_needed)
13827 amount = offsets->outgoing_args - offsets->locals_base;
13830 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13831 stack_pointer_rtx, GEN_INT (amount)));
13834 emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount)));
13835 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
13836 hard_frame_pointer_rtx,
13837 stack_pointer_rtx));
13838 dwarf = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
13839 plus_constant (stack_pointer_rtx, amount));
13840 RTX_FRAME_RELATED_P (dwarf) = 1;
13841 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13845 RTX_FRAME_RELATED_P (insn) = 1;
13848 /* If we are profiling, make sure no instructions are scheduled before
13849 the call to mcount. Similarly if the user has requested no
13850 scheduling in the prolog. Similarly if we want non-call exceptions
13851 using the EABI unwinder, to prevent faulting instructions from being
13852 swapped with a stack adjustment. */
13853 if (current_function_profile || !TARGET_SCHED_PROLOG
13854 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
13855 emit_insn (gen_blockage ());
13857 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13858 if (live_regs_mask & 0xff)
13859 cfun->machine->lr_save_eliminated = 0;
13861 /* If the link register is being kept alive, with the return address in it,
13862 then make sure that it does not get reused by the ce2 pass. */
13863 if (cfun->machine->lr_save_eliminated)
13864 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13869 thumb_expand_epilogue (void)
13871 HOST_WIDE_INT amount;
13872 arm_stack_offsets *offsets;
13875 /* Naked functions don't have prologues. */
13876 if (IS_NAKED (arm_current_func_type ()))
13879 offsets = arm_get_frame_offsets ();
13880 amount = offsets->outgoing_args - offsets->saved_regs;
13882 if (frame_pointer_needed)
13884 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13885 amount = offsets->locals_base - offsets->saved_regs;
13888 gcc_assert (amount >= 0);
13892 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13893 GEN_INT (amount)));
13896 /* r3 is always free in the epilogue. */
13897 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13899 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13900 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13904 /* Emit a USE (stack_pointer_rtx), so that
13905 the stack adjustment will not be deleted. */
13906 emit_insn (gen_prologue_use (stack_pointer_rtx));
13908 if (current_function_profile || !TARGET_SCHED_PROLOG)
13909 emit_insn (gen_blockage ());
13911 /* Emit a clobber for each insn that will be restored in the epilogue,
13912 so that flow2 will get register lifetimes correct. */
13913 for (regno = 0; regno < 13; regno++)
13914 if (regs_ever_live[regno] && !call_used_regs[regno])
13915 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13917 if (! regs_ever_live[LR_REGNUM])
13918 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13922 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13924 unsigned long live_regs_mask = 0;
13925 unsigned long l_mask;
13926 unsigned high_regs_pushed = 0;
13927 int cfa_offset = 0;
13930 if (IS_NAKED (arm_current_func_type ()))
13933 if (is_called_in_ARM_mode (current_function_decl))
13937 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
13938 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
13940 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13942 /* Generate code sequence to switch us into Thumb mode. */
13943 /* The .code 32 directive has already been emitted by
13944 ASM_DECLARE_FUNCTION_NAME. */
13945 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13946 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13948 /* Generate a label, so that the debugger will notice the
13949 change in instruction sets. This label is also used by
13950 the assembler to bypass the ARM code when this function
13951 is called from a Thumb encoded function elsewhere in the
13952 same file. Hence the definition of STUB_NAME here must
13953 agree with the definition in gas/config/tc-arm.c. */
13955 #define STUB_NAME ".real_start_of"
13957 fprintf (f, "\t.code\t16\n");
13959 if (arm_dllexport_name_p (name))
13960 name = arm_strip_name_encoding (name);
13962 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13963 fprintf (f, "\t.thumb_func\n");
13964 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13967 if (current_function_pretend_args_size)
13969 /* Output unwind directive for the stack adjustment. */
13970 if (ARM_EABI_UNWIND_TABLES)
13971 fprintf (f, "\t.pad #%d\n",
13972 current_function_pretend_args_size);
13974 if (cfun->machine->uses_anonymous_args)
13978 fprintf (f, "\tpush\t{");
13980 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13982 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13983 regno <= LAST_ARG_REGNUM;
13985 asm_fprintf (f, "%r%s", regno,
13986 regno == LAST_ARG_REGNUM ? "" : ", ");
13988 fprintf (f, "}\n");
13991 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13992 SP_REGNUM, SP_REGNUM,
13993 current_function_pretend_args_size);
13995 /* We don't need to record the stores for unwinding (would it
13996 help the debugger any if we did?), but record the change in
13997 the stack pointer. */
13998 if (dwarf2out_do_frame ())
14000 char *l = dwarf2out_cfi_label ();
14002 cfa_offset = cfa_offset + current_function_pretend_args_size;
14003 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
14007 /* Get the registers we are going to push. */
14008 live_regs_mask = thumb_compute_save_reg_mask ();
14009 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
14010 l_mask = live_regs_mask & 0x40ff;
14011 /* Then count how many other high registers will need to be pushed. */
14012 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
14014 if (TARGET_BACKTRACE)
14017 unsigned work_register;
14019 /* We have been asked to create a stack backtrace structure.
14020 The code looks like this:
14024 0 sub SP, #16 Reserve space for 4 registers.
14025 2 push {R7} Push low registers.
14026 4 add R7, SP, #20 Get the stack pointer before the push.
14027 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
14028 8 mov R7, PC Get hold of the start of this code plus 12.
14029 10 str R7, [SP, #16] Store it.
14030 12 mov R7, FP Get hold of the current frame pointer.
14031 14 str R7, [SP, #4] Store it.
14032 16 mov R7, LR Get hold of the current return address.
14033 18 str R7, [SP, #12] Store it.
14034 20 add R7, SP, #16 Point at the start of the backtrace structure.
14035 22 mov FP, R7 Put this value into the frame pointer. */
14037 work_register = thumb_find_work_register (live_regs_mask);
14039 if (ARM_EABI_UNWIND_TABLES)
14040 asm_fprintf (f, "\t.pad #16\n");
14043 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
14044 SP_REGNUM, SP_REGNUM);
14046 if (dwarf2out_do_frame ())
14048 char *l = dwarf2out_cfi_label ();
14050 cfa_offset = cfa_offset + 16;
14051 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
14056 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
14057 offset = bit_count (l_mask) * UNITS_PER_WORD;
14062 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
14063 offset + 16 + current_function_pretend_args_size);
14065 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14068 /* Make sure that the instruction fetching the PC is in the right place
14069 to calculate "start of backtrace creation code + 12". */
14072 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
14073 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14075 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
14076 ARM_HARD_FRAME_POINTER_REGNUM);
14077 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14082 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
14083 ARM_HARD_FRAME_POINTER_REGNUM);
14084 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14086 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
14087 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14091 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
14092 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14094 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
14096 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
14097 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
14099 /* Optimization: If we are not pushing any low registers but we are going
14100 to push some high registers then delay our first push. This will just
14101 be a push of LR and we can combine it with the push of the first high
14103 else if ((l_mask & 0xff) != 0
14104 || (high_regs_pushed == 0 && l_mask))
14105 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
14107 if (high_regs_pushed)
14109 unsigned pushable_regs;
14110 unsigned next_hi_reg;
14112 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
14113 if (live_regs_mask & (1 << next_hi_reg))
14116 pushable_regs = l_mask & 0xff;
14118 if (pushable_regs == 0)
14119 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
14121 while (high_regs_pushed > 0)
14123 unsigned long real_regs_mask = 0;
14125 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
14127 if (pushable_regs & (1 << regno))
14129 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
14131 high_regs_pushed --;
14132 real_regs_mask |= (1 << next_hi_reg);
14134 if (high_regs_pushed)
14136 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
14138 if (live_regs_mask & (1 << next_hi_reg))
14143 pushable_regs &= ~((1 << regno) - 1);
14149 /* If we had to find a work register and we have not yet
14150 saved the LR then add it to the list of regs to push. */
14151 if (l_mask == (1 << LR_REGNUM))
14153 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
14155 real_regs_mask | (1 << LR_REGNUM));
14159 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
14164 /* Handle the case of a double word load into a low register from
14165 a computed memory address. The computed address may involve a
14166 register which is overwritten by the load. */
14168 thumb_load_double_from_address (rtx *operands)
14176 gcc_assert (GET_CODE (operands[0]) == REG);
14177 gcc_assert (GET_CODE (operands[1]) == MEM);
14179 /* Get the memory address. */
14180 addr = XEXP (operands[1], 0);
14182 /* Work out how the memory address is computed. */
14183 switch (GET_CODE (addr))
14186 operands[2] = adjust_address (operands[1], SImode, 4);
14188 if (REGNO (operands[0]) == REGNO (addr))
14190 output_asm_insn ("ldr\t%H0, %2", operands);
14191 output_asm_insn ("ldr\t%0, %1", operands);
14195 output_asm_insn ("ldr\t%0, %1", operands);
14196 output_asm_insn ("ldr\t%H0, %2", operands);
14201 /* Compute <address> + 4 for the high order load. */
14202 operands[2] = adjust_address (operands[1], SImode, 4);
14204 output_asm_insn ("ldr\t%0, %1", operands);
14205 output_asm_insn ("ldr\t%H0, %2", operands);
14209 arg1 = XEXP (addr, 0);
14210 arg2 = XEXP (addr, 1);
14212 if (CONSTANT_P (arg1))
14213 base = arg2, offset = arg1;
14215 base = arg1, offset = arg2;
14217 gcc_assert (GET_CODE (base) == REG);
14219 /* Catch the case of <address> = <reg> + <reg> */
14220 if (GET_CODE (offset) == REG)
14222 int reg_offset = REGNO (offset);
14223 int reg_base = REGNO (base);
14224 int reg_dest = REGNO (operands[0]);
14226 /* Add the base and offset registers together into the
14227 higher destination register. */
14228 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
14229 reg_dest + 1, reg_base, reg_offset);
14231 /* Load the lower destination register from the address in
14232 the higher destination register. */
14233 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
14234 reg_dest, reg_dest + 1);
14236 /* Load the higher destination register from its own address
14238 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
14239 reg_dest + 1, reg_dest + 1);
14243 /* Compute <address> + 4 for the high order load. */
14244 operands[2] = adjust_address (operands[1], SImode, 4);
14246 /* If the computed address is held in the low order register
14247 then load the high order register first, otherwise always
14248 load the low order register first. */
14249 if (REGNO (operands[0]) == REGNO (base))
14251 output_asm_insn ("ldr\t%H0, %2", operands);
14252 output_asm_insn ("ldr\t%0, %1", operands);
14256 output_asm_insn ("ldr\t%0, %1", operands);
14257 output_asm_insn ("ldr\t%H0, %2", operands);
14263 /* With no registers to worry about we can just load the value
14265 operands[2] = adjust_address (operands[1], SImode, 4);
14267 output_asm_insn ("ldr\t%H0, %2", operands);
14268 output_asm_insn ("ldr\t%0, %1", operands);
14272 gcc_unreachable ();
14279 thumb_output_move_mem_multiple (int n, rtx *operands)
14286 if (REGNO (operands[4]) > REGNO (operands[5]))
14289 operands[4] = operands[5];
14292 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
14293 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
14297 if (REGNO (operands[4]) > REGNO (operands[5]))
14300 operands[4] = operands[5];
14303 if (REGNO (operands[5]) > REGNO (operands[6]))
14306 operands[5] = operands[6];
14309 if (REGNO (operands[4]) > REGNO (operands[5]))
14312 operands[4] = operands[5];
14316 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
14317 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
14321 gcc_unreachable ();
14327 /* Output a call-via instruction for thumb state. */
14329 thumb_call_via_reg (rtx reg)
14331 int regno = REGNO (reg);
14334 gcc_assert (regno < LR_REGNUM);
14336 /* If we are in the normal text section we can use a single instance
14337 per compilation unit. If we are doing function sections, then we need
14338 an entry per section, since we can't rely on reachability. */
14339 if (in_section == text_section)
14341 thumb_call_reg_needed = 1;
14343 if (thumb_call_via_label[regno] == NULL)
14344 thumb_call_via_label[regno] = gen_label_rtx ();
14345 labelp = thumb_call_via_label + regno;
14349 if (cfun->machine->call_via[regno] == NULL)
14350 cfun->machine->call_via[regno] = gen_label_rtx ();
14351 labelp = cfun->machine->call_via + regno;
14354 output_asm_insn ("bl\t%a0", labelp);
14358 /* Routines for generating rtl. */
14360 thumb_expand_movmemqi (rtx *operands)
14362 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
14363 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
14364 HOST_WIDE_INT len = INTVAL (operands[2]);
14365 HOST_WIDE_INT offset = 0;
14369 emit_insn (gen_movmem12b (out, in, out, in));
14375 emit_insn (gen_movmem8b (out, in, out, in));
14381 rtx reg = gen_reg_rtx (SImode);
14382 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
14383 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
14390 rtx reg = gen_reg_rtx (HImode);
14391 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
14392 plus_constant (in, offset))));
14393 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
14401 rtx reg = gen_reg_rtx (QImode);
14402 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
14403 plus_constant (in, offset))));
14404 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
14410 thumb_reload_out_hi (rtx *operands)
14412 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
14415 /* Handle reading a half-word from memory during reload. */
14417 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
14419 gcc_unreachable ();
14422 /* Return the length of a function name prefix
14423 that starts with the character 'c'. */
14425 arm_get_strip_length (int c)
14429 ARM_NAME_ENCODING_LENGTHS
14434 /* Return a pointer to a function's name with any
14435 and all prefix encodings stripped from it. */
14437 arm_strip_name_encoding (const char *name)
14441 while ((skip = arm_get_strip_length (* name)))
14447 /* If there is a '*' anywhere in the name's prefix, then
14448 emit the stripped name verbatim, otherwise prepend an
14449 underscore if leading underscores are being used. */
14451 arm_asm_output_labelref (FILE *stream, const char *name)
14456 while ((skip = arm_get_strip_length (* name)))
14458 verbatim |= (*name == '*');
14463 fputs (name, stream);
14465 asm_fprintf (stream, "%U%s", name);
14469 arm_file_start (void)
14475 const char *fpu_name;
14476 if (arm_select[0].string)
14477 asm_fprintf (asm_out_file, "\t.cpu %s\n", arm_select[0].string);
14478 else if (arm_select[1].string)
14479 asm_fprintf (asm_out_file, "\t.arch %s\n", arm_select[1].string);
14481 asm_fprintf (asm_out_file, "\t.cpu %s\n",
14482 all_cores[arm_default_cpu].name);
14484 if (TARGET_SOFT_FLOAT)
14487 fpu_name = "softvfp";
14489 fpu_name = "softfpa";
14493 switch (arm_fpu_arch)
14498 case FPUTYPE_FPA_EMU2:
14501 case FPUTYPE_FPA_EMU3:
14504 case FPUTYPE_MAVERICK:
14505 fpu_name = "maverick";
14508 if (TARGET_HARD_FLOAT)
14509 asm_fprintf (asm_out_file, "\t.eabi_attribute 27, 3\n");
14510 if (TARGET_HARD_FLOAT_ABI)
14511 asm_fprintf (asm_out_file, "\t.eabi_attribute 28, 1\n");
14518 asm_fprintf (asm_out_file, "\t.fpu %s\n", fpu_name);
14520 /* Some of these attributes only apply when the corresponding features
14521 are used. However we don't have any easy way of figuring this out.
14522 Conservatively record the setting that would have been used. */
14524 /* Tag_ABI_PCS_wchar_t. */
14525 asm_fprintf (asm_out_file, "\t.eabi_attribute 18, %d\n",
14526 (int)WCHAR_TYPE_SIZE / BITS_PER_UNIT);
14528 /* Tag_ABI_FP_rounding. */
14529 if (flag_rounding_math)
14530 asm_fprintf (asm_out_file, "\t.eabi_attribute 19, 1\n");
14531 if (!flag_unsafe_math_optimizations)
14533 /* Tag_ABI_FP_denomal. */
14534 asm_fprintf (asm_out_file, "\t.eabi_attribute 20, 1\n");
14535 /* Tag_ABI_FP_exceptions. */
14536 asm_fprintf (asm_out_file, "\t.eabi_attribute 21, 1\n");
14538 /* Tag_ABI_FP_user_exceptions. */
14539 if (flag_signaling_nans)
14540 asm_fprintf (asm_out_file, "\t.eabi_attribute 22, 1\n");
14541 /* Tag_ABI_FP_number_model. */
14542 asm_fprintf (asm_out_file, "\t.eabi_attribute 23, %d\n",
14543 flag_finite_math_only ? 1 : 3);
14545 /* Tag_ABI_align8_needed. */
14546 asm_fprintf (asm_out_file, "\t.eabi_attribute 24, 1\n");
14547 /* Tag_ABI_align8_preserved. */
14548 asm_fprintf (asm_out_file, "\t.eabi_attribute 25, 1\n");
14549 /* Tag_ABI_enum_size. */
14550 asm_fprintf (asm_out_file, "\t.eabi_attribute 26, %d\n",
14551 flag_short_enums ? 1 : 2);
14553 /* Tag_ABI_optimization_goals. */
14556 else if (optimize >= 2)
14562 asm_fprintf (asm_out_file, "\t.eabi_attribute 30, %d\n", val);
14564 default_file_start();
14568 arm_file_end (void)
14572 if (! thumb_call_reg_needed)
14575 switch_to_section (text_section);
14576 asm_fprintf (asm_out_file, "\t.code 16\n");
14577 ASM_OUTPUT_ALIGN (asm_out_file, 1);
14579 for (regno = 0; regno < LR_REGNUM; regno++)
14581 rtx label = thumb_call_via_label[regno];
14585 targetm.asm_out.internal_label (asm_out_file, "L",
14586 CODE_LABEL_NUMBER (label));
14587 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
14594 #ifdef AOF_ASSEMBLER
14595 /* Special functions only needed when producing AOF syntax assembler. */
14599 struct pic_chain * next;
14600 const char * symname;
14603 static struct pic_chain * aof_pic_chain = NULL;
14606 aof_pic_entry (rtx x)
14608 struct pic_chain ** chainp;
14611 if (aof_pic_label == NULL_RTX)
14613 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
14616 for (offset = 0, chainp = &aof_pic_chain; *chainp;
14617 offset += 4, chainp = &(*chainp)->next)
14618 if ((*chainp)->symname == XSTR (x, 0))
14619 return plus_constant (aof_pic_label, offset);
14621 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
14622 (*chainp)->next = NULL;
14623 (*chainp)->symname = XSTR (x, 0);
14624 return plus_constant (aof_pic_label, offset);
14628 aof_dump_pic_table (FILE *f)
14630 struct pic_chain * chain;
14632 if (aof_pic_chain == NULL)
14635 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
14636 PIC_OFFSET_TABLE_REGNUM,
14637 PIC_OFFSET_TABLE_REGNUM);
14638 fputs ("|x$adcons|\n", f);
14640 for (chain = aof_pic_chain; chain; chain = chain->next)
14642 fputs ("\tDCD\t", f);
14643 assemble_name (f, chain->symname);
14648 int arm_text_section_count = 1;
14650 /* A get_unnamed_section callback for switching to the text section. */
14653 aof_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
14655 fprintf (asm_out_file, "\tAREA |C$$code%d|, CODE, READONLY",
14656 arm_text_section_count++);
14658 fprintf (asm_out_file, ", PIC, REENTRANT");
14659 fprintf (asm_out_file, "\n");
14662 static int arm_data_section_count = 1;
14664 /* A get_unnamed_section callback for switching to the data section. */
14667 aof_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
14669 fprintf (asm_out_file, "\tAREA |C$$data%d|, DATA\n",
14670 arm_data_section_count++);
14673 /* Implement TARGET_ASM_INIT_SECTIONS.
14675 AOF Assembler syntax is a nightmare when it comes to areas, since once
14676 we change from one area to another, we can't go back again. Instead,
14677 we must create a new area with the same attributes and add the new output
14678 to that. Unfortunately, there is nothing we can do here to guarantee that
14679 two areas with the same attributes will be linked adjacently in the
14680 resulting executable, so we have to be careful not to do pc-relative
14681 addressing across such boundaries. */
14684 aof_asm_init_sections (void)
14686 text_section = get_unnamed_section (SECTION_CODE,
14687 aof_output_text_section_asm_op, NULL);
14688 data_section = get_unnamed_section (SECTION_WRITE,
14689 aof_output_data_section_asm_op, NULL);
14690 readonly_data_section = text_section;
14694 zero_init_section (void)
14696 static int zero_init_count = 1;
14698 fprintf (asm_out_file, "\tAREA |C$$zidata%d|,NOINIT\n", zero_init_count++);
14702 /* The AOF assembler is religiously strict about declarations of
14703 imported and exported symbols, so that it is impossible to declare
14704 a function as imported near the beginning of the file, and then to
14705 export it later on. It is, however, possible to delay the decision
14706 until all the functions in the file have been compiled. To get
14707 around this, we maintain a list of the imports and exports, and
14708 delete from it any that are subsequently defined. At the end of
14709 compilation we spit the remainder of the list out before the END
14714 struct import * next;
14718 static struct import * imports_list = NULL;
14721 aof_add_import (const char *name)
14723 struct import * new;
14725 for (new = imports_list; new; new = new->next)
14726 if (new->name == name)
14729 new = (struct import *) xmalloc (sizeof (struct import));
14730 new->next = imports_list;
14731 imports_list = new;
14736 aof_delete_import (const char *name)
14738 struct import ** old;
14740 for (old = &imports_list; *old; old = & (*old)->next)
14742 if ((*old)->name == name)
14744 *old = (*old)->next;
14750 int arm_main_function = 0;
14753 aof_dump_imports (FILE *f)
14755 /* The AOF assembler needs this to cause the startup code to be extracted
14756 from the library. Brining in __main causes the whole thing to work
14758 if (arm_main_function)
14760 switch_to_section (text_section);
14761 fputs ("\tIMPORT __main\n", f);
14762 fputs ("\tDCD __main\n", f);
14765 /* Now dump the remaining imports. */
14766 while (imports_list)
14768 fprintf (f, "\tIMPORT\t");
14769 assemble_name (f, imports_list->name);
14771 imports_list = imports_list->next;
14776 aof_globalize_label (FILE *stream, const char *name)
14778 default_globalize_label (stream, name);
14779 if (! strcmp (name, "main"))
14780 arm_main_function = 1;
14784 aof_file_start (void)
14786 fputs ("__r0\tRN\t0\n", asm_out_file);
14787 fputs ("__a1\tRN\t0\n", asm_out_file);
14788 fputs ("__a2\tRN\t1\n", asm_out_file);
14789 fputs ("__a3\tRN\t2\n", asm_out_file);
14790 fputs ("__a4\tRN\t3\n", asm_out_file);
14791 fputs ("__v1\tRN\t4\n", asm_out_file);
14792 fputs ("__v2\tRN\t5\n", asm_out_file);
14793 fputs ("__v3\tRN\t6\n", asm_out_file);
14794 fputs ("__v4\tRN\t7\n", asm_out_file);
14795 fputs ("__v5\tRN\t8\n", asm_out_file);
14796 fputs ("__v6\tRN\t9\n", asm_out_file);
14797 fputs ("__sl\tRN\t10\n", asm_out_file);
14798 fputs ("__fp\tRN\t11\n", asm_out_file);
14799 fputs ("__ip\tRN\t12\n", asm_out_file);
14800 fputs ("__sp\tRN\t13\n", asm_out_file);
14801 fputs ("__lr\tRN\t14\n", asm_out_file);
14802 fputs ("__pc\tRN\t15\n", asm_out_file);
14803 fputs ("__f0\tFN\t0\n", asm_out_file);
14804 fputs ("__f1\tFN\t1\n", asm_out_file);
14805 fputs ("__f2\tFN\t2\n", asm_out_file);
14806 fputs ("__f3\tFN\t3\n", asm_out_file);
14807 fputs ("__f4\tFN\t4\n", asm_out_file);
14808 fputs ("__f5\tFN\t5\n", asm_out_file);
14809 fputs ("__f6\tFN\t6\n", asm_out_file);
14810 fputs ("__f7\tFN\t7\n", asm_out_file);
14811 switch_to_section (text_section);
14815 aof_file_end (void)
14818 aof_dump_pic_table (asm_out_file);
14820 aof_dump_imports (asm_out_file);
14821 fputs ("\tEND\n", asm_out_file);
14823 #endif /* AOF_ASSEMBLER */
14826 /* Symbols in the text segment can be accessed without indirecting via the
14827 constant pool; it may take an extra binary operation, but this is still
14828 faster than indirecting via memory. Don't do this when not optimizing,
14829 since we won't be calculating al of the offsets necessary to do this
14833 arm_encode_section_info (tree decl, rtx rtl, int first)
14835 /* This doesn't work with AOF syntax, since the string table may be in
14836 a different AREA. */
14837 #ifndef AOF_ASSEMBLER
14838 if (optimize > 0 && TREE_CONSTANT (decl))
14839 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14842 /* If we are referencing a function that is weak then encode a long call
14843 flag in the function name, otherwise if the function is static or
14844 or known to be defined in this file then encode a short call flag. */
14845 if (first && DECL_P (decl))
14847 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14848 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14849 else if (! TREE_PUBLIC (decl))
14850 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14853 default_encode_section_info (decl, rtl, first);
14855 #endif /* !ARM_PE */
14858 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14860 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14861 && !strcmp (prefix, "L"))
14863 arm_ccfsm_state = 0;
14864 arm_target_insn = NULL;
14866 default_internal_label (stream, prefix, labelno);
14869 /* Output code to add DELTA to the first argument, and then jump
14870 to FUNCTION. Used for C++ multiple inheritance. */
14872 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14873 HOST_WIDE_INT delta,
14874 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14877 static int thunk_label = 0;
14880 int mi_delta = delta;
14881 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14883 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14886 mi_delta = - mi_delta;
14889 int labelno = thunk_label++;
14890 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14891 fputs ("\tldr\tr12, ", file);
14892 assemble_name (file, label);
14893 fputc ('\n', file);
14896 /* If we are generating PIC, the ldr instruction below loads
14897 "(target - 7) - .LTHUNKPCn" into r12. The pc reads as
14898 the address of the add + 8, so we have:
14900 r12 = (target - 7) - .LTHUNKPCn + (.LTHUNKPCn + 8)
14903 Note that we have "+ 1" because some versions of GNU ld
14904 don't set the low bit of the result for R_ARM_REL32
14905 relocations against thumb function symbols. */
14906 ASM_GENERATE_INTERNAL_LABEL (labelpc, "LTHUNKPC", labelno);
14907 assemble_name (file, labelpc);
14908 fputs (":\n", file);
14909 fputs ("\tadd\tr12, pc, r12\n", file);
14912 while (mi_delta != 0)
14914 if ((mi_delta & (3 << shift)) == 0)
14918 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14919 mi_op, this_regno, this_regno,
14920 mi_delta & (0xff << shift));
14921 mi_delta &= ~(0xff << shift);
14927 fprintf (file, "\tbx\tr12\n");
14928 ASM_OUTPUT_ALIGN (file, 2);
14929 assemble_name (file, label);
14930 fputs (":\n", file);
14933 /* Output ".word .LTHUNKn-7-.LTHUNKPCn". */
14934 rtx tem = XEXP (DECL_RTL (function), 0);
14935 tem = gen_rtx_PLUS (GET_MODE (tem), tem, GEN_INT (-7));
14936 tem = gen_rtx_MINUS (GET_MODE (tem),
14938 gen_rtx_SYMBOL_REF (Pmode,
14939 ggc_strdup (labelpc)));
14940 assemble_integer (tem, 4, BITS_PER_WORD, 1);
14943 /* Output ".word .LTHUNKn". */
14944 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14948 fputs ("\tb\t", file);
14949 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14950 if (NEED_PLT_RELOC)
14951 fputs ("(PLT)", file);
14952 fputc ('\n', file);
14957 arm_emit_vector_const (FILE *file, rtx x)
14960 const char * pattern;
14962 gcc_assert (GET_CODE (x) == CONST_VECTOR);
14964 switch (GET_MODE (x))
14966 case V2SImode: pattern = "%08x"; break;
14967 case V4HImode: pattern = "%04x"; break;
14968 case V8QImode: pattern = "%02x"; break;
14969 default: gcc_unreachable ();
14972 fprintf (file, "0x");
14973 for (i = CONST_VECTOR_NUNITS (x); i--;)
14977 element = CONST_VECTOR_ELT (x, i);
14978 fprintf (file, pattern, INTVAL (element));
14985 arm_output_load_gr (rtx *operands)
14992 if (GET_CODE (operands [1]) != MEM
14993 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14994 || GET_CODE (reg = XEXP (sum, 0)) != REG
14995 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14996 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14997 return "wldrw%?\t%0, %1";
14999 /* Fix up an out-of-range load of a GR register. */
15000 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
15001 wcgr = operands[0];
15003 output_asm_insn ("ldr%?\t%0, %1", operands);
15005 operands[0] = wcgr;
15007 output_asm_insn ("tmcr%?\t%0, %1", operands);
15008 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
15013 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
15015 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
15016 named arg and all anonymous args onto the stack.
15017 XXX I know the prologue shouldn't be pushing registers, but it is faster
15021 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
15022 enum machine_mode mode ATTRIBUTE_UNUSED,
15023 tree type ATTRIBUTE_UNUSED,
15025 int second_time ATTRIBUTE_UNUSED)
15027 cfun->machine->uses_anonymous_args = 1;
15028 if (cum->nregs < NUM_ARG_REGS)
15029 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
15032 /* Return nonzero if the CONSUMER instruction (a store) does not need
15033 PRODUCER's value to calculate the address. */
15036 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
15038 rtx value = PATTERN (producer);
15039 rtx addr = PATTERN (consumer);
15041 if (GET_CODE (value) == COND_EXEC)
15042 value = COND_EXEC_CODE (value);
15043 if (GET_CODE (value) == PARALLEL)
15044 value = XVECEXP (value, 0, 0);
15045 value = XEXP (value, 0);
15046 if (GET_CODE (addr) == COND_EXEC)
15047 addr = COND_EXEC_CODE (addr);
15048 if (GET_CODE (addr) == PARALLEL)
15049 addr = XVECEXP (addr, 0, 0);
15050 addr = XEXP (addr, 0);
15052 return !reg_overlap_mentioned_p (value, addr);
15055 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
15056 have an early register shift value or amount dependency on the
15057 result of PRODUCER. */
15060 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
15062 rtx value = PATTERN (producer);
15063 rtx op = PATTERN (consumer);
15066 if (GET_CODE (value) == COND_EXEC)
15067 value = COND_EXEC_CODE (value);
15068 if (GET_CODE (value) == PARALLEL)
15069 value = XVECEXP (value, 0, 0);
15070 value = XEXP (value, 0);
15071 if (GET_CODE (op) == COND_EXEC)
15072 op = COND_EXEC_CODE (op);
15073 if (GET_CODE (op) == PARALLEL)
15074 op = XVECEXP (op, 0, 0);
15077 early_op = XEXP (op, 0);
15078 /* This is either an actual independent shift, or a shift applied to
15079 the first operand of another operation. We want the whole shift
15081 if (GET_CODE (early_op) == REG)
15084 return !reg_overlap_mentioned_p (value, early_op);
15087 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
15088 have an early register shift value dependency on the result of
15092 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
15094 rtx value = PATTERN (producer);
15095 rtx op = PATTERN (consumer);
15098 if (GET_CODE (value) == COND_EXEC)
15099 value = COND_EXEC_CODE (value);
15100 if (GET_CODE (value) == PARALLEL)
15101 value = XVECEXP (value, 0, 0);
15102 value = XEXP (value, 0);
15103 if (GET_CODE (op) == COND_EXEC)
15104 op = COND_EXEC_CODE (op);
15105 if (GET_CODE (op) == PARALLEL)
15106 op = XVECEXP (op, 0, 0);
15109 early_op = XEXP (op, 0);
15111 /* This is either an actual independent shift, or a shift applied to
15112 the first operand of another operation. We want the value being
15113 shifted, in either case. */
15114 if (GET_CODE (early_op) != REG)
15115 early_op = XEXP (early_op, 0);
15117 return !reg_overlap_mentioned_p (value, early_op);
15120 /* Return nonzero if the CONSUMER (a mul or mac op) does not
15121 have an early register mult dependency on the result of
15125 arm_no_early_mul_dep (rtx producer, rtx consumer)
15127 rtx value = PATTERN (producer);
15128 rtx op = PATTERN (consumer);
15130 if (GET_CODE (value) == COND_EXEC)
15131 value = COND_EXEC_CODE (value);
15132 if (GET_CODE (value) == PARALLEL)
15133 value = XVECEXP (value, 0, 0);
15134 value = XEXP (value, 0);
15135 if (GET_CODE (op) == COND_EXEC)
15136 op = COND_EXEC_CODE (op);
15137 if (GET_CODE (op) == PARALLEL)
15138 op = XVECEXP (op, 0, 0);
15141 return (GET_CODE (op) == PLUS
15142 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
15146 /* We can't rely on the caller doing the proper promotion when
15147 using APCS or ATPCS. */
15150 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
15152 return !TARGET_AAPCS_BASED;
15156 /* AAPCS based ABIs use short enums by default. */
15159 arm_default_short_enums (void)
15161 return TARGET_AAPCS_BASED && arm_abi != ARM_ABI_AAPCS_LINUX;
15165 /* AAPCS requires that anonymous bitfields affect structure alignment. */
15168 arm_align_anon_bitfield (void)
15170 return TARGET_AAPCS_BASED;
15174 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
15177 arm_cxx_guard_type (void)
15179 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
15183 /* The EABI says test the least significant bit of a guard variable. */
15186 arm_cxx_guard_mask_bit (void)
15188 return TARGET_AAPCS_BASED;
15192 /* The EABI specifies that all array cookies are 8 bytes long. */
15195 arm_get_cookie_size (tree type)
15199 if (!TARGET_AAPCS_BASED)
15200 return default_cxx_get_cookie_size (type);
15202 size = build_int_cst (sizetype, 8);
15207 /* The EABI says that array cookies should also contain the element size. */
15210 arm_cookie_has_size (void)
15212 return TARGET_AAPCS_BASED;
15216 /* The EABI says constructors and destructors should return a pointer to
15217 the object constructed/destroyed. */
15220 arm_cxx_cdtor_returns_this (void)
15222 return TARGET_AAPCS_BASED;
15225 /* The EABI says that an inline function may never be the key
15229 arm_cxx_key_method_may_be_inline (void)
15231 return !TARGET_AAPCS_BASED;
15235 arm_cxx_determine_class_data_visibility (tree decl)
15237 if (!TARGET_AAPCS_BASED)
15240 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
15241 is exported. However, on systems without dynamic vague linkage,
15242 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
15243 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
15244 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
15246 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
15247 DECL_VISIBILITY_SPECIFIED (decl) = 1;
15251 arm_cxx_class_data_always_comdat (void)
15253 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
15254 vague linkage if the class has no key function. */
15255 return !TARGET_AAPCS_BASED;
15259 /* The EABI says __aeabi_atexit should be used to register static
15263 arm_cxx_use_aeabi_atexit (void)
15265 return TARGET_AAPCS_BASED;
15270 arm_set_return_address (rtx source, rtx scratch)
15272 arm_stack_offsets *offsets;
15273 HOST_WIDE_INT delta;
15275 unsigned long saved_regs;
15277 saved_regs = arm_compute_save_reg_mask ();
15279 if ((saved_regs & (1 << LR_REGNUM)) == 0)
15280 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
15283 if (frame_pointer_needed)
15284 addr = plus_constant(hard_frame_pointer_rtx, -4);
15287 /* LR will be the first saved register. */
15288 offsets = arm_get_frame_offsets ();
15289 delta = offsets->outgoing_args - (offsets->frame + 4);
15294 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
15295 GEN_INT (delta & ~4095)));
15300 addr = stack_pointer_rtx;
15302 addr = plus_constant (addr, delta);
15304 emit_move_insn (gen_frame_mem (Pmode, addr), source);
15310 thumb_set_return_address (rtx source, rtx scratch)
15312 arm_stack_offsets *offsets;
15313 HOST_WIDE_INT delta;
15316 unsigned long mask;
15318 emit_insn (gen_rtx_USE (VOIDmode, source));
15320 mask = thumb_compute_save_reg_mask ();
15321 if (mask & (1 << LR_REGNUM))
15323 offsets = arm_get_frame_offsets ();
15325 /* Find the saved regs. */
15326 if (frame_pointer_needed)
15328 delta = offsets->soft_frame - offsets->saved_args;
15329 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
15333 delta = offsets->outgoing_args - offsets->saved_args;
15336 /* Allow for the stack frame. */
15337 if (TARGET_BACKTRACE)
15339 /* The link register is always the first saved register. */
15342 /* Construct the address. */
15343 addr = gen_rtx_REG (SImode, reg);
15344 if ((reg != SP_REGNUM && delta >= 128)
15347 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
15348 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
15352 addr = plus_constant (addr, delta);
15354 emit_move_insn (gen_frame_mem (Pmode, addr), source);
15357 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
15360 /* Implements target hook vector_mode_supported_p. */
15362 arm_vector_mode_supported_p (enum machine_mode mode)
15364 if ((mode == V2SImode)
15365 || (mode == V4HImode)
15366 || (mode == V8QImode))
15372 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
15373 ARM insns and therefore guarantee that the shift count is modulo 256.
15374 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
15375 guarantee no particular behavior for out-of-range counts. */
15377 static unsigned HOST_WIDE_INT
15378 arm_shift_truncation_mask (enum machine_mode mode)
15380 return mode == SImode ? 255 : 0;
15384 /* Map internal gcc register numbers to DWARF2 register numbers. */
15387 arm_dbx_register_number (unsigned int regno)
15392 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
15393 compatibility. The EABI defines them as registers 96-103. */
15394 if (IS_FPA_REGNUM (regno))
15395 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
15397 if (IS_VFP_REGNUM (regno))
15398 return 64 + regno - FIRST_VFP_REGNUM;
15400 if (IS_IWMMXT_GR_REGNUM (regno))
15401 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
15403 if (IS_IWMMXT_REGNUM (regno))
15404 return 112 + regno - FIRST_IWMMXT_REGNUM;
15406 gcc_unreachable ();
15410 #ifdef TARGET_UNWIND_INFO
15411 /* Emit unwind directives for a store-multiple instruction. This should
15412 only ever be generated by the function prologue code, so we expect it
15413 to have a particular form. */
15416 arm_unwind_emit_stm (FILE * asm_out_file, rtx p)
15419 HOST_WIDE_INT offset;
15420 HOST_WIDE_INT nregs;
15426 /* First insn will adjust the stack pointer. */
15427 e = XVECEXP (p, 0, 0);
15428 if (GET_CODE (e) != SET
15429 || GET_CODE (XEXP (e, 0)) != REG
15430 || REGNO (XEXP (e, 0)) != SP_REGNUM
15431 || GET_CODE (XEXP (e, 1)) != PLUS)
15434 offset = -INTVAL (XEXP (XEXP (e, 1), 1));
15435 nregs = XVECLEN (p, 0) - 1;
15437 reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
15440 /* The function prologue may also push pc, but not annotate it as it is
15441 never restored. We turn this into a stack pointer adjustment. */
15442 if (nregs * 4 == offset - 4)
15444 fprintf (asm_out_file, "\t.pad #4\n");
15449 else if (IS_VFP_REGNUM (reg))
15451 /* FPA register saves use an additional word. */
15455 else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
15457 /* FPA registers are done differently. */
15458 asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
15462 /* Unknown register type. */
15465 /* If the stack increment doesn't match the size of the saved registers,
15466 something has gone horribly wrong. */
15467 if (offset != nregs * reg_size)
15470 fprintf (asm_out_file, "\t.save {");
15474 /* The remaining insns will describe the stores. */
15475 for (i = 1; i <= nregs; i++)
15477 /* Expect (set (mem <addr>) (reg)).
15478 Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
15479 e = XVECEXP (p, 0, i);
15480 if (GET_CODE (e) != SET
15481 || GET_CODE (XEXP (e, 0)) != MEM
15482 || GET_CODE (XEXP (e, 1)) != REG)
15485 reg = REGNO (XEXP (e, 1));
15490 fprintf (asm_out_file, ", ");
15491 /* We can't use %r for vfp because we need to use the
15492 double precision register names. */
15493 if (IS_VFP_REGNUM (reg))
15494 asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
15496 asm_fprintf (asm_out_file, "%r", reg);
15498 #ifdef ENABLE_CHECKING
15499 /* Check that the addresses are consecutive. */
15500 e = XEXP (XEXP (e, 0), 0);
15501 if (GET_CODE (e) == PLUS)
15503 offset += reg_size;
15504 if (GET_CODE (XEXP (e, 0)) != REG
15505 || REGNO (XEXP (e, 0)) != SP_REGNUM
15506 || GET_CODE (XEXP (e, 1)) != CONST_INT
15507 || offset != INTVAL (XEXP (e, 1)))
15511 || GET_CODE (e) != REG
15512 || REGNO (e) != SP_REGNUM)
15516 fprintf (asm_out_file, "}\n");
15519 /* Emit unwind directives for a SET. */
15522 arm_unwind_emit_set (FILE * asm_out_file, rtx p)
15529 switch (GET_CODE (e0))
15532 /* Pushing a single register. */
15533 if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
15534 || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
15535 || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
15538 asm_fprintf (asm_out_file, "\t.save ");
15539 if (IS_VFP_REGNUM (REGNO (e1)))
15540 asm_fprintf(asm_out_file, "{d%d}\n",
15541 (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
15543 asm_fprintf(asm_out_file, "{%r}\n", REGNO (e1));
15547 if (REGNO (e0) == SP_REGNUM)
15549 /* A stack increment. */
15550 if (GET_CODE (e1) != PLUS
15551 || GET_CODE (XEXP (e1, 0)) != REG
15552 || REGNO (XEXP (e1, 0)) != SP_REGNUM
15553 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
15556 asm_fprintf (asm_out_file, "\t.pad #%wd\n",
15557 -INTVAL (XEXP (e1, 1)));
15559 else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
15561 HOST_WIDE_INT offset;
15564 if (GET_CODE (e1) == PLUS)
15566 if (GET_CODE (XEXP (e1, 0)) != REG
15567 || GET_CODE (XEXP (e1, 1)) != CONST_INT)
15569 reg = REGNO (XEXP (e1, 0));
15570 offset = INTVAL (XEXP (e1, 1));
15571 asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
15572 HARD_FRAME_POINTER_REGNUM, reg,
15573 INTVAL (XEXP (e1, 1)));
15575 else if (GET_CODE (e1) == REG)
15578 asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
15579 HARD_FRAME_POINTER_REGNUM, reg);
15584 else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
15586 /* Move from sp to reg. */
15587 asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
15589 else if (GET_CODE (e1) == PLUS
15590 && GET_CODE (XEXP (e1, 0)) == REG
15591 && REGNO (XEXP (e1, 0)) == SP_REGNUM
15592 && GET_CODE (XEXP (e1, 1)) == CONST_INT)
15594 /* Set reg to offset from sp. */
15595 asm_fprintf (asm_out_file, "\t.movsp %r, #%d\n",
15596 REGNO (e0), (int)INTVAL(XEXP (e1, 1)));
15608 /* Emit unwind directives for the given insn. */
15611 arm_unwind_emit (FILE * asm_out_file, rtx insn)
15615 if (!ARM_EABI_UNWIND_TABLES)
15618 if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
15621 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
15623 pat = XEXP (pat, 0);
15625 pat = PATTERN (insn);
15627 switch (GET_CODE (pat))
15630 arm_unwind_emit_set (asm_out_file, pat);
15634 /* Store multiple. */
15635 arm_unwind_emit_stm (asm_out_file, pat);
15644 /* Output a reference from a function exception table to the type_info
15645 object X. The EABI specifies that the symbol should be relocated by
15646 an R_ARM_TARGET2 relocation. */
15649 arm_output_ttype (rtx x)
15651 fputs ("\t.word\t", asm_out_file);
15652 output_addr_const (asm_out_file, x);
15653 /* Use special relocations for symbol references. */
15654 if (GET_CODE (x) != CONST_INT)
15655 fputs ("(TARGET2)", asm_out_file);
15656 fputc ('\n', asm_out_file);
15660 #endif /* TARGET_UNWIND_INFO */
15663 /* Output unwind directives for the start/end of a function. */
15666 arm_output_fn_unwind (FILE * f, bool prologue)
15668 if (!ARM_EABI_UNWIND_TABLES)
15672 fputs ("\t.fnstart\n", f);
15674 fputs ("\t.fnend\n", f);
15678 arm_emit_tls_decoration (FILE *fp, rtx x)
15680 enum tls_reloc reloc;
15683 val = XVECEXP (x, 0, 0);
15684 reloc = INTVAL (XVECEXP (x, 0, 1));
15686 output_addr_const (fp, val);
15691 fputs ("(tlsgd)", fp);
15694 fputs ("(tlsldm)", fp);
15697 fputs ("(tlsldo)", fp);
15700 fputs ("(gottpoff)", fp);
15703 fputs ("(tpoff)", fp);
15706 gcc_unreachable ();
15714 fputs (" + (. - ", fp);
15715 output_addr_const (fp, XVECEXP (x, 0, 2));
15717 output_addr_const (fp, XVECEXP (x, 0, 3));
15728 arm_output_addr_const_extra (FILE *fp, rtx x)
15730 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
15731 return arm_emit_tls_decoration (fp, x);
15732 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PIC_LABEL)
15735 int labelno = INTVAL (XVECEXP (x, 0, 0));
15737 ASM_GENERATE_INTERNAL_LABEL (label, "LPIC", labelno);
15738 assemble_name_raw (fp, label);
15742 else if (GET_CODE (x) == CONST_VECTOR)
15743 return arm_emit_vector_const (fp, x);
15748 #include "gt-arm.h"