/* Machine description for AArch64 architecture.
   Copyright (C) 2009-2017 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#ifndef GCC_AARCH64_PROTOS_H
#define GCC_AARCH64_PROTOS_H

#include "input.h"

/* SYMBOL_SMALL_ABSOLUTE: Generate symbol accesses through
   high and lo relocs that calculate the base address using a PC
   relative reloc.
   So to get the address of foo, we generate
   adrp x0, foo
   add  x0, x0, :lo12:foo

   To load or store something to foo, we could use the corresponding
   load/store variants that generate an
   ldr x0, [x0, :lo12:foo]
   or
   str x1, [x0, :lo12:foo]

   This corresponds to the small code model of the compiler.

   SYMBOL_SMALL_GOT_4G: Similar to the one above, but this
   gives us the GOT entry of the symbol being referred to.
   Calculating the GOT entry for foo is thus done using the
   following sequence of instructions.  The ADRP instruction
   gets us to the page containing the GOT entry of the symbol,
   and the got_lo12 reloc gives us the actual offset within it;
   together, the base and offset let us address a 4G-sized GOT table.

   adrp x0, :got:foo
   ldr  x0, [x0, :gotoff_lo12:foo]

   This corresponds to the small PIC model of the compiler.

   SYMBOL_SMALL_GOT_28K: Similar to SYMBOL_SMALL_GOT_4G, but used for symbols
   restricted to a 28K GOT table size.

   ldr reg, [gp, #:gotpage_lo15:sym]

   This corresponds to the -fpic variant of the small memory model of the
   compiler.

   SYMBOL_SMALL_TLSGD
   SYMBOL_SMALL_TLSDESC
   SYMBOL_SMALL_TLSIE
   SYMBOL_TINY_TLSIE
   SYMBOL_TLSLE12
   SYMBOL_TLSLE24
   SYMBOL_TLSLE32
   SYMBOL_TLSLE48
   Each of these represents a thread-local symbol, and corresponds to the
   thread local storage relocation operator for the symbol being referred to.

   SYMBOL_TINY_ABSOLUTE

   Generate symbol accesses as a PC relative address using a single
   instruction.  To compute the address of symbol foo, we generate:

   ADR x0, foo

   SYMBOL_TINY_GOT

   Generate symbol accesses via the GOT using a single PC relative
   instruction.  To compute the address of symbol foo, we generate:

   ldr t0, :got:foo

   The value of foo can subsequently be read using:

   ldrb t0, [t0]

   SYMBOL_FORCE_TO_MEM: Global variables are addressed using the
   constant pool.  All variable addresses are spilled into constant
   pools.  The constant pools themselves are addressed using PC
   relative accesses.  This only works for the large code model.  */
enum aarch64_symbol_type
{
  SYMBOL_SMALL_ABSOLUTE,
  SYMBOL_SMALL_GOT_28K,
  SYMBOL_SMALL_GOT_4G,
  SYMBOL_SMALL_TLSGD,
  SYMBOL_SMALL_TLSDESC,
  SYMBOL_SMALL_TLSIE,
  SYMBOL_TINY_ABSOLUTE,
  SYMBOL_TINY_GOT,
  SYMBOL_TINY_TLSIE,
  SYMBOL_TLSLE12,
  SYMBOL_TLSLE24,
  SYMBOL_TLSLE32,
  SYMBOL_TLSLE48,
  SYMBOL_FORCE_TO_MEM
};

/* A set of tuning parameters contains references to size and time
   cost models and vectors for address cost calculations, register
   move costs and memory move costs.  */

/* Scaled addressing modes can vary cost depending on the mode of the
   value to be loaded/stored.  QImode values cannot use scaled
   addressing modes.  */

struct scale_addr_mode_cost
{
  const int hi;
  const int si;
  const int di;
  const int ti;
};

/* Additional cost for addresses.  */
struct cpu_addrcost_table
{
  const struct scale_addr_mode_cost addr_scale_costs;
  const int pre_modify;
  const int post_modify;
  const int register_offset;
  const int register_sextend;
  const int register_zextend;
  const int imm_offset;
};

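/* As an illustrative sketch only (the table name and values below are
   hypothetical), a core whose scaled addressing is free except for TImode
   and whose pre/post-increment addressing costs one extra unit could be
   described as:

     static const struct cpu_addrcost_table example_addrcost_table =
     {
       {
         0, // hi
         0, // si
         0, // di
         1  // ti
       },
       1, // pre_modify
       1, // post_modify
       0, // register_offset
       0, // register_sextend
       0, // register_zextend
       0  // imm_offset
     };

   The per-core tables used by the backend live with the rest of the tuning
   data in aarch64.c.  */
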
/* Additional costs for register copies.  Cost is for one register.  */
struct cpu_regmove_cost
{
  const int GP2GP;
  const int GP2FP;
  const int FP2GP;
  const int FP2FP;
};

/* Cost for vector insn classes.  */
struct cpu_vector_cost
{
  const int scalar_int_stmt_cost;      /* Cost of any int scalar operation,
                                          excluding load and store.  */
  const int scalar_fp_stmt_cost;       /* Cost of any fp scalar operation,
                                          excluding load and store.  */
  const int scalar_load_cost;          /* Cost of scalar load.  */
  const int scalar_store_cost;         /* Cost of scalar store.  */
  const int vec_int_stmt_cost;         /* Cost of any int vector operation,
                                          excluding load, store, permute,
                                          vector-to-scalar and
                                          scalar-to-vector operation.  */
  const int vec_fp_stmt_cost;          /* Cost of any fp vector operation,
                                          excluding load, store, permute,
                                          vector-to-scalar and
                                          scalar-to-vector operation.  */
  const int vec_permute_cost;          /* Cost of permute operation.  */
  const int vec_to_scalar_cost;        /* Cost of vec-to-scalar operation.  */
  const int scalar_to_vec_cost;        /* Cost of scalar-to-vector
                                          operation.  */
  const int vec_align_load_cost;       /* Cost of aligned vector load.  */
  const int vec_unalign_load_cost;     /* Cost of unaligned vector load.  */
  const int vec_unalign_store_cost;    /* Cost of unaligned vector store.  */
  const int vec_store_cost;            /* Cost of vector store.  */
  const int cond_taken_branch_cost;    /* Cost of taken branch.  */
  const int cond_not_taken_branch_cost; /* Cost of not taken branch.  */
};

/* Branch costs.  */
struct cpu_branch_cost
{
  const int predictable;    /* Predictable branch or optimizing for size.  */
  const int unpredictable;  /* Unpredictable branch or optimizing for speed.  */
};

/* Control approximate alternatives to certain FP operators.  */
#define AARCH64_APPROX_MODE(MODE) \
  ((MIN_MODE_FLOAT <= (MODE) && (MODE) <= MAX_MODE_FLOAT) \
   ? (1 << ((MODE) - MIN_MODE_FLOAT)) \
   : (MIN_MODE_VECTOR_FLOAT <= (MODE) && (MODE) <= MAX_MODE_VECTOR_FLOAT) \
     ? (1 << ((MODE) - MIN_MODE_VECTOR_FLOAT \
              + MAX_MODE_FLOAT - MIN_MODE_FLOAT + 1)) \
     : (0))
#define AARCH64_APPROX_NONE (0)
#define AARCH64_APPROX_ALL (-1)

/* Allowed modes for approximations.  */
struct cpu_approx_modes
{
  const unsigned int division;     /* Division.  */
  const unsigned int sqrt;         /* Square root.  */
  const unsigned int recip_sqrt;   /* Reciprocal square root.  */
};

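/* AARCH64_APPROX_MODE maps each scalar FP mode to one bit and each vector
   FP mode to a bit above the scalar range, so every cpu_approx_modes field
   is a per-mode bitmask.  As an illustrative sketch only (the name and the
   chosen modes are hypothetical), a tuning target that allows approximate
   square root everywhere but approximate division only for DFmode could be
   described as:

     static const cpu_approx_modes example_approx_modes =
     {
       AARCH64_APPROX_MODE (DFmode), // division
       AARCH64_APPROX_ALL,           // sqrt
       AARCH64_APPROX_NONE           // recip_sqrt
     };
*/
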
/* Cache prefetch settings for prefetch-loop-arrays.  */
struct cpu_prefetch_tune
{
  const int num_slots;
  const int l1_cache_size;
  const int l1_cache_line_size;
  const int l2_cache_size;
  const int default_opt_level;
};

struct tune_params
{
  const struct cpu_cost_table *insn_extra_cost;
  const struct cpu_addrcost_table *addr_cost;
  const struct cpu_regmove_cost *regmove_cost;
  const struct cpu_vector_cost *vec_costs;
  const struct cpu_branch_cost *branch_costs;
  const struct cpu_approx_modes *approx_modes;
  int memmov_cost;
  int issue_rate;
  unsigned int fusible_ops;
  int function_align;
  int jump_align;
  int loop_align;
  int int_reassoc_width;
  int fp_reassoc_width;
  int vec_reassoc_width;
  int min_div_recip_mul_sf;
  int min_div_recip_mul_df;
  /* Value for aarch64_case_values_threshold; or 0 for the default.  */
  unsigned int max_case_values;
  /* An enum specifying how to take into account CPU autoprefetch capabilities
     during instruction scheduling:
     - AUTOPREFETCHER_OFF: Do not take autoprefetch capabilities into account.
     - AUTOPREFETCHER_WEAK: Attempt to sort sequences of loads/stores in order
       of offsets, but allow the pipeline hazard recognizer to alter that
       order to maximize multi-issue opportunities.
     - AUTOPREFETCHER_STRONG: Attempt to sort sequences of loads/stores in
       order of offsets and prefer this even if it restricts multi-issue
       opportunities.  */

  enum aarch64_autoprefetch_model
  {
    AUTOPREFETCHER_OFF,
    AUTOPREFETCHER_WEAK,
    AUTOPREFETCHER_STRONG
  } autoprefetcher_model;

  unsigned int extra_tuning_flags;

  /* Place the prefetch struct pointer at the end so that a missing element
     in a tune_params initializer (e.g., from an erroneous merge) triggers a
     type-checking error.  */
  const struct cpu_prefetch_tune *prefetch;
};

#define AARCH64_FUSION_PAIR(x, name) \
  AARCH64_FUSE_##name##_index,
/* Supported fusion operations.  */
enum aarch64_fusion_pairs_index
{
#include "aarch64-fusion-pairs.def"
  AARCH64_FUSE_index_END
};

#define AARCH64_FUSION_PAIR(x, name) \
  AARCH64_FUSE_##name = (1u << AARCH64_FUSE_##name##_index),
/* Supported fusion operations.  */
enum aarch64_fusion_pairs
{
  AARCH64_FUSE_NOTHING = 0,
#include "aarch64-fusion-pairs.def"
  AARCH64_FUSE_ALL = (1u << AARCH64_FUSE_index_END) - 1
};

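/* The two definitions above expand the AARCH64_FUSION_PAIR entries in
   aarch64-fusion-pairs.def twice: first into per-pair index values, then
   into single-bit masks, so individual pairs can be tested in a tuning
   target's fusible_ops bitmask.  As an illustrative sketch (the entry shown
   here is hypothetical), a .def entry of the form

     AARCH64_FUSION_PAIR ("example", EXAMPLE)

   would produce AARCH64_FUSE_EXAMPLE_index in the first enum and
   AARCH64_FUSE_EXAMPLE = (1u << AARCH64_FUSE_EXAMPLE_index) in the second.
   The tuning-flag definitions below use the same pattern with
   aarch64-tuning-flags.def.  */
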
#define AARCH64_EXTRA_TUNING_OPTION(x, name) \
  AARCH64_EXTRA_TUNE_##name##_index,
/* Supported tuning flags indexes.  */
enum aarch64_extra_tuning_flags_index
{
#include "aarch64-tuning-flags.def"
  AARCH64_EXTRA_TUNE_index_END
};


#define AARCH64_EXTRA_TUNING_OPTION(x, name) \
  AARCH64_EXTRA_TUNE_##name = (1u << AARCH64_EXTRA_TUNE_##name##_index),
/* Supported tuning flags.  */
enum aarch64_extra_tuning_flags
{
  AARCH64_EXTRA_TUNE_NONE = 0,
#include "aarch64-tuning-flags.def"
  AARCH64_EXTRA_TUNE_ALL = (1u << AARCH64_EXTRA_TUNE_index_END) - 1
};

/* Enum describing the various ways that the
   aarch64_parse_{arch,tune,cpu,extension} functions can fail.
   This way their callers can choose what kind of error to give.  */

enum aarch64_parse_opt_result
{
  AARCH64_PARSE_OK,               /* Parsing was successful.  */
  AARCH64_PARSE_MISSING_ARG,      /* Missing argument.  */
  AARCH64_PARSE_INVALID_FEATURE,  /* Invalid feature modifier.  */
  AARCH64_PARSE_INVALID_ARG       /* Invalid arch, tune, cpu arg.  */
};

/* Enum to distinguish which type of check is to be done in
   aarch64_simd_valid_immediate.  This is used as a bitmask where
   AARCH64_CHECK_MOV has both bits set.  Thus AARCH64_CHECK_MOV will
   perform all checks.  Adding new check types would require extending the
   bitmask accordingly.  */
enum simd_immediate_check {
  AARCH64_CHECK_ORR  = 1 << 0,
  AARCH64_CHECK_BIC  = 1 << 1,
  AARCH64_CHECK_MOV  = AARCH64_CHECK_ORR | AARCH64_CHECK_BIC
};

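/* The declarations of aarch64_simd_valid_immediate and
   aarch64_output_simd_mov_immediate below default this argument to
   AARCH64_CHECK_MOV, so a plain call performs every check.  As an
   illustrative sketch (op and mode stand for the candidate immediate and
   its mode), a caller interested only in immediates usable by a vector ORR
   would pass the flag explicitly:

     bool ok = aarch64_simd_valid_immediate (op, mode, false, NULL,
                                             AARCH64_CHECK_ORR);
*/
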
extern struct tune_params aarch64_tune_params;

HOST_WIDE_INT aarch64_initial_elimination_offset (unsigned, unsigned);
int aarch64_get_condition_code (rtx);
bool aarch64_address_valid_for_prefetch_p (rtx, bool);
bool aarch64_bitmask_imm (HOST_WIDE_INT val, machine_mode);
unsigned HOST_WIDE_INT aarch64_and_split_imm1 (HOST_WIDE_INT val_in);
unsigned HOST_WIDE_INT aarch64_and_split_imm2 (HOST_WIDE_INT val_in);
bool aarch64_and_bitmask_imm (unsigned HOST_WIDE_INT val_in, machine_mode mode);
int aarch64_branch_cost (bool, bool);
enum aarch64_symbol_type aarch64_classify_symbolic_expression (rtx);
bool aarch64_can_const_movi_rtx_p (rtx x, machine_mode mode);
bool aarch64_const_vec_all_same_int_p (rtx, HOST_WIDE_INT);
bool aarch64_constant_address_p (rtx);
bool aarch64_emit_approx_div (rtx, rtx, rtx);
bool aarch64_emit_approx_sqrt (rtx, rtx, bool);
void aarch64_expand_call (rtx, rtx, bool);
bool aarch64_expand_movmem (rtx *);
bool aarch64_float_const_zero_rtx_p (rtx);
bool aarch64_float_const_rtx_p (rtx);
bool aarch64_function_arg_regno_p (unsigned);
bool aarch64_fusion_enabled_p (enum aarch64_fusion_pairs);
bool aarch64_gen_movmemqi (rtx *);
bool aarch64_gimple_fold_builtin (gimple_stmt_iterator *);
bool aarch64_is_extend_from_extract (scalar_int_mode, rtx, rtx);
bool aarch64_is_long_call_p (rtx);
bool aarch64_is_noplt_call_p (rtx);
bool aarch64_label_mentioned_p (rtx);
void aarch64_declare_function_name (FILE *, const char*, tree);
bool aarch64_legitimate_pic_operand_p (rtx);
bool aarch64_mask_and_shift_for_ubfiz_p (scalar_int_mode, rtx, rtx);
bool aarch64_zero_extend_const_eq (machine_mode, rtx, machine_mode, rtx);
bool aarch64_move_imm (HOST_WIDE_INT, machine_mode);
bool aarch64_mov_operand_p (rtx, machine_mode);
rtx aarch64_reverse_mask (machine_mode);
bool aarch64_offset_7bit_signed_scaled_p (machine_mode, HOST_WIDE_INT);
char *aarch64_output_scalar_simd_mov_immediate (rtx, scalar_int_mode);
char *aarch64_output_simd_mov_immediate (rtx, machine_mode, unsigned,
                        enum simd_immediate_check w = AARCH64_CHECK_MOV);
bool aarch64_pad_reg_upward (machine_mode, const_tree, bool);
bool aarch64_regno_ok_for_base_p (int, bool);
bool aarch64_regno_ok_for_index_p (int, bool);
bool aarch64_reinterpret_float_as_int (rtx value, unsigned HOST_WIDE_INT *fail);
bool aarch64_simd_check_vect_par_cnst_half (rtx op, machine_mode mode,
                                            bool high);
bool aarch64_simd_imm_zero_p (rtx, machine_mode);
bool aarch64_simd_scalar_immediate_valid_for_move (rtx, scalar_int_mode);
bool aarch64_simd_shift_imm_p (rtx, machine_mode, bool);
bool aarch64_simd_valid_immediate (rtx, machine_mode, bool,
                                   struct simd_immediate_info *,
                        enum simd_immediate_check w = AARCH64_CHECK_MOV);
bool aarch64_split_dimode_const_store (rtx, rtx);
bool aarch64_symbolic_address_p (rtx);
bool aarch64_uimm12_shift (HOST_WIDE_INT);
bool aarch64_use_return_insn_p (void);
const char *aarch64_mangle_builtin_type (const_tree);
const char *aarch64_output_casesi (rtx *);

enum aarch64_symbol_type aarch64_classify_symbol (rtx, rtx);
enum aarch64_symbol_type aarch64_classify_tls_symbol (rtx);
enum reg_class aarch64_regno_regclass (unsigned);
int aarch64_asm_preferred_eh_data_format (int, int);
int aarch64_fpconst_pow_of_2 (rtx);
machine_mode aarch64_hard_regno_caller_save_mode (unsigned, unsigned,
                                                  machine_mode);
int aarch64_uxt_size (int, HOST_WIDE_INT);
int aarch64_vec_fpconst_pow_of_2 (rtx);
rtx aarch64_eh_return_handler_rtx (void);
rtx aarch64_mask_from_zextract_ops (rtx, rtx);
const char *aarch64_output_move_struct (rtx *operands);
rtx aarch64_return_addr (int, rtx);
rtx aarch64_simd_gen_const_vector_dup (machine_mode, HOST_WIDE_INT);
bool aarch64_simd_mem_operand_p (rtx);
rtx aarch64_simd_vect_par_cnst_half (machine_mode, bool);
rtx aarch64_tls_get_addr (void);
tree aarch64_fold_builtin (tree, int, tree *, bool);
unsigned aarch64_dbx_register_number (unsigned);
unsigned aarch64_trampoline_size (void);
void aarch64_asm_output_labelref (FILE *, const char *);
void aarch64_cpu_cpp_builtins (cpp_reader *);
const char * aarch64_gen_far_branch (rtx *, int, const char *, const char *);
const char * aarch64_output_probe_stack_range (rtx, rtx);
void aarch64_err_no_fpadvsimd (machine_mode, const char *);
void aarch64_expand_epilogue (bool);
void aarch64_expand_mov_immediate (rtx, rtx);
void aarch64_expand_prologue (void);
void aarch64_expand_vector_init (rtx, rtx);
void aarch64_init_cumulative_args (CUMULATIVE_ARGS *, const_tree, rtx,
                                   const_tree, unsigned);
void aarch64_init_expanders (void);
void aarch64_init_simd_builtins (void);
void aarch64_emit_call_insn (rtx);
void aarch64_register_pragmas (void);
void aarch64_relayout_simd_types (void);
void aarch64_reset_previous_fndecl (void);
bool aarch64_return_address_signing_enabled (void);
void aarch64_save_restore_target_globals (tree);

/* Initialize builtins for SIMD intrinsics.  */
void init_aarch64_simd_builtins (void);

void aarch64_simd_emit_reg_reg_move (rtx *, machine_mode, unsigned int);

/* Expand builtins for SIMD intrinsics.  */
rtx aarch64_simd_expand_builtin (int, tree, rtx);

void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT, const_tree);

void aarch64_split_128bit_move (rtx, rtx);

bool aarch64_split_128bit_move_p (rtx, rtx);

void aarch64_split_simd_combine (rtx, rtx, rtx);

void aarch64_split_simd_move (rtx, rtx);

/* Check for a legitimate floating point constant for FMOV.  */
bool aarch64_float_const_representable_p (rtx);

#if defined (RTX_CODE)

bool aarch64_legitimate_address_p (machine_mode, rtx, RTX_CODE, bool);
machine_mode aarch64_select_cc_mode (RTX_CODE, rtx, rtx);
rtx aarch64_gen_compare_reg (RTX_CODE, rtx, rtx);
rtx aarch64_load_tp (rtx);

void aarch64_expand_compare_and_swap (rtx op[]);
void aarch64_split_compare_and_swap (rtx op[]);
void aarch64_gen_atomic_cas (rtx, rtx, rtx, rtx, rtx);

bool aarch64_atomic_ldop_supported_p (enum rtx_code);
void aarch64_gen_atomic_ldop (enum rtx_code, rtx, rtx, rtx, rtx, rtx);
void aarch64_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx);

bool aarch64_gen_adjusted_ldpstp (rtx *, bool, scalar_mode, RTX_CODE);
#endif /* RTX_CODE */

void aarch64_init_builtins (void);

bool aarch64_process_target_attr (tree, const char*);
void aarch64_override_options_internal (struct gcc_options *);

rtx aarch64_expand_builtin (tree exp,
                            rtx target,
                            rtx subtarget ATTRIBUTE_UNUSED,
                            machine_mode mode ATTRIBUTE_UNUSED,
                            int ignore ATTRIBUTE_UNUSED);
tree aarch64_builtin_decl (unsigned, bool ATTRIBUTE_UNUSED);
tree aarch64_builtin_rsqrt (unsigned int);
tree aarch64_builtin_vectorized_function (unsigned int, tree, tree);

extern void aarch64_split_combinev16qi (rtx operands[3]);
extern void aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel);
extern bool aarch64_madd_needs_nop (rtx_insn *);
extern void aarch64_final_prescan_insn (rtx_insn *);
extern bool
aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel);
void aarch64_atomic_assign_expand_fenv (tree *, tree *, tree *);
int aarch64_ccmp_mode_to_code (machine_mode mode);

bool extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset);
bool aarch64_operands_ok_for_ldpstp (rtx *, bool, machine_mode);
bool aarch64_operands_adjust_ok_for_ldpstp (rtx *, bool, scalar_mode);

extern void aarch64_asm_output_pool_epilogue (FILE *, const char *,
                                              tree, HOST_WIDE_INT);

/* Defined in common/config/aarch64-common.c.  */
bool aarch64_handle_option (struct gcc_options *, struct gcc_options *,
                            const struct cl_decoded_option *, location_t);
const char *aarch64_rewrite_selected_cpu (const char *name);
enum aarch64_parse_opt_result aarch64_parse_extension (const char *,
                                                       unsigned long *);
std::string aarch64_get_extension_string_for_isa_flags (unsigned long,
                                                         unsigned long);

rtl_opt_pass *make_pass_fma_steering (gcc::context *ctxt);

#endif /* GCC_AARCH64_PROTOS_H */