Mercurial > hg > CbC > CbC_gcc
comparison gcc/config/m68k/m68k.c @ 0:a06113de4d67
first commit
author | kent <kent@cr.ie.u-ryukyu.ac.jp> |
---|---|
date | Fri, 17 Jul 2009 14:47:48 +0900 |
parents | |
children | 77e2b8dfacca |
comparison
equal
deleted
inserted
replaced
-1:000000000000 | 0:a06113de4d67 |
---|---|
1 /* Subroutines for insn-output.c for Motorola 68000 family. | |
2 Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, | |
3 2001, 2003, 2004, 2005, 2006, 2007, 2008 | |
4 Free Software Foundation, Inc. | |
5 | |
6 This file is part of GCC. | |
7 | |
8 GCC is free software; you can redistribute it and/or modify | |
9 it under the terms of the GNU General Public License as published by | |
10 the Free Software Foundation; either version 3, or (at your option) | |
11 any later version. | |
12 | |
13 GCC is distributed in the hope that it will be useful, | |
14 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 GNU General Public License for more details. | |
17 | |
18 You should have received a copy of the GNU General Public License | |
19 along with GCC; see the file COPYING3. If not see | |
20 <http://www.gnu.org/licenses/>. */ | |
21 | |
22 #include "config.h" | |
23 #include "system.h" | |
24 #include "coretypes.h" | |
25 #include "tm.h" | |
26 #include "tree.h" | |
27 #include "rtl.h" | |
28 #include "function.h" | |
29 #include "regs.h" | |
30 #include "hard-reg-set.h" | |
31 #include "real.h" | |
32 #include "insn-config.h" | |
33 #include "conditions.h" | |
34 #include "output.h" | |
35 #include "insn-attr.h" | |
36 #include "recog.h" | |
37 #include "toplev.h" | |
38 #include "expr.h" | |
39 #include "reload.h" | |
40 #include "tm_p.h" | |
41 #include "target.h" | |
42 #include "target-def.h" | |
43 #include "debug.h" | |
44 #include "flags.h" | |
45 #include "df.h" | |
46 /* ??? Need to add a dependency between m68k.o and sched-int.h. */ | |
47 #include "sched-int.h" | |
48 #include "insn-codes.h" | |
49 | |
/* Register class for each hard register number: eight data registers
   (DATA_REGS), eight address registers (ADDR_REGS), eight FPU
   registers (FP_REGS), and a final ADDR_REGS entry for the
   argument-pointer pseudo register.  */
enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};
60 | |
61 | |
62 /* The minimum number of integer registers that we want to save with the | |
63 movem instruction. Using two movel instructions instead of a single | |
64 moveml is about 15% faster for the 68020 and 68030 at no expense in | |
65 code size. */ | |
66 #define MIN_MOVEM_REGS 3 | |
67 | |
68 /* The minimum number of floating point registers that we want to save | |
69 with the fmovem instruction. */ | |
70 #define MIN_FMOVEM_REGS 1 | |
71 | |
/* Structure describing stack frame layout.  Filled in by
   m68k_compute_frame_layout () and cached in CURRENT_FRAME below.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up to a multiple of 4).  */
  HOST_WIDE_INT size;

  /* Data and address registers: REG_NO is the number of D0-A7
     registers that must be saved, REG_MASK has bit (REGNO - D0_REG)
     set for each such register.  */
  int reg_no;
  unsigned int reg_mask;

  /* FPU registers, likewise: count of saved FP registers and a mask
     with bit (REGNO - FP0_REG) set for each one.  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* funcdef_no of the function which the above information refers to,
     used to avoid recomputing the layout for the same function.  */
  int funcdef_no;
};
99 | |
100 /* Current frame information calculated by m68k_compute_frame_layout(). */ | |
101 static struct m68k_frame current_frame; | |
102 | |
/* Structure describing an m68k address.

   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
   with null fields evaluating to 0.  Here:

   - BASE satisfies m68k_legitimate_base_reg_p
   - INDEX satisfies m68k_legitimate_index_reg_p
   - OFFSET satisfies m68k_legitimate_constant_address_p

   INDEX is either HImode or SImode.  The other fields are SImode.

   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
   the address is (BASE)+.  */
struct m68k_address {
  enum rtx_code code;	/* UNKNOWN, PRE_DEC or POST_INC; see above.  */
  rtx base;		/* Base register, or null.  */
  rtx index;		/* Index register, or null.  */
  rtx offset;		/* Constant displacement, or null.  */
  int scale;		/* Multiplier applied to INDEX.  */
};
123 | |
124 static int m68k_sched_adjust_cost (rtx, rtx, rtx, int); | |
125 static int m68k_sched_issue_rate (void); | |
126 static int m68k_sched_variable_issue (FILE *, int, rtx, int); | |
127 static void m68k_sched_md_init_global (FILE *, int, int); | |
128 static void m68k_sched_md_finish_global (FILE *, int); | |
129 static void m68k_sched_md_init (FILE *, int, int); | |
130 static void m68k_sched_dfa_pre_advance_cycle (void); | |
131 static void m68k_sched_dfa_post_advance_cycle (void); | |
132 static int m68k_sched_first_cycle_multipass_dfa_lookahead (void); | |
133 | |
134 static bool m68k_handle_option (size_t, const char *, int); | |
135 static rtx find_addr_reg (rtx); | |
136 static const char *singlemove_string (rtx *); | |
137 #ifdef M68K_TARGET_COFF | |
138 static void m68k_coff_asm_named_section (const char *, unsigned int, tree); | |
139 #endif /* M68K_TARGET_COFF */ | |
140 static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, | |
141 HOST_WIDE_INT, tree); | |
142 static rtx m68k_struct_value_rtx (tree, int); | |
143 static tree m68k_handle_fndecl_attribute (tree *node, tree name, | |
144 tree args, int flags, | |
145 bool *no_add_attrs); | |
146 static void m68k_compute_frame_layout (void); | |
147 static bool m68k_save_reg (unsigned int regno, bool interrupt_handler); | |
148 static bool m68k_ok_for_sibcall_p (tree, tree); | |
149 static bool m68k_rtx_costs (rtx, int, int, int *, bool); | |
150 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT | |
151 static bool m68k_return_in_memory (const_tree, const_tree); | |
152 #endif | |
153 | |
154 | |
155 /* Specify the identification number of the library being built */ | |
156 const char *m68k_library_id_string = "_current_shared_library_a5_offset_"; | |
157 | |
158 /* Nonzero if the last compare/test insn had FP operands. The | |
159 sCC expanders peek at this to determine what to do for the | |
160 68060, which has no fsCC instructions. */ | |
161 int m68k_last_compare_had_fp_operands; | |
162 | |
163 /* Initialize the GCC target structure. */ | |
164 | |
165 #if INT_OP_GROUP == INT_OP_DOT_WORD | |
166 #undef TARGET_ASM_ALIGNED_HI_OP | |
167 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t" | |
168 #endif | |
169 | |
170 #if INT_OP_GROUP == INT_OP_NO_DOT | |
171 #undef TARGET_ASM_BYTE_OP | |
172 #define TARGET_ASM_BYTE_OP "\tbyte\t" | |
173 #undef TARGET_ASM_ALIGNED_HI_OP | |
174 #define TARGET_ASM_ALIGNED_HI_OP "\tshort\t" | |
175 #undef TARGET_ASM_ALIGNED_SI_OP | |
176 #define TARGET_ASM_ALIGNED_SI_OP "\tlong\t" | |
177 #endif | |
178 | |
179 #if INT_OP_GROUP == INT_OP_DC | |
180 #undef TARGET_ASM_BYTE_OP | |
181 #define TARGET_ASM_BYTE_OP "\tdc.b\t" | |
182 #undef TARGET_ASM_ALIGNED_HI_OP | |
183 #define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t" | |
184 #undef TARGET_ASM_ALIGNED_SI_OP | |
185 #define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t" | |
186 #endif | |
187 | |
188 #undef TARGET_ASM_UNALIGNED_HI_OP | |
189 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP | |
190 #undef TARGET_ASM_UNALIGNED_SI_OP | |
191 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP | |
192 | |
193 #undef TARGET_ASM_OUTPUT_MI_THUNK | |
194 #define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk | |
195 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK | |
196 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true | |
197 | |
198 #undef TARGET_ASM_FILE_START_APP_OFF | |
199 #define TARGET_ASM_FILE_START_APP_OFF true | |
200 | |
201 #undef TARGET_SCHED_ADJUST_COST | |
202 #define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost | |
203 | |
204 #undef TARGET_SCHED_ISSUE_RATE | |
205 #define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate | |
206 | |
207 #undef TARGET_SCHED_VARIABLE_ISSUE | |
208 #define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue | |
209 | |
210 #undef TARGET_SCHED_INIT_GLOBAL | |
211 #define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global | |
212 | |
213 #undef TARGET_SCHED_FINISH_GLOBAL | |
214 #define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global | |
215 | |
216 #undef TARGET_SCHED_INIT | |
217 #define TARGET_SCHED_INIT m68k_sched_md_init | |
218 | |
219 #undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE | |
220 #define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle | |
221 | |
222 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE | |
223 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle | |
224 | |
225 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD | |
226 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \ | |
227 m68k_sched_first_cycle_multipass_dfa_lookahead | |
228 | |
229 #undef TARGET_HANDLE_OPTION | |
230 #define TARGET_HANDLE_OPTION m68k_handle_option | |
231 | |
232 #undef TARGET_RTX_COSTS | |
233 #define TARGET_RTX_COSTS m68k_rtx_costs | |
234 | |
235 #undef TARGET_ATTRIBUTE_TABLE | |
236 #define TARGET_ATTRIBUTE_TABLE m68k_attribute_table | |
237 | |
238 #undef TARGET_PROMOTE_PROTOTYPES | |
239 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true | |
240 | |
241 #undef TARGET_STRUCT_VALUE_RTX | |
242 #define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx | |
243 | |
244 #undef TARGET_CANNOT_FORCE_CONST_MEM | |
245 #define TARGET_CANNOT_FORCE_CONST_MEM m68k_illegitimate_symbolic_constant_p | |
246 | |
247 #undef TARGET_FUNCTION_OK_FOR_SIBCALL | |
248 #define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p | |
249 | |
250 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT | |
251 #undef TARGET_RETURN_IN_MEMORY | |
252 #define TARGET_RETURN_IN_MEMORY m68k_return_in_memory | |
253 #endif | |
254 | |
/* Machine attributes recognized by this backend.  All three interrupt
   variants are validated by m68k_handle_fndecl_attribute, which
   requires a FUNCTION_DECL.  */
static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "interrupt", 0, 0, true,  false, false, m68k_handle_fndecl_attribute },
  { "interrupt_handler", 0, 0, true,  false, false, m68k_handle_fndecl_attribute },
  { "interrupt_thread", 0, 0, true,  false, false, m68k_handle_fndecl_attribute },
  { NULL,                0, 0, false, false, false, NULL }
};
263 | |
264 struct gcc_target targetm = TARGET_INITIALIZER; | |
265 | |
266 /* Base flags for 68k ISAs. */ | |
267 #define FL_FOR_isa_00 FL_ISA_68000 | |
268 #define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010) | |
269 /* FL_68881 controls the default setting of -m68881. gcc has traditionally | |
270 generated 68881 code for 68020 and 68030 targets unless explicitly told | |
271 not to. */ | |
272 #define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \ | |
273 | FL_BITFIELD | FL_68881) | |
274 #define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040) | |
275 #define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020) | |
276 | |
277 /* Base flags for ColdFire ISAs. */ | |
278 #define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A) | |
279 #define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP) | |
280 /* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */ | |
281 #define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV) | |
282 /* ISA_C is not upwardly compatible with ISA_B. */ | |
283 #define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP) | |
284 | |
/* The instruction-set architectures the backend distinguishes.  The
   enumerator names correspond to the FL_FOR_isa_* flag sets above
   (all_devices token-pastes FL_FOR_<ISA> from this field).  */
enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  /* Sentinel; also used for the null terminator of selection tables.  */
  isa_max
};
300 | |
/* Information about one of the -march, -mcpu or -mtune arguments.
   The all_devices, all_isas and all_microarchs tables below are
   arrays of these, each terminated by an entry with a null NAME.  */
struct m68k_target_selection
{
  /* The argument being described.  */
  const char *name;

  /* For -mcpu, this is the device selected by the option.
     For -mtune and -march, it is a representative device
     for the microarchitecture or ISA respectively.  */
  enum target_device device;

  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu.  */
  const char *family;
  enum uarch_type microarch;
  enum m68k_isa isa;
  unsigned long flags;
};
319 | |
/* A list of all devices in m68k-devices.def.  Used for -mcpu selection.
   Each entry's flags combine the device-specific FLAGS with the base
   flags for its ISA (FL_FOR_<ISA>).  Terminated by a null-NAME entry.  */
static const struct m68k_target_selection all_devices[] =
{
#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
  { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
#include "m68k-devices.def"
#undef M68K_DEVICE
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};
329 | |
/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  Terminated by a null-NAME entry.  */
static const struct m68k_target_selection all_isas[] =
{
  { "68000",    m68000,     NULL,  u68000,    isa_00,    FL_FOR_isa_00 },
  { "68010",    m68010,     NULL,  u68010,    isa_10,    FL_FOR_isa_10 },
  { "68020",    m68020,     NULL,  u68020,    isa_20,    FL_FOR_isa_20 },
  { "68030",    m68030,     NULL,  u68030,    isa_20,    FL_FOR_isa_20 },
  { "68040",    m68040,     NULL,  u68040,    isa_40,    FL_FOR_isa_40 },
  { "68060",    m68060,     NULL,  u68060,    isa_40,    FL_FOR_isa_40 },
  { "cpu32",    cpu32,      NULL,  ucpu32,    isa_20,    FL_FOR_isa_cpu32 },
  { "isaa",     mcf5206e,   NULL,  ucfv2,     isa_a,     (FL_FOR_isa_a
							  | FL_CF_HWDIV) },
  { "isaaplus", mcf5271,    NULL,  ucfv2,     isa_aplus, (FL_FOR_isa_aplus
							  | FL_CF_HWDIV) },
  { "isab",     mcf5407,    NULL,  ucfv4,     isa_b,     FL_FOR_isa_b },
  { "isac",     unk_device, NULL,  ucfv4,     isa_c,     (FL_FOR_isa_c
							  | FL_CF_HWDIV) },
  { NULL,       unk_device, NULL,  unk_arch,  isa_max,   0 }
};
350 | |
/* A list of all microarchitectures, mapping each one to a representative
   device.  Used for -mtune selection.  Terminated by a null-NAME entry.  */
static const struct m68k_target_selection all_microarchs[] =
{
  { "68000",    m68000,     NULL,  u68000,    isa_00,  FL_FOR_isa_00 },
  { "68010",    m68010,     NULL,  u68010,    isa_10,  FL_FOR_isa_10 },
  { "68020",    m68020,     NULL,  u68020,    isa_20,  FL_FOR_isa_20 },
  { "68020-40", m68020,     NULL,  u68020_40, isa_20,  FL_FOR_isa_20 },
  { "68020-60", m68020,     NULL,  u68020_60, isa_20,  FL_FOR_isa_20 },
  { "68030",    m68030,     NULL,  u68030,    isa_20,  FL_FOR_isa_20 },
  { "68040",    m68040,     NULL,  u68040,    isa_40,  FL_FOR_isa_40 },
  { "68060",    m68060,     NULL,  u68060,    isa_40,  FL_FOR_isa_40 },
  { "cpu32",    cpu32,      NULL,  ucpu32,    isa_20,  FL_FOR_isa_cpu32 },
  { "cfv1",     mcf51qe,    NULL,  ucfv1,     isa_c,   FL_FOR_isa_c },
  { "cfv2",     mcf5206,    NULL,  ucfv2,     isa_a,   FL_FOR_isa_a },
  { "cfv3",     mcf5307,    NULL,  ucfv3,     isa_a,   (FL_FOR_isa_a
							| FL_CF_HWDIV) },
  { "cfv4",     mcf5407,    NULL,  ucfv4,     isa_b,   FL_FOR_isa_b },
  { "cfv4e",    mcf547x,    NULL,  ucfv4e,    isa_b,   (FL_FOR_isa_b
							| FL_CF_USP
							| FL_CF_EMAC
							| FL_CF_FPU) },
  { NULL,       unk_device, NULL,  unk_arch,  isa_max, 0 }
};
375 | |
376 /* The entries associated with the -mcpu, -march and -mtune settings, | |
377 or null for options that have not been used. */ | |
378 const struct m68k_target_selection *m68k_cpu_entry; | |
379 const struct m68k_target_selection *m68k_arch_entry; | |
380 const struct m68k_target_selection *m68k_tune_entry; | |
381 | |
382 /* Which CPU we are generating code for. */ | |
383 enum target_device m68k_cpu; | |
384 | |
385 /* Which microarchitecture to tune for. */ | |
386 enum uarch_type m68k_tune; | |
387 | |
388 /* Which FPU to use. */ | |
389 enum fpu_type m68k_fpu; | |
390 | |
391 /* The set of FL_* flags that apply to the target processor. */ | |
392 unsigned int m68k_cpu_flags; | |
393 | |
394 /* The set of FL_* flags that apply to the processor to be tuned for. */ | |
395 unsigned int m68k_tune_flags; | |
396 | |
397 /* Asm templates for calling or jumping to an arbitrary symbolic address, | |
398 or NULL if such calls or jumps are not supported. The address is held | |
399 in operand 0. */ | |
400 const char *m68k_symbolic_call; | |
401 const char *m68k_symbolic_jump; | |
402 | |
403 /* Enum variable that corresponds to m68k_symbolic_call values. */ | |
404 enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var; | |
405 | |
406 | |
407 /* See whether TABLE has an entry with name NAME. Return true and | |
408 store the entry in *ENTRY if so, otherwise return false and | |
409 leave *ENTRY alone. */ | |
410 | |
411 static bool | |
412 m68k_find_selection (const struct m68k_target_selection **entry, | |
413 const struct m68k_target_selection *table, | |
414 const char *name) | |
415 { | |
416 size_t i; | |
417 | |
418 for (i = 0; table[i].name; i++) | |
419 if (strcmp (table[i].name, name) == 0) | |
420 { | |
421 *entry = table + i; | |
422 return true; | |
423 } | |
424 return false; | |
425 } | |
426 | |
427 /* Implement TARGET_HANDLE_OPTION. */ | |
428 | |
429 static bool | |
430 m68k_handle_option (size_t code, const char *arg, int value) | |
431 { | |
432 switch (code) | |
433 { | |
434 case OPT_march_: | |
435 return m68k_find_selection (&m68k_arch_entry, all_isas, arg); | |
436 | |
437 case OPT_mcpu_: | |
438 return m68k_find_selection (&m68k_cpu_entry, all_devices, arg); | |
439 | |
440 case OPT_mtune_: | |
441 return m68k_find_selection (&m68k_tune_entry, all_microarchs, arg); | |
442 | |
443 case OPT_m5200: | |
444 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206"); | |
445 | |
446 case OPT_m5206e: | |
447 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206e"); | |
448 | |
449 case OPT_m528x: | |
450 return m68k_find_selection (&m68k_cpu_entry, all_devices, "528x"); | |
451 | |
452 case OPT_m5307: | |
453 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5307"); | |
454 | |
455 case OPT_m5407: | |
456 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5407"); | |
457 | |
458 case OPT_mcfv4e: | |
459 return m68k_find_selection (&m68k_cpu_entry, all_devices, "547x"); | |
460 | |
461 case OPT_m68000: | |
462 case OPT_mc68000: | |
463 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68000"); | |
464 | |
465 case OPT_m68010: | |
466 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68010"); | |
467 | |
468 case OPT_m68020: | |
469 case OPT_mc68020: | |
470 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"); | |
471 | |
472 case OPT_m68020_40: | |
473 return (m68k_find_selection (&m68k_tune_entry, all_microarchs, | |
474 "68020-40") | |
475 && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020")); | |
476 | |
477 case OPT_m68020_60: | |
478 return (m68k_find_selection (&m68k_tune_entry, all_microarchs, | |
479 "68020-60") | |
480 && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020")); | |
481 | |
482 case OPT_m68030: | |
483 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68030"); | |
484 | |
485 case OPT_m68040: | |
486 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68040"); | |
487 | |
488 case OPT_m68060: | |
489 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68060"); | |
490 | |
491 case OPT_m68302: | |
492 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68302"); | |
493 | |
494 case OPT_m68332: | |
495 case OPT_mcpu32: | |
496 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68332"); | |
497 | |
498 case OPT_mshared_library_id_: | |
499 if (value > MAX_LIBRARY_ID) | |
500 error ("-mshared-library-id=%s is not between 0 and %d", | |
501 arg, MAX_LIBRARY_ID); | |
502 else | |
503 { | |
504 char *tmp; | |
505 asprintf (&tmp, "%d", (value * -4) - 4); | |
506 m68k_library_id_string = tmp; | |
507 } | |
508 return true; | |
509 | |
510 default: | |
511 return true; | |
512 } | |
513 } | |
514 | |
515 /* Sometimes certain combinations of command options do not make | |
516 sense on a particular target machine. You can define a macro | |
517 `OVERRIDE_OPTIONS' to take account of this. This macro, if | |
518 defined, is executed once just after all the command options have | |
519 been parsed. | |
520 | |
521 Don't use this macro to turn on various extra optimizations for | |
522 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */ | |
523 | |
524 void | |
525 override_options (void) | |
526 { | |
527 const struct m68k_target_selection *entry; | |
528 unsigned long target_mask; | |
529 | |
530 /* User can choose: | |
531 | |
532 -mcpu= | |
533 -march= | |
534 -mtune= | |
535 | |
536 -march=ARCH should generate code that runs any processor | |
537 implementing architecture ARCH. -mcpu=CPU should override -march | |
538 and should generate code that runs on processor CPU, making free | |
539 use of any instructions that CPU understands. -mtune=UARCH applies | |
540 on top of -mcpu or -march and optimizes the code for UARCH. It does | |
541 not change the target architecture. */ | |
542 if (m68k_cpu_entry) | |
543 { | |
544 /* Complain if the -march setting is for a different microarchitecture, | |
545 or includes flags that the -mcpu setting doesn't. */ | |
546 if (m68k_arch_entry | |
547 && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch | |
548 || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0)) | |
549 warning (0, "-mcpu=%s conflicts with -march=%s", | |
550 m68k_cpu_entry->name, m68k_arch_entry->name); | |
551 | |
552 entry = m68k_cpu_entry; | |
553 } | |
554 else | |
555 entry = m68k_arch_entry; | |
556 | |
557 if (!entry) | |
558 entry = all_devices + TARGET_CPU_DEFAULT; | |
559 | |
560 m68k_cpu_flags = entry->flags; | |
561 | |
562 /* Use the architecture setting to derive default values for | |
563 certain flags. */ | |
564 target_mask = 0; | |
565 | |
566 /* ColdFire is lenient about alignment. */ | |
567 if (!TARGET_COLDFIRE) | |
568 target_mask |= MASK_STRICT_ALIGNMENT; | |
569 | |
570 if ((m68k_cpu_flags & FL_BITFIELD) != 0) | |
571 target_mask |= MASK_BITFIELD; | |
572 if ((m68k_cpu_flags & FL_CF_HWDIV) != 0) | |
573 target_mask |= MASK_CF_HWDIV; | |
574 if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0) | |
575 target_mask |= MASK_HARD_FLOAT; | |
576 target_flags |= target_mask & ~target_flags_explicit; | |
577 | |
578 /* Set the directly-usable versions of the -mcpu and -mtune settings. */ | |
579 m68k_cpu = entry->device; | |
580 if (m68k_tune_entry) | |
581 { | |
582 m68k_tune = m68k_tune_entry->microarch; | |
583 m68k_tune_flags = m68k_tune_entry->flags; | |
584 } | |
585 #ifdef M68K_DEFAULT_TUNE | |
586 else if (!m68k_cpu_entry && !m68k_arch_entry) | |
587 { | |
588 enum target_device dev; | |
589 dev = all_microarchs[M68K_DEFAULT_TUNE].device; | |
590 m68k_tune_flags = all_devices[dev]->flags; | |
591 } | |
592 #endif | |
593 else | |
594 { | |
595 m68k_tune = entry->microarch; | |
596 m68k_tune_flags = entry->flags; | |
597 } | |
598 | |
599 /* Set the type of FPU. */ | |
600 m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE | |
601 : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE | |
602 : FPUTYPE_68881); | |
603 | |
604 /* Sanity check to ensure that msep-data and mid-sahred-library are not | |
605 * both specified together. Doing so simply doesn't make sense. | |
606 */ | |
607 if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY) | |
608 error ("cannot specify both -msep-data and -mid-shared-library"); | |
609 | |
610 /* If we're generating code for a separate A5 relative data segment, | |
611 * we've got to enable -fPIC as well. This might be relaxable to | |
612 * -fpic but it hasn't been tested properly. | |
613 */ | |
614 if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY) | |
615 flag_pic = 2; | |
616 | |
617 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an | |
618 error if the target does not support them. */ | |
619 if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2) | |
620 error ("-mpcrel -fPIC is not currently supported on selected cpu"); | |
621 | |
622 /* ??? A historic way of turning on pic, or is this intended to | |
623 be an embedded thing that doesn't have the same name binding | |
624 significance that it does on hosted ELF systems? */ | |
625 if (TARGET_PCREL && flag_pic == 0) | |
626 flag_pic = 1; | |
627 | |
628 if (!flag_pic) | |
629 { | |
630 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR; | |
631 | |
632 m68k_symbolic_jump = "jra %a0"; | |
633 } | |
634 else if (TARGET_ID_SHARED_LIBRARY) | |
635 /* All addresses must be loaded from the GOT. */ | |
636 ; | |
637 else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC) | |
638 { | |
639 if (TARGET_PCREL) | |
640 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C; | |
641 else | |
642 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P; | |
643 | |
644 if (TARGET_ISAC) | |
645 /* No unconditional long branch */; | |
646 else if (TARGET_PCREL) | |
647 m68k_symbolic_jump = "bra%.l %c0"; | |
648 else | |
649 m68k_symbolic_jump = "bra%.l %p0"; | |
650 /* Turn off function cse if we are doing PIC. We always want | |
651 function call to be done as `bsr foo@PLTPC'. */ | |
652 /* ??? It's traditional to do this for -mpcrel too, but it isn't | |
653 clear how intentional that is. */ | |
654 flag_no_function_cse = 1; | |
655 } | |
656 | |
657 switch (m68k_symbolic_call_var) | |
658 { | |
659 case M68K_SYMBOLIC_CALL_JSR: | |
660 m68k_symbolic_call = "jsr %a0"; | |
661 break; | |
662 | |
663 case M68K_SYMBOLIC_CALL_BSR_C: | |
664 m68k_symbolic_call = "bsr%.l %c0"; | |
665 break; | |
666 | |
667 case M68K_SYMBOLIC_CALL_BSR_P: | |
668 m68k_symbolic_call = "bsr%.l %p0"; | |
669 break; | |
670 | |
671 case M68K_SYMBOLIC_CALL_NONE: | |
672 gcc_assert (m68k_symbolic_call == NULL); | |
673 break; | |
674 | |
675 default: | |
676 gcc_unreachable (); | |
677 } | |
678 | |
679 #ifndef ASM_OUTPUT_ALIGN_WITH_NOP | |
680 if (align_labels > 2) | |
681 { | |
682 warning (0, "-falign-labels=%d is not supported", align_labels); | |
683 align_labels = 0; | |
684 } | |
685 if (align_loops > 2) | |
686 { | |
687 warning (0, "-falign-loops=%d is not supported", align_loops); | |
688 align_loops = 0; | |
689 } | |
690 #endif | |
691 | |
692 SUBTARGET_OVERRIDE_OPTIONS; | |
693 | |
694 /* Setup scheduling options. */ | |
695 if (TUNE_CFV1) | |
696 m68k_sched_cpu = CPU_CFV1; | |
697 else if (TUNE_CFV2) | |
698 m68k_sched_cpu = CPU_CFV2; | |
699 else if (TUNE_CFV3) | |
700 m68k_sched_cpu = CPU_CFV3; | |
701 else if (TUNE_CFV4) | |
702 m68k_sched_cpu = CPU_CFV4; | |
703 else | |
704 { | |
705 m68k_sched_cpu = CPU_UNKNOWN; | |
706 flag_schedule_insns = 0; | |
707 flag_schedule_insns_after_reload = 0; | |
708 flag_modulo_sched = 0; | |
709 } | |
710 | |
711 if (m68k_sched_cpu != CPU_UNKNOWN) | |
712 { | |
713 if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0) | |
714 m68k_sched_mac = MAC_CF_EMAC; | |
715 else if ((m68k_cpu_flags & FL_CF_MAC) != 0) | |
716 m68k_sched_mac = MAC_CF_MAC; | |
717 else | |
718 m68k_sched_mac = MAC_NO; | |
719 } | |
720 } | |
721 | |
722 /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the | |
723 given argument and NAME is the argument passed to -mcpu. Return NULL | |
724 if -mcpu was not passed. */ | |
725 | |
726 const char * | |
727 m68k_cpp_cpu_ident (const char *prefix) | |
728 { | |
729 if (!m68k_cpu_entry) | |
730 return NULL; | |
731 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL); | |
732 } | |
733 | |
734 /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the | |
735 given argument and NAME is the name of the representative device for | |
736 the -mcpu argument's family. Return NULL if -mcpu was not passed. */ | |
737 | |
738 const char * | |
739 m68k_cpp_cpu_family (const char *prefix) | |
740 { | |
741 if (!m68k_cpu_entry) | |
742 return NULL; | |
743 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL); | |
744 } | |
745 | |
746 /* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or | |
747 "interrupt_handler" attribute and interrupt_thread if FUNC has an | |
748 "interrupt_thread" attribute. Otherwise, return | |
749 m68k_fk_normal_function. */ | |
750 | |
751 enum m68k_function_kind | |
752 m68k_get_function_kind (tree func) | |
753 { | |
754 tree a; | |
755 | |
756 gcc_assert (TREE_CODE (func) == FUNCTION_DECL); | |
757 | |
758 a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func)); | |
759 if (a != NULL_TREE) | |
760 return m68k_fk_interrupt_handler; | |
761 | |
762 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func)); | |
763 if (a != NULL_TREE) | |
764 return m68k_fk_interrupt_handler; | |
765 | |
766 a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func)); | |
767 if (a != NULL_TREE) | |
768 return m68k_fk_interrupt_thread; | |
769 | |
770 return m68k_fk_normal_function; | |
771 } | |
772 | |
773 /* Handle an attribute requiring a FUNCTION_DECL; arguments as in | |
774 struct attribute_spec.handler. */ | |
775 static tree | |
776 m68k_handle_fndecl_attribute (tree *node, tree name, | |
777 tree args ATTRIBUTE_UNUSED, | |
778 int flags ATTRIBUTE_UNUSED, | |
779 bool *no_add_attrs) | |
780 { | |
781 if (TREE_CODE (*node) != FUNCTION_DECL) | |
782 { | |
783 warning (OPT_Wattributes, "%qs attribute only applies to functions", | |
784 IDENTIFIER_POINTER (name)); | |
785 *no_add_attrs = true; | |
786 } | |
787 | |
788 if (m68k_get_function_kind (*node) != m68k_fk_normal_function) | |
789 { | |
790 error ("multiple interrupt attributes not allowed"); | |
791 *no_add_attrs = true; | |
792 } | |
793 | |
794 if (!TARGET_FIDOA | |
795 && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread")) | |
796 { | |
797 error ("interrupt_thread is available only on fido"); | |
798 *no_add_attrs = true; | |
799 } | |
800 | |
801 return NULL_TREE; | |
802 } | |
803 | |
/* Compute the stack frame layout for the current function and cache it
   in CURRENT_FRAME: the (rounded) local frame size, which integer and
   FP registers must be saved, and the save-area masks and offsets.  */
static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  /* Round the frame size up to a multiple of 4 bytes.  */
  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* Interrupt thread does not need to save any register.  */
  if (!interrupt_thread)
    /* Registers 0-15 are the data and address registers.  */
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
	{
	  mask |= 1 << (regno - D0_REG);
	  saved++;
	}
  /* Each saved D/A register occupies 4 bytes.  */
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* Interrupt thread does not need to save any register.  */
      if (!interrupt_thread)
	/* Registers 16-23 are the FP registers.  */
	for (regno = 16; regno < 24; regno++)
	  if (m68k_save_reg (regno, interrupt_handler))
	    {
	      mask |= 1 << (regno - FP0_REG);
	      saved++;
	    }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}
857 | |
858 HOST_WIDE_INT | |
859 m68k_initial_elimination_offset (int from, int to) | |
860 { | |
861 int argptr_offset; | |
862 /* The arg pointer points 8 bytes before the start of the arguments, | |
863 as defined by FIRST_PARM_OFFSET. This makes it coincident with the | |
864 frame pointer in most frames. */ | |
865 argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD; | |
866 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM) | |
867 return argptr_offset; | |
868 | |
869 m68k_compute_frame_layout (); | |
870 | |
871 gcc_assert (to == STACK_POINTER_REGNUM); | |
872 switch (from) | |
873 { | |
874 case ARG_POINTER_REGNUM: | |
875 return current_frame.offset + current_frame.size - argptr_offset; | |
876 case FRAME_POINTER_REGNUM: | |
877 return current_frame.offset + current_frame.size; | |
878 default: | |
879 gcc_unreachable (); | |
880 } | |
881 } | |
882 | |
883 /* Refer to the array `regs_ever_live' to determine which registers | |
884 to save; `regs_ever_live[I]' is nonzero if register number I | |
885 is ever used in the function. This function is responsible for | |
886 knowing which registers should not be saved even if used. | |
887 Return true if we need to save REGNO. */ | |
888 | |
889 static bool | |
890 m68k_save_reg (unsigned int regno, bool interrupt_handler) | |
891 { | |
892 if (flag_pic && regno == PIC_REG) | |
893 { | |
894 if (crtl->saves_all_registers) | |
895 return true; | |
896 if (crtl->uses_pic_offset_table) | |
897 return true; | |
898 /* Reload may introduce constant pool references into a function | |
899 that thitherto didn't need a PIC register. Note that the test | |
900 above will not catch that case because we will only set | |
901 crtl->uses_pic_offset_table when emitting | |
902 the address reloads. */ | |
903 if (crtl->uses_const_pool) | |
904 return true; | |
905 } | |
906 | |
907 if (crtl->calls_eh_return) | |
908 { | |
909 unsigned int i; | |
910 for (i = 0; ; i++) | |
911 { | |
912 unsigned int test = EH_RETURN_DATA_REGNO (i); | |
913 if (test == INVALID_REGNUM) | |
914 break; | |
915 if (test == regno) | |
916 return true; | |
917 } | |
918 } | |
919 | |
920 /* Fixed regs we never touch. */ | |
921 if (fixed_regs[regno]) | |
922 return false; | |
923 | |
924 /* The frame pointer (if it is such) is handled specially. */ | |
925 if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed) | |
926 return false; | |
927 | |
928 /* Interrupt handlers must also save call_used_regs | |
929 if they are live or when calling nested functions. */ | |
930 if (interrupt_handler) | |
931 { | |
932 if (df_regs_ever_live_p (regno)) | |
933 return true; | |
934 | |
935 if (!current_function_is_leaf && call_used_regs[regno]) | |
936 return true; | |
937 } | |
938 | |
939 /* Never need to save registers that aren't touched. */ | |
940 if (!df_regs_ever_live_p (regno)) | |
941 return false; | |
942 | |
943 /* Otherwise save everything that isn't call-clobbered. */ | |
944 return !call_used_regs[regno]; | |
945 } | |
946 | |
/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.  Returns the emitted insn.  */

static rtx
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  enum machine_mode mode;

  /* One SET per moved register, plus one more for the base-register
     adjustment when ADJUST_STACK_P.  */
  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  /* All moved registers share one mode; take it from the first.  */
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      /* A pre-decrement store moves BASE down by the whole block size;
	 a post-increment load moves it up by the same amount.  */
      src = plus_constant (base, (count
				  * GET_MODE_SIZE (mode)
				  * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (base, offset);
	/* operands[0] is the SET destination: the memory slot when
	   storing (store_p, so !store_p == 0), the register when
	   loading.  operands[1] is the source.  */
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  /* Every allocated PARALLEL slot must have been filled.  */
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
989 | |
990 /* Make INSN a frame-related instruction. */ | |
991 | |
992 static void | |
993 m68k_set_frame_related (rtx insn) | |
994 { | |
995 rtx body; | |
996 int i; | |
997 | |
998 RTX_FRAME_RELATED_P (insn) = 1; | |
999 body = PATTERN (insn); | |
1000 if (GET_CODE (body) == PARALLEL) | |
1001 for (i = 0; i < XVECLEN (body, 0); i++) | |
1002 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1; | |
1003 } | |
1004 | |
1005 /* Emit RTL for the "prologue" define_expand. */ | |
1006 | |
1007 void | |
1008 m68k_expand_prologue (void) | |
1009 { | |
1010 HOST_WIDE_INT fsize_with_regs; | |
1011 rtx limit, src, dest, insn; | |
1012 | |
1013 m68k_compute_frame_layout (); | |
1014 | |
1015 /* If the stack limit is a symbol, we can check it here, | |
1016 before actually allocating the space. */ | |
1017 if (crtl->limit_stack | |
1018 && GET_CODE (stack_limit_rtx) == SYMBOL_REF) | |
1019 { | |
1020 limit = plus_constant (stack_limit_rtx, current_frame.size + 4); | |
1021 if (!LEGITIMATE_CONSTANT_P (limit)) | |
1022 { | |
1023 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit); | |
1024 limit = gen_rtx_REG (Pmode, D0_REG); | |
1025 } | |
1026 emit_insn (gen_cmpsi (stack_pointer_rtx, limit)); | |
1027 emit_insn (gen_conditional_trap (gen_rtx_LTU (VOIDmode, | |
1028 cc0_rtx, const0_rtx), | |
1029 const1_rtx)); | |
1030 } | |
1031 | |
1032 fsize_with_regs = current_frame.size; | |
1033 if (TARGET_COLDFIRE) | |
1034 { | |
1035 /* ColdFire's move multiple instructions do not allow pre-decrement | |
1036 addressing. Add the size of movem saves to the initial stack | |
1037 allocation instead. */ | |
1038 if (current_frame.reg_no >= MIN_MOVEM_REGS) | |
1039 fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode); | |
1040 if (current_frame.fpu_no >= MIN_FMOVEM_REGS) | |
1041 fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode); | |
1042 } | |
1043 | |
1044 if (frame_pointer_needed) | |
1045 { | |
1046 if (fsize_with_regs == 0 && TUNE_68040) | |
1047 { | |
1048 /* On the 68040, two separate moves are faster than link.w 0. */ | |
1049 dest = gen_frame_mem (Pmode, | |
1050 gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx)); | |
1051 m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx)); | |
1052 m68k_set_frame_related (emit_move_insn (frame_pointer_rtx, | |
1053 stack_pointer_rtx)); | |
1054 } | |
1055 else if (fsize_with_regs < 0x8000 || TARGET_68020) | |
1056 m68k_set_frame_related | |
1057 (emit_insn (gen_link (frame_pointer_rtx, | |
1058 GEN_INT (-4 - fsize_with_regs)))); | |
1059 else | |
1060 { | |
1061 m68k_set_frame_related | |
1062 (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4)))); | |
1063 m68k_set_frame_related | |
1064 (emit_insn (gen_addsi3 (stack_pointer_rtx, | |
1065 stack_pointer_rtx, | |
1066 GEN_INT (-fsize_with_regs)))); | |
1067 } | |
1068 | |
1069 /* If the frame pointer is needed, emit a special barrier that | |
1070 will prevent the scheduler from moving stores to the frame | |
1071 before the stack adjustment. */ | |
1072 emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx)); | |
1073 } | |
1074 else if (fsize_with_regs != 0) | |
1075 m68k_set_frame_related | |
1076 (emit_insn (gen_addsi3 (stack_pointer_rtx, | |
1077 stack_pointer_rtx, | |
1078 GEN_INT (-fsize_with_regs)))); | |
1079 | |
1080 if (current_frame.fpu_mask) | |
1081 { | |
1082 gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS); | |
1083 if (TARGET_68881) | |
1084 m68k_set_frame_related | |
1085 (m68k_emit_movem (stack_pointer_rtx, | |
1086 current_frame.fpu_no * -GET_MODE_SIZE (XFmode), | |
1087 current_frame.fpu_no, FP0_REG, | |
1088 current_frame.fpu_mask, true, true)); | |
1089 else | |
1090 { | |
1091 int offset; | |
1092 | |
1093 /* If we're using moveml to save the integer registers, | |
1094 the stack pointer will point to the bottom of the moveml | |
1095 save area. Find the stack offset of the first FP register. */ | |
1096 if (current_frame.reg_no < MIN_MOVEM_REGS) | |
1097 offset = 0; | |
1098 else | |
1099 offset = current_frame.reg_no * GET_MODE_SIZE (SImode); | |
1100 m68k_set_frame_related | |
1101 (m68k_emit_movem (stack_pointer_rtx, offset, | |
1102 current_frame.fpu_no, FP0_REG, | |
1103 current_frame.fpu_mask, true, false)); | |
1104 } | |
1105 } | |
1106 | |
1107 /* If the stack limit is not a symbol, check it here. | |
1108 This has the disadvantage that it may be too late... */ | |
1109 if (crtl->limit_stack) | |
1110 { | |
1111 if (REG_P (stack_limit_rtx)) | |
1112 { | |
1113 emit_insn (gen_cmpsi (stack_pointer_rtx, stack_limit_rtx)); | |
1114 emit_insn (gen_conditional_trap (gen_rtx_LTU (VOIDmode, | |
1115 cc0_rtx, const0_rtx), | |
1116 const1_rtx)); | |
1117 } | |
1118 else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF) | |
1119 warning (0, "stack limit expression is not supported"); | |
1120 } | |
1121 | |
1122 if (current_frame.reg_no < MIN_MOVEM_REGS) | |
1123 { | |
1124 /* Store each register separately in the same order moveml does. */ | |
1125 int i; | |
1126 | |
1127 for (i = 16; i-- > 0; ) | |
1128 if (current_frame.reg_mask & (1 << i)) | |
1129 { | |
1130 src = gen_rtx_REG (SImode, D0_REG + i); | |
1131 dest = gen_frame_mem (SImode, | |
1132 gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx)); | |
1133 m68k_set_frame_related (emit_insn (gen_movsi (dest, src))); | |
1134 } | |
1135 } | |
1136 else | |
1137 { | |
1138 if (TARGET_COLDFIRE) | |
1139 /* The required register save space has already been allocated. | |
1140 The first register should be stored at (%sp). */ | |
1141 m68k_set_frame_related | |
1142 (m68k_emit_movem (stack_pointer_rtx, 0, | |
1143 current_frame.reg_no, D0_REG, | |
1144 current_frame.reg_mask, true, false)); | |
1145 else | |
1146 m68k_set_frame_related | |
1147 (m68k_emit_movem (stack_pointer_rtx, | |
1148 current_frame.reg_no * -GET_MODE_SIZE (SImode), | |
1149 current_frame.reg_no, D0_REG, | |
1150 current_frame.reg_mask, true, true)); | |
1151 } | |
1152 | |
1153 if (flag_pic | |
1154 && !TARGET_SEP_DATA | |
1155 && crtl->uses_pic_offset_table) | |
1156 insn = emit_insn (gen_load_got (pic_offset_table_rtx)); | |
1157 } | |
1158 | |
1159 /* Return true if a simple (return) instruction is sufficient for this | |
1160 instruction (i.e. if no epilogue is needed). */ | |
1161 | |
1162 bool | |
1163 m68k_use_return_insn (void) | |
1164 { | |
1165 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0) | |
1166 return false; | |
1167 | |
1168 m68k_compute_frame_layout (); | |
1169 return current_frame.offset == 0; | |
1170 } | |
1171 | |
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME : current_function_is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca
			 && current_function_is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  /* Frame-pointer-relative offsets of 0x8000 or more don't fit a 16-bit
     displacement; materialize the offset in A1 instead.  */
  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we're as well using a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  /* Otherwise restore via -OFFSET(%fp,%a1.l) with A1 = -fsize;
	     "big" selects that addressing form below.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      /* Restore the integer registers with a single moveml.  */
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      /* Likewise the floating-point registers.  */
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  /* Deallocate the frame; unlk restores both %fp and %sp at once.  */
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  /* Apply the extra adjustment requested by __builtin_eh_return.  */
  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (gen_rtx_RETURN (VOIDmode));
}
1342 | |
1343 /* Return true if X is a valid comparison operator for the dbcc | |
1344 instruction. | |
1345 | |
1346 Note it rejects floating point comparison operators. | |
1347 (In the future we could use Fdbcc). | |
1348 | |
1349 It also rejects some comparisons when CC_NO_OVERFLOW is set. */ | |
1350 | |
1351 int | |
1352 valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED) | |
1353 { | |
1354 switch (GET_CODE (x)) | |
1355 { | |
1356 case EQ: case NE: case GTU: case LTU: | |
1357 case GEU: case LEU: | |
1358 return 1; | |
1359 | |
1360 /* Reject some when CC_NO_OVERFLOW is set. This may be over | |
1361 conservative */ | |
1362 case GT: case LT: case GE: case LE: | |
1363 return ! (cc_prev_status.flags & CC_NO_OVERFLOW); | |
1364 default: | |
1365 return 0; | |
1366 } | |
1367 } | |
1368 | |
1369 /* Return nonzero if flags are currently in the 68881 flag register. */ | |
1370 int | |
1371 flags_in_68881 (void) | |
1372 { | |
1373 /* We could add support for these in the future */ | |
1374 return cc_status.flags & CC_IN_68881; | |
1375 } | |
1376 | |
1377 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */ | |
1378 | |
1379 static bool | |
1380 m68k_ok_for_sibcall_p (tree decl, tree exp) | |
1381 { | |
1382 enum m68k_function_kind kind; | |
1383 | |
1384 /* We cannot use sibcalls for nested functions because we use the | |
1385 static chain register for indirect calls. */ | |
1386 if (CALL_EXPR_STATIC_CHAIN (exp)) | |
1387 return false; | |
1388 | |
1389 kind = m68k_get_function_kind (current_function_decl); | |
1390 if (kind == m68k_fk_normal_function) | |
1391 /* We can always sibcall from a normal function, because it's | |
1392 undefined if it is calling an interrupt function. */ | |
1393 return true; | |
1394 | |
1395 /* Otherwise we can only sibcall if the function kind is known to be | |
1396 the same. */ | |
1397 if (decl && m68k_get_function_kind (decl) == kind) | |
1398 return true; | |
1399 | |
1400 return false; | |
1401 } | |
1402 | |
1403 /* Convert X to a legitimate function call memory reference and return the | |
1404 result. */ | |
1405 | |
1406 rtx | |
1407 m68k_legitimize_call_address (rtx x) | |
1408 { | |
1409 gcc_assert (MEM_P (x)); | |
1410 if (call_operand (XEXP (x, 0), VOIDmode)) | |
1411 return x; | |
1412 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0))); | |
1413 } | |
1414 | |
1415 /* Likewise for sibling calls. */ | |
1416 | |
1417 rtx | |
1418 m68k_legitimize_sibcall_address (rtx x) | |
1419 { | |
1420 gcc_assert (MEM_P (x)); | |
1421 if (sibcall_operand (XEXP (x, 0), VOIDmode)) | |
1422 return x; | |
1423 | |
1424 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0)); | |
1425 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM)); | |
1426 } | |
1427 | |
/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).  We also
   do not handle alternative conditions when CC_NO_OVERFLOW is
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
   kick those out before we get here.

   operands[0] is the loop counter, operands[1] the loop-top label,
   operands[2] the exit label, and operands[3] the comparison.  */

void
output_dbcc_and_branch (rtx *operands)
{
  switch (GET_CODE (operands[3]))
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case SImode:
      /* The low word has wrapped past zero: clear it, borrow one from
	 the high word, and re-enter the loop while the full 32-bit
	 counter is still non-negative.  */
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case HImode:
      break;

    default:
      gcc_unreachable ();
    }
}
1498 | |
/* Output the assembly for a DImode scc operation: set byte DEST
   according to the comparison OP between the DImode values OPERAND1
   and OPERAND2.  The high words are compared first; if they differ,
   the (possibly signed) condition on them decides the result at label
   4, otherwise the unsigned condition on the low words does.  */

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* loperands[0]/[2] are the high words of the operands, [1]/[3] the
     corresponding low words (next register or memory at offset 4).  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  /* Label 4 is reached when the high words compare unequal.  */
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* Comparing against zero: tst.l where legal (tst.l on an address
	 register needs 68020+/ColdFire), else cmp.w #0.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  /* For the signed orderings the fall-through path (high words equal)
     sets DEST from the unsigned condition on the low words and then
     skips, via label 6, over the label-4 path that applies the signed
     condition on the high words.  */
  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}
1635 | |
/* Output the assembly for a bit-test instruction.  COUNTOP is the bit
   number being tested, DATAOP the value it is tested in, and SIGNPOS
   the bit position of the sign bit in the storage unit being accessed
   (it is also used as a mask when reducing COUNT, so it is of the form
   2**k - 1).  INSN is the test insn itself, used to inspect the
   following instruction.  Sets cc_status to describe the result flags
   and returns the assembler template, substituting a cheaper tst or
   move-to-ccr where the following insn permits it.  */

const char *
output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
{
  operands[0] = countop;
  operands[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      register int count = INTVAL (countop);
      /* If COUNT is bigger than size of storage unit in use,
	 advance to the containing unit of same size.  */
      if (count > signpos)
	{
	  int offset = (count & ~signpos) / 8;
	  count = count & signpos;
	  operands[1] = dataop = adjust_address (dataop, QImode, offset);
	}
      /* Testing the sign bit leaves the N flag meaningful; otherwise
	 only Z (inverted) is.  */
      if (count == signpos)
	cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
      else
	cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;

      /* These three statements used to use next_insns_test_no...
	 but it appears that this should do the same job.  */
      if (count == 31
	  && next_insn_tests_no_inequality (insn))
	return "tst%.l %1";
      if (count == 15
	  && next_insn_tests_no_inequality (insn))
	return "tst%.w %1";
      if (count == 7
	  && next_insn_tests_no_inequality (insn))
	return "tst%.b %1";
      /* Try to use `movew to ccr' followed by the appropriate branch insn.
	 On some m68k variants unfortunately that's slower than btst.
	 On 68000 and higher, that should also work for all HImode operands.  */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
	{
	  if (count == 3 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  if (count == 2 && DATA_REG_P (operands[1])
	      && next_insn_tests_no_inequality (insn))
	    {
	      cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
	      return "move%.w %1,%%ccr";
	    }
	  /* count == 1 followed by bvc/bvs and
	     count == 0 followed by bcc/bcs are also possible, but need
	     m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags.  */
	}

      cc_status.flags = CC_NOT_NEGATIVE;
    }
  return "btst %0,%1";
}
1695 | |
1696 /* Return true if X is a legitimate base register. STRICT_P says | |
1697 whether we need strict checking. */ | |
1698 | |
1699 bool | |
1700 m68k_legitimate_base_reg_p (rtx x, bool strict_p) | |
1701 { | |
1702 /* Allow SUBREG everywhere we allow REG. This results in better code. */ | |
1703 if (!strict_p && GET_CODE (x) == SUBREG) | |
1704 x = SUBREG_REG (x); | |
1705 | |
1706 return (REG_P (x) | |
1707 && (strict_p | |
1708 ? REGNO_OK_FOR_BASE_P (REGNO (x)) | |
1709 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x)))); | |
1710 } | |
1711 | |
1712 /* Return true if X is a legitimate index register. STRICT_P says | |
1713 whether we need strict checking. */ | |
1714 | |
1715 bool | |
1716 m68k_legitimate_index_reg_p (rtx x, bool strict_p) | |
1717 { | |
1718 if (!strict_p && GET_CODE (x) == SUBREG) | |
1719 x = SUBREG_REG (x); | |
1720 | |
1721 return (REG_P (x) | |
1722 && (strict_p | |
1723 ? REGNO_OK_FOR_INDEX_P (REGNO (x)) | |
1724 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x)))); | |
1725 } | |
1726 | |
1727 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or | |
1728 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of | |
1729 ADDRESS if so. STRICT_P says whether we need strict checking. */ | |
1730 | |
1731 static bool | |
1732 m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address) | |
1733 { | |
1734 int scale; | |
1735 | |
1736 /* Check for a scale factor. */ | |
1737 scale = 1; | |
1738 if ((TARGET_68020 || TARGET_COLDFIRE) | |
1739 && GET_CODE (x) == MULT | |
1740 && GET_CODE (XEXP (x, 1)) == CONST_INT | |
1741 && (INTVAL (XEXP (x, 1)) == 2 | |
1742 || INTVAL (XEXP (x, 1)) == 4 | |
1743 || (INTVAL (XEXP (x, 1)) == 8 | |
1744 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE)))) | |
1745 { | |
1746 scale = INTVAL (XEXP (x, 1)); | |
1747 x = XEXP (x, 0); | |
1748 } | |
1749 | |
1750 /* Check for a word extension. */ | |
1751 if (!TARGET_COLDFIRE | |
1752 && GET_CODE (x) == SIGN_EXTEND | |
1753 && GET_MODE (XEXP (x, 0)) == HImode) | |
1754 x = XEXP (x, 0); | |
1755 | |
1756 if (m68k_legitimate_index_reg_p (x, strict_p)) | |
1757 { | |
1758 address->scale = scale; | |
1759 address->index = x; | |
1760 return true; | |
1761 } | |
1762 | |
1763 return false; | |
1764 } | |
1765 | |
1766 /* Return true if X is an illegitimate symbolic constant. */ | |
1767 | |
1768 bool | |
1769 m68k_illegitimate_symbolic_constant_p (rtx x) | |
1770 { | |
1771 rtx base, offset; | |
1772 | |
1773 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P) | |
1774 { | |
1775 split_const (x, &base, &offset); | |
1776 if (GET_CODE (base) == SYMBOL_REF | |
1777 && !offset_within_block_p (base, INTVAL (offset))) | |
1778 return true; | |
1779 } | |
1780 return false; | |
1781 } | |
1782 | |
1783 /* Return true if X is a legitimate constant address that can reach | |
1784 bytes in the range [X, X + REACH). STRICT_P says whether we need | |
1785 strict checking. */ | |
1786 | |
1787 static bool | |
1788 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p) | |
1789 { | |
1790 rtx base, offset; | |
1791 | |
1792 if (!CONSTANT_ADDRESS_P (x)) | |
1793 return false; | |
1794 | |
1795 if (flag_pic | |
1796 && !(strict_p && TARGET_PCREL) | |
1797 && symbolic_operand (x, VOIDmode)) | |
1798 return false; | |
1799 | |
1800 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1) | |
1801 { | |
1802 split_const (x, &base, &offset); | |
1803 if (GET_CODE (base) == SYMBOL_REF | |
1804 && !offset_within_block_p (base, INTVAL (offset) + reach - 1)) | |
1805 return false; | |
1806 } | |
1807 | |
1808 return true; | |
1809 } | |
1810 | |
1811 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced | |
1812 labels will become jump tables. */ | |
1813 | |
1814 static bool | |
1815 m68k_jump_table_ref_p (rtx x) | |
1816 { | |
1817 if (GET_CODE (x) != LABEL_REF) | |
1818 return false; | |
1819 | |
1820 x = XEXP (x, 0); | |
1821 if (!NEXT_INSN (x) && !PREV_INSN (x)) | |
1822 return true; | |
1823 | |
1824 x = next_nonnote_insn (x); | |
1825 return x && JUMP_TABLE_DATA_P (x); | |
1826 } | |
1827 | |
1828 /* Return true if X is a legitimate address for values of mode MODE. | |
1829 STRICT_P says whether strict checking is needed. If the address | |
1830 is valid, describe its components in *ADDRESS. */ | |
1831 | |
static bool
m68k_decompose_address (enum machine_mode mode, rtx x,
			bool strict_p, struct m68k_address *address)
{
  unsigned int reach;

  /* Start from an all-zero description; only the fields for the
     components that are actually present get filled in below.  */
  memset (address, 0, sizeof (*address));

  /* REACH is the number of bytes the address must be able to span;
     BLKmode accesses only need to reach their first byte.  */
  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* NOTE: the checks below are tried in order from the simplest
     addressing mode to the most complex; the first match wins.  */

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  The displacement must leave the
     whole REACH-byte access inside the signed 16-bit range.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (flag_pic
      && GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx
      && (GET_CODE (XEXP (x, 1)) == SYMBOL_REF
	  || GET_CODE (XEXP (x, 1)) == LABEL_REF))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  */
  if (GET_CODE (x) == PLUS
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  Stripping it may leave
	 just the base or index part in X, handled below.  */
      if (GET_CODE (x) == PLUS
	  && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
	{
	  address->base = x;
	  return true;
	}

      /* Check for a suppressed base register.  Do not allow this case
	 for non-symbolic offsets as it effectively gives gcc freedom
	 to treat data registers as base registers, which can generate
	 worse code.  */
      if (address->offset
	  && symbolic_operand (address->offset, VOIDmode)
	  && m68k_decompose_index (x, strict_p, address))
	return true;
    }
  else
    {
      /* Check for a nonzero base displacement; pre-68020 only has the
	 signed 8-bit displacement form.  */
      if (GET_CODE (x) == PLUS
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}
    }

  /* We now expect the sum of a base and an index; try both orderings
     of the PLUS operands.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
	  && m68k_decompose_index (XEXP (x, 1), strict_p, address))
	{
	  address->base = XEXP (x, 0);
	  return true;
	}

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
	  && m68k_decompose_index (XEXP (x, 0), strict_p, address))
	{
	  address->base = XEXP (x, 1);
	  return true;
	}
    }
  return false;
}
1974 | |
1975 /* Return true if X is a legitimate address for values of mode MODE. | |
1976 STRICT_P says whether strict checking is needed. */ | |
1977 | |
1978 bool | |
1979 m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p) | |
1980 { | |
1981 struct m68k_address address; | |
1982 | |
1983 return m68k_decompose_address (mode, x, strict_p, &address); | |
1984 } | |
1985 | |
1986 /* Return true if X is a memory, describing its address in ADDRESS if so. | |
1987 Apply strict checking if called during or after reload. */ | |
1988 | |
1989 static bool | |
1990 m68k_legitimate_mem_p (rtx x, struct m68k_address *address) | |
1991 { | |
1992 return (MEM_P (x) | |
1993 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0), | |
1994 reload_in_progress || reload_completed, | |
1995 address)); | |
1996 } | |
1997 | |
1998 /* Return true if X matches the 'Q' constraint. It must be a memory | |
1999 with a base address and no constant offset or index. */ | |
2000 | |
2001 bool | |
2002 m68k_matches_q_p (rtx x) | |
2003 { | |
2004 struct m68k_address address; | |
2005 | |
2006 return (m68k_legitimate_mem_p (x, &address) | |
2007 && address.code == UNKNOWN | |
2008 && address.base | |
2009 && !address.offset | |
2010 && !address.index); | |
2011 } | |
2012 | |
2013 /* Return true if X matches the 'U' constraint. It must be a base address | |
2014 with a constant offset and no index. */ | |
2015 | |
2016 bool | |
2017 m68k_matches_u_p (rtx x) | |
2018 { | |
2019 struct m68k_address address; | |
2020 | |
2021 return (m68k_legitimate_mem_p (x, &address) | |
2022 && address.code == UNKNOWN | |
2023 && address.base | |
2024 && address.offset | |
2025 && !address.index); | |
2026 } | |
2027 | |
2028 /* Legitimize PIC addresses. If the address is already | |
2029 position-independent, we return ORIG. Newly generated | |
2030 position-independent addresses go to REG. If we need more | |
2031 than one register, we lose. | |
2032 | |
2033 An address is legitimized by making an indirect reference | |
2034 through the Global Offset Table with the name of the symbol | |
2035 used as an offset. | |
2036 | |
2037 The assembler and linker are responsible for placing the | |
2038 address of the symbol in the GOT. The function prologue | |
2039 is responsible for initializing a5 to the starting address | |
2040 of the GOT. | |
2041 | |
2042 The assembler is also responsible for translating a symbol name | |
2043 into a constant displacement from the start of the GOT. | |
2044 | |
2045 A quick example may make things a little clearer: | |
2046 | |
2047 When not generating PIC code to store the value 12345 into _foo | |
2048 we would generate the following code: | |
2049 | |
2050 movel #12345, _foo | |
2051 | |
2052 When generating PIC two transformations are made. First, the compiler | |
2053 loads the address of foo into a register. So the first transformation makes: | |
2054 | |
2055 lea _foo, a0 | |
2056 movel #12345, a0@ | |
2057 | |
2058 The code in movsi will intercept the lea instruction and call this | |
2059 routine which will transform the instructions into: | |
2060 | |
2061 movel a5@(_foo:w), a0 | |
2062 movel #12345, a0@ | |
2063 | |
2064 | |
2065 That (in a nutshell) is how *all* symbol and label references are | |
2066 handled. */ | |
2067 | |
rtx
legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
			rtx reg)
{
  rtx pic_ref = orig;

  /* First handle a simple SYMBOL_REF or LABEL_REF */
  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      /* Symbolic references always need a scratch register here.  */
      gcc_assert (reg);

      if (TARGET_COLDFIRE && TARGET_XGOT)
	/* When compiling with -mxgot switch the code for the above
	   example will look like this:

	   movel a5, a0
	   addl _foo@GOT, a0
	   movel a0@, a0
	   movel #12345, a0@ */
	{
	  rtx pic_offset;

	  /* Wrap ORIG in UNSPEC_GOTOFF to tip m68k_output_addr_const_extra
	     to put @GOT after reference.  */
	  pic_offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, orig),
				       UNSPEC_GOTOFF);
	  pic_offset = gen_rtx_CONST (Pmode, pic_offset);
	  emit_move_insn (reg, pic_offset);
	  emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	/* Standard small-GOT form: load through a5-relative slot.  */
	pic_ref = gen_rtx_MEM (Pmode,
			       gen_rtx_PLUS (Pmode,
					     pic_offset_table_rtx, orig));
      crtl->uses_pic_offset_table = 1;
      /* The GOT slot is read-only as far as this function's code is
	 concerned; mark it so the optimizers know.  */
      MEM_READONLY_P (pic_ref) = 1;
      emit_move_insn (reg, pic_ref);
      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      /* Make sure this has not already been legitimized.  */
      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);

      /* legitimize both operands of the PLUS */
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Recurse on each half; reuse REG for the second half only if
	 the first half did not end up living in it.  */
      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      /* A constant second half folds directly into the address.  */
      if (GET_CODE (orig) == CONST_INT)
	return plus_constant (base, INTVAL (orig));
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }
  return pic_ref;
}
2133 | |
2134 | |
2135 | |
/* True if I fits the signed 8-bit immediate range of a moveq.  */
#define USE_MOVQ(i)	((unsigned) ((i) + 128) <= 255)

/* Return the type of move that should be used for integer I.  */

M68K_CONST_METHOD
m68k_const_method (HOST_WIDE_INT i)
{
  unsigned u;

  if (USE_MOVQ (i))
    return MOVQ;

  /* The ColdFire doesn't have byte or word operations.  */
  /* FIXME: This may not be useful for the m68060 either.  */
  if (!TARGET_COLDFIRE)
    {
      /* if -256 < N < 256 but N is not in range for a moveq
	 N^ff will be, so use moveq #N^ff, dreg; not.b dreg.  */
      if (USE_MOVQ (i ^ 0xff))
	return NOTB;
      /* Likewise, try with not.w */
      if (USE_MOVQ (i ^ 0xffff))
	return NOTW;
      /* This is the only value where neg.w is useful */
      if (i == -65408)
	return NEGW;
    }

  /* Try also with swap: check whether the halfword-swapped value
     fits a moveq.  */
  u = i;
  if (USE_MOVQ ((u >> 16) | (u << 16)))
    return SWAP;

  if (TARGET_ISAB)
    {
      /* Try using MVZ/MVS with an immediate value to load constants.  */
      if (i >= 0 && i <= 65535)
	return MVZ;
      if (i >= -32768 && i <= 32767)
	return MVS;
    }

  /* Otherwise, use move.l */
  return MOVL;
}
2181 | |
2182 /* Return the cost of moving constant I into a data register. */ | |
2183 | |
2184 static int | |
2185 const_int_cost (HOST_WIDE_INT i) | |
2186 { | |
2187 switch (m68k_const_method (i)) | |
2188 { | |
2189 case MOVQ: | |
2190 /* Constants between -128 and 127 are cheap due to moveq. */ | |
2191 return 0; | |
2192 case MVZ: | |
2193 case MVS: | |
2194 case NOTB: | |
2195 case NOTW: | |
2196 case NEGW: | |
2197 case SWAP: | |
2198 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */ | |
2199 return 1; | |
2200 case MOVL: | |
2201 return 2; | |
2202 default: | |
2203 gcc_unreachable (); | |
2204 } | |
2205 } | |
2206 | |
/* Cost function for rtx expressions: set *TOTAL to the cost of X and
   return true when the CODE is handled here, false to let the generic
   costing take over.  NOTE(review): presumably installed as the
   TARGET_RTX_COSTS hook; the hook table is outside this chunk.  */

static bool
m68k_rtx_costs (rtx x, int code, int outer_code, int *total,
		bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      /* Constant zero is super cheap due to clr instruction.  */
      if (x == const0_rtx)
	*total = 0;
      else
	*total = const_int_cost (INTVAL (x));
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      /* Make 0.0 cheaper than other floating constants to
	 encourage creating tstsf and tstdf insns.  */
      if (outer_code == COMPARE
	  && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
	*total = 4;
      else
	*total = 5;
      return true;

    /* These are vaguely right for a 68020.  */
    /* The costs for long multiply have been adjusted to work properly
       in synth_mult on the 68020, relative to an average of the time
       for add and the time for shift, taking away a little more because
       sometimes move insns are needed.  */
    /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
       terms.  */
    /* The three macros below select per-CPU cost constants for long
       multiply, word multiply and word divide respectively.  */
#define MULL_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 4		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 3 : 13)

#define MULW_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 3				\
   : TUNE_68000_10 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 2		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 2 : 8)

#define DIVW_COST				\
  (TARGET_CF_HWDIV ? 11				\
   : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)

    case PLUS:
      /* An lea costs about three times as much as a simple add.  */
      if (GET_MODE (x) == SImode
	  && GET_CODE (XEXP (x, 1)) == REG
	  && GET_CODE (XEXP (x, 0)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
	{
	  /* lea an@(dx:l:i),am */
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
	  return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TUNE_68060)
	{
	  *total = COSTS_N_INSNS(1);
	  return true;
	}
      if (TUNE_68000_10)
	{
	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	    {
	      /* On 68000/68010 shift cost grows with the shift count.  */
	      if (INTVAL (XEXP (x, 1)) < 16)
		*total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
	      else
		/* We're using clrw + swap for these cases.  */
		*total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
	    }
	  else
	    *total = COSTS_N_INSNS (10); /* Worst case.  */
	  return true;
	}
      /* A shift by a big integer takes an extra instruction.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 16))
	{
	  *total = COSTS_N_INSNS (2);	 /* clrw;swap */
	  return true;
	}
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && !(INTVAL (XEXP (x, 1)) > 0
	       && INTVAL (XEXP (x, 1)) <= 8))
	{
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	 /* lsr #i,dn */
	  return true;
	}
      return false;

    case MULT:
      /* Widening word multiplies and narrow-mode multiplies use the
	 word-multiply cost; everything else is a long multiply.  */
      if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	   || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && GET_MODE (x) == SImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else
	*total = COSTS_N_INSNS (MULL_COST);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
	*total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
      else if (TARGET_CF_HWDIV)
	*total = COSTS_N_INSNS (18);
      else
	*total = COSTS_N_INSNS (43);		/* div.l */
      return true;

    default:
      return false;
    }
}
2347 | |
2348 /* Return an instruction to move CONST_INT OPERANDS[1] into data register | |
2349 OPERANDS[0]. */ | |
2350 | |
static const char *
output_move_const_into_data_reg (rtx *operands)
{
  HOST_WIDE_INT i;

  i = INTVAL (operands[1]);
  switch (m68k_const_method (i))
    {
    case MVZ:
      return "mvzw %1,%0";
    case MVS:
      return "mvsw %1,%0";
    case MOVQ:
      return "moveq %1,%0";
    case NOTB:
      /* Two-insn sequence; reset the tracked cc state since the final
	 condition codes do not describe a simple move of the value.  */
      CC_STATUS_INIT;
      /* Rewrite the immediate so that moveq loads I^0xff, which the
	 following not.b turns back into I.  */
      operands[1] = GEN_INT (i ^ 0xff);
      return "moveq %1,%0\n\tnot%.b %0";
    case NOTW:
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xffff);
      return "moveq %1,%0\n\tnot%.w %0";
    case NEGW:
      CC_STATUS_INIT;
      return "moveq #-128,%0\n\tneg%.w %0";
    case SWAP:
      {
	unsigned u = i;

	/* Load the halfword-swapped value, then swap it into place.  */
	operands[1] = GEN_INT ((u << 16) | (u >> 16));
	return "moveq %1,%0\n\tswap %0";
      }
    case MOVL:
      return "move%.l %1,%0";
    default:
      gcc_unreachable ();
    }
}
2389 | |
2390 /* Return true if I can be handled by ISA B's mov3q instruction. */ | |
2391 | |
2392 bool | |
2393 valid_mov3q_const (HOST_WIDE_INT i) | |
2394 { | |
2395 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7)); | |
2396 } | |
2397 | |
2398 /* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0]. | |
2399 I is the value of OPERANDS[1]. */ | |
2400 | |
static const char *
output_move_simode_const (rtx *operands)
{
  rtx dest;
  HOST_WIDE_INT src;

  dest = operands[0];
  src = INTVAL (operands[1]);
  if (src == 0
      && (DATA_REG_P (dest) || MEM_P (dest))
      /* clr insns on 68000 read before writing.  */
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
    return "clr%.l %0";
  else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
    return "mov3q%.l %1,%0";
  else if (src == 0 && ADDRESS_REG_P (dest))
    /* Clearing an address register: subtract it from itself.  */
    return "sub%.l %0,%0";
  else if (DATA_REG_P (dest))
    return output_move_const_into_data_reg (operands);
  else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
    {
      /* A word move into an address register covers 16-bit constants.  */
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%0";
      return "move%.w %1,%0";
    }
  else if (MEM_P (dest)
	   && GET_CODE (XEXP (dest, 0)) == PRE_DEC
	   && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
	   && IN_RANGE (src, -0x8000, 0x7fff))
    {
      /* Push of a constant representable in 16 bits.  */
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%-";
      return "pea %a1";
    }
  return "move%.l %1,%0";
}
2438 | |
2439 const char * | |
2440 output_move_simode (rtx *operands) | |
2441 { | |
2442 if (GET_CODE (operands[1]) == CONST_INT) | |
2443 return output_move_simode_const (operands); | |
2444 else if ((GET_CODE (operands[1]) == SYMBOL_REF | |
2445 || GET_CODE (operands[1]) == CONST) | |
2446 && push_operand (operands[0], SImode)) | |
2447 return "pea %a1"; | |
2448 else if ((GET_CODE (operands[1]) == SYMBOL_REF | |
2449 || GET_CODE (operands[1]) == CONST) | |
2450 && ADDRESS_REG_P (operands[0])) | |
2451 return "lea %a1,%0"; | |
2452 return "move%.l %1,%0"; | |
2453 } | |
2454 | |
2455 const char * | |
2456 output_move_himode (rtx *operands) | |
2457 { | |
2458 if (GET_CODE (operands[1]) == CONST_INT) | |
2459 { | |
2460 if (operands[1] == const0_rtx | |
2461 && (DATA_REG_P (operands[0]) | |
2462 || GET_CODE (operands[0]) == MEM) | |
2463 /* clr insns on 68000 read before writing. */ | |
2464 && ((TARGET_68010 || TARGET_COLDFIRE) | |
2465 || !(GET_CODE (operands[0]) == MEM | |
2466 && MEM_VOLATILE_P (operands[0])))) | |
2467 return "clr%.w %0"; | |
2468 else if (operands[1] == const0_rtx | |
2469 && ADDRESS_REG_P (operands[0])) | |
2470 return "sub%.l %0,%0"; | |
2471 else if (DATA_REG_P (operands[0]) | |
2472 && INTVAL (operands[1]) < 128 | |
2473 && INTVAL (operands[1]) >= -128) | |
2474 return "moveq %1,%0"; | |
2475 else if (INTVAL (operands[1]) < 0x8000 | |
2476 && INTVAL (operands[1]) >= -0x8000) | |
2477 return "move%.w %1,%0"; | |
2478 } | |
2479 else if (CONSTANT_P (operands[1])) | |
2480 return "move%.l %1,%0"; | |
2481 return "move%.w %1,%0"; | |
2482 } | |
2483 | |
/* Return the assembler template for a QImode move of OPERANDS[1]
   into OPERANDS[0].  */

const char *
output_move_qimode (rtx *operands)
{
  /* 68k family always modifies the stack pointer by at least 2, even for
     byte pushes.  The 5200 (ColdFire) does not do this.  */

  /* This case is generated by pushqi1 pattern now.  */
  gcc_assert (!(GET_CODE (operands[0]) == MEM
		&& GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
		&& XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
		&& ! ADDRESS_REG_P (operands[1])
		&& ! TARGET_COLDFIRE));

  /* clr and st insns on 68000 read before writing.  */
  if (!ADDRESS_REG_P (operands[0])
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
    {
      if (operands[1] == const0_rtx)
	return "clr%.b %0";
      /* st sets all bits of a byte; use it for the value 0xff.  */
      if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
	  && GET_CODE (operands[1]) == CONST_INT
	  && (INTVAL (operands[1]) & 255) == 255)
	{
	  /* st leaves the cc tracker with nothing useful; reset it.  */
	  CC_STATUS_INIT;
	  return "st %0";
	}
    }
  if (GET_CODE (operands[1]) == CONST_INT
      && DATA_REG_P (operands[0])
      && INTVAL (operands[1]) < 128
      && INTVAL (operands[1]) >= -128)
    return "moveq %1,%0";
  if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
    return "sub%.l %0,%0";
  if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
    return "move%.l %1,%0";
  /* 68k family (including the 5200 ColdFire) does not support byte moves to
     from address registers.  */
  if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
    return "move%.w %1,%0";
  return "move%.b %1,%0";
}
2527 | |
2528 const char * | |
2529 output_move_stricthi (rtx *operands) | |
2530 { | |
2531 if (operands[1] == const0_rtx | |
2532 /* clr insns on 68000 read before writing. */ | |
2533 && ((TARGET_68010 || TARGET_COLDFIRE) | |
2534 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))) | |
2535 return "clr%.w %0"; | |
2536 return "move%.w %1,%0"; | |
2537 } | |
2538 | |
2539 const char * | |
2540 output_move_strictqi (rtx *operands) | |
2541 { | |
2542 if (operands[1] == const0_rtx | |
2543 /* clr insns on 68000 read before writing. */ | |
2544 && ((TARGET_68010 || TARGET_COLDFIRE) | |
2545 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))) | |
2546 return "clr%.b %0"; | |
2547 return "move%.b %1,%0"; | |
2548 } | |
2549 | |
2550 /* Return the best assembler insn template | |
2551 for moving operands[1] into operands[0] as a fullword. */ | |
2552 | |
2553 static const char * | |
2554 singlemove_string (rtx *operands) | |
2555 { | |
2556 if (GET_CODE (operands[1]) == CONST_INT) | |
2557 return output_move_simode_const (operands); | |
2558 return "move%.l %1,%0"; | |
2559 } | |
2560 | |
2561 | |
2562 /* Output assembler or rtl code to perform a doubleword move insn | |
2563 with operands OPERANDS. | |
2564 Pointers to 3 helper functions should be specified: | |
2565 HANDLE_REG_ADJUST to adjust a register by a small value, | |
2566 HANDLE_COMPADR to compute an address and | |
2567 HANDLE_MOVSI to move 4 bytes. */ | |
2568 | |
2569 static void | |
2570 handle_move_double (rtx operands[2], | |
2571 void (*handle_reg_adjust) (rtx, int), | |
2572 void (*handle_compadr) (rtx [2]), | |
2573 void (*handle_movsi) (rtx [2])) | |
2574 { | |
2575 enum | |
2576 { | |
2577 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP | |
2578 } optype0, optype1; | |
2579 rtx latehalf[2]; | |
2580 rtx middlehalf[2]; | |
2581 rtx xops[2]; | |
2582 rtx addreg0 = 0, addreg1 = 0; | |
2583 int dest_overlapped_low = 0; | |
2584 int size = GET_MODE_SIZE (GET_MODE (operands[0])); | |
2585 | |
2586 middlehalf[0] = 0; | |
2587 middlehalf[1] = 0; | |
2588 | |
2589 /* First classify both operands. */ | |
2590 | |
2591 if (REG_P (operands[0])) | |
2592 optype0 = REGOP; | |
2593 else if (offsettable_memref_p (operands[0])) | |
2594 optype0 = OFFSOP; | |
2595 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC) | |
2596 optype0 = POPOP; | |
2597 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC) | |
2598 optype0 = PUSHOP; | |
2599 else if (GET_CODE (operands[0]) == MEM) | |
2600 optype0 = MEMOP; | |
2601 else | |
2602 optype0 = RNDOP; | |
2603 | |
2604 if (REG_P (operands[1])) | |
2605 optype1 = REGOP; | |
2606 else if (CONSTANT_P (operands[1])) | |
2607 optype1 = CNSTOP; | |
2608 else if (offsettable_memref_p (operands[1])) | |
2609 optype1 = OFFSOP; | |
2610 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC) | |
2611 optype1 = POPOP; | |
2612 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC) | |
2613 optype1 = PUSHOP; | |
2614 else if (GET_CODE (operands[1]) == MEM) | |
2615 optype1 = MEMOP; | |
2616 else | |
2617 optype1 = RNDOP; | |
2618 | |
2619 /* Check for the cases that the operand constraints are not supposed | |
2620 to allow to happen. Generating code for these cases is | |
2621 painful. */ | |
2622 gcc_assert (optype0 != RNDOP && optype1 != RNDOP); | |
2623 | |
2624 /* If one operand is decrementing and one is incrementing | |
2625 decrement the former register explicitly | |
2626 and change that operand into ordinary indexing. */ | |
2627 | |
2628 if (optype0 == PUSHOP && optype1 == POPOP) | |
2629 { | |
2630 operands[0] = XEXP (XEXP (operands[0], 0), 0); | |
2631 | |
2632 handle_reg_adjust (operands[0], -size); | |
2633 | |
2634 if (GET_MODE (operands[1]) == XFmode) | |
2635 operands[0] = gen_rtx_MEM (XFmode, operands[0]); | |
2636 else if (GET_MODE (operands[0]) == DFmode) | |
2637 operands[0] = gen_rtx_MEM (DFmode, operands[0]); | |
2638 else | |
2639 operands[0] = gen_rtx_MEM (DImode, operands[0]); | |
2640 optype0 = OFFSOP; | |
2641 } | |
2642 if (optype0 == POPOP && optype1 == PUSHOP) | |
2643 { | |
2644 operands[1] = XEXP (XEXP (operands[1], 0), 0); | |
2645 | |
2646 handle_reg_adjust (operands[1], -size); | |
2647 | |
2648 if (GET_MODE (operands[1]) == XFmode) | |
2649 operands[1] = gen_rtx_MEM (XFmode, operands[1]); | |
2650 else if (GET_MODE (operands[1]) == DFmode) | |
2651 operands[1] = gen_rtx_MEM (DFmode, operands[1]); | |
2652 else | |
2653 operands[1] = gen_rtx_MEM (DImode, operands[1]); | |
2654 optype1 = OFFSOP; | |
2655 } | |
2656 | |
2657 /* If an operand is an unoffsettable memory ref, find a register | |
2658 we can increment temporarily to make it refer to the second word. */ | |
2659 | |
2660 if (optype0 == MEMOP) | |
2661 addreg0 = find_addr_reg (XEXP (operands[0], 0)); | |
2662 | |
2663 if (optype1 == MEMOP) | |
2664 addreg1 = find_addr_reg (XEXP (operands[1], 0)); | |
2665 | |
2666 /* Ok, we can do one word at a time. | |
2667 Normally we do the low-numbered word first, | |
2668 but if either operand is autodecrementing then we | |
2669 do the high-numbered word first. | |
2670 | |
2671 In either case, set up in LATEHALF the operands to use | |
2672 for the high-numbered word and in some cases alter the | |
2673 operands in OPERANDS to be suitable for the low-numbered word. */ | |
2674 | |
2675 if (size == 12) | |
2676 { | |
2677 if (optype0 == REGOP) | |
2678 { | |
2679 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2); | |
2680 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); | |
2681 } | |
2682 else if (optype0 == OFFSOP) | |
2683 { | |
2684 middlehalf[0] = adjust_address (operands[0], SImode, 4); | |
2685 latehalf[0] = adjust_address (operands[0], SImode, size - 4); | |
2686 } | |
2687 else | |
2688 { | |
2689 middlehalf[0] = adjust_address (operands[0], SImode, 0); | |
2690 latehalf[0] = adjust_address (operands[0], SImode, 0); | |
2691 } | |
2692 | |
2693 if (optype1 == REGOP) | |
2694 { | |
2695 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2); | |
2696 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); | |
2697 } | |
2698 else if (optype1 == OFFSOP) | |
2699 { | |
2700 middlehalf[1] = adjust_address (operands[1], SImode, 4); | |
2701 latehalf[1] = adjust_address (operands[1], SImode, size - 4); | |
2702 } | |
2703 else if (optype1 == CNSTOP) | |
2704 { | |
2705 if (GET_CODE (operands[1]) == CONST_DOUBLE) | |
2706 { | |
2707 REAL_VALUE_TYPE r; | |
2708 long l[3]; | |
2709 | |
2710 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); | |
2711 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l); | |
2712 operands[1] = GEN_INT (l[0]); | |
2713 middlehalf[1] = GEN_INT (l[1]); | |
2714 latehalf[1] = GEN_INT (l[2]); | |
2715 } | |
2716 else | |
2717 { | |
2718 /* No non-CONST_DOUBLE constant should ever appear | |
2719 here. */ | |
2720 gcc_assert (!CONSTANT_P (operands[1])); | |
2721 } | |
2722 } | |
2723 else | |
2724 { | |
2725 middlehalf[1] = adjust_address (operands[1], SImode, 0); | |
2726 latehalf[1] = adjust_address (operands[1], SImode, 0); | |
2727 } | |
2728 } | |
2729 else | |
2730 /* size is not 12: */ | |
2731 { | |
2732 if (optype0 == REGOP) | |
2733 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); | |
2734 else if (optype0 == OFFSOP) | |
2735 latehalf[0] = adjust_address (operands[0], SImode, size - 4); | |
2736 else | |
2737 latehalf[0] = adjust_address (operands[0], SImode, 0); | |
2738 | |
2739 if (optype1 == REGOP) | |
2740 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); | |
2741 else if (optype1 == OFFSOP) | |
2742 latehalf[1] = adjust_address (operands[1], SImode, size - 4); | |
2743 else if (optype1 == CNSTOP) | |
2744 split_double (operands[1], &operands[1], &latehalf[1]); | |
2745 else | |
2746 latehalf[1] = adjust_address (operands[1], SImode, 0); | |
2747 } | |
2748 | |
2749 /* If insn is effectively movd N(sp),-(sp) then we will do the | |
2750 high word first. We should use the adjusted operand 1 (which is N+4(sp)) | |
2751 for the low word as well, to compensate for the first decrement of sp. */ | |
2752 if (optype0 == PUSHOP | |
2753 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM | |
2754 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1])) | |
2755 operands[1] = middlehalf[1] = latehalf[1]; | |
2756 | |
2757 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)), | |
2758 if the upper part of reg N does not appear in the MEM, arrange to | |
2759 emit the move late-half first. Otherwise, compute the MEM address | |
2760 into the upper part of N and use that as a pointer to the memory | |
2761 operand. */ | |
2762 if (optype0 == REGOP | |
2763 && (optype1 == OFFSOP || optype1 == MEMOP)) | |
2764 { | |
2765 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0])); | |
2766 | |
2767 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)) | |
2768 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0))) | |
2769 { | |
2770 /* If both halves of dest are used in the src memory address, | |
2771 compute the address into latehalf of dest. | |
2772 Note that this can't happen if the dest is two data regs. */ | |
2773 compadr: | |
2774 xops[0] = latehalf[0]; | |
2775 xops[1] = XEXP (operands[1], 0); | |
2776 | |
2777 handle_compadr (xops); | |
2778 if (GET_MODE (operands[1]) == XFmode) | |
2779 { | |
2780 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]); | |
2781 middlehalf[1] = adjust_address (operands[1], DImode, size - 8); | |
2782 latehalf[1] = adjust_address (operands[1], DImode, size - 4); | |
2783 } | |
2784 else | |
2785 { | |
2786 operands[1] = gen_rtx_MEM (DImode, latehalf[0]); | |
2787 latehalf[1] = adjust_address (operands[1], DImode, size - 4); | |
2788 } | |
2789 } | |
2790 else if (size == 12 | |
2791 && reg_overlap_mentioned_p (middlehalf[0], | |
2792 XEXP (operands[1], 0))) | |
2793 { | |
2794 /* Check for two regs used by both source and dest. | |
2795 Note that this can't happen if the dest is all data regs. | |
2796 It can happen if the dest is d6, d7, a0. | |
2797 But in that case, latehalf is an addr reg, so | |
2798 the code at compadr does ok. */ | |
2799 | |
2800 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)) | |
2801 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0))) | |
2802 goto compadr; | |
2803 | |
2804 /* JRV says this can't happen: */ | |
2805 gcc_assert (!addreg0 && !addreg1); | |
2806 | |
2807 /* Only the middle reg conflicts; simply put it last. */ | |
2808 handle_movsi (operands); | |
2809 handle_movsi (latehalf); | |
2810 handle_movsi (middlehalf); | |
2811 | |
2812 return; | |
2813 } | |
2814 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))) | |
2815 /* If the low half of dest is mentioned in the source memory | |
2816 address, the arrange to emit the move late half first. */ | |
2817 dest_overlapped_low = 1; | |
2818 } | |
2819 | |
2820 /* If one or both operands autodecrementing, | |
2821 do the two words, high-numbered first. */ | |
2822 | |
2823 /* Likewise, the first move would clobber the source of the second one, | |
2824 do them in the other order. This happens only for registers; | |
2825 such overlap can't happen in memory unless the user explicitly | |
2826 sets it up, and that is an undefined circumstance. */ | |
2827 | |
2828 if (optype0 == PUSHOP || optype1 == PUSHOP | |
2829 || (optype0 == REGOP && optype1 == REGOP | |
2830 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1])) | |
2831 || REGNO (operands[0]) == REGNO (latehalf[1]))) | |
2832 || dest_overlapped_low) | |
2833 { | |
2834 /* Make any unoffsettable addresses point at high-numbered word. */ | |
2835 if (addreg0) | |
2836 handle_reg_adjust (addreg0, size - 4); | |
2837 if (addreg1) | |
2838 handle_reg_adjust (addreg1, size - 4); | |
2839 | |
2840 /* Do that word. */ | |
2841 handle_movsi (latehalf); | |
2842 | |
2843 /* Undo the adds we just did. */ | |
2844 if (addreg0) | |
2845 handle_reg_adjust (addreg0, -4); | |
2846 if (addreg1) | |
2847 handle_reg_adjust (addreg1, -4); | |
2848 | |
2849 if (size == 12) | |
2850 { | |
2851 handle_movsi (middlehalf); | |
2852 | |
2853 if (addreg0) | |
2854 handle_reg_adjust (addreg0, -4); | |
2855 if (addreg1) | |
2856 handle_reg_adjust (addreg1, -4); | |
2857 } | |
2858 | |
2859 /* Do low-numbered word. */ | |
2860 | |
2861 handle_movsi (operands); | |
2862 return; | |
2863 } | |
2864 | |
2865 /* Normal case: do the two words, low-numbered first. */ | |
2866 | |
2867 handle_movsi (operands); | |
2868 | |
2869 /* Do the middle one of the three words for long double */ | |
2870 if (size == 12) | |
2871 { | |
2872 if (addreg0) | |
2873 handle_reg_adjust (addreg0, 4); | |
2874 if (addreg1) | |
2875 handle_reg_adjust (addreg1, 4); | |
2876 | |
2877 handle_movsi (middlehalf); | |
2878 } | |
2879 | |
2880 /* Make any unoffsettable addresses point at high-numbered word. */ | |
2881 if (addreg0) | |
2882 handle_reg_adjust (addreg0, 4); | |
2883 if (addreg1) | |
2884 handle_reg_adjust (addreg1, 4); | |
2885 | |
2886 /* Do that word. */ | |
2887 handle_movsi (latehalf); | |
2888 | |
2889 /* Undo the adds we just did. */ | |
2890 if (addreg0) | |
2891 handle_reg_adjust (addreg0, -(size - 4)); | |
2892 if (addreg1) | |
2893 handle_reg_adjust (addreg1, -(size - 4)); | |
2894 | |
2895 return; | |
2896 } | |
2897 | |
2898 /* Output assembler code to adjust REG by N. */ | |
2899 static void | |
2900 output_reg_adjust (rtx reg, int n) | |
2901 { | |
2902 const char *s; | |
2903 | |
2904 gcc_assert (GET_MODE (reg) == SImode | |
2905 && -12 <= n && n != 0 && n <= 12); | |
2906 | |
2907 switch (n) | |
2908 { | |
2909 case 12: | |
2910 s = "add%.l #12,%0"; | |
2911 break; | |
2912 | |
2913 case 8: | |
2914 s = "addq%.l #8,%0"; | |
2915 break; | |
2916 | |
2917 case 4: | |
2918 s = "addq%.l #4,%0"; | |
2919 break; | |
2920 | |
2921 case -12: | |
2922 s = "sub%.l #12,%0"; | |
2923 break; | |
2924 | |
2925 case -8: | |
2926 s = "subq%.l #8,%0"; | |
2927 break; | |
2928 | |
2929 case -4: | |
2930 s = "subq%.l #4,%0"; | |
2931 break; | |
2932 | |
2933 default: | |
2934 gcc_unreachable (); | |
2935 s = NULL; | |
2936 } | |
2937 | |
2938 output_asm_insn (s, ®); | |
2939 } | |
2940 | |
2941 /* Emit rtl code to adjust REG by N. */ | |
2942 static void | |
2943 emit_reg_adjust (rtx reg1, int n) | |
2944 { | |
2945 rtx reg2; | |
2946 | |
2947 gcc_assert (GET_MODE (reg1) == SImode | |
2948 && -12 <= n && n != 0 && n <= 12); | |
2949 | |
2950 reg1 = copy_rtx (reg1); | |
2951 reg2 = copy_rtx (reg1); | |
2952 | |
2953 if (n < 0) | |
2954 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n))); | |
2955 else if (n > 0) | |
2956 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n))); | |
2957 else | |
2958 gcc_unreachable (); | |
2959 } | |
2960 | |
/* Output assembler to load the address of OPERANDS[1] into register
   OPERANDS[0] via an lea instruction.  (NOTE(review): the comment in
   the original source had the operand roles swapped; %0 is the
   destination register in the template below.)  */
static void
output_compadr (rtx operands[2])
{
  output_asm_insn ("lea %a1,%0", operands);
}
2967 | |
/* Output the best assembler insn for moving operands[1] into operands[0]
   as a fullword.  Delegates template selection to singlemove_string.  */
static void
output_movsi (rtx operands[2])
{
  output_asm_insn (singlemove_string (operands), operands);
}
2975 | |
2976 /* Copy OP and change its mode to MODE. */ | |
2977 static rtx | |
2978 copy_operand (rtx op, enum machine_mode mode) | |
2979 { | |
2980 /* ??? This looks really ugly. There must be a better way | |
2981 to change a mode on the operand. */ | |
2982 if (GET_MODE (op) != VOIDmode) | |
2983 { | |
2984 if (REG_P (op)) | |
2985 op = gen_rtx_REG (mode, REGNO (op)); | |
2986 else | |
2987 { | |
2988 op = copy_rtx (op); | |
2989 PUT_MODE (op, mode); | |
2990 } | |
2991 } | |
2992 | |
2993 return op; | |
2994 } | |
2995 | |
/* Emit rtl code for moving operands[1] into operands[0] as a fullword.
   Both operands are first coerced to SImode copies so the generated
   movsi pattern sees consistently-moded rtxes.  */
static void
emit_movsi (rtx operands[2])
{
  operands[0] = copy_operand (operands[0], SImode);
  operands[1] = copy_operand (operands[1], SImode);

  emit_insn (gen_movsi (operands[0], operands[1]));
}
3005 | |
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  Thin wrapper that runs the shared
   handle_move_double driver with the assembler-emitting callbacks;
   the returned template is empty because all output is done via
   output_asm_insn inside the callbacks.  */
const char *
output_move_double (rtx *operands)
{
  handle_move_double (operands,
		      output_reg_adjust, output_compadr, output_movsi);

  return "";
}
3016 | |
/* Emit rtl code to perform a doubleword move insn with operands
   OPERANDS.  Same driver as output_move_double, but with callbacks
   that emit insns instead of assembler text (emit_movsi serves both
   the address-computation and word-move roles).  */
void
m68k_emit_move_double (rtx operands[2])
{
  handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
}
3024 | |
3025 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a | |
3026 new rtx with the correct mode. */ | |
3027 | |
3028 static rtx | |
3029 force_mode (enum machine_mode mode, rtx orig) | |
3030 { | |
3031 if (mode == GET_MODE (orig)) | |
3032 return orig; | |
3033 | |
3034 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER) | |
3035 abort (); | |
3036 | |
3037 return gen_rtx_REG (mode, REGNO (orig)); | |
3038 } | |
3039 | |
/* Predicate: nonzero if OP is a floating-point register and register
   renumbering has happened (reg_renumber is a pointer used here purely
   as an "after reload has assigned hard regs" flag).  MODE is ignored.  */
static int
fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return reg_renumber && FP_REG_P (op);
}
3045 | |
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.

   NOTE(review): this code closely mirrors the pa (HP-PA) backend's
   emit_move_sequence, including its comments — some remarks (e.g. the
   "14 bits" displacement limit) presumably refer to PA constraints;
   confirm against the m68k addressing modes if this path is revised.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* During reload, replace a pseudo destination that did not get a
     hard register with its equivalent memory location.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  /* Likewise for a pseudo source.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  /* Pick up any address replacements recorded by earlier reloads so
     the checks below see the final addresses.  */
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register.  */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  /* Load the displacement, then rebuild the address around the
	     scratch register.  */
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0), 0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.  Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
3209 | |
3210 /* Split one or more DImode RTL references into pairs of SImode | |
3211 references. The RTL can be REG, offsettable MEM, integer constant, or | |
3212 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to | |
3213 split and "num" is its length. lo_half and hi_half are output arrays | |
3214 that parallel "operands". */ | |
3215 | |
3216 void | |
3217 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[]) | |
3218 { | |
3219 while (num--) | |
3220 { | |
3221 rtx op = operands[num]; | |
3222 | |
3223 /* simplify_subreg refuses to split volatile memory addresses, | |
3224 but we still have to handle it. */ | |
3225 if (GET_CODE (op) == MEM) | |
3226 { | |
3227 lo_half[num] = adjust_address (op, SImode, 4); | |
3228 hi_half[num] = adjust_address (op, SImode, 0); | |
3229 } | |
3230 else | |
3231 { | |
3232 lo_half[num] = simplify_gen_subreg (SImode, op, | |
3233 GET_MODE (op) == VOIDmode | |
3234 ? DImode : GET_MODE (op), 4); | |
3235 hi_half[num] = simplify_gen_subreg (SImode, op, | |
3236 GET_MODE (op) == VOIDmode | |
3237 ? DImode : GET_MODE (op), 0); | |
3238 } | |
3239 } | |
3240 } | |
3241 | |
3242 /* Split X into a base and a constant offset, storing them in *BASE | |
3243 and *OFFSET respectively. */ | |
3244 | |
3245 static void | |
3246 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset) | |
3247 { | |
3248 *offset = 0; | |
3249 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT) | |
3250 { | |
3251 *offset += INTVAL (XEXP (x, 1)); | |
3252 x = XEXP (x, 0); | |
3253 } | |
3254 *base = x; | |
3255 } | |
3256 | |
/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */

bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
		      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  /* Element 0 is the base-register adjustment when automodifying.  */
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
	return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that we have a memory location...  (for a store the MEM
	 is the destination, XEXP (set, 0); for a load the source).  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
	return false;

      /* ...with the right address.  Each access must be exactly one
	 register-width above the previous one.  */
      if (base == NULL)
	{
	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
	     There are no mode restrictions for 680x0 besides the
	     automodification rules enforced above.  */
	  if (TARGET_COLDFIRE
	      && !m68k_legitimate_base_reg_p (base, reload_completed))
	    return false;
	}
      else
	{
	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
	    return false;
	}

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
	  || !HARD_REGISTER_P (reg)
	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
	return false;

      if (last_reg)
	{
	  /* The register must belong to RCLASS and have a higher number
	     than the register in the previous SET.  */
	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
	      || REGNO (last_reg) >= REGNO (reg))
	    return false;
	}
      else
	{
	  /* Work out which register class we need.  */
	  if (INT_REGNO_P (REGNO (reg)))
	    rclass = GENERAL_REGS;
	  else if (FP_REGNO_P (REGNO (reg)))
	    rclass = FP_REGS;
	  else
	    return false;
	}

      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases: below the minimum register count,
     individual moves are cheaper than movem/fmovem.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
3369 | |
/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.  */

const char *
m68k_output_movem (rtx *operands, rtx pattern,
		   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  /* Skip element 0 when it is the automodification of the base reg.  */
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
	 is controlled by bit 15 - X.  For all other addressing modes,
	 register X + D0_REG is controlled by bit X.  Confusingly, the
	 register mask for fmovem is in the opposite order to that for
	 movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
	{
	  /* Pre-decrement: movem mask is reversed, fmovem mask is not.  */
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (regno - FP0_REG);
	  else
	    mask |= 1 << (15 - (regno - D0_REG));
	}
      else
	{
	  /* Post-increment or plain addressing: the opposite.  */
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (7 - (regno - FP0_REG));
	  else
	    mask |= 1 << (regno - D0_REG);
	}
    }
  /* movem/fmovem leave the condition codes in an unusable state.  */
  CC_STATUS_INIT;

  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
	return "fmovem %1,%a0";
      else
	return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
	return "movem%.l %1,%a0";
      else
	return "movem%.l %a0,%1";
    }
}
3439 | |
3440 /* Return a REG that occurs in ADDR with coefficient 1. | |
3441 ADDR can be effectively incremented by incrementing REG. */ | |
3442 | |
3443 static rtx | |
3444 find_addr_reg (rtx addr) | |
3445 { | |
3446 while (GET_CODE (addr) == PLUS) | |
3447 { | |
3448 if (GET_CODE (XEXP (addr, 0)) == REG) | |
3449 addr = XEXP (addr, 0); | |
3450 else if (GET_CODE (XEXP (addr, 1)) == REG) | |
3451 addr = XEXP (addr, 1); | |
3452 else if (CONSTANT_P (XEXP (addr, 0))) | |
3453 addr = XEXP (addr, 1); | |
3454 else if (CONSTANT_P (XEXP (addr, 1))) | |
3455 addr = XEXP (addr, 0); | |
3456 else | |
3457 gcc_unreachable (); | |
3458 } | |
3459 gcc_assert (GET_CODE (addr) == REG); | |
3460 return addr; | |
3461 } | |
3462 | |
/* Output assembler code to perform a 32-bit 3-operand add:
   operands[0] = operands[1] + operands[2].  Picks among lea, addq,
   subq, add.w, move+add depending on whether the destination matches
   a source and on the size of a constant addend.  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* Three-address form: only lea can do this, and it needs an
	 address register as the base, so put one in operands[1].  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx tmp = operands[1];

	  operands[1] = operands[2];
	  operands[2] = tmp;
	}

      /* These insns can result from reloads to access
	 stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq handle immediates 1..8.  */
      if (INTVAL (operands[2]) > 0
	  && INTVAL (operands[2]) <= 8)
	return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
	  && INTVAL (operands[2]) >= -8)
	{
	  operands[2] = GEN_INT (- INTVAL (operands[2]));
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 it is faster to use two addql instructions to
	 add a small integer (8 < N <= 16) to a register.
	 Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
	{
	  if (INTVAL (operands[2]) > 8
	      && INTVAL (operands[2]) <= 16)
	    {
	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (INTVAL (operands[2]) < -8
	      && INTVAL (operands[2]) >= -16)
	    {
	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      /* For an address-register destination with a 16-bit addend,
	 add.w (68040) or lea is preferable to a full add.l.  */
      if (ADDRESS_REG_P (operands[0])
	  && INTVAL (operands[2]) >= -0x8000
	  && INTVAL (operands[2]) < 0x8000)
	{
	  if (TUNE_68040)
	    return "add%.w %2,%0";
	  else
	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  return "add%.l %2,%0";
}
3528 | |
/* Store in cc_status the expressions that the condition codes will
   describe after execution of an instruction whose pattern is EXP.
   Do not alter them if the instruction would not alter the cc's.  */

/* On the 68000, all the insns to store in an address register fail to
   set the cc's.  However, in some cases these instructions can make it
   possibly invalid to use the saved cc's.  In those cases we clear out
   some or all of the saved cc's so they won't be used.  */

void
notice_update_cc (rtx exp, rtx insn)
{
  if (GET_CODE (exp) == SET)
    {
      if (GET_CODE (SET_SRC (exp)) == CALL)
	CC_STATUS_INIT;
      else if (ADDRESS_REG_P (SET_DEST (exp)))
	{
	  /* Moves into an address register don't set cc's, but they
	     may invalidate the values we remembered.  */
	  if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
	    cc_status.value1 = 0;
	  if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
	    cc_status.value2 = 0;
	}
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && SET_DEST (exp) != cc0_rtx
	       && (FP_REG_P (SET_SRC (exp))
		   || GET_CODE (SET_SRC (exp)) == FIX
		   || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
	CC_STATUS_INIT;
      /* A pair of move insns doesn't produce a useful overall cc.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && !FP_REG_P (SET_SRC (exp))
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
	       && (GET_CODE (SET_SRC (exp)) == REG
		   || GET_CODE (SET_SRC (exp)) == MEM
		   || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
	CC_STATUS_INIT;
      else if (SET_DEST (exp) != pc_rtx)
	{
	  /* Ordinary case: remember dest and src for later cc reuse.  */
	  cc_status.flags = 0;
	  cc_status.value1 = SET_DEST (exp);
	  cc_status.value2 = SET_SRC (exp);
	}
    }
  else if (GET_CODE (exp) == PARALLEL
	   && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
    {
      /* Only the first SET of a PARALLEL determines the cc state.  */
      rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
      rtx src = SET_SRC (XVECEXP (exp, 0, 0));

      if (ADDRESS_REG_P (dest))
	CC_STATUS_INIT;
      else if (dest != pc_rtx)
	{
	  cc_status.flags = 0;
	  cc_status.value1 = dest;
	  cc_status.value2 = src;
	}
    }
  else
    CC_STATUS_INIT;
  /* Byte operations on an address register only use the low word, so
     the remembered QImode value is not trustworthy.  */
  if (cc_status.value2 != 0
      && ADDRESS_REG_P (cc_status.value2)
      && GET_MODE (cc_status.value2) == QImode)
    CC_STATUS_INIT;
  if (cc_status.value2 != 0)
    switch (GET_CODE (cc_status.value2))
      {
      case ASHIFT: case ASHIFTRT: case LSHIFTRT:
      case ROTATE: case ROTATERT:
	/* These instructions always clear the overflow bit, and set
	   the carry to the bit shifted out.  */
	cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
	break;

      case PLUS: case MINUS: case MULT:
      case DIV: case UDIV: case MOD: case UMOD: case NEG:
	if (GET_MODE (cc_status.value2) != VOIDmode)
	  cc_status.flags |= CC_NO_OVERFLOW;
	break;
      case ZERO_EXTEND:
	/* (SET r1 (ZERO_EXTEND r2)) on this machine
	   ends with a move insn moving r2 in r2's mode.
	   Thus, the cc's are set for r2.
	   This can set N bit spuriously.  */
	cc_status.flags |= CC_NOT_NEGATIVE;

	/* NOTE(review): no break here — control falls through to the
	   default case, which only breaks, so this is a no-op
	   fall-through; confirm it is intentional before adding a
	   break.  */
      default:
	break;
      }
  /* If the compared value is clobbered by the insn, drop it.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
      && cc_status.value2
      && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
    cc_status.value2 = 0;
  /* FP values imply the 68881's condition codes, not the CPU's.  */
  if (((cc_status.value1 && FP_REG_P (cc_status.value1))
       || (cc_status.value2 && FP_REG_P (cc_status.value2))))
    cc_status.flags = CC_IN_68881;
  if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
      && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
    {
      cc_status.flags = CC_IN_68881;
      if (!FP_REG_P (XEXP (cc_status.value2, 0)))
	cc_status.flags |= CC_REVERSED;
    }
}
3641 | |
3642 const char * | |
3643 output_move_const_double (rtx *operands) | |
3644 { | |
3645 int code = standard_68881_constant_p (operands[1]); | |
3646 | |
3647 if (code != 0) | |
3648 { | |
3649 static char buf[40]; | |
3650 | |
3651 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff); | |
3652 return buf; | |
3653 } | |
3654 return "fmove%.d %1,%0"; | |
3655 } | |
3656 | |
3657 const char * | |
3658 output_move_const_single (rtx *operands) | |
3659 { | |
3660 int code = standard_68881_constant_p (operands[1]); | |
3661 | |
3662 if (code != 0) | |
3663 { | |
3664 static char buf[40]; | |
3665 | |
3666 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff); | |
3667 return buf; | |
3668 } | |
3669 return "fmove%.s %f1,%0"; | |
3670 } | |
3671 | |
/* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
   from the "fmovecr" instruction.
   The value, anded with 0xff, gives the code to use in fmovecr
   to get the desired constant.  */

/* This code has been fixed for cross-compilation.  */

/* Nonzero once values_68881 has been filled in by init_68881_table.  */
static int inited_68881_table = 0;

/* Decimal spellings of the 68881 constant-ROM values that are exactly
   representable; kept parallel with codes_68881 below.  */
static const char *const strings_68881[7] = {
  "0.0",
  "1.0",
  "10.0",
  "100.0",
  "10000.0",
  "1e8",
  "1e16"
};

/* fmovecr ROM offsets corresponding, entry for entry, to
   strings_68881.  */
static const int codes_68881[7] = {
  0x0f,
  0x32,
  0x33,
  0x34,
  0x35,
  0x36,
  0x37
};

/* Binary (REAL_VALUE_TYPE) forms of strings_68881, filled in lazily
   by init_68881_table.  */
REAL_VALUE_TYPE values_68881[7];
3702 | |
3703 /* Set up values_68881 array by converting the decimal values | |
3704 strings_68881 to binary. */ | |
3705 | |
3706 void | |
3707 init_68881_table (void) | |
3708 { | |
3709 int i; | |
3710 REAL_VALUE_TYPE r; | |
3711 enum machine_mode mode; | |
3712 | |
3713 mode = SFmode; | |
3714 for (i = 0; i < 7; i++) | |
3715 { | |
3716 if (i == 6) | |
3717 mode = DFmode; | |
3718 r = REAL_VALUE_ATOF (strings_68881[i], mode); | |
3719 values_68881[i] = r; | |
3720 } | |
3721 inited_68881_table = 1; | |
3722 } | |
3723 | |
/* Return the fmovecr code for the CONST_DOUBLE X, or 0 if X is not
   available from the 68881 constant ROM (or if fmovecr should not be
   used at all for the current tuning).  Callers mask the result with
   0xff to obtain the instruction's offset field.  */

int
standard_68881_constant_p (rtx x)
{
  REAL_VALUE_TYPE r;
  int i;

  /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
     used at all on those chips.  */
  if (TUNE_68040_60)
    return 0;

  /* Lazily build the binary form of the ROM constants.  */
  if (! inited_68881_table)
    init_68881_table ();

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
     is rejected.  */
  for (i = 0; i < 6; i++)
    {
      if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
	return (codes_68881[i]);
    }

  /* values_68881[6] ("1e16") was converted in DFmode (see
     init_68881_table), so do not consider it for SFmode constants.  */
  if (GET_MODE (x) == SFmode)
    return 0;

  if (REAL_VALUES_EQUAL (r, values_68881[6]))
    return (codes_68881[6]);

  /* larger powers of ten in the constants ram are not used
     because they are not equal to a `double' C constant.  */
  return 0;
}
3758 | |
/* If X is a floating-point constant, return the logarithm of X base 2,
   or 0 if X is not a power of 2.  Only values >= 1.0 are considered.  */

int
floating_exact_log2 (rtx x)
{
  REAL_VALUE_TYPE r, r1;
  int exp;

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* Values below 1.0 are never reported as powers of 2.  */
  if (REAL_VALUES_LESS (r, dconst1))
    return 0;

  /* X is an exact power of 2 iff it equals 2^exponent(X).  */
  exp = real_exponent (&r);
  real_2expN (&r1, exp, DFmode);
  if (REAL_VALUES_EQUAL (r1, r))
    return exp;

  return 0;
}
3780 | |
3781 /* A C compound statement to output to stdio stream STREAM the | |
3782 assembler syntax for an instruction operand X. X is an RTL | |
3783 expression. | |
3784 | |
3785 CODE is a value that can be used to specify one of several ways | |
3786 of printing the operand. It is used when identical operands | |
3787 must be printed differently depending on the context. CODE | |
3788 comes from the `%' specification that was used to request | |
3789 printing of the operand. If the specification was just `%DIGIT' | |
3790 then CODE is 0; if the specification was `%LTR DIGIT' then CODE | |
3791 is the ASCII code for LTR. | |
3792 | |
3793 If X is a register, this macro should print the register's name. | |
3794 The names can be found in an array `reg_names' whose type is | |
3795 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'. | |
3796 | |
3797 When the machine description has a specification `%PUNCT' (a `%' | |
3798 followed by a punctuation character), this macro is called with | |
3799 a null pointer for X and the punctuation character for CODE. | |
3800 | |
3801 The m68k specific codes are: | |
3802 | |
3803 '.' for dot needed in Motorola-style opcode names. | |
3804 '-' for an operand pushing on the stack: | |
3805 sp@-, -(sp) or -(%sp) depending on the style of syntax. | |
3806 '+' for an operand pushing on the stack: | |
3807 sp@+, (sp)+ or (%sp)+ depending on the style of syntax. | |
3808 '@' for a reference to the top word on the stack: | |
3809 sp@, (sp) or (%sp) depending on the style of syntax. | |
3810 '#' for an immediate operand prefix (# in MIT and Motorola syntax | |
3811 but & in SGS syntax). | |
3812 '!' for the cc register (used in an `and to cc' insn). | |
3813 '$' for the letter `s' in an op code, but only on the 68040. | |
3814 '&' for the letter `d' in an op code, but only on the 68040. | |
3815 '/' for register prefix needed by longlong.h. | |
3816 '?' for m68k_library_id_string | |
3817 | |
3818 'b' for byte insn (no effect, on the Sun; this is for the ISI). | |
3819 'd' to force memory addressing to be absolute, not relative. | |
3820 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex) | |
3821 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex), | |
3822 or print pair of registers as rx:ry. | |
3823 'p' print an address with @PLTPC attached, but only if the operand | |
3824 is not locally-bound. */ | |
3825 | |
/* Print operand OP to FILE, modified by the operand-letter LETTER.
   The supported letters are documented in the large comment block
   immediately above this function.  Punctuation letters are handled
   first (OP is a null pointer for those); then OP is dispatched on its
   RTL code.  */

void
print_operand (FILE *file, rtx op, int letter)
{
  if (letter == '.')
    {
      /* Motorola syntax spells sizes ".b/.w/.l"; MIT syntax omits
	 the dot.  */
      if (MOTOROLA)
	fprintf (file, ".");
    }
  else if (letter == '#')
    asm_fprintf (file, "%I");
  else if (letter == '-')
    asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
  else if (letter == '+')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
  else if (letter == '@')
    asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
  else if (letter == '!')
    asm_fprintf (file, "%Rfpcr");
  else if (letter == '$')
    {
      /* The 's' opcode letter is only emitted on the 68040.  */
      if (TARGET_68040)
	fprintf (file, "s");
    }
  else if (letter == '&')
    {
      /* The 'd' opcode letter is only emitted on the 68040.  */
      if (TARGET_68040)
	fprintf (file, "d");
    }
  else if (letter == '/')
    asm_fprintf (file, "%R");
  else if (letter == '?')
    asm_fprintf (file, m68k_library_id_string);
  else if (letter == 'p')
    {
      /* Append @PLTPC unless the symbol is known to bind locally.  */
      output_addr_const (file, op);
      if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
	fprintf (file, "@PLTPC");
    }
  else if (GET_CODE (op) == REG)
    {
      if (letter == 'R')
	/* Print out the second register name of a register pair.
	   I.e., R (6) => 7.  */
	fputs (M68K_REGNAME(REGNO (op) + 1), file);
      else
	fputs (M68K_REGNAME(REGNO (op)), file);
    }
  else if (GET_CODE (op) == MEM)
    {
      output_address (XEXP (op, 0));
      /* 'd' forces an absolute (long) addressing suffix for constant
	 addresses outside the 16-bit range on pre-68020 parts.  */
      if (letter == 'd' && ! TARGET_68020
	  && CONSTANT_ADDRESS_P (XEXP (op, 0))
	  && !(GET_CODE (XEXP (op, 0)) == CONST_INT
	       && INTVAL (XEXP (op, 0)) < 0x8000
	       && INTVAL (XEXP (op, 0)) >= -0x8000))
	fprintf (file, MOTOROLA ? ".l" : ":l");
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
    {
      /* Emit the target bit pattern of the single-precision constant
	 as an immediate.  */
      REAL_VALUE_TYPE r;
      long l;
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, l);
      asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
    {
      /* Extended precision: three 32-bit target words.  */
      REAL_VALUE_TYPE r;
      long l[3];
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
      asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
		   l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
    }
  else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
    {
      /* Double precision: two 32-bit target words.  */
      REAL_VALUE_TYPE r;
      long l[2];
      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);
      asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
    }
  else
    {
      /* Use `print_operand_address' instead of `output_addr_const'
	 to ensure that we print relevant PIC stuff.  */
      asm_fprintf (file, "%I");
      if (TARGET_PCREL
	  && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
	print_operand_address (file, op);
      else
	output_addr_const (file, op);
    }
}
3920 | |
3921 /* m68k implementation of OUTPUT_ADDR_CONST_EXTRA. */ | |
3922 | |
3923 bool | |
3924 m68k_output_addr_const_extra (FILE *file, rtx x) | |
3925 { | |
3926 if (GET_CODE (x) != UNSPEC || XINT (x, 1) != UNSPEC_GOTOFF) | |
3927 return false; | |
3928 | |
3929 output_addr_const (file, XVECEXP (x, 0, 0)); | |
3930 /* ??? What is the non-MOTOROLA syntax? */ | |
3931 fputs ("@GOT", file); | |
3932 return true; | |
3933 } | |
3934 | |
3935 | |
3936 /* A C compound statement to output to stdio stream STREAM the | |
3937 assembler syntax for an instruction operand that is a memory | |
3938 reference whose address is ADDR. ADDR is an RTL expression. | |
3939 | |
3940 Note that this contains a kludge that knows that the only reason | |
3941 we have an address (plus (label_ref...) (reg...)) when not generating | |
3942 PIC code is in the insn before a tablejump, and we know that m68k.md | |
3943 generates a label LInnn: on such an insn. | |
3944 | |
3945 It is possible for PIC to generate a (plus (label_ref...) (reg...)) | |
3946 and we handle that just like we would a (plus (symbol_ref...) (reg...)). | |
3947 | |
3948 This routine is responsible for distinguishing between -fpic and -fPIC | |
3949 style relocations in an address. When generating -fpic code the | |
3950 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating | |
3951 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */ | |
3952 | |
/* Print the memory address ADDR to FILE; see the large comment above
   for the distinction between -fpic and -fPIC relocation styles.  */

void
print_operand_address (FILE *file, rtx addr)
{
  struct m68k_address address;

  /* Decompose ADDR into base/index/offset; strict_p is true since we
     are printing a final address.  */
  if (!m68k_decompose_address (QImode, addr, true, &address))
    gcc_unreachable ();

  if (address.code == PRE_DEC)
    fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
	     M68K_REGNAME (REGNO (address.base)));
  else if (address.code == POST_INC)
    fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
	     M68K_REGNAME (REGNO (address.base)));
  else if (!address.base && !address.index)
    {
      /* A constant address.  */
      gcc_assert (address.offset == addr);
      if (GET_CODE (addr) == CONST_INT)
	{
	  /* (xxx).w or (xxx).l.  */
	  if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
	    fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
	  else
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
	}
      else if (TARGET_PCREL)
	{
	  /* (d16,PC) or (bd,PC,Xn) (with suppressed index register).  */
	  fputc ('(', file);
	  output_addr_const (file, addr);
	  asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
	}
      else
	{
	  /* (xxx).l.  We need a special case for SYMBOL_REF if the symbol
	     name ends in `.<letter>', as the last 2 characters can be
	     mistaken as a size suffix.  Put the name in parentheses.  */
	  if (GET_CODE (addr) == SYMBOL_REF
	      && strlen (XSTR (addr, 0)) > 2
	      && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
	    {
	      putc ('(', file);
	      output_addr_const (file, addr);
	      putc (')', file);
	    }
	  else
	    output_addr_const (file, addr);
	}
    }
  else
    {
      int labelno;

      /* If ADDR is a (d8,pc,Xn) address, this is the number of the
	 label being accessed, otherwise it is -1.  */
      labelno = (address.offset
		 && !address.base
		 && GET_CODE (address.offset) == LABEL_REF
		 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
		 : -1);
      if (MOTOROLA)
	{
	  /* Print the "offset(base" component.  */
	  if (labelno >= 0)
	    asm_fprintf (file, "%LL%d(%Rpc,", labelno);
	  else
	    {
	      if (address.offset)
		{
		  output_addr_const (file, address.offset);
		  /* GOT-relative references through the PIC register.  */
		  if (flag_pic && address.base == pic_offset_table_rtx)
		    {
		      fprintf (file, "@GOT");
		      if (flag_pic == 1 && TARGET_68020)
			fprintf (file, ".w");
		    }
		}
	      putc ('(', file);
	      if (address.base)
		fputs (M68K_REGNAME (REGNO (address.base)), file);
	    }
	  /* Print the ",index" component, if any.  */
	  if (address.index)
	    {
	      if (address.base)
		putc (',', file);
	      fprintf (file, "%s.%c",
		       M68K_REGNAME (REGNO (address.index)),
		       GET_MODE (address.index) == HImode ? 'w' : 'l');
	      if (address.scale != 1)
		fprintf (file, "*%d", address.scale);
	    }
	  putc (')', file);
	}
      else /* !MOTOROLA */
	{
	  if (!address.offset && !address.index)
	    fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
	  else
	    {
	      /* Print the "base@(offset" component.  */
	      if (labelno >= 0)
		asm_fprintf (file, "%Rpc@(%LL%d", labelno);
	      else
		{
		  if (address.base)
		    fputs (M68K_REGNAME (REGNO (address.base)), file);
		  fprintf (file, "@(");
		  if (address.offset)
		    {
		      output_addr_const (file, address.offset);
		      /* :w for -fpic, :l for -fPIC (see the comment
			 above this function).  */
		      if (address.base == pic_offset_table_rtx && TARGET_68020)
			switch (flag_pic)
			  {
			  case 1:
			    fprintf (file, ":w"); break;
			  case 2:
			    fprintf (file, ":l"); break;
			  default:
			    break;
			  }
		    }
		}
	      /* Print the ",index" component, if any.  */
	      if (address.index)
		{
		  fprintf (file, ",%s:%c",
			   M68K_REGNAME (REGNO (address.index)),
			   GET_MODE (address.index) == HImode ? 'w' : 'l');
		  if (address.scale != 1)
		    fprintf (file, ":%d", address.scale);
		}
	      putc (')', file);
	    }
	}
    }
}
4091 | |
/* Check for cases where a clr insns can be omitted from code using
   strict_low_part sets.  For example, the second clrl here is not needed:
   clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...

   MODE is the mode of this STRICT_LOW_PART set.  FIRST_INSN is the clear
   insn we are checking for redundancy.  TARGET is the register set by the
   clear insn.

   Return true iff an earlier insn in the same basic block already set
   the whole of TARGET to zero with no intervening full modification.  */

bool
strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
			     rtx target)
{
  rtx p = first_insn;

  /* Walk backwards from FIRST_INSN.  */
  while ((p = PREV_INSN (p)))
    {
      /* Stop at a basic-block boundary; a zeroing before it is not
	 guaranteed to reach FIRST_INSN.  */
      if (NOTE_INSN_BASIC_BLOCK_P (p))
	return false;

      if (NOTE_P (p))
	continue;

      /* If it isn't an insn, then give up.  */
      if (!INSN_P (p))
	return false;

      if (reg_set_p (target, p))
	{
	  rtx set = single_set (p);
	  rtx dest;

	  /* If it isn't an easy to recognize insn, then give up.  */
	  if (! set)
	    return false;

	  dest = SET_DEST (set);

	  /* If this sets the entire target register to zero, then our
	     first_insn is redundant.  */
	  if (rtx_equal_p (dest, target)
	      && SET_SRC (set) == const0_rtx)
	    return true;
	  else if (GET_CODE (dest) == STRICT_LOW_PART
		   && GET_CODE (XEXP (dest, 0)) == REG
		   && REGNO (XEXP (dest, 0)) == REGNO (target)
		   && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
		       <= GET_MODE_SIZE (mode)))
	    /* This is a strict low part set which modifies less than
	       we are using, so it is safe.  */
	    ;
	  else
	    return false;
	}
    }

  return false;
}
4149 | |
4150 /* Operand predicates for implementing asymmetric pc-relative addressing | |
4151 on m68k. The m68k supports pc-relative addressing (mode 7, register 2) | |
4152 when used as a source operand, but not as a destination operand. | |
4153 | |
4154 We model this by restricting the meaning of the basic predicates | |
4155 (general_operand, memory_operand, etc) to forbid the use of this | |
4156 addressing mode, and then define the following predicates that permit | |
4157 this addressing mode. These predicates can then be used for the | |
4158 source operands of the appropriate instructions. | |
4159 | |
4160 n.b. While it is theoretically possible to change all machine patterns | |
4161 to use this addressing more where permitted by the architecture, | |
4162 it has only been implemented for "common" cases: SImode, HImode, and | |
4163 QImode operands, and only for the principle operations that would | |
4164 require this addressing mode: data movement and simple integer operations. | |
4165 | |
4166 In parallel with these new predicates, two new constraint letters | |
4167 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'. | |
4168 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case. | |
4169 In the pcrel case 's' is only valid in combination with 'a' registers. | |
4170 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding | |
4171 of how these constraints are used. | |
4172 | |
4173 The use of these predicates is strictly optional, though patterns that | |
4174 don't will cause an extra reload register to be allocated where one | |
4175 was not necessary: | |
4176 | |
4177 lea (abc:w,%pc),%a0 ; need to reload address | |
4178 moveq &1,%d1 ; since write to pc-relative space | |
4179 movel %d1,%a0@ ; is not allowed | |
4180 ... | |
4181 lea (abc:w,%pc),%a1 ; no need to reload address here | |
4182 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok | |
4183 | |
4184 For more info, consult tiemann@cygnus.com. | |
4185 | |
4186 | |
4187 All of the ugliness with predicates and constraints is due to the | |
4188 simple fact that the m68k does not allow a pc-relative addressing | |
4189 mode as a destination. gcc does not distinguish between source and | |
4190 destination addresses. Hence, if we claim that pc-relative address | |
4191 modes are valid, e.g. GO_IF_LEGITIMATE_ADDRESS accepts them, then we | |
4192 end up with invalid code. To get around this problem, we left | |
4193 pc-relative modes as invalid addresses, and then added special | |
4194 predicates and constraints to accept them. | |
4195 | |
4196 A cleaner way to handle this is to modify gcc to distinguish | |
4197 between source and destination addresses. We can then say that | |
4198 pc-relative is a valid source address but not a valid destination | |
4199 address, and hopefully avoid a lot of the predicate and constraint | |
4200 hackery. Unfortunately, this would be a pretty big change. It would | |
4201 be a useful change for a number of ports, but there aren't any current | |
4202 plans to undertake this. | |
4203 | |
4204 ***************************************************************************/ | |
4205 | |
4206 | |
/* Output the assembler template for an SImode AND with operands in
   OPERANDS.  Where the mask allows, shrink the operation to a word
   "and"/"clr" or a single-bit "bclr"; otherwise emit a long "and".
   Note: may rewrite OPERANDS in place and reset the CC status.  */

const char *
output_andsi3 (rtx *operands)
{
  int logval;
  /* If the upper 16 mask bits are all ones, only the low word changes,
     so a word-sized operation suffices.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (INTVAL (operands[2]) | 0xffff) == -1
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (GET_CODE (operands[0]) != REG)
	operands[0] = adjust_address (operands[0], HImode, 2);
      operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (operands[2] == const0_rtx)
	return "clr%.w %0";
      return "and%.w %2,%0";
    }
  /* If the mask clears exactly one bit, use "bclr" on that bit.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (~ INTVAL (operands[2]))) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      /* This does not set condition codes in a standard way.  */
      CC_STATUS_INIT;
      return "bclr %1,%0";
    }
  return "and%.l %2,%0";
}
4244 | |
/* Output the assembler template for an SImode inclusive-OR with
   operands in OPERANDS.  Mirrors output_andsi3: use a word "or" when
   the mask fits in 16 bits, a single-bit "bset" when exactly one bit
   is set, else a long "or".  May rewrite OPERANDS and reset CC.  */

const char *
output_iorsi3 (rtx *operands)
{
  register int logval;
  /* A mask with no bits above bit 15 only affects the low word.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (GET_CODE (operands[0]) != REG)
	operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (INTVAL (operands[2]) == 0xffff)
	return "mov%.w %2,%0";
      return "or%.w %2,%0";
    }
  /* If exactly one bit is being set, use "bset" on that bit.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]))) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      CC_STATUS_INIT;
      return "bset %1,%0";
    }
  return "or%.l %2,%0";
}
4280 | |
/* Output the assembler template for an SImode XOR with operands in
   OPERANDS.  Mirrors output_andsi3/output_iorsi3: word "eor"/"not"
   when the mask fits in 16 bits, single-bit "bchg" when exactly one
   bit is toggled, else a long "eor".  May rewrite OPERANDS and reset
   CC.  */

const char *
output_xorsi3 (rtx *operands)
{
  register int logval;
  /* A mask with no bits above bit 15 only affects the low word.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (! DATA_REG_P (operands[0]))
	operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (INTVAL (operands[2]) == 0xffff)
	return "not%.w %0";
      return "eor%.w %2,%0";
    }
  /* If exactly one bit is being toggled, use "bchg" on that bit.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]))) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
	{
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      CC_STATUS_INIT;
      return "bchg %1,%0";
    }
  return "eor%.l %2,%0";
}
4315 | |
4316 /* Return the instruction that should be used for a call to address X, | |
4317 which is known to be in operand 0. */ | |
4318 | |
4319 const char * | |
4320 output_call (rtx x) | |
4321 { | |
4322 if (symbolic_operand (x, VOIDmode)) | |
4323 return m68k_symbolic_call; | |
4324 else | |
4325 return "jsr %a0"; | |
4326 } | |
4327 | |
4328 /* Likewise sibling calls. */ | |
4329 | |
4330 const char * | |
4331 output_sibcall (rtx x) | |
4332 { | |
4333 if (symbolic_operand (x, VOIDmode)) | |
4334 return m68k_symbolic_jump; | |
4335 else | |
4336 return "jmp %a0"; | |
4337 } | |
4338 | |
4339 #ifdef M68K_TARGET_COFF | |
4340 | |
4341 /* Output assembly to switch to section NAME with attribute FLAGS. */ | |
4342 | |
4343 static void | |
4344 m68k_coff_asm_named_section (const char *name, unsigned int flags, | |
4345 tree decl ATTRIBUTE_UNUSED) | |
4346 { | |
4347 char flagchar; | |
4348 | |
4349 if (flags & SECTION_WRITE) | |
4350 flagchar = 'd'; | |
4351 else | |
4352 flagchar = 'x'; | |
4353 | |
4354 fprintf (asm_out_file, "\t.section\t%s,\"%c\"\n", name, flagchar); | |
4355 } | |
4356 | |
4357 #endif /* M68K_TARGET_COFF */ | |
4358 | |
/* Worker for TARGET_ASM_OUTPUT_MI_THUNK: emit to FILE the assembly for
   a thunk that adjusts the incoming "this" pointer by DELTA (and by
   *(*this + VCALL_OFFSET) if that is nonzero) and then tail-calls
   FUNCTION.  Generates RTL directly and runs a minimal final pass.  */

static void
m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		      tree function)
{
  rtx this_slot, offset, addr, mem, insn;

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* The "this" pointer is stored at 4(%sp).  */
  this_slot = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, 4));

  /* Add DELTA to THIS.  */
  if (delta != 0)
    {
      /* Make the offset a legitimate operand for memory addition.  */
      offset = GEN_INT (delta);
      if ((delta < -8 || delta > 8)
	  && (TARGET_COLDFIRE || USE_MOVQ (delta)))
	{
	  /* Materialize the offset in %d0 first.  */
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
	  offset = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot), offset));
    }

  /* If needed, add *(*THIS + VCALL_OFFSET) to THIS.  */
  if (vcall_offset != 0)
    {
      /* Set the static chain register to *THIS.  */
      emit_move_insn (static_chain_rtx, this_slot);
      emit_move_insn (static_chain_rtx, gen_rtx_MEM (Pmode, static_chain_rtx));

      /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET.  */
      addr = plus_constant (static_chain_rtx, vcall_offset);
      if (!m68k_legitimate_address_p (Pmode, addr, true))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, static_chain_rtx, addr));
	  addr = static_chain_rtx;
	}

      /* Load the offset into %d0 and add it to THIS.  */
      emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
		      gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot),
				gen_rtx_REG (Pmode, D0_REG)));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  mem = DECL_RTL (function);
  if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
    {
      gcc_assert (flag_pic);

      if (!TARGET_SEP_DATA)
	{
	  /* Use the static chain register as a temporary (call-clobbered)
	     GOT pointer for this function.  We can use the static chain
	     register because it isn't live on entry to the thunk.  */
	  SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
	  emit_insn (gen_load_got (pic_offset_table_rtx));
	}
      legitimize_pic_address (XEXP (mem, 0), Pmode, static_chain_rtx);
      mem = replace_equiv_address (mem, static_chain_rtx);
    }
  insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation.  */
  insn = get_insns ();
  split_all_insns_noflow ();
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  /* Clean up the vars set above.  */
  reload_completed = 0;

  /* Restore the original PIC register.  */
  if (flag_pic)
    SET_REGNO (pic_offset_table_rtx, PIC_REG);
  free_after_compilation (cfun);
}
4446 | |
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		       int incoming ATTRIBUTE_UNUSED)
{
  /* The aggregate-return address always goes in the same hard
     register, regardless of function type or call direction.  */
  return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
}
4455 | |
4456 /* Return nonzero if register old_reg can be renamed to register new_reg. */ | |
4457 int | |
4458 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED, | |
4459 unsigned int new_reg) | |
4460 { | |
4461 | |
4462 /* Interrupt functions can only use registers that have already been | |
4463 saved by the prologue, even if they would normally be | |
4464 call-clobbered. */ | |
4465 | |
4466 if ((m68k_get_function_kind (current_function_decl) | |
4467 == m68k_fk_interrupt_handler) | |
4468 && !df_regs_ever_live_p (new_reg)) | |
4469 return 0; | |
4470 | |
4471 return 1; | |
4472 } | |
4473 | |
4474 /* Value is true if hard register REGNO can hold a value of machine-mode | |
4475 MODE. On the 68000, we let the cpu registers can hold any mode, but | |
4476 restrict the 68881 registers to floating-point modes. */ | |
4477 | |
4478 bool | |
4479 m68k_regno_mode_ok (int regno, enum machine_mode mode) | |
4480 { | |
4481 if (DATA_REGNO_P (regno)) | |
4482 { | |
4483 /* Data Registers, can hold aggregate if fits in. */ | |
4484 if (regno + GET_MODE_SIZE (mode) / 4 <= 8) | |
4485 return true; | |
4486 } | |
4487 else if (ADDRESS_REGNO_P (regno)) | |
4488 { | |
4489 if (regno + GET_MODE_SIZE (mode) / 4 <= 16) | |
4490 return true; | |
4491 } | |
4492 else if (FP_REGNO_P (regno)) | |
4493 { | |
4494 /* FPU registers, hold float or complex float of long double or | |
4495 smaller. */ | |
4496 if ((GET_MODE_CLASS (mode) == MODE_FLOAT | |
4497 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT) | |
4498 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE) | |
4499 return true; | |
4500 } | |
4501 return false; | |
4502 } | |
4503 | |
/* Implement SECONDARY_RELOAD_CLASS.  Return the class of an
   intermediate register needed to reload X of mode MODE into a
   register of class RCLASS, or NO_REGS if none is needed.  */

enum reg_class
m68k_secondary_reload_class (enum reg_class rclass,
			     enum machine_mode mode, rtx x)
{
  int regno;

  regno = true_regnum (x);

  /* If one operand of a movqi is an address register, the other
     operand must be a general register or constant.  Other types
     of operand must be reloaded through a data register.  */
  if (GET_MODE_SIZE (mode) == 1
      && reg_classes_intersect_p (rclass, ADDR_REGS)
      && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
    return DATA_REGS;

  /* PC-relative addresses must be loaded into an address register first.  */
  if (TARGET_PCREL
      && !reg_class_subset_p (rclass, ADDR_REGS)
      && symbolic_operand (x, VOIDmode))
    return ADDR_REGS;

  return NO_REGS;
}
4530 | |
/* Implement PREFERRED_RELOAD_CLASS.  Return the register class to
   prefer when reloading X into a register of class RCLASS.  */

enum reg_class
m68k_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class secondary_class;

  /* If RCLASS might need a secondary reload, try restricting it to
     a class that doesn't.  */
  secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
  if (secondary_class != NO_REGS
      && reg_class_subset_p (secondary_class, rclass))
    return secondary_class;

  /* Prefer to use moveq for in-range constants.  */
  if (GET_CODE (x) == CONST_INT
      && reg_class_subset_p (DATA_REGS, rclass)
      && IN_RANGE (INTVAL (x), -0x80, 0x7f))
    return DATA_REGS;

  /* ??? Do we really need this now?  */
  if (GET_CODE (x) == CONST_DOUBLE
      && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      /* Float constants belong in FP registers when we have an FPU;
	 otherwise refuse to put them anywhere in particular.  */
      if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
	return FP_REGS;

      return NO_REGS;
    }

  return rclass;
}
4563 | |
4564 /* Return floating point values in a 68881 register. This makes 68881 code | |
4565 a little bit faster. It also makes -msoft-float code incompatible with | |
4566 hard-float code, so people have to be careful not to mix the two. | |
4567 For ColdFire it was decided the ABI incompatibility is undesirable. | |
4568 If there is need for a hard-float ABI it is probably worth doing it | |
4569 properly and also passing function arguments in FP registers. */ | |
4570 rtx | |
4571 m68k_libcall_value (enum machine_mode mode) | |
4572 { | |
4573 switch (mode) { | |
4574 case SFmode: | |
4575 case DFmode: | |
4576 case XFmode: | |
4577 if (TARGET_68881) | |
4578 return gen_rtx_REG (mode, FP0_REG); | |
4579 break; | |
4580 default: | |
4581 break; | |
4582 } | |
4583 return gen_rtx_REG (mode, D0_REG); | |
4584 } | |
4585 | |
4586 rtx | |
4587 m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED) | |
4588 { | |
4589 enum machine_mode mode; | |
4590 | |
4591 mode = TYPE_MODE (valtype); | |
4592 switch (mode) { | |
4593 case SFmode: | |
4594 case DFmode: | |
4595 case XFmode: | |
4596 if (TARGET_68881) | |
4597 return gen_rtx_REG (mode, FP0_REG); | |
4598 break; | |
4599 default: | |
4600 break; | |
4601 } | |
4602 | |
4603 /* If the function returns a pointer, push that into %a0. */ | |
4604 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func)))) | |
4605 /* For compatibility with the large body of existing code which | |
4606 does not always properly declare external functions returning | |
4607 pointer types, the m68k/SVR4 convention is to copy the value | |
4608 returned for pointer functions from a0 to d0 in the function | |
4609 epilogue, so that callers that have neglected to properly | |
4610 declare the callee can still find the correct return value in | |
4611 d0. */ | |
4612 return gen_rtx_PARALLEL | |
4613 (mode, | |
4614 gen_rtvec (2, | |
4615 gen_rtx_EXPR_LIST (VOIDmode, | |
4616 gen_rtx_REG (mode, A0_REG), | |
4617 const0_rtx), | |
4618 gen_rtx_EXPR_LIST (VOIDmode, | |
4619 gen_rtx_REG (mode, D0_REG), | |
4620 const0_rtx))); | |
4621 else if (POINTER_TYPE_P (valtype)) | |
4622 return gen_rtx_REG (mode, A0_REG); | |
4623 else | |
4624 return gen_rtx_REG (mode, D0_REG); | |
4625 } | |
4626 | |
/* Worker function for TARGET_RETURN_IN_MEMORY.  */
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool
m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = TYPE_MODE (type);

  /* BLKmode values always live in memory.  Beyond that, if TYPE's known
     alignment is less than the alignment of the MODE that would contain
     the structure, return in memory: this maintains compatibility
     between code compiled with -mstrict-align and code compiled with
     -mno-strict-align.  */
  return (mode == BLKmode
          || (AGGREGATE_TYPE_P (type)
              && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode)));
}
#endif
4648 | |
/* CPU to schedule the program for.  */
enum attr_cpu m68k_sched_cpu;

/* MAC to schedule the program for.  */
enum attr_mac m68k_sched_mac;

/* Operand type: a scheduler-oriented classification of an instruction
   operand.  Registers and immediates are classified directly; memory
   operands are classified by the 68k effective-address (EA) mode they
   use, which determines how many extension words they occupy.  */
enum attr_op_type
{
  /* No operand.  */
  OP_TYPE_NONE,

  /* Integer register.  */
  OP_TYPE_RN,

  /* FP register.  */
  OP_TYPE_FPN,

  /* Implicit mem reference (e.g. stack).  */
  OP_TYPE_MEM1,

  /* Memory without offset or indexing.  EA modes 2, 3 and 4.  */
  OP_TYPE_MEM234,

  /* Memory with offset but without indexing.  EA mode 5.  */
  OP_TYPE_MEM5,

  /* Memory with indexing.  EA mode 6.  */
  OP_TYPE_MEM6,

  /* Memory referenced by absolute address.  EA mode 7.  */
  OP_TYPE_MEM7,

  /* Immediate operand that doesn't require extension word.  */
  OP_TYPE_IMM_Q,

  /* Immediate 16 bit operand.  */
  OP_TYPE_IMM_W,

  /* Immediate 32 bit operand.  */
  OP_TYPE_IMM_L
};
4691 | |
4692 /* Return type of memory ADDR_RTX refers to. */ | |
4693 static enum attr_op_type | |
4694 sched_address_type (enum machine_mode mode, rtx addr_rtx) | |
4695 { | |
4696 struct m68k_address address; | |
4697 | |
4698 if (symbolic_operand (addr_rtx, VOIDmode)) | |
4699 return OP_TYPE_MEM7; | |
4700 | |
4701 if (!m68k_decompose_address (mode, addr_rtx, | |
4702 reload_completed, &address)) | |
4703 { | |
4704 gcc_assert (!reload_completed); | |
4705 /* Reload will likely fix the address to be in the register. */ | |
4706 return OP_TYPE_MEM234; | |
4707 } | |
4708 | |
4709 if (address.scale != 0) | |
4710 return OP_TYPE_MEM6; | |
4711 | |
4712 if (address.base != NULL_RTX) | |
4713 { | |
4714 if (address.offset == NULL_RTX) | |
4715 return OP_TYPE_MEM234; | |
4716 | |
4717 return OP_TYPE_MEM5; | |
4718 } | |
4719 | |
4720 gcc_assert (address.offset != NULL_RTX); | |
4721 | |
4722 return OP_TYPE_MEM7; | |
4723 } | |
4724 | |
4725 /* Return X or Y (depending on OPX_P) operand of INSN. */ | |
4726 static rtx | |
4727 sched_get_operand (rtx insn, bool opx_p) | |
4728 { | |
4729 int i; | |
4730 | |
4731 if (recog_memoized (insn) < 0) | |
4732 gcc_unreachable (); | |
4733 | |
4734 extract_constrain_insn_cached (insn); | |
4735 | |
4736 if (opx_p) | |
4737 i = get_attr_opx (insn); | |
4738 else | |
4739 i = get_attr_opy (insn); | |
4740 | |
4741 if (i >= recog_data.n_operands) | |
4742 return NULL; | |
4743 | |
4744 return recog_data.operand[i]; | |
4745 } | |
4746 | |
/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
   If ADDRESS_P is true, return type of memory location operand refers to.  */
static enum attr_op_type
sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
{
  rtx op;

  op = sched_get_operand (insn, opx_p);

  if (op == NULL)
    {
      /* The opx/opy attribute named an operand this pattern doesn't
	 have; that should only happen before reload.  */
      gcc_assert (!reload_completed);
      return OP_TYPE_RN;
    }

  if (address_p)
    /* OP is the address itself, not a MEM, so the access mode is
       irrelevant for classification; any mode (QImode) will do.  */
    return sched_address_type (QImode, op);

  if (memory_operand (op, VOIDmode))
    return sched_address_type (GET_MODE (op), XEXP (op, 0));

  if (register_operand (op, VOIDmode))
    {
      /* Before reload, classify FP values by mode (they may still be in
	 pseudos); after reload, by the hard register they landed in.  */
      if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
	  || (reload_completed && FP_REG_P (op)))
	return OP_TYPE_FPN;

      return OP_TYPE_RN;
    }

  if (GET_CODE (op) == CONST_INT)
    {
      int ival;

      /* NOTE(review): INTVAL is a HOST_WIDE_INT; the truncation to int
	 looks harmless for this target's constants, but confirm.  */
      ival = INTVAL (op);

      /* Check for quick constants (no extension word needed).  */
      switch (get_attr_type (insn))
	{
	case TYPE_ALUQ_L:
	  if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOVEQ_L:
	  if (USE_MOVQ (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOV3Q_L:
	  if (valid_mov3q_const (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	default:
	  break;
	}

      /* 16-bit immediates take one extension word, wider ones two.  */
      if (IN_RANGE (ival, -0x8000, 0x7fff))
	return OP_TYPE_IMM_W;

      return OP_TYPE_IMM_L;
    }

  if (GET_CODE (op) == CONST_DOUBLE)
    {
      switch (GET_MODE (op))
	{
	case SFmode:
	  return OP_TYPE_IMM_W;

	case VOIDmode:
	case DFmode:
	  return OP_TYPE_IMM_L;

	default:
	  gcc_unreachable ();
	}
    }

  if (GET_CODE (op) == CONST
      || symbolic_operand (op, VOIDmode)
      || LABEL_P (op))
    {
      switch (GET_MODE (op))
	{
	case QImode:
	  return OP_TYPE_IMM_Q;

	case HImode:
	  return OP_TYPE_IMM_W;

	case SImode:
	  return OP_TYPE_IMM_L;

	default:
	  if (GET_CODE (op) == SYMBOL_REF)
	    /* ??? Just a guess.  Probably we can guess better using length
	       attribute of the instructions.  */
	    return OP_TYPE_IMM_W;

	  return OP_TYPE_IMM_L;
	}
    }

  /* Anything else should only occur before reload.  */
  gcc_assert (!reload_completed);

  if (FLOAT_MODE_P (GET_MODE (op)))
    return OP_TYPE_FPN;

  return OP_TYPE_RN;
}
4865 | |
4866 /* Implement opx_type attribute. | |
4867 Return type of INSN's operand X. | |
4868 If ADDRESS_P is true, return type of memory location operand refers to. */ | |
4869 enum attr_opx_type | |
4870 m68k_sched_attr_opx_type (rtx insn, int address_p) | |
4871 { | |
4872 switch (sched_attr_op_type (insn, true, address_p != 0)) | |
4873 { | |
4874 case OP_TYPE_RN: | |
4875 return OPX_TYPE_RN; | |
4876 | |
4877 case OP_TYPE_FPN: | |
4878 return OPX_TYPE_FPN; | |
4879 | |
4880 case OP_TYPE_MEM1: | |
4881 return OPX_TYPE_MEM1; | |
4882 | |
4883 case OP_TYPE_MEM234: | |
4884 return OPX_TYPE_MEM234; | |
4885 | |
4886 case OP_TYPE_MEM5: | |
4887 return OPX_TYPE_MEM5; | |
4888 | |
4889 case OP_TYPE_MEM6: | |
4890 return OPX_TYPE_MEM6; | |
4891 | |
4892 case OP_TYPE_MEM7: | |
4893 return OPX_TYPE_MEM7; | |
4894 | |
4895 case OP_TYPE_IMM_Q: | |
4896 return OPX_TYPE_IMM_Q; | |
4897 | |
4898 case OP_TYPE_IMM_W: | |
4899 return OPX_TYPE_IMM_W; | |
4900 | |
4901 case OP_TYPE_IMM_L: | |
4902 return OPX_TYPE_IMM_L; | |
4903 | |
4904 default: | |
4905 gcc_unreachable (); | |
4906 return 0; | |
4907 } | |
4908 } | |
4909 | |
4910 /* Implement opy_type attribute. | |
4911 Return type of INSN's operand Y. | |
4912 If ADDRESS_P is true, return type of memory location operand refers to. */ | |
4913 enum attr_opy_type | |
4914 m68k_sched_attr_opy_type (rtx insn, int address_p) | |
4915 { | |
4916 switch (sched_attr_op_type (insn, false, address_p != 0)) | |
4917 { | |
4918 case OP_TYPE_RN: | |
4919 return OPY_TYPE_RN; | |
4920 | |
4921 case OP_TYPE_FPN: | |
4922 return OPY_TYPE_FPN; | |
4923 | |
4924 case OP_TYPE_MEM1: | |
4925 return OPY_TYPE_MEM1; | |
4926 | |
4927 case OP_TYPE_MEM234: | |
4928 return OPY_TYPE_MEM234; | |
4929 | |
4930 case OP_TYPE_MEM5: | |
4931 return OPY_TYPE_MEM5; | |
4932 | |
4933 case OP_TYPE_MEM6: | |
4934 return OPY_TYPE_MEM6; | |
4935 | |
4936 case OP_TYPE_MEM7: | |
4937 return OPY_TYPE_MEM7; | |
4938 | |
4939 case OP_TYPE_IMM_Q: | |
4940 return OPY_TYPE_IMM_Q; | |
4941 | |
4942 case OP_TYPE_IMM_W: | |
4943 return OPY_TYPE_IMM_W; | |
4944 | |
4945 case OP_TYPE_IMM_L: | |
4946 return OPY_TYPE_IMM_L; | |
4947 | |
4948 default: | |
4949 gcc_unreachable (); | |
4950 return 0; | |
4951 } | |
4952 } | |
4953 | |
4954 /* Return size of INSN as int. */ | |
4955 static int | |
4956 sched_get_attr_size_int (rtx insn) | |
4957 { | |
4958 int size; | |
4959 | |
4960 switch (get_attr_type (insn)) | |
4961 { | |
4962 case TYPE_IGNORE: | |
4963 /* There should be no references to m68k_sched_attr_size for 'ignore' | |
4964 instructions. */ | |
4965 gcc_unreachable (); | |
4966 return 0; | |
4967 | |
4968 case TYPE_MUL_L: | |
4969 size = 2; | |
4970 break; | |
4971 | |
4972 default: | |
4973 size = 1; | |
4974 break; | |
4975 } | |
4976 | |
4977 switch (get_attr_opx_type (insn)) | |
4978 { | |
4979 case OPX_TYPE_NONE: | |
4980 case OPX_TYPE_RN: | |
4981 case OPX_TYPE_FPN: | |
4982 case OPX_TYPE_MEM1: | |
4983 case OPX_TYPE_MEM234: | |
4984 case OPY_TYPE_IMM_Q: | |
4985 break; | |
4986 | |
4987 case OPX_TYPE_MEM5: | |
4988 case OPX_TYPE_MEM6: | |
4989 /* Here we assume that most absolute references are short. */ | |
4990 case OPX_TYPE_MEM7: | |
4991 case OPY_TYPE_IMM_W: | |
4992 ++size; | |
4993 break; | |
4994 | |
4995 case OPY_TYPE_IMM_L: | |
4996 size += 2; | |
4997 break; | |
4998 | |
4999 default: | |
5000 gcc_unreachable (); | |
5001 } | |
5002 | |
5003 switch (get_attr_opy_type (insn)) | |
5004 { | |
5005 case OPY_TYPE_NONE: | |
5006 case OPY_TYPE_RN: | |
5007 case OPY_TYPE_FPN: | |
5008 case OPY_TYPE_MEM1: | |
5009 case OPY_TYPE_MEM234: | |
5010 case OPY_TYPE_IMM_Q: | |
5011 break; | |
5012 | |
5013 case OPY_TYPE_MEM5: | |
5014 case OPY_TYPE_MEM6: | |
5015 /* Here we assume that most absolute references are short. */ | |
5016 case OPY_TYPE_MEM7: | |
5017 case OPY_TYPE_IMM_W: | |
5018 ++size; | |
5019 break; | |
5020 | |
5021 case OPY_TYPE_IMM_L: | |
5022 size += 2; | |
5023 break; | |
5024 | |
5025 default: | |
5026 gcc_unreachable (); | |
5027 } | |
5028 | |
5029 if (size > 3) | |
5030 { | |
5031 gcc_assert (!reload_completed); | |
5032 | |
5033 size = 3; | |
5034 } | |
5035 | |
5036 return size; | |
5037 } | |
5038 | |
5039 /* Return size of INSN as attribute enum value. */ | |
5040 enum attr_size | |
5041 m68k_sched_attr_size (rtx insn) | |
5042 { | |
5043 switch (sched_get_attr_size_int (insn)) | |
5044 { | |
5045 case 1: | |
5046 return SIZE_1; | |
5047 | |
5048 case 2: | |
5049 return SIZE_2; | |
5050 | |
5051 case 3: | |
5052 return SIZE_3; | |
5053 | |
5054 default: | |
5055 gcc_unreachable (); | |
5056 return 0; | |
5057 } | |
5058 } | |
5059 | |
/* Collapse the opx_type (if OPX_P) or opy_type attribute of INSN into
   a coarse category: OP_TYPE_RN for register or immediate operands,
   OP_TYPE_MEM1 for non-indexed memory references, and OP_TYPE_MEM6 for
   indexed memory references.  (The previous comment claiming this
   returns a MEM rtx or NULL was wrong: it returns an enum.)  */
static enum attr_op_type
sched_get_opxy_mem_type (rtx insn, bool opx_p)
{
  if (opx_p)
    {
      switch (get_attr_opx_type (insn))
	{
	case OPX_TYPE_NONE:
	case OPX_TYPE_RN:
	case OPX_TYPE_FPN:
	case OPX_TYPE_IMM_Q:
	case OPX_TYPE_IMM_W:
	case OPX_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPX_TYPE_MEM1:
	case OPX_TYPE_MEM234:
	case OPX_TYPE_MEM5:
	case OPX_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPX_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	  return 0;
	}
    }
  else
    {
      switch (get_attr_opy_type (insn))
	{
	case OPY_TYPE_NONE:
	case OPY_TYPE_RN:
	case OPY_TYPE_FPN:
	case OPY_TYPE_IMM_Q:
	case OPY_TYPE_IMM_W:
	case OPY_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPY_TYPE_MEM1:
	case OPY_TYPE_MEM234:
	case OPY_TYPE_MEM5:
	case OPY_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPY_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	  return 0;
	}
    }
}
5118 | |
/* Implement op_mem attribute.
   Combine the coarse memory categories of operands X and Y into one of
   the OP_MEM_* values.  Judging from the cases below, the naming
   encodes <reads><writes>, with 'I' in a position when the
   corresponding access uses an indexed address (EA mode 6) --
   NOTE(review): inferred from the mapping, confirm against the .md
   attribute definition.  */
enum attr_op_mem
m68k_sched_attr_op_mem (rtx insn)
{
  enum attr_op_type opx;
  enum attr_op_type opy;

  opx = sched_get_opxy_mem_type (insn, true);
  opy = sched_get_opxy_mem_type (insn, false);

  /* Neither operand touches memory.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
    return OP_MEM_00;

  /* Only operand X references memory; its access direction decides.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_10;

	case OPX_ACCESS_W:
	  return OP_MEM_01;

	case OPX_ACCESS_RW:
	  return OP_MEM_11;

	default:
	  gcc_unreachable ();
	  return 0;
	}
    }

  /* Same, but X uses an indexed address.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_I0;

	case OPX_ACCESS_W:
	  return OP_MEM_0I;

	case OPX_ACCESS_RW:
	  return OP_MEM_I1;

	default:
	  gcc_unreachable ();
	  return 0;
	}
    }

  /* Operand Y (always a read) references memory, X is a register.  */
  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
    return OP_MEM_10;

  /* Both operands reference memory.  Before reload X's access may not
     be a plain write yet; the resulting category is the same.  */
  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_11;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_11;
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_1I;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_1I;
	}
    }

  /* Operand Y uses an indexed address.  */
  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
    return OP_MEM_I0;

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_I1;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_I1;
	}
    }

  /* Two indexed memory operands should only occur before reload.  */
  gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
  gcc_assert (!reload_completed);
  return OP_MEM_I1;
}
5219 | |
5220 /* Jump instructions types. Indexed by INSN_UID. | |
5221 The same rtl insn can be expanded into different asm instructions | |
5222 depending on the cc0_status. To properly determine type of jump | |
5223 instructions we scan instruction stream and map jumps types to this | |
5224 array. */ | |
5225 static enum attr_type *sched_branch_type; | |
5226 | |
5227 /* Return the type of the jump insn. */ | |
5228 enum attr_type | |
5229 m68k_sched_branch_type (rtx insn) | |
5230 { | |
5231 enum attr_type type; | |
5232 | |
5233 type = sched_branch_type[INSN_UID (insn)]; | |
5234 | |
5235 gcc_assert (type != 0); | |
5236 | |
5237 return type; | |
5238 } | |
5239 | |
/* Data for ColdFire V4 index bypass.
   Producer modifies register that is used as index in consumer with
   specified scale.  Presumably filled in by the bypass predicate and
   consumed (and cleared) by m68k_sched_adjust_cost -- see the assert
   there.  */
static struct
{
  /* Producer instruction.  */
  rtx pro;

  /* Consumer instruction.  */
  rtx con;

  /* Scale of indexed memory access within consumer.
     Or zero if bypass should not be effective at the moment.  */
  int scale;
} sched_cfv4_bypass_data;

/* An empty state that is used in m68k_sched_adjust_cost.  */
static state_t sched_adjust_cost_state;
5258 | |
/* Implement adjust_cost scheduler hook.
   Return adjusted COST of dependency LINK between DEF_INSN and INSN.  */
static int
m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
			int cost)
{
  int delay;

  /* Only recognized insns have the attributes/reservations we need.  */
  if (recog_memoized (def_insn) < 0
      || recog_memoized (insn) < 0)
    return cost;

  if (sched_cfv4_bypass_data.scale == 1)
    /* Handle ColdFire V4 bypass for indexed address with 1x scale.  */
    {
      /* haifa-sched.c: insn_cost () calls bypass_p () just before
	 targetm.sched.adjust_cost ().  Hence, we can be relatively sure
	 that the data in sched_cfv4_bypass_data is up to date.  */
      gcc_assert (sched_cfv4_bypass_data.pro == def_insn
		  && sched_cfv4_bypass_data.con == insn);

      /* This dependence costs at least 3 cycles.  */
      if (cost < 3)
	cost = 3;

      /* Consume the bypass data so stale values cannot be reused.  */
      sched_cfv4_bypass_data.pro = NULL;
      sched_cfv4_bypass_data.con = NULL;
      sched_cfv4_bypass_data.scale = 0;
    }
  else
    gcc_assert (sched_cfv4_bypass_data.pro == NULL
		&& sched_cfv4_bypass_data.con == NULL
		&& sched_cfv4_bypass_data.scale == 0);

  /* Don't try to issue INSN earlier than DFA permits.
     This is especially useful for instructions that write to memory,
     as their true dependence (default) latency is better to be set to 0
     to workaround alias analysis limitations.
     This is, in fact, a machine independent tweak, so, probably,
     it should be moved to haifa-sched.c: insn_cost ().  */
  delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
  if (delay > cost)
    cost = delay;

  return cost;
}
5304 | |
5305 /* Return maximal number of insns that can be scheduled on a single cycle. */ | |
5306 static int | |
5307 m68k_sched_issue_rate (void) | |
5308 { | |
5309 switch (m68k_sched_cpu) | |
5310 { | |
5311 case CPU_CFV1: | |
5312 case CPU_CFV2: | |
5313 case CPU_CFV3: | |
5314 return 1; | |
5315 | |
5316 case CPU_CFV4: | |
5317 return 2; | |
5318 | |
5319 default: | |
5320 gcc_unreachable (); | |
5321 return 0; | |
5322 } | |
5323 } | |
5324 | |
/* Maximal length of instruction for current CPU.
   E.g. it is 3 for any ColdFire core.  */
static int max_insn_size;

/* Data to model instruction buffer of CPU.  */
struct _sched_ib
{
  /* True if instruction buffer model is modeled for current CPU.  */
  bool enabled_p;

  /* Size of the instruction buffer in words.  */
  int size;

  /* Number of filled words in the instruction buffer.  */
  int filled;

  /* Additional information about instruction buffer for CPUs that have
     a buffer of instruction records, rather than a plain buffer
     of instruction words.  */
  struct _sched_ib_records
  {
    /* Size of buffer in records.  */
    int n_insns;

    /* Array to hold data on adjustments made to the size of the buffer.  */
    int *adjust;

    /* Index of the above array.  */
    int adjust_index;
  } records;

  /* An insn that reserves (marks empty) one word in the instruction buffer.  */
  rtx insn;
};

/* The single instruction-buffer model instance for the current pass.  */
static struct _sched_ib sched_ib;

/* ID of memory unit.  */
static int sched_mem_unit_code;
5364 | |
/* Implementation of the targetm.sched.variable_issue () hook.
   It is called after INSN was issued.  It returns the number of insns
   that can possibly get scheduled on the current cycle.
   It is used here to determine the effect of INSN on the instruction
   buffer.  */
static int
m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   rtx insn, int can_issue_more)
{
  int insn_size;

  if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
    {
      switch (m68k_sched_cpu)
	{
	case CPU_CFV1:
	case CPU_CFV2:
	  insn_size = sched_get_attr_size_int (insn);
	  break;

	case CPU_CFV3:
	  insn_size = sched_get_attr_size_int (insn);

	  /* ColdFire V3 and V4 cores have instruction buffers that can
	     accumulate up to 8 instructions regardless of instructions'
	     sizes.  So we should take care not to "prefetch" 24 one-word
	     or 12 two-words instructions.
	     To model this behavior we temporarily decrease size of the
	     buffer by (max_insn_size - insn_size) for next 7 instructions.  */
	  {
	    int adjust;

	    adjust = max_insn_size - insn_size;
	    sched_ib.size -= adjust;

	    if (sched_ib.filled > sched_ib.size)
	      sched_ib.filled = sched_ib.size;

	    /* Remember this adjustment in the circular buffer so it can
	       be undone n_insns issues later.  */
	    sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
	  }

	  ++sched_ib.records.adjust_index;
	  if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
	    sched_ib.records.adjust_index = 0;

	  /* Undo adjustment we did 7 instructions ago.  */
	  sched_ib.size
	    += sched_ib.records.adjust[sched_ib.records.adjust_index];

	  break;

	case CPU_CFV4:
	  /* The buffer is not modeled for V4; see md_init_global.  */
	  gcc_assert (!sched_ib.enabled_p);
	  insn_size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      gcc_assert (insn_size <= sched_ib.filled);
      --can_issue_more;
    }
  else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || asm_noperands (PATTERN (insn)) >= 0)
    /* Inline asm has unknown size; conservatively treat it as draining
       the whole buffer.  */
    insn_size = sched_ib.filled;
  else
    insn_size = 0;

  sched_ib.filled -= insn_size;

  return can_issue_more;
}
5439 | |
/* Return how many instructions should scheduler lookahead to choose the
   best one.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  /* Look ahead by one less than the issue rate of the current CPU.  */
  int rate = m68k_sched_issue_rate ();
  return rate - 1;
}
5447 | |
/* Implementation of targetm.sched.md_init_global () hook.
   It is invoked once per scheduling pass and is used here
   to initialize scheduler constants.  */
static void
m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   int n_insns ATTRIBUTE_UNUSED)
{
  /* Init branch types.  */
  {
    rtx insn;

    sched_branch_type = XCNEWVEC (enum attr_type, get_max_uid () + 1);

    for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
      {
	if (JUMP_P (insn))
	  /* !!! FIXME: Implement real scan here.  */
	  sched_branch_type[INSN_UID (insn)] = TYPE_BCC;
      }
  }

#ifdef ENABLE_CHECKING
  /* Check that all instructions have DFA reservations and
     that all instructions can be issued from a clean state.  */
  {
    rtx insn;
    state_t state;

    state = alloca (state_size ());

    for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
      {
	if (INSN_P (insn) && recog_memoized (insn) >= 0)
	  {
	    gcc_assert (insn_has_dfa_reservation_p (insn));

	    state_reset (state);
	    if (state_transition (state, insn) >= 0)
	      gcc_unreachable ();
	  }
      }
  }
#endif

  /* Setup target cpu.  */

  /* ColdFire V4 has a set of features to keep its instruction buffer full
     (e.g., a separate memory bus for instructions) and, hence, we do not model
     buffer for this CPU.  */
  sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);

  switch (m68k_sched_cpu)
    {
    case CPU_CFV4:
      sched_ib.filled = 0;

      /* FALLTHRU */

    case CPU_CFV1:
    case CPU_CFV2:
      max_insn_size = 3;
      sched_ib.records.n_insns = 0;
      sched_ib.records.adjust = NULL;
      break;

    case CPU_CFV3:
      max_insn_size = 3;
      sched_ib.records.n_insns = 8;
      sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
      break;

    default:
      gcc_unreachable ();
    }

  sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");

  /* Scratch DFA state used by m68k_sched_adjust_cost.  */
  sched_adjust_cost_state = xmalloc (state_size ());
  state_reset (sched_adjust_cost_state);

  /* Dummy insn used by m68k_sched_dfa_post_advance_cycle to reserve one
     word of the instruction buffer.  */
  start_sequence ();
  emit_insn (gen_ib ());
  sched_ib.insn = get_insns ();
  end_sequence ();
}
5534 | |
5535 /* Scheduling pass is now finished. Free/reset static variables. */ | |
5536 static void | |
5537 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED, | |
5538 int verbose ATTRIBUTE_UNUSED) | |
5539 { | |
5540 sched_ib.insn = NULL; | |
5541 | |
5542 free (sched_adjust_cost_state); | |
5543 sched_adjust_cost_state = NULL; | |
5544 | |
5545 sched_mem_unit_code = 0; | |
5546 | |
5547 free (sched_ib.records.adjust); | |
5548 sched_ib.records.adjust = NULL; | |
5549 sched_ib.records.n_insns = 0; | |
5550 max_insn_size = 0; | |
5551 | |
5552 free (sched_branch_type); | |
5553 sched_branch_type = NULL; | |
5554 } | |
5555 | |
/* Implementation of targetm.sched.md_init () hook.
   It is invoked each time scheduler starts on the new block (basic block or
   extended basic block).  */
static void
m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
		    int sched_verbose ATTRIBUTE_UNUSED,
		    int n_insns ATTRIBUTE_UNUSED)
{
  switch (m68k_sched_cpu)
    {
    case CPU_CFV1:
    case CPU_CFV2:
      /* V1/V2 model a plain 6-word instruction buffer.  */
      sched_ib.size = 6;
      break;

    case CPU_CFV3:
      /* V3 buffers instruction records; start at the full word capacity
	 and clear the per-record size adjustments.  */
      sched_ib.size = sched_ib.records.n_insns * max_insn_size;

      memset (sched_ib.records.adjust, 0,
	      sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
      sched_ib.records.adjust_index = 0;
      break;

    case CPU_CFV4:
      /* The buffer is not modeled for V4; see md_init_global.  */
      gcc_assert (!sched_ib.enabled_p);
      sched_ib.size = 0;
      break;

    default:
      gcc_unreachable ();
    }

  if (sched_ib.enabled_p)
    /* haifa-sched.c: schedule_block () calls advance_cycle () just before
       the first cycle.  Workaround that.  */
    sched_ib.filled = -2;
}
5593 | |
5594 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook. | |
5595 It is invoked just before current cycle finishes and is used here | |
5596 to track if instruction buffer got its two words this cycle. */ | |
5597 static void | |
5598 m68k_sched_dfa_pre_advance_cycle (void) | |
5599 { | |
5600 if (!sched_ib.enabled_p) | |
5601 return; | |
5602 | |
5603 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code)) | |
5604 { | |
5605 sched_ib.filled += 2; | |
5606 | |
5607 if (sched_ib.filled > sched_ib.size) | |
5608 sched_ib.filled = sched_ib.size; | |
5609 } | |
5610 } | |
5611 | |
5612 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook. | |
5613 It is invoked just after new cycle begins and is used here | |
5614 to setup number of filled words in the instruction buffer so that | |
5615 instructions which won't have all their words prefetched would be | |
5616 stalled for a cycle. */ | |
5617 static void | |
5618 m68k_sched_dfa_post_advance_cycle (void) | |
5619 { | |
5620 int i; | |
5621 | |
5622 if (!sched_ib.enabled_p) | |
5623 return; | |
5624 | |
5625 /* Setup number of prefetched instruction words in the instruction | |
5626 buffer. */ | |
5627 i = max_insn_size - sched_ib.filled; | |
5628 | |
5629 while (--i >= 0) | |
5630 { | |
5631 if (state_transition (curr_state, sched_ib.insn) >= 0) | |
5632 gcc_unreachable (); | |
5633 } | |
5634 } | |
5635 | |
5636 /* Return X or Y (depending on OPX_P) operand of INSN, | |
5637 if it is an integer register, or NULL overwise. */ | |
5638 static rtx | |
5639 sched_get_reg_operand (rtx insn, bool opx_p) | |
5640 { | |
5641 rtx op = NULL; | |
5642 | |
5643 if (opx_p) | |
5644 { | |
5645 if (get_attr_opx_type (insn) == OPX_TYPE_RN) | |
5646 { | |
5647 op = sched_get_operand (insn, true); | |
5648 gcc_assert (op != NULL); | |
5649 | |
5650 if (!reload_completed && !REG_P (op)) | |
5651 return NULL; | |
5652 } | |
5653 } | |
5654 else | |
5655 { | |
5656 if (get_attr_opy_type (insn) == OPY_TYPE_RN) | |
5657 { | |
5658 op = sched_get_operand (insn, false); | |
5659 gcc_assert (op != NULL); | |
5660 | |
5661 if (!reload_completed && !REG_P (op)) | |
5662 return NULL; | |
5663 } | |
5664 } | |
5665 | |
5666 return op; | |
5667 } | |
5668 | |
5669 /* Return true, if X or Y (depending on OPX_P) operand of INSN | |
5670 is a MEM. */ | |
5671 static bool | |
5672 sched_mem_operand_p (rtx insn, bool opx_p) | |
5673 { | |
5674 switch (sched_get_opxy_mem_type (insn, opx_p)) | |
5675 { | |
5676 case OP_TYPE_MEM1: | |
5677 case OP_TYPE_MEM6: | |
5678 return true; | |
5679 | |
5680 default: | |
5681 return false; | |
5682 } | |
5683 } | |
5684 | |
5685 /* Return X or Y (depending on OPX_P) operand of INSN, | |
5686 if it is a MEM, or NULL overwise. */ | |
5687 static rtx | |
5688 sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p) | |
5689 { | |
5690 bool opx_p; | |
5691 bool opy_p; | |
5692 | |
5693 opx_p = false; | |
5694 opy_p = false; | |
5695 | |
5696 if (must_read_p) | |
5697 { | |
5698 opx_p = true; | |
5699 opy_p = true; | |
5700 } | |
5701 | |
5702 if (must_write_p) | |
5703 { | |
5704 opx_p = true; | |
5705 opy_p = false; | |
5706 } | |
5707 | |
5708 if (opy_p && sched_mem_operand_p (insn, false)) | |
5709 return sched_get_operand (insn, false); | |
5710 | |
5711 if (opx_p && sched_mem_operand_p (insn, true)) | |
5712 return sched_get_operand (insn, true); | |
5713 | |
5714 gcc_unreachable (); | |
5715 return NULL; | |
5716 } | |
5717 | |
5718 /* Return non-zero if PRO modifies register used as part of | |
5719 address in CON. */ | |
5720 int | |
5721 m68k_sched_address_bypass_p (rtx pro, rtx con) | |
5722 { | |
5723 rtx pro_x; | |
5724 rtx con_mem_read; | |
5725 | |
5726 pro_x = sched_get_reg_operand (pro, true); | |
5727 if (pro_x == NULL) | |
5728 return 0; | |
5729 | |
5730 con_mem_read = sched_get_mem_operand (con, true, false); | |
5731 gcc_assert (con_mem_read != NULL); | |
5732 | |
5733 if (reg_mentioned_p (pro_x, con_mem_read)) | |
5734 return 1; | |
5735 | |
5736 return 0; | |
5737 } | |
5738 | |
5739 /* Helper function for m68k_sched_indexed_address_bypass_p. | |
5740 if PRO modifies register used as index in CON, | |
5741 return scale of indexed memory access in CON. Return zero overwise. */ | |
5742 static int | |
5743 sched_get_indexed_address_scale (rtx pro, rtx con) | |
5744 { | |
5745 rtx reg; | |
5746 rtx mem; | |
5747 struct m68k_address address; | |
5748 | |
5749 reg = sched_get_reg_operand (pro, true); | |
5750 if (reg == NULL) | |
5751 return 0; | |
5752 | |
5753 mem = sched_get_mem_operand (con, true, false); | |
5754 gcc_assert (mem != NULL && MEM_P (mem)); | |
5755 | |
5756 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed, | |
5757 &address)) | |
5758 gcc_unreachable (); | |
5759 | |
5760 if (REGNO (reg) == REGNO (address.index)) | |
5761 { | |
5762 gcc_assert (address.scale != 0); | |
5763 return address.scale; | |
5764 } | |
5765 | |
5766 return 0; | |
5767 } | |
5768 | |
5769 /* Return non-zero if PRO modifies register used | |
5770 as index with scale 2 or 4 in CON. */ | |
5771 int | |
5772 m68k_sched_indexed_address_bypass_p (rtx pro, rtx con) | |
5773 { | |
5774 gcc_assert (sched_cfv4_bypass_data.pro == NULL | |
5775 && sched_cfv4_bypass_data.con == NULL | |
5776 && sched_cfv4_bypass_data.scale == 0); | |
5777 | |
5778 switch (sched_get_indexed_address_scale (pro, con)) | |
5779 { | |
5780 case 1: | |
5781 /* We can't have a variable latency bypass, so | |
5782 remember to adjust the insn cost in adjust_cost hook. */ | |
5783 sched_cfv4_bypass_data.pro = pro; | |
5784 sched_cfv4_bypass_data.con = con; | |
5785 sched_cfv4_bypass_data.scale = 1; | |
5786 return 0; | |
5787 | |
5788 case 2: | |
5789 case 4: | |
5790 return 1; | |
5791 | |
5792 default: | |
5793 return 0; | |
5794 } | |
5795 } |