comparison gcc/optabs.c @ 111:04ced10e8804

gcc 7
author kono
date Fri, 27 Oct 2017 22:46:09 +0900
parents f6334be47118
children 84e7813d76e9
comparison
equal deleted inserted replaced
68:561a7518be6b 111:04ced10e8804
1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler. 1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 3
6 This file is part of GCC. 4 This file is part of GCC.
7 5
8 GCC is free software; you can redistribute it and/or modify it under 6 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free 7 the terms of the GNU General Public License as published by the Free
21 19
22 20
23 #include "config.h" 21 #include "config.h"
24 #include "system.h" 22 #include "system.h"
25 #include "coretypes.h" 23 #include "coretypes.h"
26 #include "tm.h" 24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "memmodel.h"
29 #include "predict.h"
30 #include "tm_p.h"
31 #include "expmed.h"
32 #include "optabs.h"
33 #include "emit-rtl.h"
34 #include "recog.h"
27 #include "diagnostic-core.h" 35 #include "diagnostic-core.h"
28 36
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move 37 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */ 38 is properly defined. */
31 #include "insn-config.h" 39 #include "stor-layout.h"
32 #include "rtl.h"
33 #include "tree.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "function.h"
37 #include "except.h" 40 #include "except.h"
41 #include "dojump.h"
42 #include "explow.h"
38 #include "expr.h" 43 #include "expr.h"
39 #include "optabs.h" 44 #include "optabs-tree.h"
40 #include "libfuncs.h" 45 #include "libfuncs.h"
41 #include "recog.h"
42 #include "reload.h"
43 #include "ggc.h"
44 #include "basic-block.h"
45 #include "target.h"
46
47 struct target_optabs default_target_optabs;
48 struct target_libfuncs default_target_libfuncs;
49 #if SWITCHABLE_TARGET
50 struct target_optabs *this_target_optabs = &default_target_optabs;
51 struct target_libfuncs *this_target_libfuncs = &default_target_libfuncs;
52 #endif
53
54 #define libfunc_hash \
55 (this_target_libfuncs->x_libfunc_hash)
56
57 /* Contains the optab used for each rtx code. */
58 optab code_to_optab[NUM_RTX_CODE + 1];
59 46
60 static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *, 47 static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
61 enum machine_mode *); 48 machine_mode *);
62 static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int); 49 static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
50 static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);
63 51
64 /* Debug facility for use in GDB. */ 52 /* Debug facility for use in GDB. */
65 void debug_optab_libfuncs (void); 53 void debug_optab_libfuncs (void);
66
67 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
68 #if ENABLE_DECIMAL_BID_FORMAT
69 #define DECIMAL_PREFIX "bid_"
70 #else
71 #define DECIMAL_PREFIX "dpd_"
72 #endif
73
74 /* Used for libfunc_hash. */
75
76 static hashval_t
77 hash_libfunc (const void *p)
78 {
79 const struct libfunc_entry *const e = (const struct libfunc_entry *) p;
80
81 return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
82 ^ e->optab);
83 }
84
85 /* Used for libfunc_hash. */
86
87 static int
88 eq_libfunc (const void *p, const void *q)
89 {
90 const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
91 const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;
92
93 return (e1->optab == e2->optab
94 && e1->mode1 == e2->mode1
95 && e1->mode2 == e2->mode2);
96 }
97
98 /* Return libfunc corresponding operation defined by OPTAB converting
99 from MODE2 to MODE1. Trigger lazy initialization if needed, return NULL
100 if no libfunc is available. */
101 rtx
102 convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
103 enum machine_mode mode2)
104 {
105 struct libfunc_entry e;
106 struct libfunc_entry **slot;
107
108 e.optab = (size_t) (optab - &convert_optab_table[0]);
109 e.mode1 = mode1;
110 e.mode2 = mode2;
111 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
112 if (!slot)
113 {
114 if (optab->libcall_gen)
115 {
116 optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
117 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
118 if (slot)
119 return (*slot)->libfunc;
120 else
121 return NULL;
122 }
123 return NULL;
124 }
125 return (*slot)->libfunc;
126 }
127
128 /* Return libfunc corresponding operation defined by OPTAB in MODE.
129 Trigger lazy initialization if needed, return NULL if no libfunc is
130 available. */
131 rtx
132 optab_libfunc (optab optab, enum machine_mode mode)
133 {
134 struct libfunc_entry e;
135 struct libfunc_entry **slot;
136
137 e.optab = (size_t) (optab - &optab_table[0]);
138 e.mode1 = mode;
139 e.mode2 = VOIDmode;
140 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
141 if (!slot)
142 {
143 if (optab->libcall_gen)
144 {
145 optab->libcall_gen (optab, optab->libcall_basename,
146 optab->libcall_suffix, mode);
147 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
148 &e, NO_INSERT);
149 if (slot)
150 return (*slot)->libfunc;
151 else
152 return NULL;
153 }
154 return NULL;
155 }
156 return (*slot)->libfunc;
157 }
158
159 54
160 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to 55 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
161 the result of operation CODE applied to OP0 (and OP1 if it is a binary 56 the result of operation CODE applied to OP0 (and OP1 if it is a binary
162 operation). 57 operation).
163 58
164 If the last insn does not set TARGET, don't do anything, but return 1. 59 If the last insn does not set TARGET, don't do anything, but return 1.
165 60
166 If a previous insn sets TARGET and TARGET is one of OP0 or OP1, 61 If the last insn or a previous insn sets TARGET and TARGET is one of OP0
167 don't add the REG_EQUAL note but return 0. Our caller can then try 62 or OP1, don't add the REG_EQUAL note but return 0. Our caller can then
168 again, ensuring that TARGET is not one of the operands. */ 63 try again, ensuring that TARGET is not one of the operands. */
169 64
170 static int 65 static int
171 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1) 66 add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
172 { 67 {
173 rtx last_insn, insn, set; 68 rtx_insn *last_insn;
69 rtx set;
174 rtx note; 70 rtx note;
175 71
176 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns)); 72 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
177 73
178 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH 74 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
188 for (last_insn = insns; 84 for (last_insn = insns;
189 NEXT_INSN (last_insn) != NULL_RTX; 85 NEXT_INSN (last_insn) != NULL_RTX;
190 last_insn = NEXT_INSN (last_insn)) 86 last_insn = NEXT_INSN (last_insn))
191 ; 87 ;
192 88
193 set = single_set (last_insn); 89 /* If TARGET is in OP0 or OP1, punt. We'd end up with a note referencing
90 a value changing in the insn, so the note would be invalid for CSE. */
91 if (reg_overlap_mentioned_p (target, op0)
92 || (op1 && reg_overlap_mentioned_p (target, op1)))
93 {
94 if (MEM_P (target)
95 && (rtx_equal_p (target, op0)
96 || (op1 && rtx_equal_p (target, op1))))
97 {
98 /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
99 over expanding it as temp = MEM op X, MEM = temp. If the target
100 supports MEM = MEM op X instructions, it is sometimes too hard
101 to reconstruct that form later, especially if X is also a memory,
102 and due to multiple occurrences of addresses the address might
103 be forced into register unnecessarily.
104 Note that not emitting the REG_EQUIV note might inhibit
105 CSE in some cases. */
106 set = single_set (last_insn);
107 if (set
108 && GET_CODE (SET_SRC (set)) == code
109 && MEM_P (SET_DEST (set))
110 && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
111 || (op1 && rtx_equal_p (SET_DEST (set),
112 XEXP (SET_SRC (set), 1)))))
113 return 1;
114 }
115 return 0;
116 }
117
118 set = set_for_reg_notes (last_insn);
194 if (set == NULL_RTX) 119 if (set == NULL_RTX)
195 return 1; 120 return 1;
196 121
197 if (! rtx_equal_p (SET_DEST (set), target) 122 if (! rtx_equal_p (SET_DEST (set), target)
198 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */ 123 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
199 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART 124 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
200 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target))) 125 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
201 return 1; 126 return 1;
202 127
203 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
204 besides the last insn. */
205 if (reg_overlap_mentioned_p (target, op0)
206 || (op1 && reg_overlap_mentioned_p (target, op1)))
207 {
208 insn = PREV_INSN (last_insn);
209 while (insn != NULL_RTX)
210 {
211 if (reg_set_p (target, insn))
212 return 0;
213
214 insn = PREV_INSN (insn);
215 }
216 }
217
218 if (GET_RTX_CLASS (code) == RTX_UNARY) 128 if (GET_RTX_CLASS (code) == RTX_UNARY)
219 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0)); 129 switch (code)
130 {
131 case FFS:
132 case CLZ:
133 case CTZ:
134 case CLRSB:
135 case POPCOUNT:
136 case PARITY:
137 case BSWAP:
138 if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
139 {
140 note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
141 if (GET_MODE_UNIT_SIZE (GET_MODE (op0))
142 > GET_MODE_UNIT_SIZE (GET_MODE (target)))
143 note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
144 note, GET_MODE (op0));
145 else
146 note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
147 note, GET_MODE (op0));
148 break;
149 }
150 /* FALLTHRU */
151 default:
152 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
153 break;
154 }
220 else 155 else
221 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1)); 156 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
222 157
223 set_unique_reg_note (last_insn, REG_EQUAL, note); 158 set_unique_reg_note (last_insn, REG_EQUAL, note);
224 159
225 return 1; 160 return 1;
161 }
162
163 /* Given two input operands, OP0 and OP1, determine what the correct from_mode
164 for a widening operation would be. In most cases this would be OP0, but if
165 that's a constant it'll be VOIDmode, which isn't useful. */
166
167 static machine_mode
168 widened_mode (machine_mode to_mode, rtx op0, rtx op1)
169 {
170 machine_mode m0 = GET_MODE (op0);
171 machine_mode m1 = GET_MODE (op1);
172 machine_mode result;
173
174 if (m0 == VOIDmode && m1 == VOIDmode)
175 return to_mode;
176 else if (m0 == VOIDmode || GET_MODE_UNIT_SIZE (m0) < GET_MODE_UNIT_SIZE (m1))
177 result = m1;
178 else
179 result = m0;
180
181 if (GET_MODE_UNIT_SIZE (result) > GET_MODE_UNIT_SIZE (to_mode))
182 return to_mode;
183
184 return result;
226 } 185 }
227 186
228 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP 187 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
229 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need 188 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
230 not actually do a sign-extend or zero-extend, but can leave the 189 not actually do a sign-extend or zero-extend, but can leave the
231 higher-order bits of the result rtx undefined, for example, in the case 190 higher-order bits of the result rtx undefined, for example, in the case
232 of logical operations, but not right shifts. */ 191 of logical operations, but not right shifts. */
233 192
234 static rtx 193 static rtx
235 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode, 194 widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
236 int unsignedp, int no_extend) 195 int unsignedp, int no_extend)
237 { 196 {
238 rtx result; 197 rtx result;
198 scalar_int_mode int_mode;
239 199
240 /* If we don't have to extend and this is a constant, return it. */ 200 /* If we don't have to extend and this is a constant, return it. */
241 if (no_extend && GET_MODE (op) == VOIDmode) 201 if (no_extend && GET_MODE (op) == VOIDmode)
242 return op; 202 return op;
243 203
244 /* If we must extend do so. If OP is a SUBREG for a promoted object, also 204 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
245 extend since it will be more efficient to do so unless the signedness of 205 extend since it will be more efficient to do so unless the signedness of
246 a promoted object differs from our extension. */ 206 a promoted object differs from our extension. */
247 if (! no_extend 207 if (! no_extend
208 || !is_a <scalar_int_mode> (mode, &int_mode)
248 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op) 209 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
249 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp)) 210 && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
250 return convert_modes (mode, oldmode, op, unsignedp); 211 return convert_modes (mode, oldmode, op, unsignedp);
251 212
252 /* If MODE is no wider than a single word, we return a paradoxical 213 /* If MODE is no wider than a single word, we return a lowpart or paradoxical
253 SUBREG. */ 214 SUBREG. */
254 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD) 215 if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
255 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0); 216 return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));
256 217
257 /* Otherwise, get an object of MODE, clobber it, and set the low-order 218 /* Otherwise, get an object of MODE, clobber it, and set the low-order
258 part to OP. */ 219 part to OP. */
259 220
260 result = gen_reg_rtx (mode); 221 result = gen_reg_rtx (int_mode);
261 emit_clobber (result); 222 emit_clobber (result);
262 emit_move_insn (gen_lowpart (GET_MODE (op), result), op); 223 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
263 return result; 224 return result;
264 } 225 }
265 226
266 /* Return the optab used for computing the operation given by the tree code,
267 CODE and the tree EXP. This function is not always usable (for example, it
268 cannot give complete results for multiplication or division) but probably
269 ought to be relied on more widely throughout the expander. */
270 optab
271 optab_for_tree_code (enum tree_code code, const_tree type,
272 enum optab_subtype subtype)
273 {
274 bool trapv;
275 switch (code)
276 {
277 case BIT_AND_EXPR:
278 return and_optab;
279
280 case BIT_IOR_EXPR:
281 return ior_optab;
282
283 case BIT_NOT_EXPR:
284 return one_cmpl_optab;
285
286 case BIT_XOR_EXPR:
287 return xor_optab;
288
289 case TRUNC_MOD_EXPR:
290 case CEIL_MOD_EXPR:
291 case FLOOR_MOD_EXPR:
292 case ROUND_MOD_EXPR:
293 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
294
295 case RDIV_EXPR:
296 case TRUNC_DIV_EXPR:
297 case CEIL_DIV_EXPR:
298 case FLOOR_DIV_EXPR:
299 case ROUND_DIV_EXPR:
300 case EXACT_DIV_EXPR:
301 if (TYPE_SATURATING(type))
302 return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
303 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
304
305 case LSHIFT_EXPR:
306 if (TREE_CODE (type) == VECTOR_TYPE)
307 {
308 if (subtype == optab_vector)
309 return TYPE_SATURATING (type) ? NULL : vashl_optab;
310
311 gcc_assert (subtype == optab_scalar);
312 }
313 if (TYPE_SATURATING(type))
314 return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
315 return ashl_optab;
316
317 case RSHIFT_EXPR:
318 if (TREE_CODE (type) == VECTOR_TYPE)
319 {
320 if (subtype == optab_vector)
321 return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;
322
323 gcc_assert (subtype == optab_scalar);
324 }
325 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
326
327 case LROTATE_EXPR:
328 if (TREE_CODE (type) == VECTOR_TYPE)
329 {
330 if (subtype == optab_vector)
331 return vrotl_optab;
332
333 gcc_assert (subtype == optab_scalar);
334 }
335 return rotl_optab;
336
337 case RROTATE_EXPR:
338 if (TREE_CODE (type) == VECTOR_TYPE)
339 {
340 if (subtype == optab_vector)
341 return vrotr_optab;
342
343 gcc_assert (subtype == optab_scalar);
344 }
345 return rotr_optab;
346
347 case MAX_EXPR:
348 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
349
350 case MIN_EXPR:
351 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
352
353 case REALIGN_LOAD_EXPR:
354 return vec_realign_load_optab;
355
356 case WIDEN_SUM_EXPR:
357 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
358
359 case DOT_PROD_EXPR:
360 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
361
362 case WIDEN_MULT_PLUS_EXPR:
363 return (TYPE_UNSIGNED (type)
364 ? (TYPE_SATURATING (type)
365 ? usmadd_widen_optab : umadd_widen_optab)
366 : (TYPE_SATURATING (type)
367 ? ssmadd_widen_optab : smadd_widen_optab));
368
369 case WIDEN_MULT_MINUS_EXPR:
370 return (TYPE_UNSIGNED (type)
371 ? (TYPE_SATURATING (type)
372 ? usmsub_widen_optab : umsub_widen_optab)
373 : (TYPE_SATURATING (type)
374 ? ssmsub_widen_optab : smsub_widen_optab));
375
376 case FMA_EXPR:
377 return fma_optab;
378
379 case REDUC_MAX_EXPR:
380 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
381
382 case REDUC_MIN_EXPR:
383 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
384
385 case REDUC_PLUS_EXPR:
386 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
387
388 case VEC_LSHIFT_EXPR:
389 return vec_shl_optab;
390
391 case VEC_RSHIFT_EXPR:
392 return vec_shr_optab;
393
394 case VEC_WIDEN_MULT_HI_EXPR:
395 return TYPE_UNSIGNED (type) ?
396 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
397
398 case VEC_WIDEN_MULT_LO_EXPR:
399 return TYPE_UNSIGNED (type) ?
400 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
401
402 case VEC_UNPACK_HI_EXPR:
403 return TYPE_UNSIGNED (type) ?
404 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
405
406 case VEC_UNPACK_LO_EXPR:
407 return TYPE_UNSIGNED (type) ?
408 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
409
410 case VEC_UNPACK_FLOAT_HI_EXPR:
411 /* The signedness is determined from input operand. */
412 return TYPE_UNSIGNED (type) ?
413 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
414
415 case VEC_UNPACK_FLOAT_LO_EXPR:
416 /* The signedness is determined from input operand. */
417 return TYPE_UNSIGNED (type) ?
418 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
419
420 case VEC_PACK_TRUNC_EXPR:
421 return vec_pack_trunc_optab;
422
423 case VEC_PACK_SAT_EXPR:
424 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
425
426 case VEC_PACK_FIX_TRUNC_EXPR:
427 /* The signedness is determined from output operand. */
428 return TYPE_UNSIGNED (type) ?
429 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
430
431 default:
432 break;
433 }
434
435 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
436 switch (code)
437 {
438 case POINTER_PLUS_EXPR:
439 case PLUS_EXPR:
440 if (TYPE_SATURATING(type))
441 return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
442 return trapv ? addv_optab : add_optab;
443
444 case MINUS_EXPR:
445 if (TYPE_SATURATING(type))
446 return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
447 return trapv ? subv_optab : sub_optab;
448
449 case MULT_EXPR:
450 if (TYPE_SATURATING(type))
451 return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
452 return trapv ? smulv_optab : smul_optab;
453
454 case NEGATE_EXPR:
455 if (TYPE_SATURATING(type))
456 return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
457 return trapv ? negv_optab : neg_optab;
458
459 case ABS_EXPR:
460 return trapv ? absv_optab : abs_optab;
461
462 case VEC_EXTRACT_EVEN_EXPR:
463 return vec_extract_even_optab;
464
465 case VEC_EXTRACT_ODD_EXPR:
466 return vec_extract_odd_optab;
467
468 case VEC_INTERLEAVE_HIGH_EXPR:
469 return vec_interleave_high_optab;
470
471 case VEC_INTERLEAVE_LOW_EXPR:
472 return vec_interleave_low_optab;
473
474 default:
475 return NULL;
476 }
477 }
478
479
480 /* Expand vector widening operations. 227 /* Expand vector widening operations.
481 228
482 There are two different classes of operations handled here: 229 There are two different classes of operations handled here:
483 1) Operations whose result is wider than all the arguments to the operation. 230 1) Operations whose result is wider than all the arguments to the operation.
484 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR 231 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
499 246
500 rtx 247 rtx
501 expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op, 248 expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
502 rtx target, int unsignedp) 249 rtx target, int unsignedp)
503 { 250 {
251 struct expand_operand eops[4];
504 tree oprnd0, oprnd1, oprnd2; 252 tree oprnd0, oprnd1, oprnd2;
505 enum machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode; 253 machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
506 optab widen_pattern_optab; 254 optab widen_pattern_optab;
507 int icode; 255 enum insn_code icode;
508 enum machine_mode xmode0, xmode1 = VOIDmode, wxmode = VOIDmode;
509 rtx temp;
510 rtx pat;
511 rtx xop0, xop1, wxop;
512 int nops = TREE_CODE_LENGTH (ops->code); 256 int nops = TREE_CODE_LENGTH (ops->code);
257 int op;
513 258
514 oprnd0 = ops->op0; 259 oprnd0 = ops->op0;
515 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0)); 260 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
516 widen_pattern_optab = 261 widen_pattern_optab =
517 optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default); 262 optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
518 if (ops->code == WIDEN_MULT_PLUS_EXPR 263 if (ops->code == WIDEN_MULT_PLUS_EXPR
519 || ops->code == WIDEN_MULT_MINUS_EXPR) 264 || ops->code == WIDEN_MULT_MINUS_EXPR)
520 icode = (int) optab_handler (widen_pattern_optab, 265 icode = find_widening_optab_handler (widen_pattern_optab,
521 TYPE_MODE (TREE_TYPE (ops->op2))); 266 TYPE_MODE (TREE_TYPE (ops->op2)),
267 tmode0, 0);
522 else 268 else
523 icode = (int) optab_handler (widen_pattern_optab, tmode0); 269 icode = optab_handler (widen_pattern_optab, tmode0);
524 gcc_assert (icode != CODE_FOR_nothing); 270 gcc_assert (icode != CODE_FOR_nothing);
525 xmode0 = insn_data[icode].operand[1].mode;
526 271
527 if (nops >= 2) 272 if (nops >= 2)
528 { 273 {
529 oprnd1 = ops->op1; 274 oprnd1 = ops->op1;
530 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1)); 275 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
531 xmode1 = insn_data[icode].operand[2].mode;
532 } 276 }
533 277
534 /* The last operand is of a wider mode than the rest of the operands. */ 278 /* The last operand is of a wider mode than the rest of the operands. */
535 if (nops == 2) 279 if (nops == 2)
536 { 280 wmode = tmode1;
537 wmode = tmode1;
538 wxmode = xmode1;
539 }
540 else if (nops == 3) 281 else if (nops == 3)
541 { 282 {
542 gcc_assert (tmode1 == tmode0); 283 gcc_assert (tmode1 == tmode0);
543 gcc_assert (op1); 284 gcc_assert (op1);
544 oprnd2 = ops->op2; 285 oprnd2 = ops->op2;
545 wmode = TYPE_MODE (TREE_TYPE (oprnd2)); 286 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
546 wxmode = insn_data[icode].operand[3].mode; 287 }
547 } 288
548 289 op = 0;
549 if (!wide_op) 290 create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
550 wmode = wxmode = insn_data[icode].operand[0].mode; 291 create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
551
552 if (!target
553 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
554 temp = gen_reg_rtx (wmode);
555 else
556 temp = target;
557
558 xop0 = op0;
559 xop1 = op1;
560 wxop = wide_op;
561
562 /* In case the insn wants input operands in modes different from
563 those of the actual operands, convert the operands. It would
564 seem that we don't need to convert CONST_INTs, but we do, so
565 that they're properly zero-extended, sign-extended or truncated
566 for their mode. */
567
568 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
569 xop0 = convert_modes (xmode0,
570 GET_MODE (op0) != VOIDmode
571 ? GET_MODE (op0)
572 : tmode0,
573 xop0, unsignedp);
574
575 if (op1) 292 if (op1)
576 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode) 293 create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
577 xop1 = convert_modes (xmode1,
578 GET_MODE (op1) != VOIDmode
579 ? GET_MODE (op1)
580 : tmode1,
581 xop1, unsignedp);
582
583 if (wide_op) 294 if (wide_op)
584 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode) 295 create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
585 wxop = convert_modes (wxmode, 296 expand_insn (icode, op, eops);
586 GET_MODE (wide_op) != VOIDmode 297 return eops[0].value;
587 ? GET_MODE (wide_op)
588 : wmode,
589 wxop, unsignedp);
590
591 /* Now, if insn's predicates don't allow our operands, put them into
592 pseudo regs. */
593
594 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
595 && xmode0 != VOIDmode)
596 xop0 = copy_to_mode_reg (xmode0, xop0);
597
598 if (op1)
599 {
600 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
601 && xmode1 != VOIDmode)
602 xop1 = copy_to_mode_reg (xmode1, xop1);
603
604 if (wide_op)
605 {
606 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
607 && wxmode != VOIDmode)
608 wxop = copy_to_mode_reg (wxmode, wxop);
609
610 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
611 }
612 else
613 pat = GEN_FCN (icode) (temp, xop0, xop1);
614 }
615 else
616 {
617 if (wide_op)
618 {
619 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
620 && wxmode != VOIDmode)
621 wxop = copy_to_mode_reg (wxmode, wxop);
622
623 pat = GEN_FCN (icode) (temp, xop0, wxop);
624 }
625 else
626 pat = GEN_FCN (icode) (temp, xop0);
627 }
628
629 emit_insn (pat);
630 return temp;
631 } 298 }
632 299
633 /* Generate code to perform an operation specified by TERNARY_OPTAB 300 /* Generate code to perform an operation specified by TERNARY_OPTAB
634 on operands OP0, OP1 and OP2, with result having machine-mode MODE. 301 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
635 302
640 is generated there, if it is convenient to do so. 307 is generated there, if it is convenient to do so.
641 In all cases an rtx is returned for the locus of the value; 308 In all cases an rtx is returned for the locus of the value;
642 this may or may not be TARGET. */ 309 this may or may not be TARGET. */
643 310
644 rtx 311 rtx
645 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0, 312 expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
646 rtx op1, rtx op2, rtx target, int unsignedp) 313 rtx op1, rtx op2, rtx target, int unsignedp)
647 { 314 {
648 int icode = (int) optab_handler (ternary_optab, mode); 315 struct expand_operand ops[4];
649 enum machine_mode mode0 = insn_data[icode].operand[1].mode; 316 enum insn_code icode = optab_handler (ternary_optab, mode);
650 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
651 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
652 rtx temp;
653 rtx pat;
654 rtx xop0 = op0, xop1 = op1, xop2 = op2;
655 317
656 gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing); 318 gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);
657 319
658 if (!target || !insn_data[icode].operand[0].predicate (target, mode)) 320 create_output_operand (&ops[0], target, mode);
659 temp = gen_reg_rtx (mode); 321 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
660 else 322 create_convert_operand_from (&ops[2], op1, mode, unsignedp);
661 temp = target; 323 create_convert_operand_from (&ops[3], op2, mode, unsignedp);
662 324 expand_insn (icode, 4, ops);
663 /* In case the insn wants input operands in modes different from 325 return ops[0].value;
664 those of the actual operands, convert the operands. It would
665 seem that we don't need to convert CONST_INTs, but we do, so
666 that they're properly zero-extended, sign-extended or truncated
667 for their mode. */
668
669 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
670 xop0 = convert_modes (mode0,
671 GET_MODE (op0) != VOIDmode
672 ? GET_MODE (op0)
673 : mode,
674 xop0, unsignedp);
675
676 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
677 xop1 = convert_modes (mode1,
678 GET_MODE (op1) != VOIDmode
679 ? GET_MODE (op1)
680 : mode,
681 xop1, unsignedp);
682
683 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
684 xop2 = convert_modes (mode2,
685 GET_MODE (op2) != VOIDmode
686 ? GET_MODE (op2)
687 : mode,
688 xop2, unsignedp);
689
690 /* Now, if insn's predicates don't allow our operands, put them into
691 pseudo regs. */
692
693 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
694 && mode0 != VOIDmode)
695 xop0 = copy_to_mode_reg (mode0, xop0);
696
697 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
698 && mode1 != VOIDmode)
699 xop1 = copy_to_mode_reg (mode1, xop1);
700
701 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
702 && mode2 != VOIDmode)
703 xop2 = copy_to_mode_reg (mode2, xop2);
704
705 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
706
707 emit_insn (pat);
708 return temp;
709 } 326 }
710 327
711 328
712 /* Like expand_binop, but return a constant rtx if the result can be 329 /* Like expand_binop, but return a constant rtx if the result can be
713 calculated at compile time. The arguments and return value are 330 calculated at compile time. The arguments and return value are
714 otherwise the same as for expand_binop. */ 331 otherwise the same as for expand_binop. */
715 332
716 static rtx 333 rtx
717 simplify_expand_binop (enum machine_mode mode, optab binoptab, 334 simplify_expand_binop (machine_mode mode, optab binoptab,
718 rtx op0, rtx op1, rtx target, int unsignedp, 335 rtx op0, rtx op1, rtx target, int unsignedp,
719 enum optab_methods methods) 336 enum optab_methods methods)
720 { 337 {
721 if (CONSTANT_P (op0) && CONSTANT_P (op1)) 338 if (CONSTANT_P (op0) && CONSTANT_P (op1))
722 { 339 {
723 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1); 340 rtx x = simplify_binary_operation (optab_to_code (binoptab),
724 341 mode, op0, op1);
725 if (x) 342 if (x)
726 return x; 343 return x;
727 } 344 }
728 345
729 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods); 346 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
731 348
732 /* Like simplify_expand_binop, but always put the result in TARGET. 349 /* Like simplify_expand_binop, but always put the result in TARGET.
733 Return true if the expansion succeeded. */ 350 Return true if the expansion succeeded. */
734 351
735 bool 352 bool
736 force_expand_binop (enum machine_mode mode, optab binoptab, 353 force_expand_binop (machine_mode mode, optab binoptab,
737 rtx op0, rtx op1, rtx target, int unsignedp, 354 rtx op0, rtx op1, rtx target, int unsignedp,
738 enum optab_methods methods) 355 enum optab_methods methods)
739 { 356 {
740 rtx x = simplify_expand_binop (mode, binoptab, op0, op1, 357 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
741 target, unsignedp, methods); 358 target, unsignedp, methods);
744 if (x != target) 361 if (x != target)
745 emit_move_insn (target, x); 362 emit_move_insn (target, x);
746 return true; 363 return true;
747 } 364 }
748 365
749 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */ 366 /* Create a new vector value in VMODE with all elements set to OP. The
750 367 mode of OP must be the element mode of VMODE. If OP is a constant,
751 rtx 368 then the return value will be a constant. */
752 expand_vec_shift_expr (sepops ops, rtx target) 369
370 static rtx
371 expand_vector_broadcast (machine_mode vmode, rtx op)
753 { 372 {
754 enum insn_code icode; 373 enum insn_code icode;
755 rtx rtx_op1, rtx_op2; 374 rtvec vec;
756 enum machine_mode mode1; 375 rtx ret;
757 enum machine_mode mode2; 376 int i, n;
758 enum machine_mode mode = TYPE_MODE (ops->type); 377
759 tree vec_oprnd = ops->op0; 378 gcc_checking_assert (VECTOR_MODE_P (vmode));
760 tree shift_oprnd = ops->op1; 379
761 optab shift_optab; 380 n = GET_MODE_NUNITS (vmode);
762 rtx pat; 381 vec = rtvec_alloc (n);
763 382 for (i = 0; i < n; ++i)
764 switch (ops->code) 383 RTVEC_ELT (vec, i) = op;
765 { 384
766 case VEC_RSHIFT_EXPR: 385 if (CONSTANT_P (op))
767 shift_optab = vec_shr_optab; 386 return gen_rtx_CONST_VECTOR (vmode, vec);
768 break; 387
769 case VEC_LSHIFT_EXPR: 388 /* ??? If the target doesn't have a vec_init, then we have no easy way
770 shift_optab = vec_shl_optab; 389 of performing this operation. Most of this sort of generic support
771 break; 390 is hidden away in the vector lowering support in gimple. */
772 default: 391 icode = convert_optab_handler (vec_init_optab, vmode,
773 gcc_unreachable (); 392 GET_MODE_INNER (vmode));
774 } 393 if (icode == CODE_FOR_nothing)
775 394 return NULL;
776 icode = optab_handler (shift_optab, mode); 395
777 gcc_assert (icode != CODE_FOR_nothing); 396 ret = gen_reg_rtx (vmode);
778 397 emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));
779 mode1 = insn_data[icode].operand[1].mode; 398
780 mode2 = insn_data[icode].operand[2].mode; 399 return ret;
781
782 rtx_op1 = expand_normal (vec_oprnd);
783 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
784 && mode1 != VOIDmode)
785 rtx_op1 = force_reg (mode1, rtx_op1);
786
787 rtx_op2 = expand_normal (shift_oprnd);
788 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
789 && mode2 != VOIDmode)
790 rtx_op2 = force_reg (mode2, rtx_op2);
791
792 if (!target
793 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
794 target = gen_reg_rtx (mode);
795
796 /* Emit instruction */
797 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
798 gcc_assert (pat);
799 emit_insn (pat);
800
801 return target;
802 } 400 }
803 401
804 /* This subroutine of expand_doubleword_shift handles the cases in which 402 /* This subroutine of expand_doubleword_shift handles the cases in which
805 the effective shift value is >= BITS_PER_WORD. The arguments and return 403 the effective shift value is >= BITS_PER_WORD. The arguments and return
806 value are the same as for the parent routine, except that SUPERWORD_OP1 404 value are the same as for the parent routine, except that SUPERWORD_OP1
835 /* This subroutine of expand_doubleword_shift handles the cases in which 433 /* This subroutine of expand_doubleword_shift handles the cases in which
836 the effective shift value is < BITS_PER_WORD. The arguments and return 434 the effective shift value is < BITS_PER_WORD. The arguments and return
837 value are the same as for the parent routine. */ 435 value are the same as for the parent routine. */
838 436
839 static bool 437 static bool
840 expand_subword_shift (enum machine_mode op1_mode, optab binoptab, 438 expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
841 rtx outof_input, rtx into_input, rtx op1, 439 rtx outof_input, rtx into_input, rtx op1,
842 rtx outof_target, rtx into_target, 440 rtx outof_target, rtx into_target,
843 int unsignedp, enum optab_methods methods, 441 int unsignedp, enum optab_methods methods,
844 unsigned HOST_WIDE_INT shift_mask) 442 unsigned HOST_WIDE_INT shift_mask)
845 { 443 {
853 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in 451 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
854 the opposite direction to BINOPTAB. */ 452 the opposite direction to BINOPTAB. */
855 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD) 453 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
856 { 454 {
857 carries = outof_input; 455 carries = outof_input;
858 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode); 456 tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
457 op1_mode), op1_mode);
859 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1, 458 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
860 0, true, methods); 459 0, true, methods);
861 } 460 }
862 else 461 else
863 { 462 {
868 are truncated to the mode size. */ 467 are truncated to the mode size. */
869 carries = expand_binop (word_mode, reverse_unsigned_shift, 468 carries = expand_binop (word_mode, reverse_unsigned_shift,
870 outof_input, const1_rtx, 0, unsignedp, methods); 469 outof_input, const1_rtx, 0, unsignedp, methods);
871 if (shift_mask == BITS_PER_WORD - 1) 470 if (shift_mask == BITS_PER_WORD - 1)
872 { 471 {
873 tmp = immed_double_const (-1, -1, op1_mode); 472 tmp = immed_wide_int_const
473 (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
874 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp, 474 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
875 0, true, methods); 475 0, true, methods);
876 } 476 }
877 else 477 else
878 { 478 {
879 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode); 479 tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
480 op1_mode), op1_mode);
880 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1, 481 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
881 0, true, methods); 482 0, true, methods);
882 } 483 }
883 } 484 }
884 if (tmp == 0 || carries == 0) 485 if (tmp == 0 || carries == 0)
908 509
909 return true; 510 return true;
910 } 511 }
911 512
912 513
913 #ifdef HAVE_conditional_move
914 /* Try implementing expand_doubleword_shift using conditional moves. 514 /* Try implementing expand_doubleword_shift using conditional moves.
915 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true, 515 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
916 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1 516 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
917 are the shift counts to use in the former and latter case. All other 517 are the shift counts to use in the former and latter case. All other
918 arguments are the same as the parent routine. */ 518 arguments are the same as the parent routine. */
919 519
920 static bool 520 static bool
921 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab, 521 expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab,
922 enum rtx_code cmp_code, rtx cmp1, rtx cmp2, 522 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
923 rtx outof_input, rtx into_input, 523 rtx outof_input, rtx into_input,
924 rtx subword_op1, rtx superword_op1, 524 rtx subword_op1, rtx superword_op1,
925 rtx outof_target, rtx into_target, 525 rtx outof_target, rtx into_target,
926 int unsignedp, enum optab_methods methods, 526 int unsignedp, enum optab_methods methods,
968 word_mode, false)) 568 word_mode, false))
969 return false; 569 return false;
970 570
971 return true; 571 return true;
972 } 572 }
973 #endif
974 573
975 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts. 574 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
976 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first 575 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
977 input operand; the shift moves bits in the direction OUTOF_INPUT-> 576 input operand; the shift moves bits in the direction OUTOF_INPUT->
978 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words 577 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
1000 function wants to calculate it itself. 599 function wants to calculate it itself.
1001 600
1002 Return true if the shift could be successfully synthesized. */ 601 Return true if the shift could be successfully synthesized. */
1003 602
1004 static bool 603 static bool
1005 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab, 604 expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab,
1006 rtx outof_input, rtx into_input, rtx op1, 605 rtx outof_input, rtx into_input, rtx op1,
1007 rtx outof_target, rtx into_target, 606 rtx outof_target, rtx into_target,
1008 int unsignedp, enum optab_methods methods, 607 int unsignedp, enum optab_methods methods,
1009 unsigned HOST_WIDE_INT shift_mask) 608 unsigned HOST_WIDE_INT shift_mask)
1010 { 609 {
1011 rtx superword_op1, tmp, cmp1, cmp2; 610 rtx superword_op1, tmp, cmp1, cmp2;
1012 rtx subword_label, done_label;
1013 enum rtx_code cmp_code; 611 enum rtx_code cmp_code;
1014 612
1015 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will 613 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
1016 fill the result with sign or zero bits as appropriate. If so, the value 614 fill the result with sign or zero bits as appropriate. If so, the value
1017 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call 615 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
1037 635
1038 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2) 636 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
1039 is true when the effective shift value is less than BITS_PER_WORD. 637 is true when the effective shift value is less than BITS_PER_WORD.
1040 Set SUPERWORD_OP1 to the shift count that should be used to shift 638 Set SUPERWORD_OP1 to the shift count that should be used to shift
1041 OUTOF_INPUT into INTO_TARGET when the condition is false. */ 639 OUTOF_INPUT into INTO_TARGET when the condition is false. */
1042 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode); 640 tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
1043 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1) 641 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
1044 { 642 {
1045 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1 643 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
1046 is a subword shift count. */ 644 is a subword shift count. */
1047 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp, 645 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
1076 outof_input, into_input, op1, 674 outof_input, into_input, op1,
1077 outof_target, into_target, 675 outof_target, into_target,
1078 unsignedp, methods, shift_mask); 676 unsignedp, methods, shift_mask);
1079 } 677 }
1080 678
1081 #ifdef HAVE_conditional_move
1082 /* Try using conditional moves to generate straight-line code. */ 679 /* Try using conditional moves to generate straight-line code. */
1083 { 680 if (HAVE_conditional_move)
1084 rtx start = get_last_insn (); 681 {
1085 if (expand_doubleword_shift_condmove (op1_mode, binoptab, 682 rtx_insn *start = get_last_insn ();
1086 cmp_code, cmp1, cmp2, 683 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1087 outof_input, into_input, 684 cmp_code, cmp1, cmp2,
1088 op1, superword_op1, 685 outof_input, into_input,
1089 outof_target, into_target, 686 op1, superword_op1,
1090 unsignedp, methods, shift_mask)) 687 outof_target, into_target,
1091 return true; 688 unsignedp, methods, shift_mask))
1092 delete_insns_since (start); 689 return true;
1093 } 690 delete_insns_since (start);
1094 #endif 691 }
1095 692
1096 /* As a last resort, use branches to select the correct alternative. */ 693 /* As a last resort, use branches to select the correct alternative. */
1097 subword_label = gen_label_rtx (); 694 rtx_code_label *subword_label = gen_label_rtx ();
1098 done_label = gen_label_rtx (); 695 rtx_code_label *done_label = gen_label_rtx ();
1099 696
1100 NO_DEFER_POP; 697 NO_DEFER_POP;
1101 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode, 698 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1102 0, 0, subword_label, -1); 699 0, 0, subword_label,
700 profile_probability::uninitialized ());
1103 OK_DEFER_POP; 701 OK_DEFER_POP;
1104 702
1105 if (!expand_superword_shift (binoptab, outof_input, superword_op1, 703 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1106 outof_target, into_target, 704 outof_target, into_target,
1107 unsignedp, methods)) 705 unsignedp, methods))
1108 return false; 706 return false;
1109 707
1110 emit_jump_insn (gen_jump (done_label)); 708 emit_jump_insn (targetm.gen_jump (done_label));
1111 emit_barrier (); 709 emit_barrier ();
1112 emit_label (subword_label); 710 emit_label (subword_label);
1113 711
1114 if (!expand_subword_shift (op1_mode, binoptab, 712 if (!expand_subword_shift (op1_mode, binoptab,
1115 outof_input, into_input, op1, 713 outof_input, into_input, op1,
1175 op0_high (op1_high) before it is used to calculate 2b (2a). If no 773 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1176 logical shift exists, we do an arithmetic right shift and subtract 774 logical shift exists, we do an arithmetic right shift and subtract
1177 the 0 or -1. */ 775 the 0 or -1. */
1178 776
1179 static rtx 777 static rtx
1180 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target, 778 expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
1181 bool umulp, enum optab_methods methods) 779 bool umulp, enum optab_methods methods)
1182 { 780 {
1183 int low = (WORDS_BIG_ENDIAN ? 1 : 0); 781 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1184 int high = (WORDS_BIG_ENDIAN ? 0 : 1); 782 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1185 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1); 783 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1283 881
1284 /* Wrapper around expand_binop which takes an rtx code to specify 882 /* Wrapper around expand_binop which takes an rtx code to specify
1285 the operation to perform, not an optab pointer. All other 883 the operation to perform, not an optab pointer. All other
1286 arguments are the same. */ 884 arguments are the same. */
1287 rtx 885 rtx
1288 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0, 886 expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
1289 rtx op1, rtx target, int unsignedp, 887 rtx op1, rtx target, int unsignedp,
1290 enum optab_methods methods) 888 enum optab_methods methods)
1291 { 889 {
1292 optab binop = code_to_optab[(int) code]; 890 optab binop = code_to_optab (code);
1293 gcc_assert (binop); 891 gcc_assert (binop);
1294 892
1295 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods); 893 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1296 } 894 }
1297 895
1321 /* Return true if BINOPTAB implements a shift operation. */ 919 /* Return true if BINOPTAB implements a shift operation. */
1322 920
1323 static bool 921 static bool
1324 shift_optab_p (optab binoptab) 922 shift_optab_p (optab binoptab)
1325 { 923 {
1326 switch (binoptab->code) 924 switch (optab_to_code (binoptab))
1327 { 925 {
1328 case ASHIFT: 926 case ASHIFT:
1329 case SS_ASHIFT: 927 case SS_ASHIFT:
1330 case US_ASHIFT: 928 case US_ASHIFT:
1331 case ASHIFTRT: 929 case ASHIFTRT:
1342 /* Return true if BINOPTAB implements a commutative binary operation. */ 940 /* Return true if BINOPTAB implements a commutative binary operation. */
1343 941
1344 static bool 942 static bool
1345 commutative_optab_p (optab binoptab) 943 commutative_optab_p (optab binoptab)
1346 { 944 {
1347 return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH 945 return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
1348 || binoptab == smul_widen_optab 946 || binoptab == smul_widen_optab
1349 || binoptab == umul_widen_optab 947 || binoptab == umul_widen_optab
1350 || binoptab == smul_highpart_optab 948 || binoptab == smul_highpart_optab
1351 || binoptab == umul_highpart_optab); 949 || binoptab == umul_highpart_optab);
1352 } 950 }
1353 951
1354 /* X is to be used in mode MODE as an operand to BINOPTAB. If we're 952 /* X is to be used in mode MODE as operand OPN to BINOPTAB. If we're
1355 optimizing, and if the operand is a constant that costs more than 953 optimizing, and if the operand is a constant that costs more than
1356 1 instruction, force the constant into a register and return that 954 1 instruction, force the constant into a register and return that
1357 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */ 955 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1358 956
1359 static rtx 957 static rtx
1360 avoid_expensive_constant (enum machine_mode mode, optab binoptab, 958 avoid_expensive_constant (machine_mode mode, optab binoptab,
1361 rtx x, bool unsignedp) 959 int opn, rtx x, bool unsignedp)
1362 { 960 {
1363 bool speed = optimize_insn_for_speed_p (); 961 bool speed = optimize_insn_for_speed_p ();
1364 962
1365 if (mode != VOIDmode 963 if (mode != VOIDmode
1366 && optimize 964 && optimize
1367 && CONSTANT_P (x) 965 && CONSTANT_P (x)
1368 && rtx_cost (x, binoptab->code, speed) > rtx_cost (x, SET, speed)) 966 && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
967 > set_src_cost (x, mode, speed)))
1369 { 968 {
1370 if (CONST_INT_P (x)) 969 if (CONST_INT_P (x))
1371 { 970 {
1372 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode); 971 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
1373 if (intval != INTVAL (x)) 972 if (intval != INTVAL (x))
1382 981
1383 /* Helper function for expand_binop: handle the case where there 982 /* Helper function for expand_binop: handle the case where there
1384 is an insn that directly implements the indicated operation. 983 is an insn that directly implements the indicated operation.
1385 Returns null if this is not possible. */ 984 Returns null if this is not possible. */
1386 static rtx 985 static rtx
1387 expand_binop_directly (enum machine_mode mode, optab binoptab, 986 expand_binop_directly (machine_mode mode, optab binoptab,
1388 rtx op0, rtx op1, 987 rtx op0, rtx op1,
1389 rtx target, int unsignedp, enum optab_methods methods, 988 rtx target, int unsignedp, enum optab_methods methods,
1390 rtx last) 989 rtx_insn *last)
1391 { 990 {
1392 int icode = (int) optab_handler (binoptab, mode); 991 machine_mode from_mode = widened_mode (mode, op0, op1);
1393 enum machine_mode mode0 = insn_data[icode].operand[1].mode; 992 enum insn_code icode = find_widening_optab_handler (binoptab, mode,
1394 enum machine_mode mode1 = insn_data[icode].operand[2].mode; 993 from_mode, 1);
1395 enum machine_mode tmp_mode; 994 machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
995 machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
996 machine_mode mode0, mode1, tmp_mode;
997 struct expand_operand ops[3];
1396 bool commutative_p; 998 bool commutative_p;
1397 rtx pat; 999 rtx_insn *pat;
1398 rtx xop0 = op0, xop1 = op1; 1000 rtx xop0 = op0, xop1 = op1;
1399 rtx temp; 1001 bool canonicalize_op1 = false;
1400 rtx swap;
1401
1402 if (target)
1403 temp = target;
1404 else
1405 temp = gen_reg_rtx (mode);
1406 1002
1407 /* If it is a commutative operator and the modes would match 1003 /* If it is a commutative operator and the modes would match
1408 if we would swap the operands, we can save the conversions. */ 1004 if we would swap the operands, we can save the conversions. */
1409 commutative_p = commutative_optab_p (binoptab); 1005 commutative_p = commutative_optab_p (binoptab);
1410 if (commutative_p 1006 if (commutative_p
1411 && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1 1007 && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
1412 && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode1) 1008 && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode1)
1413 { 1009 std::swap (xop0, xop1);
1414 swap = xop0;
1415 xop0 = xop1;
1416 xop1 = swap;
1417 }
1418 1010
1419 /* If we are optimizing, force expensive constants into a register. */ 1011 /* If we are optimizing, force expensive constants into a register. */
1420 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp); 1012 xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
1421 if (!shift_optab_p (binoptab)) 1013 if (!shift_optab_p (binoptab))
1422 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp); 1014 xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
1015 else
1016 /* Shifts and rotates often use a different mode for op1 from op0;
1017 for VOIDmode constants we don't know the mode, so force it
1018 to be canonicalized using convert_modes. */
1019 canonicalize_op1 = true;
1423 1020
1424 /* In case the insn wants input operands in modes different from 1021 /* In case the insn wants input operands in modes different from
1425 those of the actual operands, convert the operands. It would 1022 those of the actual operands, convert the operands. It would
1426 seem that we don't need to convert CONST_INTs, but we do, so 1023 seem that we don't need to convert CONST_INTs, but we do, so
1427 that they're properly zero-extended, sign-extended or truncated 1024 that they're properly zero-extended, sign-extended or truncated
1428 for their mode. */ 1025 for their mode. */
1429 1026
1430 if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode) 1027 mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
1431 xop0 = convert_modes (mode0, 1028 if (xmode0 != VOIDmode && xmode0 != mode0)
1432 GET_MODE (xop0) != VOIDmode 1029 {
1433 ? GET_MODE (xop0) 1030 xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
1434 : mode, 1031 mode0 = xmode0;
1435 xop0, unsignedp); 1032 }
1436 1033
1437 if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode) 1034 mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
1438 xop1 = convert_modes (mode1, 1035 ? GET_MODE (xop1) : mode);
1439 GET_MODE (xop1) != VOIDmode 1036 if (xmode1 != VOIDmode && xmode1 != mode1)
1440 ? GET_MODE (xop1) 1037 {
1441 : mode, 1038 xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
1442 xop1, unsignedp); 1039 mode1 = xmode1;
1040 }
1443 1041
1444 /* If operation is commutative, 1042 /* If operation is commutative,
1445 try to make the first operand a register. 1043 try to make the first operand a register.
1446 Even better, try to make it the same as the target. 1044 Even better, try to make it the same as the target.
1447 Also try to make the last operand a constant. */ 1045 Also try to make the last operand a constant. */
1448 if (commutative_p 1046 if (commutative_p
1449 && swap_commutative_operands_with_target (target, xop0, xop1)) 1047 && swap_commutative_operands_with_target (target, xop0, xop1))
1450 { 1048 std::swap (xop0, xop1);
1451 swap = xop1;
1452 xop1 = xop0;
1453 xop0 = swap;
1454 }
1455 1049
1456 /* Now, if insn's predicates don't allow our operands, put them into 1050 /* Now, if insn's predicates don't allow our operands, put them into
1457 pseudo regs. */ 1051 pseudo regs. */
1458
1459 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1460 && mode0 != VOIDmode)
1461 xop0 = copy_to_mode_reg (mode0, xop0);
1462
1463 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1464 && mode1 != VOIDmode)
1465 xop1 = copy_to_mode_reg (mode1, xop1);
1466 1052
1467 if (binoptab == vec_pack_trunc_optab 1053 if (binoptab == vec_pack_trunc_optab
1468 || binoptab == vec_pack_usat_optab 1054 || binoptab == vec_pack_usat_optab
1469 || binoptab == vec_pack_ssat_optab 1055 || binoptab == vec_pack_ssat_optab
1470 || binoptab == vec_pack_ufix_trunc_optab 1056 || binoptab == vec_pack_ufix_trunc_optab
1471 || binoptab == vec_pack_sfix_trunc_optab) 1057 || binoptab == vec_pack_sfix_trunc_optab)
1472 { 1058 {
1473 /* The mode of the result is different then the mode of the 1059 /* The mode of the result is different then the mode of the
1474 arguments. */ 1060 arguments. */
1475 tmp_mode = insn_data[icode].operand[0].mode; 1061 tmp_mode = insn_data[(int) icode].operand[0].mode;
1476 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode)) 1062 if (VECTOR_MODE_P (mode)
1477 return 0; 1063 && GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1064 {
1065 delete_insns_since (last);
1066 return NULL_RTX;
1067 }
1478 } 1068 }
1479 else 1069 else
1480 tmp_mode = mode; 1070 tmp_mode = mode;
1481 1071
1482 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode)) 1072 create_output_operand (&ops[0], target, tmp_mode);
1483 temp = gen_reg_rtx (tmp_mode); 1073 create_input_operand (&ops[1], xop0, mode0);
1484 1074 create_input_operand (&ops[2], xop1, mode1);
1485 pat = GEN_FCN (icode) (temp, xop0, xop1); 1075 pat = maybe_gen_insn (icode, 3, ops);
1486 if (pat) 1076 if (pat)
1487 { 1077 {
1488 /* If PAT is composed of more than one insn, try to add an appropriate 1078 /* If PAT is composed of more than one insn, try to add an appropriate
1489 REG_EQUAL note to it. If we can't because TEMP conflicts with an 1079 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1490 operand, call expand_binop again, this time without a target. */ 1080 operand, call expand_binop again, this time without a target. */
1491 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX 1081 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1492 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1)) 1082 && ! add_equal_note (pat, ops[0].value,
1083 optab_to_code (binoptab),
1084 ops[1].value, ops[2].value))
1493 { 1085 {
1494 delete_insns_since (last); 1086 delete_insns_since (last);
1495 return expand_binop (mode, binoptab, op0, op1, NULL_RTX, 1087 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1496 unsignedp, methods); 1088 unsignedp, methods);
1497 } 1089 }
1498 1090
1499 emit_insn (pat); 1091 emit_insn (pat);
1500 return temp; 1092 return ops[0].value;
1501 } 1093 }
1502
1503 delete_insns_since (last); 1094 delete_insns_since (last);
1504 return NULL_RTX; 1095 return NULL_RTX;
1505 } 1096 }
1506 1097
1507 /* Generate code to perform an operation specified by BINOPTAB 1098 /* Generate code to perform an operation specified by BINOPTAB
1514 is generated there, if it is convenient to do so. 1105 is generated there, if it is convenient to do so.
1515 In all cases an rtx is returned for the locus of the value; 1106 In all cases an rtx is returned for the locus of the value;
1516 this may or may not be TARGET. */ 1107 this may or may not be TARGET. */
1517 1108
1518 rtx 1109 rtx
1519 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1, 1110 expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
1520 rtx target, int unsignedp, enum optab_methods methods) 1111 rtx target, int unsignedp, enum optab_methods methods)
1521 { 1112 {
1522 enum optab_methods next_methods 1113 enum optab_methods next_methods
1523 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN 1114 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1524 ? OPTAB_WIDEN : methods); 1115 ? OPTAB_WIDEN : methods);
1525 enum mode_class mclass; 1116 enum mode_class mclass;
1526 enum machine_mode wider_mode; 1117 machine_mode wider_mode;
1118 scalar_int_mode int_mode;
1527 rtx libfunc; 1119 rtx libfunc;
1528 rtx temp; 1120 rtx temp;
1529 rtx entry_last = get_last_insn (); 1121 rtx_insn *entry_last = get_last_insn ();
1530 rtx last; 1122 rtx_insn *last;
1531 1123
1532 mclass = GET_MODE_CLASS (mode); 1124 mclass = GET_MODE_CLASS (mode);
1533 1125
1534 /* If subtracting an integer constant, convert this into an addition of 1126 /* If subtracting an integer constant, convert this into an addition of
1535 the negated constant. */ 1127 the negated constant. */
1537 if (binoptab == sub_optab && CONST_INT_P (op1)) 1129 if (binoptab == sub_optab && CONST_INT_P (op1))
1538 { 1130 {
1539 op1 = negate_rtx (mode, op1); 1131 op1 = negate_rtx (mode, op1);
1540 binoptab = add_optab; 1132 binoptab = add_optab;
1541 } 1133 }
1134 /* For shifts, constant invalid op1 might be expanded from different
1135 mode than MODE. As those are invalid, force them to a register
1136 to avoid further problems during expansion. */
1137 else if (CONST_INT_P (op1)
1138 && shift_optab_p (binoptab)
1139 && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
1140 {
1141 op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
1142 op1 = force_reg (GET_MODE_INNER (mode), op1);
1143 }
1542 1144
1543 /* Record where to delete back to if we backtrack. */ 1145 /* Record where to delete back to if we backtrack. */
1544 last = get_last_insn (); 1146 last = get_last_insn ();
1545 1147
1546 /* If we can do it with a three-operand insn, do so. */ 1148 /* If we can do it with a three-operand insn, do so. */
1547 1149
1548 if (methods != OPTAB_MUST_WIDEN 1150 if (methods != OPTAB_MUST_WIDEN
1549 && optab_handler (binoptab, mode) != CODE_FOR_nothing) 1151 && find_widening_optab_handler (binoptab, mode,
1152 widened_mode (mode, op0, op1), 1)
1153 != CODE_FOR_nothing)
1550 { 1154 {
1551 temp = expand_binop_directly (mode, binoptab, op0, op1, target, 1155 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1552 unsignedp, methods, last); 1156 unsignedp, methods, last);
1553 if (temp) 1157 if (temp)
1554 return temp; 1158 return temp;
1558 the other direction before falling back to shifts and bitwise-or. */ 1162 the other direction before falling back to shifts and bitwise-or. */
1559 if (((binoptab == rotl_optab 1163 if (((binoptab == rotl_optab
1560 && optab_handler (rotr_optab, mode) != CODE_FOR_nothing) 1164 && optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
1561 || (binoptab == rotr_optab 1165 || (binoptab == rotr_optab
1562 && optab_handler (rotl_optab, mode) != CODE_FOR_nothing)) 1166 && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
1563 && mclass == MODE_INT) 1167 && is_int_mode (mode, &int_mode))
1564 { 1168 {
1565 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab); 1169 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1566 rtx newop1; 1170 rtx newop1;
1567 unsigned int bits = GET_MODE_BITSIZE (mode); 1171 unsigned int bits = GET_MODE_PRECISION (int_mode);
1568 1172
1569 if (CONST_INT_P (op1)) 1173 if (CONST_INT_P (op1))
1570 newop1 = GEN_INT (bits - INTVAL (op1)); 1174 newop1 = GEN_INT (bits - INTVAL (op1));
1571 else if (targetm.shift_truncation_mask (mode) == bits - 1) 1175 else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
1572 newop1 = negate_rtx (GET_MODE (op1), op1); 1176 newop1 = negate_rtx (GET_MODE (op1), op1);
1573 else 1177 else
1574 newop1 = expand_binop (GET_MODE (op1), sub_optab, 1178 newop1 = expand_binop (GET_MODE (op1), sub_optab,
1575 GEN_INT (bits), op1, 1179 gen_int_mode (bits, GET_MODE (op1)), op1,
1576 NULL_RTX, unsignedp, OPTAB_DIRECT); 1180 NULL_RTX, unsignedp, OPTAB_DIRECT);
1577 1181
1578 temp = expand_binop_directly (mode, otheroptab, op0, newop1, 1182 temp = expand_binop_directly (int_mode, otheroptab, op0, newop1,
1579 target, unsignedp, methods, last); 1183 target, unsignedp, methods, last);
1580 if (temp) 1184 if (temp)
1581 return temp; 1185 return temp;
1582 } 1186 }
1583 1187
1584 /* If this is a multiply, see if we can do a widening operation that 1188 /* If this is a multiply, see if we can do a widening operation that
1585 takes operands of this mode and makes a wider mode. */ 1189 takes operands of this mode and makes a wider mode. */
1586 1190
1587 if (binoptab == smul_optab 1191 if (binoptab == smul_optab
1588 && GET_MODE_WIDER_MODE (mode) != VOIDmode 1192 && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
1589 && (optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab), 1193 && (convert_optab_handler ((unsignedp
1590 GET_MODE_WIDER_MODE (mode)) 1194 ? umul_widen_optab
1591 != CODE_FOR_nothing)) 1195 : smul_widen_optab),
1592 { 1196 wider_mode, mode) != CODE_FOR_nothing))
1593 temp = expand_binop (GET_MODE_WIDER_MODE (mode), 1197 {
1198 temp = expand_binop (wider_mode,
1594 unsignedp ? umul_widen_optab : smul_widen_optab, 1199 unsignedp ? umul_widen_optab : smul_widen_optab,
1595 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT); 1200 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1596 1201
1597 if (temp != 0) 1202 if (temp != 0)
1598 { 1203 {
1599 if (GET_MODE_CLASS (mode) == MODE_INT 1204 if (GET_MODE_CLASS (mode) == MODE_INT
1600 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), 1205 && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
1601 GET_MODE_BITSIZE (GET_MODE (temp))))
1602 return gen_lowpart (mode, temp); 1206 return gen_lowpart (mode, temp);
1603 else 1207 else
1604 return convert_to_mode (mode, temp, unsignedp); 1208 return convert_to_mode (mode, temp, unsignedp);
1605 } 1209 }
1606 } 1210 }
1607 1211
1212 /* If this is a vector shift by a scalar, see if we can do a vector
1213 shift by a vector. If so, broadcast the scalar into a vector. */
1214 if (mclass == MODE_VECTOR_INT)
1215 {
1216 optab otheroptab = unknown_optab;
1217
1218 if (binoptab == ashl_optab)
1219 otheroptab = vashl_optab;
1220 else if (binoptab == ashr_optab)
1221 otheroptab = vashr_optab;
1222 else if (binoptab == lshr_optab)
1223 otheroptab = vlshr_optab;
1224 else if (binoptab == rotl_optab)
1225 otheroptab = vrotl_optab;
1226 else if (binoptab == rotr_optab)
1227 otheroptab = vrotr_optab;
1228
1229 if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
1230 {
1231 /* The scalar may have been extended to be too wide. Truncate
1232 it back to the proper size to fit in the broadcast vector. */
1233 scalar_mode inner_mode = GET_MODE_INNER (mode);
1234 if (!CONST_INT_P (op1)
1235 && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
1236 > GET_MODE_BITSIZE (inner_mode)))
1237 op1 = force_reg (inner_mode,
1238 simplify_gen_unary (TRUNCATE, inner_mode, op1,
1239 GET_MODE (op1)));
1240 rtx vop1 = expand_vector_broadcast (mode, op1);
1241 if (vop1)
1242 {
1243 temp = expand_binop_directly (mode, otheroptab, op0, vop1,
1244 target, unsignedp, methods, last);
1245 if (temp)
1246 return temp;
1247 }
1248 }
1249 }
1250
1608 /* Look for a wider mode of the same class for which we think we 1251 /* Look for a wider mode of the same class for which we think we
1609 can open-code the operation. Check for a widening multiply at the 1252 can open-code the operation. Check for a widening multiply at the
1610 wider mode as well. */ 1253 wider mode as well. */
1611 1254
1612 if (CLASS_HAS_WIDER_MODES_P (mclass) 1255 if (CLASS_HAS_WIDER_MODES_P (mclass)
1613 && methods != OPTAB_DIRECT && methods != OPTAB_LIB) 1256 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1614 for (wider_mode = GET_MODE_WIDER_MODE (mode); 1257 FOR_EACH_WIDER_MODE (wider_mode, mode)
1615 wider_mode != VOIDmode;
1616 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1617 { 1258 {
1259 machine_mode next_mode;
1618 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing 1260 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
1619 || (binoptab == smul_optab 1261 || (binoptab == smul_optab
1620 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode 1262 && GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
1621 && (optab_handler ((unsignedp ? umul_widen_optab 1263 && (find_widening_optab_handler ((unsignedp
1622 : smul_widen_optab), 1264 ? umul_widen_optab
1623 GET_MODE_WIDER_MODE (wider_mode)) 1265 : smul_widen_optab),
1266 next_mode, mode, 0)
1624 != CODE_FOR_nothing))) 1267 != CODE_FOR_nothing)))
1625 { 1268 {
1626 rtx xop0 = op0, xop1 = op1; 1269 rtx xop0 = op0, xop1 = op1;
1627 int no_extend = 0; 1270 int no_extend = 0;
1628 1271
1635 || binoptab == add_optab || binoptab == sub_optab 1278 || binoptab == add_optab || binoptab == sub_optab
1636 || binoptab == smul_optab || binoptab == ashl_optab) 1279 || binoptab == smul_optab || binoptab == ashl_optab)
1637 && mclass == MODE_INT) 1280 && mclass == MODE_INT)
1638 { 1281 {
1639 no_extend = 1; 1282 no_extend = 1;
1640 xop0 = avoid_expensive_constant (mode, binoptab, 1283 xop0 = avoid_expensive_constant (mode, binoptab, 0,
1641 xop0, unsignedp); 1284 xop0, unsignedp);
1642 if (binoptab != ashl_optab) 1285 if (binoptab != ashl_optab)
1643 xop1 = avoid_expensive_constant (mode, binoptab, 1286 xop1 = avoid_expensive_constant (mode, binoptab, 1,
1644 xop1, unsignedp); 1287 xop1, unsignedp);
1645 } 1288 }
1646 1289
1647 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend); 1290 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1648 1291
1653 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX, 1296 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1654 unsignedp, OPTAB_DIRECT); 1297 unsignedp, OPTAB_DIRECT);
1655 if (temp) 1298 if (temp)
1656 { 1299 {
1657 if (mclass != MODE_INT 1300 if (mclass != MODE_INT
1658 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), 1301 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1659 GET_MODE_BITSIZE (wider_mode)))
1660 { 1302 {
1661 if (target == 0) 1303 if (target == 0)
1662 target = gen_reg_rtx (mode); 1304 target = gen_reg_rtx (mode);
1663 convert_move (target, temp, 0); 1305 convert_move (target, temp, 0);
1664 return target; 1306 return target;
1675 try to make the first operand a register. 1317 try to make the first operand a register.
1676 Even better, try to make it the same as the target. 1318 Even better, try to make it the same as the target.
1677 Also try to make the last operand a constant. */ 1319 Also try to make the last operand a constant. */
1678 if (commutative_optab_p (binoptab) 1320 if (commutative_optab_p (binoptab)
1679 && swap_commutative_operands_with_target (target, op0, op1)) 1321 && swap_commutative_operands_with_target (target, op0, op1))
1680 { 1322 std::swap (op0, op1);
1681 temp = op1;
1682 op1 = op0;
1683 op0 = temp;
1684 }
1685 1323
1686 /* These can be done a word at a time. */ 1324 /* These can be done a word at a time. */
1687 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab) 1325 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1688 && mclass == MODE_INT 1326 && is_int_mode (mode, &int_mode)
1689 && GET_MODE_SIZE (mode) > UNITS_PER_WORD 1327 && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
1690 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing) 1328 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1691 { 1329 {
1692 int i; 1330 int i;
1693 rtx insns; 1331 rtx_insn *insns;
1694 1332
1695 /* If TARGET is the same as one of the operands, the REG_EQUAL note 1333 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1696 won't be accurate, so use a new target. */ 1334 won't be accurate, so use a new target. */
1697 if (target == 0 || target == op0 || target == op1) 1335 if (target == 0
1698 target = gen_reg_rtx (mode); 1336 || target == op0
1337 || target == op1
1338 || !valid_multiword_target_p (target))
1339 target = gen_reg_rtx (int_mode);
1699 1340
1700 start_sequence (); 1341 start_sequence ();
1701 1342
1702 /* Do the actual arithmetic. */ 1343 /* Do the actual arithmetic. */
1703 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++) 1344 for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
1704 { 1345 {
1705 rtx target_piece = operand_subword (target, i, 1, mode); 1346 rtx target_piece = operand_subword (target, i, 1, int_mode);
1706 rtx x = expand_binop (word_mode, binoptab, 1347 rtx x = expand_binop (word_mode, binoptab,
1707 operand_subword_force (op0, i, mode), 1348 operand_subword_force (op0, i, int_mode),
1708 operand_subword_force (op1, i, mode), 1349 operand_subword_force (op1, i, int_mode),
1709 target_piece, unsignedp, next_methods); 1350 target_piece, unsignedp, next_methods);
1710 1351
1711 if (x == 0) 1352 if (x == 0)
1712 break; 1353 break;
1713 1354
1716 } 1357 }
1717 1358
1718 insns = get_insns (); 1359 insns = get_insns ();
1719 end_sequence (); 1360 end_sequence ();
1720 1361
1721 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD) 1362 if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
1722 { 1363 {
1723 emit_insn (insns); 1364 emit_insn (insns);
1724 return target; 1365 return target;
1725 } 1366 }
1726 } 1367 }
1727 1368
1728 /* Synthesize double word shifts from single word shifts. */ 1369 /* Synthesize double word shifts from single word shifts. */
1729 if ((binoptab == lshr_optab || binoptab == ashl_optab 1370 if ((binoptab == lshr_optab || binoptab == ashl_optab
1730 || binoptab == ashr_optab) 1371 || binoptab == ashr_optab)
1731 && mclass == MODE_INT 1372 && is_int_mode (mode, &int_mode)
1732 && (CONST_INT_P (op1) || optimize_insn_for_speed_p ()) 1373 && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
1733 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD 1374 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
1375 && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
1734 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing 1376 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
1735 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing 1377 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1736 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing) 1378 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1737 { 1379 {
1738 unsigned HOST_WIDE_INT shift_mask, double_shift_mask; 1380 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1739 enum machine_mode op1_mode; 1381 scalar_int_mode op1_mode;
1740 1382
1741 double_shift_mask = targetm.shift_truncation_mask (mode); 1383 double_shift_mask = targetm.shift_truncation_mask (int_mode);
1742 shift_mask = targetm.shift_truncation_mask (word_mode); 1384 shift_mask = targetm.shift_truncation_mask (word_mode);
1743 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode; 1385 op1_mode = (GET_MODE (op1) != VOIDmode
1386 ? as_a <scalar_int_mode> (GET_MODE (op1))
1387 : word_mode);
1744 1388
1745 /* Apply the truncation to constant shifts. */ 1389 /* Apply the truncation to constant shifts. */
1746 if (double_shift_mask > 0 && CONST_INT_P (op1)) 1390 if (double_shift_mask > 0 && CONST_INT_P (op1))
1747 op1 = GEN_INT (INTVAL (op1) & double_shift_mask); 1391 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1748 1392
1753 can handle. See the comments there for details. */ 1397 can handle. See the comments there for details. */
1754 if (double_shift_mask == 0 1398 if (double_shift_mask == 0
1755 || (shift_mask == BITS_PER_WORD - 1 1399 || (shift_mask == BITS_PER_WORD - 1
1756 && double_shift_mask == BITS_PER_WORD * 2 - 1)) 1400 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1757 { 1401 {
1758 rtx insns; 1402 rtx_insn *insns;
1759 rtx into_target, outof_target; 1403 rtx into_target, outof_target;
1760 rtx into_input, outof_input; 1404 rtx into_input, outof_input;
1761 int left_shift, outof_word; 1405 int left_shift, outof_word;
1762 1406
1763 /* If TARGET is the same as one of the operands, the REG_EQUAL note 1407 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1764 won't be accurate, so use a new target. */ 1408 won't be accurate, so use a new target. */
1765 if (target == 0 || target == op0 || target == op1) 1409 if (target == 0
1766 target = gen_reg_rtx (mode); 1410 || target == op0
1411 || target == op1
1412 || !valid_multiword_target_p (target))
1413 target = gen_reg_rtx (int_mode);
1767 1414
1768 start_sequence (); 1415 start_sequence ();
1769 1416
1770 /* OUTOF_* is the word we are shifting bits away from, and 1417 /* OUTOF_* is the word we are shifting bits away from, and
1771 INTO_* is the word that we are shifting bits towards, thus 1418 INTO_* is the word that we are shifting bits towards, thus
1773 WORDS_BIG_ENDIAN. */ 1420 WORDS_BIG_ENDIAN. */
1774 1421
1775 left_shift = binoptab == ashl_optab; 1422 left_shift = binoptab == ashl_optab;
1776 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN; 1423 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1777 1424
1778 outof_target = operand_subword (target, outof_word, 1, mode); 1425 outof_target = operand_subword (target, outof_word, 1, int_mode);
1779 into_target = operand_subword (target, 1 - outof_word, 1, mode); 1426 into_target = operand_subword (target, 1 - outof_word, 1, int_mode);
1780 1427
1781 outof_input = operand_subword_force (op0, outof_word, mode); 1428 outof_input = operand_subword_force (op0, outof_word, int_mode);
1782 into_input = operand_subword_force (op0, 1 - outof_word, mode); 1429 into_input = operand_subword_force (op0, 1 - outof_word, int_mode);
1783 1430
1784 if (expand_doubleword_shift (op1_mode, binoptab, 1431 if (expand_doubleword_shift (op1_mode, binoptab,
1785 outof_input, into_input, op1, 1432 outof_input, into_input, op1,
1786 outof_target, into_target, 1433 outof_target, into_target,
1787 unsignedp, next_methods, shift_mask)) 1434 unsignedp, next_methods, shift_mask))
1796 } 1443 }
1797 } 1444 }
1798 1445
1799 /* Synthesize double word rotates from single word shifts. */ 1446 /* Synthesize double word rotates from single word shifts. */
1800 if ((binoptab == rotl_optab || binoptab == rotr_optab) 1447 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1801 && mclass == MODE_INT 1448 && is_int_mode (mode, &int_mode)
1802 && CONST_INT_P (op1) 1449 && CONST_INT_P (op1)
1803 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD 1450 && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
1804 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing 1451 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1805 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing) 1452 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1806 { 1453 {
1807 rtx insns; 1454 rtx_insn *insns;
1808 rtx into_target, outof_target; 1455 rtx into_target, outof_target;
1809 rtx into_input, outof_input; 1456 rtx into_input, outof_input;
1810 rtx inter; 1457 rtx inter;
1811 int shift_count, left_shift, outof_word; 1458 int shift_count, left_shift, outof_word;
1812 1459
1814 won't be accurate, so use a new target. Do this also if target is not 1461 won't be accurate, so use a new target. Do this also if target is not
1815 a REG, first because having a register instead may open optimization 1462 a REG, first because having a register instead may open optimization
1816 opportunities, and second because if target and op0 happen to be MEMs 1463 opportunities, and second because if target and op0 happen to be MEMs
1817 designating the same location, we would risk clobbering it too early 1464 designating the same location, we would risk clobbering it too early
1818 in the code sequence we generate below. */ 1465 in the code sequence we generate below. */
1819 if (target == 0 || target == op0 || target == op1 || ! REG_P (target)) 1466 if (target == 0
1820 target = gen_reg_rtx (mode); 1467 || target == op0
1468 || target == op1
1469 || !REG_P (target)
1470 || !valid_multiword_target_p (target))
1471 target = gen_reg_rtx (int_mode);
1821 1472
1822 start_sequence (); 1473 start_sequence ();
1823 1474
1824 shift_count = INTVAL (op1); 1475 shift_count = INTVAL (op1);
1825 1476
1829 WORDS_BIG_ENDIAN. */ 1480 WORDS_BIG_ENDIAN. */
1830 1481
1831 left_shift = (binoptab == rotl_optab); 1482 left_shift = (binoptab == rotl_optab);
1832 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN; 1483 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1833 1484
1834 outof_target = operand_subword (target, outof_word, 1, mode); 1485 outof_target = operand_subword (target, outof_word, 1, int_mode);
1835 into_target = operand_subword (target, 1 - outof_word, 1, mode); 1486 into_target = operand_subword (target, 1 - outof_word, 1, int_mode);
1836 1487
1837 outof_input = operand_subword_force (op0, outof_word, mode); 1488 outof_input = operand_subword_force (op0, outof_word, int_mode);
1838 into_input = operand_subword_force (op0, 1 - outof_word, mode); 1489 into_input = operand_subword_force (op0, 1 - outof_word, int_mode);
1839 1490
1840 if (shift_count == BITS_PER_WORD) 1491 if (shift_count == BITS_PER_WORD)
1841 { 1492 {
1842 /* This is just a word swap. */ 1493 /* This is just a word swap. */
1843 emit_move_insn (outof_target, into_input); 1494 emit_move_insn (outof_target, into_input);
1909 } 1560 }
1910 } 1561 }
1911 1562
1912 /* These can be done a word at a time by propagating carries. */ 1563 /* These can be done a word at a time by propagating carries. */
1913 if ((binoptab == add_optab || binoptab == sub_optab) 1564 if ((binoptab == add_optab || binoptab == sub_optab)
1914 && mclass == MODE_INT 1565 && is_int_mode (mode, &int_mode)
1915 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD 1566 && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
1916 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing) 1567 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1917 { 1568 {
1918 unsigned int i; 1569 unsigned int i;
1919 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab; 1570 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1920 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD; 1571 const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
1921 rtx carry_in = NULL_RTX, carry_out = NULL_RTX; 1572 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1922 rtx xop0, xop1, xtarget; 1573 rtx xop0, xop1, xtarget;
1923 1574
1924 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG 1575 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1925 value is one of those, use it. Otherwise, use 1 since it is the 1576 value is one of those, use it. Otherwise, use 1 since it is the
1929 #else 1580 #else
1930 int normalizep = 1; 1581 int normalizep = 1;
1931 #endif 1582 #endif
1932 1583
1933 /* Prepare the operands. */ 1584 /* Prepare the operands. */
1934 xop0 = force_reg (mode, op0); 1585 xop0 = force_reg (int_mode, op0);
1935 xop1 = force_reg (mode, op1); 1586 xop1 = force_reg (int_mode, op1);
1936 1587
1937 xtarget = gen_reg_rtx (mode); 1588 xtarget = gen_reg_rtx (int_mode);
1938 1589
1939 if (target == 0 || !REG_P (target)) 1590 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1940 target = xtarget; 1591 target = xtarget;
1941 1592
1942 /* Indicate for flow that the entire target reg is being set. */ 1593 /* Indicate for flow that the entire target reg is being set. */
1943 if (REG_P (target)) 1594 if (REG_P (target))
1944 emit_clobber (xtarget); 1595 emit_clobber (xtarget);
1945 1596
1946 /* Do the actual arithmetic. */ 1597 /* Do the actual arithmetic. */
1947 for (i = 0; i < nwords; i++) 1598 for (i = 0; i < nwords; i++)
1948 { 1599 {
1949 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i); 1600 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1950 rtx target_piece = operand_subword (xtarget, index, 1, mode); 1601 rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
1951 rtx op0_piece = operand_subword_force (xop0, index, mode); 1602 rtx op0_piece = operand_subword_force (xop0, index, int_mode);
1952 rtx op1_piece = operand_subword_force (xop1, index, mode); 1603 rtx op1_piece = operand_subword_force (xop1, index, int_mode);
1953 rtx x; 1604 rtx x;
1954 1605
1955 /* Main add/subtract of the input operands. */ 1606 /* Main add/subtract of the input operands. */
1956 x = expand_binop (word_mode, binoptab, 1607 x = expand_binop (word_mode, binoptab,
1957 op0_piece, op1_piece, 1608 op0_piece, op1_piece,
2006 } 1657 }
2007 1658
2008 carry_in = carry_out; 1659 carry_in = carry_out;
2009 } 1660 }
2010 1661
2011 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD) 1662 if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD)
2012 { 1663 {
2013 if (optab_handler (mov_optab, mode) != CODE_FOR_nothing 1664 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing
2014 || ! rtx_equal_p (target, xtarget)) 1665 || ! rtx_equal_p (target, xtarget))
2015 { 1666 {
2016 rtx temp = emit_move_insn (target, xtarget); 1667 rtx_insn *temp = emit_move_insn (target, xtarget);
2017 1668
2018 set_unique_reg_note (temp, 1669 set_dst_reg_note (temp, REG_EQUAL,
2019 REG_EQUAL, 1670 gen_rtx_fmt_ee (optab_to_code (binoptab),
2020 gen_rtx_fmt_ee (binoptab->code, mode, 1671 int_mode, copy_rtx (xop0),
2021 copy_rtx (xop0), 1672 copy_rtx (xop1)),
2022 copy_rtx (xop1))); 1673 target);
2023 } 1674 }
2024 else 1675 else
2025 target = xtarget; 1676 target = xtarget;
2026 1677
2027 return target; 1678 return target;
2035 mode multiplications. We first attempt to generate a sequence using a 1686 mode multiplications. We first attempt to generate a sequence using a
2036 more efficient unsigned widening multiply, and if that fails we then 1687 more efficient unsigned widening multiply, and if that fails we then
2037 try using a signed widening multiply. */ 1688 try using a signed widening multiply. */
2038 1689
2039 if (binoptab == smul_optab 1690 if (binoptab == smul_optab
2040 && mclass == MODE_INT 1691 && is_int_mode (mode, &int_mode)
2041 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD 1692 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2042 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing 1693 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
2043 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing) 1694 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
2044 { 1695 {
2045 rtx product = NULL_RTX; 1696 rtx product = NULL_RTX;
2046 1697 if (widening_optab_handler (umul_widen_optab, int_mode, word_mode)
2047 if (optab_handler (umul_widen_optab, mode) != CODE_FOR_nothing) 1698 != CODE_FOR_nothing)
2048 { 1699 {
2049 product = expand_doubleword_mult (mode, op0, op1, target, 1700 product = expand_doubleword_mult (int_mode, op0, op1, target,
2050 true, methods); 1701 true, methods);
2051 if (!product) 1702 if (!product)
2052 delete_insns_since (last); 1703 delete_insns_since (last);
2053 } 1704 }
2054 1705
2055 if (product == NULL_RTX 1706 if (product == NULL_RTX
2056 && optab_handler (smul_widen_optab, mode) != CODE_FOR_nothing) 1707 && (widening_optab_handler (smul_widen_optab, int_mode, word_mode)
2057 { 1708 != CODE_FOR_nothing))
2058 product = expand_doubleword_mult (mode, op0, op1, target, 1709 {
1710 product = expand_doubleword_mult (int_mode, op0, op1, target,
2059 false, methods); 1711 false, methods);
2060 if (!product) 1712 if (!product)
2061 delete_insns_since (last); 1713 delete_insns_since (last);
2062 } 1714 }
2063 1715
2064 if (product != NULL_RTX) 1716 if (product != NULL_RTX)
2065 { 1717 {
2066 if (optab_handler (mov_optab, mode) != CODE_FOR_nothing) 1718 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
2067 { 1719 {
2068 temp = emit_move_insn (target ? target : product, product); 1720 rtx_insn *move = emit_move_insn (target ? target : product,
2069 set_unique_reg_note (temp, 1721 product);
2070 REG_EQUAL, 1722 set_dst_reg_note (move,
2071 gen_rtx_fmt_ee (MULT, mode, 1723 REG_EQUAL,
2072 copy_rtx (op0), 1724 gen_rtx_fmt_ee (MULT, int_mode,
2073 copy_rtx (op1))); 1725 copy_rtx (op0),
1726 copy_rtx (op1)),
1727 target ? target : product);
2074 } 1728 }
2075 return product; 1729 return product;
2076 } 1730 }
2077 } 1731 }
2078 1732
2081 1735
2082 libfunc = optab_libfunc (binoptab, mode); 1736 libfunc = optab_libfunc (binoptab, mode);
2083 if (libfunc 1737 if (libfunc
2084 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN)) 1738 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2085 { 1739 {
2086 rtx insns; 1740 rtx_insn *insns;
2087 rtx op1x = op1; 1741 rtx op1x = op1;
2088 enum machine_mode op1_mode = mode; 1742 machine_mode op1_mode = mode;
2089 rtx value; 1743 rtx value;
2090 1744
2091 start_sequence (); 1745 start_sequence ();
2092 1746
2093 if (shift_optab_p (binoptab)) 1747 if (shift_optab_p (binoptab))
2103 op0 = convert_to_mode (mode, op0, unsignedp); 1757 op0 = convert_to_mode (mode, op0, unsignedp);
2104 1758
2105 /* Pass 1 for NO_QUEUE so we don't lose any increments 1759 /* Pass 1 for NO_QUEUE so we don't lose any increments
2106 if the libcall is cse'd or moved. */ 1760 if the libcall is cse'd or moved. */
2107 value = emit_library_call_value (libfunc, 1761 value = emit_library_call_value (libfunc,
2108 NULL_RTX, LCT_CONST, mode, 2, 1762 NULL_RTX, LCT_CONST, mode,
2109 op0, mode, op1x, op1_mode); 1763 op0, mode, op1x, op1_mode);
2110 1764
2111 insns = get_insns (); 1765 insns = get_insns ();
2112 end_sequence (); 1766 end_sequence ();
2113 1767
1768 bool trapv = trapv_binoptab_p (binoptab);
2114 target = gen_reg_rtx (mode); 1769 target = gen_reg_rtx (mode);
2115 emit_libcall_block (insns, target, value, 1770 emit_libcall_block_1 (insns, target, value,
2116 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1)); 1771 trapv ? NULL_RTX
1772 : gen_rtx_fmt_ee (optab_to_code (binoptab),
1773 mode, op0, op1), trapv);
2117 1774
2118 return target; 1775 return target;
2119 } 1776 }
2120 1777
2121 delete_insns_since (last); 1778 delete_insns_since (last);
2138 /* Look for a wider mode of the same class for which it appears we can do 1795 /* Look for a wider mode of the same class for which it appears we can do
2139 the operation. */ 1796 the operation. */
2140 1797
2141 if (CLASS_HAS_WIDER_MODES_P (mclass)) 1798 if (CLASS_HAS_WIDER_MODES_P (mclass))
2142 { 1799 {
2143 for (wider_mode = GET_MODE_WIDER_MODE (mode); 1800 FOR_EACH_WIDER_MODE (wider_mode, mode)
2144 wider_mode != VOIDmode; 1801 {
2145 wider_mode = GET_MODE_WIDER_MODE (wider_mode)) 1802 if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
2146 { 1803 != CODE_FOR_nothing
2147 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
2148 || (methods == OPTAB_LIB 1804 || (methods == OPTAB_LIB
2149 && optab_libfunc (binoptab, wider_mode))) 1805 && optab_libfunc (binoptab, wider_mode)))
2150 { 1806 {
2151 rtx xop0 = op0, xop1 = op1; 1807 rtx xop0 = op0, xop1 = op1;
2152 int no_extend = 0; 1808 int no_extend = 0;
2172 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX, 1828 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2173 unsignedp, methods); 1829 unsignedp, methods);
2174 if (temp) 1830 if (temp)
2175 { 1831 {
2176 if (mclass != MODE_INT 1832 if (mclass != MODE_INT
2177 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), 1833 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2178 GET_MODE_BITSIZE (wider_mode)))
2179 { 1834 {
2180 if (target == 0) 1835 if (target == 0)
2181 target = gen_reg_rtx (mode); 1836 target = gen_reg_rtx (mode);
2182 convert_move (target, temp, 0); 1837 convert_move (target, temp, 0);
2183 return target; 1838 return target;
2201 1856
2202 If we widen unsigned operands, we may use a signed wider operation instead 1857 If we widen unsigned operands, we may use a signed wider operation instead
2203 of an unsigned wider operation, since the result would be the same. */ 1858 of an unsigned wider operation, since the result would be the same. */
2204 1859
2205 rtx 1860 rtx
2206 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab, 1861 sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
2207 rtx op0, rtx op1, rtx target, int unsignedp, 1862 rtx op0, rtx op1, rtx target, int unsignedp,
2208 enum optab_methods methods) 1863 enum optab_methods methods)
2209 { 1864 {
2210 rtx temp; 1865 rtx temp;
2211 optab direct_optab = unsignedp ? uoptab : soptab; 1866 optab direct_optab = unsignedp ? uoptab : soptab;
2212 struct optab_d wide_soptab; 1867 bool save_enable;
2213 1868
2214 /* Do it without widening, if possible. */ 1869 /* Do it without widening, if possible. */
2215 temp = expand_binop (mode, direct_optab, op0, op1, target, 1870 temp = expand_binop (mode, direct_optab, op0, op1, target,
2216 unsignedp, OPTAB_DIRECT); 1871 unsignedp, OPTAB_DIRECT);
2217 if (temp || methods == OPTAB_DIRECT) 1872 if (temp || methods == OPTAB_DIRECT)
2218 return temp; 1873 return temp;
2219 1874
2220 /* Try widening to a signed int. Make a fake signed optab that 1875 /* Try widening to a signed int. Disable any direct use of any
2221 hides any signed insn for direct use. */ 1876 signed insn in the current mode. */
2222 wide_soptab = *soptab; 1877 save_enable = swap_optab_enable (soptab, mode, false);
2223 set_optab_handler (&wide_soptab, mode, CODE_FOR_nothing); 1878
2224 /* We don't want to generate new hash table entries from this fake 1879 temp = expand_binop (mode, soptab, op0, op1, target,
2225 optab. */
2226 wide_soptab.libcall_gen = NULL;
2227
2228 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2229 unsignedp, OPTAB_WIDEN); 1880 unsignedp, OPTAB_WIDEN);
2230 1881
2231 /* For unsigned operands, try widening to an unsigned int. */ 1882 /* For unsigned operands, try widening to an unsigned int. */
2232 if (temp == 0 && unsignedp) 1883 if (!temp && unsignedp)
2233 temp = expand_binop (mode, uoptab, op0, op1, target, 1884 temp = expand_binop (mode, uoptab, op0, op1, target,
2234 unsignedp, OPTAB_WIDEN); 1885 unsignedp, OPTAB_WIDEN);
2235 if (temp || methods == OPTAB_WIDEN) 1886 if (temp || methods == OPTAB_WIDEN)
2236 return temp; 1887 goto egress;
2237 1888
2238 /* Use the right width libcall if that exists. */ 1889 /* Use the right width libcall if that exists. */
2239 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB); 1890 temp = expand_binop (mode, direct_optab, op0, op1, target,
1891 unsignedp, OPTAB_LIB);
2240 if (temp || methods == OPTAB_LIB) 1892 if (temp || methods == OPTAB_LIB)
2241 return temp; 1893 goto egress;
2242 1894
2243 /* Must widen and use a libcall, use either signed or unsigned. */ 1895 /* Must widen and use a libcall, use either signed or unsigned. */
2244 temp = expand_binop (mode, &wide_soptab, op0, op1, target, 1896 temp = expand_binop (mode, soptab, op0, op1, target,
2245 unsignedp, methods); 1897 unsignedp, methods);
2246 if (temp != 0) 1898 if (!temp && unsignedp)
2247 return temp; 1899 temp = expand_binop (mode, uoptab, op0, op1, target,
2248 if (unsignedp)
2249 return expand_binop (mode, uoptab, op0, op1, target,
2250 unsignedp, methods); 1900 unsignedp, methods);
2251 return 0; 1901
1902 egress:
1903 /* Undo the fiddling above. */
1904 if (save_enable)
1905 swap_optab_enable (soptab, mode, true);
1906 return temp;
2252 } 1907 }
2253 1908
2254 /* Generate code to perform an operation specified by UNOPPTAB 1909 /* Generate code to perform an operation specified by UNOPPTAB
2255 on operand OP0, with two results to TARG0 and TARG1. 1910 on operand OP0, with two results to TARG0 and TARG1.
2256 We assume that the order of the operands for the instruction 1911 We assume that the order of the operands for the instruction
2264 1919
2265 int 1920 int
2266 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1, 1921 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2267 int unsignedp) 1922 int unsignedp)
2268 { 1923 {
2269 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1); 1924 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2270 enum mode_class mclass; 1925 enum mode_class mclass;
2271 enum machine_mode wider_mode; 1926 machine_mode wider_mode;
2272 rtx entry_last = get_last_insn (); 1927 rtx_insn *entry_last = get_last_insn ();
2273 rtx last; 1928 rtx_insn *last;
2274 1929
2275 mclass = GET_MODE_CLASS (mode); 1930 mclass = GET_MODE_CLASS (mode);
2276 1931
2277 if (!targ0) 1932 if (!targ0)
2278 targ0 = gen_reg_rtx (mode); 1933 targ0 = gen_reg_rtx (mode);
2282 /* Record where to go back to if we fail. */ 1937 /* Record where to go back to if we fail. */
2283 last = get_last_insn (); 1938 last = get_last_insn ();
2284 1939
2285 if (optab_handler (unoptab, mode) != CODE_FOR_nothing) 1940 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2286 { 1941 {
2287 int icode = (int) optab_handler (unoptab, mode); 1942 struct expand_operand ops[3];
2288 enum machine_mode mode0 = insn_data[icode].operand[2].mode; 1943 enum insn_code icode = optab_handler (unoptab, mode);
2289 rtx pat; 1944
2290 rtx xop0 = op0; 1945 create_fixed_operand (&ops[0], targ0);
2291 1946 create_fixed_operand (&ops[1], targ1);
2292 if (GET_MODE (xop0) != VOIDmode 1947 create_convert_operand_from (&ops[2], op0, mode, unsignedp);
2293 && GET_MODE (xop0) != mode0) 1948 if (maybe_expand_insn (icode, 3, ops))
2294 xop0 = convert_to_mode (mode0, xop0, unsignedp); 1949 return 1;
2295
2296 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2297 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2298 xop0 = copy_to_mode_reg (mode0, xop0);
2299
2300 /* We could handle this, but we should always be called with a pseudo
2301 for our targets and all insns should take them as outputs. */
2302 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2303 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2304
2305 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2306 if (pat)
2307 {
2308 emit_insn (pat);
2309 return 1;
2310 }
2311 else
2312 delete_insns_since (last);
2313 } 1950 }
2314 1951
2315 /* It can't be done in this mode. Can we do it in a wider mode? */ 1952 /* It can't be done in this mode. Can we do it in a wider mode? */
2316 1953
2317 if (CLASS_HAS_WIDER_MODES_P (mclass)) 1954 if (CLASS_HAS_WIDER_MODES_P (mclass))
2318 { 1955 {
2319 for (wider_mode = GET_MODE_WIDER_MODE (mode); 1956 FOR_EACH_WIDER_MODE (wider_mode, mode)
2320 wider_mode != VOIDmode;
2321 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2322 { 1957 {
2323 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing) 1958 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2324 { 1959 {
2325 rtx t0 = gen_reg_rtx (wider_mode); 1960 rtx t0 = gen_reg_rtx (wider_mode);
2326 rtx t1 = gen_reg_rtx (wider_mode); 1961 rtx t1 = gen_reg_rtx (wider_mode);
2356 1991
2357 int 1992 int
2358 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1, 1993 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2359 int unsignedp) 1994 int unsignedp)
2360 { 1995 {
2361 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1); 1996 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2362 enum mode_class mclass; 1997 enum mode_class mclass;
2363 enum machine_mode wider_mode; 1998 machine_mode wider_mode;
2364 rtx entry_last = get_last_insn (); 1999 rtx_insn *entry_last = get_last_insn ();
2365 rtx last; 2000 rtx_insn *last;
2366 2001
2367 mclass = GET_MODE_CLASS (mode); 2002 mclass = GET_MODE_CLASS (mode);
2368 2003
2369 if (!targ0) 2004 if (!targ0)
2370 targ0 = gen_reg_rtx (mode); 2005 targ0 = gen_reg_rtx (mode);
2374 /* Record where to go back to if we fail. */ 2009 /* Record where to go back to if we fail. */
2375 last = get_last_insn (); 2010 last = get_last_insn ();
2376 2011
2377 if (optab_handler (binoptab, mode) != CODE_FOR_nothing) 2012 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2378 { 2013 {
2379 int icode = (int) optab_handler (binoptab, mode); 2014 struct expand_operand ops[4];
2380 enum machine_mode mode0 = insn_data[icode].operand[1].mode; 2015 enum insn_code icode = optab_handler (binoptab, mode);
2381 enum machine_mode mode1 = insn_data[icode].operand[2].mode; 2016 machine_mode mode0 = insn_data[icode].operand[1].mode;
2382 rtx pat; 2017 machine_mode mode1 = insn_data[icode].operand[2].mode;
2383 rtx xop0 = op0, xop1 = op1; 2018 rtx xop0 = op0, xop1 = op1;
2384 2019
2385 /* If we are optimizing, force expensive constants into a register. */ 2020 /* If we are optimizing, force expensive constants into a register. */
2386 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp); 2021 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2387 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp); 2022 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2388 2023
2389 /* In case the insn wants input operands in modes different from 2024 create_fixed_operand (&ops[0], targ0);
2390 those of the actual operands, convert the operands. It would 2025 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2391 seem that we don't need to convert CONST_INTs, but we do, so 2026 create_convert_operand_from (&ops[2], op1, mode, unsignedp);
2392 that they're properly zero-extended, sign-extended or truncated 2027 create_fixed_operand (&ops[3], targ1);
2393 for their mode. */ 2028 if (maybe_expand_insn (icode, 4, ops))
2394 2029 return 1;
2395 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode) 2030 delete_insns_since (last);
2396 xop0 = convert_modes (mode0,
2397 GET_MODE (op0) != VOIDmode
2398 ? GET_MODE (op0)
2399 : mode,
2400 xop0, unsignedp);
2401
2402 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2403 xop1 = convert_modes (mode1,
2404 GET_MODE (op1) != VOIDmode
2405 ? GET_MODE (op1)
2406 : mode,
2407 xop1, unsignedp);
2408
2409 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2410 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2411 xop0 = copy_to_mode_reg (mode0, xop0);
2412
2413 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2414 xop1 = copy_to_mode_reg (mode1, xop1);
2415
2416 /* We could handle this, but we should always be called with a pseudo
2417 for our targets and all insns should take them as outputs. */
2418 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2419 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2420
2421 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2422 if (pat)
2423 {
2424 emit_insn (pat);
2425 return 1;
2426 }
2427 else
2428 delete_insns_since (last);
2429 } 2031 }
2430 2032
2431 /* It can't be done in this mode. Can we do it in a wider mode? */ 2033 /* It can't be done in this mode. Can we do it in a wider mode? */
2432 2034
2433 if (CLASS_HAS_WIDER_MODES_P (mclass)) 2035 if (CLASS_HAS_WIDER_MODES_P (mclass))
2434 { 2036 {
2435 for (wider_mode = GET_MODE_WIDER_MODE (mode); 2037 FOR_EACH_WIDER_MODE (wider_mode, mode)
2436 wider_mode != VOIDmode;
2437 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2438 { 2038 {
2439 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing) 2039 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2440 { 2040 {
2441 rtx t0 = gen_reg_rtx (wider_mode); 2041 rtx t0 = gen_reg_rtx (wider_mode);
2442 rtx t1 = gen_reg_rtx (wider_mode); 2042 rtx t1 = gen_reg_rtx (wider_mode);
2471 2071
2472 bool 2072 bool
2473 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1, 2073 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2474 rtx targ0, rtx targ1, enum rtx_code code) 2074 rtx targ0, rtx targ1, enum rtx_code code)
2475 { 2075 {
2476 enum machine_mode mode; 2076 machine_mode mode;
2477 enum machine_mode libval_mode; 2077 machine_mode libval_mode;
2478 rtx libval; 2078 rtx libval;
2479 rtx insns; 2079 rtx_insn *insns;
2480 rtx libfunc; 2080 rtx libfunc;
2481 2081
2482 /* Exactly one of TARG0 or TARG1 should be non-NULL. */ 2082 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2483 gcc_assert (!targ0 != !targ1); 2083 gcc_assert (!targ0 != !targ1);
2484 2084
2487 if (!libfunc) 2087 if (!libfunc)
2488 return false; 2088 return false;
2489 2089
2490 /* The value returned by the library function will have twice as 2090 /* The value returned by the library function will have twice as
2491 many bits as the nominal MODE. */ 2091 many bits as the nominal MODE. */
2492 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode), 2092 libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
2493 MODE_INT);
2494 start_sequence (); 2093 start_sequence ();
2495 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, 2094 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2496 libval_mode, 2, 2095 libval_mode,
2497 op0, mode, 2096 op0, mode,
2498 op1, mode); 2097 op1, mode);
2499 /* Get the part of VAL containing the value that we want. */ 2098 /* Get the part of VAL containing the value that we want. */
2500 libval = simplify_gen_subreg (mode, libval, libval_mode, 2099 libval = simplify_gen_subreg (mode, libval, libval_mode,
2501 targ0 ? 0 : GET_MODE_SIZE (mode)); 2100 targ0 ? 0 : GET_MODE_SIZE (mode));
2511 2110
2512 /* Wrapper around expand_unop which takes an rtx code to specify 2111 /* Wrapper around expand_unop which takes an rtx code to specify
2513 the operation to perform, not an optab pointer. All other 2112 the operation to perform, not an optab pointer. All other
2514 arguments are the same. */ 2113 arguments are the same. */
2515 rtx 2114 rtx
2516 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0, 2115 expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
2517 rtx target, int unsignedp) 2116 rtx target, int unsignedp)
2518 { 2117 {
2519 optab unop = code_to_optab[(int) code]; 2118 optab unop = code_to_optab (code);
2520 gcc_assert (unop); 2119 gcc_assert (unop);
2521 2120
2522 return expand_unop (mode, unop, op0, target, unsignedp); 2121 return expand_unop (mode, unop, op0, target, unsignedp);
2523 } 2122 }
2524 2123
2525 /* Try calculating 2124 /* Try calculating
2526 (clz:narrow x) 2125 (clz:narrow x)
2527 as 2126 as
2528 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */ 2127 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2128
2129 A similar operation can be used for clrsb. UNOPTAB says which operation
2130 we are trying to expand. */
2529 static rtx 2131 static rtx
2530 widen_clz (enum machine_mode mode, rtx op0, rtx target) 2132 widen_leading (scalar_int_mode mode, rtx op0, rtx target, optab unoptab)
2531 { 2133 {
2532 enum mode_class mclass = GET_MODE_CLASS (mode); 2134 opt_scalar_int_mode wider_mode_iter;
2533 if (CLASS_HAS_WIDER_MODES_P (mclass)) 2135 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2534 { 2136 {
2535 enum machine_mode wider_mode; 2137 scalar_int_mode wider_mode = wider_mode_iter.require ();
2536 for (wider_mode = GET_MODE_WIDER_MODE (mode); 2138 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2537 wider_mode != VOIDmode; 2139 {
2538 wider_mode = GET_MODE_WIDER_MODE (wider_mode)) 2140 rtx xop0, temp;
2539 { 2141 rtx_insn *last;
2540 if (optab_handler (clz_optab, wider_mode) != CODE_FOR_nothing) 2142
2541 { 2143 last = get_last_insn ();
2542 rtx xop0, temp, last; 2144
2543 2145 if (target == 0)
2544 last = get_last_insn (); 2146 target = gen_reg_rtx (mode);
2545 2147 xop0 = widen_operand (op0, wider_mode, mode,
2546 if (target == 0) 2148 unoptab != clrsb_optab, false);
2547 target = gen_reg_rtx (mode); 2149 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2548 xop0 = widen_operand (op0, wider_mode, mode, true, false); 2150 unoptab != clrsb_optab);
2549 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true); 2151 if (temp != 0)
2550 if (temp != 0) 2152 temp = expand_binop
2551 temp = expand_binop (wider_mode, sub_optab, temp, 2153 (wider_mode, sub_optab, temp,
2552 GEN_INT (GET_MODE_BITSIZE (wider_mode) 2154 gen_int_mode (GET_MODE_PRECISION (wider_mode)
2553 - GET_MODE_BITSIZE (mode)), 2155 - GET_MODE_PRECISION (mode),
2554 target, true, OPTAB_DIRECT); 2156 wider_mode),
2555 if (temp == 0) 2157 target, true, OPTAB_DIRECT);
2556 delete_insns_since (last); 2158 if (temp == 0)
2557 2159 delete_insns_since (last);
2558 return temp; 2160
2559 } 2161 return temp;
2560 } 2162 }
2561 } 2163 }
2562 return 0; 2164 return 0;
2563 } 2165 }
2564 2166
2565 /* Try calculating clz of a double-word quantity as two clz's of word-sized 2167 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2566 quantities, choosing which based on whether the high word is nonzero. */ 2168 quantities, choosing which based on whether the high word is nonzero. */
2567 static rtx 2169 static rtx
2568 expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target) 2170 expand_doubleword_clz (scalar_int_mode mode, rtx op0, rtx target)
2569 { 2171 {
2570 rtx xop0 = force_reg (mode, op0); 2172 rtx xop0 = force_reg (mode, op0);
2571 rtx subhi = gen_highpart (word_mode, xop0); 2173 rtx subhi = gen_highpart (word_mode, xop0);
2572 rtx sublo = gen_lowpart (word_mode, xop0); 2174 rtx sublo = gen_lowpart (word_mode, xop0);
2573 rtx hi0_label = gen_label_rtx (); 2175 rtx_code_label *hi0_label = gen_label_rtx ();
2574 rtx after_label = gen_label_rtx (); 2176 rtx_code_label *after_label = gen_label_rtx ();
2575 rtx seq, temp, result; 2177 rtx_insn *seq;
2178 rtx temp, result;
2576 2179
2577 /* If we were not given a target, use a word_mode register, not a 2180 /* If we were not given a target, use a word_mode register, not a
2578 'mode' register. The result will fit, and nobody is expecting 2181 'mode' register. The result will fit, and nobody is expecting
2579 anything bigger (the return type of __builtin_clz* is int). */ 2182 anything bigger (the return type of __builtin_clz* is int). */
2580 if (!target) 2183 if (!target)
2597 goto fail; 2200 goto fail;
2598 2201
2599 if (temp != result) 2202 if (temp != result)
2600 convert_move (result, temp, true); 2203 convert_move (result, temp, true);
2601 2204
2602 emit_jump_insn (gen_jump (after_label)); 2205 emit_jump_insn (targetm.gen_jump (after_label));
2603 emit_barrier (); 2206 emit_barrier ();
2604 2207
2605 /* Else clz of the full value is clz of the low word plus the number 2208 /* Else clz of the full value is clz of the low word plus the number
2606 of bits in the high word. */ 2209 of bits in the high word. */
2607 emit_label (hi0_label); 2210 emit_label (hi0_label);
2608 2211
2609 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true); 2212 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2610 if (!temp) 2213 if (!temp)
2611 goto fail; 2214 goto fail;
2612 temp = expand_binop (word_mode, add_optab, temp, 2215 temp = expand_binop (word_mode, add_optab, temp,
2613 GEN_INT (GET_MODE_BITSIZE (word_mode)), 2216 gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
2614 result, true, OPTAB_DIRECT); 2217 result, true, OPTAB_DIRECT);
2615 if (!temp) 2218 if (!temp)
2616 goto fail; 2219 goto fail;
2617 if (temp != result) 2220 if (temp != result)
2618 convert_move (result, temp, true); 2221 convert_move (result, temp, true);
2630 fail: 2233 fail:
2631 end_sequence (); 2234 end_sequence ();
2632 return 0; 2235 return 0;
2633 } 2236 }
2634 2237
2238 /* Try calculating popcount of a double-word quantity as two popcount's of
2239 word-sized quantities and summing up the results. */
2240 static rtx
2241 expand_doubleword_popcount (scalar_int_mode mode, rtx op0, rtx target)
2242 {
2243 rtx t0, t1, t;
2244 rtx_insn *seq;
2245
2246 start_sequence ();
2247
2248 t0 = expand_unop_direct (word_mode, popcount_optab,
2249 operand_subword_force (op0, 0, mode), NULL_RTX,
2250 true);
2251 t1 = expand_unop_direct (word_mode, popcount_optab,
2252 operand_subword_force (op0, 1, mode), NULL_RTX,
2253 true);
2254 if (!t0 || !t1)
2255 {
2256 end_sequence ();
2257 return NULL_RTX;
2258 }
2259
2260 /* If we were not given a target, use a word_mode register, not a
2261 'mode' register. The result will fit, and nobody is expecting
2262 anything bigger (the return type of __builtin_popcount* is int). */
2263 if (!target)
2264 target = gen_reg_rtx (word_mode);
2265
2266 t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
2267
2268 seq = get_insns ();
2269 end_sequence ();
2270
2271 add_equal_note (seq, t, POPCOUNT, op0, 0);
2272 emit_insn (seq);
2273 return t;
2274 }
2275
2276 /* Try calculating
2277 (parity:wide x)
2278 as
2279 (parity:narrow (low (x) ^ high (x))) */
2280 static rtx
2281 expand_doubleword_parity (scalar_int_mode mode, rtx op0, rtx target)
2282 {
2283 rtx t = expand_binop (word_mode, xor_optab,
2284 operand_subword_force (op0, 0, mode),
2285 operand_subword_force (op0, 1, mode),
2286 NULL_RTX, 0, OPTAB_DIRECT);
2287 return expand_unop (word_mode, parity_optab, t, target, true);
2288 }
2289
2635 /* Try calculating 2290 /* Try calculating
2636 (bswap:narrow x) 2291 (bswap:narrow x)
2637 as 2292 as
2638 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */ 2293 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2639 static rtx 2294 static rtx
2640 widen_bswap (enum machine_mode mode, rtx op0, rtx target) 2295 widen_bswap (scalar_int_mode mode, rtx op0, rtx target)
2641 { 2296 {
2642 enum mode_class mclass = GET_MODE_CLASS (mode); 2297 rtx x;
2643 enum machine_mode wider_mode; 2298 rtx_insn *last;
2644 rtx x, last; 2299 opt_scalar_int_mode wider_mode_iter;
2645 2300
2646 if (!CLASS_HAS_WIDER_MODES_P (mclass)) 2301 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2302 if (optab_handler (bswap_optab, wider_mode_iter.require ())
2303 != CODE_FOR_nothing)
2304 break;
2305
2306 if (!wider_mode_iter.exists ())
2647 return NULL_RTX; 2307 return NULL_RTX;
2648 2308
2649 for (wider_mode = GET_MODE_WIDER_MODE (mode); 2309 scalar_int_mode wider_mode = wider_mode_iter.require ();
2650 wider_mode != VOIDmode;
2651 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2652 if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
2653 goto found;
2654 return NULL_RTX;
2655
2656 found:
2657 last = get_last_insn (); 2310 last = get_last_insn ();
2658 2311
2659 x = widen_operand (op0, wider_mode, mode, true, true); 2312 x = widen_operand (op0, wider_mode, mode, true, true);
2660 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true); 2313 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2661 2314
2315 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2316 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2662 if (x != 0) 2317 if (x != 0)
2663 x = expand_shift (RSHIFT_EXPR, wider_mode, x, 2318 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2664 size_int (GET_MODE_BITSIZE (wider_mode) 2319 GET_MODE_BITSIZE (wider_mode)
2665 - GET_MODE_BITSIZE (mode)), 2320 - GET_MODE_BITSIZE (mode),
2666 NULL_RTX, true); 2321 NULL_RTX, true);
2667 2322
2668 if (x != 0) 2323 if (x != 0)
2669 { 2324 {
2670 if (target == 0) 2325 if (target == 0)
2678 } 2333 }
2679 2334
2680 /* Try calculating bswap as two bswaps of two word-sized operands. */ 2335 /* Try calculating bswap as two bswaps of two word-sized operands. */
2681 2336
2682 static rtx 2337 static rtx
2683 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target) 2338 expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
2684 { 2339 {
2685 rtx t0, t1; 2340 rtx t0, t1;
2686 2341
2687 t1 = expand_unop (word_mode, bswap_optab, 2342 t1 = expand_unop (word_mode, bswap_optab,
2688 operand_subword_force (op, 0, mode), NULL_RTX, true); 2343 operand_subword_force (op, 0, mode), NULL_RTX, true);
2689 t0 = expand_unop (word_mode, bswap_optab, 2344 t0 = expand_unop (word_mode, bswap_optab,
2690 operand_subword_force (op, 1, mode), NULL_RTX, true); 2345 operand_subword_force (op, 1, mode), NULL_RTX, true);
2691 2346
2692 if (target == 0) 2347 if (target == 0 || !valid_multiword_target_p (target))
2693 target = gen_reg_rtx (mode); 2348 target = gen_reg_rtx (mode);
2694 if (REG_P (target)) 2349 if (REG_P (target))
2695 emit_clobber (target); 2350 emit_clobber (target);
2696 emit_move_insn (operand_subword (target, 0, 1, mode), t0); 2351 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2697 emit_move_insn (operand_subword (target, 1, 1, mode), t1); 2352 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2700 } 2355 }
2701 2356
2702 /* Try calculating (parity x) as (and (popcount x) 1), where 2357 /* Try calculating (parity x) as (and (popcount x) 1), where
2703 popcount can also be done in a wider mode. */ 2358 popcount can also be done in a wider mode. */
2704 static rtx 2359 static rtx
2705 expand_parity (enum machine_mode mode, rtx op0, rtx target) 2360 expand_parity (scalar_int_mode mode, rtx op0, rtx target)
2706 { 2361 {
2707 enum mode_class mclass = GET_MODE_CLASS (mode); 2362 enum mode_class mclass = GET_MODE_CLASS (mode);
2708 if (CLASS_HAS_WIDER_MODES_P (mclass)) 2363 opt_scalar_int_mode wider_mode_iter;
2709 { 2364 FOR_EACH_MODE_FROM (wider_mode_iter, mode)
2710 enum machine_mode wider_mode; 2365 {
2711 for (wider_mode = mode; wider_mode != VOIDmode; 2366 scalar_int_mode wider_mode = wider_mode_iter.require ();
2712 wider_mode = GET_MODE_WIDER_MODE (wider_mode)) 2367 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2713 { 2368 {
2714 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing) 2369 rtx xop0, temp;
2370 rtx_insn *last;
2371
2372 last = get_last_insn ();
2373
2374 if (target == 0 || GET_MODE (target) != wider_mode)
2375 target = gen_reg_rtx (wider_mode);
2376
2377 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2378 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2379 true);
2380 if (temp != 0)
2381 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2382 target, true, OPTAB_DIRECT);
2383
2384 if (temp)
2715 { 2385 {
2716 rtx xop0, temp, last; 2386 if (mclass != MODE_INT
2717 2387 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2718 last = get_last_insn (); 2388 return convert_to_mode (mode, temp, 0);
2719 2389 else
2720 if (target == 0) 2390 return gen_lowpart (mode, temp);
2721 target = gen_reg_rtx (mode);
2722 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2723 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2724 true);
2725 if (temp != 0)
2726 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2727 target, true, OPTAB_DIRECT);
2728 if (temp == 0)
2729 delete_insns_since (last);
2730
2731 return temp;
2732 } 2391 }
2392 else
2393 delete_insns_since (last);
2733 } 2394 }
2734 } 2395 }
2735 return 0; 2396 return 0;
2736 } 2397 }
2737 2398
2738 /* Try calculating ctz(x) as K - clz(x & -x) , 2399 /* Try calculating ctz(x) as K - clz(x & -x) ,
2739 where K is GET_MODE_BITSIZE(mode) - 1. 2400 where K is GET_MODE_PRECISION(mode) - 1.
2740 2401
2741 Both __builtin_ctz and __builtin_clz are undefined at zero, so we 2402 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2742 don't have to worry about what the hardware does in that case. (If 2403 don't have to worry about what the hardware does in that case. (If
2743 the clz instruction produces the usual value at 0, which is K, the 2404 the clz instruction produces the usual value at 0, which is K, the
2744 result of this code sequence will be -1; expand_ffs, below, relies 2405 result of this code sequence will be -1; expand_ffs, below, relies
2746 with the (very few) processors that provide a ctz with a defined 2407 with the (very few) processors that provide a ctz with a defined
2747 value, but that would take one more instruction, and it would be 2408 value, but that would take one more instruction, and it would be
2748 less convenient for expand_ffs anyway. */ 2409 less convenient for expand_ffs anyway. */
2749 2410
2750 static rtx 2411 static rtx
2751 expand_ctz (enum machine_mode mode, rtx op0, rtx target) 2412 expand_ctz (scalar_int_mode mode, rtx op0, rtx target)
2752 { 2413 {
2753 rtx seq, temp; 2414 rtx_insn *seq;
2415 rtx temp;
2754 2416
2755 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing) 2417 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2756 return 0; 2418 return 0;
2757 2419
2758 start_sequence (); 2420 start_sequence ();
2762 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX, 2424 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2763 true, OPTAB_DIRECT); 2425 true, OPTAB_DIRECT);
2764 if (temp) 2426 if (temp)
2765 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true); 2427 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2766 if (temp) 2428 if (temp)
2767 temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_BITSIZE (mode) - 1), 2429 temp = expand_binop (mode, sub_optab,
2430 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
2768 temp, target, 2431 temp, target,
2769 true, OPTAB_DIRECT); 2432 true, OPTAB_DIRECT);
2770 if (temp == 0) 2433 if (temp == 0)
2771 { 2434 {
2772 end_sequence (); 2435 end_sequence ();
2787 2450
2788 The ffs builtin promises to return zero for a zero value and ctz/clz 2451 The ffs builtin promises to return zero for a zero value and ctz/clz
2789 may have an undefined value in that case. If they do not give us a 2452 may have an undefined value in that case. If they do not give us a
2790 convenient value, we have to generate a test and branch. */ 2453 convenient value, we have to generate a test and branch. */
2791 static rtx 2454 static rtx
2792 expand_ffs (enum machine_mode mode, rtx op0, rtx target) 2455 expand_ffs (scalar_int_mode mode, rtx op0, rtx target)
2793 { 2456 {
2794 HOST_WIDE_INT val = 0; 2457 HOST_WIDE_INT val = 0;
2795 bool defined_at_zero = false; 2458 bool defined_at_zero = false;
2796 rtx temp, seq; 2459 rtx temp;
2460 rtx_insn *seq;
2797 2461
2798 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing) 2462 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2799 { 2463 {
2800 start_sequence (); 2464 start_sequence ();
2801 2465
2813 goto fail; 2477 goto fail;
2814 2478
2815 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2) 2479 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2816 { 2480 {
2817 defined_at_zero = true; 2481 defined_at_zero = true;
2818 val = (GET_MODE_BITSIZE (mode) - 1) - val; 2482 val = (GET_MODE_PRECISION (mode) - 1) - val;
2819 } 2483 }
2820 } 2484 }
2821 else 2485 else
2822 return 0; 2486 return 0;
2823 2487
2833 2497
2834 The test-and-branch is done after the operation itself, in case 2498 The test-and-branch is done after the operation itself, in case
2835 the operation sets condition codes that can be recycled for this. 2499 the operation sets condition codes that can be recycled for this.
2836 (This is true on i386, for instance.) */ 2500 (This is true on i386, for instance.) */
2837 2501
2838 rtx nonzero_label = gen_label_rtx (); 2502 rtx_code_label *nonzero_label = gen_label_rtx ();
2839 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0, 2503 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2840 mode, true, nonzero_label); 2504 mode, true, nonzero_label);
2841 2505
2842 convert_move (temp, GEN_INT (-1), false); 2506 convert_move (temp, GEN_INT (-1), false);
2843 emit_label (nonzero_label); 2507 emit_label (nonzero_label);
2844 } 2508 }
2845 2509
2846 /* temp now has a value in the range -1..bitsize-1. ffs is supposed 2510 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2847 to produce a value in the range 0..bitsize. */ 2511 to produce a value in the range 0..bitsize. */
2848 temp = expand_binop (mode, add_optab, temp, GEN_INT (1), 2512 temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
2849 target, false, OPTAB_DIRECT); 2513 target, false, OPTAB_DIRECT);
2850 if (!temp) 2514 if (!temp)
2851 goto fail; 2515 goto fail;
2852 2516
2853 seq = get_insns (); 2517 seq = get_insns ();
2866 conditions, VAL may already be a SUBREG against which we cannot generate 2530 conditions, VAL may already be a SUBREG against which we cannot generate
2867 a further SUBREG. In this case, we expect forcing the value into a 2531 a further SUBREG. In this case, we expect forcing the value into a
2868 register will work around the situation. */ 2532 register will work around the situation. */
2869 2533
2870 static rtx 2534 static rtx
2871 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val, 2535 lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
2872 enum machine_mode imode) 2536 machine_mode imode)
2873 { 2537 {
2874 rtx ret; 2538 rtx ret;
2875 ret = lowpart_subreg (omode, val, imode); 2539 ret = lowpart_subreg (omode, val, imode);
2876 if (ret == NULL) 2540 if (ret == NULL)
2877 { 2541 {
2884 2548
2885 /* Expand a floating point absolute value or negation operation via a 2549 /* Expand a floating point absolute value or negation operation via a
2886 logical operation on the sign bit. */ 2550 logical operation on the sign bit. */
2887 2551
2888 static rtx 2552 static rtx
2889 expand_absneg_bit (enum rtx_code code, enum machine_mode mode, 2553 expand_absneg_bit (enum rtx_code code, scalar_float_mode mode,
2890 rtx op0, rtx target) 2554 rtx op0, rtx target)
2891 { 2555 {
2892 const struct real_format *fmt; 2556 const struct real_format *fmt;
2893 int bitpos, word, nwords, i; 2557 int bitpos, word, nwords, i;
2894 enum machine_mode imode; 2558 scalar_int_mode imode;
2895 double_int mask; 2559 rtx temp;
2896 rtx temp, insns; 2560 rtx_insn *insns;
2897 2561
2898 /* The format has to have a simple sign bit. */ 2562 /* The format has to have a simple sign bit. */
2899 fmt = REAL_MODE_FORMAT (mode); 2563 fmt = REAL_MODE_FORMAT (mode);
2900 if (fmt == NULL) 2564 if (fmt == NULL)
2901 return NULL_RTX; 2565 return NULL_RTX;
2908 if (code == NEG && !fmt->has_signed_zero) 2572 if (code == NEG && !fmt->has_signed_zero)
2909 return NULL_RTX; 2573 return NULL_RTX;
2910 2574
2911 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD) 2575 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2912 { 2576 {
2913 imode = int_mode_for_mode (mode); 2577 if (!int_mode_for_mode (mode).exists (&imode))
2914 if (imode == BLKmode)
2915 return NULL_RTX; 2578 return NULL_RTX;
2916 word = 0; 2579 word = 0;
2917 nwords = 1; 2580 nwords = 1;
2918 } 2581 }
2919 else 2582 else
2926 word = bitpos / BITS_PER_WORD; 2589 word = bitpos / BITS_PER_WORD;
2927 bitpos = bitpos % BITS_PER_WORD; 2590 bitpos = bitpos % BITS_PER_WORD;
2928 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD; 2591 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2929 } 2592 }
2930 2593
2931 mask = double_int_setbit (double_int_zero, bitpos); 2594 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
2932 if (code == ABS) 2595 if (code == ABS)
2933 mask = double_int_not (mask); 2596 mask = ~mask;
2934 2597
2935 if (target == 0 || target == op0) 2598 if (target == 0
2599 || target == op0
2600 || (nwords > 1 && !valid_multiword_target_p (target)))
2936 target = gen_reg_rtx (mode); 2601 target = gen_reg_rtx (mode);
2937 2602
2938 if (nwords > 1) 2603 if (nwords > 1)
2939 { 2604 {
2940 start_sequence (); 2605 start_sequence ();
2946 2611
2947 if (i == word) 2612 if (i == word)
2948 { 2613 {
2949 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab, 2614 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2950 op0_piece, 2615 op0_piece,
2951 immed_double_int_const (mask, imode), 2616 immed_wide_int_const (mask, imode),
2952 targ_piece, 1, OPTAB_LIB_WIDEN); 2617 targ_piece, 1, OPTAB_LIB_WIDEN);
2953 if (temp != targ_piece) 2618 if (temp != targ_piece)
2954 emit_move_insn (targ_piece, temp); 2619 emit_move_insn (targ_piece, temp);
2955 } 2620 }
2956 else 2621 else
2964 } 2629 }
2965 else 2630 else
2966 { 2631 {
2967 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab, 2632 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2968 gen_lowpart (imode, op0), 2633 gen_lowpart (imode, op0),
2969 immed_double_int_const (mask, imode), 2634 immed_wide_int_const (mask, imode),
2970 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN); 2635 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2971 target = lowpart_subreg_maybe_copy (mode, temp, imode); 2636 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2972 2637
2973 set_unique_reg_note (get_last_insn (), REG_EQUAL, 2638 set_dst_reg_note (get_last_insn (), REG_EQUAL,
2974 gen_rtx_fmt_e (code, mode, copy_rtx (op0))); 2639 gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
2640 target);
2975 } 2641 }
2976 2642
2977 return target; 2643 return target;
2978 } 2644 }
2979 2645
2980 /* As expand_unop, but will fail rather than attempt the operation in a 2646 /* As expand_unop, but will fail rather than attempt the operation in a
2981 different mode or with a libcall. */ 2647 different mode or with a libcall. */
2982 static rtx 2648 static rtx
2983 expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target, 2649 expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
2984 int unsignedp) 2650 int unsignedp)
2985 { 2651 {
2986 if (optab_handler (unoptab, mode) != CODE_FOR_nothing) 2652 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2987 { 2653 {
2988 int icode = (int) optab_handler (unoptab, mode); 2654 struct expand_operand ops[2];
2989 enum machine_mode mode0 = insn_data[icode].operand[1].mode; 2655 enum insn_code icode = optab_handler (unoptab, mode);
2990 rtx xop0 = op0; 2656 rtx_insn *last = get_last_insn ();
2991 rtx last = get_last_insn (); 2657 rtx_insn *pat;
2992 rtx pat, temp; 2658
2993 2659 create_output_operand (&ops[0], target, mode);
2994 if (target) 2660 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2995 temp = target; 2661 pat = maybe_gen_insn (icode, 2, ops);
2996 else
2997 temp = gen_reg_rtx (mode);
2998
2999 if (GET_MODE (xop0) != VOIDmode
3000 && GET_MODE (xop0) != mode0)
3001 xop0 = convert_to_mode (mode0, xop0, unsignedp);
3002
3003 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
3004
3005 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
3006 xop0 = copy_to_mode_reg (mode0, xop0);
3007
3008 if (!insn_data[icode].operand[0].predicate (temp, mode))
3009 temp = gen_reg_rtx (mode);
3010
3011 pat = GEN_FCN (icode) (temp, xop0);
3012 if (pat) 2662 if (pat)
3013 { 2663 {
3014 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX 2664 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3015 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX)) 2665 && ! add_equal_note (pat, ops[0].value,
2666 optab_to_code (unoptab),
2667 ops[1].value, NULL_RTX))
3016 { 2668 {
3017 delete_insns_since (last); 2669 delete_insns_since (last);
3018 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp); 2670 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
3019 } 2671 }
3020 2672
3021 emit_insn (pat); 2673 emit_insn (pat);
3022 2674
3023 return temp; 2675 return ops[0].value;
3024 } 2676 }
3025 else
3026 delete_insns_since (last);
3027 } 2677 }
3028 return 0; 2678 return 0;
3029 } 2679 }
3030 2680
3031 /* Generate code to perform an operation specified by UNOPTAB 2681 /* Generate code to perform an operation specified by UNOPTAB
3038 is generated there, if it is convenient to do so. 2688 is generated there, if it is convenient to do so.
3039 In all cases an rtx is returned for the locus of the value; 2689 In all cases an rtx is returned for the locus of the value;
3040 this may or may not be TARGET. */ 2690 this may or may not be TARGET. */
3041 2691
3042 rtx 2692 rtx
3043 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target, 2693 expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
3044 int unsignedp) 2694 int unsignedp)
3045 { 2695 {
3046 enum mode_class mclass = GET_MODE_CLASS (mode); 2696 enum mode_class mclass = GET_MODE_CLASS (mode);
3047 enum machine_mode wider_mode; 2697 machine_mode wider_mode;
2698 scalar_int_mode int_mode;
2699 scalar_float_mode float_mode;
3048 rtx temp; 2700 rtx temp;
3049 rtx libfunc; 2701 rtx libfunc;
3050 2702
3051 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp); 2703 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3052 if (temp) 2704 if (temp)
3055 /* It can't be done in this mode. Can we open-code it in a wider mode? */ 2707 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3056 2708
3057 /* Widening (or narrowing) clz needs special treatment. */ 2709 /* Widening (or narrowing) clz needs special treatment. */
3058 if (unoptab == clz_optab) 2710 if (unoptab == clz_optab)
3059 { 2711 {
3060 temp = widen_clz (mode, op0, target); 2712 if (is_a <scalar_int_mode> (mode, &int_mode))
2713 {
2714 temp = widen_leading (int_mode, op0, target, unoptab);
2715 if (temp)
2716 return temp;
2717
2718 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2719 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2720 {
2721 temp = expand_doubleword_clz (int_mode, op0, target);
2722 if (temp)
2723 return temp;
2724 }
2725 }
2726
2727 goto try_libcall;
2728 }
2729
2730 if (unoptab == clrsb_optab)
2731 {
2732 if (is_a <scalar_int_mode> (mode, &int_mode))
2733 {
2734 temp = widen_leading (int_mode, op0, target, unoptab);
2735 if (temp)
2736 return temp;
2737 }
2738 goto try_libcall;
2739 }
2740
2741 if (unoptab == popcount_optab
2742 && is_a <scalar_int_mode> (mode, &int_mode)
2743 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2744 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2745 && optimize_insn_for_speed_p ())
2746 {
2747 temp = expand_doubleword_popcount (int_mode, op0, target);
3061 if (temp) 2748 if (temp)
3062 return temp; 2749 return temp;
3063 2750 }
3064 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD 2751
3065 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing) 2752 if (unoptab == parity_optab
3066 { 2753 && is_a <scalar_int_mode> (mode, &int_mode)
3067 temp = expand_doubleword_clz (mode, op0, target); 2754 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2755 && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2756 || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
2757 && optimize_insn_for_speed_p ())
2758 {
2759 temp = expand_doubleword_parity (int_mode, op0, target);
2760 if (temp)
2761 return temp;
2762 }
2763
2764 /* Widening (or narrowing) bswap needs special treatment. */
2765 if (unoptab == bswap_optab)
2766 {
2767 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
2768 or ROTATERT. First try these directly; if this fails, then try the
2769 obvious pair of shifts with allowed widening, as this will probably
2770 be always more efficient than the other fallback methods. */
2771 if (mode == HImode)
2772 {
2773 rtx_insn *last;
2774 rtx temp1, temp2;
2775
2776 if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
2777 {
2778 temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
2779 unsignedp, OPTAB_DIRECT);
2780 if (temp)
2781 return temp;
2782 }
2783
2784 if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
2785 {
2786 temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
2787 unsignedp, OPTAB_DIRECT);
2788 if (temp)
2789 return temp;
2790 }
2791
2792 last = get_last_insn ();
2793
2794 temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
2795 unsignedp, OPTAB_WIDEN);
2796 temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
2797 unsignedp, OPTAB_WIDEN);
2798 if (temp1 && temp2)
2799 {
2800 temp = expand_binop (mode, ior_optab, temp1, temp2, target,
2801 unsignedp, OPTAB_WIDEN);
2802 if (temp)
2803 return temp;
2804 }
2805
2806 delete_insns_since (last);
2807 }
2808
2809 if (is_a <scalar_int_mode> (mode, &int_mode))
2810 {
2811 temp = widen_bswap (int_mode, op0, target);
3068 if (temp) 2812 if (temp)
3069 return temp; 2813 return temp;
3070 } 2814
3071 2815 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3072 goto try_libcall; 2816 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3073 } 2817 {
3074 2818 temp = expand_doubleword_bswap (mode, op0, target);
3075 /* Widening (or narrowing) bswap needs special treatment. */ 2819 if (temp)
3076 if (unoptab == bswap_optab) 2820 return temp;
3077 { 2821 }
3078 temp = widen_bswap (mode, op0, target);
3079 if (temp)
3080 return temp;
3081
3082 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3083 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3084 {
3085 temp = expand_doubleword_bswap (mode, op0, target);
3086 if (temp)
3087 return temp;
3088 } 2822 }
3089 2823
3090 goto try_libcall; 2824 goto try_libcall;
3091 } 2825 }
3092 2826
3093 if (CLASS_HAS_WIDER_MODES_P (mclass)) 2827 if (CLASS_HAS_WIDER_MODES_P (mclass))
3094 for (wider_mode = GET_MODE_WIDER_MODE (mode); 2828 FOR_EACH_WIDER_MODE (wider_mode, mode)
3095 wider_mode != VOIDmode;
3096 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3097 { 2829 {
3098 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing) 2830 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
3099 { 2831 {
3100 rtx xop0 = op0; 2832 rtx xop0 = op0;
3101 rtx last = get_last_insn (); 2833 rtx_insn *last = get_last_insn ();
3102 2834
3103 /* For certain operations, we need not actually extend 2835 /* For certain operations, we need not actually extend
3104 the narrow operand, as long as we will truncate the 2836 the narrow operand, as long as we will truncate the
3105 results to the same narrowness. */ 2837 results to the same narrowness. */
3106 2838
3113 unsignedp); 2845 unsignedp);
3114 2846
3115 if (temp) 2847 if (temp)
3116 { 2848 {
3117 if (mclass != MODE_INT 2849 if (mclass != MODE_INT
3118 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), 2850 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
3119 GET_MODE_BITSIZE (wider_mode)))
3120 { 2851 {
3121 if (target == 0) 2852 if (target == 0)
3122 target = gen_reg_rtx (mode); 2853 target = gen_reg_rtx (mode);
3123 convert_move (target, temp, 0); 2854 convert_move (target, temp, 0);
3124 return target; 2855 return target;
3131 } 2862 }
3132 } 2863 }
3133 2864
3134 /* These can be done a word at a time. */ 2865 /* These can be done a word at a time. */
3135 if (unoptab == one_cmpl_optab 2866 if (unoptab == one_cmpl_optab
3136 && mclass == MODE_INT 2867 && is_int_mode (mode, &int_mode)
3137 && GET_MODE_SIZE (mode) > UNITS_PER_WORD 2868 && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
3138 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing) 2869 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3139 { 2870 {
3140 int i; 2871 int i;
3141 rtx insns; 2872 rtx_insn *insns;
3142 2873
3143 if (target == 0 || target == op0) 2874 if (target == 0 || target == op0 || !valid_multiword_target_p (target))
3144 target = gen_reg_rtx (mode); 2875 target = gen_reg_rtx (int_mode);
3145 2876
3146 start_sequence (); 2877 start_sequence ();
3147 2878
3148 /* Do the actual arithmetic. */ 2879 /* Do the actual arithmetic. */
3149 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++) 2880 for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
3150 { 2881 {
3151 rtx target_piece = operand_subword (target, i, 1, mode); 2882 rtx target_piece = operand_subword (target, i, 1, int_mode);
3152 rtx x = expand_unop (word_mode, unoptab, 2883 rtx x = expand_unop (word_mode, unoptab,
3153 operand_subword_force (op0, i, mode), 2884 operand_subword_force (op0, i, int_mode),
3154 target_piece, unsignedp); 2885 target_piece, unsignedp);
3155 2886
3156 if (target_piece != x) 2887 if (target_piece != x)
3157 emit_move_insn (target_piece, x); 2888 emit_move_insn (target_piece, x);
3158 } 2889 }
3162 2893
3163 emit_insn (insns); 2894 emit_insn (insns);
3164 return target; 2895 return target;
3165 } 2896 }
3166 2897
3167 if (unoptab->code == NEG) 2898 if (optab_to_code (unoptab) == NEG)
3168 { 2899 {
3169 /* Try negating floating point values by flipping the sign bit. */ 2900 /* Try negating floating point values by flipping the sign bit. */
3170 if (SCALAR_FLOAT_MODE_P (mode)) 2901 if (is_a <scalar_float_mode> (mode, &float_mode))
3171 { 2902 {
3172 temp = expand_absneg_bit (NEG, mode, op0, target); 2903 temp = expand_absneg_bit (NEG, float_mode, op0, target);
3173 if (temp) 2904 if (temp)
3174 return temp; 2905 return temp;
3175 } 2906 }
3176 2907
3177 /* If there is no negation pattern, and we have no negative zero, 2908 /* If there is no negation pattern, and we have no negative zero,
3186 return temp; 2917 return temp;
3187 } 2918 }
3188 } 2919 }
3189 2920
3190 /* Try calculating parity (x) as popcount (x) % 2. */ 2921 /* Try calculating parity (x) as popcount (x) % 2. */
3191 if (unoptab == parity_optab) 2922 if (unoptab == parity_optab && is_a <scalar_int_mode> (mode, &int_mode))
3192 { 2923 {
3193 temp = expand_parity (mode, op0, target); 2924 temp = expand_parity (int_mode, op0, target);
3194 if (temp) 2925 if (temp)
3195 return temp; 2926 return temp;
3196 } 2927 }
3197 2928
3198 /* Try implementing ffs (x) in terms of clz (x). */ 2929 /* Try implementing ffs (x) in terms of clz (x). */
3199 if (unoptab == ffs_optab) 2930 if (unoptab == ffs_optab && is_a <scalar_int_mode> (mode, &int_mode))
3200 { 2931 {
3201 temp = expand_ffs (mode, op0, target); 2932 temp = expand_ffs (int_mode, op0, target);
3202 if (temp) 2933 if (temp)
3203 return temp; 2934 return temp;
3204 } 2935 }
3205 2936
3206 /* Try implementing ctz (x) in terms of clz (x). */ 2937 /* Try implementing ctz (x) in terms of clz (x). */
3207 if (unoptab == ctz_optab) 2938 if (unoptab == ctz_optab && is_a <scalar_int_mode> (mode, &int_mode))
3208 { 2939 {
3209 temp = expand_ctz (mode, op0, target); 2940 temp = expand_ctz (int_mode, op0, target);
3210 if (temp) 2941 if (temp)
3211 return temp; 2942 return temp;
3212 } 2943 }
3213 2944
3214 try_libcall: 2945 try_libcall:
3215 /* Now try a library call in this mode. */ 2946 /* Now try a library call in this mode. */
3216 libfunc = optab_libfunc (unoptab, mode); 2947 libfunc = optab_libfunc (unoptab, mode);
3217 if (libfunc) 2948 if (libfunc)
3218 { 2949 {
3219 rtx insns; 2950 rtx_insn *insns;
3220 rtx value; 2951 rtx value;
3221 rtx eq_value; 2952 rtx eq_value;
3222 enum machine_mode outmode = mode; 2953 machine_mode outmode = mode;
3223 2954
3224 /* All of these functions return small values. Thus we choose to 2955 /* All of these functions return small values. Thus we choose to
3225 have them return something that isn't a double-word. */ 2956 have them return something that isn't a double-word. */
3226 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab 2957 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3227 || unoptab == popcount_optab || unoptab == parity_optab) 2958 || unoptab == clrsb_optab || unoptab == popcount_optab
2959 || unoptab == parity_optab)
3228 outmode 2960 outmode
3229 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node), 2961 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
3230 optab_libfunc (unoptab, mode))); 2962 optab_libfunc (unoptab, mode)));
3231 2963
3232 start_sequence (); 2964 start_sequence ();
3233 2965
3234 /* Pass 1 for NO_QUEUE so we don't lose any increments 2966 /* Pass 1 for NO_QUEUE so we don't lose any increments
3235 if the libcall is cse'd or moved. */ 2967 if the libcall is cse'd or moved. */
3236 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode, 2968 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3237 1, op0, mode); 2969 op0, mode);
3238 insns = get_insns (); 2970 insns = get_insns ();
3239 end_sequence (); 2971 end_sequence ();
3240 2972
3241 target = gen_reg_rtx (outmode); 2973 target = gen_reg_rtx (outmode);
3242 eq_value = gen_rtx_fmt_e (unoptab->code, mode, op0); 2974 bool trapv = trapv_unoptab_p (unoptab);
3243 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode)) 2975 if (trapv)
3244 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode); 2976 eq_value = NULL_RTX;
3245 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode)) 2977 else
3246 eq_value = simplify_gen_unary (ZERO_EXTEND, outmode, eq_value, mode); 2978 {
3247 emit_libcall_block (insns, target, value, eq_value); 2979 eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
2980 if (GET_MODE_UNIT_SIZE (outmode) < GET_MODE_UNIT_SIZE (mode))
2981 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
2982 else if (GET_MODE_UNIT_SIZE (outmode) > GET_MODE_UNIT_SIZE (mode))
2983 eq_value = simplify_gen_unary (ZERO_EXTEND,
2984 outmode, eq_value, mode);
2985 }
2986 emit_libcall_block_1 (insns, target, value, eq_value, trapv);
3248 2987
3249 return target; 2988 return target;
3250 } 2989 }
3251 2990
3252 /* It can't be done in this mode. Can we do it in a wider mode? */ 2991 /* It can't be done in this mode. Can we do it in a wider mode? */
3253 2992
3254 if (CLASS_HAS_WIDER_MODES_P (mclass)) 2993 if (CLASS_HAS_WIDER_MODES_P (mclass))
3255 { 2994 {
3256 for (wider_mode = GET_MODE_WIDER_MODE (mode); 2995 FOR_EACH_WIDER_MODE (wider_mode, mode)
3257 wider_mode != VOIDmode;
3258 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3259 { 2996 {
3260 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing 2997 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
3261 || optab_libfunc (unoptab, wider_mode)) 2998 || optab_libfunc (unoptab, wider_mode))
3262 { 2999 {
3263 rtx xop0 = op0; 3000 rtx xop0 = op0;
3264 rtx last = get_last_insn (); 3001 rtx_insn *last = get_last_insn ();
3265 3002
3266 /* For certain operations, we need not actually extend 3003 /* For certain operations, we need not actually extend
3267 the narrow operand, as long as we will truncate the 3004 the narrow operand, as long as we will truncate the
3268 results to the same narrowness. */ 3005 results to the same narrowness. */
3269
3270 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, 3006 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3271 (unoptab == neg_optab 3007 (unoptab == neg_optab
3272 || unoptab == one_cmpl_optab) 3008 || unoptab == one_cmpl_optab
3009 || unoptab == bswap_optab)
3273 && mclass == MODE_INT); 3010 && mclass == MODE_INT);
3274 3011
3275 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX, 3012 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3276 unsignedp); 3013 unsignedp);
3277 3014
3278 /* If we are generating clz using wider mode, adjust the 3015 /* If we are generating clz using wider mode, adjust the
3279 result. */ 3016 result. Similarly for clrsb. */
3280 if (unoptab == clz_optab && temp != 0) 3017 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3281 temp = expand_binop (wider_mode, sub_optab, temp, 3018 && temp != 0)
3282 GEN_INT (GET_MODE_BITSIZE (wider_mode) 3019 {
3283 - GET_MODE_BITSIZE (mode)), 3020 scalar_int_mode wider_int_mode
3284 target, true, OPTAB_DIRECT); 3021 = as_a <scalar_int_mode> (wider_mode);
3022 int_mode = as_a <scalar_int_mode> (mode);
3023 temp = expand_binop
3024 (wider_mode, sub_optab, temp,
3025 gen_int_mode (GET_MODE_PRECISION (wider_int_mode)
3026 - GET_MODE_PRECISION (int_mode),
3027 wider_int_mode),
3028 target, true, OPTAB_DIRECT);
3029 }
3030
3031 /* Likewise for bswap. */
3032 if (unoptab == bswap_optab && temp != 0)
3033 {
3034 scalar_int_mode wider_int_mode
3035 = as_a <scalar_int_mode> (wider_mode);
3036 int_mode = as_a <scalar_int_mode> (mode);
3037 gcc_assert (GET_MODE_PRECISION (wider_int_mode)
3038 == GET_MODE_BITSIZE (wider_int_mode)
3039 && GET_MODE_PRECISION (int_mode)
3040 == GET_MODE_BITSIZE (int_mode));
3041
3042 temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp,
3043 GET_MODE_BITSIZE (wider_int_mode)
3044 - GET_MODE_BITSIZE (int_mode),
3045 NULL_RTX, true);
3046 }
3285 3047
3286 if (temp) 3048 if (temp)
3287 { 3049 {
3288 if (mclass != MODE_INT) 3050 if (mclass != MODE_INT)
3289 { 3051 {
3301 } 3063 }
3302 } 3064 }
3303 3065
3304 /* One final attempt at implementing negation via subtraction, 3066 /* One final attempt at implementing negation via subtraction,
3305 this time allowing widening of the operand. */ 3067 this time allowing widening of the operand. */
3306 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode)) 3068 if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
3307 { 3069 {
3308 rtx temp; 3070 rtx temp;
3309 temp = expand_binop (mode, 3071 temp = expand_binop (mode,
3310 unoptab == negv_optab ? subv_optab : sub_optab, 3072 unoptab == negv_optab ? subv_optab : sub_optab,
3311 CONST0_RTX (mode), op0, 3073 CONST0_RTX (mode), op0,
3325 different but can be deduced from MODE. 3087 different but can be deduced from MODE.
3326 3088
3327 */ 3089 */
3328 3090
3329 rtx 3091 rtx
3330 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target, 3092 expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
3331 int result_unsignedp) 3093 int result_unsignedp)
3332 { 3094 {
3333 rtx temp; 3095 rtx temp;
3334 3096
3335 if (! flag_trapv) 3097 if (GET_MODE_CLASS (mode) != MODE_INT
3098 || ! flag_trapv)
3336 result_unsignedp = 1; 3099 result_unsignedp = 1;
3337 3100
3338 /* First try to do it with a special abs instruction. */ 3101 /* First try to do it with a special abs instruction. */
3339 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab, 3102 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3340 op0, target, 0); 3103 op0, target, 0);
3341 if (temp != 0) 3104 if (temp != 0)
3342 return temp; 3105 return temp;
3343 3106
3344 /* For floating point modes, try clearing the sign bit. */ 3107 /* For floating point modes, try clearing the sign bit. */
3345 if (SCALAR_FLOAT_MODE_P (mode)) 3108 scalar_float_mode float_mode;
3346 { 3109 if (is_a <scalar_float_mode> (mode, &float_mode))
3347 temp = expand_absneg_bit (ABS, mode, op0, target); 3110 {
3111 temp = expand_absneg_bit (ABS, float_mode, op0, target);
3348 if (temp) 3112 if (temp)
3349 return temp; 3113 return temp;
3350 } 3114 }
3351 3115
3352 /* If we have a MAX insn, we can do this as MAX (x, -x). */ 3116 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3353 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing 3117 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3354 && !HONOR_SIGNED_ZEROS (mode)) 3118 && !HONOR_SIGNED_ZEROS (mode))
3355 { 3119 {
3356 rtx last = get_last_insn (); 3120 rtx_insn *last = get_last_insn ();
3357 3121
3358 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0); 3122 temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3123 op0, NULL_RTX, 0);
3359 if (temp != 0) 3124 if (temp != 0)
3360 temp = expand_binop (mode, smax_optab, op0, temp, target, 0, 3125 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3361 OPTAB_WIDEN); 3126 OPTAB_WIDEN);
3362 3127
3363 if (temp != 0) 3128 if (temp != 0)
3368 3133
3369 /* If this machine has expensive jumps, we can do integer absolute 3134 /* If this machine has expensive jumps, we can do integer absolute
3370 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)), 3135 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3371 where W is the width of MODE. */ 3136 where W is the width of MODE. */
3372 3137
3373 if (GET_MODE_CLASS (mode) == MODE_INT 3138 scalar_int_mode int_mode;
3139 if (is_int_mode (mode, &int_mode)
3374 && BRANCH_COST (optimize_insn_for_speed_p (), 3140 && BRANCH_COST (optimize_insn_for_speed_p (),
3375 false) >= 2) 3141 false) >= 2)
3376 { 3142 {
3377 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0, 3143 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3378 size_int (GET_MODE_BITSIZE (mode) - 1), 3144 GET_MODE_PRECISION (int_mode) - 1,
3379 NULL_RTX, 0); 3145 NULL_RTX, 0);
3380 3146
3381 temp = expand_binop (mode, xor_optab, extended, op0, target, 0, 3147 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3382 OPTAB_LIB_WIDEN); 3148 OPTAB_LIB_WIDEN);
3383 if (temp != 0) 3149 if (temp != 0)
3384 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab, 3150 temp = expand_binop (int_mode,
3151 result_unsignedp ? sub_optab : subv_optab,
3385 temp, extended, target, 0, OPTAB_LIB_WIDEN); 3152 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3386 3153
3387 if (temp != 0) 3154 if (temp != 0)
3388 return temp; 3155 return temp;
3389 } 3156 }
3390 3157
3391 return NULL_RTX; 3158 return NULL_RTX;
3392 } 3159 }
3393 3160
3394 rtx 3161 rtx
3395 expand_abs (enum machine_mode mode, rtx op0, rtx target, 3162 expand_abs (machine_mode mode, rtx op0, rtx target,
3396 int result_unsignedp, int safe) 3163 int result_unsignedp, int safe)
3397 { 3164 {
3398 rtx temp, op1; 3165 rtx temp;
3399 3166 rtx_code_label *op1;
3400 if (! flag_trapv) 3167
3168 if (GET_MODE_CLASS (mode) != MODE_INT
3169 || ! flag_trapv)
3401 result_unsignedp = 1; 3170 result_unsignedp = 1;
3402 3171
3403 temp = expand_abs_nojump (mode, op0, target, result_unsignedp); 3172 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3404 if (temp != 0) 3173 if (temp != 0)
3405 return temp; 3174 return temp;
3422 3191
3423 emit_move_insn (target, op0); 3192 emit_move_insn (target, op0);
3424 NO_DEFER_POP; 3193 NO_DEFER_POP;
3425 3194
3426 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode, 3195 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3427 NULL_RTX, NULL_RTX, op1, -1); 3196 NULL_RTX, NULL, op1,
3197 profile_probability::uninitialized ());
3428 3198
3429 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab, 3199 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3430 target, target, 0); 3200 target, target, 0);
3431 if (op0 != target) 3201 if (op0 != target)
3432 emit_move_insn (target, op0); 3202 emit_move_insn (target, op0);
3442 3212
3443 MODE is the mode of the operand; the mode of the result is 3213 MODE is the mode of the operand; the mode of the result is
3444 different but can be deduced from MODE. */ 3214 different but can be deduced from MODE. */
3445 3215
3446 rtx 3216 rtx
3447 expand_one_cmpl_abs_nojump (enum machine_mode mode, rtx op0, rtx target) 3217 expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
3448 { 3218 {
3449 rtx temp; 3219 rtx temp;
3450 3220
3451 /* Not applicable for floating point modes. */ 3221 /* Not applicable for floating point modes. */
3452 if (FLOAT_MODE_P (mode)) 3222 if (FLOAT_MODE_P (mode))
3453 return NULL_RTX; 3223 return NULL_RTX;
3454 3224
3455 /* If we have a MAX insn, we can do this as MAX (x, ~x). */ 3225 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3456 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing) 3226 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3457 { 3227 {
3458 rtx last = get_last_insn (); 3228 rtx_insn *last = get_last_insn ();
3459 3229
3460 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0); 3230 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3461 if (temp != 0) 3231 if (temp != 0)
3462 temp = expand_binop (mode, smax_optab, op0, temp, target, 0, 3232 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3463 OPTAB_WIDEN); 3233 OPTAB_WIDEN);
3469 } 3239 }
3470 3240
3471 /* If this machine has expensive jumps, we can do one's complement 3241 /* If this machine has expensive jumps, we can do one's complement
3472 absolute value of X as (((signed) x >> (W-1)) ^ x). */ 3242 absolute value of X as (((signed) x >> (W-1)) ^ x). */
3473 3243
3474 if (GET_MODE_CLASS (mode) == MODE_INT 3244 scalar_int_mode int_mode;
3245 if (is_int_mode (mode, &int_mode)
3475 && BRANCH_COST (optimize_insn_for_speed_p (), 3246 && BRANCH_COST (optimize_insn_for_speed_p (),
3476 false) >= 2) 3247 false) >= 2)
3477 { 3248 {
3478 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0, 3249 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3479 size_int (GET_MODE_BITSIZE (mode) - 1), 3250 GET_MODE_PRECISION (int_mode) - 1,
3480 NULL_RTX, 0); 3251 NULL_RTX, 0);
3481 3252
3482 temp = expand_binop (mode, xor_optab, extended, op0, target, 0, 3253 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3483 OPTAB_LIB_WIDEN); 3254 OPTAB_LIB_WIDEN);
3484 3255
3485 if (temp != 0) 3256 if (temp != 0)
3486 return temp; 3257 return temp;
3487 } 3258 }
3493 abs and neg primitives advertised to exist on the target. The assumption 3264 abs and neg primitives advertised to exist on the target. The assumption
3494 is that we have a split register file, and leaving op0 in fp registers, 3265 is that we have a split register file, and leaving op0 in fp registers,
3495 and not playing with subregs so much, will help the register allocator. */ 3266 and not playing with subregs so much, will help the register allocator. */
3496 3267
3497 static rtx 3268 static rtx
3498 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target, 3269 expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3499 int bitpos, bool op0_is_abs) 3270 int bitpos, bool op0_is_abs)
3500 { 3271 {
3501 enum machine_mode imode; 3272 scalar_int_mode imode;
3502 int icode; 3273 enum insn_code icode;
3503 rtx sign, label; 3274 rtx sign;
3275 rtx_code_label *label;
3504 3276
3505 if (target == op1) 3277 if (target == op1)
3506 target = NULL_RTX; 3278 target = NULL_RTX;
3507 3279
3508 /* Check if the back end provides an insn that handles signbit for the 3280 /* Check if the back end provides an insn that handles signbit for the
3509 argument's mode. */ 3281 argument's mode. */
3510 icode = (int) optab_handler (signbit_optab, mode); 3282 icode = optab_handler (signbit_optab, mode);
3511 if (icode != CODE_FOR_nothing) 3283 if (icode != CODE_FOR_nothing)
3512 { 3284 {
3513 imode = insn_data[icode].operand[0].mode; 3285 imode = as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
3514 sign = gen_reg_rtx (imode); 3286 sign = gen_reg_rtx (imode);
3515 emit_unop_insn (icode, sign, op1, UNKNOWN); 3287 emit_unop_insn (icode, sign, op1, UNKNOWN);
3516 } 3288 }
3517 else 3289 else
3518 { 3290 {
3519 double_int mask;
3520
3521 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD) 3291 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3522 { 3292 {
3523 imode = int_mode_for_mode (mode); 3293 if (!int_mode_for_mode (mode).exists (&imode))
3524 if (imode == BLKmode)
3525 return NULL_RTX; 3294 return NULL_RTX;
3526 op1 = gen_lowpart (imode, op1); 3295 op1 = gen_lowpart (imode, op1);
3527 } 3296 }
3528 else 3297 else
3529 { 3298 {
3536 word = bitpos / BITS_PER_WORD; 3305 word = bitpos / BITS_PER_WORD;
3537 bitpos = bitpos % BITS_PER_WORD; 3306 bitpos = bitpos % BITS_PER_WORD;
3538 op1 = operand_subword_force (op1, word, mode); 3307 op1 = operand_subword_force (op1, word, mode);
3539 } 3308 }
3540 3309
3541 mask = double_int_setbit (double_int_zero, bitpos); 3310 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3542
3543 sign = expand_binop (imode, and_optab, op1, 3311 sign = expand_binop (imode, and_optab, op1,
3544 immed_double_int_const (mask, imode), 3312 immed_wide_int_const (mask, imode),
3545 NULL_RTX, 1, OPTAB_LIB_WIDEN); 3313 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3546 } 3314 }
3547 3315
3548 if (!op0_is_abs) 3316 if (!op0_is_abs)
3549 { 3317 {
3561 } 3329 }
3562 3330
3563 label = gen_label_rtx (); 3331 label = gen_label_rtx ();
3564 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label); 3332 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3565 3333
3566 if (GET_CODE (op0) == CONST_DOUBLE) 3334 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3567 op0 = simplify_unary_operation (NEG, mode, op0, mode); 3335 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3568 else 3336 else
3569 op0 = expand_unop (mode, neg_optab, op0, target, 0); 3337 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3570 if (op0 != target) 3338 if (op0 != target)
3571 emit_move_insn (target, op0); 3339 emit_move_insn (target, op0);
3579 /* A subroutine of expand_copysign, perform the entire copysign operation 3347 /* A subroutine of expand_copysign, perform the entire copysign operation
3580 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS 3348 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3581 is true if op0 is known to have its sign bit clear. */ 3349 is true if op0 is known to have its sign bit clear. */
3582 3350
3583 static rtx 3351 static rtx
3584 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target, 3352 expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3585 int bitpos, bool op0_is_abs) 3353 int bitpos, bool op0_is_abs)
3586 { 3354 {
3587 enum machine_mode imode; 3355 scalar_int_mode imode;
3588 double_int mask;
3589 int word, nwords, i; 3356 int word, nwords, i;
3590 rtx temp, insns; 3357 rtx temp;
3358 rtx_insn *insns;
3591 3359
3592 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD) 3360 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3593 { 3361 {
3594 imode = int_mode_for_mode (mode); 3362 if (!int_mode_for_mode (mode).exists (&imode))
3595 if (imode == BLKmode)
3596 return NULL_RTX; 3363 return NULL_RTX;
3597 word = 0; 3364 word = 0;
3598 nwords = 1; 3365 nwords = 1;
3599 } 3366 }
3600 else 3367 else
3607 word = bitpos / BITS_PER_WORD; 3374 word = bitpos / BITS_PER_WORD;
3608 bitpos = bitpos % BITS_PER_WORD; 3375 bitpos = bitpos % BITS_PER_WORD;
3609 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD; 3376 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3610 } 3377 }
3611 3378
3612 mask = double_int_setbit (double_int_zero, bitpos); 3379 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3613 3380
3614 if (target == 0 || target == op0 || target == op1) 3381 if (target == 0
3382 || target == op0
3383 || target == op1
3384 || (nwords > 1 && !valid_multiword_target_p (target)))
3615 target = gen_reg_rtx (mode); 3385 target = gen_reg_rtx (mode);
3616 3386
3617 if (nwords > 1) 3387 if (nwords > 1)
3618 { 3388 {
3619 start_sequence (); 3389 start_sequence ();
3626 if (i == word) 3396 if (i == word)
3627 { 3397 {
3628 if (!op0_is_abs) 3398 if (!op0_is_abs)
3629 op0_piece 3399 op0_piece
3630 = expand_binop (imode, and_optab, op0_piece, 3400 = expand_binop (imode, and_optab, op0_piece,
3631 immed_double_int_const (double_int_not (mask), 3401 immed_wide_int_const (~mask, imode),
3632 imode),
3633 NULL_RTX, 1, OPTAB_LIB_WIDEN); 3402 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3634
3635 op1 = expand_binop (imode, and_optab, 3403 op1 = expand_binop (imode, and_optab,
3636 operand_subword_force (op1, i, mode), 3404 operand_subword_force (op1, i, mode),
3637 immed_double_int_const (mask, imode), 3405 immed_wide_int_const (mask, imode),
3638 NULL_RTX, 1, OPTAB_LIB_WIDEN); 3406 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3639 3407
3640 temp = expand_binop (imode, ior_optab, op0_piece, op1, 3408 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3641 targ_piece, 1, OPTAB_LIB_WIDEN); 3409 targ_piece, 1, OPTAB_LIB_WIDEN);
3642 if (temp != targ_piece) 3410 if (temp != targ_piece)
3652 emit_insn (insns); 3420 emit_insn (insns);
3653 } 3421 }
3654 else 3422 else
3655 { 3423 {
3656 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1), 3424 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3657 immed_double_int_const (mask, imode), 3425 immed_wide_int_const (mask, imode),
3658 NULL_RTX, 1, OPTAB_LIB_WIDEN); 3426 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3659 3427
3660 op0 = gen_lowpart (imode, op0); 3428 op0 = gen_lowpart (imode, op0);
3661 if (!op0_is_abs) 3429 if (!op0_is_abs)
3662 op0 = expand_binop (imode, and_optab, op0, 3430 op0 = expand_binop (imode, and_optab, op0,
3663 immed_double_int_const (double_int_not (mask), 3431 immed_wide_int_const (~mask, imode),
3664 imode),
3665 NULL_RTX, 1, OPTAB_LIB_WIDEN); 3432 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3666 3433
3667 temp = expand_binop (imode, ior_optab, op0, op1, 3434 temp = expand_binop (imode, ior_optab, op0, op1,
3668 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN); 3435 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3669 target = lowpart_subreg_maybe_copy (mode, temp, imode); 3436 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3677 expand the operation inline. */ 3444 expand the operation inline. */
3678 3445
3679 rtx 3446 rtx
3680 expand_copysign (rtx op0, rtx op1, rtx target) 3447 expand_copysign (rtx op0, rtx op1, rtx target)
3681 { 3448 {
3682 enum machine_mode mode = GET_MODE (op0); 3449 scalar_float_mode mode;
3683 const struct real_format *fmt; 3450 const struct real_format *fmt;
3684 bool op0_is_abs; 3451 bool op0_is_abs;
3685 rtx temp; 3452 rtx temp;
3686 3453
3687 gcc_assert (SCALAR_FLOAT_MODE_P (mode)); 3454 mode = as_a <scalar_float_mode> (GET_MODE (op0));
3688 gcc_assert (GET_MODE (op1) == mode); 3455 gcc_assert (GET_MODE (op1) == mode);
3689 3456
3690 /* First try to do it with a special instruction. */ 3457 /* First try to do it with a special instruction. */
3691 temp = expand_binop (mode, copysign_optab, op0, op1, 3458 temp = expand_binop (mode, copysign_optab, op0, op1,
3692 target, 0, OPTAB_DIRECT); 3459 target, 0, OPTAB_DIRECT);
3696 fmt = REAL_MODE_FORMAT (mode); 3463 fmt = REAL_MODE_FORMAT (mode);
3697 if (fmt == NULL || !fmt->has_signed_zero) 3464 if (fmt == NULL || !fmt->has_signed_zero)
3698 return NULL_RTX; 3465 return NULL_RTX;
3699 3466
3700 op0_is_abs = false; 3467 op0_is_abs = false;
3701 if (GET_CODE (op0) == CONST_DOUBLE) 3468 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3702 { 3469 {
3703 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0))) 3470 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3704 op0 = simplify_unary_operation (ABS, mode, op0, mode); 3471 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3705 op0_is_abs = true; 3472 op0_is_abs = true;
3706 } 3473 }
3707 3474
3708 if (fmt->signbit_ro >= 0 3475 if (fmt->signbit_ro >= 0
3709 && (GET_CODE (op0) == CONST_DOUBLE 3476 && (CONST_DOUBLE_AS_FLOAT_P (op0)
3710 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing 3477 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3711 && optab_handler (abs_optab, mode) != CODE_FOR_nothing))) 3478 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3712 { 3479 {
3713 temp = expand_copysign_absneg (mode, op0, op1, target, 3480 temp = expand_copysign_absneg (mode, op0, op1, target,
3714 fmt->signbit_ro, op0_is_abs); 3481 fmt->signbit_ro, op0_is_abs);
3729 the value that is stored into TARGET. 3496 the value that is stored into TARGET.
3730 3497
3731 Return false if expansion failed. */ 3498 Return false if expansion failed. */
3732 3499
3733 bool 3500 bool
3734 maybe_emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code) 3501 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3735 { 3502 enum rtx_code code)
3736 rtx temp; 3503 {
3737 enum machine_mode mode0 = insn_data[icode].operand[1].mode; 3504 struct expand_operand ops[2];
3738 rtx pat; 3505 rtx_insn *pat;
3739 rtx last = get_last_insn (); 3506
3740 3507 create_output_operand (&ops[0], target, GET_MODE (target));
3741 temp = target; 3508 create_input_operand (&ops[1], op0, GET_MODE (op0));
3742 3509 pat = maybe_gen_insn (icode, 2, ops);
3743 /* Now, if insn does not accept our operands, put them into pseudos. */
3744
3745 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3746 op0 = copy_to_mode_reg (mode0, op0);
3747
3748 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3749 temp = gen_reg_rtx (GET_MODE (temp));
3750
3751 pat = GEN_FCN (icode) (temp, op0);
3752 if (!pat) 3510 if (!pat)
3753 { 3511 return false;
3754 delete_insns_since (last); 3512
3755 return false; 3513 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3756 } 3514 && code != UNKNOWN)
3757 3515 add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
3758 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3759 add_equal_note (pat, temp, code, op0, NULL_RTX);
3760 3516
3761 emit_insn (pat); 3517 emit_insn (pat);
3762 3518
3763 if (temp != target) 3519 if (ops[0].value != target)
3764 emit_move_insn (target, temp); 3520 emit_move_insn (target, ops[0].value);
3765 return true; 3521 return true;
3766 } 3522 }
3767 /* Generate an instruction whose insn-code is INSN_CODE, 3523 /* Generate an instruction whose insn-code is INSN_CODE,
3768 with two operands: an output TARGET and an input OP0. 3524 with two operands: an output TARGET and an input OP0.
3769 TARGET *must* be nonzero, and the output is always stored there. 3525 TARGET *must* be nonzero, and the output is always stored there.
3770 CODE is an rtx code such that (CODE OP0) is an rtx that describes 3526 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3771 the value that is stored into TARGET. */ 3527 the value that is stored into TARGET. */
3772 3528
3773 void 3529 void
3774 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code) 3530 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3775 { 3531 {
3776 bool ok = maybe_emit_unop_insn (icode, target, op0, code); 3532 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3777 gcc_assert (ok); 3533 gcc_assert (ok);
3778 } 3534 }
3779 3535
3780 struct no_conflict_data 3536 struct no_conflict_data
3781 { 3537 {
3782 rtx target, first, insn; 3538 rtx target;
3539 rtx_insn *first, *insn;
3783 bool must_stay; 3540 bool must_stay;
3784 }; 3541 };
3785 3542
3786 /* Called via note_stores by emit_libcall_block. Set P->must_stay if 3543 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3787 the currently examined clobber / store has to stay in the list of 3544 the currently examined clobber / store has to stay in the list of
3828 loading constants into registers; doing so allows them to be safely cse'ed 3585 loading constants into registers; doing so allows them to be safely cse'ed
3829 between blocks. Then we emit all the other insns in the block, followed by 3586 between blocks. Then we emit all the other insns in the block, followed by
3830 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL 3587 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3831 note with an operand of EQUIV. */ 3588 note with an operand of EQUIV. */
3832 3589
3833 void 3590 static void
3834 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv) 3591 emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
3592 bool equiv_may_trap)
3835 { 3593 {
3836 rtx final_dest = target; 3594 rtx final_dest = target;
3837 rtx next, last, insn; 3595 rtx_insn *next, *last, *insn;
3838 3596
3839 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn 3597 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3840 into a MEM later. Protect the libcall block from this change. */ 3598 into a MEM later. Protect the libcall block from this change. */
3841 if (! REG_P (target) || REG_USERVAR_P (target)) 3599 if (! REG_P (target) || REG_USERVAR_P (target))
3842 target = gen_reg_rtx (GET_MODE (target)); 3600 target = gen_reg_rtx (GET_MODE (target));
3843 3601
3844 /* If we're using non-call exceptions, a libcall corresponding to an 3602 /* If we're using non-call exceptions, a libcall corresponding to an
3845 operation that may trap may also trap. */ 3603 operation that may trap may also trap. */
3846 /* ??? See the comment in front of make_reg_eh_region_note. */ 3604 /* ??? See the comment in front of make_reg_eh_region_note. */
3847 if (cfun->can_throw_non_call_exceptions && may_trap_p (equiv)) 3605 if (cfun->can_throw_non_call_exceptions
3606 && (equiv_may_trap || may_trap_p (equiv)))
3848 { 3607 {
3849 for (insn = insns; insn; insn = NEXT_INSN (insn)) 3608 for (insn = insns; insn; insn = NEXT_INSN (insn))
3850 if (CALL_P (insn)) 3609 if (CALL_P (insn))
3851 { 3610 {
3852 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); 3611 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3892 data.must_stay = 0; 3651 data.must_stay = 0;
3893 note_stores (PATTERN (insn), no_conflict_move_test, &data); 3652 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3894 if (! data.must_stay) 3653 if (! data.must_stay)
3895 { 3654 {
3896 if (PREV_INSN (insn)) 3655 if (PREV_INSN (insn))
3897 NEXT_INSN (PREV_INSN (insn)) = next; 3656 SET_NEXT_INSN (PREV_INSN (insn)) = next;
3898 else 3657 else
3899 insns = next; 3658 insns = next;
3900 3659
3901 if (next) 3660 if (next)
3902 PREV_INSN (next) = PREV_INSN (insn); 3661 SET_PREV_INSN (next) = PREV_INSN (insn);
3903 3662
3904 add_insn (insn); 3663 add_insn (insn);
3905 } 3664 }
3906 } 3665 }
3907 3666
3918 3677
3919 add_insn (insn); 3678 add_insn (insn);
3920 } 3679 }
3921 3680
3922 last = emit_move_insn (target, result); 3681 last = emit_move_insn (target, result);
3923 if (optab_handler (mov_optab, GET_MODE (target)) != CODE_FOR_nothing) 3682 if (equiv)
3924 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv)); 3683 set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
3925 3684
3926 if (final_dest != target) 3685 if (final_dest != target)
3927 emit_move_insn (final_dest, target); 3686 emit_move_insn (final_dest, target);
3687 }
3688
3689 void
3690 emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
3691 {
3692 emit_libcall_block_1 (insns, target, result, equiv, false);
3928 } 3693 }
3929 3694
3930 /* Nonzero if we can perform a comparison of mode MODE straightforwardly. 3695 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3931 PURPOSE describes how this comparison will be used. CODE is the rtx 3696 PURPOSE describes how this comparison will be used. CODE is the rtx
3932 comparison code we will be using. 3697 comparison code we will be using.
3934 ??? Actually, CODE is slightly weaker than that. A target is still 3699 ??? Actually, CODE is slightly weaker than that. A target is still
3935 required to implement all of the normal bcc operations, but not 3700 required to implement all of the normal bcc operations, but not
3936 required to implement all (or any) of the unordered bcc operations. */ 3701 required to implement all (or any) of the unordered bcc operations. */
3937 3702
3938 int 3703 int
3939 can_compare_p (enum rtx_code code, enum machine_mode mode, 3704 can_compare_p (enum rtx_code code, machine_mode mode,
3940 enum can_compare_purpose purpose) 3705 enum can_compare_purpose purpose)
3941 { 3706 {
3942 rtx test; 3707 rtx test;
3943 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx); 3708 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3944 do 3709 do
3945 { 3710 {
3946 int icode; 3711 enum insn_code icode;
3947 3712
3948 if (purpose == ccp_jump 3713 if (purpose == ccp_jump
3949 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing 3714 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
3950 && insn_data[icode].operand[0].predicate (test, mode)) 3715 && insn_operand_matches (icode, 0, test))
3951 return 1; 3716 return 1;
3952 if (purpose == ccp_store_flag 3717 if (purpose == ccp_store_flag
3953 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing 3718 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
3954 && insn_data[icode].operand[1].predicate (test, mode)) 3719 && insn_operand_matches (icode, 1, test))
3955 return 1; 3720 return 1;
3956 if (purpose == ccp_cmov 3721 if (purpose == ccp_cmov
3957 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing) 3722 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
3958 return 1; 3723 return 1;
3959 3724
3960 mode = GET_MODE_WIDER_MODE (mode); 3725 mode = GET_MODE_WIDER_MODE (mode).else_void ();
3961 PUT_MODE (test, mode); 3726 PUT_MODE (test, mode);
3962 } 3727 }
3963 while (mode != VOIDmode); 3728 while (mode != VOIDmode);
3964 3729
3965 return 0; 3730 return 0;
3966 } 3731 }
3967 3732
3968 /* This function is called when we are going to emit a compare instruction that 3733 /* This function is called when we are going to emit a compare instruction that
3969 compares the values found in *PX and *PY, using the rtl operator COMPARISON. 3734 compares the values found in X and Y, using the rtl operator COMPARISON.
3735
3736 If they have mode BLKmode, then SIZE specifies the size of both operands.
3737
3738 UNSIGNEDP nonzero says that the operands are unsigned;
3739 this matters if they need to be widened (as given by METHODS).
3740
3741 *PTEST is where the resulting comparison RTX is returned or NULL_RTX
3742 if we failed to produce one.
3970 3743
3971 *PMODE is the mode of the inputs (in case they are const_int). 3744 *PMODE is the mode of the inputs (in case they are const_int).
3972 *PUNSIGNEDP nonzero says that the operands are unsigned;
3973 this matters if they need to be widened (as given by METHODS).
3974
3975 If they have mode BLKmode, then SIZE specifies the size of both operands.
3976 3745
3977 This function performs all the setup necessary so that the caller only has 3746 This function performs all the setup necessary so that the caller only has
3978 to emit a single comparison insn. This setup can involve doing a BLKmode 3747 to emit a single comparison insn. This setup can involve doing a BLKmode
3979 comparison or emitting a library call to perform the comparison if no insn 3748 comparison or emitting a library call to perform the comparison if no insn
3980 is available to handle it. 3749 is available to handle it.
3983 comparisons must have already been folded. */ 3752 comparisons must have already been folded. */
3984 3753
3985 static void 3754 static void
3986 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size, 3755 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3987 int unsignedp, enum optab_methods methods, 3756 int unsignedp, enum optab_methods methods,
3988 rtx *ptest, enum machine_mode *pmode) 3757 rtx *ptest, machine_mode *pmode)
3989 { 3758 {
3990 enum machine_mode mode = *pmode; 3759 machine_mode mode = *pmode;
3991 rtx libfunc, test; 3760 rtx libfunc, test;
3992 enum machine_mode cmp_mode; 3761 machine_mode cmp_mode;
3993 enum mode_class mclass; 3762 enum mode_class mclass;
3994 3763
3995 /* The other methods are not needed. */ 3764 /* The other methods are not needed. */
3996 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN 3765 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
3997 || methods == OPTAB_LIB_WIDEN); 3766 || methods == OPTAB_LIB_WIDEN);
3998 3767
3999 /* If we are optimizing, force expensive constants into a register. */ 3768 /* If we are optimizing, force expensive constants into a register. */
4000 if (CONSTANT_P (x) && optimize 3769 if (CONSTANT_P (x) && optimize
4001 && (rtx_cost (x, COMPARE, optimize_insn_for_speed_p ()) 3770 && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
4002 > COSTS_N_INSNS (1))) 3771 > COSTS_N_INSNS (1)))
4003 x = force_reg (mode, x); 3772 x = force_reg (mode, x);
4004 3773
4005 if (CONSTANT_P (y) && optimize 3774 if (CONSTANT_P (y) && optimize
4006 && (rtx_cost (y, COMPARE, optimize_insn_for_speed_p ()) 3775 && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
4007 > COSTS_N_INSNS (1))) 3776 > COSTS_N_INSNS (1)))
4008 y = force_reg (mode, y); 3777 y = force_reg (mode, y);
4009 3778
4010 #ifdef HAVE_cc0 3779 #if HAVE_cc0
4011 /* Make sure if we have a canonical comparison. The RTL 3780 /* Make sure if we have a canonical comparison. The RTL
4012 documentation states that canonical comparisons are required only 3781 documentation states that canonical comparisons are required only
4013 for targets which have cc0. */ 3782 for targets which have cc0. */
4014 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y)); 3783 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
4015 #endif 3784 #endif
4022 3791
4023 /* Handle all BLKmode compares. */ 3792 /* Handle all BLKmode compares. */
4024 3793
4025 if (mode == BLKmode) 3794 if (mode == BLKmode)
4026 { 3795 {
4027 enum machine_mode result_mode; 3796 machine_mode result_mode;
4028 enum insn_code cmp_code; 3797 enum insn_code cmp_code;
4029 tree length_type;
4030 rtx libfunc;
4031 rtx result; 3798 rtx result;
4032 rtx opalign 3799 rtx opalign
4033 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT); 3800 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
4034 3801
4035 gcc_assert (size); 3802 gcc_assert (size);
4036 3803
4037 /* Try to use a memory block compare insn - either cmpstr 3804 /* Try to use a memory block compare insn - either cmpstr
4038 or cmpmem will do. */ 3805 or cmpmem will do. */
4039 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); 3806 opt_scalar_int_mode cmp_mode_iter;
4040 cmp_mode != VOIDmode; 3807 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
4041 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode)) 3808 {
4042 { 3809 scalar_int_mode cmp_mode = cmp_mode_iter.require ();
4043 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode); 3810 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
4044 if (cmp_code == CODE_FOR_nothing) 3811 if (cmp_code == CODE_FOR_nothing)
4045 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode); 3812 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
4046 if (cmp_code == CODE_FOR_nothing) 3813 if (cmp_code == CODE_FOR_nothing)
4047 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode); 3814 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
4048 if (cmp_code == CODE_FOR_nothing) 3815 if (cmp_code == CODE_FOR_nothing)
4049 continue; 3816 continue;
4050 3817
4051 /* Must make sure the size fits the insn's mode. */ 3818 /* Must make sure the size fits the insn's mode. */
4052 if ((CONST_INT_P (size) 3819 if (CONST_INT_P (size)
4053 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode))) 3820 ? INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode))
4054 || (GET_MODE_BITSIZE (GET_MODE (size)) 3821 : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
4055 > GET_MODE_BITSIZE (cmp_mode))) 3822 > GET_MODE_BITSIZE (cmp_mode)))
4056 continue; 3823 continue;
4057 3824
4058 result_mode = insn_data[cmp_code].operand[0].mode; 3825 result_mode = insn_data[cmp_code].operand[0].mode;
4059 result = gen_reg_rtx (result_mode); 3826 result = gen_reg_rtx (result_mode);
4060 size = convert_to_mode (cmp_mode, size, 1); 3827 size = convert_to_mode (cmp_mode, size, 1);
4066 } 3833 }
4067 3834
4068 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN) 3835 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
4069 goto fail; 3836 goto fail;
4070 3837
4071 /* Otherwise call a library function, memcmp. */ 3838 /* Otherwise call a library function. */
4072 libfunc = memcmp_libfunc; 3839 result = emit_block_comp_via_libcall (XEXP (x, 0), XEXP (y, 0), size);
4073 length_type = sizetype; 3840
4074 result_mode = TYPE_MODE (integer_type_node); 3841 x = result;
4075 cmp_mode = TYPE_MODE (length_type); 3842 y = const0_rtx;
4076 size = convert_to_mode (TYPE_MODE (length_type), size, 3843 mode = TYPE_MODE (integer_type_node);
4077 TYPE_UNSIGNED (length_type)); 3844 methods = OPTAB_LIB_WIDEN;
4078 3845 unsignedp = false;
4079 result = emit_library_call_value (libfunc, 0, LCT_PURE,
4080 result_mode, 3,
4081 XEXP (x, 0), Pmode,
4082 XEXP (y, 0), Pmode,
4083 size, cmp_mode);
4084
4085 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
4086 *pmode = result_mode;
4087 return;
4088 } 3846 }
4089 3847
4090 /* Don't allow operands to the compare to trap, as that can put the 3848 /* Don't allow operands to the compare to trap, as that can put the
4091 compare and branch in different basic blocks. */ 3849 compare and branch in different basic blocks. */
4092 if (cfun->can_throw_non_call_exceptions) 3850 if (cfun->can_throw_non_call_exceptions)
4093 { 3851 {
4094 if (may_trap_p (x)) 3852 if (may_trap_p (x))
4095 x = force_reg (mode, x); 3853 x = copy_to_reg (x);
4096 if (may_trap_p (y)) 3854 if (may_trap_p (y))
4097 y = force_reg (mode, y); 3855 y = copy_to_reg (y);
4098 } 3856 }
4099 3857
4100 if (GET_MODE_CLASS (mode) == MODE_CC) 3858 if (GET_MODE_CLASS (mode) == MODE_CC)
4101 { 3859 {
4102 gcc_assert (can_compare_p (comparison, CCmode, ccp_jump)); 3860 enum insn_code icode = optab_handler (cbranch_optab, CCmode);
4103 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, x, y); 3861 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3862 gcc_assert (icode != CODE_FOR_nothing
3863 && insn_operand_matches (icode, 0, test));
3864 *ptest = test;
4104 return; 3865 return;
4105 } 3866 }
4106 3867
4107 mclass = GET_MODE_CLASS (mode); 3868 mclass = GET_MODE_CLASS (mode);
4108 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y); 3869 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4109 cmp_mode = mode; 3870 FOR_EACH_MODE_FROM (cmp_mode, mode)
4110 do 3871 {
4111 {
4112 enum insn_code icode; 3872 enum insn_code icode;
4113 icode = optab_handler (cbranch_optab, cmp_mode); 3873 icode = optab_handler (cbranch_optab, cmp_mode);
4114 if (icode != CODE_FOR_nothing 3874 if (icode != CODE_FOR_nothing
4115 && insn_data[icode].operand[0].predicate (test, VOIDmode)) 3875 && insn_operand_matches (icode, 0, test))
4116 { 3876 {
4117 rtx last = get_last_insn (); 3877 rtx_insn *last = get_last_insn ();
4118 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp); 3878 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
4119 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp); 3879 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
4120 if (op0 && op1 3880 if (op0 && op1
4121 && insn_data[icode].operand[1].predicate 3881 && insn_operand_matches (icode, 1, op0)
4122 (op0, insn_data[icode].operand[1].mode) 3882 && insn_operand_matches (icode, 2, op1))
4123 && insn_data[icode].operand[2].predicate
4124 (op1, insn_data[icode].operand[2].mode))
4125 { 3883 {
4126 XEXP (test, 0) = op0; 3884 XEXP (test, 0) = op0;
4127 XEXP (test, 1) = op1; 3885 XEXP (test, 1) = op1;
4128 *ptest = test; 3886 *ptest = test;
4129 *pmode = cmp_mode; 3887 *pmode = cmp_mode;
4132 delete_insns_since (last); 3890 delete_insns_since (last);
4133 } 3891 }
4134 3892
4135 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass)) 3893 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
4136 break; 3894 break;
4137 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode); 3895 }
4138 }
4139 while (cmp_mode != VOIDmode);
4140 3896
4141 if (methods != OPTAB_LIB_WIDEN) 3897 if (methods != OPTAB_LIB_WIDEN)
4142 goto fail; 3898 goto fail;
4143 3899
4144 if (!SCALAR_FLOAT_MODE_P (mode)) 3900 if (!SCALAR_FLOAT_MODE_P (mode))
4145 { 3901 {
4146 rtx result; 3902 rtx result;
3903 machine_mode ret_mode;
4147 3904
4148 /* Handle a libcall just for the mode we are using. */ 3905 /* Handle a libcall just for the mode we are using. */
4149 libfunc = optab_libfunc (cmp_optab, mode); 3906 libfunc = optab_libfunc (cmp_optab, mode);
4150 gcc_assert (libfunc); 3907 gcc_assert (libfunc);
4151 3908
4156 rtx ulibfunc = optab_libfunc (ucmp_optab, mode); 3913 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
4157 if (ulibfunc) 3914 if (ulibfunc)
4158 libfunc = ulibfunc; 3915 libfunc = ulibfunc;
4159 } 3916 }
4160 3917
3918 ret_mode = targetm.libgcc_cmp_return_mode ();
4161 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, 3919 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4162 targetm.libgcc_cmp_return_mode (), 3920 ret_mode, x, mode, y, mode);
4163 2, x, mode, y, mode);
4164 3921
4165 /* There are two kinds of comparison routines. Biased routines 3922 /* There are two kinds of comparison routines. Biased routines
4166 return 0/1/2, and unbiased routines return -1/0/1. Other parts 3923 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4167 of gcc expect that the comparison operation is equivalent 3924 of gcc expect that the comparison operation is equivalent
4168 to the modified comparison. For signed comparisons compare the 3925 to the modified comparison. For signed comparisons compare the
4169 result against 1 in the biased case, and zero in the unbiased 3926 result against 1 in the biased case, and zero in the unbiased
4170 case. For unsigned comparisons always compare against 1 after 3927 case. For unsigned comparisons always compare against 1 after
4171 biasing the unbiased result by adding 1. This gives us a way to 3928 biasing the unbiased result by adding 1. This gives us a way to
4172 represent LTU. */ 3929 represent LTU.
3930 The comparisons in the fixed-point helper library are always
3931 biased. */
4173 x = result; 3932 x = result;
4174 y = const1_rtx; 3933 y = const1_rtx;
4175 3934
4176 if (!TARGET_LIB_INT_CMP_BIASED) 3935 if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
4177 { 3936 {
4178 if (unsignedp) 3937 if (unsignedp)
4179 x = plus_constant (result, 1); 3938 x = plus_constant (ret_mode, result, 1);
4180 else 3939 else
4181 y = const0_rtx; 3940 y = const0_rtx;
4182 } 3941 }
4183 3942
4184 *pmode = word_mode; 3943 *pmode = ret_mode;
4185 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods, 3944 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
4186 ptest, pmode); 3945 ptest, pmode);
4187 } 3946 }
4188 else 3947 else
4189 prepare_float_lib_cmp (x, y, comparison, ptest, pmode); 3948 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
4198 to be used for operand OPNUM of the insn, is converted from mode MODE to 3957 to be used for operand OPNUM of the insn, is converted from mode MODE to
4199 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and 3958 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4200 that it is accepted by the operand predicate. Return the new value. */ 3959 that it is accepted by the operand predicate. Return the new value. */
4201 3960
4202 rtx 3961 rtx
4203 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode, 3962 prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
4204 enum machine_mode wider_mode, int unsignedp) 3963 machine_mode wider_mode, int unsignedp)
4205 { 3964 {
4206 if (mode != wider_mode) 3965 if (mode != wider_mode)
4207 x = convert_modes (wider_mode, mode, x, unsignedp); 3966 x = convert_modes (wider_mode, mode, x, unsignedp);
4208 3967
4209 if (!insn_data[icode].operand[opnum].predicate 3968 if (!insn_operand_matches (icode, opnum, x))
4210 (x, insn_data[icode].operand[opnum].mode)) 3969 {
4211 { 3970 machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
4212 if (reload_completed) 3971 if (reload_completed)
4213 return NULL_RTX; 3972 return NULL_RTX;
4214 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x); 3973 if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
3974 return NULL_RTX;
3975 x = copy_to_mode_reg (op_mode, x);
4215 } 3976 }
4216 3977
4217 return x; 3978 return x;
4218 } 3979 }
4219 3980
4220 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know 3981 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4221 we can do the branch. */ 3982 we can do the branch. */
4222 3983
4223 static void 3984 static void
4224 emit_cmp_and_jump_insn_1 (rtx test, enum machine_mode mode, rtx label) 3985 emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
4225 { 3986 profile_probability prob)
4226 enum machine_mode optab_mode; 3987 {
3988 machine_mode optab_mode;
4227 enum mode_class mclass; 3989 enum mode_class mclass;
4228 enum insn_code icode; 3990 enum insn_code icode;
3991 rtx_insn *insn;
4229 3992
4230 mclass = GET_MODE_CLASS (mode); 3993 mclass = GET_MODE_CLASS (mode);
4231 optab_mode = (mclass == MODE_CC) ? CCmode : mode; 3994 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
4232 icode = optab_handler (cbranch_optab, optab_mode); 3995 icode = optab_handler (cbranch_optab, optab_mode);
4233 3996
4234 gcc_assert (icode != CODE_FOR_nothing); 3997 gcc_assert (icode != CODE_FOR_nothing);
4235 gcc_assert (insn_data[icode].operand[0].predicate (test, VOIDmode)); 3998 gcc_assert (insn_operand_matches (icode, 0, test));
4236 emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0), XEXP (test, 1), label)); 3999 insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
4000 XEXP (test, 1), label));
4001 if (prob.initialized_p ()
4002 && profile_status_for_fn (cfun) != PROFILE_ABSENT
4003 && insn
4004 && JUMP_P (insn)
4005 && any_condjump_p (insn)
4006 && !find_reg_note (insn, REG_BR_PROB, 0))
4007 add_reg_br_prob_note (insn, prob);
4237 } 4008 }
4238 4009
4239 /* Generate code to compare X with Y so that the condition codes are 4010 /* Generate code to compare X with Y so that the condition codes are
4240 set and to jump to LABEL if the condition is true. If X is a 4011 set and to jump to LABEL if the condition is true. If X is a
4241 constant and Y is not a constant, then the comparison is swapped to 4012 constant and Y is not a constant, then the comparison is swapped to
4249 4020
4250 MODE is the mode of the inputs (in case they are const_int). 4021 MODE is the mode of the inputs (in case they are const_int).
4251 4022
4252 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). 4023 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4253 It will be potentially converted into an unsigned variant based on 4024 It will be potentially converted into an unsigned variant based on
4254 UNSIGNEDP to select a proper jump instruction. */ 4025 UNSIGNEDP to select a proper jump instruction.
4026
4027 PROB is the probability of jumping to LABEL. */
4255 4028
4256 void 4029 void
4257 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size, 4030 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4258 enum machine_mode mode, int unsignedp, rtx label) 4031 machine_mode mode, int unsignedp, rtx label,
4032 profile_probability prob)
4259 { 4033 {
4260 rtx op0 = x, op1 = y; 4034 rtx op0 = x, op1 = y;
4261 rtx test; 4035 rtx test;
4262 4036
4263 /* Swap operands and condition to ensure canonical RTL. */ 4037 /* Swap operands and condition to ensure canonical RTL. */
4277 if (unsignedp) 4051 if (unsignedp)
4278 comparison = unsigned_condition (comparison); 4052 comparison = unsigned_condition (comparison);
4279 4053
4280 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN, 4054 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4281 &test, &mode); 4055 &test, &mode);
4282 emit_cmp_and_jump_insn_1 (test, mode, label); 4056 emit_cmp_and_jump_insn_1 (test, mode, label, prob);
4283 } 4057 }
4284 4058
4285 4059
4286 /* Emit a library call comparison between floating point X and Y. 4060 /* Emit a library call comparison between floating point X and Y.
4287 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */ 4061 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4288 4062
4289 static void 4063 static void
4290 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison, 4064 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4291 rtx *ptest, enum machine_mode *pmode) 4065 rtx *ptest, machine_mode *pmode)
4292 { 4066 {
4293 enum rtx_code swapped = swap_condition (comparison); 4067 enum rtx_code swapped = swap_condition (comparison);
4294 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison); 4068 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4295 enum machine_mode orig_mode = GET_MODE (x); 4069 machine_mode orig_mode = GET_MODE (x);
4296 enum machine_mode mode, cmp_mode; 4070 machine_mode mode;
4297 rtx true_rtx, false_rtx; 4071 rtx true_rtx, false_rtx;
4298 rtx value, target, insns, equiv; 4072 rtx value, target, equiv;
4073 rtx_insn *insns;
4299 rtx libfunc = 0; 4074 rtx libfunc = 0;
4300 bool reversed_p = false; 4075 bool reversed_p = false;
4301 cmp_mode = targetm.libgcc_cmp_return_mode (); 4076 scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode ();
4302 4077
4303 for (mode = orig_mode; 4078 FOR_EACH_MODE_FROM (mode, orig_mode)
4304 mode != VOIDmode; 4079 {
4305 mode = GET_MODE_WIDER_MODE (mode)) 4080 if (code_to_optab (comparison)
4306 { 4081 && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
4307 if (code_to_optab[comparison]
4308 && (libfunc = optab_libfunc (code_to_optab[comparison], mode)))
4309 break; 4082 break;
4310 4083
4311 if (code_to_optab[swapped] 4084 if (code_to_optab (swapped)
4312 && (libfunc = optab_libfunc (code_to_optab[swapped], mode))) 4085 && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
4313 { 4086 {
4314 rtx tmp; 4087 std::swap (x, y);
4315 tmp = x; x = y; y = tmp;
4316 comparison = swapped; 4088 comparison = swapped;
4317 break; 4089 break;
4318 } 4090 }
4319 4091
4320 if (code_to_optab[reversed] 4092 if (code_to_optab (reversed)
4321 && (libfunc = optab_libfunc (code_to_optab[reversed], mode))) 4093 && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
4322 { 4094 {
4323 comparison = reversed; 4095 comparison = reversed;
4324 reversed_p = true; 4096 reversed_p = true;
4325 break; 4097 break;
4326 } 4098 }
4397 equiv, true_rtx, false_rtx); 4169 equiv, true_rtx, false_rtx);
4398 } 4170 }
4399 4171
4400 start_sequence (); 4172 start_sequence ();
4401 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, 4173 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4402 cmp_mode, 2, x, mode, y, mode); 4174 cmp_mode, x, mode, y, mode);
4403 insns = get_insns (); 4175 insns = get_insns ();
4404 end_sequence (); 4176 end_sequence ();
4405 4177
4406 target = gen_reg_rtx (cmp_mode); 4178 target = gen_reg_rtx (cmp_mode);
4407 emit_libcall_block (insns, target, value, equiv); 4179 emit_libcall_block (insns, target, value, equiv);
4419 /* Generate code to indirectly jump to a location given in the rtx LOC. */ 4191 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4420 4192
4421 void 4193 void
4422 emit_indirect_jump (rtx loc) 4194 emit_indirect_jump (rtx loc)
4423 { 4195 {
4424 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate 4196 if (!targetm.have_indirect_jump ())
4425 (loc, Pmode)) 4197 sorry ("indirect jumps are not available on this target");
4426 loc = copy_to_mode_reg (Pmode, loc); 4198 else
4427 4199 {
4428 emit_jump_insn (gen_indirect_jump (loc)); 4200 struct expand_operand ops[1];
4429 emit_barrier (); 4201 create_address_operand (&ops[0], loc);
4202 expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
4203 emit_barrier ();
4204 }
4430 } 4205 }
4431 4206
4432 #ifdef HAVE_conditional_move
4433 4207
4434 /* Emit a conditional move instruction if the machine supports one for that 4208 /* Emit a conditional move instruction if the machine supports one for that
4435 condition and machine mode. 4209 condition and machine mode.
4436 4210
4437 OP0 and OP1 are the operands that should be compared using CODE. CMODE is 4211 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4445 The result is either TARGET (perhaps modified) or NULL_RTX if the operation 4219 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4446 is not supported. */ 4220 is not supported. */
4447 4221
4448 rtx 4222 rtx
4449 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1, 4223 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4450 enum machine_mode cmode, rtx op2, rtx op3, 4224 machine_mode cmode, rtx op2, rtx op3,
4451 enum machine_mode mode, int unsignedp) 4225 machine_mode mode, int unsignedp)
4452 { 4226 {
4453 rtx tem, subtarget, comparison, insn; 4227 rtx comparison;
4228 rtx_insn *last;
4454 enum insn_code icode; 4229 enum insn_code icode;
4455 enum rtx_code reversed; 4230 enum rtx_code reversed;
4456 4231
4232 /* If the two source operands are identical, that's just a move. */
4233
4234 if (rtx_equal_p (op2, op3))
4235 {
4236 if (!target)
4237 target = gen_reg_rtx (mode);
4238
4239 emit_move_insn (target, op3);
4240 return target;
4241 }
4242
4457 /* If one operand is constant, make it the second one. Only do this 4243 /* If one operand is constant, make it the second one. Only do this
4458 if the other operand is not constant as well. */ 4244 if the other operand is not constant as well. */
4459 4245
4460 if (swap_commutative_operands_p (op0, op1)) 4246 if (swap_commutative_operands_p (op0, op1))
4461 { 4247 {
4462 tem = op0; 4248 std::swap (op0, op1);
4463 op0 = op1;
4464 op1 = tem;
4465 code = swap_condition (code); 4249 code = swap_condition (code);
4466 } 4250 }
4467 4251
4468 /* get_condition will prefer to generate LT and GT even if the old 4252 /* get_condition will prefer to generate LT and GT even if the old
4469 comparison was against zero, so undo that canonicalization here since 4253 comparison was against zero, so undo that canonicalization here since
4474 code = GE, op1 = const0_rtx; 4258 code = GE, op1 = const0_rtx;
4475 4259
4476 if (cmode == VOIDmode) 4260 if (cmode == VOIDmode)
4477 cmode = GET_MODE (op0); 4261 cmode = GET_MODE (op0);
4478 4262
4263 enum rtx_code orig_code = code;
4264 bool swapped = false;
4479 if (swap_commutative_operands_p (op2, op3) 4265 if (swap_commutative_operands_p (op2, op3)
4480 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL)) 4266 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4481 != UNKNOWN)) 4267 != UNKNOWN))
4482 { 4268 {
4483 tem = op2; 4269 std::swap (op2, op3);
4484 op2 = op3;
4485 op3 = tem;
4486 code = reversed; 4270 code = reversed;
4271 swapped = true;
4487 } 4272 }
4488 4273
4489 if (mode == VOIDmode) 4274 if (mode == VOIDmode)
4490 mode = GET_MODE (op2); 4275 mode = GET_MODE (op2);
4491 4276
4492 icode = direct_optab_handler (movcc_optab, mode); 4277 icode = direct_optab_handler (movcc_optab, mode);
4493 4278
4494 if (icode == CODE_FOR_nothing) 4279 if (icode == CODE_FOR_nothing)
4495 return 0; 4280 return NULL_RTX;
4496 4281
4497 if (!target) 4282 if (!target)
4498 target = gen_reg_rtx (mode); 4283 target = gen_reg_rtx (mode);
4499 4284
4500 subtarget = target; 4285 for (int pass = 0; ; pass++)
4501 4286 {
4502 /* If the insn doesn't accept these operands, put them in pseudos. */ 4287 code = unsignedp ? unsigned_condition (code) : code;
4503 4288 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4504 if (!insn_data[icode].operand[0].predicate 4289
4505 (subtarget, insn_data[icode].operand[0].mode)) 4290 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4506 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode); 4291 punt and let the caller figure out how best to deal with this
4507 4292 situation. */
4508 if (!insn_data[icode].operand[2].predicate 4293 if (COMPARISON_P (comparison))
4509 (op2, insn_data[icode].operand[2].mode)) 4294 {
4510 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2); 4295 saved_pending_stack_adjust save;
4511 4296 save_pending_stack_adjust (&save);
4512 if (!insn_data[icode].operand[3].predicate 4297 last = get_last_insn ();
4513 (op3, insn_data[icode].operand[3].mode)) 4298 do_pending_stack_adjust ();
4514 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3); 4299 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4515 4300 GET_CODE (comparison), NULL_RTX, unsignedp,
4516 /* Everything should now be in the suitable form. */ 4301 OPTAB_WIDEN, &comparison, &cmode);
4517 4302 if (comparison)
4518 code = unsignedp ? unsigned_condition (code) : code; 4303 {
4519 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1); 4304 struct expand_operand ops[4];
4520 4305
4521 /* We can get const0_rtx or const_true_rtx in some circumstances. Just 4306 create_output_operand (&ops[0], target, mode);
4522 return NULL and let the caller figure out how best to deal with this 4307 create_fixed_operand (&ops[1], comparison);
4523 situation. */ 4308 create_input_operand (&ops[2], op2, mode);
4524 if (!COMPARISON_P (comparison)) 4309 create_input_operand (&ops[3], op3, mode);
4310 if (maybe_expand_insn (icode, 4, ops))
4311 {
4312 if (ops[0].value != target)
4313 convert_move (target, ops[0].value, false);
4314 return target;
4315 }
4316 }
4317 delete_insns_since (last);
4318 restore_pending_stack_adjust (&save);
4319 }
4320
4321 if (pass == 1)
4322 return NULL_RTX;
4323
4324 /* If the preferred op2/op3 order is not usable, retry with other
4325 operand order, perhaps it will expand successfully. */
4326 if (swapped)
4327 code = orig_code;
4328 else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
4329 NULL))
4330 != UNKNOWN)
4331 code = reversed;
4332 else
4333 return NULL_RTX;
4334 std::swap (op2, op3);
4335 }
4336 }
4337
4338
4339 /* Emit a conditional negate or bitwise complement using the
4340 negcc or notcc optabs if available. Return NULL_RTX if such operations
4341 are not available. Otherwise return the RTX holding the result.
4342 TARGET is the desired destination of the result. COMP is the comparison
4343 on which to negate. If COND is true move into TARGET the negation
4344 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
4345 CODE is either NEG or NOT. MODE is the machine mode in which the
4346 operation is performed. */
4347
4348 rtx
4349 emit_conditional_neg_or_complement (rtx target, rtx_code code,
4350 machine_mode mode, rtx cond, rtx op1,
4351 rtx op2)
4352 {
4353 optab op = unknown_optab;
4354 if (code == NEG)
4355 op = negcc_optab;
4356 else if (code == NOT)
4357 op = notcc_optab;
4358 else
4359 gcc_unreachable ();
4360
4361 insn_code icode = direct_optab_handler (op, mode);
4362
4363 if (icode == CODE_FOR_nothing)
4525 return NULL_RTX; 4364 return NULL_RTX;
4526 4365
4527 do_pending_stack_adjust (); 4366 if (!target)
4528 start_sequence (); 4367 target = gen_reg_rtx (mode);
4529 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1), 4368
4530 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN, 4369 rtx_insn *last = get_last_insn ();
4531 &comparison, &cmode); 4370 struct expand_operand ops[4];
4532 if (!comparison) 4371
4533 insn = NULL_RTX; 4372 create_output_operand (&ops[0], target, mode);
4534 else 4373 create_fixed_operand (&ops[1], cond);
4535 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3); 4374 create_input_operand (&ops[2], op1, mode);
4536 4375 create_input_operand (&ops[3], op2, mode);
4537 /* If that failed, then give up. */ 4376
4538 if (insn == 0) 4377 if (maybe_expand_insn (icode, 4, ops))
4539 { 4378 {
4540 end_sequence (); 4379 if (ops[0].value != target)
4541 return 0; 4380 convert_move (target, ops[0].value, false);
4542 } 4381
4543 4382 return target;
4544 emit_insn (insn); 4383 }
4545 insn = get_insns (); 4384 delete_insns_since (last);
4546 end_sequence (); 4385 return NULL_RTX;
4547 emit_insn (insn); 4386 }
4548 if (subtarget != target)
4549 convert_move (target, subtarget, 0);
4550
4551 return target;
4552 }
4553
4554 /* Return nonzero if a conditional move of mode MODE is supported.
4555
4556 This function is for combine so it can tell whether an insn that looks
4557 like a conditional move is actually supported by the hardware. If we
4558 guess wrong we lose a bit on optimization, but that's it. */
4559 /* ??? sparc64 supports conditionally moving integers values based on fp
4560 comparisons, and vice versa. How do we handle them? */
4561
4562 int
4563 can_conditionally_move_p (enum machine_mode mode)
4564 {
4565 if (direct_optab_handler (movcc_optab, mode) != CODE_FOR_nothing)
4566 return 1;
4567
4568 return 0;
4569 }
4570
4571 #endif /* HAVE_conditional_move */
4572 4387
4573 /* Emit a conditional addition instruction if the machine supports one for that 4388 /* Emit a conditional addition instruction if the machine supports one for that
4574 condition and machine mode. 4389 condition and machine mode.
4575 4390
4576 OP0 and OP1 are the operands that should be compared using CODE. CMODE is 4391 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4577 the mode to use should they be constants. If it is VOIDmode, they cannot 4392 the mode to use should they be constants. If it is VOIDmode, they cannot
4578 both be constants. 4393 both be constants.
4579 4394
4580 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3 4395 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
4581 should be stored there. MODE is the mode to use should they be constants. 4396 should be stored there. MODE is the mode to use should they be constants.
4582 If it is VOIDmode, they cannot both be constants. 4397 If it is VOIDmode, they cannot both be constants.
4583 4398
4584 The result is either TARGET (perhaps modified) or NULL_RTX if the operation 4399 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4585 is not supported. */ 4400 is not supported. */
4586 4401
4587 rtx 4402 rtx
4588 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1, 4403 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4589 enum machine_mode cmode, rtx op2, rtx op3, 4404 machine_mode cmode, rtx op2, rtx op3,
4590 enum machine_mode mode, int unsignedp) 4405 machine_mode mode, int unsignedp)
4591 { 4406 {
4592 rtx tem, subtarget, comparison, insn; 4407 rtx comparison;
4408 rtx_insn *last;
4593 enum insn_code icode; 4409 enum insn_code icode;
4594 enum rtx_code reversed;
4595 4410
4596 /* If one operand is constant, make it the second one. Only do this 4411 /* If one operand is constant, make it the second one. Only do this
4597 if the other operand is not constant as well. */ 4412 if the other operand is not constant as well. */
4598 4413
4599 if (swap_commutative_operands_p (op0, op1)) 4414 if (swap_commutative_operands_p (op0, op1))
4600 { 4415 {
4601 tem = op0; 4416 std::swap (op0, op1);
4602 op0 = op1;
4603 op1 = tem;
4604 code = swap_condition (code); 4417 code = swap_condition (code);
4605 } 4418 }
4606 4419
4607 /* get_condition will prefer to generate LT and GT even if the old 4420 /* get_condition will prefer to generate LT and GT even if the old
4608 comparison was against zero, so undo that canonicalization here since 4421 comparison was against zero, so undo that canonicalization here since
4613 code = GE, op1 = const0_rtx; 4426 code = GE, op1 = const0_rtx;
4614 4427
4615 if (cmode == VOIDmode) 4428 if (cmode == VOIDmode)
4616 cmode = GET_MODE (op0); 4429 cmode = GET_MODE (op0);
4617 4430
4618 if (swap_commutative_operands_p (op2, op3)
4619 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4620 != UNKNOWN))
4621 {
4622 tem = op2;
4623 op2 = op3;
4624 op3 = tem;
4625 code = reversed;
4626 }
4627
4628 if (mode == VOIDmode) 4431 if (mode == VOIDmode)
4629 mode = GET_MODE (op2); 4432 mode = GET_MODE (op2);
4630 4433
4631 icode = optab_handler (addcc_optab, mode); 4434 icode = optab_handler (addcc_optab, mode);
4632 4435
4633 if (icode == CODE_FOR_nothing) 4436 if (icode == CODE_FOR_nothing)
4634 return 0; 4437 return 0;
4635 4438
4636 if (!target) 4439 if (!target)
4637 target = gen_reg_rtx (mode); 4440 target = gen_reg_rtx (mode);
4638
4639 /* If the insn doesn't accept these operands, put them in pseudos. */
4640
4641 if (!insn_data[icode].operand[0].predicate
4642 (target, insn_data[icode].operand[0].mode))
4643 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4644 else
4645 subtarget = target;
4646
4647 if (!insn_data[icode].operand[2].predicate
4648 (op2, insn_data[icode].operand[2].mode))
4649 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4650
4651 if (!insn_data[icode].operand[3].predicate
4652 (op3, insn_data[icode].operand[3].mode))
4653 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4654
4655 /* Everything should now be in the suitable form. */
4656 4441
4657 code = unsignedp ? unsigned_condition (code) : code; 4442 code = unsignedp ? unsigned_condition (code) : code;
4658 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1); 4443 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4659 4444
4660 /* We can get const0_rtx or const_true_rtx in some circumstances. Just 4445 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4662 situation. */ 4447 situation. */
4663 if (!COMPARISON_P (comparison)) 4448 if (!COMPARISON_P (comparison))
4664 return NULL_RTX; 4449 return NULL_RTX;
4665 4450
4666 do_pending_stack_adjust (); 4451 do_pending_stack_adjust ();
4667 start_sequence (); 4452 last = get_last_insn ();
4668 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1), 4453 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4669 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN, 4454 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4670 &comparison, &cmode); 4455 &comparison, &cmode);
4671 if (!comparison) 4456 if (comparison)
4672 insn = NULL_RTX; 4457 {
4673 else 4458 struct expand_operand ops[4];
4674 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3); 4459
4675 4460 create_output_operand (&ops[0], target, mode);
4676 /* If that failed, then give up. */ 4461 create_fixed_operand (&ops[1], comparison);
4677 if (insn == 0) 4462 create_input_operand (&ops[2], op2, mode);
4678 { 4463 create_input_operand (&ops[3], op3, mode);
4679 end_sequence (); 4464 if (maybe_expand_insn (icode, 4, ops))
4680 return 0; 4465 {
4681 } 4466 if (ops[0].value != target)
4682 4467 convert_move (target, ops[0].value, false);
4683 emit_insn (insn); 4468 return target;
4684 insn = get_insns (); 4469 }
4685 end_sequence (); 4470 }
4686 emit_insn (insn); 4471 delete_insns_since (last);
4687 if (subtarget != target) 4472 return NULL_RTX;
4688 convert_move (target, subtarget, 0);
4689
4690 return target;
4691 } 4473 }
4692 4474
4693 /* These functions attempt to generate an insn body, rather than 4475 /* These functions attempt to generate an insn body, rather than
4694 emitting the insn, but if the gen function already emits them, we 4476 emitting the insn, but if the gen function already emits them, we
4695 make no attempt to turn them back into naked patterns. */ 4477 make no attempt to turn them back into naked patterns. */
4696 4478
4697 /* Generate and return an insn body to add Y to X. */ 4479 /* Generate and return an insn body to add Y to X. */
4698 4480
4699 rtx 4481 rtx_insn *
4700 gen_add2_insn (rtx x, rtx y) 4482 gen_add2_insn (rtx x, rtx y)
4701 { 4483 {
4702 int icode = (int) optab_handler (add_optab, GET_MODE (x)); 4484 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4703 4485
4704 gcc_assert (insn_data[icode].operand[0].predicate 4486 gcc_assert (insn_operand_matches (icode, 0, x));
4705 (x, insn_data[icode].operand[0].mode)); 4487 gcc_assert (insn_operand_matches (icode, 1, x));
4706 gcc_assert (insn_data[icode].operand[1].predicate 4488 gcc_assert (insn_operand_matches (icode, 2, y));
4707 (x, insn_data[icode].operand[1].mode));
4708 gcc_assert (insn_data[icode].operand[2].predicate
4709 (y, insn_data[icode].operand[2].mode));
4710 4489
4711 return GEN_FCN (icode) (x, x, y); 4490 return GEN_FCN (icode) (x, x, y);
4712 } 4491 }
4713 4492
4714 /* Generate and return an insn body to add r1 and c, 4493 /* Generate and return an insn body to add r1 and c,
4715 storing the result in r0. */ 4494 storing the result in r0. */
4716 4495
4717 rtx 4496 rtx_insn *
4718 gen_add3_insn (rtx r0, rtx r1, rtx c) 4497 gen_add3_insn (rtx r0, rtx r1, rtx c)
4719 { 4498 {
4720 int icode = (int) optab_handler (add_optab, GET_MODE (r0)); 4499 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4721 4500
4722 if (icode == CODE_FOR_nothing 4501 if (icode == CODE_FOR_nothing
4723 || !(insn_data[icode].operand[0].predicate 4502 || !insn_operand_matches (icode, 0, r0)
4724 (r0, insn_data[icode].operand[0].mode)) 4503 || !insn_operand_matches (icode, 1, r1)
4725 || !(insn_data[icode].operand[1].predicate 4504 || !insn_operand_matches (icode, 2, c))
4726 (r1, insn_data[icode].operand[1].mode)) 4505 return NULL;
4727 || !(insn_data[icode].operand[2].predicate
4728 (c, insn_data[icode].operand[2].mode)))
4729 return NULL_RTX;
4730 4506
4731 return GEN_FCN (icode) (r0, r1, c); 4507 return GEN_FCN (icode) (r0, r1, c);
4732 } 4508 }
4733 4509
4734 int 4510 int
4735 have_add2_insn (rtx x, rtx y) 4511 have_add2_insn (rtx x, rtx y)
4736 { 4512 {
4737 int icode; 4513 enum insn_code icode;
4738 4514
4739 gcc_assert (GET_MODE (x) != VOIDmode); 4515 gcc_assert (GET_MODE (x) != VOIDmode);
4740 4516
4741 icode = (int) optab_handler (add_optab, GET_MODE (x)); 4517 icode = optab_handler (add_optab, GET_MODE (x));
4742 4518
4743 if (icode == CODE_FOR_nothing) 4519 if (icode == CODE_FOR_nothing)
4744 return 0; 4520 return 0;
4745 4521
4746 if (!(insn_data[icode].operand[0].predicate 4522 if (!insn_operand_matches (icode, 0, x)
4747 (x, insn_data[icode].operand[0].mode)) 4523 || !insn_operand_matches (icode, 1, x)
4748 || !(insn_data[icode].operand[1].predicate 4524 || !insn_operand_matches (icode, 2, y))
4749 (x, insn_data[icode].operand[1].mode))
4750 || !(insn_data[icode].operand[2].predicate
4751 (y, insn_data[icode].operand[2].mode)))
4752 return 0; 4525 return 0;
4753 4526
4754 return 1; 4527 return 1;
4755 } 4528 }
4756 4529
4530 /* Generate and return an insn body to add Y to X. */
4531
4532 rtx_insn *
4533 gen_addptr3_insn (rtx x, rtx y, rtx z)
4534 {
4535 enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
4536
4537 gcc_assert (insn_operand_matches (icode, 0, x));
4538 gcc_assert (insn_operand_matches (icode, 1, y));
4539 gcc_assert (insn_operand_matches (icode, 2, z));
4540
4541 return GEN_FCN (icode) (x, y, z);
4542 }
4543
4544 /* Return true if the target implements an addptr pattern and X, Y,
4545 and Z are valid for the pattern predicates. */
4546
4547 int
4548 have_addptr3_insn (rtx x, rtx y, rtx z)
4549 {
4550 enum insn_code icode;
4551
4552 gcc_assert (GET_MODE (x) != VOIDmode);
4553
4554 icode = optab_handler (addptr3_optab, GET_MODE (x));
4555
4556 if (icode == CODE_FOR_nothing)
4557 return 0;
4558
4559 if (!insn_operand_matches (icode, 0, x)
4560 || !insn_operand_matches (icode, 1, y)
4561 || !insn_operand_matches (icode, 2, z))
4562 return 0;
4563
4564 return 1;
4565 }
4566
4757 /* Generate and return an insn body to subtract Y from X. */ 4567 /* Generate and return an insn body to subtract Y from X. */
4758 4568
4759 rtx 4569 rtx_insn *
4760 gen_sub2_insn (rtx x, rtx y) 4570 gen_sub2_insn (rtx x, rtx y)
4761 { 4571 {
4762 int icode = (int) optab_handler (sub_optab, GET_MODE (x)); 4572 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4763 4573
4764 gcc_assert (insn_data[icode].operand[0].predicate 4574 gcc_assert (insn_operand_matches (icode, 0, x));
4765 (x, insn_data[icode].operand[0].mode)); 4575 gcc_assert (insn_operand_matches (icode, 1, x));
4766 gcc_assert (insn_data[icode].operand[1].predicate 4576 gcc_assert (insn_operand_matches (icode, 2, y));
4767 (x, insn_data[icode].operand[1].mode));
4768 gcc_assert (insn_data[icode].operand[2].predicate
4769 (y, insn_data[icode].operand[2].mode));
4770 4577
4771 return GEN_FCN (icode) (x, x, y); 4578 return GEN_FCN (icode) (x, x, y);
4772 } 4579 }
4773 4580
4774 /* Generate and return an insn body to subtract r1 and c, 4581 /* Generate and return an insn body to subtract r1 and c,
4775 storing the result in r0. */ 4582 storing the result in r0. */
4776 4583
4777 rtx 4584 rtx_insn *
4778 gen_sub3_insn (rtx r0, rtx r1, rtx c) 4585 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4779 { 4586 {
4780 int icode = (int) optab_handler (sub_optab, GET_MODE (r0)); 4587 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4781 4588
4782 if (icode == CODE_FOR_nothing 4589 if (icode == CODE_FOR_nothing
4783 || !(insn_data[icode].operand[0].predicate 4590 || !insn_operand_matches (icode, 0, r0)
4784 (r0, insn_data[icode].operand[0].mode)) 4591 || !insn_operand_matches (icode, 1, r1)
4785 || !(insn_data[icode].operand[1].predicate 4592 || !insn_operand_matches (icode, 2, c))
4786 (r1, insn_data[icode].operand[1].mode)) 4593 return NULL;
4787 || !(insn_data[icode].operand[2].predicate
4788 (c, insn_data[icode].operand[2].mode)))
4789 return NULL_RTX;
4790 4594
4791 return GEN_FCN (icode) (r0, r1, c); 4595 return GEN_FCN (icode) (r0, r1, c);
4792 } 4596 }
4793 4597
4794 int 4598 int
4795 have_sub2_insn (rtx x, rtx y) 4599 have_sub2_insn (rtx x, rtx y)
4796 { 4600 {
4797 int icode; 4601 enum insn_code icode;
4798 4602
4799 gcc_assert (GET_MODE (x) != VOIDmode); 4603 gcc_assert (GET_MODE (x) != VOIDmode);
4800 4604
4801 icode = (int) optab_handler (sub_optab, GET_MODE (x)); 4605 icode = optab_handler (sub_optab, GET_MODE (x));
4802 4606
4803 if (icode == CODE_FOR_nothing) 4607 if (icode == CODE_FOR_nothing)
4804 return 0; 4608 return 0;
4805 4609
4806 if (!(insn_data[icode].operand[0].predicate 4610 if (!insn_operand_matches (icode, 0, x)
4807 (x, insn_data[icode].operand[0].mode)) 4611 || !insn_operand_matches (icode, 1, x)
4808 || !(insn_data[icode].operand[1].predicate 4612 || !insn_operand_matches (icode, 2, y))
4809 (x, insn_data[icode].operand[1].mode))
4810 || !(insn_data[icode].operand[2].predicate
4811 (y, insn_data[icode].operand[2].mode)))
4812 return 0; 4613 return 0;
4813 4614
4814 return 1; 4615 return 1;
4815 } 4616 }
4816
4817 /* Generate the body of an instruction to copy Y into X.
4818 It may be a list of insns, if one insn isn't enough. */
4819
4820 rtx
4821 gen_move_insn (rtx x, rtx y)
4822 {
4823 rtx seq;
4824
4825 start_sequence ();
4826 emit_move_insn_1 (x, y);
4827 seq = get_insns ();
4828 end_sequence ();
4829 return seq;
4830 }
4831 4617
4832 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4833 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4834 no such operation exists, CODE_FOR_nothing will be returned. */
4835
4836 enum insn_code
4837 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4838 int unsignedp)
4839 {
4840 convert_optab tab;
4841 #ifdef HAVE_ptr_extend
4842 if (unsignedp < 0)
4843 return CODE_FOR_ptr_extend;
4844 #endif
4845
4846 tab = unsignedp ? zext_optab : sext_optab;
4847 return convert_optab_handler (tab, to_mode, from_mode);
4848 }
4849
4850 /* Generate the body of an insn to extend Y (with mode MFROM) 4618 /* Generate the body of an insn to extend Y (with mode MFROM)
4851 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */ 4619 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4852 4620
4853 rtx 4621 rtx_insn *
4854 gen_extend_insn (rtx x, rtx y, enum machine_mode mto, 4622 gen_extend_insn (rtx x, rtx y, machine_mode mto,
4855 enum machine_mode mfrom, int unsignedp) 4623 machine_mode mfrom, int unsignedp)
4856 { 4624 {
4857 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp); 4625 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4858 return GEN_FCN (icode) (x, y); 4626 return GEN_FCN (icode) (x, y);
4859 }
4860
4861 /* can_fix_p and can_float_p say whether the target machine
4862 can directly convert a given fixed point type to
4863 a given floating point type, or vice versa.
4864 The returned value is the CODE_FOR_... value to use,
4865 or CODE_FOR_nothing if these modes cannot be directly converted.
4866
4867 *TRUNCP_PTR is set to 1 if it is necessary to output
4868 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4869
4870 static enum insn_code
4871 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4872 int unsignedp, int *truncp_ptr)
4873 {
4874 convert_optab tab;
4875 enum insn_code icode;
4876
4877 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4878 icode = convert_optab_handler (tab, fixmode, fltmode);
4879 if (icode != CODE_FOR_nothing)
4880 {
4881 *truncp_ptr = 0;
4882 return icode;
4883 }
4884
4885 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4886 for this to work. We need to rework the fix* and ftrunc* patterns
4887 and documentation. */
4888 tab = unsignedp ? ufix_optab : sfix_optab;
4889 icode = convert_optab_handler (tab, fixmode, fltmode);
4890 if (icode != CODE_FOR_nothing
4891 && optab_handler (ftrunc_optab, fltmode) != CODE_FOR_nothing)
4892 {
4893 *truncp_ptr = 1;
4894 return icode;
4895 }
4896
4897 *truncp_ptr = 0;
4898 return CODE_FOR_nothing;
4899 }
4900
4901 static enum insn_code
4902 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4903 int unsignedp)
4904 {
4905 convert_optab tab;
4906
4907 tab = unsignedp ? ufloat_optab : sfloat_optab;
4908 return convert_optab_handler (tab, fltmode, fixmode);
4909 } 4627 }
4910 4628
4911 /* Generate code to convert FROM to floating point 4629 /* Generate code to convert FROM to floating point
4912 and store in TO. FROM must be fixed point and not VOIDmode. 4630 and store in TO. FROM must be fixed point and not VOIDmode.
4913 UNSIGNEDP nonzero means regard FROM as unsigned. 4631 UNSIGNEDP nonzero means regard FROM as unsigned.
4917 void 4635 void
4918 expand_float (rtx to, rtx from, int unsignedp) 4636 expand_float (rtx to, rtx from, int unsignedp)
4919 { 4637 {
4920 enum insn_code icode; 4638 enum insn_code icode;
4921 rtx target = to; 4639 rtx target = to;
4922 enum machine_mode fmode, imode; 4640 scalar_mode from_mode, to_mode;
4641 machine_mode fmode, imode;
4923 bool can_do_signed = false; 4642 bool can_do_signed = false;
4924 4643
4925 /* Crash now, because we won't be able to decide which mode to use. */ 4644 /* Crash now, because we won't be able to decide which mode to use. */
4926 gcc_assert (GET_MODE (from) != VOIDmode); 4645 gcc_assert (GET_MODE (from) != VOIDmode);
4927 4646
4928 /* Look for an insn to do the conversion. Do it in the specified 4647 /* Look for an insn to do the conversion. Do it in the specified
4929 modes if possible; otherwise convert either input, output or both to 4648 modes if possible; otherwise convert either input, output or both to
4930 wider mode. If the integer mode is wider than the mode of FROM, 4649 wider mode. If the integer mode is wider than the mode of FROM,
4931 we can do the conversion signed even if the input is unsigned. */ 4650 we can do the conversion signed even if the input is unsigned. */
4932 4651
4933 for (fmode = GET_MODE (to); fmode != VOIDmode; 4652 FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
4934 fmode = GET_MODE_WIDER_MODE (fmode)) 4653 FOR_EACH_MODE_FROM (imode, GET_MODE (from))
4935 for (imode = GET_MODE (from); imode != VOIDmode;
4936 imode = GET_MODE_WIDER_MODE (imode))
4937 { 4654 {
4938 int doing_unsigned = unsignedp; 4655 int doing_unsigned = unsignedp;
4939 4656
4940 if (fmode != GET_MODE (to) 4657 if (fmode != GET_MODE (to)
4941 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from))) 4658 && (significand_size (fmode)
4659 < GET_MODE_UNIT_PRECISION (GET_MODE (from))))
4942 continue; 4660 continue;
4943 4661
4944 icode = can_float_p (fmode, imode, unsignedp); 4662 icode = can_float_p (fmode, imode, unsignedp);
4945 if (icode == CODE_FOR_nothing && unsignedp) 4663 if (icode == CODE_FOR_nothing && unsignedp)
4946 { 4664 {
4968 } 4686 }
4969 } 4687 }
4970 4688
4971 /* Unsigned integer, and no way to convert directly. Convert as signed, 4689 /* Unsigned integer, and no way to convert directly. Convert as signed,
4972 then unconditionally adjust the result. */ 4690 then unconditionally adjust the result. */
4973 if (unsignedp && can_do_signed) 4691 if (unsignedp
4974 { 4692 && can_do_signed
4975 rtx label = gen_label_rtx (); 4693 && is_a <scalar_mode> (GET_MODE (to), &to_mode)
4694 && is_a <scalar_mode> (GET_MODE (from), &from_mode))
4695 {
4696 opt_scalar_mode fmode_iter;
4697 rtx_code_label *label = gen_label_rtx ();
4976 rtx temp; 4698 rtx temp;
4977 REAL_VALUE_TYPE offset; 4699 REAL_VALUE_TYPE offset;
4978 4700
4979 /* Look for a usable floating mode FMODE wider than the source and at 4701 /* Look for a usable floating mode FMODE wider than the source and at
4980 least as wide as the target. Using FMODE will avoid rounding woes 4702 least as wide as the target. Using FMODE will avoid rounding woes
4981 with unsigned values greater than the signed maximum value. */ 4703 with unsigned values greater than the signed maximum value. */
4982 4704
4983 for (fmode = GET_MODE (to); fmode != VOIDmode; 4705 FOR_EACH_MODE_FROM (fmode_iter, to_mode)
4984 fmode = GET_MODE_WIDER_MODE (fmode)) 4706 {
4985 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode) 4707 scalar_mode fmode = fmode_iter.require ();
4986 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing) 4708 if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
4987 break; 4709 && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
4988 4710 break;
4989 if (fmode == VOIDmode) 4711 }
4712
4713 if (!fmode_iter.exists (&fmode))
4990 { 4714 {
4991 /* There is no such mode. Pretend the target is wide enough. */ 4715 /* There is no such mode. Pretend the target is wide enough. */
4992 fmode = GET_MODE (to); 4716 fmode = to_mode;
4993 4717
4994 /* Avoid double-rounding when TO is narrower than FROM. */ 4718 /* Avoid double-rounding when TO is narrower than FROM. */
4995 if ((significand_size (fmode) + 1) 4719 if ((significand_size (fmode) + 1)
4996 < GET_MODE_BITSIZE (GET_MODE (from))) 4720 < GET_MODE_PRECISION (from_mode))
4997 { 4721 {
4998 rtx temp1; 4722 rtx temp1;
4999 rtx neglabel = gen_label_rtx (); 4723 rtx_code_label *neglabel = gen_label_rtx ();
5000 4724
5001 /* Don't use TARGET if it isn't a register, is a hard register, 4725 /* Don't use TARGET if it isn't a register, is a hard register,
5002 or is the wrong mode. */ 4726 or is the wrong mode. */
5003 if (!REG_P (target) 4727 if (!REG_P (target)
5004 || REGNO (target) < FIRST_PSEUDO_REGISTER 4728 || REGNO (target) < FIRST_PSEUDO_REGISTER
5005 || GET_MODE (target) != fmode) 4729 || GET_MODE (target) != fmode)
5006 target = gen_reg_rtx (fmode); 4730 target = gen_reg_rtx (fmode);
5007 4731
5008 imode = GET_MODE (from); 4732 imode = from_mode;
5009 do_pending_stack_adjust (); 4733 do_pending_stack_adjust ();
5010 4734
5011 /* Test whether the sign bit is set. */ 4735 /* Test whether the sign bit is set. */
5012 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode, 4736 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
5013 0, neglabel); 4737 0, neglabel);
5014 4738
5015 /* The sign bit is not set. Convert as signed. */ 4739 /* The sign bit is not set. Convert as signed. */
5016 expand_float (target, from, 0); 4740 expand_float (target, from, 0);
5017 emit_jump_insn (gen_jump (label)); 4741 emit_jump_insn (targetm.gen_jump (label));
5018 emit_barrier (); 4742 emit_barrier ();
5019 4743
5020 /* The sign bit is set. 4744 /* The sign bit is set.
5021 Convert to a usable (positive signed) value by shifting right 4745 Convert to a usable (positive signed) value by shifting right
5022 one bit, while remembering if a nonzero bit was shifted 4746 one bit, while remembering if a nonzero bit was shifted
5023 out; i.e., compute (from & 1) | (from >> 1). */ 4747 out; i.e., compute (from & 1) | (from >> 1). */
5024 4748
5025 emit_label (neglabel); 4749 emit_label (neglabel);
5026 temp = expand_binop (imode, and_optab, from, const1_rtx, 4750 temp = expand_binop (imode, and_optab, from, const1_rtx,
5027 NULL_RTX, 1, OPTAB_LIB_WIDEN); 4751 NULL_RTX, 1, OPTAB_LIB_WIDEN);
5028 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node, 4752 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
5029 NULL_RTX, 1);
5030 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1, 4753 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
5031 OPTAB_LIB_WIDEN); 4754 OPTAB_LIB_WIDEN);
5032 expand_float (target, temp, 0); 4755 expand_float (target, temp, 0);
5033 4756
5034 /* Multiply by 2 to undo the shift above. */ 4757 /* Multiply by 2 to undo the shift above. */
5044 } 4767 }
5045 4768
5046 /* If we are about to do some arithmetic to correct for an 4769 /* If we are about to do some arithmetic to correct for an
5047 unsigned operand, do it in a pseudo-register. */ 4770 unsigned operand, do it in a pseudo-register. */
5048 4771
5049 if (GET_MODE (to) != fmode 4772 if (to_mode != fmode
5050 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER) 4773 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
5051 target = gen_reg_rtx (fmode); 4774 target = gen_reg_rtx (fmode);
5052 4775
5053 /* Convert as signed integer to floating. */ 4776 /* Convert as signed integer to floating. */
5054 expand_float (target, from, 0); 4777 expand_float (target, from, 0);
5055 4778
5056 /* If FROM is negative (and therefore TO is negative), 4779 /* If FROM is negative (and therefore TO is negative),
5057 correct its value by 2**bitwidth. */ 4780 correct its value by 2**bitwidth. */
5058 4781
5059 do_pending_stack_adjust (); 4782 do_pending_stack_adjust ();
5060 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from), 4783 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
5061 0, label); 4784 0, label);
5062 4785
5063 4786
5064 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)), fmode); 4787 real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
5065 temp = expand_binop (fmode, add_optab, target, 4788 temp = expand_binop (fmode, add_optab, target,
5066 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode), 4789 const_double_from_real_value (offset, fmode),
5067 target, 0, OPTAB_LIB_WIDEN); 4790 target, 0, OPTAB_LIB_WIDEN);
5068 if (temp != target) 4791 if (temp != target)
5069 emit_move_insn (target, temp); 4792 emit_move_insn (target, temp);
5070 4793
5071 do_pending_stack_adjust (); 4794 do_pending_stack_adjust ();
5074 } 4797 }
5075 4798
5076 /* No hardware instruction available; call a library routine. */ 4799 /* No hardware instruction available; call a library routine. */
5077 { 4800 {
5078 rtx libfunc; 4801 rtx libfunc;
5079 rtx insns; 4802 rtx_insn *insns;
5080 rtx value; 4803 rtx value;
5081 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab; 4804 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
5082 4805
5083 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode)) 4806 if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_PRECISION (SImode))
5084 from = convert_to_mode (SImode, from, unsignedp); 4807 from = convert_to_mode (SImode, from, unsignedp);
5085 4808
5086 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from)); 4809 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5087 gcc_assert (libfunc); 4810 gcc_assert (libfunc);
5088 4811
5089 start_sequence (); 4812 start_sequence ();
5090 4813
5091 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, 4814 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5092 GET_MODE (to), 1, from, 4815 GET_MODE (to), from, GET_MODE (from));
5093 GET_MODE (from));
5094 insns = get_insns (); 4816 insns = get_insns ();
5095 end_sequence (); 4817 end_sequence ();
5096 4818
5097 emit_libcall_block (insns, target, value, 4819 emit_libcall_block (insns, target, value,
5098 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT, 4820 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
5119 void 4841 void
5120 expand_fix (rtx to, rtx from, int unsignedp) 4842 expand_fix (rtx to, rtx from, int unsignedp)
5121 { 4843 {
5122 enum insn_code icode; 4844 enum insn_code icode;
5123 rtx target = to; 4845 rtx target = to;
5124 enum machine_mode fmode, imode; 4846 machine_mode fmode, imode;
5125 int must_trunc = 0; 4847 opt_scalar_mode fmode_iter;
4848 bool must_trunc = false;
5126 4849
5127 /* We first try to find a pair of modes, one real and one integer, at 4850 /* We first try to find a pair of modes, one real and one integer, at
5128 least as wide as FROM and TO, respectively, in which we can open-code 4851 least as wide as FROM and TO, respectively, in which we can open-code
5129 this conversion. If the integer mode is wider than the mode of TO, 4852 this conversion. If the integer mode is wider than the mode of TO,
5130 we can do the conversion either signed or unsigned. */ 4853 we can do the conversion either signed or unsigned. */
5131 4854
5132 for (fmode = GET_MODE (from); fmode != VOIDmode; 4855 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
5133 fmode = GET_MODE_WIDER_MODE (fmode)) 4856 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
5134 for (imode = GET_MODE (to); imode != VOIDmode;
5135 imode = GET_MODE_WIDER_MODE (imode))
5136 { 4857 {
5137 int doing_unsigned = unsignedp; 4858 int doing_unsigned = unsignedp;
5138 4859
5139 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc); 4860 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5140 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp) 4861 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5141 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0; 4862 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5142 4863
5143 if (icode != CODE_FOR_nothing) 4864 if (icode != CODE_FOR_nothing)
5144 { 4865 {
5145 rtx last = get_last_insn (); 4866 rtx_insn *last = get_last_insn ();
5146 if (fmode != GET_MODE (from)) 4867 if (fmode != GET_MODE (from))
5147 from = convert_to_mode (fmode, from, 0); 4868 from = convert_to_mode (fmode, from, 0);
5148 4869
5149 if (must_trunc) 4870 if (must_trunc)
5150 { 4871 {
5189 inclusive. (as for other input overflow happens and result is undefined) 4910 inclusive. (as for other input overflow happens and result is undefined)
5190 So we know that the most important bit set in mantissa corresponds to 4911 So we know that the most important bit set in mantissa corresponds to
5191 2^63. The subtraction of 2^63 should not generate any rounding as it 4912 2^63. The subtraction of 2^63 should not generate any rounding as it
5192 simply clears out that bit. The rest is trivial. */ 4913 simply clears out that bit. The rest is trivial. */
5193 4914
5194 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT) 4915 scalar_int_mode to_mode;
5195 for (fmode = GET_MODE (from); fmode != VOIDmode; 4916 if (unsignedp
5196 fmode = GET_MODE_WIDER_MODE (fmode)) 4917 && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
5197 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc) 4918 && HWI_COMPUTABLE_MODE_P (to_mode))
5198 && (!DECIMAL_FLOAT_MODE_P (fmode) 4919 FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
5199 || GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to)))) 4920 {
5200 { 4921 scalar_mode fmode = fmode_iter.require ();
5201 int bitsize; 4922 if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
5202 REAL_VALUE_TYPE offset; 4923 0, &must_trunc)
5203 rtx limit, lab1, lab2, insn; 4924 && (!DECIMAL_FLOAT_MODE_P (fmode)
5204 4925 || (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
5205 bitsize = GET_MODE_BITSIZE (GET_MODE (to)); 4926 {
5206 real_2expN (&offset, bitsize - 1, fmode); 4927 int bitsize;
5207 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode); 4928 REAL_VALUE_TYPE offset;
5208 lab1 = gen_label_rtx (); 4929 rtx limit;
5209 lab2 = gen_label_rtx (); 4930 rtx_code_label *lab1, *lab2;
5210 4931 rtx_insn *insn;
5211 if (fmode != GET_MODE (from)) 4932
5212 from = convert_to_mode (fmode, from, 0); 4933 bitsize = GET_MODE_PRECISION (to_mode);
5213 4934 real_2expN (&offset, bitsize - 1, fmode);
5214 /* See if we need to do the subtraction. */ 4935 limit = const_double_from_real_value (offset, fmode);
5215 do_pending_stack_adjust (); 4936 lab1 = gen_label_rtx ();
5216 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from), 4937 lab2 = gen_label_rtx ();
5217 0, lab1); 4938
5218 4939 if (fmode != GET_MODE (from))
5219 /* If not, do the signed "fix" and branch around fixup code. */ 4940 from = convert_to_mode (fmode, from, 0);
5220 expand_fix (to, from, 0); 4941
5221 emit_jump_insn (gen_jump (lab2)); 4942 /* See if we need to do the subtraction. */
5222 emit_barrier (); 4943 do_pending_stack_adjust ();
5223 4944 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
5224 /* Otherwise, subtract 2**(N-1), convert to signed number, 4945 GET_MODE (from), 0, lab1);
5225 then add 2**(N-1). Do the addition using XOR since this 4946
5226 will often generate better code. */ 4947 /* If not, do the signed "fix" and branch around fixup code. */
5227 emit_label (lab1); 4948 expand_fix (to, from, 0);
5228 target = expand_binop (GET_MODE (from), sub_optab, from, limit, 4949 emit_jump_insn (targetm.gen_jump (lab2));
5229 NULL_RTX, 0, OPTAB_LIB_WIDEN); 4950 emit_barrier ();
5230 expand_fix (to, target, 0); 4951
5231 target = expand_binop (GET_MODE (to), xor_optab, to, 4952 /* Otherwise, subtract 2**(N-1), convert to signed number,
5232 gen_int_mode 4953 then add 2**(N-1). Do the addition using XOR since this
5233 ((HOST_WIDE_INT) 1 << (bitsize - 1), 4954 will often generate better code. */
5234 GET_MODE (to)), 4955 emit_label (lab1);
5235 to, 1, OPTAB_LIB_WIDEN); 4956 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5236 4957 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5237 if (target != to) 4958 expand_fix (to, target, 0);
5238 emit_move_insn (to, target); 4959 target = expand_binop (to_mode, xor_optab, to,
5239 4960 gen_int_mode
5240 emit_label (lab2); 4961 (HOST_WIDE_INT_1 << (bitsize - 1),
5241 4962 to_mode),
5242 if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing) 4963 to, 1, OPTAB_LIB_WIDEN);
5243 { 4964
5244 /* Make a place for a REG_NOTE and add it. */ 4965 if (target != to)
5245 insn = emit_move_insn (to, to); 4966 emit_move_insn (to, target);
5246 set_unique_reg_note (insn, 4967
5247 REG_EQUAL, 4968 emit_label (lab2);
5248 gen_rtx_fmt_e (UNSIGNED_FIX, 4969
5249 GET_MODE (to), 4970 if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
5250 copy_rtx (from))); 4971 {
5251 } 4972 /* Make a place for a REG_NOTE and add it. */
5252 4973 insn = emit_move_insn (to, to);
5253 return; 4974 set_dst_reg_note (insn, REG_EQUAL,
5254 } 4975 gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
4976 copy_rtx (from)),
4977 to);
4978 }
4979
4980 return;
4981 }
4982 }
5255 4983
5256 /* We can't do it with an insn, so use a library call. But first ensure 4984 /* We can't do it with an insn, so use a library call. But first ensure
5257 that the mode of TO is at least as wide as SImode, since those are the 4985 that the mode of TO is at least as wide as SImode, since those are the
5258 only library calls we know about. */ 4986 only library calls we know about. */
5259 4987
5260 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode)) 4988 if (GET_MODE_PRECISION (GET_MODE (to)) < GET_MODE_PRECISION (SImode))
5261 { 4989 {
5262 target = gen_reg_rtx (SImode); 4990 target = gen_reg_rtx (SImode);
5263 4991
5264 expand_fix (target, from, unsignedp); 4992 expand_fix (target, from, unsignedp);
5265 } 4993 }
5266 else 4994 else
5267 { 4995 {
5268 rtx insns; 4996 rtx_insn *insns;
5269 rtx value; 4997 rtx value;
5270 rtx libfunc; 4998 rtx libfunc;
5271 4999
5272 convert_optab tab = unsignedp ? ufix_optab : sfix_optab; 5000 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5273 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from)); 5001 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5274 gcc_assert (libfunc); 5002 gcc_assert (libfunc);
5275 5003
5276 start_sequence (); 5004 start_sequence ();
5277 5005
5278 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, 5006 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5279 GET_MODE (to), 1, from, 5007 GET_MODE (to), from, GET_MODE (from));
5280 GET_MODE (from));
5281 insns = get_insns (); 5008 insns = get_insns ();
5282 end_sequence (); 5009 end_sequence ();
5283 5010
5284 emit_libcall_block (insns, target, value, 5011 emit_libcall_block (insns, target, value,
5285 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX, 5012 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5293 else 5020 else
5294 convert_move (to, target, 0); 5021 convert_move (to, target, 0);
5295 } 5022 }
5296 } 5023 }
5297 5024
5025
5026 /* Promote integer arguments for a libcall if necessary.
5027 emit_library_call_value cannot do the promotion because it does not
5028 know if it should do a signed or unsigned promotion. This is because
5029 there are no tree types defined for libcalls. */
5030
5031 static rtx
5032 prepare_libcall_arg (rtx arg, int uintp)
5033 {
5034 scalar_int_mode mode;
5035 machine_mode arg_mode;
5036 if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
5037 {
5038 /* If we need to promote the integer function argument we need to do
5039 it here instead of inside emit_library_call_value because in
5040 emit_library_call_value we don't know if we should do a signed or
5041 unsigned promotion. */
5042
5043 int unsigned_p = 0;
5044 arg_mode = promote_function_mode (NULL_TREE, mode,
5045 &unsigned_p, NULL_TREE, 0);
5046 if (arg_mode != mode)
5047 return convert_to_mode (arg_mode, arg, uintp);
5048 }
5049 return arg;
5050 }
5051
5298 /* Generate code to convert FROM or TO a fixed-point. 5052 /* Generate code to convert FROM or TO a fixed-point.
5299 If UINTP is true, either TO or FROM is an unsigned integer. 5053 If UINTP is true, either TO or FROM is an unsigned integer.
5300 If SATP is true, we need to saturate the result. */ 5054 If SATP is true, we need to saturate the result. */
5301 5055
5302 void 5056 void
5303 expand_fixed_convert (rtx to, rtx from, int uintp, int satp) 5057 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5304 { 5058 {
5305 enum machine_mode to_mode = GET_MODE (to); 5059 machine_mode to_mode = GET_MODE (to);
5306 enum machine_mode from_mode = GET_MODE (from); 5060 machine_mode from_mode = GET_MODE (from);
5307 convert_optab tab; 5061 convert_optab tab;
5308 enum rtx_code this_code; 5062 enum rtx_code this_code;
5309 enum insn_code code; 5063 enum insn_code code;
5310 rtx insns, value; 5064 rtx_insn *insns;
5065 rtx value;
5311 rtx libfunc; 5066 rtx libfunc;
5312 5067
5313 if (to_mode == from_mode) 5068 if (to_mode == from_mode)
5314 { 5069 {
5315 emit_move_insn (to, from); 5070 emit_move_insn (to, from);
5334 } 5089 }
5335 5090
5336 libfunc = convert_optab_libfunc (tab, to_mode, from_mode); 5091 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5337 gcc_assert (libfunc); 5092 gcc_assert (libfunc);
5338 5093
5094 from = prepare_libcall_arg (from, uintp);
5095 from_mode = GET_MODE (from);
5096
5339 start_sequence (); 5097 start_sequence ();
5340 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode, 5098 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5341 1, from, from_mode); 5099 from, from_mode);
5342 insns = get_insns (); 5100 insns = get_insns ();
5343 end_sequence (); 5101 end_sequence ();
5344 5102
5345 emit_libcall_block (insns, to, value, 5103 emit_libcall_block (insns, to, value,
5346 gen_rtx_fmt_e (tab->code, to_mode, from)); 5104 gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
5347 } 5105 }
5348 5106
5349 /* Generate code to convert FROM to fixed point and store in TO. FROM 5107 /* Generate code to convert FROM to fixed point and store in TO. FROM
5350 must be floating point, TO must be signed. Use the conversion optab 5108 must be floating point, TO must be signed. Use the conversion optab
5351 TAB to do the conversion. */ 5109 TAB to do the conversion. */
5353 bool 5111 bool
5354 expand_sfix_optab (rtx to, rtx from, convert_optab tab) 5112 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5355 { 5113 {
5356 enum insn_code icode; 5114 enum insn_code icode;
5357 rtx target = to; 5115 rtx target = to;
5358 enum machine_mode fmode, imode; 5116 machine_mode fmode, imode;
5359 5117
5360 /* We first try to find a pair of modes, one real and one integer, at 5118 /* We first try to find a pair of modes, one real and one integer, at
5361 least as wide as FROM and TO, respectively, in which we can open-code 5119 least as wide as FROM and TO, respectively, in which we can open-code
5362 this conversion. If the integer mode is wider than the mode of TO, 5120 this conversion. If the integer mode is wider than the mode of TO,
5363 we can do the conversion either signed or unsigned. */ 5121 we can do the conversion either signed or unsigned. */
5364 5122
5365 for (fmode = GET_MODE (from); fmode != VOIDmode; 5123 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
5366 fmode = GET_MODE_WIDER_MODE (fmode)) 5124 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
5367 for (imode = GET_MODE (to); imode != VOIDmode;
5368 imode = GET_MODE_WIDER_MODE (imode))
5369 { 5125 {
5370 icode = convert_optab_handler (tab, imode, fmode); 5126 icode = convert_optab_handler (tab, imode, fmode);
5371 if (icode != CODE_FOR_nothing) 5127 if (icode != CODE_FOR_nothing)
5372 { 5128 {
5373 rtx last = get_last_insn (); 5129 rtx_insn *last = get_last_insn ();
5374 if (fmode != GET_MODE (from)) 5130 if (fmode != GET_MODE (from))
5375 from = convert_to_mode (fmode, from, 0); 5131 from = convert_to_mode (fmode, from, 0);
5376 5132
5377 if (imode != GET_MODE (to)) 5133 if (imode != GET_MODE (to))
5378 target = gen_reg_rtx (imode); 5134 target = gen_reg_rtx (imode);
5392 } 5148 }
5393 5149
5394 /* Report whether we have an instruction to perform the operation 5150 /* Report whether we have an instruction to perform the operation
5395 specified by CODE on operands of mode MODE. */ 5151 specified by CODE on operands of mode MODE. */
5396 int 5152 int
5397 have_insn_for (enum rtx_code code, enum machine_mode mode) 5153 have_insn_for (enum rtx_code code, machine_mode mode)
5398 { 5154 {
5399 return (code_to_optab[(int) code] != 0 5155 return (code_to_optab (code)
5400 && (optab_handler (code_to_optab[(int) code], mode) 5156 && (optab_handler (code_to_optab (code), mode)
5401 != CODE_FOR_nothing)); 5157 != CODE_FOR_nothing));
5402 }
5403
5404 /* Set all insn_code fields to CODE_FOR_nothing. */
5405
5406 static void
5407 init_insn_codes (void)
5408 {
5409 memset (optab_table, 0, sizeof (optab_table));
5410 memset (convert_optab_table, 0, sizeof (convert_optab_table));
5411 memset (direct_optab_table, 0, sizeof (direct_optab_table));
5412 }
5413
5414 /* Initialize OP's code to CODE, and write it into the code_to_optab table. */
5415 static inline void
5416 init_optab (optab op, enum rtx_code code)
5417 {
5418 op->code = code;
5419 code_to_optab[(int) code] = op;
5420 }
5421
5422 /* Same, but fill in its code as CODE, and do _not_ write it into
5423 the code_to_optab table. */
5424 static inline void
5425 init_optabv (optab op, enum rtx_code code)
5426 {
5427 op->code = code;
5428 }
5429
5430 /* Conversion optabs never go in the code_to_optab table. */
5431 static void
5432 init_convert_optab (convert_optab op, enum rtx_code code)
5433 {
5434 op->code = code;
5435 }
5436
5437 /* Initialize the libfunc fields of an entire group of entries in some
5438 optab. Each entry is set equal to a string consisting of a leading
5439 pair of underscores followed by a generic operation name followed by
5440 a mode name (downshifted to lowercase) followed by a single character
5441 representing the number of operands for the given operation (which is
5442 usually one of the characters '2', '3', or '4').
5443
5444 OPTABLE is the table in which libfunc fields are to be initialized.
5445 OPNAME is the generic (string) name of the operation.
5446 SUFFIX is the character which specifies the number of operands for
5447 the given generic operation.
5448 MODE is the mode to generate for.
5449 */
5450
5451 static void
5452 gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
5453 {
5454 unsigned opname_len = strlen (opname);
5455 const char *mname = GET_MODE_NAME (mode);
5456 unsigned mname_len = strlen (mname);
5457 char *libfunc_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5458 char *p;
5459 const char *q;
5460
5461 p = libfunc_name;
5462 *p++ = '_';
5463 *p++ = '_';
5464 for (q = opname; *q; )
5465 *p++ = *q++;
5466 for (q = mname; *q; q++)
5467 *p++ = TOLOWER (*q);
5468 *p++ = suffix;
5469 *p = '\0';
5470
5471 set_optab_libfunc (optable, mode,
5472 ggc_alloc_string (libfunc_name, p - libfunc_name));
5473 }
5474
5475 /* Like gen_libfunc, but verify that integer operation is involved. */
5476
5477 static void
5478 gen_int_libfunc (optab optable, const char *opname, char suffix,
5479 enum machine_mode mode)
5480 {
5481 int maxsize = 2 * BITS_PER_WORD;
5482
5483 if (GET_MODE_CLASS (mode) != MODE_INT)
5484 return;
5485 if (maxsize < LONG_LONG_TYPE_SIZE)
5486 maxsize = LONG_LONG_TYPE_SIZE;
5487 if (GET_MODE_CLASS (mode) != MODE_INT
5488 || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
5489 return;
5490 gen_libfunc (optable, opname, suffix, mode);
5491 }
5492
5493 /* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */
5494
5495 static void
5496 gen_fp_libfunc (optab optable, const char *opname, char suffix,
5497 enum machine_mode mode)
5498 {
5499 char *dec_opname;
5500
5501 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5502 gen_libfunc (optable, opname, suffix, mode);
5503 if (DECIMAL_FLOAT_MODE_P (mode))
5504 {
5505 dec_opname = XALLOCAVEC (char, sizeof (DECIMAL_PREFIX) + strlen (opname));
5506 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5507 depending on the low level floating format used. */
5508 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5509 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5510 gen_libfunc (optable, dec_opname, suffix, mode);
5511 }
5512 }
5513
5514 /* Like gen_libfunc, but verify that fixed-point operation is involved. */
5515
5516 static void
5517 gen_fixed_libfunc (optab optable, const char *opname, char suffix,
5518 enum machine_mode mode)
5519 {
5520 if (!ALL_FIXED_POINT_MODE_P (mode))
5521 return;
5522 gen_libfunc (optable, opname, suffix, mode);
5523 }
5524
5525 /* Like gen_libfunc, but verify that signed fixed-point operation is
5526 involved. */
5527
5528 static void
5529 gen_signed_fixed_libfunc (optab optable, const char *opname, char suffix,
5530 enum machine_mode mode)
5531 {
5532 if (!SIGNED_FIXED_POINT_MODE_P (mode))
5533 return;
5534 gen_libfunc (optable, opname, suffix, mode);
5535 }
5536
5537 /* Like gen_libfunc, but verify that unsigned fixed-point operation is
5538 involved. */
5539
5540 static void
5541 gen_unsigned_fixed_libfunc (optab optable, const char *opname, char suffix,
5542 enum machine_mode mode)
5543 {
5544 if (!UNSIGNED_FIXED_POINT_MODE_P (mode))
5545 return;
5546 gen_libfunc (optable, opname, suffix, mode);
5547 }
5548
5549 /* Like gen_libfunc, but verify that FP or INT operation is involved. */
5550
5551 static void
5552 gen_int_fp_libfunc (optab optable, const char *name, char suffix,
5553 enum machine_mode mode)
5554 {
5555 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5556 gen_fp_libfunc (optable, name, suffix, mode);
5557 if (INTEGRAL_MODE_P (mode))
5558 gen_int_libfunc (optable, name, suffix, mode);
5559 }
5560
5561 /* Like gen_libfunc, but verify that FP or INT operation is involved
5562 and add 'v' suffix for integer operation. */
5563
5564 static void
5565 gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
5566 enum machine_mode mode)
5567 {
5568 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5569 gen_fp_libfunc (optable, name, suffix, mode);
5570 if (GET_MODE_CLASS (mode) == MODE_INT)
5571 {
5572 int len = strlen (name);
5573 char *v_name = XALLOCAVEC (char, len + 2);
5574 strcpy (v_name, name);
5575 v_name[len] = 'v';
5576 v_name[len + 1] = 0;
5577 gen_int_libfunc (optable, v_name, suffix, mode);
5578 }
5579 }
5580
5581 /* Like gen_libfunc, but verify that FP or INT or FIXED operation is
5582 involved. */
5583
5584 static void
5585 gen_int_fp_fixed_libfunc (optab optable, const char *name, char suffix,
5586 enum machine_mode mode)
5587 {
5588 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5589 gen_fp_libfunc (optable, name, suffix, mode);
5590 if (INTEGRAL_MODE_P (mode))
5591 gen_int_libfunc (optable, name, suffix, mode);
5592 if (ALL_FIXED_POINT_MODE_P (mode))
5593 gen_fixed_libfunc (optable, name, suffix, mode);
5594 }
5595
5596 /* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
5597 involved. */
5598
5599 static void
5600 gen_int_fp_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5601 enum machine_mode mode)
5602 {
5603 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5604 gen_fp_libfunc (optable, name, suffix, mode);
5605 if (INTEGRAL_MODE_P (mode))
5606 gen_int_libfunc (optable, name, suffix, mode);
5607 if (SIGNED_FIXED_POINT_MODE_P (mode))
5608 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5609 }
5610
5611 /* Like gen_libfunc, but verify that INT or FIXED operation is
5612 involved. */
5613
5614 static void
5615 gen_int_fixed_libfunc (optab optable, const char *name, char suffix,
5616 enum machine_mode mode)
5617 {
5618 if (INTEGRAL_MODE_P (mode))
5619 gen_int_libfunc (optable, name, suffix, mode);
5620 if (ALL_FIXED_POINT_MODE_P (mode))
5621 gen_fixed_libfunc (optable, name, suffix, mode);
5622 }
5623
5624 /* Like gen_libfunc, but verify that INT or signed FIXED operation is
5625 involved. */
5626
5627 static void
5628 gen_int_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5629 enum machine_mode mode)
5630 {
5631 if (INTEGRAL_MODE_P (mode))
5632 gen_int_libfunc (optable, name, suffix, mode);
5633 if (SIGNED_FIXED_POINT_MODE_P (mode))
5634 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5635 }
5636
5637 /* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
5638 involved. */
5639
5640 static void
5641 gen_int_unsigned_fixed_libfunc (optab optable, const char *name, char suffix,
5642 enum machine_mode mode)
5643 {
5644 if (INTEGRAL_MODE_P (mode))
5645 gen_int_libfunc (optable, name, suffix, mode);
5646 if (UNSIGNED_FIXED_POINT_MODE_P (mode))
5647 gen_unsigned_fixed_libfunc (optable, name, suffix, mode);
5648 }
5649
/* Initialize the libfunc fields of an entire group of entries of an
   inter-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above, but instead of having
   a mode name and an operand count these functions have two mode names
   and no operand count.  The resulting name is
   "__" [DECIMAL_PREFIX] OPNAME <fmode> <tmode>, all lower case.  */

static void
gen_interclass_conv_libfunc (convert_optab tab,
			     const char *opname,
			     enum machine_mode tmode,
			     enum machine_mode fmode)
{
  size_t opname_len = strlen (opname);
  size_t mname_len = 0;

  const char *fname, *tname;
  const char *q;
  char *libfunc_name, *suffix;
  char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
  char *p;

  /* If this is a decimal conversion, add the current BID vs. DPD prefix that
     depends on which underlying decimal floating point format is used.  */
  const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;

  mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));

  /* Buffer for the non-decimal name: "__" + OPNAME + both mode names
     + NUL.  The mode suffix is filled in below, after we know which
     buffer is in use.  */
  nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
  nondec_name[0] = '_';
  nondec_name[1] = '_';
  memcpy (&nondec_name[2], opname, opname_len);
  nondec_suffix = nondec_name + opname_len + 2;

  /* Buffer for the decimal name: same layout with DECIMAL_PREFIX
     inserted after the "__".  */
  dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
  dec_name[0] = '_';
  dec_name[1] = '_';
  memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
  memcpy (&dec_name[2+dec_len], opname, opname_len);
  dec_suffix = dec_name + dec_len + opname_len + 2;

  fname = GET_MODE_NAME (fmode);
  tname = GET_MODE_NAME (tmode);

  /* Use the decimal-prefixed spelling if either side is a decimal
     float mode.  */
  if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
    {
      libfunc_name = dec_name;
      suffix = dec_suffix;
    }
  else
    {
      libfunc_name = nondec_name;
      suffix = nondec_suffix;
    }

  /* Append the lower-cased "from" mode name, then the "to" mode name.  */
  p = suffix;
  for (q = fname; *q; p++, q++)
    *p = TOLOWER (*q);
  for (q = tname; *q; p++, q++)
    *p = TOLOWER (*q);

  *p = '\0';

  set_conv_libfunc (tab, tmode, fmode,
		    ggc_alloc_string (libfunc_name, p - libfunc_name));
}
5715
5716 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5717 int->fp conversion. */
5718
5719 static void
5720 gen_int_to_fp_conv_libfunc (convert_optab tab,
5721 const char *opname,
5722 enum machine_mode tmode,
5723 enum machine_mode fmode)
5724 {
5725 if (GET_MODE_CLASS (fmode) != MODE_INT)
5726 return;
5727 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5728 return;
5729 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5730 }
5731
5732 /* ufloat_optab is special by using floatun for FP and floatuns decimal fp
5733 naming scheme. */
5734
5735 static void
5736 gen_ufloat_conv_libfunc (convert_optab tab,
5737 const char *opname ATTRIBUTE_UNUSED,
5738 enum machine_mode tmode,
5739 enum machine_mode fmode)
5740 {
5741 if (DECIMAL_FLOAT_MODE_P (tmode))
5742 gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
5743 else
5744 gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
5745 }
5746
5747 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5748 fp->int conversion. */
5749
5750 static void
5751 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
5752 const char *opname,
5753 enum machine_mode tmode,
5754 enum machine_mode fmode)
5755 {
5756 if (GET_MODE_CLASS (fmode) != MODE_INT)
5757 return;
5758 if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
5759 return;
5760 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5761 }
5762
5763 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5764 fp->int conversion with no decimal floating point involved. */
5765
5766 static void
5767 gen_fp_to_int_conv_libfunc (convert_optab tab,
5768 const char *opname,
5769 enum machine_mode tmode,
5770 enum machine_mode fmode)
5771 {
5772 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5773 return;
5774 if (GET_MODE_CLASS (tmode) != MODE_INT)
5775 return;
5776 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5777 }
5778
/* Initialize the libfunc fields of an intra-mode-class conversion optab.
   The string formation rules are similar to the ones for init_libfunc,
   above; unlike the interclass variant the generated name additionally
   ends in '2': "__" [DECIMAL_PREFIX] OPNAME <fmode> <tmode> "2".  */

static void
gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
			     enum machine_mode tmode, enum machine_mode fmode)
{
  size_t opname_len = strlen (opname);
  size_t mname_len = 0;

  const char *fname, *tname;
  const char *q;
  char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
  char *libfunc_name, *suffix;
  char *p;

  /* If this is a decimal conversion, add the current BID vs. DPD prefix that
     depends on which underlying decimal floating point format is used.  */
  const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;

  mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));

  /* Buffer for the non-decimal name: "__" + OPNAME + both mode names
     + '2' + NUL.  */
  nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
  nondec_name[0] = '_';
  nondec_name[1] = '_';
  memcpy (&nondec_name[2], opname, opname_len);
  nondec_suffix = nondec_name + opname_len + 2;

  /* Buffer for the decimal name: same layout with DECIMAL_PREFIX
     inserted after the "__".  */
  dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
  dec_name[0] = '_';
  dec_name[1] = '_';
  memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
  memcpy (&dec_name[2 + dec_len], opname, opname_len);
  dec_suffix = dec_name + dec_len + opname_len + 2;

  fname = GET_MODE_NAME (fmode);
  tname = GET_MODE_NAME (tmode);

  /* Use the decimal-prefixed spelling if either side is a decimal
     float mode.  */
  if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
    {
      libfunc_name = dec_name;
      suffix = dec_suffix;
    }
  else
    {
      libfunc_name = nondec_name;
      suffix = nondec_suffix;
    }

  /* Append the lower-cased "from" mode name, then the "to" mode name.  */
  p = suffix;
  for (q = fname; *q; p++, q++)
    *p = TOLOWER (*q);
  for (q = tname; *q; p++, q++)
    *p = TOLOWER (*q);

  /* Intraclass conversions carry a trailing operand count of 2.  */
  *p++ = '2';
  *p = '\0';

  set_conv_libfunc (tab, tmode, fmode,
		    ggc_alloc_string (libfunc_name, p - libfunc_name));
}
5841
/* Pick proper libcall for trunc_optab.  We need to choose if we do
   truncation or extension and interclass or intraclass.  */

static void
gen_trunc_conv_libfunc (convert_optab tab,
			const char *opname,
			enum machine_mode tmode,
			enum machine_mode fmode)
{
  /* Both modes must be floating point, binary or decimal.  */
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
    return;
  if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
    return;
  if (tmode == fmode)
    return;

  /* A binary <-> decimal crossing always gets the two-mode interclass
     name, regardless of precision.  */
  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
      || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
    gen_interclass_conv_libfunc (tab, opname, tmode, fmode);

  /* Within a single class, only an actual truncation (source strictly
     wider than destination) gets a libfunc.  */
  if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
    return;

  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
       && GET_MODE_CLASS (fmode) == MODE_FLOAT)
      || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
    gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
}
5870
/* Pick proper libcall for extend_optab.  We need to choose if we do
   truncation or extension and interclass or intraclass.  */

static void
gen_extend_conv_libfunc (convert_optab tab,
			 const char *opname ATTRIBUTE_UNUSED,
			 enum machine_mode tmode,
			 enum machine_mode fmode)
{
  /* Both modes must be floating point, binary or decimal.  */
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
    return;
  if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
    return;
  if (tmode == fmode)
    return;

  /* A binary <-> decimal crossing always gets the two-mode interclass
     name, regardless of precision.  */
  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
      || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
    gen_interclass_conv_libfunc (tab, opname, tmode, fmode);

  /* Within a single class, only an actual extension (source no wider
     than destination) gets a libfunc.  */
  if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
    return;

  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
       && GET_MODE_CLASS (fmode) == MODE_FLOAT)
      || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
    gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
}
5899
5900 /* Pick proper libcall for fract_optab. We need to chose if we do
5901 interclass or intraclass. */
5902
5903 static void
5904 gen_fract_conv_libfunc (convert_optab tab,
5905 const char *opname,
5906 enum machine_mode tmode,
5907 enum machine_mode fmode)
5908 {
5909 if (tmode == fmode)
5910 return;
5911 if (!(ALL_FIXED_POINT_MODE_P (tmode) || ALL_FIXED_POINT_MODE_P (fmode)))
5912 return;
5913
5914 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5915 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5916 else
5917 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5918 }
5919
5920 /* Pick proper libcall for fractuns_optab. */
5921
5922 static void
5923 gen_fractuns_conv_libfunc (convert_optab tab,
5924 const char *opname,
5925 enum machine_mode tmode,
5926 enum machine_mode fmode)
5927 {
5928 if (tmode == fmode)
5929 return;
5930 /* One mode must be a fixed-point mode, and the other must be an integer
5931 mode. */
5932 if (!((ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT)
5933 || (ALL_FIXED_POINT_MODE_P (fmode)
5934 && GET_MODE_CLASS (tmode) == MODE_INT)))
5935 return;
5936
5937 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5938 }
5939
5940 /* Pick proper libcall for satfract_optab. We need to chose if we do
5941 interclass or intraclass. */
5942
5943 static void
5944 gen_satfract_conv_libfunc (convert_optab tab,
5945 const char *opname,
5946 enum machine_mode tmode,
5947 enum machine_mode fmode)
5948 {
5949 if (tmode == fmode)
5950 return;
5951 /* TMODE must be a fixed-point mode. */
5952 if (!ALL_FIXED_POINT_MODE_P (tmode))
5953 return;
5954
5955 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5956 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5957 else
5958 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5959 }
5960
5961 /* Pick proper libcall for satfractuns_optab. */
5962
5963 static void
5964 gen_satfractuns_conv_libfunc (convert_optab tab,
5965 const char *opname,
5966 enum machine_mode tmode,
5967 enum machine_mode fmode)
5968 {
5969 if (tmode == fmode)
5970 return;
5971 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
5972 if (!(ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT))
5973 return;
5974
5975 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5976 }
5977
/* A table of previously-created libfuncs, hashed by name.  Entries are
   FUNCTION_DECLs keyed on their DECL_NAME identifier, so each named
   libfunc gets exactly one decl (see init_one_libfunc).  */
static GTY ((param_is (union tree_node))) htab_t libfunc_decls;
5980
5981 /* Hashtable callbacks for libfunc_decls. */
5982
/* Hash callback for libfunc_decls: hash a stored FUNCTION_DECL by the
   hash value of its DECL_NAME identifier.  */
static hashval_t
libfunc_decl_hash (const void *entry)
{
  return IDENTIFIER_HASH_VALUE (DECL_NAME ((const_tree) entry));
}
5988
/* Equality callback for libfunc_decls.  ENTRY1 is a stored decl;
   ENTRY2 is the identifier node used as the lookup key (see
   init_one_libfunc, which looks up by identifier).  */
static int
libfunc_decl_eq (const void *entry1, const void *entry2)
{
  return DECL_NAME ((const_tree) entry1) == (const_tree) entry2;
}
5994
/* Build a decl for a libfunc named NAME.  The decl is artificial,
   external and public; its SYMBOL_REF keeps only the flags assigned by
   targetm.encode_section_info.  */

tree
build_libfunc_function (const char *name)
{
  tree decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
			  get_identifier (name),
			  build_function_type (integer_type_node, NULL_TREE));
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo()".  */
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;
  /* Building DECL_RTL below requires an assembler name.  */
  gcc_assert (DECL_ASSEMBLER_NAME (decl));

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);

  return decl;
}
6016
6017 rtx
6018 init_one_libfunc (const char *name)
6019 {
6020 tree id, decl;
6021 void **slot;
6022 hashval_t hash;
6023
6024 if (libfunc_decls == NULL)
6025 libfunc_decls = htab_create_ggc (37, libfunc_decl_hash,
6026 libfunc_decl_eq, NULL);
6027
6028 /* See if we have already created a libfunc decl for this function. */
6029 id = get_identifier (name);
6030 hash = IDENTIFIER_HASH_VALUE (id);
6031 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, INSERT);
6032 decl = (tree) *slot;
6033 if (decl == NULL)
6034 {
6035 /* Create a new decl, so that it can be passed to
6036 targetm.encode_section_info. */
6037 decl = build_libfunc_function (name);
6038 *slot = decl;
6039 }
6040 return XEXP (DECL_RTL (decl), 0);
6041 }
6042
6043 /* Adjust the assembler name of libfunc NAME to ASMSPEC. */
6044
6045 rtx
6046 set_user_assembler_libfunc (const char *name, const char *asmspec)
6047 {
6048 tree id, decl;
6049 void **slot;
6050 hashval_t hash;
6051
6052 id = get_identifier (name);
6053 hash = IDENTIFIER_HASH_VALUE (id);
6054 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, NO_INSERT);
6055 gcc_assert (slot);
6056 decl = (tree) *slot;
6057 set_user_assembler_name (decl, asmspec);
6058 return XEXP (DECL_RTL (decl), 0);
6059 }
6060
6061 /* Call this to reset the function entry for one optab (OPTABLE) in mode
6062 MODE to NAME, which should be either 0 or a string constant. */
6063 void
6064 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
6065 {
6066 rtx val;
6067 struct libfunc_entry e;
6068 struct libfunc_entry **slot;
6069 e.optab = (size_t) (optable - &optab_table[0]);
6070 e.mode1 = mode;
6071 e.mode2 = VOIDmode;
6072
6073 if (name)
6074 val = init_one_libfunc (name);
6075 else
6076 val = 0;
6077 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6078 if (*slot == NULL)
6079 *slot = ggc_alloc_libfunc_entry ();
6080 (*slot)->optab = (size_t) (optable - &optab_table[0]);
6081 (*slot)->mode1 = mode;
6082 (*slot)->mode2 = VOIDmode;
6083 (*slot)->libfunc = val;
6084 }
6085
6086 /* Call this to reset the function entry for one conversion optab
6087 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6088 either 0 or a string constant. */
6089 void
6090 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
6091 enum machine_mode fmode, const char *name)
6092 {
6093 rtx val;
6094 struct libfunc_entry e;
6095 struct libfunc_entry **slot;
6096 e.optab = (size_t) (optable - &convert_optab_table[0]);
6097 e.mode1 = tmode;
6098 e.mode2 = fmode;
6099
6100 if (name)
6101 val = init_one_libfunc (name);
6102 else
6103 val = 0;
6104 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6105 if (*slot == NULL)
6106 *slot = ggc_alloc_libfunc_entry ();
6107 (*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
6108 (*slot)->mode1 = tmode;
6109 (*slot)->mode2 = fmode;
6110 (*slot)->libfunc = val;
6111 }
6112
6113 /* Call this to initialize the contents of the optabs
6114 appropriately for the current target machine. */
6115
6116 void
6117 init_optabs (void)
6118 {
6119 if (libfunc_hash)
6120 {
6121 htab_empty (libfunc_hash);
6122 /* We statically initialize the insn_codes with the equivalent of
6123 CODE_FOR_nothing. Repeat the process if reinitialising. */
6124 init_insn_codes ();
6125 }
6126 else
6127 libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);
6128
6129 init_optab (add_optab, PLUS);
6130 init_optabv (addv_optab, PLUS);
6131 init_optab (sub_optab, MINUS);
6132 init_optabv (subv_optab, MINUS);
6133 init_optab (ssadd_optab, SS_PLUS);
6134 init_optab (usadd_optab, US_PLUS);
6135 init_optab (sssub_optab, SS_MINUS);
6136 init_optab (ussub_optab, US_MINUS);
6137 init_optab (smul_optab, MULT);
6138 init_optab (ssmul_optab, SS_MULT);
6139 init_optab (usmul_optab, US_MULT);
6140 init_optabv (smulv_optab, MULT);
6141 init_optab (smul_highpart_optab, UNKNOWN);
6142 init_optab (umul_highpart_optab, UNKNOWN);
6143 init_optab (smul_widen_optab, UNKNOWN);
6144 init_optab (umul_widen_optab, UNKNOWN);
6145 init_optab (usmul_widen_optab, UNKNOWN);
6146 init_optab (smadd_widen_optab, UNKNOWN);
6147 init_optab (umadd_widen_optab, UNKNOWN);
6148 init_optab (ssmadd_widen_optab, UNKNOWN);
6149 init_optab (usmadd_widen_optab, UNKNOWN);
6150 init_optab (smsub_widen_optab, UNKNOWN);
6151 init_optab (umsub_widen_optab, UNKNOWN);
6152 init_optab (ssmsub_widen_optab, UNKNOWN);
6153 init_optab (usmsub_widen_optab, UNKNOWN);
6154 init_optab (sdiv_optab, DIV);
6155 init_optab (ssdiv_optab, SS_DIV);
6156 init_optab (usdiv_optab, US_DIV);
6157 init_optabv (sdivv_optab, DIV);
6158 init_optab (sdivmod_optab, UNKNOWN);
6159 init_optab (udiv_optab, UDIV);
6160 init_optab (udivmod_optab, UNKNOWN);
6161 init_optab (smod_optab, MOD);
6162 init_optab (umod_optab, UMOD);
6163 init_optab (fmod_optab, UNKNOWN);
6164 init_optab (remainder_optab, UNKNOWN);
6165 init_optab (ftrunc_optab, UNKNOWN);
6166 init_optab (and_optab, AND);
6167 init_optab (ior_optab, IOR);
6168 init_optab (xor_optab, XOR);
6169 init_optab (ashl_optab, ASHIFT);
6170 init_optab (ssashl_optab, SS_ASHIFT);
6171 init_optab (usashl_optab, US_ASHIFT);
6172 init_optab (ashr_optab, ASHIFTRT);
6173 init_optab (lshr_optab, LSHIFTRT);
6174 init_optab (rotl_optab, ROTATE);
6175 init_optab (rotr_optab, ROTATERT);
6176 init_optab (smin_optab, SMIN);
6177 init_optab (smax_optab, SMAX);
6178 init_optab (umin_optab, UMIN);
6179 init_optab (umax_optab, UMAX);
6180 init_optab (pow_optab, UNKNOWN);
6181 init_optab (atan2_optab, UNKNOWN);
6182 init_optab (fma_optab, FMA);
6183 init_optab (fms_optab, UNKNOWN);
6184 init_optab (fnma_optab, UNKNOWN);
6185 init_optab (fnms_optab, UNKNOWN);
6186
6187 /* These three have codes assigned exclusively for the sake of
6188 have_insn_for. */
6189 init_optab (mov_optab, SET);
6190 init_optab (movstrict_optab, STRICT_LOW_PART);
6191 init_optab (cbranch_optab, COMPARE);
6192
6193 init_optab (cmov_optab, UNKNOWN);
6194 init_optab (cstore_optab, UNKNOWN);
6195 init_optab (ctrap_optab, UNKNOWN);
6196
6197 init_optab (storent_optab, UNKNOWN);
6198
6199 init_optab (cmp_optab, UNKNOWN);
6200 init_optab (ucmp_optab, UNKNOWN);
6201
6202 init_optab (eq_optab, EQ);
6203 init_optab (ne_optab, NE);
6204 init_optab (gt_optab, GT);
6205 init_optab (ge_optab, GE);
6206 init_optab (lt_optab, LT);
6207 init_optab (le_optab, LE);
6208 init_optab (unord_optab, UNORDERED);
6209
6210 init_optab (neg_optab, NEG);
6211 init_optab (ssneg_optab, SS_NEG);
6212 init_optab (usneg_optab, US_NEG);
6213 init_optabv (negv_optab, NEG);
6214 init_optab (abs_optab, ABS);
6215 init_optabv (absv_optab, ABS);
6216 init_optab (addcc_optab, UNKNOWN);
6217 init_optab (one_cmpl_optab, NOT);
6218 init_optab (bswap_optab, BSWAP);
6219 init_optab (ffs_optab, FFS);
6220 init_optab (clz_optab, CLZ);
6221 init_optab (ctz_optab, CTZ);
6222 init_optab (popcount_optab, POPCOUNT);
6223 init_optab (parity_optab, PARITY);
6224 init_optab (sqrt_optab, SQRT);
6225 init_optab (floor_optab, UNKNOWN);
6226 init_optab (ceil_optab, UNKNOWN);
6227 init_optab (round_optab, UNKNOWN);
6228 init_optab (btrunc_optab, UNKNOWN);
6229 init_optab (nearbyint_optab, UNKNOWN);
6230 init_optab (rint_optab, UNKNOWN);
6231 init_optab (sincos_optab, UNKNOWN);
6232 init_optab (sin_optab, UNKNOWN);
6233 init_optab (asin_optab, UNKNOWN);
6234 init_optab (cos_optab, UNKNOWN);
6235 init_optab (acos_optab, UNKNOWN);
6236 init_optab (exp_optab, UNKNOWN);
6237 init_optab (exp10_optab, UNKNOWN);
6238 init_optab (exp2_optab, UNKNOWN);
6239 init_optab (expm1_optab, UNKNOWN);
6240 init_optab (ldexp_optab, UNKNOWN);
6241 init_optab (scalb_optab, UNKNOWN);
6242 init_optab (significand_optab, UNKNOWN);
6243 init_optab (logb_optab, UNKNOWN);
6244 init_optab (ilogb_optab, UNKNOWN);
6245 init_optab (log_optab, UNKNOWN);
6246 init_optab (log10_optab, UNKNOWN);
6247 init_optab (log2_optab, UNKNOWN);
6248 init_optab (log1p_optab, UNKNOWN);
6249 init_optab (tan_optab, UNKNOWN);
6250 init_optab (atan_optab, UNKNOWN);
6251 init_optab (copysign_optab, UNKNOWN);
6252 init_optab (signbit_optab, UNKNOWN);
6253
6254 init_optab (isinf_optab, UNKNOWN);
6255
6256 init_optab (strlen_optab, UNKNOWN);
6257 init_optab (push_optab, UNKNOWN);
6258
6259 init_optab (reduc_smax_optab, UNKNOWN);
6260 init_optab (reduc_umax_optab, UNKNOWN);
6261 init_optab (reduc_smin_optab, UNKNOWN);
6262 init_optab (reduc_umin_optab, UNKNOWN);
6263 init_optab (reduc_splus_optab, UNKNOWN);
6264 init_optab (reduc_uplus_optab, UNKNOWN);
6265
6266 init_optab (ssum_widen_optab, UNKNOWN);
6267 init_optab (usum_widen_optab, UNKNOWN);
6268 init_optab (sdot_prod_optab, UNKNOWN);
6269 init_optab (udot_prod_optab, UNKNOWN);
6270
6271 init_optab (vec_extract_optab, UNKNOWN);
6272 init_optab (vec_extract_even_optab, UNKNOWN);
6273 init_optab (vec_extract_odd_optab, UNKNOWN);
6274 init_optab (vec_interleave_high_optab, UNKNOWN);
6275 init_optab (vec_interleave_low_optab, UNKNOWN);
6276 init_optab (vec_set_optab, UNKNOWN);
6277 init_optab (vec_init_optab, UNKNOWN);
6278 init_optab (vec_shl_optab, UNKNOWN);
6279 init_optab (vec_shr_optab, UNKNOWN);
6280 init_optab (vec_realign_load_optab, UNKNOWN);
6281 init_optab (movmisalign_optab, UNKNOWN);
6282 init_optab (vec_widen_umult_hi_optab, UNKNOWN);
6283 init_optab (vec_widen_umult_lo_optab, UNKNOWN);
6284 init_optab (vec_widen_smult_hi_optab, UNKNOWN);
6285 init_optab (vec_widen_smult_lo_optab, UNKNOWN);
6286 init_optab (vec_unpacks_hi_optab, UNKNOWN);
6287 init_optab (vec_unpacks_lo_optab, UNKNOWN);
6288 init_optab (vec_unpacku_hi_optab, UNKNOWN);
6289 init_optab (vec_unpacku_lo_optab, UNKNOWN);
6290 init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
6291 init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
6292 init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
6293 init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
6294 init_optab (vec_pack_trunc_optab, UNKNOWN);
6295 init_optab (vec_pack_usat_optab, UNKNOWN);
6296 init_optab (vec_pack_ssat_optab, UNKNOWN);
6297 init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
6298 init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);
6299
6300 init_optab (powi_optab, UNKNOWN);
6301
6302 /* Conversions. */
6303 init_convert_optab (sext_optab, SIGN_EXTEND);
6304 init_convert_optab (zext_optab, ZERO_EXTEND);
6305 init_convert_optab (trunc_optab, TRUNCATE);
6306 init_convert_optab (sfix_optab, FIX);
6307 init_convert_optab (ufix_optab, UNSIGNED_FIX);
6308 init_convert_optab (sfixtrunc_optab, UNKNOWN);
6309 init_convert_optab (ufixtrunc_optab, UNKNOWN);
6310 init_convert_optab (sfloat_optab, FLOAT);
6311 init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
6312 init_convert_optab (lrint_optab, UNKNOWN);
6313 init_convert_optab (lround_optab, UNKNOWN);
6314 init_convert_optab (lfloor_optab, UNKNOWN);
6315 init_convert_optab (lceil_optab, UNKNOWN);
6316
6317 init_convert_optab (fract_optab, FRACT_CONVERT);
6318 init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
6319 init_convert_optab (satfract_optab, SAT_FRACT);
6320 init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
6321
6322 /* Fill in the optabs with the insns we support. */
6323 init_all_optabs ();
6324
6325 /* Initialize the optabs with the names of the library functions. */
6326 add_optab->libcall_basename = "add";
6327 add_optab->libcall_suffix = '3';
6328 add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6329 addv_optab->libcall_basename = "add";
6330 addv_optab->libcall_suffix = '3';
6331 addv_optab->libcall_gen = gen_intv_fp_libfunc;
6332 ssadd_optab->libcall_basename = "ssadd";
6333 ssadd_optab->libcall_suffix = '3';
6334 ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
6335 usadd_optab->libcall_basename = "usadd";
6336 usadd_optab->libcall_suffix = '3';
6337 usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6338 sub_optab->libcall_basename = "sub";
6339 sub_optab->libcall_suffix = '3';
6340 sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6341 subv_optab->libcall_basename = "sub";
6342 subv_optab->libcall_suffix = '3';
6343 subv_optab->libcall_gen = gen_intv_fp_libfunc;
6344 sssub_optab->libcall_basename = "sssub";
6345 sssub_optab->libcall_suffix = '3';
6346 sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
6347 ussub_optab->libcall_basename = "ussub";
6348 ussub_optab->libcall_suffix = '3';
6349 ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6350 smul_optab->libcall_basename = "mul";
6351 smul_optab->libcall_suffix = '3';
6352 smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6353 smulv_optab->libcall_basename = "mul";
6354 smulv_optab->libcall_suffix = '3';
6355 smulv_optab->libcall_gen = gen_intv_fp_libfunc;
6356 ssmul_optab->libcall_basename = "ssmul";
6357 ssmul_optab->libcall_suffix = '3';
6358 ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
6359 usmul_optab->libcall_basename = "usmul";
6360 usmul_optab->libcall_suffix = '3';
6361 usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6362 sdiv_optab->libcall_basename = "div";
6363 sdiv_optab->libcall_suffix = '3';
6364 sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
6365 sdivv_optab->libcall_basename = "divv";
6366 sdivv_optab->libcall_suffix = '3';
6367 sdivv_optab->libcall_gen = gen_int_libfunc;
6368 ssdiv_optab->libcall_basename = "ssdiv";
6369 ssdiv_optab->libcall_suffix = '3';
6370 ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
6371 udiv_optab->libcall_basename = "udiv";
6372 udiv_optab->libcall_suffix = '3';
6373 udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6374 usdiv_optab->libcall_basename = "usdiv";
6375 usdiv_optab->libcall_suffix = '3';
6376 usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6377 sdivmod_optab->libcall_basename = "divmod";
6378 sdivmod_optab->libcall_suffix = '4';
6379 sdivmod_optab->libcall_gen = gen_int_libfunc;
6380 udivmod_optab->libcall_basename = "udivmod";
6381 udivmod_optab->libcall_suffix = '4';
6382 udivmod_optab->libcall_gen = gen_int_libfunc;
6383 smod_optab->libcall_basename = "mod";
6384 smod_optab->libcall_suffix = '3';
6385 smod_optab->libcall_gen = gen_int_libfunc;
6386 umod_optab->libcall_basename = "umod";
6387 umod_optab->libcall_suffix = '3';
6388 umod_optab->libcall_gen = gen_int_libfunc;
6389 ftrunc_optab->libcall_basename = "ftrunc";
6390 ftrunc_optab->libcall_suffix = '2';
6391 ftrunc_optab->libcall_gen = gen_fp_libfunc;
6392 and_optab->libcall_basename = "and";
6393 and_optab->libcall_suffix = '3';
6394 and_optab->libcall_gen = gen_int_libfunc;
6395 ior_optab->libcall_basename = "ior";
6396 ior_optab->libcall_suffix = '3';
6397 ior_optab->libcall_gen = gen_int_libfunc;
6398 xor_optab->libcall_basename = "xor";
6399 xor_optab->libcall_suffix = '3';
6400 xor_optab->libcall_gen = gen_int_libfunc;
6401 ashl_optab->libcall_basename = "ashl";
6402 ashl_optab->libcall_suffix = '3';
6403 ashl_optab->libcall_gen = gen_int_fixed_libfunc;
6404 ssashl_optab->libcall_basename = "ssashl";
6405 ssashl_optab->libcall_suffix = '3';
6406 ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
6407 usashl_optab->libcall_basename = "usashl";
6408 usashl_optab->libcall_suffix = '3';
6409 usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6410 ashr_optab->libcall_basename = "ashr";
6411 ashr_optab->libcall_suffix = '3';
6412 ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
6413 lshr_optab->libcall_basename = "lshr";
6414 lshr_optab->libcall_suffix = '3';
6415 lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6416 smin_optab->libcall_basename = "min";
6417 smin_optab->libcall_suffix = '3';
6418 smin_optab->libcall_gen = gen_int_fp_libfunc;
6419 smax_optab->libcall_basename = "max";
6420 smax_optab->libcall_suffix = '3';
6421 smax_optab->libcall_gen = gen_int_fp_libfunc;
6422 umin_optab->libcall_basename = "umin";
6423 umin_optab->libcall_suffix = '3';
6424 umin_optab->libcall_gen = gen_int_libfunc;
6425 umax_optab->libcall_basename = "umax";
6426 umax_optab->libcall_suffix = '3';
6427 umax_optab->libcall_gen = gen_int_libfunc;
6428 neg_optab->libcall_basename = "neg";
6429 neg_optab->libcall_suffix = '2';
6430 neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6431 ssneg_optab->libcall_basename = "ssneg";
6432 ssneg_optab->libcall_suffix = '2';
6433 ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
6434 usneg_optab->libcall_basename = "usneg";
6435 usneg_optab->libcall_suffix = '2';
6436 usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6437 negv_optab->libcall_basename = "neg";
6438 negv_optab->libcall_suffix = '2';
6439 negv_optab->libcall_gen = gen_intv_fp_libfunc;
6440 one_cmpl_optab->libcall_basename = "one_cmpl";
6441 one_cmpl_optab->libcall_suffix = '2';
6442 one_cmpl_optab->libcall_gen = gen_int_libfunc;
6443 ffs_optab->libcall_basename = "ffs";
6444 ffs_optab->libcall_suffix = '2';
6445 ffs_optab->libcall_gen = gen_int_libfunc;
6446 clz_optab->libcall_basename = "clz";
6447 clz_optab->libcall_suffix = '2';
6448 clz_optab->libcall_gen = gen_int_libfunc;
6449 ctz_optab->libcall_basename = "ctz";
6450 ctz_optab->libcall_suffix = '2';
6451 ctz_optab->libcall_gen = gen_int_libfunc;
6452 popcount_optab->libcall_basename = "popcount";
6453 popcount_optab->libcall_suffix = '2';
6454 popcount_optab->libcall_gen = gen_int_libfunc;
6455 parity_optab->libcall_basename = "parity";
6456 parity_optab->libcall_suffix = '2';
6457 parity_optab->libcall_gen = gen_int_libfunc;
6458
6459 /* Comparison libcalls for integers MUST come in pairs,
6460 signed/unsigned. */
6461 cmp_optab->libcall_basename = "cmp";
6462 cmp_optab->libcall_suffix = '2';
6463 cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6464 ucmp_optab->libcall_basename = "ucmp";
6465 ucmp_optab->libcall_suffix = '2';
6466 ucmp_optab->libcall_gen = gen_int_libfunc;
6467
6468 /* EQ etc are floating point only. */
6469 eq_optab->libcall_basename = "eq";
6470 eq_optab->libcall_suffix = '2';
6471 eq_optab->libcall_gen = gen_fp_libfunc;
6472 ne_optab->libcall_basename = "ne";
6473 ne_optab->libcall_suffix = '2';
6474 ne_optab->libcall_gen = gen_fp_libfunc;
6475 gt_optab->libcall_basename = "gt";
6476 gt_optab->libcall_suffix = '2';
6477 gt_optab->libcall_gen = gen_fp_libfunc;
6478 ge_optab->libcall_basename = "ge";
6479 ge_optab->libcall_suffix = '2';
6480 ge_optab->libcall_gen = gen_fp_libfunc;
6481 lt_optab->libcall_basename = "lt";
6482 lt_optab->libcall_suffix = '2';
6483 lt_optab->libcall_gen = gen_fp_libfunc;
6484 le_optab->libcall_basename = "le";
6485 le_optab->libcall_suffix = '2';
6486 le_optab->libcall_gen = gen_fp_libfunc;
6487 unord_optab->libcall_basename = "unord";
6488 unord_optab->libcall_suffix = '2';
6489 unord_optab->libcall_gen = gen_fp_libfunc;
6490
6491 powi_optab->libcall_basename = "powi";
6492 powi_optab->libcall_suffix = '2';
6493 powi_optab->libcall_gen = gen_fp_libfunc;
6494
6495 /* Conversions. */
6496 sfloat_optab->libcall_basename = "float";
6497 sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
6498 ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
6499 sfix_optab->libcall_basename = "fix";
6500 sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6501 ufix_optab->libcall_basename = "fixuns";
6502 ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6503 lrint_optab->libcall_basename = "lrint";
6504 lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6505 lround_optab->libcall_basename = "lround";
6506 lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6507 lfloor_optab->libcall_basename = "lfloor";
6508 lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6509 lceil_optab->libcall_basename = "lceil";
6510 lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6511
6512 /* trunc_optab is also used for FLOAT_EXTEND. */
6513 sext_optab->libcall_basename = "extend";
6514 sext_optab->libcall_gen = gen_extend_conv_libfunc;
6515 trunc_optab->libcall_basename = "trunc";
6516 trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
6517
6518 /* Conversions for fixed-point modes and other modes. */
6519 fract_optab->libcall_basename = "fract";
6520 fract_optab->libcall_gen = gen_fract_conv_libfunc;
6521 satfract_optab->libcall_basename = "satfract";
6522 satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
6523 fractuns_optab->libcall_basename = "fractuns";
6524 fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
6525 satfractuns_optab->libcall_basename = "satfractuns";
6526 satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
6527
6528 /* The ffs function operates on `int'. Fall back on it if we do not
6529 have a libgcc2 function for that width. */
6530 if (INT_TYPE_SIZE < BITS_PER_WORD)
6531 set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
6532 "ffs");
6533
6534 /* Explicitly initialize the bswap libfuncs since we need them to be
6535 valid for things other than word_mode. */
6536 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
6537 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
6538
6539 /* Use cabs for double complex abs, since systems generally have cabs.
6540 Don't define any libcall for float complex, so that cabs will be used. */
6541 if (complex_double_type_node)
6542 set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");
6543
6544 abort_libfunc = init_one_libfunc ("abort");
6545 memcpy_libfunc = init_one_libfunc ("memcpy");
6546 memmove_libfunc = init_one_libfunc ("memmove");
6547 memcmp_libfunc = init_one_libfunc ("memcmp");
6548 memset_libfunc = init_one_libfunc ("memset");
6549 setbits_libfunc = init_one_libfunc ("__setbits");
6550
6551 #ifndef DONT_USE_BUILTIN_SETJMP
6552 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
6553 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
6554 #else
6555 setjmp_libfunc = init_one_libfunc ("setjmp");
6556 longjmp_libfunc = init_one_libfunc ("longjmp");
6557 #endif
6558 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
6559 unwind_sjlj_unregister_libfunc
6560 = init_one_libfunc ("_Unwind_SjLj_Unregister");
6561
6562 /* For function entry/exit instrumentation. */
6563 profile_function_entry_libfunc
6564 = init_one_libfunc ("__cyg_profile_func_enter");
6565 profile_function_exit_libfunc
6566 = init_one_libfunc ("__cyg_profile_func_exit");
6567
6568 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
6569
6570 /* Allow the target to add more libcalls or rename some, etc. */
6571 targetm.init_libfuncs ();
6572 } 5158 }
6573 5159
6574 /* Print information about the current contents of the optabs on 5160 /* Print information about the current contents of the optabs on
6575 STDERR. */ 5161 STDERR. */
6576 5162
6577 DEBUG_FUNCTION void 5163 DEBUG_FUNCTION void
6578 debug_optab_libfuncs (void) 5164 debug_optab_libfuncs (void)
6579 { 5165 {
6580 int i; 5166 int i, j, k;
6581 int j;
6582 int k;
6583 5167
6584 /* Dump the arithmetic optabs. */ 5168 /* Dump the arithmetic optabs. */
6585 for (i = 0; i != (int) OTI_MAX; i++) 5169 for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
6586 for (j = 0; j < NUM_MACHINE_MODES; ++j) 5170 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6587 { 5171 {
6588 optab o; 5172 rtx l = optab_libfunc ((optab) i, (machine_mode) j);
6589 rtx l;
6590
6591 o = &optab_table[i];
6592 l = optab_libfunc (o, (enum machine_mode) j);
6593 if (l) 5173 if (l)
6594 { 5174 {
6595 gcc_assert (GET_CODE (l) == SYMBOL_REF); 5175 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6596 fprintf (stderr, "%s\t%s:\t%s\n", 5176 fprintf (stderr, "%s\t%s:\t%s\n",
6597 GET_RTX_NAME (o->code), 5177 GET_RTX_NAME (optab_to_code ((optab) i)),
6598 GET_MODE_NAME (j), 5178 GET_MODE_NAME (j),
6599 XSTR (l, 0)); 5179 XSTR (l, 0));
6600 } 5180 }
6601 } 5181 }
6602 5182
6603 /* Dump the conversion optabs. */ 5183 /* Dump the conversion optabs. */
6604 for (i = 0; i < (int) COI_MAX; ++i) 5184 for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
6605 for (j = 0; j < NUM_MACHINE_MODES; ++j) 5185 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6606 for (k = 0; k < NUM_MACHINE_MODES; ++k) 5186 for (k = 0; k < NUM_MACHINE_MODES; ++k)
6607 { 5187 {
6608 convert_optab o; 5188 rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
6609 rtx l; 5189 (machine_mode) k);
6610
6611 o = &convert_optab_table[i];
6612 l = convert_optab_libfunc (o, (enum machine_mode) j,
6613 (enum machine_mode) k);
6614 if (l) 5190 if (l)
6615 { 5191 {
6616 gcc_assert (GET_CODE (l) == SYMBOL_REF); 5192 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6617 fprintf (stderr, "%s\t%s\t%s:\t%s\n", 5193 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6618 GET_RTX_NAME (o->code), 5194 GET_RTX_NAME (optab_to_code ((optab) i)),
6619 GET_MODE_NAME (j), 5195 GET_MODE_NAME (j),
6620 GET_MODE_NAME (k), 5196 GET_MODE_NAME (k),
6621 XSTR (l, 0)); 5197 XSTR (l, 0));
6622 } 5198 }
6623 } 5199 }
6624 } 5200 }
6625 5201
6626
6627 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition 5202 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6628 CODE. Return 0 on failure. */ 5203 CODE. Return 0 on failure. */
6629 5204
6630 rtx 5205 rtx_insn *
6631 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode) 5206 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
6632 { 5207 {
6633 enum machine_mode mode = GET_MODE (op1); 5208 machine_mode mode = GET_MODE (op1);
6634 enum insn_code icode; 5209 enum insn_code icode;
6635 rtx insn; 5210 rtx_insn *insn;
6636 rtx trap_rtx; 5211 rtx trap_rtx;
6637 5212
6638 if (mode == VOIDmode) 5213 if (mode == VOIDmode)
6639 return 0; 5214 return 0;
6640 5215
6641 icode = optab_handler (ctrap_optab, mode); 5216 icode = optab_handler (ctrap_optab, mode);
6642 if (icode == CODE_FOR_nothing) 5217 if (icode == CODE_FOR_nothing)
6643 return 0; 5218 return 0;
6644 5219
6645 /* Some targets only accept a zero trap code. */ 5220 /* Some targets only accept a zero trap code. */
6646 if (insn_data[icode].operand[3].predicate 5221 if (!insn_operand_matches (icode, 3, tcode))
6647 && !insn_data[icode].operand[3].predicate (tcode, VOIDmode))
6648 return 0; 5222 return 0;
6649 5223
6650 do_pending_stack_adjust (); 5224 do_pending_stack_adjust ();
6651 start_sequence (); 5225 start_sequence ();
6652 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT, 5226 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
6653 &trap_rtx, &mode); 5227 &trap_rtx, &mode);
6654 if (!trap_rtx) 5228 if (!trap_rtx)
6655 insn = NULL_RTX; 5229 insn = NULL;
6656 else 5230 else
6657 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1), 5231 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
6658 tcode); 5232 tcode);
6659 5233
6660 /* If that failed, then give up. */ 5234 /* If that failed, then give up. */
6671 } 5245 }
6672 5246
6673 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed 5247 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6674 or unsigned operation code. */ 5248 or unsigned operation code. */
6675 5249
6676 static enum rtx_code 5250 enum rtx_code
6677 get_rtx_code (enum tree_code tcode, bool unsignedp) 5251 get_rtx_code (enum tree_code tcode, bool unsignedp)
6678 { 5252 {
6679 enum rtx_code code; 5253 enum rtx_code code;
6680 switch (tcode) 5254 switch (tcode)
6681 { 5255 {
6721 break; 5295 break;
6722 case LTGT_EXPR: 5296 case LTGT_EXPR:
6723 code = LTGT; 5297 code = LTGT;
6724 break; 5298 break;
6725 5299
5300 case BIT_AND_EXPR:
5301 code = AND;
5302 break;
5303
5304 case BIT_IOR_EXPR:
5305 code = IOR;
5306 break;
5307
6726 default: 5308 default:
6727 gcc_unreachable (); 5309 gcc_unreachable ();
6728 } 5310 }
6729 return code; 5311 return code;
6730 } 5312 }
6731 5313
6732 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or 5314 /* Return a comparison rtx of mode CMP_MODE for COND. Use UNSIGNEDP to
6733 unsigned operators. Do not generate compare instruction. */ 5315 select signed or unsigned operators. OPNO holds the index of the
5316 first comparison operand for insn ICODE. Do not generate the
5317 compare instruction itself. */
6734 5318
6735 static rtx 5319 static rtx
6736 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode) 5320 vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
6737 { 5321 tree t_op0, tree t_op1, bool unsignedp,
6738 enum rtx_code rcode; 5322 enum insn_code icode, unsigned int opno)
6739 tree t_op0, t_op1; 5323 {
5324 struct expand_operand ops[2];
6740 rtx rtx_op0, rtx_op1; 5325 rtx rtx_op0, rtx_op1;
6741 5326 machine_mode m0, m1;
6742 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer 5327 enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
6743 ensures that condition is a relational operation. */ 5328
6744 gcc_assert (COMPARISON_CLASS_P (cond)); 5329 gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);
6745 5330
6746 rcode = get_rtx_code (TREE_CODE (cond), unsignedp); 5331 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
6747 t_op0 = TREE_OPERAND (cond, 0); 5332 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
6748 t_op1 = TREE_OPERAND (cond, 1); 5333 cases, use the original mode. */
6749
6750 /* Expand operands. */
6751 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 5334 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6752 EXPAND_STACK_PARM); 5335 EXPAND_STACK_PARM);
5336 m0 = GET_MODE (rtx_op0);
5337 if (m0 == VOIDmode)
5338 m0 = TYPE_MODE (TREE_TYPE (t_op0));
5339
6753 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 5340 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
6754 EXPAND_STACK_PARM); 5341 EXPAND_STACK_PARM);
6755 5342 m1 = GET_MODE (rtx_op1);
6756 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0)) 5343 if (m1 == VOIDmode)
6757 && GET_MODE (rtx_op0) != VOIDmode) 5344 m1 = TYPE_MODE (TREE_TYPE (t_op1));
6758 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0); 5345
6759 5346 create_input_operand (&ops[0], rtx_op0, m0);
6760 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1)) 5347 create_input_operand (&ops[1], rtx_op1, m1);
6761 && GET_MODE (rtx_op1) != VOIDmode) 5348 if (!maybe_legitimize_operands (icode, opno, 2, ops))
6762 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1); 5349 gcc_unreachable ();
6763 5350 return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
6764 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1); 5351 }
6765 } 5352
6766 5353 /* Checks if vec_perm mask SEL is a constant equivalent to a shift of the first
6767 /* Return insn code for TYPE, the type of a VEC_COND_EXPR. */ 5354 vec_perm operand, assuming the second operand is a constant vector of zeroes.
6768 5355 Return the shift distance in bits if so, or NULL_RTX if the vec_perm is not a
6769 static inline enum insn_code 5356 shift. */
6770 get_vcond_icode (tree type, enum machine_mode mode) 5357 static rtx
6771 { 5358 shift_amt_for_vec_perm_mask (rtx sel)
6772 enum insn_code icode = CODE_FOR_nothing; 5359 {
6773 5360 unsigned int i, first, nelt = GET_MODE_NUNITS (GET_MODE (sel));
6774 if (TYPE_UNSIGNED (type)) 5361 unsigned int bitsize = GET_MODE_UNIT_BITSIZE (GET_MODE (sel));
6775 icode = direct_optab_handler (vcondu_optab, mode); 5362
5363 if (GET_CODE (sel) != CONST_VECTOR)
5364 return NULL_RTX;
5365
5366 first = INTVAL (CONST_VECTOR_ELT (sel, 0));
5367 if (first >= nelt)
5368 return NULL_RTX;
5369 for (i = 1; i < nelt; i++)
5370 {
5371 int idx = INTVAL (CONST_VECTOR_ELT (sel, i));
5372 unsigned int expected = i + first;
5373 /* Indices into the second vector are all equivalent. */
5374 if (idx < 0 || (MIN (nelt, (unsigned) idx) != MIN (nelt, expected)))
5375 return NULL_RTX;
5376 }
5377
5378 return GEN_INT (first * bitsize);
5379 }
5380
5381 /* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
5382
5383 static rtx
5384 expand_vec_perm_1 (enum insn_code icode, rtx target,
5385 rtx v0, rtx v1, rtx sel)
5386 {
5387 machine_mode tmode = GET_MODE (target);
5388 machine_mode smode = GET_MODE (sel);
5389 struct expand_operand ops[4];
5390
5391 create_output_operand (&ops[0], target, tmode);
5392 create_input_operand (&ops[3], sel, smode);
5393
5394 /* Make an effort to preserve v0 == v1. The target expander is able to
5395 rely on this to determine if we're permuting a single input operand. */
5396 if (rtx_equal_p (v0, v1))
5397 {
5398 if (!insn_operand_matches (icode, 1, v0))
5399 v0 = force_reg (tmode, v0);
5400 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
5401 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
5402
5403 create_fixed_operand (&ops[1], v0);
5404 create_fixed_operand (&ops[2], v0);
5405 }
6776 else 5406 else
6777 icode = direct_optab_handler (vcond_optab, mode); 5407 {
6778 return icode; 5408 create_input_operand (&ops[1], v0, tmode);
6779 } 5409 create_input_operand (&ops[2], v1, tmode);
6780 5410 }
6781 /* Return TRUE iff, appropriate vector insns are available 5411
6782 for vector cond expr with type TYPE in VMODE mode. */ 5412 if (maybe_expand_insn (icode, 4, ops))
6783 5413 return ops[0].value;
6784 bool 5414 return NULL_RTX;
6785 expand_vec_cond_expr_p (tree type, enum machine_mode vmode) 5415 }
6786 { 5416
6787 if (get_vcond_icode (type, vmode) == CODE_FOR_nothing) 5417 /* Generate instructions for vec_perm optab given its mode
6788 return false; 5418 and three operands. */
6789 return true; 5419
5420 rtx
5421 expand_vec_perm (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
5422 {
5423 enum insn_code icode;
5424 machine_mode qimode;
5425 unsigned int i, w, e, u;
5426 rtx tmp, sel_qi = NULL;
5427 rtvec vec;
5428
5429 if (!target || GET_MODE (target) != mode)
5430 target = gen_reg_rtx (mode);
5431
5432 w = GET_MODE_SIZE (mode);
5433 e = GET_MODE_NUNITS (mode);
5434 u = GET_MODE_UNIT_SIZE (mode);
5435
5436 /* Set QIMODE to a different vector mode with byte elements.
5437 If no such mode, or if MODE already has byte elements, use VOIDmode. */
5438 if (GET_MODE_INNER (mode) == QImode
5439 || !mode_for_vector (QImode, w).exists (&qimode)
5440 || !VECTOR_MODE_P (qimode))
5441 qimode = VOIDmode;
5442
5443 /* If the input is a constant, expand it specially. */
5444 gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
5445 if (GET_CODE (sel) == CONST_VECTOR)
5446 {
5447 /* See if this can be handled with a vec_shr. We only do this if the
5448 second vector is all zeroes. */
5449 enum insn_code shift_code = optab_handler (vec_shr_optab, mode);
5450 enum insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
5451 ? optab_handler (vec_shr_optab, qimode)
5452 : CODE_FOR_nothing);
5453 rtx shift_amt = NULL_RTX;
5454 if (v1 == CONST0_RTX (GET_MODE (v1))
5455 && (shift_code != CODE_FOR_nothing
5456 || shift_code_qi != CODE_FOR_nothing))
5457 {
5458 shift_amt = shift_amt_for_vec_perm_mask (sel);
5459 if (shift_amt)
5460 {
5461 struct expand_operand ops[3];
5462 if (shift_code != CODE_FOR_nothing)
5463 {
5464 create_output_operand (&ops[0], target, mode);
5465 create_input_operand (&ops[1], v0, mode);
5466 create_convert_operand_from_type (&ops[2], shift_amt,
5467 sizetype);
5468 if (maybe_expand_insn (shift_code, 3, ops))
5469 return ops[0].value;
5470 }
5471 if (shift_code_qi != CODE_FOR_nothing)
5472 {
5473 tmp = gen_reg_rtx (qimode);
5474 create_output_operand (&ops[0], tmp, qimode);
5475 create_input_operand (&ops[1], gen_lowpart (qimode, v0),
5476 qimode);
5477 create_convert_operand_from_type (&ops[2], shift_amt,
5478 sizetype);
5479 if (maybe_expand_insn (shift_code_qi, 3, ops))
5480 return gen_lowpart (mode, ops[0].value);
5481 }
5482 }
5483 }
5484
5485 icode = direct_optab_handler (vec_perm_const_optab, mode);
5486 if (icode != CODE_FOR_nothing)
5487 {
5488 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5489 if (tmp)
5490 return tmp;
5491 }
5492
5493 /* Fall back to a constant byte-based permutation. */
5494 if (qimode != VOIDmode)
5495 {
5496 vec = rtvec_alloc (w);
5497 for (i = 0; i < e; ++i)
5498 {
5499 unsigned int j, this_e;
5500
5501 this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
5502 this_e &= 2 * e - 1;
5503 this_e *= u;
5504
5505 for (j = 0; j < u; ++j)
5506 RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
5507 }
5508 sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);
5509
5510 icode = direct_optab_handler (vec_perm_const_optab, qimode);
5511 if (icode != CODE_FOR_nothing)
5512 {
5513 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5514 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5515 gen_lowpart (qimode, v1), sel_qi);
5516 if (tmp)
5517 return gen_lowpart (mode, tmp);
5518 }
5519 }
5520 }
5521
5522 /* Otherwise expand as a fully variable permuation. */
5523 icode = direct_optab_handler (vec_perm_optab, mode);
5524 if (icode != CODE_FOR_nothing)
5525 {
5526 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5527 if (tmp)
5528 return tmp;
5529 }
5530
5531 /* As a special case to aid several targets, lower the element-based
5532 permutation to a byte-based permutation and try again. */
5533 if (qimode == VOIDmode)
5534 return NULL_RTX;
5535 icode = direct_optab_handler (vec_perm_optab, qimode);
5536 if (icode == CODE_FOR_nothing)
5537 return NULL_RTX;
5538
5539 if (sel_qi == NULL)
5540 {
5541 /* Multiply each element by its byte size. */
5542 machine_mode selmode = GET_MODE (sel);
5543 if (u == 2)
5544 sel = expand_simple_binop (selmode, PLUS, sel, sel,
5545 NULL, 0, OPTAB_DIRECT);
5546 else
5547 sel = expand_simple_binop (selmode, ASHIFT, sel,
5548 GEN_INT (exact_log2 (u)),
5549 NULL, 0, OPTAB_DIRECT);
5550 gcc_assert (sel != NULL);
5551
5552 /* Broadcast the low byte each element into each of its bytes. */
5553 vec = rtvec_alloc (w);
5554 for (i = 0; i < w; ++i)
5555 {
5556 int this_e = i / u * u;
5557 if (BYTES_BIG_ENDIAN)
5558 this_e += u - 1;
5559 RTVEC_ELT (vec, i) = GEN_INT (this_e);
5560 }
5561 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5562 sel = gen_lowpart (qimode, sel);
5563 sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
5564 gcc_assert (sel != NULL);
5565
5566 /* Add the byte offset to each byte element. */
5567 /* Note that the definition of the indicies here is memory ordering,
5568 so there should be no difference between big and little endian. */
5569 vec = rtvec_alloc (w);
5570 for (i = 0; i < w; ++i)
5571 RTVEC_ELT (vec, i) = GEN_INT (i % u);
5572 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
5573 sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
5574 sel, 0, OPTAB_DIRECT);
5575 gcc_assert (sel_qi != NULL);
5576 }
5577
5578 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5579 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5580 gen_lowpart (qimode, v1), sel_qi);
5581 if (tmp)
5582 tmp = gen_lowpart (mode, tmp);
5583 return tmp;
5584 }
5585
5586 /* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
5587 three operands. */
5588
5589 rtx
5590 expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5591 rtx target)
5592 {
5593 struct expand_operand ops[4];
5594 machine_mode mode = TYPE_MODE (vec_cond_type);
5595 machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
5596 enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
5597 rtx mask, rtx_op1, rtx_op2;
5598
5599 if (icode == CODE_FOR_nothing)
5600 return 0;
5601
5602 mask = expand_normal (op0);
5603 rtx_op1 = expand_normal (op1);
5604 rtx_op2 = expand_normal (op2);
5605
5606 mask = force_reg (mask_mode, mask);
5607 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5608
5609 create_output_operand (&ops[0], target, mode);
5610 create_input_operand (&ops[1], rtx_op1, mode);
5611 create_input_operand (&ops[2], rtx_op2, mode);
5612 create_input_operand (&ops[3], mask, mask_mode);
5613 expand_insn (icode, 4, ops);
5614
5615 return ops[0].value;
6790 } 5616 }
6791 5617
6792 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its 5618 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
6793 three operands. */ 5619 three operands. */
6794 5620
6795 rtx 5621 rtx
6796 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2, 5622 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
6797 rtx target) 5623 rtx target)
6798 { 5624 {
5625 struct expand_operand ops[6];
6799 enum insn_code icode; 5626 enum insn_code icode;
6800 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1; 5627 rtx comparison, rtx_op1, rtx_op2;
6801 enum machine_mode mode = TYPE_MODE (vec_cond_type); 5628 machine_mode mode = TYPE_MODE (vec_cond_type);
6802 bool unsignedp = TYPE_UNSIGNED (vec_cond_type); 5629 machine_mode cmp_op_mode;
6803 5630 bool unsignedp;
6804 icode = get_vcond_icode (vec_cond_type, mode); 5631 tree op0a, op0b;
5632 enum tree_code tcode;
5633
5634 if (COMPARISON_CLASS_P (op0))
5635 {
5636 op0a = TREE_OPERAND (op0, 0);
5637 op0b = TREE_OPERAND (op0, 1);
5638 tcode = TREE_CODE (op0);
5639 }
5640 else
5641 {
5642 gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
5643 if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
5644 != CODE_FOR_nothing)
5645 return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
5646 op2, target);
5647 /* Fake op0 < 0. */
5648 else
5649 {
5650 gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
5651 == MODE_VECTOR_INT);
5652 op0a = op0;
5653 op0b = build_zero_cst (TREE_TYPE (op0));
5654 tcode = LT_EXPR;
5655 }
5656 }
5657 cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
5658 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5659
5660
5661 gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
5662 && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));
5663
5664 icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
6805 if (icode == CODE_FOR_nothing) 5665 if (icode == CODE_FOR_nothing)
6806 return 0; 5666 {
6807 5667 if (tcode == EQ_EXPR || tcode == NE_EXPR)
6808 if (!target || !insn_data[icode].operand[0].predicate (target, mode)) 5668 icode = get_vcond_eq_icode (mode, cmp_op_mode);
6809 target = gen_reg_rtx (mode); 5669 if (icode == CODE_FOR_nothing)
6810 5670 return 0;
6811 /* Get comparison rtx. First expand both cond expr operands. */ 5671 }
6812 comparison = vector_compare_rtx (op0, 5672
6813 unsignedp, icode); 5673 comparison = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp,
6814 cc_op0 = XEXP (comparison, 0); 5674 icode, 4);
6815 cc_op1 = XEXP (comparison, 1);
6816 /* Expand both operands and force them in reg, if required. */
6817 rtx_op1 = expand_normal (op1); 5675 rtx_op1 = expand_normal (op1);
6818 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6819 && mode != VOIDmode)
6820 rtx_op1 = force_reg (mode, rtx_op1);
6821
6822 rtx_op2 = expand_normal (op2); 5676 rtx_op2 = expand_normal (op2);
6823 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode) 5677
6824 && mode != VOIDmode) 5678 create_output_operand (&ops[0], target, mode);
6825 rtx_op2 = force_reg (mode, rtx_op2); 5679 create_input_operand (&ops[1], rtx_op1, mode);
6826 5680 create_input_operand (&ops[2], rtx_op2, mode);
6827 /* Emit instruction! */ 5681 create_fixed_operand (&ops[3], comparison);
6828 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2, 5682 create_fixed_operand (&ops[4], XEXP (comparison, 0));
6829 comparison, cc_op0, cc_op1)); 5683 create_fixed_operand (&ops[5], XEXP (comparison, 1));
6830 5684 expand_insn (icode, 6, ops);
6831 return target; 5685 return ops[0].value;
6832 } 5686 }
6833 5687
5688 /* Generate insns for a vector comparison into a mask. */
5689
5690 rtx
5691 expand_vec_cmp_expr (tree type, tree exp, rtx target)
5692 {
5693 struct expand_operand ops[4];
5694 enum insn_code icode;
5695 rtx comparison;
5696 machine_mode mask_mode = TYPE_MODE (type);
5697 machine_mode vmode;
5698 bool unsignedp;
5699 tree op0a, op0b;
5700 enum tree_code tcode;
5701
5702 op0a = TREE_OPERAND (exp, 0);
5703 op0b = TREE_OPERAND (exp, 1);
5704 tcode = TREE_CODE (exp);
5705
5706 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5707 vmode = TYPE_MODE (TREE_TYPE (op0a));
5708
5709 icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
5710 if (icode == CODE_FOR_nothing)
5711 {
5712 if (tcode == EQ_EXPR || tcode == NE_EXPR)
5713 icode = get_vec_cmp_eq_icode (vmode, mask_mode);
5714 if (icode == CODE_FOR_nothing)
5715 return 0;
5716 }
5717
5718 comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
5719 unsignedp, icode, 2);
5720 create_output_operand (&ops[0], target, mask_mode);
5721 create_fixed_operand (&ops[1], comparison);
5722 create_fixed_operand (&ops[2], XEXP (comparison, 0));
5723 create_fixed_operand (&ops[3], XEXP (comparison, 1));
5724 expand_insn (icode, 4, ops);
5725 return ops[0].value;
5726 }
5727
5728 /* Expand a highpart multiply. */
5729
5730 rtx
5731 expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
5732 rtx target, bool uns_p)
5733 {
5734 struct expand_operand eops[3];
5735 enum insn_code icode;
5736 int method, i, nunits;
5737 machine_mode wmode;
5738 rtx m1, m2, perm;
5739 optab tab1, tab2;
5740 rtvec v;
5741
5742 method = can_mult_highpart_p (mode, uns_p);
5743 switch (method)
5744 {
5745 case 0:
5746 return NULL_RTX;
5747 case 1:
5748 tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
5749 return expand_binop (mode, tab1, op0, op1, target, uns_p,
5750 OPTAB_LIB_WIDEN);
5751 case 2:
5752 tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
5753 tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
5754 break;
5755 case 3:
5756 tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
5757 tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
5758 if (BYTES_BIG_ENDIAN)
5759 std::swap (tab1, tab2);
5760 break;
5761 default:
5762 gcc_unreachable ();
5763 }
5764
5765 icode = optab_handler (tab1, mode);
5766 nunits = GET_MODE_NUNITS (mode);
5767 wmode = insn_data[icode].operand[0].mode;
5768 gcc_checking_assert (2 * GET_MODE_NUNITS (wmode) == nunits);
5769 gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));
5770
5771 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5772 create_input_operand (&eops[1], op0, mode);
5773 create_input_operand (&eops[2], op1, mode);
5774 expand_insn (icode, 3, eops);
5775 m1 = gen_lowpart (mode, eops[0].value);
5776
5777 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5778 create_input_operand (&eops[1], op0, mode);
5779 create_input_operand (&eops[2], op1, mode);
5780 expand_insn (optab_handler (tab2, mode), 3, eops);
5781 m2 = gen_lowpart (mode, eops[0].value);
5782
5783 v = rtvec_alloc (nunits);
5784 if (method == 2)
5785 {
5786 for (i = 0; i < nunits; ++i)
5787 RTVEC_ELT (v, i) = GEN_INT (!BYTES_BIG_ENDIAN + (i & ~1)
5788 + ((i & 1) ? nunits : 0));
5789 }
5790 else
5791 {
5792 for (i = 0; i < nunits; ++i)
5793 RTVEC_ELT (v, i) = GEN_INT (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
5794 }
5795 perm = gen_rtx_CONST_VECTOR (mode, v);
5796
5797 return expand_vec_perm (mode, m1, m2, perm, target);
5798 }
6834 5799
6835 /* This is an internal subroutine of the other compare_and_swap expanders.
6836 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6837 operation. TARGET is an optional place to store the value result of
6838 the operation. ICODE is the particular instruction to expand. Return
6839 the result of the operation. */
6840
6841 static rtx
6842 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6843 rtx target, enum insn_code icode)
6844 {
6845 enum machine_mode mode = GET_MODE (mem);
6846 rtx insn;
6847
6848 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6849 target = gen_reg_rtx (mode);
6850
6851 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6852 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6853 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6854 old_val = force_reg (mode, old_val);
6855
6856 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6857 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6858 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6859 new_val = force_reg (mode, new_val);
6860
6861 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6862 if (insn == NULL_RTX)
6863 return NULL_RTX;
6864 emit_insn (insn);
6865
6866 return target;
6867 }
6868
6869 /* Expand a compare-and-swap operation and return its value. */
6870
6871 rtx
6872 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6873 {
6874 enum machine_mode mode = GET_MODE (mem);
6875 enum insn_code icode
6876 = direct_optab_handler (sync_compare_and_swap_optab, mode);
6877
6878 if (icode == CODE_FOR_nothing)
6879 return NULL_RTX;
6880
6881 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6882 }
6883
6884 /* Helper function to find the MODE_CC set in a sync_compare_and_swap 5800 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
6885 pattern. */ 5801 pattern. */
6886 5802
6887 static void 5803 static void
6888 find_cc_set (rtx x, const_rtx pat, void *data) 5804 find_cc_set (rtx x, const_rtx pat, void *data)
6892 { 5808 {
6893 rtx *p_cc_reg = (rtx *) data; 5809 rtx *p_cc_reg = (rtx *) data;
6894 gcc_assert (!*p_cc_reg); 5810 gcc_assert (!*p_cc_reg);
6895 *p_cc_reg = x; 5811 *p_cc_reg = x;
6896 } 5812 }
6897 }
6898
6899 /* Expand a compare-and-swap operation and store true into the result if
6900 the operation was successful and false otherwise. Return the result.
6901 Unlike other routines, TARGET is not optional. */
6902
6903 rtx
6904 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6905 {
6906 enum machine_mode mode = GET_MODE (mem);
6907 enum insn_code icode;
6908 rtx subtarget, seq, cc_reg;
6909
6910 /* If the target supports a compare-and-swap pattern that simultaneously
6911 sets some flag for success, then use it. Otherwise use the regular
6912 compare-and-swap and follow that immediately with a compare insn. */
6913 icode = direct_optab_handler (sync_compare_and_swap_optab, mode);
6914 if (icode == CODE_FOR_nothing)
6915 return NULL_RTX;
6916
6917 do_pending_stack_adjust ();
6918 do
6919 {
6920 start_sequence ();
6921 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6922 NULL_RTX, icode);
6923 cc_reg = NULL_RTX;
6924 if (subtarget == NULL_RTX)
6925 {
6926 end_sequence ();
6927 return NULL_RTX;
6928 }
6929
6930 if (have_insn_for (COMPARE, CCmode))
6931 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
6932 seq = get_insns ();
6933 end_sequence ();
6934
6935 /* We might be comparing against an old value. Try again. :-( */
6936 if (!cc_reg && MEM_P (old_val))
6937 {
6938 seq = NULL_RTX;
6939 old_val = force_reg (mode, old_val);
6940 }
6941 }
6942 while (!seq);
6943
6944 emit_insn (seq);
6945 if (cc_reg)
6946 return emit_store_flag_force (target, EQ, cc_reg, const0_rtx, VOIDmode, 0, 1);
6947 else
6948 return emit_store_flag_force (target, EQ, subtarget, old_val, VOIDmode, 1, 1);
6949 } 5813 }
6950 5814
6951 /* This is a helper function for the other atomic operations. This function 5815 /* This is a helper function for the other atomic operations. This function
6952 emits a loop that contains SEQ that iterates until a compare-and-swap 5816 emits a loop that contains SEQ that iterates until a compare-and-swap
6953 operation at the end succeeds. MEM is the memory to be modified. SEQ is 5817 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6958 loop was generated successfully. */ 5822 loop was generated successfully. */
6959 5823
6960 static bool 5824 static bool
6961 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq) 5825 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6962 { 5826 {
6963 enum machine_mode mode = GET_MODE (mem); 5827 machine_mode mode = GET_MODE (mem);
6964 enum insn_code icode; 5828 rtx_code_label *label;
6965 rtx label, cmp_reg, subtarget, cc_reg; 5829 rtx cmp_reg, success, oldval;
6966 5830
6967 /* The loop we want to generate looks like 5831 /* The loop we want to generate looks like
6968 5832
6969 cmp_reg = mem; 5833 cmp_reg = mem;
6970 label: 5834 label:
6971 old_reg = cmp_reg; 5835 old_reg = cmp_reg;
6972 seq; 5836 seq;
6973 cmp_reg = compare-and-swap(mem, old_reg, new_reg) 5837 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
6974 if (cmp_reg != old_reg) 5838 if (success)
6975 goto label; 5839 goto label;
6976 5840
6977 Note that we only do the plain load from memory once. Subsequent 5841 Note that we only do the plain load from memory once. Subsequent
6978 iterations use the value loaded by the compare-and-swap pattern. */ 5842 iterations use the value loaded by the compare-and-swap pattern. */
6979 5843
6984 emit_label (label); 5848 emit_label (label);
6985 emit_move_insn (old_reg, cmp_reg); 5849 emit_move_insn (old_reg, cmp_reg);
6986 if (seq) 5850 if (seq)
6987 emit_insn (seq); 5851 emit_insn (seq);
6988 5852
6989 /* If the target supports a compare-and-swap pattern that simultaneously 5853 success = NULL_RTX;
6990 sets some flag for success, then use it. Otherwise use the regular 5854 oldval = cmp_reg;
6991 compare-and-swap and follow that immediately with a compare insn. */ 5855 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
6992 icode = direct_optab_handler (sync_compare_and_swap_optab, mode); 5856 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
6993 if (icode == CODE_FOR_nothing) 5857 MEMMODEL_RELAXED))
6994 return false; 5858 return false;
6995 5859
6996 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg, 5860 if (oldval != cmp_reg)
6997 cmp_reg, icode); 5861 emit_move_insn (cmp_reg, oldval);
6998 if (subtarget == NULL_RTX) 5862
5863 /* Mark this jump predicted not taken. */
5864 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
5865 GET_MODE (success), 1, label,
5866 profile_probability::guessed_never ());
5867 return true;
5868 }
5869
5870
5871 /* This function tries to emit an atomic_exchange intruction. VAL is written
5872 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
5873 using TARGET if possible. */
5874
5875 static rtx
5876 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
5877 {
5878 machine_mode mode = GET_MODE (mem);
5879 enum insn_code icode;
5880
5881 /* If the target supports the exchange directly, great. */
5882 icode = direct_optab_handler (atomic_exchange_optab, mode);
5883 if (icode != CODE_FOR_nothing)
5884 {
5885 struct expand_operand ops[4];
5886
5887 create_output_operand (&ops[0], target, mode);
5888 create_fixed_operand (&ops[1], mem);
5889 create_input_operand (&ops[2], val, mode);
5890 create_integer_operand (&ops[3], model);
5891 if (maybe_expand_insn (icode, 4, ops))
5892 return ops[0].value;
5893 }
5894
5895 return NULL_RTX;
5896 }
5897
5898 /* This function tries to implement an atomic exchange operation using
5899 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
5900 The previous contents of *MEM are returned, using TARGET if possible.
5901 Since this instructionn is an acquire barrier only, stronger memory
5902 models may require additional barriers to be emitted. */
5903
/* Try to implement an atomic exchange using the legacy
   __sync_lock_test_and_set pattern or libcall.  VAL is written to *MEM
   using memory model MODEL; the previous contents of *MEM are returned,
   using TARGET if possible.  Returns NULL_RTX on failure.

   Since sync_lock_test_and_set is only an acquire barrier, stronger
   memory models require an additional release fence to be emitted
   before the operation.  */

static rtx
maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
				   enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  /* Remember where we are so any speculatively emitted fence can be
     deleted if neither the insn nor the libcall pans out.  */
  rtx_insn *last_insn = get_last_insn ();

  icode = optab_handler (sync_lock_test_and_set_optab, mode);

  /* Legacy sync_lock_test_and_set is an acquire barrier.  If the pattern
     exists, and the memory model is stronger than acquire, add a release
     barrier before the instruction.  */

  if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
    expand_mem_thread_fence (model);

  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      if (maybe_expand_insn (icode, 3, ops))
	return ops[0].value;
    }

  /* If an external test-and-set libcall is provided, use that instead of
     any external compare-and-swap that we might get from the compare-and-
     swap-loop expansion later.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
      if (libfunc != NULL)
	{
	  rtx addr;

	  addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					  mode, addr, ptr_mode,
					  val, mode);
	}
    }

  /* If the test_and_set can't be emitted, eliminate any barrier that might
     have been emitted.  */
  delete_insns_since (last_insn);
  return NULL_RTX;
}
5953
5954 /* This function tries to implement an atomic exchange operation using a
5955 compare_and_swap loop. VAL is written to *MEM. The previous contents of
5956 *MEM are returned, using TARGET if possible. No memory model is required
5957 since a compare_and_swap loop is seq-cst. */
5958
5959 static rtx
5960 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
5961 {
5962 machine_mode mode = GET_MODE (mem);
5963
5964 if (can_compare_and_swap_p (mode, true))
5965 {
5966 if (!target || !register_operand (target, mode))
5967 target = gen_reg_rtx (mode);
5968 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
5969 return target;
5970 }
5971
5972 return NULL_RTX;
5973 }
5974
5975 /* This function tries to implement an atomic test-and-set operation
5976 using the atomic_test_and_set instruction pattern. A boolean value
5977 is returned from the operation, using TARGET if possible. */
5978
/* Try to implement an atomic test-and-set operation using the target's
   atomic_test_and_set instruction pattern, with memory model MODEL.
   A boolean result is returned from the operation, using TARGET if
   possible; NULL_RTX is returned if the target has no such pattern or
   the expansion fails.  */

static rtx
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode pat_bool_mode;
  struct expand_operand ops[3];

  if (!targetm.have_atomic_test_and_set ())
    return NULL_RTX;

  /* While we always get QImode from __atomic_test_and_set, we get
     other memory modes from __sync_lock_test_and_set.  Note that we
     use no endian adjustment here.  This matches the 4.6 behavior
     in the Sparc backend.  */
  enum insn_code icode = targetm.code_for_atomic_test_and_set;
  gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
  if (GET_MODE (mem) != QImode)
    mem = adjust_address_nv (mem, QImode, 0);

  /* The pattern's bool result mode is target-defined; read it from the
     insn's operand data rather than assuming QImode.  */
  pat_bool_mode = insn_data[icode].operand[0].mode;
  create_output_operand (&ops[0], target, pat_bool_mode);
  create_fixed_operand (&ops[1], mem);
  create_integer_operand (&ops[2], model);

  if (maybe_expand_insn (icode, 3, ops))
    return ops[0].value;
  return NULL_RTX;
}
6006
6007 /* This function expands the legacy _sync_lock test_and_set operation which is
6008 generally an atomic exchange. Some limited targets only allow the
6009 constant 1 to be stored. This is an ACQUIRE operation.
6010
6011 TARGET is an optional place to stick the return value.
6012 MEM is where VAL is stored. */
6013
6014 rtx
6015 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
6016 {
6017 rtx ret;
6018
6019 /* Try an atomic_exchange first. */
6020 ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
6021 if (ret)
6022 return ret;
6023
6024 ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
6025 MEMMODEL_SYNC_ACQUIRE);
6026 if (ret)
6027 return ret;
6028
6029 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6030 if (ret)
6031 return ret;
6032
6033 /* If there are no other options, try atomic_test_and_set if the value
6034 being stored is 1. */
6035 if (val == const1_rtx)
6036 ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
6037
6038 return ret;
6039 }
6040
6041 /* This function expands the atomic test_and_set operation:
6042 atomically store a boolean TRUE into MEM and return the previous value.
6043
6044 MEMMODEL is the memory model variant to use.
6045 TARGET is an optional place to stick the return value. */
6046
/* Expand the atomic test_and_set operation: atomically store a boolean
   TRUE into MEM and return the previous value as a boolean (0/1).

   MODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret, trueval, subtarget;

  /* The native pattern, when present, is authoritative.  */
  ret = maybe_emit_atomic_test_and_set (target, mem, model);
  if (ret)
    return ret;

  /* Be binary compatible with non-default settings of trueval, and different
     cpu revisions.  E.g. one revision may have atomic-test-and-set, but
     another only has atomic-exchange.  */
  if (targetm.atomic_test_and_set_trueval == 1)
    {
      trueval = const1_rtx;
      subtarget = target ? target : gen_reg_rtx (mode);
    }
  else
    {
      /* A non-1 trueval means the raw result must be canonicalized
	 below, so it cannot go straight into TARGET.  */
      trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
      subtarget = gen_reg_rtx (mode);
    }

  /* Try the atomic-exchange optab... */
  ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);

  /* ... then an atomic-compare-and-swap loop ... */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);

  /* ... before trying the vaguely defined legacy lock_test_and_set.  */
  if (!ret)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);

  /* Recall that the legacy lock_test_and_set optab was allowed to do magic
     things with the value 1.  Thus we try again without trueval.  */
  if (!ret && targetm.atomic_test_and_set_trueval != 1)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);

  /* Failing all else, assume a single threaded environment and simply
     perform the operation.  */
  if (!ret)
    {
      /* If the result is ignored skip the move to target.  */
      if (subtarget != const0_rtx)
	emit_move_insn (subtarget, mem);

      emit_move_insn (mem, trueval);
      ret = subtarget;
    }

  /* Recall that have to return a boolean value; rectify if trueval
     is not exactly one.  */
  if (targetm.atomic_test_and_set_trueval != 1)
    ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);

  return ret;
}
6106
6107 /* This function expands the atomic exchange operation:
6108 atomically store VAL in MEM and return the previous value in MEM.
6109
6110 MEMMODEL is the memory model variant to use.
6111 TARGET is an optional place to stick the return value. */
6112
6113 rtx
6114 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6115 {
6116 machine_mode mode = GET_MODE (mem);
6117 rtx ret;
6118
6119 /* If loads are not atomic for the required size and we are not called to
6120 provide a __sync builtin, do not do anything so that we stay consistent
6121 with atomic loads of the same size. */
6122 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6123 return NULL_RTX;
6124
6125 ret = maybe_emit_atomic_exchange (target, mem, val, model);
6126
6127 /* Next try a compare-and-swap loop for the exchange. */
6128 if (!ret)
6129 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6130
6131 return ret;
6132 }
6133
6134 /* This function expands the atomic compare exchange operation:
6135
6136 *PTARGET_BOOL is an optional place to store the boolean success/failure.
6137 *PTARGET_OVAL is an optional place to store the old value from memory.
6138 Both target parameters may be NULL or const0_rtx to indicate that we do
6139 not care about that return value. Both target parameters are updated on
6140 success to the actual location of the corresponding result.
6141
6142 MEMMODEL is the memory model variant to use.
6143
6144 The return value of the function is true for success. */
6145
/* Expand the atomic compare-and-exchange operation:

   *PTARGET_BOOL is an optional place to store the boolean success/failure.
   *PTARGET_OVAL is an optional place to store the old value from memory.
   Both target parameters may be NULL or const0_rtx to indicate that we do
   not care about that return value.  Both target parameters are updated on
   success to the actual location of the corresponding result.

   SUCC_MODEL/FAIL_MODEL are the memory models for success and failure.

   The return value of the function is true for success.  */

bool
expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
				rtx mem, rtx expected, rtx desired,
				bool is_weak, enum memmodel succ_model,
				enum memmodel fail_model)
{
  machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[8];
  enum insn_code icode;
  rtx target_oval, target_bool = NULL_RTX;
  rtx libfunc;

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
    return false;

  /* Load expected into a register for the compare and swap.  */
  if (MEM_P (expected))
    expected = copy_to_reg (expected);

  /* Make sure we always have some place to put the return oldval.
     Further, make sure that place is distinct from the input expected,
     just in case we need that path down below.  */
  if (ptarget_oval && *ptarget_oval == const0_rtx)
    ptarget_oval = NULL;

  if (ptarget_oval == NULL
      || (target_oval = *ptarget_oval) == NULL
      || reg_overlap_mentioned_p (expected, target_oval))
    target_oval = gen_reg_rtx (mode);

  /* First choice: the full __atomic compare-and-swap pattern, which
     yields both the old value and a success flag.  */
  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      machine_mode bool_mode = insn_data[icode].operand[0].mode;

      if (ptarget_bool && *ptarget_bool == const0_rtx)
	ptarget_bool = NULL;

      /* Make sure we always have a place for the bool operand.  */
      if (ptarget_bool == NULL
	  || (target_bool = *ptarget_bool) == NULL
	  || GET_MODE (target_bool) != bool_mode)
	target_bool = gen_reg_rtx (bool_mode);

      /* Emit the compare_and_swap.  */
      create_output_operand (&ops[0], target_bool, bool_mode);
      create_output_operand (&ops[1], target_oval, mode);
      create_fixed_operand (&ops[2], mem);
      create_input_operand (&ops[3], expected, mode);
      create_input_operand (&ops[4], desired, mode);
      create_integer_operand (&ops[5], is_weak);
      create_integer_operand (&ops[6], succ_model);
      create_integer_operand (&ops[7], fail_model);
      if (maybe_expand_insn (icode, 8, ops))
	{
	  /* Return success/failure.  */
	  target_bool = ops[0].value;
	  target_oval = ops[1].value;
	  goto success;
	}
    }

  /* Otherwise fall back to the original __sync_val_compare_and_swap
     which is always seq-cst.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx cc_reg;

      create_output_operand (&ops[0], target_oval, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], expected, mode);
      create_input_operand (&ops[3], desired, mode);
      if (!maybe_expand_insn (icode, 4, ops))
	return false;

      target_oval = ops[0].value;

      /* If the caller isn't interested in the boolean return value,
	 skip the computation of it.  */
      if (ptarget_bool == NULL)
	goto success;

      /* Otherwise, work out if the compare-and-swap succeeded.  */
      cc_reg = NULL_RTX;
      if (have_insn_for (COMPARE, CCmode))
	note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
      if (cc_reg)
	{
	  /* The insn set condition codes; test those directly.  */
	  target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
					       const0_rtx, VOIDmode, 0, 1);
	  goto success;
	}
      goto success_bool_from_val;
    }

  /* Also check for library support for __sync_val_compare_and_swap.  */
  libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
  if (libfunc != NULL)
    {
      rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
      rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					    mode, addr, ptr_mode,
					    expected, mode, desired, mode);
      emit_move_insn (target_oval, target);

      /* Compute the boolean return value only if requested.  */
      if (ptarget_bool)
	goto success_bool_from_val;
      else
	goto success;
    }

  /* Failure.  */
  return false;

 success_bool_from_val:
  /* No condition-code result available: derive success by comparing the
     returned old value against the expected value.  */
   target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
					expected, VOIDmode, 1, 1);
 success:
  /* Make sure that the oval output winds up where the caller asked.  */
  if (ptarget_oval)
    *ptarget_oval = target_oval;
  if (ptarget_bool)
    *ptarget_bool = target_bool;
  return true;
}
6276
6277 /* Generate asm volatile("" : : : "memory") as the memory blockage. */
6278
6279 static void
6280 expand_asm_memory_blockage (void)
6281 {
6282 rtx asm_op, clob;
6283
6284 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
6285 rtvec_alloc (0), rtvec_alloc (0),
6286 rtvec_alloc (0), UNKNOWN_LOCATION);
6287 MEM_VOLATILE_P (asm_op) = 1;
6288
6289 clob = gen_rtx_SCRATCH (VOIDmode);
6290 clob = gen_rtx_MEM (BLKmode, clob);
6291 clob = gen_rtx_CLOBBER (VOIDmode, clob);
6292
6293 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
6294 }
6295
6296 /* Do not propagate memory accesses across this point. */
6297
6298 static void
6299 expand_memory_blockage (void)
6300 {
6301 if (targetm.have_memory_blockage ())
6302 emit_insn (targetm.gen_memory_blockage ());
7009 else 6303 else
7010 { 6304 expand_asm_memory_blockage ();
7011 if (subtarget != cmp_reg) 6305 }
7012 emit_move_insn (cmp_reg, subtarget); 6306
/* This routine will either emit the mem_thread_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MODEL.  A
   relaxed model needs no fence at all; in every other case at least a
   compiler-level memory blockage is emitted so the optimizers do not
   move memory accesses across this point.  */

void
expand_mem_thread_fence (enum memmodel model)
{
  if (is_mm_relaxed (model))
    return;
  if (targetm.have_mem_thread_fence ())
    {
      emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
      /* The machine fence alone does not stop the compiler from moving
	 accesses; add an explicit blockage as well.  */
      expand_memory_blockage ();
    }
  else if (targetm.have_memory_barrier ())
    emit_insn (targetm.gen_memory_barrier ());
  else if (synchronize_libfunc != NULL_RTX)
    emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
  else
    /* No machine-level fence available; at least block the compiler.  */
    expand_memory_blockage ();
}
6327
6328 /* Emit a signal fence with given memory model. */
6329
6330 void
6331 expand_mem_signal_fence (enum memmodel model)
6332 {
6333 /* No machine barrier is required to implement a signal fence, but
6334 a compiler memory barrier must be issued, except for relaxed MM. */
6335 if (!is_mm_relaxed (model))
6336 expand_memory_blockage ();
6337 }
6338
6339 /* This function expands the atomic load operation:
6340 return the atomically loaded value in MEM.
6341
6342 MEMMODEL is the memory model variant to use.
6343 TARGET is an option place to stick the return value. */
7024 6344
/* Expand the atomic load operation: return the atomically loaded value
   in MEM.

   MODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.
   Returns NULL_RTX when the load cannot be expanded (leaving the
   library call in place).  */

rtx
expand_atomic_load (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the load directly, great.  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();
      if (is_mm_seq_cst (model))
	expand_memory_blockage ();

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  if (!is_mm_relaxed (model))
	    expand_memory_blockage ();
	  return ops[0].value;
	}
      /* Expansion failed; remove the blockage emitted above.  */
      delete_insns_since (last);
    }

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  We could try to
     emulate a load with a compare-and-swap operation, but the store that
     doing this could result in would be incorrect if this is a volatile
     atomic load or targetting read-only-mapped memory.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    /* If there is no atomic load, leave the library call.  */
    return NULL_RTX;

  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
  if (!target || target == const0_rtx)
    target = gen_reg_rtx (mode);

  /* For SEQ_CST, emit a barrier before the load.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  emit_move_insn (target, mem);

  /* Emit the appropriate barrier after the load.  */
  expand_mem_thread_fence (model);

  return target;
}
6396
6397 /* This function expands the atomic store operation:
6398 Atomically store VAL in MEM.
6399 MEMMODEL is the memory model variant to use.
6400 USE_RELEASE is true if __sync_lock_release can be used as a fall back.
6401 function returns const0_rtx if a pattern was emitted. */
6402
/* Expand the atomic store operation: atomically store VAL in MEM.
   MODEL is the memory model variant to use.
   USE_RELEASE is true if __sync_lock_release can be used as a fall back.
   Returns const0_rtx if a pattern was emitted, NULL_RTX otherwise
   (leaving the library call in place).  */

rtx
expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  struct expand_operand ops[3];

  /* If the target supports the store directly, great.  */
  icode = direct_optab_handler (atomic_store_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx_insn *last = get_last_insn ();
      if (!is_mm_relaxed (model))
	expand_memory_blockage ();
      create_fixed_operand (&ops[0], mem);
      create_input_operand (&ops[1], val, mode);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  if (is_mm_seq_cst (model))
	    expand_memory_blockage ();
	  return const0_rtx;
	}
      /* Expansion failed; remove the blockage emitted above.  */
      delete_insns_since (last);
    }

  /* If using __sync_lock_release is a viable alternative, try it.
     Note that this will not be set to true if we are expanding a generic
     __atomic_store_n.  */
  if (use_release)
    {
      icode = direct_optab_handler (sync_lock_release_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  create_fixed_operand (&ops[0], mem);
	  create_input_operand (&ops[1], const0_rtx, mode);
	  if (maybe_expand_insn (icode, 2, ops))
	    {
	      /* lock_release is only a release barrier.  */
	      if (is_mm_seq_cst (model))
		expand_mem_thread_fence (model);
	      return const0_rtx;
	    }
	}
    }

  /* If the size of the object is greater than word size on this target,
     a default store will not be atomic.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    {
      /* If loads are atomic or we are called to provide a __sync builtin,
	 we can try a atomic_exchange and throw away the result.  Otherwise,
	 don't do anything so that we do not create an inconsistency between
	 loads and stores.  */
      if (can_atomic_load_p (mode) || is_mm_sync (model))
	{
	  rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
	  if (!target)
	    target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
							       val);
	  if (target)
	    return const0_rtx;
	}
      return NULL_RTX;
    }

  /* Otherwise assume stores are atomic, and emit the proper barriers.  */
  expand_mem_thread_fence (model);

  emit_move_insn (mem, val);

  /* For SEQ_CST, also emit a barrier after the store.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  return const0_rtx;
}
6480
6481
6482 /* Structure containing the pointers and values required to process the
6483 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
6484
/* Structure containing the pointers and values required to process the
   various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */

struct atomic_op_functions
{
  direct_optab mem_fetch_before;	/* __atomic fetch-then-op optab.  */
  direct_optab mem_fetch_after;		/* __atomic op-then-fetch optab.  */
  direct_optab mem_no_result;		/* __atomic result-unused optab.  */
  optab fetch_before;			/* Legacy __sync old-value optab.  */
  optab fetch_after;			/* Legacy __sync new-value optab.  */
  direct_optab no_result;		/* Legacy __sync result-unused optab.  */
  /* Operation that undoes CODE, or UNKNOWN when not reversible; lets
     fetch_after be emulated via fetch_before and vice versa.  */
  enum rtx_code reverse_code;
};
6495
6496
6497 /* Fill in structure pointed to by OP with the various optab entries for an
6498 operation of type CODE. */
6499
/* Fill in the structure pointed to by OP with the various optab entries
   for an operation of type CODE.  Aborts on an unsupported CODE.  */

static void
get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
{
  gcc_assert (op != NULL);

  /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
     in the source code during compilation, and the optab entries are not
     computable until runtime.  Fill in the values at runtime.  */
  switch (code)
    {
    case PLUS:
      op->mem_fetch_before = atomic_fetch_add_optab;
      op->mem_fetch_after = atomic_add_fetch_optab;
      op->mem_no_result = atomic_add_optab;
      op->fetch_before = sync_old_add_optab;
      op->fetch_after = sync_new_add_optab;
      op->no_result = sync_add_optab;
      op->reverse_code = MINUS;
      break;
    case MINUS:
      op->mem_fetch_before = atomic_fetch_sub_optab;
      op->mem_fetch_after = atomic_sub_fetch_optab;
      op->mem_no_result = atomic_sub_optab;
      op->fetch_before = sync_old_sub_optab;
      op->fetch_after = sync_new_sub_optab;
      op->no_result = sync_sub_optab;
      op->reverse_code = PLUS;
      break;
    case XOR:
      op->mem_fetch_before = atomic_fetch_xor_optab;
      op->mem_fetch_after = atomic_xor_fetch_optab;
      op->mem_no_result = atomic_xor_optab;
      op->fetch_before = sync_old_xor_optab;
      op->fetch_after = sync_new_xor_optab;
      op->no_result = sync_xor_optab;
      /* XOR is its own inverse.  */
      op->reverse_code = XOR;
      break;
    case AND:
      op->mem_fetch_before = atomic_fetch_and_optab;
      op->mem_fetch_after = atomic_and_fetch_optab;
      op->mem_no_result = atomic_and_optab;
      op->fetch_before = sync_old_and_optab;
      op->fetch_after = sync_new_and_optab;
      op->no_result = sync_and_optab;
      /* AND is not invertible.  */
      op->reverse_code = UNKNOWN;
      break;
    case IOR:
      op->mem_fetch_before = atomic_fetch_or_optab;
      op->mem_fetch_after = atomic_or_fetch_optab;
      op->mem_no_result = atomic_or_optab;
      op->fetch_before = sync_old_ior_optab;
      op->fetch_after = sync_new_ior_optab;
      op->no_result = sync_ior_optab;
      /* IOR is not invertible.  */
      op->reverse_code = UNKNOWN;
      break;
    case NOT:
      /* NOT here means NAND (fetch-nand family).  */
      op->mem_fetch_before = atomic_fetch_nand_optab;
      op->mem_fetch_after = atomic_nand_fetch_optab;
      op->mem_no_result = atomic_nand_optab;
      op->fetch_before = sync_old_nand_optab;
      op->fetch_after = sync_new_nand_optab;
      op->no_result = sync_nand_optab;
      op->reverse_code = UNKNOWN;
      break;
    default:
      gcc_unreachable ();
    }
}
7068 /* Generate the direct operation, if present. */ 6568
7069 if (icode != CODE_FOR_nothing) 6569 /* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
7070 { 6570 using memory order MODEL. If AFTER is true the operation needs to return
7071 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode) 6571 the value of *MEM after the operation, otherwise the previous value.
7072 val = convert_modes (mode, GET_MODE (val), val, 1); 6572 TARGET is an optional place to place the result. The result is unused if
7073 if (!insn_data[icode].operand[1].predicate (val, mode)) 6573 it is const0_rtx.
7074 val = force_reg (mode, val); 6574 Return the result if there is a better sequence, otherwise NULL_RTX. */
7075 6575
/* See if there is a more optimal way to implement the operation
   "*MEM CODE VAL" using memory order MODEL.  If AFTER is true the
   operation needs to return the value of *MEM after the operation,
   otherwise the previous value.  TARGET is an optional place to place
   the result; the result is unused if it is const0_rtx.
   Return the result if there is a better sequence, otherwise NULL_RTX.  */

static rtx
maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
			 enum memmodel model, bool after)
{
  /* If the value is prefetched, or not used, it may be possible to replace
     the sequence with a native exchange operation.  */
  if (!after || target == const0_rtx)
    {
      /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
      if (code == AND && val == const0_rtx)
	{
	  if (target == const0_rtx)
	    target = gen_reg_rtx (GET_MODE (mem));
	  return maybe_emit_atomic_exchange (target, mem, val, model);
	}

      /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
      if (code == IOR && val == constm1_rtx)
	{
	  if (target == const0_rtx)
	    target = gen_reg_rtx (GET_MODE (mem));
	  return maybe_emit_atomic_exchange (target, mem, val, model);
	}
    }

  return NULL_RTX;
}
6603
6604 /* Try to emit an instruction for a specific operation varaition.
6605 OPTAB contains the OP functions.
6606 TARGET is an optional place to return the result. const0_rtx means unused.
6607 MEM is the memory location to operate on.
6608 VAL is the value to use in the operation.
6609 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
6610 MODEL is the memory model, if used.
6611 AFTER is true if the returned result is the value after the operation. */
6612
/* Try to emit an instruction for a specific operation variation.
   OPTAB contains the OP functions.
   TARGET is an optional place to return the result.  const0_rtx means
   the result is unused.
   MEM is the memory location to operate on.
   VAL is the value to use in the operation.
   USE_MEMMODEL is TRUE if the variation with a memory model should be
   tried.
   MODEL is the memory model, if used.
   AFTER is true if the returned result is the value after the operation.
   Returns the result rtx (or const0_rtx when unused), or NULL_RTX if no
   suitable pattern exists or expansion fails.  */

static rtx
maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
	       rtx val, bool use_memmodel, enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[4];
  enum insn_code icode;
  int op_counter = 0;
  int num_ops;

  /* Check to see if there is a result returned.  */
  if (target == const0_rtx)
    {
      if (use_memmodel)
	{
	  icode = direct_optab_handler (optab->mem_no_result, mode);
	  create_integer_operand (&ops[2], model);
	  num_ops = 3;
	}
      else
	{
	  icode = direct_optab_handler (optab->no_result, mode);
	  num_ops = 2;
	}
    }
  /* Otherwise, we need to generate a result.  */
  else
    {
      if (use_memmodel)
	{
	  icode = direct_optab_handler (after ? optab->mem_fetch_after
					: optab->mem_fetch_before, mode);
	  create_integer_operand (&ops[3], model);
	  num_ops = 4;
	}
      else
	{
	  icode = optab_handler (after ? optab->fetch_after
				 : optab->fetch_before, mode);
	  num_ops = 3;
	}
      create_output_operand (&ops[op_counter++], target, mode);
    }
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  create_fixed_operand (&ops[op_counter++], mem);
  /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
  create_convert_operand_to (&ops[op_counter++], val, mode, true);

  if (maybe_expand_insn (icode, num_ops, ops))
    return (target == const0_rtx ? const0_rtx : ops[0].value);

  return NULL_RTX;
}
6668
6669
6670 /* This function expands an atomic fetch_OP or OP_fetch operation:
6671 TARGET is an option place to stick the return value. const0_rtx indicates
6672 the result is unused.
6673 atomically fetch MEM, perform the operation with VAL and return it to MEM.
6674 CODE is the operation being performed (OP)
6675 MEMMODEL is the memory model variant to use.
6676 AFTER is true to return the result of the operation (OP_fetch).
6677 AFTER is false to return the value before the operation (fetch_OP).
6678
6679 This function will *only* generate instructions if there is a direct
6680 optab. No compare and swap loops or libcalls will be generated. */
6681
6682 static rtx
6683 expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
6684 enum rtx_code code, enum memmodel model,
6685 bool after)
6686 {
6687 machine_mode mode = GET_MODE (mem);
6688 struct atomic_op_functions optab;
6689 rtx result;
6690 bool unused_result = (target == const0_rtx);
6691
6692 get_atomic_op_for_code (&optab, code);
6693
6694 /* Check to see if there are any better instructions. */
6695 result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
6696 if (result)
6697 return result;
6698
6699 /* Check for the case where the result isn't used and try those patterns. */
6700 if (unused_result)
6701 {
6702 /* Try the memory model variant first. */
6703 result = maybe_emit_op (&optab, target, mem, val, true, model, true);
6704 if (result)
6705 return result;
6706
6707 /* Next try the old style withuot a memory model. */
6708 result = maybe_emit_op (&optab, target, mem, val, false, model, true);
6709 if (result)
6710 return result;
6711
6712 /* There is no no-result pattern, so try patterns with a result. */
6713 target = NULL_RTX;
6714 }
6715
6716 /* Try the __atomic version. */
6717 result = maybe_emit_op (&optab, target, mem, val, true, model, after);
6718 if (result)
6719 return result;
6720
6721 /* Try the older __sync version. */
6722 result = maybe_emit_op (&optab, target, mem, val, false, model, after);
6723 if (result)
6724 return result;
6725
6726 /* If the fetch value can be calculated from the other variation of fetch,
6727 try that operation. */
6728 if (after || unused_result || optab.reverse_code != UNKNOWN)
6729 {
6730 /* Try the __atomic version, then the older __sync version. */
6731 result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
6732 if (!result)
6733 result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
6734
6735 if (result)
6736 {
6737 /* If the result isn't used, no need to do compensation code. */
6738 if (unused_result)
6739 return result;
6740
6741 /* Issue compensation code. Fetch_after == fetch_before OP val.
6742 Fetch_before == after REVERSE_OP val. */
6743 if (!after)
6744 code = optab.reverse_code;
6745 if (code == NOT)
6746 {
6747 result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
6748 true, OPTAB_LIB_WIDEN);
6749 result = expand_simple_unop (mode, NOT, result, target, true);
6750 }
6751 else
6752 result = expand_simple_binop (mode, code, result, val, target,
6753 true, OPTAB_LIB_WIDEN);
6754 return result;
6755 }
6756 }
6757
6758 /* No direct opcode can be generated. */
6759 return NULL_RTX;
6760 }
6761
6762
6763
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP)
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).

   Fallback strategy, in order: direct optab patterns, the reversed
   add/sub operation on -VAL, __sync libcalls, and finally a
   compare-and-swap loop.  Returns NULL_RTX if nothing succeeded.  */
rtx
expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
			enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  rtx result;
  bool unused_result = (target == const0_rtx);

  /* If loads are not atomic for the required size and we are not called to
     provide a __sync builtin, do not do anything so that we stay consistent
     with atomic loads of the same size.  */
  if (!can_atomic_load_p (mode) && !is_mm_sync (model))
    return NULL_RTX;

  result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
					       after);

  if (result)
    return result;

  /* Add/sub can be implemented by doing the reverse operation with -(val).  */
  if (code == PLUS || code == MINUS)
    {
      rtx tmp;
      enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);

      /* Emit the negation into a throwaway sequence so it can be
	 discarded if the reversed operation is not supported either.  */
      start_sequence ();
      tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
      result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
						   model, after);
      if (result)
	{
	  /* PLUS worked so emit the insns and return.  */
	  tmp = get_insns ();
	  end_sequence ();
	  emit_insn (tmp);
	  return result;
	}

      /* PLUS did not work, so throw away the negation code and continue.  */
      end_sequence ();
    }

  /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc;
      bool fixup = false;
      enum rtx_code orig_code = code;
      struct atomic_op_functions optab;

      get_atomic_op_for_code (&optab, code);
      libfunc = optab_libfunc (after ? optab.fetch_after
			       : optab.fetch_before, mode);
      /* If the requested variant has no libcall, try the opposite
	 fetch variant and compensate arithmetically afterwards.  */
      if (libfunc == NULL
	  && (after || unused_result || optab.reverse_code != UNKNOWN))
	{
	  fixup = true;
	  if (!after)
	    code = optab.reverse_code;
	  libfunc = optab_libfunc (after ? optab.fetch_before
				   : optab.fetch_after, mode);
	}
      if (libfunc != NULL)
	{
	  rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
					    addr, ptr_mode, val, mode);

	  if (!unused_result && fixup)
	    result = expand_simple_binop (mode, code, result, val, target,
					  true, OPTAB_LIB_WIDEN);
	  return result;
	}

      /* We need the original code for any further attempts.  */
      code = orig_code;
    }

  /* If nothing else has succeeded, default to a compare and swap loop.  */
  if (can_compare_and_swap_p (mode, true))
    {
      rtx_insn *insn;
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      /* If the result is used, get a register for it.  */
      if (!unused_result)
	{
	  if (!target || !register_operand (target, mode))
	    target = gen_reg_rtx (mode);
	  /* If fetch_before, copy the value now.  */
	  if (!after)
	    emit_move_insn (target, t0);
	}
      else
	target = const0_rtx;

      t1 = t0;
      if (code == NOT)
	{
	  /* NAND is AND followed by NOT.  */
	  t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
				    true, OPTAB_LIB_WIDEN);
	  t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
	}
      else
	t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
				  OPTAB_LIB_WIDEN);

      /* For after, copy the value now.  */
      if (!unused_result && after)
	emit_move_insn (target, t1);
      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return target;
    }

  return NULL_RTX;
}
7112 6894
7113 /* This function generates the atomic operation MEM CODE= VAL. In this 6895 /* Return true if OPERAND is suitable for operand number OPNO of
7114 case, we do care about the resulting value: if AFTER is true then 6896 instruction ICODE. */
7115 return the value MEM holds after the operation, if AFTER is false 6897
7116 then return the value MEM holds before the operation. TARGET is an 6898 bool
7117 optional place for the result value to be stored. */ 6899 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
7118 6900 {
7119 rtx 6901 return (!insn_data[(int) icode].operand[opno].predicate
7120 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code, 6902 || (insn_data[(int) icode].operand[opno].predicate
7121 bool after, rtx target) 6903 (operand, insn_data[(int) icode].operand[opno].mode)));
7122 { 6904 }
7123 enum machine_mode mode = GET_MODE (mem); 6905
7124 enum insn_code old_code, new_code, icode; 6906 /* TARGET is a target of a multiword operation that we are going to
7125 bool compensate; 6907 implement as a series of word-mode operations. Return true if
7126 rtx insn; 6908 TARGET is suitable for this purpose. */
7127 6909
7128 /* Look to see if the target supports the operation directly. */ 6910 bool
7129 switch (code) 6911 valid_multiword_target_p (rtx target)
7130 { 6912 {
7131 case PLUS: 6913 machine_mode mode;
7132 old_code = direct_optab_handler (sync_old_add_optab, mode); 6914 int i;
7133 new_code = direct_optab_handler (sync_new_add_optab, mode); 6915
6916 mode = GET_MODE (target);
6917 for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
6918 if (!validate_subreg (word_mode, mode, target, i))
6919 return false;
6920 return true;
6921 }
6922
6923 /* Like maybe_legitimize_operand, but do not change the code of the
6924 current rtx value. */
6925
6926 static bool
6927 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
6928 struct expand_operand *op)
6929 {
6930 /* See if the operand matches in its current form. */
6931 if (insn_operand_matches (icode, opno, op->value))
6932 return true;
6933
6934 /* If the operand is a memory whose address has no side effects,
6935 try forcing the address into a non-virtual pseudo register.
6936 The check for side effects is important because copy_to_mode_reg
6937 cannot handle things like auto-modified addresses. */
6938 if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
6939 {
6940 rtx addr, mem;
6941
6942 mem = op->value;
6943 addr = XEXP (mem, 0);
6944 if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
6945 && !side_effects_p (addr))
6946 {
6947 rtx_insn *last;
6948 machine_mode mode;
6949
6950 last = get_last_insn ();
6951 mode = get_address_mode (mem);
6952 mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
6953 if (insn_operand_matches (icode, opno, mem))
6954 {
6955 op->value = mem;
6956 return true;
6957 }
6958 delete_insns_since (last);
6959 }
6960 }
6961
6962 return false;
6963 }
6964
6965 /* Try to make OP match operand OPNO of instruction ICODE. Return true
6966 on success, storing the new operand value back in OP. */
6967
6968 static bool
6969 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
6970 struct expand_operand *op)
6971 {
6972 machine_mode mode, imode;
6973 bool old_volatile_ok, result;
6974
6975 mode = op->mode;
6976 switch (op->type)
6977 {
6978 case EXPAND_FIXED:
6979 old_volatile_ok = volatile_ok;
6980 volatile_ok = true;
6981 result = maybe_legitimize_operand_same_code (icode, opno, op);
6982 volatile_ok = old_volatile_ok;
6983 return result;
6984
6985 case EXPAND_OUTPUT:
6986 gcc_assert (mode != VOIDmode);
6987 if (op->value
6988 && op->value != const0_rtx
6989 && GET_MODE (op->value) == mode
6990 && maybe_legitimize_operand_same_code (icode, opno, op))
6991 return true;
6992
6993 op->value = gen_reg_rtx (mode);
6994 op->target = 0;
7134 break; 6995 break;
7135 case IOR: 6996
7136 old_code = direct_optab_handler (sync_old_ior_optab, mode); 6997 case EXPAND_INPUT:
7137 new_code = direct_optab_handler (sync_new_ior_optab, mode); 6998 input:
6999 gcc_assert (mode != VOIDmode);
7000 gcc_assert (GET_MODE (op->value) == VOIDmode
7001 || GET_MODE (op->value) == mode);
7002 if (maybe_legitimize_operand_same_code (icode, opno, op))
7003 return true;
7004
7005 op->value = copy_to_mode_reg (mode, op->value);
7138 break; 7006 break;
7139 case XOR: 7007
7140 old_code = direct_optab_handler (sync_old_xor_optab, mode); 7008 case EXPAND_CONVERT_TO:
7141 new_code = direct_optab_handler (sync_new_xor_optab, mode); 7009 gcc_assert (mode != VOIDmode);
7010 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
7011 goto input;
7012
7013 case EXPAND_CONVERT_FROM:
7014 if (GET_MODE (op->value) != VOIDmode)
7015 mode = GET_MODE (op->value);
7016 else
7017 /* The caller must tell us what mode this value has. */
7018 gcc_assert (mode != VOIDmode);
7019
7020 imode = insn_data[(int) icode].operand[opno].mode;
7021 if (imode != VOIDmode && imode != mode)
7022 {
7023 op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
7024 mode = imode;
7025 }
7026 goto input;
7027
7028 case EXPAND_ADDRESS:
7029 op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
7030 op->value);
7031 goto input;
7032
7033 case EXPAND_INTEGER:
7034 mode = insn_data[(int) icode].operand[opno].mode;
7035 if (mode != VOIDmode && const_int_operand (op->value, mode))
7036 goto input;
7142 break; 7037 break;
7143 case AND: 7038 }
7144 old_code = direct_optab_handler (sync_old_and_optab, mode); 7039 return insn_operand_matches (icode, opno, op->value);
7145 new_code = direct_optab_handler (sync_new_and_optab, mode); 7040 }
7146 break; 7041
7147 case NOT: 7042 /* Make OP describe an input operand that should have the same value
7148 old_code = direct_optab_handler (sync_old_nand_optab, mode); 7043 as VALUE, after any mode conversion that the target might request.
7149 new_code = direct_optab_handler (sync_new_nand_optab, mode); 7044 TYPE is the type of VALUE. */
7150 break; 7045
7151 7046 void
7152 case MINUS: 7047 create_convert_operand_from_type (struct expand_operand *op,
7153 old_code = direct_optab_handler (sync_old_sub_optab, mode); 7048 rtx value, tree type)
7154 new_code = direct_optab_handler (sync_new_sub_optab, mode); 7049 {
7155 if ((old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing) 7050 create_convert_operand_from (op, value, TYPE_MODE (type),
7156 || CONST_INT_P (val)) 7051 TYPE_UNSIGNED (type));
7157 { 7052 }
7158 old_code = direct_optab_handler (sync_old_add_optab, mode); 7053
7159 new_code = direct_optab_handler (sync_new_add_optab, mode); 7054 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
7160 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing) 7055 of instruction ICODE. Return true on success, leaving the new operand
7161 { 7056 values in the OPS themselves. Emit no code on failure. */
7162 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1); 7057
7163 code = PLUS; 7058 bool
7164 } 7059 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
7165 } 7060 unsigned int nops, struct expand_operand *ops)
7166 break; 7061 {
7167 7062 rtx_insn *last;
7168 default: 7063 unsigned int i;
7169 gcc_unreachable (); 7064
7170 } 7065 last = get_last_insn ();
7171 7066 for (i = 0; i < nops; i++)
7172 /* If the target does supports the proper new/old operation, great. But 7067 if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
7173 if we only support the opposite old/new operation, check to see if we 7068 {
7174 can compensate. In the case in which the old value is supported, then 7069 delete_insns_since (last);
7175 we can always perform the operation again with normal arithmetic. In 7070 return false;
7176 the case in which the new value is supported, then we can only handle 7071 }
7177 this in the case the operation is reversible. */ 7072 return true;
7178 compensate = false; 7073 }
7179 if (after) 7074
7180 { 7075 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
7181 icode = new_code; 7076 as its operands. Return the instruction pattern on success,
7182 if (icode == CODE_FOR_nothing) 7077 and emit any necessary set-up code. Return null and emit no
7183 { 7078 code on failure. */
7184 icode = old_code; 7079
7185 if (icode != CODE_FOR_nothing) 7080 rtx_insn *
7186 compensate = true; 7081 maybe_gen_insn (enum insn_code icode, unsigned int nops,
7187 } 7082 struct expand_operand *ops)
7188 } 7083 {
7189 else 7084 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
7190 { 7085 if (!maybe_legitimize_operands (icode, 0, nops, ops))
7191 icode = old_code; 7086 return NULL;
7192 if (icode == CODE_FOR_nothing 7087
7193 && (code == PLUS || code == MINUS || code == XOR)) 7088 switch (nops)
7194 { 7089 {
7195 icode = new_code; 7090 case 1:
7196 if (icode != CODE_FOR_nothing) 7091 return GEN_FCN (icode) (ops[0].value);
7197 compensate = true; 7092 case 2:
7198 } 7093 return GEN_FCN (icode) (ops[0].value, ops[1].value);
7199 } 7094 case 3:
7200 7095 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
7201 /* If we found something supported, great. */ 7096 case 4:
7202 if (icode != CODE_FOR_nothing) 7097 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7203 { 7098 ops[3].value);
7204 if (!target || !insn_data[icode].operand[0].predicate (target, mode)) 7099 case 5:
7205 target = gen_reg_rtx (mode); 7100 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7206 7101 ops[3].value, ops[4].value);
7207 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode) 7102 case 6:
7208 val = convert_modes (mode, GET_MODE (val), val, 1); 7103 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7209 if (!insn_data[icode].operand[2].predicate (val, mode)) 7104 ops[3].value, ops[4].value, ops[5].value);
7210 val = force_reg (mode, val); 7105 case 7:
7211 7106 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7212 insn = GEN_FCN (icode) (target, mem, val); 7107 ops[3].value, ops[4].value, ops[5].value,
7213 if (insn) 7108 ops[6].value);
7214 { 7109 case 8:
7215 emit_insn (insn); 7110 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7216 7111 ops[3].value, ops[4].value, ops[5].value,
7217 /* If we need to compensate for using an operation with the 7112 ops[6].value, ops[7].value);
7218 wrong return value, do so now. */ 7113 case 9:
7219 if (compensate) 7114 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7220 { 7115 ops[3].value, ops[4].value, ops[5].value,
7221 if (!after) 7116 ops[6].value, ops[7].value, ops[8].value);
7222 { 7117 }
7223 if (code == PLUS) 7118 gcc_unreachable ();
7224 code = MINUS; 7119 }
7225 else if (code == MINUS) 7120
7226 code = PLUS; 7121 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
7227 } 7122 as its operands. Return true on success and emit no code on failure. */
7228 7123
7229 if (code == NOT) 7124 bool
7230 { 7125 maybe_expand_insn (enum insn_code icode, unsigned int nops,
7231 target = expand_simple_binop (mode, AND, target, val, 7126 struct expand_operand *ops)
7232 NULL_RTX, true, 7127 {
7233 OPTAB_LIB_WIDEN); 7128 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7234 target = expand_simple_unop (mode, code, target, 7129 if (pat)
7235 NULL_RTX, true); 7130 {
7236 } 7131 emit_insn (pat);
7237 else 7132 return true;
7238 target = expand_simple_binop (mode, code, target, val, 7133 }
7239 NULL_RTX, true, 7134 return false;
7240 OPTAB_LIB_WIDEN); 7135 }
7241 } 7136
7242 7137 /* Like maybe_expand_insn, but for jumps. */
7243 return target; 7138
7244 } 7139 bool
7245 } 7140 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
7246 7141 struct expand_operand *ops)
7247 /* Failing that, generate a compare-and-swap loop in which we perform the 7142 {
7248 operation with normal arithmetic instructions. */ 7143 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7249 if (direct_optab_handler (sync_compare_and_swap_optab, mode) 7144 if (pat)
7250 != CODE_FOR_nothing) 7145 {
7251 { 7146 emit_jump_insn (pat);
7252 rtx t0 = gen_reg_rtx (mode), t1; 7147 return true;
7253 7148 }
7254 if (!target || !register_operand (target, mode)) 7149 return false;
7255 target = gen_reg_rtx (mode); 7150 }
7256 7151
7257 start_sequence (); 7152 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
7258 7153 as its operands. */
7259 if (!after) 7154
7260 emit_move_insn (target, t0); 7155 void
7261 t1 = t0; 7156 expand_insn (enum insn_code icode, unsigned int nops,
7262 if (code == NOT) 7157 struct expand_operand *ops)
7263 { 7158 {
7264 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX, 7159 if (!maybe_expand_insn (icode, nops, ops))
7265 true, OPTAB_LIB_WIDEN); 7160 gcc_unreachable ();
7266 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true); 7161 }
7267 } 7162
7268 else 7163 /* Like expand_insn, but for jumps. */
7269 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, 7164
7270 true, OPTAB_LIB_WIDEN); 7165 void
7271 if (after) 7166 expand_jump_insn (enum insn_code icode, unsigned int nops,
7272 emit_move_insn (target, t1); 7167 struct expand_operand *ops)
7273 7168 {
7274 insn = get_insns (); 7169 if (!maybe_expand_jump_insn (icode, nops, ops))
7275 end_sequence (); 7170 gcc_unreachable ();
7276 7171 }
7277 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7278 return target;
7279 }
7280
7281 return NULL_RTX;
7282 }
7283
/* This function expands a test-and-set operation.  Ideally we atomically
   store VAL in MEM and return the previous value in MEM.  Some targets
   may only support VAL with the constant 1; in that case the return
   value will be 0/1, but the exact value stored in MEM is target
   defined.  TARGET is an optional place to stick the return value.
   Returns NULL_RTX if the target supports neither a direct pattern nor
   compare-and-swap.  */

rtx
expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* If the target supports the test-and-set directly, great.  */
  icode = direct_optab_handler (sync_lock_test_and_set_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
	target = gen_reg_rtx (mode);

      /* VAL may be in a wider mode; convert it down before predicate
	 checks.  */
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
	val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
	{
	  emit_insn (insn);
	  return target;
	}
    }

  /* Otherwise, use a compare-and-swap loop for the exchange.  */
  if (direct_optab_handler (sync_compare_and_swap_optab, mode)
      != CODE_FOR_nothing)
    {
      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
	return target;
    }

  return NULL_RTX;
}
7332
7333 #include "gt-optabs.h"