/* Subroutines used to remove unnecessary doubleword swaps
   for p8 little-endian VSX code.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "df.h"
#include "tm_p.h"
#include "ira.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "tree-pass.h"

/* Analyze vector computations and remove unnecessary doubleword
   swaps (xxswapdi instructions).  This pass is performed only
   for little-endian VSX code generation.

   For this specific case, loads and stores of 4x32 and 2x64 vectors
   are inefficient.  These are implemented using the lxvd2x and
   stxvd2x instructions, which invert the order of doublewords in
   a vector register.  Thus the code generation inserts an xxswapdi
   after each such load, and prior to each such store.  (For spill
   code after register assignment, an additional xxswapdi is inserted
   following each store in order to return a hard register to its
   unpermuted value.)

   The extra xxswapdi instructions reduce performance.  This can be
   particularly bad for vectorized code.  The purpose of this pass
   is to reduce the number of xxswapdi instructions required for
   correctness.

   The primary insight is that much code that operates on vectors
   does not care about the relative order of elements in a register,
   so long as the correct memory order is preserved.  If we have
   a computation where all input values are provided by lxvd2x/xxswapdi
   sequences, all outputs are stored using xxswapdi/stxvd2x sequences,
   and all intermediate computations are pure SIMD (independent of
   element order), then all the xxswapdi's associated with the loads
   and stores may be removed.

   This pass uses some of the infrastructure and logical ideas from
   the "web" pass in web.c.  We create maximal webs of computations
   fitting the description above using union-find.  Each such web is
   then optimized by removing its unnecessary xxswapdi instructions.

   The pass is placed prior to global optimization so that we can
   perform the optimization in the safest and simplest way possible;
   that is, by replacing each xxswapdi insn with a register copy insn.
   Subsequent forward propagation will remove copies where possible.

   There are some operations sensitive to element order for which we
   can still allow the operation, provided we modify those operations.
   These include CONST_VECTORs, for which we must swap the first and
   second halves of the constant vector; and SUBREGs, for which we
   must adjust the byte offset to account for the swapped doublewords.
   A remaining opportunity would be non-immediate-form splats, for
   which we should adjust the selected lane of the input.  We should
   also make code generation adjustments for sum-across operations,
   since this is a common vectorizer reduction.

   Because we run prior to the first split, we can see loads and stores
   here that match *vsx_le_perm_{load,store}_<mode>.  These are vanilla
   vector loads and stores that have not yet been split into a permuting
   load/store and a swap.  (One way this can happen is with a builtin
   call to vec_vsx_{ld,st}.)  We can handle these as well, but rather
   than deleting a swap, we convert the load/store into a permuting
   load/store (which effectively removes the swap).  */
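
/* As a purely hypothetical illustration (register numbers and memory
   operands invented, not taken from compiler output), a minimal web
   this pass optimizes looks like:

       lxvd2x   0,0,3     # load a[0:1]
       xxswapdi 0,0       # becomes a register copy, then dead
       lxvd2x   1,0,4     # load b[0:1]
       xxswapdi 1,1       # becomes a register copy, then dead
       xvadddp  0,0,1     # pure SIMD; element order is irrelevant
       xxswapdi 0,0       # becomes a register copy, then dead
       stxvd2x  0,0,5     # store c[0:1]

   Every lane of c depends only on the corresponding lanes of a and b,
   so the doubleword order inside the registers never matters, and all
   three swaps can be replaced by copies that forward propagation then
   deletes.  */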

/* Notes on Permutes

   We do not currently handle computations that contain permutes.  There
   is a general transformation that can be performed correctly, but it
   may introduce more expensive code than it replaces.  To handle these
   would require a cost model to determine when to perform the optimization.
   This commentary records how this could be done if desired.

   The most general permute is something like this (example for V16QI):

   (vec_select:V16QI (vec_concat:V32QI (op1:V16QI) (op2:V16QI))
                     (parallel [(const_int a0) (const_int a1)
                                ...
                                (const_int a14) (const_int a15)]))

   where a0,...,a15 are in [0,31] and select the elements of op1 and
   op2 that appear in the result.

   Regardless of mode, we can convert the PARALLEL to a mask of 16
   byte-element selectors.  Let's call this M, with M[i] representing
   the ith byte-element selector value.  Then if we swap doublewords
   throughout the computation, we can get correct behavior by replacing
   M with M' as follows:

     M'[i] = { (M[i]+8)%16      : M[i] in [0,15]
             { ((M[i]+8)%16)+16 : M[i] in [16,31]
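
   For example (a hypothetical mask, not drawn from any real pattern):
   if M  = { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 }
   then M' = { 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 16, 17, 18, 19 }:
   each selector moves to the opposite doubleword of the same input
   vector, compensating for both inputs being doubleword-swapped.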

   This seems promising at first, since we are just replacing one mask
   with another.  But certain masks are preferable to others.  If M
   is a mask that matches a vmrghh pattern, for example, M' certainly
   will not.  Instead of a single vmrghh, we would generate a load of
   M' and a vperm.  So we would need to know how many xxswapdi's we can
   remove as a result of this transformation to determine if it's
   profitable; and preferably the logic would need to be aware of all
   the special preferable masks.

   Another form of permute is an UNSPEC_VPERM, in which the mask is
   already in a register.  In some cases, this mask may be a constant
   that we can discover with ud-chains, in which case the above
   transformation is ok.  However, the common usage here is for the
   mask to be produced by an UNSPEC_LVSL, in which case the mask
   cannot be known at compile time.  In such a case we would have to
   generate several instructions to compute M' as above at run time,
   and a cost model is needed again.

   However, when the mask M for an UNSPEC_VPERM is loaded from the
   constant pool, we can replace M with M' as above at no cost
   beyond adding a constant pool entry.  */

/* This is based on the union-find logic in web.c.  web_entry_base is
   defined in df.h.  */
class swap_web_entry : public web_entry_base
{
 public:
  /* Pointer to the insn.  */
  rtx_insn *insn;
  /* Set if insn contains a mention of a vector register.  All other
     fields are undefined if this field is unset.  */
  unsigned int is_relevant : 1;
  /* Set if insn is a load.  */
  unsigned int is_load : 1;
  /* Set if insn is a store.  */
  unsigned int is_store : 1;
  /* Set if insn is a doubleword swap.  This can either be a register swap
     or a permuting load or store (test is_load and is_store for this).  */
  unsigned int is_swap : 1;
  /* Set if the insn has a live-in use of a parameter register.  */
  unsigned int is_live_in : 1;
  /* Set if the insn has a live-out def of a return register.  */
  unsigned int is_live_out : 1;
  /* Set if the insn contains a subreg reference of a vector register.  */
  unsigned int contains_subreg : 1;
  /* Set if the insn contains a 128-bit integer operand.  */
  unsigned int is_128_int : 1;
  /* Set if this is a call-insn.  */
  unsigned int is_call : 1;
  /* Set if this insn does not perform a vector operation for which
     element order matters, or if we know how to fix it up if it does.
     Undefined if is_swap is set.  */
  unsigned int is_swappable : 1;
  /* A nonzero value indicates what kind of special handling for this
     insn is required if doublewords are swapped.  Undefined if
     is_swappable is not set.  */
  unsigned int special_handling : 4;
  /* Set if the web represented by this entry cannot be optimized.  */
  unsigned int web_not_optimizable : 1;
  /* Set if this insn should be deleted.  */
  unsigned int will_delete : 1;
};

enum special_handling_values {
  SH_NONE = 0,
  SH_CONST_VECTOR,
  SH_SUBREG,
  SH_NOSWAP_LD,
  SH_NOSWAP_ST,
  SH_EXTRACT,
  SH_SPLAT,
  SH_XXPERMDI,
  SH_CONCAT,
  SH_VPERM
};

/* Union INSN with all insns containing definitions that reach USE.
   Detect whether USE is live-in to the current function.  */
static void
union_defs (swap_web_entry *insn_entry, rtx insn, df_ref use)
{
  struct df_link *link = DF_REF_CHAIN (use);

  if (!link)
    insn_entry[INSN_UID (insn)].is_live_in = 1;

  while (link)
    {
      if (DF_REF_IS_ARTIFICIAL (link->ref))
        insn_entry[INSN_UID (insn)].is_live_in = 1;

      if (DF_REF_INSN_INFO (link->ref))
        {
          rtx def_insn = DF_REF_INSN (link->ref);
          (void)unionfind_union (insn_entry + INSN_UID (insn),
                                 insn_entry + INSN_UID (def_insn));
        }

      link = link->next;
    }
}

/* Union INSN with all insns containing uses reached from DEF.
   Detect whether DEF is live-out from the current function.  */
static void
union_uses (swap_web_entry *insn_entry, rtx insn, df_ref def)
{
  struct df_link *link = DF_REF_CHAIN (def);

  if (!link)
    insn_entry[INSN_UID (insn)].is_live_out = 1;

  while (link)
    {
      /* This could be an eh use or some other artificial use;
         we treat these all the same (killing the optimization).  */
      if (DF_REF_IS_ARTIFICIAL (link->ref))
        insn_entry[INSN_UID (insn)].is_live_out = 1;

      if (DF_REF_INSN_INFO (link->ref))
        {
          rtx use_insn = DF_REF_INSN (link->ref);
          (void)unionfind_union (insn_entry + INSN_UID (insn),
                                 insn_entry + INSN_UID (use_insn));
        }

      link = link->next;
    }
}

/* Return 1 iff INSN is a load insn, including permuting loads that
   represent an lxvd2x instruction; else return 0.  */
static unsigned int
insn_is_load_p (rtx insn)
{
  rtx body = PATTERN (insn);

  if (GET_CODE (body) == SET)
    {
      if (GET_CODE (SET_SRC (body)) == MEM)
        return 1;

      if (GET_CODE (SET_SRC (body)) == VEC_SELECT
          && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM)
        return 1;

      return 0;
    }

  if (GET_CODE (body) != PARALLEL)
    return 0;

  rtx set = XVECEXP (body, 0, 0);

  if (GET_CODE (set) == SET && GET_CODE (SET_SRC (set)) == MEM)
    return 1;

  return 0;
}

/* Return 1 iff INSN is a store insn, including permuting stores that
   represent a stxvd2x instruction; else return 0.  */
static unsigned int
insn_is_store_p (rtx insn)
{
  rtx body = PATTERN (insn);
  if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == MEM)
    return 1;
  if (GET_CODE (body) != PARALLEL)
    return 0;
  rtx set = XVECEXP (body, 0, 0);
  if (GET_CODE (set) == SET && GET_CODE (SET_DEST (set)) == MEM)
    return 1;
  return 0;
}

/* Return 1 iff INSN swaps doublewords.  This may be a reg-reg swap,
   a permuting load, or a permuting store.  */
static unsigned int
insn_is_swap_p (rtx insn)
{
  rtx body = PATTERN (insn);
  if (GET_CODE (body) != SET)
    return 0;
  rtx rhs = SET_SRC (body);
  if (GET_CODE (rhs) != VEC_SELECT)
    return 0;
  rtx parallel = XEXP (rhs, 1);
  if (GET_CODE (parallel) != PARALLEL)
    return 0;
  unsigned int len = XVECLEN (parallel, 0);
  if (len != 2 && len != 4 && len != 8 && len != 16)
    return 0;
  for (unsigned int i = 0; i < len / 2; ++i)
    {
      rtx op = XVECEXP (parallel, 0, i);
      if (GET_CODE (op) != CONST_INT || INTVAL (op) != len / 2 + i)
        return 0;
    }
  for (unsigned int i = len / 2; i < len; ++i)
    {
      rtx op = XVECEXP (parallel, 0, i);
      if (GET_CODE (op) != CONST_INT || INTVAL (op) != i - len / 2)
        return 0;
    }
  return 1;
}
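
/* For instance, a V4SI doubleword swap has the selector [2 3 0 1]
   (element i of the result takes element (i+2) mod 4 of the source,
   exchanging the two 64-bit halves):

     (set (reg:V4SI A)
          (vec_select:V4SI (reg:V4SI B)
                           (parallel [(const_int 2) (const_int 3)
                                      (const_int 0) (const_int 1)])))

   This is a sketch of the shape insn_is_swap_p accepts, not a pattern
   copied from the machine description.  */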

/* Return TRUE if INSN is a swap fed by a load from the constant pool.  */
static bool
const_load_sequence_p (swap_web_entry *insn_entry, rtx insn)
{
  unsigned uid = INSN_UID (insn);
  if (!insn_entry[uid].is_swap || insn_entry[uid].is_load)
    return false;

  const_rtx tocrel_base;

  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
  df_ref use;
  FOR_EACH_INSN_INFO_USE (use, insn_info)
    {
      struct df_link *def_link = DF_REF_CHAIN (use);

      /* If there is no def or the def is artificial or there are
         multiple defs, punt.  */
      if (!def_link || !def_link->ref || DF_REF_IS_ARTIFICIAL (def_link->ref)
          || def_link->next)
        return false;

      rtx def_insn = DF_REF_INSN (def_link->ref);
      unsigned uid2 = INSN_UID (def_insn);
      /* If this is not a load or is not a swap, return false.  */
      if (!insn_entry[uid2].is_load || !insn_entry[uid2].is_swap)
        return false;

      /* If the source of the rtl def is not a set from memory, return
         false.  */
      rtx body = PATTERN (def_insn);
      if (GET_CODE (body) != SET
          || GET_CODE (SET_SRC (body)) != VEC_SELECT
          || GET_CODE (XEXP (SET_SRC (body), 0)) != MEM)
        return false;

      rtx mem = XEXP (SET_SRC (body), 0);
      rtx base_reg = XEXP (mem, 0);
      /* If the base address for the memory expression is not
         represented by a register, punt.  */
      if (!REG_P (base_reg))
        return false;

      df_ref base_use;
      insn_info = DF_INSN_INFO_GET (def_insn);
      FOR_EACH_INSN_INFO_USE (base_use, insn_info)
        {
          /* If base_use does not represent base_reg, look for another
             use.  */
          if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
            continue;

          struct df_link *base_def_link = DF_REF_CHAIN (base_use);
          if (!base_def_link || base_def_link->next)
            return false;

          /* Constants held on the stack are not "true" constants
             because their values are not part of the static load
             image.  If this constant's base reference is a stack
             or frame pointer, it is seen as an artificial
             reference.  */
          if (DF_REF_IS_ARTIFICIAL (base_def_link->ref))
            return false;

          rtx tocrel_insn = DF_REF_INSN (base_def_link->ref);
          rtx tocrel_body = PATTERN (tocrel_insn);
          rtx base, offset;
          if (GET_CODE (tocrel_body) != SET)
            return false;
          /* There is an extra level of indirection for small/large
             code models.  */
          rtx tocrel_expr = SET_SRC (tocrel_body);
          if (GET_CODE (tocrel_expr) == MEM)
            tocrel_expr = XEXP (tocrel_expr, 0);
          if (!toc_relative_expr_p (tocrel_expr, false, &tocrel_base, NULL))
            return false;
          split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);

          if (GET_CODE (base) != SYMBOL_REF || !CONSTANT_POOL_ADDRESS_P (base))
            return false;
          else
            {
              /* FIXME: The conditions under which
                   (GET_CODE (const_vector) == SYMBOL_REF
                    && !CONSTANT_POOL_ADDRESS_P (const_vector))
                 are not well understood.  This code prevents
                 an internal compiler error that would occur in
                 replace_swapped_load_constant () if we were to return
                 true.  Some day, we should figure out how to properly
                 handle this condition in
                 replace_swapped_load_constant () and then we can
                 remove this special test.  */
              rtx const_vector = get_pool_constant (base);
              if (GET_CODE (const_vector) == SYMBOL_REF
                  && !CONSTANT_POOL_ADDRESS_P (const_vector))
                return false;
            }
        }
    }
  return true;
}

/* Return TRUE iff OP matches a V2DF reduction pattern.  See the
   definition of vsx_reduc_<VEC_reduc_name>_v2df in vsx.md.  */
static bool
v2df_reduction_p (rtx op)
{
  if (GET_MODE (op) != V2DFmode)
    return false;

  enum rtx_code code = GET_CODE (op);
  if (code != PLUS && code != SMIN && code != SMAX)
    return false;

  rtx concat = XEXP (op, 0);
  if (GET_CODE (concat) != VEC_CONCAT)
    return false;

  rtx select0 = XEXP (concat, 0);
  rtx select1 = XEXP (concat, 1);
  if (GET_CODE (select0) != VEC_SELECT || GET_CODE (select1) != VEC_SELECT)
    return false;

  rtx reg0 = XEXP (select0, 0);
  rtx reg1 = XEXP (select1, 0);
  if (!rtx_equal_p (reg0, reg1) || !REG_P (reg0))
    return false;

  rtx parallel0 = XEXP (select0, 1);
  rtx parallel1 = XEXP (select1, 1);
  if (GET_CODE (parallel0) != PARALLEL || GET_CODE (parallel1) != PARALLEL)
    return false;

  if (!rtx_equal_p (XVECEXP (parallel0, 0, 0), const1_rtx)
      || !rtx_equal_p (XVECEXP (parallel1, 0, 0), const0_rtx))
    return false;

  return true;
}
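
/* A sketch of the operand shape checked above, with x standing for
   the same V2DF register in both selects (any further operands of the
   plus/smin/smax are not examined here):

     (plus:V2DF
       (vec_concat:V2DF (vec_select:DF x (parallel [(const_int 1)]))
                        (vec_select:DF x (parallel [(const_int 0)])))
       ...)

   Because both lanes of x feed the operation symmetrically, the
   reduction is insensitive to a doubleword swap of x.  */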

/* Return 1 iff OP is an operand that will not be affected by having
   vector doublewords swapped in memory.  */
static unsigned int
rtx_is_swappable_p (rtx op, unsigned int *special)
{
  enum rtx_code code = GET_CODE (op);
  int i, j;
  rtx parallel;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CLOBBER:
    case REG:
      return 1;

    case VEC_CONCAT:
    case ASM_INPUT:
    case ASM_OPERANDS:
      return 0;

    case CONST_VECTOR:
      {
        *special = SH_CONST_VECTOR;
        return 1;
      }

    case VEC_DUPLICATE:
      /* Opportunity: If XEXP (op, 0) has the same mode as the result,
         and XEXP (op, 1) is a PARALLEL with a single QImode const int,
         it represents a vector splat for which we can do special
         handling.  */
      if (GET_CODE (XEXP (op, 0)) == CONST_INT)
        return 1;
      else if (REG_P (XEXP (op, 0))
               && GET_MODE_INNER (GET_MODE (op)) == GET_MODE (XEXP (op, 0)))
        /* This catches V2DF and V2DI splat, at a minimum.  */
        return 1;
      else if (GET_CODE (XEXP (op, 0)) == TRUNCATE
               && REG_P (XEXP (XEXP (op, 0), 0))
               && GET_MODE_INNER (GET_MODE (op)) == GET_MODE (XEXP (op, 0)))
        /* This catches splat of a truncated value.  */
        return 1;
      else if (GET_CODE (XEXP (op, 0)) == VEC_SELECT)
        /* If the duplicated item is from a select, defer to the select
           processing to see if we can change the lane for the splat.  */
        return rtx_is_swappable_p (XEXP (op, 0), special);
      else
        return 0;

    case VEC_SELECT:
      /* A vec_extract operation is ok if we change the lane.  */
      if (GET_CODE (XEXP (op, 0)) == REG
          && GET_MODE_INNER (GET_MODE (XEXP (op, 0))) == GET_MODE (op)
          && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
          && XVECLEN (parallel, 0) == 1
          && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT)
        {
          *special = SH_EXTRACT;
          return 1;
        }
      /* An XXPERMDI is ok if we adjust the lanes.  Note that if the
         XXPERMDI is a swap operation, it will be identified by
         insn_is_swap_p and therefore we won't get here.  */
      else if (GET_CODE (XEXP (op, 0)) == VEC_CONCAT
               && (GET_MODE (XEXP (op, 0)) == V4DFmode
                   || GET_MODE (XEXP (op, 0)) == V4DImode)
               && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
               && XVECLEN (parallel, 0) == 2
               && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT
               && GET_CODE (XVECEXP (parallel, 0, 1)) == CONST_INT)
        {
          *special = SH_XXPERMDI;
          return 1;
        }
      else if (v2df_reduction_p (op))
        return 1;
      else
        return 0;

    case UNSPEC:
      {
        /* Various operations are unsafe for this optimization, at least
           without significant additional work.  Permutes are obviously
           problematic, as both the permute control vector and the ordering
           of the target values are invalidated by doubleword swapping.
           Vector pack and unpack modify the number of vector lanes.
           Merge-high/low will not operate correctly on swapped operands.
           Vector shifts across element boundaries are clearly uncool,
           as are vector select and concatenate operations.  Vector
           sum-across instructions define one operand with a specific
           order-dependent element, so additional fixup code would be
           needed to make those work.  Vector set and non-immediate-form
           vector splat are element-order sensitive.  A few of these
           cases might be workable with special handling if required.
           Adding cost modeling would be appropriate in some cases.  */
        int val = XINT (op, 1);
        switch (val)
          {
          default:
            break;
          case UNSPEC_VMRGH_DIRECT:
          case UNSPEC_VMRGL_DIRECT:
          case UNSPEC_VPACK_SIGN_SIGN_SAT:
          case UNSPEC_VPACK_SIGN_UNS_SAT:
          case UNSPEC_VPACK_UNS_UNS_MOD:
          case UNSPEC_VPACK_UNS_UNS_MOD_DIRECT:
          case UNSPEC_VPACK_UNS_UNS_SAT:
          case UNSPEC_VPERM:
          case UNSPEC_VPERM_UNS:
          case UNSPEC_VPERMHI:
          case UNSPEC_VPERMSI:
          case UNSPEC_VPKPX:
          case UNSPEC_VSLDOI:
          case UNSPEC_VSLO:
          case UNSPEC_VSRO:
          case UNSPEC_VSUM2SWS:
          case UNSPEC_VSUM4S:
          case UNSPEC_VSUM4UBS:
          case UNSPEC_VSUMSWS:
          case UNSPEC_VSUMSWS_DIRECT:
          case UNSPEC_VSX_CONCAT:
          case UNSPEC_VSX_SET:
          case UNSPEC_VSX_SLDWI:
          case UNSPEC_VUNPACK_HI_SIGN:
          case UNSPEC_VUNPACK_HI_SIGN_DIRECT:
          case UNSPEC_VUNPACK_LO_SIGN:
          case UNSPEC_VUNPACK_LO_SIGN_DIRECT:
          case UNSPEC_VUPKHPX:
          case UNSPEC_VUPKHS_V4SF:
          case UNSPEC_VUPKHU_V4SF:
          case UNSPEC_VUPKLPX:
          case UNSPEC_VUPKLS_V4SF:
          case UNSPEC_VUPKLU_V4SF:
          case UNSPEC_VSX_CVDPSPN:
          case UNSPEC_VSX_CVSPDP:
          case UNSPEC_VSX_CVSPDPN:
          case UNSPEC_VSX_EXTRACT:
          case UNSPEC_VSX_VSLO:
          case UNSPEC_VSX_VEC_INIT:
            return 0;
          case UNSPEC_VSPLT_DIRECT:
          case UNSPEC_VSX_XXSPLTD:
            *special = SH_SPLAT;
            return 1;
          case UNSPEC_REDUC_PLUS:
          case UNSPEC_REDUC:
            return 1;
          }
      }

    default:
      break;
    }

  const char *fmt = GET_RTX_FORMAT (code);
  int ok = 1;

  for (i = 0; i < GET_RTX_LENGTH (code); ++i)
    if (fmt[i] == 'e' || fmt[i] == 'u')
      {
        unsigned int special_op = SH_NONE;
        ok &= rtx_is_swappable_p (XEXP (op, i), &special_op);
        if (special_op == SH_NONE)
          continue;
        /* Ensure we never have two kinds of special handling
           for the same insn.  */
        if (*special != SH_NONE && *special != special_op)
          return 0;
        *special = special_op;
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (op, i); ++j)
        {
          unsigned int special_op = SH_NONE;
          ok &= rtx_is_swappable_p (XVECEXP (op, i, j), &special_op);
          if (special_op == SH_NONE)
            continue;
          /* Ensure we never have two kinds of special handling
             for the same insn.  */
          if (*special != SH_NONE && *special != special_op)
            return 0;
          *special = special_op;
        }

  return ok;
}

/* Return 1 iff INSN's pattern will not be affected by having vector
   doublewords swapped in memory (in which case *SPECIAL is unchanged),
   or can be modified to be correct if vector doublewords are swapped
   in memory (in which case *SPECIAL is changed to a value indicating
   how).  */
static unsigned int
insn_is_swappable_p (swap_web_entry *insn_entry, rtx insn,
                     unsigned int *special)
{
  /* Calls are always bad.  */
  if (GET_CODE (insn) == CALL_INSN)
    return 0;

  /* Loads and stores seen here are not permuting, but we can still
     fix them up by converting them to permuting ones.  Exceptions:
     UNSPEC_LVE, UNSPEC_LVX, and UNSPEC_STVX, which have a PARALLEL
     body instead of a SET; and UNSPEC_STVE, which has an UNSPEC
     for the SET source.  Also we must now make an exception for lvx
     and stvx when they are not in the UNSPEC_LVX/STVX form (with the
     explicit "& -16") since this leads to unrecognizable insns.  */
  rtx body = PATTERN (insn);
  int i = INSN_UID (insn);

  if (insn_entry[i].is_load)
    {
      if (GET_CODE (body) == SET)
        {
          rtx rhs = SET_SRC (body);
          /* Even without a swap, the RHS might be a vec_select for, say,
             a byte-reversing load.  */
          if (GET_CODE (rhs) != MEM)
            return 0;
          if (GET_CODE (XEXP (rhs, 0)) == AND)
            return 0;

          *special = SH_NOSWAP_LD;
          return 1;
        }
      else
        return 0;
    }

  if (insn_entry[i].is_store)
    {
      if (GET_CODE (body) == SET
          && GET_CODE (SET_SRC (body)) != UNSPEC)
        {
          rtx lhs = SET_DEST (body);
          /* Even without a swap, the LHS might be a vec_select for, say,
             a byte-reversing store.  */
          if (GET_CODE (lhs) != MEM)
            return 0;
          if (GET_CODE (XEXP (lhs, 0)) == AND)
            return 0;

          *special = SH_NOSWAP_ST;
          return 1;
        }
      else
        return 0;
    }

  /* A convert to single precision can be left as is provided that
     all of its uses are in xxspltw instructions that splat BE element
     zero.  */
  if (GET_CODE (body) == SET
      && GET_CODE (SET_SRC (body)) == UNSPEC
      && XINT (SET_SRC (body), 1) == UNSPEC_VSX_CVDPSPN)
    {
      df_ref def;
      struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);

      FOR_EACH_INSN_INFO_DEF (def, insn_info)
        {
          struct df_link *link = DF_REF_CHAIN (def);
          if (!link)
            return 0;

          for (; link; link = link->next)
            {
              rtx use_insn = DF_REF_INSN (link->ref);
              rtx use_body = PATTERN (use_insn);
              if (GET_CODE (use_body) != SET
                  || GET_CODE (SET_SRC (use_body)) != UNSPEC
                  || XINT (SET_SRC (use_body), 1) != UNSPEC_VSX_XXSPLTW
                  || XVECEXP (SET_SRC (use_body), 0, 1) != const0_rtx)
                return 0;
            }
        }

      return 1;
    }

  /* A concatenation of two doublewords is ok if we reverse the
     order of the inputs.  */
  if (GET_CODE (body) == SET
      && GET_CODE (SET_SRC (body)) == VEC_CONCAT
      && (GET_MODE (SET_SRC (body)) == V2DFmode
          || GET_MODE (SET_SRC (body)) == V2DImode))
    {
      *special = SH_CONCAT;
      return 1;
    }

  /* V2DF reductions are always swappable.  */
  if (GET_CODE (body) == PARALLEL)
    {
      rtx expr = XVECEXP (body, 0, 0);
      if (GET_CODE (expr) == SET
          && v2df_reduction_p (SET_SRC (expr)))
        return 1;
    }

  /* An UNSPEC_VPERM is ok if the mask operand is loaded from the
     constant pool.  */
  if (GET_CODE (body) == SET
      && GET_CODE (SET_SRC (body)) == UNSPEC
      && XINT (SET_SRC (body), 1) == UNSPEC_VPERM
      && XVECLEN (SET_SRC (body), 0) == 3
      && GET_CODE (XVECEXP (SET_SRC (body), 0, 2)) == REG)
    {
      rtx mask_reg = XVECEXP (SET_SRC (body), 0, 2);
      struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
      df_ref use;
      FOR_EACH_INSN_INFO_USE (use, insn_info)
        if (rtx_equal_p (DF_REF_REG (use), mask_reg))
          {
            struct df_link *def_link = DF_REF_CHAIN (use);
            /* Punt unless the mask has exactly one definition and it
               is a swapped load from the constant pool.  */
            if (def_link && !def_link->next
                && const_load_sequence_p (insn_entry,
                                          DF_REF_INSN (def_link->ref)))
              {
                *special = SH_VPERM;
                return 1;
              }
          }
    }

  /* Otherwise check the operands for vector lane violations.  */
  return rtx_is_swappable_p (body, special);
}

enum chain_purpose { FOR_LOADS, FOR_STORES };

/* Return true if the UD or DU chain headed by LINK is non-empty,
   and every entry on the chain references an insn that is a
   register swap.  Furthermore, if PURPOSE is FOR_LOADS, each such
   register swap must have only permuting loads as reaching defs.
   If PURPOSE is FOR_STORES, each such register swap must have only
   register swaps or permuting stores as reached uses.  */
static bool
chain_contains_only_swaps (swap_web_entry *insn_entry, struct df_link *link,
                           enum chain_purpose purpose)
{
  if (!link)
    return false;

  for (; link; link = link->next)
    {
      if (!ALTIVEC_OR_VSX_VECTOR_MODE (GET_MODE (DF_REF_REG (link->ref))))
        continue;

      if (DF_REF_IS_ARTIFICIAL (link->ref))
        return false;

      rtx reached_insn = DF_REF_INSN (link->ref);
      unsigned uid = INSN_UID (reached_insn);
      struct df_insn_info *insn_info = DF_INSN_INFO_GET (reached_insn);

      if (!insn_entry[uid].is_swap || insn_entry[uid].is_load
          || insn_entry[uid].is_store)
        return false;

      if (purpose == FOR_LOADS)
        {
          df_ref use;
          FOR_EACH_INSN_INFO_USE (use, insn_info)
            {
              struct df_link *swap_link = DF_REF_CHAIN (use);

              while (swap_link)
                {
                  if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
                    return false;

                  rtx swap_def_insn = DF_REF_INSN (swap_link->ref);
                  unsigned uid2 = INSN_UID (swap_def_insn);

                  /* Only permuting loads are allowed.  */
                  if (!insn_entry[uid2].is_swap || !insn_entry[uid2].is_load)
                    return false;

                  swap_link = swap_link->next;
                }
            }
        }
      else if (purpose == FOR_STORES)
        {
          df_ref def;
          FOR_EACH_INSN_INFO_DEF (def, insn_info)
            {
              struct df_link *swap_link = DF_REF_CHAIN (def);

              while (swap_link)
                {
                  if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
                    return false;

                  rtx swap_use_insn = DF_REF_INSN (swap_link->ref);
                  unsigned uid2 = INSN_UID (swap_use_insn);

                  /* Permuting stores or register swaps are allowed.  */
                  if (!insn_entry[uid2].is_swap || insn_entry[uid2].is_load)
                    return false;

                  swap_link = swap_link->next;
                }
            }
        }
    }

  return true;
}

/* Mark the xxswapdi instructions associated with permuting loads and
   stores for removal.  Note that we only flag them for deletion here,
   as there is a possibility of a swap being reached from multiple
   loads, etc.  */
static void
mark_swaps_for_removal (swap_web_entry *insn_entry, unsigned int i)
{
  rtx insn = insn_entry[i].insn;
  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);

  if (insn_entry[i].is_load)
    {
      df_ref def;
      FOR_EACH_INSN_INFO_DEF (def, insn_info)
        {
          struct df_link *link = DF_REF_CHAIN (def);

          /* We know by now that these are swaps, so we can delete
             them confidently.  */
          while (link)
            {
              rtx use_insn = DF_REF_INSN (link->ref);
              insn_entry[INSN_UID (use_insn)].will_delete = 1;
              link = link->next;
            }
        }
    }
  else if (insn_entry[i].is_store)
    {
      df_ref use;
      FOR_EACH_INSN_INFO_USE (use, insn_info)
        {
          /* Ignore uses for addressability.  */
          machine_mode mode = GET_MODE (DF_REF_REG (use));
          if (!ALTIVEC_OR_VSX_VECTOR_MODE (mode))
            continue;

          struct df_link *link = DF_REF_CHAIN (use);

          /* We know by now that these are swaps, so we can delete
             them confidently.  */
          while (link)
            {
              rtx def_insn = DF_REF_INSN (link->ref);
              insn_entry[INSN_UID (def_insn)].will_delete = 1;
              link = link->next;
            }
        }
    }
}

/* OP is either a CONST_VECTOR or an expression containing one.
   Swap the first half of the vector with the second in the first
   case.  Recurse to find it in the second.  */
static void
swap_const_vector_halves (rtx op)
{
  int i;
  enum rtx_code code = GET_CODE (op);
  if (GET_CODE (op) == CONST_VECTOR)
    {
      int half_units = GET_MODE_NUNITS (GET_MODE (op)) / 2;
      for (i = 0; i < half_units; ++i)
        {
          rtx temp = CONST_VECTOR_ELT (op, i);
          CONST_VECTOR_ELT (op, i) = CONST_VECTOR_ELT (op, i + half_units);
          CONST_VECTOR_ELT (op, i + half_units) = temp;
        }
    }
  else
    {
      int j;
      const char *fmt = GET_RTX_FORMAT (code);
      for (i = 0; i < GET_RTX_LENGTH (code); ++i)
        if (fmt[i] == 'e' || fmt[i] == 'u')
          swap_const_vector_halves (XEXP (op, i));
        else if (fmt[i] == 'E')
          for (j = 0; j < XVECLEN (op, i); ++j)
            swap_const_vector_halves (XVECEXP (op, i, j));
    }
}
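
/* E.g., the V4SI constant { 1, 2, 3, 4 } becomes { 3, 4, 1, 2 }
   (illustrative values only): element i trades places with element
   i+2, so a doubleword-swapped register loaded from the adjusted
   constant holds the value the original code expected.  */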

/* Find all subregs of a vector expression that perform a narrowing,
   and adjust the subreg index to account for doubleword swapping.  */
static void
adjust_subreg_index (rtx op)
{
  enum rtx_code code = GET_CODE (op);
  if (code == SUBREG
      && (GET_MODE_SIZE (GET_MODE (op))
          < GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))))
    {
      unsigned int index = SUBREG_BYTE (op);
      if (index < 8)
        index += 8;
      else
        index -= 8;
      SUBREG_BYTE (op) = index;
    }

  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;
  for (i = 0; i < GET_RTX_LENGTH (code); ++i)
    if (fmt[i] == 'e' || fmt[i] == 'u')
      adjust_subreg_index (XEXP (op, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (op, i); ++j)
        adjust_subreg_index (XVECEXP (op, i, j));
}
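
/* For example, (subreg:DI (reg:V2DI) 0) names the doubleword at byte
   offset 0; once the register holds swapped doublewords, that datum
   lives at bytes 8..15, so the subreg byte becomes 8 (and vice versa).
   Same-size subregs do not satisfy the narrowing test above and are
   left alone.  */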

/* Convert the non-permuting load INSN to a permuting one.  */
static void
permute_load (rtx_insn *insn)
{
  rtx body = PATTERN (insn);
  rtx mem_op = SET_SRC (body);
  rtx tgt_reg = SET_DEST (body);
  machine_mode mode = GET_MODE (tgt_reg);
  int n_elts = GET_MODE_NUNITS (mode);
  int half_elts = n_elts / 2;
  rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
  int i, j;
  for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
    XVECEXP (par, 0, i) = GEN_INT (j);
  for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
    XVECEXP (par, 0, i) = GEN_INT (j);
  rtx sel = gen_rtx_VEC_SELECT (mode, mem_op, par);
  SET_SRC (body) = sel;
  INSN_CODE (insn) = -1; /* Force re-recognition.  */
  df_insn_rescan (insn);

  if (dump_file)
    fprintf (dump_file, "Replacing load %d with permuted load\n",
             INSN_UID (insn));
}
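
/* For a V4SI load, for instance, the loops above wrap the MEM as
   (vec_select:V4SI (mem:V4SI ...) (parallel [2 3 0 1])) -- the same
   selector shape insn_is_swap_p recognizes -- so the insn now matches
   the permuting-load pattern and no separate xxswapdi is needed.  */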

/* Convert the non-permuting store INSN to a permuting one.  */
static void
permute_store (rtx_insn *insn)
{
  rtx body = PATTERN (insn);
  rtx src_reg = SET_SRC (body);
  machine_mode mode = GET_MODE (src_reg);
  int n_elts = GET_MODE_NUNITS (mode);
  int half_elts = n_elts / 2;
  rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
  int i, j;
  for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
    XVECEXP (par, 0, i) = GEN_INT (j);
  for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
    XVECEXP (par, 0, i) = GEN_INT (j);
  rtx sel = gen_rtx_VEC_SELECT (mode, src_reg, par);
  SET_SRC (body) = sel;
  INSN_CODE (insn) = -1; /* Force re-recognition.  */
  df_insn_rescan (insn);

  if (dump_file)
    fprintf (dump_file, "Replacing store %d with permuted store\n",
             INSN_UID (insn));
}

/* INSN contains a vector extract operation.  Adjust the index of the
   extracted lane to account for the doubleword swap.  */
static void
adjust_extract (rtx_insn *insn)
{
  rtx pattern = PATTERN (insn);
  if (GET_CODE (pattern) == PARALLEL)
    pattern = XVECEXP (pattern, 0, 0);
  rtx src = SET_SRC (pattern);
  /* The vec_select may be wrapped in a vec_duplicate for a splat, so
     account for that.  */
  rtx sel = GET_CODE (src) == VEC_DUPLICATE ? XEXP (src, 0) : src;
  rtx par = XEXP (sel, 1);
  int half_elts = GET_MODE_NUNITS (GET_MODE (XEXP (sel, 0))) >> 1;
  int lane = INTVAL (XVECEXP (par, 0, 0));
  lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
  XVECEXP (par, 0, 0) = GEN_INT (lane);
  INSN_CODE (insn) = -1; /* Force re-recognition.  */
  df_insn_rescan (insn);

  if (dump_file)
    fprintf (dump_file, "Changing lane for extract %d\n", INSN_UID (insn));
}
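
/* For example, with a V4SI source the lane mapping is 0->2, 1->3,
   2->0, 3->1; for V2DI it is 0->1, 1->0.  The extract then reads the
   doubleword where the swapped register actually holds the datum.  */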

/* INSN contains a vector direct-splat operation.  Adjust the index
   of the source lane to account for the doubleword swap.  */
static void
adjust_splat (rtx_insn *insn)
{
  rtx body = PATTERN (insn);
  rtx unspec = XEXP (body, 1);
  int half_elts = GET_MODE_NUNITS (GET_MODE (unspec)) >> 1;
  int lane = INTVAL (XVECEXP (unspec, 0, 1));
  lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
  XVECEXP (unspec, 0, 1) = GEN_INT (lane);
  INSN_CODE (insn) = -1; /* Force re-recognition.  */
  df_insn_rescan (insn);

  if (dump_file)
    fprintf (dump_file, "Changing lane for splat %d\n", INSN_UID (insn));
}

/* INSN contains an XXPERMDI operation (that is not a doubleword swap).
   Reverse the order of the source operands and adjust the indices
   of the source lanes to account for doubleword reversal.  */
static void
adjust_xxpermdi (rtx_insn *insn)
{
  rtx set = PATTERN (insn);
  rtx select = XEXP (set, 1);
  rtx concat = XEXP (select, 0);
  rtx src0 = XEXP (concat, 0);
  XEXP (concat, 0) = XEXP (concat, 1);
  XEXP (concat, 1) = src0;
  rtx parallel = XEXP (select, 1);
  int lane0 = INTVAL (XVECEXP (parallel, 0, 0));
  int lane1 = INTVAL (XVECEXP (parallel, 0, 1));
  int new_lane0 = 3 - lane1;
  int new_lane1 = 3 - lane0;
  XVECEXP (parallel, 0, 0) = GEN_INT (new_lane0);
  XVECEXP (parallel, 0, 1) = GEN_INT (new_lane1);
  INSN_CODE (insn) = -1; /* Force re-recognition.  */
  df_insn_rescan (insn);

  if (dump_file)
    fprintf (dump_file, "Changing lanes for xxpermdi %d\n", INSN_UID (insn));
}
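
/* For example, a parallel of lanes [0, 2] (doubleword 0 of the first
   source and doubleword 0 of the second) becomes [1, 3]: with the
   sources interchanged and each one doubleword-swapped, new_lane0 =
   3 - old_lane1 and new_lane1 = 3 - old_lane0 pick out the same
   data.  */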

/* INSN contains a VEC_CONCAT operation of two doublewords.
   Reverse the order of those inputs.  */
static void
adjust_concat (rtx_insn *insn)
{
  rtx set = PATTERN (insn);
  rtx concat = XEXP (set, 1);
  rtx src0 = XEXP (concat, 0);
  XEXP (concat, 0) = XEXP (concat, 1);
  XEXP (concat, 1) = src0;
  INSN_CODE (insn) = -1; /* Force re-recognition.  */
  df_insn_rescan (insn);

  if (dump_file)
    fprintf (dump_file, "Reversing inputs for concat %d\n", INSN_UID (insn));
}

/* Given an UNSPEC_VPERM insn, modify the mask loaded from the
   constant pool to reflect swapped doublewords.  */
static void
adjust_vperm (rtx_insn *insn)
{
  /* We previously determined that the UNSPEC_VPERM was fed by a
     swap of a swapping load of a TOC-relative constant pool symbol.
     Find the MEM in the swapping load and replace it with a MEM for
     the adjusted mask constant.  */
  rtx set = PATTERN (insn);
  rtx mask_reg = XVECEXP (SET_SRC (set), 0, 2);

  /* Find the swap.  */
  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
  df_ref use;
  rtx_insn *swap_insn = 0;
  FOR_EACH_INSN_INFO_USE (use, insn_info)
    if (rtx_equal_p (DF_REF_REG (use), mask_reg))
      {
        struct df_link *def_link = DF_REF_CHAIN (use);
        gcc_assert (def_link && !def_link->next);
        swap_insn = DF_REF_INSN (def_link->ref);
        break;
      }
  gcc_assert (swap_insn);

  /* Find the load.  */
  insn_info = DF_INSN_INFO_GET (swap_insn);
  rtx_insn *load_insn = 0;
  FOR_EACH_INSN_INFO_USE (use, insn_info)
    {
      struct df_link *def_link = DF_REF_CHAIN (use);
      gcc_assert (def_link && !def_link->next);
      load_insn = DF_REF_INSN (def_link->ref);
      break;
    }
  gcc_assert (load_insn);

  /* Find the TOC-relative symbol access.  */
  insn_info = DF_INSN_INFO_GET (load_insn);
  rtx_insn *tocrel_insn = 0;
  FOR_EACH_INSN_INFO_USE (use, insn_info)
    {
      struct df_link *def_link = DF_REF_CHAIN (use);
      gcc_assert (def_link && !def_link->next);
      tocrel_insn = DF_REF_INSN (def_link->ref);
      break;
    }
  gcc_assert (tocrel_insn);

  /* Find the embedded CONST_VECTOR.  We have to call toc_relative_expr_p
     to set tocrel_base; otherwise it would be unnecessary as we've
     already established it will return true.  */
  rtx base, offset;
  const_rtx tocrel_base;
  rtx tocrel_expr = SET_SRC (PATTERN (tocrel_insn));
  /* There is an extra level of indirection for small/large code models.  */
  if (GET_CODE (tocrel_expr) == MEM)
    tocrel_expr = XEXP (tocrel_expr, 0);
  if (!toc_relative_expr_p (tocrel_expr, false, &tocrel_base, NULL))
    gcc_unreachable ();
  split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
  rtx const_vector = get_pool_constant (base);
  /* With the extra indirection, get_pool_constant will produce the
     real constant from the reg_equal expression, so get the real
     constant.  */
  if (GET_CODE (const_vector) == SYMBOL_REF)
    const_vector = get_pool_constant (const_vector);
  gcc_assert (GET_CODE (const_vector) == CONST_VECTOR);

  /* Create an adjusted mask from the initial mask.  */
  unsigned int new_mask[16], i, val;
  for (i = 0; i < 16; ++i)
    {
      val = INTVAL (XVECEXP (const_vector, 0, i));
      if (val < 16)
        new_mask[i] = (val + 8) % 16;
      else
        new_mask[i] = ((val + 8) % 16) + 16;
    }

  /* Create a new CONST_VECTOR and a MEM that references it.  */
  rtx vals = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
  for (i = 0; i < 16; ++i)
    XVECEXP (vals, 0, i) = GEN_INT (new_mask[i]);
  rtx new_const_vector = gen_rtx_CONST_VECTOR (V16QImode, XVEC (vals, 0));
  rtx new_mem = force_const_mem (V16QImode, new_const_vector);
  /* This gives us a MEM whose base operand is a SYMBOL_REF, which we
     can't recognize.  Force the SYMBOL_REF into a register.  */
  if (!REG_P (XEXP (new_mem, 0)))
    {
      rtx base_reg = force_reg (Pmode, XEXP (new_mem, 0));
      XEXP (new_mem, 0) = base_reg;
      /* Move the newly created insn ahead of the load insn.  */
      rtx_insn *force_insn = get_last_insn ();
      remove_insn (force_insn);
      rtx_insn *before_load_insn = PREV_INSN (load_insn);
      add_insn_after (force_insn, before_load_insn, BLOCK_FOR_INSN (load_insn));
      df_insn_rescan (before_load_insn);
      df_insn_rescan (force_insn);
    }

  /* Replace the MEM in the load instruction and rescan it.  */
  XEXP (SET_SRC (PATTERN (load_insn)), 0) = new_mem;
  INSN_CODE (load_insn) = -1; /* Force re-recognition.  */
  df_insn_rescan (load_insn);

  if (dump_file)
    fprintf (dump_file, "Adjusting mask for vperm %d\n", INSN_UID (insn));
}

/* The insn described by INSN_ENTRY[I] can be swapped, but only
   with special handling.  Take care of that here.  */
static void
handle_special_swappables (swap_web_entry *insn_entry, unsigned i)
{
  rtx_insn *insn = insn_entry[i].insn;
  rtx body = PATTERN (insn);

  switch (insn_entry[i].special_handling)
    {
    default:
      gcc_unreachable ();
    case SH_CONST_VECTOR:
      {
        /* A CONST_VECTOR will only show up somewhere in the RHS of a SET.  */
        gcc_assert (GET_CODE (body) == SET);
        rtx rhs = SET_SRC (body);
        swap_const_vector_halves (rhs);
        if (dump_file)
          fprintf (dump_file, "Swapping constant halves in insn %d\n", i);
        break;
      }
    case SH_SUBREG:
      /* A subreg of the same size is already safe.  For subregs that
         select a smaller portion of a reg, adjust the index for
         swapped doublewords.  */
      adjust_subreg_index (body);
      if (dump_file)
        fprintf (dump_file, "Adjusting subreg in insn %d\n", i);
      break;
    case SH_NOSWAP_LD:
      /* Convert a non-permuting load to a permuting one.  */
      permute_load (insn);
      break;
    case SH_NOSWAP_ST:
      /* Convert a non-permuting store to a permuting one.  */
      permute_store (insn);
      break;
    case SH_EXTRACT:
      /* Change the lane on an extract operation.  */
      adjust_extract (insn);
      break;
    case SH_SPLAT:
      /* Change the lane on a direct-splat operation.  */
      adjust_splat (insn);
      break;
    case SH_XXPERMDI:
      /* Change the lanes on an XXPERMDI operation.  */
      adjust_xxpermdi (insn);
      break;
    case SH_CONCAT:
      /* Reverse the order of a concatenation operation.  */
      adjust_concat (insn);
      break;
    case SH_VPERM:
      /* Change the mask loaded from the constant pool for a VPERM.  */
      adjust_vperm (insn);
      break;
    }
}

/* Find the insn from the Ith table entry, which is known to be a
   register swap Y = SWAP(X).  Replace it with a copy Y = X.  */
static void
replace_swap_with_copy (swap_web_entry *insn_entry, unsigned i)
{
  rtx_insn *insn = insn_entry[i].insn;
  rtx body = PATTERN (insn);
  rtx src_reg = XEXP (SET_SRC (body), 0);
  rtx copy = gen_rtx_SET (SET_DEST (body), src_reg);
  rtx_insn *new_insn = emit_insn_before (copy, insn);
  set_block_for_insn (new_insn, BLOCK_FOR_INSN (insn));
  df_insn_rescan (new_insn);

  if (dump_file)
    {
      unsigned int new_uid = INSN_UID (new_insn);
      fprintf (dump_file, "Replacing swap %d with copy %d\n", i, new_uid);
    }

  df_insn_delete (insn);
  remove_insn (insn);
  insn->set_deleted ();
}

/* Given that SWAP_INSN represents a swap of a load of a constant
   vector value, replace with a single instruction that loads a
   swapped variant of the original constant.

   The "natural" representation of a byte array in memory is the same
   for big endian and little endian.

   unsigned char byte_array[] =
     { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f };

   However, when loaded into a vector register, the representation
   depends on endian conventions.

   In big-endian mode, the register holds:

     MSB                                            LSB
     [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f ]

   In little-endian mode, the register holds:

     MSB                                            LSB
     [ f, e, d, c, b, a, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 ]

   Word arrays require different handling.  Consider the word array:

   unsigned int word_array[] =
     { 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f };

   The in-memory representation depends on endian configuration.  The
   equivalent array, declared as a byte array, in memory would be:

   unsigned char big_endian_word_array_data[] =
     { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f }

   unsigned char little_endian_word_array_data[] =
     { 3, 2, 1, 0, 7, 6, 5, 4, b, a, 9, 8, f, e, d, c }

   In big-endian mode, the register holds:

     MSB                                            LSB
     [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f ]

   In little-endian mode, the register holds:

     MSB                                            LSB
     [ c, d, e, f, 8, 9, a, b, 4, 5, 6, 7, 0, 1, 2, 3 ]

   Similar transformations apply to the vector of half-word and vector
   of double-word representations.

   For now, don't handle vectors of quad-precision values.  Just return.
   A better solution is to fix the code generator to emit lvx/stvx for
   those.  */
static void
replace_swapped_load_constant (swap_web_entry *insn_entry, rtx swap_insn)
{
  /* Find the load.  */
  struct df_insn_info *insn_info = DF_INSN_INFO_GET (swap_insn);
  rtx_insn *load_insn;
  df_ref use = DF_INSN_INFO_USES (insn_info);
  struct df_link *def_link = DF_REF_CHAIN (use);
  gcc_assert (def_link && !def_link->next);

  load_insn = DF_REF_INSN (def_link->ref);
  gcc_assert (load_insn);

  /* Find the TOC-relative symbol access.  */
  insn_info = DF_INSN_INFO_GET (load_insn);
  use = DF_INSN_INFO_USES (insn_info);

  def_link = DF_REF_CHAIN (use);
  gcc_assert (def_link && !def_link->next);

  rtx_insn *tocrel_insn = DF_REF_INSN (def_link->ref);
  gcc_assert (tocrel_insn);

  /* Find the embedded CONST_VECTOR.  We have to call toc_relative_expr_p
     to set tocrel_base; otherwise it would be unnecessary as we've
     already established it will return true.  */
  rtx base, offset;
  rtx tocrel_expr = SET_SRC (PATTERN (tocrel_insn));
  const_rtx tocrel_base;

  /* There is an extra level of indirection for small/large code models.  */
  if (GET_CODE (tocrel_expr) == MEM)
    tocrel_expr = XEXP (tocrel_expr, 0);

  if (!toc_relative_expr_p (tocrel_expr, false, &tocrel_base, NULL))
    gcc_unreachable ();

  split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
  rtx const_vector = get_pool_constant (base);

  /* With the extra indirection, get_pool_constant will produce the
     real constant from the reg_equal expression, so get the real
     constant.  */
  if (GET_CODE (const_vector) == SYMBOL_REF)
    const_vector = get_pool_constant (const_vector);
  gcc_assert (GET_CODE (const_vector) == CONST_VECTOR);

  rtx new_mem;
  machine_mode mode = GET_MODE (const_vector);

  /* Create an adjusted constant from the original constant.  */
  if (mode == V1TImode)
    /* Leave the original load and swap in place; see the function
       comment.  */
    return;
  else if (mode == V16QImode)
    {
      rtx vals = gen_rtx_PARALLEL (mode, rtvec_alloc (16));
      int i;

      for (i = 0; i < 16; i++)
        XVECEXP (vals, 0, ((i+8) % 16)) = XVECEXP (const_vector, 0, i);
      rtx new_const_vector = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
      new_mem = force_const_mem (mode, new_const_vector);
    }
  else if ((mode == V8HImode)
#ifdef HAVE_V8HFmode
           || (mode == V8HFmode)
#endif
           )
    {
      rtx vals = gen_rtx_PARALLEL (mode, rtvec_alloc (8));
      int i;

      for (i = 0; i < 8; i++)
        XVECEXP (vals, 0, ((i+4) % 8)) = XVECEXP (const_vector, 0, i);
      rtx new_const_vector = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
      new_mem = force_const_mem (mode, new_const_vector);
    }
  else if ((mode == V4SImode) || (mode == V4SFmode))
    {
      rtx vals = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      int i;

      for (i = 0; i < 4; i++)
        XVECEXP (vals, 0, ((i+2) % 4)) = XVECEXP (const_vector, 0, i);
      rtx new_const_vector = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
      new_mem = force_const_mem (mode, new_const_vector);
    }
  else if ((mode == V2DImode) || (mode == V2DFmode))
    {
      rtx vals = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
      int i;

      for (i = 0; i < 2; i++)
        XVECEXP (vals, 0, ((i+1) % 2)) = XVECEXP (const_vector, 0, i);
      rtx new_const_vector = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
      new_mem = force_const_mem (mode, new_const_vector);
    }
  else
    {
      /* We do not expect other modes to be constant-load-swapped.  */
      gcc_unreachable ();
    }

  /* This gives us a MEM whose base operand is a SYMBOL_REF, which we
     can't recognize.  Force the SYMBOL_REF into a register.  */
  if (!REG_P (XEXP (new_mem, 0)))
    {
      rtx base_reg = force_reg (Pmode, XEXP (new_mem, 0));
      XEXP (new_mem, 0) = base_reg;

      /* Move the newly created insn ahead of the load insn.  */
      /* The last insn is the insn that forced new_mem into a register.  */
      rtx_insn *force_insn = get_last_insn ();
      /* Remove this insn from the end of the instruction sequence.  */
      remove_insn (force_insn);
      rtx_insn *before_load_insn = PREV_INSN (load_insn);

      /* And insert this insn back into the sequence before the previous
         load insn so this new expression will be available when the
         existing load is modified to load the swapped constant.  */
      add_insn_after (force_insn, before_load_insn, BLOCK_FOR_INSN (load_insn));
      df_insn_rescan (before_load_insn);
      df_insn_rescan (force_insn);
    }

  /* Replace the MEM in the load instruction and rescan it.  */
  XEXP (SET_SRC (PATTERN (load_insn)), 0) = new_mem;
  INSN_CODE (load_insn) = -1; /* Force re-recognition.  */
  df_insn_rescan (load_insn);

  unsigned int uid = INSN_UID (swap_insn);
  mark_swaps_for_removal (insn_entry, uid);
  replace_swap_with_copy (insn_entry, uid);
}

/* Dump the swap table to DUMP_FILE.  */
static void
dump_swap_insn_table (swap_web_entry *insn_entry)
{
  int e = get_max_uid ();
  fprintf (dump_file, "\nRelevant insns with their flag settings\n\n");

  for (int i = 0; i < e; ++i)
    if (insn_entry[i].is_relevant)
      {
        swap_web_entry *pred_entry = (swap_web_entry *)insn_entry[i].pred ();
        fprintf (dump_file, "%6d %6d ", i,
                 pred_entry && pred_entry->insn
                 ? INSN_UID (pred_entry->insn) : 0);
        if (insn_entry[i].is_load)
          fputs ("load ", dump_file);
        if (insn_entry[i].is_store)
          fputs ("store ", dump_file);
        if (insn_entry[i].is_swap)
          fputs ("swap ", dump_file);
        if (insn_entry[i].is_live_in)
          fputs ("live-in ", dump_file);
        if (insn_entry[i].is_live_out)
          fputs ("live-out ", dump_file);
        if (insn_entry[i].contains_subreg)
          fputs ("subreg ", dump_file);
        if (insn_entry[i].is_128_int)
          fputs ("int128 ", dump_file);
        if (insn_entry[i].is_call)
          fputs ("call ", dump_file);
        if (insn_entry[i].is_swappable)
          {
            fputs ("swappable ", dump_file);
            if (insn_entry[i].special_handling == SH_CONST_VECTOR)
              fputs ("special:constvec ", dump_file);
            else if (insn_entry[i].special_handling == SH_SUBREG)
              fputs ("special:subreg ", dump_file);
            else if (insn_entry[i].special_handling == SH_NOSWAP_LD)
              fputs ("special:load ", dump_file);
            else if (insn_entry[i].special_handling == SH_NOSWAP_ST)
              fputs ("special:store ", dump_file);
            else if (insn_entry[i].special_handling == SH_EXTRACT)
              fputs ("special:extract ", dump_file);
            else if (insn_entry[i].special_handling == SH_SPLAT)
              fputs ("special:splat ", dump_file);
            else if (insn_entry[i].special_handling == SH_XXPERMDI)
              fputs ("special:xxpermdi ", dump_file);
            else if (insn_entry[i].special_handling == SH_CONCAT)
              fputs ("special:concat ", dump_file);
            else if (insn_entry[i].special_handling == SH_VPERM)
              fputs ("special:vperm ", dump_file);
          }
        if (insn_entry[i].web_not_optimizable)
          fputs ("unoptimizable ", dump_file);
        if (insn_entry[i].will_delete)
          fputs ("delete ", dump_file);
        fputs ("\n", dump_file);
      }
  fputs ("\n", dump_file);
}

/* Return ALIGN with its address canonicalized to (reg) or
   (plus reg reg).  Here ALIGN is an rtx of the form
   (and addr (const_int -16)).  Always return a new copy to avoid
   problems with combine.  */
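/* For example, (and (plus (reg) (const_int 16)) (const_int -16)) is
   returned as (and (plus (reg) (reg)) (const_int -16)) once the
   constant displacement has been forced into a register, while an
   already canonical address is simply re-wrapped in a fresh AND.  */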
static rtx
alignment_with_canonical_addr (rtx align)
{
  rtx canon;
  rtx addr = XEXP (align, 0);

  if (REG_P (addr))
    canon = addr;

  else if (GET_CODE (addr) == PLUS)
    {
      rtx addrop0 = XEXP (addr, 0);
      rtx addrop1 = XEXP (addr, 1);

      if (!REG_P (addrop0))
        addrop0 = force_reg (GET_MODE (addrop0), addrop0);

      if (!REG_P (addrop1))
        addrop1 = force_reg (GET_MODE (addrop1), addrop1);

      canon = gen_rtx_PLUS (GET_MODE (addr), addrop0, addrop1);
    }

  else
    canon = force_reg (GET_MODE (addr), addr);

  return gen_rtx_AND (GET_MODE (align), canon, GEN_INT (-16));
}

/* Check whether INSN computes an alignment mask for an address, and if
   so, return a fully-expanded rtx for the masking operation; otherwise
   return 0.  */
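/* Schematically, the accepted forms are

     (set (reg) (and (reg) (const_int -16)))

   and the same AND where the mask operand is instead a register whose
   unique definition sets it to the constant -16.  */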
static rtx
alignment_mask (rtx_insn *insn)
{
  rtx body = PATTERN (insn);

  if (GET_CODE (body) != SET
      || GET_CODE (SET_SRC (body)) != AND
      || !REG_P (XEXP (SET_SRC (body), 0)))
    return 0;

  rtx mask = XEXP (SET_SRC (body), 1);

  if (GET_CODE (mask) == CONST_INT)
    {
      if (INTVAL (mask) == -16)
        return alignment_with_canonical_addr (SET_SRC (body));
      else
        return 0;
    }

  if (!REG_P (mask))
    return 0;

  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
  df_ref use;
  rtx real_mask = 0;

  FOR_EACH_INSN_INFO_USE (use, insn_info)
    {
      if (!rtx_equal_p (DF_REF_REG (use), mask))
        continue;

      struct df_link *def_link = DF_REF_CHAIN (use);
      if (!def_link || def_link->next)
        return 0;

      rtx_insn *const_insn = DF_REF_INSN (def_link->ref);
      rtx const_body = PATTERN (const_insn);
      if (GET_CODE (const_body) != SET)
        return 0;

      real_mask = SET_SRC (const_body);

      if (GET_CODE (real_mask) != CONST_INT
          || INTVAL (real_mask) != -16)
        return 0;
    }

  if (real_mask == 0)
    return 0;

  return alignment_with_canonical_addr (SET_SRC (body));
}

/* Given INSN that's a load or store based at BASE_REG, look for a
   feeding computation that aligns its address on a 16-byte boundary.
   Return the rtx for the masking operation, and set *AND_INSN to the
   insn that contains it.  */
static rtx
find_alignment_op (rtx_insn *insn, rtx base_reg, rtx_insn **and_insn)
{
  df_ref base_use;
  struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
  rtx and_operation = 0;

  FOR_EACH_INSN_INFO_USE (base_use, insn_info)
    {
      if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
        continue;

      struct df_link *base_def_link = DF_REF_CHAIN (base_use);
      if (!base_def_link || base_def_link->next)
        break;

      /* With stack-protector code enabled, and possibly in other
         circumstances, there may not be an associated insn for
         the def.  */
      if (DF_REF_IS_ARTIFICIAL (base_def_link->ref))
        break;

      *and_insn = DF_REF_INSN (base_def_link->ref);
      and_operation = alignment_mask (*and_insn);
      if (and_operation != 0)
        break;
    }

  return and_operation;
}

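/* Describes a deferred action for recombine_lvx_stvx_patterns below:
   when REPLACE is set, REPLACE_INSN is a swap that will be turned into
   a simple register copy once the walk over the insns is complete.  */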
struct del_info { bool replace; rtx_insn *replace_insn; };

/* If INSN is the load for an lvx pattern, put it in canonical form.  */
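/* Schematically, given

     (set (reg:DI B) (and:DI (reg:DI A) (const_int -16)))
     (set (reg:V4SI V) (vec_select:V4SI (mem:V4SI (reg:DI B)) ...))

   followed by a doubleword swap of V, the load is rewritten to use the
   address (and:DI (reg:DI A') (const_int -16)), where A' is a fresh
   copy of A, so that the insn matches the lvx pattern, and the swap
   becomes a simple copy.  */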
static void
recombine_lvx_pattern (rtx_insn *insn, del_info *to_delete)
{
  rtx body = PATTERN (insn);
  gcc_assert (GET_CODE (body) == SET
              && GET_CODE (SET_SRC (body)) == VEC_SELECT
              && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM);

  rtx mem = XEXP (SET_SRC (body), 0);
  rtx base_reg = XEXP (mem, 0);

  rtx_insn *and_insn;
  rtx and_operation = find_alignment_op (insn, base_reg, &and_insn);

  if (and_operation != 0)
    {
      df_ref def;
      struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
      FOR_EACH_INSN_INFO_DEF (def, insn_info)
        {
          struct df_link *link = DF_REF_CHAIN (def);
          if (!link || link->next)
            break;

          rtx_insn *swap_insn = DF_REF_INSN (link->ref);
          if (!insn_is_swap_p (swap_insn)
              || insn_is_load_p (swap_insn)
              || insn_is_store_p (swap_insn))
            break;

          /* Expected lvx pattern found.  Change the swap to
             a copy, and propagate the AND operation into the
             load.  */
          to_delete[INSN_UID (swap_insn)].replace = true;
          to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;

          /* However, first we must be sure that we make the
             base register from the AND operation available
             in case the register has been overwritten.  Copy
             the base register to a new pseudo and use that
             as the base register of the AND operation in
             the new LVX instruction.  */
          rtx and_base = XEXP (and_operation, 0);
          rtx new_reg = gen_reg_rtx (GET_MODE (and_base));
          rtx copy = gen_rtx_SET (new_reg, and_base);
          rtx_insn *new_insn = emit_insn_after (copy, and_insn);
          set_block_for_insn (new_insn, BLOCK_FOR_INSN (and_insn));
          df_insn_rescan (new_insn);

          XEXP (mem, 0) = gen_rtx_AND (GET_MODE (and_base), new_reg,
                                       XEXP (and_operation, 1));
          SET_SRC (body) = mem;
          INSN_CODE (insn) = -1; /* Force re-recognition.  */
          df_insn_rescan (insn);

          if (dump_file)
            fprintf (dump_file, "lvx opportunity found at %d\n",
                     INSN_UID (insn));
        }
    }
}

/* If INSN is the store for an stvx pattern, put it in canonical form.  */
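/* This mirrors the lvx case above: a doubleword swap feeding

     (set (mem:V4SI (reg:DI B)) (vec_select:V4SI (reg:V4SI V) ...))

   where B is defined by an alignment AND is rewritten so that the
   store uses the aligned address directly and the swap becomes a
   simple copy.  */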
static void
recombine_stvx_pattern (rtx_insn *insn, del_info *to_delete)
{
  rtx body = PATTERN (insn);
  gcc_assert (GET_CODE (body) == SET
              && GET_CODE (SET_DEST (body)) == MEM
              && GET_CODE (SET_SRC (body)) == VEC_SELECT);
  rtx mem = SET_DEST (body);
  rtx base_reg = XEXP (mem, 0);

  rtx_insn *and_insn;
  rtx and_operation = find_alignment_op (insn, base_reg, &and_insn);

  if (and_operation != 0)
    {
      rtx src_reg = XEXP (SET_SRC (body), 0);
      df_ref src_use;
      struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
      FOR_EACH_INSN_INFO_USE (src_use, insn_info)
        {
          if (!rtx_equal_p (DF_REF_REG (src_use), src_reg))
            continue;

          struct df_link *link = DF_REF_CHAIN (src_use);
          if (!link || link->next)
            break;

          rtx_insn *swap_insn = DF_REF_INSN (link->ref);
          if (!insn_is_swap_p (swap_insn)
              || insn_is_load_p (swap_insn)
              || insn_is_store_p (swap_insn))
            break;

          /* Expected stvx pattern found.  Change the swap to
             a copy, and propagate the AND operation into the
             store.  */
          to_delete[INSN_UID (swap_insn)].replace = true;
          to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;

          /* However, first we must be sure that we make the
             base register from the AND operation available
             in case the register has been overwritten.  Copy
             the base register to a new pseudo and use that
             as the base register of the AND operation in
             the new STVX instruction.  */
          rtx and_base = XEXP (and_operation, 0);
          rtx new_reg = gen_reg_rtx (GET_MODE (and_base));
          rtx copy = gen_rtx_SET (new_reg, and_base);
          rtx_insn *new_insn = emit_insn_after (copy, and_insn);
          set_block_for_insn (new_insn, BLOCK_FOR_INSN (and_insn));
          df_insn_rescan (new_insn);

          XEXP (mem, 0) = gen_rtx_AND (GET_MODE (and_base), new_reg,
                                       XEXP (and_operation, 1));
          SET_SRC (body) = src_reg;
          INSN_CODE (insn) = -1; /* Force re-recognition.  */
          df_insn_rescan (insn);

          if (dump_file)
            fprintf (dump_file, "stvx opportunity found at %d\n",
                     INSN_UID (insn));
        }
    }
}

/* Look for patterns created from builtin lvx and stvx calls, and
   canonicalize them to be properly recognized as such.  */
static void
recombine_lvx_stvx_patterns (function *fun)
{
  int i;
  basic_block bb;
  rtx_insn *insn;

  int num_insns = get_max_uid ();
  del_info *to_delete = XCNEWVEC (del_info, num_insns);

  FOR_ALL_BB_FN (bb, fun)
    FOR_BB_INSNS (bb, insn)
      {
        if (!NONDEBUG_INSN_P (insn))
          continue;

        if (insn_is_load_p (insn) && insn_is_swap_p (insn))
          recombine_lvx_pattern (insn, to_delete);
        else if (insn_is_store_p (insn) && insn_is_swap_p (insn))
          recombine_stvx_pattern (insn, to_delete);
      }

  /* Turning swaps into copies is delayed until now, to avoid problems
     with deleting instructions during the insn walk.  */
  for (i = 0; i < num_insns; i++)
    if (to_delete[i].replace)
      {
        rtx swap_body = PATTERN (to_delete[i].replace_insn);
        rtx src_reg = XEXP (SET_SRC (swap_body), 0);
        rtx copy = gen_rtx_SET (SET_DEST (swap_body), src_reg);
        rtx_insn *new_insn = emit_insn_before (copy,
                                               to_delete[i].replace_insn);
        set_block_for_insn (new_insn,
                            BLOCK_FOR_INSN (to_delete[i].replace_insn));
        df_insn_rescan (new_insn);
        df_insn_delete (to_delete[i].replace_insn);
        remove_insn (to_delete[i].replace_insn);
        to_delete[i].replace_insn->set_deleted ();
      }

  free (to_delete);
}

/* Main entry point for this pass.  */
unsigned int
rs6000_analyze_swaps (function *fun)
{
  swap_web_entry *insn_entry;
  basic_block bb;
  rtx_insn *insn, *curr_insn = 0;

  /* Dataflow analysis for use-def chains.  */
  df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
  df_chain_add_problem (DF_DU_CHAIN | DF_UD_CHAIN);
  df_analyze ();
  df_set_flags (DF_DEFER_INSN_RESCAN);

  /* Pre-pass to recombine lvx and stvx patterns so we don't lose info.  */
  recombine_lvx_stvx_patterns (fun);
  df_process_deferred_rescans ();

  /* Allocate structure to represent webs of insns.  */
  insn_entry = XCNEWVEC (swap_web_entry, get_max_uid ());

  /* Walk the insns to gather basic data.  */
  FOR_ALL_BB_FN (bb, fun)
    FOR_BB_INSNS_SAFE (bb, insn, curr_insn)
      {
        unsigned int uid = INSN_UID (insn);
        if (NONDEBUG_INSN_P (insn))
          {
            insn_entry[uid].insn = insn;

            if (GET_CODE (insn) == CALL_INSN)
              insn_entry[uid].is_call = 1;

            /* Walk the uses and defs to see if we mention vector regs.
               Record any constraints on optimization of such mentions.  */
            struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
            df_ref mention;
            FOR_EACH_INSN_INFO_USE (mention, insn_info)
              {
                /* We use DF_REF_REAL_REG here to get inside any subregs.  */
                machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));

                /* If a use gets its value from a call insn, it will be
                   a hard register and will look like (reg:V4SI 3 3).
                   The df analysis creates two mentions for GPR3 and GPR4,
                   both DImode.  We must recognize this and treat it as a
                   vector mention to ensure the call is unioned with this
                   use.  */
                if (mode == DImode && DF_REF_INSN_INFO (mention))
                  {
                    rtx feeder = DF_REF_INSN (mention);
                    /* FIXME: It is pretty hard to get from the df mention
                       to the mode of the use in the insn.  We arbitrarily
                       pick a vector mode here, even though the use might
                       be a real DImode.  We can be too conservative
                       (create a web larger than necessary) because of
                       this, so consider eventually fixing this.  */
                    if (GET_CODE (feeder) == CALL_INSN)
                      mode = V4SImode;
                  }

                if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
                  {
                    insn_entry[uid].is_relevant = 1;
                    if (mode == TImode || mode == V1TImode
                        || FLOAT128_VECTOR_P (mode))
                      insn_entry[uid].is_128_int = 1;
                    if (DF_REF_INSN_INFO (mention))
                      insn_entry[uid].contains_subreg
                        = !rtx_equal_p (DF_REF_REG (mention),
                                        DF_REF_REAL_REG (mention));
                    union_defs (insn_entry, insn, mention);
                  }
              }
            FOR_EACH_INSN_INFO_DEF (mention, insn_info)
              {
                /* We use DF_REF_REAL_REG here to get inside any subregs.  */
                machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));

                /* If we're loading up a hard vector register for a call,
                   it looks like (set (reg:V4SI 9 9) (...)).  The df
                   analysis creates two mentions for GPR9 and GPR10, both
                   DImode.  So relying on the mode from the mentions
                   isn't sufficient to ensure we union the call into the
                   web with the parameter setup code.  */
                rtx pat = PATTERN (insn);
                if (mode == DImode && GET_CODE (pat) == SET
                    && ALTIVEC_OR_VSX_VECTOR_MODE (GET_MODE (SET_DEST (pat))))
                  mode = GET_MODE (SET_DEST (pat));

                if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
                  {
                    insn_entry[uid].is_relevant = 1;
                    if (mode == TImode || mode == V1TImode
                        || FLOAT128_VECTOR_P (mode))
                      insn_entry[uid].is_128_int = 1;
                    if (DF_REF_INSN_INFO (mention))
                      insn_entry[uid].contains_subreg
                        = !rtx_equal_p (DF_REF_REG (mention),
                                        DF_REF_REAL_REG (mention));
                    /* REG_FUNCTION_VALUE_P is not valid for subregs.  */
                    else if (REG_FUNCTION_VALUE_P (DF_REF_REG (mention)))
                      insn_entry[uid].is_live_out = 1;
                    union_uses (insn_entry, insn, mention);
                  }
              }

            if (insn_entry[uid].is_relevant)
              {
                /* Determine if this is a load or store.  */
                insn_entry[uid].is_load = insn_is_load_p (insn);
                insn_entry[uid].is_store = insn_is_store_p (insn);

                /* Determine if this is a doubleword swap.  If not,
                   determine whether it can legally be swapped.  */
                if (insn_is_swap_p (insn))
                  insn_entry[uid].is_swap = 1;
                else
                  {
                    unsigned int special = SH_NONE;
                    insn_entry[uid].is_swappable
                      = insn_is_swappable_p (insn_entry, insn, &special);
                    if (special != SH_NONE && insn_entry[uid].contains_subreg)
                      insn_entry[uid].is_swappable = 0;
                    else if (special != SH_NONE)
                      insn_entry[uid].special_handling = special;
                    else if (insn_entry[uid].contains_subreg)
                      insn_entry[uid].special_handling = SH_SUBREG;
                  }
              }
          }
      }

  if (dump_file)
    {
      fprintf (dump_file, "\nSwap insn entry table when first built\n");
      dump_swap_insn_table (insn_entry);
    }

  /* Record unoptimizable webs.  */
  unsigned e = get_max_uid (), i;
  for (i = 0; i < e; ++i)
    {
      if (!insn_entry[i].is_relevant)
        continue;

      swap_web_entry *root
        = (swap_web_entry*)(&insn_entry[i])->unionfind_root ();

      if (insn_entry[i].is_live_in || insn_entry[i].is_live_out
          || (insn_entry[i].contains_subreg
              && insn_entry[i].special_handling != SH_SUBREG)
          || insn_entry[i].is_128_int || insn_entry[i].is_call
          || !(insn_entry[i].is_swappable || insn_entry[i].is_swap))
        root->web_not_optimizable = 1;

      /* If we have loads or stores that aren't permuting then the
         optimization isn't appropriate.  */
      else if ((insn_entry[i].is_load || insn_entry[i].is_store)
               && !insn_entry[i].is_swap && !insn_entry[i].is_swappable)
        root->web_not_optimizable = 1;

      /* If we have permuting loads or stores that are not accompanied
         by a register swap, the optimization isn't appropriate.  */
      else if (insn_entry[i].is_load && insn_entry[i].is_swap)
        {
          rtx insn = insn_entry[i].insn;
          struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
          df_ref def;

          FOR_EACH_INSN_INFO_DEF (def, insn_info)
            {
              struct df_link *link = DF_REF_CHAIN (def);

              if (!chain_contains_only_swaps (insn_entry, link, FOR_LOADS))
                {
                  root->web_not_optimizable = 1;
                  break;
                }
            }
        }
      else if (insn_entry[i].is_store && insn_entry[i].is_swap)
        {
          rtx insn = insn_entry[i].insn;
          struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
          df_ref use;

          FOR_EACH_INSN_INFO_USE (use, insn_info)
            {
              struct df_link *link = DF_REF_CHAIN (use);

              if (!chain_contains_only_swaps (insn_entry, link, FOR_STORES))
                {
                  root->web_not_optimizable = 1;
                  break;
                }
            }
        }
    }

  if (dump_file)
    {
      fprintf (dump_file, "\nSwap insn entry table after web analysis\n");
      dump_swap_insn_table (insn_entry);
    }

  /* For each load and store in an optimizable web (which implies
     the loads and stores are permuting), find the associated
     register swaps and mark them for removal.  Due to various
     optimizations we may mark the same swap more than once.  Also
     perform special handling for swappable insns that require it.  */
  for (i = 0; i < e; ++i)
    if ((insn_entry[i].is_load || insn_entry[i].is_store)
        && insn_entry[i].is_swap)
      {
        swap_web_entry* root_entry
          = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
        if (!root_entry->web_not_optimizable)
          mark_swaps_for_removal (insn_entry, i);
      }
    else if (insn_entry[i].is_swappable && insn_entry[i].special_handling)
      {
        swap_web_entry* root_entry
          = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
        if (!root_entry->web_not_optimizable)
          handle_special_swappables (insn_entry, i);
      }

  /* Now delete the swaps marked for removal.  */
  for (i = 0; i < e; ++i)
    if (insn_entry[i].will_delete)
      replace_swap_with_copy (insn_entry, i);

  /* Clean up.  */
  free (insn_entry);

  /* Use an additional pass over the rtl to replace
     swap(load(vector constant)) with load(swapped vector constant).  */
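  /* That is, a swap of a register loaded from the constant pool, as in

       (set (reg:V4SI P) (vec_select:V4SI (mem:V4SI ...) ...))
       (set (reg:V4SI V) (vec_select:V4SI (reg:V4SI P) ...))

     is handled by storing a pre-swapped copy of the constant in the
     pool instead, after which the second vec_select can become a
     simple copy.  */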
  swap_web_entry *pass2_insn_entry;
  pass2_insn_entry = XCNEWVEC (swap_web_entry, get_max_uid ());

  /* Walk the insns to gather basic data.  */
  FOR_ALL_BB_FN (bb, fun)
    FOR_BB_INSNS_SAFE (bb, insn, curr_insn)
      {
        unsigned int uid = INSN_UID (insn);
        if (NONDEBUG_INSN_P (insn))
          {
            pass2_insn_entry[uid].insn = insn;

            pass2_insn_entry[uid].is_relevant = 1;
            pass2_insn_entry[uid].is_load = insn_is_load_p (insn);
            pass2_insn_entry[uid].is_store = insn_is_store_p (insn);

            /* Determine if this is a doubleword swap.  */
            if (insn_is_swap_p (insn))
              pass2_insn_entry[uid].is_swap = 1;
          }
      }

  e = get_max_uid ();
  for (unsigned i = 0; i < e; ++i)
    if (pass2_insn_entry[i].is_swap && !pass2_insn_entry[i].is_load
        && !pass2_insn_entry[i].is_store)
      {
        insn = pass2_insn_entry[i].insn;
        if (const_load_sequence_p (pass2_insn_entry, insn))
          replace_swapped_load_constant (pass2_insn_entry, insn);
      }

  /* Clean up.  */
  free (pass2_insn_entry);
  return 0;
}

const pass_data pass_data_analyze_swaps =
{
  RTL_PASS, /* type */
  "swaps", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_analyze_swaps : public rtl_opt_pass
{
public:
  pass_analyze_swaps (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_analyze_swaps, ctxt)
  {}

  /* opt_pass methods: */
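  /* The gate below restricts the pass to little-endian VSX code
     generation without Power9 vector support: ISA 3.0 provides true
     little-endian vector loads and stores, so no compensating swaps
     are generated there.  */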
  virtual bool gate (function *)
    {
      return (optimize > 0 && !BYTES_BIG_ENDIAN && TARGET_VSX
              && !TARGET_P9_VECTOR && rs6000_optimize_swaps);
    }

  virtual unsigned int execute (function *fun)
    {
      return rs6000_analyze_swaps (fun);
    }

  opt_pass *clone ()
    {
      return new pass_analyze_swaps (m_ctxt);
    }

}; // class pass_analyze_swaps

rtl_opt_pass *
make_pass_analyze_swaps (gcc::context *ctxt)
{
  return new pass_analyze_swaps (ctxt);
}