comparison gcc/gimple-ssa-store-merging.c @ 132:d34655255c78

update gcc-8.2
author mir3636
date Thu, 25 Oct 2018 10:21:07 +0900
parents 84e7813d76e9
children 1830386684a0
130:e108057fa461 132:d34655255c78
1 /* GIMPLE store merging pass. 1 /* GIMPLE store merging and byte swapping passes.
2 Copyright (C) 2016-2017 Free Software Foundation, Inc. 2 Copyright (C) 2009-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd. 3 Contributed by ARM Ltd.
4 4
5 This file is part of GCC. 5 This file is part of GCC.
6 6
7 GCC is free software; you can redistribute it and/or modify it 7 GCC is free software; you can redistribute it and/or modify it
16 16
17 You should have received a copy of the GNU General Public License 17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see 18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */ 19 <http://www.gnu.org/licenses/>. */
20 20
21 /* The purpose of this pass is to combine multiple memory stores of 21 /* The purpose of the store merging pass is to combine multiple memory stores
22 constant values to consecutive memory locations into fewer wider stores. 22 of constant values, values loaded from memory, bitwise operations on those,
23 or bit-field values, to consecutive locations, into fewer wider stores.
24
23 For example, if we have a sequence performing four byte stores to 25 For example, if we have a sequence performing four byte stores to
24 consecutive memory locations: 26 consecutive memory locations:
25 [p ] := imm1; 27 [p ] := imm1;
26 [p + 1B] := imm2; 28 [p + 1B] := imm2;
27 [p + 2B] := imm3; 29 [p + 2B] := imm3;
28 [p + 3B] := imm4; 30 [p + 3B] := imm4;
29 we can transform this into a single 4-byte store if the target supports it: 31 we can transform this into a single 4-byte store if the target supports it:
30 [p] := imm1:imm2:imm3:imm4 //concatenated immediates according to endianness. 32 [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.
33
34 Or:
35 [p ] := [q ];
36 [p + 1B] := [q + 1B];
37 [p + 2B] := [q + 2B];
38 [p + 3B] := [q + 3B];
39 if there is no overlap, this can be transformed into a single 4-byte
40 load followed by a single 4-byte store.
41
42 Or:
43 [p ] := [q ] ^ imm1;
44 [p + 1B] := [q + 1B] ^ imm2;
45 [p + 2B] := [q + 2B] ^ imm3;
46 [p + 3B] := [q + 3B] ^ imm4;
47 if there is no overlap, this can be transformed into a single 4-byte
48 load, XORed with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.
49
50 Or:
51 [p:1 ] := imm;
52 [p:31] := val & 0x7FFFFFFF;
53 we can transform this into a single 4-byte store if the target supports it:
54 [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.
31 55
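As a C-level illustration of the first case above (an assumed example, not
taken from the GCC sources or testsuite), four adjacent byte stores of
constants that the pass can merge into one 32-bit store where the target
allows it:

void
store_imms (unsigned char *p)
{
  /* Four consecutive one-byte stores of constants...  */
  p[0] = 0x01;
  p[1] = 0x02;
  p[2] = 0x03;
  p[3] = 0x04;
  /* ...which can become a single 32-bit store of the immediates
     concatenated according to the target endianness.  */
}
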
32 The algorithm is applied to each basic block in three phases: 56 The algorithm is applied to each basic block in three phases:
33 57
34 1) Scan through the basic block recording constant assignments to 58 1) Scan through the basic block and record assignments to destinations
35 destinations that can be expressed as a store to memory of a certain size 59 that can be expressed as a store to memory of a certain size at a certain
36 at a certain bit offset. Record store chains to different bases in a 60 bit offset from base expressions we can handle. For bit-fields we also
37 hash_map (m_stores) and make sure to terminate such chains when appropriate 61 record the surrounding bit region, i.e. bits that could be stored in
38 (for example when the stored values get used subsequently). 62 a read-modify-write operation when storing the bit-field. Record store
63 chains to different bases in a hash_map (m_stores) and make sure to
64 terminate such chains when appropriate (for example when the stored
65 values get used subsequently).
39 These stores can be a result of structure element initializers, array stores 66 These stores can be a result of structure element initializers, array stores
40 etc. A store_immediate_info object is recorded for every such store. 67 etc. A store_immediate_info object is recorded for every such store.
41 Record as many such assignments to a single base as possible until a 68 Record as many such assignments to a single base as possible until a
42 statement that interferes with the store sequence is encountered. 69 statement that interferes with the store sequence is encountered.
43 70 Each store has up to 2 operands, which can be either a constant, a memory
44 2) Analyze the chain of stores recorded in phase 1) (i.e. the vector of 71 load or an SSA name, from which the value to be stored can be computed.
72 At most one of the operands can be a constant. The operands are recorded
73 in a store_operand_info struct.
74
75 2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
45 store_immediate_info objects) and coalesce contiguous stores into 76 store_immediate_info objects) and coalesce contiguous stores into
46 merged_store_group objects. 77 merged_store_group objects. For bit-field stores, we don't need to
78 require the stores to be contiguous; only their surrounding bit regions
79 have to be contiguous. If the expression being stored is different
80 between adjacent stores, such as one store storing a constant and the
81 following one storing a value loaded from memory, or if the loaded memory
82 objects are not adjacent, a new merged_store_group is created as well.
47 83
48 For example, given the stores: 84 For example, given the stores:
49 [p ] := 0; 85 [p ] := 0;
50 [p + 1B] := 1; 86 [p + 1B] := 1;
51 [p + 3B] := 0; 87 [p + 3B] := 0;
60 to generate the sequence of wider stores that set the contiguous memory 96 to generate the sequence of wider stores that set the contiguous memory
61 regions to the sequence of bytes that correspond to it. This may emit 97 regions to the sequence of bytes that correspond to it. This may emit
62 multiple stores per store group to handle contiguous stores that are not 98 multiple stores per store group to handle contiguous stores that are not
63 of a size that is a power of 2. For example it can try to emit a 40-bit 99 of a size that is a power of 2. For example it can try to emit a 40-bit
64 store as a 32-bit store followed by an 8-bit store. 100 store as a 32-bit store followed by an 8-bit store.
65 We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT or 101 We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
66 TARGET_SLOW_UNALIGNED_ACCESS rules. 102 or TARGET_SLOW_UNALIGNED_ACCESS settings.
67 103
68 Note on endianness and example: 104 Note on endianness and example:
69 Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores: 105 Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
70 [p ] := 0x1234; 106 [p ] := 0x1234;
71 [p + 2B] := 0x5678; 107 [p + 2B] := 0x5678;
118 #include "params.h" 154 #include "params.h"
119 #include "print-tree.h" 155 #include "print-tree.h"
120 #include "tree-hash-traits.h" 156 #include "tree-hash-traits.h"
121 #include "gimple-iterator.h" 157 #include "gimple-iterator.h"
122 #include "gimplify.h" 158 #include "gimplify.h"
159 #include "gimple-fold.h"
123 #include "stor-layout.h" 160 #include "stor-layout.h"
124 #include "timevar.h" 161 #include "timevar.h"
125 #include "tree-cfg.h" 162 #include "tree-cfg.h"
126 #include "tree-eh.h" 163 #include "tree-eh.h"
127 #include "target.h" 164 #include "target.h"
128 #include "gimplify-me.h" 165 #include "gimplify-me.h"
166 #include "rtl.h"
167 #include "expr.h" /* For get_bit_range. */
168 #include "optabs-tree.h"
129 #include "selftest.h" 169 #include "selftest.h"
130 170
131 /* The maximum size (in bits) of the stores this pass should generate. */ 171 /* The maximum size (in bits) of the stores this pass should generate. */
132 #define MAX_STORE_BITSIZE (BITS_PER_WORD) 172 #define MAX_STORE_BITSIZE (BITS_PER_WORD)
133 #define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT) 173 #define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)
134 174
175 /* Limit to bound the number of aliasing checks for loads with the same
176 vuse as the corresponding store. */
177 #define MAX_STORE_ALIAS_CHECKS 64
178
135 namespace { 179 namespace {
180
181 struct bswap_stat
182 {
183 /* Number of hand-written 16-bit nop / bswaps found. */
184 int found_16bit;
185
186 /* Number of hand-written 32-bit nop / bswaps found. */
187 int found_32bit;
188
189 /* Number of hand-written 64-bit nop / bswaps found. */
190 int found_64bit;
191 } nop_stats, bswap_stats;
192
193 /* A symbolic number structure is used to detect byte permutation and selection
194 patterns of a source. To achieve that, its field N contains an artificial
195 number consisting of BITS_PER_MARKER-sized markers tracking where each
196 byte comes from in the source:
197
198 0 - target byte has the value 0
199 FF - target byte has an unknown value (e.g. due to sign extension)
200 1..size - marker value is the byte index in the source (0 for lsb).
201
202 To detect permutations on memory sources (arrays and structures), a symbolic
203 number is also associated with:
204 - a base address BASE_ADDR and an OFFSET giving the address of the source;
205 - a range which gives the difference between the highest and lowest accessed
206 memory location to make such a symbolic number;
207 - the address SRC of the source element of lowest address as a convenience
208 to easily get BASE_ADDR + offset + lowest bytepos;
209 - the number of expressions N_OPS bitwise ORed together to represent the
210 approximate cost of the computation.
211
212 Note 1: the range is different from size as size reflects the size of the
213 type of the current expression. For instance, for an array char a[],
214 (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
215 (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
216 time a range of 1.
217
218 Note 2: for non-memory sources, range holds the same value as size.
219
220 Note 3: SRC points to the SSA_NAME in case of non-memory source. */
221
222 struct symbolic_number {
223 uint64_t n;
224 tree type;
225 tree base_addr;
226 tree offset;
227 poly_int64_pod bytepos;
228 tree src;
229 tree alias_set;
230 tree vuse;
231 unsigned HOST_WIDE_INT range;
232 int n_ops;
233 };
234
235 #define BITS_PER_MARKER 8
236 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
237 #define MARKER_BYTE_UNKNOWN MARKER_MASK
238 #define HEAD_MARKER(n, size) \
239 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
240
241 /* The number which the find_bswap_or_nop_1 result should match in
242 order to have a nop. The number is masked according to the size of
243 the symbolic number before using it. */
244 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
245 (uint64_t)0x08070605 << 32 | 0x04030201)
246
247 /* The number which the find_bswap_or_nop_1 result should match in
248 order to have a byte swap. The number is masked according to the
249 size of the symbolic number before using it. */
250 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
251 (uint64_t)0x01020304 << 32 | 0x05060708)
252
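A minimal worked sketch (illustrative only, not part of the pass) of these two
constants reduced to a 4-byte symbolic number, mirroring the masking and
shifting performed later in find_bswap_or_nop_finalize:

#include <assert.h>
#include <stdint.h>

static void
marker_example (void)
{
  uint64_t cmpnop = (uint64_t) 0x08070605 << 32 | 0x04030201;
  uint64_t cmpxchg = (uint64_t) 0x01020304 << 32 | 0x05060708;
  /* Four markers of BITS_PER_MARKER (8) bits each.  */
  uint64_t mask4 = ((uint64_t) 1 << (4 * 8)) - 1;

  /* Identity (read in target endianness): the markers stay 1..4.  */
  assert ((cmpnop & mask4) == 0x04030201);
  /* Full 32-bit byte swap: the markers come out reversed.  */
  assert ((cmpxchg >> ((8 - 4) * 8)) == 0x01020304);
}
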
253 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
254 number N. Return false if the requested operation is not permitted
255 on a symbolic number. */
256
257 inline bool
258 do_shift_rotate (enum tree_code code,
259 struct symbolic_number *n,
260 int count)
261 {
262 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
263 unsigned head_marker;
264
265 if (count % BITS_PER_UNIT != 0)
266 return false;
267 count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
268
269 /* Zero out the extra bits of N in order to avoid them being shifted
270 into the significant bits. */
271 if (size < 64 / BITS_PER_MARKER)
272 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
273
274 switch (code)
275 {
276 case LSHIFT_EXPR:
277 n->n <<= count;
278 break;
279 case RSHIFT_EXPR:
280 head_marker = HEAD_MARKER (n->n, size);
281 n->n >>= count;
282 /* Arithmetic shift of signed type: result is dependent on the value. */
283 if (!TYPE_UNSIGNED (n->type) && head_marker)
284 for (i = 0; i < count / BITS_PER_MARKER; i++)
285 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
286 << ((size - 1 - i) * BITS_PER_MARKER);
287 break;
288 case LROTATE_EXPR:
289 n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
290 break;
291 case RROTATE_EXPR:
292 n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
293 break;
294 default:
295 return false;
296 }
297 /* Zero unused bits for size. */
298 if (size < 64 / BITS_PER_MARKER)
299 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
300 return true;
301 }
302
303 /* Perform sanity checking for the symbolic number N and the gimple
304 statement STMT. */
305
306 inline bool
307 verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
308 {
309 tree lhs_type;
310
311 lhs_type = gimple_expr_type (stmt);
312
313 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
314 return false;
315
316 if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
317 return false;
318
319 return true;
320 }
321
322 /* Initialize the symbolic number N for the bswap pass from the base element
323 SRC manipulated by the bitwise OR expression. */
324
325 bool
326 init_symbolic_number (struct symbolic_number *n, tree src)
327 {
328 int size;
329
330 if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
331 return false;
332
333 n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
334 n->src = src;
335
336 /* Set up the symbolic number N by setting each byte to a value between 1 and
337 the byte size of rhs1. The highest order byte is set to n->size and the
338 lowest order byte to 1. */
339 n->type = TREE_TYPE (src);
340 size = TYPE_PRECISION (n->type);
341 if (size % BITS_PER_UNIT != 0)
342 return false;
343 size /= BITS_PER_UNIT;
344 if (size > 64 / BITS_PER_MARKER)
345 return false;
346 n->range = size;
347 n->n = CMPNOP;
348 n->n_ops = 1;
349
350 if (size < 64 / BITS_PER_MARKER)
351 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
352
353 return true;
354 }
355
356 /* Check if STMT might be a byte swap or a nop from a memory source and return
357 the answer. If so, REF is that memory source and the base of the memory area
358 accessed and the offset of the access from that base are recorded in N. */
359
360 bool
361 find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
362 {
363 /* Leaf node is an array or component ref. Memorize its base and
364 offset from base to compare to other such leaf node. */
365 poly_int64 bitsize, bitpos, bytepos;
366 machine_mode mode;
367 int unsignedp, reversep, volatilep;
368 tree offset, base_addr;
369
370 /* Not prepared to handle PDP endian. */
371 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
372 return false;
373
374 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
375 return false;
376
377 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
378 &unsignedp, &reversep, &volatilep);
379
380 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
381 /* Do not rewrite TARGET_MEM_REF. */
382 return false;
383 else if (TREE_CODE (base_addr) == MEM_REF)
384 {
385 poly_offset_int bit_offset = 0;
386 tree off = TREE_OPERAND (base_addr, 1);
387
388 if (!integer_zerop (off))
389 {
390 poly_offset_int boff = mem_ref_offset (base_addr);
391 boff <<= LOG2_BITS_PER_UNIT;
392 bit_offset += boff;
393 }
394
395 base_addr = TREE_OPERAND (base_addr, 0);
396
397 /* Avoid returning a negative bitpos as this may wreak havoc later. */
398 if (maybe_lt (bit_offset, 0))
399 {
400 tree byte_offset = wide_int_to_tree
401 (sizetype, bits_to_bytes_round_down (bit_offset));
402 bit_offset = num_trailing_bits (bit_offset);
403 if (offset)
404 offset = size_binop (PLUS_EXPR, offset, byte_offset);
405 else
406 offset = byte_offset;
407 }
408
409 bitpos += bit_offset.force_shwi ();
410 }
411 else
412 base_addr = build_fold_addr_expr (base_addr);
413
414 if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
415 return false;
416 if (!multiple_p (bitsize, BITS_PER_UNIT))
417 return false;
418 if (reversep)
419 return false;
420
421 if (!init_symbolic_number (n, ref))
422 return false;
423 n->base_addr = base_addr;
424 n->offset = offset;
425 n->bytepos = bytepos;
426 n->alias_set = reference_alias_ptr_type (ref);
427 n->vuse = gimple_vuse (stmt);
428 return true;
429 }
430
431 /* Compute the symbolic number N representing the result of a bitwise OR on 2
432 symbolic numbers N1 and N2 whose source statements are respectively
433 SOURCE_STMT1 and SOURCE_STMT2. */
434
435 gimple *
436 perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
437 gimple *source_stmt2, struct symbolic_number *n2,
438 struct symbolic_number *n)
439 {
440 int i, size;
441 uint64_t mask;
442 gimple *source_stmt;
443 struct symbolic_number *n_start;
444
445 tree rhs1 = gimple_assign_rhs1 (source_stmt1);
446 if (TREE_CODE (rhs1) == BIT_FIELD_REF
447 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
448 rhs1 = TREE_OPERAND (rhs1, 0);
449 tree rhs2 = gimple_assign_rhs1 (source_stmt2);
450 if (TREE_CODE (rhs2) == BIT_FIELD_REF
451 && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
452 rhs2 = TREE_OPERAND (rhs2, 0);
453
454 /* Sources are different, cancel bswap if they are not memory locations with
455 the same base (array, structure, ...). */
456 if (rhs1 != rhs2)
457 {
458 uint64_t inc;
459 HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
460 struct symbolic_number *toinc_n_ptr, *n_end;
461 basic_block bb1, bb2;
462
463 if (!n1->base_addr || !n2->base_addr
464 || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
465 return NULL;
466
467 if (!n1->offset != !n2->offset
468 || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
469 return NULL;
470
471 start1 = 0;
472 if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
473 return NULL;
474
475 if (start1 < start2)
476 {
477 n_start = n1;
478 start_sub = start2 - start1;
479 }
480 else
481 {
482 n_start = n2;
483 start_sub = start1 - start2;
484 }
485
486 bb1 = gimple_bb (source_stmt1);
487 bb2 = gimple_bb (source_stmt2);
488 if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
489 source_stmt = source_stmt1;
490 else
491 source_stmt = source_stmt2;
492
493 /* Find the highest address at which a load is performed and
494 compute related info. */
495 end1 = start1 + (n1->range - 1);
496 end2 = start2 + (n2->range - 1);
497 if (end1 < end2)
498 {
499 end = end2;
500 end_sub = end2 - end1;
501 }
502 else
503 {
504 end = end1;
505 end_sub = end1 - end2;
506 }
507 n_end = (end2 > end1) ? n2 : n1;
508
509 /* Find symbolic number whose lsb is the most significant. */
510 if (BYTES_BIG_ENDIAN)
511 toinc_n_ptr = (n_end == n1) ? n2 : n1;
512 else
513 toinc_n_ptr = (n_start == n1) ? n2 : n1;
514
515 n->range = end - MIN (start1, start2) + 1;
516
517 /* Check that the range of memory covered can be represented by
518 a symbolic number. */
519 if (n->range > 64 / BITS_PER_MARKER)
520 return NULL;
521
522 /* Reinterpret byte marks in symbolic number holding the value of
523 bigger weight according to target endianness. */
524 inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
525 size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
526 for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
527 {
528 unsigned marker
529 = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
530 if (marker && marker != MARKER_BYTE_UNKNOWN)
531 toinc_n_ptr->n += inc;
532 }
533 }
534 else
535 {
536 n->range = n1->range;
537 n_start = n1;
538 source_stmt = source_stmt1;
539 }
540
541 if (!n1->alias_set
542 || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
543 n->alias_set = n1->alias_set;
544 else
545 n->alias_set = ptr_type_node;
546 n->vuse = n_start->vuse;
547 n->base_addr = n_start->base_addr;
548 n->offset = n_start->offset;
549 n->src = n_start->src;
550 n->bytepos = n_start->bytepos;
551 n->type = n_start->type;
552 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
553
554 for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
555 {
556 uint64_t masked1, masked2;
557
558 masked1 = n1->n & mask;
559 masked2 = n2->n & mask;
560 if (masked1 && masked2 && masked1 != masked2)
561 return NULL;
562 }
563 n->n = n1->n | n2->n;
564 n->n_ops = n1->n_ops + n2->n_ops;
565
566 return source_stmt;
567 }
568
569 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
570 the operation given by the rhs of STMT on the result. If the operation
571 could be executed successfully, the function returns a gimple stmt whose
572 rhs's first tree is the expression of the source operand, and NULL
573 otherwise. */
574
575 gimple *
576 find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
577 {
578 enum tree_code code;
579 tree rhs1, rhs2 = NULL;
580 gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
581 enum gimple_rhs_class rhs_class;
582
583 if (!limit || !is_gimple_assign (stmt))
584 return NULL;
585
586 rhs1 = gimple_assign_rhs1 (stmt);
587
588 if (find_bswap_or_nop_load (stmt, rhs1, n))
589 return stmt;
590
591 /* Handle BIT_FIELD_REF. */
592 if (TREE_CODE (rhs1) == BIT_FIELD_REF
593 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
594 {
595 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
596 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
597 if (bitpos % BITS_PER_UNIT == 0
598 && bitsize % BITS_PER_UNIT == 0
599 && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
600 {
601 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
602 if (BYTES_BIG_ENDIAN)
603 bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;
604
605 /* Shift. */
606 if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
607 return NULL;
608
609 /* Mask. */
610 uint64_t mask = 0;
611 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
612 for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
613 i++, tmp <<= BITS_PER_UNIT)
614 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
615 n->n &= mask;
616
617 /* Convert. */
618 n->type = TREE_TYPE (rhs1);
619 if (!n->base_addr)
620 n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
621
622 return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
623 }
624
625 return NULL;
626 }
627
628 if (TREE_CODE (rhs1) != SSA_NAME)
629 return NULL;
630
631 code = gimple_assign_rhs_code (stmt);
632 rhs_class = gimple_assign_rhs_class (stmt);
633 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
634
635 if (rhs_class == GIMPLE_BINARY_RHS)
636 rhs2 = gimple_assign_rhs2 (stmt);
637
638 /* Handle unary rhs and binary rhs with integer constants as second
639 operand. */
640
641 if (rhs_class == GIMPLE_UNARY_RHS
642 || (rhs_class == GIMPLE_BINARY_RHS
643 && TREE_CODE (rhs2) == INTEGER_CST))
644 {
645 if (code != BIT_AND_EXPR
646 && code != LSHIFT_EXPR
647 && code != RSHIFT_EXPR
648 && code != LROTATE_EXPR
649 && code != RROTATE_EXPR
650 && !CONVERT_EXPR_CODE_P (code))
651 return NULL;
652
653 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
654
655 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
656 we have to initialize the symbolic number. */
657 if (!source_stmt1)
658 {
659 if (gimple_assign_load_p (stmt)
660 || !init_symbolic_number (n, rhs1))
661 return NULL;
662 source_stmt1 = stmt;
663 }
664
665 switch (code)
666 {
667 case BIT_AND_EXPR:
668 {
669 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
670 uint64_t val = int_cst_value (rhs2), mask = 0;
671 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
672
673 /* Only constants masking full bytes are allowed. */
674 for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
675 if ((val & tmp) != 0 && (val & tmp) != tmp)
676 return NULL;
677 else if (val & tmp)
678 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
679
680 n->n &= mask;
681 }
682 break;
683 case LSHIFT_EXPR:
684 case RSHIFT_EXPR:
685 case LROTATE_EXPR:
686 case RROTATE_EXPR:
687 if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
688 return NULL;
689 break;
690 CASE_CONVERT:
691 {
692 int i, type_size, old_type_size;
693 tree type;
694
695 type = gimple_expr_type (stmt);
696 type_size = TYPE_PRECISION (type);
697 if (type_size % BITS_PER_UNIT != 0)
698 return NULL;
699 type_size /= BITS_PER_UNIT;
700 if (type_size > 64 / BITS_PER_MARKER)
701 return NULL;
702
703 /* Sign extension: result is dependent on the value. */
704 old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
705 if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
706 && HEAD_MARKER (n->n, old_type_size))
707 for (i = 0; i < type_size - old_type_size; i++)
708 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
709 << ((type_size - 1 - i) * BITS_PER_MARKER);
710
711 if (type_size < 64 / BITS_PER_MARKER)
712 {
713 /* If STMT casts to a smaller type mask out the bits not
714 belonging to the target type. */
715 n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
716 }
717 n->type = type;
718 if (!n->base_addr)
719 n->range = type_size;
720 }
721 break;
722 default:
723 return NULL;
724 };
725 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
726 }
727
728 /* Handle binary rhs. */
729
730 if (rhs_class == GIMPLE_BINARY_RHS)
731 {
732 struct symbolic_number n1, n2;
733 gimple *source_stmt, *source_stmt2;
734
735 if (code != BIT_IOR_EXPR)
736 return NULL;
737
738 if (TREE_CODE (rhs2) != SSA_NAME)
739 return NULL;
740
741 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
742
743 switch (code)
744 {
745 case BIT_IOR_EXPR:
746 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
747
748 if (!source_stmt1)
749 return NULL;
750
751 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
752
753 if (!source_stmt2)
754 return NULL;
755
756 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
757 return NULL;
758
759 if (n1.vuse != n2.vuse)
760 return NULL;
761
762 source_stmt
763 = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
764
765 if (!source_stmt)
766 return NULL;
767
768 if (!verify_symbolic_number_p (n, stmt))
769 return NULL;
770
771 break;
772 default:
773 return NULL;
774 }
775 return source_stmt;
776 }
777 return NULL;
778 }
779
780 /* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
781 *CMPXCHG, *CMPNOP and adjust *N. */
782
783 void
784 find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
785 uint64_t *cmpnop)
786 {
787 unsigned rsize;
788 uint64_t tmpn, mask;
789
790 /* The number which the find_bswap_or_nop_1 result should match in order
791 to have a full byte swap. The number is shifted to the right
792 according to the size of the symbolic number before using it. */
793 *cmpxchg = CMPXCHG;
794 *cmpnop = CMPNOP;
795
796 /* Find real size of result (highest non-zero byte). */
797 if (n->base_addr)
798 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
799 else
800 rsize = n->range;
801
802 /* Zero out the bits corresponding to untouched bytes in original gimple
803 expression. */
804 if (n->range < (int) sizeof (int64_t))
805 {
806 mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
807 *cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
808 *cmpnop &= mask;
809 }
810
811 /* Zero out the bits corresponding to unused bytes in the result of the
812 gimple expression. */
813 if (rsize < n->range)
814 {
815 if (BYTES_BIG_ENDIAN)
816 {
817 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
818 *cmpxchg &= mask;
819 *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
820 }
821 else
822 {
823 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
824 *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
825 *cmpnop &= mask;
826 }
827 n->range = rsize;
828 }
829
830 n->range *= BITS_PER_UNIT;
831 }
832
833 /* Check if STMT completes a bswap implementation or a read in a given
834 endianness consisting of ORs, SHIFTs and ANDs, and set *BSWAP
835 accordingly. It also sets N to represent the kind of operations
836 performed: size of the resulting expression and whether it works on
837 a memory source, and if so its alias-set and vuse. Finally, the
838 function returns a stmt whose rhs's first tree is the source
839 expression. */
840
841 gimple *
842 find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
843 {
844 /* The last parameter determines the depth search limit. It usually
845 correlates directly to the number n of bytes to be touched. We
846 increase that number by log2(n) + 1 here in order to also
847 cover signed -> unsigned conversions of the src operand as can be seen
848 in libgcc, and for initial shift/and operation of the src operand. */
849 int limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
850 limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
851 gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);
852
853 if (!ins_stmt)
854 return NULL;
855
856 uint64_t cmpxchg, cmpnop;
857 find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop);
858
859 /* A complete byte swap should make the symbolic number start with
860 the largest digit in the highest order byte. An unchanged symbolic
861 number indicates a read with the same endianness as the target architecture. */
862 if (n->n == cmpnop)
863 *bswap = false;
864 else if (n->n == cmpxchg)
865 *bswap = true;
866 else
867 return NULL;
868
869 /* Useless bit manipulation performed by code. */
870 if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
871 return NULL;
872
873 return ins_stmt;
874 }
875
876 const pass_data pass_data_optimize_bswap =
877 {
878 GIMPLE_PASS, /* type */
879 "bswap", /* name */
880 OPTGROUP_NONE, /* optinfo_flags */
881 TV_NONE, /* tv_id */
882 PROP_ssa, /* properties_required */
883 0, /* properties_provided */
884 0, /* properties_destroyed */
885 0, /* todo_flags_start */
886 0, /* todo_flags_finish */
887 };
888
889 class pass_optimize_bswap : public gimple_opt_pass
890 {
891 public:
892 pass_optimize_bswap (gcc::context *ctxt)
893 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
894 {}
895
896 /* opt_pass methods: */
897 virtual bool gate (function *)
898 {
899 return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
900 }
901
902 virtual unsigned int execute (function *);
903
904 }; // class pass_optimize_bswap
905
906 /* Perform the bswap optimization: replace the expression computed in the rhs
907 of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
908 bswap, load or load + bswap expression.
909 Which of these alternatives replaces the rhs is given by N->base_addr (non
910 null if a load is needed) and BSWAP. The type, VUSE and alias set of the
911 load to perform are also given in N while the builtin bswap invocation is
912 given in FNDECL. Finally, if a load is involved, INS_STMT refers to one of
913 the load statements involved to construct the rhs in gsi_stmt (GSI) and
914 N->range gives the size of the rhs expression for maintaining some
915 statistics.
916
917 Note that if the replacement involves a load and if gsi_stmt (GSI) is
918 non-NULL, that stmt is moved just after INS_STMT to do the load with the
919 same VUSE, which can lead to gsi_stmt (GSI) changing basic block. */
920
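As an illustrative sketch of the memory-sourced case (an assumed example, not
from the GCC sources), a hand-assembled big-endian read whose rhs this function
can rewrite into a single 32-bit load on big-endian targets, or into a load
followed by __builtin_bswap32 on little-endian ones:

#include <stdint.h>

uint32_t
read_be32 (const unsigned char *q)
{
  /* Byte-wise big-endian read; find_bswap_or_nop recognizes the
     pattern and bswap_replace emits the load (plus bswap if needed).  */
  return ((uint32_t) q[0] << 24) | ((uint32_t) q[1] << 16)
         | ((uint32_t) q[2] << 8) | (uint32_t) q[3];
}
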
921 tree
922 bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
923 tree bswap_type, tree load_type, struct symbolic_number *n,
924 bool bswap)
925 {
926 tree src, tmp, tgt = NULL_TREE;
927 gimple *bswap_stmt;
928
929 gimple *cur_stmt = gsi_stmt (gsi);
930 src = n->src;
931 if (cur_stmt)
932 tgt = gimple_assign_lhs (cur_stmt);
933
934 /* Need to load the value from memory first. */
935 if (n->base_addr)
936 {
937 gimple_stmt_iterator gsi_ins = gsi;
938 if (ins_stmt)
939 gsi_ins = gsi_for_stmt (ins_stmt);
940 tree addr_expr, addr_tmp, val_expr, val_tmp;
941 tree load_offset_ptr, aligned_load_type;
942 gimple *load_stmt;
943 unsigned align = get_object_alignment (src);
944 poly_int64 load_offset = 0;
945
946 if (cur_stmt)
947 {
948 basic_block ins_bb = gimple_bb (ins_stmt);
949 basic_block cur_bb = gimple_bb (cur_stmt);
950 if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
951 return NULL_TREE;
952
953 /* Move cur_stmt just before one of the loads of the original
954 to ensure it has the same VUSE. See PR61517 for what could
955 go wrong. */
956 if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
957 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
958 gsi_move_before (&gsi, &gsi_ins);
959 gsi = gsi_for_stmt (cur_stmt);
960 }
961 else
962 gsi = gsi_ins;
963
964 /* Compute address to load from and cast according to the size
965 of the load. */
966 addr_expr = build_fold_addr_expr (src);
967 if (is_gimple_mem_ref_addr (addr_expr))
968 addr_tmp = unshare_expr (addr_expr);
969 else
970 {
971 addr_tmp = unshare_expr (n->base_addr);
972 if (!is_gimple_mem_ref_addr (addr_tmp))
973 addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
974 is_gimple_mem_ref_addr,
975 NULL_TREE, true,
976 GSI_SAME_STMT);
977 load_offset = n->bytepos;
978 if (n->offset)
979 {
980 tree off
981 = force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
982 true, NULL_TREE, true,
983 GSI_SAME_STMT);
984 gimple *stmt
985 = gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
986 POINTER_PLUS_EXPR, addr_tmp, off);
987 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
988 addr_tmp = gimple_assign_lhs (stmt);
989 }
990 }
991
992 /* Perform the load. */
993 aligned_load_type = load_type;
994 if (align < TYPE_ALIGN (load_type))
995 aligned_load_type = build_aligned_type (load_type, align);
996 load_offset_ptr = build_int_cst (n->alias_set, load_offset);
997 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
998 load_offset_ptr);
999
1000 if (!bswap)
1001 {
1002 if (n->range == 16)
1003 nop_stats.found_16bit++;
1004 else if (n->range == 32)
1005 nop_stats.found_32bit++;
1006 else
1007 {
1008 gcc_assert (n->range == 64);
1009 nop_stats.found_64bit++;
1010 }
1011
1012 /* Convert the result of load if necessary. */
1013 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
1014 {
1015 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
1016 "load_dst");
1017 load_stmt = gimple_build_assign (val_tmp, val_expr);
1018 gimple_set_vuse (load_stmt, n->vuse);
1019 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
1020 gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
1021 update_stmt (cur_stmt);
1022 }
1023 else if (cur_stmt)
1024 {
1025 gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
1026 gimple_set_vuse (cur_stmt, n->vuse);
1027 update_stmt (cur_stmt);
1028 }
1029 else
1030 {
1031 tgt = make_ssa_name (load_type);
1032 cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
1033 gimple_set_vuse (cur_stmt, n->vuse);
1034 gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
1035 }
1036
1037 if (dump_file)
1038 {
1039 fprintf (dump_file,
1040 "%d bit load in target endianness found at: ",
1041 (int) n->range);
1042 print_gimple_stmt (dump_file, cur_stmt, 0);
1043 }
1044 return tgt;
1045 }
1046 else
1047 {
1048 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
1049 load_stmt = gimple_build_assign (val_tmp, val_expr);
1050 gimple_set_vuse (load_stmt, n->vuse);
1051 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
1052 }
1053 src = val_tmp;
1054 }
1055 else if (!bswap)
1056 {
1057 gimple *g = NULL;
1058 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
1059 {
1060 if (!is_gimple_val (src))
1061 return NULL_TREE;
1062 g = gimple_build_assign (tgt, NOP_EXPR, src);
1063 }
1064 else if (cur_stmt)
1065 g = gimple_build_assign (tgt, src);
1066 else
1067 tgt = src;
1068 if (n->range == 16)
1069 nop_stats.found_16bit++;
1070 else if (n->range == 32)
1071 nop_stats.found_32bit++;
1072 else
1073 {
1074 gcc_assert (n->range == 64);
1075 nop_stats.found_64bit++;
1076 }
1077 if (dump_file)
1078 {
1079 fprintf (dump_file,
1080 "%d bit reshuffle in target endianness found at: ",
1081 (int) n->range);
1082 if (cur_stmt)
1083 print_gimple_stmt (dump_file, cur_stmt, 0);
1084 else
1085 {
1086 print_generic_expr (dump_file, tgt, TDF_NONE);
1087 fprintf (dump_file, "\n");
1088 }
1089 }
1090 if (cur_stmt)
1091 gsi_replace (&gsi, g, true);
1092 return tgt;
1093 }
1094 else if (TREE_CODE (src) == BIT_FIELD_REF)
1095 src = TREE_OPERAND (src, 0);
1096
1097 if (n->range == 16)
1098 bswap_stats.found_16bit++;
1099 else if (n->range == 32)
1100 bswap_stats.found_32bit++;
1101 else
1102 {
1103 gcc_assert (n->range == 64);
1104 bswap_stats.found_64bit++;
1105 }
1106
1107 tmp = src;
1108
1109 /* Convert the src expression if necessary. */
1110 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
1111 {
1112 gimple *convert_stmt;
1113
1114 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
1115 convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
1116 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
1117 }
1118
1119 /* Canonical form for a 16-bit bswap is a rotate expression. Only 16-bit values
1120 are considered, as rotation of 2N-bit values by N bits is generally not
1121 equivalent to a bswap. Consider for instance 0x01020304 r>> 16 which
1122 gives 0x03040102 while a bswap for that value is 0x04030201. */
1123 if (bswap && n->range == 16)
1124 {
1125 tree count = build_int_cst (NULL, BITS_PER_UNIT);
1126 src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
1127 bswap_stmt = gimple_build_assign (NULL, src);
1128 }
1129 else
1130 bswap_stmt = gimple_build_call (fndecl, 1, tmp);
1131
1132 if (tgt == NULL_TREE)
1133 tgt = make_ssa_name (bswap_type);
1134 tmp = tgt;
1135
1136 /* Convert the result if necessary. */
1137 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
1138 {
1139 gimple *convert_stmt;
1140
1141 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
1142 convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
1143 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
1144 }
1145
1146 gimple_set_lhs (bswap_stmt, tmp);
1147
1148 if (dump_file)
1149 {
1150 fprintf (dump_file, "%d bit bswap implementation found at: ",
1151 (int) n->range);
1152 if (cur_stmt)
1153 print_gimple_stmt (dump_file, cur_stmt, 0);
1154 else
1155 {
1156 print_generic_expr (dump_file, tgt, TDF_NONE);
1157 fprintf (dump_file, "\n");
1158 }
1159 }
1160
1161 if (cur_stmt)
1162 {
1163 gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
1164 gsi_remove (&gsi, true);
1165 }
1166 else
1167 gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
1168 return tgt;
1169 }
1170
1171 /* Find manual byte swap implementations as well as loads in a given
1172 endianness. Byte swaps are turned into a bswap builtin invocation
1173 while endian loads are converted to a bswap builtin invocation or a
1174 simple load according to the target endianness. */
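For illustration (an assumed example, not from the GCC sources), a hand-written
byte swap of a register value that this pass can replace with a single
__builtin_bswap32 call when the target provides a bswap pattern:

#include <stdint.h>

uint32_t
swap32 (uint32_t x)
{
  /* Shift-and-mask byte swap; it is detected through the symbolic
     number machinery even though no memory source is involved.  */
  return ((x & 0x000000ffU) << 24) | ((x & 0x0000ff00U) << 8)
         | ((x & 0x00ff0000U) >> 8) | ((x & 0xff000000U) >> 24);
}
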
1175
1176 unsigned int
1177 pass_optimize_bswap::execute (function *fun)
1178 {
1179 basic_block bb;
1180 bool bswap32_p, bswap64_p;
1181 bool changed = false;
1182 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
1183
1184 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1185 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
1186 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
1187 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1188 || (bswap32_p && word_mode == SImode)));
1189
1190 /* Determine the argument type of the builtins. The code later on
1191 assumes that the return and argument type are the same. */
1192 if (bswap32_p)
1193 {
1194 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1195 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1196 }
1197
1198 if (bswap64_p)
1199 {
1200 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1201 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1202 }
1203
1204 memset (&nop_stats, 0, sizeof (nop_stats));
1205 memset (&bswap_stats, 0, sizeof (bswap_stats));
1206 calculate_dominance_info (CDI_DOMINATORS);
1207
1208 FOR_EACH_BB_FN (bb, fun)
1209 {
1210 gimple_stmt_iterator gsi;
1211
1212 /* We do a reverse scan for bswap patterns to make sure we get the
1213 widest match. As bswap pattern matching doesn't handle previously
1214 inserted smaller bswap replacements as sub-patterns, the wider
1215 variant wouldn't be detected. */
1216 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
1217 {
1218 gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
1219 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
1220 enum tree_code code;
1221 struct symbolic_number n;
1222 bool bswap;
1223
1224 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
1225 might be moved to a different basic block by bswap_replace and gsi
1226 must not point to it if that's the case. Moving the gsi_prev
1227 there makes sure that gsi points to the statement previous to
1228 cur_stmt while still making sure that all statements are
1229 considered in this basic block. */
1230 gsi_prev (&gsi);
1231
1232 if (!is_gimple_assign (cur_stmt))
1233 continue;
1234
1235 code = gimple_assign_rhs_code (cur_stmt);
1236 switch (code)
1237 {
1238 case LROTATE_EXPR:
1239 case RROTATE_EXPR:
1240 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
1241 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
1242 % BITS_PER_UNIT)
1243 continue;
1244 /* Fall through. */
1245 case BIT_IOR_EXPR:
1246 break;
1247 default:
1248 continue;
1249 }
1250
1251 ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
1252
1253 if (!ins_stmt)
1254 continue;
1255
1256 switch (n.range)
1257 {
1258 case 16:
1259 /* Already in canonical form, nothing to do. */
1260 if (code == LROTATE_EXPR || code == RROTATE_EXPR)
1261 continue;
1262 load_type = bswap_type = uint16_type_node;
1263 break;
1264 case 32:
1265 load_type = uint32_type_node;
1266 if (bswap32_p)
1267 {
1268 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1269 bswap_type = bswap32_type;
1270 }
1271 break;
1272 case 64:
1273 load_type = uint64_type_node;
1274 if (bswap64_p)
1275 {
1276 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1277 bswap_type = bswap64_type;
1278 }
1279 break;
1280 default:
1281 continue;
1282 }
1283
1284 if (bswap && !fndecl && n.range != 16)
1285 continue;
1286
1287 if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
1288 bswap_type, load_type, &n, bswap))
1289 changed = true;
1290 }
1291 }
1292
1293 statistics_counter_event (fun, "16-bit nop implementations found",
1294 nop_stats.found_16bit);
1295 statistics_counter_event (fun, "32-bit nop implementations found",
1296 nop_stats.found_32bit);
1297 statistics_counter_event (fun, "64-bit nop implementations found",
1298 nop_stats.found_64bit);
1299 statistics_counter_event (fun, "16-bit bswap implementations found",
1300 bswap_stats.found_16bit);
1301 statistics_counter_event (fun, "32-bit bswap implementations found",
1302 bswap_stats.found_32bit);
1303 statistics_counter_event (fun, "64-bit bswap implementations found",
1304 bswap_stats.found_64bit);
1305
1306 return (changed ? TODO_update_ssa : 0);
1307 }
1308
1309 } // anon namespace
1310
1311 gimple_opt_pass *
1312 make_pass_optimize_bswap (gcc::context *ctxt)
1313 {
1314 return new pass_optimize_bswap (ctxt);
1315 }
1316
1317 namespace {
1318
1319 /* Struct recording one operand for the store, which is either a constant,
1320 in which case VAL represents the constant and all other fields are zero;
1321 or a memory load, in which case VAL represents the reference, BASE_ADDR is
1322 non-NULL and the other fields also describe the memory load; or an SSA
1323 name, in which case VAL represents the SSA name and all other fields are zero. */
1324
1325 struct store_operand_info
1326 {
1327 tree val;
1328 tree base_addr;
1329 poly_uint64 bitsize;
1330 poly_uint64 bitpos;
1331 poly_uint64 bitregion_start;
1332 poly_uint64 bitregion_end;
1333 gimple *stmt;
1334 bool bit_not_p;
1335 store_operand_info ();
1336 };
1337
1338 store_operand_info::store_operand_info ()
1339 : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
1340 bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
1341 {
1342 }
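A sketch of how two operands arise in practice (an assumed example, not taken
from the GCC sources): in a byte-wise XOR-with-constant copy, each recorded
store has a BIT_XOR_EXPR rhs_code, with one operand describing the load from
src and the other the constant:

void
xor_copy4 (unsigned char *dst, const unsigned char *src)
{
  /* If dst and src do not overlap, the whole group can be widened
     into one 32-bit load, XOR and store.  */
  dst[0] = src[0] ^ 0x5a;
  dst[1] = src[1] ^ 0x5a;
  dst[2] = src[2] ^ 0x5a;
  dst[3] = src[3] ^ 0x5a;
}
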
136 1343
137 /* Struct recording the information about a single store of an immediate 1344 /* Struct recording the information about a single store of an immediate
138 to memory. These are created in the first phase and coalesced into 1345 to memory. These are created in the first phase and coalesced into
139 merged_store_group objects in the second phase. */ 1346 merged_store_group objects in the second phase. */
140 1347
141 struct store_immediate_info 1348 struct store_immediate_info
142 { 1349 {
143 unsigned HOST_WIDE_INT bitsize; 1350 unsigned HOST_WIDE_INT bitsize;
144 unsigned HOST_WIDE_INT bitpos; 1351 unsigned HOST_WIDE_INT bitpos;
1352 unsigned HOST_WIDE_INT bitregion_start;
1353 /* This is one past the last bit of the bit region. */
1354 unsigned HOST_WIDE_INT bitregion_end;
145 gimple *stmt; 1355 gimple *stmt;
146 unsigned int order; 1356 unsigned int order;
1357 /* INTEGER_CST for constant stores, MEM_REF for memory copy,
1358 BIT_*_EXPR for logical bitwise operation, BIT_INSERT_EXPR
1359 for bit insertion.
1360 LROTATE_EXPR if it can only be bswap optimized and
1361 ops are not really meaningful.
1362 NOP_EXPR if bswap optimization detected identity, ops
1363 are not meaningful. */
1364 enum tree_code rhs_code;
1365 /* Two fields for bswap optimization purposes. */
1366 struct symbolic_number n;
1367 gimple *ins_stmt;
1368 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
1369 bool bit_not_p;
1370 /* True if ops have been swapped and thus ops[1] represents
1371 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
1372 bool ops_swapped_p;
1373 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
1374 just the first one. */
1375 store_operand_info ops[2];
147 store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, 1376 store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
148 gimple *, unsigned int); 1377 unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1378 gimple *, unsigned int, enum tree_code,
1379 struct symbolic_number &, gimple *, bool,
1380 const store_operand_info &,
1381 const store_operand_info &);
149 }; 1382 };
150 1383
151 store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs, 1384 store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
152 unsigned HOST_WIDE_INT bp, 1385 unsigned HOST_WIDE_INT bp,
1386 unsigned HOST_WIDE_INT brs,
1387 unsigned HOST_WIDE_INT bre,
153 gimple *st, 1388 gimple *st,
154 unsigned int ord) 1389 unsigned int ord,
155 : bitsize (bs), bitpos (bp), stmt (st), order (ord) 1390 enum tree_code rhscode,
156 { 1391 struct symbolic_number &nr,
157 } 1392 gimple *ins_stmtp,
1393 bool bitnotp,
1394 const store_operand_info &op0r,
1395 const store_operand_info &op1r)
1396 : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
1397 stmt (st), order (ord), rhs_code (rhscode), n (nr),
1398 ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false)
1399 #if __cplusplus >= 201103L
1400 , ops { op0r, op1r }
1401 {
1402 }
1403 #else
1404 {
1405 ops[0] = op0r;
1406 ops[1] = op1r;
1407 }
1408 #endif
158 1409
159 /* Struct representing a group of stores to contiguous memory locations. 1410 /* Struct representing a group of stores to contiguous memory locations.
160 These are produced by the second phase (coalescing) and consumed in the 1411 These are produced by the second phase (coalescing) and consumed in the
161 third phase that outputs the widened stores. */ 1412 third phase that outputs the widened stores. */
162 1413
163 struct merged_store_group 1414 struct merged_store_group
164 { 1415 {
165 unsigned HOST_WIDE_INT start; 1416 unsigned HOST_WIDE_INT start;
166 unsigned HOST_WIDE_INT width; 1417 unsigned HOST_WIDE_INT width;
167 /* The size of the allocated memory for val. */ 1418 unsigned HOST_WIDE_INT bitregion_start;
1419 unsigned HOST_WIDE_INT bitregion_end;
1420 /* The size of the allocated memory for val and mask. */
168 unsigned HOST_WIDE_INT buf_size; 1421 unsigned HOST_WIDE_INT buf_size;
1422 unsigned HOST_WIDE_INT align_base;
1423 poly_uint64 load_align_base[2];
169 1424
170 unsigned int align; 1425 unsigned int align;
1426 unsigned int load_align[2];
171 unsigned int first_order; 1427 unsigned int first_order;
172 unsigned int last_order; 1428 unsigned int last_order;
173 1429 bool bit_insertion;
174 auto_vec<struct store_immediate_info *> stores; 1430
1431 auto_vec<store_immediate_info *> stores;
175 /* We record the first and last original statements in the sequence because 1432 /* We record the first and last original statements in the sequence because
176 we'll need their vuse/vdef and replacement position. It's easier to keep 1433 we'll need their vuse/vdef and replacement position. It's easier to keep
177 track of them separately as 'stores' is reordered by apply_stores. */ 1434 track of them separately as 'stores' is reordered by apply_stores. */
178 gimple *last_stmt; 1435 gimple *last_stmt;
179 gimple *first_stmt; 1436 gimple *first_stmt;
180 unsigned char *val; 1437 unsigned char *val;
1438 unsigned char *mask;
181 1439
182 merged_store_group (store_immediate_info *); 1440 merged_store_group (store_immediate_info *);
183 ~merged_store_group (); 1441 ~merged_store_group ();
1442 bool can_be_merged_into (store_immediate_info *);
184 void merge_into (store_immediate_info *); 1443 void merge_into (store_immediate_info *);
185 void merge_overlapping (store_immediate_info *); 1444 void merge_overlapping (store_immediate_info *);
186 bool apply_stores (); 1445 bool apply_stores ();
1446 private:
1447 void do_merge (store_immediate_info *);
187 }; 1448 };
188 1449
189 /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */ 1450 /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
190 1451
191 static void 1452 static void
193 { 1454 {
194 if (!fd) 1455 if (!fd)
195 return; 1456 return;
196 1457
197 for (unsigned int i = 0; i < len; i++) 1458 for (unsigned int i = 0; i < len; i++)
198 fprintf (fd, "%x ", ptr[i]); 1459 fprintf (fd, "%02x ", ptr[i]);
199 fprintf (fd, "\n"); 1460 fprintf (fd, "\n");
200 } 1461 }
201 1462
202 /* Shift left the bytes in PTR of SZ elements by AMNT bits, carrying over the 1463 /* Shift left the bytes in PTR of SZ elements by AMNT bits, carrying over the
203 bits between adjacent elements. AMNT should be within 1464 bits between adjacent elements. AMNT should be within
285 } 1546 }
286 else if (start == BITS_PER_UNIT - 1 1547 else if (start == BITS_PER_UNIT - 1
287 && len > BITS_PER_UNIT) 1548 && len > BITS_PER_UNIT)
288 { 1549 {
289 unsigned int nbytes = len / BITS_PER_UNIT; 1550 unsigned int nbytes = len / BITS_PER_UNIT;
290 for (unsigned int i = 0; i < nbytes; i++) 1551 memset (ptr, 0, nbytes);
291 ptr[i] = 0U;
292 if (len % BITS_PER_UNIT != 0) 1552 if (len % BITS_PER_UNIT != 0)
293 clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1, 1553 clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
294 len % BITS_PER_UNIT); 1554 len % BITS_PER_UNIT);
295 } 1555 }
296 else 1556 else
399 Finally we ORR the bytes of the shifted EXPR into the cleared region: 1659 Finally we ORR the bytes of the shifted EXPR into the cleared region:
400 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|. 1660 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
401 The awkwardness comes from the fact that bitpos is counted from the 1661 The awkwardness comes from the fact that bitpos is counted from the
402 most significant bit of a byte. */ 1662 most significant bit of a byte. */
403 1663
1664 /* We must be dealing with fixed-size data at this point, since the
1665 total size is also fixed. */
1666 fixed_size_mode mode = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
404 /* Allocate an extra byte so that we have space to shift into. */ 1667 /* Allocate an extra byte so that we have space to shift into. */
405 unsigned int byte_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (expr))) + 1; 1668 unsigned int byte_size = GET_MODE_SIZE (mode) + 1;
406 unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size); 1669 unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
407 memset (tmpbuf, '\0', byte_size); 1670 memset (tmpbuf, '\0', byte_size);
408 /* The store detection code should only have allowed constants that are 1671 /* The store detection code should only have allowed constants that are
409 accepted by native_encode_expr. */ 1672 accepted by native_encode_expr. */
410 if (native_encode_expr (expr, tmpbuf, byte_size - 1) == 0) 1673 if (native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
547 1810
548 merged_store_group::merged_store_group (store_immediate_info *info) 1811 merged_store_group::merged_store_group (store_immediate_info *info)
549 { 1812 {
550 start = info->bitpos; 1813 start = info->bitpos;
551 width = info->bitsize; 1814 width = info->bitsize;
1815 bitregion_start = info->bitregion_start;
1816 bitregion_end = info->bitregion_end;
552 /* VAL has memory allocated for it in apply_stores once the group 1817 /* VAL has memory allocated for it in apply_stores once the group
553 width has been finalized. */ 1818 width has been finalized. */
554 val = NULL; 1819 val = NULL;
555 align = get_object_alignment (gimple_assign_lhs (info->stmt)); 1820 mask = NULL;
1821 bit_insertion = false;
1822 unsigned HOST_WIDE_INT align_bitpos = 0;
1823 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
1824 &align, &align_bitpos);
1825 align_base = start - align_bitpos;
1826 for (int i = 0; i < 2; ++i)
1827 {
1828 store_operand_info &op = info->ops[i];
1829 if (op.base_addr == NULL_TREE)
1830 {
1831 load_align[i] = 0;
1832 load_align_base[i] = 0;
1833 }
1834 else
1835 {
1836 get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
1837 load_align_base[i] = op.bitpos - align_bitpos;
1838 }
1839 }
556 stores.create (1); 1840 stores.create (1);
557 stores.safe_push (info); 1841 stores.safe_push (info);
558 last_stmt = info->stmt; 1842 last_stmt = info->stmt;
559 last_order = info->order; 1843 last_order = info->order;
560 first_stmt = last_stmt; 1844 first_stmt = last_stmt;
566 { 1850 {
567 if (val) 1851 if (val)
568 XDELETEVEC (val); 1852 XDELETEVEC (val);
569 } 1853 }
570 1854
1855 /* Return true if the store described by INFO can be merged into the group. */
1856
1857 bool
1858 merged_store_group::can_be_merged_into (store_immediate_info *info)
1859 {
1860 /* Do not merge bswap patterns. */
1861 if (info->rhs_code == LROTATE_EXPR)
1862 return false;
1863
1864 /* The canonical case. */
1865 if (info->rhs_code == stores[0]->rhs_code)
1866 return true;
1867
1868 /* BIT_INSERT_EXPR is compatible with INTEGER_CST. */
1869 if (info->rhs_code == BIT_INSERT_EXPR && stores[0]->rhs_code == INTEGER_CST)
1870 return true;
1871
1872 if (stores[0]->rhs_code == BIT_INSERT_EXPR && info->rhs_code == INTEGER_CST)
1873 return true;
1874
1875 /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
1876 if (info->rhs_code == MEM_REF
1877 && (stores[0]->rhs_code == INTEGER_CST
1878 || stores[0]->rhs_code == BIT_INSERT_EXPR)
1879 && info->bitregion_start == stores[0]->bitregion_start
1880 && info->bitregion_end == stores[0]->bitregion_end)
1881 return true;
1882
1883 if (stores[0]->rhs_code == MEM_REF
1884 && (info->rhs_code == INTEGER_CST
1885 || info->rhs_code == BIT_INSERT_EXPR)
1886 && info->bitregion_start == stores[0]->bitregion_start
1887 && info->bitregion_end == stores[0]->bitregion_end)
1888 return true;
1889
1890 return false;
1891 }
1892
1893 /* Helper method for merge_into and merge_overlapping to do
1894 the common part. */
1895
1896 void
1897 merged_store_group::do_merge (store_immediate_info *info)
1898 {
1899 bitregion_start = MIN (bitregion_start, info->bitregion_start);
1900 bitregion_end = MAX (bitregion_end, info->bitregion_end);
1901
1902 unsigned int this_align;
1903 unsigned HOST_WIDE_INT align_bitpos = 0;
1904 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
1905 &this_align, &align_bitpos);
1906 if (this_align > align)
1907 {
1908 align = this_align;
1909 align_base = info->bitpos - align_bitpos;
1910 }
1911 for (int i = 0; i < 2; ++i)
1912 {
1913 store_operand_info &op = info->ops[i];
1914 if (!op.base_addr)
1915 continue;
1916
1917 get_object_alignment_1 (op.val, &this_align, &align_bitpos);
1918 if (this_align > load_align[i])
1919 {
1920 load_align[i] = this_align;
1921 load_align_base[i] = op.bitpos - align_bitpos;
1922 }
1923 }
1924
1925 gimple *stmt = info->stmt;
1926 stores.safe_push (info);
1927 if (info->order > last_order)
1928 {
1929 last_order = info->order;
1930 last_stmt = stmt;
1931 }
1932 else if (info->order < first_order)
1933 {
1934 first_order = info->order;
1935 first_stmt = stmt;
1936 }
1937 }
1938
571 /* Merge a store recorded by INFO into this merged store. 1939 /* Merge a store recorded by INFO into this merged store.
572 The store is not overlapping with the existing recorded 1940 The store is not overlapping with the existing recorded
573 stores. */ 1941 stores. */
574 1942
575 void 1943 void
576 merged_store_group::merge_into (store_immediate_info *info) 1944 merged_store_group::merge_into (store_immediate_info *info)
577 { 1945 {
578 unsigned HOST_WIDE_INT wid = info->bitsize;
579 /* Make sure we're inserting in the position we think we're inserting. */ 1946 /* Make sure we're inserting in the position we think we're inserting. */
580 gcc_assert (info->bitpos == start + width); 1947 gcc_assert (info->bitpos >= start + width
581 1948 && info->bitregion_start <= bitregion_end);
582 width += wid; 1949
583 gimple *stmt = info->stmt; 1950 width = info->bitpos + info->bitsize - start;
584 stores.safe_push (info); 1951 do_merge (info);
585 if (info->order > last_order)
586 {
587 last_order = info->order;
588 last_stmt = stmt;
589 }
590 else if (info->order < first_order)
591 {
592 first_order = info->order;
593 first_stmt = stmt;
594 }
595 } 1952 }
596 1953
597 /* Merge a store described by INFO into this merged store. 1954 /* Merge a store described by INFO into this merged store.
598 INFO overlaps in some way with the current store (i.e. it's not contiguous 1955 INFO overlaps in some way with the current store (i.e. it's not contiguous
599 which is handled by merged_store_group::merge_into). */ 1956 which is handled by merged_store_group::merge_into). */
600 1957
601 void 1958 void
602 merged_store_group::merge_overlapping (store_immediate_info *info) 1959 merged_store_group::merge_overlapping (store_immediate_info *info)
603 { 1960 {
604 gimple *stmt = info->stmt;
605 stores.safe_push (info);
606
607 /* If the store extends the size of the group, extend the width. */ 1961 /* If the store extends the size of the group, extend the width. */
608 if ((info->bitpos + info->bitsize) > (start + width)) 1962 if (info->bitpos + info->bitsize > start + width)
609 width += info->bitpos + info->bitsize - (start + width); 1963 width = info->bitpos + info->bitsize - start;
610 1964
611 if (info->order > last_order) 1965 do_merge (info);
612 {
613 last_order = info->order;
614 last_stmt = stmt;
615 }
616 else if (info->order < first_order)
617 {
618 first_order = info->order;
619 first_stmt = stmt;
620 }
621 } 1966 }
622 1967
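A worked example of the width update above, with hypothetical numbers: a group with start == 0 and width == 32 that absorbs an overlapping store with bitpos == 16 and bitsize == 32 grows to width 16 + 32 - 0 = 48 bits, while an overlapping store contained entirely inside the group leaves the width unchanged.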
623 /* Go through all the recorded stores in this group in program order and 1968 /* Go through all the recorded stores in this group in program order and
624 apply their values to the VAL byte array to create the final merged 1969 apply their values to the VAL byte array to create the final merged
625 value. Return true if the operation succeeded. */ 1970 value. Return true if the operation succeeded. */
626 1971
627 bool 1972 bool
628 merged_store_group::apply_stores () 1973 merged_store_group::apply_stores ()
629 { 1974 {
630 /* The total width of the stores must add up to a whole number of bytes 1975 /* Make sure we have more than one store in the group, otherwise we cannot
631 and start at a byte boundary. We don't support emitting bitfield 1976 merge anything. */
632 references for now. Also, make sure we have more than one store 1977 if (bitregion_start % BITS_PER_UNIT != 0
633 in the group, otherwise we cannot merge anything. */ 1978 || bitregion_end % BITS_PER_UNIT != 0
634 if (width % BITS_PER_UNIT != 0
635 || start % BITS_PER_UNIT != 0
636 || stores.length () == 1) 1979 || stores.length () == 1)
637 return false; 1980 return false;
638 1981
639 stores.qsort (sort_by_order); 1982 stores.qsort (sort_by_order);
640 struct store_immediate_info *info; 1983 store_immediate_info *info;
641 unsigned int i; 1984 unsigned int i;
642 /* Create a buffer of a size that is 2 times the number of bytes we're 1985 /* Create a power-of-2-sized buffer for native_encode_expr. */
643 storing. That way native_encode_expr can write power-of-2-sized 1986 buf_size = 1 << ceil_log2 ((bitregion_end - bitregion_start) / BITS_PER_UNIT);
644 chunks without overrunning. */ 1987 val = XNEWVEC (unsigned char, 2 * buf_size);
645 buf_size = 2 * (ROUND_UP (width, BITS_PER_UNIT) / BITS_PER_UNIT); 1988 mask = val + buf_size;
646 val = XCNEWVEC (unsigned char, buf_size); 1989 memset (val, 0, buf_size);
1990 memset (mask, ~0U, buf_size);
647 1991
648 FOR_EACH_VEC_ELT (stores, i, info) 1992 FOR_EACH_VEC_ELT (stores, i, info)
649 { 1993 {
650 unsigned int pos_in_buffer = info->bitpos - start; 1994 unsigned int pos_in_buffer = info->bitpos - bitregion_start;
651 bool ret = encode_tree_to_bitpos (gimple_assign_rhs1 (info->stmt), 1995 tree cst;
652 val, info->bitsize, 1996 if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
653 pos_in_buffer, buf_size); 1997 cst = info->ops[0].val;
654 if (dump_file && (dump_flags & TDF_DETAILS)) 1998 else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
1999 cst = info->ops[1].val;
2000 else
2001 cst = NULL_TREE;
2002 bool ret = true;
2003 if (cst)
2004 {
2005 if (info->rhs_code == BIT_INSERT_EXPR)
2006 bit_insertion = true;
2007 else
2008 ret = encode_tree_to_bitpos (cst, val, info->bitsize,
2009 pos_in_buffer, buf_size);
2010 }
2011 unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
2012 if (BYTES_BIG_ENDIAN)
2013 clear_bit_region_be (m, (BITS_PER_UNIT - 1
2014 - (pos_in_buffer % BITS_PER_UNIT)),
2015 info->bitsize);
2016 else
2017 clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
2018 if (cst && dump_file && (dump_flags & TDF_DETAILS))
655 { 2019 {
656 if (ret) 2020 if (ret)
657 { 2021 {
658 fprintf (dump_file, "After writing "); 2022 fputs ("After writing ", dump_file);
659 print_generic_expr (dump_file, 2023 print_generic_expr (dump_file, cst, TDF_NONE);
660 gimple_assign_rhs1 (info->stmt), 0);
661 fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC 2024 fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
662 " at position %d the merged region contains:\n", 2025 " at position %d\n", info->bitsize, pos_in_buffer);
663 info->bitsize, pos_in_buffer); 2026 fputs (" the merged value contains ", dump_file);
664 dump_char_array (dump_file, val, buf_size); 2027 dump_char_array (dump_file, val, buf_size);
2028 fputs (" the merged mask contains ", dump_file);
2029 dump_char_array (dump_file, mask, buf_size);
2030 if (bit_insertion)
2031 fputs (" bit insertion is required\n", dump_file);
665 } 2032 }
666 else 2033 else
667 fprintf (dump_file, "Failed to merge stores\n"); 2034 fprintf (dump_file, "Failed to merge stores\n");
668 } 2035 }
669 if (!ret) 2036 if (!ret)
670 return false; 2037 return false;
671 } 2038 }
2039 stores.qsort (sort_by_bitpos);
672 return true; 2040 return true;
673 } 2041 }
674 2042
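A standalone sketch of the val/mask bookkeeping above (little-endian, byte-aligned stores only; the plain arrays stand in for the group's buffers and are not the GCC helpers themselves): each store copies its encoded bytes into val and clears the matching mask bytes, so bytes whose mask stays 0xff are padding that split_group can skip later.

    #include <stdio.h>
    #include <string.h>

    int
    main (void)
    {
      unsigned char val[4], mask[4];
      memset (val, 0, sizeof val);
      memset (mask, 0xff, sizeof mask);   /* 0xff means "not written yet".  */

      val[0] = 0x12; mask[0] = 0;         /* [p]      := 0x12  */
      val[1] = 0x34; mask[1] = 0;         /* [p + 1B] := 0x34  */

      /* Bytes 2 and 3 keep mask 0xff and are treated as padding.  */
      printf ("val:  %02x %02x %02x %02x\n", val[0], val[1], val[2], val[3]);
      printf ("mask: %02x %02x %02x %02x\n", mask[0], mask[1], mask[2], mask[3]);
      return 0;
    }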
675 /* Structure describing the store chain. */ 2043 /* Structure describing the store chain. */
676 2044
680 PNXP (prev's next pointer) points to the head of a list, or to 2048 PNXP (prev's next pointer) points to the head of a list, or to
681 the next field in the previous chain in the list. 2049 the next field in the previous chain in the list.
682 See pass_store_merging::m_stores_head for more rationale. */ 2050 See pass_store_merging::m_stores_head for more rationale. */
683 imm_store_chain_info *next, **pnxp; 2051 imm_store_chain_info *next, **pnxp;
684 tree base_addr; 2052 tree base_addr;
685 auto_vec<struct store_immediate_info *> m_store_info; 2053 auto_vec<store_immediate_info *> m_store_info;
686 auto_vec<merged_store_group *> m_merged_store_groups; 2054 auto_vec<merged_store_group *> m_merged_store_groups;
687 2055
688 imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a) 2056 imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
689 : next (inspt), pnxp (&inspt), base_addr (b_a) 2057 : next (inspt), pnxp (&inspt), base_addr (b_a)
690 { 2058 {
703 gcc_checking_assert (&next == next->pnxp); 2071 gcc_checking_assert (&next == next->pnxp);
704 next->pnxp = pnxp; 2072 next->pnxp = pnxp;
705 } 2073 }
706 } 2074 }
707 bool terminate_and_process_chain (); 2075 bool terminate_and_process_chain ();
2076 bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int);
708 bool coalesce_immediate_stores (); 2077 bool coalesce_immediate_stores ();
709 bool output_merged_store (merged_store_group *); 2078 bool output_merged_store (merged_store_group *);
710 bool output_merged_stores (); 2079 bool output_merged_stores ();
711 }; 2080 };
712 2081
728 pass_store_merging (gcc::context *ctxt) 2097 pass_store_merging (gcc::context *ctxt)
729 : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head () 2098 : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
730 { 2099 {
731 } 2100 }
732 2101
733 /* Pass not supported for PDP-endianness. */ 2102 /* Pass not supported for PDP-endian, nor for insane hosts or
2103 target character sizes where native_{encode,interpret}_expr
2104 doesn't work properly. */
734 virtual bool 2105 virtual bool
735 gate (function *) 2106 gate (function *)
736 { 2107 {
737 return flag_store_merging && (WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN); 2108 return flag_store_merging
2109 && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
2110 && CHAR_BIT == 8
2111 && BITS_PER_UNIT == 8;
738 } 2112 }
739 2113
740 virtual unsigned int execute (function *); 2114 virtual unsigned int execute (function *);
741 2115
742 private: 2116 private:
751 orders, and when they get reused, subsequent passes end up 2125 orders, and when they get reused, subsequent passes end up
752 getting different SSA names, which may ultimately change 2126 getting different SSA names, which may ultimately change
753 decisions when going out of SSA). */ 2127 decisions when going out of SSA). */
754 imm_store_chain_info *m_stores_head; 2128 imm_store_chain_info *m_stores_head;
755 2129
2130 void process_store (gimple *);
756 bool terminate_and_process_all_chains (); 2131 bool terminate_and_process_all_chains ();
757 bool terminate_all_aliasing_chains (imm_store_chain_info **, 2132 bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
758 bool, gimple *);
759 bool terminate_and_release_chain (imm_store_chain_info *); 2133 bool terminate_and_release_chain (imm_store_chain_info *);
760 }; // class pass_store_merging 2134 }; // class pass_store_merging
761 2135
762 /* Terminate and process all recorded chains. Return true if any changes 2136 /* Terminate and process all recorded chains. Return true if any changes
763 were made. */ 2137 were made. */
772 gcc_assert (m_stores_head == NULL); 2146 gcc_assert (m_stores_head == NULL);
773 2147
774 return ret; 2148 return ret;
775 } 2149 }
776 2150
777 /* Terminate all chains that are affected by the assignment to DEST, appearing 2151 /* Terminate all chains that are affected by the statement STMT.
778 in statement STMT and ultimately points to the object BASE. Return true if 2152 CHAIN_INFO is the chain we should ignore from the checks if
779 at least one aliasing chain was terminated. BASE and DEST are allowed to 2153 non-NULL. */
780 be NULL_TREE. In that case the aliasing checks are performed on the whole
781 statement rather than a particular operand in it. VAR_OFFSET_P signifies
782 whether STMT represents a store to BASE offset by a variable amount.
783 If that is the case we have to terminate any chain anchored at BASE. */
784 2154
785 bool 2155 bool
786 pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info 2156 pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
787 **chain_info, 2157 **chain_info,
788 bool var_offset_p,
789 gimple *stmt) 2158 gimple *stmt)
790 { 2159 {
791 bool ret = false; 2160 bool ret = false;
792 2161
793 /* If the statement doesn't touch memory it can't alias. */ 2162 /* If the statement doesn't touch memory it can't alias. */
794 if (!gimple_vuse (stmt)) 2163 if (!gimple_vuse (stmt))
795 return false; 2164 return false;
796 2165
797 /* Check if the assignment destination (BASE) is part of a store chain. 2166 tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
798 This is to catch non-constant stores to destinations that may be part
799 of a chain. */
800 if (chain_info)
801 {
802 /* We have a chain at BASE and we're writing to [BASE + <variable>].
803 This can interfere with any of the stores so terminate
804 the chain. */
805 if (var_offset_p)
806 {
807 terminate_and_release_chain (*chain_info);
808 ret = true;
809 }
810 /* Otherwise go through every store in the chain to see if it
811 aliases with any of them. */
812 else
813 {
814 struct store_immediate_info *info;
815 unsigned int i;
816 FOR_EACH_VEC_ELT ((*chain_info)->m_store_info, i, info)
817 {
818 if (ref_maybe_used_by_stmt_p (stmt,
819 gimple_assign_lhs (info->stmt))
820 || stmt_may_clobber_ref_p (stmt,
821 gimple_assign_lhs (info->stmt)))
822 {
823 if (dump_file && (dump_flags & TDF_DETAILS))
824 {
825 fprintf (dump_file,
826 "stmt causes chain termination:\n");
827 print_gimple_stmt (dump_file, stmt, 0);
828 }
829 terminate_and_release_chain (*chain_info);
830 ret = true;
831 break;
832 }
833 }
834 }
835 }
836
837 /* Check for aliasing with all other store chains. */
838 for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next) 2167 for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
839 { 2168 {
840 next = cur->next; 2169 next = cur->next;
841 2170
842 /* We already checked all the stores in chain_info and terminated the 2171 /* We already checked all the stores in chain_info and terminated the
843 chain if necessary. Skip it here. */ 2172 chain if necessary. Skip it here. */
844 if (chain_info && (*chain_info) == cur) 2173 if (chain_info && *chain_info == cur)
845 continue; 2174 continue;
846 2175
847 /* We can't use the base object here as that does not reliably exist. 2176 store_immediate_info *info;
848 Build a ao_ref from the base object address (if we know the 2177 unsigned int i;
849 minimum and maximum offset and the maximum size we could improve 2178 FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
850 things here). */ 2179 {
851 ao_ref chain_ref; 2180 tree lhs = gimple_assign_lhs (info->stmt);
852 ao_ref_init_from_ptr_and_size (&chain_ref, cur->base_addr, NULL_TREE); 2181 if (ref_maybe_used_by_stmt_p (stmt, lhs)
853 if (ref_maybe_used_by_stmt_p (stmt, &chain_ref) 2182 || stmt_may_clobber_ref_p (stmt, lhs)
854 || stmt_may_clobber_ref_p_1 (stmt, &chain_ref)) 2183 || (store_lhs && refs_output_dependent_p (store_lhs, lhs)))
855 { 2184 {
856 terminate_and_release_chain (cur); 2185 if (dump_file && (dump_flags & TDF_DETAILS))
857 ret = true; 2186 {
2187 fprintf (dump_file, "stmt causes chain termination:\n");
2188 print_gimple_stmt (dump_file, stmt, 0);
2189 }
2190 terminate_and_release_chain (cur);
2191 ret = true;
2192 break;
2193 }
858 } 2194 }
859 } 2195 }
860 2196
861 return ret; 2197 return ret;
862 } 2198 }
870 { 2206 {
871 bool ret = chain_info->terminate_and_process_chain (); 2207 bool ret = chain_info->terminate_and_process_chain ();
872 m_stores.remove (chain_info->base_addr); 2208 m_stores.remove (chain_info->base_addr);
873 delete chain_info; 2209 delete chain_info;
874 return ret; 2210 return ret;
2211 }
2212
2213 /* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
2214 may clobber REF. FIRST and LAST must be in the same basic block and
2215 have non-NULL vdef. We want to be able to sink a load of REF across
2216 stores between FIRST and LAST, up to right before LAST. */
2217
2218 bool
2219 stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
2220 {
2221 ao_ref r;
2222 ao_ref_init (&r, ref);
2223 unsigned int count = 0;
2224 tree vop = gimple_vdef (last);
2225 gimple *stmt;
2226
2227 gcc_checking_assert (gimple_bb (first) == gimple_bb (last));
2228 do
2229 {
2230 stmt = SSA_NAME_DEF_STMT (vop);
2231 if (stmt_may_clobber_ref_p_1 (stmt, &r))
2232 return true;
2233 if (gimple_store_p (stmt)
2234 && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
2235 return true;
2236 /* Avoid quadratic compile time by bounding the number of checks
2237 we perform. */
2238 if (++count > MAX_STORE_ALIAS_CHECKS)
2239 return true;
2240 vop = gimple_vuse (stmt);
2241 }
2242 while (stmt != first);
2243 return false;
2244 }
2245
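A source-level sketch of why this walk is needed (hypothetical function, not from this patch): to turn the two field copies below into one wide load plus one wide store, the load of s->b has to be sunk past the store to t->a; if t could alias s, that intervening store might clobber the load, which is exactly the situation stmts_may_clobber_ref_p detects.

    struct pair { int a, b; };

    void
    copy_pair (struct pair *s, struct pair *t)
    {
      t->a = s->a;   /* Store sitting between the two loads.  */
      t->b = s->b;   /* This load would have to move past the store above.  */
    }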
2246 /* Return true if INFO->ops[IDX] is mergeable with the
2247 corresponding loads already in MERGED_STORE group.
2248 BASE_ADDR is the base address of the whole store group. */
2249
2250 bool
2251 compatible_load_p (merged_store_group *merged_store,
2252 store_immediate_info *info,
2253 tree base_addr, int idx)
2254 {
2255 store_immediate_info *infof = merged_store->stores[0];
2256 if (!info->ops[idx].base_addr
2257 || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
2258 info->bitpos - infof->bitpos)
2259 || !operand_equal_p (info->ops[idx].base_addr,
2260 infof->ops[idx].base_addr, 0))
2261 return false;
2262
2263 store_immediate_info *infol = merged_store->stores.last ();
2264 tree load_vuse = gimple_vuse (info->ops[idx].stmt);
2265 /* In this case all vuses should be the same, e.g.
2266 _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
2267 or
2268 _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
2269 and we can emit the coalesced load next to any of those loads. */
2270 if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
2271 && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
2272 return true;
2273
2274 /* Otherwise, at least for now require that the load has the same
2275 vuse as the store. See following examples. */
2276 if (gimple_vuse (info->stmt) != load_vuse)
2277 return false;
2278
2279 if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
2280 || (infof != infol
2281 && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
2282 return false;
2283
2284 /* If the load is from the same location as the store, already
2285 the construction of the immediate chain info guarantees no intervening
2286 stores, so no further checks are needed. Example:
2287 _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */
2288 if (known_eq (info->ops[idx].bitpos, info->bitpos)
2289 && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
2290 return true;
2291
2292 /* Otherwise, we need to punt if any of the loads can be clobbered by any
2293 of the stores in the group, or any other stores in between those.
2294 Previous calls to compatible_load_p ensured that for all the
2295 merged_store->stores IDX loads, no stmts starting with
2296 merged_store->first_stmt and ending right before merged_store->last_stmt
2297 clobbers those loads. */
2298 gimple *first = merged_store->first_stmt;
2299 gimple *last = merged_store->last_stmt;
2300 unsigned int i;
2301 store_immediate_info *infoc;
2302 /* The stores are sorted by increasing store bitpos, so if info->stmt store
2303 comes before the so far first load, we'll be changing
2304 merged_store->first_stmt. In that case we need to give up if
2305 any of the earlier processed loads clobber with the stmts in the new
2306 range. */
2307 if (info->order < merged_store->first_order)
2308 {
2309 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2310 if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
2311 return false;
2312 first = info->stmt;
2313 }
2314 /* Similarly, we could change merged_store->last_stmt, so ensure
2315 in that case no stmts in the new range clobber any of the earlier
2316 processed loads. */
2317 else if (info->order > merged_store->last_order)
2318 {
2319 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2320 if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
2321 return false;
2322 last = info->stmt;
2323 }
2324 /* And finally, we'd be adding a new load to the set, ensure it isn't
2325 clobbered in the new range. */
2326 if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
2327 return false;
2328
2329 /* Otherwise, we are looking for:
2330 _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
2331 or
2332 _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */
2333 return true;
2334 }
2335
2336 /* Add all refs loaded to compute VAL to REFS vector. */
2337
2338 void
2339 gather_bswap_load_refs (vec<tree> *refs, tree val)
2340 {
2341 if (TREE_CODE (val) != SSA_NAME)
2342 return;
2343
2344 gimple *stmt = SSA_NAME_DEF_STMT (val);
2345 if (!is_gimple_assign (stmt))
2346 return;
2347
2348 if (gimple_assign_load_p (stmt))
2349 {
2350 refs->safe_push (gimple_assign_rhs1 (stmt));
2351 return;
2352 }
2353
2354 switch (gimple_assign_rhs_class (stmt))
2355 {
2356 case GIMPLE_BINARY_RHS:
2357 gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
2358 /* FALLTHRU */
2359 case GIMPLE_UNARY_RHS:
2360 gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
2361 break;
2362 default:
2363 gcc_unreachable ();
2364 }
2365 }
2366
2367 /* Check if there are any stores in M_STORE_INFO after index I
2368 (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap
2369 a potential group ending at END and have their order
2370 smaller than LAST_ORDER. RHS_CODE is the kind of store in the
2371 group. Return true if there are no such stores.
2372 Consider:
2373 MEM[(long long int *)p_28] = 0;
2374 MEM[(long long int *)p_28 + 8B] = 0;
2375 MEM[(long long int *)p_28 + 16B] = 0;
2376 MEM[(long long int *)p_28 + 24B] = 0;
2377 _129 = (int) _130;
2378 MEM[(int *)p_28 + 8B] = _129;
2379 MEM[(int *)p_28].a = -1;
2380 We already have
2381 MEM[(long long int *)p_28] = 0;
2382 MEM[(int *)p_28].a = -1;
2383 stmts in the current group and need to consider if it is safe to
2384 add MEM[(long long int *)p_28 + 8B] = 0; store into the same group.
2385 There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129;
2386 store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0;
2387 into the group and merging of those 3 stores is successful, merged
2388 stmts will be emitted at the latest store from that group, i.e.
2389 LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store.
2390 The MEM[(int *)p_28 + 8B] = _129; store that originally follows
2391 the MEM[(long long int *)p_28 + 8B] = 0; would now be before it,
2392 so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0;
2393 into the group. That way it will be its own store group and will
2394 not be touched. If RHS_CODE is INTEGER_CST and there are overlapping
2395 INTEGER_CST stores, those are mergeable using merge_overlapping,
2396 so don't return false for those. */
2397
2398 static bool
2399 check_no_overlap (vec<store_immediate_info *> m_store_info, unsigned int i,
2400 enum tree_code rhs_code, unsigned int last_order,
2401 unsigned HOST_WIDE_INT end)
2402 {
2403 unsigned int len = m_store_info.length ();
2404 for (++i; i < len; ++i)
2405 {
2406 store_immediate_info *info = m_store_info[i];
2407 if (info->bitpos >= end)
2408 break;
2409 if (info->order < last_order
2410 && (rhs_code != INTEGER_CST || info->rhs_code != INTEGER_CST))
2411 return false;
2412 }
2413 return true;
2414 }
2415
2416 /* Return true if m_store_info[first] and at least one following store
2417 form a group that stores a value of try_size bits which is byte swapped
2418 from a memory load or some value, or is the identity of some value.
2419 This uses the bswap pass APIs. */
2420
2421 bool
2422 imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
2423 unsigned int first,
2424 unsigned int try_size)
2425 {
2426 unsigned int len = m_store_info.length (), last = first;
2427 unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
2428 if (width >= try_size)
2429 return false;
2430 for (unsigned int i = first + 1; i < len; ++i)
2431 {
2432 if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
2433 || m_store_info[i]->ins_stmt == NULL)
2434 return false;
2435 width += m_store_info[i]->bitsize;
2436 if (width >= try_size)
2437 {
2438 last = i;
2439 break;
2440 }
2441 }
2442 if (width != try_size)
2443 return false;
2444
2445 bool allow_unaligned
2446 = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
2447 /* Punt if the combined store would not be aligned and we need alignment. */
2448 if (!allow_unaligned)
2449 {
2450 unsigned int align = merged_store->align;
2451 unsigned HOST_WIDE_INT align_base = merged_store->align_base;
2452 for (unsigned int i = first + 1; i <= last; ++i)
2453 {
2454 unsigned int this_align;
2455 unsigned HOST_WIDE_INT align_bitpos = 0;
2456 get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
2457 &this_align, &align_bitpos);
2458 if (this_align > align)
2459 {
2460 align = this_align;
2461 align_base = m_store_info[i]->bitpos - align_bitpos;
2462 }
2463 }
2464 unsigned HOST_WIDE_INT align_bitpos
2465 = (m_store_info[first]->bitpos - align_base) & (align - 1);
2466 if (align_bitpos)
2467 align = least_bit_hwi (align_bitpos);
2468 if (align < try_size)
2469 return false;
2470 }
2471
2472 tree type;
2473 switch (try_size)
2474 {
2475 case 16: type = uint16_type_node; break;
2476 case 32: type = uint32_type_node; break;
2477 case 64: type = uint64_type_node; break;
2478 default: gcc_unreachable ();
2479 }
2480 struct symbolic_number n;
2481 gimple *ins_stmt = NULL;
2482 int vuse_store = -1;
2483 unsigned int first_order = merged_store->first_order;
2484 unsigned int last_order = merged_store->last_order;
2485 gimple *first_stmt = merged_store->first_stmt;
2486 gimple *last_stmt = merged_store->last_stmt;
2487 unsigned HOST_WIDE_INT end = merged_store->start + merged_store->width;
2488 store_immediate_info *infof = m_store_info[first];
2489
2490 for (unsigned int i = first; i <= last; ++i)
2491 {
2492 store_immediate_info *info = m_store_info[i];
2493 struct symbolic_number this_n = info->n;
2494 this_n.type = type;
2495 if (!this_n.base_addr)
2496 this_n.range = try_size / BITS_PER_UNIT;
2497 else
2498 /* Update vuse in case it was changed by output_merged_stores. */
2499 this_n.vuse = gimple_vuse (info->ins_stmt);
2500 unsigned int bitpos = info->bitpos - infof->bitpos;
2501 if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
2502 BYTES_BIG_ENDIAN
2503 ? try_size - info->bitsize - bitpos
2504 : bitpos))
2505 return false;
2506 if (this_n.base_addr && vuse_store)
2507 {
2508 unsigned int j;
2509 for (j = first; j <= last; ++j)
2510 if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
2511 break;
2512 if (j > last)
2513 {
2514 if (vuse_store == 1)
2515 return false;
2516 vuse_store = 0;
2517 }
2518 }
2519 if (i == first)
2520 {
2521 n = this_n;
2522 ins_stmt = info->ins_stmt;
2523 }
2524 else
2525 {
2526 if (n.base_addr && n.vuse != this_n.vuse)
2527 {
2528 if (vuse_store == 0)
2529 return false;
2530 vuse_store = 1;
2531 }
2532 if (info->order > last_order)
2533 {
2534 last_order = info->order;
2535 last_stmt = info->stmt;
2536 }
2537 else if (info->order < first_order)
2538 {
2539 first_order = info->order;
2540 first_stmt = info->stmt;
2541 }
2542 end = MAX (end, info->bitpos + info->bitsize);
2543
2544 ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
2545 &this_n, &n);
2546 if (ins_stmt == NULL)
2547 return false;
2548 }
2549 }
2550
2551 uint64_t cmpxchg, cmpnop;
2552 find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop);
2553
2554 /* A complete byte swap should make the symbolic number start with
2555 the largest digit in the highest order byte. Unchanged symbolic
2556 number indicates a read with the same endianness as the target architecture. */
2557 if (n.n != cmpnop && n.n != cmpxchg)
2558 return false;
2559
2560 if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
2561 return false;
2562
2563 if (!check_no_overlap (m_store_info, last, LROTATE_EXPR, last_order, end))
2564 return false;
2565
2566 /* Don't handle memory copy this way if normal non-bswap processing
2567 would handle it too. */
2568 if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
2569 {
2570 unsigned int i;
2571 for (i = first; i <= last; ++i)
2572 if (m_store_info[i]->rhs_code != MEM_REF)
2573 break;
2574 if (i == last + 1)
2575 return false;
2576 }
2577
2578 if (n.n == cmpxchg)
2579 switch (try_size)
2580 {
2581 case 16:
2582 /* Will emit LROTATE_EXPR. */
2583 break;
2584 case 32:
2585 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2586 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
2587 break;
2588 return false;
2589 case 64:
2590 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2591 && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
2592 break;
2593 return false;
2594 default:
2595 gcc_unreachable ();
2596 }
2597
2598 if (!allow_unaligned && n.base_addr)
2599 {
2600 unsigned int align = get_object_alignment (n.src);
2601 if (align < try_size)
2602 return false;
2603 }
2604
2605 /* If each load has the vuse of the corresponding store, we need to verify
2606 the loads can be sunk right before the last store. */
2607 if (vuse_store == 1)
2608 {
2609 auto_vec<tree, 64> refs;
2610 for (unsigned int i = first; i <= last; ++i)
2611 gather_bswap_load_refs (&refs,
2612 gimple_assign_rhs1 (m_store_info[i]->stmt));
2613
2614 unsigned int i;
2615 tree ref;
2616 FOR_EACH_VEC_ELT (refs, i, ref)
2617 if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
2618 return false;
2619 n.vuse = NULL_TREE;
2620 }
2621
2622 infof->n = n;
2623 infof->ins_stmt = ins_stmt;
2624 for (unsigned int i = first; i <= last; ++i)
2625 {
2626 m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
2627 m_store_info[i]->ops[0].base_addr = NULL_TREE;
2628 m_store_info[i]->ops[1].base_addr = NULL_TREE;
2629 if (i != first)
2630 merged_store->merge_into (m_store_info[i]);
2631 }
2632
2633 return true;
875 } 2634 }
876 2635
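A source-level sketch of the store pattern this function recognizes (the same shape quoted in coalesce_immediate_stores below): on a little-endian target the four byte stores write DATA byte-swapped, so the group can be emitted as one __builtin_bswap32 followed by a single 4-byte store.

    void
    store_be32 (unsigned char *p, unsigned int data)
    {
      p[0] = data >> 24;
      p[1] = data >> 16;
      p[2] = data >> 8;
      p[3] = data;
    }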
877 /* Go through the candidate stores recorded in m_store_info and merge them 2636 /* Go through the candidate stores recorded in m_store_info and merge them
878 into merged_store_group objects recorded into m_merged_store_groups 2637 into merged_store_group objects recorded into m_merged_store_groups
879 representing the widened stores. Return true if coalescing was successful 2638 representing the widened stores. Return true if coalescing was successful
886 /* Anything less can't be processed. */ 2645 /* Anything less can't be processed. */
887 if (m_store_info.length () < 2) 2646 if (m_store_info.length () < 2)
888 return false; 2647 return false;
889 2648
890 if (dump_file && (dump_flags & TDF_DETAILS)) 2649 if (dump_file && (dump_flags & TDF_DETAILS))
891 fprintf (dump_file, "Attempting to coalesce %u stores in chain.\n", 2650 fprintf (dump_file, "Attempting to coalesce %u stores in chain\n",
892 m_store_info.length ()); 2651 m_store_info.length ());
893 2652
894 store_immediate_info *info; 2653 store_immediate_info *info;
895 unsigned int i; 2654 unsigned int i, ignore = 0;
896 2655
897 /* Order the stores by the bitposition they write to. */ 2656 /* Order the stores by the bitposition they write to. */
898 m_store_info.qsort (sort_by_bitpos); 2657 m_store_info.qsort (sort_by_bitpos);
899 2658
900 info = m_store_info[0]; 2659 info = m_store_info[0];
901 merged_store_group *merged_store = new merged_store_group (info); 2660 merged_store_group *merged_store = new merged_store_group (info);
2661 if (dump_file && (dump_flags & TDF_DETAILS))
2662 fputs ("New store group\n", dump_file);
902 2663
903 FOR_EACH_VEC_ELT (m_store_info, i, info) 2664 FOR_EACH_VEC_ELT (m_store_info, i, info)
904 { 2665 {
2666 if (i <= ignore)
2667 goto done;
2668
2669 /* First try to handle group of stores like:
2670 p[0] = data >> 24;
2671 p[1] = data >> 16;
2672 p[2] = data >> 8;
2673 p[3] = data;
2674 using the bswap framework. */
2675 if (info->bitpos == merged_store->start + merged_store->width
2676 && merged_store->stores.length () == 1
2677 && merged_store->stores[0]->ins_stmt != NULL
2678 && info->ins_stmt != NULL)
2679 {
2680 unsigned int try_size;
2681 for (try_size = 64; try_size >= 16; try_size >>= 1)
2682 if (try_coalesce_bswap (merged_store, i - 1, try_size))
2683 break;
2684
2685 if (try_size >= 16)
2686 {
2687 ignore = i + merged_store->stores.length () - 1;
2688 m_merged_store_groups.safe_push (merged_store);
2689 if (ignore < m_store_info.length ())
2690 merged_store = new merged_store_group (m_store_info[ignore]);
2691 else
2692 merged_store = NULL;
2693 goto done;
2694 }
2695 }
2696
2697 /* |---store 1---|
2698 |---store 2---|
2699 Overlapping stores. */
2700 if (IN_RANGE (info->bitpos, merged_store->start,
2701 merged_store->start + merged_store->width - 1))
2702 {
2703 /* Only allow overlapping stores of constants. */
2704 if (info->rhs_code == INTEGER_CST)
2705 {
2706 bool only_constants = true;
2707 store_immediate_info *infoj;
2708 unsigned int j;
2709 FOR_EACH_VEC_ELT (merged_store->stores, j, infoj)
2710 if (infoj->rhs_code != INTEGER_CST)
2711 {
2712 only_constants = false;
2713 break;
2714 }
2715 unsigned int last_order
2716 = MAX (merged_store->last_order, info->order);
2717 unsigned HOST_WIDE_INT end
2718 = MAX (merged_store->start + merged_store->width,
2719 info->bitpos + info->bitsize);
2720 if (only_constants
2721 && check_no_overlap (m_store_info, i, INTEGER_CST,
2722 last_order, end))
2723 {
2724 /* check_no_overlap call above made sure there are no
2725 overlapping stores with non-INTEGER_CST rhs_code
2726 in between the first and last of the stores we've
2727 just merged. If there are any INTEGER_CST rhs_code
2728 stores in between, we need to merge_overlapping them
2729 even if in the sort_by_bitpos order there are other
2730 overlapping stores in between. Keep those stores as is.
2731 Example:
2732 MEM[(int *)p_28] = 0;
2733 MEM[(char *)p_28 + 3B] = 1;
2734 MEM[(char *)p_28 + 1B] = 2;
2735 MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B];
2736 We can't merge the zero store with the store of two and
2737 not merge anything else, because the store of one is
2738 in the original order in between those two, but in
2739 sort_by_bitpos order it comes after the last store that
2740 we can't merge with them. We can merge the first 3 stores
2741 and keep the last store as is though. */
2742 unsigned int len = m_store_info.length (), k = i;
2743 for (unsigned int j = i + 1; j < len; ++j)
2744 {
2745 store_immediate_info *info2 = m_store_info[j];
2746 if (info2->bitpos >= end)
2747 break;
2748 if (info2->order < last_order)
2749 {
2750 if (info2->rhs_code != INTEGER_CST)
2751 {
2752 /* Normally check_no_overlap makes sure this
2753 doesn't happen, but if end grows below, then
2754 we need to process more stores than
2755 check_no_overlap verified. Example:
2756 MEM[(int *)p_5] = 0;
2757 MEM[(short *)p_5 + 3B] = 1;
2758 MEM[(char *)p_5 + 4B] = _9;
2759 MEM[(char *)p_5 + 2B] = 2; */
2760 k = 0;
2761 break;
2762 }
2763 k = j;
2764 end = MAX (end, info2->bitpos + info2->bitsize);
2765 }
2766 }
2767
2768 if (k != 0)
2769 {
2770 merged_store->merge_overlapping (info);
2771
2772 for (unsigned int j = i + 1; j <= k; j++)
2773 {
2774 store_immediate_info *info2 = m_store_info[j];
2775 gcc_assert (info2->bitpos < end);
2776 if (info2->order < last_order)
2777 {
2778 gcc_assert (info2->rhs_code == INTEGER_CST);
2779 merged_store->merge_overlapping (info2);
2780 }
2781 /* Other stores are kept and not merged in any
2782 way. */
2783 }
2784 ignore = k;
2785 goto done;
2786 }
2787 }
2788 }
2789 }
2790 /* |---store 1---||---store 2---|
2791 This store is consecutive to the previous one.
2792 Merge it into the current store group. There can be gaps in between
2793 the stores, but there can't be gaps in between bitregions. */
2794 else if (info->bitregion_start <= merged_store->bitregion_end
2795 && merged_store->can_be_merged_into (info))
2796 {
2797 store_immediate_info *infof = merged_store->stores[0];
2798
2799 /* All the rhs_code ops that take 2 operands are commutative,
2800 swap the operands if it could make the operands compatible. */
2801 if (infof->ops[0].base_addr
2802 && infof->ops[1].base_addr
2803 && info->ops[0].base_addr
2804 && info->ops[1].base_addr
2805 && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
2806 info->bitpos - infof->bitpos)
2807 && operand_equal_p (info->ops[1].base_addr,
2808 infof->ops[0].base_addr, 0))
2809 {
2810 std::swap (info->ops[0], info->ops[1]);
2811 info->ops_swapped_p = true;
2812 }
2813 if (check_no_overlap (m_store_info, i, info->rhs_code,
2814 MAX (merged_store->last_order, info->order),
2815 MAX (merged_store->start + merged_store->width,
2816 info->bitpos + info->bitsize)))
2817 {
2818 /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
2819 if (info->rhs_code == MEM_REF && infof->rhs_code != MEM_REF)
2820 {
2821 info->rhs_code = BIT_INSERT_EXPR;
2822 info->ops[0].val = gimple_assign_rhs1 (info->stmt);
2823 info->ops[0].base_addr = NULL_TREE;
2824 }
2825 else if (infof->rhs_code == MEM_REF && info->rhs_code != MEM_REF)
2826 {
2827 store_immediate_info *infoj;
2828 unsigned int j;
2829 FOR_EACH_VEC_ELT (merged_store->stores, j, infoj)
2830 {
2831 infoj->rhs_code = BIT_INSERT_EXPR;
2832 infoj->ops[0].val = gimple_assign_rhs1 (infoj->stmt);
2833 infoj->ops[0].base_addr = NULL_TREE;
2834 }
2835 }
2836 if ((infof->ops[0].base_addr
2837 ? compatible_load_p (merged_store, info, base_addr, 0)
2838 : !info->ops[0].base_addr)
2839 && (infof->ops[1].base_addr
2840 ? compatible_load_p (merged_store, info, base_addr, 1)
2841 : !info->ops[1].base_addr))
2842 {
2843 merged_store->merge_into (info);
2844 goto done;
2845 }
2846 }
2847 }
2848
2849 /* |---store 1---| <gap> |---store 2---|.
2850 Gap between stores or the rhs not compatible. Start a new group. */
2851
2852 /* Try to apply all the stores recorded for the group to determine
2853 the bitpattern they write and discard it if that fails.
2854 This will also reject single-store groups. */
2855 if (merged_store->apply_stores ())
2856 m_merged_store_groups.safe_push (merged_store);
2857 else
2858 delete merged_store;
2859
2860 merged_store = new merged_store_group (info);
905 if (dump_file && (dump_flags & TDF_DETAILS)) 2861 if (dump_file && (dump_flags & TDF_DETAILS))
2862 fputs ("New store group\n", dump_file);
2863
2864 done:
2865 if (dump_file && (dump_flags & TDF_DETAILS))
906 { 2866 {
907 fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC 2867 fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
908 " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:\n", 2868 " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:",
909 i, info->bitsize, info->bitpos); 2869 i, info->bitsize, info->bitpos);
910 print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt)); 2870 print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
911 fprintf (dump_file, "\n------------\n"); 2871 fputc ('\n', dump_file);
912 } 2872 }
913 2873 }
914 if (i == 0) 2874
915 continue; 2875 /* Record or discard the last store group. */
916 2876 if (merged_store)
917 /* |---store 1---| 2877 {
918 |---store 2---| 2878 if (merged_store->apply_stores ())
919 Overlapping stores. */ 2879 m_merged_store_groups.safe_push (merged_store);
920 unsigned HOST_WIDE_INT start = info->bitpos; 2880 else
921 if (IN_RANGE (start, merged_store->start, 2881 delete merged_store;
922 merged_store->start + merged_store->width - 1)) 2882 }
923 {
924 merged_store->merge_overlapping (info);
925 continue;
926 }
927
928 /* |---store 1---| <gap> |---store 2---|.
929 Gap between stores. Start a new group. */
930 if (start != merged_store->start + merged_store->width)
931 {
932 /* Try to apply all the stores recorded for the group to determine
933 the bitpattern they write and discard it if that fails.
934 This will also reject single-store groups. */
935 if (!merged_store->apply_stores ())
936 delete merged_store;
937 else
938 m_merged_store_groups.safe_push (merged_store);
939
940 merged_store = new merged_store_group (info);
941
942 continue;
943 }
944
945 /* |---store 1---||---store 2---|
946 This store is consecutive to the previous one.
947 Merge it into the current store group. */
948 merged_store->merge_into (info);
949 }
950
951 /* Record or discard the last store group. */
952 if (!merged_store->apply_stores ())
953 delete merged_store;
954 else
955 m_merged_store_groups.safe_push (merged_store);
956 2883
957 gcc_assert (m_merged_store_groups.length () <= m_store_info.length ()); 2884 gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());
2885
958 bool success 2886 bool success
959 = !m_merged_store_groups.is_empty () 2887 = !m_merged_store_groups.is_empty ()
960 && m_merged_store_groups.length () < m_store_info.length (); 2888 && m_merged_store_groups.length () < m_store_info.length ();
961 2889
962 if (success && dump_file) 2890 if (success && dump_file)
963 fprintf (dump_file, "Coalescing successful!\n" 2891 fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
964 "Merged into %u stores\n", 2892 m_merged_store_groups.length ());
965 m_merged_store_groups.length ());
966 2893
967 return success; 2894 return success;
968 } 2895 }
969 2896
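A minimal source-level sketch of the overlapping-constants case handled above (hypothetical code): the byte store overlaps the word store, and because both right-hand sides are constants the group can fold the second store in via merge_overlapping instead of giving up on the overlap.

    void
    set_flags (int *p)
    {
      *p = 0;
      ((unsigned char *) p)[2] = 7;   /* Overlaps the word store above.  */
    }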
970 /* Return the type to use for the merged stores described by STMTS. 2897 /* Return the type to use for the merged stores or loads described by STMTS.
971 This is needed to get the alias sets right. */ 2898 This is needed to get the alias sets right. If IS_LOAD, look for rhs,
2899 otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
2900 of the MEM_REFs if any. */
972 2901
973 static tree 2902 static tree
974 get_alias_type_for_stmts (auto_vec<gimple *> &stmts) 2903 get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
2904 unsigned short *cliquep, unsigned short *basep)
975 { 2905 {
976 gimple *stmt; 2906 gimple *stmt;
977 unsigned int i; 2907 unsigned int i;
978 tree lhs = gimple_assign_lhs (stmts[0]); 2908 tree type = NULL_TREE;
979 tree type = reference_alias_ptr_type (lhs); 2909 tree ret = NULL_TREE;
2910 *cliquep = 0;
2911 *basep = 0;
980 2912
981 FOR_EACH_VEC_ELT (stmts, i, stmt) 2913 FOR_EACH_VEC_ELT (stmts, i, stmt)
982 { 2914 {
2915 tree ref = is_load ? gimple_assign_rhs1 (stmt)
2916 : gimple_assign_lhs (stmt);
2917 tree type1 = reference_alias_ptr_type (ref);
2918 tree base = get_base_address (ref);
2919
983 if (i == 0) 2920 if (i == 0)
984 continue; 2921 {
985 2922 if (TREE_CODE (base) == MEM_REF)
986 lhs = gimple_assign_lhs (stmt); 2923 {
987 tree type1 = reference_alias_ptr_type (lhs); 2924 *cliquep = MR_DEPENDENCE_CLIQUE (base);
2925 *basep = MR_DEPENDENCE_BASE (base);
2926 }
2927 ret = type = type1;
2928 continue;
2929 }
988 if (!alias_ptr_types_compatible_p (type, type1)) 2930 if (!alias_ptr_types_compatible_p (type, type1))
989 return ptr_type_node; 2931 ret = ptr_type_node;
990 } 2932 if (TREE_CODE (base) != MEM_REF
991 return type; 2933 || *cliquep != MR_DEPENDENCE_CLIQUE (base)
2934 || *basep != MR_DEPENDENCE_BASE (base))
2935 {
2936 *cliquep = 0;
2937 *basep = 0;
2938 }
2939 }
2940 return ret;
992 } 2941 }
993 2942
994 /* Return the location_t information we can find among the statements 2943 /* Return the location_t information we can find among the statements
995 in STMTS. */ 2944 in STMTS. */
996 2945
997 static location_t 2946 static location_t
998 get_location_for_stmts (auto_vec<gimple *> &stmts) 2947 get_location_for_stmts (vec<gimple *> &stmts)
999 { 2948 {
1000 gimple *stmt; 2949 gimple *stmt;
1001 unsigned int i; 2950 unsigned int i;
1002 2951
1003 FOR_EACH_VEC_ELT (stmts, i, stmt) 2952 FOR_EACH_VEC_ELT (stmts, i, stmt)
1013 struct split_store 2962 struct split_store
1014 { 2963 {
1015 unsigned HOST_WIDE_INT bytepos; 2964 unsigned HOST_WIDE_INT bytepos;
1016 unsigned HOST_WIDE_INT size; 2965 unsigned HOST_WIDE_INT size;
1017 unsigned HOST_WIDE_INT align; 2966 unsigned HOST_WIDE_INT align;
1018 auto_vec<gimple *> orig_stmts; 2967 auto_vec<store_immediate_info *> orig_stores;
2968 /* True if there is a single orig stmt covering the whole split store. */
2969 bool orig;
1019 split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, 2970 split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1020 unsigned HOST_WIDE_INT); 2971 unsigned HOST_WIDE_INT);
1021 }; 2972 };
1022 2973
1023 /* Simple constructor. */ 2974 /* Simple constructor. */
1024 2975
1025 split_store::split_store (unsigned HOST_WIDE_INT bp, 2976 split_store::split_store (unsigned HOST_WIDE_INT bp,
1026 unsigned HOST_WIDE_INT sz, 2977 unsigned HOST_WIDE_INT sz,
1027 unsigned HOST_WIDE_INT al) 2978 unsigned HOST_WIDE_INT al)
1028 : bytepos (bp), size (sz), align (al) 2979 : bytepos (bp), size (sz), align (al), orig (false)
1029 { 2980 {
1030 orig_stmts.create (0); 2981 orig_stores.create (0);
1031 } 2982 }
1032 2983
1033 /* Record all statements corresponding to stores in GROUP that write to 2984 /* Record all stores in GROUP that write to the region starting at BITPOS and
1034 the region starting at BITPOS and of size BITSIZE. Record such 2985 of size BITSIZE. Record infos for such statements in STORES if
1035 statements in STMTS. The stores in GROUP must be sorted by 2986 non-NULL. The stores in GROUP must be sorted by bitposition. Return INFO
1036 bitposition. */ 2987 if there is exactly one original store in the range. */
1037 2988
1038 static void 2989 static store_immediate_info *
1039 find_constituent_stmts (struct merged_store_group *group, 2990 find_constituent_stores (struct merged_store_group *group,
1040 auto_vec<gimple *> &stmts, 2991 vec<store_immediate_info *> *stores,
2992 unsigned int *first,
1041 unsigned HOST_WIDE_INT bitpos, 2993 unsigned HOST_WIDE_INT bitpos,
1042 unsigned HOST_WIDE_INT bitsize) 2994 unsigned HOST_WIDE_INT bitsize)
1043 { 2995 {
1044 struct store_immediate_info *info; 2996 store_immediate_info *info, *ret = NULL;
1045 unsigned int i; 2997 unsigned int i;
2998 bool second = false;
2999 bool update_first = true;
1046 unsigned HOST_WIDE_INT end = bitpos + bitsize; 3000 unsigned HOST_WIDE_INT end = bitpos + bitsize;
1047 FOR_EACH_VEC_ELT (group->stores, i, info) 3001 for (i = *first; group->stores.iterate (i, &info); ++i)
1048 { 3002 {
1049 unsigned HOST_WIDE_INT stmt_start = info->bitpos; 3003 unsigned HOST_WIDE_INT stmt_start = info->bitpos;
1050 unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize; 3004 unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
1051 if (stmt_end < bitpos) 3005 if (stmt_end <= bitpos)
1052 continue; 3006 {
3007 /* BITPOS passed to this function never decreases from within the
3008 same split_group call, so optimize and don't scan info records
3009 which are known to end before or at BITPOS next time.
3010 Only do it if all stores before this one also pass this. */
3011 if (update_first)
3012 *first = i + 1;
3013 continue;
3014 }
3015 else
3016 update_first = false;
3017
1053 /* The stores in GROUP are ordered by bitposition so if we're past 3018 /* The stores in GROUP are ordered by bitposition so if we're past
1054 the region for this group return early. */ 3019 the region for this group return early. */
1055 if (stmt_start > end) 3020 if (stmt_start >= end)
1056 return; 3021 return ret;
1057 3022
1058 if (IN_RANGE (stmt_start, bitpos, bitpos + bitsize) 3023 if (stores)
1059 || IN_RANGE (stmt_end, bitpos, end) 3024 {
1060 /* The statement writes a region that completely encloses the region 3025 stores->safe_push (info);
1061 that this group writes. Unlikely to occur but let's 3026 if (ret)
1062 handle it. */ 3027 {
1063 || IN_RANGE (bitpos, stmt_start, stmt_end)) 3028 ret = NULL;
1064 stmts.safe_push (info->stmt); 3029 second = true;
3030 }
3031 }
3032 else if (ret)
3033 return NULL;
3034 if (!second)
3035 ret = info;
3036 }
3037 return ret;
3038 }
3039
3040 /* Return how many SSA_NAMEs used to compute the value stored by the INFO
3041 store have multiple uses. If any SSA_NAME has multiple uses, also
3042 count statements needed to compute it. */
3043
3044 static unsigned
3045 count_multiple_uses (store_immediate_info *info)
3046 {
3047 gimple *stmt = info->stmt;
3048 unsigned ret = 0;
3049 switch (info->rhs_code)
3050 {
3051 case INTEGER_CST:
3052 return 0;
3053 case BIT_AND_EXPR:
3054 case BIT_IOR_EXPR:
3055 case BIT_XOR_EXPR:
3056 if (info->bit_not_p)
3057 {
3058 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3059 ret = 1; /* Fall through below to return
3060 the BIT_NOT_EXPR stmt and then
3061 BIT_{AND,IOR,XOR}_EXPR and anything it
3062 uses. */
3063 else
3064 /* stmt is after this the BIT_NOT_EXPR. */
3065 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3066 }
3067 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3068 {
3069 ret += 1 + info->ops[0].bit_not_p;
3070 if (info->ops[1].base_addr)
3071 ret += 1 + info->ops[1].bit_not_p;
3072 return ret + 1;
3073 }
3074 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3075 /* stmt is now the BIT_*_EXPR. */
3076 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3077 ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
3078 else if (info->ops[info->ops_swapped_p].bit_not_p)
3079 {
3080 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3081 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3082 ++ret;
3083 }
3084 if (info->ops[1].base_addr == NULL_TREE)
3085 {
3086 gcc_checking_assert (!info->ops_swapped_p);
3087 return ret;
3088 }
3089 if (!has_single_use (gimple_assign_rhs2 (stmt)))
3090 ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
3091 else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
3092 {
3093 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
3094 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3095 ++ret;
3096 }
3097 return ret;
3098 case MEM_REF:
3099 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3100 return 1 + info->ops[0].bit_not_p;
3101 else if (info->ops[0].bit_not_p)
3102 {
3103 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3104 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3105 return 1;
3106 }
3107 return 0;
3108 case BIT_INSERT_EXPR:
3109 return has_single_use (gimple_assign_rhs1 (stmt)) ? 0 : 1;
3110 default:
3111 gcc_unreachable ();
1065 } 3112 }
1066 } 3113 }
1067 3114
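A sketch of what count_multiple_uses measures (hypothetical code): the loaded value below has a second use as the return value, so even if the store is rewritten as part of a wider merged store the original load cannot be removed, and the old/new statement counting in split_group must not treat it as going away.

    int
    copy_and_return (int *src, int *dst)
    {
      int tmp = *src;
      *dst = tmp;
      return tmp;   /* Second use keeps the load live after merging.  */
    }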
1068 /* Split a merged store described by GROUP by populating the SPLIT_STORES 3115 /* Split a merged store described by GROUP by populating the SPLIT_STORES
1069 vector with split_store structs describing the byte offset (from the base), 3116 vector (if non-NULL) with split_store structs describing the byte offset
1070 the bit size and alignment of each store as well as the original statements 3117 (from the base), the bit size and alignment of each store as well as the
1071 involved in each such split group. 3118 original statements involved in each such split group.
1072 This is to separate the splitting strategy from the statement 3119 This is to separate the splitting strategy from the statement
1073 building/emission/linking done in output_merged_store. 3120 building/emission/linking done in output_merged_store.
1074 At the moment just start with the widest possible size and keep emitting 3121 Return number of new stores.
1075 the widest we can until we have emitted all the bytes, halving the size 3122 If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
1076 when appropriate. */ 3123 If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
1077 3124 If SPLIT_STORES is NULL, it is just a dry run to count number of
1078 static bool 3125 new stores. */
1079 split_group (merged_store_group *group, 3126
1080 auto_vec<struct split_store *> &split_stores) 3127 static unsigned int
1081 { 3128 split_group (merged_store_group *group, bool allow_unaligned_store,
1082 unsigned HOST_WIDE_INT pos = group->start; 3129 bool allow_unaligned_load,
1083 unsigned HOST_WIDE_INT size = group->width; 3130 vec<struct split_store *> *split_stores,
3131 unsigned *total_orig,
3132 unsigned *total_new)
3133 {
3134 unsigned HOST_WIDE_INT pos = group->bitregion_start;
3135 unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
1084 unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT; 3136 unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
1085 unsigned HOST_WIDE_INT align = group->align; 3137 unsigned HOST_WIDE_INT group_align = group->align;
1086 3138 unsigned HOST_WIDE_INT align_base = group->align_base;
1087 /* We don't handle partial bitfields for now. We shouldn't have 3139 unsigned HOST_WIDE_INT group_load_align = group_align;
1088 reached this far. */ 3140 bool any_orig = false;
3141
1089 gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0)); 3142 gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));
1090 3143
1091 bool allow_unaligned 3144 if (group->stores[0]->rhs_code == LROTATE_EXPR
1092 = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED); 3145 || group->stores[0]->rhs_code == NOP_EXPR)
1093 3146 {
1094 unsigned int try_size = MAX_STORE_BITSIZE; 3147 /* For bswap framework using sets of stores, all the checking
1095 while (try_size > size 3148 has been done earlier in try_coalesce_bswap and needs to be
1096 || (!allow_unaligned 3149 emitted as a single store. */
1097 && try_size > align)) 3150 if (total_orig)
1098 { 3151 {
1099 try_size /= 2; 3152 /* Avoid the old/new stmt count heuristics. It should be
1100 if (try_size < BITS_PER_UNIT) 3153 always beneficial. */
1101 return false; 3154 total_new[0] = 1;
1102 } 3155 total_orig[0] = 2;
1103 3156 }
3157
3158 if (split_stores)
3159 {
3160 unsigned HOST_WIDE_INT align_bitpos
3161 = (group->start - align_base) & (group_align - 1);
3162 unsigned HOST_WIDE_INT align = group_align;
3163 if (align_bitpos)
3164 align = least_bit_hwi (align_bitpos);
3165 bytepos = group->start / BITS_PER_UNIT;
3166 struct split_store *store
3167 = new split_store (bytepos, group->width, align);
3168 unsigned int first = 0;
3169 find_constituent_stores (group, &store->orig_stores,
3170 &first, group->start, group->width);
3171 split_stores->safe_push (store);
3172 }
3173
3174 return 1;
3175 }
3176
3177 unsigned int ret = 0, first = 0;
1104 unsigned HOST_WIDE_INT try_pos = bytepos; 3178 unsigned HOST_WIDE_INT try_pos = bytepos;
1105 group->stores.qsort (sort_by_bitpos); 3179
3180 if (total_orig)
3181 {
3182 unsigned int i;
3183 store_immediate_info *info = group->stores[0];
3184
3185 total_new[0] = 0;
3186 total_orig[0] = 1; /* The orig store. */
3187 info = group->stores[0];
3188 if (info->ops[0].base_addr)
3189 total_orig[0]++;
3190 if (info->ops[1].base_addr)
3191 total_orig[0]++;
3192 switch (info->rhs_code)
3193 {
3194 case BIT_AND_EXPR:
3195 case BIT_IOR_EXPR:
3196 case BIT_XOR_EXPR:
3197 total_orig[0]++; /* The orig BIT_*_EXPR stmt. */
3198 break;
3199 default:
3200 break;
3201 }
3202 total_orig[0] *= group->stores.length ();
3203
3204 FOR_EACH_VEC_ELT (group->stores, i, info)
3205 {
3206 total_new[0] += count_multiple_uses (info);
3207 total_orig[0] += (info->bit_not_p
3208 + info->ops[0].bit_not_p
3209 + info->ops[1].bit_not_p);
3210 }
3211 }
3212
3213 if (!allow_unaligned_load)
3214 for (int i = 0; i < 2; ++i)
3215 if (group->load_align[i])
3216 group_load_align = MIN (group_load_align, group->load_align[i]);
1106 3217
1107 while (size > 0) 3218 while (size > 0)
1108 { 3219 {
1109 struct split_store *store = new split_store (try_pos, try_size, align); 3220 if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
3221 && group->mask[try_pos - bytepos] == (unsigned char) ~0U)
3222 {
3223 /* Skip padding bytes. */
3224 ++try_pos;
3225 size -= BITS_PER_UNIT;
3226 continue;
3227 }
3228
1110 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT; 3229 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
1111 find_constituent_stmts (group, store->orig_stmts, try_bitpos, try_size); 3230 unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
1112 split_stores.safe_push (store); 3231 unsigned HOST_WIDE_INT align_bitpos
3232 = (try_bitpos - align_base) & (group_align - 1);
3233 unsigned HOST_WIDE_INT align = group_align;
3234 if (align_bitpos)
3235 align = least_bit_hwi (align_bitpos);
3236 if (!allow_unaligned_store)
3237 try_size = MIN (try_size, align);
3238 if (!allow_unaligned_load)
3239 {
3240 /* If we can't do or don't want to do unaligned stores
3241 as well as loads, we need to take the loads into account
3242 as well. */
3243 unsigned HOST_WIDE_INT load_align = group_load_align;
3244 align_bitpos = (try_bitpos - align_base) & (load_align - 1);
3245 if (align_bitpos)
3246 load_align = least_bit_hwi (align_bitpos);
3247 for (int i = 0; i < 2; ++i)
3248 if (group->load_align[i])
3249 {
3250 align_bitpos
3251 = known_alignment (try_bitpos
3252 - group->stores[0]->bitpos
3253 + group->stores[0]->ops[i].bitpos
3254 - group->load_align_base[i]);
3255 if (align_bitpos & (group_load_align - 1))
3256 {
3257 unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
3258 load_align = MIN (load_align, a);
3259 }
3260 }
3261 try_size = MIN (try_size, load_align);
3262 }
3263 store_immediate_info *info
3264 = find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
3265 if (info)
3266 {
3267 /* If there is just one original statement for the range, see if
3268 we can just reuse the original store which could be even larger
3269 than try_size. */
3270 unsigned HOST_WIDE_INT stmt_end
3271 = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
3272 info = find_constituent_stores (group, NULL, &first, try_bitpos,
3273 stmt_end - try_bitpos);
3274 if (info && info->bitpos >= try_bitpos)
3275 {
3276 try_size = stmt_end - try_bitpos;
3277 goto found;
3278 }
3279 }
3280
3281 /* Approximate store bitsize for the case when there are no padding
3282 bits. */
3283 while (try_size > size)
3284 try_size /= 2;
3285 /* Now look for whole padding bytes at the end of that bitsize. */
3286 for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
3287 if (group->mask[try_pos - bytepos + nonmasked - 1]
3288 != (unsigned char) ~0U)
3289 break;
3290 if (nonmasked == 0)
3291 {
3292 /* If entire try_size range is padding, skip it. */
3293 try_pos += try_size / BITS_PER_UNIT;
3294 size -= try_size;
3295 continue;
3296 }
3297 /* Otherwise try to decrease try_size if second half, last 3 quarters
3298 etc. are padding. */
3299 nonmasked *= BITS_PER_UNIT;
3300 while (nonmasked <= try_size / 2)
3301 try_size /= 2;
3302 if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
3303 {
3304 /* Now look for whole padding bytes at the start of that bitsize. */
3305 unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
3306 for (masked = 0; masked < try_bytesize; ++masked)
3307 if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U)
3308 break;
3309 masked *= BITS_PER_UNIT;
3310 gcc_assert (masked < try_size);
3311 if (masked >= try_size / 2)
3312 {
3313 while (masked >= try_size / 2)
3314 {
3315 try_size /= 2;
3316 try_pos += try_size / BITS_PER_UNIT;
3317 size -= try_size;
3318 masked -= try_size;
3319 }
3320 /* Need to recompute the alignment, so just retry at the new
3321 position. */
3322 continue;
3323 }
3324 }
3325
3326 found:
3327 ++ret;
3328
3329 if (split_stores)
3330 {
3331 struct split_store *store
3332 = new split_store (try_pos, try_size, align);
3333 info = find_constituent_stores (group, &store->orig_stores,
3334 &first, try_bitpos, try_size);
3335 if (info
3336 && info->bitpos >= try_bitpos
3337 && info->bitpos + info->bitsize <= try_bitpos + try_size)
3338 {
3339 store->orig = true;
3340 any_orig = true;
3341 }
3342 split_stores->safe_push (store);
3343 }
1113 3344
1114 try_pos += try_size / BITS_PER_UNIT; 3345 try_pos += try_size / BITS_PER_UNIT;
1115
1116 size -= try_size; 3346 size -= try_size;
1117 align = try_size; 3347 }
1118 while (size < try_size) 3348
1119 try_size /= 2; 3349 if (total_orig)
1120 } 3350 {
1121 return true; 3351 unsigned int i;
3352 struct split_store *store;
3353 /* If we are reusing some original stores and any of the
3354 original SSA_NAMEs had multiple uses, we need to subtract
3355 those now before we add the new ones. */
3356 if (total_new[0] && any_orig)
3357 {
3358 FOR_EACH_VEC_ELT (*split_stores, i, store)
3359 if (store->orig)
3360 total_new[0] -= count_multiple_uses (store->orig_stores[0]);
3361 }
3362 total_new[0] += ret; /* The new store. */
3363 store_immediate_info *info = group->stores[0];
3364 if (info->ops[0].base_addr)
3365 total_new[0] += ret;
3366 if (info->ops[1].base_addr)
3367 total_new[0] += ret;
3368 switch (info->rhs_code)
3369 {
3370 case BIT_AND_EXPR:
3371 case BIT_IOR_EXPR:
3372 case BIT_XOR_EXPR:
3373 total_new[0] += ret; /* The new BIT_*_EXPR stmt. */
3374 break;
3375 default:
3376 break;
3377 }
3378 FOR_EACH_VEC_ELT (*split_stores, i, store)
3379 {
3380 unsigned int j;
3381 bool bit_not_p[3] = { false, false, false };
3382 /* If all orig_stores have a certain bit_not_p flag set, then
3383 we'd use a BIT_NOT_EXPR stmt and need to account for it.
3384 If only some orig_stores have that flag set, then
3385 we'd use a BIT_XOR_EXPR with a mask and need to account for
3386 it. */
3387 FOR_EACH_VEC_ELT (store->orig_stores, j, info)
3388 {
3389 if (info->ops[0].bit_not_p)
3390 bit_not_p[0] = true;
3391 if (info->ops[1].bit_not_p)
3392 bit_not_p[1] = true;
3393 if (info->bit_not_p)
3394 bit_not_p[2] = true;
3395 }
3396 total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
3397 }
3398
3399 }
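  /* A rough illustration of the accounting above (hypothetical example):
     for a group of four single-byte copies of the form p[i] = q[i], each
     original statement counts the store plus its load, so
     total_orig = 4 * 2 = 8; if the group is split into one 4-byte store
     (ret == 1) backed by one 4-byte load and no SSA_NAME has multiple
     uses, total_new = 2.  A bitwise operation on the loaded value would
     add one more count per original statement and one per new store.  */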
3400
3401 return ret;
3402 }
3403
3404 /* Return the operation through which the operand IDX (if < 2) or
3405 the result (IDX == 2) should be inverted. If NOP_EXPR, no inversion
3406 is done; if BIT_NOT_EXPR, all bits are inverted; if BIT_XOR_EXPR,
3407 the bits should be xored with MASK. */
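/* For instance (an illustrative sketch with hypothetical stores): if every
   constituent store of SPLIT_STORE was inverted and none of them stores a
   value narrower than its bitsize, BIT_NOT_EXPR is returned; if only the
   store covering the first byte of the split store was inverted,
   BIT_XOR_EXPR is returned and MASK becomes an INT_TYPE constant whose low
   byte is 0xff and whose other bits are zero on a little-endian target.  */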
3408
3409 static enum tree_code
3410 invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
3411 {
3412 unsigned int i;
3413 store_immediate_info *info;
3414 unsigned int cnt = 0;
3415 bool any_paddings = false;
3416 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3417 {
3418 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3419 if (bit_not_p)
3420 {
3421 ++cnt;
3422 tree lhs = gimple_assign_lhs (info->stmt);
3423 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3424 && TYPE_PRECISION (TREE_TYPE (lhs)) < info->bitsize)
3425 any_paddings = true;
3426 }
3427 }
3428 mask = NULL_TREE;
3429 if (cnt == 0)
3430 return NOP_EXPR;
3431 if (cnt == split_store->orig_stores.length () && !any_paddings)
3432 return BIT_NOT_EXPR;
3433
3434 unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
3435 unsigned buf_size = split_store->size / BITS_PER_UNIT;
3436 unsigned char *buf
3437 = XALLOCAVEC (unsigned char, buf_size);
3438 memset (buf, ~0U, buf_size);
3439 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3440 {
3441 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3442 if (!bit_not_p)
3443 continue;
3444 /* Clear regions with bit_not_p set and invert afterwards, rather than
3445 clearing regions with !bit_not_p, so that gaps in between stores aren't
3446 set in the mask. */
3447 unsigned HOST_WIDE_INT bitsize = info->bitsize;
3448 unsigned HOST_WIDE_INT prec = bitsize;
3449 unsigned int pos_in_buffer = 0;
3450 if (any_paddings)
3451 {
3452 tree lhs = gimple_assign_lhs (info->stmt);
3453 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3454 && TYPE_PRECISION (TREE_TYPE (lhs)) < bitsize)
3455 prec = TYPE_PRECISION (TREE_TYPE (lhs));
3456 }
3457 if (info->bitpos < try_bitpos)
3458 {
3459 gcc_assert (info->bitpos + bitsize > try_bitpos);
3460 if (!BYTES_BIG_ENDIAN)
3461 {
3462 if (prec <= try_bitpos - info->bitpos)
3463 continue;
3464 prec -= try_bitpos - info->bitpos;
3465 }
3466 bitsize -= try_bitpos - info->bitpos;
3467 if (BYTES_BIG_ENDIAN && prec > bitsize)
3468 prec = bitsize;
3469 }
3470 else
3471 pos_in_buffer = info->bitpos - try_bitpos;
3472 if (prec < bitsize)
3473 {
3474 /* If this is a bool inversion, invert just the least significant
3475 prec bits rather than all bits of it. */
3476 if (BYTES_BIG_ENDIAN)
3477 {
3478 pos_in_buffer += bitsize - prec;
3479 if (pos_in_buffer >= split_store->size)
3480 continue;
3481 }
3482 bitsize = prec;
3483 }
3484 if (pos_in_buffer + bitsize > split_store->size)
3485 bitsize = split_store->size - pos_in_buffer;
3486 unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
3487 if (BYTES_BIG_ENDIAN)
3488 clear_bit_region_be (p, (BITS_PER_UNIT - 1
3489 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
3490 else
3491 clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
3492 }
3493 for (unsigned int i = 0; i < buf_size; ++i)
3494 buf[i] = ~buf[i];
3495 mask = native_interpret_expr (int_type, buf, buf_size);
3496 return BIT_XOR_EXPR;
1122 } 3497 }
1123 3498
1124 /* Given a merged store group GROUP output the widened version of it. 3499 /* Given a merged store group GROUP output the widened version of it.
1125 The store chain is against the base object BASE. 3500 The store chain is against the base object BASE.
1126 Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output 3501 Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
1130 return true. */ 3505 return true. */
1131 3506
1132 bool 3507 bool
1133 imm_store_chain_info::output_merged_store (merged_store_group *group) 3508 imm_store_chain_info::output_merged_store (merged_store_group *group)
1134 { 3509 {
1135 unsigned HOST_WIDE_INT start_byte_pos = group->start / BITS_PER_UNIT; 3510 split_store *split_store;
3511 unsigned int i;
3512 unsigned HOST_WIDE_INT start_byte_pos
3513 = group->bitregion_start / BITS_PER_UNIT;
1136 3514
1137 unsigned int orig_num_stmts = group->stores.length (); 3515 unsigned int orig_num_stmts = group->stores.length ();
1138 if (orig_num_stmts < 2) 3516 if (orig_num_stmts < 2)
1139 return false; 3517 return false;
1140 3518
1141 auto_vec<struct split_store *> split_stores; 3519 auto_vec<struct split_store *, 32> split_stores;
1142 split_stores.create (0); 3520 bool allow_unaligned_store
1143 if (!split_group (group, split_stores)) 3521 = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
1144 return false; 3522 bool allow_unaligned_load = allow_unaligned_store;
3523 if (allow_unaligned_store)
3524 {
3525 /* If unaligned stores are allowed, see how many stores we'd emit
3526 for unaligned and how many stores we'd emit for aligned stores.
3527 Only use unaligned stores if that results in fewer stores than aligned. */
3528 unsigned aligned_cnt
3529 = split_group (group, false, allow_unaligned_load, NULL, NULL, NULL);
3530 unsigned unaligned_cnt
3531 = split_group (group, true, allow_unaligned_load, NULL, NULL, NULL);
3532 if (aligned_cnt <= unaligned_cnt)
3533 allow_unaligned_store = false;
3534 }
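  /* A worked illustration of this choice (hypothetical layout): for six
     consecutive byte stores starting one byte past a 32-bit aligned base,
     with no padding bytes and MAX_STORE_BITSIZE >= 32, the unaligned split
     needs 2 stores (4 + 2 bytes) while the aligned-only split needs 4
     (1 + 2 + 2 + 1 bytes), so unaligned stores are used here; on a tie the
     aligned split is preferred.  */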
3535 unsigned total_orig, total_new;
3536 split_group (group, allow_unaligned_store, allow_unaligned_load,
3537 &split_stores, &total_orig, &total_new);
3538
3539 if (split_stores.length () >= orig_num_stmts)
3540 {
3541 /* We didn't manage to reduce the number of statements. Bail out. */
3542 if (dump_file && (dump_flags & TDF_DETAILS))
3543 fprintf (dump_file, "Exceeded original number of stmts (%u)."
3544 " Not profitable to emit new sequence.\n",
3545 orig_num_stmts);
3546 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3547 delete split_store;
3548 return false;
3549 }
3550 if (total_orig <= total_new)
3551 {
3552 /* If the estimated number of new statements is not below the estimated
3553 number of original statements, bail out too. */
3554 if (dump_file && (dump_flags & TDF_DETAILS))
3555 fprintf (dump_file, "Estimated number of original stmts (%u)"
3556 " not larger than estimated number of new"
3557 " stmts (%u).\n",
3558 total_orig, total_new);
3559 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3560 delete split_store;
3561 return false;
3562 }
1145 3563
1146 gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt); 3564 gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
1147 gimple_seq seq = NULL; 3565 gimple_seq seq = NULL;
1148 unsigned int num_stmts = 0;
1149 tree last_vdef, new_vuse; 3566 tree last_vdef, new_vuse;
1150 last_vdef = gimple_vdef (group->last_stmt); 3567 last_vdef = gimple_vdef (group->last_stmt);
1151 new_vuse = gimple_vuse (group->last_stmt); 3568 new_vuse = gimple_vuse (group->last_stmt);
3569 tree bswap_res = NULL_TREE;
3570
3571 if (group->stores[0]->rhs_code == LROTATE_EXPR
3572 || group->stores[0]->rhs_code == NOP_EXPR)
3573 {
3574 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
3575 gimple *ins_stmt = group->stores[0]->ins_stmt;
3576 struct symbolic_number *n = &group->stores[0]->n;
3577 bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;
3578
3579 switch (n->range)
3580 {
3581 case 16:
3582 load_type = bswap_type = uint16_type_node;
3583 break;
3584 case 32:
3585 load_type = uint32_type_node;
3586 if (bswap)
3587 {
3588 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
3589 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
3590 }
3591 break;
3592 case 64:
3593 load_type = uint64_type_node;
3594 if (bswap)
3595 {
3596 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
3597 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
3598 }
3599 break;
3600 default:
3601 gcc_unreachable ();
3602 }
3603
3604 /* If the loads each have the vuse of the corresponding store,
3605 we've checked the aliasing already in try_coalesce_bswap and
3606 we want to sink the needed load into seq. So we need to use new_vuse
3607 on the load. */
3608 if (n->base_addr)
3609 {
3610 if (n->vuse == NULL)
3611 {
3612 n->vuse = new_vuse;
3613 ins_stmt = NULL;
3614 }
3615 else
3616 /* Update vuse in case it has been changed by output_merged_stores. */
3617 n->vuse = gimple_vuse (ins_stmt);
3618 }
3619 bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
3620 bswap_type, load_type, n, bswap);
3621 gcc_assert (bswap_res);
3622 }
1152 3623
1153 gimple *stmt = NULL; 3624 gimple *stmt = NULL;
1154 /* The new SSA names created. Keep track of them so that we can free them 3625 auto_vec<gimple *, 32> orig_stmts;
1155 if we decide to not use the new sequence. */ 3626 gimple_seq this_seq;
1156 auto_vec<tree> new_ssa_names; 3627 tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
1157 split_store *split_store;
1158 unsigned int i;
1159 bool fail = false;
1160
1161 tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &seq,
1162 is_gimple_mem_ref_addr, NULL_TREE); 3628 is_gimple_mem_ref_addr, NULL_TREE);
3629 gimple_seq_add_seq_without_update (&seq, this_seq);
3630
3631 tree load_addr[2] = { NULL_TREE, NULL_TREE };
3632 gimple_seq load_seq[2] = { NULL, NULL };
3633 gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
3634 for (int j = 0; j < 2; ++j)
3635 {
3636 store_operand_info &op = group->stores[0]->ops[j];
3637 if (op.base_addr == NULL_TREE)
3638 continue;
3639
3640 store_immediate_info *infol = group->stores.last ();
3641 if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
3642 {
3643 /* We can't pick the location randomly; while we've verified
3644 all the loads have the same vuse, they can still be in different
3645 basic blocks and we need to pick the one from the last bb:
3646 int x = q[0];
3647 if (x == N) return;
3648 int y = q[1];
3649 p[0] = x;
3650 p[1] = y;
3651 otherwise if we put the wider load at the q[0] load, we might
3652 segfault if q[1] is not mapped. */
3653 basic_block bb = gimple_bb (op.stmt);
3654 gimple *ostmt = op.stmt;
3655 store_immediate_info *info;
3656 FOR_EACH_VEC_ELT (group->stores, i, info)
3657 {
3658 gimple *tstmt = info->ops[j].stmt;
3659 basic_block tbb = gimple_bb (tstmt);
3660 if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
3661 {
3662 ostmt = tstmt;
3663 bb = tbb;
3664 }
3665 }
3666 load_gsi[j] = gsi_for_stmt (ostmt);
3667 load_addr[j]
3668 = force_gimple_operand_1 (unshare_expr (op.base_addr),
3669 &load_seq[j], is_gimple_mem_ref_addr,
3670 NULL_TREE);
3671 }
3672 else if (operand_equal_p (base_addr, op.base_addr, 0))
3673 load_addr[j] = addr;
3674 else
3675 {
3676 load_addr[j]
3677 = force_gimple_operand_1 (unshare_expr (op.base_addr),
3678 &this_seq, is_gimple_mem_ref_addr,
3679 NULL_TREE);
3680 gimple_seq_add_seq_without_update (&seq, this_seq);
3681 }
3682 }
3683
1163 FOR_EACH_VEC_ELT (split_stores, i, split_store) 3684 FOR_EACH_VEC_ELT (split_stores, i, split_store)
1164 { 3685 {
1165 unsigned HOST_WIDE_INT try_size = split_store->size; 3686 unsigned HOST_WIDE_INT try_size = split_store->size;
1166 unsigned HOST_WIDE_INT try_pos = split_store->bytepos; 3687 unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
3688 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
1167 unsigned HOST_WIDE_INT align = split_store->align; 3689 unsigned HOST_WIDE_INT align = split_store->align;
1168 tree offset_type = get_alias_type_for_stmts (split_store->orig_stmts); 3690 tree dest, src;
1169 location_t loc = get_location_for_stmts (split_store->orig_stmts); 3691 location_t loc;
1170 3692 if (split_store->orig)
1171 tree int_type = build_nonstandard_integer_type (try_size, UNSIGNED); 3693 {
1172 int_type = build_aligned_type (int_type, align); 3694 /* If there is just a single constituent store which covers
1173 tree dest = fold_build2 (MEM_REF, int_type, addr, 3695 the whole area, just reuse the lhs and rhs. */
1174 build_int_cst (offset_type, try_pos)); 3696 gimple *orig_stmt = split_store->orig_stores[0]->stmt;
1175 3697 dest = gimple_assign_lhs (orig_stmt);
1176 tree src = native_interpret_expr (int_type, 3698 src = gimple_assign_rhs1 (orig_stmt);
1177 group->val + try_pos - start_byte_pos, 3699 loc = gimple_location (orig_stmt);
1178 group->buf_size); 3700 }
3701 else
3702 {
3703 store_immediate_info *info;
3704 unsigned short clique, base;
3705 unsigned int k;
3706 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
3707 orig_stmts.safe_push (info->stmt);
3708 tree offset_type
3709 = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
3710 loc = get_location_for_stmts (orig_stmts);
3711 orig_stmts.truncate (0);
3712
3713 tree int_type = build_nonstandard_integer_type (try_size, UNSIGNED);
3714 int_type = build_aligned_type (int_type, align);
3715 dest = fold_build2 (MEM_REF, int_type, addr,
3716 build_int_cst (offset_type, try_pos));
3717 if (TREE_CODE (dest) == MEM_REF)
3718 {
3719 MR_DEPENDENCE_CLIQUE (dest) = clique;
3720 MR_DEPENDENCE_BASE (dest) = base;
3721 }
3722
3723 tree mask;
3724 if (bswap_res)
3725 mask = integer_zero_node;
3726 else
3727 mask = native_interpret_expr (int_type,
3728 group->mask + try_pos
3729 - start_byte_pos,
3730 group->buf_size);
3731
3732 tree ops[2];
3733 for (int j = 0;
3734 j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
3735 ++j)
3736 {
3737 store_operand_info &op = split_store->orig_stores[0]->ops[j];
3738 if (bswap_res)
3739 ops[j] = bswap_res;
3740 else if (op.base_addr)
3741 {
3742 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
3743 orig_stmts.safe_push (info->ops[j].stmt);
3744
3745 offset_type = get_alias_type_for_stmts (orig_stmts, true,
3746 &clique, &base);
3747 location_t load_loc = get_location_for_stmts (orig_stmts);
3748 orig_stmts.truncate (0);
3749
3750 unsigned HOST_WIDE_INT load_align = group->load_align[j];
3751 unsigned HOST_WIDE_INT align_bitpos
3752 = known_alignment (try_bitpos
3753 - split_store->orig_stores[0]->bitpos
3754 + op.bitpos);
3755 if (align_bitpos & (load_align - 1))
3756 load_align = least_bit_hwi (align_bitpos);
3757
3758 tree load_int_type
3759 = build_nonstandard_integer_type (try_size, UNSIGNED);
3760 load_int_type
3761 = build_aligned_type (load_int_type, load_align);
3762
3763 poly_uint64 load_pos
3764 = exact_div (try_bitpos
3765 - split_store->orig_stores[0]->bitpos
3766 + op.bitpos,
3767 BITS_PER_UNIT);
3768 ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
3769 build_int_cst (offset_type, load_pos));
3770 if (TREE_CODE (ops[j]) == MEM_REF)
3771 {
3772 MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
3773 MR_DEPENDENCE_BASE (ops[j]) = base;
3774 }
3775 if (!integer_zerop (mask))
3776 /* The load might load some bits (that will be masked off
3777 later on) uninitialized; avoid -W*uninitialized
3778 warnings in that case. */
3779 TREE_NO_WARNING (ops[j]) = 1;
3780
3781 stmt = gimple_build_assign (make_ssa_name (int_type),
3782 ops[j]);
3783 gimple_set_location (stmt, load_loc);
3784 if (gsi_bb (load_gsi[j]))
3785 {
3786 gimple_set_vuse (stmt, gimple_vuse (op.stmt));
3787 gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
3788 }
3789 else
3790 {
3791 gimple_set_vuse (stmt, new_vuse);
3792 gimple_seq_add_stmt_without_update (&seq, stmt);
3793 }
3794 ops[j] = gimple_assign_lhs (stmt);
3795 tree xor_mask;
3796 enum tree_code inv_op
3797 = invert_op (split_store, j, int_type, xor_mask);
3798 if (inv_op != NOP_EXPR)
3799 {
3800 stmt = gimple_build_assign (make_ssa_name (int_type),
3801 inv_op, ops[j], xor_mask);
3802 gimple_set_location (stmt, load_loc);
3803 ops[j] = gimple_assign_lhs (stmt);
3804
3805 if (gsi_bb (load_gsi[j]))
3806 gimple_seq_add_stmt_without_update (&load_seq[j],
3807 stmt);
3808 else
3809 gimple_seq_add_stmt_without_update (&seq, stmt);
3810 }
3811 }
3812 else
3813 ops[j] = native_interpret_expr (int_type,
3814 group->val + try_pos
3815 - start_byte_pos,
3816 group->buf_size);
3817 }
3818
3819 switch (split_store->orig_stores[0]->rhs_code)
3820 {
3821 case BIT_AND_EXPR:
3822 case BIT_IOR_EXPR:
3823 case BIT_XOR_EXPR:
3824 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
3825 {
3826 tree rhs1 = gimple_assign_rhs1 (info->stmt);
3827 orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
3828 }
3829 location_t bit_loc;
3830 bit_loc = get_location_for_stmts (orig_stmts);
3831 orig_stmts.truncate (0);
3832
3833 stmt
3834 = gimple_build_assign (make_ssa_name (int_type),
3835 split_store->orig_stores[0]->rhs_code,
3836 ops[0], ops[1]);
3837 gimple_set_location (stmt, bit_loc);
3838 /* If there is just one load and there is a separate
3839 load_seq[0], emit the bitwise op right after it. */
3840 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
3841 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
3842 /* Otherwise, if at least one load is in seq, we need to
3843 emit the bitwise op right before the store. If there
3844 are two loads and they are emitted somewhere else, it would
3845 be better to emit the bitwise op as early as possible;
3846 we don't track where that would be possible right now
3847 though. */
3848 else
3849 gimple_seq_add_stmt_without_update (&seq, stmt);
3850 src = gimple_assign_lhs (stmt);
3851 tree xor_mask;
3852 enum tree_code inv_op;
3853 inv_op = invert_op (split_store, 2, int_type, xor_mask);
3854 if (inv_op != NOP_EXPR)
3855 {
3856 stmt = gimple_build_assign (make_ssa_name (int_type),
3857 inv_op, src, xor_mask);
3858 gimple_set_location (stmt, bit_loc);
3859 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
3860 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
3861 else
3862 gimple_seq_add_stmt_without_update (&seq, stmt);
3863 src = gimple_assign_lhs (stmt);
3864 }
3865 break;
3866 case LROTATE_EXPR:
3867 case NOP_EXPR:
3868 src = ops[0];
3869 if (!is_gimple_val (src))
3870 {
3871 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
3872 src);
3873 gimple_seq_add_stmt_without_update (&seq, stmt);
3874 src = gimple_assign_lhs (stmt);
3875 }
3876 if (!useless_type_conversion_p (int_type, TREE_TYPE (src)))
3877 {
3878 stmt = gimple_build_assign (make_ssa_name (int_type),
3879 NOP_EXPR, src);
3880 gimple_seq_add_stmt_without_update (&seq, stmt);
3881 src = gimple_assign_lhs (stmt);
3882 }
3883 inv_op = invert_op (split_store, 2, int_type, xor_mask);
3884 if (inv_op != NOP_EXPR)
3885 {
3886 stmt = gimple_build_assign (make_ssa_name (int_type),
3887 inv_op, src, xor_mask);
3888 gimple_set_location (stmt, loc);
3889 gimple_seq_add_stmt_without_update (&seq, stmt);
3890 src = gimple_assign_lhs (stmt);
3891 }
3892 break;
3893 default:
3894 src = ops[0];
3895 break;
3896 }
3897
3898 /* If bit insertion is required, we use the source as an accumulator
3899 into which the successive bit-field values are manually inserted.
3900 FIXME: perhaps use BIT_INSERT_EXPR instead in some cases? */
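	  /* A concrete sketch of these steps (hypothetical bit-field layout,
	     little-endian): inserting a 3-bit value at bit offset 5 of a
	     16-bit split store gives start_gap = 5 and end_gap = 8; the
	     value is masked with 0x7 (or just converted when its type
	     already has precision <= 3), converted to the 16-bit integer
	     type, shifted left by 5 and IORed into SRC.  */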
3901 if (group->bit_insertion)
3902 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
3903 if (info->rhs_code == BIT_INSERT_EXPR
3904 && info->bitpos < try_bitpos + try_size
3905 && info->bitpos + info->bitsize > try_bitpos)
3906 {
3907 /* Mask, truncate, convert to final type, shift and ior into
3908 the accumulator. Note that every step can be a no-op. */
3909 const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos;
3910 const HOST_WIDE_INT end_gap
3911 = (try_bitpos + try_size) - (info->bitpos + info->bitsize);
3912 tree tem = info->ops[0].val;
3913 if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize)
3914 {
3915 tree bitfield_type
3916 = build_nonstandard_integer_type (info->bitsize,
3917 UNSIGNED);
3918 tem = gimple_convert (&seq, loc, bitfield_type, tem);
3919 }
3920 else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
3921 {
3922 const unsigned HOST_WIDE_INT imask
3923 = (HOST_WIDE_INT_1U << info->bitsize) - 1;
3924 tem = gimple_build (&seq, loc,
3925 BIT_AND_EXPR, TREE_TYPE (tem), tem,
3926 build_int_cst (TREE_TYPE (tem),
3927 imask));
3928 }
3929 const HOST_WIDE_INT shift
3930 = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
3931 if (shift < 0)
3932 tem = gimple_build (&seq, loc,
3933 RSHIFT_EXPR, TREE_TYPE (tem), tem,
3934 build_int_cst (NULL_TREE, -shift));
3935 tem = gimple_convert (&seq, loc, int_type, tem);
3936 if (shift > 0)
3937 tem = gimple_build (&seq, loc,
3938 LSHIFT_EXPR, int_type, tem,
3939 build_int_cst (NULL_TREE, shift));
3940 src = gimple_build (&seq, loc,
3941 BIT_IOR_EXPR, int_type, tem, src);
3942 }
3943
3944 if (!integer_zerop (mask))
3945 {
3946 tree tem = make_ssa_name (int_type);
3947 tree load_src = unshare_expr (dest);
3948 /* The load might load some or all bits uninitialized;
3949 avoid -W*uninitialized warnings in that case.
3950 As an optimization, it would be nice if, when all the bits
3951 are provably uninitialized (no stores at all yet, or the
3952 previous store was a CLOBBER), we could optimize away the
3953 load and replace it e.g. with 0. */
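	  /* For illustration (hypothetical mask): if MASK is 0xf00 for a
	     4-byte split store, i.e. bits 8-11 are not written by the
	     group, the emitted sequence is roughly
	       _1 = *dest;
	       _2 = _1 & 0xf00;
	       _3 = src & 0xfffff0ff;
	       *dest = _2 | _3;
	     with the masking of SRC folded into a constant when SRC is an
	     INTEGER_CST.  */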
3954 TREE_NO_WARNING (load_src) = 1;
3955 stmt = gimple_build_assign (tem, load_src);
3956 gimple_set_location (stmt, loc);
3957 gimple_set_vuse (stmt, new_vuse);
3958 gimple_seq_add_stmt_without_update (&seq, stmt);
3959
3960 /* FIXME: If there is a single chunk of zero bits in mask,
3961 perhaps use BIT_INSERT_EXPR instead? */
3962 stmt = gimple_build_assign (make_ssa_name (int_type),
3963 BIT_AND_EXPR, tem, mask);
3964 gimple_set_location (stmt, loc);
3965 gimple_seq_add_stmt_without_update (&seq, stmt);
3966 tem = gimple_assign_lhs (stmt);
3967
3968 if (TREE_CODE (src) == INTEGER_CST)
3969 src = wide_int_to_tree (int_type,
3970 wi::bit_and_not (wi::to_wide (src),
3971 wi::to_wide (mask)));
3972 else
3973 {
3974 tree nmask
3975 = wide_int_to_tree (int_type,
3976 wi::bit_not (wi::to_wide (mask)));
3977 stmt = gimple_build_assign (make_ssa_name (int_type),
3978 BIT_AND_EXPR, src, nmask);
3979 gimple_set_location (stmt, loc);
3980 gimple_seq_add_stmt_without_update (&seq, stmt);
3981 src = gimple_assign_lhs (stmt);
3982 }
3983 stmt = gimple_build_assign (make_ssa_name (int_type),
3984 BIT_IOR_EXPR, tem, src);
3985 gimple_set_location (stmt, loc);
3986 gimple_seq_add_stmt_without_update (&seq, stmt);
3987 src = gimple_assign_lhs (stmt);
3988 }
3989 }
1179 3990
1180 stmt = gimple_build_assign (dest, src); 3991 stmt = gimple_build_assign (dest, src);
1181 gimple_set_location (stmt, loc); 3992 gimple_set_location (stmt, loc);
1182 gimple_set_vuse (stmt, new_vuse); 3993 gimple_set_vuse (stmt, new_vuse);
1183 gimple_seq_add_stmt_without_update (&seq, stmt); 3994 gimple_seq_add_stmt_without_update (&seq, stmt);
1184 3995
1185 /* We didn't manage to reduce the number of statements. Bail out. */
1186 if (++num_stmts == orig_num_stmts)
1187 {
1188 if (dump_file && (dump_flags & TDF_DETAILS))
1189 {
1190 fprintf (dump_file, "Exceeded original number of stmts (%u)."
1191 " Not profitable to emit new sequence.\n",
1192 orig_num_stmts);
1193 }
1194 unsigned int ssa_count;
1195 tree ssa_name;
1196 /* Don't forget to cleanup the temporary SSA names. */
1197 FOR_EACH_VEC_ELT (new_ssa_names, ssa_count, ssa_name)
1198 release_ssa_name (ssa_name);
1199
1200 fail = true;
1201 break;
1202 }
1203
1204 tree new_vdef; 3996 tree new_vdef;
1205 if (i < split_stores.length () - 1) 3997 if (i < split_stores.length () - 1)
1206 { 3998 new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
1207 new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
1208 new_ssa_names.safe_push (new_vdef);
1209 }
1210 else 3999 else
1211 new_vdef = last_vdef; 4000 new_vdef = last_vdef;
1212 4001
1213 gimple_set_vdef (stmt, new_vdef); 4002 gimple_set_vdef (stmt, new_vdef);
1214 SSA_NAME_DEF_STMT (new_vdef) = stmt; 4003 SSA_NAME_DEF_STMT (new_vdef) = stmt;
1216 } 4005 }
1217 4006
1218 FOR_EACH_VEC_ELT (split_stores, i, split_store) 4007 FOR_EACH_VEC_ELT (split_stores, i, split_store)
1219 delete split_store; 4008 delete split_store;
1220 4009
1221 if (fail)
1222 return false;
1223
1224 gcc_assert (seq); 4010 gcc_assert (seq);
1225 if (dump_file) 4011 if (dump_file)
1226 { 4012 {
1227 fprintf (dump_file, 4013 fprintf (dump_file,
1228 "New sequence of %u stmts to replace old one of %u stmts\n", 4014 "New sequence of %u stores to replace old one of %u stores\n",
1229 num_stmts, orig_num_stmts); 4015 split_stores.length (), orig_num_stmts);
1230 if (dump_flags & TDF_DETAILS) 4016 if (dump_flags & TDF_DETAILS)
1231 print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS); 4017 print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
1232 } 4018 }
1233 gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT); 4019 gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
4020 for (int j = 0; j < 2; ++j)
4021 if (load_seq[j])
4022 gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);
1234 4023
1235 return true; 4024 return true;
1236 } 4025 }
1237 4026
1238 /* Process the merged_store_group objects created in the coalescing phase. 4027 /* Process the merged_store_group objects created in the coalescing phase.
1323 native_encode_expr accepts. */ 4112 native_encode_expr accepts. */
1324 4113
1325 static bool 4114 static bool
1326 rhs_valid_for_store_merging_p (tree rhs) 4115 rhs_valid_for_store_merging_p (tree rhs)
1327 { 4116 {
1328 return native_encode_expr (rhs, NULL, 4117 unsigned HOST_WIDE_INT size;
1329 GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs)))) != 0; 4118 return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
4119 && native_encode_expr (rhs, NULL, size) != 0);
4120 }
4121
4122 /* If MEM is a memory reference usable for store merging (either as
4123 store destination or for loads), return the non-NULL base_addr
4124 and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
4125 Otherwise return NULL; *PBITPOS should still be valid even in that
4126 case. */
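/* For example (an illustrative sketch with hypothetical types): for the
   bit-field store in
     struct S { unsigned int a : 1, b : 7; };
     void f (struct S *x) { x->b = 3; }
   this returns the pointer X as the base, with *PBITSIZE = 7 and
   *PBITPOS = 1 (the exact numbers depend on the target's bit-field
   layout), and *PBITREGION_START/*PBITREGION_END describing the wider
   region that a read-modify-write of the bit-field may touch.  */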
4127
4128 static tree
4129 mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
4130 poly_uint64 *pbitpos,
4131 poly_uint64 *pbitregion_start,
4132 poly_uint64 *pbitregion_end)
4133 {
4134 poly_int64 bitsize, bitpos;
4135 poly_uint64 bitregion_start = 0, bitregion_end = 0;
4136 machine_mode mode;
4137 int unsignedp = 0, reversep = 0, volatilep = 0;
4138 tree offset;
4139 tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
4140 &unsignedp, &reversep, &volatilep);
4141 *pbitsize = bitsize;
4142 if (known_eq (bitsize, 0))
4143 return NULL_TREE;
4144
4145 if (TREE_CODE (mem) == COMPONENT_REF
4146 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
4147 {
4148 get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
4149 if (maybe_ne (bitregion_end, 0U))
4150 bitregion_end += 1;
4151 }
4152
4153 if (reversep)
4154 return NULL_TREE;
4155
4156 /* We do not want to rewrite TARGET_MEM_REFs. */
4157 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
4158 return NULL_TREE;
4159 /* In some cases get_inner_reference may return a
4160 MEM_REF [ptr + byteoffset]. For the purposes of this pass
4161 canonicalize the base_addr to MEM_REF [ptr] and take
4162 byteoffset into account in the bitpos. This occurs in
4163 PR 23684 and this way we can catch more chains. */
4164 else if (TREE_CODE (base_addr) == MEM_REF)
4165 {
4166 poly_offset_int byte_off = mem_ref_offset (base_addr);
4167 poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
4168 bit_off += bitpos;
4169 if (known_ge (bit_off, 0) && bit_off.to_shwi (&bitpos))
4170 {
4171 if (maybe_ne (bitregion_end, 0U))
4172 {
4173 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4174 bit_off += bitregion_start;
4175 if (bit_off.to_uhwi (&bitregion_start))
4176 {
4177 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4178 bit_off += bitregion_end;
4179 if (!bit_off.to_uhwi (&bitregion_end))
4180 bitregion_end = 0;
4181 }
4182 else
4183 bitregion_end = 0;
4184 }
4185 }
4186 else
4187 return NULL_TREE;
4188 base_addr = TREE_OPERAND (base_addr, 0);
4189 }
4190 /* get_inner_reference returns the base object; get at its
4191 address now. */
4192 else
4193 {
4194 if (maybe_lt (bitpos, 0))
4195 return NULL_TREE;
4196 base_addr = build_fold_addr_expr (base_addr);
4197 }
4198
4199 if (known_eq (bitregion_end, 0U))
4200 {
4201 bitregion_start = round_down_to_byte_boundary (bitpos);
4202 bitregion_end = bitpos;
4203 bitregion_end = round_up_to_byte_boundary (bitregion_end + bitsize);
4204 }
4205
4206 if (offset != NULL_TREE)
4207 {
4208 /* If the access has a variable offset then a base decl has to be
4209 address-taken to be able to emit pointer-based stores to it.
4210 ??? We might be able to get away with re-using the original
4211 base up to the first variable part and then wrapping that inside
4212 a BIT_FIELD_REF. */
4213 tree base = get_base_address (base_addr);
4214 if (! base
4215 || (DECL_P (base) && ! TREE_ADDRESSABLE (base)))
4216 return NULL_TREE;
4217
4218 base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
4219 base_addr, offset);
4220 }
4221
4222 *pbitsize = bitsize;
4223 *pbitpos = bitpos;
4224 *pbitregion_start = bitregion_start;
4225 *pbitregion_end = bitregion_end;
4226 return base_addr;
4227 }
4228
4229 /* Return true if STMT is a load that can be used for store merging.
4230 In that case fill in *OP. BITSIZE, BITPOS, BITREGION_START and
4231 BITREGION_END are properties of the corresponding store. */
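/* A small sketch of stores whose right-hand sides this accepts
   (hypothetical names):
     _1 = q[1];  p[1] = _1;                 plain load, *OP describes q[1]
     _2 = q[2];  _3 = ~_2;  p[2] = _3;      load under BIT_NOT_EXPR,
                                            op->bit_not_p is set
   while a double inversion feeding the store is rejected below.  */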
4232
4233 static bool
4234 handled_load (gimple *stmt, store_operand_info *op,
4235 poly_uint64 bitsize, poly_uint64 bitpos,
4236 poly_uint64 bitregion_start, poly_uint64 bitregion_end)
4237 {
4238 if (!is_gimple_assign (stmt))
4239 return false;
4240 if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
4241 {
4242 tree rhs1 = gimple_assign_rhs1 (stmt);
4243 if (TREE_CODE (rhs1) == SSA_NAME
4244 && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
4245 bitregion_start, bitregion_end))
4246 {
4247 /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
4248 been optimized earlier, but if allowed here, would confuse the
4249 multiple uses counting. */
4250 if (op->bit_not_p)
4251 return false;
4252 op->bit_not_p = !op->bit_not_p;
4253 return true;
4254 }
4255 return false;
4256 }
4257 if (gimple_vuse (stmt)
4258 && gimple_assign_load_p (stmt)
4259 && !stmt_can_throw_internal (cfun, stmt)
4260 && !gimple_has_volatile_ops (stmt))
4261 {
4262 tree mem = gimple_assign_rhs1 (stmt);
4263 op->base_addr
4264 = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
4265 &op->bitregion_start,
4266 &op->bitregion_end);
4267 if (op->base_addr != NULL_TREE
4268 && known_eq (op->bitsize, bitsize)
4269 && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
4270 && known_ge (op->bitpos - op->bitregion_start,
4271 bitpos - bitregion_start)
4272 && known_ge (op->bitregion_end - op->bitpos,
4273 bitregion_end - bitpos))
4274 {
4275 op->stmt = stmt;
4276 op->val = mem;
4277 op->bit_not_p = false;
4278 return true;
4279 }
4280 }
4281 return false;
4282 }
4283
4284 /* Record the store STMT for store merging optimization if it can be
4285 optimized. */
4286
4287 void
4288 pass_store_merging::process_store (gimple *stmt)
4289 {
4290 tree lhs = gimple_assign_lhs (stmt);
4291 tree rhs = gimple_assign_rhs1 (stmt);
4292 poly_uint64 bitsize, bitpos;
4293 poly_uint64 bitregion_start, bitregion_end;
4294 tree base_addr
4295 = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
4296 &bitregion_start, &bitregion_end);
4297 if (known_eq (bitsize, 0U))
4298 return;
4299
4300 bool invalid = (base_addr == NULL_TREE
4301 || (maybe_gt (bitsize,
4302 (unsigned int) MAX_BITSIZE_MODE_ANY_INT)
4303 && (TREE_CODE (rhs) != INTEGER_CST)));
4304 enum tree_code rhs_code = ERROR_MARK;
4305 bool bit_not_p = false;
4306 struct symbolic_number n;
4307 gimple *ins_stmt = NULL;
4308 store_operand_info ops[2];
4309 if (invalid)
4310 ;
4311 else if (rhs_valid_for_store_merging_p (rhs))
4312 {
4313 rhs_code = INTEGER_CST;
4314 ops[0].val = rhs;
4315 }
4316 else if (TREE_CODE (rhs) != SSA_NAME)
4317 invalid = true;
4318 else
4319 {
4320 gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
4321 if (!is_gimple_assign (def_stmt))
4322 invalid = true;
4323 else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
4324 bitregion_start, bitregion_end))
4325 rhs_code = MEM_REF;
4326 else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
4327 {
4328 tree rhs1 = gimple_assign_rhs1 (def_stmt);
4329 if (TREE_CODE (rhs1) == SSA_NAME
4330 && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
4331 {
4332 bit_not_p = true;
4333 def_stmt = SSA_NAME_DEF_STMT (rhs1);
4334 }
4335 }
4336
4337 if (rhs_code == ERROR_MARK && !invalid)
4338 switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
4339 {
4340 case BIT_AND_EXPR:
4341 case BIT_IOR_EXPR:
4342 case BIT_XOR_EXPR:
4343 tree rhs1, rhs2;
4344 rhs1 = gimple_assign_rhs1 (def_stmt);
4345 rhs2 = gimple_assign_rhs2 (def_stmt);
4346 invalid = true;
4347 if (TREE_CODE (rhs1) != SSA_NAME)
4348 break;
4349 def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
4350 if (!is_gimple_assign (def_stmt1)
4351 || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
4352 bitregion_start, bitregion_end))
4353 break;
4354 if (rhs_valid_for_store_merging_p (rhs2))
4355 ops[1].val = rhs2;
4356 else if (TREE_CODE (rhs2) != SSA_NAME)
4357 break;
4358 else
4359 {
4360 def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
4361 if (!is_gimple_assign (def_stmt2))
4362 break;
4363 else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
4364 bitregion_start, bitregion_end))
4365 break;
4366 }
4367 invalid = false;
4368 break;
4369 default:
4370 invalid = true;
4371 break;
4372 }
4373
4374 unsigned HOST_WIDE_INT const_bitsize;
4375 if (bitsize.is_constant (&const_bitsize)
4376 && (const_bitsize % BITS_PER_UNIT) == 0
4377 && const_bitsize <= 64
4378 && multiple_p (bitpos, BITS_PER_UNIT))
4379 {
4380 ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
4381 if (ins_stmt)
4382 {
4383 uint64_t nn = n.n;
4384 for (unsigned HOST_WIDE_INT i = 0;
4385 i < const_bitsize;
4386 i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
4387 if ((nn & MARKER_MASK) == 0
4388 || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
4389 {
4390 ins_stmt = NULL;
4391 break;
4392 }
4393 if (ins_stmt)
4394 {
4395 if (invalid)
4396 {
4397 rhs_code = LROTATE_EXPR;
4398 ops[0].base_addr = NULL_TREE;
4399 ops[1].base_addr = NULL_TREE;
4400 }
4401 invalid = false;
4402 }
4403 }
4404 }
4405
4406 if (invalid
4407 && bitsize.is_constant (&const_bitsize)
4408 && ((const_bitsize % BITS_PER_UNIT) != 0
4409 || !multiple_p (bitpos, BITS_PER_UNIT))
4410 && const_bitsize <= 64)
4411 {
4412 /* Bypass a conversion to the bit-field type. */
4413 if (!bit_not_p
4414 && is_gimple_assign (def_stmt)
4415 && CONVERT_EXPR_CODE_P (rhs_code))
4416 {
4417 tree rhs1 = gimple_assign_rhs1 (def_stmt);
4418 if (TREE_CODE (rhs1) == SSA_NAME
4419 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
4420 rhs = rhs1;
4421 }
4422 rhs_code = BIT_INSERT_EXPR;
4423 bit_not_p = false;
4424 ops[0].val = rhs;
4425 ops[0].base_addr = NULL_TREE;
4426 ops[1].base_addr = NULL_TREE;
4427 invalid = false;
4428 }
4429 }
4430
4431 unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
4432 unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
4433 if (invalid
4434 || !bitsize.is_constant (&const_bitsize)
4435 || !bitpos.is_constant (&const_bitpos)
4436 || !bitregion_start.is_constant (&const_bitregion_start)
4437 || !bitregion_end.is_constant (&const_bitregion_end))
4438 {
4439 terminate_all_aliasing_chains (NULL, stmt);
4440 return;
4441 }
4442
4443 if (!ins_stmt)
4444 memset (&n, 0, sizeof (n));
4445
4446 struct imm_store_chain_info **chain_info = NULL;
4447 if (base_addr)
4448 chain_info = m_stores.get (base_addr);
4449
4450 store_immediate_info *info;
4451 if (chain_info)
4452 {
4453 unsigned int ord = (*chain_info)->m_store_info.length ();
4454 info = new store_immediate_info (const_bitsize, const_bitpos,
4455 const_bitregion_start,
4456 const_bitregion_end,
4457 stmt, ord, rhs_code, n, ins_stmt,
4458 bit_not_p, ops[0], ops[1]);
4459 if (dump_file && (dump_flags & TDF_DETAILS))
4460 {
4461 fprintf (dump_file, "Recording immediate store from stmt:\n");
4462 print_gimple_stmt (dump_file, stmt, 0);
4463 }
4464 (*chain_info)->m_store_info.safe_push (info);
4465 terminate_all_aliasing_chains (chain_info, stmt);
4466 /* If we reach the limit of stores to merge in a chain, terminate and
4467 process the chain now. */
4468 if ((*chain_info)->m_store_info.length ()
4469 == (unsigned int) PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE))
4470 {
4471 if (dump_file && (dump_flags & TDF_DETAILS))
4472 fprintf (dump_file,
4473 "Reached maximum number of statements to merge:\n");
4474 terminate_and_release_chain (*chain_info);
4475 }
4476 return;
4477 }
4478
4479 /* Store aliases any existing chain? */
4480 terminate_all_aliasing_chains (NULL, stmt);
4481 /* Start a new chain. */
4482 struct imm_store_chain_info *new_chain
4483 = new imm_store_chain_info (m_stores_head, base_addr);
4484 info = new store_immediate_info (const_bitsize, const_bitpos,
4485 const_bitregion_start,
4486 const_bitregion_end,
4487 stmt, 0, rhs_code, n, ins_stmt,
4488 bit_not_p, ops[0], ops[1]);
4489 new_chain->m_store_info.safe_push (info);
4490 m_stores.put (base_addr, new_chain);
4491 if (dump_file && (dump_flags & TDF_DETAILS))
4492 {
4493 fprintf (dump_file, "Starting new chain with statement:\n");
4494 print_gimple_stmt (dump_file, stmt, 0);
4495 fprintf (dump_file, "The base object is:\n");
4496 print_generic_expr (dump_file, base_addr);
4497 fprintf (dump_file, "\n");
4498 }
1330 } 4499 }
1331 4500
1332 /* Entry point for the pass. Go over each basic block recording chains of 4501 /* Entry point for the pass. Go over each basic block recording chains of
1333 immediate stores. Upon encountering a terminating statement (as defined 4502 immediate stores. Upon encountering a terminating statement (as defined
1334 by stmt_terminates_chain_p) process the recorded stores and emit the widened 4503 by stmt_terminates_chain_p) process the recorded stores and emit the widened
1335 variants. */ 4504 variants. */
1336 4505
1337 unsigned int 4506 unsigned int
1338 pass_store_merging::execute (function *fun) 4507 pass_store_merging::execute (function *fun)
1339 { 4508 {
1340 basic_block bb; 4509 basic_block bb;
1341 hash_set<gimple *> orig_stmts; 4510 hash_set<gimple *> orig_stmts;
4511
4512 calculate_dominance_info (CDI_DOMINATORS);
1342 4513
1343 FOR_EACH_BB_FN (bb, fun) 4514 FOR_EACH_BB_FN (bb, fun)
1344 { 4515 {
1345 gimple_stmt_iterator gsi; 4516 gimple_stmt_iterator gsi;
1346 unsigned HOST_WIDE_INT num_statements = 0; 4517 unsigned HOST_WIDE_INT num_statements = 0;
1378 terminate_and_process_all_chains (); 4549 terminate_and_process_all_chains ();
1379 continue; 4550 continue;
1380 } 4551 }
1381 4552
1382 if (gimple_assign_single_p (stmt) && gimple_vdef (stmt) 4553 if (gimple_assign_single_p (stmt) && gimple_vdef (stmt)
1383 && !stmt_can_throw_internal (stmt) 4554 && !stmt_can_throw_internal (cfun, stmt)
1384 && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt))) 4555 && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt)))
1385 { 4556 process_store (stmt);
1386 tree lhs = gimple_assign_lhs (stmt); 4557 else
1387 tree rhs = gimple_assign_rhs1 (stmt); 4558 terminate_all_aliasing_chains (NULL, stmt);
1388
1389 HOST_WIDE_INT bitsize, bitpos;
1390 machine_mode mode;
1391 int unsignedp = 0, reversep = 0, volatilep = 0;
1392 tree offset, base_addr;
1393 base_addr
1394 = get_inner_reference (lhs, &bitsize, &bitpos, &offset, &mode,
1395 &unsignedp, &reversep, &volatilep);
1396 /* As a future enhancement we could handle stores with the same
1397 base and offset. */
1398 bool invalid = reversep
1399 || ((bitsize > MAX_BITSIZE_MODE_ANY_INT)
1400 && (TREE_CODE (rhs) != INTEGER_CST))
1401 || !rhs_valid_for_store_merging_p (rhs);
1402
1403 /* We do not want to rewrite TARGET_MEM_REFs. */
1404 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
1405 invalid = true;
1406 /* In some cases get_inner_reference may return a
1407 MEM_REF [ptr + byteoffset]. For the purposes of this pass
1408 canonicalize the base_addr to MEM_REF [ptr] and take
1409 byteoffset into account in the bitpos. This occurs in
1410 PR 23684 and this way we can catch more chains. */
1411 else if (TREE_CODE (base_addr) == MEM_REF)
1412 {
1413 offset_int bit_off, byte_off = mem_ref_offset (base_addr);
1414 bit_off = byte_off << LOG2_BITS_PER_UNIT;
1415 bit_off += bitpos;
1416 if (!wi::neg_p (bit_off) && wi::fits_shwi_p (bit_off))
1417 bitpos = bit_off.to_shwi ();
1418 else
1419 invalid = true;
1420 base_addr = TREE_OPERAND (base_addr, 0);
1421 }
1422 /* get_inner_reference returns the base object, get at its
1423 address now. */
1424 else
1425 {
1426 if (bitpos < 0)
1427 invalid = true;
1428 base_addr = build_fold_addr_expr (base_addr);
1429 }
1430
1431 if (! invalid
1432 && offset != NULL_TREE)
1433 {
1434 /* If the access is variable offset then a base
1435 decl has to be address-taken to be able to
1436 emit pointer-based stores to it.
1437 ??? We might be able to get away with
1438 re-using the original base up to the first
1439 variable part and then wrapping that inside
1440 a BIT_FIELD_REF. */
1441 tree base = get_base_address (base_addr);
1442 if (! base
1443 || (DECL_P (base)
1444 && ! TREE_ADDRESSABLE (base)))
1445 invalid = true;
1446 else
1447 base_addr = build2 (POINTER_PLUS_EXPR,
1448 TREE_TYPE (base_addr),
1449 base_addr, offset);
1450 }
1451
1452 struct imm_store_chain_info **chain_info
1453 = m_stores.get (base_addr);
1454
1455 if (!invalid)
1456 {
1457 store_immediate_info *info;
1458 if (chain_info)
1459 {
1460 info = new store_immediate_info (
1461 bitsize, bitpos, stmt,
1462 (*chain_info)->m_store_info.length ());
1463 if (dump_file && (dump_flags & TDF_DETAILS))
1464 {
1465 fprintf (dump_file,
1466 "Recording immediate store from stmt:\n");
1467 print_gimple_stmt (dump_file, stmt, 0);
1468 }
1469 (*chain_info)->m_store_info.safe_push (info);
1470 /* If we reach the limit of stores to merge in a chain
1471 terminate and process the chain now. */
1472 if ((*chain_info)->m_store_info.length ()
1473 == (unsigned int)
1474 PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE))
1475 {
1476 if (dump_file && (dump_flags & TDF_DETAILS))
1477 fprintf (dump_file,
1478 "Reached maximum number of statements"
1479 " to merge:\n");
1480 terminate_and_release_chain (*chain_info);
1481 }
1482 continue;
1483 }
1484
1485 /* Store aliases any existing chain? */
1486 terminate_all_aliasing_chains (chain_info, false, stmt);
1487 /* Start a new chain. */
1488 struct imm_store_chain_info *new_chain
1489 = new imm_store_chain_info (m_stores_head, base_addr);
1490 info = new store_immediate_info (bitsize, bitpos,
1491 stmt, 0);
1492 new_chain->m_store_info.safe_push (info);
1493 m_stores.put (base_addr, new_chain);
1494 if (dump_file && (dump_flags & TDF_DETAILS))
1495 {
1496 fprintf (dump_file,
1497 "Starting new chain with statement:\n");
1498 print_gimple_stmt (dump_file, stmt, 0);
1499 fprintf (dump_file, "The base object is:\n");
1500 print_generic_expr (dump_file, base_addr);
1501 fprintf (dump_file, "\n");
1502 }
1503 }
1504 else
1505 terminate_all_aliasing_chains (chain_info,
1506 offset != NULL_TREE, stmt);
1507
1508 continue;
1509 }
1510
1511 terminate_all_aliasing_chains (NULL, false, stmt);
1512 } 4559 }
1513 terminate_and_process_all_chains (); 4560 terminate_and_process_all_chains ();
1514 } 4561 }
1515 return 0; 4562 return 0;
1516 } 4563 }