comparison gcc/expmed.c @ 131:84e7813d76e9

gcc-8.2
author mir3636
date Thu, 25 Oct 2018 07:37:49 +0900
parents 04ced10e8804
children 1830386684a0
111:04ced10e8804 131:84e7813d76e9
1 /* Medium-level subroutines: convert bit-field store and extract 1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions. 2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987-2017 Free Software Foundation, Inc. 3 Copyright (C) 1987-2018 Free Software Foundation, Inc.
4 4
5 This file is part of GCC. 5 This file is part of GCC.
6 6
7 GCC is free software; you can redistribute it and/or modify it under 7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free 8 the terms of the GNU General Public License as published by the Free
38 #include "stor-layout.h" 38 #include "stor-layout.h"
39 #include "dojump.h" 39 #include "dojump.h"
40 #include "explow.h" 40 #include "explow.h"
41 #include "expr.h" 41 #include "expr.h"
42 #include "langhooks.h" 42 #include "langhooks.h"
43 #include "tree-vector-builder.h"
43 44
44 struct target_expmed default_target_expmed; 45 struct target_expmed default_target_expmed;
45 #if SWITCHABLE_TARGET 46 #if SWITCHABLE_TARGET
46 struct target_expmed *this_target_expmed = &default_target_expmed; 47 struct target_expmed *this_target_expmed = &default_target_expmed;
47 #endif 48 #endif
48 49
50 static bool store_integral_bit_field (rtx, opt_scalar_int_mode,
51 unsigned HOST_WIDE_INT,
52 unsigned HOST_WIDE_INT,
53 poly_uint64, poly_uint64,
54 machine_mode, rtx, bool, bool);
49 static void store_fixed_bit_field (rtx, opt_scalar_int_mode, 55 static void store_fixed_bit_field (rtx, opt_scalar_int_mode,
50 unsigned HOST_WIDE_INT, 56 unsigned HOST_WIDE_INT,
51 unsigned HOST_WIDE_INT, 57 unsigned HOST_WIDE_INT,
52 unsigned HOST_WIDE_INT, 58 poly_uint64, poly_uint64,
53 unsigned HOST_WIDE_INT,
54 rtx, scalar_int_mode, bool); 59 rtx, scalar_int_mode, bool);
55 static void store_fixed_bit_field_1 (rtx, scalar_int_mode, 60 static void store_fixed_bit_field_1 (rtx, scalar_int_mode,
56 unsigned HOST_WIDE_INT, 61 unsigned HOST_WIDE_INT,
57 unsigned HOST_WIDE_INT, 62 unsigned HOST_WIDE_INT,
58 rtx, scalar_int_mode, bool); 63 rtx, scalar_int_mode, bool);
59 static void store_split_bit_field (rtx, opt_scalar_int_mode, 64 static void store_split_bit_field (rtx, opt_scalar_int_mode,
60 unsigned HOST_WIDE_INT, 65 unsigned HOST_WIDE_INT,
61 unsigned HOST_WIDE_INT, 66 unsigned HOST_WIDE_INT,
62 unsigned HOST_WIDE_INT, 67 poly_uint64, poly_uint64,
63 unsigned HOST_WIDE_INT,
64 rtx, scalar_int_mode, bool); 68 rtx, scalar_int_mode, bool);
69 static rtx extract_integral_bit_field (rtx, opt_scalar_int_mode,
70 unsigned HOST_WIDE_INT,
71 unsigned HOST_WIDE_INT, int, rtx,
72 machine_mode, machine_mode, bool, bool);
65 static rtx extract_fixed_bit_field (machine_mode, rtx, opt_scalar_int_mode, 73 static rtx extract_fixed_bit_field (machine_mode, rtx, opt_scalar_int_mode,
66 unsigned HOST_WIDE_INT, 74 unsigned HOST_WIDE_INT,
67 unsigned HOST_WIDE_INT, rtx, int, bool); 75 unsigned HOST_WIDE_INT, rtx, int, bool);
68 static rtx extract_fixed_bit_field_1 (machine_mode, rtx, scalar_int_mode, 76 static rtx extract_fixed_bit_field_1 (machine_mode, rtx, scalar_int_mode,
69 unsigned HOST_WIDE_INT, 77 unsigned HOST_WIDE_INT,
220 && GET_MODE_WIDER_MODE (int_mode_to).exists (&wider_mode)) 228 && GET_MODE_WIDER_MODE (int_mode_to).exists (&wider_mode))
221 { 229 {
222 PUT_MODE (all->zext, wider_mode); 230 PUT_MODE (all->zext, wider_mode);
223 PUT_MODE (all->wide_mult, wider_mode); 231 PUT_MODE (all->wide_mult, wider_mode);
224 PUT_MODE (all->wide_lshr, wider_mode); 232 PUT_MODE (all->wide_lshr, wider_mode);
225 XEXP (all->wide_lshr, 1) = GEN_INT (mode_bitsize); 233 XEXP (all->wide_lshr, 1)
234 = gen_int_shift_amount (wider_mode, mode_bitsize);
226 235
227 set_mul_widen_cost (speed, wider_mode, 236 set_mul_widen_cost (speed, wider_mode,
228 set_src_cost (all->wide_mult, wider_mode, speed)); 237 set_src_cost (all->wide_mult, wider_mode, speed));
229 set_mul_highpart_cost (speed, int_mode_to, 238 set_mul_highpart_cost (speed, int_mode_to,
230 set_src_cost (all->wide_trunc, 239 set_src_cost (all->wide_trunc,
458 467
459 static rtx 468 static rtx
460 adjust_bit_field_mem_for_reg (enum extraction_pattern pattern, 469 adjust_bit_field_mem_for_reg (enum extraction_pattern pattern,
461 rtx op0, HOST_WIDE_INT bitsize, 470 rtx op0, HOST_WIDE_INT bitsize,
462 HOST_WIDE_INT bitnum, 471 HOST_WIDE_INT bitnum,
463 unsigned HOST_WIDE_INT bitregion_start, 472 poly_uint64 bitregion_start,
464 unsigned HOST_WIDE_INT bitregion_end, 473 poly_uint64 bitregion_end,
465 machine_mode fieldmode, 474 machine_mode fieldmode,
466 unsigned HOST_WIDE_INT *new_bitnum) 475 unsigned HOST_WIDE_INT *new_bitnum)
467 { 476 {
468 bit_field_mode_iterator iter (bitsize, bitnum, bitregion_start, 477 bit_field_mode_iterator iter (bitsize, bitnum, bitregion_start,
469 bitregion_end, MEM_ALIGN (op0), 478 bitregion_end, MEM_ALIGN (op0),
500 /* Return true if a bitfield of size BITSIZE at bit number BITNUM within 509 /* Return true if a bitfield of size BITSIZE at bit number BITNUM within
501 a structure of mode STRUCT_MODE represents a lowpart subreg. The subreg 510 a structure of mode STRUCT_MODE represents a lowpart subreg. The subreg
502 offset is then BITNUM / BITS_PER_UNIT. */ 511 offset is then BITNUM / BITS_PER_UNIT. */
503 512
504 static bool 513 static bool
505 lowpart_bit_field_p (unsigned HOST_WIDE_INT bitnum, 514 lowpart_bit_field_p (poly_uint64 bitnum, poly_uint64 bitsize,
506 unsigned HOST_WIDE_INT bitsize,
507 machine_mode struct_mode) 515 machine_mode struct_mode)
508 { 516 {
517 poly_uint64 regsize = REGMODE_NATURAL_SIZE (struct_mode);
509 if (BYTES_BIG_ENDIAN) 518 if (BYTES_BIG_ENDIAN)
510 return (bitnum % BITS_PER_UNIT == 0 519 return (multiple_p (bitnum, BITS_PER_UNIT)
511 && (bitnum + bitsize == GET_MODE_BITSIZE (struct_mode) 520 && (known_eq (bitnum + bitsize, GET_MODE_BITSIZE (struct_mode))
512 || (bitnum + bitsize) % BITS_PER_WORD == 0)); 521 || multiple_p (bitnum + bitsize,
522 regsize * BITS_PER_UNIT)));
513 else 523 else
514 return bitnum % BITS_PER_WORD == 0; 524 return multiple_p (bitnum, regsize * BITS_PER_UNIT);
515 } 525 }
516 526
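The rewritten lowpart_bit_field_p above swaps the % / == tests for the poly-int predicates multiple_p and known_eq and measures alignment against REGMODE_NATURAL_SIZE rather than BITS_PER_WORD. For compile-time-constant operands those predicates collapse to the old arithmetic; the standalone sketch below shows what the little-endian arm is asking (function name and values are illustrative, plain integers stand in for poly_uint64):

#include <cassert>
#include <cstdint>

/* Little-endian branch: is BITNUM a whole number of natural registers
   into the structure?  multiple_p (bitnum, regsize_bits) reduces to this
   remainder test when both operands are constants.  */
static bool
lowpart_le_sketch (uint64_t bitnum, uint64_t regsize_bytes)
{
  uint64_t regsize_bits = regsize_bytes * 8;  /* regsize * BITS_PER_UNIT */
  return bitnum % regsize_bits == 0;
}

int
main ()
{
  assert (lowpart_le_sketch (0, 8));    /* start of the first register */
  assert (lowpart_le_sketch (128, 8));  /* start of the third register */
  assert (!lowpart_le_sketch (40, 8));  /* mid-register: not a lowpart */
  return 0;
}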
517 /* Return true if -fstrict-volatile-bitfields applies to an access of OP0 527 /* Return true if -fstrict-volatile-bitfields applies to an access of OP0
518 containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE. 528 containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.
519 Return false if the access would touch memory outside the range 529 Return false if the access would touch memory outside the range
522 532
523 static bool 533 static bool
524 strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize, 534 strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
525 unsigned HOST_WIDE_INT bitnum, 535 unsigned HOST_WIDE_INT bitnum,
526 scalar_int_mode fieldmode, 536 scalar_int_mode fieldmode,
527 unsigned HOST_WIDE_INT bitregion_start, 537 poly_uint64 bitregion_start,
528 unsigned HOST_WIDE_INT bitregion_end) 538 poly_uint64 bitregion_end)
529 { 539 {
530 unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode); 540 unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);
531 541
532 /* -fstrict-volatile-bitfields must be enabled and we must have a 542 /* -fstrict-volatile-bitfields must be enabled and we must have a
533 volatile MEM. */ 543 volatile MEM. */
550 touch anything after the end of the structure. */ 560 touch anything after the end of the structure. */
551 if (MEM_ALIGN (op0) < modesize) 561 if (MEM_ALIGN (op0) < modesize)
552 return false; 562 return false;
553 563
554 /* Check for cases where the C++ memory model applies. */ 564 /* Check for cases where the C++ memory model applies. */
555 if (bitregion_end != 0 565 if (maybe_ne (bitregion_end, 0U)
556 && (bitnum - bitnum % modesize < bitregion_start 566 && (maybe_lt (bitnum - bitnum % modesize, bitregion_start)
557 || bitnum - bitnum % modesize + modesize - 1 > bitregion_end)) 567 || maybe_gt (bitnum - bitnum % modesize + modesize - 1,
568 bitregion_end)))
558 return false; 569 return false;
559 570
560 return true; 571 return true;
561 } 572 }
562 573
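strict_volatile_bitfield_p now takes the bit region as poly_uint64 and phrases the C++ memory-model check with maybe_ne/maybe_lt/maybe_gt, i.e. "could the relation hold for some runtime vector length"; for constant inputs these are exactly the old !=, < and > tests. A standalone sketch of the window-versus-region check with sample constants (names and numbers are illustrative only):

#include <cassert>
#include <cstdint>

/* Reject the access if the fieldmode-sized window around BITNUM would
   leak outside [region_start, region_end]; mirrors the maybe_lt /
   maybe_gt tests above for constant values.  */
static bool
window_within_region (uint64_t bitnum, uint64_t modesize,
                      uint64_t region_start, uint64_t region_end)
{
  uint64_t window_start = bitnum - bitnum % modesize;
  uint64_t window_end = window_start + modesize - 1;
  return window_start >= region_start && window_end <= region_end;
}

int
main ()
{
  /* A 32-bit window at bit 40 spans bits 32..63.  */
  assert (window_within_region (40, 32, 0, 63));
  assert (!window_within_region (40, 32, 0, 47));   /* would touch 48..63 */
  assert (!window_within_region (40, 32, 34, 63));  /* would touch 32..33 */
  return 0;
}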
563 /* Return true if OP is a memory and if a bitfield of size BITSIZE at 574 /* Return true if OP is a memory and if a bitfield of size BITSIZE at
564 bit number BITNUM can be treated as a simple value of mode MODE. */ 575 bit number BITNUM can be treated as a simple value of mode MODE.
576 Store the byte offset in *BYTENUM if so. */
565 577
566 static bool 578 static bool
567 simple_mem_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize, 579 simple_mem_bitfield_p (rtx op0, poly_uint64 bitsize, poly_uint64 bitnum,
568 unsigned HOST_WIDE_INT bitnum, machine_mode mode) 580 machine_mode mode, poly_uint64 *bytenum)
569 { 581 {
570 return (MEM_P (op0) 582 return (MEM_P (op0)
571 && bitnum % BITS_PER_UNIT == 0 583 && multiple_p (bitnum, BITS_PER_UNIT, bytenum)
572 && bitsize == GET_MODE_BITSIZE (mode) 584 && known_eq (bitsize, GET_MODE_BITSIZE (mode))
573 && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (op0)) 585 && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (op0))
574 || (bitnum % GET_MODE_ALIGNMENT (mode) == 0 586 || (multiple_p (bitnum, GET_MODE_ALIGNMENT (mode))
575 && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode)))); 587 && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode))));
576 } 588 }
577 589
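simple_mem_bitfield_p now reports the byte offset through the new *BYTENUM out-parameter: multiple_p (bitnum, BITS_PER_UNIT, bytenum) both checks byte alignment and performs the division the callers previously repeated as bitnum / BITS_PER_UNIT. A small stand-in for that combined test-and-divide (plain integers, illustrative only):

#include <cassert>
#include <cstdint>

/* multiple_p (a, b, &q): true iff B divides A, and if so store A/B.
   For constants this is just a remainder test plus a division.  */
static bool
multiple_p_sketch (uint64_t a, uint64_t b, uint64_t *quotient)
{
  if (a % b != 0)
    return false;
  *quotient = a / b;
  return true;
}

int
main ()
{
  uint64_t bytenum;
  assert (multiple_p_sketch (72, 8, &bytenum) && bytenum == 9);
  assert (!multiple_p_sketch (70, 8, &bytenum));  /* not byte aligned */
  return 0;
}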
578 /* Try to use instruction INSV to store VALUE into a field of OP0. 590 /* Try to use instruction INSV to store VALUE into a field of OP0.
579 If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is a 591 If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is a
714 If FALLBACK_P is true, fall back to store_fixed_bit_field if we have 726 If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
715 no other way of implementing the operation. If FALLBACK_P is false, 727 no other way of implementing the operation. If FALLBACK_P is false,
716 return false instead. */ 728 return false instead. */
717 729
718 static bool 730 static bool
719 store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, 731 store_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
720 unsigned HOST_WIDE_INT bitnum, 732 poly_uint64 bitregion_start, poly_uint64 bitregion_end,
721 unsigned HOST_WIDE_INT bitregion_start,
722 unsigned HOST_WIDE_INT bitregion_end,
723 machine_mode fieldmode, 733 machine_mode fieldmode,
724 rtx value, bool reverse, bool fallback_p) 734 rtx value, bool reverse, bool fallback_p)
725 { 735 {
726 rtx op0 = str_rtx; 736 rtx op0 = str_rtx;
727 rtx orig_value;
728 737
729 while (GET_CODE (op0) == SUBREG) 738 while (GET_CODE (op0) == SUBREG)
730 { 739 {
731 bitnum += subreg_memory_offset (op0) * BITS_PER_UNIT; 740 bitnum += subreg_memory_offset (op0) * BITS_PER_UNIT;
732 op0 = SUBREG_REG (op0); 741 op0 = SUBREG_REG (op0);
733 } 742 }
734 743
735 /* No action is needed if the target is a register and if the field 744 /* No action is needed if the target is a register and if the field
736 lies completely outside that register. This can occur if the source 745 lies completely outside that register. This can occur if the source
737 code contains an out-of-bounds access to a small array. */ 746 code contains an out-of-bounds access to a small array. */
738 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0))) 747 if (REG_P (op0) && known_ge (bitnum, GET_MODE_BITSIZE (GET_MODE (op0))))
739 return true; 748 return true;
740 749
741 /* Use vec_set patterns for inserting parts of vectors whenever 750 /* Use vec_set patterns for inserting parts of vectors whenever
742 available. */ 751 available. */
743 machine_mode outermode = GET_MODE (op0); 752 machine_mode outermode = GET_MODE (op0);
744 scalar_mode innermode = GET_MODE_INNER (outermode); 753 scalar_mode innermode = GET_MODE_INNER (outermode);
754 poly_uint64 pos;
745 if (VECTOR_MODE_P (outermode) 755 if (VECTOR_MODE_P (outermode)
746 && !MEM_P (op0) 756 && !MEM_P (op0)
747 && optab_handler (vec_set_optab, outermode) != CODE_FOR_nothing 757 && optab_handler (vec_set_optab, outermode) != CODE_FOR_nothing
748 && fieldmode == innermode 758 && fieldmode == innermode
749 && bitsize == GET_MODE_BITSIZE (innermode) 759 && known_eq (bitsize, GET_MODE_BITSIZE (innermode))
750 && !(bitnum % GET_MODE_BITSIZE (innermode))) 760 && multiple_p (bitnum, GET_MODE_BITSIZE (innermode), &pos))
751 { 761 {
752 struct expand_operand ops[3]; 762 struct expand_operand ops[3];
753 enum insn_code icode = optab_handler (vec_set_optab, outermode); 763 enum insn_code icode = optab_handler (vec_set_optab, outermode);
754 int pos = bitnum / GET_MODE_BITSIZE (innermode);
755 764
756 create_fixed_operand (&ops[0], op0); 765 create_fixed_operand (&ops[0], op0);
757 create_input_operand (&ops[1], value, innermode); 766 create_input_operand (&ops[1], value, innermode);
758 create_integer_operand (&ops[2], pos); 767 create_integer_operand (&ops[2], pos);
759 if (maybe_expand_insn (icode, 3, ops)) 768 if (maybe_expand_insn (icode, 3, ops))
761 } 770 }
762 771
763 /* If the target is a register, overwriting the entire object, or storing 772 /* If the target is a register, overwriting the entire object, or storing
764 a full-word or multi-word field can be done with just a SUBREG. */ 773 a full-word or multi-word field can be done with just a SUBREG. */
765 if (!MEM_P (op0) 774 if (!MEM_P (op0)
766 && bitsize == GET_MODE_BITSIZE (fieldmode) 775 && known_eq (bitsize, GET_MODE_BITSIZE (fieldmode)))
767 && ((bitsize == GET_MODE_BITSIZE (GET_MODE (op0)) && bitnum == 0)
768 || (bitsize % BITS_PER_WORD == 0 && bitnum % BITS_PER_WORD == 0)))
769 { 776 {
770 /* Use the subreg machinery either to narrow OP0 to the required 777 /* Use the subreg machinery either to narrow OP0 to the required
771 words or to cope with mode punning between equal-sized modes. 778 words or to cope with mode punning between equal-sized modes.
772 In the latter case, use subreg on the rhs side, not lhs. */ 779 In the latter case, use subreg on the rhs side, not lhs. */
773 rtx sub; 780 rtx sub;
774 781 HOST_WIDE_INT regnum;
775 if (bitsize == GET_MODE_BITSIZE (GET_MODE (op0))) 782 poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (op0));
783 if (known_eq (bitnum, 0U)
784 && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0))))
776 { 785 {
777 sub = simplify_gen_subreg (GET_MODE (op0), value, fieldmode, 0); 786 sub = simplify_gen_subreg (GET_MODE (op0), value, fieldmode, 0);
778 if (sub) 787 if (sub)
779 { 788 {
780 if (reverse) 789 if (reverse)
781 sub = flip_storage_order (GET_MODE (op0), sub); 790 sub = flip_storage_order (GET_MODE (op0), sub);
782 emit_move_insn (op0, sub); 791 emit_move_insn (op0, sub);
783 return true; 792 return true;
784 } 793 }
785 } 794 }
786 else 795 else if (constant_multiple_p (bitnum, regsize * BITS_PER_UNIT, &regnum)
796 && multiple_p (bitsize, regsize * BITS_PER_UNIT))
787 { 797 {
788 sub = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0), 798 sub = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
789 bitnum / BITS_PER_UNIT); 799 regnum * regsize);
790 if (sub) 800 if (sub)
791 { 801 {
792 if (reverse) 802 if (reverse)
793 value = flip_storage_order (fieldmode, value); 803 value = flip_storage_order (fieldmode, value);
794 emit_move_insn (sub, value); 804 emit_move_insn (sub, value);
798 } 808 }
799 809
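The register-sized subreg case above now keys off constant_multiple_p: the store can only use a subreg when BITNUM is a compile-time-constant number of natural registers into OP0, and that register index REGNUM is what feeds the byte offset regnum * regsize. A sketch of the offset arithmetic with sample constants (geometry chosen for illustration):

#include <cassert>
#include <cstdint>

int
main ()
{
  /* Storing a 128-bit field at bit 256 of a 512-bit register value,
     with a natural register size of 16 bytes (128 bits).  */
  const uint64_t regsize_bytes = 16;
  const uint64_t regsize_bits = regsize_bytes * 8;
  const uint64_t bitnum = 256, bitsize = 128;

  assert (bitnum % regsize_bits == 0);      /* constant_multiple_p holds */
  uint64_t regnum = bitnum / regsize_bits;  /* -> 2 */
  assert (bitsize % regsize_bits == 0);     /* multiple_p holds */
  uint64_t subreg_byte = regnum * regsize_bytes;
  assert (subreg_byte == 32);               /* offset for simplify_gen_subreg */
  return 0;
}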
800 /* If the target is memory, storing any naturally aligned field can be 810 /* If the target is memory, storing any naturally aligned field can be
801 done with a simple store. For targets that support fast unaligned 811 done with a simple store. For targets that support fast unaligned
802 memory, any naturally sized, unit aligned field can be done directly. */ 812 memory, any naturally sized, unit aligned field can be done directly. */
803 if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode)) 813 poly_uint64 bytenum;
804 { 814 if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode, &bytenum))
805 op0 = adjust_bitfield_address (op0, fieldmode, bitnum / BITS_PER_UNIT); 815 {
816 op0 = adjust_bitfield_address (op0, fieldmode, bytenum);
806 if (reverse) 817 if (reverse)
807 value = flip_storage_order (fieldmode, value); 818 value = flip_storage_order (fieldmode, value);
808 emit_move_insn (op0, value); 819 emit_move_insn (op0, value);
809 return true; 820 return true;
810 } 821 }
822
823 /* It's possible we'll need to handle other cases here for
824 polynomial bitnum and bitsize. */
825
826 /* From here on we need to be looking at a fixed-size insertion. */
827 unsigned HOST_WIDE_INT ibitsize = bitsize.to_constant ();
828 unsigned HOST_WIDE_INT ibitnum = bitnum.to_constant ();
811 829
812 /* Make sure we are playing with integral modes. Pun with subregs 830 /* Make sure we are playing with integral modes. Pun with subregs
813 if we aren't. This must come after the entire register case above, 831 if we aren't. This must come after the entire register case above,
814 since that case is valid for any mode. The following cases are only 832 since that case is valid for any mode. The following cases are only
815 valid for integral modes. */ 833 valid for integral modes. */
822 0, MEM_SIZE (op0)); 840 0, MEM_SIZE (op0));
823 else 841 else
824 op0 = gen_lowpart (op0_mode.require (), op0); 842 op0 = gen_lowpart (op0_mode.require (), op0);
825 } 843 }
826 844
845 return store_integral_bit_field (op0, op0_mode, ibitsize, ibitnum,
846 bitregion_start, bitregion_end,
847 fieldmode, value, reverse, fallback_p);
848 }
849
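store_bit_field_1 is now the poly-valued entry point: it peels off the cases it can handle with a symbolic BITNUM/BITSIZE (vec_set, whole-register subregs, simple memory stores) and only then calls to_constant () before handing the rest to the new store_integral_bit_field. The contract is that to_constant is only valid once the size is known not to depend on the runtime vector length. A self-contained behavioural model of that contract (the two-coefficient representation below is illustrative, not gcc's actual poly_int class):

#include <cassert>
#include <cstdint>

/* value = coeffs[0] + coeffs[1] * X, where X is the runtime factor
   (e.g. the SVE vector-length multiple).  */
struct poly_model
{
  uint64_t coeffs[2];
  bool is_constant (uint64_t *out) const
  {
    if (coeffs[1] != 0)
      return false;
    *out = coeffs[0];
    return true;
  }
  uint64_t to_constant () const
  {
    assert (coeffs[1] == 0);  /* caller must have excluded VL-dependent sizes */
    return coeffs[0];
  }
};

int
main ()
{
  poly_model fixed = { { 64, 0 } };
  poly_model scalable = { { 0, 128 } };  /* 128 bits per VL unit */
  uint64_t c;
  assert (fixed.is_constant (&c) && c == 64);
  assert (!scalable.is_constant (&c));
  assert (fixed.to_constant () == 64);
  return 0;
}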
850 /* Subroutine of store_bit_field_1, with the same arguments, except
851 that BITSIZE and BITNUM are constant. Handle cases specific to
852 integral modes. If OP0_MODE is defined, it is the mode of OP0,
853 otherwise OP0 is a BLKmode MEM. */
854
855 static bool
856 store_integral_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
857 unsigned HOST_WIDE_INT bitsize,
858 unsigned HOST_WIDE_INT bitnum,
859 poly_uint64 bitregion_start,
860 poly_uint64 bitregion_end,
861 machine_mode fieldmode,
862 rtx value, bool reverse, bool fallback_p)
863 {
827 /* Storing an lsb-aligned field in a register 864 /* Storing an lsb-aligned field in a register
828 can be done with a movstrict instruction. */ 865 can be done with a movstrict instruction. */
829 866
830 if (!MEM_P (op0) 867 if (!MEM_P (op0)
831 && !reverse 868 && !reverse
832 && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0)) 869 && lowpart_bit_field_p (bitnum, bitsize, op0_mode.require ())
833 && bitsize == GET_MODE_BITSIZE (fieldmode) 870 && known_eq (bitsize, GET_MODE_BITSIZE (fieldmode))
834 && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing) 871 && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
835 { 872 {
836 struct expand_operand ops[2]; 873 struct expand_operand ops[2];
837 enum insn_code icode = optab_handler (movstrict_optab, fieldmode); 874 enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
838 rtx arg0 = op0; 875 rtx arg0 = op0;
879 916
880 /* This is the mode we must force value to, so that there will be enough 917 /* This is the mode we must force value to, so that there will be enough
881 subwords to extract. Note that fieldmode will often (always?) be 918 subwords to extract. Note that fieldmode will often (always?) be
882 VOIDmode, because that is what store_field uses to indicate that this 919 VOIDmode, because that is what store_field uses to indicate that this
883 is a bit field, but passing VOIDmode to operand_subword_force 920 is a bit field, but passing VOIDmode to operand_subword_force
884 is not allowed. */ 921 is not allowed.
885 fieldmode = GET_MODE (value); 922
886 if (fieldmode == VOIDmode) 923 The mode must be fixed-size, since insertions into variable-sized
887 fieldmode = smallest_int_mode_for_size (nwords * BITS_PER_WORD); 924 objects are meant to be handled before calling this function. */
925 fixed_size_mode value_mode = as_a <fixed_size_mode> (GET_MODE (value));
926 if (value_mode == VOIDmode)
927 value_mode = smallest_int_mode_for_size (nwords * BITS_PER_WORD);
888 928
889 last = get_last_insn (); 929 last = get_last_insn ();
890 for (i = 0; i < nwords; i++) 930 for (i = 0; i < nwords; i++)
891 { 931 {
892 /* If I is 0, use the low-order word in both field and target; 932 /* If I is 0, use the low-order word in both field and target;
893 if I is 1, use the next to lowest word; and so on. */ 933 if I is 1, use the next to lowest word; and so on. */
894 unsigned int wordnum = (backwards 934 unsigned int wordnum = (backwards
895 ? GET_MODE_SIZE (fieldmode) / UNITS_PER_WORD 935 ? GET_MODE_SIZE (value_mode) / UNITS_PER_WORD
896 - i - 1 936 - i - 1
897 : i); 937 : i);
898 unsigned int bit_offset = (backwards ^ reverse 938 unsigned int bit_offset = (backwards ^ reverse
899 ? MAX ((int) bitsize - ((int) i + 1) 939 ? MAX ((int) bitsize - ((int) i + 1)
900 * BITS_PER_WORD, 940 * BITS_PER_WORD,
901 0) 941 0)
902 : (int) i * BITS_PER_WORD); 942 : (int) i * BITS_PER_WORD);
903 rtx value_word = operand_subword_force (value, wordnum, fieldmode); 943 rtx value_word = operand_subword_force (value, wordnum, value_mode);
904 unsigned HOST_WIDE_INT new_bitsize = 944 unsigned HOST_WIDE_INT new_bitsize =
905 MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD); 945 MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD);
906 946
907 /* If the remaining chunk doesn't have full wordsize we have 947 /* If the remaining chunk doesn't have full wordsize we have
908 to make sure that for big-endian machines the higher order 948 to make sure that for big-endian machines the higher order
909 bits are used. */ 949 bits are used. */
910 if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards) 950 if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards)
911 value_word = simplify_expand_binop (word_mode, lshr_optab, 951 {
912 value_word, 952 int shift = BITS_PER_WORD - new_bitsize;
913 GEN_INT (BITS_PER_WORD 953 rtx shift_rtx = gen_int_shift_amount (word_mode, shift);
914 - new_bitsize), 954 value_word = simplify_expand_binop (word_mode, lshr_optab,
915 NULL_RTX, true, 955 value_word, shift_rtx,
916 OPTAB_LIB_WIDEN); 956 NULL_RTX, true,
957 OPTAB_LIB_WIDEN);
958 }
917 959
918 if (!store_bit_field_1 (op0, new_bitsize, 960 if (!store_bit_field_1 (op0, new_bitsize,
919 bitnum + bit_offset, 961 bitnum + bit_offset,
920 bitregion_start, bitregion_end, 962 bitregion_start, bitregion_end,
921 word_mode, 963 word_mode,
930 972
931 /* If VALUE has a floating-point or complex mode, access it as an 973 /* If VALUE has a floating-point or complex mode, access it as an
932 integer of the corresponding size. This can occur on a machine 974 integer of the corresponding size. This can occur on a machine
933 with 64 bit registers that uses SFmode for float. It can also 975 with 64 bit registers that uses SFmode for float. It can also
934 occur for unaligned float or complex fields. */ 976 occur for unaligned float or complex fields. */
935 orig_value = value; 977 rtx orig_value = value;
936 scalar_int_mode value_mode; 978 scalar_int_mode value_mode;
937 if (GET_MODE (value) == VOIDmode) 979 if (GET_MODE (value) == VOIDmode)
938 /* By this point we've dealt with values that are bigger than a word, 980 /* By this point we've dealt with values that are bigger than a word,
939 so word_mode is a conservatively correct choice. */ 981 so word_mode is a conservatively correct choice. */
940 value_mode = word_mode; 982 value_mode = word_mode;
1038 FIELDMODE is the machine-mode of the FIELD_DECL node for this field. 1080 FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
1039 1081
1040 If REVERSE is true, the store is to be done in reverse order. */ 1082 If REVERSE is true, the store is to be done in reverse order. */
1041 1083
1042 void 1084 void
1043 store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, 1085 store_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
1044 unsigned HOST_WIDE_INT bitnum, 1086 poly_uint64 bitregion_start, poly_uint64 bitregion_end,
1045 unsigned HOST_WIDE_INT bitregion_start,
1046 unsigned HOST_WIDE_INT bitregion_end,
1047 machine_mode fieldmode, 1087 machine_mode fieldmode,
1048 rtx value, bool reverse) 1088 rtx value, bool reverse)
1049 { 1089 {
1050 /* Handle -fstrict-volatile-bitfields in the cases where it applies. */ 1090 /* Handle -fstrict-volatile-bitfields in the cases where it applies. */
1091 unsigned HOST_WIDE_INT ibitsize = 0, ibitnum = 0;
1051 scalar_int_mode int_mode; 1092 scalar_int_mode int_mode;
1052 if (is_a <scalar_int_mode> (fieldmode, &int_mode) 1093 if (bitsize.is_constant (&ibitsize)
1053 && strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, int_mode, 1094 && bitnum.is_constant (&ibitnum)
1095 && is_a <scalar_int_mode> (fieldmode, &int_mode)
1096 && strict_volatile_bitfield_p (str_rtx, ibitsize, ibitnum, int_mode,
1054 bitregion_start, bitregion_end)) 1097 bitregion_start, bitregion_end))
1055 { 1098 {
1056 /* Storing of a full word can be done with a simple store. 1099 /* Storing of a full word can be done with a simple store.
1057 We know here that the field can be accessed with one single 1100 We know here that the field can be accessed with one single
1058 instruction. For targets that support unaligned memory, 1101 instruction. For targets that support unaligned memory,
1059 an unaligned access may be necessary. */ 1102 an unaligned access may be necessary. */
1060 if (bitsize == GET_MODE_BITSIZE (int_mode)) 1103 if (ibitsize == GET_MODE_BITSIZE (int_mode))
1061 { 1104 {
1062 str_rtx = adjust_bitfield_address (str_rtx, int_mode, 1105 str_rtx = adjust_bitfield_address (str_rtx, int_mode,
1063 bitnum / BITS_PER_UNIT); 1106 ibitnum / BITS_PER_UNIT);
1064 if (reverse) 1107 if (reverse)
1065 value = flip_storage_order (int_mode, value); 1108 value = flip_storage_order (int_mode, value);
1066 gcc_assert (bitnum % BITS_PER_UNIT == 0); 1109 gcc_assert (ibitnum % BITS_PER_UNIT == 0);
1067 emit_move_insn (str_rtx, value); 1110 emit_move_insn (str_rtx, value);
1068 } 1111 }
1069 else 1112 else
1070 { 1113 {
1071 rtx temp; 1114 rtx temp;
1072 1115
1073 str_rtx = narrow_bit_field_mem (str_rtx, int_mode, bitsize, bitnum, 1116 str_rtx = narrow_bit_field_mem (str_rtx, int_mode, ibitsize,
1074 &bitnum); 1117 ibitnum, &ibitnum);
1075 gcc_assert (bitnum + bitsize <= GET_MODE_BITSIZE (int_mode)); 1118 gcc_assert (ibitnum + ibitsize <= GET_MODE_BITSIZE (int_mode));
1076 temp = copy_to_reg (str_rtx); 1119 temp = copy_to_reg (str_rtx);
1077 if (!store_bit_field_1 (temp, bitsize, bitnum, 0, 0, 1120 if (!store_bit_field_1 (temp, ibitsize, ibitnum, 0, 0,
1078 int_mode, value, reverse, true)) 1121 int_mode, value, reverse, true))
1079 gcc_unreachable (); 1122 gcc_unreachable ();
1080 1123
1081 emit_move_insn (str_rtx, temp); 1124 emit_move_insn (str_rtx, temp);
1082 } 1125 }
1085 } 1128 }
1086 1129
1087 /* Under the C++0x memory model, we must not touch bits outside the 1130 /* Under the C++0x memory model, we must not touch bits outside the
1088 bit region. Adjust the address to start at the beginning of the 1131 bit region. Adjust the address to start at the beginning of the
1089 bit region. */ 1132 bit region. */
1090 if (MEM_P (str_rtx) && bitregion_start > 0) 1133 if (MEM_P (str_rtx) && maybe_ne (bitregion_start, 0U))
1091 { 1134 {
1092 scalar_int_mode best_mode; 1135 scalar_int_mode best_mode;
1093 machine_mode addr_mode = VOIDmode; 1136 machine_mode addr_mode = VOIDmode;
1094 HOST_WIDE_INT offset, size; 1137
1095 1138 poly_uint64 offset = exact_div (bitregion_start, BITS_PER_UNIT);
1096 gcc_assert ((bitregion_start % BITS_PER_UNIT) == 0);
1097
1098 offset = bitregion_start / BITS_PER_UNIT;
1099 bitnum -= bitregion_start; 1139 bitnum -= bitregion_start;
1100 size = (bitnum + bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT; 1140 poly_int64 size = bits_to_bytes_round_up (bitnum + bitsize);
1101 bitregion_end -= bitregion_start; 1141 bitregion_end -= bitregion_start;
1102 bitregion_start = 0; 1142 bitregion_start = 0;
1103 if (get_best_mode (bitsize, bitnum, 1143 if (bitsize.is_constant (&ibitsize)
1104 bitregion_start, bitregion_end, 1144 && bitnum.is_constant (&ibitnum)
1105 MEM_ALIGN (str_rtx), INT_MAX, 1145 && get_best_mode (ibitsize, ibitnum,
1106 MEM_VOLATILE_P (str_rtx), &best_mode)) 1146 bitregion_start, bitregion_end,
1147 MEM_ALIGN (str_rtx), INT_MAX,
1148 MEM_VOLATILE_P (str_rtx), &best_mode))
1107 addr_mode = best_mode; 1149 addr_mode = best_mode;
1108 str_rtx = adjust_bitfield_address_size (str_rtx, addr_mode, 1150 str_rtx = adjust_bitfield_address_size (str_rtx, addr_mode,
1109 offset, size); 1151 offset, size);
1110 } 1152 }
1111 1153
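Rebasing the access on the bit region no longer open-codes the byte arithmetic: exact_div (bitregion_start, BITS_PER_UNIT) performs the division on the assumption that it is exact (replacing the explicit gcc_assert on byte alignment), and bits_to_bytes_round_up replaces (x + BITS_PER_UNIT - 1) / BITS_PER_UNIT. A quick worked example with constants (illustrative values):

#include <cassert>
#include <cstdint>

int
main ()
{
  uint64_t bitregion_start = 64, bitnum = 80, bitsize = 20;

  assert (bitregion_start % 8 == 0);           /* exact_div precondition */
  uint64_t offset = bitregion_start / 8;       /* 8 bytes into the object */
  bitnum -= bitregion_start;                   /* now 16, relative to the region */
  uint64_t size = (bitnum + bitsize + 7) / 8;  /* bits_to_bytes_round_up -> 5 */
  assert (offset == 8 && bitnum == 16 && size == 5);
  return 0;
}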
1124 1166
1125 static void 1167 static void
1126 store_fixed_bit_field (rtx op0, opt_scalar_int_mode op0_mode, 1168 store_fixed_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
1127 unsigned HOST_WIDE_INT bitsize, 1169 unsigned HOST_WIDE_INT bitsize,
1128 unsigned HOST_WIDE_INT bitnum, 1170 unsigned HOST_WIDE_INT bitnum,
1129 unsigned HOST_WIDE_INT bitregion_start, 1171 poly_uint64 bitregion_start, poly_uint64 bitregion_end,
1130 unsigned HOST_WIDE_INT bitregion_end,
1131 rtx value, scalar_int_mode value_mode, bool reverse) 1172 rtx value, scalar_int_mode value_mode, bool reverse)
1132 { 1173 {
1133 /* There is a case not handled here: 1174 /* There is a case not handled here:
1134 a structure with a known alignment of just a halfword 1175 a structure with a known alignment of just a halfword
1135 and a field split across two aligned halfwords within the structure. 1176 and a field split across two aligned halfwords within the structure.
1280 1321
1281 static void 1322 static void
1282 store_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode, 1323 store_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
1283 unsigned HOST_WIDE_INT bitsize, 1324 unsigned HOST_WIDE_INT bitsize,
1284 unsigned HOST_WIDE_INT bitpos, 1325 unsigned HOST_WIDE_INT bitpos,
1285 unsigned HOST_WIDE_INT bitregion_start, 1326 poly_uint64 bitregion_start, poly_uint64 bitregion_end,
1286 unsigned HOST_WIDE_INT bitregion_end,
1287 rtx value, scalar_int_mode value_mode, bool reverse) 1327 rtx value, scalar_int_mode value_mode, bool reverse)
1288 { 1328 {
1289 unsigned int unit, total_bits, bitsdone = 0; 1329 unsigned int unit, total_bits, bitsdone = 0;
1290 1330
1291 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that 1331 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1329 1369
1330 /* When region of bytes we can touch is restricted, decrease 1370 /* When region of bytes we can touch is restricted, decrease
1331 UNIT close to the end of the region as needed. If op0 is a REG 1371 UNIT close to the end of the region as needed. If op0 is a REG
1332 or SUBREG of REG, don't do this, as there can't be data races 1372 or SUBREG of REG, don't do this, as there can't be data races
1333 on a register and we can expand shorter code in some cases. */ 1373 on a register and we can expand shorter code in some cases. */
1334 if (bitregion_end 1374 if (maybe_ne (bitregion_end, 0U)
1335 && unit > BITS_PER_UNIT 1375 && unit > BITS_PER_UNIT
1336 && bitpos + bitsdone - thispos + unit > bitregion_end + 1 1376 && maybe_gt (bitpos + bitsdone - thispos + unit, bitregion_end + 1)
1337 && !REG_P (op0) 1377 && !REG_P (op0)
1338 && (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0)))) 1378 && (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0))))
1339 { 1379 {
1340 unit = unit / 2; 1380 unit = unit / 2;
1341 continue; 1381 continue;
1528 return convert_extracted_bit_field (target, mode, tmode, unsignedp); 1568 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1529 } 1569 }
1530 return NULL_RTX; 1570 return NULL_RTX;
1531 } 1571 }
1532 1572
1573 /* See whether it would be valid to extract the part of OP0 described
1574 by BITNUM and BITSIZE into a value of mode MODE using a subreg
1575 operation. Return the subreg if so, otherwise return null. */
1576
1577 static rtx
1578 extract_bit_field_as_subreg (machine_mode mode, rtx op0,
1579 poly_uint64 bitsize, poly_uint64 bitnum)
1580 {
1581 poly_uint64 bytenum;
1582 if (multiple_p (bitnum, BITS_PER_UNIT, &bytenum)
1583 && known_eq (bitsize, GET_MODE_BITSIZE (mode))
1584 && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
1585 && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op0)))
1586 return simplify_gen_subreg (mode, op0, GET_MODE (op0), bytenum);
1587 return NULL_RTX;
1588 }
1589
1533 /* A subroutine of extract_bit_field, with the same arguments. 1590 /* A subroutine of extract_bit_field, with the same arguments.
1534 If FALLBACK_P is true, fall back to extract_fixed_bit_field 1591 If FALLBACK_P is true, fall back to extract_fixed_bit_field
1535 if we can find no other means of implementing the operation. 1592 if we can find no other means of implementing the operation.
1536 if FALLBACK_P is false, return NULL instead. */ 1593 if FALLBACK_P is false, return NULL instead. */
1537 1594
1538 static rtx 1595 static rtx
1539 extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, 1596 extract_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
1540 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target, 1597 int unsignedp, rtx target, machine_mode mode,
1541 machine_mode mode, machine_mode tmode, 1598 machine_mode tmode, bool reverse, bool fallback_p,
1542 bool reverse, bool fallback_p, rtx *alt_rtl) 1599 rtx *alt_rtl)
1543 { 1600 {
1544 rtx op0 = str_rtx; 1601 rtx op0 = str_rtx;
1545 machine_mode mode1; 1602 machine_mode mode1;
1546 1603
1547 if (tmode == VOIDmode) 1604 if (tmode == VOIDmode)
1554 } 1611 }
1555 1612
1556 /* If we have an out-of-bounds access to a register, just return an 1613 /* If we have an out-of-bounds access to a register, just return an
1557 uninitialized register of the required mode. This can occur if the 1614 uninitialized register of the required mode. This can occur if the
1558 source code contains an out-of-bounds access to a small array. */ 1615 source code contains an out-of-bounds access to a small array. */
1559 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0))) 1616 if (REG_P (op0) && known_ge (bitnum, GET_MODE_BITSIZE (GET_MODE (op0))))
1560 return gen_reg_rtx (tmode); 1617 return gen_reg_rtx (tmode);
1561 1618
1562 if (REG_P (op0) 1619 if (REG_P (op0)
1563 && mode == GET_MODE (op0) 1620 && mode == GET_MODE (op0)
1564 && bitnum == 0 1621 && known_eq (bitnum, 0U)
1565 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0))) 1622 && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0))))
1566 { 1623 {
1567 if (reverse) 1624 if (reverse)
1568 op0 = flip_storage_order (mode, op0); 1625 op0 = flip_storage_order (mode, op0);
1569 /* We're trying to extract a full register from itself. */ 1626 /* We're trying to extract a full register from itself. */
1570 return op0; 1627 return op0;
1572 1629
1573 /* First try to check for vector from vector extractions. */ 1630 /* First try to check for vector from vector extractions. */
1574 if (VECTOR_MODE_P (GET_MODE (op0)) 1631 if (VECTOR_MODE_P (GET_MODE (op0))
1575 && !MEM_P (op0) 1632 && !MEM_P (op0)
1576 && VECTOR_MODE_P (tmode) 1633 && VECTOR_MODE_P (tmode)
1577 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (tmode)) 1634 && known_eq (bitsize, GET_MODE_BITSIZE (tmode))
1635 && maybe_gt (GET_MODE_SIZE (GET_MODE (op0)), GET_MODE_SIZE (tmode)))
1578 { 1636 {
1579 machine_mode new_mode = GET_MODE (op0); 1637 machine_mode new_mode = GET_MODE (op0);
1580 if (GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode)) 1638 if (GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode))
1581 { 1639 {
1582 scalar_mode inner_mode = GET_MODE_INNER (tmode); 1640 scalar_mode inner_mode = GET_MODE_INNER (tmode);
1583 unsigned int nunits = (GET_MODE_BITSIZE (GET_MODE (op0)) 1641 poly_uint64 nunits;
1584 / GET_MODE_UNIT_BITSIZE (tmode)); 1642 if (!multiple_p (GET_MODE_BITSIZE (GET_MODE (op0)),
1585 if (!mode_for_vector (inner_mode, nunits).exists (&new_mode) 1643 GET_MODE_UNIT_BITSIZE (tmode), &nunits)
1644 || !mode_for_vector (inner_mode, nunits).exists (&new_mode)
1586 || !VECTOR_MODE_P (new_mode) 1645 || !VECTOR_MODE_P (new_mode)
1587 || GET_MODE_SIZE (new_mode) != GET_MODE_SIZE (GET_MODE (op0)) 1646 || maybe_ne (GET_MODE_SIZE (new_mode),
1647 GET_MODE_SIZE (GET_MODE (op0)))
1588 || GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode) 1648 || GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode)
1589 || !targetm.vector_mode_supported_p (new_mode)) 1649 || !targetm.vector_mode_supported_p (new_mode))
1590 new_mode = VOIDmode; 1650 new_mode = VOIDmode;
1591 } 1651 }
1652 poly_uint64 pos;
1592 if (new_mode != VOIDmode 1653 if (new_mode != VOIDmode
1593 && (convert_optab_handler (vec_extract_optab, new_mode, tmode) 1654 && (convert_optab_handler (vec_extract_optab, new_mode, tmode)
1594 != CODE_FOR_nothing) 1655 != CODE_FOR_nothing)
1595 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (tmode) 1656 && multiple_p (bitnum, GET_MODE_BITSIZE (tmode), &pos))
1596 == bitnum / GET_MODE_BITSIZE (tmode)))
1597 { 1657 {
1598 struct expand_operand ops[3]; 1658 struct expand_operand ops[3];
1599 machine_mode outermode = new_mode; 1659 machine_mode outermode = new_mode;
1600 machine_mode innermode = tmode; 1660 machine_mode innermode = tmode;
1601 enum insn_code icode 1661 enum insn_code icode
1602 = convert_optab_handler (vec_extract_optab, outermode, innermode); 1662 = convert_optab_handler (vec_extract_optab, outermode, innermode);
1603 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1604 1663
1605 if (new_mode != GET_MODE (op0)) 1664 if (new_mode != GET_MODE (op0))
1606 op0 = gen_lowpart (new_mode, op0); 1665 op0 = gen_lowpart (new_mode, op0);
1607 create_output_operand (&ops[0], target, innermode); 1666 create_output_operand (&ops[0], target, innermode);
1608 ops[0].target = 1; 1667 ops[0].target = 1;
1639 new_mode = MIN_MODE_VECTOR_UACCUM; 1698 new_mode = MIN_MODE_VECTOR_UACCUM;
1640 else 1699 else
1641 new_mode = MIN_MODE_VECTOR_INT; 1700 new_mode = MIN_MODE_VECTOR_INT;
1642 1701
1643 FOR_EACH_MODE_FROM (new_mode, new_mode) 1702 FOR_EACH_MODE_FROM (new_mode, new_mode)
1644 if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0)) 1703 if (known_eq (GET_MODE_SIZE (new_mode), GET_MODE_SIZE (GET_MODE (op0)))
1645 && GET_MODE_UNIT_SIZE (new_mode) == GET_MODE_SIZE (tmode) 1704 && known_eq (GET_MODE_UNIT_SIZE (new_mode), GET_MODE_SIZE (tmode))
1646 && targetm.vector_mode_supported_p (new_mode)) 1705 && targetm.vector_mode_supported_p (new_mode))
1647 break; 1706 break;
1648 if (new_mode != VOIDmode) 1707 if (new_mode != VOIDmode)
1649 op0 = gen_lowpart (new_mode, op0); 1708 op0 = gen_lowpart (new_mode, op0);
1650 } 1709 }
1651 1710
1652 /* Use vec_extract patterns for extracting parts of vectors whenever 1711 /* Use vec_extract patterns for extracting parts of vectors whenever
1653 available. */ 1712 available. If that fails, see whether the current modes and bitregion
1713 give a natural subreg. */
1654 machine_mode outermode = GET_MODE (op0); 1714 machine_mode outermode = GET_MODE (op0);
1655 scalar_mode innermode = GET_MODE_INNER (outermode); 1715 if (VECTOR_MODE_P (outermode) && !MEM_P (op0))
1656 if (VECTOR_MODE_P (outermode) 1716 {
1657 && !MEM_P (op0) 1717 scalar_mode innermode = GET_MODE_INNER (outermode);
1658 && (convert_optab_handler (vec_extract_optab, outermode, innermode)
1659 != CODE_FOR_nothing)
1660 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (innermode)
1661 == bitnum / GET_MODE_BITSIZE (innermode)))
1662 {
1663 struct expand_operand ops[3];
1664 enum insn_code icode 1718 enum insn_code icode
1665 = convert_optab_handler (vec_extract_optab, outermode, innermode); 1719 = convert_optab_handler (vec_extract_optab, outermode, innermode);
1666 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode); 1720 poly_uint64 pos;
1667 1721 if (icode != CODE_FOR_nothing
1668 create_output_operand (&ops[0], target, innermode); 1722 && known_eq (bitsize, GET_MODE_BITSIZE (innermode))
1669 ops[0].target = 1; 1723 && multiple_p (bitnum, GET_MODE_BITSIZE (innermode), &pos))
1670 create_input_operand (&ops[1], op0, outermode); 1724 {
1671 create_integer_operand (&ops[2], pos); 1725 struct expand_operand ops[3];
1672 if (maybe_expand_insn (icode, 3, ops)) 1726
1673 { 1727 create_output_operand (&ops[0], target, innermode);
1674 if (alt_rtl && ops[0].target) 1728 ops[0].target = 1;
1675 *alt_rtl = target; 1729 create_input_operand (&ops[1], op0, outermode);
1676 target = ops[0].value; 1730 create_integer_operand (&ops[2], pos);
1677 if (GET_MODE (target) != mode) 1731 if (maybe_expand_insn (icode, 3, ops))
1678 return gen_lowpart (tmode, target); 1732 {
1679 return target; 1733 if (alt_rtl && ops[0].target)
1734 *alt_rtl = target;
1735 target = ops[0].value;
1736 if (GET_MODE (target) != mode)
1737 return gen_lowpart (tmode, target);
1738 return target;
1739 }
1740 }
1741 /* Using subregs is useful if we're extracting one register vector
1742 from a multi-register vector. extract_bit_field_as_subreg checks
1743 for valid bitsize and bitnum, so we don't need to do that here. */
1744 if (VECTOR_MODE_P (mode))
1745 {
1746 rtx sub = extract_bit_field_as_subreg (mode, op0, bitsize, bitnum);
1747 if (sub)
1748 return sub;
1680 } 1749 }
1681 } 1750 }
1682 1751
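When the vec_extract pattern is unavailable, the new fallback uses extract_bit_field_as_subreg to pull one register-sized vector out of a multi-register vector. That is only a renaming of bytes, so it reduces to the byte-offset arithmetic sketched below (sample geometry, purely illustrative):

#include <cassert>
#include <cstdint>

int
main ()
{
  /* A 512-bit vector of 16 x 32-bit ints, extracting the 128-bit group
     that starts at element 8 (bit 256).  */
  const uint64_t elem_bits = 32, group_bits = 128;
  const uint64_t bitnum = 8 * elem_bits;  /* 256 */

  assert (bitnum % 8 == 0);               /* byte aligned */
  assert (bitnum % group_bits == 0);      /* starts on a group boundary */
  uint64_t subreg_byte = bitnum / 8;      /* 32-byte subreg offset */
  assert (subreg_byte == 32);
  return 0;
}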
1683 /* Make sure we are playing with integral modes. Pun with subregs 1752 /* Make sure we are playing with integral modes. Pun with subregs
1684 if we aren't. */ 1753 if we aren't. */
1698 if (GET_CODE (op0) == SUBREG) 1767 if (GET_CODE (op0) == SUBREG)
1699 op0 = force_reg (imode, op0); 1768 op0 = force_reg (imode, op0);
1700 } 1769 }
1701 else 1770 else
1702 { 1771 {
1703 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0)); 1772 poly_int64 size = GET_MODE_SIZE (GET_MODE (op0));
1704 rtx mem = assign_stack_temp (GET_MODE (op0), size); 1773 rtx mem = assign_stack_temp (GET_MODE (op0), size);
1705 emit_move_insn (mem, op0); 1774 emit_move_insn (mem, op0);
1706 op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size); 1775 op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
1707 } 1776 }
1708 } 1777 }
1719 gcc_assert (mode1 != BLKmode); 1788 gcc_assert (mode1 != BLKmode);
1720 1789
1721 /* Extraction of a full MODE1 value can be done with a subreg as long 1790 /* Extraction of a full MODE1 value can be done with a subreg as long
1722 as the least significant bit of the value is the least significant 1791 as the least significant bit of the value is the least significant
1723 bit of either OP0 or a word of OP0. */ 1792 bit of either OP0 or a word of OP0. */
1724 if (!MEM_P (op0) 1793 if (!MEM_P (op0) && !reverse)
1725 && !reverse 1794 {
1726 && lowpart_bit_field_p (bitnum, bitsize, op0_mode.require ()) 1795 rtx sub = extract_bit_field_as_subreg (mode1, op0, bitsize, bitnum);
1727 && bitsize == GET_MODE_BITSIZE (mode1)
1728 && TRULY_NOOP_TRUNCATION_MODES_P (mode1, op0_mode.require ()))
1729 {
1730 rtx sub = simplify_gen_subreg (mode1, op0, op0_mode.require (),
1731 bitnum / BITS_PER_UNIT);
1732 if (sub) 1796 if (sub)
1733 return convert_extracted_bit_field (sub, mode, tmode, unsignedp); 1797 return convert_extracted_bit_field (sub, mode, tmode, unsignedp);
1734 } 1798 }
1735 1799
1736 /* Extraction of a full MODE1 value can be done with a load as long as 1800 /* Extraction of a full MODE1 value can be done with a load as long as
1737 the field is on a byte boundary and is sufficiently aligned. */ 1801 the field is on a byte boundary and is sufficiently aligned. */
1738 if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1)) 1802 poly_uint64 bytenum;
1739 { 1803 if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1, &bytenum))
1740 op0 = adjust_bitfield_address (op0, mode1, bitnum / BITS_PER_UNIT); 1804 {
1805 op0 = adjust_bitfield_address (op0, mode1, bytenum);
1741 if (reverse) 1806 if (reverse)
1742 op0 = flip_storage_order (mode1, op0); 1807 op0 = flip_storage_order (mode1, op0);
1743 return convert_extracted_bit_field (op0, mode, tmode, unsignedp); 1808 return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
1744 } 1809 }
1745 1810
1811 /* If we have a memory source and a non-constant bit offset, restrict
1812 the memory to the referenced bytes. This is a worst-case fallback
1813 but is useful for things like vector booleans. */
1814 if (MEM_P (op0) && !bitnum.is_constant ())
1815 {
1816 bytenum = bits_to_bytes_round_down (bitnum);
1817 bitnum = num_trailing_bits (bitnum);
1818 poly_uint64 bytesize = bits_to_bytes_round_up (bitnum + bitsize);
1819 op0 = adjust_bitfield_address_size (op0, BLKmode, bytenum, bytesize);
1820 op0_mode = opt_scalar_int_mode ();
1821 }
1822
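For a memory source with a variable BITNUM, the new block first splits the bit position into a byte part and a residual bit part: bits_to_bytes_round_down keeps the whole bytes and num_trailing_bits keeps the remainder, so bitnum == 8 * bytenum + trailing always holds and the residual extraction stays within the narrowed MEM. A tiny numeric check of that identity (plain integers, illustrative values):

#include <cassert>
#include <cstdint>

int
main ()
{
  uint64_t bitnum = 77, bitsize = 13;

  uint64_t bytenum = bitnum / 8;                     /* bits_to_bytes_round_down -> 9 */
  uint64_t trailing = bitnum % 8;                    /* num_trailing_bits -> 5 */
  uint64_t bytesize = (trailing + bitsize + 7) / 8;  /* bits_to_bytes_round_up -> 3 */

  assert (8 * bytenum + trailing == 77);
  assert (bytenum == 9 && trailing == 5 && bytesize == 3);
  return 0;
}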
1823 /* It's possible we'll need to handle other cases here for
1824 polynomial bitnum and bitsize. */
1825
1826 /* From here on we need to be looking at a fixed-size insertion. */
1827 return extract_integral_bit_field (op0, op0_mode, bitsize.to_constant (),
1828 bitnum.to_constant (), unsignedp,
1829 target, mode, tmode, reverse, fallback_p);
1830 }
1831
1832 /* Subroutine of extract_bit_field_1, with the same arguments, except
1833 that BITSIZE and BITNUM are constant. Handle cases specific to
1834 integral modes. If OP0_MODE is defined, it is the mode of OP0,
1835 otherwise OP0 is a BLKmode MEM. */
1836
1837 static rtx
1838 extract_integral_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
1839 unsigned HOST_WIDE_INT bitsize,
1840 unsigned HOST_WIDE_INT bitnum, int unsignedp,
1841 rtx target, machine_mode mode, machine_mode tmode,
1842 bool reverse, bool fallback_p)
1843 {
1746 /* Handle fields bigger than a word. */ 1844 /* Handle fields bigger than a word. */
1747 1845
1748 if (bitsize > BITS_PER_WORD) 1846 if (bitsize > BITS_PER_WORD)
1749 { 1847 {
1750 /* Here we transfer the words of the field 1848 /* Here we transfer the words of the field
1760 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target)) 1858 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1761 target = gen_reg_rtx (mode); 1859 target = gen_reg_rtx (mode);
1762 1860
1763 /* In case we're about to clobber a base register or something 1861 /* In case we're about to clobber a base register or something
1764 (see gcc.c-torture/execute/20040625-1.c). */ 1862 (see gcc.c-torture/execute/20040625-1.c). */
1765 if (reg_mentioned_p (target, str_rtx)) 1863 if (reg_mentioned_p (target, op0))
1766 target = gen_reg_rtx (mode); 1864 target = gen_reg_rtx (mode);
1767 1865
1768 /* Indicate for flow that the entire target reg is being set. */ 1866 /* Indicate for flow that the entire target reg is being set. */
1769 emit_clobber (target); 1867 emit_clobber (target);
1770 1868
1869 /* The mode must be fixed-size, since extract_bit_field_1 handles
1870 extractions from variable-sized objects before calling this
1871 function. */
1872 unsigned int target_size
1873 = GET_MODE_SIZE (GET_MODE (target)).to_constant ();
1771 last = get_last_insn (); 1874 last = get_last_insn ();
1772 for (i = 0; i < nwords; i++) 1875 for (i = 0; i < nwords; i++)
1773 { 1876 {
1774 /* If I is 0, use the low-order word in both field and target; 1877 /* If I is 0, use the low-order word in both field and target;
1775 if I is 1, use the next to lowest word; and so on. */ 1878 if I is 1, use the next to lowest word; and so on. */
1776 /* Word number in TARGET to use. */ 1879 /* Word number in TARGET to use. */
1777 unsigned int wordnum 1880 unsigned int wordnum
1778 = (backwards 1881 = (backwards ? target_size / UNITS_PER_WORD - i - 1 : i);
1779 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1780 : i);
1781 /* Offset from start of field in OP0. */ 1882 /* Offset from start of field in OP0. */
1782 unsigned int bit_offset = (backwards ^ reverse 1883 unsigned int bit_offset = (backwards ^ reverse
1783 ? MAX ((int) bitsize - ((int) i + 1) 1884 ? MAX ((int) bitsize - ((int) i + 1)
1784 * BITS_PER_WORD, 1885 * BITS_PER_WORD,
1785 0) 1886 0)
1804 1905
1805 if (unsignedp) 1906 if (unsignedp)
1806 { 1907 {
1807 /* Unless we've filled TARGET, the upper regs in a multi-reg value 1908 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1808 need to be zero'd out. */ 1909 need to be zero'd out. */
1809 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD) 1910 if (target_size > nwords * UNITS_PER_WORD)
1810 { 1911 {
1811 unsigned int i, total_words; 1912 unsigned int i, total_words;
1812 1913
1813 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD; 1914 total_words = target_size / UNITS_PER_WORD;
1814 for (i = nwords; i < total_words; i++) 1915 for (i = nwords; i < total_words; i++)
1815 emit_move_insn 1916 emit_move_insn
1816 (operand_subword (target, 1917 (operand_subword (target,
1817 backwards ? total_words - i - 1 : i, 1918 backwards ? total_words - i - 1 : i,
1818 1, VOIDmode), 1919 1, VOIDmode),
1946 we do so, and return TARGET. 2047 we do so, and return TARGET.
1947 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred 2048 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1948 if they are equally easy. */ 2049 if they are equally easy. */
1949 2050
1950 rtx 2051 rtx
1951 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, 2052 extract_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
1952 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target, 2053 int unsignedp, rtx target, machine_mode mode,
1953 machine_mode mode, machine_mode tmode, bool reverse, 2054 machine_mode tmode, bool reverse, rtx *alt_rtl)
1954 rtx *alt_rtl)
1955 { 2055 {
1956 machine_mode mode1; 2056 machine_mode mode1;
1957 2057
1958 /* Handle -fstrict-volatile-bitfields in the cases where it applies. */ 2058 /* Handle -fstrict-volatile-bitfields in the cases where it applies. */
1959 if (GET_MODE_BITSIZE (GET_MODE (str_rtx)) > 0) 2059 if (maybe_ne (GET_MODE_BITSIZE (GET_MODE (str_rtx)), 0))
1960 mode1 = GET_MODE (str_rtx); 2060 mode1 = GET_MODE (str_rtx);
1961 else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0) 2061 else if (target && maybe_ne (GET_MODE_BITSIZE (GET_MODE (target)), 0))
1962 mode1 = GET_MODE (target); 2062 mode1 = GET_MODE (target);
1963 else 2063 else
1964 mode1 = tmode; 2064 mode1 = tmode;
1965 2065
2066 unsigned HOST_WIDE_INT ibitsize, ibitnum;
1966 scalar_int_mode int_mode; 2067 scalar_int_mode int_mode;
1967 if (is_a <scalar_int_mode> (mode1, &int_mode) 2068 if (bitsize.is_constant (&ibitsize)
1968 && strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, int_mode, 0, 0)) 2069 && bitnum.is_constant (&ibitnum)
2070 && is_a <scalar_int_mode> (mode1, &int_mode)
2071 && strict_volatile_bitfield_p (str_rtx, ibitsize, ibitnum,
2072 int_mode, 0, 0))
1969 { 2073 {
1970 /* Extraction of a full INT_MODE value can be done with a simple load. 2074 /* Extraction of a full INT_MODE value can be done with a simple load.
1971 We know here that the field can be accessed with one single 2075 We know here that the field can be accessed with one single
1972 instruction. For targets that support unaligned memory, 2076 instruction. For targets that support unaligned memory,
1973 an unaligned access may be necessary. */ 2077 an unaligned access may be necessary. */
1974 if (bitsize == GET_MODE_BITSIZE (int_mode)) 2078 if (ibitsize == GET_MODE_BITSIZE (int_mode))
1975 { 2079 {
1976 rtx result = adjust_bitfield_address (str_rtx, int_mode, 2080 rtx result = adjust_bitfield_address (str_rtx, int_mode,
1977 bitnum / BITS_PER_UNIT); 2081 ibitnum / BITS_PER_UNIT);
1978 if (reverse) 2082 if (reverse)
1979 result = flip_storage_order (int_mode, result); 2083 result = flip_storage_order (int_mode, result);
1980 gcc_assert (bitnum % BITS_PER_UNIT == 0); 2084 gcc_assert (ibitnum % BITS_PER_UNIT == 0);
1981 return convert_extracted_bit_field (result, mode, tmode, unsignedp); 2085 return convert_extracted_bit_field (result, mode, tmode, unsignedp);
1982 } 2086 }
1983 2087
1984 str_rtx = narrow_bit_field_mem (str_rtx, int_mode, bitsize, bitnum, 2088 str_rtx = narrow_bit_field_mem (str_rtx, int_mode, ibitsize, ibitnum,
1985 &bitnum); 2089 &ibitnum);
1986 gcc_assert (bitnum + bitsize <= GET_MODE_BITSIZE (int_mode)); 2090 gcc_assert (ibitnum + ibitsize <= GET_MODE_BITSIZE (int_mode));
1987 str_rtx = copy_to_reg (str_rtx); 2091 str_rtx = copy_to_reg (str_rtx);
2092 return extract_bit_field_1 (str_rtx, ibitsize, ibitnum, unsignedp,
2093 target, mode, tmode, reverse, true, alt_rtl);
1988 } 2094 }
1989 2095
1990 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp, 2096 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
1991 target, mode, tmode, reverse, true, alt_rtl); 2097 target, mode, tmode, reverse, true, alt_rtl);
1992 } 2098 }
2250 if (CONSTANT_P (src)) 2356 if (CONSTANT_P (src))
2251 { 2357 {
2252 /* simplify_gen_subreg can't be used here, as if simplify_subreg 2358 /* simplify_gen_subreg can't be used here, as if simplify_subreg
2253 fails, it will happily create (subreg (symbol_ref)) or similar 2359 fails, it will happily create (subreg (symbol_ref)) or similar
2254 invalid SUBREGs. */ 2360 invalid SUBREGs. */
2255 unsigned int byte = subreg_lowpart_offset (mode, src_mode); 2361 poly_uint64 byte = subreg_lowpart_offset (mode, src_mode);
2256 rtx ret = simplify_subreg (mode, src, src_mode, byte); 2362 rtx ret = simplify_subreg (mode, src, src_mode, byte);
2257 if (ret) 2363 if (ret)
2258 return ret; 2364 return ret;
2259 2365
2260 if (GET_MODE (src) == VOIDmode 2366 if (GET_MODE (src) == VOIDmode
2266 } 2372 }
2267 2373
2268 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC) 2374 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
2269 return NULL_RTX; 2375 return NULL_RTX;
2270 2376
2271 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode) 2377 if (known_eq (GET_MODE_BITSIZE (mode), GET_MODE_BITSIZE (src_mode))
2272 && targetm.modes_tieable_p (mode, src_mode)) 2378 && targetm.modes_tieable_p (mode, src_mode))
2273 { 2379 {
2274 rtx x = gen_lowpart_common (mode, src); 2380 rtx x = gen_lowpart_common (mode, src);
2275 if (x) 2381 if (x)
2276 return x; 2382 return x;
2284 return NULL_RTX; 2390 return NULL_RTX;
2285 if (!targetm.modes_tieable_p (int_mode, mode)) 2391 if (!targetm.modes_tieable_p (int_mode, mode))
2286 return NULL_RTX; 2392 return NULL_RTX;
2287 2393
2288 src = gen_lowpart (src_int_mode, src); 2394 src = gen_lowpart (src_int_mode, src);
2395 if (!validate_subreg (int_mode, src_int_mode, src,
2396 subreg_lowpart_offset (int_mode, src_int_mode)))
2397 return NULL_RTX;
2398
2289 src = convert_modes (int_mode, src_int_mode, src, true); 2399 src = convert_modes (int_mode, src_int_mode, src, true);
2290 src = gen_lowpart (mode, src); 2400 src = gen_lowpart (mode, src);
2291 return src; 2401 return src;
2292 } 2402 }
2293 2403
2362 if (SHIFT_COUNT_TRUNCATED) 2472 if (SHIFT_COUNT_TRUNCATED)
2363 { 2473 {
2364 if (CONST_INT_P (op1) 2474 if (CONST_INT_P (op1)
2365 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >= 2475 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2366 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (scalar_mode))) 2476 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (scalar_mode)))
2367 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1) 2477 op1 = gen_int_shift_amount (mode,
2368 % GET_MODE_BITSIZE (scalar_mode)); 2478 (unsigned HOST_WIDE_INT) INTVAL (op1)
2479 % GET_MODE_BITSIZE (scalar_mode));
2369 else if (GET_CODE (op1) == SUBREG 2480 else if (GET_CODE (op1) == SUBREG
2370 && subreg_lowpart_p (op1) 2481 && subreg_lowpart_p (op1)
2371 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op1))) 2482 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op1)))
2372 && SCALAR_INT_MODE_P (GET_MODE (op1))) 2483 && SCALAR_INT_MODE_P (GET_MODE (op1)))
2373 op1 = SUBREG_REG (op1); 2484 op1 = SUBREG_REG (op1);
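On SHIFT_COUNT_TRUNCATED targets a constant count of at least the mode width is reduced modulo the width before the shift is emitted; the only change in this hunk is that the reduced count is now wrapped with gen_int_shift_amount rather than GEN_INT, so the amount can carry its own mode. The reduction itself is ordinary modular arithmetic (standalone stand-in, illustrative values):

#include <cassert>
#include <cstdint>

int
main ()
{
  const unsigned width = 32;  /* GET_MODE_BITSIZE (scalar_mode) */
  unsigned count = 37;        /* INTVAL (op1) */

  if (count >= width)
    count %= width;           /* what the truncating target would do anyway */

  uint32_t x = 0x80000001u;
  assert (count == 5);
  assert ((x << count) == 0x00000020u);
  return 0;
}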
2380 if (rotate 2491 if (rotate
2381 && CONST_INT_P (op1) 2492 && CONST_INT_P (op1)
2382 && IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (scalar_mode) / 2 + left, 2493 && IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (scalar_mode) / 2 + left,
2383 GET_MODE_BITSIZE (scalar_mode) - 1)) 2494 GET_MODE_BITSIZE (scalar_mode) - 1))
2384 { 2495 {
2385 op1 = GEN_INT (GET_MODE_BITSIZE (scalar_mode) - INTVAL (op1)); 2496 op1 = gen_int_shift_amount (mode, (GET_MODE_BITSIZE (scalar_mode)
2497 - INTVAL (op1)));
2386 left = !left; 2498 left = !left;
2387 code = left ? LROTATE_EXPR : RROTATE_EXPR; 2499 code = left ? LROTATE_EXPR : RROTATE_EXPR;
2388 } 2500 }
2389 2501
2390 /* Rotation of 16bit values by 8 bits is effectively equivalent to a bswaphi. 2502 /* Rotation of 16bit values by 8 bits is effectively equivalent to a bswaphi.
2393 0x04030201 (bswapsi). */ 2505 0x04030201 (bswapsi). */
2394 if (rotate 2506 if (rotate
2395 && CONST_INT_P (op1) 2507 && CONST_INT_P (op1)
2396 && INTVAL (op1) == BITS_PER_UNIT 2508 && INTVAL (op1) == BITS_PER_UNIT
2397 && GET_MODE_SIZE (scalar_mode) == 2 2509 && GET_MODE_SIZE (scalar_mode) == 2
2398 && optab_handler (bswap_optab, HImode) != CODE_FOR_nothing) 2510 && optab_handler (bswap_optab, mode) != CODE_FOR_nothing)
2399 return expand_unop (HImode, bswap_optab, shifted, NULL_RTX, 2511 return expand_unop (mode, bswap_optab, shifted, NULL_RTX, unsignedp);
2400 unsignedp);
2401 2512
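For illustration only (not code from the patch): the comment above holds because a 16-bit rotate by 8 exchanges the two bytes, which is exactly what a byte swap does, so the expander may fall back to the bswap optab for this one case. The check below leans on GCC's __builtin_bswap16 purely as a reference implementation.

#include <stdint.h>
#include <assert.h>

static uint16_t
rotr16 (uint16_t x, unsigned n)
{
  n &= 15;
  return (uint16_t) ((x >> n) | (x << ((16 - n) & 15)));
}

int
main (void)
{
  uint16_t x = 0x0102;
  assert (rotr16 (x, 8) == 0x0201);                  /* bytes exchanged */
  assert (rotr16 (x, 8) == __builtin_bswap16 (x));   /* same as a byte swap */
  return 0;
}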
2402 if (op1 == const0_rtx) 2513 if (op1 == const0_rtx)
2403 return shifted; 2514 return shifted;
2404 2515
2405 /* Check whether it's cheaper to implement a left shift by a constant 2516 /* Check whether it's cheaper to implement a left shift by a constant
2460 2571
2461 new_amount = op1; 2572 new_amount = op1;
2462 if (op1 == const0_rtx) 2573 if (op1 == const0_rtx)
2463 return shifted; 2574 return shifted;
2464 else if (CONST_INT_P (op1)) 2575 else if (CONST_INT_P (op1))
2465 other_amount = GEN_INT (GET_MODE_BITSIZE (scalar_mode) 2576 other_amount = gen_int_shift_amount
2466 - INTVAL (op1)); 2577 (mode, GET_MODE_BITSIZE (scalar_mode) - INTVAL (op1));
2467 else 2578 else
2468 { 2579 {
2469 other_amount 2580 other_amount
2470 = simplify_gen_unary (NEG, GET_MODE (op1), 2581 = simplify_gen_unary (NEG, GET_MODE (op1),
2471 op1, GET_MODE (op1)); 2582 op1, GET_MODE (op1));
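For illustration only (not code from the patch): the new_amount/other_amount pair prepared above feeds the fallback that builds a rotate out of two shifts and an IOR when no rotate instruction exists; a rough C model of that expansion, with an assumed 32-bit width, is:

#include <stdint.h>

/* Rotate left by N from two shifts and an OR.  N == 0 is handled by the
   early return above, so 32 - N never reaches the full bit width here.  */
static uint32_t
rotl_via_shifts (uint32_t x, unsigned n)
{
  return (x << n) | (x >> (32 - n));   /* assumes 0 < n < 32 */
}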
2532 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic. 2643 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2533 Return the rtx for where the value is. */ 2644 Return the rtx for where the value is. */
2534 2645
2535 rtx 2646 rtx
2536 expand_shift (enum tree_code code, machine_mode mode, rtx shifted, 2647 expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
2537 int amount, rtx target, int unsignedp) 2648 poly_int64 amount, rtx target, int unsignedp)
2538 { 2649 {
2539 return expand_shift_1 (code, mode, 2650 return expand_shift_1 (code, mode, shifted,
2540 shifted, GEN_INT (amount), target, unsignedp); 2651 gen_int_shift_amount (mode, amount),
2652 target, unsignedp);
2541 } 2653 }
2542 2654
2543 /* Likewise, but return 0 if that cannot be done. */ 2655 /* Likewise, but return 0 if that cannot be done. */
2544 2656
2545 static rtx 2657 static rtx
3233 if (SCALAR_INT_MODE_P (mode)) 3345 if (SCALAR_INT_MODE_P (mode))
3234 { 3346 {
3235 /* Write a REG_EQUAL note on the last insn so that we can cse 3347 /* Write a REG_EQUAL note on the last insn so that we can cse
3236 multiplication sequences. Note that if ACCUM is a SUBREG, 3348 multiplication sequences. Note that if ACCUM is a SUBREG,
3237 we've set the inner register and must properly indicate that. */ 3349 we've set the inner register and must properly indicate that. */
3238 tem = op0, nmode = mode; 3350 tem = op0, nmode = mode;
3239 accum_inner = accum; 3351 accum_inner = accum;
3240 if (GET_CODE (accum) == SUBREG) 3352 if (GET_CODE (accum) == SUBREG)
3241 { 3353 {
3242 accum_inner = SUBREG_REG (accum); 3354 accum_inner = SUBREG_REG (accum);
3243 nmode = GET_MODE (accum_inner); 3355 nmode = GET_MODE (accum_inner);
3244 tem = gen_lowpart (nmode, op0); 3356 tem = gen_lowpart (nmode, op0);
3245 } 3357 }
3246 3358
3247 insn = get_last_insn (); 3359 insn = get_last_insn ();
3248 set_dst_reg_note (insn, REG_EQUAL, 3360 wide_int wval_so_far
3249 gen_rtx_MULT (nmode, tem, 3361 = wi::uhwi (val_so_far,
3250 gen_int_mode (val_so_far, nmode)), 3362 GET_MODE_PRECISION (as_a <scalar_mode> (nmode)));
3363 rtx c = immed_wide_int_const (wval_so_far, nmode);
3364 set_dst_reg_note (insn, REG_EQUAL, gen_rtx_MULT (nmode, tem, c),
3251 accum_inner); 3365 accum_inner);
3252 } 3366 }
3253 } 3367 }
3254 3368
3255 if (variant == negate_variant) 3369 if (variant == negate_variant)
3281 If you want this check for OP0 as well, then before calling 3395 If you want this check for OP0 as well, then before calling
3282 you should swap the two operands if OP0 would be constant. */ 3396 you should swap the two operands if OP0 would be constant. */
3283 3397
3284 rtx 3398 rtx
3285 expand_mult (machine_mode mode, rtx op0, rtx op1, rtx target, 3399 expand_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
3286 int unsignedp) 3400 int unsignedp, bool no_libcall)
3287 { 3401 {
3288 enum mult_variant variant; 3402 enum mult_variant variant;
3289 struct algorithm algorithm; 3403 struct algorithm algorithm;
3290 rtx scalar_op1; 3404 rtx scalar_op1;
3291 int max_cost; 3405 int max_cost;
3417 if (CONST_DOUBLE_AS_FLOAT_P (scalar_op1) 3531 if (CONST_DOUBLE_AS_FLOAT_P (scalar_op1)
3418 && real_equal (CONST_DOUBLE_REAL_VALUE (scalar_op1), &dconst2)) 3532 && real_equal (CONST_DOUBLE_REAL_VALUE (scalar_op1), &dconst2))
3419 { 3533 {
3420 op0 = force_reg (GET_MODE (op0), op0); 3534 op0 = force_reg (GET_MODE (op0), op0);
3421 return expand_binop (mode, add_optab, op0, op0, 3535 return expand_binop (mode, add_optab, op0, op0,
3422 target, unsignedp, OPTAB_LIB_WIDEN); 3536 target, unsignedp,
3537 no_libcall ? OPTAB_WIDEN : OPTAB_LIB_WIDEN);
3423 } 3538 }
3424 3539
3425 /* This used to use umul_optab if unsigned, but for non-widening multiply 3540 /* This used to use umul_optab if unsigned, but for non-widening multiply
3426 there is no difference between signed and unsigned. */ 3541 there is no difference between signed and unsigned. */
3427 op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab, 3542 op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab,
3428 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN); 3543 op0, op1, target, unsignedp,
3429 gcc_assert (op0); 3544 no_libcall ? OPTAB_WIDEN : OPTAB_LIB_WIDEN);
3545 gcc_assert (op0 || no_libcall);
3430 return op0; 3546 return op0;
3431 } 3547 }
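For illustration only (not code from the patch): the comment about umul_optab rests on the fact that the low N bits of a product do not depend on whether the operands are read as signed or unsigned, so a non-widening multiply needs only one optab. A quick standalone check, with the 32-bit width and two's-complement conversions assumed for the sketch:

#include <stdint.h>
#include <assert.h>

int
main (void)
{
  uint32_t a = 0xfffffffeu;   /* -2 when reinterpreted as signed */
  uint32_t b = 3u;

  uint32_t lo_unsigned = a * b;
  uint32_t lo_signed = (uint32_t) ((int32_t) a * (int32_t) b);

  /* Both give 0xfffffffa, i.e. -6 modulo 2^32.  */
  assert (lo_unsigned == lo_signed);
  return 0;
}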
3432 3548
3433 /* Return a cost estimate for multiplying a register by the given 3549 /* Return a cost estimate for multiplying a register by the given
3434 COEFFicient in the given MODE and SPEED. */ 3550 COEFFicient in the given MODE and SPEED. */
3566 *lgup_ptr = lgup; 3682 *lgup_ptr = lgup;
3567 if (n < HOST_BITS_PER_WIDE_INT) 3683 if (n < HOST_BITS_PER_WIDE_INT)
3568 { 3684 {
3569 unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << n) - 1; 3685 unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << n) - 1;
3570 *multiplier_ptr = mhigh.to_uhwi () & mask; 3686 *multiplier_ptr = mhigh.to_uhwi () & mask;
3571 return mhigh.to_uhwi () >= mask; 3687 return mhigh.to_uhwi () > mask;
3572 } 3688 }
3573 else 3689 else
3574 { 3690 {
3575 *multiplier_ptr = mhigh.to_uhwi (); 3691 *multiplier_ptr = mhigh.to_uhwi ();
3576 return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1); 3692 return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1);
3697 tem, unsignedp); 3813 tem, unsignedp);
3698 } 3814 }
3699 3815
3700 /* Try widening multiplication. */ 3816 /* Try widening multiplication. */
3701 moptab = unsignedp ? umul_widen_optab : smul_widen_optab; 3817 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3702 if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing 3818 if (convert_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3703 && mul_widen_cost (speed, wider_mode) < max_cost) 3819 && mul_widen_cost (speed, wider_mode) < max_cost)
3704 { 3820 {
3705 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0, 3821 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3706 unsignedp, OPTAB_WIDEN); 3822 unsignedp, OPTAB_WIDEN);
3707 if (tem) 3823 if (tem)
3736 } 3852 }
3737 } 3853 }
3738 3854
3739 /* Try widening multiplication of opposite signedness, and adjust. */ 3855 /* Try widening multiplication of opposite signedness, and adjust. */
3740 moptab = unsignedp ? smul_widen_optab : umul_widen_optab; 3856 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3741 if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing 3857 if (convert_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3742 && size - 1 < BITS_PER_WORD 3858 && size - 1 < BITS_PER_WORD
3743 && (mul_widen_cost (speed, wider_mode) 3859 && (mul_widen_cost (speed, wider_mode)
3744 + 2 * shift_cost (speed, mode, size-1) 3860 + 2 * shift_cost (speed, mode, size-1)
3745 + 4 * add_cost (speed, mode) < max_cost)) 3861 + 4 * add_cost (speed, mode) < max_cost))
3746 { 3862 {
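For illustration only (not code from the patch): the "opposite signedness, and adjust" strategy works because the unsigned and signed high parts of a widening multiply differ by a correction that needs only the sign bits of the operands, which is what the extra shift and add costs in the test above pay for. The identity below is checked exhaustively on an assumed 8-bit width; it is the arithmetic idea only, not the instruction sequence GCC emits.

#include <stdint.h>
#include <assert.h>

int
main (void)
{
  for (unsigned a = 0; a < 256; a++)
    for (unsigned b = 0; b < 256; b++)
      {
        uint16_t up = (uint16_t) (a * b);                  /* unsigned 8x8->16 */
        int16_t sp = (int16_t) ((int8_t) a * (int8_t) b);  /* signed 8x8->16 */
        uint8_t umulh = (uint8_t) (up >> 8);               /* unsigned high part */
        uint8_t smulh = (uint8_t) ((uint16_t) sp >> 8);    /* signed high part, as a bit pattern */

        /* umulh == smulh + (a < 0 ? b : 0) + (b < 0 ? a : 0), modulo 2^8.  */
        uint8_t adjusted = (uint8_t) (smulh
                                      + ((int8_t) a < 0 ? b : 0)
                                      + ((int8_t) b < 0 ? a : 0));
        assert (adjusted == umulh);
      }
  return 0;
}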
3851 mode, 0, -1); 3967 mode, 0, -1);
3852 if (signmask) 3968 if (signmask)
3853 { 3969 {
3854 HOST_WIDE_INT masklow = (HOST_WIDE_INT_1 << logd) - 1; 3970 HOST_WIDE_INT masklow = (HOST_WIDE_INT_1 << logd) - 1;
3855 signmask = force_reg (mode, signmask); 3971 signmask = force_reg (mode, signmask);
3856 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd); 3972 shift = gen_int_shift_amount (mode, GET_MODE_BITSIZE (mode) - logd);
3857 3973
3858 /* Use the rtx_cost of a LSHIFTRT instruction to determine 3974 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3859 which instruction sequence to use. If logical right shifts 3975 which instruction sequence to use. If logical right shifts
3860 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise 3976 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3861 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */ 3977 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
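For illustration only (not code from the patch): both sequences named in the comment compute a signed remainder by a power of two with the sign of the dividend. Written as plain C on an assumed 32-bit type, with s standing in for SIGNMASK and mask for MASKLOW, they look like this (the sketch assumes 1 <= logd <= 31 and, for the XOR variant, x != INT32_MIN):

#include <stdint.h>
#include <assert.h>

/* LSHIFTRT, 1 ADD, 1 SUB and an AND.  */
static int32_t
smod_pow2_shift (int32_t x, int logd)
{
  int32_t mask = ((int32_t) 1 << logd) - 1;
  int32_t s = x < 0 ? -1 : 0;                 /* all-ones sign mask */
  uint32_t t = (uint32_t) s >> (32 - logd);   /* low logd bits of s */
  return (int32_t) (((uint32_t) x + t) & (uint32_t) mask) - (int32_t) t;
}

/* 2 XORs, 2 SUBs and an AND, for targets with slow logical right shifts.  */
static int32_t
smod_pow2_xor (int32_t x, int logd)
{
  int32_t mask = ((int32_t) 1 << logd) - 1;
  int32_t s = x < 0 ? -1 : 0;
  return ((((x ^ s) - s) & mask) ^ s) - s;
}

int
main (void)
{
  assert (smod_pow2_shift (-5, 2) == -1 && smod_pow2_xor (-5, 2) == -1);
  assert (smod_pow2_shift (7, 2) == 3 && smod_pow2_xor (7, 2) == 3);
  return 0;
}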
4368 int lgup, post_shift; 4484 int lgup, post_shift;
4369 rtx mlr; 4485 rtx mlr;
4370 HOST_WIDE_INT d = INTVAL (op1); 4486 HOST_WIDE_INT d = INTVAL (op1);
4371 unsigned HOST_WIDE_INT abs_d; 4487 unsigned HOST_WIDE_INT abs_d;
4372 4488
4489 /* Not prepared to handle division/remainder by
4490 0xffffffffffffffff8000000000000000 etc. */
4491 if (d == HOST_WIDE_INT_MIN && size > HOST_BITS_PER_WIDE_INT)
4492 break;
4493
4373 /* Since d might be INT_MIN, we have to cast to 4494 /* Since d might be INT_MIN, we have to cast to
4374 unsigned HOST_WIDE_INT before negating to avoid 4495 unsigned HOST_WIDE_INT before negating to avoid
4375 undefined signed overflow. */ 4496 undefined signed overflow. */
4376 abs_d = (d >= 0 4497 abs_d = (d >= 0
4377 ? (unsigned HOST_WIDE_INT) d 4498 ? (unsigned HOST_WIDE_INT) d
4410 int_mode) 4531 int_mode)
4411 != CODE_FOR_nothing) 4532 != CODE_FOR_nothing)
4412 || (optab_handler (sdivmod_optab, int_mode) 4533 || (optab_handler (sdivmod_optab, int_mode)
4413 != CODE_FOR_nothing))) 4534 != CODE_FOR_nothing)))
4414 ; 4535 ;
4415 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d) 4536 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4416 && (size <= HOST_BITS_PER_WIDE_INT
4417 || abs_d != (unsigned HOST_WIDE_INT) d))
4418 { 4537 {
4419 if (rem_flag) 4538 if (rem_flag)
4420 { 4539 {
4421 remainder = expand_smod_pow2 (int_mode, op0, d); 4540 remainder = expand_smod_pow2 (int_mode, op0, d);
4422 if (remainder) 4541 if (remainder)
5174 5293
5175 return t; 5294 return t;
5176 5295
5177 case CONST_VECTOR: 5296 case CONST_VECTOR:
5178 { 5297 {
5179 int units = CONST_VECTOR_NUNITS (x); 5298 unsigned int npatterns = CONST_VECTOR_NPATTERNS (x);
5299 unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
5180 tree itype = TREE_TYPE (type); 5300 tree itype = TREE_TYPE (type);
5181 int i;
5182 5301
5183 /* Build a tree with vector elements. */ 5302 /* Build a tree with vector elements. */
5184 auto_vec<tree, 32> elts (units); 5303 tree_vector_builder elts (type, npatterns, nelts_per_pattern);
5185 for (i = 0; i < units; ++i) 5304 unsigned int count = elts.encoded_nelts ();
5305 for (unsigned int i = 0; i < count; ++i)
5186 { 5306 {
5187 rtx elt = CONST_VECTOR_ELT (x, i); 5307 rtx elt = CONST_VECTOR_ELT (x, i);
5188 elts.quick_push (make_tree (itype, elt)); 5308 elts.quick_push (make_tree (itype, elt));
5189 } 5309 }
5190 5310
5191 return build_vector (type, elts); 5311 return elts.build ();
5192 } 5312 }
5193 5313
5194 case PLUS: 5314 case PLUS:
5195 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)), 5315 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5196 make_tree (type, XEXP (x, 1))); 5316 make_tree (type, XEXP (x, 1)));
5251 if (t) 5371 if (t)
5252 return fold_convert (type, build_fold_addr_expr (t)); 5372 return fold_convert (type, build_fold_addr_expr (t));
5253 /* fall through. */ 5373 /* fall through. */
5254 5374
5255 default: 5375 default:
5376 if (CONST_POLY_INT_P (x))
5377 return wide_int_to_tree (t, const_poly_int_value (x));
5378
5256 t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type); 5379 t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
5257 5380
5258 /* If TYPE is a POINTER_TYPE, we might need to convert X from 5381 /* If TYPE is a POINTER_TYPE, we might need to convert X from
5259 address mode to pointer mode. */ 5382 address mode to pointer mode. */
5260 if (POINTER_TYPE_P (type)) 5383 if (POINTER_TYPE_P (type))
5339 testing a single bit. This mostly benefits the 68k. 5462 testing a single bit. This mostly benefits the 68k.
5340 5463
5341 If STORE_FLAG_VALUE does not have the sign bit set when 5464 If STORE_FLAG_VALUE does not have the sign bit set when
5342 interpreted in MODE, we can do this conversion as unsigned, which 5465 interpreted in MODE, we can do this conversion as unsigned, which
5343 is usually more efficient. */ 5466 is usually more efficient. */
5344 if (GET_MODE_SIZE (int_target_mode) > GET_MODE_SIZE (result_mode)) 5467 if (GET_MODE_PRECISION (int_target_mode) > GET_MODE_PRECISION (result_mode))
5345 { 5468 {
5346 convert_move (target, subtarget, 5469 gcc_assert (GET_MODE_PRECISION (result_mode) != 1
5347 val_signbit_known_clear_p (result_mode, 5470 || STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1);
5348 STORE_FLAG_VALUE)); 5471
5472 bool unsignedp = (STORE_FLAG_VALUE >= 0);
5473 convert_move (target, subtarget, unsignedp);
5474
5349 op0 = target; 5475 op0 = target;
5350 result_mode = int_target_mode; 5476 result_mode = int_target_mode;
5351 } 5477 }
5352 else 5478 else
5353 op0 = subtarget; 5479 op0 = subtarget;
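For illustration only (not code from the patch): whether the widening conversion above must sign- or zero-extend is simply a question of which extension preserves STORE_FLAG_VALUE; a value of 1 survives either extension, while -1 only survives a signed one.

#include <stdint.h>
#include <assert.h>

int
main (void)
{
  int8_t flag_pos = 1, flag_neg = -1;

  assert ((int32_t) flag_neg == -1);                 /* sign-extend: value preserved */
  assert ((uint32_t) (uint8_t) flag_neg == 0xffu);   /* zero-extend: value changes */
  assert ((int32_t) flag_pos == 1
          && (uint32_t) (uint8_t) flag_pos == 1u);   /* 1 is safe either way */
  return 0;
}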
5421 code = swap_condition (code); 5547 code = swap_condition (code);
5422 } 5548 }
5423 5549
5424 if (mode == VOIDmode) 5550 if (mode == VOIDmode)
5425 mode = GET_MODE (op0); 5551 mode = GET_MODE (op0);
5552
5553 if (CONST_SCALAR_INT_P (op1))
5554 canonicalize_comparison (mode, &code, &op1);
5426 5555
5427 /* For some comparisons with 1 and -1, we can convert this to 5556 /* For some comparisons with 1 and -1, we can convert this to
5428 comparisons with zero. This will often produce more opportunities for 5557 comparisons with zero. This will often produce more opportunities for
5429 store-flag insns. */ 5558 store-flag insns. */
5430 5559
5922 } 6051 }
5923 6052
5924 if (!HAVE_conditional_move) 6053 if (!HAVE_conditional_move)
5925 return 0; 6054 return 0;
5926 6055
6056 /* Do not turn a trapping comparison into a non-trapping one. */
6057 if ((code != EQ && code != NE && code != UNEQ && code != LTGT)
6058 && flag_trapping_math)
6059 return 0;
6060
5927 /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a 6061 /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
5928 conditional move. */ 6062 conditional move. */
5929 tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0, 6063 tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
5930 normalizep, target_mode); 6064 normalizep, target_mode);
5931 if (tem == 0) 6065 if (tem == 0)
5965 6099
5966 /* First see if emit_store_flag can do the job. */ 6100 /* First see if emit_store_flag can do the job. */
5967 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep); 6101 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5968 if (tem != 0) 6102 if (tem != 0)
5969 return tem; 6103 return tem;
6104
6105 /* If one operand is constant, make it the second one. Only do this
6106 if the other operand is not constant as well. */
6107 if (swap_commutative_operands_p (op0, op1))
6108 {
6109 std::swap (op0, op1);
6110 code = swap_condition (code);
6111 }
6112
6113 if (mode == VOIDmode)
6114 mode = GET_MODE (op0);
5970 6115
5971 if (!target) 6116 if (!target)
5972 target = gen_reg_rtx (word_mode); 6117 target = gen_reg_rtx (word_mode);
5973 6118
5974 /* If this failed, we have to do this with set/compare/jump/set code. 6119 /* If this failed, we have to do this with set/compare/jump/set code.
6026 emit_move_insn (target, falseval); 6171 emit_move_insn (target, falseval);
6027 emit_label (label); 6172 emit_label (label);
6028 6173
6029 return target; 6174 return target;
6030 } 6175 }
6176
6177 /* Helper function for canonicalize_comparison.  Swap between inclusive
6178 and exclusive ranges in order to create an equivalent comparison.  See
6179 canonicalize_comparison for the possible cases.  */
6180
6181 static enum rtx_code
6182 equivalent_cmp_code (enum rtx_code code)
6183 {
6184 switch (code)
6185 {
6186 case GT:
6187 return GE;
6188 case GE:
6189 return GT;
6190 case LT:
6191 return LE;
6192 case LE:
6193 return LT;
6194 case GTU:
6195 return GEU;
6196 case GEU:
6197 return GTU;
6198 case LTU:
6199 return LEU;
6200 case LEU:
6201 return LTU;
6202
6203 default:
6204 return code;
6205 }
6206 }
6207
6208 /* Choose the more appropriate immediate in scalar integer comparisons.  The
6209 purpose of this is to end up with an immediate which can be loaded into a
6210 register in fewer moves, if possible.
6211
6212 For each integer comparison there exists an equivalent choice:
6213 i) a > b or a >= b + 1
6214 ii) a <= b or a < b + 1
6215 iii) a >= b or a > b - 1
6216 iv) a < b or a <= b - 1
6217
6218 MODE is the mode of the first operand.
6219 CODE points to the comparison code.
6220 IMM points to the rtx containing the immediate. *IMM must satisfy
6221 CONST_SCALAR_INT_P on entry and continues to satisfy CONST_SCALAR_INT_P
6222 on exit. */
6223
6224 void
6225 canonicalize_comparison (machine_mode mode, enum rtx_code *code, rtx *imm)
6226 {
6227 if (!SCALAR_INT_MODE_P (mode))
6228 return;
6229
6230 int to_add = 0;
6231 enum signop sgn = unsigned_condition_p (*code) ? UNSIGNED : SIGNED;
6232
6233 /* Extract the immediate value from the rtx. */
6234 wide_int imm_val = rtx_mode_t (*imm, mode);
6235
6236 if (*code == GT || *code == GTU || *code == LE || *code == LEU)
6237 to_add = 1;
6238 else if (*code == GE || *code == GEU || *code == LT || *code == LTU)
6239 to_add = -1;
6240 else
6241 return;
6242
6243 /* Check for overflow/underflow in the case of signed values and
6244 wrapping around in the case of unsigned values. If any occur
6245 cancel the optimization. */
6246 wi::overflow_type overflow = wi::OVF_NONE;
6247 wide_int imm_modif;
6248
6249 if (to_add == 1)
6250 imm_modif = wi::add (imm_val, 1, sgn, &overflow);
6251 else
6252 imm_modif = wi::sub (imm_val, 1, sgn, &overflow);
6253
6254 if (overflow)
6255 return;
6256
6257 /* The following creates a pseudo; if we cannot do that, bail out. */
6258 if (!can_create_pseudo_p ())
6259 return;
6260
6261 rtx reg = gen_rtx_REG (mode, LAST_VIRTUAL_REGISTER + 1);
6262 rtx new_imm = immed_wide_int_const (imm_modif, mode);
6263
6264 rtx_insn *old_rtx = gen_move_insn (reg, *imm);
6265 rtx_insn *new_rtx = gen_move_insn (reg, new_imm);
6266
6267 /* Update the immediate and the code. */
6268 if (insn_cost (old_rtx, true) > insn_cost (new_rtx, true))
6269 {
6270 *code = equivalent_cmp_code (*code);
6271 *imm = new_imm;
6272 }
6273 }
6274
6275
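For illustration only (not code from the patch): the equivalences enumerated in the comment above are ordinary integer identities as long as adding or subtracting 1 from the immediate does not overflow in the comparison's signedness, which is exactly the overflow check the function performs before rewriting. A quick standalone check of the four cases:

#include <assert.h>

int
main (void)
{
  for (int a = -3; a <= 3; a++)
    for (int b = -2; b <= 2; b++)
      {
        assert ((a > b) == (a >= b + 1));   /* case i   */
        assert ((a <= b) == (a < b + 1));   /* case ii  */
        assert ((a >= b) == (a > b - 1));   /* case iii */
        assert ((a < b) == (a <= b - 1));   /* case iv  */
      }
  return 0;
}

The rewrite itself is purely cost-driven: the function materializes both candidate immediates into a scratch pseudo and keeps the adjusted code/immediate pair only when insn_cost reports the new constant as cheaper to load.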
6031 6276
6032 /* Perform possibly multi-word comparison and conditional jump to LABEL 6277 /* Perform possibly multi-word comparison and conditional jump to LABEL
6033 if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE. This is 6278 if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE. This is
6034 now a thin wrapper around do_compare_rtx_and_jump. */ 6279 now a thin wrapper around do_compare_rtx_and_jump. */
6035 6280