gcc/config/ia64/ia64.c @ 55:77e2b8dfacca (gcc-4.4.5)

update it from 4.4.3 to 4.5.0

author    ryoma <e075725@ie.u-ryukyu.ac.jp>
date      Fri, 12 Feb 2010 23:39:51 +0900
parents   3bfb6c00c1e0
children  b7f97abdc517

--- gcc/config/ia64/ia64.c	52:c156f1bd5cd9
+++ gcc/config/ia64/ia64.c	55:77e2b8dfacca
@@ -1,8 +1,9 @@
 /* Definitions of target machine for GNU compiler.
    Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
-   2009 Free Software Foundation, Inc.
+   2009
+   Free Software Foundation, Inc.
    Contributed by James E. Wilson <wilson@cygnus.com> and
    David Mosberger <davidm@hpl.hp.com>.
 
 This file is part of GCC.
 
@@ -39,10 +40,11 @@
 #include "optabs.h"
 #include "except.h"
 #include "function.h"
 #include "ggc.h"
 #include "basic-block.h"
+#include "libfuncs.h"
 #include "toplev.h"
 #include "sched-int.h"
 #include "timevar.h"
 #include "target.h"
 #include "target-def.h"
@@ -60,15 +62,10 @@
 #include "sel-sched.h"
 
 /* This is used for communication between ASM_OUTPUT_LABEL and
    ASM_OUTPUT_LABELREF.  */
 int ia64_asm_output_label = 0;
-
-/* Define the information needed to generate branch and scc insns.  This is
-   stored from the compare operation.  */
-struct rtx_def * ia64_compare_op0;
-struct rtx_def * ia64_compare_op1;
 
 /* Register names for ia64_expand_prologue.  */
 static const char * const ia64_reg_numbers[96] =
 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
   "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
@@ -201,10 +198,11 @@
 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
 static rtx gen_movdi_x (rtx, rtx, rtx);
 static rtx gen_fr_spill_x (rtx, rtx, rtx);
 static rtx gen_fr_restore_x (rtx, rtx, rtx);
 
+static bool ia64_can_eliminate (const int, const int);
 static enum machine_mode hfa_element_mode (const_tree, bool);
 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                          tree, int *, int);
 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                    tree, bool);
@@ -279,10 +277,14 @@
   ATTRIBUTE_UNUSED;
 static void ia64_vms_init_libfuncs (void)
   ATTRIBUTE_UNUSED;
 static void ia64_soft_fp_init_libfuncs (void)
   ATTRIBUTE_UNUSED;
+static bool ia64_vms_valid_pointer_mode (enum machine_mode mode)
+  ATTRIBUTE_UNUSED;
+static tree ia64_vms_common_object_attribute (tree *, tree, tree, int, bool *)
+  ATTRIBUTE_UNUSED;
 
 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
 static void ia64_encode_section_info (tree, rtx, int);
 static rtx ia64_struct_value_rtx (tree, int);
@@ -293,6 +295,13 @@
 static const char *ia64_mangle_type (const_tree);
 static const char *ia64_invalid_conversion (const_tree, const_tree);
 static const char *ia64_invalid_unary_op (int, const_tree);
 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
 static enum machine_mode ia64_c_mode_for_suffix (char);
+static enum machine_mode ia64_promote_function_mode (const_tree,
+                                                     enum machine_mode,
+                                                     int *,
+                                                     const_tree,
+                                                     int);
+static void ia64_trampoline_init (rtx, tree, rtx);
+static void ia64_override_options_after_change (void);
 
@@ -299,11 +308,14 @@
 /* Table of valid machine attributes.  */
 static const struct attribute_spec ia64_attribute_table[] =
 {
   /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
   { "syscall_linkage", 0, 0, false, true, true, NULL },
   { "model",           1, 1, true, false, false, ia64_handle_model_attribute },
+#if TARGET_ABI_OPEN_VMS
+  { "common_object",   1, 1, true, false, false, ia64_vms_common_object_attribute},
+#endif
   { "version_id",      1, 1, true, false, false,
     ia64_handle_version_id_attribute },
   { NULL,              0, 0, false, false, false, NULL }
 };
 
@@ -460,21 +472,12 @@
 #ifdef HAVE_AS_TLS
 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
 #endif
 
-/* ??? ABI doesn't allow us to define this.  */
-#if 0
-#undef TARGET_PROMOTE_FUNCTION_ARGS
-#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
-#endif
-
-/* ??? ABI doesn't allow us to define this.  */
-#if 0
-#undef TARGET_PROMOTE_FUNCTION_RETURN
-#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
-#endif
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE ia64_promote_function_mode
 
 /* ??? Investigate.  */
 #if 0
 #undef TARGET_PROMOTE_PROTOTYPES
 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
@@ -526,10 +529,22 @@
 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
 
 #undef TARGET_C_MODE_FOR_SUFFIX
 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
 
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE ia64_can_eliminate
+
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT ia64_trampoline_init
+
+#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
+#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change
+
 struct gcc_target targetm = TARGET_INITIALIZER;
 
 typedef enum
   {
    ADDR_AREA_NORMAL,   /* normal address area */
@@ -584,24 +596,25 @@
     {
       addr_area = ADDR_AREA_SMALL;
     }
   else
     {
-      warning (OPT_Wattributes, "invalid argument of %qs attribute",
-               IDENTIFIER_POINTER (name));
+      warning (OPT_Wattributes, "invalid argument of %qE attribute",
+               name);
       *no_add_attrs = true;
     }
 
   switch (TREE_CODE (decl))
     {
     case VAR_DECL:
       if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
           == FUNCTION_DECL)
          && !TREE_STATIC (decl))
        {
-         error ("%Jan address area attribute cannot be specified for "
-                "local variables", decl);
+         error_at (DECL_SOURCE_LOCATION (decl),
+                   "an address area attribute cannot be specified for "
+                   "local variables");
          *no_add_attrs = true;
        }
       area = ia64_get_addr_area (decl);
       if (area != ADDR_AREA_NORMAL && addr_area != area)
        {
@@ -610,19 +623,122 @@
          *no_add_attrs = true;
        }
       break;
 
     case FUNCTION_DECL:
-      error ("%Jaddress area attribute cannot be specified for functions",
-             decl);
+      error_at (DECL_SOURCE_LOCATION (decl),
+                "address area attribute cannot be specified for "
+                "functions");
       *no_add_attrs = true;
       break;
 
     default:
-      warning (OPT_Wattributes, "%qs attribute ignored",
-               IDENTIFIER_POINTER (name));
+      warning (OPT_Wattributes, "%qE attribute ignored",
+               name);
       *no_add_attrs = true;
       break;
     }
 
   return NULL_TREE;
+}
+
+/* The section must have global and overlaid attributes.  */
+#define SECTION_VMS_OVERLAY SECTION_MACH_DEP
+
+/* Part of the low level implementation of DEC Ada pragma Common_Object which
+   enables the shared use of variables stored in overlaid linker areas
+   corresponding to the use of Fortran COMMON.  */
+
+static tree
+ia64_vms_common_object_attribute (tree *node, tree name, tree args,
+                                  int flags ATTRIBUTE_UNUSED,
+                                  bool *no_add_attrs)
+{
+  tree decl = *node;
+  tree id, val;
+  if (! DECL_P (decl))
+    abort ();
+
+  DECL_COMMON (decl) = 1;
+  id = TREE_VALUE (args);
+  if (TREE_CODE (id) == IDENTIFIER_NODE)
+    val = build_string (IDENTIFIER_LENGTH (id), IDENTIFIER_POINTER (id));
+  else if (TREE_CODE (id) == STRING_CST)
+    val = id;
+  else
+    {
+      warning (OPT_Wattributes,
+               "%qE attribute requires a string constant argument", name);
+      *no_add_attrs = true;
+      return NULL_TREE;
+    }
+  DECL_SECTION_NAME (decl) = val;
+  return NULL_TREE;
+}
+
+/* Part of the low level implementation of DEC Ada pragma Common_Object.  */
+
+void
+ia64_vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
+                                     unsigned HOST_WIDE_INT size,
+                                     unsigned int align)
+{
+  tree attr = DECL_ATTRIBUTES (decl);
+
+  /* As common_object attribute set DECL_SECTION_NAME check it before
+     looking up the attribute.  */
+  if (DECL_SECTION_NAME (decl) && attr)
+    attr = lookup_attribute ("common_object", attr);
+  else
+    attr = NULL_TREE;
+
+  if (!attr)
+    {
+      /* Code from elfos.h.  */
+      fprintf (file, "%s", COMMON_ASM_OP);
+      assemble_name (file, name);
+      fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
+               size, align / BITS_PER_UNIT);
+    }
+  else
+    {
+      ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
+      ASM_OUTPUT_LABEL (file, name);
+      ASM_OUTPUT_SKIP (file, size ? size : 1);
+    }
+}
+
+/* Definition of TARGET_ASM_NAMED_SECTION for VMS.  */
+
+void
+ia64_vms_elf_asm_named_section (const char *name, unsigned int flags,
+                                tree decl)
+{
+  if (!(flags & SECTION_VMS_OVERLAY))
+    {
+      default_elf_asm_named_section (name, flags, decl);
+      return;
+    }
+  if (flags != (SECTION_VMS_OVERLAY | SECTION_WRITE))
+    abort ();
+
+  if (flags & SECTION_DECLARED)
+    {
+      fprintf (asm_out_file, "\t.section\t%s\n", name);
+      return;
+    }
+
+  fprintf (asm_out_file, "\t.section\t%s,\"awgO\"\n", name);
 }
 
 static void
 ia64_encode_addr_area (tree decl, rtx symbol)
 {
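The common_object handler above implements the DEC Ada pragma Common_Object at the attribute level: it marks the DECL common and stores the attribute's string argument as DECL_SECTION_NAME, so ia64_vms_output_aligned_decl_common later emits the variable into an overlaid section of that name rather than as ordinary common.  A minimal usage sketch, assuming a VMS-hosted compiler built from this revision (the variable and section names are hypothetical):

    /* Shared with other modules through the overlaid area "PAYDATA",
       much like a Fortran COMMON block of the same name.  */
    int payroll_total __attribute__ ((common_object ("PAYDATA")));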
@@ -735,11 +838,11 @@
 /* Return the TLS model to use for ADDR.  */
 
 static enum tls_model
 tls_symbolic_operand_type (rtx addr)
 {
-  enum tls_model tls_kind = 0;
+  enum tls_model tls_kind = TLS_MODEL_NONE;
 
   if (GET_CODE (addr) == CONST)
     {
       if (GET_CODE (XEXP (addr, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
@@ -762,11 +865,12 @@
     case CONST_INT:
     case LABEL_REF:
       return true;
 
     case CONST_DOUBLE:
-      if (GET_MODE (x) == VOIDmode)
+      if (GET_MODE (x) == VOIDmode || GET_MODE (x) == SFmode
+          || GET_MODE (x) == DFmode)
        return true;
       return satisfies_constraint_G (x);
 
     case CONST:
     case SYMBOL_REF:
@@ -1309,13 +1413,11 @@
 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP)                              \
   if (GET_CODE (EXP) == MEM                                            \
       && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY                      \
          || GET_CODE (XEXP (EXP, 0)) == POST_INC                       \
          || GET_CODE (XEXP (EXP, 0)) == POST_DEC))                     \
-    REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC,                     \
-                                          XEXP (XEXP (EXP, 0), 0),     \
-                                          REG_NOTES (INSN))
+    add_reg_note (insn, REG_INC, XEXP (XEXP (EXP, 0), 0))
 
   insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
   MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
   MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
 
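This is the first of many hunks in this change that replace open-coded REG_NOTES chaining with the add_reg_note helper.  The two spellings build the same note list; roughly, the helper amounts to the following (a sketch of its definition from this era of GCC, shown for reference only):

    void
    add_reg_note (rtx insn, enum reg_note kind, rtx datum)
    {
      /* Cons a new note of KIND onto the insn's note chain.  */
      REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
    }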
@@ -1492,44 +1594,45 @@
     }
 
   return false;
 }
 
-/* Emit comparison instruction if necessary, returning the expression
-   that holds the compare result in the proper mode.  */
+/* Emit comparison instruction if necessary, replacing *EXPR, *OP0, *OP1
+   with the expression that holds the compare result (in VOIDmode).  */
 
 static GTY(()) rtx cmptf_libfunc;
 
-rtx
-ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
+void
+ia64_expand_compare (rtx *expr, rtx *op0, rtx *op1)
 {
-  rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
+  enum rtx_code code = GET_CODE (*expr);
   rtx cmp;
 
   /* If we have a BImode input, then we already have a compare result, and
      do not need to emit another comparison.  */
-  if (GET_MODE (op0) == BImode)
+  if (GET_MODE (*op0) == BImode)
     {
-      gcc_assert ((code == NE || code == EQ) && op1 == const0_rtx);
-      cmp = op0;
+      gcc_assert ((code == NE || code == EQ) && *op1 == const0_rtx);
+      cmp = *op0;
     }
   /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
      magic number as its third argument, that indicates what to do.
      The return value is an integer to be compared against zero.  */
-  else if (TARGET_HPUX && GET_MODE (op0) == TFmode)
+  else if (TARGET_HPUX && GET_MODE (*op0) == TFmode)
     {
       enum qfcmp_magic {
        QCMP_INV = 1,   /* Raise FP_INVALID on SNaN as a side effect.  */
        QCMP_UNORD = 2,
        QCMP_EQ = 4,
        QCMP_LT = 8,
        QCMP_GT = 16
-      } magic;
+      };
+      int magic;
       enum rtx_code ncode;
       rtx ret, insns;
 
-      gcc_assert (cmptf_libfunc && GET_MODE (op1) == TFmode);
+      gcc_assert (cmptf_libfunc && GET_MODE (*op1) == TFmode);
       switch (code)
        {
          /* 1 = equal, 0 = not equal.  Equality operators do
             not raise FP_INVALID when given an SNaN operand.  */
        case EQ:        magic = QCMP_EQ;                  ncode = NE; break;
@@ -1550,33 +1653,35 @@
        }
 
       start_sequence ();
 
       ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
-                                     op0, TFmode, op1, TFmode,
+                                     *op0, TFmode, *op1, TFmode,
                                      GEN_INT (magic), DImode);
       cmp = gen_reg_rtx (BImode);
       emit_insn (gen_rtx_SET (VOIDmode, cmp,
                               gen_rtx_fmt_ee (ncode, BImode,
                                               ret, const0_rtx)));
 
       insns = get_insns ();
       end_sequence ();
 
       emit_libcall_block (insns, cmp, cmp,
-                          gen_rtx_fmt_ee (code, BImode, op0, op1));
+                          gen_rtx_fmt_ee (code, BImode, *op0, *op1));
       code = NE;
     }
   else
     {
       cmp = gen_reg_rtx (BImode);
       emit_insn (gen_rtx_SET (VOIDmode, cmp,
-                              gen_rtx_fmt_ee (code, BImode, op0, op1)));
+                              gen_rtx_fmt_ee (code, BImode, *op0, *op1)));
       code = NE;
     }
 
-  return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
+  *expr = gen_rtx_fmt_ee (code, VOIDmode, cmp, const0_rtx);
+  *op0 = cmp;
+  *op1 = const0_rtx;
 }
 
 /* Generate an integral vector comparison.  Return true if the condition has
    been reversed, and so the sense of the comparison should be inverted.  */
 
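With the 4.5 interface, callers no longer stash operands in the removed ia64_compare_op0/op1 globals and receive an rtx back; the comparison is handed over by reference and rewritten in place.  A hedged sketch of a caller in a cbranch-style expander (the operand numbering is hypothetical):

    /* After the call, operands[0] is an NE/EQ test of a fresh BImode
       predicate register, and operands[1]/operands[2] have been replaced
       by that register and const0_rtx.  */
    ia64_expand_compare (&operands[0], &operands[1], &operands[2]);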
@@ -1626,22 +1731,29 @@
     {
     case V2SImode:
       {
        rtx t1, t2, mask;
 
-       /* Subtract (-(INT MAX) - 1) from both operands to make
-          them signed.  */
-       mask = GEN_INT (0x80000000);
+       /* Perform a parallel modulo subtraction.  */
+       t1 = gen_reg_rtx (V2SImode);
+       emit_insn (gen_subv2si3 (t1, op0, op1));
+
+       /* Extract the original sign bit of op0.  */
+       mask = GEN_INT (-0x80000000);
        mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
-       mask = force_reg (mode, mask);
-       t1 = gen_reg_rtx (mode);
-       emit_insn (gen_subv2si3 (t1, op0, mask));
-       t2 = gen_reg_rtx (mode);
-       emit_insn (gen_subv2si3 (t2, op1, mask));
-       op0 = t1;
-       op1 = t2;
+       mask = force_reg (V2SImode, mask);
+       t2 = gen_reg_rtx (V2SImode);
+       emit_insn (gen_andv2si3 (t2, op0, mask));
+
+       /* XOR it back into the result of the subtraction.  This results
+          in the sign bit set iff we saw unsigned underflow.  */
+       x = gen_reg_rtx (V2SImode);
+       emit_insn (gen_xorv2si3 (x, t1, t2));
+
        code = GT;
+       op0 = x;
+       op1 = CONST0_RTX (mode);
       }
       break;
 
     case V8QImode:
     case V4HImode:
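Both the removed and the new V2SImode sequences exist because the machine only offers signed vector comparisons.  The removed sequence leaned on the classic bias identity for deriving an unsigned compare from a signed one; a scalar model of that identity (standalone C, not from the patch):

    #include <assert.h>
    #include <stdint.h>

    /* a >u b  iff  (a ^ 0x80000000) >s (b ^ 0x80000000); XORing in the
       sign bit equals subtracting INT_MIN modulo 2^32.  */
    static int gtu_via_bias (uint32_t a, uint32_t b)
    {
      return (int32_t) (a ^ 0x80000000u) > (int32_t) (b ^ 0x80000000u);
    }

    int main (void)
    {
      assert (gtu_via_bias (1, 0));
      assert (!gtu_via_bias (0, 1));
      assert (gtu_via_bias (0x80000000u, 1));   /* high-bit operand */
      return 0;
    }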
@@ -1916,10 +2028,14 @@
       use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
     }
 
   if (sibcall_p)
     use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
+
+  if (TARGET_ABI_OPEN_VMS)
+    use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
+            gen_rtx_REG (DImode, GR_REG (25)));
 }
 
 static void
 reg_emitted (enum ia64_frame_regs r)
 {
@@ -1937,11 +2053,11 @@
 }
 
 static bool
 is_emitted (int regno)
 {
-  enum ia64_frame_regs r;
+  unsigned int r;
 
   for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
     if (emitted_frame_related_regs[r] == regno)
       return true;
   return false;
@@ -2628,10 +2744,18 @@
   COPY_HARD_REG_SET (current_frame_info.mask, mask);
   current_frame_info.n_spilled = n_spilled;
   current_frame_info.initialized = reload_completed;
 }
 
+/* Worker function for TARGET_CAN_ELIMINATE.  */
+
+bool
+ia64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
+{
+  return (to == BR_REG (0) ? current_function_is_leaf : true);
+}
+
 /* Compute the initial difference between the specified pair of registers.  */
 
 HOST_WIDE_INT
 ia64_initial_elimination_offset (int from, int to)
 {
@@ -2761,13 +2885,12 @@
          *spill_fill_data.prev_addr[iter]
            = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
                                   gen_rtx_PLUS (DImode,
                                                 spill_fill_data.iter_reg[iter],
                                                 disp_rtx));
-         REG_NOTES (spill_fill_data.prev_insn[iter])
-           = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
-                                REG_NOTES (spill_fill_data.prev_insn[iter]));
+         add_reg_note (spill_fill_data.prev_insn[iter],
+                       REG_INC, spill_fill_data.iter_reg[iter]);
        }
       else
        {
          /* ??? Could use register post_modify for loads.  */
          if (!satisfies_constraint_I (disp_rtx))
@@ -2880,17 +3003,15 @@
        {
          base = stack_pointer_rtx;
          off = current_frame_info.total_size - cfa_off;
        }
 
-      REG_NOTES (insn)
-       = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
-                            gen_rtx_SET (VOIDmode,
-                                         gen_rtx_MEM (GET_MODE (reg),
-                                                      plus_constant (base, off)),
-                                         frame_reg),
-                            REG_NOTES (insn));
+      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
+                   gen_rtx_SET (VOIDmode,
+                                gen_rtx_MEM (GET_MODE (reg),
+                                             plus_constant (base, off)),
+                                frame_reg));
     }
 }
 
 static void
 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
@@ -3086,20 +3207,16 @@
 
   if (! frame_pointer_needed)
     {
       RTX_FRAME_RELATED_P (insn) = 1;
       if (GET_CODE (offset) != CONST_INT)
-       {
-         REG_NOTES (insn)
-           = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
-                                gen_rtx_SET (VOIDmode,
-                                             stack_pointer_rtx,
-                                             gen_rtx_PLUS (DImode,
-                                                           stack_pointer_rtx,
-                                                           frame_size_rtx)),
-                                REG_NOTES (insn));
-       }
+       add_reg_note (insn, REG_FRAME_RELATED_EXPR,
+                     gen_rtx_SET (VOIDmode,
+                                  stack_pointer_rtx,
+                                  gen_rtx_PLUS (DImode,
+                                                stack_pointer_rtx,
+                                                frame_size_rtx)));
     }
 
   /* ??? At this point we must generate a magic insn that appears to
      modify the stack pointer, the frame pointer, and all spill
     iterators.  This would allow the most scheduling freedom.  For
@@ -3162,14 +3279,12 @@
       insn = emit_move_insn (alt_reg, reg);
 
       /* ??? Denote pr spill/fill by a DImode move that modifies all
         64 hard registers.  */
       RTX_FRAME_RELATED_P (insn) = 1;
-      REG_NOTES (insn)
-       = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
-                            gen_rtx_SET (VOIDmode, alt_reg, reg),
-                            REG_NOTES (insn));
+      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
+                   gen_rtx_SET (VOIDmode, alt_reg, reg));
 
       /* Even if we're not going to generate an epilogue, we still
         need to save the register so that EH works.  */
       if (! epilogue_p)
        emit_insn (gen_prologue_use (alt_reg));
@@ -3524,20 +3639,16 @@
       insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
                                    offset));
 
       RTX_FRAME_RELATED_P (insn) = 1;
       if (GET_CODE (offset) != CONST_INT)
-       {
-         REG_NOTES (insn)
-           = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
-                                gen_rtx_SET (VOIDmode,
-                                             stack_pointer_rtx,
-                                             gen_rtx_PLUS (DImode,
-                                                           stack_pointer_rtx,
-                                                           frame_size_rtx)),
-                                REG_NOTES (insn));
-       }
+       add_reg_note (insn, REG_FRAME_RELATED_EXPR,
+                     gen_rtx_SET (VOIDmode,
+                                  stack_pointer_rtx,
+                                  gen_rtx_PLUS (DImode,
+                                                stack_pointer_rtx,
+                                                frame_size_rtx)));
     }
 
   if (cfun->machine->ia64_eh_epilogue_bsp)
     emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
 
@@ -3664,11 +3775,11 @@
 
 int
 ia64_hard_regno_rename_ok (int from, int to)
 {
   /* Don't clobber any of the registers we reserved for the prologue.  */
-  enum ia64_frame_regs r;
+  unsigned int r;
 
   for (r = reg_fp; r <= reg_save_ar_lc; r++)
     if (to == current_frame_info.r[r]
        || from == current_frame_info.r[r]
        || to == emitted_frame_related_regs[r]
@@ -3840,14 +3951,39 @@
           + current_frame_info.n_local_regs + regno - OUT_REG (0));
   else
     return regno;
 }
 
-void
-ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
-{
-  rtx addr_reg, eight = GEN_INT (8);
+/* Implement TARGET_TRAMPOLINE_INIT.
+
+   The trampoline should set the static chain pointer to value placed
+   into the trampoline and should branch to the specified routine.
+   To make the normal indirect-subroutine calling convention work,
+   the trampoline must look like a function descriptor; the first
+   word being the target address and the second being the target's
+   global pointer.
+
+   We abuse the concept of a global pointer by arranging for it
+   to point to the data we need to load.  The complete trampoline
+   has the following form:
+
+                +-------------------+ \
+        TRAMP:  | __ia64_trampoline | |
+                +-------------------+  > fake function descriptor
+                | TRAMP+16          | |
+                +-------------------+ /
+                | target descriptor |
+                +-------------------+
+                | static link       |
+                +-------------------+
+*/
+
+static void
+ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
+{
+  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
+  rtx addr, addr_reg, tramp, eight = GEN_INT (8);
 
   /* The Intel assembler requires that the global __ia64_trampoline symbol
      be declared explicitly */
   if (!TARGET_GNU_AS)
     {
3860 "__ia64_trampoline"); 3996 "__ia64_trampoline");
3861 } 3997 }
3862 } 3998 }
3863 3999
3864 /* Make sure addresses are Pmode even if we are in ILP32 mode. */ 4000 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3865 addr = convert_memory_address (Pmode, addr); 4001 addr = convert_memory_address (Pmode, XEXP (m_tramp, 0));
3866 fnaddr = convert_memory_address (Pmode, fnaddr); 4002 fnaddr = convert_memory_address (Pmode, fnaddr);
3867 static_chain = convert_memory_address (Pmode, static_chain); 4003 static_chain = convert_memory_address (Pmode, static_chain);
3868 4004
3869 /* Load up our iterator. */ 4005 /* Load up our iterator. */
3870 addr_reg = gen_reg_rtx (Pmode); 4006 addr_reg = copy_to_reg (addr);
3871 emit_move_insn (addr_reg, addr); 4007 m_tramp = adjust_automodify_address (m_tramp, Pmode, addr_reg, 0);
3872 4008
3873 /* The first two words are the fake descriptor: 4009 /* The first two words are the fake descriptor:
3874 __ia64_trampoline, ADDR+16. */ 4010 __ia64_trampoline, ADDR+16. */
3875 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), 4011 tramp = gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline");
3876 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline")); 4012 if (TARGET_ABI_OPEN_VMS)
4013 {
4014 /* HP decided to break the ELF ABI on VMS (to deal with an ambiguity
4015 in the Macro-32 compiler) and changed the semantics of the LTOFF22
4016 relocation against function symbols to make it identical to the
4017 LTOFF_FPTR22 relocation. Emit the latter directly to stay within
4018 strict ELF and dereference to get the bare code address. */
4019 rtx reg = gen_reg_rtx (Pmode);
4020 SYMBOL_REF_FLAGS (tramp) |= SYMBOL_FLAG_FUNCTION;
4021 emit_move_insn (reg, tramp);
4022 emit_move_insn (reg, gen_rtx_MEM (Pmode, reg));
4023 tramp = reg;
4024 }
4025 emit_move_insn (m_tramp, tramp);
3877 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight)); 4026 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3878 4027 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
3879 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), 4028
3880 copy_to_reg (plus_constant (addr, 16))); 4029 emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (addr, 16)));
3881 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight)); 4030 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4031 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
3882 4032
3883 /* The third word is the target descriptor. */ 4033 /* The third word is the target descriptor. */
3884 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr); 4034 emit_move_insn (m_tramp, force_reg (Pmode, fnaddr));
3885 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight)); 4035 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4036 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
3886 4037
3887 /* The fourth word is the static chain. */ 4038 /* The fourth word is the static chain. */
3888 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain); 4039 emit_move_insn (m_tramp, static_chain);
3889 } 4040 }
3890 4041
3891 /* Do any needed setup for a variadic function. CUM has not been updated 4042 /* Do any needed setup for a variadic function. CUM has not been updated
3892 for the last named argument which has type TYPE and mode MODE. 4043 for the last named argument which has type TYPE and mode MODE.
3893 4044
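ia64_trampoline_init now receives the trampoline as a MEM (m_tramp) and walks it in four 8-byte steps with adjust_automodify_address.  A C model of the 32-byte block the diagram above describes; the struct and field names are hypothetical:

    #include <stdint.h>

    struct ia64_trampoline_words
    {
      uint64_t fake_fd_entry;   /* word 0: address of __ia64_trampoline */
      uint64_t fake_fd_gp;      /* word 1: TRAMP+16, the fake "gp" */
      uint64_t target_fd;       /* word 2: target function descriptor */
      uint64_t static_link;     /* word 3: static chain value */
    };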
@@ -4028,11 +4179,12 @@
    all as if they had 16 byte alignment.  Such aggregates can occur
    only if gcc extensions are used.  */
 static int
 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
 {
-  if ((cum->words & 1) == 0)
+  /* No registers are skipped on VMS.  */
+  if (TARGET_ABI_OPEN_VMS || (cum->words & 1) == 0)
     return 0;
 
   if (type
       && TREE_CODE (type) != INTEGER_TYPE
       && TREE_CODE (type) != REAL_TYPE)
@@ -4052,10 +4204,28 @@
 {
   int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
   int words = ia64_function_arg_words (type, mode);
   int offset = ia64_function_arg_offset (cum, type, words);
   enum machine_mode hfa_mode = VOIDmode;
+
+  /* For OPEN VMS, emit the instruction setting up the argument register here,
+     when we know this will be together with the other arguments setup related
+     insns.  This is not the conceptually best place to do this, but this is
+     the easiest as we have convenient access to cumulative args info.  */
+
+  if (TARGET_ABI_OPEN_VMS && mode == VOIDmode && type == void_type_node
+      && named == 1)
+    {
+      unsigned HOST_WIDE_INT regval = cum->words;
+      int i;
+
+      for (i = 0; i < 8; i++)
+       regval |= ((int) cum->atypes[i]) << (i * 3 + 8);
+
+      emit_move_insn (gen_rtx_REG (DImode, GR_REG (25)),
+                     GEN_INT (regval));
+    }
 
   /* If all argument slots are used, then it must go on the stack.  */
   if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
     return 0;
 
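The OpenVMS argument-information value built here packs the argument count into the low byte of r25 and a 3-bit ivms_arg_type code per register slot starting at bit 8.  A worked sketch of the packing (the numeric codes assumed below are illustrative; the real enum values live in ia64.h of this revision):

    /* Two slots used; suppose slot 0 is I64 (code 0) and slot 1 is FS
       (code 1):  regval = 2 | (0 << 8) | (1 << 11) = 0x802.  */
    unsigned long regval = 2;
    regval |= 0ul << (0 * 3 + 8);   /* slot 0 */
    regval |= 1ul << (1 * 3 + 8);   /* slot 1 */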
@@ -4141,10 +4311,19 @@
            int_regs++;
          else if (gr_size > UNITS_PER_WORD)
            int_regs += gr_size / UNITS_PER_WORD;
        }
       return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
+    }
+
+  /* On OpenVMS variable argument is either in Rn or Fn.  */
+  else if (TARGET_ABI_OPEN_VMS && named == 0)
+    {
+      if (FLOAT_MODE_P (mode))
+       return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
+      else
+       return gen_rtx_REG (mode, basereg + cum->words);
     }
 
   /* Integral and aggregates go in general registers.  If we have run out of
      FR registers, then FP values must also go in general registers.  This can
      happen when we have a SFmode HFA.  */
@@ -4234,10 +4413,26 @@
     return 0;
 
   return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
 }
 
+/* Return ivms_arg_type based on machine_mode.  */
+
+static enum ivms_arg_type
+ia64_arg_type (enum machine_mode mode)
+{
+  switch (mode)
+    {
+    case SFmode:
+      return FS;
+    case DFmode:
+      return FT;
+    default:
+      return I64;
+    }
+}
+
 /* Update CUM to point after this argument.  This is patterned after
    ia64_function_arg.  */
 
 void
 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
@@ -4247,12 +4442,16 @@
   int offset = ia64_function_arg_offset (cum, type, words);
   enum machine_mode hfa_mode = VOIDmode;
 
   /* If all arg slots are already full, then there is nothing to do.  */
   if (cum->words >= MAX_ARGUMENT_SLOTS)
-    return;
-
+    {
+      cum->words += words + offset;
+      return;
+    }
+
+  cum->atypes[cum->words] = ia64_arg_type (mode);
   cum->words += words + offset;
 
   /* Check for and handle homogeneous FP aggregates.  */
   if (type)
     hfa_mode = hfa_element_mode (type, 0);
@@ -4289,10 +4488,17 @@
              args_byte_size += hfa_size;
              fp_regs++;
            }
 
       cum->fp_regs = fp_regs;
+    }
+
+  /* On OpenVMS variable argument is either in Rn or Fn.  */
+  else if (TARGET_ABI_OPEN_VMS && named == 0)
+    {
+      cum->int_regs = cum->words;
+      cum->fp_regs = cum->words;
     }
 
   /* Integral and aggregates go in general registers.  So do TFmode FP values.
      If we have run out of FR registers, then other FP values must also go in
     general registers.  This can happen when we have a SFmode HFA.  */
@@ -4435,14 +4641,15 @@
 }
 
 /* Return rtx for register that holds the function return value.  */
 
 rtx
-ia64_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
+ia64_function_value (const_tree valtype, const_tree func)
 {
   enum machine_mode mode;
   enum machine_mode hfa_mode;
+  int unsignedp;
 
   mode = TYPE_MODE (valtype);
   hfa_mode = hfa_element_mode (valtype, 0);
 
   if (hfa_mode != VOIDmode)
@@ -4509,10 +4716,14 @@
          offset += UNITS_PER_WORD;
        }
       return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
     }
 
+  mode = ia64_promote_function_mode (valtype, mode, &unsignedp,
+                                     func ? TREE_TYPE (func) : NULL_TREE,
+                                     true);
+
   return gen_rtx_REG (mode, GR_RET_FIRST);
 }
 
 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
@@ -4546,10 +4757,11 @@
        D       Print an FP comparison operator.
        E       Print 32 - constant, for SImode shifts as extract.
        e       Print 64 - constant, for DImode rotates.
        F       A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
                a floating point register emitted normally.
+       G       A floating point constant.
        I       Invert a predicate register by adding 1.
        J       Select the proper predicate register for a condition.
        j       Select the inverse predicate register for a condition.
        O       Append .acq for volatile load.
        P       Postincrement of a MEM.
@@ -4633,10 +4845,28 @@
            str = reg_names [REGNO (x)];
        }
       fputs (str, file);
       return;
 
+    case 'G':
+      {
+       long val[4];
+       REAL_VALUE_TYPE rv;
+       REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+       real_to_target (val, &rv, GET_MODE (x));
+       if (GET_MODE (x) == SFmode)
+         fprintf (file, "0x%08lx", val[0] & 0xffffffff);
+       else if (GET_MODE (x) == DFmode)
+         fprintf (file, "0x%08lx%08lx", (WORDS_BIG_ENDIAN ? val[0] : val[1])
+                                        & 0xffffffff,
+                                        (WORDS_BIG_ENDIAN ? val[1] : val[0])
+                                        & 0xffffffff);
+       else
+         output_operand_lossage ("invalid %%G mode");
+      }
+      return;
+
     case 'I':
       fputs (reg_names [REGNO (x) + 1], file);
       return;
 
     case 'J':
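The new %G directive prints the raw target encoding of an SFmode or DFmode constant as hex.  For reference, the IEEE-754 single encoding of 1.0 (plain C, independent of the patch):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int main (void)
    {
      float f = 1.0f;
      uint32_t bits;
      memcpy (&bits, &f, sizeof bits);    /* reinterpret the float's bytes */
      printf ("0x%08lx\n", (unsigned long) bits);   /* prints 0x3f800000 */
      return 0;
    }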
@@ -5203,12 +5433,10 @@
 /* Implement TARGET_HANDLE_OPTION.  */
 
 static bool
 ia64_handle_option (size_t code, const char *arg, int value)
 {
-  static bool warned_itanium1_deprecated;
-
   switch (code)
     {
     case OPT_mfixed_range_:
       fix_range (arg);
       return true;
@@ -5225,33 +5453,20 @@
            const char *name;           /* processor name or nickname.  */
            enum processor_type processor;
          }
        const processor_alias_table[] =
          {
-           {"itanium", PROCESSOR_ITANIUM},
-           {"itanium1", PROCESSOR_ITANIUM},
-           {"merced", PROCESSOR_ITANIUM},
            {"itanium2", PROCESSOR_ITANIUM2},
            {"mckinley", PROCESSOR_ITANIUM2},
          };
        int const pta_size = ARRAY_SIZE (processor_alias_table);
        int i;
 
        for (i = 0; i < pta_size; i++)
          if (!strcmp (arg, processor_alias_table[i].name))
            {
              ia64_tune = processor_alias_table[i].processor;
-             if (ia64_tune == PROCESSOR_ITANIUM
-                 && ! warned_itanium1_deprecated)
-               {
-                 inform (0,
-                         "value %<%s%> for -mtune= switch is deprecated",
-                         arg);
-                 inform (0, "GCC 4.4 is the last release with "
-                         "Itanium1 tuning support");
-                 warned_itanium1_deprecated = true;
-               }
              break;
            }
        if (i == pta_size)
          error ("bad value %<%s%> for -mtune= switch", arg);
        return true;
@@ -5268,46 +5483,58 @@
 ia64_override_options (void)
 {
   if (TARGET_AUTO_PIC)
     target_flags |= MASK_CONST_GP;
 
-  if (TARGET_INLINE_SQRT == INL_MIN_LAT)
-    {
-      warning (0, "not yet implemented: latency-optimized inline square root");
-      TARGET_INLINE_SQRT = INL_MAX_THR;
-    }
-
-  ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
-  flag_schedule_insns_after_reload = 0;
-
-  if (optimize >= 3
-      && ! sel_sched_switch_set)
-    {
-      flag_selective_scheduling2 = 1;
-      flag_sel_sched_pipelining = 1;
-    }
-  if (mflag_sched_control_spec == 2)
-    {
-      /* Control speculation is on by default for the selective scheduler,
-         but not for the Haifa scheduler.  */
-      mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
-    }
-  if (flag_sel_sched_pipelining && flag_auto_inc_dec)
-    {
-      /* FIXME: remove this when we'd implement breaking autoinsns as
-         a transformation.  */
-      flag_auto_inc_dec = 0;
-    }
+  /* Numerous experiment shows that IRA based loop pressure
+     calculation works better for RTL loop invariant motion on targets
+     with enough (>= 32) registers.  It is an expensive optimization.
+     So it is on only for peak performance.  */
+  if (optimize >= 3)
+    flag_ira_loop_pressure = 1;
+
 
   ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
 
   init_machine_status = ia64_init_machine_status;
 
   if (align_functions <= 0)
     align_functions = 64;
   if (align_loops <= 0)
     align_loops = 32;
+  if (TARGET_ABI_OPEN_VMS)
+    flag_no_common = 1;
+
+  ia64_override_options_after_change();
+}
+
+/* Implement targetm.override_options_after_change.  */
+
+static void
+ia64_override_options_after_change (void)
+{
+  ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
+  flag_schedule_insns_after_reload = 0;
+
+  if (optimize >= 3
+      && ! sel_sched_switch_set)
+    {
+      flag_selective_scheduling2 = 1;
+      flag_sel_sched_pipelining = 1;
+    }
+  if (mflag_sched_control_spec == 2)
+    {
+      /* Control speculation is on by default for the selective scheduler,
+         but not for the Haifa scheduler.  */
+      mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
+    }
+  if (flag_sel_sched_pipelining && flag_auto_inc_dec)
+    {
+      /* FIXME: remove this when we'd implement breaking autoinsns as
+         a transformation.  */
+      flag_auto_inc_dec = 0;
+    }
 }
 
 /* Initialize the record of emitted frame related registers.  */
 
 void ia64_init_expanders (void)
@@ -5327,10 +5554,12 @@
 static enum attr_itanium_class
 ia64_safe_itanium_class (rtx insn)
 {
   if (recog_memoized (insn) >= 0)
     return get_attr_itanium_class (insn);
+  else if (DEBUG_INSN_P (insn))
+    return ITANIUM_CLASS_IGNORE;
   else
     return ITANIUM_CLASS_UNKNOWN;
 }
 
 static enum attr_type
@@ -6083,10 +6312,11 @@
 
   memset (&flags, 0, sizeof (flags));
   switch (GET_CODE (insn))
     {
     case NOTE:
+    case DEBUG_INSN:
       break;
 
     case BARRIER:
       /* A barrier doesn't imply an instruction group boundary.  */
       break;
6240 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER) 6470 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6241 { 6471 {
6242 init_insn_group_barriers (); 6472 init_insn_group_barriers ();
6243 last_label = 0; 6473 last_label = 0;
6244 } 6474 }
6245 else if (INSN_P (insn)) 6475 else if (NONDEBUG_INSN_P (insn))
6246 { 6476 {
6247 insns_since_last_label = 1; 6477 insns_since_last_label = 1;
6248 6478
6249 if (group_barrier_needed (insn)) 6479 if (group_barrier_needed (insn))
6250 { 6480 {
@@ -6288,11 +6518,11 @@
            if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
              emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
 
            init_insn_group_barriers ();
          }
-       else if (INSN_P (insn))
+       else if (NONDEBUG_INSN_P (insn))
          {
            if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
              init_insn_group_barriers ();
            else if (group_barrier_needed (insn))
              {
@@ -6371,21 +6601,10 @@
 
 /* The following variable value is length of the arrays `clocks' and
    `add_cycles'.  */
 
 static int clocks_length;
-
-/* The following array element values are cycles on which the
-   corresponding insn will be issued.  The array is used only for
-   Itanium1.  */
-
-static int *clocks;
-
-/* The following array element values are numbers of cycles should be
-   added to improve insn scheduling for MM_insns for Itanium1.  */
-
-static int *add_cycles;
 
 /* The following variable value is number of data speculations in progress.  */
 static int pending_data_specs = 0;
 
 /* Number of memory references on current and three future processor cycles.  */
@@ -6756,11 +6975,9 @@
 static int
 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
                     int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
                     int *pn_ready, int clock_var)
 {
-  if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
-    clocks [INSN_UID (last_scheduled_insn)] = clock_var;
   return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
                                 clock_var, 1);
 }
 
 /* We are about to issue INSN.  Return the number of insns left on the
6767 /* We are about to issue INSN. Return the number of insns left on the 6984 /* We are about to issue INSN. Return the number of insns left on the
6780 if (DONE_SPEC (insn) & BEGIN_DATA) 6997 if (DONE_SPEC (insn) & BEGIN_DATA)
6781 pending_data_specs++; 6998 pending_data_specs++;
6782 if (CHECK_SPEC (insn) & BEGIN_DATA) 6999 if (CHECK_SPEC (insn) & BEGIN_DATA)
6783 pending_data_specs--; 7000 pending_data_specs--;
6784 } 7001 }
7002
7003 if (DEBUG_INSN_P (insn))
7004 return 1;
6785 7005
6786 last_scheduled_insn = insn; 7006 last_scheduled_insn = insn;
6787 memcpy (prev_cycle_state, curr_state, dfa_state_size); 7007 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6788 if (reload_completed) 7008 if (reload_completed)
6789 { 7009 {
@@ -6863,10 +7083,14 @@
                    int clock, int *sort_p)
 {
   int setup_clocks_p = FALSE;
 
   gcc_assert (insn && INSN_P (insn));
+
+  if (DEBUG_INSN_P (insn))
+    return 0;
+
   /* When a group barrier is needed for insn, last_scheduled_insn
      should be set.  */
   gcc_assert (!(reload_completed && safe_group_barrier_needed (insn))
              || last_scheduled_insn);
 
@@ -6919,41 +7143,10 @@
        }
     }
   else if (reload_completed)
     setup_clocks_p = TRUE;
 
-  if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
-      && GET_CODE (PATTERN (insn)) != ASM_INPUT
-      && asm_noperands (PATTERN (insn)) < 0)
-    {
-      enum attr_itanium_class c = ia64_safe_itanium_class (insn);
-
-      if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
-       {
-         sd_iterator_def sd_it;
-         dep_t dep;
-         int d = -1;
-
-         FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
-           if (DEP_TYPE (dep) == REG_DEP_TRUE)
-             {
-               enum attr_itanium_class dep_class;
-               rtx dep_insn = DEP_PRO (dep);
-
-               dep_class = ia64_safe_itanium_class (dep_insn);
-               if ((dep_class == ITANIUM_CLASS_MMMUL
-                    || dep_class == ITANIUM_CLASS_MMSHF)
-                   && last_clock - clocks [INSN_UID (dep_insn)] < 4
-                   && (d < 0
-                       || last_clock - clocks [INSN_UID (dep_insn)] < d))
-                 d = last_clock - clocks [INSN_UID (dep_insn)];
-             }
-         if (d >= 0)
-           add_cycles [INSN_UID (insn)] = 3 - d;
-       }
-    }
-
   return 0;
 }
 
 /* Implement targetm.sched.h_i_d_extended hook.
    Extend internal data structures.  */
@@ -6961,21 +7154,11 @@
 ia64_h_i_d_extended (void)
 {
   if (stops_p != NULL)
     {
       int new_clocks_length = get_max_uid () * 3 / 2;
-
       stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
-
-      if (ia64_tune == PROCESSOR_ITANIUM)
-       {
-         clocks = (int *) xrecalloc (clocks, new_clocks_length, clocks_length,
-                                     sizeof (int));
-         add_cycles = (int *) xrecalloc (add_cycles, new_clocks_length,
-                                         clocks_length, sizeof (int));
-       }
-
       clocks_length = new_clocks_length;
     }
 }
 
 
@@ -7922,11 +8105,11 @@
 static int
 insert_bundle_state (struct bundle_state *bundle_state)
 {
   void **entry_ptr;
 
-  entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
+  entry_ptr = htab_find_slot (bundle_state_table, bundle_state, INSERT);
   if (*entry_ptr == NULL)
     {
       bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
       index_to_bundle_states [bundle_state->insn_num] = bundle_state;
       *entry_ptr = (void *) bundle_state;
@@ -8286,13 +8469,11 @@
            gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
                        || code == CODE_FOR_nop_b);
            if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
              note = NULL_RTX;
            else
-             REG_NOTES (insn)
-               = gen_rtx_EXPR_LIST (REG_EH_REGION, XEXP (note, 0),
-                                    REG_NOTES (insn));
+             add_reg_note (insn, REG_EH_REGION, XEXP (note, 0));
          }
        }
     }
 #endif
 }
8330 best sequence and then, moving back in EBB, insert templates for 8511 best sequence and then, moving back in EBB, insert templates for
8331 the best alternative. The templates are taken from querying 8512 the best alternative. The templates are taken from querying
8332 automaton state for each insn in chosen bundle states. 8513 automaton state for each insn in chosen bundle states.
8333 8514
8334 So the algorithm makes two (forward and backward) passes through 8515 So the algorithm makes two (forward and backward) passes through
8335 EBB. There is an additional forward pass through EBB for Itanium1 8516 EBB. */
8336 processor. This pass inserts more nops to make the dependency between
8337 a producer insn and an MMMUL/MMSHF insn at least 4 cycles long. */
8338 8517
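/* [Editor's sketch]  The two passes described above, reduced to their
   control shape; try_issue, next_important and record_template are
   illustrative stand-ins for the real routines:  */
/* Forward pass: extend every reachable bundle state for this insn,
   trying 2, 1 or 0 nops before it.  */
for (insn = first; insn != NULL; insn = next_important (insn))
  for (state = index_to_bundle_states[insn_num]; state != NULL;
       state = state->next)
    {
      try_issue (state, 2, insn);
      try_issue (state, 1, insn);
      try_issue (state, 0, insn);
    }
/* Backward pass: from the cheapest final state, walk originator links
   and materialize the chosen templates and nops.  */
for (state = best_state; state != NULL; state = state->originator)
  record_template (state);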
8339 static void 8518 static void
8340 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail) 8519 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
8341 { 8520 {
8342 struct bundle_state *curr_state, *next_state, *best_state; 8521 struct bundle_state *curr_state, *next_state, *best_state;
8431 bundle_end_p 8610 bundle_end_p
8432 = (only_bundle_end_p || next_insn == NULL_RTX 8611 = (only_bundle_end_p || next_insn == NULL_RTX
8433 || (GET_MODE (next_insn) == TImode 8612 || (GET_MODE (next_insn) == TImode
8434 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier)); 8613 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
8435 if (type == TYPE_F || type == TYPE_B || type == TYPE_L 8614 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
8436 || type == TYPE_S 8615 || type == TYPE_S)
8437 /* We need to insert 2 nops for cases like M_MII. To
8438 guarantee issuing all insns on the same cycle for
8439 Itanium 1, we need to issue 2 nops after the first M
8440 insn (MnnMII where n is a nop insn). */
8441 || ((type == TYPE_M || type == TYPE_A)
8442 && ia64_tune == PROCESSOR_ITANIUM
8443 && !bundle_end_p && pos == 1))
8444 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p, 8616 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
8445 only_bundle_end_p); 8617 only_bundle_end_p);
8446 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p, 8618 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
8447 only_bundle_end_p); 8619 only_bundle_end_p);
8448 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p, 8620 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
8474 ? -1 : curr_state->originator->unique_num), 8646 ? -1 : curr_state->originator->unique_num),
8475 curr_state->cost, 8647 curr_state->cost,
8476 curr_state->before_nops_num, curr_state->after_nops_num, 8648 curr_state->before_nops_num, curr_state->after_nops_num,
8477 curr_state->accumulated_insns_num, curr_state->branch_deviation, 8649 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8478 curr_state->middle_bundle_stops, 8650 curr_state->middle_bundle_stops,
8479 (ia64_tune == PROCESSOR_ITANIUM 8651 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
8480 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
8481 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
8482 INSN_UID (insn)); 8652 INSN_UID (insn));
8483 } 8653 }
8484 } 8654 }
8485 8655
8486 /* We should find a solution because the 2nd insn scheduling has 8656 /* We should find a solution because the 2nd insn scheduling has
8539 ? -1 : curr_state->originator->unique_num), 8709 ? -1 : curr_state->originator->unique_num),
8540 curr_state->cost, 8710 curr_state->cost,
8541 curr_state->before_nops_num, curr_state->after_nops_num, 8711 curr_state->before_nops_num, curr_state->after_nops_num,
8542 curr_state->accumulated_insns_num, curr_state->branch_deviation, 8712 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8543 curr_state->middle_bundle_stops, 8713 curr_state->middle_bundle_stops,
8544 (ia64_tune == PROCESSOR_ITANIUM 8714 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
8545 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
8546 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
8547 INSN_UID (insn)); 8715 INSN_UID (insn));
8548 } 8716 }
8549 /* Find the position in the current bundle window. The window can 8717 /* Find the position in the current bundle window. The window can
8550 contain at most two bundles. A two-bundle window means that 8718 contain at most two bundles. A two-bundle window means that
8551 the processor will make two bundle rotations. */ 8719 the processor will make two bundle rotations. */
8640 template0 = template1; 8808 template0 = template1;
8641 template1 = -1; 8809 template1 = -1;
8642 } 8810 }
8643 } 8811 }
8644 } 8812 }
8645 if (ia64_tune == PROCESSOR_ITANIUM)
8646 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
8647 Itanium1 has a quirky design: if the distance between an insn
8648 and a dependent MM-insn is less than 4 cycles, there is an
8649 additional 6-cycle stall. So we make the distance equal to
8650 4 cycles if it is less. */
8651 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8652 insn != NULL_RTX;
8653 insn = next_insn)
8654 {
8655 gcc_assert (INSN_P (insn)
8656 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8657 && GET_CODE (PATTERN (insn)) != USE
8658 && GET_CODE (PATTERN (insn)) != CLOBBER);
8659 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8660 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
8661 /* We found an MM-insn which needs additional cycles. */
8662 {
8663 rtx last;
8664 int i, j, n;
8665 int pred_stop_p;
8666
8667 /* Now we search for the template of the bundle in
8668 which the MM-insn is placed and for the position of
8669 the insn in the bundle (0, 1, 2). We also check
8670 whether there is a stop before the insn. */
8671 last = prev_active_insn (insn);
8672 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
8673 if (pred_stop_p)
8674 last = prev_active_insn (last);
8675 n = 0;
8676 for (;; last = prev_active_insn (last))
8677 if (recog_memoized (last) == CODE_FOR_bundle_selector)
8678 {
8679 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
8680 if (template0 == 9)
8681 /* The insn is in an MLX bundle. Change the template
8682 to MFI because we will add nops before the insn;
8683 this simplifies the subsequent code a lot. */
8684 PATTERN (last)
8685 = gen_bundle_selector (const2_rtx); /* -> MFI */
8686 break;
8687 }
8688 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
8689 && (ia64_safe_itanium_class (last)
8690 != ITANIUM_CLASS_IGNORE))
8691 n++;
8692 /* Sanity checks: the stop is not at the bundle start,
8693 there are no more than 3 insns in the bundle, and the
8694 MM-insn is not at the start of a bundle with
8695 template MLX. */
8696 gcc_assert ((!pred_stop_p || n)
8697 && n <= 2
8698 && (template0 != 9 || !n));
8699 /* Put nops after the insn in the bundle. */
8700 for (j = 3 - n; j > 0; j --)
8701 ia64_emit_insn_before (gen_nop (), insn);
8702 /* This takes into account that we will add N more nops
8703 before the insn later -- see the code below. */
8704 add_cycles [INSN_UID (insn)]--;
8705 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
8706 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8707 insn);
8708 if (pred_stop_p)
8709 add_cycles [INSN_UID (insn)]--;
8710 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
8711 {
8712 /* Insert "MII;" template. */
8713 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
8714 insn);
8715 ia64_emit_insn_before (gen_nop (), insn);
8716 ia64_emit_insn_before (gen_nop (), insn);
8717 if (i > 1)
8718 {
8719 /* To decrease code size, we use "MI;I;"
8720 template. */
8721 ia64_emit_insn_before
8722 (gen_insn_group_barrier (GEN_INT (3)), insn);
8723 i--;
8724 }
8725 ia64_emit_insn_before (gen_nop (), insn);
8726 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8727 insn);
8728 }
8729 /* Put the MM-insn in the same slot of a bundle with the
8730 same template as the original one. */
8731 ia64_add_bundle_selector_before (template0, insn);
8732 /* To put the insn in the same slot, add the necessary
8733 number of nops. */
8734 for (j = n; j > 0; j --)
8735 ia64_emit_insn_before (gen_nop (), insn);
8736 /* Put the stop if the original bundle had it. */
8737 if (pred_stop_p)
8738 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8739 insn);
8740 }
8741 }
8742 8813
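/* [Editor's sketch]  The deleted Itanium1 fix-up above padded the
   schedule one cycle at a time: each missing cycle became an "MII"
   bundle of nops ending in a stop, and two adjacent padding cycles
   shared one "MI;I;" bundle to save code size.  In outline (the
   emit_* helpers are illustrative stand-ins):  */
for (i = add_cycles[uid]; i > 0; i--)
  {
    emit_bundle_selector_mii ();  /* start an MII bundle of nops */
    emit_nops (2);
    if (i > 1)
      {
        emit_stop ();             /* "MI;I;" covers two cycles */
        i--;
      }
    emit_nops (1);
    emit_stop ();                 /* end the padding cycle */
  }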
8743 #ifdef ENABLE_CHECKING 8814 #ifdef ENABLE_CHECKING
8744 { 8815 {
8745 /* Assert that middle_bundle_stops was calculated correctly. */ 8816 /* Assert that middle_bundle_stops was calculated correctly. */
8746 int num = best_state->middle_bundle_stops; 8817 int num = best_state->middle_bundle_stops;
8851 init_insn_group_barriers (); 8922 init_insn_group_barriers ();
8852 seen_good_insn = 0; 8923 seen_good_insn = 0;
8853 need_barrier_p = 0; 8924 need_barrier_p = 0;
8854 prev_insn = NULL_RTX; 8925 prev_insn = NULL_RTX;
8855 } 8926 }
8856 else if (INSN_P (insn)) 8927 else if (NONDEBUG_INSN_P (insn))
8857 { 8928 {
8858 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier) 8929 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
8859 { 8930 {
8860 init_insn_group_barriers (); 8931 init_insn_group_barriers ();
8861 seen_good_insn = 0; 8932 seen_good_insn = 0;
9131 ia64_nop = make_insn_raw (gen_nop ()); 9202 ia64_nop = make_insn_raw (gen_nop ());
9132 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX; 9203 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
9133 recog_memoized (ia64_nop); 9204 recog_memoized (ia64_nop);
9134 clocks_length = get_max_uid () + 1; 9205 clocks_length = get_max_uid () + 1;
9135 stops_p = XCNEWVEC (char, clocks_length); 9206 stops_p = XCNEWVEC (char, clocks_length);
9136 if (ia64_tune == PROCESSOR_ITANIUM) 9207
9137 {
9138 clocks = XCNEWVEC (int, clocks_length);
9139 add_cycles = XCNEWVEC (int, clocks_length);
9140 }
9141 if (ia64_tune == PROCESSOR_ITANIUM2) 9208 if (ia64_tune == PROCESSOR_ITANIUM2)
9142 { 9209 {
9143 pos_1 = get_cpu_unit_code ("2_1"); 9210 pos_1 = get_cpu_unit_code ("2_1");
9144 pos_2 = get_cpu_unit_code ("2_2"); 9211 pos_2 = get_cpu_unit_code ("2_2");
9145 pos_3 = get_cpu_unit_code ("2_3"); 9212 pos_3 = get_cpu_unit_code ("2_3");
9207 compute_alignments (); 9274 compute_alignments ();
9208 9275
9209 /* We cannot reuse this one because it has been corrupted by the 9276 /* We cannot reuse this one because it has been corrupted by the
9210 evil glat. */ 9277 evil glat. */
9211 finish_bundle_states (); 9278 finish_bundle_states ();
9212 if (ia64_tune == PROCESSOR_ITANIUM)
9213 {
9214 free (add_cycles);
9215 free (clocks);
9216 }
9217 free (stops_p); 9279 free (stops_p);
9218 stops_p = NULL; 9280 stops_p = NULL;
9219 emit_insn_group_barriers (dump_file); 9281 emit_insn_group_barriers (dump_file);
9220 9282
9221 ia64_final_schedule = 0; 9283 ia64_final_schedule = 0;
9235 int saw_stop = 0; 9297 int saw_stop = 0;
9236 9298
9237 insn = get_last_insn (); 9299 insn = get_last_insn ();
9238 if (! INSN_P (insn)) 9300 if (! INSN_P (insn))
9239 insn = prev_active_insn (insn); 9301 insn = prev_active_insn (insn);
9240 /* Skip over insns that expand to nothing. */ 9302 if (insn)
9241 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES) 9303 {
9242 { 9304 /* Skip over insns that expand to nothing. */
9243 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE 9305 while (GET_CODE (insn) == INSN
9244 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER) 9306 && get_attr_empty (insn) == EMPTY_YES)
9245 saw_stop = 1; 9307 {
9246 insn = prev_active_insn (insn); 9308 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
9247 } 9309 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
9248 if (GET_CODE (insn) == CALL_INSN) 9310 saw_stop = 1;
9249 { 9311 insn = prev_active_insn (insn);
9250 if (! saw_stop) 9312 }
9251 emit_insn (gen_insn_group_barrier (GEN_INT (3))); 9313 if (GET_CODE (insn) == CALL_INSN)
9252 emit_insn (gen_break_f ()); 9314 {
9253 emit_insn (gen_insn_group_barrier (GEN_INT (3))); 9315 if (! saw_stop)
9316 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9317 emit_insn (gen_break_f ());
9318 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9319 }
9254 } 9320 }
9255 } 9321 }
9256 9322
9257 emit_predicate_relation_info (); 9323 emit_predicate_relation_info ();
9258 9324
9307 /* Return true if REGNO is used by the frame unwinder. */ 9373 /* Return true if REGNO is used by the frame unwinder. */
9308 9374
9309 int 9375 int
9310 ia64_eh_uses (int regno) 9376 ia64_eh_uses (int regno)
9311 { 9377 {
9312 enum ia64_frame_regs r; 9378 unsigned int r;
9313 9379
9314 if (! reload_completed) 9380 if (! reload_completed)
9315 return 0; 9381 return 0;
9316 9382
9317 if (regno == 0) 9383 if (regno == 0)
9409 } 9475 }
9410 9476
9411 /* Define the CFA after INSN with the steady-state definition. */ 9477 /* Define the CFA after INSN with the steady-state definition. */
9412 9478
9413 static void 9479 static void
9414 ia64_dwarf2out_def_steady_cfa (rtx insn) 9480 ia64_dwarf2out_def_steady_cfa (rtx insn, bool frame)
9415 { 9481 {
9416 rtx fp = frame_pointer_needed 9482 rtx fp = frame_pointer_needed
9417 ? hard_frame_pointer_rtx 9483 ? hard_frame_pointer_rtx
9418 : stack_pointer_rtx; 9484 : stack_pointer_rtx;
9485 const char *label = ia64_emit_deleted_label_after_insn (insn);
9486
9487 if (!frame)
9488 return;
9419 9489
9420 dwarf2out_def_cfa 9490 dwarf2out_def_cfa
9421 (ia64_emit_deleted_label_after_insn (insn), 9491 (label, REGNO (fp),
9422 REGNO (fp),
9423 ia64_initial_elimination_offset 9492 ia64_initial_elimination_offset
9424 (REGNO (arg_pointer_rtx), REGNO (fp)) 9493 (REGNO (arg_pointer_rtx), REGNO (fp))
9425 + ARG_POINTER_CFA_OFFSET (current_function_decl)); 9494 + ARG_POINTER_CFA_OFFSET (current_function_decl));
9426 } 9495 }
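/* [Editor's note]  The new `frame' parameter moves the caller-side
   guard into the callee.  Crucially, the deleted label is still
   emitted before the early return, so label numbering stays in sync
   whether or not frame info is requested.  Call sites shrink from

       if (frame)
         ia64_dwarf2out_def_steady_cfa (insn);

   to the unconditional

       ia64_dwarf2out_def_steady_cfa (insn, frame);  */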
9427 9496
9510 { 9579 {
9511 gcc_assert (!frame_pointer_needed); 9580 gcc_assert (!frame_pointer_needed);
9512 if (unwind) 9581 if (unwind)
9513 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n", 9582 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
9514 -INTVAL (op1)); 9583 -INTVAL (op1));
9515 if (frame) 9584 ia64_dwarf2out_def_steady_cfa (insn, frame);
9516 ia64_dwarf2out_def_steady_cfa (insn);
9517 } 9585 }
9518 else 9586 else
9519 process_epilogue (asm_out_file, insn, unwind, frame); 9587 process_epilogue (asm_out_file, insn, unwind, frame);
9520 } 9588 }
9521 else 9589 else
9569 gcc_assert (dest_regno == HARD_FRAME_POINTER_REGNUM 9637 gcc_assert (dest_regno == HARD_FRAME_POINTER_REGNUM
9570 && frame_pointer_needed); 9638 && frame_pointer_needed);
9571 if (unwind) 9639 if (unwind)
9572 fprintf (asm_out_file, "\t.vframe r%d\n", 9640 fprintf (asm_out_file, "\t.vframe r%d\n",
9573 ia64_dbx_register_number (dest_regno)); 9641 ia64_dbx_register_number (dest_regno));
9574 if (frame) 9642 ia64_dwarf2out_def_steady_cfa (insn, frame);
9575 ia64_dwarf2out_def_steady_cfa (insn);
9576 return 1; 9643 return 1;
9577 9644
9578 default: 9645 default:
9579 /* Everything else should indicate being stored to memory. */ 9646 /* Everything else should indicate being stored to memory. */
9580 gcc_unreachable (); 9647 gcc_unreachable ();
9715 { 9782 {
9716 fprintf (asm_out_file, "\t.body\n"); 9783 fprintf (asm_out_file, "\t.body\n");
9717 fprintf (asm_out_file, "\t.copy_state %d\n", 9784 fprintf (asm_out_file, "\t.copy_state %d\n",
9718 cfun->machine->state_num); 9785 cfun->machine->state_num);
9719 } 9786 }
9720 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame) 9787 if (IA64_CHANGE_CFA_IN_EPILOGUE)
9721 ia64_dwarf2out_def_steady_cfa (insn); 9788 ia64_dwarf2out_def_steady_cfa (insn, frame);
9722 need_copy_state = false; 9789 need_copy_state = false;
9723 } 9790 }
9724 } 9791 }
9725 9792
9726 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn)) 9793 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
9762 { 9829 {
9763 IA64_BUILTIN_BSP, 9830 IA64_BUILTIN_BSP,
9764 IA64_BUILTIN_COPYSIGNQ, 9831 IA64_BUILTIN_COPYSIGNQ,
9765 IA64_BUILTIN_FABSQ, 9832 IA64_BUILTIN_FABSQ,
9766 IA64_BUILTIN_FLUSHRS, 9833 IA64_BUILTIN_FLUSHRS,
9767 IA64_BUILTIN_INFQ 9834 IA64_BUILTIN_INFQ,
9835 IA64_BUILTIN_HUGE_VALQ
9768 }; 9836 };
9769 9837
9770 void 9838 void
9771 ia64_init_builtins (void) 9839 ia64_init_builtins (void)
9772 { 9840 {
9797 9865
9798 /* TFmode support builtins. */ 9866 /* TFmode support builtins. */
9799 ftype = build_function_type (float128_type, void_list_node); 9867 ftype = build_function_type (float128_type, void_list_node);
9800 add_builtin_function ("__builtin_infq", ftype, 9868 add_builtin_function ("__builtin_infq", ftype,
9801 IA64_BUILTIN_INFQ, BUILT_IN_MD, 9869 IA64_BUILTIN_INFQ, BUILT_IN_MD,
9870 NULL, NULL_TREE);
9871
9872 add_builtin_function ("__builtin_huge_valq", ftype,
9873 IA64_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
9802 NULL, NULL_TREE); 9874 NULL, NULL_TREE);
9803 9875
9804 ftype = build_function_type_list (float128_type, 9876 ftype = build_function_type_list (float128_type,
9805 float128_type, 9877 float128_type,
9806 NULL_TREE); 9878 NULL_TREE);
9820 } 9892 }
9821 else 9893 else
9822 /* Under HPUX, this is a synonym for "long double". */ 9894 /* Under HPUX, this is a synonym for "long double". */
9823 (*lang_hooks.types.register_builtin_type) (long_double_type_node, 9895 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
9824 "__float128"); 9896 "__float128");
9897
9898 /* Fwrite on VMS is non-standard. */
9899 if (TARGET_ABI_OPEN_VMS)
9900 {
9901 implicit_built_in_decls[(int) BUILT_IN_FWRITE] = NULL_TREE;
9902 implicit_built_in_decls[(int) BUILT_IN_FWRITE_UNLOCKED] = NULL_TREE;
9903 }
9825 9904
9826 #define def_builtin(name, type, code) \ 9905 #define def_builtin(name, type, code) \
9827 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \ 9906 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
9828 NULL, NULL_TREE) 9907 NULL, NULL_TREE)
9829 9908
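/* [Editor's sketch]  With the def_builtin wrapper above, registering a
   machine-specific builtin becomes a one-liner, e.g. (hypothetical
   names):

       def_builtin ("__builtin_ia64_example", ftype, IA64_BUILTIN_EXAMPLE);

   which expands to add_builtin_function (name, type, code,
   BUILT_IN_MD, NULL, NULL_TREE).  */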
9873 case IA64_BUILTIN_FLUSHRS: 9952 case IA64_BUILTIN_FLUSHRS:
9874 emit_insn (gen_flushrs ()); 9953 emit_insn (gen_flushrs ());
9875 return const0_rtx; 9954 return const0_rtx;
9876 9955
9877 case IA64_BUILTIN_INFQ: 9956 case IA64_BUILTIN_INFQ:
9957 case IA64_BUILTIN_HUGE_VALQ:
9878 { 9958 {
9879 REAL_VALUE_TYPE inf; 9959 REAL_VALUE_TYPE inf;
9880 rtx tmp; 9960 rtx tmp;
9881 9961
9882 real_inf (&inf); 9962 real_inf (&inf);
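/* [Editor's note]  Both builtins expand to the same TFmode infinity
   built by real_inf, so at the user level (illustrative):

       __float128 a = __builtin_infq ();
       __float128 b = __builtin_huge_valq ();   -- same value

   __builtin_huge_valq was presumably added so a HUGE_VAL-style macro
   for __float128 can expand to a builtin instead of an overflowing
   expression.  */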
9931 { 10011 {
9932 /* maybe_assemble_visibility will return 1 if the assembler 10012 /* maybe_assemble_visibility will return 1 if the assembler
9933 visibility directive is output. */ 10013 visibility directive is output. */
9934 int need_visibility = ((*targetm.binds_local_p) (decl) 10014 int need_visibility = ((*targetm.binds_local_p) (decl)
9935 && maybe_assemble_visibility (decl)); 10015 && maybe_assemble_visibility (decl));
10016
10017 #ifdef DO_CRTL_NAMES
10018 DO_CRTL_NAMES;
10019 #endif
9936 10020
9937 /* GNU as does not need anything here, but the HP linker does 10021 /* GNU as does not need anything here, but the HP linker does
9938 need something for external functions. */ 10022 need something for external functions. */
9939 if ((TARGET_HPUX_LD || !TARGET_GNU_AS) 10023 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
9940 && TREE_CODE (decl) == FUNCTION_DECL) 10024 && TREE_CODE (decl) == FUNCTION_DECL)
10034 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL"); 10118 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10035 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I"); 10119 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10036 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L"); 10120 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10037 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI"); 10121 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10038 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL"); 10122 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10123 abort_libfunc = init_one_libfunc ("decc$abort");
10124 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10125 #ifdef MEM_LIBFUNCS_INIT
10126 MEM_LIBFUNCS_INIT;
10127 #endif
10039 } 10128 }
10040 10129
10041 /* Rename the TFmode libfuncs available from soft-fp in glibc using 10130 /* Rename the TFmode libfuncs available from soft-fp in glibc using
10042 the HPUX conventions. */ 10131 the HPUX conventions. */
10043 10132
10064 10153
10065 static void 10154 static void
10066 ia64_soft_fp_init_libfuncs (void) 10155 ia64_soft_fp_init_libfuncs (void)
10067 { 10156 {
10068 } 10157 }
10158
10159 static bool
10160 ia64_vms_valid_pointer_mode (enum machine_mode mode)
10161 {
10162 return (mode == SImode || mode == DImode);
10163 }
10069 10164
10070 /* For HPUX, it is illegal to have relocations in shared segments. */ 10165 /* For HPUX, it is illegal to have relocations in shared segments. */
10071 10166
10072 static int 10167 static int
10073 ia64_hpux_reloc_rw_mask (void) 10168 ia64_hpux_reloc_rw_mask (void)
10112 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0 10207 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
10113 || strcmp (name, ".sbss") == 0 10208 || strcmp (name, ".sbss") == 0
10114 || strncmp (name, ".sbss.", 6) == 0 10209 || strncmp (name, ".sbss.", 6) == 0
10115 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0) 10210 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
10116 flags = SECTION_SMALL; 10211 flags = SECTION_SMALL;
10212
10213 #if TARGET_ABI_OPEN_VMS
10214 if (decl && DECL_ATTRIBUTES (decl)
10215 && lookup_attribute ("common_object", DECL_ATTRIBUTES (decl)))
10216 flags |= SECTION_VMS_OVERLAY;
10217 #endif
10117 10218
10118 flags |= default_section_type_flags (decl, name, reloc); 10219 flags |= default_section_type_flags (decl, name, reloc);
10119 return flags; 10220 return flags;
10120 } 10221 }
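/* [Editor's sketch]  The name checks above route IA-64 small-data
   sections to SECTION_SMALL, so an explicit placement such as

       int counter __attribute__ ((section (".sdata")));

   picks up the flag and remains addressable gp-relative.  */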
10121 10222
10278 insn = get_insns (); 10379 insn = get_insns ();
10279 shorten_branches (insn); 10380 shorten_branches (insn);
10280 final_start_function (insn, file, 1); 10381 final_start_function (insn, file, 1);
10281 final (insn, file, 1); 10382 final (insn, file, 1);
10282 final_end_function (); 10383 final_end_function ();
10283 free_after_compilation (cfun);
10284 10384
10285 reload_completed = 0; 10385 reload_completed = 0;
10286 epilogue_completed = 0; 10386 epilogue_completed = 0;
10287 } 10387 }
10288 10388
10290 10390
10291 static rtx 10391 static rtx
10292 ia64_struct_value_rtx (tree fntype, 10392 ia64_struct_value_rtx (tree fntype,
10293 int incoming ATTRIBUTE_UNUSED) 10393 int incoming ATTRIBUTE_UNUSED)
10294 { 10394 {
10295 if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype)) 10395 if (TARGET_ABI_OPEN_VMS ||
10396 (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype)))
10296 return NULL_RTX; 10397 return NULL_RTX;
10297 return gen_rtx_REG (Pmode, GR_REG (8)); 10398 return gen_rtx_REG (Pmode, GR_REG (8));
10298 } 10399 }
10299 10400
10300 static bool 10401 static bool
10555 return XFmode; 10656 return XFmode;
10556 10657
10557 return VOIDmode; 10658 return VOIDmode;
10558 } 10659 }
10559 10660
10661 static enum machine_mode
10662 ia64_promote_function_mode (const_tree type,
10663 enum machine_mode mode,
10664 int *punsignedp,
10665 const_tree funtype,
10666 int for_return)
10667 {
10668 /* Special processing required for OpenVMS ... */
10669
10670 if (!TARGET_ABI_OPEN_VMS)
10671 return default_promote_function_mode(type, mode, punsignedp, funtype,
10672 for_return);
10673
10674 /* HP OpenVMS Calling Standard dated June, 2004, that describes
10675 HP OpenVMS I64 Version 8.2EFT,
10676 chapter 4 "OpenVMS I64 Conventions"
10677 section 4.7 "Procedure Linkage"
10678 subsection 4.7.5.2, "Normal Register Parameters"
10679
10680 "Unsigned integral (except unsigned 32-bit), set, and VAX floating-point
10681 values passed in registers are zero-filled; signed integral values as
10682 well as unsigned 32-bit integral values are sign-extended to 64 bits.
10683 For all other types passed in the general registers, unused bits are
10684 undefined." */
10685
10686 if (!AGGREGATE_TYPE_P (type)
10687 && GET_MODE_CLASS (mode) == MODE_INT
10688 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
10689 {
10690 if (mode == SImode)
10691 *punsignedp = 0;
10692 return DImode;
10693 }
10694 else
10695 return promote_mode (type, mode, punsignedp);
10696 }
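/* [Editor's worked example]  Under the VMS rule quoted above, a 32-bit
   unsigned argument is *sign*-extended: for SImode the hook forces
   *punsignedp to 0 and widens to DImode, so an incoming 0xFFFFFFFF
   occupies the register as 0xFFFFFFFFFFFFFFFF.  Narrower unsigned
   types (QImode, HImode) keep *punsignedp == 1 and are zero-filled
   instead.  */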
10697
10698 static GTY(()) rtx ia64_dconst_0_5_rtx;
10699
10700 rtx
10701 ia64_dconst_0_5 (void)
10702 {
10703 if (! ia64_dconst_0_5_rtx)
10704 {
10705 REAL_VALUE_TYPE rv;
10706 real_from_string (&rv, "0.5");
10707 ia64_dconst_0_5_rtx = const_double_from_real_value (rv, DFmode);
10708 }
10709 return ia64_dconst_0_5_rtx;
10710 }
10711
10712 static GTY(()) rtx ia64_dconst_0_375_rtx;
10713
10714 rtx
10715 ia64_dconst_0_375 (void)
10716 {
10717 if (! ia64_dconst_0_375_rtx)
10718 {
10719 REAL_VALUE_TYPE rv;
10720 real_from_string (&rv, "0.375");
10721 ia64_dconst_0_375_rtx = const_double_from_real_value (rv, DFmode);
10722 }
10723 return ia64_dconst_0_375_rtx;
10724 }
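/* [Editor's note]  Both helpers use the same lazy-init pattern: the
   GTY(()) marker roots the cached rtx for the garbage collector, so
   the CONST_DOUBLE is built on first use and then reused.  Typical
   call (illustrative):

       rtx half = ia64_dconst_0_5 ();   -- DFmode CONST_DOUBLE 0.5

   The values 0.5 and 0.375 suggest Newton-Raphson style expansions in
   the machine description, though the callers are not shown here.  */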
10725
10726
10560 #include "gt-ia64.h" 10727 #include "gt-ia64.h"