
/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "cfgbuild.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "calls.h"
#include "stor-layout.h"
#include "varasm.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "except.h"
#include "explow.h"
#include "expr.h"
#include "cfgrtl.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "gimplify.h"
#include "dwarf2.h"
#include "tm-constrs.h"
#include "params.h"
#include "cselib.h"
#include "sched-int.h"
#include "opts.h"
#include "tree-pass.h"
#include "context.h"
#include "pass_manager.h"
#include "target-globals.h"
#include "gimple-iterator.h"
#include "tree-vectorizer.h"
#include "shrink-wrap.h"
#include "builtins.h"
#include "rtl-iter.h"
#include "tree-iterator.h"
#include "dbgcnt.h"
#include "case-cfn-macros.h"
#include "dojump.h"
#include "fold-const-call.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"
#include "selftest.h"
#include "selftest-rtl.h"
#include "print-rtl.h"
#include "intl.h"
#include "ifcvt.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "wide-int-bitmask.h"
#include "tree-vector-builder.h"
#include "debug.h"
#include "dwarf2out.h"

/* This file should be included last.  */
#include "target-def.h"

#include "x86-tune-costs.h"

static rtx legitimize_dllimport_symbol (rtx, bool);
static rtx legitimize_pe_coff_extern_decl (rtx, bool);
static rtx legitimize_pe_coff_symbol (rtx, bool);
static void ix86_print_operand_address_as (FILE *, rtx, addr_space_t, bool);
static bool ix86_save_reg (unsigned int, bool, bool);
static bool ix86_function_naked (const_tree);
static bool ix86_notrack_prefixed_insn_p (rtx);
static void ix86_emit_restore_reg_using_pop (rtx);


#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif

/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode)					\
  ((mode) == QImode ? 0						\
   : (mode) == HImode ? 1					\
   : (mode) == SImode ? 2					\
   : (mode) == DImode ? 3					\
   : 4)
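
/* For example (illustrative): the cost of an HImode multiply is looked up
   as ix86_cost->mult_init[MODE_INDEX (HImode)], i.e. index 1, while any
   mode wider than DImode falls into the catch-all index 4.  */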


/* Set by -mtune.  */
const struct processor_costs *ix86_tune_cost = NULL;

/* Set by -mtune or -Os.  */
const struct processor_costs *ix86_cost = NULL;

/* Processor feature/optimization bitmasks.  */
#define m_386 (HOST_WIDE_INT_1U<<PROCESSOR_I386)
#define m_486 (HOST_WIDE_INT_1U<<PROCESSOR_I486)
#define m_PENT (HOST_WIDE_INT_1U<<PROCESSOR_PENTIUM)
#define m_LAKEMONT (HOST_WIDE_INT_1U<<PROCESSOR_LAKEMONT)
#define m_PPRO (HOST_WIDE_INT_1U<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (HOST_WIDE_INT_1U<<PROCESSOR_PENTIUM4)
#define m_NOCONA (HOST_WIDE_INT_1U<<PROCESSOR_NOCONA)
#define m_P4_NOCONA (m_PENT4 | m_NOCONA)
#define m_CORE2 (HOST_WIDE_INT_1U<<PROCESSOR_CORE2)
#define m_NEHALEM (HOST_WIDE_INT_1U<<PROCESSOR_NEHALEM)
#define m_SANDYBRIDGE (HOST_WIDE_INT_1U<<PROCESSOR_SANDYBRIDGE)
#define m_HASWELL (HOST_WIDE_INT_1U<<PROCESSOR_HASWELL)
#define m_BONNELL (HOST_WIDE_INT_1U<<PROCESSOR_BONNELL)
#define m_SILVERMONT (HOST_WIDE_INT_1U<<PROCESSOR_SILVERMONT)
#define m_KNL (HOST_WIDE_INT_1U<<PROCESSOR_KNL)
#define m_KNM (HOST_WIDE_INT_1U<<PROCESSOR_KNM)
#define m_SKYLAKE (HOST_WIDE_INT_1U<<PROCESSOR_SKYLAKE)
#define m_SKYLAKE_AVX512 (HOST_WIDE_INT_1U<<PROCESSOR_SKYLAKE_AVX512)
#define m_CANNONLAKE (HOST_WIDE_INT_1U<<PROCESSOR_CANNONLAKE)
#define m_ICELAKE_CLIENT (HOST_WIDE_INT_1U<<PROCESSOR_ICELAKE_CLIENT)
#define m_ICELAKE_SERVER (HOST_WIDE_INT_1U<<PROCESSOR_ICELAKE_SERVER)
#define m_CORE_AVX512 (m_SKYLAKE_AVX512 | m_CANNONLAKE \
		       | m_ICELAKE_CLIENT | m_ICELAKE_SERVER)
#define m_CORE_AVX2 (m_HASWELL | m_SKYLAKE | m_CORE_AVX512)
#define m_CORE_ALL (m_CORE2 | m_NEHALEM  | m_SANDYBRIDGE | m_CORE_AVX2)
#define m_GOLDMONT (HOST_WIDE_INT_1U<<PROCESSOR_GOLDMONT)
#define m_GOLDMONT_PLUS (HOST_WIDE_INT_1U<<PROCESSOR_GOLDMONT_PLUS)
#define m_TREMONT (HOST_WIDE_INT_1U<<PROCESSOR_TREMONT)
#define m_INTEL (HOST_WIDE_INT_1U<<PROCESSOR_INTEL)

#define m_GEODE (HOST_WIDE_INT_1U<<PROCESSOR_GEODE)
#define m_K6 (HOST_WIDE_INT_1U<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (HOST_WIDE_INT_1U<<PROCESSOR_K8)
#define m_ATHLON (HOST_WIDE_INT_1U<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (HOST_WIDE_INT_1U<<PROCESSOR_AMDFAM10)
#define m_BDVER1 (HOST_WIDE_INT_1U<<PROCESSOR_BDVER1)
#define m_BDVER2 (HOST_WIDE_INT_1U<<PROCESSOR_BDVER2)
#define m_BDVER3 (HOST_WIDE_INT_1U<<PROCESSOR_BDVER3)
#define m_BDVER4 (HOST_WIDE_INT_1U<<PROCESSOR_BDVER4)
#define m_ZNVER1 (HOST_WIDE_INT_1U<<PROCESSOR_ZNVER1)
#define m_BTVER1 (HOST_WIDE_INT_1U<<PROCESSOR_BTVER1)
#define m_BTVER2 (HOST_WIDE_INT_1U<<PROCESSOR_BTVER2)
#define m_BDVER	(m_BDVER1 | m_BDVER2 | m_BDVER3 | m_BDVER4)
#define m_BTVER (m_BTVER1 | m_BTVER2)
#define m_AMD_MULTIPLE (m_ATHLON_K8 | m_AMDFAM10 | m_BDVER | m_BTVER \
			| m_ZNVER1)

#define m_GENERIC (HOST_WIDE_INT_1U<<PROCESSOR_GENERIC)

const char* ix86_tune_feature_names[X86_TUNE_LAST] = {
#undef DEF_TUNE
#define DEF_TUNE(tune, name, selector) name,
#include "x86-tune.def"
#undef DEF_TUNE
};

/* Feature tests against the various tunings.  */
unsigned char ix86_tune_features[X86_TUNE_LAST];

/* Feature tests against the various tunings used to create ix86_tune_features
   based on the processor mask.  */
static unsigned HOST_WIDE_INT initial_ix86_tune_features[X86_TUNE_LAST] = {
#undef DEF_TUNE
#define DEF_TUNE(tune, name, selector) selector,
#include "x86-tune.def"
#undef DEF_TUNE
};
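
/* To illustrate the DEF_TUNE X-macro pattern used above: a (hypothetical)
   entry in x86-tune.def such as

     DEF_TUNE (X86_TUNE_SCHEDULE, "schedule", m_PENT | m_CORE_ALL | m_GENERIC)

   contributes the string "schedule" to ix86_tune_feature_names and the
   selector mask (m_PENT | m_CORE_ALL | m_GENERIC) to
   initial_ix86_tune_features, so both tables stay in sync from a single
   definition.  */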

/* Feature tests against the various architecture variations.  */
unsigned char ix86_arch_features[X86_ARCH_LAST];

/* Feature tests against the various architecture variations, used to create
   ix86_arch_features based on the processor mask.  */
static unsigned HOST_WIDE_INT initial_ix86_arch_features[X86_ARCH_LAST] = {
  /* X86_ARCH_CMOV: Conditional move was added for pentiumpro.  */
  ~(m_386 | m_486 | m_PENT | m_LAKEMONT | m_K6),

  /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486.  */
  ~m_386,

  /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
  ~(m_386 | m_486),

  /* X86_ARCH_XADD: Exchange and add was added for 80486.  */
  ~m_386,

  /* X86_ARCH_BSWAP: Byteswap was added for 80486.  */
  ~m_386,
};

/* In case the average insn count for a single function invocation is
   lower than this constant, emit fast (but longer) prologue and
   epilogue code.  */
#define FAST_PROLOGUE_INSN_COUNT 20

/* Names for 8 (low), 8 (high), and 16-bit registers, respectively.  */
static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
static const char *const hi_reg_name[] = HI_REGISTER_NAMES;

/* Array of the smallest class containing reg number REGNO, indexed by
   REGNO.  Used by REGNO_REG_CLASS in i386.h.  */

enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
{
  /* ax, dx, cx, bx */
  AREG, DREG, CREG, BREG,
  /* si, di, bp, sp */
  SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
  /* FP registers */
  FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
  FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
  /* arg pointer, flags, fpsr, frame */
  NON_Q_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
  /* SSE registers */
  SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  /* MMX registers */
  MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
  MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
  /* REX registers */
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  /* SSE REX registers */
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  /* AVX-512 SSE registers */
  ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS,
  ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS,
  ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS,
  ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS, ALL_SSE_REGS,
  /* Mask registers.  */
  ALL_MASK_REGS, MASK_REGS, MASK_REGS, MASK_REGS,
  MASK_REGS, MASK_REGS, MASK_REGS, MASK_REGS
};

/* The "default" register map used in 32bit mode.  */

int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 4, 5,		/* general regs */
  12, 13, 14, 15, 16, 17, 18, 19,	/* fp regs */
  -1, -1, -1, -1,			/* arg, flags, fpsr, frame */
  21, 22, 23, 24, 25, 26, 27, 28,	/* SSE */
  29, 30, 31, 32, 33, 34, 35, 36,       /* MMX */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended SSE registers */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* AVX-512 registers 16-23 */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* AVX-512 registers 24-31 */
  93, 94, 95, 96, 97, 98, 99, 100       /* Mask registers */
};

/* The "default" register map used in 64bit mode.  */

int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 1, 2, 3, 4, 5, 6, 7,		/* general regs */
  33, 34, 35, 36, 37, 38, 39, 40,	/* fp regs */
  -1, -1, -1, -1,			/* arg, flags, fpsr, frame */
  17, 18, 19, 20, 21, 22, 23, 24,	/* SSE */
  41, 42, 43, 44, 45, 46, 47, 48,       /* MMX */
  8,9,10,11,12,13,14,15,		/* extended integer registers */
  25, 26, 27, 28, 29, 30, 31, 32,	/* extended SSE registers */
  67, 68, 69, 70, 71, 72, 73, 74,       /* AVX-512 registers 16-23 */
  75, 76, 77, 78, 79, 80, 81, 82,       /* AVX-512 registers 24-31 */
  118, 119, 120, 121, 122, 123, 124, 125 /* Mask registers */
};

/* Define the register numbers to be used in Dwarf debugging information.
   The SVR4 reference port C compiler uses the following register numbers
   in its Dwarf output code:
	0 for %eax (gcc regno = 0)
	1 for %ecx (gcc regno = 2)
	2 for %edx (gcc regno = 1)
	3 for %ebx (gcc regno = 3)
	4 for %esp (gcc regno = 7)
	5 for %ebp (gcc regno = 6)
	6 for %esi (gcc regno = 4)
	7 for %edi (gcc regno = 5)
   The following three DWARF register numbers are never generated by
   the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
   believed these numbers to have these meanings:
	8  for %eip    (no gcc equivalent)
	9  for %eflags (gcc regno = 17)
	10 for %trapno (no gcc equivalent)
   It is not at all clear how we should number the FP stack registers
   for the x86 architecture.  If the version of SDB on x86/svr4 were
   a bit less brain dead with respect to floating-point then we would
   have a precedent to follow with respect to DWARF register numbers
   for x86 FP registers, but the SDB on x86/svr4 was so completely
   broken with respect to FP registers that it is hardly worth thinking
   of it as something to strive for compatibility with.
   The version of x86/svr4 SDB I had does (partially)
   seem to believe that DWARF register number 11 is associated with
   the x86 register %st(0), but that's about all.  Higher DWARF
   register numbers don't seem to be associated with anything in
   particular, and even for DWARF regno 11, SDB only seemed to under-
   stand that it should say that a variable lives in %st(0) (when
   asked via an `=' command) if we said it was in DWARF regno 11,
   but SDB still printed garbage when asked for the value of the
   variable in question (via a `/' command).
   (Also note that the labels SDB printed for various FP stack regs
   when doing an `x' command were all wrong.)
   Note that these problems generally don't affect the native SVR4
   C compiler because it doesn't allow the use of -O with -g and
   because when it is *not* optimizing, it allocates a memory
   location for each floating-point variable, and the memory
   location is what gets described in the DWARF AT_location
   attribute for the variable in question.
   Regardless of the severe mental illness of the x86/svr4 SDB, we
   do something sensible here and we use the following DWARF
   register numbers.  Note that these are all stack-top-relative
   numbers.
	11 for %st(0) (gcc regno = 8)
	12 for %st(1) (gcc regno = 9)
	13 for %st(2) (gcc regno = 10)
	14 for %st(3) (gcc regno = 11)
	15 for %st(4) (gcc regno = 12)
	16 for %st(5) (gcc regno = 13)
	17 for %st(6) (gcc regno = 14)
	18 for %st(7) (gcc regno = 15)
*/
int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 5, 4,		/* general regs */
  11, 12, 13, 14, 15, 16, 17, 18,	/* fp regs */
  -1, 9, -1, -1,			/* arg, flags, fpsr, frame */
  21, 22, 23, 24, 25, 26, 27, 28,	/* SSE registers */
  29, 30, 31, 32, 33, 34, 35, 36,	/* MMX registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended SSE registers */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* AVX-512 registers 16-23 */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* AVX-512 registers 24-31 */
  93, 94, 95, 96, 97, 98, 99, 100       /* Mask registers */
};

/* Define parameter passing and return registers.  */

static int const x86_64_int_parameter_registers[6] =
{
  DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
};

static int const x86_64_ms_abi_int_parameter_registers[4] =
{
  CX_REG, DX_REG, R8_REG, R9_REG
};

static int const x86_64_int_return_registers[4] =
{
  AX_REG, DX_REG, DI_REG, SI_REG
};

/* Additional registers that are clobbered by SysV calls but not by MS ABI
   calls: the System V ABI does not preserve rsi, rdi or xmm6-xmm15.  */

#define NUM_X86_64_MS_CLOBBERED_REGS 12
static int const x86_64_ms_sysv_extra_clobbered_registers
		 [NUM_X86_64_MS_CLOBBERED_REGS] =
{
  SI_REG, DI_REG,
  XMM6_REG, XMM7_REG,
  XMM8_REG, XMM9_REG, XMM10_REG, XMM11_REG,
  XMM12_REG, XMM13_REG, XMM14_REG, XMM15_REG
};

enum xlogue_stub {
  XLOGUE_STUB_SAVE,
  XLOGUE_STUB_RESTORE,
  XLOGUE_STUB_RESTORE_TAIL,
  XLOGUE_STUB_SAVE_HFP,
  XLOGUE_STUB_RESTORE_HFP,
  XLOGUE_STUB_RESTORE_HFP_TAIL,

  XLOGUE_STUB_COUNT
};

enum xlogue_stub_sets {
  XLOGUE_SET_ALIGNED,
  XLOGUE_SET_ALIGNED_PLUS_8,
  XLOGUE_SET_HFP_ALIGNED_OR_REALIGN,
  XLOGUE_SET_HFP_ALIGNED_PLUS_8,

  XLOGUE_SET_COUNT
};

/* Register save/restore layout used by out-of-line stubs.  */
class xlogue_layout {
public:
  struct reginfo
  {
    unsigned regno;
    HOST_WIDE_INT offset;	/* Offset from the stub's base pointer (rax
				   or rsi) to where each register is
				   stored.  */
  };

  unsigned get_nregs () const			{return m_nregs;}
  HOST_WIDE_INT get_stack_align_off_in () const	{return m_stack_align_off_in;}

  const reginfo &get_reginfo (unsigned reg) const
  {
    gcc_assert (reg < m_nregs);
    return m_regs[reg];
  }

  static const char *get_stub_name (enum xlogue_stub stub,
				    unsigned n_extra_args);

  /* Returns an rtx for the stub's symbol based upon
       1.) the specified stub (save, restore or restore_tail),
       2.) the value of cfun->machine->call_ms2sysv_extra_regs and
       3.) whether or not stack alignment is being performed.  */
  static rtx get_stub_rtx (enum xlogue_stub stub);

  /* Returns the amount of stack space (including padding) that the stub
     needs to store registers based upon data in the machine_function.  */
  HOST_WIDE_INT get_stack_space_used () const
  {
    const struct machine_function *m = cfun->machine;
    unsigned last_reg = m->call_ms2sysv_extra_regs + MIN_REGS - 1;

    gcc_assert (m->call_ms2sysv_extra_regs <= MAX_EXTRA_REGS);
    return m_regs[last_reg].offset + STUB_INDEX_OFFSET;
  }
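
  /* For example, using the aligned-layout offsets from the REG_ORDER
     table below: with no extra registers, last_reg indexes DI_REG, whose
     recorded offset is 0xb0 - STUB_INDEX_OFFSET, so the stub consumes
     0xb0 (176) bytes of stack space.  */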

  /* Returns the offset for the base pointer used by the stub.  */
  HOST_WIDE_INT get_stub_ptr_offset () const
  {
    return STUB_INDEX_OFFSET + m_stack_align_off_in;
  }

  static const struct xlogue_layout &get_instance ();
  static unsigned count_stub_managed_regs ();
  static bool is_stub_managed_reg (unsigned regno, unsigned count);

  static const HOST_WIDE_INT STUB_INDEX_OFFSET = 0x70;
  static const unsigned MIN_REGS = NUM_X86_64_MS_CLOBBERED_REGS;
  static const unsigned MAX_REGS = 18;
  static const unsigned MAX_EXTRA_REGS = MAX_REGS - MIN_REGS;
  static const unsigned VARIANT_COUNT = MAX_EXTRA_REGS + 1;
  static const unsigned STUB_NAME_MAX_LEN = 20;
  static const char * const STUB_BASE_NAMES[XLOGUE_STUB_COUNT];
  static const unsigned REG_ORDER[MAX_REGS];
  static const unsigned REG_ORDER_REALIGN[MAX_REGS];

private:
  xlogue_layout ();
  xlogue_layout (HOST_WIDE_INT stack_align_off_in, bool hfp);
  xlogue_layout (const xlogue_layout &);

  /* True if hard frame pointer is used.  */
  bool m_hfp;

  /* Max number of registers this layout manages.  */
  unsigned m_nregs;

  /* Incoming offset from 16-byte alignment.  */
  HOST_WIDE_INT m_stack_align_off_in;

  /* Register order and offsets.  */
  struct reginfo m_regs[MAX_REGS];

  /* Lazy-inited cache of symbol names for stubs.  */
  static char s_stub_names[2][XLOGUE_STUB_COUNT][VARIANT_COUNT]
			  [STUB_NAME_MAX_LEN];

  static const xlogue_layout s_instances[XLOGUE_SET_COUNT];
};

const char * const xlogue_layout::STUB_BASE_NAMES[XLOGUE_STUB_COUNT] = {
  "savms64",
  "resms64",
  "resms64x",
  "savms64f",
  "resms64f",
  "resms64fx"
};

const unsigned xlogue_layout::REG_ORDER[xlogue_layout::MAX_REGS] = {
/* The offset values below are where each register is stored, for each
   layout, relative to the incoming stack pointer.  The value of each
   m_regs[].offset will be relative to the incoming base pointer (rax or
   rsi) used by the stub.

    s_instances:   0		1		2		3
    Offset:					realigned or	aligned + 8
    Register	   aligned	aligned + 8	aligned w/HFP	w/HFP	*/
    XMM15_REG,	/* 0x10		0x18		0x10		0x18	*/
    XMM14_REG,	/* 0x20		0x28		0x20		0x28	*/
    XMM13_REG,	/* 0x30		0x38		0x30		0x38	*/
    XMM12_REG,	/* 0x40		0x48		0x40		0x48	*/
    XMM11_REG,	/* 0x50		0x58		0x50		0x58	*/
    XMM10_REG,	/* 0x60		0x68		0x60		0x68	*/
    XMM9_REG,	/* 0x70		0x78		0x70		0x78	*/
    XMM8_REG,	/* 0x80		0x88		0x80		0x88	*/
    XMM7_REG,	/* 0x90		0x98		0x90		0x98	*/
    XMM6_REG,	/* 0xa0		0xa8		0xa0		0xa8	*/
    SI_REG,	/* 0xa8		0xb0		0xa8		0xb0	*/
    DI_REG,	/* 0xb0		0xb8		0xb0		0xb8	*/
    BX_REG,	/* 0xb8		0xc0		0xb8		0xc0	*/
    BP_REG,	/* 0xc0		0xc8		N/A		N/A	*/
    R12_REG,	/* 0xc8		0xd0		0xc0		0xc8	*/
    R13_REG,	/* 0xd0		0xd8		0xc8		0xd0	*/
    R14_REG,	/* 0xd8		0xe0		0xd0		0xd8	*/
    R15_REG,	/* 0xe0		0xe8		0xd8		0xe0	*/
};

/* Instantiate static const values.  */
const HOST_WIDE_INT xlogue_layout::STUB_INDEX_OFFSET;
const unsigned xlogue_layout::MIN_REGS;
const unsigned xlogue_layout::MAX_REGS;
const unsigned xlogue_layout::MAX_EXTRA_REGS;
const unsigned xlogue_layout::VARIANT_COUNT;
const unsigned xlogue_layout::STUB_NAME_MAX_LEN;

/* Initialize xlogue_layout::s_stub_names to zero.  */
char xlogue_layout::s_stub_names[2][XLOGUE_STUB_COUNT][VARIANT_COUNT]
				[STUB_NAME_MAX_LEN];

/* Instantiates all xlogue_layout instances.  */
const xlogue_layout xlogue_layout::s_instances[XLOGUE_SET_COUNT] = {
  xlogue_layout (0, false),
  xlogue_layout (8, false),
  xlogue_layout (0, true),
  xlogue_layout (8, true)
};

/* Return an appropriate const instance of xlogue_layout based upon values
   in cfun->machine and crtl.  */
const struct xlogue_layout &
xlogue_layout::get_instance ()
{
  enum xlogue_stub_sets stub_set;
  bool aligned_plus_8 = cfun->machine->call_ms2sysv_pad_in;

  if (stack_realign_fp)
    stub_set = XLOGUE_SET_HFP_ALIGNED_OR_REALIGN;
  else if (frame_pointer_needed)
    stub_set = aligned_plus_8
	      ? XLOGUE_SET_HFP_ALIGNED_PLUS_8
	      : XLOGUE_SET_HFP_ALIGNED_OR_REALIGN;
  else
    stub_set = aligned_plus_8 ? XLOGUE_SET_ALIGNED_PLUS_8 : XLOGUE_SET_ALIGNED;

  return s_instances[stub_set];
}

/* Determine how many clobbered registers can be saved by the stub.
   Returns the count of registers the stub will save and restore.  */
unsigned
xlogue_layout::count_stub_managed_regs ()
{
  bool hfp = frame_pointer_needed || stack_realign_fp;
  unsigned i, count;
  unsigned regno;

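  /* The first MIN_REGS registers are always saved by the stub; scan the
     optional registers in REG_ORDER until one that the function does not
     need saved is found.  */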
  for (count = i = MIN_REGS; i < MAX_REGS; ++i)
    {
      regno = REG_ORDER[i];
      if (regno == BP_REG && hfp)
	continue;
      if (!ix86_save_reg (regno, false, false))
	break;
      ++count;
    }
  return count;
}

/* Determine if register REGNO is a stub managed register given the
   total COUNT of stub managed registers.  */
bool
xlogue_layout::is_stub_managed_reg (unsigned regno, unsigned count)
{
  bool hfp = frame_pointer_needed || stack_realign_fp;
  unsigned i;

  for (i = 0; i < count; ++i)
    {
      gcc_assert (i < MAX_REGS);
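      /* BP_REG is not stub-managed when a frame pointer is in use, so
	 widen the scan window to still examine COUNT managed registers.  */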
      if (REG_ORDER[i] == BP_REG && hfp)
	++count;
      else if (REG_ORDER[i] == regno)
	return true;
    }
  return false;
}

/* Constructor for xlogue_layout.  */
xlogue_layout::xlogue_layout (HOST_WIDE_INT stack_align_off_in, bool hfp)
  : m_hfp (hfp) , m_nregs (hfp ? 17 : 18),
    m_stack_align_off_in (stack_align_off_in)
{
  HOST_WIDE_INT offset = stack_align_off_in;
  unsigned i, j;

  for (i = j = 0; i < MAX_REGS; ++i)
    {
      unsigned regno = REG_ORDER[i];

      if (regno == BP_REG && hfp)
	continue;
      if (SSE_REGNO_P (regno))
	{
	  offset += 16;
	  /* Verify that SSE regs are always aligned.  */
	  gcc_assert (!((stack_align_off_in + offset) & 15));
	}
      else
	offset += 8;

      m_regs[j].regno    = regno;
      m_regs[j++].offset = offset - STUB_INDEX_OFFSET;
    }
  gcc_assert (j == m_nregs);
}

const char *
xlogue_layout::get_stub_name (enum xlogue_stub stub,
			      unsigned n_extra_regs)
{
  const int have_avx = TARGET_AVX;
  char *name = s_stub_names[!!have_avx][stub][n_extra_regs];

  /* Lazy init.  */
  if (!*name)
    {
      int res = snprintf (name, STUB_NAME_MAX_LEN, "__%s_%s_%u",
			  (have_avx ? "avx" : "sse"),
			  STUB_BASE_NAMES[stub],
			  MIN_REGS + n_extra_regs);
      gcc_checking_assert (res < (int)STUB_NAME_MAX_LEN);
    }

  return name;
}
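
/* For example, with MIN_REGS == 12 the names produced above look like
   "__sse_savms64_12" or, when AVX is enabled, "__avx_savms64_12"; a
   tail-call restore stub managing three extra registers would be
   "__sse_resms64x_15".  */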

/* Return rtx of a symbol ref for the entry point (based upon
   cfun->machine->call_ms2sysv_extra_regs) of the specified stub.  */
rtx
xlogue_layout::get_stub_rtx (enum xlogue_stub stub)
{
  const unsigned n_extra_regs = cfun->machine->call_ms2sysv_extra_regs;
  gcc_checking_assert (n_extra_regs <= MAX_EXTRA_REGS);
  gcc_assert (stub < XLOGUE_STUB_COUNT);
  gcc_assert (crtl->stack_realign_finalized);

  return gen_rtx_SYMBOL_REF (Pmode, get_stub_name (stub, n_extra_regs));
}

/* Define the structure for the machine field in struct function.  */

struct GTY(()) stack_local_entry {
  unsigned short mode;
  unsigned short n;
  rtx rtl;
  struct stack_local_entry *next;
};

/* Which CPU we are scheduling for.  */
enum attr_cpu ix86_schedule;

/* Which CPU we are optimizing for.  */
enum processor_type ix86_tune;

/* Which instruction set architecture to use.  */
enum processor_type ix86_arch;

/* True if the processor has the SSE prefetch instruction.  */
unsigned char x86_prefetch_sse;

/* -mstackrealign option */
static const char ix86_force_align_arg_pointer_string[]
  = "force_align_arg_pointer";

static rtx (*ix86_gen_leave) (void);
static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
static rtx (*ix86_gen_monitorx) (rtx, rtx, rtx);
static rtx (*ix86_gen_clzero) (rtx);
static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
static rtx (*ix86_gen_allocate_stack_worker) (rtx, rtx);
static rtx (*ix86_gen_adjust_stack_and_probe) (rtx, rtx, rtx);
static rtx (*ix86_gen_probe_stack_range) (rtx, rtx, rtx);
static rtx (*ix86_gen_tls_global_dynamic_64) (rtx, rtx, rtx);
static rtx (*ix86_gen_tls_local_dynamic_base_64) (rtx, rtx);

/* Preferred alignment for stack boundary in bits.  */
unsigned int ix86_preferred_stack_boundary;

/* Alignment for incoming stack boundary in bits specified at
   command line.  */
static unsigned int ix86_user_incoming_stack_boundary;

/* Default alignment for incoming stack boundary in bits.  */
static unsigned int ix86_default_incoming_stack_boundary;

/* Alignment for incoming stack boundary in bits.  */
unsigned int ix86_incoming_stack_boundary;

/* Calling-ABI-specific va_list type nodes.  */
static GTY(()) tree sysv_va_list_type_node;
static GTY(()) tree ms_va_list_type_node;

/* Prefix built by ASM_GENERATE_INTERNAL_LABEL.  */
char internal_label_prefix[16];
int internal_label_prefix_len;

/* Fence to use after loop using movnt.  */
tree x86_mfence;

/* Register class used for passing a given 64-bit part of the argument.
   These represent the classes documented by the psABI, with the exception
   of the SSESF and SSEDF classes, which are basically the SSE class; GCC
   just uses an SFmode or DFmode move instead of a DImode move to avoid
   reformatting penalties.

   Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
   whenever possible (the upper half then contains only padding).  */
enum x86_64_reg_class
  {
    X86_64_NO_CLASS,
    X86_64_INTEGER_CLASS,
    X86_64_INTEGERSI_CLASS,
    X86_64_SSE_CLASS,
    X86_64_SSESF_CLASS,
    X86_64_SSEDF_CLASS,
    X86_64_SSEUP_CLASS,
    X86_64_X87_CLASS,
    X86_64_X87UP_CLASS,
    X86_64_COMPLEX_X87_CLASS,
    X86_64_MEMORY_CLASS
  };

#define MAX_CLASSES 8
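
/* Illustrative classifications under the psABI rules: a lone int is
   X86_64_INTEGERSI_CLASS, a pointer or long is X86_64_INTEGER_CLASS, a
   float is X86_64_SSESF_CLASS, a double is X86_64_SSEDF_CLASS, an __m128
   spans two eightbytes classified X86_64_SSE_CLASS then X86_64_SSEUP_CLASS,
   and a long double is X86_64_X87_CLASS then X86_64_X87UP_CLASS.
   MAX_CLASSES is 8 because the largest register-passed object, a 64-byte
   ZMM vector, spans eight eightbytes.  */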

/* Table of constants used by fldpi, fldln2, etc.  */
static REAL_VALUE_TYPE ext_80387_constants_table [5];
static bool ext_80387_constants_init;


static struct machine_function * ix86_init_machine_status (void);
static rtx ix86_function_value (const_tree, const_tree, bool);
static bool ix86_function_value_regno_p (const unsigned int);
static unsigned int ix86_function_arg_boundary (machine_mode,
						const_tree);
static rtx ix86_static_chain (const_tree, bool);
static int ix86_function_regparm (const_tree, const_tree);
static void ix86_compute_frame_layout (void);
static bool ix86_expand_vector_init_one_nonzero (bool, machine_mode,
						 rtx, rtx, int);
static void ix86_add_new_builtins (HOST_WIDE_INT, HOST_WIDE_INT);
static tree ix86_canonical_va_list_type (tree);
static void predict_jump (int);
static unsigned int split_stack_prologue_scratch_regno (void);
static bool i386_asm_output_addr_const_extra (FILE *, rtx);

enum ix86_function_specific_strings
{
  IX86_FUNCTION_SPECIFIC_ARCH,
  IX86_FUNCTION_SPECIFIC_TUNE,
  IX86_FUNCTION_SPECIFIC_MAX
};

static char *ix86_target_string (HOST_WIDE_INT, HOST_WIDE_INT, int, int,
				 const char *, const char *, enum fpmath_unit,
				 bool);
static void ix86_function_specific_save (struct cl_target_option *,
					 struct gcc_options *opts);
static void ix86_function_specific_restore (struct gcc_options *opts,
					    struct cl_target_option *);
static void ix86_function_specific_post_stream_in (struct cl_target_option *);
static void ix86_function_specific_print (FILE *, int,
					  struct cl_target_option *);
static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
static bool ix86_valid_target_attribute_inner_p (tree, char *[],
						 struct gcc_options *,
						 struct gcc_options *,
						 struct gcc_options *);
static bool ix86_can_inline_p (tree, tree);
static void ix86_set_current_function (tree);
static unsigned int ix86_minimum_incoming_stack_boundary (bool);

static enum calling_abi ix86_function_abi (const_tree);


#ifndef SUBTARGET32_DEFAULT_CPU
#define SUBTARGET32_DEFAULT_CPU "i386"
#endif

/* Whether -mtune= or -march= were specified.  */
static int ix86_tune_defaulted;
static int ix86_arch_specified;

/* Vectorization library interface and handlers.  */
static tree (*ix86_veclib_handler) (combined_fn, tree, tree);

static tree ix86_veclibabi_svml (combined_fn, tree, tree);
static tree ix86_veclibabi_acml (combined_fn, tree, tree);

/* This table must be in sync with enum processor_type in i386.h.  */ 
static const struct processor_costs *processor_cost_table[PROCESSOR_max] =
{
  &generic_cost,
  &i386_cost,
  &i486_cost,
  &pentium_cost,
  &lakemont_cost,
  &pentiumpro_cost,
  &pentium4_cost,
  &nocona_cost,
  &core_cost,
  &core_cost,
  &core_cost,
  &core_cost,
  &atom_cost,
  &slm_cost,
  &slm_cost,
  &slm_cost,
  &slm_cost,
  &slm_cost,
  &slm_cost,
  &skylake_cost,
  &skylake_cost,
  &skylake_cost,
  &skylake_cost,
  &skylake_cost,
  &intel_cost,
  &geode_cost,
  &k6_cost,
  &athlon_cost,
  &k8_cost,
  &amdfam10_cost,
  &bdver_cost,
  &bdver_cost,
  &bdver_cost,
  &bdver_cost,
  &btver1_cost,
  &btver2_cost,
  &znver1_cost,
};

static unsigned int
rest_of_handle_insert_vzeroupper (void)
{
  int i;

  /* vzeroupper instructions are inserted immediately after reload to
     account for possible spills from 256-bit or 512-bit registers.  The
     pass reuses the mode switching infrastructure by re-running the mode
     insertion pass, so disable the entities that have already been
     processed.  */
  for (i = 0; i < MAX_386_ENTITIES; i++)
    ix86_optimize_mode_switching[i] = 0;

  ix86_optimize_mode_switching[AVX_U128] = 1;

  /* Call optimize_mode_switching.  */
  g->get_passes ()->execute_pass_mode_switching ();
  return 0;
}

/* Return true if INSN uses or defines a hard register.
   Hard register uses in a memory address are ignored.
   Clobbers and flags definitions are ignored.  */

static bool
has_non_address_hard_reg (rtx_insn *insn)
{
  df_ref ref;
  FOR_EACH_INSN_DEF (ref, insn)
    if (HARD_REGISTER_P (DF_REF_REAL_REG (ref))
	&& !DF_REF_FLAGS_IS_SET (ref, DF_REF_MUST_CLOBBER)
	&& DF_REF_REGNO (ref) != FLAGS_REG)
      return true;

  FOR_EACH_INSN_USE (ref, insn)
    if (!DF_REF_REG_MEM_P (ref) && HARD_REGISTER_P (DF_REF_REAL_REG (ref)))
      return true;

  return false;
}

/* Check if comparison INSN may be transformed
   into vector comparison.  Currently we transform
   zero checks only which look like:

   (set (reg:CCZ 17 flags)
        (compare:CCZ (ior:SI (subreg:SI (reg:DI x) 4)
                             (subreg:SI (reg:DI x) 0))
		     (const_int 0 [0])))  */
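
/* Such a pattern typically arises from a source-level test like
   "if (x == 0)" on a 64-bit X when !TARGET_64BIT, where the DImode
   comparison against zero is expanded as an IOR of the two SImode
   halves.  */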

static bool
convertible_comparison_p (rtx_insn *insn)
{
  if (!TARGET_SSE4_1)
    return false;

  rtx def_set = single_set (insn);

  gcc_assert (def_set);

  rtx src = SET_SRC (def_set);
  rtx dst = SET_DEST (def_set);

  gcc_assert (GET_CODE (src) == COMPARE);

  if (GET_CODE (dst) != REG
      || REGNO (dst) != FLAGS_REG
      || GET_MODE (dst) != CCZmode)
    return false;

  rtx op1 = XEXP (src, 0);
  rtx op2 = XEXP (src, 1);

  if (op2 != CONST0_RTX (GET_MODE (op2)))
    return false;

  if (GET_CODE (op1) != IOR)
    return false;

  op2 = XEXP (op1, 1);
  op1 = XEXP (op1, 0);

  if (!SUBREG_P (op1)
      || !SUBREG_P (op2)
      || GET_MODE (op1) != SImode
      || GET_MODE (op2) != SImode
      || ((SUBREG_BYTE (op1) != 0
	   || SUBREG_BYTE (op2) != GET_MODE_SIZE (SImode))
	  && (SUBREG_BYTE (op2) != 0
	      || SUBREG_BYTE (op1) != GET_MODE_SIZE (SImode))))
    return false;

  op1 = SUBREG_REG (op1);
  op2 = SUBREG_REG (op2);

  if (op1 != op2
      || !REG_P (op1)
      || GET_MODE (op1) != DImode)
    return false;

  return true;
}

/* The DImode version of scalar_to_vector_candidate_p.  */

static bool
dimode_scalar_to_vector_candidate_p (rtx_insn *insn)
{
  rtx def_set = single_set (insn);

  if (!def_set)
    return false;

  if (has_non_address_hard_reg (insn))
    return false;

  rtx src = SET_SRC (def_set);
  rtx dst = SET_DEST (def_set);

  if (GET_CODE (src) == COMPARE)
    return convertible_comparison_p (insn);

  /* We are interested in DImode promotion only.  */
  if ((GET_MODE (src) != DImode
       && !CONST_INT_P (src))
      || GET_MODE (dst) != DImode)
    return false;

  if (!REG_P (dst) && !MEM_P (dst))
    return false;

  switch (GET_CODE (src))
    {
    case ASHIFTRT:
      if (!TARGET_AVX512VL)
	return false;
      /* FALLTHRU */

    case ASHIFT:
    case LSHIFTRT:
      if (!REG_P (XEXP (src, 1))
	  && (!SUBREG_P (XEXP (src, 1))
	      || SUBREG_BYTE (XEXP (src, 1)) != 0
	      || !REG_P (SUBREG_REG (XEXP (src, 1))))
	  && (!CONST_INT_P (XEXP (src, 1))
	      || !IN_RANGE (INTVAL (XEXP (src, 1)), 0, 63)))
	return false;

      if (GET_MODE (XEXP (src, 1)) != QImode
	  && !CONST_INT_P (XEXP (src, 1)))
	return false;
      break;

    case PLUS:
    case MINUS:
    case IOR:
    case XOR:
    case AND:
      if (!REG_P (XEXP (src, 1))
	  && !MEM_P (XEXP (src, 1))
	  && !CONST_INT_P (XEXP (src, 1)))
	return false;

      if (GET_MODE (XEXP (src, 1)) != DImode
	  && !CONST_INT_P (XEXP (src, 1)))
	return false;
      break;

    case NEG:
    case NOT:
      break;

    case REG:
      return true;

    case MEM:
    case CONST_INT:
      return REG_P (dst);

    default:
      return false;
    }

  if (!REG_P (XEXP (src, 0))
      && !MEM_P (XEXP (src, 0))
      && !CONST_INT_P (XEXP (src, 0))
      /* Check for andnot case.  */
      && (GET_CODE (src) != AND
	  || GET_CODE (XEXP (src, 0)) != NOT
	  || !REG_P (XEXP (XEXP (src, 0), 0))))
    return false;

  if (GET_MODE (XEXP (src, 0)) != DImode
      && !CONST_INT_P (XEXP (src, 0)))
    return false;

  return true;
}
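
/* For example, a DImode "a = b | c" on a 32-bit target, which would
   otherwise be split into two SImode OR instructions, is a candidate to
   become a single por on an SSE register (subject to the gain
   computation performed on the whole chain).  */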

/* The TImode version of scalar_to_vector_candidate_p.  */

static bool
timode_scalar_to_vector_candidate_p (rtx_insn *insn)
{
  rtx def_set = single_set (insn);

  if (!def_set)
    return false;

  if (has_non_address_hard_reg (insn))
    return false;

  rtx src = SET_SRC (def_set);
  rtx dst = SET_DEST (def_set);

  /* Only TImode loads and stores are allowed.  */
  if (GET_MODE (dst) != TImode)
    return false;

  if (MEM_P (dst))
    {
      /* Check for a store.  The memory must be aligned, or the unaligned
	 store must be optimal.  Only support stores from a register, a
	 standard SSE constant or a CONST_WIDE_INT generated from a
	 piecewise store.

	 ??? Verify performance impact before enabling CONST_INT for
	 __int128 store.  */
      if (misaligned_operand (dst, TImode)
	  && !TARGET_SSE_UNALIGNED_STORE_OPTIMAL)
	return false;

      switch (GET_CODE (src))
	{
	default:
	  return false;

	case REG:
	case CONST_WIDE_INT:
	  return true;

	case CONST_INT:
	  return standard_sse_constant_p (src, TImode);
	}
    }
  else if (MEM_P (src))
    {
      /* Check for a load.  The memory must be aligned, or the unaligned
	 load must be optimal.  */
      return (REG_P (dst)
	      && (!misaligned_operand (src, TImode)
		  || TARGET_SSE_UNALIGNED_LOAD_OPTIMAL));
    }

  return false;
}
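
/* For example, an aligned __int128 copy "*p = *q" can become a single
   V1TImode load/store pair (movdqa) instead of two 64-bit moves.  */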

/* Return true if INSN may be converted into a vector
   instruction.  */

static bool
scalar_to_vector_candidate_p (rtx_insn *insn)
{
  if (TARGET_64BIT)
    return timode_scalar_to_vector_candidate_p (insn);
  else
    return dimode_scalar_to_vector_candidate_p (insn);
}

/* The DImode version of remove_non_convertible_regs.  */

static void
dimode_remove_non_convertible_regs (bitmap candidates)
{
  bitmap_iterator bi;
  unsigned id;
  bitmap regs = BITMAP_ALLOC (NULL);

  EXECUTE_IF_SET_IN_BITMAP (candidates, 0, id, bi)
    {
      rtx def_set = single_set (DF_INSN_UID_GET (id)->insn);
      rtx reg = SET_DEST (def_set);

      if (!REG_P (reg)
	  || bitmap_bit_p (regs, REGNO (reg))
	  || HARD_REGISTER_P (reg))
	continue;

      for (df_ref def = DF_REG_DEF_CHAIN (REGNO (reg));
	   def;
	   def = DF_REF_NEXT_REG (def))
	{
	  if (!bitmap_bit_p (candidates, DF_REF_INSN_UID (def)))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "r%d has non convertible definition in insn %d\n",
			 REGNO (reg), DF_REF_INSN_UID (def));

	      bitmap_set_bit (regs, REGNO (reg));
	      break;
	    }
	}
    }

  EXECUTE_IF_SET_IN_BITMAP (regs, 0, id, bi)
    {
      for (df_ref def = DF_REG_DEF_CHAIN (id);
	   def;
	   def = DF_REF_NEXT_REG (def))
	if (bitmap_bit_p (candidates, DF_REF_INSN_UID (def)))
	  {
	    if (dump_file)
	      fprintf (dump_file, "Removing insn %d from candidates list\n",
		       DF_REF_INSN_UID (def));

	    bitmap_clear_bit (candidates, DF_REF_INSN_UID (def));
	  }
    }

  BITMAP_FREE (regs);
}

/* For a register REGNO, scan instructions for its defs and uses.
   Put REGNO in REGS if a def or use isn't in CANDIDATES.  */

static void
timode_check_non_convertible_regs (bitmap candidates, bitmap regs,
				   unsigned int regno)
{
  for (df_ref def = DF_REG_DEF_CHAIN (regno);
       def;
       def = DF_REF_NEXT_REG (def))
    {
      if (!bitmap_bit_p (candidates, DF_REF_INSN_UID (def)))
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "r%d has non convertible def in insn %d\n",
		     regno, DF_REF_INSN_UID (def));

	  bitmap_set_bit (regs, regno);
	  break;
	}
    }

  for (df_ref ref = DF_REG_USE_CHAIN (regno);
       ref;
       ref = DF_REF_NEXT_REG (ref))
    {
      /* Debug instructions are skipped.  */
      if (NONDEBUG_INSN_P (DF_REF_INSN (ref))
	  && !bitmap_bit_p (candidates, DF_REF_INSN_UID (ref)))
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "r%d has non convertible use in insn %d\n",
		     regno, DF_REF_INSN_UID (ref));

	  bitmap_set_bit (regs, regno);
	  break;
	}
    }
}

/* The TImode version of remove_non_convertible_regs.  */

static void
timode_remove_non_convertible_regs (bitmap candidates)
{
  bitmap_iterator bi;
  unsigned id;
  bitmap regs = BITMAP_ALLOC (NULL);

  EXECUTE_IF_SET_IN_BITMAP (candidates, 0, id, bi)
    {
      rtx def_set = single_set (DF_INSN_UID_GET (id)->insn);
      rtx dest = SET_DEST (def_set);
      rtx src = SET_SRC (def_set);

      if ((!REG_P (dest)
	   || bitmap_bit_p (regs, REGNO (dest))
	   || HARD_REGISTER_P (dest))
	  && (!REG_P (src)
	      || bitmap_bit_p (regs, REGNO (src))
	      || HARD_REGISTER_P (src)))
	continue;

      if (REG_P (dest))
	timode_check_non_convertible_regs (candidates, regs,
					   REGNO (dest));

      if (REG_P (src))
	timode_check_non_convertible_regs (candidates, regs,
					   REGNO (src));
    }

  EXECUTE_IF_SET_IN_BITMAP (regs, 0, id, bi)
    {
      for (df_ref def = DF_REG_DEF_CHAIN (id);
	   def;
	   def = DF_REF_NEXT_REG (def))
	if (bitmap_bit_p (candidates, DF_REF_INSN_UID (def)))
	  {
	    if (dump_file)
	      fprintf (dump_file, "Removing insn %d from candidates list\n",
		       DF_REF_INSN_UID (def));

	    bitmap_clear_bit (candidates, DF_REF_INSN_UID (def));
	  }

      for (df_ref ref = DF_REG_USE_CHAIN (id);
	   ref;
	   ref = DF_REF_NEXT_REG (ref))
	if (bitmap_bit_p (candidates, DF_REF_INSN_UID (ref)))
	  {
	    if (dump_file)
	      fprintf (dump_file, "Removing insn %d from candidates list\n",
		       DF_REF_INSN_UID (ref));

	    bitmap_clear_bit (candidates, DF_REF_INSN_UID (ref));
	  }
    }

  BITMAP_FREE (regs);
}

/* For a given bitmap of insn UIDs, scan all instructions and
   remove an insn from CANDIDATES if it has both convertible
   and non-convertible definitions.

   All insns in the bitmap are conversion candidates according to
   scalar_to_vector_candidate_p.  Currently this implies that all
   insns are single_set.  */

static void
remove_non_convertible_regs (bitmap candidates)
{
  if (TARGET_64BIT)
    timode_remove_non_convertible_regs (candidates);
  else
    dimode_remove_non_convertible_regs (candidates);
}

class scalar_chain
{
 public:
  scalar_chain ();
  virtual ~scalar_chain ();

  static unsigned max_id;

  /* ID of a chain.  */
  unsigned int chain_id;
  /* A queue of instructions to be included into the chain.  */
  bitmap queue;
  /* Instructions included into the chain.  */
  bitmap insns;
  /* All registers defined by the chain.  */
  bitmap defs;
  /* Registers used in both vector and scalar modes.  */
  bitmap defs_conv;

  void build (bitmap candidates, unsigned insn_uid);
  virtual int compute_convert_gain () = 0;
  int convert ();

 protected:
  void add_to_queue (unsigned insn_uid);
  void emit_conversion_insns (rtx insns, rtx_insn *pos);

 private:
  void add_insn (bitmap candidates, unsigned insn_uid);
  void analyze_register_chain (bitmap candidates, df_ref ref);
  virtual void mark_dual_mode_def (df_ref def) = 0;
  virtual void convert_insn (rtx_insn *insn) = 0;
  virtual void convert_registers () = 0;
};
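
/* A sketch of how these classes are driven (the actual driver is the
   convert_scalars_to_vector pass later in this file; the details here
   are illustrative only):

     bitmap candidates = ...;   // insns accepted by
				// scalar_to_vector_candidate_p
     remove_non_convertible_regs (candidates);
     while (!bitmap_empty_p (candidates))
       {
	 unsigned uid = bitmap_first_set_bit (candidates);
	 scalar_chain *chain;
	 if (TARGET_64BIT)
	   chain = new timode_scalar_chain;
	 else
	   chain = new dimode_scalar_chain;
	 // build () removes the uids it consumes from CANDIDATES.
	 chain->build (candidates, uid);
	 if (chain->compute_convert_gain () > 0)
	   chain->convert ();
	 delete chain;
       }  */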

class dimode_scalar_chain : public scalar_chain
{
 public:
  int compute_convert_gain ();
 private:
  void mark_dual_mode_def (df_ref def);
  rtx replace_with_subreg (rtx x, rtx reg, rtx subreg);
  void replace_with_subreg_in_insn (rtx_insn *insn, rtx reg, rtx subreg);
  void convert_insn (rtx_insn *insn);
  void convert_op (rtx *op, rtx_insn *insn);
  void convert_reg (unsigned regno);
  void make_vector_copies (unsigned regno);
  void convert_registers ();
  int vector_const_cost (rtx exp);
};

class timode_scalar_chain : public scalar_chain
{
 public:
  /* Converting from TImode to V1TImode is always faster.  */
  int compute_convert_gain () { return 1; }

 private:
  void mark_dual_mode_def (df_ref def);
  void fix_debug_reg_uses (rtx reg);
  void convert_insn (rtx_insn *insn);
  /* We don't convert registers to a different size.  */
  void convert_registers () {}
};

unsigned scalar_chain::max_id = 0;

/* Initialize a new chain.  */

scalar_chain::scalar_chain ()
{
  chain_id = ++max_id;

  if (dump_file)
    fprintf (dump_file, "Created a new instruction chain #%d\n", chain_id);

  bitmap_obstack_initialize (NULL);
  insns = BITMAP_ALLOC (NULL);
  defs = BITMAP_ALLOC (NULL);
  defs_conv = BITMAP_ALLOC (NULL);
  queue = NULL;
}

/* Free chain's data.  */

scalar_chain::~scalar_chain ()
{
  BITMAP_FREE (insns);
  BITMAP_FREE (defs);
  BITMAP_FREE (defs_conv);
  bitmap_obstack_release (NULL);
}

/* Add an instruction into the chain's queue.  */

void
scalar_chain::add_to_queue (unsigned insn_uid)
{
  if (bitmap_bit_p (insns, insn_uid)
      || bitmap_bit_p (queue, insn_uid))
    return;

  if (dump_file)
    fprintf (dump_file, "  Adding insn %d into chain's #%d queue\n",
	     insn_uid, chain_id);
  bitmap_set_bit (queue, insn_uid);
}

/* For DImode conversion, mark register defined by DEF as requiring
   conversion.  */

void
dimode_scalar_chain::mark_dual_mode_def (df_ref def)
{
  gcc_assert (DF_REF_REG_DEF_P (def));

  if (bitmap_bit_p (defs_conv, DF_REF_REGNO (def)))
    return;

  if (dump_file)
    fprintf (dump_file,
	     "  Mark r%d def in insn %d as requiring both modes in chain #%d\n",
	     DF_REF_REGNO (def), DF_REF_INSN_UID (def), chain_id);

  bitmap_set_bit (defs_conv, DF_REF_REGNO (def));
}

/* For TImode conversion, it is unused.  */

void
timode_scalar_chain::mark_dual_mode_def (df_ref)
{
  gcc_unreachable ();
}

/* Check REF's chain to add new insns into a queue
   and find registers requiring conversion.  */

void
scalar_chain::analyze_register_chain (bitmap candidates, df_ref ref)
{
  df_link *chain;

  gcc_assert (bitmap_bit_p (insns, DF_REF_INSN_UID (ref))
	      || bitmap_bit_p (candidates, DF_REF_INSN_UID (ref)));
  add_to_queue (DF_REF_INSN_UID (ref));

  for (chain = DF_REF_CHAIN (ref); chain; chain = chain->next)
    {
      unsigned uid = DF_REF_INSN_UID (chain->ref);

      if (!NONDEBUG_INSN_P (DF_REF_INSN (chain->ref)))
	continue;

      if (!DF_REF_REG_MEM_P (chain->ref))
	{
	  if (bitmap_bit_p (insns, uid))
	    continue;

	  if (bitmap_bit_p (candidates, uid))
	    {
	      add_to_queue (uid);
	      continue;
	    }
	}

      if (DF_REF_REG_DEF_P (chain->ref))
	{
	  if (dump_file)
	    fprintf (dump_file, "  r%d def in insn %d isn't convertible\n",
		     DF_REF_REGNO (chain->ref), uid);
	  mark_dual_mode_def (chain->ref);
	}
      else
	{
	  if (dump_file)
	    fprintf (dump_file, "  r%d use in insn %d isn't convertible\n",
		     DF_REF_REGNO (chain->ref), uid);
	  mark_dual_mode_def (ref);
	}
    }
}

/* Add an instruction into the chain.  */

void
scalar_chain::add_insn (bitmap candidates, unsigned int insn_uid)
{
  if (bitmap_bit_p (insns, insn_uid))
    return;

  if (dump_file)
    fprintf (dump_file, "  Adding insn %d to chain #%d\n", insn_uid, chain_id);

  bitmap_set_bit (insns, insn_uid);

  rtx_insn *insn = DF_INSN_UID_GET (insn_uid)->insn;
  rtx def_set = single_set (insn);
  if (def_set && REG_P (SET_DEST (def_set))
      && !HARD_REGISTER_P (SET_DEST (def_set)))
    bitmap_set_bit (defs, REGNO (SET_DEST (def_set)));

  df_ref ref;
  df_ref def;
  for (ref = DF_INSN_UID_DEFS (insn_uid); ref; ref = DF_REF_NEXT_LOC (ref))
    if (!HARD_REGISTER_P (DF_REF_REG (ref)))
      for (def = DF_REG_DEF_CHAIN (DF_REF_REGNO (ref));
	   def;
	   def = DF_REF_NEXT_REG (def))
	analyze_register_chain (candidates, def);
  for (ref = DF_INSN_UID_USES (insn_uid); ref; ref = DF_REF_NEXT_LOC (ref))
    if (!DF_REF_REG_MEM_P (ref))
      analyze_register_chain (candidates, ref);
}

/* Build a new chain starting from insn INSN_UID, recursively
   adding all dependent uses and definitions.  */

void
scalar_chain::build (bitmap candidates, unsigned insn_uid)
{
  queue = BITMAP_ALLOC (NULL);
  bitmap_set_bit (queue, insn_uid);

  if (dump_file)
    fprintf (dump_file, "Building chain #%d...\n", chain_id);

  while (!bitmap_empty_p (queue))
    {
      insn_uid = bitmap_first_set_bit (queue);
      bitmap_clear_bit (queue, insn_uid);
      bitmap_clear_bit (candidates, insn_uid);
      add_insn (candidates, insn_uid);
    }

  if (dump_file)
    {
      fprintf (dump_file, "Collected chain #%d...\n", chain_id);
      fprintf (dump_file, "  insns: ");
      dump_bitmap (dump_file, insns);
      if (!bitmap_empty_p (defs_conv))
	{
	  bitmap_iterator bi;
	  unsigned id;
	  const char *comma = "";
	  fprintf (dump_file, "  defs to convert: ");
	  EXECUTE_IF_SET_IN_BITMAP (defs_conv, 0, id, bi)
	    {
	      fprintf (dump_file, "%sr%d", comma, id);
	      comma = ", ";
	    }
	  fprintf (dump_file, "\n");
	}
    }

  BITMAP_FREE (queue);
}

/* Return the cost of building a vector constant
   instead of using a scalar one.  */

int
dimode_scalar_chain::vector_const_cost (rtx exp)
{
  gcc_assert (CONST_INT_P (exp));

  if (standard_sse_constant_p (exp, V2DImode))
    return COSTS_N_INSNS (1);
  return ix86_cost->sse_load[1];
}

/* Compute a gain for chain conversion.  */

int
dimode_scalar_chain::compute_convert_gain ()
{
  bitmap_iterator bi;
  unsigned insn_uid;
  int gain = 0;
  int cost = 0;

  if (dump_file)
    fprintf (dump_file, "Computing gain for chain #%d...\n", chain_id);

  EXECUTE_IF_SET_IN_BITMAP (insns, 0, insn_uid, bi)
    {
      rtx_insn *insn = DF_INSN_UID_GET (insn_uid)->insn;
      rtx def_set = single_set (insn);
      rtx src = SET_SRC (def_set);
      rtx dst = SET_DEST (def_set);

      if (REG_P (src) && REG_P (dst))
	gain += COSTS_N_INSNS (2) - ix86_cost->xmm_move;
      else if (REG_P (src) && MEM_P (dst))
	gain += 2 * ix86_cost->int_store[2] - ix86_cost->sse_store[1];
      else if (MEM_P (src) && REG_P (dst))
	gain += 2 * ix86_cost->int_load[2] - ix86_cost->sse_load[1];
      else if (GET_CODE (src) == ASHIFT
	       || GET_CODE (src) == ASHIFTRT
	       || GET_CODE (src) == LSHIFTRT)
	{
	  if (CONST_INT_P (XEXP (src, 0)))
	    gain -= vector_const_cost (XEXP (src, 0));
	  if (CONST_INT_P (XEXP (src, 1)))
	    {
	      gain += ix86_cost->shift_const;
	      if (INTVAL (XEXP (src, 1)) >= 32)
		gain -= COSTS_N_INSNS (1);
	    }
	  else
	    /* Additional gain for omitting two CMOVs.  */
	    gain += ix86_cost->shift_var + COSTS_N_INSNS (2);
	}
      else if (GET_CODE (src) == PLUS
	       || GET_CODE (src) == MINUS
	       || GET_CODE (src) == IOR
	       || GET_CODE (src) == XOR
	       || GET_CODE (src) == AND)
	{
	  gain += ix86_cost->add;
	  /* Additional gain for andnot for targets without BMI.  */
	  if (GET_CODE (XEXP (src, 0)) == NOT
	      && !TARGET_BMI)
	    gain += 2 * ix86_cost->add;

	  if (CONST_INT_P (XEXP (src, 0)))
	    gain -= vector_const_cost (XEXP (src, 0));
	  if (CONST_INT_P (XEXP (src, 1)))
	    gain -= vector_const_cost (XEXP (src, 1));
	}
      else if (GET_CODE (src) == NEG
	       || GET_CODE (src) == NOT)
	gain += ix86_cost->add - COSTS_N_INSNS (1);
      else if (GET_CODE (src) == COMPARE)
	{
	  /* Assume comparison cost is the same.  */
	}
      else if (CONST_INT_P (src))
	{
	  if (REG_P (dst))
	    gain += COSTS_N_INSNS (2);
	  else if (MEM_P (dst))
	    gain += 2 * ix86_cost->int_store[2] - ix86_cost->sse_store[1];
	  gain -= vector_const_cost (src);
	}
      else
	gcc_unreachable ();
    }

  if (dump_file)
    fprintf (dump_file, "  Instruction conversion gain: %d\n", gain);

  EXECUTE_IF_SET_IN_BITMAP (defs_conv, 0, insn_uid, bi)
    cost += DF_REG_DEF_COUNT (insn_uid) * ix86_cost->mmxsse_to_integer;

  if (dump_file)
    fprintf (dump_file, "  Registers conversion cost: %d\n", cost);

  gain -= cost;

  if (dump_file)
    fprintf (dump_file, "  Total gain: %d\n", gain);

  return gain;
}
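
/* A worked example of the accounting above (with illustrative cost
   values): a chain whose only insn is a DImode "reg = reg + reg"
   contributes ix86_cost->add to the gain; if the destination is also
   read by one non-convertible insn, the register lands in defs_conv and
   each of its definitions charges ix86_cost->mmxsse_to_integer back.
   The chain is converted only when the resulting total is positive.  */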

/* Replace REG in X with a V2DI subreg of NEW_REG.  */

rtx
dimode_scalar_chain::replace_with_subreg (rtx x, rtx reg, rtx new_reg)
{
  if (x == reg)
    return gen_rtx_SUBREG (V2DImode, new_reg, 0);

  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int i, j;
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	XEXP (x, i) = replace_with_subreg (XEXP (x, i), reg, new_reg);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  XVECEXP (x, i, j) = replace_with_subreg (XVECEXP (x, i, j),
						   reg, new_reg);
    }

  return x;
}

/* Replace REG in INSN with a V2DI subreg of NEW_REG.  */

void
dimode_scalar_chain::replace_with_subreg_in_insn (rtx_insn *insn,
						  rtx reg, rtx new_reg)
{
  replace_with_subreg (single_set (insn), reg, new_reg);
}

/* Insert generated conversion instruction sequence INSNS
   after instruction AFTER.  A new BB may be required if
   the instruction has an EH region attached.  */

void
scalar_chain::emit_conversion_insns (rtx insns, rtx_insn *after)
{
  if (!control_flow_insn_p (after))
    {
      emit_insn_after (insns, after);
      return;
    }

  basic_block bb = BLOCK_FOR_INSN (after);
  edge e = find_fallthru_edge (bb->succs);
  gcc_assert (e);

  basic_block new_bb = split_edge (e);
  emit_insn_after (insns, BB_HEAD (new_bb));
}

/* Make vector copies for all definitions of register REGNO
   and replace its uses in the chain.  */

void
dimode_scalar_chain::make_vector_copies (unsigned regno)
{
  rtx reg = regno_reg_rtx[regno];
  rtx vreg = gen_reg_rtx (DImode);
  bool count_reg = false;
  df_ref ref;

  for (ref = DF_REG_DEF_CHAIN (regno); ref; ref = DF_REF_NEXT_REG (ref))
    if (!bitmap_bit_p (insns, DF_REF_INSN_UID (ref)))
      {
	df_ref use;

	/* Detect the count register of a shift instruction.  */
	for (use = DF_REG_USE_CHAIN (regno); use; use = DF_REF_NEXT_REG (use))
	  if (bitmap_bit_p (insns, DF_REF_INSN_UID (use)))
	    {
	      rtx_insn *insn = DF_REF_INSN (use);
	      rtx def_set = single_set (insn);

	      gcc_assert (def_set);

	      rtx src = SET_SRC (def_set);

	      if ((GET_CODE (src) == ASHIFT
		   || GET_CODE (src) == ASHIFTRT
		   || GET_CODE (src) == LSHIFTRT)
		  && !CONST_INT_P (XEXP (src, 1))
		  && reg_or_subregno (XEXP (src, 1)) == regno)
		count_reg = true;
	    }

	start_sequence ();
	if (count_reg)
	  {
	    rtx qreg = gen_lowpart (QImode, reg);
	    rtx tmp = gen_reg_rtx (SImode);

	    if (TARGET_ZERO_EXTEND_WITH_AND
		&& optimize_function_for_speed_p (cfun))
	      {
		emit_move_insn (tmp, const0_rtx);
		emit_insn (gen_movstrictqi
			   (gen_lowpart (QImode, tmp), qreg));
	      }
	    else
	      emit_insn (gen_rtx_SET
			 (tmp, gen_rtx_ZERO_EXTEND (SImode, qreg)));

	    if (!TARGET_INTER_UNIT_MOVES_TO_VEC)
	      {
		rtx slot = assign_386_stack_local (SImode, SLOT_STV_TEMP);
		emit_move_insn (slot, tmp);
		tmp = copy_rtx (slot);
	      }

	    emit_insn (gen_zero_extendsidi2 (vreg, tmp));
	  }
	else if (!TARGET_INTER_UNIT_MOVES_TO_VEC)
	  {
	    rtx tmp = assign_386_stack_local (DImode, SLOT_STV_TEMP);
	    emit_move_insn (adjust_address (tmp, SImode, 0),
			    gen_rtx_SUBREG (SImode, reg, 0));
	    emit_move_insn (adjust_address (tmp, SImode, 4),
			    gen_rtx_SUBREG (SImode, reg, 4));
	    emit_move_insn (vreg, tmp);
	  }
	else if (TARGET_SSE4_1)
	  {
	    emit_insn (gen_sse2_loadld (gen_rtx_SUBREG (V4SImode, vreg, 0),
					CONST0_RTX (V4SImode),
					gen_rtx_SUBREG (SImode, reg, 0)));
	    emit_insn (gen_sse4_1_pinsrd (gen_rtx_SUBREG (V4SImode, vreg, 0),
					  gen_rtx_SUBREG (V4SImode, vreg, 0),
					  gen_rtx_SUBREG (SImode, reg, 4),
					  GEN_INT (2)));
	  }
	else
	  {
	    rtx tmp = gen_reg_rtx (DImode);
	    emit_insn (gen_sse2_loadld (gen_rtx_SUBREG (V4SImode, vreg, 0),
					CONST0_RTX (V4SImode),
					gen_rtx_SUBREG (SImode, reg, 0)));
	    emit_insn (gen_sse2_loadld (gen_rtx_SUBREG (V4SImode, tmp, 0),
					CONST0_RTX (V4SImode),
					gen_rtx_SUBREG (SImode, reg, 4)));
	    emit_insn (gen_vec_interleave_lowv4si
		       (gen_rtx_SUBREG (V4SImode, vreg, 0),
			gen_rtx_SUBREG (V4SImode, vreg, 0),
			gen_rtx_SUBREG (V4SImode, tmp, 0)));
	  }
	rtx_insn *seq = get_insns ();
	end_sequence ();
	rtx_insn *insn = DF_REF_INSN (ref);
	emit_conversion_insns (seq, insn);

	if (dump_file)
	  fprintf (dump_file,
		   "  Copied r%d to a vector register r%d for insn %d\n",
		   regno, REGNO (vreg), INSN_UID (insn));
      }

  for (ref = DF_REG_USE_CHAIN (regno); ref; ref = DF_REF_NEXT_REG (ref))
    if (bitmap_bit_p (insns, DF_REF_INSN_UID (ref)))
      {
	rtx_insn *insn = DF_REF_INSN (ref);
	if (count_reg)
	  {
	    rtx def_set = single_set (insn);
	    gcc_assert (def_set);

	    rtx src = SET_SRC (def_set);

	    if ((GET_CODE (src) == ASHIFT
		 || GET_CODE (src) == ASHIFTRT
		 || GET_CODE (src) == LSHIFTRT)
		&& !CONST_INT_P (XEXP (src, 1))
		&& reg_or_subregno (XEXP (src, 1)) == regno)
	      XEXP (src, 1) = vreg;
	  }
	else
	  replace_with_subreg_in_insn (insn, reg, vreg);

	if (dump_file)
	  fprintf (dump_file, "  Replaced r%d with r%d in insn %d\n",
		   regno, REGNO (vreg), INSN_UID (insn));
      }
}

/* Convert all definitions of register REGNO
   and fix its uses.  Scalar copies may be created
   if the register is used in a non-convertible insn.  */

void
dimode_scalar_chain::convert_reg (unsigned regno)
{
  bool scalar_copy = bitmap_bit_p (defs_conv, regno);
  rtx reg = regno_reg_rtx[regno];
  rtx scopy = NULL_RTX;
  df_ref ref;
  bitmap conv;

  conv = BITMAP_ALLOC (NULL);
  bitmap_copy (conv, insns);

  if (scalar_copy)
    scopy = gen_reg_rtx (DImode);

  for (ref = DF_REG_DEF_CHAIN (regno); ref; ref = DF_REF_NEXT_REG (ref))
    {
      rtx_insn *insn = DF_REF_INSN (ref);
      rtx def_set = single_set (insn);
      rtx src = SET_SRC (def_set);
      rtx reg = DF_REF_REG (ref);

      if (!MEM_P (src))
	{
	  replace_with_subreg_in_insn (insn, reg, reg);
	  bitmap_clear_bit (conv, INSN_UID (insn));
	}

      if (scalar_copy)
	{
	  start_sequence ();
	  if (!TARGET_INTER_UNIT_MOVES_FROM_VEC)
	    {
	      rtx tmp = assign_386_stack_local (DImode, SLOT_STV_TEMP);
	      emit_move_insn (tmp, reg);
	      emit_move_insn (gen_rtx_SUBREG (SImode, scopy, 0),
			      adjust_address (tmp, SImode, 0));
	      emit_move_insn (gen_rtx_SUBREG (SImode, scopy, 4),
			      adjust_address (tmp, SImode, 4));
	    }
	  else if (TARGET_SSE4_1)
	    {
	      rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
	      emit_insn
		(gen_rtx_SET
		 (gen_rtx_SUBREG (SImode, scopy, 0),
		  gen_rtx_VEC_SELECT (SImode,
				      gen_rtx_SUBREG (V4SImode, reg, 0), tmp)));

	      tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const1_rtx));
	      emit_insn
		(gen_rtx_SET
		 (gen_rtx_SUBREG (SImode, scopy, 4),
		  gen_rtx_VEC_SELECT (SImode,
				      gen_rtx_SUBREG (V4SImode, reg, 0), tmp)));
	    }
	  else
	    {
	      rtx vcopy = gen_reg_rtx (V2DImode);
	      emit_move_insn (vcopy, gen_rtx_SUBREG (V2DImode, reg, 0));
	      emit_move_insn (gen_rtx_SUBREG (SImode, scopy, 0),
			      gen_rtx_SUBREG (SImode, vcopy, 0));
	      emit_move_insn (vcopy,
			      gen_rtx_LSHIFTRT (V2DImode, vcopy, GEN_INT (32)));
	      emit_move_insn (gen_rtx_SUBREG (SImode, scopy, 4),
			      gen_rtx_SUBREG (SImode, vcopy, 0));
	    }
	  rtx_insn *seq = get_insns ();
	  end_sequence ();
	  emit_conversion_insns (seq, insn);

	  if (dump_file)
	    fprintf (dump_file,
		     "  Copied r%d to a scalar register r%d for insn %d\n",
		     regno, REGNO (scopy), INSN_UID (insn));
	}
    }

  for (ref = DF_REG_USE_CHAIN (regno); ref; ref = DF_REF_NEXT_REG (ref))
    if (bitmap_bit_p (insns, DF_REF_INSN_UID (ref)))
      {
	if (bitmap_bit_p (conv, DF_REF_INSN_UID (ref)))
	  {
	    rtx_insn *insn = DF_REF_INSN (ref);

	    rtx def_set = single_set (insn);
	    gcc_assert (def_set);

	    rtx src = SET_SRC (def_set);
	    rtx dst = SET_DEST (def_set);

	    if ((GET_CODE (src) == ASHIFT
		 || GET_CODE (src) == ASHIFTRT
		 || GET_CODE (src) == LSHIFTRT)
		&& !CONST_INT_P (XEXP (src, 1))
		&& reg_or_subregno (XEXP (src, 1)) == regno)
	      {
		rtx tmp2 = gen_reg_rtx (V2DImode);

		start_sequence ();

		if (TARGET_SSE4_1)
		  emit_insn (gen_sse4_1_zero_extendv2qiv2di2
			     (tmp2, gen_rtx_SUBREG (V16QImode, reg, 0)));
		else
		  {
		    rtx vec_cst
		      = gen_rtx_CONST_VECTOR (V2DImode,
					      gen_rtvec (2, GEN_INT (0xff),
							 const0_rtx));
		    vec_cst
		      = validize_mem (force_const_mem (V2DImode, vec_cst));

		    emit_insn (gen_rtx_SET
			       (tmp2,
				gen_rtx_AND (V2DImode,
					     gen_rtx_SUBREG (V2DImode, reg, 0),
					     vec_cst)));
		  }
		rtx_insn *seq = get_insns ();
		end_sequence ();

		emit_insn_before (seq, insn);

		XEXP (src, 1) = gen_rtx_SUBREG (DImode, tmp2, 0);
	      }
	    else if (!MEM_P (dst) || !REG_P (src))
	      replace_with_subreg_in_insn (insn, reg, reg);

	    bitmap_clear_bit (conv, INSN_UID (insn));
	  }
      }
    /* Skip debug insns and uninitialized uses.  */
    else if (DF_REF_CHAIN (ref)
	     && NONDEBUG_INSN_P (DF_REF_INSN (ref)))
      {
	gcc_assert (scopy);
	replace_rtx (DF_REF_INSN (ref), reg, scopy);
	df_insn_rescan (DF_REF_INSN (ref));
      }

  BITMAP_FREE (conv);
}

/* Convert operand OP in INSN.  We should handle
   memory operands and uninitialized registers.
   All other register uses are converted during
   register conversion.  */

void
dimode_scalar_chain::convert_op (rtx *op, rtx_insn *insn)
{
  *op = copy_rtx_if_shared (*op);

  if (GET_CODE (*op) == NOT)
    {
      convert_op (&XEXP (*op, 0), insn);
      PUT_MODE (*op, V2DImode);
    }
  else if (MEM_P (*op))
    {
      rtx tmp = gen_reg_rtx (DImode);

      emit_insn_before (gen_move_insn (tmp, *op), insn);
      *op = gen_rtx_SUBREG (V2DImode, tmp, 0);

      if (dump_file)
	fprintf (dump_file, "  Preloading operand for insn %d into r%d\n",
		 INSN_UID (insn), REGNO (tmp));
    }
  else if (REG_P (*op))
    {
      /* We may not have converted the register use in case
	 this register has no definition.  Otherwise it
	 should have been converted in convert_reg.  */
      df_ref ref;
      FOR_EACH_INSN_USE (ref, insn)
	if (DF_REF_REGNO (ref) == REGNO (*op))
	  {
	    gcc_assert (!DF_REF_CHAIN (ref));
	    break;
	  }
      *op = gen_rtx_SUBREG (V2DImode, *op, 0);
    }
  else if (CONST_INT_P (*op))
    {
      rtx vec_cst;
      rtx tmp = gen_rtx_SUBREG (V2DImode, gen_reg_rtx (DImode), 0);

      /* Prefer an all-ones vector in case of -1.  */
      if (constm1_operand (*op, GET_MODE (*op)))
	vec_cst = CONSTM1_RTX (V2DImode);
      else
	vec_cst = gen_rtx_CONST_VECTOR (V2DImode,
					gen_rtvec (2, *op, const0_rtx));

      if (!standard_sse_constant_p (vec_cst, V2DImode))
	{
	  start_sequence ();
	  vec_cst = validize_mem (force_const_mem (V2DImode, vec_cst));
	  rtx_insn *seq = get_insns ();
	  end_sequence ();
	  emit_insn_before (seq, insn);
	}

      emit_insn_before (gen_move_insn (copy_rtx (tmp), vec_cst), insn);
      *op = tmp;
    }
  else
    {
      gcc_assert (SUBREG_P (*op));
      gcc_assert (GET_MODE (*op) == V2DImode);
    }
}

/* Convert INSN to vector mode.  */
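
/* An illustrative sketch (operands simplified, not taken from actual
   dumps): after register conversion, a chain insn such as

     (set (reg:DI 100) (and:DI (reg:DI 101) (reg:DI 102)))

   ends up as

     (set (subreg:V2DI (reg:DI 100) 0)
	  (and:V2DI (subreg:V2DI (reg:DI 101) 0)
		    (subreg:V2DI (reg:DI 102) 0)))

   which can be implemented by a single SSE2 "pand".  */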

void
dimode_scalar_chain::convert_insn (rtx_insn *insn)
{
  rtx def_set = single_set (insn);
  rtx src = SET_SRC (def_set);
  rtx dst = SET_DEST (def_set);
  rtx subreg;

  if (MEM_P (dst) && !REG_P (src))
    {
      /* Vector instructions cannot store their result directly to
	 memory, so compute into a temporary register first and then
	 store it.  */
      rtx tmp = gen_reg_rtx (DImode);
      emit_conversion_insns (gen_move_insn (dst, tmp), insn);
      dst = gen_rtx_SUBREG (V2DImode, tmp, 0);
    }

  switch (GET_CODE (src))
    {
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      convert_op (&XEXP (src, 0), insn);
      PUT_MODE (src, V2DImode);
      break;

    case PLUS:
    case MINUS:
    case IOR:
    case XOR:
    case AND:
      convert_op (&XEXP (src, 0), insn);
      convert_op (&XEXP (src, 1), insn);
      PUT_MODE (src, V2DImode);
      break;

    case NEG:
      src = XEXP (src, 0);
      convert_op (&src, insn);
      subreg = gen_reg_rtx (V2DImode);
      emit_insn_before (gen_move_insn (subreg, CONST0_RTX (V2DImode)), insn);
      src = gen_rtx_MINUS (V2DImode, subreg, src);
      break;

    case NOT:
      src = XEXP (src, 0);
      convert_op (&src, insn);
      subreg = gen_reg_rtx (V2DImode);
      emit_insn_before (gen_move_insn (subreg, CONSTM1_RTX (V2DImode)), insn);
      src = gen_rtx_XOR (V2DImode, src, subreg);
      break;

    case MEM:
      if (!REG_P (dst))
	convert_op (&src, insn);
      break;

    case REG:
      if (!MEM_P (dst))
	convert_op (&src, insn);
      break;

    case SUBREG:
      gcc_assert (GET_MODE (src) == V2DImode);
      break;

    case COMPARE:
      src = SUBREG_REG (XEXP (XEXP (src, 0), 0));

      gcc_assert ((REG_P (src) && GET_MODE (src) == DImode)
		  || (SUBREG_P (src) && GET_MODE (src) == V2DImode));

      if (REG_P (src))
	subreg = gen_rtx_SUBREG (V2DImode, src, 0);
      else
	subreg = copy_rtx_if_shared (src);
      emit_insn_before (gen_vec_interleave_lowv2di (copy_rtx_if_shared (subreg),
						    copy_rtx_if_shared (subreg),
						    copy_rtx_if_shared (subreg)),
			insn);
      dst = gen_rtx_REG (CCmode, FLAGS_REG);
      src = gen_rtx_UNSPEC (CCmode, gen_rtvec (2, copy_rtx_if_shared (src),
					       copy_rtx_if_shared (src)),
			    UNSPEC_PTEST);
      break;

    case CONST_INT:
      convert_op (&src, insn);
      break;

    default:
      gcc_unreachable ();
    }

  SET_SRC (def_set) = src;
  SET_DEST (def_set) = dst;

  /* Drop possible dead definitions.  */
  PATTERN (insn) = def_set;

  INSN_CODE (insn) = -1;
  recog_memoized (insn);
  df_insn_rescan (insn);
}

/* Fix uses of converted REG in debug insns.  */

void
timode_scalar_chain::fix_debug_reg_uses (rtx reg)
{
  if (!flag_var_tracking)
    return;

  df_ref ref, next;
  for (ref = DF_REG_USE_CHAIN (REGNO (reg)); ref; ref = next)
    {
      rtx_insn *insn = DF_REF_INSN (ref);
      /* Make sure the next ref is for a different instruction,
         so that we're not affected by the rescan.  */
      next = DF_REF_NEXT_REG (ref);
      while (next && DF_REF_INSN (next) == insn)
	next = DF_REF_NEXT_REG (next);

      if (DEBUG_INSN_P (insn))
	{
	  /* It may be a debug insn with a TImode variable in
	     a register.  */
	  bool changed = false;
	  for (; ref != next; ref = DF_REF_NEXT_REG (ref))
	    {
	      rtx *loc = DF_REF_LOC (ref);
	      if (REG_P (*loc) && GET_MODE (*loc) == V1TImode)
		{
		  *loc = gen_rtx_SUBREG (TImode, *loc, 0);
		  changed = true;
		}
	    }
	  if (changed)
	    df_insn_rescan (insn);
	}
    }
}

/* Convert INSN from TImode to V1TImode.  */
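
/* An illustrative example: a 128-bit move such as

     (set (mem:TI ...) (reg:TI 100))

   is re-expressed with V1TImode operands so that it can be carried out
   by a single SSE move rather than a pair of 64-bit moves.  */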

void
timode_scalar_chain::convert_insn (rtx_insn *insn)
{
  rtx def_set = single_set (insn);
  rtx src = SET_SRC (def_set);
  rtx dst = SET_DEST (def_set);

  switch (GET_CODE (dst))
    {
    case REG:
      {
	rtx tmp = find_reg_equal_equiv_note (insn);
	if (tmp)
	  PUT_MODE (XEXP (tmp, 0), V1TImode);
	PUT_MODE (dst, V1TImode);
	fix_debug_reg_uses (dst);
      }
      break;
    case MEM:
      PUT_MODE (dst, V1TImode);
      break;

    default:
      gcc_unreachable ();
    }

  switch (GET_CODE (src))
    {
    case REG:
      PUT_MODE (src, V1TImode);
      /* Call fix_debug_reg_uses only if SRC is never defined.  */
      if (!DF_REG_DEF_CHAIN (REGNO (src)))
	fix_debug_reg_uses (src);
      break;

    case MEM:
      PUT_MODE (src, V1TImode);
      break;

    case CONST_WIDE_INT:
      if (NONDEBUG_INSN_P (insn))
	{
	  /* Since there are no instructions to store a 128-bit
	     constant, a temporary register is required.  */
	  rtx tmp = gen_reg_rtx (V1TImode);
	  start_sequence ();
	  src = gen_rtx_CONST_VECTOR (V1TImode, gen_rtvec (1, src));
	  src = validize_mem (force_const_mem (V1TImode, src));
	  rtx_insn *seq = get_insns ();
	  end_sequence ();
	  if (seq)
	    emit_insn_before (seq, insn);
	  emit_conversion_insns (gen_rtx_SET (dst, tmp), insn);
	  dst = tmp;
	}
      break;

    case CONST_INT:
      switch (standard_sse_constant_p (src, TImode))
	{
	case 1:
	  src = CONST0_RTX (GET_MODE (dst));
	  break;
	case 2:
	  src = CONSTM1_RTX (GET_MODE (dst));
	  break;
	default:
	  gcc_unreachable ();
	}
      if (NONDEBUG_INSN_P (insn))
	{
	  rtx tmp = gen_reg_rtx (V1TImode);
	  /* Since there are no instructions to store a standard
	     SSE constant, a temporary register is required.  */
	  emit_conversion_insns (gen_rtx_SET (dst, tmp), insn);
	  dst = tmp;
	}
      break;

    default:
      gcc_unreachable ();
    }

  SET_SRC (def_set) = src;
  SET_DEST (def_set) = dst;

  /* Drop possible dead definitions.  */
  PATTERN (insn) = def_set;

  INSN_CODE (insn) = -1;
  recog_memoized (insn);
  df_insn_rescan (insn);
}

void
dimode_scalar_chain::convert_registers ()
{
  bitmap_iterator bi;
  unsigned id;

  EXECUTE_IF_SET_IN_BITMAP (defs, 0, id, bi)
    convert_reg (id);

  EXECUTE_IF_AND_COMPL_IN_BITMAP (defs_conv, defs, 0, id, bi)
    make_vector_copies (id);
}

/* Convert the whole chain, creating the required register
   conversions and copies.  */

int
scalar_chain::convert ()
{
  bitmap_iterator bi;
  unsigned id;
  int converted_insns = 0;

  if (!dbg_cnt (stv_conversion))
    return 0;

  if (dump_file)
    fprintf (dump_file, "Converting chain #%d...\n", chain_id);

  convert_registers ();

  EXECUTE_IF_SET_IN_BITMAP (insns, 0, id, bi)
    {
      convert_insn (DF_INSN_UID_GET (id)->insn);
      converted_insns++;
    }

  return converted_insns;
}

/* Main STV pass function.  Find and convert scalar
   instructions into vector mode when profitable.  */
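
/* For example (illustrative): on a 32-bit target a 64-bit bitwise AND
   would normally be lowered to two 32-bit "and" instructions; when the
   gain estimate is positive, this pass instead keeps the operation in
   V2DImode so that a single SSE2 "pand" can be used.  */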

static unsigned int
convert_scalars_to_vector ()
{
  basic_block bb;
  bitmap candidates;
  int converted_insns = 0;

  bitmap_obstack_initialize (NULL);
  candidates = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  df_set_flags (DF_DEFER_INSN_RESCAN);
  df_chain_add_problem (DF_DU_CHAIN | DF_UD_CHAIN);
  df_md_add_problem ();
  df_analyze ();

  /* Find all instructions we want to convert into vector mode.  */
  if (dump_file)
    fprintf (dump_file, "Searching for mode conversion candidates...\n");

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;
      FOR_BB_INSNS (bb, insn)
	if (scalar_to_vector_candidate_p (insn))
	  {
	    if (dump_file)
	      fprintf (dump_file, "  insn %d is marked as a candidate\n",
		       INSN_UID (insn));

	    bitmap_set_bit (candidates, INSN_UID (insn));
	  }
    }

  remove_non_convertible_regs (candidates);

  if (bitmap_empty_p (candidates))
    if (dump_file)
      fprintf (dump_file, "There are no candidates for optimization.\n");

  while (!bitmap_empty_p (candidates))
    {
      unsigned uid = bitmap_first_set_bit (candidates);
      scalar_chain *chain;

      if (TARGET_64BIT)
	chain = new timode_scalar_chain;
      else
	chain = new dimode_scalar_chain;

      /* Find the instruction chain we want to convert to vector mode.
	 Check all uses and definitions to estimate all required
	 conversions.  */
      chain->build (candidates, uid);

      if (chain->compute_convert_gain () > 0)
	converted_insns += chain->convert ();
      else
	if (dump_file)
	  fprintf (dump_file, "Chain #%d conversion is not profitable\n",
		   chain->chain_id);

      delete chain;
    }

  if (dump_file)
    fprintf (dump_file, "Total insns converted: %d\n", converted_insns);

  BITMAP_FREE (candidates);
  bitmap_obstack_release (NULL);
  df_process_deferred_rescans ();

  /* Conversion means we may have 128-bit register spills/fills,
     which require an aligned stack.  */
  if (converted_insns)
    {
      if (crtl->stack_alignment_needed < 128)
	crtl->stack_alignment_needed = 128;
      if (crtl->stack_alignment_estimated < 128)
	crtl->stack_alignment_estimated = 128;
      /* Fix up DECL_RTL/DECL_INCOMING_RTL of arguments.  */
      if (TARGET_64BIT)
	for (tree parm = DECL_ARGUMENTS (current_function_decl);
	     parm; parm = DECL_CHAIN (parm))
	  {
	    if (TYPE_MODE (TREE_TYPE (parm)) != TImode)
	      continue;
	    if (DECL_RTL_SET_P (parm)
		&& GET_MODE (DECL_RTL (parm)) == V1TImode)
	      {
		rtx r = DECL_RTL (parm);
		if (REG_P (r))
		  SET_DECL_RTL (parm, gen_rtx_SUBREG (TImode, r, 0));
	      }
	    if (DECL_INCOMING_RTL (parm)
		&& GET_MODE (DECL_INCOMING_RTL (parm)) == V1TImode)
	      {
		rtx r = DECL_INCOMING_RTL (parm);
		if (REG_P (r))
		  DECL_INCOMING_RTL (parm) = gen_rtx_SUBREG (TImode, r, 0);
	      }
	  }
    }

  return 0;
}

namespace {

const pass_data pass_data_insert_vzeroupper =
{
  RTL_PASS, /* type */
  "vzeroupper", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MACH_DEP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_insert_vzeroupper : public rtl_opt_pass
{
public:
  pass_insert_vzeroupper (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_insert_vzeroupper, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return TARGET_AVX
	     && TARGET_VZEROUPPER && flag_expensive_optimizations
	     && !optimize_size;
    }

  virtual unsigned int execute (function *)
    {
      return rest_of_handle_insert_vzeroupper ();
    }

}; // class pass_insert_vzeroupper

const pass_data pass_data_stv =
{
  RTL_PASS, /* type */
  "stv", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MACH_DEP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_stv : public rtl_opt_pass
{
public:
  pass_stv (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_stv, ctxt),
      timode_p (false)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return (timode_p == !!TARGET_64BIT
	      && TARGET_STV && TARGET_SSE2 && optimize > 1);
    }

  virtual unsigned int execute (function *)
    {
      return convert_scalars_to_vector ();
    }

  opt_pass *clone ()
    {
      return new pass_stv (m_ctxt);
    }

  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      timode_p = param;
    }

private:
  bool timode_p;
}; // class pass_stv

} // anon namespace

rtl_opt_pass *
make_pass_insert_vzeroupper (gcc::context *ctxt)
{
  return new pass_insert_vzeroupper (ctxt);
}

rtl_opt_pass *
make_pass_stv (gcc::context *ctxt)
{
  return new pass_stv (ctxt);
}

/* Inserting ENDBRANCH instructions.  */
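
/* For example, with -fcf-protection=branch each function that may be
   reached via an indirect branch gets an ENDBR ("endbr32"/"endbr64")
   marker at its entry; on CPUs without CET the instruction executes
   as a NOP.  */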

static unsigned int
rest_of_insert_endbranch (void)
{
  timevar_push (TV_MACH_DEP);

  rtx cet_eb;
  rtx_insn *insn;
  basic_block bb;

  /* Currently emit ENDBR if the function is trackable, i.e. 'nocf_check'
     is absent among the function attributes.  Later an optimization will
     be introduced to analyze whether the address of a static function is
     taken.  A static function whose address is not taken will get a
     nocf_check attribute, which will allow reducing the number of ENDBR
     instructions.  */

  if (!lookup_attribute ("nocf_check",
			 TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)))
      && !cgraph_node::get (cfun->decl)->only_called_directly_p ())
    {
      /* Queue ENDBR insertion to x86_function_profiler.  */
      if (crtl->profile && flag_fentry)
	cfun->machine->endbr_queued_at_entrance = true;
      else
	{
	  cet_eb = gen_nop_endbr ();

	  bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
	  insn = BB_HEAD (bb);
	  emit_insn_before (cet_eb, insn);
	}
    }

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb));
	   insn = NEXT_INSN (insn))
	{
	  if (CALL_P (insn))
	    {
	      bool need_endbr;
	      need_endbr = find_reg_note (insn, REG_SETJMP, NULL) != NULL;
	      if (!need_endbr && !SIBLING_CALL_P (insn))
		{
		  rtx call = get_call_rtx_from (insn);
		  rtx fnaddr = XEXP (call, 0);
		  tree fndecl = NULL_TREE;

		  /* Also generate ENDBRANCH for non-tail call which
		     may return via indirect branch.  */
		  if (GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
		    fndecl = SYMBOL_REF_DECL (XEXP (fnaddr, 0));
		  if (fndecl == NULL_TREE)
		    fndecl = MEM_EXPR (fnaddr);
		  if (fndecl
		      && TREE_CODE (TREE_TYPE (fndecl)) != FUNCTION_TYPE
		      && TREE_CODE (TREE_TYPE (fndecl)) != METHOD_TYPE)
		    fndecl = NULL_TREE;
		  if (fndecl && TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
		    {
		      tree fntype = TREE_TYPE (fndecl);
		      if (lookup_attribute ("indirect_return",
					    TYPE_ATTRIBUTES (fntype)))
			need_endbr = true;
		    }
		}
	      if (!need_endbr)
		continue;
	      /* Generate ENDBRANCH after a CALL that can return more
		 than once (setjmp-like functions).  */

	      cet_eb = gen_nop_endbr ();
	      emit_insn_after_setloc (cet_eb, insn, INSN_LOCATION (insn));
	      continue;
	    }

	  if (JUMP_P (insn) && flag_cet_switch)
	    {
	      rtx target = JUMP_LABEL (insn);
	      if (target == NULL_RTX || ANY_RETURN_P (target))
		continue;

	      /* Check whether the jump goes through a switch table.  */
	      rtx_insn *label = as_a<rtx_insn *> (target);
	      rtx_insn *table = next_insn (label);
	      if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
		continue;

	      /* For an indirect jump, find all the places it can jump
		 to and insert ENDBRANCH there.  This is done under a
		 special flag that controls ENDBRANCH generation for
		 switch statements.  */
	      edge_iterator ei;
	      edge e;
	      basic_block dest_blk;

	      FOR_EACH_EDGE (e, ei, bb->succs)
		{
		  rtx_insn *insn;

		  dest_blk = e->dest;
		  insn = BB_HEAD (dest_blk);
		  gcc_assert (LABEL_P (insn));
		  cet_eb = gen_nop_endbr ();
		  emit_insn_after (cet_eb, insn);
		}
	      continue;
	    }

	  if ((LABEL_P (insn) && LABEL_PRESERVE_P (insn))
	      || (NOTE_P (insn)
		  && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
	    /* TODO.  Check /s bit also.  */
	    {
	      cet_eb = gen_nop_endbr ();
	      emit_insn_after (cet_eb, insn);
	      continue;
	    }
	}
    }

  timevar_pop (TV_MACH_DEP);
  return 0;
}

namespace {

const pass_data pass_data_insert_endbranch =
{
  RTL_PASS, /* type.  */
  "cet", /* name.  */
  OPTGROUP_NONE, /* optinfo_flags.  */
  TV_MACH_DEP, /* tv_id.  */
  0, /* properties_required.  */
  0, /* properties_provided.  */
  0, /* properties_destroyed.  */
  0, /* todo_flags_start.  */
  0, /* todo_flags_finish.  */
};

class pass_insert_endbranch : public rtl_opt_pass
{
public:
  pass_insert_endbranch (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_insert_endbranch, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return (flag_cf_protection & CF_BRANCH) != 0;
    }

  virtual unsigned int execute (function *)
    {
      return rest_of_insert_endbranch ();
    }

}; // class pass_insert_endbranch

} // anon namespace

rtl_opt_pass *
make_pass_insert_endbranch (gcc::context *ctxt)
{
  return new pass_insert_endbranch (ctxt);
}

/* Return true if a red-zone is in use.  We can't use a red-zone when
   there are local indirect jumps, like "indirect_jump" or "tablejump",
   which jump to another place in the function, since the "call" in the
   indirect thunk pushes the return address onto the stack, destroying
   the red-zone.

   TODO: If we can reserve the first 2 WORDs of the red-zone, one for
   PUSH and another for CALL, we can allow local indirect jumps with
   an indirect thunk.  */
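
/* A sketch of the problem (illustrative, not the actual thunk code):
   an indirect thunk replaces "jmp *%rax" with something like

	call	1f
	...
   1:	mov	%rax, (%rsp)
	ret

   and the inner CALL stores a return address below the stack pointer
   the jump was executed with, i.e. on top of red-zone data.  */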

bool
ix86_using_red_zone (void)
{
  return (TARGET_RED_ZONE
	  && !TARGET_64BIT_MS_ABI
	  && (!cfun->machine->has_local_indirect_jump
	      || cfun->machine->indirect_branch_type == indirect_branch_keep));
}

/* Return a string that documents the current -m options.  The caller is
   responsible for freeing the string.  */
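
/* A hypothetical example of the returned string (the exact options
   depend on the arguments passed in):

     "-m64 -march=skylake -mtune=generic -mfpmath=sse"  */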

static char *
ix86_target_string (HOST_WIDE_INT isa, HOST_WIDE_INT isa2,
		    int flags, int flags2,
		    const char *arch, const char *tune,
		    enum fpmath_unit fpmath, bool add_nl_p)
{
  struct ix86_target_opts
  {
    const char *option;		/* option string */
    HOST_WIDE_INT mask;		/* isa mask options */
  };

  /* This table is ordered so that options like -msse4.2 that imply other
     ISAs come first.  The target string will be displayed in the same
     order.  */
  static struct ix86_target_opts isa2_opts[] =
  {
    { "-mcx16",		OPTION_MASK_ISA_CX16 },
    { "-mvaes",		OPTION_MASK_ISA_VAES },
    { "-mrdpid",	OPTION_MASK_ISA_RDPID },
    { "-mpconfig",	OPTION_MASK_ISA_PCONFIG },
    { "-mwbnoinvd",     OPTION_MASK_ISA_WBNOINVD },
    { "-msgx",		OPTION_MASK_ISA_SGX },
    { "-mavx5124vnniw", OPTION_MASK_ISA_AVX5124VNNIW },
    { "-mavx5124fmaps", OPTION_MASK_ISA_AVX5124FMAPS },
    { "-mhle",		OPTION_MASK_ISA_HLE },
    { "-mmovbe",	OPTION_MASK_ISA_MOVBE },
    { "-mclzero",	OPTION_MASK_ISA_CLZERO },
    { "-mmwaitx",	OPTION_MASK_ISA_MWAITX },
    { "-mmovdir64b",	OPTION_MASK_ISA_MOVDIR64B },
    { "-mwaitpkg",	OPTION_MASK_ISA_WAITPKG },
    { "-mcldemote",	OPTION_MASK_ISA_CLDEMOTE }
  };
  static struct ix86_target_opts isa_opts[] =
  {
    { "-mavx512vpopcntdq", OPTION_MASK_ISA_AVX512VPOPCNTDQ },
    { "-mavx512bitalg", OPTION_MASK_ISA_AVX512BITALG },
    { "-mvpclmulqdq",	OPTION_MASK_ISA_VPCLMULQDQ },
    { "-mgfni",		OPTION_MASK_ISA_GFNI },
    { "-mavx512vnni",	OPTION_MASK_ISA_AVX512VNNI },
    { "-mavx512vbmi2",	OPTION_MASK_ISA_AVX512VBMI2 },
    { "-mavx512vbmi",	OPTION_MASK_ISA_AVX512VBMI },
    { "-mavx512ifma",	OPTION_MASK_ISA_AVX512IFMA },
    { "-mavx512vl",	OPTION_MASK_ISA_AVX512VL },
    { "-mavx512bw",	OPTION_MASK_ISA_AVX512BW },
    { "-mavx512dq",	OPTION_MASK_ISA_AVX512DQ },
    { "-mavx512er",	OPTION_MASK_ISA_AVX512ER },
    { "-mavx512pf",	OPTION_MASK_ISA_AVX512PF },
    { "-mavx512cd",	OPTION_MASK_ISA_AVX512CD },
    { "-mavx512f",	OPTION_MASK_ISA_AVX512F },
    { "-mavx2",		OPTION_MASK_ISA_AVX2 },
    { "-mfma",		OPTION_MASK_ISA_FMA },
    { "-mxop",		OPTION_MASK_ISA_XOP },
    { "-mfma4",		OPTION_MASK_ISA_FMA4 },
    { "-mf16c",		OPTION_MASK_ISA_F16C },
    { "-mavx",		OPTION_MASK_ISA_AVX },
/*  { "-msse4"		OPTION_MASK_ISA_SSE4 }, */
    { "-msse4.2",	OPTION_MASK_ISA_SSE4_2 },
    { "-msse4.1",	OPTION_MASK_ISA_SSE4_1 },
    { "-msse4a",	OPTION_MASK_ISA_SSE4A },
    { "-mssse3",	OPTION_MASK_ISA_SSSE3 },
    { "-msse3",		OPTION_MASK_ISA_SSE3 },
    { "-maes",		OPTION_MASK_ISA_AES },
    { "-msha",		OPTION_MASK_ISA_SHA },
    { "-mpclmul",	OPTION_MASK_ISA_PCLMUL },
    { "-msse2",		OPTION_MASK_ISA_SSE2 },
    { "-msse",		OPTION_MASK_ISA_SSE },
    { "-m3dnowa",	OPTION_MASK_ISA_3DNOW_A },
    { "-m3dnow",	OPTION_MASK_ISA_3DNOW },
    { "-mmmx",		OPTION_MASK_ISA_MMX },
    { "-mrtm",		OPTION_MASK_ISA_RTM },
    { "-mprfchw",	OPTION_MASK_ISA_PRFCHW },
    { "-mrdseed",	OPTION_MASK_ISA_RDSEED },
    { "-madx",		OPTION_MASK_ISA_ADX },
    { "-mprefetchwt1",	OPTION_MASK_ISA_PREFETCHWT1 },
    { "-mclflushopt",	OPTION_MASK_ISA_CLFLUSHOPT },
    { "-mxsaves",	OPTION_MASK_ISA_XSAVES },
    { "-mxsavec",	OPTION_MASK_ISA_XSAVEC },
    { "-mxsaveopt",	OPTION_MASK_ISA_XSAVEOPT },
    { "-mxsave",	OPTION_MASK_ISA_XSAVE },
    { "-mabm",		OPTION_MASK_ISA_ABM },
    { "-mbmi",		OPTION_MASK_ISA_BMI },
    { "-mbmi2",		OPTION_MASK_ISA_BMI2 },
    { "-mlzcnt",	OPTION_MASK_ISA_LZCNT },
    { "-mtbm",		OPTION_MASK_ISA_TBM },
    { "-mpopcnt",	OPTION_MASK_ISA_POPCNT },
    { "-msahf",		OPTION_MASK_ISA_SAHF },
    { "-mcrc32",	OPTION_MASK_ISA_CRC32 },
    { "-mfsgsbase",	OPTION_MASK_ISA_FSGSBASE },
    { "-mrdrnd",	OPTION_MASK_ISA_RDRND },
    { "-mpku",		OPTION_MASK_ISA_PKU },
    { "-mlwp",		OPTION_MASK_ISA_LWP },
    { "-mfxsr",		OPTION_MASK_ISA_FXSR },
    { "-mclwb",		OPTION_MASK_ISA_CLWB },
    { "-mshstk",	OPTION_MASK_ISA_SHSTK },
    { "-mmovdiri",	OPTION_MASK_ISA_MOVDIRI }
  };

  /* Flag options.  */
  static struct ix86_target_opts flag_opts[] =
  {
    { "-m128bit-long-double",		MASK_128BIT_LONG_DOUBLE },
    { "-mlong-double-128",		MASK_LONG_DOUBLE_128 },
    { "-mlong-double-64",		MASK_LONG_DOUBLE_64 },
    { "-m80387",			MASK_80387 },
    { "-maccumulate-outgoing-args",	MASK_ACCUMULATE_OUTGOING_ARGS },
    { "-malign-double",			MASK_ALIGN_DOUBLE },
    { "-mcld",				MASK_CLD },
    { "-mfp-ret-in-387",		MASK_FLOAT_RETURNS },
    { "-mieee-fp",			MASK_IEEE_FP },
    { "-minline-all-stringops",		MASK_INLINE_ALL_STRINGOPS },
    { "-minline-stringops-dynamically",	MASK_INLINE_STRINGOPS_DYNAMICALLY },
    { "-mms-bitfields",			MASK_MS_BITFIELD_LAYOUT },
    { "-mno-align-stringops",		MASK_NO_ALIGN_STRINGOPS },
    { "-mno-fancy-math-387",		MASK_NO_FANCY_MATH_387 },
    { "-mno-push-args",			MASK_NO_PUSH_ARGS },
    { "-mno-red-zone",			MASK_NO_RED_ZONE },
    { "-momit-leaf-frame-pointer",	MASK_OMIT_LEAF_FRAME_POINTER },
    { "-mrecip",			MASK_RECIP },
    { "-mrtd",				MASK_RTD },
    { "-msseregparm",			MASK_SSEREGPARM },
    { "-mstack-arg-probe",		MASK_STACK_PROBE },
    { "-mtls-direct-seg-refs",		MASK_TLS_DIRECT_SEG_REFS },
    { "-mvect8-ret-in-mem",		MASK_VECT8_RETURNS },
    { "-m8bit-idiv",			MASK_USE_8BIT_IDIV },
    { "-mvzeroupper",			MASK_VZEROUPPER },
    { "-mstv",				MASK_STV },
    { "-mavx256-split-unaligned-load",	MASK_AVX256_SPLIT_UNALIGNED_LOAD },
    { "-mavx256-split-unaligned-store",	MASK_AVX256_SPLIT_UNALIGNED_STORE },
    { "-mcall-ms2sysv-xlogues",		MASK_CALL_MS2SYSV_XLOGUES }
  };

  /* Additional flag options.  */
  static struct ix86_target_opts flag2_opts[] =
  {
    { "-mgeneral-regs-only",		OPTION_MASK_GENERAL_REGS_ONLY }
  };

  const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (isa2_opts)
		   + ARRAY_SIZE (flag_opts) + ARRAY_SIZE (flag2_opts) + 6][2];

  char isa_other[40];
  char isa2_other[40];
  char flags_other[40];
  char flags2_other[40];
  unsigned num = 0;
  unsigned i, j;
  char *ret;
  char *ptr;
  size_t len;
  size_t line_len;
  size_t sep_len;
  const char *abi;

  memset (opts, '\0', sizeof (opts));

  /* Add -march= option.  */
  if (arch)
    {
      opts[num][0] = "-march=";
      opts[num++][1] = arch;
    }

  /* Add -mtune= option.  */
  if (tune)
    {
      opts[num][0] = "-mtune=";
      opts[num++][1] = tune;
    }

  /* Add -m32/-m64/-mx32.  */
  if ((isa & OPTION_MASK_ISA_64BIT) != 0)
    {
      if ((isa & OPTION_MASK_ABI_64) != 0)
	abi = "-m64";
      else
	abi = "-mx32";
      isa &= ~ (OPTION_MASK_ISA_64BIT
		| OPTION_MASK_ABI_64
		| OPTION_MASK_ABI_X32);
    }
  else
    abi = "-m32";
  opts[num++][0] = abi;

  /* Pick out the options set in the isa2 mask.  */
  for (i = 0; i < ARRAY_SIZE (isa2_opts); i++)
    {
      if ((isa2 & isa2_opts[i].mask) != 0)
	{
	  opts[num++][0] = isa2_opts[i].option;
	  isa2 &= ~ isa2_opts[i].mask;
	}
    }

  if (isa2 && add_nl_p)
    {
      opts[num++][0] = isa2_other;
      sprintf (isa2_other, "(other isa2: %#" HOST_WIDE_INT_PRINT "x)", isa2);
    }

  /* Pick out the options set in the isa mask.  */
  for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
    {
      if ((isa & isa_opts[i].mask) != 0)
	{
	  opts[num++][0] = isa_opts[i].option;
	  isa &= ~ isa_opts[i].mask;
	}
    }

  if (isa && add_nl_p)
    {
      opts[num++][0] = isa_other;
      sprintf (isa_other, "(other isa: %#" HOST_WIDE_INT_PRINT "x)", isa);
    }

  /* Add flag options.  */
  for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
    {
      if ((flags & flag_opts[i].mask) != 0)
	{
	  opts[num++][0] = flag_opts[i].option;
	  flags &= ~ flag_opts[i].mask;
	}
    }

  if (flags && add_nl_p)
    {
      opts[num++][0] = flags_other;
      sprintf (flags_other, "(other flags: %#x)", flags);
    }

  /* Add additional flag options.  */
  for (i = 0; i < ARRAY_SIZE (flag2_opts); i++)
    {
      if ((flags2 & flag2_opts[i].mask) != 0)
	{
	  opts[num++][0] = flag2_opts[i].option;
	  flags2 &= ~ flag2_opts[i].mask;
	}
    }

  if (flags2 && add_nl_p)
    {
      opts[num++][0] = flags2_other;
      sprintf (flags2_other, "(other flags2: %#x)", flags2);
    }

  /* Add -fpmath= option.  */
  if (fpmath)
    {
      opts[num][0] = "-mfpmath=";
      switch ((int) fpmath)
	{
	case FPMATH_387:
	  opts[num++][1] = "387";
	  break;

	case FPMATH_SSE:
	  opts[num++][1] = "sse";
	  break;

	case FPMATH_387 | FPMATH_SSE:
	  opts[num++][1] = "sse+387";
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Any options?  */
  if (num == 0)
    return NULL;

  gcc_assert (num < ARRAY_SIZE (opts));

  /* Size the string.  */
  len = 0;
  sep_len = (add_nl_p) ? 3 : 1;
  for (i = 0; i < num; i++)
    {
      len += sep_len;
      for (j = 0; j < 2; j++)
	if (opts[i][j])
	  len += strlen (opts[i][j]);
    }

  /* Build the string.  */
  ret = ptr = (char *) xmalloc (len);
  line_len = 0;

  for (i = 0; i < num; i++)
    {
      size_t len2[2];

      for (j = 0; j < 2; j++)
	len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;

      if (i != 0)
	{
	  *ptr++ = ' ';
	  line_len++;

	  if (add_nl_p && line_len + len2[0] + len2[1] > 70)
	    {
	      *ptr++ = '\\';
	      *ptr++ = '\n';
	      line_len = 0;
	    }
	}

      for (j = 0; j < 2; j++)
	if (opts[i][j])
	  {
	    memcpy (ptr, opts[i][j], len2[j]);
	    ptr += len2[j];
	    line_len += len2[j];
	  }
    }

  *ptr = '\0';
  gcc_assert (ret + len >= ptr);

  return ret;
}

/* Return true if profiling code should be emitted before the
   prologue; otherwise return false.
   Note: for x86 with "hotfix" a sorry () is issued.  */
static bool
ix86_profile_before_prologue (void)
{
  return flag_fentry != 0;
}

/* Function that is callable from the debugger to print the current
   options.  */
void ATTRIBUTE_UNUSED
ix86_debug_options (void)
{
  char *opts = ix86_target_string (ix86_isa_flags, ix86_isa_flags2,
				   target_flags, ix86_target_flags,
				   ix86_arch_string, ix86_tune_string,
				   ix86_fpmath, true);

  if (opts)
    {
      fprintf (stderr, "%s\n\n", opts);
      free (opts);
    }
  else
    fputs ("<no options>\n\n", stderr);

  return;
}

static const char *stringop_alg_names[] = {
#define DEF_ENUM
#define DEF_ALG(alg, name) #name,
#include "stringop.def"
#undef DEF_ENUM
#undef DEF_ALG
};

/* Parse the parameter string passed to -mmemcpy-strategy= or
   -mmemset-strategy=.  The string is of the following form (or a
   comma-separated list of such triples):

     strategy_alg:max_size:[align|noalign]

   where the full size range for the strategy is either [0, max_size] or
   [min_size, max_size], in which min_size is the max_size + 1 of the
   preceding range.  The last size range must have max_size == -1.

   Examples:

    1.
       -mmemcpy-strategy=libcall:-1:noalign

       This is equivalent (for known-size memcpy) to
       -mstringop-strategy=libcall.

    2.
       -mmemset-strategy=rep_8byte:16:noalign,vector_loop:2048:align,libcall:-1:noalign

       This tells the compiler to use the following strategy for memset:
       1) when the expected size is between [1, 16], use the rep_8byte
	  strategy;
       2) when the size is between [17, 2048], use vector_loop;
       3) when the size is > 2048, use libcall.  */

struct stringop_size_range
{
  int max;
  stringop_alg alg;
  bool noalign;
};

static void
ix86_parse_stringop_strategy_string (char *strategy_str, bool is_memset)
{
  const struct stringop_algs *default_algs;
  stringop_size_range input_ranges[MAX_STRINGOP_ALGS];
  char *curr_range_str, *next_range_str;
  const char *opt = is_memset ? "-mmemset-strategy=" : "-mmemcpy-strategy=";
  int i = 0, n = 0;

  if (is_memset)
    default_algs = &ix86_cost->memset[TARGET_64BIT != 0];
  else
    default_algs = &ix86_cost->memcpy[TARGET_64BIT != 0];

  curr_range_str = strategy_str;

  do
    {
      int maxs;
      char alg_name[128];
      char align[16];
      next_range_str = strchr (curr_range_str, ',');
      if (next_range_str)
        *next_range_str++ = '\0';

      if (sscanf (curr_range_str, "%20[^:]:%d:%10s", alg_name, &maxs,
		  align) != 3)
        {
	  error ("wrong argument %qs to option %qs", curr_range_str, opt);
          return;
        }

      if (n > 0 && (maxs < (input_ranges[n - 1].max + 1) && maxs != -1))
        {
	  error ("size ranges of option %qs should be increasing", opt);
          return;
        }

      for (i = 0; i < last_alg; i++)
	if (!strcmp (alg_name, stringop_alg_names[i]))
	  break;

      if (i == last_alg)
        {
	  error ("wrong strategy name %qs specified for option %qs",
		 alg_name, opt);

	  auto_vec <const char *> candidates;
	  for (i = 0; i < last_alg; i++)
	    if ((stringop_alg) i != rep_prefix_8_byte || TARGET_64BIT)
	      candidates.safe_push (stringop_alg_names[i]);

	  char *s;
	  const char *hint
	    = candidates_list_and_hint (alg_name, s, candidates);
	  if (hint)
	    inform (input_location,
		    "valid arguments to %qs are: %s; did you mean %qs?",
		    opt, s, hint);
	  else
	    inform (input_location, "valid arguments to %qs are: %s",
		    opt, s);
	  XDELETEVEC (s);
          return;
        }

      if ((stringop_alg) i == rep_prefix_8_byte
	  && !TARGET_64BIT)
	{
	  /* rep; movq isn't available in 32-bit code.  */
	  error ("strategy name %qs specified for option %qs "
		 "not supported for 32-bit code", alg_name, opt);
	  return;
	}

      /* Guard against writing past the end of INPUT_RANGES.  */
      if (n == MAX_STRINGOP_ALGS)
	{
	  error ("too many size ranges specified in option %qs", opt);
	  return;
	}

      input_ranges[n].max = maxs;
      input_ranges[n].alg = (stringop_alg) i;
      if (!strcmp (align, "align"))
        input_ranges[n].noalign = false;
      else if (!strcmp (align, "noalign"))
        input_ranges[n].noalign = true;
      else
        {
	  error ("unknown alignment %qs specified for option %qs", align, opt);
          return;
        }
      n++;
      curr_range_str = next_range_str;
    }
  while (curr_range_str);

  if (input_ranges[n - 1].max != -1)
    {
      error ("the max value for the last size range should be -1"
             " for option %qs", opt);
      return;
    }

  /* Now override the default algs array.  */
  for (i = 0; i < n; i++)
    {
      *const_cast<int *>(&default_algs->size[i].max) = input_ranges[i].max;
      *const_cast<stringop_alg *>(&default_algs->size[i].alg)
          = input_ranges[i].alg;
      *const_cast<int *>(&default_algs->size[i].noalign)
          = input_ranges[i].noalign;
    }
}


/* Parse the -mtune-ctrl= option.  When DUMP is true,
   print the features that are explicitly set.  */
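
/* An illustrative example (feature names come from x86-tune.def; the
   two below are used purely for illustration):

     -mtune-ctrl=use_leave,^prologue_using_move

   enables X86_TUNE_USE_LEAVE and disables
   X86_TUNE_PROLOGUE_USING_MOVE.  */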

static void
parse_mtune_ctrl_str (bool dump)
{
  if (!ix86_tune_ctrl_string)
    return;

  char *next_feature_string = NULL;
  char *curr_feature_string = xstrdup (ix86_tune_ctrl_string);
  char *orig = curr_feature_string;
  int i;
  do
    {
      bool clear = false;

      next_feature_string = strchr (curr_feature_string, ',');
      if (next_feature_string)
        *next_feature_string++ = '\0';
      if (*curr_feature_string == '^')
        {
          curr_feature_string++;
          clear = true;
        }
      for (i = 0; i < X86_TUNE_LAST; i++)
        {
          if (!strcmp (curr_feature_string, ix86_tune_feature_names[i]))
            {
              ix86_tune_features[i] = !clear;
              if (dump)
                fprintf (stderr, "Explicitly %s feature %s\n",
                         clear ? "clear" : "set", ix86_tune_feature_names[i]);
              break;
            }
        }
      if (i == X86_TUNE_LAST)
        error ("unknown parameter to option -mtune-ctrl: %s",
               clear ? curr_feature_string - 1 : curr_feature_string);
      curr_feature_string = next_feature_string;
    }
  while (curr_feature_string);
  free (orig);
}

/* Helper function to set ix86_tune_features. IX86_TUNE is the
   processor type.  */

static void
set_ix86_tune_features (enum processor_type ix86_tune, bool dump)
{
  unsigned HOST_WIDE_INT ix86_tune_mask = HOST_WIDE_INT_1U << ix86_tune;
  int i;

  for (i = 0; i < X86_TUNE_LAST; ++i)
    {
      if (ix86_tune_no_default)
        ix86_tune_features[i] = 0;
      else
	ix86_tune_features[i]
	  = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
    }

  if (dump)
    {
      fprintf (stderr, "List of x86 specific tuning parameter names:\n");
      for (i = 0; i < X86_TUNE_LAST; i++)
        fprintf (stderr, "%s : %s\n", ix86_tune_feature_names[i],
                 ix86_tune_features[i] ? "on" : "off");
    }

  parse_mtune_ctrl_str (dump);
}


/* Default align_* from the processor table.  */

static void
ix86_default_align (struct gcc_options *opts)
{
  /* -falign-foo without argument: supply one.  */
  if (opts->x_flag_align_loops && !opts->x_str_align_loops)
    opts->x_str_align_loops = processor_cost_table[ix86_tune]->align_loop;
  if (opts->x_flag_align_jumps && !opts->x_str_align_jumps)
    opts->x_str_align_jumps = processor_cost_table[ix86_tune]->align_jump;
  if (opts->x_flag_align_labels && !opts->x_str_align_labels)
    opts->x_str_align_labels = processor_cost_table[ix86_tune]->align_label;
  if (opts->x_flag_align_functions && !opts->x_str_align_functions)
    opts->x_str_align_functions = processor_cost_table[ix86_tune]->align_func;
}

/* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE hook.  */

static void
ix86_override_options_after_change (void)
{
  ix86_default_align (&global_options);
}



/* Override various settings based on options.  If MAIN_ARGS_P, the
   options are from the command line, otherwise they are from
   attributes.  Return false if there is an error related to the
   -march option.  */

static bool
ix86_option_override_internal (bool main_args_p,
			       struct gcc_options *opts,
			       struct gcc_options *opts_set)
{
  int i;
  unsigned HOST_WIDE_INT ix86_arch_mask;
  const bool ix86_tune_specified = (opts->x_ix86_tune_string != NULL);

  /* -mrecip options.  */
  static struct
    {
      const char *string;           /* option name */
      unsigned int mask;            /* mask bits to set */
    }
  const recip_options[] =
    {
      { "all",       RECIP_MASK_ALL },
      { "none",      RECIP_MASK_NONE },
      { "div",       RECIP_MASK_DIV },
      { "sqrt",      RECIP_MASK_SQRT },
      { "vec-div",   RECIP_MASK_VEC_DIV },
      { "vec-sqrt",  RECIP_MASK_VEC_SQRT },
    };


  /* Turn off both OPTION_MASK_ABI_64 and OPTION_MASK_ABI_X32 if
     TARGET_64BIT_DEFAULT is true and TARGET_64BIT is false.  */
  if (TARGET_64BIT_DEFAULT && !TARGET_64BIT_P (opts->x_ix86_isa_flags))
    opts->x_ix86_isa_flags &= ~(OPTION_MASK_ABI_64 | OPTION_MASK_ABI_X32);
#ifdef TARGET_BI_ARCH
  else
    {
#if TARGET_BI_ARCH == 1
      /* When TARGET_BI_ARCH == 1, by default, OPTION_MASK_ABI_64
	 is on and OPTION_MASK_ABI_X32 is off.  We turn off
	 OPTION_MASK_ABI_64 if OPTION_MASK_ABI_X32 is turned on by
	 -mx32.  */
      if (TARGET_X32_P (opts->x_ix86_isa_flags))
	opts->x_ix86_isa_flags &= ~OPTION_MASK_ABI_64;
#else
      /* When TARGET_BI_ARCH == 2, by default, OPTION_MASK_ABI_X32 is
	 on and OPTION_MASK_ABI_64 is off.  We turn off
	 OPTION_MASK_ABI_X32 if OPTION_MASK_ABI_64 is turned on by
	 -m64 or OPTION_MASK_CODE16 is turned on by -m16.  */
      if (TARGET_LP64_P (opts->x_ix86_isa_flags)
	  || TARGET_16BIT_P (opts->x_ix86_isa_flags))
	opts->x_ix86_isa_flags &= ~OPTION_MASK_ABI_X32;
#endif
      if (TARGET_64BIT_P (opts->x_ix86_isa_flags)
	  && TARGET_IAMCU_P (opts->x_target_flags))
	sorry ("Intel MCU psABI isn%'t supported in %s mode",
	       TARGET_X32_P (opts->x_ix86_isa_flags) ? "x32" : "64-bit");
    }
#endif

  if (TARGET_X32_P (opts->x_ix86_isa_flags))
    {
      /* Always turn on OPTION_MASK_ISA_64BIT and turn off
	 OPTION_MASK_ABI_64 for TARGET_X32.  */
      opts->x_ix86_isa_flags |= OPTION_MASK_ISA_64BIT;
      opts->x_ix86_isa_flags &= ~OPTION_MASK_ABI_64;
    }
  else if (TARGET_16BIT_P (opts->x_ix86_isa_flags))
    opts->x_ix86_isa_flags &= ~(OPTION_MASK_ISA_64BIT
				| OPTION_MASK_ABI_X32
				| OPTION_MASK_ABI_64);
  else if (TARGET_LP64_P (opts->x_ix86_isa_flags))
    {
      /* Always turn on OPTION_MASK_ISA_64BIT and turn off
	 OPTION_MASK_ABI_X32 for TARGET_LP64.  */
      opts->x_ix86_isa_flags |= OPTION_MASK_ISA_64BIT;
      opts->x_ix86_isa_flags &= ~OPTION_MASK_ABI_X32;
    }

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif

  /* On Darwin, -fPIC is the default for x86_64.  */
  if (TARGET_MACHO && TARGET_64BIT_P (opts->x_ix86_isa_flags))
    opts->x_flag_pic = 2;

  /* Need to check -mtune=generic first.  */
  if (opts->x_ix86_tune_string)
    {
      /* As special support for cross compilers we read -mtune=native
	 as -mtune=generic.  With native compilers we won't see the
	 -mtune=native, as it was changed by the driver.  */
      if (!strcmp (opts->x_ix86_tune_string, "native"))
	{
	  opts->x_ix86_tune_string = "generic";
	}
      else if (!strcmp (opts->x_ix86_tune_string, "x86-64"))
        warning (OPT_Wdeprecated,
		 main_args_p
		 ? G_("%<-mtune=x86-64%> is deprecated; use %<-mtune=k8%> "
		      "or %<-mtune=generic%> instead as appropriate")
		 : G_("%<target(\"tune=x86-64\")%> is deprecated; use "
		      "%<target(\"tune=k8\")%> or %<target(\"tune=generic\")%>"
		      " instead as appropriate"));
    }
  else
    {
      if (opts->x_ix86_arch_string)
	opts->x_ix86_tune_string = opts->x_ix86_arch_string;
      if (!opts->x_ix86_tune_string)
	{
	  opts->x_ix86_tune_string = processor_names[TARGET_CPU_DEFAULT];
	  ix86_tune_defaulted = 1;
	}

      /* opts->x_ix86_tune_string is set to opts->x_ix86_arch_string
	 or defaulted.  We need to use a sensible tune option.  */
      if (!strcmp (opts->x_ix86_tune_string, "x86-64"))
	{
	  opts->x_ix86_tune_string = "generic";
	}
    }

  if (opts->x_ix86_stringop_alg == rep_prefix_8_byte
      && !TARGET_64BIT_P (opts->x_ix86_isa_flags))
    {
      /* rep; movq isn't available in 32-bit code.  */
      error ("-mstringop-strategy=rep_8byte not supported for 32-bit code");
      opts->x_ix86_stringop_alg = no_stringop;
    }

  if (!opts->x_ix86_arch_string)
    opts->x_ix86_arch_string
      = TARGET_64BIT_P (opts->x_ix86_isa_flags)
	? "x86-64" : SUBTARGET32_DEFAULT_CPU;
  else
    ix86_arch_specified = 1;

  if (opts_set->x_ix86_pmode)
    {
      if ((TARGET_LP64_P (opts->x_ix86_isa_flags)
	   && opts->x_ix86_pmode == PMODE_SI)
	  || (!TARGET_64BIT_P (opts->x_ix86_isa_flags)
	       && opts->x_ix86_pmode == PMODE_DI))
	error ("address mode %qs not supported in the %s bit mode",
	       TARGET_64BIT_P (opts->x_ix86_isa_flags) ? "short" : "long",
	       TARGET_64BIT_P (opts->x_ix86_isa_flags) ? "64" : "32");
    }
  else
    opts->x_ix86_pmode = TARGET_LP64_P (opts->x_ix86_isa_flags)
			 ? PMODE_DI : PMODE_SI;

  if (!opts_set->x_ix86_abi)
    opts->x_ix86_abi = DEFAULT_ABI;

  if (opts->x_ix86_abi == MS_ABI && TARGET_X32_P (opts->x_ix86_isa_flags))
    error ("-mabi=ms not supported with X32 ABI");
  gcc_assert (opts->x_ix86_abi == SYSV_ABI || opts->x_ix86_abi == MS_ABI);

  /* For targets using the ms ABI enable ms-extensions, if not
     explicitly turned off.  For the non-ms ABI we turn off this
     option.  */
  if (!opts_set->x_flag_ms_extensions)
    opts->x_flag_ms_extensions = (MS_ABI == DEFAULT_ABI);

  if (opts_set->x_ix86_cmodel)
    {
      switch (opts->x_ix86_cmodel)
	{
	case CM_SMALL:
	case CM_SMALL_PIC:
	  if (opts->x_flag_pic)
	    opts->x_ix86_cmodel = CM_SMALL_PIC;
	  if (!TARGET_64BIT_P (opts->x_ix86_isa_flags))
	    error ("code model %qs not supported in the %s bit mode",
		   "small", "32");
	  break;

	case CM_MEDIUM:
	case CM_MEDIUM_PIC:
	  if (opts->x_flag_pic)
	    opts->x_ix86_cmodel = CM_MEDIUM_PIC;
	  if (!TARGET_64BIT_P (opts->x_ix86_isa_flags))
	    error ("code model %qs not supported in the %s bit mode",
		   "medium", "32");
	  else if (TARGET_X32_P (opts->x_ix86_isa_flags))
	    error ("code model %qs not supported in x32 mode",
		   "medium");
	  break;

	case CM_LARGE:
	case CM_LARGE_PIC:
	  if (opts->x_flag_pic)
	    opts->x_ix86_cmodel = CM_LARGE_PIC;
	  if (!TARGET_64BIT_P (opts->x_ix86_isa_flags))
	    error ("code model %qs not supported in the %s bit mode",
		   "large", "32");
	  else if (TARGET_X32_P (opts->x_ix86_isa_flags))
	    error ("code model %qs not supported in x32 mode",
		   "large");
	  break;

	case CM_32:
	  if (opts->x_flag_pic)
	    error ("code model %s does not support PIC mode", "32");
	  if (TARGET_64BIT_P (opts->x_ix86_isa_flags))
	    error ("code model %qs not supported in the %s bit mode",
		   "32", "64");
	  break;

	case CM_KERNEL:
	  if (opts->x_flag_pic)
	    {
	      error ("code model %s does not support PIC mode", "kernel");
	      opts->x_ix86_cmodel = CM_32;
	    }
	  if (!TARGET_64BIT_P (opts->x_ix86_isa_flags))
	    error ("code model %qs not supported in the %s bit mode",
		   "kernel", "32");
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
	 use of rip-relative addressing.  This eliminates fixups that
	 would otherwise be needed if this object is to be placed in a
	 DLL, and is essentially just as efficient as direct addressing.  */
      if (TARGET_64BIT_P (opts->x_ix86_isa_flags)
	  && (TARGET_RDOS || TARGET_PECOFF))
	opts->x_ix86_cmodel = CM_MEDIUM_PIC, opts->x_flag_pic = 1;
      else if (TARGET_64BIT_P (opts->x_ix86_isa_flags))
	opts->x_ix86_cmodel = opts->x_flag_pic ? CM_SMALL_PIC : CM_SMALL;
      else
	opts->x_ix86_cmodel = CM_32;
    }
  if (TARGET_MACHO && opts->x_ix86_asm_dialect == ASM_INTEL)
    {
      error ("-masm=intel not supported in this configuration");
      opts->x_ix86_asm_dialect = ASM_ATT;
    }
  if ((TARGET_64BIT_P (opts->x_ix86_isa_flags) != 0)
      != ((opts->x_ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
    sorry ("%i-bit mode not compiled in",
	   (opts->x_ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);

  for (i = 0; i < pta_size; i++)
    if (! strcmp (opts->x_ix86_arch_string, processor_alias_table[i].name))
      {
	if (!strcmp (opts->x_ix86_arch_string, "generic"))
	  {
	    error (main_args_p
		   ? G_("%<generic%> CPU can be used only for %<-mtune=%> "
			"switch")
		   : G_("%<generic%> CPU can be used only for "
			"%<target(\"tune=\")%> attribute"));
	    return false;
	  }
	else if (!strcmp (opts->x_ix86_arch_string, "intel"))
	  {
	    error (main_args_p
		   ? G_("%<intel%> CPU can be used only for %<-mtune=%> "
			"switch")
		   : G_("%<intel%> CPU can be used only for "
			"%<target(\"tune=\")%> attribute"));
	    return false;
	  }

	if (TARGET_64BIT_P (opts->x_ix86_isa_flags)
	    && !((processor_alias_table[i].flags & PTA_64BIT) != 0))
	  {
	    error ("CPU you selected does not support x86-64 "
		   "instruction set");
	    return false;
	  }

	ix86_schedule = processor_alias_table[i].schedule;
	ix86_arch = processor_alias_table[i].processor;
	/* Default cpu tuning to the architecture.  */
	ix86_tune = ix86_arch;

	if (((processor_alias_table[i].flags & PTA_MMX) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_MMX;
	if (((processor_alias_table[i].flags & PTA_3DNOW) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
	if (((processor_alias_table[i].flags & PTA_3DNOW_A) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
	if (((processor_alias_table[i].flags & PTA_SSE) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE;
	if (((processor_alias_table[i].flags & PTA_SSE2) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
	if (((processor_alias_table[i].flags & PTA_SSE3) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
	if (((processor_alias_table[i].flags & PTA_SSSE3) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
	if (((processor_alias_table[i].flags & PTA_SSE4_1) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
	if (((processor_alias_table[i].flags & PTA_SSE4_2) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
	if (((processor_alias_table[i].flags & PTA_AVX) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX;
	if (((processor_alias_table[i].flags & PTA_AVX2) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX2))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX2;
	if (((processor_alias_table[i].flags & PTA_FMA) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_FMA;
	if (((processor_alias_table[i].flags & PTA_SSE4A) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
	if (((processor_alias_table[i].flags & PTA_FMA4) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
	if (((processor_alias_table[i].flags & PTA_XOP) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_XOP;
	if (((processor_alias_table[i].flags & PTA_LWP) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_LWP;
	if (((processor_alias_table[i].flags & PTA_ABM) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_ABM;
	if (((processor_alias_table[i].flags & PTA_BMI) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_BMI))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_BMI;
	if (((processor_alias_table[i].flags & (PTA_LZCNT | PTA_ABM)) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_LZCNT))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_LZCNT;
	if (((processor_alias_table[i].flags & PTA_TBM) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_TBM))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_TBM;
	if (((processor_alias_table[i].flags & PTA_BMI2) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_BMI2))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_BMI2;
	if (((processor_alias_table[i].flags & PTA_CX16) != 0)
	    && !(opts->x_ix86_isa_flags2_explicit & OPTION_MASK_ISA_CX16))
	  opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA_CX16;
	if (((processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
	if (!(TARGET_64BIT_P (opts->x_ix86_isa_flags)
	    && ((processor_alias_table[i].flags & PTA_NO_SAHF) != 0))
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
	if (((processor_alias_table[i].flags & PTA_MOVBE) != 0)
	    && !(opts->x_ix86_isa_flags2_explicit & OPTION_MASK_ISA_MOVBE))
	  opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA_MOVBE;
	if (((processor_alias_table[i].flags & PTA_AES) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AES;
	if (((processor_alias_table[i].flags & PTA_SHA) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_SHA))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_SHA;
	if (((processor_alias_table[i].flags & PTA_PCLMUL) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
	if (((processor_alias_table[i].flags & PTA_FSGSBASE) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_FSGSBASE))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_FSGSBASE;
	if (((processor_alias_table[i].flags & PTA_RDRND) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_RDRND))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_RDRND;
	if (((processor_alias_table[i].flags & PTA_F16C) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_F16C))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_F16C;
	if (((processor_alias_table[i].flags & PTA_RTM) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_RTM))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_RTM;
	if (((processor_alias_table[i].flags & PTA_HLE) != 0)
	    && !(opts->x_ix86_isa_flags2_explicit & OPTION_MASK_ISA_HLE))
	  opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA_HLE;
	if (((processor_alias_table[i].flags & PTA_PRFCHW) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_PRFCHW))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_PRFCHW;
	if (((processor_alias_table[i].flags & PTA_RDSEED) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_RDSEED))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_RDSEED;
	if (((processor_alias_table[i].flags & PTA_ADX) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_ADX))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_ADX;
	if (((processor_alias_table[i].flags & PTA_FXSR) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_FXSR))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_FXSR;
	if (((processor_alias_table[i].flags & PTA_XSAVE) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_XSAVE))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_XSAVE;
	if (((processor_alias_table[i].flags & PTA_XSAVEOPT) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_XSAVEOPT))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_XSAVEOPT;
	if (((processor_alias_table[i].flags & PTA_AVX512F) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX512F))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512F;
	if (((processor_alias_table[i].flags & PTA_AVX512ER) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX512ER))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512ER;
	if (((processor_alias_table[i].flags & PTA_AVX512PF) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX512PF))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512PF;
	if (((processor_alias_table[i].flags & PTA_AVX512CD) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX512CD))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512CD;
	if (((processor_alias_table[i].flags & PTA_PREFETCHWT1) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_PREFETCHWT1))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_PREFETCHWT1;
	if (((processor_alias_table[i].flags & PTA_CLWB) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_CLWB))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_CLWB;
	if (((processor_alias_table[i].flags & PTA_CLFLUSHOPT) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_CLFLUSHOPT))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_CLFLUSHOPT;
	if (((processor_alias_table[i].flags & PTA_CLZERO) != 0)
	    && !(opts->x_ix86_isa_flags2_explicit & OPTION_MASK_ISA_CLZERO))
	  opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA_CLZERO;
	if (((processor_alias_table[i].flags & PTA_XSAVEC) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_XSAVEC))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_XSAVEC;
	if (((processor_alias_table[i].flags & PTA_XSAVES) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_XSAVES))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_XSAVES;
	if (((processor_alias_table[i].flags & PTA_AVX512DQ) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX512DQ))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512DQ;
	if (((processor_alias_table[i].flags & PTA_AVX512BW) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX512BW))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512BW;
	if (((processor_alias_table[i].flags & PTA_AVX512VL) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX512VL))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512VL;
	if (((processor_alias_table[i].flags & PTA_AVX512VBMI) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX512VBMI))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512VBMI;
	if (((processor_alias_table[i].flags & PTA_AVX512IFMA) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX512IFMA))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512IFMA;
	if (((processor_alias_table[i].flags & PTA_AVX512VNNI) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX512VNNI))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512VNNI;
	if (((processor_alias_table[i].flags & PTA_GFNI) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_GFNI))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_GFNI;
	if (((processor_alias_table[i].flags & PTA_AVX512VBMI2) != 0)
	    && !(opts->x_ix86_isa_flags_explicit
		 & OPTION_MASK_ISA_AVX512VBMI2))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512VBMI2;
	if (((processor_alias_table[i].flags & PTA_VPCLMULQDQ) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_VPCLMULQDQ))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_VPCLMULQDQ;
	if (((processor_alias_table[i].flags & PTA_AVX512BITALG) != 0)
	    && !(opts->x_ix86_isa_flags_explicit
		 & OPTION_MASK_ISA_AVX512BITALG))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512BITALG;

	if (((processor_alias_table[i].flags & PTA_AVX5124VNNIW) != 0)
	    && !(opts->x_ix86_isa_flags2_explicit
		 & OPTION_MASK_ISA_AVX5124VNNIW))
	  opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA_AVX5124VNNIW;
	if (((processor_alias_table[i].flags & PTA_AVX5124FMAPS) != 0)
	    && !(opts->x_ix86_isa_flags2_explicit
		 & OPTION_MASK_ISA_AVX5124FMAPS))
	  opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA_AVX5124FMAPS;
	if (((processor_alias_table[i].flags & PTA_AVX512VPOPCNTDQ) != 0)
	    && !(opts->x_ix86_isa_flags_explicit
		 & OPTION_MASK_ISA_AVX512VPOPCNTDQ))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512VPOPCNTDQ;
	if (((processor_alias_table[i].flags & PTA_SGX) != 0)
	    && !(opts->x_ix86_isa_flags2_explicit & OPTION_MASK_ISA_SGX))
	  opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA_SGX;
	if (((processor_alias_table[i].flags & PTA_VAES) != 0)
	    && !(opts->x_ix86_isa_flags2_explicit & OPTION_MASK_ISA_VAES))
	  opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA_VAES;
	if (((processor_alias_table[i].flags & PTA_RDPID) != 0)
	    && !(opts->x_ix86_isa_flags2_explicit & OPTION_MASK_ISA_RDPID))
	  opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA_RDPID;
	if (((processor_alias_table[i].flags & PTA_PCONFIG) != 0)
	    && !(opts->x_ix86_isa_flags2_explicit & OPTION_MASK_ISA_PCONFIG))
	  opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA_PCONFIG;
	if (((processor_alias_table[i].flags & PTA_WBNOINVD) != 0)
	    && !(opts->x_ix86_isa_flags2_explicit & OPTION_MASK_ISA_WBNOINVD))
	  opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA_WBNOINVD;

	if ((processor_alias_table[i].flags
	   & (PTA_PREFETCH_SSE | PTA_SSE)) != 0)
	  x86_prefetch_sse = true;
	if (((processor_alias_table[i].flags & PTA_MWAITX) != 0)
	    && !(opts->x_ix86_isa_flags2_explicit & OPTION_MASK_ISA_MWAITX))
	  opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA_MWAITX;
	if (((processor_alias_table[i].flags & PTA_PKU) != 0)
	    && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_PKU))
	  opts->x_ix86_isa_flags |= OPTION_MASK_ISA_PKU;

	/* Don't enable x87 instructions if only
	   general registers are allowed.  */
	if (!(opts_set->x_ix86_target_flags & OPTION_MASK_GENERAL_REGS_ONLY)
	    && !(opts_set->x_target_flags & MASK_80387))
	  {
	    if (((processor_alias_table[i].flags & PTA_NO_80387) != 0))
	      opts->x_target_flags &= ~MASK_80387;
	    else
	      opts->x_target_flags |= MASK_80387;
	  }
	break;
      }

  if (i == pta_size)
    {
      error (main_args_p
	     ? G_("bad value (%qs) for %<-march=%> switch")
	     : G_("bad value (%qs) for %<target(\"arch=\")%> attribute"),
	     opts->x_ix86_arch_string);

      auto_vec <const char *> candidates;
      for (i = 0; i < pta_size; i++)
	if (strcmp (processor_alias_table[i].name, "generic")
	    && strcmp (processor_alias_table[i].name, "intel")
	    && (!TARGET_64BIT_P (opts->x_ix86_isa_flags)
		|| ((processor_alias_table[i].flags & PTA_64BIT) != 0)))
	  candidates.safe_push (processor_alias_table[i].name);

#ifdef HAVE_LOCAL_CPU_DETECT
      /* Also add "native" as a possible value.  */
      candidates.safe_push ("native");
#endif

      char *s;
      const char *hint
	= candidates_list_and_hint (opts->x_ix86_arch_string, s, candidates);
      if (hint)
	inform (input_location,
		main_args_p
		? G_("valid arguments to %<-march=%> switch are: "
		     "%s; did you mean %qs?")
		: G_("valid arguments to %<target(\"arch=\")%> attribute are: "
		     "%s; did you mean %qs?"), s, hint);
      else
	inform (input_location,
		main_args_p
		? G_("valid arguments to %<-march=%> switch are: %s")
		: G_("valid arguments to %<target(\"arch=\")%> attribute "
		     "are: %s"), s);
      XDELETEVEC (s);
    }

  ix86_arch_mask = HOST_WIDE_INT_1U << ix86_arch;
  for (i = 0; i < X86_ARCH_LAST; ++i)
    ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);

  for (i = 0; i < pta_size; i++)
    if (! strcmp (opts->x_ix86_tune_string, processor_alias_table[i].name))
      {
	ix86_schedule = processor_alias_table[i].schedule;
	ix86_tune = processor_alias_table[i].processor;
	if (TARGET_64BIT_P (opts->x_ix86_isa_flags))
	  {
	    if (!((processor_alias_table[i].flags & PTA_64BIT) != 0))
	      {
		if (ix86_tune_defaulted)
		  {
		    opts->x_ix86_tune_string = "x86-64";
		    for (i = 0; i < pta_size; i++)
		      if (! strcmp (opts->x_ix86_tune_string,
				    processor_alias_table[i].name))
			break;
		    ix86_schedule = processor_alias_table[i].schedule;
		    ix86_tune = processor_alias_table[i].processor;
		  }
		else
		  error ("CPU you selected does not support x86-64 "
			 "instruction set");
	      }
	  }
	/* Intel CPUs have always interpreted SSE prefetch instructions as
	   NOPs; so, we can enable SSE prefetch instructions even when
	   -mtune (rather than -march) points us to a processor that has them.
	   However, the VIA C3 gives a SIGILL, so we only do that for i686 and
	   higher processors.  */
	if (TARGET_CMOV
	    && ((processor_alias_table[i].flags
	      & (PTA_PREFETCH_SSE | PTA_SSE)) != 0))
	  x86_prefetch_sse = true;
	break;
      }

  if (ix86_tune_specified && i == pta_size)
    {
      error (main_args_p
	     ? G_("bad value (%qs) for %<-mtune=%> switch")
	     : G_("bad value (%qs) for %<target(\"tune=\")%> attribute"),
	     opts->x_ix86_tune_string);

      auto_vec <const char *> candidates;
      for (i = 0; i < pta_size; i++)
	if (!TARGET_64BIT_P (opts->x_ix86_isa_flags)
	    || ((processor_alias_table[i].flags & PTA_64BIT) != 0))
	  candidates.safe_push (processor_alias_table[i].name);

#ifdef HAVE_LOCAL_CPU_DETECT
      /* Also add "native" as a possible value.  */
      candidates.safe_push ("native");
#endif

      char *s;
      const char *hint
	= candidates_list_and_hint (opts->x_ix86_tune_string, s, candidates);
      if (hint)
	inform (input_location,
		main_args_p
		? G_("valid arguments to %<-mtune=%> switch are: "
		     "%s; did you mean %qs?")
		: G_("valid arguments to %<target(\"tune=\")%> attribute are: "
		     "%s; did you mean %qs?"), s, hint);
      else
	inform (input_location,
		main_args_p
		? G_("valid arguments to %<-mtune=%> switch are: %s")
		: G_("valid arguments to %<target(\"tune=\")%> attribute "
		     "are: %s"), s);
      XDELETEVEC (s);
    }

  set_ix86_tune_features (ix86_tune, opts->x_ix86_dump_tunes);

#ifndef USE_IX86_FRAME_POINTER
#define USE_IX86_FRAME_POINTER 0
#endif

#ifndef USE_X86_64_FRAME_POINTER
#define USE_X86_64_FRAME_POINTER 0
#endif

  /* Set the default values for switches whose default depends on TARGET_64BIT
     in case they weren't overwritten by command line options.  */
  if (TARGET_64BIT_P (opts->x_ix86_isa_flags))
    {
      if (opts->x_optimize >= 1 && !opts_set->x_flag_omit_frame_pointer)
	opts->x_flag_omit_frame_pointer = !USE_X86_64_FRAME_POINTER;
      if (opts->x_flag_asynchronous_unwind_tables
	  && !opts_set->x_flag_unwind_tables
	  && TARGET_64BIT_MS_ABI)
	opts->x_flag_unwind_tables = 1;
      if (opts->x_flag_asynchronous_unwind_tables == 2)
	opts->x_flag_unwind_tables
	  = opts->x_flag_asynchronous_unwind_tables = 1;
      if (opts->x_flag_pcc_struct_return == 2)
	opts->x_flag_pcc_struct_return = 0;
    }
  else
    {
      if (opts->x_optimize >= 1 && !opts_set->x_flag_omit_frame_pointer)
	opts->x_flag_omit_frame_pointer
	  = !(USE_IX86_FRAME_POINTER || opts->x_optimize_size);
      if (opts->x_flag_asynchronous_unwind_tables == 2)
	opts->x_flag_asynchronous_unwind_tables = !USE_IX86_FRAME_POINTER;
      if (opts->x_flag_pcc_struct_return == 2)
	{
	  /* Intel MCU psABI specifies that -freg-struct-return should
	     be on.  Instead of setting DEFAULT_PCC_STRUCT_RETURN to 1,
	     we check -miamcu so that -freg-struct-return is always
	     turned on if -miamcu is used.  */
	  if (TARGET_IAMCU_P (opts->x_target_flags))
	    opts->x_flag_pcc_struct_return = 0;
	  else
	    opts->x_flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
	}
    }

  ix86_tune_cost = processor_cost_table[ix86_tune];
  /* TODO: ix86_cost should be chosen at instruction or function granularity
     so for cold code we use size_cost even in !optimize_size compilation.  */
  if (opts->x_optimize_size)
    ix86_cost = &ix86_size_cost;
  else
    ix86_cost = ix86_tune_cost;

  /* Arrange to set up i386_stack_locals for all functions.  */
  init_machine_status = ix86_init_machine_status;

  /* Validate -mregparm= value.  */
  if (opts_set->x_ix86_regparm)
    {
      if (TARGET_64BIT_P (opts->x_ix86_isa_flags))
	warning (0, "-mregparm is ignored in 64-bit mode");
      else if (TARGET_IAMCU_P (opts->x_target_flags))
	warning (0, "-mregparm is ignored for Intel MCU psABI");
      if (opts->x_ix86_regparm > REGPARM_MAX)
	{
	  error ("-mregparm=%d is not between 0 and %d",
		 opts->x_ix86_regparm, REGPARM_MAX);
	  opts->x_ix86_regparm = 0;
	}
    }
  if (TARGET_IAMCU_P (opts->x_target_flags)
      || TARGET_64BIT_P (opts->x_ix86_isa_flags))
    opts->x_ix86_regparm = REGPARM_MAX;
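
  /* For example, with -mregparm=3 on a 32-bit target the first three
     integer arguments are passed in EAX, EDX and ECX rather than on the
     stack; for the IAMCU and 64-bit ABIs the assignment above simply pins
     the value at its maximum.  */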

  /* Default align_* from the processor table.  */
  ix86_default_align (opts);

  /* Provide default for -mbranch-cost= value.  */
  if (!opts_set->x_ix86_branch_cost)
    opts->x_ix86_branch_cost = ix86_tune_cost->branch_cost;

  if (TARGET_64BIT_P (opts->x_ix86_isa_flags))
    {
      opts->x_target_flags
	|= TARGET_SUBTARGET64_DEFAULT & ~opts_set->x_target_flags;

      /* Enable by default the SSE and MMX builtins.  Do allow the user to
	 explicitly disable any of these.  In particular, disabling SSE and
	 MMX for kernel code is extremely useful.  */
      if (!ix86_arch_specified)
	opts->x_ix86_isa_flags
	  |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
	       | TARGET_SUBTARGET64_ISA_DEFAULT)
	      & ~opts->x_ix86_isa_flags_explicit);

      if (TARGET_RTD_P (opts->x_target_flags))
	warning (0,
		 main_args_p
		 ? G_("%<-mrtd%> is ignored in 64bit mode")
		 : G_("%<target(\"rtd\")%> is ignored in 64bit mode"));
    }
  else
    {
      opts->x_target_flags
	|= TARGET_SUBTARGET32_DEFAULT & ~opts_set->x_target_flags;

      if (!ix86_arch_specified)
        opts->x_ix86_isa_flags
	  |= TARGET_SUBTARGET32_ISA_DEFAULT & ~opts->x_ix86_isa_flags_explicit;

      /* The i386 ABI does not specify the red zone.  It still makes sense
         to use it when the programmer takes care to keep the stack from
         being destroyed.  */
      if (!(opts_set->x_target_flags & MASK_NO_RED_ZONE))
        opts->x_target_flags |= MASK_NO_RED_ZONE;
    }

  /* Keep nonleaf frame pointers.  */
  if (opts->x_flag_omit_frame_pointer)
    opts->x_target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
  else if (TARGET_OMIT_LEAF_FRAME_POINTER_P (opts->x_target_flags))
    opts->x_flag_omit_frame_pointer = 1;

  /* If we're doing fast math, we don't care about comparison order
     wrt NaNs.  This lets us use a shorter comparison sequence.  */
  if (opts->x_flag_finite_math_only)
    opts->x_target_flags &= ~MASK_IEEE_FP;

  /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
     since the insns won't need emulation.  */
  if (ix86_tune_features [X86_TUNE_ALWAYS_FANCY_MATH_387])
    opts->x_target_flags &= ~MASK_NO_FANCY_MATH_387;

  /* Likewise, if the target doesn't have a 387, or we've specified
     software floating point, don't use 387 inline intrinsics.  */
  if (!TARGET_80387_P (opts->x_target_flags))
    opts->x_target_flags |= MASK_NO_FANCY_MATH_387;

  /* Turn on MMX builtins for -msse.  */
  if (TARGET_SSE_P (opts->x_ix86_isa_flags))
    opts->x_ix86_isa_flags
      |= OPTION_MASK_ISA_MMX & ~opts->x_ix86_isa_flags_explicit;

  /* Enable SSE prefetch.  */
  if (TARGET_SSE_P (opts->x_ix86_isa_flags)
      || (TARGET_PRFCHW_P (opts->x_ix86_isa_flags)
	  && !TARGET_3DNOW_P (opts->x_ix86_isa_flags))
      || TARGET_PREFETCHWT1_P (opts->x_ix86_isa_flags))
    x86_prefetch_sse = true;

  /* Enable popcnt instruction for -msse4.2 or -mabm.  */
  if (TARGET_SSE4_2_P (opts->x_ix86_isa_flags)
      || TARGET_ABM_P (opts->x_ix86_isa_flags))
    opts->x_ix86_isa_flags
      |= OPTION_MASK_ISA_POPCNT & ~opts->x_ix86_isa_flags_explicit;

  /* Enable lzcnt instruction for -mabm.  */
  if (TARGET_ABM_P (opts->x_ix86_isa_flags))
    opts->x_ix86_isa_flags
      |= OPTION_MASK_ISA_LZCNT & ~opts->x_ix86_isa_flags_explicit;

  /* Disable BMI, BMI2 and TBM instructions for -m16.  */
  if (TARGET_16BIT_P (opts->x_ix86_isa_flags))
    opts->x_ix86_isa_flags
      &= ~((OPTION_MASK_ISA_BMI | OPTION_MASK_ISA_BMI2 | OPTION_MASK_ISA_TBM)
	   & ~opts->x_ix86_isa_flags_explicit);

  /* Validate -mpreferred-stack-boundary= value or default it to
     PREFERRED_STACK_BOUNDARY_DEFAULT.  */
  ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
  if (opts_set->x_ix86_preferred_stack_boundary_arg)
    {
      int min = TARGET_64BIT_P (opts->x_ix86_isa_flags) ? 3 : 2;
      int max = TARGET_SEH ? 4 : 12;

      if (opts->x_ix86_preferred_stack_boundary_arg < min
	  || opts->x_ix86_preferred_stack_boundary_arg > max)
	{
	  if (min == max)
	    error ("-mpreferred-stack-boundary is not supported "
		   "for this target");
	  else
	    error ("-mpreferred-stack-boundary=%d is not between %d and %d",
		   opts->x_ix86_preferred_stack_boundary_arg, min, max);
	}
      else
	ix86_preferred_stack_boundary
	  = (1 << opts->x_ix86_preferred_stack_boundary_arg) * BITS_PER_UNIT;
    }
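
  /* The argument is the log2 of the alignment in bytes, so e.g.
     -mpreferred-stack-boundary=4 yields (1 << 4) * BITS_PER_UNIT = 128
     bits, i.e. the 16-byte stack alignment the 64-bit psABI assumes by
     default.  */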

  /* Set the default value for -mstackrealign.  */
  if (!opts_set->x_ix86_force_align_arg_pointer)
    opts->x_ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;

  ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;

  /* Validate -mincoming-stack-boundary= value or default it to
     MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY.  */
  ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
  if (opts_set->x_ix86_incoming_stack_boundary_arg)
    {
      int min = TARGET_64BIT_P (opts->x_ix86_isa_flags) ? 3 : 2;

      if (opts->x_ix86_incoming_stack_boundary_arg < min
	  || opts->x_ix86_incoming_stack_boundary_arg > 12)
	error ("-mincoming-stack-boundary=%d is not between %d and 12",
	       opts->x_ix86_incoming_stack_boundary_arg, min);
      else
	{
	  ix86_user_incoming_stack_boundary
	    = (1 << opts->x_ix86_incoming_stack_boundary_arg) * BITS_PER_UNIT;
	  ix86_incoming_stack_boundary
	    = ix86_user_incoming_stack_boundary;
	}
    }

#ifndef NO_PROFILE_COUNTERS
  if (flag_nop_mcount)
    error ("-mnop-mcount is not compatible with this target");
#endif
  if (flag_nop_mcount && flag_pic)
    error ("-mnop-mcount is not implemented for -fPIC");

  /* Accept -msseregparm only if at least SSE support is enabled.  */
  if (TARGET_SSEREGPARM_P (opts->x_target_flags)
      && ! TARGET_SSE_P (opts->x_ix86_isa_flags))
    error (main_args_p
	   ? G_("%<-msseregparm%> used without SSE enabled")
	   : G_("%<target(\"sseregparm\")%> used without SSE enabled"));

  if (opts_set->x_ix86_fpmath)
    {
      if (opts->x_ix86_fpmath & FPMATH_SSE)
	{
	  if (!TARGET_SSE_P (opts->x_ix86_isa_flags))
	    {
	      if (TARGET_80387_P (opts->x_target_flags))
		{
		  warning (0, "SSE instruction set disabled, using 387 arithmetics");
		  opts->x_ix86_fpmath = FPMATH_387;
		}
	    }
	  else if ((opts->x_ix86_fpmath & FPMATH_387)
		   && !TARGET_80387_P (opts->x_target_flags))
	    {
	      warning (0, "387 instruction set disabled, using SSE arithmetics");
	      opts->x_ix86_fpmath = FPMATH_SSE;
	    }
	}
    }
  /* For all chips supporting SSE2, -mfpmath=sse performs better than
     -mfpmath=387.  The latter is nevertheless the default on many targets,
     since the extra 80-bit precision of temporaries is considered part of
     the ABI.  Overwrite the default at least for -ffast-math.
     TODO: -mfpmath=both seems to produce similarly performing code with
     slightly smaller binaries.  It is however not clear if register
     allocation is ready for this setting.
     Also, -mfpmath=387 is overall a lot more compact (about 4-5%) than SSE
     codegen.  We may switch to 387 with -ffast-math for size-optimized
     functions.  */
  else if (fast_math_flags_set_p (&global_options)
	   && TARGET_SSE2_P (opts->x_ix86_isa_flags))
    opts->x_ix86_fpmath = FPMATH_SSE;
  else
    opts->x_ix86_fpmath = TARGET_FPMATH_DEFAULT_P (opts->x_ix86_isa_flags);

  /* Use an external vectorized library for vectorizing intrinsics.  */
  if (opts_set->x_ix86_veclibabi_type)
    switch (opts->x_ix86_veclibabi_type)
      {
      case ix86_veclibabi_type_svml:
	ix86_veclib_handler = ix86_veclibabi_svml;
	break;

      case ix86_veclibabi_type_acml:
	ix86_veclib_handler = ix86_veclibabi_acml;
	break;

      default:
	gcc_unreachable ();
      }

  if (ix86_tune_features [X86_TUNE_ACCUMULATE_OUTGOING_ARGS]
      && !(opts_set->x_target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
    opts->x_target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;

  /* If stack probes are required, the space used for large function
     arguments on the stack must also be probed, so enable
     -maccumulate-outgoing-args so this happens in the prologue.  */
  if (TARGET_STACK_PROBE_P (opts->x_target_flags)
      && !(opts->x_target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
    {
      if (opts_set->x_target_flags & MASK_ACCUMULATE_OUTGOING_ARGS)
	warning (0,
		 main_args_p
		 ? G_("stack probing requires %<-maccumulate-outgoing-args%> "
		      "for correctness")
		 : G_("stack probing requires "
		      "%<target(\"accumulate-outgoing-args\")%> for "
		      "correctness"));
      opts->x_target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
    }

  /* Stack realignment without -maccumulate-outgoing-args requires %ebp,
     so enable -maccumulate-outgoing-args when %ebp is fixed.  */
  if (fixed_regs[BP_REG]
      && !(opts->x_target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
    {
      if (opts_set->x_target_flags & MASK_ACCUMULATE_OUTGOING_ARGS)
	warning (0,
		 main_args_p
		 ? G_("fixed ebp register requires "
		      "%<-maccumulate-outgoing-args%>")
		 : G_("fixed ebp register requires "
		      "%<target(\"accumulate-outgoing-args\")%>"));
      opts->x_target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
    }

  /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix.  */
  {
    char *p;
    ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
    p = strchr (internal_label_prefix, 'X');
    internal_label_prefix_len = p - internal_label_prefix;
    *p = '\0';
  }

  /* When the scheduling description is not available, disable the scheduler
     pass so it won't slow down the compilation and make x87 code slower.  */
  if (!TARGET_SCHEDULE)
    opts->x_flag_schedule_insns_after_reload = opts->x_flag_schedule_insns = 0;

  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
			 ix86_tune_cost->simultaneous_prefetches,
			 opts->x_param_values,
			 opts_set->x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
			 ix86_tune_cost->prefetch_block,
			 opts->x_param_values,
			 opts_set->x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_SIZE,
			 ix86_tune_cost->l1_cache_size,
			 opts->x_param_values,
			 opts_set->x_param_values);
  maybe_set_param_value (PARAM_L2_CACHE_SIZE,
			 ix86_tune_cost->l2_cache_size,
			 opts->x_param_values,
			 opts_set->x_param_values);

  /* Enable software prefetching at -O3 for CPUs where prefetching is
     helpful.  */
  if (opts->x_flag_prefetch_loop_arrays < 0
      && HAVE_prefetch
      && (opts->x_optimize >= 3 || opts->x_flag_profile_use)
      && !opts->x_optimize_size
      && TARGET_SOFTWARE_PREFETCHING_BENEFICIAL)
    opts->x_flag_prefetch_loop_arrays = 1;

  /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
     can be optimized to ap = __builtin_next_arg (0).  */
  if (!TARGET_64BIT_P (opts->x_ix86_isa_flags) && !opts->x_flag_split_stack)
    targetm.expand_builtin_va_start = NULL;

  if (TARGET_64BIT_P (opts->x_ix86_isa_flags))
    {
      ix86_gen_leave = gen_leave_rex64;
      if (Pmode == DImode)
	{
	  ix86_gen_tls_global_dynamic_64 = gen_tls_global_dynamic_64_di;
	  ix86_gen_tls_local_dynamic_base_64
	    = gen_tls_local_dynamic_base_64_di;
	}
      else
	{
	  ix86_gen_tls_global_dynamic_64 = gen_tls_global_dynamic_64_si;
	  ix86_gen_tls_local_dynamic_base_64
	    = gen_tls_local_dynamic_base_64_si;
	}
    }
  else
    ix86_gen_leave = gen_leave;

  if (Pmode == DImode)
    {
      ix86_gen_add3 = gen_adddi3;
      ix86_gen_sub3 = gen_subdi3;
      ix86_gen_sub3_carry = gen_subdi3_carry;
      ix86_gen_one_cmpl2 = gen_one_cmpldi2;
      ix86_gen_andsp = gen_anddi3;
      ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_di;
      ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probedi;
      ix86_gen_probe_stack_range = gen_probe_stack_rangedi;
      ix86_gen_monitor = gen_sse3_monitor_di;
      ix86_gen_monitorx = gen_monitorx_di;
      ix86_gen_clzero = gen_clzero_di;
    }
  else
    {
      ix86_gen_add3 = gen_addsi3;
      ix86_gen_sub3 = gen_subsi3;
      ix86_gen_sub3_carry = gen_subsi3_carry;
      ix86_gen_one_cmpl2 = gen_one_cmplsi2;
      ix86_gen_andsp = gen_andsi3;
      ix86_gen_allocate_stack_worker = gen_allocate_stack_worker_probe_si;
      ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probesi;
      ix86_gen_probe_stack_range = gen_probe_stack_rangesi;
      ix86_gen_monitor = gen_sse3_monitor_si;
      ix86_gen_monitorx = gen_monitorx_si;
      ix86_gen_clzero = gen_clzero_si;
    }
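
  /* These hooks let the rest of the backend emit the right pattern without
     re-checking Pmode each time; e.g. a call such as
     ix86_gen_add3 (dst, src1, src2) produces an adddi3 or addsi3 insn as
     appropriate.  */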

#ifdef USE_IX86_CLD
  /* Use -mcld by default for 32-bit code if configured with --enable-cld.  */
  if (!TARGET_64BIT_P (opts->x_ix86_isa_flags))
    opts->x_target_flags |= MASK_CLD & ~opts_set->x_target_flags;
#endif

  /* Set the default value for -mfentry.  */
  if (!opts_set->x_flag_fentry)
    opts->x_flag_fentry = TARGET_SEH;
  else
    {
      if (!TARGET_64BIT_P (opts->x_ix86_isa_flags) && opts->x_flag_pic
	  && opts->x_flag_fentry)
	sorry ("-mfentry isn%'t supported for 32-bit in combination "
	       "with -fpic");
      else if (TARGET_SEH && !opts->x_flag_fentry)
	sorry ("-mno-fentry isn%'t compatible with SEH");
    }

  if (TARGET_SEH && TARGET_CALL_MS2SYSV_XLOGUES)
    sorry ("-mcall-ms2sysv-xlogues isn%'t currently supported with SEH");

  if (!(opts_set->x_target_flags & MASK_VZEROUPPER)
      && TARGET_EMIT_VZEROUPPER)
    opts->x_target_flags |= MASK_VZEROUPPER;
  if (!(opts_set->x_target_flags & MASK_STV))
    opts->x_target_flags |= MASK_STV;
  /* Disable STV if -mpreferred-stack-boundary={2,3} or
     -mincoming-stack-boundary={2,3} or -mstackrealign is used - the needed
     stack realignment will be an extra cost the pass doesn't take into
     account, and the pass can't realign the stack.  */
  if (ix86_preferred_stack_boundary < 128
      || ix86_incoming_stack_boundary < 128
      || opts->x_ix86_force_align_arg_pointer)
    opts->x_target_flags &= ~MASK_STV;
  if (!ix86_tune_features[X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL]
      && !(opts_set->x_target_flags & MASK_AVX256_SPLIT_UNALIGNED_LOAD))
    opts->x_target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD;
  if (!ix86_tune_features[X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL]
      && !(opts_set->x_target_flags & MASK_AVX256_SPLIT_UNALIGNED_STORE))
    opts->x_target_flags |= MASK_AVX256_SPLIT_UNALIGNED_STORE;

  /* Enable 128-bit AVX instruction generation
     for the auto-vectorizer.  */
  if (TARGET_AVX128_OPTIMAL
      && (opts_set->x_prefer_vector_width_type == PVW_NONE))
    opts->x_prefer_vector_width_type = PVW_AVX128;

  /* Use 256-bit AVX instruction generation
     in the auto-vectorizer.  */
  if (ix86_tune_features[X86_TUNE_AVX256_OPTIMAL]
      && (opts_set->x_prefer_vector_width_type == PVW_NONE))
    opts->x_prefer_vector_width_type = PVW_AVX256;

  if (opts->x_ix86_recip_name)
    {
      char *p = ASTRDUP (opts->x_ix86_recip_name);
      char *q;
      unsigned int mask, i;
      bool invert;

      while ((q = strtok (p, ",")) != NULL)
	{
	  p = NULL;
	  if (*q == '!')
	    {
	      invert = true;
	      q++;
	    }
	  else
	    invert = false;

	  if (!strcmp (q, "default"))
	    mask = RECIP_MASK_ALL;
	  else
	    {
	      for (i = 0; i < ARRAY_SIZE (recip_options); i++)
		if (!strcmp (q, recip_options[i].string))
		  {
		    mask = recip_options[i].mask;
		    break;
		  }

	      if (i == ARRAY_SIZE (recip_options))
		{
		  error ("unknown option for -mrecip=%s", q);
		  invert = false;
		  mask = RECIP_MASK_NONE;
		}
	    }

	  opts->x_recip_mask_explicit |= mask;
	  if (invert)
	    opts->x_recip_mask &= ~mask;
	  else
	    opts->x_recip_mask |= mask;
	}
    }
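
  /* A sketch of the semantics, assuming the documented option names:
     -mrecip=all,!sqrt first sets every mask bit via "all" and then clears
     the scalar-sqrt bit; both steps are recorded in x_recip_mask_explicit
     so the bare -mrecip handling below won't override them.  */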

  if (TARGET_RECIP_P (opts->x_target_flags))
    opts->x_recip_mask |= RECIP_MASK_ALL & ~opts->x_recip_mask_explicit;
  else if (opts_set->x_target_flags & MASK_RECIP)
    opts->x_recip_mask &= ~(RECIP_MASK_ALL & ~opts->x_recip_mask_explicit);

  /* Default long double to 64-bit for 32-bit Bionic and to __float128
     for 64-bit Bionic.  Also default long double to 64-bit for Intel
     MCU psABI.  */
  if ((TARGET_HAS_BIONIC || TARGET_IAMCU)
      && !(opts_set->x_target_flags
	   & (MASK_LONG_DOUBLE_64 | MASK_LONG_DOUBLE_128)))
    opts->x_target_flags |= (TARGET_64BIT
			     ? MASK_LONG_DOUBLE_128
			     : MASK_LONG_DOUBLE_64);

  /* Only one of them can be active.  */
  gcc_assert ((opts->x_target_flags & MASK_LONG_DOUBLE_64) == 0
	      || (opts->x_target_flags & MASK_LONG_DOUBLE_128) == 0);
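
  /* I.e. long double is either the 64-bit double representation
     (MASK_LONG_DOUBLE_64) or the 128-bit __float128 one
     (MASK_LONG_DOUBLE_128); with neither mask set it stays the 80-bit x87
     extended type.  */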

  /* Handle stack protector.  */
  if (!opts_set->x_ix86_stack_protector_guard)
    opts->x_ix86_stack_protector_guard
      = TARGET_HAS_BIONIC ? SSP_GLOBAL : SSP_TLS;

#ifdef TARGET_THREAD_SSP_OFFSET
  ix86_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
#endif

  if (global_options_set.x_ix86_stack_protector_guard_offset_str)
    {
      char *endp;
      const char *str = ix86_stack_protector_guard_offset_str;

      errno = 0;
      int64_t offset;

#if defined(INT64_T_IS_LONG)
      offset = strtol (str, &endp, 0);
#else
      offset = strtoll (str, &endp, 0);
#endif

      if (!*str || *endp || errno)
	error ("%qs is not a valid number "
	       "in -mstack-protector-guard-offset=", str);

      if (!IN_RANGE (offset, HOST_WIDE_INT_C (-0x80000000),
		     HOST_WIDE_INT_C (0x7fffffff)))
	error ("%qs is not a valid offset "
	       "in -mstack-protector-guard-offset=", str);

      ix86_stack_protector_guard_offset = offset;
    }
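
  /* For instance, the Linux kernel is expected to pass
     -mstack-protector-guard-offset=0x28 (x86-64) or 0x14 (i386) to address
     its per-CPU canary; base 0 in the strto* call above accepts decimal,
     octal and hex spellings alike.  */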

  ix86_stack_protector_guard_reg = DEFAULT_TLS_SEG_REG;

  /* The kernel uses a different segment register for performance
     reasons; this way a system call does not have to trash the userspace
     segment register, which would be expensive.  */
  if (ix86_cmodel == CM_KERNEL)
    ix86_stack_protector_guard_reg = ADDR_SPACE_SEG_GS;

  if (global_options_set.x_ix86_stack_protector_guard_reg_str)
    {
      const char *str = ix86_stack_protector_guard_reg_str;
      addr_space_t seg = ADDR_SPACE_GENERIC;

      /* Discard optional register prefix.  */
      if (str[0] == '%')
	str++;

      if (strlen (str) == 2 && str[1] == 's')
	{
	  if (str[0] == 'f')
	    seg = ADDR_SPACE_SEG_FS;
	  else if (str[0] == 'g')
	    seg = ADDR_SPACE_SEG_GS;
	}

      if (seg == ADDR_SPACE_GENERIC)
	error ("%qs is not a valid base register "
	       "in -mstack-protector-guard-reg=",
	       ix86_stack_protector_guard_reg_str);

      ix86_stack_protector_guard_reg = seg;
    }
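
  /* So both -mstack-protector-guard-reg=fs and =%fs are accepted, and the
     -mcmodel=kernel default of %gs chosen above can be overridden the same
     way.  */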

  /* Handle -mmemcpy-strategy= and -mmemset-strategy=  */
  if (opts->x_ix86_tune_memcpy_strategy)
    {
      char *str = xstrdup (opts->x_ix86_tune_memcpy_strategy);
      ix86_parse_stringop_strategy_string (str, false);
      free (str);
    }

  if (opts->x_ix86_tune_memset_strategy)
    {
      char *str = xstrdup (opts->x_ix86_tune_memset_strategy);
      ix86_parse_stringop_strategy_string (str, true);
      free (str);
    }

  /* Save the initial options in case the user does function specific
     options.  */
  if (main_args_p)
    target_option_default_node = target_option_current_node
      = build_target_option_node (opts);

  if (opts->x_flag_cf_protection != CF_NONE)
    opts->x_flag_cf_protection =
      (cf_protection_level) (opts->x_flag_cf_protection | CF_SET);

  if (ix86_tune_features [X86_TUNE_AVOID_128FMA_CHAINS])
    maybe_set_param_value (PARAM_AVOID_FMA_MAX_BITS, 128,
			   opts->x_param_values,
			   opts_set->x_param_values);

  return true;
}

/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
ix86_option_override (void)
{
  ix86_option_override_internal (true, &global_options, &global_options_set);
}

/* Implement the TARGET_OFFLOAD_OPTIONS hook.  */
static char *
ix86_offload_options (void)
{
  if (TARGET_LP64)
    return xstrdup ("-foffload-abi=lp64");
  return xstrdup ("-foffload-abi=ilp32");
}

/* Update register usage after having seen the compiler flags.  */

static void
ix86_conditional_register_usage (void)
{
  int i, c_mask;

  /* If there are no caller-saved registers, preserve all registers,
     except fixed_regs and registers used for the function return value,
     since aggregate_value_p checks call_used_regs[regno] on the return
     value.  */
  if (cfun && cfun->machine->no_caller_saved_registers)
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      if (!fixed_regs[i] && !ix86_function_value_regno_p (i))
	call_used_regs[i] = 0;

  /* For 32-bit targets, squash the REX registers.  */
  if (! TARGET_64BIT)
    {
      for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
	fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
      for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
	fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
      for (i = FIRST_EXT_REX_SSE_REG; i <= LAST_EXT_REX_SSE_REG; i++)
	fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
    }

  /* See the definition of CALL_USED_REGISTERS in i386.h.  */
  c_mask = CALL_USED_REGISTERS_MASK (TARGET_64BIT_MS_ABI);

  CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      /* Set/reset conditionally defined registers from
	 CALL_USED_REGISTERS initializer.  */
      if (call_used_regs[i] > 1)
	call_used_regs[i] = !!(call_used_regs[i] & c_mask);

      /* Calculate registers of CLOBBERED_REGS register set
	 as call used registers from GENERAL_REGS register set.  */
      if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
	  && call_used_regs[i])
	SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
    }

  /* If MMX is disabled, squash the registers.  */
  if (! TARGET_MMX)
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
	fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";

  /* If SSE is disabled, squash the registers.  */
  if (! TARGET_SSE)
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
	fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";

  /* If the FPU is disabled, squash the registers.  */
  if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
	fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";

  /* If AVX512F is disabled, squash the registers.  */
  if (! TARGET_AVX512F)
    {
      for (i = FIRST_EXT_REX_SSE_REG; i <= LAST_EXT_REX_SSE_REG; i++)
	fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";

      for (i = FIRST_MASK_REG; i <= LAST_MASK_REG; i++)
	fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
    }
}

/* Canonicalize a comparison from one we don't have to one we do have.  */

static void
ix86_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
			      bool op0_preserve_value)
{
  /* The order of operands in the x87 ficom compare is forced by combine in
     the simplify_comparison () function.  A float operator is treated as
     RTX_OBJ with precedence over other operators and is always put in the
     first place.  Swap the condition and operands to match the ficom
     instruction.  */
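  /* For instance, (lt (float (mem)) (reg)) becomes
     (gt (reg) (float (mem))): swap_condition turns LT into GT while the
     operand swap puts the register first, the order ficom expects.  */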
  if (!op0_preserve_value
      && GET_CODE (*op0) == FLOAT && MEM_P (XEXP (*op0, 0)) && REG_P (*op1))
    {
      enum rtx_code scode = swap_condition ((enum rtx_code) *code);

      /* We are called only for compares that are split to SAHF instruction.
	 Ensure that we have setcc/jcc insn for the swapped condition.  */
      if (ix86_fp_compare_code_to_integer (scode) != UNKNOWN)
	{
	  std::swap (*op0, *op1);
	  *code = (int) scode;
	}
    }
}

/* Save the current options.  */

static void
ix86_function_specific_save (struct cl_target_option *ptr,
			     struct gcc_options *opts)
{
  ptr->arch = ix86_arch;
  ptr->schedule = ix86_schedule;
  ptr->prefetch_sse = x86_prefetch_sse;
  ptr->tune = ix86_tune;
  ptr->branch_cost = ix86_branch_cost;
  ptr->tune_defaulted = ix86_tune_defaulted;
  ptr->arch_specified = ix86_arch_specified;
  ptr->x_ix86_isa_flags_explicit = opts->x_ix86_isa_flags_explicit;
  ptr->x_ix86_isa_flags2_explicit = opts->x_ix86_isa_flags2_explicit;
  ptr->x_recip_mask_explicit = opts->x_recip_mask_explicit;
  ptr->x_ix86_arch_string = opts->x_ix86_arch_string;
  ptr->x_ix86_tune_string = opts->x_ix86_tune_string;
  ptr->x_ix86_cmodel = opts->x_ix86_cmodel;
  ptr->x_ix86_abi = opts->x_ix86_abi;
  ptr->x_ix86_asm_dialect = opts->x_ix86_asm_dialect;
  ptr->x_ix86_branch_cost = opts->x_ix86_branch_cost;
  ptr->x_ix86_dump_tunes = opts->x_ix86_dump_tunes;
  ptr->x_ix86_force_align_arg_pointer = opts->x_ix86_force_align_arg_pointer;
  ptr->x_ix86_force_drap = opts->x_ix86_force_drap;
  ptr->x_ix86_incoming_stack_boundary_arg = opts->x_ix86_incoming_stack_boundary_arg;
  ptr->x_ix86_pmode = opts->x_ix86_pmode;
  ptr->x_ix86_preferred_stack_boundary_arg = opts->x_ix86_preferred_stack_boundary_arg;
  ptr->x_ix86_recip_name = opts->x_ix86_recip_name;
  ptr->x_ix86_regparm = opts->x_ix86_regparm;
  ptr->x_ix86_section_threshold = opts->x_ix86_section_threshold;
  ptr->x_ix86_sse2avx = opts->x_ix86_sse2avx;
  ptr->x_ix86_stack_protector_guard = opts->x_ix86_stack_protector_guard;
  ptr->x_ix86_stringop_alg = opts->x_ix86_stringop_alg;
  ptr->x_ix86_tls_dialect = opts->x_ix86_tls_dialect;
  ptr->x_ix86_tune_ctrl_string = opts->x_ix86_tune_ctrl_string;
  ptr->x_ix86_tune_memcpy_strategy = opts->x_ix86_tune_memcpy_strategy;
  ptr->x_ix86_tune_memset_strategy = opts->x_ix86_tune_memset_strategy;
  ptr->x_ix86_tune_no_default = opts->x_ix86_tune_no_default;
  ptr->x_ix86_veclibabi_type = opts->x_ix86_veclibabi_type;

  /* The fields are char but the variables are not; make sure the
     values fit in the fields.  */
  gcc_assert (ptr->arch == ix86_arch);
  gcc_assert (ptr->schedule == ix86_schedule);
  gcc_assert (ptr->tune == ix86_tune);
  gcc_assert (ptr->branch_cost == ix86_branch_cost);
}

/* Restore the current options.  */

static void
ix86_function_specific_restore (struct gcc_options *opts,
				struct cl_target_option *ptr)
{
  enum processor_type old_tune = ix86_tune;
  enum processor_type old_arch = ix86_arch;
  unsigned HOST_WIDE_INT ix86_arch_mask;
  int i;

  /* We don't change -fPIC.  */
  opts->x_flag_pic = flag_pic;

  ix86_arch = (enum processor_type) ptr->arch;
  ix86_schedule = (enum attr_cpu) ptr->schedule;
  ix86_tune = (enum processor_type) ptr->tune;
  x86_prefetch_sse = ptr->prefetch_sse;
  opts->x_ix86_branch_cost = ptr->branch_cost;
  ix86_tune_defaulted = ptr->tune_defaulted;
  ix86_arch_specified = ptr->arch_specified;
  opts->x_ix86_isa_flags_explicit = ptr->x_ix86_isa_flags_explicit;
  opts->x_ix86_isa_flags2_explicit = ptr->x_ix86_isa_flags2_explicit;
  opts->x_recip_mask_explicit = ptr->x_recip_mask_explicit;
  opts->x_ix86_arch_string = ptr->x_ix86_arch_string;
  opts->x_ix86_tune_string = ptr->x_ix86_tune_string;
  opts->x_ix86_cmodel = ptr->x_ix86_cmodel;
  opts->x_ix86_abi = ptr->x_ix86_abi;
  opts->x_ix86_asm_dialect = ptr->x_ix86_asm_dialect;
  opts->x_ix86_branch_cost = ptr->x_ix86_branch_cost;
  opts->x_ix86_dump_tunes = ptr->x_ix86_dump_tunes;
  opts->x_ix86_force_align_arg_pointer = ptr->x_ix86_force_align_arg_pointer;
  opts->x_ix86_force_drap = ptr->x_ix86_force_drap;
  opts->x_ix86_incoming_stack_boundary_arg = ptr->x_ix86_incoming_stack_boundary_arg;
  opts->x_ix86_pmode = ptr->x_ix86_pmode;
  opts->x_ix86_preferred_stack_boundary_arg = ptr->x_ix86_preferred_stack_boundary_arg;
  opts->x_ix86_recip_name = ptr->x_ix86_recip_name;
  opts->x_ix86_regparm = ptr->x_ix86_regparm;
  opts->x_ix86_section_threshold = ptr->x_ix86_section_threshold;
  opts->x_ix86_sse2avx = ptr->x_ix86_sse2avx;
  opts->x_ix86_stack_protector_guard = ptr->x_ix86_stack_protector_guard;
  opts->x_ix86_stringop_alg = ptr->x_ix86_stringop_alg;
  opts->x_ix86_tls_dialect = ptr->x_ix86_tls_dialect;
  opts->x_ix86_tune_ctrl_string = ptr->x_ix86_tune_ctrl_string;
  opts->x_ix86_tune_memcpy_strategy = ptr->x_ix86_tune_memcpy_strategy;
  opts->x_ix86_tune_memset_strategy = ptr->x_ix86_tune_memset_strategy;
  opts->x_ix86_tune_no_default = ptr->x_ix86_tune_no_default;
  opts->x_ix86_veclibabi_type = ptr->x_ix86_veclibabi_type;
  ix86_tune_cost = processor_cost_table[ix86_tune];
  /* TODO: ix86_cost should be chosen at instruction or function granularity
     so for cold code we use size_cost even in !optimize_size compilation.  */
  if (opts->x_optimize_size)
    ix86_cost = &ix86_size_cost;
  else
    ix86_cost = ix86_tune_cost;

  /* Recreate the arch feature tests if the arch changed.  */
  if (old_arch != ix86_arch)
    {
      ix86_arch_mask = HOST_WIDE_INT_1U << ix86_arch;
      for (i = 0; i < X86_ARCH_LAST; ++i)
	ix86_arch_features[i]
	  = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
    }

  /* Recreate the tune optimization tests.  */
  if (old_tune != ix86_tune)
    set_ix86_tune_features (ix86_tune, false);
}

/* Adjust target options after streaming them in.  This is mainly about
   reconciling them with global options.  */

static void
ix86_function_specific_post_stream_in (struct cl_target_option *ptr)
{
  /* flag_pic is a global option, but ix86_cmodel is a target-saved option
     partly computed from flag_pic.  If flag_pic is on, adjust x_ix86_cmodel
     for PIC, or error out.  */
  if (flag_pic)
    switch (ptr->x_ix86_cmodel)
      {
      case CM_SMALL:
	ptr->x_ix86_cmodel = CM_SMALL_PIC;
	break;

      case CM_MEDIUM:
	ptr->x_ix86_cmodel = CM_MEDIUM_PIC;
	break;

      case CM_LARGE:
	ptr->x_ix86_cmodel = CM_LARGE_PIC;
	break;

      case CM_KERNEL:
	error ("code model %s does not support PIC mode", "kernel");
	break;

      default:
	break;
      }
  else
    switch (ptr->x_ix86_cmodel)
      {
      case CM_SMALL_PIC:
	ptr->x_ix86_cmodel = CM_SMALL;
	break;

      case CM_MEDIUM_PIC:
	ptr->x_ix86_cmodel = CM_MEDIUM;
	break;

      case CM_LARGE_PIC:
	ptr->x_ix86_cmodel = CM_LARGE;
	break;

      default:
	break;
      }
}

/* Print the current options.  */

static void
ix86_function_specific_print (FILE *file, int indent,
			      struct cl_target_option *ptr)
{
  char *target_string
    = ix86_target_string (ptr->x_ix86_isa_flags, ptr->x_ix86_isa_flags2,
			  ptr->x_target_flags, ptr->x_ix86_target_flags,
			  NULL, NULL, ptr->x_ix86_fpmath, false);

  gcc_assert (ptr->arch < PROCESSOR_max);
  fprintf (file, "%*sarch = %d (%s)\n",
	   indent, "",
	   ptr->arch, processor_names[ptr->arch]);

  gcc_assert (ptr->tune < PROCESSOR_max);
  fprintf (file, "%*stune = %d (%s)\n",
	   indent, "",
	   ptr->tune, processor_names[ptr->tune]);

  fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);

  if (target_string)
    {
      fprintf (file, "%*s%s\n", indent, "", target_string);
      free (target_string);
    }
}


/* Inner function to process the attribute((target(...))), take an argument and
   set the current options from the argument. If we have a list, recursively go
   over the list.  */

static bool
ix86_valid_target_attribute_inner_p (tree args, char *p_strings[],
				     struct gcc_options *opts,
				     struct gcc_options *opts_set,
				     struct gcc_options *enum_opts_set)
{
  char *next_optstr;
  bool ret = true;

#define IX86_ATTR_ISA(S,O)   { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
#define IX86_ATTR_STR(S,O)   { S, sizeof (S)-1, ix86_opt_str, O, 0 }
#define IX86_ATTR_ENUM(S,O)  { S, sizeof (S)-1, ix86_opt_enum, O, 0 }
#define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
#define IX86_ATTR_NO(S,O,M)  { S, sizeof (S)-1, ix86_opt_no,  O, M }

  enum ix86_opt_type
  {
    ix86_opt_unknown,
    ix86_opt_yes,
    ix86_opt_no,
    ix86_opt_str,
    ix86_opt_enum,
    ix86_opt_isa
  };

  static const struct
  {
    const char *string;
    size_t len;
    enum ix86_opt_type type;
    int opt;
    int mask;
  } attrs[] = {
    /* isa options */
    IX86_ATTR_ISA ("pconfig",	OPT_mpconfig),
    IX86_ATTR_ISA ("wbnoinvd",	OPT_mwbnoinvd),
    IX86_ATTR_ISA ("sgx",	OPT_msgx),
    IX86_ATTR_ISA ("avx5124fmaps", OPT_mavx5124fmaps),
    IX86_ATTR_ISA ("avx5124vnniw", OPT_mavx5124vnniw),
    IX86_ATTR_ISA ("avx512vpopcntdq", OPT_mavx512vpopcntdq),
    IX86_ATTR_ISA ("avx512vbmi2", OPT_mavx512vbmi2),
    IX86_ATTR_ISA ("avx512vnni", OPT_mavx512vnni),
    IX86_ATTR_ISA ("avx512bitalg", OPT_mavx512bitalg),

    IX86_ATTR_ISA ("avx512vbmi", OPT_mavx512vbmi),
    IX86_ATTR_ISA ("avx512ifma", OPT_mavx512ifma),
    IX86_ATTR_ISA ("avx512vl",	OPT_mavx512vl),
    IX86_ATTR_ISA ("avx512bw",	OPT_mavx512bw),
    IX86_ATTR_ISA ("avx512dq",	OPT_mavx512dq),
    IX86_ATTR_ISA ("avx512er",	OPT_mavx512er),
    IX86_ATTR_ISA ("avx512pf",	OPT_mavx512pf),
    IX86_ATTR_ISA ("avx512cd",	OPT_mavx512cd),
    IX86_ATTR_ISA ("avx512f",	OPT_mavx512f),
    IX86_ATTR_ISA ("avx2",	OPT_mavx2),
    IX86_ATTR_ISA ("fma",	OPT_mfma),
    IX86_ATTR_ISA ("xop",	OPT_mxop),
    IX86_ATTR_ISA ("fma4",	OPT_mfma4),
    IX86_ATTR_ISA ("f16c",	OPT_mf16c),
    IX86_ATTR_ISA ("avx",	OPT_mavx),
    IX86_ATTR_ISA ("sse4",	OPT_msse4),
    IX86_ATTR_ISA ("sse4.2",	OPT_msse4_2),
    IX86_ATTR_ISA ("sse4.1",	OPT_msse4_1),
    IX86_ATTR_ISA ("sse4a",	OPT_msse4a),
    IX86_ATTR_ISA ("ssse3",	OPT_mssse3),
    IX86_ATTR_ISA ("sse3",	OPT_msse3),
    IX86_ATTR_ISA ("aes",	OPT_maes),
    IX86_ATTR_ISA ("sha",	OPT_msha),
    IX86_ATTR_ISA ("pclmul",	OPT_mpclmul),
    IX86_ATTR_ISA ("sse2",	OPT_msse2),
    IX86_ATTR_ISA ("sse",	OPT_msse),
    IX86_ATTR_ISA ("3dnowa",	OPT_m3dnowa),
    IX86_ATTR_ISA ("3dnow",	OPT_m3dnow),
    IX86_ATTR_ISA ("mmx",	OPT_mmmx),
    IX86_ATTR_ISA ("rtm",	OPT_mrtm),
    IX86_ATTR_ISA ("prfchw",	OPT_mprfchw),
    IX86_ATTR_ISA ("rdseed",	OPT_mrdseed),
    IX86_ATTR_ISA ("adx",	OPT_madx),
    IX86_ATTR_ISA ("prefetchwt1", OPT_mprefetchwt1),
    IX86_ATTR_ISA ("clflushopt", OPT_mclflushopt),
    IX86_ATTR_ISA ("xsaves",	OPT_mxsaves),
    IX86_ATTR_ISA ("xsavec",	OPT_mxsavec),
    IX86_ATTR_ISA ("xsaveopt",	OPT_mxsaveopt),
    IX86_ATTR_ISA ("xsave",	OPT_mxsave),
    IX86_ATTR_ISA ("abm",	OPT_mabm),
    IX86_ATTR_ISA ("bmi",	OPT_mbmi),
    IX86_ATTR_ISA ("bmi2",	OPT_mbmi2),
    IX86_ATTR_ISA ("lzcnt",	OPT_mlzcnt),
    IX86_ATTR_ISA ("tbm",	OPT_mtbm),
    IX86_ATTR_ISA ("popcnt",	OPT_mpopcnt),
    IX86_ATTR_ISA ("cx16",	OPT_mcx16),
    IX86_ATTR_ISA ("sahf",	OPT_msahf),
    IX86_ATTR_ISA ("movbe",	OPT_mmovbe),
    IX86_ATTR_ISA ("crc32",	OPT_mcrc32),
    IX86_ATTR_ISA ("fsgsbase",	OPT_mfsgsbase),
    IX86_ATTR_ISA ("rdrnd",	OPT_mrdrnd),
    IX86_ATTR_ISA ("mwaitx",	OPT_mmwaitx),
    IX86_ATTR_ISA ("clzero",	OPT_mclzero),
    IX86_ATTR_ISA ("pku",	OPT_mpku),
    IX86_ATTR_ISA ("lwp",	OPT_mlwp),
    IX86_ATTR_ISA ("hle",	OPT_mhle),
    IX86_ATTR_ISA ("fxsr",	OPT_mfxsr),
    IX86_ATTR_ISA ("clwb",	OPT_mclwb),
    IX86_ATTR_ISA ("rdpid",	OPT_mrdpid),
    IX86_ATTR_ISA ("gfni",	OPT_mgfni),
    IX86_ATTR_ISA ("shstk",	OPT_mshstk),
    IX86_ATTR_ISA ("vaes",	OPT_mvaes),
    IX86_ATTR_ISA ("vpclmulqdq", OPT_mvpclmulqdq),
    IX86_ATTR_ISA ("movdiri", OPT_mmovdiri),
    IX86_ATTR_ISA ("movdir64b", OPT_mmovdir64b),
    IX86_ATTR_ISA ("waitpkg", OPT_mwaitpkg),
    IX86_ATTR_ISA ("cldemote", OPT_mcldemote),

    /* enum options */
    IX86_ATTR_ENUM ("fpmath=",	OPT_mfpmath_),

    /* string options */
    IX86_ATTR_STR ("arch=",	IX86_FUNCTION_SPECIFIC_ARCH),
    IX86_ATTR_STR ("tune=",	IX86_FUNCTION_SPECIFIC_TUNE),

    /* flag options */
    IX86_ATTR_YES ("cld",
		   OPT_mcld,
		   MASK_CLD),

    IX86_ATTR_NO ("fancy-math-387",
		  OPT_mfancy_math_387,
		  MASK_NO_FANCY_MATH_387),

    IX86_ATTR_YES ("ieee-fp",
		   OPT_mieee_fp,
		   MASK_IEEE_FP),

    IX86_ATTR_YES ("inline-all-stringops",
		   OPT_minline_all_stringops,
		   MASK_INLINE_ALL_STRINGOPS),

    IX86_ATTR_YES ("inline-stringops-dynamically",
		   OPT_minline_stringops_dynamically,
		   MASK_INLINE_STRINGOPS_DYNAMICALLY),

    IX86_ATTR_NO ("align-stringops",
		  OPT_mno_align_stringops,
		  MASK_NO_ALIGN_STRINGOPS),

    IX86_ATTR_YES ("recip",
		   OPT_mrecip,
		   MASK_RECIP),

  };

  /* If this is a list, recurse to get the options.  */
  if (TREE_CODE (args) == TREE_LIST)
    {
      bool ret = true;

      for (; args; args = TREE_CHAIN (args))
	if (TREE_VALUE (args)
	    && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args),
						     p_strings, opts, opts_set,
						     enum_opts_set))
	  ret = false;

      return ret;
    }

  else if (TREE_CODE (args) != STRING_CST)
    {
      error ("attribute %<target%> argument not a string");
      return false;
    }

  /* Handle multiple arguments separated by commas.  */
  next_optstr = ASTRDUP (TREE_STRING_POINTER (args));

  while (next_optstr && *next_optstr != '\0')
    {
      char *p = next_optstr;
      char *orig_p = p;
      char *comma = strchr (next_optstr, ',');
      const char *opt_string;
      size_t len, opt_len;
      int opt;
      bool opt_set_p;
      char ch;
      unsigned i;
      enum ix86_opt_type type = ix86_opt_unknown;
      int mask = 0;

      if (comma)
	{
	  *comma = '\0';
	  len = comma - next_optstr;
	  next_optstr = comma + 1;
	}
      else
	{
	  len = strlen (p);
	  next_optstr = NULL;
	}

      /* Recognize no-xxx.  */
      if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
	{
	  opt_set_p = false;
	  p += 3;
	  len -= 3;
	}
      else
	opt_set_p = true;
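
      /* E.g. target("no-avx2") arrives here as the string "no-avx2"; after
	 stripping the prefix, opt_set_p == false makes the ISA handling
	 below generate the equivalent of -mno-avx2.  */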

      /* Find the option.  */
      ch = *p;
      opt = N_OPTS;
      for (i = 0; i < ARRAY_SIZE (attrs); i++)
	{
	  type = attrs[i].type;
	  opt_len = attrs[i].len;
	  if (ch == attrs[i].string[0]
	      && ((type != ix86_opt_str && type != ix86_opt_enum)
		  ? len == opt_len
		  : len > opt_len)
	      && memcmp (p, attrs[i].string, opt_len) == 0)
	    {
	      opt = attrs[i].opt;
	      mask = attrs[i].mask;
	      opt_string = attrs[i].string;
	      break;
	    }
	}

      /* Process the option.  */
      if (opt == N_OPTS)
	{
	  error ("attribute(target(\"%s\")) is unknown", orig_p);
	  ret = false;
	}

      else if (type == ix86_opt_isa)
	{
	  struct cl_decoded_option decoded;

	  generate_option (opt, NULL, opt_set_p, CL_TARGET, &decoded);
	  ix86_handle_option (opts, opts_set,
			      &decoded, input_location);
	}

      else if (type == ix86_opt_yes || type == ix86_opt_no)
	{
	  if (type == ix86_opt_no)
	    opt_set_p = !opt_set_p;

	  if (opt_set_p)
	    opts->x_target_flags |= mask;
	  else
	    opts->x_target_flags &= ~mask;
	}

      else if (type == ix86_opt_str)
	{
	  if (p_strings[opt])
	    {
	      error ("option(\"%s\") was already specified", opt_string);
	      ret = false;
	    }
	  else
	    p_strings[opt] = xstrdup (p + opt_len);
	}

      else if (type == ix86_opt_enum)
	{
	  bool arg_ok;
	  int value;

	  arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
	  if (arg_ok)
	    set_option (opts, enum_opts_set, opt, value,
			p + opt_len, DK_UNSPECIFIED, input_location,
			global_dc);
	  else
	    {
	      error ("attribute(target(\"%s\")) is unknown", orig_p);
	      ret = false;
	    }
	}

      else
	gcc_unreachable ();
    }

  return ret;
}

/* Release allocated strings.  */
static void
release_options_strings (char **option_strings)
{
  /* Free up memory allocated to hold the strings.  */
  for (unsigned i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
    free (option_strings[i]);
}

/* Return a TARGET_OPTION_NODE tree of the target options listed or NULL.  */

tree
ix86_valid_target_attribute_tree (tree args,
				  struct gcc_options *opts,
				  struct gcc_options *opts_set)
{
  const char *orig_arch_string = opts->x_ix86_arch_string;
  const char *orig_tune_string = opts->x_ix86_tune_string;
  enum fpmath_unit orig_fpmath_set = opts_set->x_ix86_fpmath;
  int orig_tune_defaulted = ix86_tune_defaulted;
  int orig_arch_specified = ix86_arch_specified;
  char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL };
  tree t = NULL_TREE;
  struct cl_target_option *def
    = TREE_TARGET_OPTION (target_option_default_node);
  struct gcc_options enum_opts_set;

  memset (&enum_opts_set, 0, sizeof (enum_opts_set));

  /* Process each of the options on the chain.  */
  if (! ix86_valid_target_attribute_inner_p (args, option_strings, opts,
					     opts_set, &enum_opts_set))
    return error_mark_node;

  /* If the changed options are different from the default, rerun
     ix86_option_override_internal, and then save the options away.
     The string options are attribute options, and will be undone
     when we copy the save structure.  */
  if (opts->x_ix86_isa_flags != def->x_ix86_isa_flags
      || opts->x_ix86_isa_flags2 != def->x_ix86_isa_flags2
      || opts->x_target_flags != def->x_target_flags
      || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
      || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
      || enum_opts_set.x_ix86_fpmath)
    {
      /* If we are using the default tune= or arch=, undo the string assigned,
	 and use the default.  */
      if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
	{
	  opts->x_ix86_arch_string
	    = ggc_strdup (option_strings[IX86_FUNCTION_SPECIFIC_ARCH]);

	  /* If arch= is set,  clear all bits in x_ix86_isa_flags,
	     except for ISA_64BIT, ABI_64, ABI_X32, and CODE16.  */
	  opts->x_ix86_isa_flags &= (OPTION_MASK_ISA_64BIT
				     | OPTION_MASK_ABI_64
				     | OPTION_MASK_ABI_X32
				     | OPTION_MASK_CODE16);
	  opts->x_ix86_isa_flags2 = 0;
	}
      else if (!orig_arch_specified)
	opts->x_ix86_arch_string = NULL;

      if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
	opts->x_ix86_tune_string
	  = ggc_strdup (option_strings[IX86_FUNCTION_SPECIFIC_TUNE]);
      else if (orig_tune_defaulted)
	opts->x_ix86_tune_string = NULL;

      /* If fpmath= is not set, and we now have sse2 on 32-bit, use it.  */
      if (enum_opts_set.x_ix86_fpmath)
	opts_set->x_ix86_fpmath = (enum fpmath_unit) 1;

      /* Do any overrides, such as arch=xxx, or tune=xxx support.  */
      bool r = ix86_option_override_internal (false, opts, opts_set);
      if (!r)
	{
	  release_options_strings (option_strings);
	  return error_mark_node;
	}

      /* Add any builtin functions with the new isa if any.  */
      ix86_add_new_builtins (opts->x_ix86_isa_flags, opts->x_ix86_isa_flags2);

      /* Save the current options unless we are validating options for
	 #pragma.  */
      t = build_target_option_node (opts);

      opts->x_ix86_arch_string = orig_arch_string;
      opts->x_ix86_tune_string = orig_tune_string;
      opts_set->x_ix86_fpmath = orig_fpmath_set;

      release_options_strings (option_strings);
    }

  return t;
}

/* Hook to validate attribute((target("string"))).  */

static bool
ix86_valid_target_attribute_p (tree fndecl,
			       tree ARG_UNUSED (name),
			       tree args,
			       int ARG_UNUSED (flags))
{
  struct gcc_options func_options;
  tree new_target, new_optimize;
  bool ret = true;

  /* attribute((target("default"))) does nothing, beyond
     affecting multi-versioning.  */
  if (TREE_VALUE (args)
      && TREE_CODE (TREE_VALUE (args)) == STRING_CST
      && TREE_CHAIN (args) == NULL_TREE
      && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
    return true;
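
  /* For example, in function multi-versioning:

       __attribute__ ((target ("default"))) int f (void) { return 0; }
       __attribute__ ((target ("avx2"))) int f (void) { return 1; }

     the "default" definition is the fallback version and needs no option
     processing here.  */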

  tree old_optimize = build_optimization_node (&global_options);

  /* Get the optimization options of the current function.  */
  tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  if (!func_optimize)
    func_optimize = old_optimize;

  /* Init func_options.  */
  memset (&func_options, 0, sizeof (func_options));
  init_options_struct (&func_options, NULL);
  lang_hooks.init_options_struct (&func_options);
 
  cl_optimization_restore (&func_options,
			   TREE_OPTIMIZATION (func_optimize));

  /* Initialize func_options to the default before its target options can
     be set.  */
  cl_target_option_restore (&func_options,
			    TREE_TARGET_OPTION (target_option_default_node));

  new_target = ix86_valid_target_attribute_tree (args, &func_options,
						 &global_options_set);

  new_optimize = build_optimization_node (&func_options);

  if (new_target == error_mark_node)
    ret = false;

  else if (fndecl && new_target)
    {
      DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;

      if (old_optimize != new_optimize)
	DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
    }

  finalize_options_struct (&func_options);

  return ret;
}


/* Hook to determine if one function can safely inline another.  */

static bool
ix86_can_inline_p (tree caller, tree callee)
{
  tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);

  /* Changes of these flags can be tolerated for always_inline functions.
     Let's hope the user knows what they are doing.  */
  const unsigned HOST_WIDE_INT always_inline_safe_mask
	 = (MASK_USE_8BIT_IDIV | MASK_ACCUMULATE_OUTGOING_ARGS
	    | MASK_NO_ALIGN_STRINGOPS | MASK_AVX256_SPLIT_UNALIGNED_LOAD
	    | MASK_AVX256_SPLIT_UNALIGNED_STORE | MASK_CLD
	    | MASK_NO_FANCY_MATH_387 | MASK_IEEE_FP | MASK_INLINE_ALL_STRINGOPS
	    | MASK_INLINE_STRINGOPS_DYNAMICALLY | MASK_RECIP | MASK_STACK_PROBE
	    | MASK_STV | MASK_TLS_DIRECT_SEG_REFS | MASK_VZEROUPPER
	    | MASK_NO_PUSH_ARGS | MASK_OMIT_LEAF_FRAME_POINTER);

  if (!callee_tree)
    callee_tree = target_option_default_node;
  if (!caller_tree)
    caller_tree = target_option_default_node;
  if (callee_tree == caller_tree)
    return true;

  struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
  struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
  bool ret = false;
  bool always_inline =
     (DECL_DISREGARD_INLINE_LIMITS (callee)
      && lookup_attribute ("always_inline",
			   DECL_ATTRIBUTES (callee)));

  cgraph_node *callee_node = cgraph_node::get (callee);
  /* Callee's isa options should be a subset of the caller's, i.e. an SSE4
     function can inline an SSE2 function but an SSE2 function can't inline
     an SSE4 function.  */
  if (((caller_opts->x_ix86_isa_flags & callee_opts->x_ix86_isa_flags)
       != callee_opts->x_ix86_isa_flags)
      || ((caller_opts->x_ix86_isa_flags2 & callee_opts->x_ix86_isa_flags2)
	  != callee_opts->x_ix86_isa_flags2))
    ret = false;

  /* See if we have the same non-isa options.  */
  else if ((!always_inline
	    && caller_opts->x_target_flags != callee_opts->x_target_flags)
	   || (caller_opts->x_target_flags & ~always_inline_safe_mask)
	       != (callee_opts->x_target_flags & ~always_inline_safe_mask))
    ret = false;

  /* See if arch, tune, etc. are the same.  */
  else if (caller_opts->arch != callee_opts->arch)
    ret = false;

  else if (!always_inline && caller_opts->tune != callee_opts->tune)
    ret = false;

  else if (caller_opts->x_ix86_fpmath != callee_opts->x_ix86_fpmath
	   /* If the callee doesn't use FP expressions, differences in
	      ix86_fpmath can be ignored.  We are called from FEs
	      for multi-versioning call optimization, so beware of
	      ipa_fn_summaries not being available.  */
	   && (! ipa_fn_summaries
	       || ipa_fn_summaries->get (callee_node) == NULL
	       || ipa_fn_summaries->get (callee_node)->fp_expressions))
    ret = false;

  else if (!always_inline
	   && caller_opts->branch_cost != callee_opts->branch_cost)
    ret = false;

  else
    ret = true;

  return ret;
}


/* Remember the last target of ix86_set_current_function.  */
static GTY(()) tree ix86_previous_fndecl;

/* Set targets globals to the default (or current #pragma GCC target
   if active).  Invalidate ix86_previous_fndecl cache.  */

void
ix86_reset_previous_fndecl (void)
{
  tree new_tree = target_option_current_node;
  cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
  if (TREE_TARGET_GLOBALS (new_tree))
    restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
  else if (new_tree == target_option_default_node)
    restore_target_globals (&default_target_globals);
  else
    TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
  ix86_previous_fndecl = NULL_TREE;
}

/* Set the func_type field from the function FNDECL.  */

static void
ix86_set_func_type (tree fndecl)
{
  if (cfun->machine->func_type == TYPE_UNKNOWN)
    {
      if (lookup_attribute ("interrupt",
			    TYPE_ATTRIBUTES (TREE_TYPE (fndecl))))
	{
	  if (ix86_function_naked (fndecl))
	    error_at (DECL_SOURCE_LOCATION (fndecl),
		      "interrupt and naked attributes are not compatible");

	  int nargs = 0;
	  for (tree arg = DECL_ARGUMENTS (fndecl);
	       arg;
	       arg = TREE_CHAIN (arg))
	    nargs++;
	  cfun->machine->no_caller_saved_registers = true;
	  cfun->machine->func_type
	    = nargs == 2 ? TYPE_EXCEPTION : TYPE_INTERRUPT;

	  ix86_optimize_mode_switching[X86_DIRFLAG] = 1;

	  /* Only dwarf2out.c can handle -WORD(AP) as a pointer argument.  */
	  if (write_symbols != NO_DEBUG && write_symbols != DWARF2_DEBUG)
	    sorry ("Only DWARF debug format is supported for interrupt "
		   "service routine.");
	}
      else
	{
	  cfun->machine->func_type = TYPE_NORMAL;
	  if (lookup_attribute ("no_caller_saved_registers",
				TYPE_ATTRIBUTES (TREE_TYPE (fndecl))))
	    cfun->machine->no_caller_saved_registers = true;
	}
    }
}
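
/* For illustration only: the nargs == 2 check above corresponds to the
   two documented forms of x86 interrupt handlers.  A sketch, assuming
   the attribute usage described in the GCC manual (uword_t being the
   unsigned word-sized integer the manual defines):

     struct interrupt_frame;

     __attribute__ ((interrupt)) void
     handler (struct interrupt_frame *frame);

     __attribute__ ((interrupt)) void
     exc_handler (struct interrupt_frame *frame, uword_t error_code);

   The one-argument form yields TYPE_INTERRUPT, the two-argument form
   TYPE_EXCEPTION.  */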

/* Set the indirect_branch_type field from the function FNDECL.  */

static void
ix86_set_indirect_branch_type (tree fndecl)
{
  if (cfun->machine->indirect_branch_type == indirect_branch_unset)
    {
      tree attr = lookup_attribute ("indirect_branch",
				    DECL_ATTRIBUTES (fndecl));
      if (attr != NULL)
	{
	  tree args = TREE_VALUE (attr);
	  if (args == NULL)
	    gcc_unreachable ();
	  tree cst = TREE_VALUE (args);
	  if (strcmp (TREE_STRING_POINTER (cst), "keep") == 0)
	    cfun->machine->indirect_branch_type = indirect_branch_keep;
	  else if (strcmp (TREE_STRING_POINTER (cst), "thunk") == 0)
	    cfun->machine->indirect_branch_type = indirect_branch_thunk;
	  else if (strcmp (TREE_STRING_POINTER (cst), "thunk-inline") == 0)
	    cfun->machine->indirect_branch_type = indirect_branch_thunk_inline;
	  else if (strcmp (TREE_STRING_POINTER (cst), "thunk-extern") == 0)
	    cfun->machine->indirect_branch_type = indirect_branch_thunk_extern;
	  else
	    gcc_unreachable ();
	}
      else
	cfun->machine->indirect_branch_type = ix86_indirect_branch;

      /* -mcmodel=large is not compatible with -mindirect-branch=thunk
	 nor -mindirect-branch=thunk-extern.  */
      if ((ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
	  && ((cfun->machine->indirect_branch_type
	       == indirect_branch_thunk_extern)
	      || (cfun->machine->indirect_branch_type
		  == indirect_branch_thunk)))
	error ("%<-mindirect-branch=%s%> and %<-mcmodel=large%> are not "
	       "compatible",
	       ((cfun->machine->indirect_branch_type
		 == indirect_branch_thunk_extern)
		? "thunk-extern" : "thunk"));
    }

  if (cfun->machine->function_return_type == indirect_branch_unset)
    {
      tree attr = lookup_attribute ("function_return",
				    DECL_ATTRIBUTES (fndecl));
      if (attr != NULL)
	{
	  tree args = TREE_VALUE (attr);
	  if (args == NULL)
	    gcc_unreachable ();
	  tree cst = TREE_VALUE (args);
	  if (strcmp (TREE_STRING_POINTER (cst), "keep") == 0)
	    cfun->machine->function_return_type = indirect_branch_keep;
	  else if (strcmp (TREE_STRING_POINTER (cst), "thunk") == 0)
	    cfun->machine->function_return_type = indirect_branch_thunk;
	  else if (strcmp (TREE_STRING_POINTER (cst), "thunk-inline") == 0)
	    cfun->machine->function_return_type = indirect_branch_thunk_inline;
	  else if (strcmp (TREE_STRING_POINTER (cst), "thunk-extern") == 0)
	    cfun->machine->function_return_type = indirect_branch_thunk_extern;
	  else
	    gcc_unreachable ();
	}
      else
	cfun->machine->function_return_type = ix86_function_return;

      /* -mcmodel=large is not compatible with -mfunction-return=thunk
	 nor -mfunction-return=thunk-extern.  */
      if ((ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
	  && ((cfun->machine->function_return_type
	       == indirect_branch_thunk_extern)
	      || (cfun->machine->function_return_type
		  == indirect_branch_thunk)))
	error ("%<-mfunction-return=%s%> and %<-mcmodel=large%> are not "
	       "compatible",
	       ((cfun->machine->function_return_type
		 == indirect_branch_thunk_extern)
		? "thunk-extern" : "thunk"));
    }
}
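
/* For illustration only: per-function attributes that feed the two
   fields set above, assuming the attribute spellings handled by this
   function:

     __attribute__ ((indirect_branch ("thunk"))) void
     dispatch (void (*fp) (void)) { fp (); }

     __attribute__ ((function_return ("thunk-extern"))) int
     answer (void) { return 42; }

   They override the -mindirect-branch= and -mfunction-return=
   command-line defaults for the individual function.  */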

/* Establish appropriate back-end context for processing the function
   FNDECL.  The argument might be NULL to indicate processing at top
   level, outside of any function scope.  */
static void
ix86_set_current_function (tree fndecl)
{
  /* Only change the context if the function changes.  This hook is called
     several times in the course of compiling a function, and we don't want to
     slow things down too much or call target_reinit when it isn't safe.  */
  if (fndecl == ix86_previous_fndecl)
    {
      /* There may be 2 function bodies for the same function FNDECL,
	 one is extern inline and one isn't.  Call ix86_set_func_type
	 to set the func_type field.  */
      if (fndecl != NULL_TREE)
	{
	  ix86_set_func_type (fndecl);
	  ix86_set_indirect_branch_type (fndecl);
	}
      return;
    }

  tree old_tree;
  if (ix86_previous_fndecl == NULL_TREE)
    old_tree = target_option_current_node;
  else if (DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl))
    old_tree = DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl);
  else
    old_tree = target_option_default_node;

  if (fndecl == NULL_TREE)
    {
      if (old_tree != target_option_current_node)
	ix86_reset_previous_fndecl ();
      return;
    }

  ix86_set_func_type (fndecl);
  ix86_set_indirect_branch_type (fndecl);

  tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
  if (new_tree == NULL_TREE)
    new_tree = target_option_default_node;

  if (old_tree != new_tree)
    {
      cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
      if (TREE_TARGET_GLOBALS (new_tree))
	restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
      else if (new_tree == target_option_default_node)
	restore_target_globals (&default_target_globals);
      else
	TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
    }
  ix86_previous_fndecl = fndecl;

  static bool prev_no_caller_saved_registers;

  /* The 64-bit MS and SYSV ABIs have different sets of call-used registers.
     Avoid expensive re-initialization of init_regs each time we switch
     function context.  */
  if (TARGET_64BIT
      && (call_used_regs[SI_REG]
	  == (cfun->machine->call_abi == MS_ABI)))
    reinit_regs ();
  /* Need to re-initialize init_regs if caller-saved registers are
     changed.  */
  else if (prev_no_caller_saved_registers
	   != cfun->machine->no_caller_saved_registers)
    reinit_regs ();

  if (cfun->machine->func_type != TYPE_NORMAL
      || cfun->machine->no_caller_saved_registers)
    {
      /* Don't allow SSE, MMX nor x87 instructions since they
	 may change processor state.  */
      const char *isa;
      if (TARGET_SSE)
	isa = "SSE";
      else if (TARGET_MMX)
	isa = "MMX/3Dnow";
      else if (TARGET_80387)
	isa = "80387";
      else
	isa = NULL;
      if (isa != NULL)
	{
	  if (cfun->machine->func_type != TYPE_NORMAL)
	    sorry ("%s instructions aren't allowed in %s service routine",
		   isa, (cfun->machine->func_type == TYPE_EXCEPTION
			 ? "exception" : "interrupt"));
	  else
	    sorry ("%s instructions aren't allowed in function with "
		   "no_caller_saved_registers attribute", isa);
	  /* Don't issue the same error twice.  */
	  cfun->machine->func_type = TYPE_NORMAL;
	  cfun->machine->no_caller_saved_registers = false;
	}
    }

  prev_no_caller_saved_registers
    = cfun->machine->no_caller_saved_registers;
}


/* Return true if this goes in large data/bss.  */

static bool
ix86_in_large_data_p (tree exp)
{
  if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
    return false;

  if (exp == NULL_TREE)
    return false;

  /* Functions are never large data.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  /* Automatic variables are never large data.  */
  if (VAR_P (exp) && !is_global_var (exp))
    return false;

  if (VAR_P (exp) && DECL_SECTION_NAME (exp))
    {
      const char *section = DECL_SECTION_NAME (exp);
      if (strcmp (section, ".ldata") == 0
	  || strcmp (section, ".lbss") == 0)
	return true;
      return false;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in data because it might be too big when completed.  Also,
	 int_size_in_bytes returns -1 if the size can vary or is larger
	 than an integer, in which case it is likewise safer to assume
	 that it goes in large data.  */
      if (size <= 0 || size > ix86_section_threshold)
	return true;
    }

  return false;
}
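
/* For illustration only: with -mcmodel=medium and the default
   -mlarge-data-threshold of 65536, the function above treats

     static char big_buf[128 * 1024];

   as large data (it ends up in .lbss), while

     static char small_buf[256];

   stays below ix86_section_threshold and goes into the ordinary
   sections.  */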

/* i386-specific section flag to mark large sections.  */
#define SECTION_LARGE SECTION_MACH_DEP

/* Switch to the appropriate section for output of DECL.
   DECL is either a `VAR_DECL' node or a constant of some sort.
   RELOC indicates whether forming the initial value of DECL requires
   link-time relocations.  */

ATTRIBUTE_UNUSED static section *
x86_64_elf_select_section (tree decl, int reloc,
			   unsigned HOST_WIDE_INT align)
{
  if (ix86_in_large_data_p (decl))
    {
      const char *sname = NULL;
      unsigned int flags = SECTION_WRITE | SECTION_LARGE;
      switch (categorize_decl_for_section (decl, reloc))
	{
	case SECCAT_DATA:
	  sname = ".ldata";
	  break;
	case SECCAT_DATA_REL:
	  sname = ".ldata.rel";
	  break;
	case SECCAT_DATA_REL_LOCAL:
	  sname = ".ldata.rel.local";
	  break;
	case SECCAT_DATA_REL_RO:
	  sname = ".ldata.rel.ro";
	  break;
	case SECCAT_DATA_REL_RO_LOCAL:
	  sname = ".ldata.rel.ro.local";
	  break;
	case SECCAT_BSS:
	  sname = ".lbss";
	  flags |= SECTION_BSS;
	  break;
	case SECCAT_RODATA:
	case SECCAT_RODATA_MERGE_STR:
	case SECCAT_RODATA_MERGE_STR_INIT:
	case SECCAT_RODATA_MERGE_CONST:
	  sname = ".lrodata";
	  flags &= ~SECTION_WRITE;
	  break;
	case SECCAT_SRODATA:
	case SECCAT_SDATA:
	case SECCAT_SBSS:
	  gcc_unreachable ();
	case SECCAT_TEXT:
	case SECCAT_TDATA:
	case SECCAT_TBSS:
	  /* We don't split these for the medium model.  Place them into
	     default sections and hope for the best.  */
	  break;
	}
      if (sname)
	{
	  /* We might get called with string constants, but get_named_section
	     doesn't like them as they are not DECLs.  Also, we need to set
	     flags in that case.  */
	  if (!DECL_P (decl))
	    return get_section (sname, flags, NULL);
	  return get_named_section (decl, sname, reloc);
	}
    }
  return default_elf_select_section (decl, reloc, align);
}

/* Select a set of attributes for section NAME based on the properties
   of DECL and whether or not RELOC indicates that DECL's initializer
   might contain runtime relocations.  */

static unsigned int ATTRIBUTE_UNUSED
x86_64_elf_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  if (ix86_in_large_data_p (decl))
    flags |= SECTION_LARGE;

  if (decl == NULL_TREE
      && (strcmp (name, ".ldata.rel.ro") == 0
	  || strcmp (name, ".ldata.rel.ro.local") == 0))
    flags |= SECTION_RELRO;

  if (strcmp (name, ".lbss") == 0
      || strncmp (name, ".lbss.", 5) == 0
      || strncmp (name, ".gnu.linkonce.lb.", 16) == 0)
    flags |= SECTION_BSS;

  return flags;
}

/* Build up a unique section name, expressed as a
   STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
   RELOC indicates whether the initial value of DECL requires
   link-time relocations.  */

static void ATTRIBUTE_UNUSED
x86_64_elf_unique_section (tree decl, int reloc)
{
  if (ix86_in_large_data_p (decl))
    {
      const char *prefix = NULL;
      /* We only need to use .gnu.linkonce if we don't have COMDAT groups.  */
      bool one_only = DECL_COMDAT_GROUP (decl) && !HAVE_COMDAT_GROUP;

      switch (categorize_decl_for_section (decl, reloc))
	{
	case SECCAT_DATA:
	case SECCAT_DATA_REL:
	case SECCAT_DATA_REL_LOCAL:
	case SECCAT_DATA_REL_RO:
	case SECCAT_DATA_REL_RO_LOCAL:
          prefix = one_only ? ".ld" : ".ldata";
	  break;
	case SECCAT_BSS:
          prefix = one_only ? ".lb" : ".lbss";
	  break;
	case SECCAT_RODATA:
	case SECCAT_RODATA_MERGE_STR:
	case SECCAT_RODATA_MERGE_STR_INIT:
	case SECCAT_RODATA_MERGE_CONST:
          prefix = one_only ? ".lr" : ".lrodata";
	  break;
	case SECCAT_SRODATA:
	case SECCAT_SDATA:
	case SECCAT_SBSS:
	  gcc_unreachable ();
	case SECCAT_TEXT:
	case SECCAT_TDATA:
	case SECCAT_TBSS:
	  /* We don't split these for the medium model.  Place them into
	     default sections and hope for the best.  */
	  break;
	}
      if (prefix)
	{
	  const char *name, *linkonce;
	  char *string;

	  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
	  name = targetm.strip_name_encoding (name);

	  /* If we're using one_only, then there needs to be a .gnu.linkonce
	     prefix to the section name.  */
	  linkonce = one_only ? ".gnu.linkonce" : "";

	  string = ACONCAT ((linkonce, prefix, ".", name, NULL));

	  set_decl_section_name (decl, string);
	  return;
	}
    }
  default_unique_section (decl, reloc);
}

#ifdef COMMON_ASM_OP

#ifndef LARGECOMM_SECTION_ASM_OP
#define LARGECOMM_SECTION_ASM_OP "\t.largecomm\t"
#endif

/* This says how to output assembler code to declare an
   uninitialized external linkage data object.

   For medium model x86-64 we need to use the LARGECOMM_SECTION_ASM_OP
   directive for large objects.  */
void
x86_elf_aligned_decl_common (FILE *file, tree decl,
			const char *name, unsigned HOST_WIDE_INT size,
			int align)
{
  if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
      && size > (unsigned int)ix86_section_threshold)
    {
      switch_to_section (get_named_section (decl, ".lbss", 0));
      fputs (LARGECOMM_SECTION_ASM_OP, file);
    }
  else
    fputs (COMMON_ASM_OP, file);
  assemble_name (file, name);
  fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
	   size, align / BITS_PER_UNIT);
}
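
/* For illustration only: for a medium-model common symbol above the
   threshold, the code above is expected to emit something along the
   lines of

	.largecomm	big_buf,131072,32

   instead of the usual .comm directive (symbol name and alignment
   illustrative).  */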
#endif

/* Utility function for targets to use in implementing
   ASM_OUTPUT_ALIGNED_BSS.  */

void
x86_output_aligned_bss (FILE *file, tree decl, const char *name,
		       	unsigned HOST_WIDE_INT size, int align)
{
  if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
      && size > (unsigned int)ix86_section_threshold)
    switch_to_section (get_named_section (decl, ".lbss", 0));
  else
    switch_to_section (bss_section);
  ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
#ifdef ASM_DECLARE_OBJECT_NAME
  last_assemble_variable_decl = decl;
  ASM_DECLARE_OBJECT_NAME (file, name, decl);
#else
  /* Standard thing is just output label for the object.  */
  ASM_OUTPUT_LABEL (file, name);
#endif /* ASM_DECLARE_OBJECT_NAME */
  ASM_OUTPUT_SKIP (file, size ? size : 1);
}

/* Decide whether we must probe the stack before any space allocation
   on this target.  It's essentially TARGET_STACK_PROBE except when
   -fstack-check causes the stack to be already probed differently.  */

bool
ix86_target_stack_probe (void)
{
  /* Do not probe the stack twice if static stack checking is enabled.  */
  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
    return false;

  return TARGET_STACK_PROBE;
}

/* Decide whether we can make a sibling call to a function.  DECL is the
   declaration of the function being targeted by the call and EXP is the
   CALL_EXPR representing the call.  */

static bool
ix86_function_ok_for_sibcall (tree decl, tree exp)
{
  tree type, decl_or_type;
  rtx a, b;
  bool bind_global = decl && !targetm.binds_local_p (decl);

  if (ix86_function_naked (current_function_decl))
    return false;

  /* Sibling call isn't OK if there are no caller-saved registers
     since all registers must be preserved before return.  */
  if (cfun->machine->no_caller_saved_registers)
    return false;

  /* If we are generating position-independent code, we cannot sibcall
     optimize direct calls to global functions, as the PLT requires
     %ebx be live. (Darwin does not have a PLT.)  */
  if (!TARGET_MACHO
      && !TARGET_64BIT
      && flag_pic
      && flag_plt
      && bind_global)
    return false;

  /* If we need to align the outgoing stack, then sibcalling would
     unalign the stack, which may break the called function.  */
  if (ix86_minimum_incoming_stack_boundary (true)
      < PREFERRED_STACK_BOUNDARY)
    return false;

  if (decl)
    {
      decl_or_type = decl;
      type = TREE_TYPE (decl);
    }
  else
    {
      /* We're looking at the CALL_EXPR; we need the type of the function.  */
      type = CALL_EXPR_FN (exp);		/* pointer expression */
      type = TREE_TYPE (type);			/* pointer type */
      type = TREE_TYPE (type);			/* function type */
      decl_or_type = type;
    }

  /* Check that the return value locations are the same.  Like
     if we are returning floats on the 80387 register stack, we cannot
     make a sibcall from a function that doesn't return a float to a
     function that does or, conversely, from a function that does return
     a float to a function that doesn't; the necessary stack adjustment
     would not be executed.  This is also the place we notice
     differences in the return value ABI.  Note that it is ok for one
     of the functions to have void return type as long as the return
     value of the other is passed in a register.  */
  a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
  b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
			   cfun->decl, false);
  if (STACK_REG_P (a) || STACK_REG_P (b))
    {
      if (!rtx_equal_p (a, b))
	return false;
    }
  else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    ;
  else if (!rtx_equal_p (a, b))
    return false;

  if (TARGET_64BIT)
    {
      /* The SYSV ABI has more call-clobbered registers;
	 disallow sibcalls from MS to SYSV.  */
      if (cfun->machine->call_abi == MS_ABI
	  && ix86_function_type_abi (type) == SYSV_ABI)
	return false;
    }
  else
    {
      /* If this call is indirect, we'll need to be able to use a
	 call-clobbered register for the address of the target function.
	 Make sure that all such registers are not used for passing
	 parameters.  Note that DLLIMPORT functions and call to global
	 function via GOT slot are indirect.  */
      if (!decl
	  || (bind_global && flag_pic && !flag_plt)
	  || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl))
	  || flag_force_indirect_call)
	{
	  /* Check if regparm >= 3 since arg_reg_available is set to
	     false if regparm == 0.  If regparm is 1 or 2, there is
	     always a call-clobbered register available.

	     ??? The symbol indirect call doesn't need a call-clobbered
	     register.  But we don't know if this is a symbol indirect
	     call or not here.  */
	  if (ix86_function_regparm (type, decl) >= 3
	      && !cfun->machine->arg_reg_available)
	    return false;
	}
    }

  /* Otherwise okay.  That also includes certain types of indirect calls.  */
  return true;
}
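
/* For illustration only: one case the PIC/PLT check above rejects.
   Compiled with -m32 -fpic -fplt, assuming extern_fn binds globally:

     extern int extern_fn (int);

     int
     tail (int x)
     {
       return extern_fn (x);
     }

   The call goes through the PLT and needs %ebx live, so no sibling
   call is made; with -m64 or -fno-plt the tail call may still be
   possible.  */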

/* Handle "cdecl", "stdcall", "fastcall", "regparm", "thiscall",
   and "sseregparm" calling convention attributes;
   arguments as in struct attribute_spec.handler.  */

static tree
ix86_handle_cconv_attribute (tree *node, tree name, tree args, int,
			     bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != METHOD_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
      return NULL_TREE;
    }

  /* Can combine regparm with all attributes but fastcall, and thiscall.  */
  if (is_attribute_p ("regparm", name))
    {
      tree cst;

      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
        {
	  error ("fastcall and regparm attributes are not compatible");
	}

      if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
	{
	  error ("regparam and thiscall attributes are not compatible");
	}

      cst = TREE_VALUE (args);
      if (TREE_CODE (cst) != INTEGER_CST)
	{
	  warning (OPT_Wattributes,
		   "%qE attribute requires an integer constant argument",
		   name);
	  *no_add_attrs = true;
	}
      else if (compare_tree_int (cst, REGPARM_MAX) > 0)
	{
	  warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
		   name, REGPARM_MAX);
	  *no_add_attrs = true;
	}

      return NULL_TREE;
    }

  if (TARGET_64BIT)
    {
      /* Do not warn when emulating the MS ABI.  */
      if ((TREE_CODE (*node) != FUNCTION_TYPE
	   && TREE_CODE (*node) != METHOD_TYPE)
	  || ix86_function_type_abi (*node) != MS_ABI)
	warning (OPT_Wattributes, "%qE attribute ignored",
	         name);
      *no_add_attrs = true;
      return NULL_TREE;
    }

  /* Can combine fastcall with stdcall (redundant) and sseregparm.  */
  if (is_attribute_p ("fastcall", name))
    {
      if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
        {
	  error ("fastcall and cdecl attributes are not compatible");
	}
      if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
        {
	  error ("fastcall and stdcall attributes are not compatible");
	}
      if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
        {
	  error ("fastcall and regparm attributes are not compatible");
	}
      if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
	{
	  error ("fastcall and thiscall attributes are not compatible");
	}
    }

  /* Can combine stdcall with fastcall (redundant), regparm and
     sseregparm.  */
  else if (is_attribute_p ("stdcall", name))
    {
      if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
        {
	  error ("stdcall and cdecl attributes are not compatible");
	}
      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
        {
	  error ("stdcall and fastcall attributes are not compatible");
	}
      if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
	{
	  error ("stdcall and thiscall attributes are not compatible");
	}
    }

  /* Can combine cdecl with regparm and sseregparm.  */
  else if (is_attribute_p ("cdecl", name))
    {
      if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
        {
	  error ("stdcall and cdecl attributes are not compatible");
	}
      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
        {
	  error ("fastcall and cdecl attributes are not compatible");
	}
      if (lookup_attribute ("thiscall", TYPE_ATTRIBUTES (*node)))
	{
	  error ("cdecl and thiscall attributes are not compatible");
	}
    }
  else if (is_attribute_p ("thiscall", name))
    {
      if (TREE_CODE (*node) != METHOD_TYPE && pedantic)
	warning (OPT_Wattributes, "%qE attribute is used for non-class method",
	         name);
      if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
	{
	  error ("stdcall and thiscall attributes are not compatible");
	}
      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
	{
	  error ("fastcall and thiscall attributes are not compatible");
	}
      if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
	{
	  error ("cdecl and thiscall attributes are not compatible");
	}
    }

  /* Can combine sseregparm with all attributes.  */

  return NULL_TREE;
}
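
/* For illustration only: combinations accepted and rejected by the
   handler above on 32-bit targets:

     __attribute__ ((stdcall, regparm (2))) int ok1 (int, int);
     __attribute__ ((cdecl, sseregparm)) float ok2 (float);
     __attribute__ ((fastcall, regparm (2))) int bad (int);

   The last declaration is diagnosed with "fastcall and regparm
   attributes are not compatible".  */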

/* The transactional memory builtins are implicitly regparm or fastcall
   depending on the ABI.  Override the generic do-nothing attribute that
   these builtins were declared with, and replace it with one of the two
   attributes that we expect elsewhere.  */

static tree
ix86_handle_tm_regparm_attribute (tree *node, tree, tree,
				  int flags, bool *no_add_attrs)
{
  tree alt;

  /* In no case do we want to add the placeholder attribute.  */
  *no_add_attrs = true;

  /* The 64-bit ABI is unchanged for transactional memory.  */
  if (TARGET_64BIT)
    return NULL_TREE;

  /* ??? Is there a better way to validate 32-bit Windows?  We have
     cfun->machine->call_abi, but that seems to be set only for 64-bit.  */
  if (CHECK_STACK_LIMIT > 0)
    alt = tree_cons (get_identifier ("fastcall"), NULL, NULL);
  else
    {
      alt = tree_cons (NULL, build_int_cst (NULL, 2), NULL);
      alt = tree_cons (get_identifier ("regparm"), alt, NULL);
    }
  decl_attributes (node, alt, flags);

  return NULL_TREE;
}

/* This function determines from TYPE the calling-convention.  */

unsigned int
ix86_get_callcvt (const_tree type)
{
  unsigned int ret = 0;
  bool is_stdarg;
  tree attrs;

  if (TARGET_64BIT)
    return IX86_CALLCVT_CDECL;

  attrs = TYPE_ATTRIBUTES (type);
  if (attrs != NULL_TREE)
    {
      if (lookup_attribute ("cdecl", attrs))
	ret |= IX86_CALLCVT_CDECL;
      else if (lookup_attribute ("stdcall", attrs))
	ret |= IX86_CALLCVT_STDCALL;
      else if (lookup_attribute ("fastcall", attrs))
	ret |= IX86_CALLCVT_FASTCALL;
      else if (lookup_attribute ("thiscall", attrs))
	ret |= IX86_CALLCVT_THISCALL;

      /* Regparm isn't allowed for thiscall and fastcall.  */
      if ((ret & (IX86_CALLCVT_THISCALL | IX86_CALLCVT_FASTCALL)) == 0)
	{
	  if (lookup_attribute ("regparm", attrs))
	    ret |= IX86_CALLCVT_REGPARM;
	  if (lookup_attribute ("sseregparm", attrs))
	    ret |= IX86_CALLCVT_SSEREGPARM;
	}

      if (IX86_BASE_CALLCVT(ret) != 0)
	return ret;
    }

  is_stdarg = stdarg_p (type);
  if (TARGET_RTD && !is_stdarg)
    return IX86_CALLCVT_STDCALL | ret;

  if (ret != 0
      || is_stdarg
      || TREE_CODE (type) != METHOD_TYPE
      || ix86_function_type_abi (type) != MS_ABI)
    return IX86_CALLCVT_CDECL | ret;

  return IX86_CALLCVT_THISCALL;
}
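
/* For illustration only: the fallback logic above means that with
   -mrtd a plain prototype

     int f (int a, int b);

   defaults to IX86_CALLCVT_STDCALL, while a stdarg prototype

     int g (const char *fmt, ...);

   stays IX86_CALLCVT_CDECL because the caller must pop the variable
   arguments.  */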

/* Return 0 if the attributes for two types are incompatible, 1 if they
   are compatible, and 2 if they are nearly compatible (which causes a
   warning to be generated).  */

static int
ix86_comp_type_attributes (const_tree type1, const_tree type2)
{
  unsigned int ccvt1, ccvt2;

  if (TREE_CODE (type1) != FUNCTION_TYPE
      && TREE_CODE (type1) != METHOD_TYPE)
    return 1;

  ccvt1 = ix86_get_callcvt (type1);
  ccvt2 = ix86_get_callcvt (type2);
  if (ccvt1 != ccvt2)
    return 0;
  if (ix86_function_regparm (type1, NULL)
      != ix86_function_regparm (type2, NULL))
    return 0;

  return 1;
}

/* Return the regparm value for a function with the indicated TYPE and DECL.
   DECL may be NULL when calling function indirectly
   or considering a libcall.  */

static int
ix86_function_regparm (const_tree type, const_tree decl)
{
  tree attr;
  int regparm;
  unsigned int ccvt;

  if (TARGET_64BIT)
    return (ix86_function_type_abi (type) == SYSV_ABI
	    ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
  ccvt = ix86_get_callcvt (type);
  regparm = ix86_regparm;

  if ((ccvt & IX86_CALLCVT_REGPARM) != 0)
    {
      attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
      if (attr)
	{
	  regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
	  return regparm;
	}
    }
  else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
    return 2;
  else if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
    return 1;

  /* Use register calling convention for local functions when possible.  */
  if (decl
      && TREE_CODE (decl) == FUNCTION_DECL)
    {
      cgraph_node *target = cgraph_node::get (decl);
      if (target)
	target = target->function_symbol ();

      /* Caller and callee must agree on the calling convention, so
	 checking just the current function's optimize setting would mean
	 that with __attribute__((optimize (...))) the caller could use
	 the regparm convention and the callee not, or vice versa.
	 Instead look at whether the callee is optimized or not.  */
      if (target && opt_for_fn (target->decl, optimize)
	  && !(profile_flag && !flag_fentry))
	{
	  cgraph_local_info *i = &target->local;
	  if (i && i->local && i->can_change_signature)
	    {
	      int local_regparm, globals = 0, regno;

	      /* Make sure no regparm register is taken by a
		 fixed register variable.  */
	      for (local_regparm = 0; local_regparm < REGPARM_MAX;
		   local_regparm++)
		if (fixed_regs[local_regparm])
		  break;

	      /* We don't want to use regparm(3) for nested functions as
		 these use a static chain pointer in the third argument.  */
	      if (local_regparm == 3 && DECL_STATIC_CHAIN (target->decl))
		local_regparm = 2;

	      /* Save a register for the split stack.  */
	      if (flag_split_stack)
		{
		  if (local_regparm == 3)
		    local_regparm = 2;
		  else if (local_regparm == 2
			   && DECL_STATIC_CHAIN (target->decl))
		    local_regparm = 1;
		}

	      /* Each fixed register usage increases register pressure,
		 so fewer registers should be used for argument passing.
		 This functionality can be overridden by an explicit
		 regparm value.  */
	      for (regno = AX_REG; regno <= DI_REG; regno++)
		if (fixed_regs[regno])
		  globals++;

	      local_regparm
		= globals < local_regparm ? local_regparm - globals : 0;

	      if (local_regparm > regparm)
		regparm = local_regparm;
	    }
	}
    }

  return regparm;
}
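
/* For illustration only: the local-function path above may promote

     static int
     local_add (int a, int b)
     {
       return a + b;
     }

   to passing A and B in registers (up to regparm 3) when the function
   is IPA-local and its signature may change, while an exported
   function keeps the stack convention unless the user writes
   __attribute__ ((regparm (3))) explicitly.  */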

/* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
   DFmode (2) arguments in SSE registers for a function with the
   indicated TYPE and DECL.  DECL may be NULL when calling the function
   indirectly or considering a libcall.  Return -1 if any FP parameter
   should be rejected by an error; this is used in situations where we
   imply the SSE calling convention but the function is called from
   another function with SSE disabled.  Otherwise return 0.  */

static int
ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
{
  gcc_assert (!TARGET_64BIT);

  /* Use SSE registers to pass SFmode and DFmode arguments if requested
     by the sseregparm attribute.  */
  if (TARGET_SSEREGPARM
      || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
    {
      if (!TARGET_SSE)
	{
	  if (warn)
	    {
	      if (decl)
		error ("calling %qD with attribute sseregparm without "
		       "SSE/SSE2 enabled", decl);
	      else
		error ("calling %qT with attribute sseregparm without "
		       "SSE/SSE2 enabled", type);
	    }
	  return 0;
	}

      return 2;
    }

  if (!decl)
    return 0;

  cgraph_node *target = cgraph_node::get (decl);
  if (target)
    target = target->function_symbol ();

  /* For local functions, pass up to SSE_REGPARM_MAX SFmode
     (and DFmode for SSE2) arguments in SSE registers.  */
  if (target
      /* TARGET_SSE_MATH */
      && (target_opts_for_fn (target->decl)->x_ix86_fpmath & FPMATH_SSE)
      && opt_for_fn (target->decl, optimize)
      && !(profile_flag && !flag_fentry))
    {
      cgraph_local_info *i = &target->local;
      if (i && i->local && i->can_change_signature)
	{
	  /* Refuse to produce wrong code when local function with SSE enabled
	     is called from SSE disabled function.
	     FIXME: We need a way to detect these cases cross-ltrans partition
	     and avoid using SSE calling conventions on local functions called
	     from function with SSE disabled.  For now at least delay the
	     warning until we know we are going to produce wrong code.
	     See PR66047  */
	  if (!TARGET_SSE && warn)
	    return -1;
	  return TARGET_SSE2_P (target_opts_for_fn (target->decl)
				->x_ix86_isa_flags) ? 2 : 1;
	}
    }

  return 0;
}
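
/* For illustration only: the effect of the attribute handled above on
   32-bit code:

     __attribute__ ((sseregparm)) double
     scale (double x, double y)
     {
       return x * y;
     }

   With SSE2 enabled, X and Y arrive in %xmm registers instead of on
   the stack; without SSE the call site is diagnosed rather than
   silently falling back to the 80387 conventions.  */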

/* Return true if EAX is live at the start of the function.  Used by
   ix86_expand_prologue to determine if we need special help before
   calling allocate_stack_worker.  */

static bool
ix86_eax_live_at_start_p (void)
{
  /* Cheat.  Don't bother working forward from ix86_function_regparm
     to the function type to whether an actual argument is located in
     eax.  Instead just look at cfg info, which is still close enough
     to correct at this point.  This gives false positives for broken
     functions that might use uninitialized data that happens to be
     allocated in eax, but who cares?  */
  return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)), 0);
}

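/* Return true if the hidden pointer to the aggregate return value
   should be kept on the stack after return, i.e. the callee must not
   pop it.  */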
static bool
ix86_keep_aggregate_return_pointer (tree fntype)
{
  tree attr;

  if (!TARGET_64BIT)
    {
      attr = lookup_attribute ("callee_pop_aggregate_return",
			       TYPE_ATTRIBUTES (fntype));
      if (attr)
	return (TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))) == 0);

      /* For 32-bit MS-ABI the default is to keep aggregate
         return pointer.  */
      if (ix86_function_type_abi (fntype) == MS_ABI)
	return true;
    }
  return KEEP_AGGREGATE_RETURN_POINTER != 0;
}

/* Value is the number of bytes of arguments automatically
   popped when returning from a subroutine call.
   FUNDECL is the declaration node of the function (as a tree),
   FUNTYPE is the data type of the function (as a tree),
   or for a library call it is an identifier node for the subroutine name.
   SIZE is the number of bytes of arguments passed on the stack.

   On the 80386, the RTD insn may be used to pop them if the number
     of args is fixed, but if the number is variable then the caller
     must pop them all.  RTD can't be used for library calls now
     because the library is compiled with the Unix compiler.
   Use of RTD is a selectable option, since it is incompatible with
   standard Unix calling sequences.  If the option is not selected,
   the caller must always pop the args.

   The attribute stdcall is equivalent to RTD on a per module basis.  */

static poly_int64
ix86_return_pops_args (tree fundecl, tree funtype, poly_int64 size)
{
  unsigned int ccvt;

  /* None of the 64-bit ABIs pop arguments.  */
  if (TARGET_64BIT)
    return 0;

  ccvt = ix86_get_callcvt (funtype);

  if ((ccvt & (IX86_CALLCVT_STDCALL | IX86_CALLCVT_FASTCALL
	       | IX86_CALLCVT_THISCALL)) != 0
      && ! stdarg_p (funtype))
    return size;

  /* Lose any fake structure return argument if it is passed on the stack.  */
  if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
      && !ix86_keep_aggregate_return_pointer (funtype))
    {
      int nregs = ix86_function_regparm (funtype, fundecl);
      if (nregs == 0)
	return GET_MODE_SIZE (Pmode);
    }

  return 0;
}
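
/* For illustration only: for the stdcall case above,

     __attribute__ ((stdcall)) int f (int a, int b);

   has 8 bytes of stack arguments on 32-bit, so this hook returns 8 and
   the callee returns with "ret $8", whereas a cdecl or stdarg function
   returns with a plain "ret" and the caller pops the arguments.  */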

/* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook.  */

static bool
ix86_legitimate_combined_insn (rtx_insn *insn)
{
  int i;

  /* Check operand constraints in case hard registers were propagated
     into insn pattern.  This check prevents combine pass from
     generating insn patterns with invalid hard register operands.
     These invalid insns can eventually confuse reload to error out
     with a spill failure.  See also PRs 46829 and 46843.  */

  gcc_assert (INSN_CODE (insn) >= 0);

  extract_insn (insn);
  preprocess_constraints (insn);

  int n_operands = recog_data.n_operands;
  int n_alternatives = recog_data.n_alternatives;
  for (i = 0; i < n_operands; i++)
    {
      rtx op = recog_data.operand[i];
      machine_mode mode = GET_MODE (op);
      const operand_alternative *op_alt;
      int offset = 0;
      bool win;
      int j;

      /* A unary operator may be accepted by the predicate, but it
	 is irrelevant for matching constraints.  */
      if (UNARY_P (op))
	op = XEXP (op, 0);

      if (SUBREG_P (op))
	{
	  if (REG_P (SUBREG_REG (op))
	      && REGNO (SUBREG_REG (op)) < FIRST_PSEUDO_REGISTER)
	    offset = subreg_regno_offset (REGNO (SUBREG_REG (op)),
					  GET_MODE (SUBREG_REG (op)),
					  SUBREG_BYTE (op),
					  GET_MODE (op));
	  op = SUBREG_REG (op);
	}

      if (!(REG_P (op) && HARD_REGISTER_P (op)))
	continue;

      op_alt = recog_op_alt;

      /* Operand has no constraints, anything is OK.  */
      win = !n_alternatives;

      alternative_mask preferred = get_preferred_alternatives (insn);
      for (j = 0; j < n_alternatives; j++, op_alt += n_operands)
	{
	  if (!TEST_BIT (preferred, j))
	    continue;
	  if (op_alt[i].anything_ok
	      || (op_alt[i].matches != -1
		  && operands_match_p
		  (recog_data.operand[i],
		   recog_data.operand[op_alt[i].matches]))
	      || reg_fits_class_p (op, op_alt[i].cl, offset, mode))
	    {
	      win = true;
	      break;
	    }
	}

      if (!win)
	return false;
    }

  return true;
}

/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

static unsigned HOST_WIDE_INT
ix86_asan_shadow_offset (void)
{
  return TARGET_LP64 ? (TARGET_MACHO ? (HOST_WIDE_INT_1 << 44)
				     : HOST_WIDE_INT_C (0x7fff8000))
		     : (HOST_WIDE_INT_1 << 29);
}
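
/* For illustration only: with the Linux LP64 offset above, the address
   sanitizer maps an application address ADDR to its shadow byte as

     shadow = (addr >> 3) + 0x7fff8000

   so that each shadow byte describes one 8-byte granule of application
   memory.  */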

/* Argument support functions.  */

/* Return true if register REGNO may be used to pass function parameters.  */
bool
ix86_function_arg_regno_p (int regno)
{
  int i;
  enum calling_abi call_abi;
  const int *parm_regs;

  if (!TARGET_64BIT)
    {
      if (TARGET_MACHO)
        return (regno < REGPARM_MAX
                || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
      else
        return (regno < REGPARM_MAX
	        || (TARGET_MMX && MMX_REGNO_P (regno)
	  	    && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
	        || (TARGET_SSE && SSE_REGNO_P (regno)
		    && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
    }

  if (TARGET_SSE && SSE_REGNO_P (regno)
      && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
    return true;

  /* TODO: The function should depend on the current function's ABI,
     but builtins.c would need updating then.  Therefore we use the
     default ABI.  */
  call_abi = ix86_cfun_abi ();

  /* RAX is used as hidden argument to va_arg functions.  */
  if (call_abi == SYSV_ABI && regno == AX_REG)
    return true;

  if (call_abi == MS_ABI)
    parm_regs = x86_64_ms_abi_int_parameter_registers;
  else
    parm_regs = x86_64_int_parameter_registers;

  for (i = 0; i < (call_abi == MS_ABI
		   ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
    if (regno == parm_regs[i])
      return true;
  return false;
}

/* Return true if we do not know how to pass TYPE solely in registers.  */

static bool
ix86_must_pass_in_stack (machine_mode mode, const_tree type)
{
  if (must_pass_in_stack_var_size_or_pad (mode, type))
    return true;

  /* For 32-bit, we want TImode aggregates to go on the stack.  But watch out!
     The layout_type routine is crafty and tries to trick us into passing
     currently unsupported vector types on the stack by using TImode.  */
  return (!TARGET_64BIT && mode == TImode
	  && type && TREE_CODE (type) != VECTOR_TYPE);
}

/* Return the size, in bytes, of the area reserved for arguments passed
   in registers for the function represented by FNDECL, depending on
   the ABI format used.  */
int
ix86_reg_parm_stack_space (const_tree fndecl)
{
  enum calling_abi call_abi = SYSV_ABI;
  if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
    call_abi = ix86_function_abi (fndecl);
  else
    call_abi = ix86_function_type_abi (fndecl);
  if (TARGET_64BIT && call_abi == MS_ABI)
    return 32;
  return 0;
}
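
/* For illustration only: the 32 bytes returned above for the 64-bit MS
   ABI are the register parameter "home area" the caller must reserve,
   typically visible as something like

	subq	$40, %rsp
	call	callee

   (32 bytes of home area plus alignment).  The SYSV ABI reserves no
   such area, hence the 0.  */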

/* We add this as a workaround so that the libc_has_function hook can
   be used from i386.md.  */
bool
ix86_libc_has_function (enum function_class fn_class)
{
  return targetm.libc_has_function (fn_class);
}

/* Return SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the
   call ABI used.  */
enum calling_abi
ix86_function_type_abi (const_tree fntype)
{
  enum calling_abi abi = ix86_abi;

  if (fntype == NULL_TREE || TYPE_ATTRIBUTES (fntype) == NULL_TREE)
    return abi;

  if (abi == SYSV_ABI
      && lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
    {
      static int warned;
      if (TARGET_X32 && !warned)
	{
	  error ("X32 does not support ms_abi attribute");
	  warned = 1;
	}

      abi = MS_ABI;
    }
  else if (abi == MS_ABI
	   && lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
    abi = SYSV_ABI;

  return abi;
}

static enum calling_abi
ix86_function_abi (const_tree fndecl)
{
  return fndecl ? ix86_function_type_abi (TREE_TYPE (fndecl)) : ix86_abi;
}

/* Return SYSV_ABI or MS_ABI, depending on CFUN, specifying the
   call ABI used.  */
enum calling_abi
ix86_cfun_abi (void)
{
  return cfun ? cfun->machine->call_abi : ix86_abi;
}

static bool
ix86_function_ms_hook_prologue (const_tree fn)
{
  if (fn && lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fn)))
    {
      if (decl_function_context (fn) != NULL_TREE)
	error_at (DECL_SOURCE_LOCATION (fn),
		  "ms_hook_prologue is not compatible with nested function");
      else
        return true;
    }
  return false;
}

static bool
ix86_function_naked (const_tree fn)
{
  if (fn && lookup_attribute ("naked", DECL_ATTRIBUTES (fn)))
    return true;

  return false;
}

/* Write the extra assembler code needed to declare a function properly.  */

void
ix86_asm_output_function_label (FILE *asm_out_file, const char *fname,
				tree decl)
{
  bool is_ms_hook = ix86_function_ms_hook_prologue (decl);

  if (is_ms_hook)
    {
      int i, filler_count = (TARGET_64BIT ? 32 : 16);
      unsigned int filler_cc = 0xcccccccc;

      for (i = 0; i < filler_count; i += 4)
        fprintf (asm_out_file, ASM_LONG " %#x\n", filler_cc);
    }

#ifdef SUBTARGET_ASM_UNWIND_INIT
  SUBTARGET_ASM_UNWIND_INIT (asm_out_file);
#endif

  ASM_OUTPUT_LABEL (asm_out_file, fname);

  /* Output magic byte marker, if hot-patch attribute is set.  */
  if (is_ms_hook)
    {
      if (TARGET_64BIT)
	{
	  /* leaq [%rsp + 0], %rsp  */
	  fputs (ASM_BYTE "0x48, 0x8d, 0xa4, 0x24, 0x00, 0x00, 0x00, 0x00\n",
		 asm_out_file);
	}
      else
	{
          /* movl.s %edi, %edi
	     push   %ebp
	     movl.s %esp, %ebp */
	  fputs (ASM_BYTE "0x8b, 0xff, 0x55, 0x8b, 0xec\n", asm_out_file);
	}
    }
}

/* Implementation of the call ABI switching target hook.  The call
   register set specific to FNDECL is selected.  See also
   ix86_conditional_register_usage for more details.  */
void
ix86_call_abi_override (const_tree fndecl)
{
  cfun->machine->call_abi = ix86_function_abi (fndecl);
}

/* Return true if a pseudo register should be created and used to hold
   the GOT address for PIC code.  */
bool
ix86_use_pseudo_pic_reg (void)
{
  if ((TARGET_64BIT
       && (ix86_cmodel == CM_SMALL_PIC
	   || TARGET_PECOFF))
      || !flag_pic)
    return false;
  return true;
}

/* Initialize large model PIC register.  */

static void
ix86_init_large_pic_reg (unsigned int tmp_regno)
{
  rtx_code_label *label;
  rtx tmp_reg;

  gcc_assert (Pmode == DImode);
  label = gen_label_rtx ();
  emit_label (label);
  LABEL_PRESERVE_P (label) = 1;
  tmp_reg = gen_rtx_REG (Pmode, tmp_regno);
  gcc_assert (REGNO (pic_offset_table_rtx) != tmp_regno);
  emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx,
				label));
  emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
  emit_insn (ix86_gen_add3 (pic_offset_table_rtx,
			    pic_offset_table_rtx, tmp_reg));
  const char *name = LABEL_NAME (label);
  PUT_CODE (label, NOTE);
  NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
  NOTE_DELETED_LABEL_NAME (label) = name;
}
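
/* For illustration only: the insns emitted above correspond to a
   sequence along the lines of (register choice illustrative)

     .L1:
	leaq	.L1(%rip), %rbx
	movabsq	$_GLOBAL_OFFSET_TABLE_-.L1, %r11
	addq	%r11, %rbx

   which materializes the GOT address for -mcmodel=large -fpic.  */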

/* Create and initialize PIC register if required.  */
static void
ix86_init_pic_reg (void)
{
  edge entry_edge;
  rtx_insn *seq;

  if (!ix86_use_pseudo_pic_reg ())
    return;

  start_sequence ();

  if (TARGET_64BIT)
    {
      if (ix86_cmodel == CM_LARGE_PIC)
	ix86_init_large_pic_reg (R11_REG);
      else
	emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
    }
  else
    {
      /* If there is a future mcount call in the function, it is more
	 profitable to emit SET_GOT into the ABI-defined
	 REAL_PIC_OFFSET_TABLE_REGNUM.  */
      rtx reg = crtl->profile
		? gen_rtx_REG (Pmode, REAL_PIC_OFFSET_TABLE_REGNUM)
		: pic_offset_table_rtx;
      rtx_insn *insn = emit_insn (gen_set_got (reg));
      RTX_FRAME_RELATED_P (insn) = 1;
      if (crtl->profile)
        emit_move_insn (pic_offset_table_rtx, reg);
      add_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL_RTX);
    }

  seq = get_insns ();
  end_sequence ();

  entry_edge = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  insert_insn_on_edge (seq, entry_edge);
  commit_one_edge_insertion (entry_edge);
}

/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum,  /* Argument info to initialize */
		      tree fntype,	/* tree ptr for function decl */
		      rtx libname,	/* SYMBOL_REF of library name or 0 */
		      tree fndecl,
		      int caller)
{
  struct cgraph_local_info *i = NULL;
  struct cgraph_node *target = NULL;

  memset (cum, 0, sizeof (*cum));

  if (fndecl)
    {
      target = cgraph_node::get (fndecl);
      if (target)
	{
	  target = target->function_symbol ();
	  i = cgraph_node::local_info (target->decl);
	  cum->call_abi = ix86_function_abi (target->decl);
	}
      else
	cum->call_abi = ix86_function_abi (fndecl);
    }
  else
    cum->call_abi = ix86_function_type_abi (fntype);

  cum->caller = caller;

  /* Set up the number of registers to use for passing arguments.  */
  cum->nregs = ix86_regparm;
  if (TARGET_64BIT)
    {
      cum->nregs = (cum->call_abi == SYSV_ABI
                   ? X86_64_REGPARM_MAX
                   : X86_64_MS_REGPARM_MAX);
    }
  if (TARGET_SSE)
    {
      cum->sse_nregs = SSE_REGPARM_MAX;
      if (TARGET_64BIT)
        {
          cum->sse_nregs = (cum->call_abi == SYSV_ABI
                           ? X86_64_SSE_REGPARM_MAX
                           : X86_64_MS_SSE_REGPARM_MAX);
        }
    }
  if (TARGET_MMX)
    cum->mmx_nregs = MMX_REGPARM_MAX;
  cum->warn_avx512f = true;
  cum->warn_avx = true;
  cum->warn_sse = true;
  cum->warn_mmx = true;

  /* Because types might mismatch between caller and callee, we need to
     use the actual type of the function for local calls.
     FIXME: cgraph_analyze can be told to actually record if a function
     uses va_start, so for local functions maybe_vaarg can be made more
     aggressive, helping K&R code.
     FIXME: once the type system is fixed, we won't need this code
     anymore.  */
  if (i && i->local && i->can_change_signature)
    fntype = TREE_TYPE (target->decl);
  cum->stdarg = stdarg_p (fntype);
  cum->maybe_vaarg = (fntype
		      ? (!prototype_p (fntype) || stdarg_p (fntype))
		      : !libname);

  cum->decl = fndecl;

  cum->warn_empty = !warn_abi || cum->stdarg;
  if (!cum->warn_empty && fntype)
    {
      function_args_iterator iter;
      tree argtype;
      bool seen_empty_type = false;
      FOREACH_FUNCTION_ARGS (fntype, argtype, iter)
	{
	  if (argtype == error_mark_node || VOID_TYPE_P (argtype))
	    break;
	  if (TYPE_EMPTY_P (argtype))
	    seen_empty_type = true;
	  else if (seen_empty_type)
	    {
	      cum->warn_empty = true;
	      break;
	    }
	}
    }

  if (!TARGET_64BIT)
    {
      /* If there are variable arguments, then we won't pass anything
         in registers in 32-bit mode. */
      if (stdarg_p (fntype))
	{
	  cum->nregs = 0;
	  /* Since in 32-bit mode variable arguments are always passed
	     on the stack, a scratch register is available for an
	     indirect sibcall.  */
	  cfun->machine->arg_reg_available = true;
	  cum->sse_nregs = 0;
	  cum->mmx_nregs = 0;
	  cum->warn_avx512f = false;
	  cum->warn_avx = false;
	  cum->warn_sse = false;
	  cum->warn_mmx = false;
	  return;
	}

      /* Use ecx and edx registers if function has fastcall attribute,
	 else look for regparm information.  */
      if (fntype)
	{
	  unsigned int ccvt = ix86_get_callcvt (fntype);
	  if ((ccvt & IX86_CALLCVT_THISCALL) != 0)
	    {
	      cum->nregs = 1;
	      cum->fastcall = 1; /* Same first register as in fastcall.  */
	    }
	  else if ((ccvt & IX86_CALLCVT_FASTCALL) != 0)
	    {
	      cum->nregs = 2;
	      cum->fastcall = 1;
	    }
	  else
	    cum->nregs = ix86_function_regparm (fntype, fndecl);
	}

      /* Set up the number of SSE registers used for passing SFmode
	 and DFmode arguments.  Warn for mismatching ABI.  */
      cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
    }

  cfun->machine->arg_reg_available = (cum->nregs > 0);
}
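
/* For illustration only: on 32-bit the setup above gives, e.g. for

     __attribute__ ((fastcall)) int f (int a, int b, int c);

   cum->nregs = 2, so A and B travel in %ecx and %edx and C goes on the
   stack, while a stdarg prototype zeroes all the register counts so
   that every argument is passed on the stack.  */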

/* Return the "natural" mode for TYPE.  In most cases, this is just TYPE_MODE.
   But in the case of vector types, it is some vector mode.

   When we have only some of our vector isa extensions enabled, then there
   are some modes for which vector_mode_supported_p is false.  For these
   modes, the generic vector support in gcc will choose some non-vector mode
   in order to implement the type.  By computing the natural mode, we'll
   select the proper ABI location for the operand and not depend on whatever
   the middle-end decides to do with these vector types.

   The middle-end can't deal with vector types larger than 16 bytes.  In
   this case, we return the original mode and warn about the ABI change
   if CUM isn't NULL.

   If IN_RETURN is true, warn about the ABI change if the vector mode
   isn't available for the function return value.  */

static machine_mode
type_natural_mode (const_tree type, const CUMULATIVE_ARGS *cum,
		   bool in_return)
{
  machine_mode mode = TYPE_MODE (type);

  if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      if ((size == 8 || size == 16 || size == 32 || size == 64)
	  /* ??? Generic code allows us to create width 1 vectors.  Ignore.  */
	  && TYPE_VECTOR_SUBPARTS (type) > 1)
	{
	  machine_mode innermode = TYPE_MODE (TREE_TYPE (type));

	  /* There are no XFmode vector modes.  */
	  if (innermode == XFmode)
	    return mode;

	  if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
	    mode = MIN_MODE_VECTOR_FLOAT;
	  else
	    mode = MIN_MODE_VECTOR_INT;

	  /* Get the mode which has this inner mode and number of units.  */
	  FOR_EACH_MODE_FROM (mode, mode)
	    if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
		&& GET_MODE_INNER (mode) == innermode)
	      {
		if (size == 64 && !TARGET_AVX512F && !TARGET_IAMCU)
		  {
		    static bool warnedavx512f;
		    static bool warnedavx512f_ret;

		    if (cum && cum->warn_avx512f && !warnedavx512f)
		      {
			if (warning (OPT_Wpsabi, "AVX512F vector argument "
				     "without AVX512F enabled changes the ABI"))
			  warnedavx512f = true;
		      }
		    else if (in_return && !warnedavx512f_ret)
		      {
			if (warning (OPT_Wpsabi, "AVX512F vector return "
				     "without AVX512F enabled changes the ABI"))
			  warnedavx512f_ret = true;
		      }

		    return TYPE_MODE (type);
		  }
		else if (size == 32 && !TARGET_AVX && !TARGET_IAMCU)
		  {
		    static bool warnedavx;
		    static bool warnedavx_ret;

		    if (cum && cum->warn_avx && !warnedavx)
		      {
			if (warning (OPT_Wpsabi, "AVX vector argument "
				     "without AVX enabled changes the ABI"))
			  warnedavx = true;
		      }
		    else if (in_return && !warnedavx_ret)
		      {
			if (warning (OPT_Wpsabi, "AVX vector return "
				     "without AVX enabled changes the ABI"))
			  warnedavx_ret = true;
		      }

		    return TYPE_MODE (type);
		  }
		else if (((size == 8 && TARGET_64BIT) || size == 16)
			 && !TARGET_SSE
			 && !TARGET_IAMCU)
		  {
		    static bool warnedsse;
		    static bool warnedsse_ret;

		    if (cum && cum->warn_sse && !warnedsse)
		      {
			if (warning (OPT_Wpsabi, "SSE vector argument "
				     "without SSE enabled changes the ABI"))
			  warnedsse = true;
		      }
		    else if (!TARGET_64BIT && in_return && !warnedsse_ret)
		      {
			if (warning (OPT_Wpsabi, "SSE vector return "
				     "without SSE enabled changes the ABI"))
			  warnedsse_ret = true;
		      }
		  }
		else if ((size == 8 && !TARGET_64BIT)
			 && (!cfun
			     || cfun->machine->func_type == TYPE_NORMAL)
			 && !TARGET_MMX
			 && !TARGET_IAMCU)
		  {
		    static bool warnedmmx;
		    static bool warnedmmx_ret;

		    if (cum && cum->warn_mmx && !warnedmmx)
		      {
			if (warning (OPT_Wpsabi, "MMX vector argument "
				     "without MMX enabled changes the ABI"))
			  warnedmmx = true;
		      }
		    else if (in_return && !warnedmmx_ret)
		      {
			if (warning (OPT_Wpsabi, "MMX vector return "
				     "without MMX enabled changes the ABI"))
			  warnedmmx_ret = true;
		      }
		  }
		return mode;
	      }

	  gcc_unreachable ();
	}
    }

  return mode;
}
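
/* For illustration only: a 32-byte vector argument that reaches the
   AVX warning path above when compiled without -mavx:

     typedef double v4df __attribute__ ((vector_size (32)));

     double take_vector (v4df v);

   With -mavx the argument is passed in a %ymm register; without it
   -Wpsabi warns and the original non-vector mode is used instead.  */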

/* We want to pass a value in REGNO whose "natural" mode is MODE.  However,
   this may not agree with the mode that the type system has chosen for the
   register, which is ORIG_MODE.  If ORIG_MODE is not BLKmode, then we can
   go ahead and use it.  Otherwise we have to build a PARALLEL instead.  */

static rtx
gen_reg_or_parallel (machine_mode mode, machine_mode orig_mode,
		     unsigned int regno)
{
  rtx tmp;

  if (orig_mode != BLKmode)
    tmp = gen_rtx_REG (orig_mode, regno);
  else
    {
      tmp = gen_rtx_REG (mode, regno);
      tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
      tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
    }

  return tmp;
}

/* x86-64 register passing implementation.  See the x86-64 ABI for
   details.  The goal of this code is to classify each eightbyte of the
   incoming argument by register class and assign registers
   accordingly.  */

/* Return the union class of CLASS1 and CLASS2.
   See the x86-64 PS ABI for details.  */

static enum x86_64_reg_class
merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
{
  /* Rule #1: If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
     the other class.  */
  if (class1 == X86_64_NO_CLASS)
    return class2;
  if (class2 == X86_64_NO_CLASS)
    return class1;

  /* Rule #3: If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
    return X86_64_MEMORY_CLASS;

  /* Rule #4: If one of the classes is INTEGER, the result is INTEGER.  */
  if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
      || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
    return X86_64_INTEGERSI_CLASS;
  if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
      || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
    return X86_64_INTEGER_CLASS;

  /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
     MEMORY is used.  */
  if (class1 == X86_64_X87_CLASS
      || class1 == X86_64_X87UP_CLASS
      || class1 == X86_64_COMPLEX_X87_CLASS
      || class2 == X86_64_X87_CLASS
      || class2 == X86_64_X87UP_CLASS
      || class2 == X86_64_COMPLEX_X87_CLASS)
    return X86_64_MEMORY_CLASS;

  /* Rule #6: Otherwise class SSE is used.  */
  return X86_64_SSE_CLASS;
}
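
/* For illustration only: applying the merge rules above to

     struct s { double d; int i; };

   classifies the first eightbyte as X86_64_SSE_CLASS (the double) and
   the second as an integer class (the int), so the struct travels in
   one %xmm and one general-purpose register.  A long double member
   would instead force the whole aggregate into memory via the x87
   rules.  */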

/* Classify the argument of type TYPE and mode MODE.
   CLASSES will be filled by the register class used to pass each word
   of the operand.  The number of words is returned.  If the parameter
   should be passed in memory, 0 is returned.  As a special case for
   zero-sized containers, classes[0] will be NO_CLASS and 1 is returned.

   BIT_OFFSET is used internally for handling records and specifies the
   offset of the value in bits, modulo 512, to avoid overflow cases.

   See the x86-64 PS ABI for details.  */

static int
classify_argument (machine_mode mode, const_tree type,
		   enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
{
  HOST_WIDE_INT bytes =
    (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
  int words = CEIL (bytes + (bit_offset % 64) / 8, UNITS_PER_WORD);

  /* Variable sized entities are always passed/returned in memory.  */
  if (bytes < 0)
    return 0;

  if (mode != VOIDmode
      && targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  if (type && AGGREGATE_TYPE_P (type))
    {
      int i;
      tree field;
      enum x86_64_reg_class subclasses[MAX_CLASSES];

      /* On x86-64 we pass structures larger than 64 bytes on the stack.  */
      if (bytes > 64)
	return 0;

      for (i = 0; i < words; i++)
	classes[i] = X86_64_NO_CLASS;

      /* Zero-sized arrays or structures are NO_CLASS.  We return 0 to
	 signal the memory class, so handle it as a special case.  */
      if (!words)
	{
	  classes[0] = X86_64_NO_CLASS;
	  return 1;
	}

      /* Classify each field of record and merge classes.  */
      switch (TREE_CODE (type))
	{
	case RECORD_TYPE:
	  /* And now merge the fields of structure.  */
	  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	    {
	      if (TREE_CODE (field) == FIELD_DECL)
		{
		  int num;

		  if (TREE_TYPE (field) == error_mark_node)
		    continue;

		  /* Bitfields are always classified as integer.  Handle them
		     early, since later code would consider them to be
		     misaligned integers.  */
		  if (DECL_BIT_FIELD (field))
		    {
		      for (i = (int_bit_position (field)
				+ (bit_offset % 64)) / 8 / 8;
			   i < ((int_bit_position (field) + (bit_offset % 64))
			        + tree_to_shwi (DECL_SIZE (field))
				+ 63) / 8 / 8; i++)
			classes[i] =
			  merge_classes (X86_64_INTEGER_CLASS,
					 classes[i]);
		    }
		  else
		    {
		      int pos;

		      type = TREE_TYPE (field);

		      /* Flexible array member is ignored.  */
		      if (TYPE_MODE (type) == BLKmode
			  && TREE_CODE (type) == ARRAY_TYPE
			  && TYPE_SIZE (type) == NULL_TREE
			  && TYPE_DOMAIN (type) != NULL_TREE
			  && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
			      == NULL_TREE))
			{
			  static bool warned;

			  if (!warned && warn_psabi)
			    {
			      warned = true;
			      inform (input_location,
				      "the ABI of passing struct with"
				      " a flexible array member has"
				      " changed in GCC 4.4");
			    }
			  continue;
			}
		      num = classify_argument (TYPE_MODE (type), type,
					       subclasses,
					       (int_bit_position (field)
						+ bit_offset) % 512);
		      if (!num)
			return 0;
		      pos = (int_bit_position (field)
			     + (bit_offset % 64)) / 8 / 8;
		      for (i = 0; i < num && (i + pos) < words; i++)
			classes[i + pos] =
			  merge_classes (subclasses[i], classes[i + pos]);
		    }
		}
	    }
	  break;

	case ARRAY_TYPE:
	  /* Arrays are handled as small records.  */
	  {
	    int num;
	    num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
				     TREE_TYPE (type), subclasses, bit_offset);
	    if (!num)
	      return 0;

	    /* The partial classes are now full classes.  */
	    if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
	      subclasses[0] = X86_64_SSE_CLASS;
	    if (subclasses[0] == X86_64_INTEGERSI_CLASS
		&& !((bit_offset % 64) == 0 && bytes == 4))
	      subclasses[0] = X86_64_INTEGER_CLASS;

	    for (i = 0; i < words; i++)
	      classes[i] = subclasses[i % num];

	    break;
	  }
	case UNION_TYPE:
	case QUAL_UNION_TYPE:
	  /* Unions are similar to RECORD_TYPE but the offset is always 0.  */
	  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	    {
	      if (TREE_CODE (field) == FIELD_DECL)
		{
		  int num;

		  if (TREE_TYPE (field) == error_mark_node)
		    continue;

		  num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
					   TREE_TYPE (field), subclasses,
					   bit_offset);
		  if (!num)
		    return 0;
		  for (i = 0; i < num && i < words; i++)
		    classes[i] = merge_classes (subclasses[i], classes[i]);
		}
	    }
	  break;

	default:
	  gcc_unreachable ();
	}

      if (words > 2)
	{
	  /* When the size exceeds 16 bytes, everything is passed in
	     memory unless the first word is X86_64_SSE_CLASS and all
	     the remaining words are X86_64_SSEUP_CLASS.  */
	  if (classes[0] != X86_64_SSE_CLASS)
	    return 0;

	  for (i = 1; i < words; i++)
	    if (classes[i] != X86_64_SSEUP_CLASS)
	      return 0;
	}

      /* Final merger cleanup.  */
      for (i = 0; i < words; i++)
	{
	  /* If one class is MEMORY, everything should be passed in
	     memory.  */
	  if (classes[i] == X86_64_MEMORY_CLASS)
	    return 0;

	  /* X86_64_SSEUP_CLASS should always be preceded by
	     X86_64_SSE_CLASS or X86_64_SSEUP_CLASS.  */
	  if (classes[i] == X86_64_SSEUP_CLASS
	      && classes[i - 1] != X86_64_SSE_CLASS
	      && classes[i - 1] != X86_64_SSEUP_CLASS)
	    {
	      /* The first one should never be X86_64_SSEUP_CLASS.  */
	      gcc_assert (i != 0);
	      classes[i] = X86_64_SSE_CLASS;
	    }

	  /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
	     everything should be passed in memory.  */
	  if (classes[i] == X86_64_X87UP_CLASS
	      && (classes[i - 1] != X86_64_X87_CLASS))
	    {
	      static bool warned;

	      /* The first one should never be X86_64_X87UP_CLASS.  */
	      gcc_assert (i != 0);
	      if (!warned && warn_psabi)
		{
		  warned = true;
		  inform (input_location,
			  "the ABI of passing union with long double"
			  " has changed in GCC 4.4");
		}
	      return 0;
	    }
	}
      return words;
    }

  /* Compute the alignment needed.  We align all types to their natural
     boundaries, with the exception of XFmode, which is aligned to 64 bits.  */
  if (mode != VOIDmode && mode != BLKmode)
    {
      int mode_alignment = GET_MODE_BITSIZE (mode);

      if (mode == XFmode)
	mode_alignment = 128;
      else if (mode == XCmode)
	mode_alignment = 256;
      if (COMPLEX_MODE_P (mode))
	mode_alignment /= 2;
      /* Misaligned fields are always returned in memory.  */
      if (bit_offset % mode_alignment)
	return 0;
    }

  /* For V1xx modes, just use the base mode.  */
  if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
      && GET_MODE_UNIT_SIZE (mode) == bytes)
    mode = GET_MODE_INNER (mode);

  /* Classification of atomic types.  */
  switch (mode)
    {
    case E_SDmode:
    case E_DDmode:
      classes[0] = X86_64_SSE_CLASS;
      return 1;
    case E_TDmode:
      classes[0] = X86_64_SSE_CLASS;
      classes[1] = X86_64_SSEUP_CLASS;
      return 2;
    case E_DImode:
    case E_SImode:
    case E_HImode:
    case E_QImode:
    case E_CSImode:
    case E_CHImode:
    case E_CQImode:
      {
	int size = bit_offset + (int) GET_MODE_BITSIZE (mode);

	/* Analyze last 128 bits only.  */
	size = (size - 1) & 0x7f;

	if (size < 32)
	  {
	    classes[0] = X86_64_INTEGERSI_CLASS;
	    return 1;
	  }
	else if (size < 64)
	  {
	    classes[0] = X86_64_INTEGER_CLASS;
	    return 1;
	  }
	else if (size < 64+32)
	  {
	    classes[0] = X86_64_INTEGER_CLASS;
	    classes[1] = X86_64_INTEGERSI_CLASS;
	    return 2;
	  }
	else if (size < 64+64)
	  {
	    classes[0] = classes[1] = X86_64_INTEGER_CLASS;
	    return 2;
	  }
	else
	  gcc_unreachable ();
      }
    case E_CDImode:
    case E_TImode:
      classes[0] = classes[1] = X86_64_INTEGER_CLASS;
      return 2;
    case E_COImode:
    case E_OImode:
      /* OImode shouldn't be used directly.  */
      gcc_unreachable ();
    case E_CTImode:
      return 0;
    case E_SFmode:
      if (!(bit_offset % 64))
	classes[0] = X86_64_SSESF_CLASS;
      else
	classes[0] = X86_64_SSE_CLASS;
      return 1;
    case E_DFmode:
      classes[0] = X86_64_SSEDF_CLASS;
      return 1;
    case E_XFmode:
      classes[0] = X86_64_X87_CLASS;
      classes[1] = X86_64_X87UP_CLASS;
      return 2;
    case E_TFmode:
      classes[0] = X86_64_SSE_CLASS;
      classes[1] = X86_64_SSEUP_CLASS;
      return 2;
    case E_SCmode:
      classes[0] = X86_64_SSE_CLASS;
      if (!(bit_offset % 64))
	return 1;
      else
	{
	  static bool warned;

	  if (!warned && warn_psabi)
	    {
	      warned = true;
	      inform (input_location,
		      "the ABI of passing structure with complex float"
		      " member has changed in GCC 4.4");
	    }
	  classes[1] = X86_64_SSESF_CLASS;
	  return 2;
	}
    case E_DCmode:
      classes[0] = X86_64_SSEDF_CLASS;
      classes[1] = X86_64_SSEDF_CLASS;
      return 2;
    case E_XCmode:
      classes[0] = X86_64_COMPLEX_X87_CLASS;
      return 1;
    case E_TCmode:
      /* This mode is larger than 16 bytes.  */
      return 0;
    case E_V8SFmode:
    case E_V8SImode:
    case E_V32QImode:
    case E_V16HImode:
    case E_V4DFmode:
    case E_V4DImode:
      classes[0] = X86_64_SSE_CLASS;
      classes[1] = X86_64_SSEUP_CLASS;
      classes[2] = X86_64_SSEUP_CLASS;
      classes[3] = X86_64_SSEUP_CLASS;
      return 4;
    case E_V8DFmode:
    case E_V16SFmode:
    case E_V8DImode:
    case E_V16SImode:
    case E_V32HImode:
    case E_V64QImode:
      classes[0] = X86_64_SSE_CLASS;
      classes[1] = X86_64_SSEUP_CLASS;
      classes[2] = X86_64_SSEUP_CLASS;
      classes[3] = X86_64_SSEUP_CLASS;
      classes[4] = X86_64_SSEUP_CLASS;
      classes[5] = X86_64_SSEUP_CLASS;
      classes[6] = X86_64_SSEUP_CLASS;
      classes[7] = X86_64_SSEUP_CLASS;
      return 8;
    case E_V4SFmode:
    case E_V4SImode:
    case E_V16QImode:
    case E_V8HImode:
    case E_V2DFmode:
    case E_V2DImode:
      classes[0] = X86_64_SSE_CLASS;
      classes[1] = X86_64_SSEUP_CLASS;
      return 2;
    case E_V1TImode:
    case E_V1DImode:
    case E_V2SFmode:
    case E_V2SImode:
    case E_V4HImode:
    case E_V8QImode:
      classes[0] = X86_64_SSE_CLASS;
      return 1;
    case E_BLKmode:
    case E_VOIDmode:
      return 0;
    default:
      gcc_assert (VECTOR_MODE_P (mode));

      if (bytes > 16)
	return 0;

      gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);

      if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
	classes[0] = X86_64_INTEGERSI_CLASS;
      else
	classes[0] = X86_64_INTEGER_CLASS;
      classes[1] = X86_64_INTEGER_CLASS;
      return 1 + (bytes > 8);
    }
}
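
/* Worked examples (illustrative, following the SysV x86-64 psABI):

     struct s1 { double d; int i; };   16 bytes
       -> classes = { X86_64_SSEDF_CLASS, X86_64_INTEGER_CLASS } and 2
	  is returned: the first eightbyte goes in an SSE register, the
	  second in a general-purpose register.

     struct s2 { long a, b, c; };      24 bytes
       -> classes[0] is X86_64_INTEGER_CLASS, so the words > 2 check
	  above returns 0: passed in memory.  */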

/* Examine the argument and set the number of registers required in each
   class.  Return true iff the parameter should be passed in memory.  */

static bool
examine_argument (machine_mode mode, const_tree type, int in_return,
		  int *int_nregs, int *sse_nregs)
{
  enum x86_64_reg_class regclass[MAX_CLASSES];
  int n = classify_argument (mode, type, regclass, 0);

  *int_nregs = 0;
  *sse_nregs = 0;

  if (!n)
    return true;
  for (n--; n >= 0; n--)
    switch (regclass[n])
      {
      case X86_64_INTEGER_CLASS:
      case X86_64_INTEGERSI_CLASS:
	(*int_nregs)++;
	break;
      case X86_64_SSE_CLASS:
      case X86_64_SSESF_CLASS:
      case X86_64_SSEDF_CLASS:
	(*sse_nregs)++;
	break;
      case X86_64_NO_CLASS:
      case X86_64_SSEUP_CLASS:
	break;
      case X86_64_X87_CLASS:
      case X86_64_X87UP_CLASS:
      case X86_64_COMPLEX_X87_CLASS:
	if (!in_return)
	  return true;
	break;
      case X86_64_MEMORY_CLASS:
	gcc_unreachable ();
      }

  return false;
}
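
/* For instance (illustrative): for  struct { double d; int i; }  this
   sets *int_nregs = 1 and *sse_nregs = 1 and returns false, while for
   a 24-byte struct of three longs classify_argument returns 0 and we
   return true (pass in memory).  */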

/* Construct container for the argument used by GCC interface.  See
   FUNCTION_ARG for the detailed description.  */

static rtx
construct_container (machine_mode mode, machine_mode orig_mode,
		     const_tree type, int in_return, int nintregs, int nsseregs,
		     const int *intreg, int sse_regno)
{
  /* The following variables hold the static issued_error state.  */
  static bool issued_sse_arg_error;
  static bool issued_sse_ret_error;
  static bool issued_x87_ret_error;

  machine_mode tmpmode;
  int bytes =
    (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
  enum x86_64_reg_class regclass[MAX_CLASSES];
  int n;
  int i;
  int nexps = 0;
  int needed_sseregs, needed_intregs;
  rtx exp[MAX_CLASSES];
  rtx ret;

  n = classify_argument (mode, type, regclass, 0);
  if (!n)
    return NULL;
  if (examine_argument (mode, type, in_return, &needed_intregs,
			&needed_sseregs))
    return NULL;
  if (needed_intregs > nintregs || needed_sseregs > nsseregs)
    return NULL;

  /* We allowed the user to turn off SSE for kernel mode.  Don't crash if
     some less clueful developer tries to use floating-point anyway.  */
  if (needed_sseregs && !TARGET_SSE)
    {
      if (in_return)
	{
	  if (!issued_sse_ret_error)
	    {
	      error ("SSE register return with SSE disabled");
	      issued_sse_ret_error = true;
	    }
	}
      else if (!issued_sse_arg_error)
	{
	  error ("SSE register argument with SSE disabled");
	  issued_sse_arg_error = true;
	}
      return NULL;
    }

  /* Likewise, error if the ABI requires us to return values in the
     x87 registers and the user specified -mno-80387.  */
  if (!TARGET_FLOAT_RETURNS_IN_80387 && in_return)
    for (i = 0; i < n; i++)
      if (regclass[i] == X86_64_X87_CLASS
	  || regclass[i] == X86_64_X87UP_CLASS
	  || regclass[i] == X86_64_COMPLEX_X87_CLASS)
	{
	  if (!issued_x87_ret_error)
	    {
	      error ("x87 register return with x87 disabled");
	      issued_x87_ret_error = true;
	    }
	  return NULL;
	}

  /* First construct simple cases.  Avoid SCmode, since we want to use
     single register to pass this type.  */
  if (n == 1 && mode != SCmode)
    switch (regclass[0])
      {
      case X86_64_INTEGER_CLASS:
      case X86_64_INTEGERSI_CLASS:
	return gen_rtx_REG (mode, intreg[0]);
      case X86_64_SSE_CLASS:
      case X86_64_SSESF_CLASS:
      case X86_64_SSEDF_CLASS:
	if (mode != BLKmode)
	  return gen_reg_or_parallel (mode, orig_mode,
				      GET_SSE_REGNO (sse_regno));
	break;
      case X86_64_X87_CLASS:
      case X86_64_COMPLEX_X87_CLASS:
	return gen_rtx_REG (mode, FIRST_STACK_REG);
      case X86_64_NO_CLASS:
	/* Zero sized array, struct or class.  */
	return NULL;
      default:
	gcc_unreachable ();
      }
  if (n == 2
      && regclass[0] == X86_64_SSE_CLASS
      && regclass[1] == X86_64_SSEUP_CLASS
      && mode != BLKmode)
    return gen_reg_or_parallel (mode, orig_mode,
				GET_SSE_REGNO (sse_regno));
  if (n == 4
      && regclass[0] == X86_64_SSE_CLASS
      && regclass[1] == X86_64_SSEUP_CLASS
      && regclass[2] == X86_64_SSEUP_CLASS
      && regclass[3] == X86_64_SSEUP_CLASS
      && mode != BLKmode)
    return gen_reg_or_parallel (mode, orig_mode,
				GET_SSE_REGNO (sse_regno));
  if (n == 8
      && regclass[0] == X86_64_SSE_CLASS
      && regclass[1] == X86_64_SSEUP_CLASS
      && regclass[2] == X86_64_SSEUP_CLASS
      && regclass[3] == X86_64_SSEUP_CLASS
      && regclass[4] == X86_64_SSEUP_CLASS
      && regclass[5] == X86_64_SSEUP_CLASS
      && regclass[6] == X86_64_SSEUP_CLASS
      && regclass[7] == X86_64_SSEUP_CLASS
      && mode != BLKmode)
    return gen_reg_or_parallel (mode, orig_mode,
				GET_SSE_REGNO (sse_regno));
  if (n == 2
      && regclass[0] == X86_64_X87_CLASS
      && regclass[1] == X86_64_X87UP_CLASS)
    return gen_rtx_REG (XFmode, FIRST_STACK_REG);

  if (n == 2
      && regclass[0] == X86_64_INTEGER_CLASS
      && regclass[1] == X86_64_INTEGER_CLASS
      && (mode == CDImode || mode == TImode || mode == BLKmode)
      && intreg[0] + 1 == intreg[1])
    {
      if (mode == BLKmode)
	{
	  /* Use TImode for BLKmode values in 2 integer registers.  */
	  exp[0] = gen_rtx_EXPR_LIST (VOIDmode,
				      gen_rtx_REG (TImode, intreg[0]),
				      GEN_INT (0));
	  ret = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
	  XVECEXP (ret, 0, 0) = exp[0];
	  return ret;
	}
      else
	return gen_rtx_REG (mode, intreg[0]);
    }

  /* Otherwise figure out the entries of the PARALLEL.  */
  for (i = 0; i < n; i++)
    {
      int pos;

      switch (regclass[i])
        {
	  case X86_64_NO_CLASS:
	    break;
	  case X86_64_INTEGER_CLASS:
	  case X86_64_INTEGERSI_CLASS:
	    /* Merge TImodes on aligned occasions here too.  */
	    if (i * 8 + 8 > bytes)
	      {
		unsigned int tmpbits = (bytes - i * 8) * BITS_PER_UNIT;
		if (!int_mode_for_size (tmpbits, 0).exists (&tmpmode))
		  /* We've requested 24 bytes, which we don't have
		     a mode for.  Use DImode.  */
		  tmpmode = DImode;
	      }
	    else if (regclass[i] == X86_64_INTEGERSI_CLASS)
	      tmpmode = SImode;
	    else
	      tmpmode = DImode;
	    exp [nexps++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (tmpmode, *intreg),
				   GEN_INT (i*8));
	    intreg++;
	    break;
	  case X86_64_SSESF_CLASS:
	    exp [nexps++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (SFmode,
						GET_SSE_REGNO (sse_regno)),
				   GEN_INT (i*8));
	    sse_regno++;
	    break;
	  case X86_64_SSEDF_CLASS:
	    exp [nexps++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (DFmode,
						GET_SSE_REGNO (sse_regno)),
				   GEN_INT (i*8));
	    sse_regno++;
	    break;
	  case X86_64_SSE_CLASS:
	    pos = i;
	    switch (n)
	      {
	      case 1:
		tmpmode = DImode;
		break;
	      case 2:
		if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
		  {
		    tmpmode = TImode;
		    i++;
		  }
		else
		  tmpmode = DImode;
		break;
	      case 4:
		gcc_assert (i == 0
			    && regclass[1] == X86_64_SSEUP_CLASS
			    && regclass[2] == X86_64_SSEUP_CLASS
			    && regclass[3] == X86_64_SSEUP_CLASS);
		tmpmode = OImode;
		i += 3;
		break;
	      case 8:
		gcc_assert (i == 0
			    && regclass[1] == X86_64_SSEUP_CLASS
			    && regclass[2] == X86_64_SSEUP_CLASS
			    && regclass[3] == X86_64_SSEUP_CLASS
			    && regclass[4] == X86_64_SSEUP_CLASS
			    && regclass[5] == X86_64_SSEUP_CLASS
			    && regclass[6] == X86_64_SSEUP_CLASS
			    && regclass[7] == X86_64_SSEUP_CLASS);
		tmpmode = XImode;
		i += 7;
		break;
	      default:
		gcc_unreachable ();
	      }
	    exp [nexps++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (tmpmode,
						GET_SSE_REGNO (sse_regno)),
				   GEN_INT (pos*8));
	    sse_regno++;
	    break;
	  default:
	    gcc_unreachable ();
	}
    }

  /* Empty aligned struct, union or class.  */
  if (nexps == 0)
    return NULL;

  ret =  gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
  for (i = 0; i < nexps; i++)
    XVECEXP (ret, 0, i) = exp [i];
  return ret;
}
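
/* Illustrative result: for  struct { double d; int i; }  passed as the
   first SysV argument, the PARALLEL construction above yields,
   schematically,

     (parallel [(expr_list (reg:DF xmm0) (const_int 0))
		(expr_list (reg:DI di) (const_int 8))])

   i.e. the double travels in %xmm0 and the integer half in %rdi.  */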

/* Update the data in CUM to advance over an argument of mode MODE
   and data type TYPE.  (TYPE is null for libcalls where that information
   may not be available.)

   Return the number of integer registers advanced over.  */

static int
function_arg_advance_32 (CUMULATIVE_ARGS *cum, machine_mode mode,
			 const_tree type, HOST_WIDE_INT bytes,
			 HOST_WIDE_INT words)
{
  int res = 0;
  bool error_p = false;

  if (TARGET_IAMCU)
    {
      /* Intel MCU psABI passes scalars and aggregates no larger than 8
	 bytes in registers.  */
      if (!VECTOR_MODE_P (mode) && bytes <= 8)
	goto pass_in_reg;
      return res;
    }

  switch (mode)
    {
    default:
      break;

    case E_BLKmode:
      if (bytes < 0)
	break;
      /* FALLTHRU */

    case E_DImode:
    case E_SImode:
    case E_HImode:
    case E_QImode:
pass_in_reg:
      cum->words += words;
      cum->nregs -= words;
      cum->regno += words;
      if (cum->nregs >= 0)
	res = words;
      if (cum->nregs <= 0)
	{
	  cum->nregs = 0;
	  cfun->machine->arg_reg_available = false;
	  cum->regno = 0;
	}
      break;

    case E_OImode:
      /* OImode shouldn't be used directly.  */
      gcc_unreachable ();

    case E_DFmode:
      if (cum->float_in_sse == -1)
	error_p = true;
      if (cum->float_in_sse < 2)
	break;
      /* FALLTHRU */
    case E_SFmode:
      if (cum->float_in_sse == -1)
	error_p = true;
      if (cum->float_in_sse < 1)
	break;
      /* FALLTHRU */

    case E_V8SFmode:
    case E_V8SImode:
    case E_V64QImode:
    case E_V32HImode:
    case E_V16SImode:
    case E_V8DImode:
    case E_V16SFmode:
    case E_V8DFmode:
    case E_V32QImode:
    case E_V16HImode:
    case E_V4DFmode:
    case E_V4DImode:
    case E_TImode:
    case E_V16QImode:
    case E_V8HImode:
    case E_V4SImode:
    case E_V2DImode:
    case E_V4SFmode:
    case E_V2DFmode:
      if (!type || !AGGREGATE_TYPE_P (type))
	{
	  cum->sse_words += words;
	  cum->sse_nregs -= 1;
	  cum->sse_regno += 1;
	  if (cum->sse_nregs <= 0)
	    {
	      cum->sse_nregs = 0;
	      cum->sse_regno = 0;
	    }
	}
      break;

    case E_V8QImode:
    case E_V4HImode:
    case E_V2SImode:
    case E_V2SFmode:
    case E_V1TImode:
    case E_V1DImode:
      if (!type || !AGGREGATE_TYPE_P (type))
	{
	  cum->mmx_words += words;
	  cum->mmx_nregs -= 1;
	  cum->mmx_regno += 1;
	  if (cum->mmx_nregs <= 0)
	    {
	      cum->mmx_nregs = 0;
	      cum->mmx_regno = 0;
	    }
	}
      break;
    }
  if (error_p)
    {
      cum->float_in_sse = 0;
      error ("calling %qD with SSE calling convention without "
	     "SSE/SSE2 enabled", cum->decl);
      sorry ("this is a GCC bug that can be worked around by adding "
	     "attribute used to function called");
    }

  return res;
}

static int
function_arg_advance_64 (CUMULATIVE_ARGS *cum, machine_mode mode,
			 const_tree type, HOST_WIDE_INT words, bool named)
{
  int int_nregs, sse_nregs;

  /* Unnamed 256- and 512-bit vector mode parameters are passed on the stack.  */
  if (!named && (VALID_AVX512F_REG_MODE (mode)
		 || VALID_AVX256_REG_MODE (mode)))
    return 0;

  if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs)
      && sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
    {
      cum->nregs -= int_nregs;
      cum->sse_nregs -= sse_nregs;
      cum->regno += int_nregs;
      cum->sse_regno += sse_nregs;
      return int_nregs;
    }
  else
    {
      int align = ix86_function_arg_boundary (mode, type) / BITS_PER_WORD;
      cum->words = ROUND_UP (cum->words, align);
      cum->words += words;
      return 0;
    }
}
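
/* Example (illustrative): for  f (int, double, int)  under the SysV
   ABI the calls above advance CUM to regno 1 / sse_regno 1 after the
   first two arguments and to regno 2 after the third, i.e. the
   arguments are assigned to %edi, %xmm0 and %esi respectively.  */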

static int
function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
			    HOST_WIDE_INT words)
{
  /* Anything else is passed indirectly (see ix86_pass_by_reference),
     so only register-sized arguments reach this point.  */
  gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);

  cum->words += words;
  if (cum->nregs > 0)
    {
      cum->nregs -= 1;
      cum->regno += 1;
      return 1;
    }
  return 0;
}
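
/* Note (illustrative): unlike the SysV ABI, the MS ABI consumes one
   register slot per argument position, so  f (int, double, int)  uses
   %ecx, %xmm1 and %r8d; %xmm1 rather than %xmm0 because the double
   sits in the second position.  */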

/* Update the data in CUM to advance over an argument of mode MODE and
   data type TYPE.  (TYPE is null for libcalls where that information
   may not be available.)  */

static void
ix86_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
			   const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  HOST_WIDE_INT bytes, words;
  int nregs;

  /* The argument of interrupt handler is a special case and is
     handled in ix86_function_arg.  */
  if (!cum->caller && cfun->machine->func_type != TYPE_NORMAL)
    return;

  if (mode == BLKmode)
    bytes = int_size_in_bytes (type);
  else
    bytes = GET_MODE_SIZE (mode);
  words = CEIL (bytes, UNITS_PER_WORD);

  if (type)
    mode = type_natural_mode (type, NULL, false);

  if (TARGET_64BIT)
    {
      enum calling_abi call_abi = cum ? cum->call_abi : ix86_abi;

      if (call_abi == MS_ABI)
	nregs = function_arg_advance_ms_64 (cum, bytes, words);
      else
	nregs = function_arg_advance_64 (cum, mode, type, words, named);
    }
  else
    nregs = function_arg_advance_32 (cum, mode, type, bytes, words);

  if (!nregs)
    {
      /* The argument did not land in registers; track whether there
	 are outgoing arguments on the stack.  */
      if (cum->caller)
	cfun->machine->outgoing_args_on_stack = true;
    }
}

/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).  */

static rtx
function_arg_32 (CUMULATIVE_ARGS *cum, machine_mode mode,
		 machine_mode orig_mode, const_tree type,
		 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
{
  bool error_p = false;

  /* Avoid the AL settings for the Unix64 ABI.  */
  if (mode == VOIDmode)
    return constm1_rtx;

  if (TARGET_IAMCU)
    {
      /* Intel MCU psABI passes scalars and aggregates no larger than 8
	 bytes in registers.  */
      if (!VECTOR_MODE_P (mode) && bytes <= 8)
	goto pass_in_reg;
      return NULL_RTX;
    }

  switch (mode)
    {
    default:
      break;

    case E_BLKmode:
      if (bytes < 0)
	break;
      /* FALLTHRU */
    case E_DImode:
    case E_SImode:
    case E_HImode:
    case E_QImode:
pass_in_reg:
      if (words <= cum->nregs)
	{
	  int regno = cum->regno;

	  /* Fastcall allocates the first two DWORD (SImode) or
	     smaller arguments in ECX and EDX, unless the argument
	     is an aggregate type.  */
	  if (cum->fastcall)
	    {
	      if (mode == BLKmode
		  || mode == DImode
		  || (type && AGGREGATE_TYPE_P (type)))
	        break;

	      /* ECX, not EAX, is the first allocated register.  */
	      if (regno == AX_REG)
		regno = CX_REG;
	    }
	  return gen_rtx_REG (mode, regno);
	}
      break;

    case E_DFmode:
      if (cum->float_in_sse == -1)
	error_p = true;
      if (cum->float_in_sse < 2)
	break;
      /* FALLTHRU */
    case E_SFmode:
      if (cum->float_in_sse == -1)
	error_p = true;
      if (cum->float_in_sse < 1)
	break;
      /* FALLTHRU */
    case E_TImode:
      /* In 32-bit mode, TImode is passed in xmm registers.  */
    case E_V16QImode:
    case E_V8HImode:
    case E_V4SImode:
    case E_V2DImode:
    case E_V4SFmode:
    case E_V2DFmode:
      if (!type || !AGGREGATE_TYPE_P (type))
	{
	  if (cum->sse_nregs)
	    return gen_reg_or_parallel (mode, orig_mode,
				        cum->sse_regno + FIRST_SSE_REG);
	}
      break;

    case E_OImode:
    case E_XImode:
      /* OImode and XImode shouldn't be used directly.  */
      gcc_unreachable ();

    case E_V64QImode:
    case E_V32HImode:
    case E_V16SImode:
    case E_V8DImode:
    case E_V16SFmode:
    case E_V8DFmode:
    case E_V8SFmode:
    case E_V8SImode:
    case E_V32QImode:
    case E_V16HImode:
    case E_V4DFmode:
    case E_V4DImode:
      if (!type || !AGGREGATE_TYPE_P (type))
	{
	  if (cum->sse_nregs)
	    return gen_reg_or_parallel (mode, orig_mode,
				        cum->sse_regno + FIRST_SSE_REG);
	}
      break;

    case E_V8QImode:
    case E_V4HImode:
    case E_V2SImode:
    case E_V2SFmode:
    case E_V1TImode:
    case E_V1DImode:
      if (!type || !AGGREGATE_TYPE_P (type))
	{
	  if (cum->mmx_nregs)
	    return gen_reg_or_parallel (mode, orig_mode,
				        cum->mmx_regno + FIRST_MMX_REG);
	}
      break;
    }
  if (error_p)
    {
      cum->float_in_sse = 0;
      error ("calling %qD with SSE calling convention without "
	     "SSE/SSE2 enabled", cum->decl);
      sorry ("this is a GCC bug that can be worked around by adding "
	     "attribute used to function called");
    }

  return NULL_RTX;
}

static rtx
function_arg_64 (const CUMULATIVE_ARGS *cum, machine_mode mode,
		 machine_mode orig_mode, const_tree type, bool named)
{
  /* Handle a hidden AL argument containing the number of SSE registers
     used by the call, for varargs x86-64 functions.  */
  if (mode == VOIDmode)
    return GEN_INT (cum->maybe_vaarg
		    ? (cum->sse_nregs < 0
		       ? X86_64_SSE_REGPARM_MAX
		       : cum->sse_regno)
		    : -1);

  switch (mode)
    {
    default:
      break;

    case E_V8SFmode:
    case E_V8SImode:
    case E_V32QImode:
    case E_V16HImode:
    case E_V4DFmode:
    case E_V4DImode:
    case E_V16SFmode:
    case E_V16SImode:
    case E_V64QImode:
    case E_V32HImode:
    case E_V8DFmode:
    case E_V8DImode:
      /* Unnamed 256- and 512-bit vector mode parameters are passed on the stack.  */
      if (!named)
	return NULL;
      break;
    }

  return construct_container (mode, orig_mode, type, 0, cum->nregs,
			      cum->sse_nregs,
			      &x86_64_int_parameter_registers [cum->regno],
			      cum->sse_regno);
}

static rtx
function_arg_ms_64 (const CUMULATIVE_ARGS *cum, machine_mode mode,
		    machine_mode orig_mode, bool named,
		    HOST_WIDE_INT bytes)
{
  unsigned int regno;

  /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
     The value -2 specifies that the current function call uses the MS ABI.  */
  if (mode == VOIDmode)
    return GEN_INT (-2);

  /* If we've run out of registers, it goes on the stack.  */
  if (cum->nregs == 0)
    return NULL_RTX;

  regno = x86_64_ms_abi_int_parameter_registers[cum->regno];

  /* Only floating point modes are passed in anything but integer regs.  */
  if (TARGET_SSE && (mode == SFmode || mode == DFmode))
    {
      if (named)
	regno = cum->regno + FIRST_SSE_REG;
      else
	{
	  rtx t1, t2;

	  /* Unnamed floating parameters are passed in both the
	     SSE and integer registers.  */
	  t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
	  t2 = gen_rtx_REG (mode, regno);
	  t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
	  t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
	  return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
	}
    }
  /* Handle aggregate types passed in registers.  */
  if (orig_mode == BLKmode)
    {
      if (bytes > 0 && bytes <= 8)
        mode = (bytes > 4 ? DImode : SImode);
      if (mode == BLKmode)
        mode = DImode;
    }

  return gen_reg_or_parallel (mode, orig_mode, regno);
}
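
/* Example (illustrative): in a variadic MS-ABI call such as
   f (fmt, 3.14), the unnamed double is described by the PARALLEL
   built above naming both %xmm1 and %rdx, so the caller loads it into
   both registers as the convention requires.  */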

/* Return where to put the arguments to a function.
   Return zero to push the argument on the stack, or a hard register
   in which to store the argument.

   MODE is the argument's machine mode.  TYPE is the data type of the
   argument.  It is null for libcalls where that information may not be
   available.  CUM gives information about the preceding args and about
   the function being called.  NAMED is nonzero if this argument is a
   named parameter (otherwise it is an extra parameter matching an
   ellipsis).  */

static rtx
ix86_function_arg (cumulative_args_t cum_v, machine_mode omode,
		   const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  machine_mode mode = omode;
  HOST_WIDE_INT bytes, words;
  rtx arg;

  if (!cum->caller && cfun->machine->func_type != TYPE_NORMAL)
    {
      gcc_assert (type != NULL_TREE);
      if (POINTER_TYPE_P (type))
	{
	  /* This is the pointer argument.  */
	  gcc_assert (TYPE_MODE (type) == Pmode);
	  /* It is at -WORD(AP) in the current frame in interrupt and
	     exception handlers.  */
	  arg = plus_constant (Pmode, arg_pointer_rtx, -UNITS_PER_WORD);
	}
      else
	{
	  gcc_assert (cfun->machine->func_type == TYPE_EXCEPTION
		      && TREE_CODE (type) == INTEGER_TYPE
		      && TYPE_MODE (type) == word_mode);
	  /* The error code is the word-mode integer argument at
	     -2 * WORD(AP) in the current frame of the exception
	     handler.  */
	  arg = gen_rtx_MEM (word_mode,
			     plus_constant (Pmode,
					    arg_pointer_rtx,
					    -2 * UNITS_PER_WORD));
	}
      return arg;
    }

  if (mode == BLKmode)
    bytes = int_size_in_bytes (type);
  else
    bytes = GET_MODE_SIZE (mode);
  words = CEIL (bytes, UNITS_PER_WORD);

  /* To simplify the code below, represent vector types with a vector mode
     even if MMX/SSE are not active.  */
  if (type && TREE_CODE (type) == VECTOR_TYPE)
    mode = type_natural_mode (type, cum, false);

  if (TARGET_64BIT)
    {
      enum calling_abi call_abi = cum ? cum->call_abi : ix86_abi;

      if (call_abi == MS_ABI)
	arg = function_arg_ms_64 (cum, mode, omode, named, bytes);
      else
	arg = function_arg_64 (cum, mode, omode, type, named);
    }
  else
    arg = function_arg_32 (cum, mode, omode, type, bytes, words);

  /* Track if there are outgoing arguments on stack.  */
  if (arg == NULL_RTX && cum->caller)
    cfun->machine->outgoing_args_on_stack = true;

  return arg;
}

/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.  */

static bool
ix86_pass_by_reference (cumulative_args_t cum_v, machine_mode mode,
			const_tree type, bool)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  if (TARGET_64BIT)
    {
      enum calling_abi call_abi = cum ? cum->call_abi : ix86_abi;

      /* See Windows x64 Software Convention.  */
      if (call_abi == MS_ABI)
	{
	  HOST_WIDE_INT msize = GET_MODE_SIZE (mode);

	  if (type)
	    {
	      /* Arrays are passed by reference.  */
	      if (TREE_CODE (type) == ARRAY_TYPE)
		return true;

	      if (RECORD_OR_UNION_TYPE_P (type))
		{
		  /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
		     are passed by reference.  */
		  msize = int_size_in_bytes (type);
		}
	    }

	  /* __m128 is passed by reference.  */
	  return msize != 1 && msize != 2 && msize != 4 && msize != 8;
	}
      else if (type && int_size_in_bytes (type) == -1)
	return true;
    }

  return false;
}
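
/* Examples (illustrative): under the MS ABI a 24-byte struct or a
   __m128 argument is passed by reference, since its size is not 1, 2,
   4 or 8 bytes, while under the SysV ABI only variable-sized types
   take this path; large structs end up in memory via the MEMORY
   classification in classify_argument instead.  */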

/* Return true when TYPE should be 128-bit aligned for the 32-bit
   argument passing ABI.  XXX: This function is obsolete and is only
   used for checking psABI compatibility with previous versions of
   GCC.  */

static bool
ix86_compat_aligned_value_p (const_tree type)
{
  machine_mode mode = TYPE_MODE (type);
  if (((TARGET_SSE && SSE_REG_MODE_P (mode))
       || mode == TDmode
       || mode == TFmode
       || mode == TCmode)
      && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
    return true;
  if (TYPE_ALIGN (type) < 128)
    return false;

  if (AGGREGATE_TYPE_P (type))
    {
      /* Walk the aggregates recursively.  */
      switch (TREE_CODE (type))
	{
	case RECORD_TYPE:
	case UNION_TYPE:
	case QUAL_UNION_TYPE:
	  {
	    tree field;

	    /* Walk all the structure fields.  */
	    for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	      {
		if (TREE_CODE (field) == FIELD_DECL
		    && ix86_compat_aligned_value_p (TREE_TYPE (field)))
		  return true;
	      }
	    break;
	  }

	case ARRAY_TYPE:
	  /* Just in case some language passes arrays by value.  */
	  if (ix86_compat_aligned_value_p (TREE_TYPE (type)))
	    return true;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  return false;
}

/* Return the alignment boundary for MODE and TYPE with alignment ALIGN.
   XXX: This function is obsolete and is only used for checking psABI
   compatibility with previous versions of GCC.  */

static unsigned int
ix86_compat_function_arg_boundary (machine_mode mode,
				   const_tree type, unsigned int align)
{
  /* In 32-bit mode, only _Decimal128 and __float128 are aligned to
     their natural boundaries.  */
  if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
    {
      /* i386 ABI defines all arguments to be 4 byte aligned.  We have to
	 make an exception for SSE modes since these require 128bit
	 alignment.

	 The handling here differs from field_alignment.  ICC aligns MMX
	 arguments to 4 byte boundaries, while structure fields are aligned
	 to 8 byte boundaries.  */
      if (!type)
	{
	  if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
	    align = PARM_BOUNDARY;
	}
      else
	{
	  if (!ix86_compat_aligned_value_p (type))
	    align = PARM_BOUNDARY;
	}
    }
  if (align > BIGGEST_ALIGNMENT)
    align = BIGGEST_ALIGNMENT;
  return align;
}

/* Return true when TYPE should be 128-bit aligned for the 32-bit
   argument passing ABI.  */

static bool
ix86_contains_aligned_value_p (const_tree type)
{
  machine_mode mode = TYPE_MODE (type);

  if (mode == XFmode || mode == XCmode)
    return false;

  if (TYPE_ALIGN (type) < 128)
    return false;

  if (AGGREGATE_TYPE_P (type))
    {
      /* Walk the aggregates recursively.  */
      switch (TREE_CODE (type))
	{
	case RECORD_TYPE:
	case UNION_TYPE:
	case QUAL_UNION_TYPE:
	  {
	    tree field;

	    /* Walk all the structure fields.  */
	    for (field = TYPE_FIELDS (type);
		 field;
		 field = DECL_CHAIN (field))
	      {
		if (TREE_CODE (field) == FIELD_DECL
		    && ix86_contains_aligned_value_p (TREE_TYPE (field)))
		  return true;
	      }
	    break;
	  }

	case ARRAY_TYPE:
	  /* Just in case some language passes arrays by value.  */
	  if (ix86_contains_aligned_value_p (TREE_TYPE (type)))
	    return true;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    return TYPE_ALIGN (type) >= 128;

  return false;
}

/* Gives the alignment boundary, in bits, of an argument with the
   specified mode and type.  */

static unsigned int
ix86_function_arg_boundary (machine_mode mode, const_tree type)
{
  unsigned int align;
  if (type)
    {
      /* Since the main variant type is used for the call, convert the
	 type to its main variant.  */
      type = TYPE_MAIN_VARIANT (type);
      align = TYPE_ALIGN (type);
      if (TYPE_EMPTY_P (type))
	return PARM_BOUNDARY;
    }
  else
    align = GET_MODE_ALIGNMENT (mode);
  if (align < PARM_BOUNDARY)
    align = PARM_BOUNDARY;
  else
    {
      static bool warned;
      unsigned int saved_align = align;

      if (!TARGET_64BIT)
	{
	  /* i386 ABI defines XFmode arguments to be 4 byte aligned.  */
	  if (!type)
	    {
	      if (mode == XFmode || mode == XCmode)
		align = PARM_BOUNDARY;
	    }
	  else if (!ix86_contains_aligned_value_p (type))
	    align = PARM_BOUNDARY;

	  if (align < 128)
	    align = PARM_BOUNDARY;
	}

      if (warn_psabi
	  && !warned
	  && align != ix86_compat_function_arg_boundary (mode, type,
							 saved_align))
	{
	  warned = true;
	  inform (input_location,
		  "The ABI for passing parameters with %d-byte"
		  " alignment has changed in GCC 4.6",
		  align / BITS_PER_UNIT);
	}
    }

  return align;
}
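
/* Examples (illustrative, 32-bit): a double argument is aligned to
   PARM_BOUNDARY (32 bits), while a __m128 argument keeps its 128-bit
   alignment, matching the SSE exception described in
   ix86_compat_function_arg_boundary above.  */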

/* Return true if REGNO is a possible register number of a function
   value.  */

static bool
ix86_function_value_regno_p (const unsigned int regno)
{
  switch (regno)
    {
    case AX_REG:
      return true;
    case DX_REG:
      return (!TARGET_64BIT || ix86_cfun_abi () != MS_ABI);
    case DI_REG:
    case SI_REG:
      return TARGET_64BIT && ix86_cfun_abi () != MS_ABI;

      /* Complex values are returned in %st(0)/%st(1) pair.  */
    case ST0_REG:
    case ST1_REG:
      /* TODO: The function should depend on the current function's ABI,
	 but builtins.c would need updating then.  Therefore we use the
	 default ABI.  */
      if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
	return false;
      return TARGET_FLOAT_RETURNS_IN_80387;

      /* Complex values are returned in %xmm0/%xmm1 pair.  */
    case XMM0_REG:
    case XMM1_REG:
      return TARGET_SSE;

    case MM0_REG:
      if (TARGET_MACHO || TARGET_64BIT)
	return false;
      return TARGET_MMX;
    }

  return false;
}

/* Define how to find the value returned by a function.
   VALTYPE is the data type of the value (as a tree).
   If the precise function being called is known, FUNC is its FUNCTION_DECL;
   otherwise, FUNC is 0.  */

static rtx
function_value_32 (machine_mode orig_mode, machine_mode mode,
		   const_tree fntype, const_tree fn)
{
  unsigned int regno;

  /* 8-byte vector modes in %mm0.  See ix86_return_in_memory for where
     we normally prevent this case when MMX is not available.  However
     some ABIs may require the result to be returned as if it had DImode.  */
  if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
    regno = FIRST_MMX_REG;

  /* 16-byte vector modes in %xmm0.  See ix86_return_in_memory for where
     we prevent this case when SSE is not available.  However some ABIs
     may require the result to be returned as if it had integer TImode.  */
  else if (mode == TImode
	   || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
    regno = FIRST_SSE_REG;

  /* 32-byte vector modes in %ymm0.   */
  else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
    regno = FIRST_SSE_REG;

  /* 64-byte vector modes in %zmm0.   */
  else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 64)
    regno = FIRST_SSE_REG;

  /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387).  */
  else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
    regno = FIRST_FLOAT_REG;
  else
    /* Most things go in %eax.  */
    regno = AX_REG;

  /* Override FP return register with %xmm0 for local functions when
     SSE math is enabled or for functions with sseregparm attribute.  */
  if ((fn || fntype) && (mode == SFmode || mode == DFmode))
    {
      int sse_level = ix86_function_sseregparm (fntype, fn, false);
      if (sse_level == -1)
	{
	  error ("calling %qD with SSE calling convention without "
		 "SSE/SSE2 enabled", fn);
	  sorry ("this is a GCC bug that can be worked around by adding "
		 "attribute used to function called");
	}
      else if ((sse_level >= 1 && mode == SFmode)
	       || (sse_level == 2 && mode == DFmode))
	regno = FIRST_SSE_REG;
    }

  /* OImode shouldn't be used directly.  */
  gcc_assert (mode != OImode);

  return gen_rtx_REG (orig_mode, regno);
}

static rtx
function_value_64 (machine_mode orig_mode, machine_mode mode,
		   const_tree valtype)
{
  rtx ret;

  /* Handle libcalls, which don't provide a type node.  */
  if (valtype == NULL)
    {
      unsigned int regno;

      switch (mode)
	{
	case E_SFmode:
	case E_SCmode:
	case E_DFmode:
	case E_DCmode:
	case E_TFmode:
	case E_SDmode:
	case E_DDmode:
	case E_TDmode:
	  regno = FIRST_SSE_REG;
	  break;
	case E_XFmode:
	case E_XCmode:
	  regno = FIRST_FLOAT_REG;
	  break;
	case E_TCmode:
	  return NULL;
	default:
	  regno = AX_REG;
	}

      return gen_rtx_REG (mode, regno);
    }
  else if (POINTER_TYPE_P (valtype))
    {
      /* Pointers are always returned in word_mode.  */
      mode = word_mode;
    }

  ret = construct_container (mode, orig_mode, valtype, 1,
			     X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
			     x86_64_int_return_registers, 0);

  /* For zero sized structures, construct_container returns NULL, but we
     need to keep rest of compiler happy by returning meaningful value.  */
  if (!ret)
    ret = gen_rtx_REG (orig_mode, AX_REG);

  return ret;
}

static rtx
function_value_ms_64 (machine_mode orig_mode, machine_mode mode,
		      const_tree valtype)
{
  unsigned int regno = AX_REG;

  if (TARGET_SSE)
    {
      switch (GET_MODE_SIZE (mode))
	{
	case 16:
	  if (valtype != NULL_TREE
	      && !VECTOR_INTEGER_TYPE_P (valtype)
	      && !INTEGRAL_TYPE_P (valtype)
	      && !VECTOR_FLOAT_TYPE_P (valtype))
	    break;
	  if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
	      && !COMPLEX_MODE_P (mode))
	    regno = FIRST_SSE_REG;
	  break;
	case 8:
	case 4:
	  if (mode == SFmode || mode == DFmode)
	    regno = FIRST_SSE_REG;
	  break;
	default:
	  break;
        }
    }
  return gen_rtx_REG (orig_mode, regno);
}

static rtx
ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
		       machine_mode orig_mode, machine_mode mode)
{
  const_tree fn, fntype;

  fn = NULL_TREE;
  if (fntype_or_decl && DECL_P (fntype_or_decl))
    fn = fntype_or_decl;
  fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;

  if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
    return function_value_ms_64 (orig_mode, mode, valtype);
  else if (TARGET_64BIT)
    return function_value_64 (orig_mode, mode, valtype);
  else
    return function_value_32 (orig_mode, mode, fntype, fn);
}

static rtx
ix86_function_value (const_tree valtype, const_tree fntype_or_decl, bool)
{
  machine_mode mode, orig_mode;

  orig_mode = TYPE_MODE (valtype);
  mode = type_natural_mode (valtype, NULL, true);
  return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
}
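
/* Examples (illustrative): a double is returned in %xmm0 under both
   64-bit ABIs but in %st(0) on 32-bit targets (unless
   -mno-fp-ret-in-387); struct { double d; int i; } is returned in
   %xmm0/%rax under the SysV ABI via construct_container, and in
   memory under the MS ABI since its 16-byte BLKmode value is not a
   vector.  */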

/* Pointer function arguments and return values are promoted to
   word_mode for normal functions.  */

static machine_mode
ix86_promote_function_mode (const_tree type, machine_mode mode,
			    int *punsignedp, const_tree fntype,
			    int for_return)
{
  if (cfun->machine->func_type == TYPE_NORMAL
      && type != NULL_TREE
      && POINTER_TYPE_P (type))
    {
      *punsignedp = POINTERS_EXTEND_UNSIGNED;
      return word_mode;
    }
  return default_promote_function_mode (type, mode, punsignedp, fntype,
					for_return);
}

/* Return true if a structure, union or array of mode MODE containing
   FIELD should be accessed using BLKmode.  */

static bool
ix86_member_type_forces_blk (const_tree field, machine_mode mode)
{
  /* Union with XFmode must be in BLKmode.  */
  return (mode == XFmode
	  && (TREE_CODE (DECL_FIELD_CONTEXT (field)) == UNION_TYPE
	      || TREE_CODE (DECL_FIELD_CONTEXT (field)) == QUAL_UNION_TYPE));
}

rtx
ix86_libcall_value (machine_mode mode)
{
  return ix86_function_value_1 (NULL, NULL, mode, mode);
}

/* Return true iff TYPE is returned in memory.  */

static bool
ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
#ifdef SUBTARGET_RETURN_IN_MEMORY
  return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
#else
  const machine_mode mode = type_natural_mode (type, NULL, true);
  HOST_WIDE_INT size;

  if (TARGET_64BIT)
    {
      if (ix86_function_type_abi (fntype) == MS_ABI)
	{
	  size = int_size_in_bytes (type);

	  /* __m128 is returned in xmm0.  */
	  if ((!type || VECTOR_INTEGER_TYPE_P (type)
	       || INTEGRAL_TYPE_P (type)
	       || VECTOR_FLOAT_TYPE_P (type))
	      && (SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
	      && !COMPLEX_MODE_P (mode)
	      && (GET_MODE_SIZE (mode) == 16 || size == 16))
	    return false;

	  /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes.  */
	  return size != 1 && size != 2 && size != 4 && size != 8;
	}
      else
	{
	  int needed_intregs, needed_sseregs;

	  return examine_argument (mode, type, 1,
				   &needed_intregs, &needed_sseregs);
	}
    }
  else
    {
      size = int_size_in_bytes (type);

      /* Intel MCU psABI returns scalars and aggregates no larger than 8
	 bytes in registers.  */
      if (TARGET_IAMCU)
	return VECTOR_MODE_P (mode) || size < 0 || size > 8;

      if (mode == BLKmode)
	return true;

      if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
	return false;

      if (VECTOR_MODE_P (mode) || mode == TImode)
	{
	  /* User-created vectors small enough to fit in EAX.  */
	  if (size < 8)
	    return false;

	  /* Unless the ABI prescribes otherwise,
	     MMX/3dNow values are returned in MM0 if available.  */
	  if (size == 8)
	    return TARGET_VECT8_RETURNS || !TARGET_MMX;

	  /* SSE values are returned in XMM0 if available.  */
	  if (size == 16)
	    return !TARGET_SSE;

	  /* AVX values are returned in YMM0 if available.  */
	  if (size == 32)
	    return !TARGET_AVX;

	  /* AVX512F values are returned in ZMM0 if available.  */
	  if (size == 64)
	    return !TARGET_AVX512F;
	}

      if (mode == XFmode)
	return false;

      if (size > 12)
	return true;

      /* OImode shouldn't be used directly.  */
      gcc_assert (mode != OImode);

      return false;
    }
#endif
}
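
/* Examples (illustrative, generic 32-bit): a 12-byte struct is
   returned in memory (BLKmode), a __m128 in %xmm0 when SSE is
   enabled, and long double (XFmode) in %st(0).  */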


/* Create the va_list data type.  */

static tree
ix86_build_builtin_va_list_64 (void)
{
  tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;

  record = lang_hooks.types.make_type (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION,
			  TYPE_DECL, get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("gp_offset"),
		      unsigned_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("fp_offset"),
		      unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("overflow_arg_area"),
		      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION,
		      FIELD_DECL, get_identifier ("reg_save_area"),
		      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  TYPE_ATTRIBUTES (record) = tree_cons (get_identifier ("sysv_abi va_list"),
					NULL_TREE, TYPE_ATTRIBUTES (record));

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
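
/* For reference, the record built above matches the va_list definition
   given in the SysV x86-64 psABI:

     typedef struct {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } va_list[1];

   gp_offset is the byte offset of the next free GPR slot in the
   register save area (0 .. 48), fp_offset that of the next SSE slot
   (48 .. 176).  */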

/* Set up the builtin va_list data type and, for 64-bit, the additional
   calling-convention-specific va_list data types.  */

static tree
ix86_build_builtin_va_list (void)
{
  if (TARGET_64BIT)
    {
      /* Initialize ABI specific va_list builtin types.

	 In lto1, we can encounter two va_list types:
	 - one as a result of the type-merge across TUs, and
	 - the one constructed here.
	 These two types will not have the same TYPE_MAIN_VARIANT, and therefore
	 a type identity check in canonical_va_list_type based on
	 TYPE_MAIN_VARIANT (which we used to have) will not work.
	 Instead, we tag each va_list_type_node with its unique attribute, and
	 look for the attribute in the type identity check in
	 canonical_va_list_type.

	 Tagging sysv_va_list_type_node directly with the attribute is
	 problematic since it's an array of one record, which decays into
	 a pointer to the record when used as a parameter (see the
	 build_va_arg comments for an example), dropping the attribute in
	 the process.  So we tag the record instead.  */

      /* For SYSV_ABI we use an array of one record.  */
      sysv_va_list_type_node = ix86_build_builtin_va_list_64 ();

      /* For MS_ABI we use a plain pointer to the argument area.  */
      tree char_ptr_type = build_pointer_type (char_type_node);
      tree attr = tree_cons (get_identifier ("ms_abi va_list"), NULL_TREE,
			     TYPE_ATTRIBUTES (char_ptr_type));
      ms_va_list_type_node = build_type_attribute_variant (char_ptr_type, attr);

      return ((ix86_abi == MS_ABI)
	      ? ms_va_list_type_node
	      : sysv_va_list_type_node);
    }
  else
    {
      /* For i386 we use a plain pointer to the argument area.  */
      return build_pointer_type (char_type_node);
    }
}

/* Worker function for TARGET_SETUP_INCOMING_VARARGS.  */

static void
setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
{
  rtx save_area, mem;
  alias_set_type set;
  int i, max;

  /* GPR size of varargs save area.  */
  if (cfun->va_list_gpr_size)
    ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
  else
    ix86_varargs_gpr_size = 0;

  /* FPR size of varargs save area.  We don't need it if we don't pass
     anything in SSE registers.  */
  if (TARGET_SSE && cfun->va_list_fpr_size)
    ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
  else
    ix86_varargs_fpr_size = 0;

  if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
    return;

  save_area = frame_pointer_rtx;
  set = get_varargs_alias_set ();

  max = cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
  if (max > X86_64_REGPARM_MAX)
    max = X86_64_REGPARM_MAX;

  for (i = cum->regno; i < max; i++)
    {
      mem = gen_rtx_MEM (word_mode,
			 plus_constant (Pmode, save_area, i * UNITS_PER_WORD));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      emit_move_insn (mem,
		      gen_rtx_REG (word_mode,
				   x86_64_int_parameter_registers[i]));
    }

  if (ix86_varargs_fpr_size)
    {
      machine_mode smode;
      rtx_code_label *label;
      rtx test;

      /* Now emit code to save SSE registers.  The AX parameter contains
	 the number of SSE parameter registers used to call this function,
	 though all we actually check here is the zero/non-zero status.  */

      label = gen_label_rtx ();
      test = gen_rtx_EQ (VOIDmode, gen_rtx_REG (QImode, AX_REG), const0_rtx);
      emit_jump_insn (gen_cbranchqi4 (test, XEXP (test, 0), XEXP (test, 1),
				      label));

      /* ??? If !TARGET_SSE_TYPELESS_STORES, would we perform better if
	 we used movdqa (i.e. TImode) instead?  Perhaps even better would
	 be if we could determine the real mode of the data, via a hook
	 into pass_stdarg.  Ignore all that for now.  */
      smode = V4SFmode;
      if (crtl->stack_alignment_needed < GET_MODE_ALIGNMENT (smode))
	crtl->stack_alignment_needed = GET_MODE_ALIGNMENT (smode);

      max = cum->sse_regno + cfun->va_list_fpr_size / 16;
      if (max > X86_64_SSE_REGPARM_MAX)
	max = X86_64_SSE_REGPARM_MAX;

      for (i = cum->sse_regno; i < max; ++i)
	{
	  mem = plus_constant (Pmode, save_area,
			       i * 16 + ix86_varargs_gpr_size);
	  mem = gen_rtx_MEM (smode, mem);
	  MEM_NOTRAP_P (mem) = 1;
	  set_mem_alias_set (mem, set);
	  set_mem_align (mem, GET_MODE_ALIGNMENT (smode));

	  emit_move_insn (mem, gen_rtx_REG (smode, GET_SSE_REGNO (i)));
	}

      emit_label (label);
    }
}
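
/* Layout of the register save area filled in above (illustrative,
   assuming both areas are allocated):

     bytes   0 ..  47:  %rdi, %rsi, %rdx, %rcx, %r8, %r9
     bytes  48 .. 175:  %xmm0 .. %xmm7, 16 bytes each

   The SSE stores are branched around at run time when %al is zero,
   i.e. when the caller passed no SSE arguments.  */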

static void
setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
{
  alias_set_type set = get_varargs_alias_set ();
  int i;

  /* Reset to zero, as a sysv va_arg may have been used before.  */
  ix86_varargs_gpr_size = 0;
  ix86_varargs_fpr_size = 0;

  for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
    {
      rtx reg, mem;

      mem = gen_rtx_MEM (Pmode,
			 plus_constant (Pmode, virtual_incoming_args_rtx,
					i * UNITS_PER_WORD));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);

      reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
      emit_move_insn (mem, reg);
    }
}

static void
ix86_setup_incoming_varargs (cumulative_args_t cum_v, machine_mode mode,
			     tree type, int *, int no_rtl)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  CUMULATIVE_ARGS next_cum;
  tree fntype;

  /* This argument doesn't appear to be used anymore, which is good,
     because the old code here didn't suppress rtl generation.  */
  gcc_assert (!no_rtl);

  if (!TARGET_64BIT)
    return;

  fntype = TREE_TYPE (current_function_decl);

  /* For varargs, we do not want to skip the dummy va_dcl argument.
     For stdargs, we do want to skip the last named argument.  */
  next_cum = *cum;
  if (stdarg_p (fntype))
    ix86_function_arg_advance (pack_cumulative_args (&next_cum), mode, type,
			       true);

  if (cum->call_abi == MS_ABI)
    setup_incoming_varargs_ms_64 (&next_cum);
  else
    setup_incoming_varargs_64 (&next_cum);
}

static void
ix86_setup_incoming_vararg_bounds (cumulative_args_t cum_v,
				   machine_mode mode,
				   tree type,
				   int *pretend_size ATTRIBUTE_UNUSED,
				   int no_rtl)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  CUMULATIVE_ARGS next_cum;
  tree fntype;
  int max;

  gcc_assert (!no_rtl);

  /* Do nothing if we use a plain pointer to the argument area.  */
  if (!TARGET_64BIT || cum->call_abi == MS_ABI)
    return;

  fntype = TREE_TYPE (current_function_decl);

  /* For varargs, we do not want to skip the dummy va_dcl argument.
     For stdargs, we do want to skip the last named argument.  */
  next_cum = *cum;
  if (stdarg_p (fntype))
    ix86_function_arg_advance (pack_cumulative_args (&next_cum), mode, type,
			       true);

  max = cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
  if (max > X86_64_REGPARM_MAX)
    max = X86_64_REGPARM_MAX;
}


/* Return true if TYPE is a va_list of the plain char * kind.  */

static bool
is_va_list_char_pointer (tree type)
{
  tree canonic;

  /* For 32-bit it is always true.  */
  if (!TARGET_64BIT)
    return true;
  canonic = ix86_canonical_va_list_type (type);
  return (canonic == ms_va_list_type_node
          || (ix86_abi == MS_ABI && canonic == va_list_type_node));
}

/* Implement va_start.  */

static void
ix86_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;
  tree type;
  rtx ovf_rtx;

  if (flag_split_stack
      && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
    {
      unsigned int scratch_regno;

      /* When we are splitting the stack, we can't refer to the stack
	 arguments using internal_arg_pointer, because they may be on
	 the old stack.  The split stack prologue will arrange to
	 leave a pointer to the old stack arguments in a scratch
	 register, which we here copy to a pseudo-register.  The split
	 stack prologue can't set the pseudo-register directly because
	 it (the prologue) runs before any registers have been saved.  */

      scratch_regno = split_stack_prologue_scratch_regno ();
      if (scratch_regno != INVALID_REGNUM)
	{
	  rtx reg;
	  rtx_insn *seq;

	  reg = gen_reg_rtx (Pmode);
	  cfun->machine->split_stack_varargs_pointer = reg;

	  start_sequence ();
	  emit_move_insn (reg, gen_rtx_REG (Pmode, scratch_regno));
	  seq = get_insns ();
	  end_sequence ();

	  push_topmost_sequence ();
	  emit_insn_after (seq, entry_of_function ());
	  pop_topmost_sequence ();
	}
    }

  /* Only 64-bit targets need something special.  */
  if (is_va_list_char_pointer (TREE_TYPE (valist)))
    {
      if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
	std_expand_builtin_va_start (valist, nextarg);
      else
	{
	  rtx va_r, next;

	  va_r = expand_expr (valist, NULL_RTX, VOIDmode, EXPAND_WRITE);
	  next = expand_binop (ptr_mode, add_optab,
			       cfun->machine->split_stack_varargs_pointer,
			       crtl->args.arg_offset_rtx,
			       NULL_RTX, 0, OPTAB_LIB_WIDEN);
	  convert_move (va_r, next, 0);
	}
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_ovf = DECL_CHAIN (f_fpr);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  TREE_TYPE (valist) = TREE_TYPE (sysv_va_list_type_node);
  /* The following should be folded into the MEM_REF offset.  */
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), unshare_expr (valist),
		f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = crtl->args.info.regno;
  n_fpr = crtl->args.info.sse_regno;

  if (cfun->va_list_gpr_size)
    {
      type = TREE_TYPE (gpr);
      t = build2 (MODIFY_EXPR, type,
		  gpr, build_int_cst (type, n_gpr * 8));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (TARGET_SSE && cfun->va_list_fpr_size)
    {
      type = TREE_TYPE (fpr);
      t = build2 (MODIFY_EXPR, type, fpr,
		  build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  /* Find the overflow area.  */
  type = TREE_TYPE (ovf);
  if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
    ovf_rtx = crtl->args.internal_arg_pointer;
  else
    ovf_rtx = cfun->machine->split_stack_varargs_pointer;
  t = make_tree (type, ovf_rtx);
  if (words != 0)
    t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);

  t = build2 (MODIFY_EXPR, type, ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
    {
      /* Find the register save area.
	 The function prologue saves it right above the stack frame.  */
      type = TREE_TYPE (sav);
      t = make_tree (type, frame_pointer_rtx);
      if (!ix86_varargs_gpr_size)
	t = fold_build_pointer_plus_hwi (t, -8 * X86_64_REGPARM_MAX);

      t = build2 (MODIFY_EXPR, type, sav, t);
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
}

/* Implement va_arg.  */

static tree
ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
		      gimple_seq *post_p)
{
  static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
  tree f_gpr, f_fpr, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;
  int size, rsize;
  tree lab_false, lab_over = NULL_TREE;
  tree addr, t2;
  rtx container;
  int indirect_p = 0;
  tree ptrtype;
  machine_mode nat_mode;
  unsigned int arg_boundary;

  /* Only the 64-bit target needs something special.  */
  if (is_va_list_char_pointer (TREE_TYPE (valist)))
    return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);

  f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_ovf = DECL_CHAIN (f_fpr);
  f_sav = DECL_CHAIN (f_ovf);

  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
		valist, f_gpr, NULL_TREE);

  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);

  indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
  if (indirect_p)
    type = build_pointer_type (type);
  size = arg_int_size_in_bytes (type);
  rsize = CEIL (size, UNITS_PER_WORD);

  nat_mode = type_natural_mode (type, NULL, false);
  switch (nat_mode)
    {
    case E_V8SFmode:
    case E_V8SImode:
    case E_V32QImode:
    case E_V16HImode:
    case E_V4DFmode:
    case E_V4DImode:
    case E_V16SFmode:
    case E_V16SImode:
    case E_V64QImode:
    case E_V32HImode:
    case E_V8DFmode:
    case E_V8DImode:
      /* Unnamed 256-bit and 512-bit vector mode parameters are passed
	 on stack.  */
      if (!TARGET_64BIT_MS_ABI)
	{
	  container = NULL;
	  break;
	}
      /* FALLTHRU */

    default:
      container = construct_container (nat_mode, TYPE_MODE (type),
				       type, 0, X86_64_REGPARM_MAX,
				       X86_64_SSE_REGPARM_MAX, intreg,
				       0);
      break;
    }

  /* Pull the value out of the saved registers.  */

  addr = create_tmp_var (ptr_type_node, "addr");

  if (container)
    {
      int needed_intregs, needed_sseregs;
      bool need_temp;
      tree int_addr, sse_addr;

      lab_false = create_artificial_label (UNKNOWN_LOCATION);
      lab_over = create_artificial_label (UNKNOWN_LOCATION);

      examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);

      need_temp = (!REG_P (container)
		   && ((needed_intregs && TYPE_ALIGN (type) > 64)
		       || TYPE_ALIGN (type) > 128));

      /* In case we are passing a structure, verify that it is a
	 consecutive block in the register save area.  If not, we need
	 to do moves.  */
      if (!need_temp && !REG_P (container))
	{
	  /* Verify that all registers are strictly consecutive.  */
	  if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
	    {
	      int i;

	      for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
		{
		  rtx slot = XVECEXP (container, 0, i);
		  if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
		      || INTVAL (XEXP (slot, 1)) != i * 16)
		    need_temp = true;
		}
	    }
	  else
	    {
	      int i;

	      for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
		{
		  rtx slot = XVECEXP (container, 0, i);
		  if (REGNO (XEXP (slot, 0)) != (unsigned int) i
		      || INTVAL (XEXP (slot, 1)) != i * 8)
		    need_temp = true;
		}
	    }
	}
      if (!need_temp)
	{
	  int_addr = addr;
	  sse_addr = addr;
	}
      else
	{
	  int_addr = create_tmp_var (ptr_type_node, "int_addr");
	  sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
	}

      /* First ensure that we fit completely in registers.  */
      if (needed_intregs)
	{
	  t = build_int_cst (TREE_TYPE (gpr),
			     (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
	  t = build2 (GE_EXPR, boolean_type_node, gpr, t);
	  t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
	  t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
	  gimplify_and_add (t, pre_p);
	}
      if (needed_sseregs)
	{
	  t = build_int_cst (TREE_TYPE (fpr),
			     (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
			     + X86_64_REGPARM_MAX * 8);
	  t = build2 (GE_EXPR, boolean_type_node, fpr, t);
	  t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
	  t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
	  gimplify_and_add (t, pre_p);
	}
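      /* E.g. with needed_intregs == 2, the GPR threshold above is
	 (6 - 2 + 1) * 8 == 40: once gp_offset reaches 40, only one of
	 the six GPR slots remains, the value no longer fits in
	 registers, and we take the overflow path instead.  */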

      /* Compute index to start of area used for integer regs.  */
      if (needed_intregs)
	{
	  /* int_addr = gpr + sav; */
	  t = fold_build_pointer_plus (sav, gpr);
	  gimplify_assign (int_addr, t, pre_p);
	}
      if (needed_sseregs)
	{
	  /* sse_addr = fpr + sav; */
	  t = fold_build_pointer_plus (sav, fpr);
	  gimplify_assign (sse_addr, t, pre_p);
	}
      if (need_temp)
	{
	  int i, prev_size = 0;
	  tree temp = create_tmp_var (type, "va_arg_tmp");

	  /* addr = &temp; */
	  t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
	  gimplify_assign (addr, t, pre_p);

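	  /* Copy the value piecewise out of the register save area into
	     TEMP.  A trailing piece smaller than its register mode (the
	     tail of a struct) is narrowed to an integer mode when one
	     exists, and otherwise copied with memcpy below.  */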
	  for (i = 0; i < XVECLEN (container, 0); i++)
	    {
	      rtx slot = XVECEXP (container, 0, i);
	      rtx reg = XEXP (slot, 0);
	      machine_mode mode = GET_MODE (reg);
	      tree piece_type;
	      tree addr_type;
	      tree daddr_type;
	      tree src_addr, src;
	      int src_offset;
	      tree dest_addr, dest;
	      int cur_size = GET_MODE_SIZE (mode);

	      gcc_assert (prev_size <= INTVAL (XEXP (slot, 1)));
	      prev_size = INTVAL (XEXP (slot, 1));
	      if (prev_size + cur_size > size)
		{
		  cur_size = size - prev_size;
		  unsigned int nbits = cur_size * BITS_PER_UNIT;
		  if (!int_mode_for_size (nbits, 1).exists (&mode))
		    mode = QImode;
		}
	      piece_type = lang_hooks.types.type_for_mode (mode, 1);
	      if (mode == GET_MODE (reg))
		addr_type = build_pointer_type (piece_type);
	      else
		addr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
							 true);
	      daddr_type = build_pointer_type_for_mode (piece_type, ptr_mode,
							true);

	      if (SSE_REGNO_P (REGNO (reg)))
		{
		  src_addr = sse_addr;
		  src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
		}
	      else
		{
		  src_addr = int_addr;
		  src_offset = REGNO (reg) * 8;
		}
	      src_addr = fold_convert (addr_type, src_addr);
	      src_addr = fold_build_pointer_plus_hwi (src_addr, src_offset);

	      dest_addr = fold_convert (daddr_type, addr);
	      dest_addr = fold_build_pointer_plus_hwi (dest_addr, prev_size);
	      if (cur_size == GET_MODE_SIZE (mode))
		{
		  src = build_va_arg_indirect_ref (src_addr);
		  dest = build_va_arg_indirect_ref (dest_addr);

		  gimplify_assign (dest, src, pre_p);
		}
	      else
		{
		  tree copy
		    = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
				       3, dest_addr, src_addr,
				       size_int (cur_size));
		  gimplify_and_add (copy, pre_p);
		}
	      prev_size += cur_size;
	    }
	}

      if (needed_intregs)
	{
	  t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
		      build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
	  gimplify_assign (gpr, t, pre_p);
	}

      if (needed_sseregs)
	{
	  t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
		      build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
	  gimplify_assign (unshare_expr (fpr), t, pre_p);
	}

      gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

      gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
    }

  /* ... otherwise out of the overflow area.  */

  /* When we align parameter on stack for caller, if the parameter
     alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
     aligned at MAX_SUPPORTED_STACK_ALIGNMENT.  We will match callee
     here with caller.  */
  arg_boundary = ix86_function_arg_boundary (VOIDmode, type);
  if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
    arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;

  /* Care for on-stack alignment if needed.  */
  if (arg_boundary <= 64 || size == 0)
    t = ovf;
  else
    {
      HOST_WIDE_INT align = arg_boundary / 8;
      t = fold_build_pointer_plus_hwi (ovf, align - 1);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
		  build_int_cst (TREE_TYPE (t), -align));
    }
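  /* The else arm above is the standard round-up idiom
     t = (ovf + align - 1) & -align; e.g. with a 32-byte boundary
     (arg_boundary == 256), an overflow pointer ending in 0x28 is
     rounded up to end in 0x40.  */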

  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
  gimplify_assign (addr, t, pre_p);

  t = fold_build_pointer_plus_hwi (t, rsize * UNITS_PER_WORD);
  gimplify_assign (unshare_expr (ovf), t, pre_p);

  if (container)
    gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));

  ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
  addr = fold_convert (ptrtype, addr);

  if (indirect_p)
    addr = build_va_arg_indirect_ref (addr);
  return build_va_arg_indirect_ref (addr);
}

/* Return true if OPNUM's MEM should be matched
   in movabs* patterns.  */

bool
ix86_check_movabs (rtx insn, int opnum)
{
  rtx set, mem;

  set = PATTERN (insn);
  if (GET_CODE (set) == PARALLEL)
    set = XVECEXP (set, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  mem = XEXP (set, opnum);
  while (SUBREG_P (mem))
    mem = SUBREG_REG (mem);
  gcc_assert (MEM_P (mem));
  return volatile_ok || !MEM_VOLATILE_P (mem);
}

/* Return false if INSN contains a MEM with a non-default address space.  */
bool
ix86_check_no_addr_space (rtx insn)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), ALL)
    {
      rtx x = *iter;
      if (MEM_P (x) && !ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x)))
	return false;
    }
  return true;
}

/* Initialize the table of extra 80387 mathematical constants.  */

static void
init_ext_80387_constants (void)
{
  static const char * cst[5] =
  {
    "0.3010299956639811952256464283594894482",  /* 0: fldlg2  */
    "0.6931471805599453094286904741849753009",  /* 1: fldln2  */
    "1.4426950408889634073876517827983434472",  /* 2: fldl2e  */
    "3.3219280948873623478083405569094566090",  /* 3: fldl2t  */
    "3.1415926535897932385128089594061862044",  /* 4: fldpi   */
  };
  int i;

  for (i = 0; i < 5; i++)
    {
      real_from_string (&ext_80387_constants_table[i], cst[i]);
      /* Ensure each constant is rounded to XFmode precision.  */
      real_convert (&ext_80387_constants_table[i],
		    XFmode, &ext_80387_constants_table[i]);
    }

  ext_80387_constants_init = 1;
}

/* Return nonzero if constant X can be loaded by a special 80387
   instruction: -1 if X is not an 80387 constant at all, 0 if no
   special instruction applies, 1 for fldz, 2 for fld1, 3..7 for the
   extended constants loaded by fldlg2/fldln2/fldl2e/fldl2t/fldpi,
   and 8 or 9 for -0.0 and -1.0, which are later split into
   fldz/fld1 followed by fchs.  */

int
standard_80387_constant_p (rtx x)
{
  machine_mode mode = GET_MODE (x);

  const REAL_VALUE_TYPE *r;

  if (!(CONST_DOUBLE_P (x) && X87_FLOAT_MODE_P (mode)))
    return -1;

  if (x == CONST0_RTX (mode))
    return 1;
  if (x == CONST1_RTX (mode))
    return 2;

  r = CONST_DOUBLE_REAL_VALUE (x);

  /* For XFmode constants, try to find a special 80387 instruction when
     optimizing for size or on those CPUs that benefit from them.  */
  if (mode == XFmode
      && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
    {
      int i;

      if (! ext_80387_constants_init)
	init_ext_80387_constants ();

      for (i = 0; i < 5; i++)
        if (real_identical (r, &ext_80387_constants_table[i]))
	  return i + 3;
    }

  /* Load of the constant -0.0 or -1.0 will be split as
     fldz;fchs or fld1;fchs sequence.  */
  if (real_isnegzero (r))
    return 8;
  if (real_identical (r, &dconstm1))
    return 9;

  return 0;
}

/* Return the opcode of the special instruction to be used to load
   the constant X.  */

const char *
standard_80387_constant_opcode (rtx x)
{
  switch (standard_80387_constant_p (x))
    {
    case 1:
      return "fldz";
    case 2:
      return "fld1";
    case 3:
      return "fldlg2";
    case 4:
      return "fldln2";
    case 5:
      return "fldl2e";
    case 6:
      return "fldl2t";
    case 7:
      return "fldpi";
    case 8:
    case 9:
      return "#";
    default:
      gcc_unreachable ();
    }
}

/* Return the CONST_DOUBLE representing the 80387 constant that is
   loaded by the specified special instruction.  The argument IDX
   matches the return value from standard_80387_constant_p.  */

rtx
standard_80387_constant_rtx (int idx)
{
  int i;

  if (! ext_80387_constants_init)
    init_ext_80387_constants ();

  switch (idx)
    {
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
      i = idx - 3;
      break;

    default:
      gcc_unreachable ();
    }

  return const_double_from_real_value (ext_80387_constants_table[i],
				       XFmode);
}

/* Return 1 if X is all zero bits and 2 if X is all one bits in a
   supported SSE/AVX vector mode, 0 otherwise.  PRED_MODE supplies
   the mode when X is a VOIDmode integer constant.  */

int
standard_sse_constant_p (rtx x, machine_mode pred_mode)
{
  machine_mode mode;

  if (!TARGET_SSE)
    return 0;

  mode = GET_MODE (x);

  if (x == const0_rtx || const0_operand (x, mode))
    return 1;

  if (x == constm1_rtx || vector_all_ones_operand (x, mode))
    {
      /* VOIDmode integer constant, get mode from the predicate.  */
      if (mode == VOIDmode)
	mode = pred_mode;

      switch (GET_MODE_SIZE (mode))
	{
	case 64:
	  if (TARGET_AVX512F)
	    return 2;
	  break;
	case 32:
	  if (TARGET_AVX2)
	    return 2;
	  break;
	case 16:
	  if (TARGET_SSE2)
	    return 2;
	  break;
	case 0:
	  /* VOIDmode */
	  gcc_unreachable ();
	default:
	  break;
	}
    }

  return 0;
}

/* Return the opcode of the special instruction to be used to load
   the constant operands[1] into operands[0].  */

const char *
standard_sse_constant_opcode (rtx_insn *insn, rtx *operands)
{
  machine_mode mode;
  rtx x = operands[1];

  gcc_assert (TARGET_SSE);

  mode = GET_MODE (x);

  if (x == const0_rtx || const0_operand (x, mode))
    {
      switch (get_attr_mode (insn))
	{
	case MODE_TI:
	  if (!EXT_REX_SSE_REG_P (operands[0]))
	    return "%vpxor\t%0, %d0";
	  /* FALLTHRU */
	case MODE_XI:
	case MODE_OI:
	  if (EXT_REX_SSE_REG_P (operands[0]))
	    return (TARGET_AVX512VL
		    ? "vpxord\t%x0, %x0, %x0"
		    : "vpxord\t%g0, %g0, %g0");
	  return "vpxor\t%x0, %x0, %x0";

	case MODE_V2DF:
	  if (!EXT_REX_SSE_REG_P (operands[0]))
	    return "%vxorpd\t%0, %d0";
	  /* FALLTHRU */
	case MODE_V8DF:
	case MODE_V4DF:
	  if (!EXT_REX_SSE_REG_P (operands[0]))
	    return "vxorpd\t%x0, %x0, %x0";
	  else if (TARGET_AVX512DQ)
	    return (TARGET_AVX512VL
		    ? "vxorpd\t%x0, %x0, %x0"
		    : "vxorpd\t%g0, %g0, %g0");
	  else
	    return (TARGET_AVX512VL
		    ? "vpxorq\t%x0, %x0, %x0"
		    : "vpxorq\t%g0, %g0, %g0");

	case MODE_V4SF:
	  if (!EXT_REX_SSE_REG_P (operands[0]))
	    return "%vxorps\t%0, %d0";
	  /* FALLTHRU */
	case MODE_V16SF:
	case MODE_V8SF:
	  if (!EXT_REX_SSE_REG_P (operands[0]))
	    return "vxorps\t%x0, %x0, %x0";
	  else if (TARGET_AVX512DQ)
	    return (TARGET_AVX512VL
		    ? "vxorps\t%x0, %x0, %x0"
		    : "vxorps\t%g0, %g0, %g0");
	  else
	    return (TARGET_AVX512VL
		    ? "vpxord\t%x0, %x0, %x0"
		    : "vpxord\t%g0, %g0, %g0");

	default:
	  gcc_unreachable ();
	}
    }
  else if (x == constm1_rtx || vector_all_ones_operand (x, mode))
    {
      enum attr_mode insn_mode = get_attr_mode (insn);
      
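      /* pcmpeqd/vpcmpeqd set every bit of the destination, but with
	 EVEX encoding they write a mask register instead, so they
	 cannot fill the extended registers (xmm16+) or a 512-bit
	 register; vpternlogd with immediate 0xFF, whose truth table is
	 all ones, is used there instead.  */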
      switch (insn_mode)
	{
	case MODE_XI:
	case MODE_V8DF:
	case MODE_V16SF:
	  gcc_assert (TARGET_AVX512F);
	  return "vpternlogd\t{$0xFF, %g0, %g0, %g0|%g0, %g0, %g0, 0xFF}";

	case MODE_OI:
	case MODE_V4DF:
	case MODE_V8SF:
	  gcc_assert (TARGET_AVX2);
	  /* FALLTHRU */
	case MODE_TI:
	case MODE_V2DF:
	case MODE_V4SF:
	  gcc_assert (TARGET_SSE2);
	  if (!EXT_REX_SSE_REG_P (operands[0]))
	    return (TARGET_AVX
		    ? "vpcmpeqd\t%0, %0, %0"
		    : "pcmpeqd\t%0, %0");
	  else if (TARGET_AVX512VL)
	    return "vpternlogd\t{$0xFF, %0, %0, %0|%0, %0, %0, 0xFF}";
	  else
	    return "vpternlogd\t{$0xFF, %g0, %g0, %g0|%g0, %g0, %g0, 0xFF}";

	default:
	  gcc_unreachable ();
	}
    }

  gcc_unreachable ();
}

/* Returns true if INSN can be transformed from a memory load
   to a supported FP constant load.  */

bool
ix86_standard_x87sse_constant_load_p (const rtx_insn *insn, rtx dst)
{
  rtx src = find_constant_src (insn);

  gcc_assert (REG_P (dst));

  if (src == NULL
      || (SSE_REGNO_P (REGNO (dst))
	  && standard_sse_constant_p (src, GET_MODE (dst)) != 1)
      || (STACK_REGNO_P (REGNO (dst))
	   && standard_80387_constant_p (src) < 1))
    return false;

  return true;
}

/* Returns true if OP contains a symbol reference.  */

bool
symbolic_reference_mentioned_p (rtx op)
{
  const char *fmt;
  int i;

  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
    return true;

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	    if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
	      return true;
	}

      else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
	return true;
    }

  return false;
}

/* Return true if it is appropriate to emit `ret' instructions in the
   body of a function.  Do this only if the epilogue is simple, needing a
   couple of insns.  Prior to reloading, we can't tell how many registers
   must be saved, so return false then.  Return false if there is no frame
   marker to de-allocate.  */

bool
ix86_can_use_return_insn_p (void)
{
  if (ix86_function_naked (current_function_decl))
    return false;

  /* Don't use the `ret' instruction until reload has completed, when
     a frame pointer is needed, or in an interrupt handler.  */
  if (! reload_completed
      || frame_pointer_needed
      || cfun->machine->func_type != TYPE_NORMAL)
    return false;

  /* Don't allow more than 32k pop, since that's all we can do
     with one instruction.  */
  if (crtl->args.pops_args && crtl->args.size >= 32768)
    return false;

  struct ix86_frame &frame = cfun->machine->frame;
  return (frame.stack_pointer_offset == UNITS_PER_WORD
	  && (frame.nregs + frame.nsseregs) == 0);
}

/* Value should be nonzero if functions must have frame pointers.
   Zero means the frame pointer need not be set up (and parms may
   be accessed via the stack pointer) in functions that seem suitable.  */

static bool
ix86_frame_pointer_required (void)
{
  /* If we accessed previous frames, then the generated code expects
     to be able to access the saved ebp value in our frame.  */
  if (cfun->machine->accesses_prev_frame)
    return true;

  /* Several x86 OSes need a frame pointer for other reasons,
     usually pertaining to setjmp.  */
  if (SUBTARGET_FRAME_POINTER_REQUIRED)
    return true;

  /* For older 32-bit runtimes setjmp requires a valid frame pointer.  */
  if (TARGET_32BIT_MS_ABI && cfun->calls_setjmp)
    return true;

  /* With Win64 SEH, very large frames need a frame pointer, as the
     maximum stack allocation is 4GB.  */
  if (TARGET_64BIT_MS_ABI && get_frame_size () > SEH_MAX_FRAME_SIZE)
    return true;

  /* SSE register saves require a frame pointer when the stack is
     misaligned.  */
  if (TARGET_64BIT_MS_ABI && ix86_incoming_stack_boundary < 128)
    return true;
  
  /* In ix86_option_override_internal, TARGET_OMIT_LEAF_FRAME_POINTER
     turns off the frame pointer by default.  Turn it back on now if
     we're not a leaf function.  */
  if (TARGET_OMIT_LEAF_FRAME_POINTER
      && (!crtl->is_leaf
	  || ix86_current_function_calls_tls_descriptor))
    return true;

  if (crtl->profile && !flag_fentry)
    return true;

  return false;
}

/* Record that the current function accesses previous call frames.  */

void
ix86_setup_frame_addresses (void)
{
  cfun->machine->accesses_prev_frame = 1;
}

#ifndef USE_HIDDEN_LINKONCE
# if defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)
#  define USE_HIDDEN_LINKONCE 1
# else
#  define USE_HIDDEN_LINKONCE 0
# endif
#endif

/* Label count for call and return thunks.  It is used to make unique
   labels in call and return thunks.  */
static int indirectlabelno;

/* True if call thunk function is needed.  */
static bool indirect_thunk_needed = false;

/* Bit masks of integer registers, which contain branch target, used
   by call thunk functions.  */
static int indirect_thunks_used;

/* True if return thunk function is needed.  */
static bool indirect_return_needed = false;

/* True if return thunk function via CX is needed.  */
static bool indirect_return_via_cx;

#ifndef INDIRECT_LABEL
# define INDIRECT_LABEL "LIND"
#endif

/* Indicate what prefix is needed for an indirect branch.  */
enum indirect_thunk_prefix
{
  indirect_thunk_prefix_none,
  indirect_thunk_prefix_nt
};

/* Return the prefix needed for an indirect branch INSN.  */

enum indirect_thunk_prefix
indirect_thunk_need_prefix (rtx_insn *insn)
{
  enum indirect_thunk_prefix need_prefix;
  if ((cfun->machine->indirect_branch_type
	    == indirect_branch_thunk_extern)
	   && ix86_notrack_prefixed_insn_p (insn))
    {
      /* NOTRACK prefix is only used with external thunk so that it
	 can be properly updated to support CET at run-time.  */
      need_prefix = indirect_thunk_prefix_nt;
    }
  else
    need_prefix = indirect_thunk_prefix_none;
  return need_prefix;
}

/* Fills in the label name that should be used for the indirect thunk.  */
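
/* For example, with USE_HIDDEN_LINKONCE the names produced below
   include __x86_indirect_thunk, __x86_indirect_thunk_rax and
   __x86_return_thunk.  */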

static void
indirect_thunk_name (char name[32], unsigned int regno,
		     enum indirect_thunk_prefix need_prefix,
		     bool ret_p)
{
  if (regno != INVALID_REGNUM && regno != CX_REG && ret_p)
    gcc_unreachable ();

  if (USE_HIDDEN_LINKONCE)
    {
      const char *prefix;

      if (need_prefix == indirect_thunk_prefix_nt
	  && regno != INVALID_REGNUM)
	{
	  /* NOTRACK prefix is only used with external thunk via
	     register so that NOTRACK prefix can be added to indirect
	     branch via register to support CET at run-time.  */
	  prefix = "_nt";
	}
      else
	prefix = "";

      const char *ret = ret_p ? "return" : "indirect";

      if (regno != INVALID_REGNUM)
	{
	  const char *reg_prefix;
	  if (LEGACY_INT_REGNO_P (regno))
	    reg_prefix = TARGET_64BIT ? "r" : "e";
	  else
	    reg_prefix = "";
	  sprintf (name, "__x86_%s_thunk%s_%s%s",
		   ret, prefix, reg_prefix, reg_names[regno]);
	}
      else
	sprintf (name, "__x86_%s_thunk%s", ret, prefix);
    }
  else
    {
      if (regno != INVALID_REGNUM)
	ASM_GENERATE_INTERNAL_LABEL (name, "LITR", regno);
      else
	{
	  if (ret_p)
	    ASM_GENERATE_INTERNAL_LABEL (name, "LRT", 0);
	  else
	    ASM_GENERATE_INTERNAL_LABEL (name, "LIT", 0);
	}
    }
}

/* Output a call and return thunk for indirect branch.  If REGNO
   != INVALID_REGNUM, the function address is in REGNO and the call
   and return thunk looks like:

	call	L2
   L1:
	pause
	lfence
	jmp	L1
   L2:
	mov	%REG, (%sp)
	ret

   Otherwise, the function address is on the top of stack and the
   call and return thunk looks like:

	call L2
  L1:
	pause
	lfence
	jmp L1
  L2:
	lea WORD_SIZE(%sp), %sp
	ret
 */
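
/* This is the classic retpoline sequence: the call pushes the address
   of L1 as the return address, so a speculated return lands in the
   pause/lfence loop at L1 where it is harmlessly penned, while the
   architectural path at L2 replaces the saved return address with the
   real branch target before executing ret.  */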

static void
output_indirect_thunk (unsigned int regno)
{
  char indirectlabel1[32];
  char indirectlabel2[32];

  ASM_GENERATE_INTERNAL_LABEL (indirectlabel1, INDIRECT_LABEL,
			       indirectlabelno++);
  ASM_GENERATE_INTERNAL_LABEL (indirectlabel2, INDIRECT_LABEL,
			       indirectlabelno++);

  /* Call */
  fputs ("\tcall\t", asm_out_file);
  assemble_name_raw (asm_out_file, indirectlabel2);
  fputc ('\n', asm_out_file);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, indirectlabel1);

  /* AMD and Intel CPUs each prefer a different instruction as the loop
     filler.  Using both pause + lfence is a compromise solution.  */
  fprintf (asm_out_file, "\tpause\n\tlfence\n");

  /* Jump.  */
  fputs ("\tjmp\t", asm_out_file);
  assemble_name_raw (asm_out_file, indirectlabel1);
  fputc ('\n', asm_out_file);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, indirectlabel2);

  /* The above call insn pushed a word to the stack.  Adjust CFI info.  */
  if (flag_asynchronous_unwind_tables && dwarf2out_do_frame ())
    {
      if (! dwarf2out_do_cfi_asm ())
	{
	  dw_cfi_ref xcfi = ggc_cleared_alloc<dw_cfi_node> ();
	  xcfi->dw_cfi_opc = DW_CFA_advance_loc4;
	  xcfi->dw_cfi_oprnd1.dw_cfi_addr = ggc_strdup (indirectlabel2);
	  vec_safe_push (cfun->fde->dw_fde_cfi, xcfi);
	}
      dw_cfi_ref xcfi = ggc_cleared_alloc<dw_cfi_node> ();
      xcfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      xcfi->dw_cfi_oprnd1.dw_cfi_offset = 2 * UNITS_PER_WORD;
      vec_safe_push (cfun->fde->dw_fde_cfi, xcfi);
      dwarf2out_emit_cfi (xcfi);
    }

  if (regno != INVALID_REGNUM)
    {
      /* MOV.  */
      rtx xops[2];
      xops[0] = gen_rtx_MEM (word_mode, stack_pointer_rtx);
      xops[1] = gen_rtx_REG (word_mode, regno);
      output_asm_insn ("mov\t{%1, %0|%0, %1}", xops);
    }
  else
    {
      /* LEA.  */
      rtx xops[2];
      xops[0] = stack_pointer_rtx;
      xops[1] = plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD);
      output_asm_insn ("lea\t{%E1, %0|%0, %E1}", xops);
    }

  fputs ("\tret\n", asm_out_file);
}

/* Output a function with a call and return thunk for indirect branch.
   If REGNO != INVALID_REGNUM, the function address is in REGNO.
   Otherwise, the function address is on the top of stack.  Thunk is
   used for function return if RET_P is true.  */

static void
output_indirect_thunk_function (enum indirect_thunk_prefix need_prefix,
				unsigned int regno, bool ret_p)
{
  char name[32];
  tree decl;

  /* Create __x86_indirect_thunk.  */
  indirect_thunk_name (name, regno, need_prefix, ret_p);
  decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
		     get_identifier (name),
		     build_function_type_list (void_type_node, NULL_TREE));
  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
				   NULL_TREE, void_type_node);
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;
  DECL_IGNORED_P (decl) = 1;

#if TARGET_MACHO
  if (TARGET_MACHO)
    {
      switch_to_section (darwin_sections[picbase_thunk_section]);
      fputs ("\t.weak_definition\t", asm_out_file);
      assemble_name (asm_out_file, name);
      fputs ("\n\t.private_extern\t", asm_out_file);
      assemble_name (asm_out_file, name);
      putc ('\n', asm_out_file);
      ASM_OUTPUT_LABEL (asm_out_file, name);
      DECL_WEAK (decl) = 1;
    }
  else
#endif
    if (USE_HIDDEN_LINKONCE)
      {
	cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));

	targetm.asm_out.unique_section (decl, 0);
	switch_to_section (get_named_section (decl, NULL, 0));

	targetm.asm_out.globalize_label (asm_out_file, name);
	fputs ("\t.hidden\t", asm_out_file);
	assemble_name (asm_out_file, name);
	putc ('\n', asm_out_file);
	ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
      }
    else
      {
	switch_to_section (text_section);
	ASM_OUTPUT_LABEL (asm_out_file, name);
      }

  DECL_INITIAL (decl) = make_node (BLOCK);
  current_function_decl = decl;
  allocate_struct_function (decl, false);
  init_function_start (decl);
  /* We're about to hide the function body from callees of final_* by
     emitting it directly; tell them we're a thunk, if they care.  */
  cfun->is_thunk = true;
  first_function_block_is_cold = false;
  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), asm_out_file, 1);

  output_indirect_thunk (regno);

  final_end_function ();
  init_insn_lengths ();
  free_after_compilation (cfun);
  set_cfun (NULL);
  current_function_decl = NULL;
}

static int pic_labels_used;

/* Fills in the label name that should be used for a pc thunk for
   the given register.  */
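
/* E.g. for the %ebx PIC register this yields the familiar
   "__x86.get_pc_thunk.bx" name when USE_HIDDEN_LINKONCE is set.  */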

static void
get_pc_thunk_name (char name[32], unsigned int regno)
{
  gcc_assert (!TARGET_64BIT);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__x86.get_pc_thunk.%s", reg_names[regno]);
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
}


/* Emit any indirect-branch thunks that were requested and, for -fpic,
   the pc thunks that load a register with the return address of the
   caller and then return.  */

static void
ix86_code_end (void)
{
  rtx xops[2];
  unsigned int regno;

  if (indirect_return_needed)
    output_indirect_thunk_function (indirect_thunk_prefix_none,
				    INVALID_REGNUM, true);
  if (indirect_return_via_cx)
    output_indirect_thunk_function (indirect_thunk_prefix_none,
				    CX_REG, true);
  if (indirect_thunk_needed)
    output_indirect_thunk_function (indirect_thunk_prefix_none,
				    INVALID_REGNUM, false);

  for (regno = FIRST_REX_INT_REG; regno <= LAST_REX_INT_REG; regno++)
    {
      unsigned int i = regno - FIRST_REX_INT_REG + LAST_INT_REG + 1;
      if ((indirect_thunks_used & (1 << i)))
	output_indirect_thunk_function (indirect_thunk_prefix_none,
					regno, false);
    }

  for (regno = FIRST_INT_REG; regno <= LAST_INT_REG; regno++)
    {
      char name[32];
      tree decl;

      if ((indirect_thunks_used & (1 << regno)))
	output_indirect_thunk_function (indirect_thunk_prefix_none,
					regno, false);

      if (!(pic_labels_used & (1 << regno)))
	continue;

      get_pc_thunk_name (name, regno);

      decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			 get_identifier (name),
			 build_function_type_list (void_type_node, NULL_TREE));
      DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
				       NULL_TREE, void_type_node);
      TREE_PUBLIC (decl) = 1;
      TREE_STATIC (decl) = 1;
      DECL_IGNORED_P (decl) = 1;

#if TARGET_MACHO
      if (TARGET_MACHO)
	{
	  switch_to_section (darwin_sections[picbase_thunk_section]);
	  fputs ("\t.weak_definition\t", asm_out_file);
	  assemble_name (asm_out_file, name);
	  fputs ("\n\t.private_extern\t", asm_out_file);
	  assemble_name (asm_out_file, name);
	  putc ('\n', asm_out_file);
	  ASM_OUTPUT_LABEL (asm_out_file, name);
	  DECL_WEAK (decl) = 1;
	}
      else
#endif
      if (USE_HIDDEN_LINKONCE)
	{
	  cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));

	  targetm.asm_out.unique_section (decl, 0);
	  switch_to_section (get_named_section (decl, NULL, 0));

	  targetm.asm_out.globalize_label (asm_out_file, name);
	  fputs ("\t.hidden\t", asm_out_file);
	  assemble_name (asm_out_file, name);
	  putc ('\n', asm_out_file);
	  ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
	}
      else
	{
	  switch_to_section (text_section);
	  ASM_OUTPUT_LABEL (asm_out_file, name);
	}

      DECL_INITIAL (decl) = make_node (BLOCK);
      current_function_decl = decl;
      allocate_struct_function (decl, false);
      init_function_start (decl);
      /* We're about to hide the function body from callees of final_* by
	 emitting it directly; tell them we're a thunk, if they care.  */
      cfun->is_thunk = true;
      first_function_block_is_cold = false;
      /* Make sure unwind info is emitted for the thunk if needed.  */
      final_start_function (emit_barrier (), asm_out_file, 1);

      /* Pad stack IP move with 4 instructions (two NOPs count
	 as one instruction).  */
      if (TARGET_PAD_SHORT_FUNCTION)
	{
	  int i = 8;

	  while (i--)
	    fputs ("\tnop\n", asm_out_file);
	}

      xops[0] = gen_rtx_REG (Pmode, regno);
      xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
      output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
      output_asm_insn ("%!ret", NULL);
      final_end_function ();
      init_insn_lengths ();
      free_after_compilation (cfun);
      set_cfun (NULL);
      current_function_decl = NULL;
    }

  if (flag_split_stack)
    file_end_indicate_split_stack ();
}

/* Emit code for the SET_GOT patterns.  */

const char *
output_set_got (rtx dest, rtx label)
{
  rtx xops[3];

  xops[0] = dest;

  if (TARGET_VXWORKS_RTP && flag_pic)
    {
      /* Load (*VXWORKS_GOTT_BASE) into the PIC register.  */
      xops[2] = gen_rtx_MEM (Pmode,
			     gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
      output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);

      /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
	 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
	 an unadorned address.  */
      xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
      SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
      output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
      return "";
    }

  xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);

  if (flag_pic)
    {
      char name[32];
      get_pc_thunk_name (name, REGNO (dest));
      pic_labels_used |= 1 << REGNO (dest);

      xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
      xops[2] = gen_rtx_MEM (QImode, xops[2]);
      output_asm_insn ("%!call\t%X2", xops);

#if TARGET_MACHO
      /* Output the Mach-O "canonical" pic base label name ("Lxx$pb") here.
         This is what will be referenced by the Mach-O PIC subsystem.  */
      if (machopic_should_output_picbase_label () || !label)
	ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);

      /* When we are restoring the pic base at the site of a nonlocal label,
         and we decided to emit the pic base above, we will still output a
         local label used for calculating the correction offset (even though
         the offset will be 0 in that case).  */
      if (label)
        targetm.asm_out.internal_label (asm_out_file, "L",
					   CODE_LABEL_NUMBER (label));
#endif
    }
  else
    {
      if (TARGET_MACHO)
	/* We don't need a pic base, we're not producing pic.  */
	gcc_unreachable ();

      xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
      output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
      targetm.asm_out.internal_label (asm_out_file, "L",
				      CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
    }

  if (!TARGET_MACHO)
    output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);

  return "";
}

/* Generate an "push" pattern for input ARG.  */

static rtx
gen_push (rtx arg)
{
  struct machine_function *m = cfun->machine;

  if (m->fs.cfa_reg == stack_pointer_rtx)
    m->fs.cfa_offset += UNITS_PER_WORD;
  m->fs.sp_offset += UNITS_PER_WORD;

  if (REG_P (arg) && GET_MODE (arg) != word_mode)
    arg = gen_rtx_REG (word_mode, REGNO (arg));

  return gen_rtx_SET (gen_rtx_MEM (word_mode,
				   gen_rtx_PRE_DEC (Pmode,
						    stack_pointer_rtx)),
		      arg);
}

/* Generate an "pop" pattern for input ARG.  */

static rtx
gen_pop (rtx arg)
{
  if (REG_P (arg) && GET_MODE (arg) != word_mode)
    arg = gen_rtx_REG (word_mode, REGNO (arg));

  return gen_rtx_SET (arg,
		      gen_rtx_MEM (word_mode,
				   gen_rtx_POST_INC (Pmode,
						     stack_pointer_rtx)));
}

/* Return the number of an unused call-clobbered register available
   for the entire function, or INVALID_REGNUM if there is none.  */

static unsigned int
ix86_select_alt_pic_regnum (void)
{
  if (ix86_use_pseudo_pic_reg ())
    return INVALID_REGNUM;

  if (crtl->is_leaf
      && !crtl->profile
      && !ix86_current_function_calls_tls_descriptor)
    {
      int i, drap;
      /* Can't use the same register for both PIC and DRAP.  */
      if (crtl->drap_reg)
	drap = REGNO (crtl->drap_reg);
      else
	drap = -1;
      for (i = 2; i >= 0; --i)
        if (i != drap && !df_regs_ever_live_p (i))
	  return i;
    }

  return INVALID_REGNUM;
}

/* Return true if REGNO is used by the epilogue.  */

bool
ix86_epilogue_uses (int regno)
{
  /* If there are no caller-saved registers, we preserve all registers,
     except for MMX and x87 registers which aren't supported when saving
     and restoring registers.  Don't explicitly save SP register since
     it is always preserved.  */
  return (epilogue_completed
	  && cfun->machine->no_caller_saved_registers
	  && !fixed_regs[regno]
	  && !STACK_REGNO_P (regno)
	  && !MMX_REGNO_P (regno));
}

/* Return nonzero if register REGNO can be used as a scratch register
   in peephole2.  */

static bool
ix86_hard_regno_scratch_ok (unsigned int regno)
{
  /* If there are no caller-saved registers, we can't use any register
     as a scratch register after epilogue and use REGNO as scratch
     register only if it has been used before to avoid saving and
     restoring it.  */
  return (!cfun->machine->no_caller_saved_registers
	  || (!epilogue_completed
	      && df_regs_ever_live_p (regno)));
}

/* Return TRUE if we need to save REGNO.  */

static bool
ix86_save_reg (unsigned int regno, bool maybe_eh_return, bool ignore_outlined)
{
  /* If there are no caller-saved registers, we preserve all registers,
     except for MMX and x87 registers which aren't supported when saving
     and restoring registers.  Don't explicitly save SP register since
     it is always preserved.  */
  if (cfun->machine->no_caller_saved_registers)
    {
      /* Don't preserve registers used for function return value.  */
      rtx reg = crtl->return_rtx;
      if (reg)
	{
	  unsigned int i = REGNO (reg);
	  unsigned int nregs = REG_NREGS (reg);
	  while (nregs-- > 0)
	    if ((i + nregs) == regno)
	      return false;
	}

      return (df_regs_ever_live_p (regno)
	      && !fixed_regs[regno]
	      && !STACK_REGNO_P (regno)
	      && !MMX_REGNO_P (regno)
	      && (regno != HARD_FRAME_POINTER_REGNUM
		  || !frame_pointer_needed));
    }

  if (regno == REAL_PIC_OFFSET_TABLE_REGNUM
      && pic_offset_table_rtx)
    {
      if (ix86_use_pseudo_pic_reg ())
	{
	  /* REAL_PIC_OFFSET_TABLE_REGNUM used by call to
	  _mcount in prologue.  */
	  if (!TARGET_64BIT && flag_pic && crtl->profile)
	    return true;
	}
      else if (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
	       || crtl->profile
	       || crtl->calls_eh_return
	       || crtl->uses_const_pool
	       || cfun->has_nonlocal_label)
        return ix86_select_alt_pic_regnum () == INVALID_REGNUM;
    }

  if (crtl->calls_eh_return && maybe_eh_return)
    {
      unsigned i;
      for (i = 0; ; i++)
	{
	  unsigned test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  if (ignore_outlined && cfun->machine->call_ms2sysv)
    {
      unsigned count = cfun->machine->call_ms2sysv_extra_regs
		       + xlogue_layout::MIN_REGS;
      if (xlogue_layout::is_stub_managed_reg (regno, count))
	return false;
    }

  if (crtl->drap_reg
      && regno == REGNO (crtl->drap_reg)
      && !cfun->machine->no_drap_save_restore)
    return true;

  return (df_regs_ever_live_p (regno)
	  && !call_used_regs[regno]
	  && !fixed_regs[regno]
	  && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
}

/* Return number of saved general purpose registers.  */

static int
ix86_nsaved_regs (void)
{
  int nregs = 0;
  int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (GENERAL_REGNO_P (regno) && ix86_save_reg (regno, true, true))
      nregs ++;
  return nregs;
}

/* Return number of saved SSE registers.  */

static int
ix86_nsaved_sseregs (void)
{
  int nregs = 0;
  int regno;

  if (!TARGET_64BIT_MS_ABI)
    return 0;
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true, true))
      nregs ++;
  return nregs;
}

/* Given FROM and TO register numbers, say whether this elimination is
   allowed.  If stack alignment is needed, we can only replace argument
   pointer with hard frame pointer, or replace frame pointer with stack
   pointer.  Otherwise, frame pointer elimination is automatically
   handled and all other eliminations are valid.  */

static bool
ix86_can_eliminate (const int from, const int to)
{
  if (stack_realign_fp)
    return ((from == ARG_POINTER_REGNUM
	     && to == HARD_FRAME_POINTER_REGNUM)
	    || (from == FRAME_POINTER_REGNUM
		&& to == STACK_POINTER_REGNUM));
  else
    return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
}

/* Return the offset between two registers, one to be eliminated, and the other
   its replacement, at the start of a routine.  */

HOST_WIDE_INT
ix86_initial_elimination_offset (int from, int to)
{
  struct ix86_frame &frame = cfun->machine->frame;

  if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return frame.hard_frame_pointer_offset;
  else if (from == FRAME_POINTER_REGNUM
	   && to == HARD_FRAME_POINTER_REGNUM)
    return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
  else
    {
      gcc_assert (to == STACK_POINTER_REGNUM);

      if (from == ARG_POINTER_REGNUM)
	return frame.stack_pointer_offset;

      gcc_assert (from == FRAME_POINTER_REGNUM);
      return frame.stack_pointer_offset - frame.frame_pointer_offset;
    }
}

/* In a dynamically-aligned function, we can't know the offset from
   stack pointer to frame pointer, so we must ensure that setjmp
   eliminates fp against the hard fp (%ebp) rather than trying to
   index from %esp up to the top of the frame across a gap that is
   of unknown (at compile-time) size.  */
static rtx
ix86_builtin_setjmp_frame_value (void)
{
  return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
}

/* Emit a warning once for unsupported ms_abi to sysv prologues and
   epilogues.  */

static void
warn_once_call_ms2sysv_xlogues (const char *feature)
{
  static bool warned_once = false;
  if (!warned_once)
    {
      warning (0, "-mcall-ms2sysv-xlogues is not compatible with %s",
	       feature);
      warned_once = true;
    }
}

/* Return the probing interval for -fstack-clash-protection.  */
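
/* Under the usual defaults both branches below come to 1 << 12 == 4096
   bytes, i.e. one typical page; the interval is tunable via --param
   stack-clash-protection-probe-interval and the target's
   STACK_CHECK_PROBE_INTERVAL_EXP.  */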

static HOST_WIDE_INT
get_probe_interval (void)
{
  if (flag_stack_clash_protection)
    return (HOST_WIDE_INT_1U
	    << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
  else
    return (HOST_WIDE_INT_1U << STACK_CHECK_PROBE_INTERVAL_EXP);
}

/* When using -fsplit-stack, the allocation routines set a field in
   the TCB to the bottom of the stack plus this much space, measured
   in bytes.  */

#define SPLIT_STACK_AVAILABLE 256

/* Fill the ix86_frame structure describing the frame of the function
   being compiled.  */

static void
ix86_compute_frame_layout (void)
{
  struct ix86_frame *frame = &cfun->machine->frame;
  struct machine_function *m = cfun->machine;
  unsigned HOST_WIDE_INT stack_alignment_needed;
  HOST_WIDE_INT offset;
  unsigned HOST_WIDE_INT preferred_alignment;
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT to_allocate;

  /* m->call_ms2sysv is initially enabled in ix86_expand_call for all 64-bit
   * ms_abi functions that call a sysv function.  We now need to prune away
   * cases where it should be disabled.  */
  if (TARGET_64BIT && m->call_ms2sysv)
    {
      gcc_assert (TARGET_64BIT_MS_ABI);
      gcc_assert (TARGET_CALL_MS2SYSV_XLOGUES);
      gcc_assert (!TARGET_SEH);
      gcc_assert (TARGET_SSE);
      gcc_assert (!ix86_using_red_zone ());

      if (crtl->calls_eh_return)
	{
	  gcc_assert (!reload_completed);
	  m->call_ms2sysv = false;
	  warn_once_call_ms2sysv_xlogues ("__builtin_eh_return");
	}

      else if (ix86_static_chain_on_stack)
	{
	  gcc_assert (!reload_completed);
	  m->call_ms2sysv = false;
	  warn_once_call_ms2sysv_xlogues ("static call chains");
	}

      /* Finally, compute which registers the stub will manage.  */
      else
	{
	  unsigned count = xlogue_layout::count_stub_managed_regs ();
	  m->call_ms2sysv_extra_regs = count - xlogue_layout::MIN_REGS;
	  m->call_ms2sysv_pad_in = 0;
	}
    }

  frame->nregs = ix86_nsaved_regs ();
  frame->nsseregs = ix86_nsaved_sseregs ();

  /* The 64-bit MS ABI seems to require stack alignment to always be
     16, except for function prologues, leaf functions and when the
     default incoming stack boundary is overridden at the command line
     or via the force_align_arg_pointer attribute.  */
  if ((TARGET_64BIT_MS_ABI && crtl->preferred_stack_boundary < 128)
      && (!crtl->is_leaf || cfun->calls_alloca != 0
	  || ix86_current_function_calls_tls_descriptor
	  || ix86_incoming_stack_boundary < 128))
    {
      crtl->preferred_stack_boundary = 128;
      crtl->stack_alignment_needed = 128;
    }

  stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
  preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;

  gcc_assert (!size || stack_alignment_needed);
  gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
  gcc_assert (preferred_alignment <= stack_alignment_needed);

  /* The only ABI saving SSE regs should be 64-bit ms_abi.  */
  gcc_assert (TARGET_64BIT || !frame->nsseregs);
  if (TARGET_64BIT && m->call_ms2sysv)
    {
      gcc_assert (stack_alignment_needed >= 16);
      gcc_assert (!frame->nsseregs);
    }

  /* For SEH we have to limit the amount of code movement into the prologue.
     At present we do this via a BLOCKAGE, at which point there's very little
     scheduling that can be done, which means that there's very little point
     in doing anything except PUSHs.  */
  if (TARGET_SEH)
    m->use_fast_prologue_epilogue = false;
  else if (!optimize_bb_for_size_p (ENTRY_BLOCK_PTR_FOR_FN (cfun)))
    {
      int count = frame->nregs;
      struct cgraph_node *node = cgraph_node::get (current_function_decl);

      /* The fast prologue uses move instead of push to save registers.  This
         is significantly longer, but also executes faster as modern hardware
         can execute the moves in parallel, but can't do that for push/pop.

	 Be careful about choosing what prologue to emit: when the
	 function takes many instructions to execute, we may as well use
	 the slow version, likewise when the function is known to be
	 outside a hot spot (the latter is known with profile feedback
	 only).  Weight the size of the function by the number of
	 registers to save, as it is cheap to use one or two push
	 instructions but very slow to use many of them.  */
      if (count)
	count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
      if (node->frequency < NODE_FREQUENCY_NORMAL
	  || (flag_branch_probabilities
	      && node->frequency < NODE_FREQUENCY_HOT))
	m->use_fast_prologue_epilogue = false;
      else
	m->use_fast_prologue_epilogue
	   = !expensive_function_p (count);
    }

  frame->save_regs_using_mov
    = (TARGET_PROLOGUE_USING_MOVE && m->use_fast_prologue_epilogue
       /* If static stack checking is enabled and done with probes,
	  the registers need to be saved before allocating the frame.  */
       && flag_stack_check != STATIC_BUILTIN_STACK_CHECK);

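  /* From here on, OFFSET tracks the distance from the CFA down through
     the frame as each area is laid out; roughly, from higher to lower
     addresses:

	 return address (and error code in exception handlers)
	 [pushed static chain]
	 [saved hard frame pointer]   <- hard_frame_pointer_offset
	 GP register save area	      <- reg_save_offset
	 [realignment / ms2sysv stub / SSE register save area]
				      <- sse_reg_save_offset
	 va_arg register save area
	 local frame		      <- frame_pointer_offset
	 [outgoing arguments]	      <- stack_pointer_offset  */
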
  /* Skip return address and error code in exception handler.  */
  offset = INCOMING_FRAME_SP_OFFSET;

  /* Skip pushed static chain.  */
  if (ix86_static_chain_on_stack)
    offset += UNITS_PER_WORD;

  /* Skip saved base pointer.  */
  if (frame_pointer_needed)
    offset += UNITS_PER_WORD;
  frame->hfp_save_offset = offset;

  /* The traditional frame pointer location is at the top of the frame.  */
  frame->hard_frame_pointer_offset = offset;

  /* Register save area */
  offset += frame->nregs * UNITS_PER_WORD;
  frame->reg_save_offset = offset;

  /* On SEH target, registers are pushed just before the frame pointer
     location.  */
  if (TARGET_SEH)
    frame->hard_frame_pointer_offset = offset;

  /* Calculate the size of the va-arg area (not including padding, if any).  */
  frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;

  /* Also adjust stack_realign_offset for the largest alignment of
     stack slot actually used.  */
  if (stack_realign_fp
      || (cfun->machine->max_used_stack_alignment != 0
	  && (offset % cfun->machine->max_used_stack_alignment) != 0))
    {
      /* We may need a 16-byte aligned stack for the remainder of the
	 register save area, but the stack frame for the local function
	 may require a greater alignment if using AVX, AVX2 or AVX-512.  In order
	 to avoid wasting space, we first calculate the space needed for
	 the rest of the register saves, add that to the stack pointer,
	 and then realign the stack to the boundary of the start of the
	 frame for the local function.  */
      HOST_WIDE_INT space_needed = 0;
      HOST_WIDE_INT sse_reg_space_needed = 0;

      if (TARGET_64BIT)
	{
	  if (m->call_ms2sysv)
	    {
	      m->call_ms2sysv_pad_in = 0;
	      space_needed = xlogue_layout::get_instance ().get_stack_space_used ();
	    }

	  else if (frame->nsseregs)
	    /* The only ABI that has saved SSE registers (Win64) also has a
	       16-byte aligned default stack.  However, many programs violate
	       the ABI, and Wine64 forces stack realignment to compensate.  */
	    space_needed = frame->nsseregs * 16;

	  sse_reg_space_needed = space_needed = ROUND_UP (space_needed, 16);

	  /* 64-bit frame->va_arg_size should always be a multiple of 16,
	     but round anyway, to be pedantic.  */
	  space_needed = ROUND_UP (space_needed + frame->va_arg_size, 16);
	}
      else
	space_needed = frame->va_arg_size;

      /* Record the allocation size required prior to the realignment AND.  */
      frame->stack_realign_allocate = space_needed;

      /* The re-aligned stack starts at frame->stack_realign_offset.  Values
	 before this point are not directly comparable with values below
	 this point.  Use sp_valid_at to determine if the stack pointer is
	 valid for a given offset, fp_valid_at for the frame pointer, or
	 choose_baseaddr to have a base register chosen for you.

	 Note that the result of (frame->stack_realign_offset
	 & (stack_alignment_needed - 1)) may not equal zero.  */
      offset = ROUND_UP (offset + space_needed, stack_alignment_needed);
      frame->stack_realign_offset = offset - space_needed;
      frame->sse_reg_save_offset = frame->stack_realign_offset
							+ sse_reg_space_needed;
    }
  else
    {
      frame->stack_realign_offset = offset;

      if (TARGET_64BIT && m->call_ms2sysv)
	{
	  m->call_ms2sysv_pad_in = !!(offset & UNITS_PER_WORD);
	  offset += xlogue_layout::get_instance ().get_stack_space_used ();
	}

      /* Align and set SSE register save area.  */
      else if (frame->nsseregs)
	{
	  /* If the incoming stack boundary is at least 16 bytes, or DRAP is
	     required and the DRAP re-alignment boundary is at least 16 bytes,
	     then we want the SSE register save area properly aligned.  */
	  if (ix86_incoming_stack_boundary >= 128
		  || (stack_realign_drap && stack_alignment_needed >= 16))
	    offset = ROUND_UP (offset, 16);
	  offset += frame->nsseregs * 16;
	}
      frame->sse_reg_save_offset = offset;
      offset += frame->va_arg_size;
    }

  /* Align start of frame for local function.  When a function call is
     removed, the function may become a leaf function; but if arguments
     may be passed on the stack, we need to align the stack when there
     is no tail call.  */
  if (m->call_ms2sysv
      || frame->va_arg_size != 0
      || size != 0
      || !crtl->is_leaf
      || (!crtl->tail_call_emit
	  && cfun->machine->outgoing_args_on_stack)
      || cfun->calls_alloca
      || ix86_current_function_calls_tls_descriptor)
    offset = ROUND_UP (offset, stack_alignment_needed);

  /* Frame pointer points here.  */
  frame->frame_pointer_offset = offset;

  offset += size;

  /* Add outgoing arguments area.  Can be skipped if we eliminated
     all the function calls as dead code.
     Skipping is however impossible when function calls alloca.  Alloca
     expander assumes that last crtl->outgoing_args_size
     of stack frame are unused.  */
  if (ACCUMULATE_OUTGOING_ARGS
      && (!crtl->is_leaf || cfun->calls_alloca
	  || ix86_current_function_calls_tls_descriptor))
    {
      offset += crtl->outgoing_args_size;
      frame->outgoing_arguments_size = crtl->outgoing_args_size;
    }
  else
    frame->outgoing_arguments_size = 0;

  /* Align stack boundary.  Only needed if we're calling another function
     or using alloca.  */
  if (!crtl->is_leaf || cfun->calls_alloca
      || ix86_current_function_calls_tls_descriptor)
    offset = ROUND_UP (offset, preferred_alignment);

  /* We've reached end of stack frame.  */
  frame->stack_pointer_offset = offset;

  /* Size prologue needs to allocate.  */
  to_allocate = offset - frame->sse_reg_save_offset;

  if ((!to_allocate && frame->nregs <= 1)
      || (TARGET_64BIT && to_allocate >= HOST_WIDE_INT_C (0x80000000))
      /* If stack clash probing needs a loop, then it needs a
	 scratch register.  But the returned register is only guaranteed
	 to be safe to use after register saves are complete.  So if
	 stack clash protections are enabled and the allocated frame is
	 larger than the probe interval, then use pushes to save
	 callee saved registers.  */
      || (flag_stack_clash_protection && to_allocate > get_probe_interval ()))
    frame->save_regs_using_mov = false;

  if (ix86_using_red_zone ()
      && crtl->sp_is_unchanging
      && crtl->is_leaf
      && !ix86_pc_thunk_call_expanded
      && !ix86_current_function_calls_tls_descriptor)
    {
      frame->red_zone_size = to_allocate;
      if (frame->save_regs_using_mov)
	frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
      if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
	frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
    }
  else
    frame->red_zone_size = 0;
  frame->stack_pointer_offset -= frame->red_zone_size;

  /* The SEH frame pointer location is near the bottom of the frame.
     This is enforced by the fact that the difference between the
     stack pointer and the frame pointer is limited to 240 bytes in
     the unwind data structure.  */
  if (TARGET_SEH)
    {
      HOST_WIDE_INT diff;

      /* If we can leave the frame pointer where it is, do so.  This
	 also yields the establisher frame for __builtin_frame_address
	 (0).  */
      diff = frame->stack_pointer_offset - frame->hard_frame_pointer_offset;
      if (diff <= SEH_MAX_FRAME_SIZE
	  && (diff > 240 || (diff & 15) != 0)
	  && !crtl->accesses_prior_frames)
	{
	  /* Ideally we'd determine what portion of the local stack frame
	     (within the constraint of the lowest 240) is most heavily used.
	     But without that complication, simply bias the frame pointer
	     by 128 bytes so as to maximize the amount of the local stack
	     frame that is addressable with 8-bit offsets.  */
	  frame->hard_frame_pointer_offset = frame->stack_pointer_offset - 128;
	}
    }
}

/* This is semi-inlined memory_address_length, but simplified
   since we know that we're always dealing with reg+offset, and
   to avoid having to create and discard all that rtl.  */
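
/* E.g. a zero offset encodes in 0 bytes (1 for %ebp/%r13, which cannot
   be encoded without a displacement), a disp8 in [-128, 127] in 1 byte,
   anything else in 4; %esp/%r12 add one byte for the mandatory SIB.  */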

static inline int
choose_baseaddr_len (unsigned int regno, HOST_WIDE_INT offset)
{
  int len = 4;

  if (offset == 0)
    {
      /* EBP and R13 cannot be encoded without an offset.  */
      len = (regno == BP_REG || regno == R13_REG);
    }
  else if (IN_RANGE (offset, -128, 127))
    len = 1;

  /* ESP and R12 must be encoded with a SIB byte.  */
  if (regno == SP_REG || regno == R12_REG)
    len++;

  return len;
}

/* Determine if the stack pointer is valid for accessing the CFA_OFFSET in
   the frame save area.  The register is saved at CFA - CFA_OFFSET.  */

static bool
sp_valid_at (HOST_WIDE_INT cfa_offset)
{
  const struct machine_frame_state &fs = cfun->machine->fs;
  if (fs.sp_realigned && cfa_offset <= fs.sp_realigned_offset)
    {
      /* Validate that the cfa_offset isn't in a "no-man's land".  */
      gcc_assert (cfa_offset <= fs.sp_realigned_fp_last);
      return false;
    }
  return fs.sp_valid;
}

/* Determine if the frame pointer is valid for accessing the CFA_OFFSET in
   the frame save area.  The register is saved at CFA - CFA_OFFSET.  */

static inline bool
fp_valid_at (HOST_WIDE_INT cfa_offset)
{
  const struct machine_frame_state &fs = cfun->machine->fs;
  if (fs.sp_realigned && cfa_offset > fs.sp_realigned_fp_last)
    {
      /* Validate that the cfa_offset isn't in a "no-man's land".  */
      gcc_assert (cfa_offset >= fs.sp_realigned_offset);
      return false;
    }
  return fs.fp_valid;
}

/* Choose a base register based upon alignment requested, speed and/or
   size.  */

static void
choose_basereg (HOST_WIDE_INT cfa_offset, rtx &base_reg,
		HOST_WIDE_INT &base_offset,
		unsigned int align_requested, unsigned int *align)
{
  const struct machine_function *m = cfun->machine;
  unsigned int hfp_align;
  unsigned int drap_align;
  unsigned int sp_align;
  bool hfp_ok  = fp_valid_at (cfa_offset);
  bool drap_ok = m->fs.drap_valid;
  bool sp_ok   = sp_valid_at (cfa_offset);

  hfp_align = drap_align = sp_align = INCOMING_STACK_BOUNDARY;

  /* Filter out any registers that don't meet the requested alignment
     criteria.  */
  if (align_requested)
    {
      if (m->fs.realigned)
	hfp_align = drap_align = sp_align = crtl->stack_alignment_needed;
      /* SEH unwind code does not currently support REG_CFA_EXPRESSION
	 notes (which we would need to use a realigned stack pointer),
	 so disable on SEH targets.  */
      else if (m->fs.sp_realigned)
	sp_align = crtl->stack_alignment_needed;

      hfp_ok = hfp_ok && hfp_align >= align_requested;
      drap_ok = drap_ok && drap_align >= align_requested;
      sp_ok = sp_ok && sp_align >= align_requested;
    }

  if (m->use_fast_prologue_epilogue)
    {
      /* Choose the base register most likely to allow the most scheduling
         opportunities.  Generally FP is valid throughout the function,
         while DRAP must be reloaded within the epilogue.  But choose either
         over the SP due to increased encoding size.  */

      if (hfp_ok)
	{
	  base_reg = hard_frame_pointer_rtx;
	  base_offset = m->fs.fp_offset - cfa_offset;
	}
      else if (drap_ok)
	{
	  base_reg = crtl->drap_reg;
	  base_offset = 0 - cfa_offset;
	}
      else if (sp_ok)
	{
	  base_reg = stack_pointer_rtx;
	  base_offset = m->fs.sp_offset - cfa_offset;
	}
    }
  else
    {
      HOST_WIDE_INT toffset;
      int len = 16, tlen;

      /* Choose the base register with the smallest address encoding.
         With a tie, choose FP > DRAP > SP.  */
      if (sp_ok)
	{
	  base_reg = stack_pointer_rtx;
	  base_offset = m->fs.sp_offset - cfa_offset;
          len = choose_baseaddr_len (STACK_POINTER_REGNUM, base_offset);
	}
      if (drap_ok)
	{
	  toffset = 0 - cfa_offset;
	  tlen = choose_baseaddr_len (REGNO (crtl->drap_reg), toffset);
	  if (tlen <= len)
	    {
	      base_reg = crtl->drap_reg;
	      base_offset = toffset;
	      len = tlen;
	    }
	}
      if (hfp_ok)
	{
	  toffset = m->fs.fp_offset - cfa_offset;
	  tlen = choose_baseaddr_len (HARD_FRAME_POINTER_REGNUM, toffset);
	  if (tlen <= len)
	    {
	      base_reg = hard_frame_pointer_rtx;
	      base_offset = toffset;
	      len = tlen;
	    }
	}
    }

  /* Set the align return value.  */
  if (align)
    {
      if (base_reg == stack_pointer_rtx)
	*align = sp_align;
      else if (base_reg == crtl->drap_reg)
	*align = drap_align;
      else if (base_reg == hard_frame_pointer_rtx)
	*align = hfp_align;
    }
}

/* Return an RTX that points to CFA_OFFSET within the stack frame and
   the alignment of the address.  If ALIGN is non-null, it should point
   to an alignment value (in bits) that is preferred or zero and will
   receive the alignment of the base register that was selected,
   irrespective of whether or not CFA_OFFSET is a multiple of that
   alignment value.  If it is possible for the base register offset to
   be non-immediate then SCRATCH_REGNO should specify a scratch register
   to use.

   The valid base registers are taken from CFUN->MACHINE->FS.  */

static rtx
choose_baseaddr (HOST_WIDE_INT cfa_offset, unsigned int *align,
		 unsigned int scratch_regno = INVALID_REGNUM)
{
  rtx base_reg = NULL;
  HOST_WIDE_INT base_offset = 0;

  /* If a specific alignment is requested, try to get a base register
     with that alignment first.  */
  if (align && *align)
    choose_basereg (cfa_offset, base_reg, base_offset, *align, align);

  if (!base_reg)
    choose_basereg (cfa_offset, base_reg, base_offset, 0, align);

  gcc_assert (base_reg != NULL);

  rtx base_offset_rtx = GEN_INT (base_offset);

  if (!x86_64_immediate_operand (base_offset_rtx, Pmode))
    {
      gcc_assert (scratch_regno != INVALID_REGNUM);

      rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
      emit_move_insn (scratch_reg, base_offset_rtx);

      return gen_rtx_PLUS (Pmode, base_reg, scratch_reg);
    }

  return plus_constant (Pmode, base_reg, base_offset);
}

/* Emit code to save registers in the prologue.  */

static void
ix86_emit_save_regs (void)
{
  unsigned int regno;
  rtx_insn *insn;

  for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
    if (GENERAL_REGNO_P (regno) && ix86_save_reg (regno, true, true))
      {
	insn = emit_insn (gen_push (gen_rtx_REG (word_mode, regno)));
	RTX_FRAME_RELATED_P (insn) = 1;
      }
}

/* Emit a single register save at CFA - CFA_OFFSET.  */

static void
ix86_emit_save_reg_using_mov (machine_mode mode, unsigned int regno,
			      HOST_WIDE_INT cfa_offset)
{
  struct machine_function *m = cfun->machine;
  rtx reg = gen_rtx_REG (mode, regno);
  rtx mem, addr, base, insn;
  unsigned int align = GET_MODE_ALIGNMENT (mode);

  addr = choose_baseaddr (cfa_offset, &align);
  mem = gen_frame_mem (mode, addr);

  /* The location alignment depends upon the base register.  */
  align = MIN (GET_MODE_ALIGNMENT (mode), align);
  gcc_assert (! (cfa_offset & (align / BITS_PER_UNIT - 1)));
  set_mem_align (mem, align);

  insn = emit_insn (gen_rtx_SET (mem, reg));
  RTX_FRAME_RELATED_P (insn) = 1;

  base = addr;
  if (GET_CODE (base) == PLUS)
    base = XEXP (base, 0);
  gcc_checking_assert (REG_P (base));

  /* When saving registers into a re-aligned local stack frame, avoid
     any tricky guessing by dwarf2out.  */
  if (m->fs.realigned)
    {
      gcc_checking_assert (stack_realign_drap);

      if (regno == REGNO (crtl->drap_reg))
	{
	  /* A bit of a hack.  We force the DRAP register to be saved in
	     the re-aligned stack frame, which provides us with a copy
	     of the CFA that will last past the prologue.  Install it.  */
	  gcc_checking_assert (cfun->machine->fs.fp_valid);
	  addr = plus_constant (Pmode, hard_frame_pointer_rtx,
				cfun->machine->fs.fp_offset - cfa_offset);
	  mem = gen_rtx_MEM (mode, addr);
	  add_reg_note (insn, REG_CFA_DEF_CFA, mem);
	}
      else
	{
	  /* The frame pointer is a stable reference within the
	     aligned frame.  Use it.  */
	  gcc_checking_assert (cfun->machine->fs.fp_valid);
	  addr = plus_constant (Pmode, hard_frame_pointer_rtx,
				cfun->machine->fs.fp_offset - cfa_offset);
	  mem = gen_rtx_MEM (mode, addr);
	  add_reg_note (insn, REG_CFA_EXPRESSION, gen_rtx_SET (mem, reg));
	}
    }

  else if (base == stack_pointer_rtx && m->fs.sp_realigned
	   && cfa_offset >= m->fs.sp_realigned_offset)
    {
      gcc_checking_assert (stack_realign_fp);
      add_reg_note (insn, REG_CFA_EXPRESSION, gen_rtx_SET (mem, reg));
    }

  /* The memory may not be relative to the current CFA register,
     which means that we may need to generate a new pattern for
     use by the unwind info.  */
  else if (base != m->fs.cfa_reg)
    {
      addr = plus_constant (Pmode, m->fs.cfa_reg,
			    m->fs.cfa_offset - cfa_offset);
      mem = gen_rtx_MEM (mode, addr);
      add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, reg));
    }
}

/* Emit code to save registers using MOV insns.
   First register is stored at CFA - CFA_OFFSET.  */
static void
ix86_emit_save_regs_using_mov (HOST_WIDE_INT cfa_offset)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (GENERAL_REGNO_P (regno) && ix86_save_reg (regno, true, true))
      {
        ix86_emit_save_reg_using_mov (word_mode, regno, cfa_offset);
	cfa_offset -= UNITS_PER_WORD;
      }
}

/* Emit code to save SSE registers using MOV insns.
   First register is stored at CFA - CFA_OFFSET.  */
static void
ix86_emit_save_sse_regs_using_mov (HOST_WIDE_INT cfa_offset)
{
  unsigned int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true, true))
      {
	ix86_emit_save_reg_using_mov (V4SFmode, regno, cfa_offset);
	cfa_offset -= GET_MODE_SIZE (V4SFmode);
      }
}

static GTY(()) rtx queued_cfa_restores;

/* Add a REG_CFA_RESTORE REG note to INSN, or queue it until the next
   stack manipulation insn.  The value is on the stack at CFA - CFA_OFFSET.
   Don't add the note if the previously saved value will be left untouched
   within the stack red zone till return, as unwinders can find the same
   value in the register and on the stack.  */

static void
ix86_add_cfa_restore_note (rtx_insn *insn, rtx reg, HOST_WIDE_INT cfa_offset)
{
  if (!crtl->shrink_wrapped
      && cfa_offset <= cfun->machine->fs.red_zone_offset)
    return;

  if (insn)
    {
      add_reg_note (insn, REG_CFA_RESTORE, reg);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    queued_cfa_restores
      = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
}

/* Add queued REG_CFA_RESTORE notes, if any, to INSN.  */

static void
ix86_add_queued_cfa_restore_notes (rtx insn)
{
  rtx last;
  if (!queued_cfa_restores)
    return;
  for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
    ;
  XEXP (last, 1) = REG_NOTES (insn);
  REG_NOTES (insn) = queued_cfa_restores;
  queued_cfa_restores = NULL_RTX;
  RTX_FRAME_RELATED_P (insn) = 1;
}

/* Expand prologue or epilogue stack adjustment.
   The pattern exists to put a dependency on all ebp-based memory accesses.
   STYLE should be negative if instructions should be marked as frame
   related, zero if the %r11 register is live and cannot be freely used,
   and positive otherwise.  */
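
/* For example, allocating 32 bytes of local frame in the prologue,
   marking the insn frame related and updating the CFA while it is
   still the stack pointer, is done by the callers below roughly as:

     pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				GEN_INT (-32), -1,
				m->fs.cfa_reg == stack_pointer_rtx);

   (the -32 is only an illustrative size).  */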

static rtx
pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
			   int style, bool set_cfa)
{
  struct machine_function *m = cfun->machine;
  rtx insn;
  bool add_frame_related_expr = false;

  if (Pmode == SImode)
    insn = gen_pro_epilogue_adjust_stack_si_add (dest, src, offset);
  else if (x86_64_immediate_operand (offset, DImode))
    insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, offset);
  else
    {
      rtx tmp;
      /* r11 is used by indirect sibcall return as well, set before the
	 epilogue and used after the epilogue.  */
      if (style)
        tmp = gen_rtx_REG (DImode, R11_REG);
      else
	{
	  gcc_assert (src != hard_frame_pointer_rtx
		      && dest != hard_frame_pointer_rtx);
	  tmp = hard_frame_pointer_rtx;
	}
      insn = emit_insn (gen_rtx_SET (tmp, offset));
      if (style < 0)
	add_frame_related_expr = true;

      insn = gen_pro_epilogue_adjust_stack_di_add (dest, src, tmp);
    }

  insn = emit_insn (insn);
  if (style >= 0)
    ix86_add_queued_cfa_restore_notes (insn);

  if (set_cfa)
    {
      rtx r;

      gcc_assert (m->fs.cfa_reg == src);
      m->fs.cfa_offset += INTVAL (offset);
      m->fs.cfa_reg = dest;

      r = gen_rtx_PLUS (Pmode, src, offset);
      r = gen_rtx_SET (dest, r);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else if (style < 0)
    {
      RTX_FRAME_RELATED_P (insn) = 1;
      if (add_frame_related_expr)
	{
	  rtx r = gen_rtx_PLUS (Pmode, src, offset);
	  r = gen_rtx_SET (dest, r);
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, r);
	}
    }

  if (dest == stack_pointer_rtx)
    {
      HOST_WIDE_INT ooffset = m->fs.sp_offset;
      bool valid = m->fs.sp_valid;
      bool realigned = m->fs.sp_realigned;

      if (src == hard_frame_pointer_rtx)
	{
	  valid = m->fs.fp_valid;
	  realigned = false;
	  ooffset = m->fs.fp_offset;
	}
      else if (src == crtl->drap_reg)
	{
	  valid = m->fs.drap_valid;
	  realigned = false;
	  ooffset = 0;
	}
      else
	{
	  /* Else there are two possibilities: SP itself, which we set
	     up as the default above.  Or EH_RETURN_STACKADJ_RTX, which
	     is taken care of by hand along the eh_return path.  */
	  gcc_checking_assert (src == stack_pointer_rtx
			       || offset == const0_rtx);
	}

      m->fs.sp_offset = ooffset - INTVAL (offset);
      m->fs.sp_valid = valid;
      m->fs.sp_realigned = realigned;
    }
  return insn;
}

/* Find an available register to be used as a dynamic realign argument
   pointer register.  Such a register will be written in the prologue
   and used at the beginning of the body, so it must not be
	1. a parameter passing register.
	2. the GOT pointer.
   We reuse the static-chain register if it is available.  Otherwise,
   we use DI for i386 and R13 for x86-64.  We chose R13 since it has
   shorter encoding.

   Return: the regno of the chosen register.  */

static unsigned int
find_drap_reg (void)
{
  tree decl = cfun->decl;

  /* Always use callee-saved register if there are no caller-saved
     registers.  */
  if (TARGET_64BIT)
    {
      /* Use R13 for a nested function or a function that needs a static
	 chain.  Since a function with a tail call may use any caller-saved
	 register in its epilogue, the DRAP must not be a caller-saved
	 register in that case.  */
      if (DECL_STATIC_CHAIN (decl)
	  || cfun->machine->no_caller_saved_registers
	  || crtl->tail_call_emit)
	return R13_REG;

      return R10_REG;
    }
  else
    {
      /* Use DI for a nested function or a function that needs a static
	 chain.  Since a function with a tail call may use any caller-saved
	 register in its epilogue, the DRAP must not be a caller-saved
	 register in that case.  */
      if (DECL_STATIC_CHAIN (decl)
	  || cfun->machine->no_caller_saved_registers
	  || crtl->tail_call_emit)
	return DI_REG;

      /* Reuse static chain register if it isn't used for parameter
         passing.  */
      if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2)
	{
	  unsigned int ccvt = ix86_get_callcvt (TREE_TYPE (decl));
	  if ((ccvt & (IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) == 0)
	    return CX_REG;
	}
      return DI_REG;
    }
}

/* Handle a "force_align_arg_pointer" attribute.  */

static tree
ix86_handle_force_align_arg_pointer_attribute (tree *node, tree name,
					       tree, int, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != METHOD_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

/* Return minimum incoming stack alignment.  */

static unsigned int
ix86_minimum_incoming_stack_boundary (bool sibcall)
{
  unsigned int incoming_stack_boundary;

  /* The stack of an interrupt handler is aligned to 128 bits in
     64-bit mode.  */
  if (cfun->machine->func_type != TYPE_NORMAL)
    incoming_stack_boundary = TARGET_64BIT ? 128 : MIN_STACK_BOUNDARY;
  /* Prefer the value specified on the command line.  */
  else if (ix86_user_incoming_stack_boundary)
    incoming_stack_boundary = ix86_user_incoming_stack_boundary;
  /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack
     boundary if -mstackrealign is used, this isn't a sibcall check,
     and the estimated stack alignment is 128 bits.  */
  else if (!sibcall
	   && ix86_force_align_arg_pointer
	   && crtl->stack_alignment_estimated == 128)
    incoming_stack_boundary = MIN_STACK_BOUNDARY;
  else
    incoming_stack_boundary = ix86_default_incoming_stack_boundary;

  /* Incoming stack alignment can be changed on individual functions
     via force_align_arg_pointer attribute.  We use the smallest
     incoming stack boundary.  */
  if (incoming_stack_boundary > MIN_STACK_BOUNDARY
      && lookup_attribute (ix86_force_align_arg_pointer_string,
			   TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
    incoming_stack_boundary = MIN_STACK_BOUNDARY;

  /* The incoming stack frame has to be aligned at least at
     parm_stack_boundary.  */
  if (incoming_stack_boundary < crtl->parm_stack_boundary)
    incoming_stack_boundary = crtl->parm_stack_boundary;

  /* The stack at the entry of main is aligned by the runtime.  We use
     the smallest incoming stack boundary.  */
  if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
      && DECL_NAME (current_function_decl)
      && MAIN_NAME_P (DECL_NAME (current_function_decl))
      && DECL_FILE_SCOPE_P (current_function_decl))
    incoming_stack_boundary = MAIN_STACK_BOUNDARY;

  return incoming_stack_boundary;
}

/* Update incoming stack boundary and estimated stack alignment.  */

static void
ix86_update_stack_boundary (void)
{
  ix86_incoming_stack_boundary
    = ix86_minimum_incoming_stack_boundary (false);

  /* x86-64 varargs functions need 16-byte stack alignment for the
     register save area.  */
  if (TARGET_64BIT
      && cfun->stdarg
      && crtl->stack_alignment_estimated < 128)
    crtl->stack_alignment_estimated = 128;

  /* __tls_get_addr needs to be called with 16-byte aligned stack.  */
  if (ix86_tls_descriptor_calls_expanded_in_cfun
      && crtl->preferred_stack_boundary < 128)
    crtl->preferred_stack_boundary = 128;
}

/* Handle the TARGET_GET_DRAP_RTX hook.  Return NULL if no DRAP is
   needed or an rtx for DRAP otherwise.  */

static rtx
ix86_get_drap_rtx (void)
{
  /* We must use DRAP if there are outgoing arguments on stack and
     ACCUMULATE_OUTGOING_ARGS is false.  */
  if (ix86_force_drap
      || (cfun->machine->outgoing_args_on_stack
	  && !ACCUMULATE_OUTGOING_ARGS))
    crtl->need_drap = true;

  if (stack_realign_drap)
    {
      /* Assign DRAP to vDRAP and return vDRAP.  */
      unsigned int regno = find_drap_reg ();
      rtx drap_vreg;
      rtx arg_ptr;
      rtx_insn *seq, *insn;

      arg_ptr = gen_rtx_REG (Pmode, regno);
      crtl->drap_reg = arg_ptr;

      start_sequence ();
      drap_vreg = copy_to_reg (arg_ptr);
      seq = get_insns ();
      end_sequence ();

      insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
      if (!optimize)
	{
	  add_reg_note (insn, REG_CFA_SET_VDRAP, drap_vreg);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      return drap_vreg;
    }
  else
    return NULL;
}

/* Handle the TARGET_INTERNAL_ARG_POINTER hook.  */

static rtx
ix86_internal_arg_pointer (void)
{
  return virtual_incoming_args_rtx;
}

struct scratch_reg {
  rtx reg;
  bool saved;
};

/* Return a short-lived scratch register for use on function entry.
   In 32-bit mode, it is valid only after the registers are saved
   in the prologue.  This register must be released by means of
   release_scratch_register_on_entry once it is dead.  */

static void
get_scratch_register_on_entry (struct scratch_reg *sr)
{
  int regno;

  sr->saved = false;

  if (TARGET_64BIT)
    {
      /* We always use R11 in 64-bit mode.  */
      regno = R11_REG;
    }
  else
    {
      tree decl = current_function_decl, fntype = TREE_TYPE (decl);
      bool fastcall_p
	= lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)) != NULL_TREE;
      bool thiscall_p
	= lookup_attribute ("thiscall", TYPE_ATTRIBUTES (fntype)) != NULL_TREE;
      bool static_chain_p = DECL_STATIC_CHAIN (decl);
      int regparm = ix86_function_regparm (fntype, decl);
      int drap_regno
	= crtl->drap_reg ? REGNO (crtl->drap_reg) : INVALID_REGNUM;

      /* 'fastcall' sets regparm to 2, uses ecx/edx for arguments and eax
	  for the static chain register.  */
      if ((regparm < 1 || (fastcall_p && !static_chain_p))
	  && drap_regno != AX_REG)
	regno = AX_REG;
      /* 'thiscall' sets regparm to 1, uses ecx for arguments and edx
	  for the static chain register.  */
      else if (thiscall_p && !static_chain_p && drap_regno != AX_REG)
        regno = AX_REG;
      else if (regparm < 2 && !thiscall_p && drap_regno != DX_REG)
	regno = DX_REG;
      /* ecx is the static chain register.  */
      else if (regparm < 3 && !fastcall_p && !thiscall_p
	       && !static_chain_p
	       && drap_regno != CX_REG)
	regno = CX_REG;
      else if (ix86_save_reg (BX_REG, true, false))
	regno = BX_REG;
      /* esi is the static chain register.  */
      else if (!(regparm == 3 && static_chain_p)
	       && ix86_save_reg (SI_REG, true, false))
	regno = SI_REG;
      else if (ix86_save_reg (DI_REG, true, false))
	regno = DI_REG;
      else
	{
	  regno = (drap_regno == AX_REG ? DX_REG : AX_REG);
	  sr->saved = true;
	}
    }

  sr->reg = gen_rtx_REG (Pmode, regno);
  if (sr->saved)
    {
      rtx_insn *insn = emit_insn (gen_push (sr->reg));
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}

/* Release a scratch register obtained from the preceding function.

   If RELEASE_VIA_POP is true, we just pop the register off the stack
   to release it.  This is what non-Linux systems use with -fstack-check.

   Otherwise we use OFFSET to locate the saved register and the
   allocated stack space becomes part of the local frame and is
   deallocated by the epilogue.  */

static void
release_scratch_register_on_entry (struct scratch_reg *sr, HOST_WIDE_INT offset,
				   bool release_via_pop)
{
  if (sr->saved)
    {
      if (release_via_pop)
	{
	  struct machine_function *m = cfun->machine;
	  rtx x, insn = emit_insn (gen_pop (sr->reg));

	  /* The RTX_FRAME_RELATED_P mechanism doesn't know about pop.  */
	  RTX_FRAME_RELATED_P (insn) = 1;
	  x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (UNITS_PER_WORD));
	  x = gen_rtx_SET (stack_pointer_rtx, x);
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, x);
	  m->fs.sp_offset -= UNITS_PER_WORD;
	}
      else
	{
	  rtx x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (offset));
	  x = gen_rtx_SET (sr->reg, gen_rtx_MEM (word_mode, x));
	  emit_insn (x);
	}
    }
}

/* Emit code to adjust the stack pointer by SIZE bytes while probing it.

   This differs from the next routine in that it tries hard to prevent
   attacks that jump the stack guard.  Thus it is never allowed to allocate
   more than PROBE_INTERVAL bytes of stack space without a suitable
   probe.

   INT_REGISTERS_SAVED is true if integer registers have already been
   pushed on the stack.  */
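
/* For instance, assuming a 4 KiB guard and probe interval, a SIZE of
   8692 bytes (two intervals plus 500) takes the inline case below and
   emits, as a rough 64-bit sketch:

	subq	$4096, %rsp
	orq	$0, (%rsp)
	subq	$4096, %rsp
	orq	$0, (%rsp)
	subq	$500, %rsp

   where each orq is a probe and the final residual allocation is
   deliberately left unprobed.  */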

static void
ix86_adjust_stack_and_probe_stack_clash (HOST_WIDE_INT size,
					 const bool int_registers_saved)
{
  struct machine_function *m = cfun->machine;

  /* If this function does not statically allocate stack space, then
     no probes are needed.  */
  if (!size)
    {
      /* However, the allocation of space via pushes for register
	 saves could be viewed as allocating space, but without the
	 need to probe.  */
      if (m->frame.nregs || m->frame.nsseregs || frame_pointer_needed)
        dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
      else
	dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
      return;
    }

  /* If we are a noreturn function, then we have to consider the
     possibility that we're called via a jump rather than a call.

     Thus we don't have the implicit probe generated by saving the
     return address into the stack at the call.  Thus, the stack
     pointer could be anywhere in the guard page.  The safe thing
     to do is emit a probe now.

     The probe can be avoided if we have already emitted any callee
     register saves into the stack or have a frame pointer (which will
     have been saved as well).  Those saves will function as implicit
     probes.

     ?!? This should be revamped to work like aarch64 and s390 where
     we track the offset from the most recent probe.  Normally that
     offset would be zero.  For a noreturn function we would reset
     it to PROBE_INTERVAL - (STACK_BOUNDARY / BITS_PER_UNIT).   Then
     we just probe when we cross PROBE_INTERVAL.  */
  if (TREE_THIS_VOLATILE (cfun->decl)
      && !(m->frame.nregs || m->frame.nsseregs || frame_pointer_needed))
    {
      /* We can safely use any register here since we're just going to push
	 its value and immediately pop it back.  But we do try and avoid
	 argument passing registers so as not to introduce dependencies in
	 the pipeline.  For 32 bit we use %esi and for 64 bit we use %rax.  */
      rtx dummy_reg = gen_rtx_REG (word_mode, TARGET_64BIT ? AX_REG : SI_REG);
      rtx_insn *insn_push = emit_insn (gen_push (dummy_reg));
      rtx_insn *insn_pop = emit_insn (gen_pop (dummy_reg));
      m->fs.sp_offset -= UNITS_PER_WORD;
      if (m->fs.cfa_reg == stack_pointer_rtx)
	{
	  m->fs.cfa_offset -= UNITS_PER_WORD;
	  rtx x = plus_constant (Pmode, stack_pointer_rtx, -UNITS_PER_WORD);
	  x = gen_rtx_SET (stack_pointer_rtx, x);
	  add_reg_note (insn_push, REG_CFA_ADJUST_CFA, x);
	  RTX_FRAME_RELATED_P (insn_push) = 1;
	  x = plus_constant (Pmode, stack_pointer_rtx, UNITS_PER_WORD);
	  x = gen_rtx_SET (stack_pointer_rtx, x);
	  add_reg_note (insn_pop, REG_CFA_ADJUST_CFA, x);
	  RTX_FRAME_RELATED_P (insn_pop) = 1;
	}
      emit_insn (gen_blockage ());
    }

  /* If we allocate less than the size of the guard statically,
     then no probing is necessary, but we do need to allocate
     the stack.  */
  if (size < (1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE)))
    {
      pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
			         GEN_INT (-size), -1,
			         m->fs.cfa_reg == stack_pointer_rtx);
      dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
      return;
    }

  /* We're allocating a large enough stack frame that we need to
     emit probes.  Either emit them inline or in a loop depending
     on the size.  */
  HOST_WIDE_INT probe_interval = get_probe_interval ();
  if (size <= 4 * probe_interval)
    {
      HOST_WIDE_INT i;
      for (i = probe_interval; i <= size; i += probe_interval)
	{
	  /* Allocate PROBE_INTERVAL bytes.  */
	  rtx insn
	    = pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
					 GEN_INT (-probe_interval), -1,
					 m->fs.cfa_reg == stack_pointer_rtx);
	  add_reg_note (insn, REG_STACK_CHECK, const0_rtx);

	  /* And probe at *sp.  */
	  emit_stack_probe (stack_pointer_rtx);
	  emit_insn (gen_blockage ());
	}

      /* We need to allocate space for the residual, but we do not need
	 to probe the residual.  */
      HOST_WIDE_INT residual = (i - probe_interval - size);
      if (residual)
	pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				   GEN_INT (residual), -1,
				   m->fs.cfa_reg == stack_pointer_rtx);
      dump_stack_clash_frame_info (PROBE_INLINE, residual != 0);
    }
  else
    {
      /* We expect the GP registers to be saved when probes are used
	 as the probing sequences might need a scratch register and
	 the routine to allocate one assumes the integer registers
	 have already been saved.  */
      gcc_assert (int_registers_saved);

      struct scratch_reg sr;
      get_scratch_register_on_entry (&sr);

      /* If we needed to save a register, then account for any space
	 that was pushed (we are not going to pop the register when
	 we do the restore).  */
      if (sr.saved)
	size -= UNITS_PER_WORD;

      /* Step 1: round SIZE down to a multiple of the interval.  */
      HOST_WIDE_INT rounded_size = size & -probe_interval;

      /* Step 2: compute final value of the loop counter.  Use lea if
	 possible.  */
      rtx addr = plus_constant (Pmode, stack_pointer_rtx, -rounded_size);
      rtx insn;
      if (address_no_seg_operand (addr, Pmode))
	insn = emit_insn (gen_rtx_SET (sr.reg, addr));
      else
	{
	  emit_move_insn (sr.reg, GEN_INT (-rounded_size));
	  insn = emit_insn (gen_rtx_SET (sr.reg,
					 gen_rtx_PLUS (Pmode, sr.reg,
						       stack_pointer_rtx)));
	}
      if (m->fs.cfa_reg == stack_pointer_rtx)
	{
	  add_reg_note (insn, REG_CFA_DEF_CFA,
			plus_constant (Pmode, sr.reg,
				       m->fs.cfa_offset + rounded_size));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* Step 3: the loop.  */
      rtx size_rtx = GEN_INT (rounded_size);
      insn = emit_insn (ix86_gen_adjust_stack_and_probe (sr.reg, sr.reg,
							 size_rtx));
      if (m->fs.cfa_reg == stack_pointer_rtx)
	{
	  m->fs.cfa_offset += rounded_size;
	  add_reg_note (insn, REG_CFA_DEF_CFA,
			plus_constant (Pmode, stack_pointer_rtx,
				       m->fs.cfa_offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      m->fs.sp_offset += rounded_size;
      emit_insn (gen_blockage ());

      /* Step 4: adjust SP if we cannot assert at compile-time that SIZE
	 is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
	pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				   GEN_INT (rounded_size - size), -1,
				   m->fs.cfa_reg == stack_pointer_rtx);
      dump_stack_clash_frame_info (PROBE_LOOP, size != rounded_size);

      /* This does not deallocate the space reserved for the scratch
	 register.  That will be deallocated in the epilogue.  */
      release_scratch_register_on_entry (&sr, size, false);
    }

  /* Make sure nothing is scheduled before we are done.  */
  emit_insn (gen_blockage ());
}

/* Emit code to adjust the stack pointer by SIZE bytes while probing it.

   INT_REGISTERS_SAVED is true if integer registers have already been
   pushed on the stack.  */

static void
ix86_adjust_stack_and_probe (HOST_WIDE_INT size,
			     const bool int_registers_saved)
{
  /* We skip the probe for the first interval + a small dope of 4 words and
     probe that many bytes past the specified size to maintain a protection
     area at the bottom of the stack.  */
  const int dope = 4 * UNITS_PER_WORD;
  rtx size_rtx = GEN_INT (size), last;

  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  The run-time loop is made up of 9 insns in the
     generic case while the compile-time loop is made up of 3+2*(n-1) insns
     for n # of intervals.  */
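  /* For example, with n == 4 the unrolled form costs 3 + 2 * (4 - 1)
     = 9 insns, the same as the run-time loop, which is why the cutoff
     below sits at four probe intervals.  */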
  if (size <= 4 * get_probe_interval ())
    {
      HOST_WIDE_INT i, adjust;
      bool first_probe = true;

      /* Adjust SP and probe at PROBE_INTERVAL + N * PROBE_INTERVAL for
	 values of N from 1 until it exceeds SIZE.  If only one probe is
	 needed, this will not generate any code.  Then adjust and probe
	 to PROBE_INTERVAL + SIZE.  */
      for (i = get_probe_interval (); i < size; i += get_probe_interval ())
	{
	  if (first_probe)
	    {
	      adjust = 2 * get_probe_interval () + dope;
	      first_probe = false;
	    }
	  else
	    adjust = get_probe_interval ();

	  emit_insn (gen_rtx_SET (stack_pointer_rtx,
				  plus_constant (Pmode, stack_pointer_rtx,
						 -adjust)));
	  emit_stack_probe (stack_pointer_rtx);
	}

      if (first_probe)
	adjust = size + get_probe_interval () + dope;
      else
        adjust = size + get_probe_interval () - i;

      emit_insn (gen_rtx_SET (stack_pointer_rtx,
			      plus_constant (Pmode, stack_pointer_rtx,
					     -adjust)));
      emit_stack_probe (stack_pointer_rtx);

      /* Adjust back to account for the additional first interval.  */
      last = emit_insn (gen_rtx_SET (stack_pointer_rtx,
				     plus_constant (Pmode, stack_pointer_rtx,
						    (get_probe_interval ()
						     + dope))));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      /* We expect the GP registers to be saved when probes are used
	 as the probing sequences might need a scratch register and
	 the routine to allocate one assumes the integer registers
	 have already been saved.  */
      gcc_assert (int_registers_saved);

      HOST_WIDE_INT rounded_size;
      struct scratch_reg sr;

      get_scratch_register_on_entry (&sr);

      /* If we needed to save a register, then account for any space
	 that was pushed (we are not going to pop the register when
	 we do the restore).  */
      if (sr.saved)
	size -= UNITS_PER_WORD;

      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = ROUND_DOWN (size, get_probe_interval ());


      /* Step 2: compute initial and final value of the loop counter.  */

      /* SP = SP_0 + PROBE_INTERVAL.  */
      emit_insn (gen_rtx_SET (stack_pointer_rtx,
			      plus_constant (Pmode, stack_pointer_rtx,
					     - (get_probe_interval () + dope))));

      /* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE.  */
      if (rounded_size <= (HOST_WIDE_INT_1 << 31))
	emit_insn (gen_rtx_SET (sr.reg,
				plus_constant (Pmode, stack_pointer_rtx,
					       -rounded_size)));
      else
	{
	  emit_move_insn (sr.reg, GEN_INT (-rounded_size));
	  emit_insn (gen_rtx_SET (sr.reg,
				  gen_rtx_PLUS (Pmode, sr.reg,
						stack_pointer_rtx)));
	}


      /* Step 3: the loop

	 do
	   {
	     SP = SP + PROBE_INTERVAL
	     probe at SP
	   }
	 while (SP != LAST_ADDR)

	 adjusts SP and probes to PROBE_INTERVAL + N * PROBE_INTERVAL for
	 values of N from 1 until it is equal to ROUNDED_SIZE.  */

      emit_insn (ix86_gen_adjust_stack_and_probe (sr.reg, sr.reg, size_rtx));


      /* Step 4: adjust SP and probe at PROBE_INTERVAL + SIZE if we cannot
	 assert at compile-time that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
	{
	  emit_insn (gen_rtx_SET (stack_pointer_rtx,
			          plus_constant (Pmode, stack_pointer_rtx,
						 rounded_size - size)));
	  emit_stack_probe (stack_pointer_rtx);
	}

      /* Adjust back to account for the additional first interval.  */
      last = emit_insn (gen_rtx_SET (stack_pointer_rtx,
				     plus_constant (Pmode, stack_pointer_rtx,
						    (get_probe_interval ()
						     + dope))));

      /* This does not deallocate the space reserved for the scratch
	 register.  That will be deallocated in the epilogue.  */
      release_scratch_register_on_entry (&sr, size, false);
    }

  /* Even if the stack pointer isn't the CFA register, we need to correctly
     describe the adjustments made to it, in particular differentiate the
     frame-related ones from the frame-unrelated ones.  */
  if (size > 0)
    {
      rtx expr = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (2));
      XVECEXP (expr, 0, 0)
	= gen_rtx_SET (stack_pointer_rtx,
		       plus_constant (Pmode, stack_pointer_rtx, -size));
      XVECEXP (expr, 0, 1)
	= gen_rtx_SET (stack_pointer_rtx,
		       plus_constant (Pmode, stack_pointer_rtx,
				      get_probe_interval () + dope + size));
      add_reg_note (last, REG_FRAME_RELATED_EXPR, expr);
      RTX_FRAME_RELATED_P (last) = 1;

      cfun->machine->fs.sp_offset += size;
    }

  /* Make sure nothing is scheduled before we are done.  */
  emit_insn (gen_blockage ());
}

/* Adjust the stack pointer up to REG while probing it.  */
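
/* A rough sketch of the AT&T output, assuming 64-bit mode, a 4 KiB
   probe interval and %r11 as REG:

	.LPSRL0:
		subq	$4096, %rsp
		orq	$0, (%rsp)
		cmpq	%r11, %rsp
		jne	.LPSRL0  */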

const char *
output_adjust_stack_and_probe (rtx reg)
{
  static int labelno = 0;
  char loop_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);

  /* Loop.  */
  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* SP = SP + PROBE_INTERVAL.  */
  xops[0] = stack_pointer_rtx;
  xops[1] = GEN_INT (get_probe_interval ());
  output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);

  /* Probe at SP.  */
  xops[1] = const0_rtx;
  output_asm_insn ("or%z0\t{%1, (%0)|DWORD PTR [%0], %1}", xops);

  /* Test if SP == LAST_ADDR.  */
  xops[0] = stack_pointer_rtx;
  xops[1] = reg;
  output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);

  /* Branch.  */
  fputs ("\tjne\t", asm_out_file);
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  return "";
}

/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.

   INT_REGISTERS_SAVED is true if integer registers have already been
   pushed on the stack.  */

static void
ix86_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size,
			     const bool int_registers_saved)
{
  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  The run-time loop is made up of 6 insns in the
     generic case while the compile-time loop is made up of n insns for n #
     of intervals.  */
  if (size <= 6 * get_probe_interval ())
    {
      HOST_WIDE_INT i;

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
	 it exceeds SIZE.  If only one probe is needed, this will not
	 generate any code.  Then probe at FIRST + SIZE.  */
      for (i = get_probe_interval (); i < size; i += get_probe_interval ())
	emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
					 -(first + i)));

      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
				       -(first + size)));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      /* We expect the GP registers to be saved when probes are used
	 as the probing sequences might need a scratch register and
	 the routine to allocate one assumes the integer registers
	 have already been saved.  */
      gcc_assert (int_registers_saved);

      HOST_WIDE_INT rounded_size, last;
      struct scratch_reg sr;

      get_scratch_register_on_entry (&sr);


      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = ROUND_DOWN (size, get_probe_interval ());


      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_OFFSET = FIRST.  */
      emit_move_insn (sr.reg, GEN_INT (-first));

      /* LAST_OFFSET = FIRST + ROUNDED_SIZE.  */
      last = first + rounded_size;


      /* Step 3: the loop

	 do
	   {
	     TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
	     probe at TEST_ADDR
	   }
	 while (TEST_ADDR != LAST_ADDR)

         probes at FIRST + N * PROBE_INTERVAL for values of N from 1
         until it is equal to ROUNDED_SIZE.  */

      emit_insn (ix86_gen_probe_stack_range (sr.reg, sr.reg, GEN_INT (-last)));


      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
	 that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
	emit_stack_probe (plus_constant (Pmode,
					 gen_rtx_PLUS (Pmode,
						       stack_pointer_rtx,
						       sr.reg),
					 rounded_size - size));

      release_scratch_register_on_entry (&sr, size, true);
    }

  /* Make sure nothing is scheduled before we are done.  */
  emit_insn (gen_blockage ());
}

/* Probe a range of stack addresses from REG to END, inclusive.  These are
   offsets from the current stack pointer.  */
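
/* A rough sketch of the AT&T output, assuming 64-bit mode, a 4 KiB
   probe interval, %r11 as REG (pre-loaded with -FIRST by the caller)
   and END of $-20480, i.e. -(FIRST + ROUNDED_SIZE) with FIRST = 4096
   and ROUNDED_SIZE = 16384:

	.LPSRL0:
		subq	$4096, %r11
		orq	$0, (%rsp,%r11)
		cmpq	$-20480, %r11
		jne	.LPSRL0  */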

const char *
output_probe_stack_range (rtx reg, rtx end)
{
  static int labelno = 0;
  char loop_lab[32];
  rtx xops[3];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);

  /* Loop.  */
  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[0] = reg;
  xops[1] = GEN_INT (get_probe_interval ());
  output_asm_insn ("sub%z0\t{%1, %0|%0, %1}", xops);

  /* Probe at TEST_ADDR.  */
  xops[0] = stack_pointer_rtx;
  xops[1] = reg;
  xops[2] = const0_rtx;
  output_asm_insn ("or%z0\t{%2, (%0,%1)|DWORD PTR [%0+%1], %2}", xops);

  /* Test if TEST_ADDR == LAST_ADDR.  */
  xops[0] = reg;
  xops[1] = end;
  output_asm_insn ("cmp%z0\t{%1, %0|%0, %1}", xops);

  /* Branch.  */
  fputs ("\tjne\t", asm_out_file);
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  return "";
}

/* Return true if a stack frame is required.  If a stack frame is
   required and CHECK_STACK_SLOT is true, update STACK_ALIGNMENT to
   the largest alignment, in bits, of any stack slot used.  */

static bool
ix86_find_max_used_stack_alignment (unsigned int &stack_alignment,
				    bool check_stack_slot)
{
  HARD_REG_SET set_up_by_prologue, prologue_used;
  basic_block bb;

  CLEAR_HARD_REG_SET (prologue_used);
  CLEAR_HARD_REG_SET (set_up_by_prologue);
  add_to_hard_reg_set (&set_up_by_prologue, Pmode, STACK_POINTER_REGNUM);
  add_to_hard_reg_set (&set_up_by_prologue, Pmode, ARG_POINTER_REGNUM);
  add_to_hard_reg_set (&set_up_by_prologue, Pmode,
		       HARD_FRAME_POINTER_REGNUM);

  /* We need not track alignments above the preferred stack boundary.  */
  if (stack_alignment > crtl->preferred_stack_boundary)
    stack_alignment = crtl->preferred_stack_boundary;

  bool require_stack_frame = false;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;
      FOR_BB_INSNS (bb, insn)
	if (NONDEBUG_INSN_P (insn)
	    && requires_stack_frame_p (insn, prologue_used,
				       set_up_by_prologue))
	  {
	    require_stack_frame = true;

	    if (check_stack_slot)
	      {
		/* Find the maximum stack alignment.  */
		subrtx_iterator::array_type array;
		FOR_EACH_SUBRTX (iter, array, PATTERN (insn), ALL)
		  if (MEM_P (*iter)
		      && (reg_mentioned_p (stack_pointer_rtx,
					   *iter)
			  || reg_mentioned_p (frame_pointer_rtx,
					      *iter)))
		    {
		      unsigned int alignment = MEM_ALIGN (*iter);
		      if (alignment > stack_alignment)
			stack_alignment = alignment;
		    }
	      }
	  }
    }

  return require_stack_frame;
}

/* Finalize the stack_realign_needed and frame_pointer_needed flags,
   which guide the prologue/epilogue to be generated in the correct
   form.  */

static void
ix86_finalize_stack_frame_flags (void)
{
  /* Check if stack realignment is really needed after reload, and
     store the result in cfun.  */
  unsigned int incoming_stack_boundary
    = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
       ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
  unsigned int stack_alignment
    = (crtl->is_leaf && !ix86_current_function_calls_tls_descriptor
       ? crtl->max_used_stack_slot_alignment
       : crtl->stack_alignment_needed);
  unsigned int stack_realign
    = (incoming_stack_boundary < stack_alignment);
  bool recompute_frame_layout_p = false;

  if (crtl->stack_realign_finalized)
    {
      /* After stack_realign_needed is finalized, we can no longer
	 change it.  */
      gcc_assert (crtl->stack_realign_needed == stack_realign);
      return;
    }

  /* If the only reason for frame_pointer_needed is that we conservatively
     assumed stack realignment might be needed or -fno-omit-frame-pointer
     is used, but in the end nothing that needed the stack alignment had
     been spilled and there was no stack access, clear frame_pointer_needed
     and say we don't need stack realignment.  */
  if ((stack_realign || (!flag_omit_frame_pointer && optimize))
      && frame_pointer_needed
      && crtl->is_leaf
      && crtl->sp_is_unchanging
      && !ix86_current_function_calls_tls_descriptor
      && !crtl->accesses_prior_frames
      && !cfun->calls_alloca
      && !crtl->calls_eh_return
      /* See ira_setup_eliminable_regset for the rationale.  */
      && !(STACK_CHECK_MOVING_SP
	   && flag_stack_check
	   && flag_exceptions
	   && cfun->can_throw_non_call_exceptions)
      && !ix86_frame_pointer_required ()
      && get_frame_size () == 0
      && ix86_nsaved_sseregs () == 0
      && ix86_varargs_gpr_size + ix86_varargs_fpr_size == 0)
    {
      if (ix86_find_max_used_stack_alignment (stack_alignment,
					      stack_realign))
	{
	  /* A stack frame is required.  If the stack alignment needed
	     is less than the incoming stack boundary, don't realign
	     the stack.  */
	  if (!stack_realign)
	    {
	      crtl->max_used_stack_slot_alignment
		= incoming_stack_boundary;
	      crtl->stack_alignment_needed
		= incoming_stack_boundary;
	      /* Also update preferred_stack_boundary for leaf
	         functions.  */
	      crtl->preferred_stack_boundary
		= incoming_stack_boundary;
	    }
	}
      else
	{
	  /* If drap has been set, but it actually isn't live at the
	     start of the function, there is no reason to set it up.  */
	  if (crtl->drap_reg)
	    {
	      basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
	      if (! REGNO_REG_SET_P (DF_LR_IN (bb),
				     REGNO (crtl->drap_reg)))
		{
		  crtl->drap_reg = NULL_RTX;
		  crtl->need_drap = false;
		}
	    }
	  else
	    cfun->machine->no_drap_save_restore = true;

	  frame_pointer_needed = false;
	  stack_realign = false;
	  crtl->max_used_stack_slot_alignment = incoming_stack_boundary;
	  crtl->stack_alignment_needed = incoming_stack_boundary;
	  crtl->stack_alignment_estimated = incoming_stack_boundary;
	  if (crtl->preferred_stack_boundary > incoming_stack_boundary)
	    crtl->preferred_stack_boundary = incoming_stack_boundary;
	  df_finish_pass (true);
	  df_scan_alloc (NULL);
	  df_scan_blocks ();
	  df_compute_regs_ever_live (true);
	  df_analyze ();

	  if (flag_var_tracking)
	    {
	      /* Since the frame pointer is no longer available, replace
		 it with stack pointer - UNITS_PER_WORD in debug insns.  */
	      df_ref ref, next;
	      for (ref = DF_REG_USE_CHAIN (HARD_FRAME_POINTER_REGNUM);
		   ref; ref = next)
		{
		  next = DF_REF_NEXT_REG (ref);
		  if (!DF_REF_INSN_INFO (ref))
		    continue;

		  /* Make sure the next ref is for a different instruction,
		     so that we're not affected by the rescan.  */
		  rtx_insn *insn = DF_REF_INSN (ref);
		  while (next && DF_REF_INSN (next) == insn)
		    next = DF_REF_NEXT_REG (next);

		  if (DEBUG_INSN_P (insn))
		    {
		      bool changed = false;
		      for (; ref != next; ref = DF_REF_NEXT_REG (ref))
			{
			  rtx *loc = DF_REF_LOC (ref);
			  if (*loc == hard_frame_pointer_rtx)
			    {
			      *loc = plus_constant (Pmode,
						    stack_pointer_rtx,
						    -UNITS_PER_WORD);
			      changed = true;
			    }
			}
		      if (changed)
			df_insn_rescan (insn);
		    }
		}
	    }

	  recompute_frame_layout_p = true;
	}
    }
  else if (crtl->max_used_stack_slot_alignment >= 128)
    {
      /* We don't need to realign the stack.  max_used_stack_alignment
	 is used to decide how the stack frame should be aligned.  This
	 is independent of any psABI and of 32-bit vs 64-bit.  It is
	 always safe to compute max_used_stack_alignment.  We compute it
	 only if a 128-bit aligned load/store may be generated on a
	 misaligned stack slot, which would lead to a segfault.  */
      if (ix86_find_max_used_stack_alignment (stack_alignment, true))
	cfun->machine->max_used_stack_alignment
	  = stack_alignment / BITS_PER_UNIT;
    }

  if (crtl->stack_realign_needed != stack_realign)
    recompute_frame_layout_p = true;
  crtl->stack_realign_needed = stack_realign;
  crtl->stack_realign_finalized = true;
  if (recompute_frame_layout_p)
    ix86_compute_frame_layout ();
}

/* Delete the SET_GOT insn right after the entry block if its
   destination is REG.  */

static void
ix86_elim_entry_set_got (rtx reg)
{
  basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
  rtx_insn *c_insn = BB_HEAD (bb);
  if (!NONDEBUG_INSN_P (c_insn))
    c_insn = next_nonnote_nondebug_insn (c_insn);
  if (c_insn && NONJUMP_INSN_P (c_insn))
    {
      rtx pat = PATTERN (c_insn);
      if (GET_CODE (pat) == PARALLEL)
	{
	  rtx vec = XVECEXP (pat, 0, 0);
	  if (GET_CODE (vec) == SET
	      && XINT (XEXP (vec, 1), 1) == UNSPEC_SET_GOT
	      && REGNO (XEXP (vec, 0)) == REGNO (reg))
	    delete_insn (c_insn);
	}
    }
}
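
/* Generate a single SET that loads REG from, or stores REG to, the
   frame slot at FRAME_REG + OFFSET; STORE selects the direction.
   These helpers build the elements of the PARALLEL used below for the
   out-of-line ms2sysv save stub.  */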

static rtx
gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
{
  rtx addr, mem;

  if (offset)
    addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
  mem = gen_frame_mem (GET_MODE (reg), offset ? addr : frame_reg);
  return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
}

static inline rtx
gen_frame_load (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, false);
}

static inline rtx
gen_frame_store (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, true);
}

static void
ix86_emit_outlined_ms2sysv_save (const struct ix86_frame &frame)
{
  struct machine_function *m = cfun->machine;
  const unsigned ncregs = NUM_X86_64_MS_CLOBBERED_REGS
			  + m->call_ms2sysv_extra_regs;
  rtvec v = rtvec_alloc (ncregs + 1);
  unsigned int align, i, vi = 0;
  rtx_insn *insn;
  rtx sym, addr;
  rtx rax = gen_rtx_REG (word_mode, AX_REG);
  const struct xlogue_layout &xlogue = xlogue_layout::get_instance ();

  /* AL should only be live with sysv_abi.  */
  gcc_assert (!ix86_eax_live_at_start_p ());
  gcc_assert (m->fs.sp_offset >= frame.sse_reg_save_offset);

  /* Set up RAX as the stub's base pointer.  We use stack_realign_offset
     regardless of whether we've actually realigned the stack or not.  */
  align = GET_MODE_ALIGNMENT (V4SFmode);
  addr = choose_baseaddr (frame.stack_realign_offset
			  + xlogue.get_stub_ptr_offset (), &align, AX_REG);
  gcc_assert (align >= GET_MODE_ALIGNMENT (V4SFmode));

  emit_insn (gen_rtx_SET (rax, addr));

  /* Get the stub symbol.  */
  sym = xlogue.get_stub_rtx (frame_pointer_needed ? XLOGUE_STUB_SAVE_HFP
						  : XLOGUE_STUB_SAVE);
  RTVEC_ELT (v, vi++) = gen_rtx_USE (VOIDmode, sym);

  for (i = 0; i < ncregs; ++i)
    {
      const xlogue_layout::reginfo &r = xlogue.get_reginfo (i);
      rtx reg = gen_rtx_REG ((SSE_REGNO_P (r.regno) ? V4SFmode : word_mode),
			     r.regno);
      RTVEC_ELT (v, vi++) = gen_frame_store (reg, rax, -r.offset);
    }

  gcc_assert (vi == (unsigned)GET_NUM_ELEM (v));

  insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, v));
  RTX_FRAME_RELATED_P (insn) = true;
}

/* Expand the prologue into a bunch of separate insns.  */

void
ix86_expand_prologue (void)
{
  struct machine_function *m = cfun->machine;
  rtx insn, t;
  HOST_WIDE_INT allocate;
  bool int_registers_saved;
  bool sse_registers_saved;
  bool save_stub_call_needed;
  rtx static_chain = NULL_RTX;

  if (ix86_function_naked (current_function_decl))
    return;

  ix86_finalize_stack_frame_flags ();

  /* DRAP should not coexist with stack_realign_fp.  */
  gcc_assert (!(crtl->drap_reg && stack_realign_fp));

  memset (&m->fs, 0, sizeof (m->fs));

  /* Initialize CFA state for before the prologue.  */
  m->fs.cfa_reg = stack_pointer_rtx;
  m->fs.cfa_offset = INCOMING_FRAME_SP_OFFSET;

  /* Track the SP offset to the CFA.  We continue tracking this after
     we've swapped the CFA register away from SP.  In the case of
     re-alignment this is fudged; we're interested in offsets within
     the local frame.  */
  m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
  m->fs.sp_valid = true;
  m->fs.sp_realigned = false;

  const struct ix86_frame &frame = cfun->machine->frame;

  if (!TARGET_64BIT && ix86_function_ms_hook_prologue (current_function_decl))
    {
      /* We should have already generated an error for any use of
         ms_hook on a nested function.  */
      gcc_checking_assert (!ix86_static_chain_on_stack);

      /* Check if profiling is active and we shall use the
	 profiling-before-prologue variant.  If so, sorry.  */
      if (crtl->profile && flag_fentry != 0)
        sorry ("ms_hook_prologue attribute isn%'t compatible "
	       "with -mfentry for 32-bit");

      /* In ix86_asm_output_function_label we emitted:
	 8b ff     movl.s %edi,%edi
	 55        push   %ebp
	 8b ec     movl.s %esp,%ebp

	 This matches the hookable function prologue in Win32 API
	 functions in Microsoft Windows XP Service Pack 2 and newer.
	 Wine uses this to enable Windows apps to hook the Win32 API
	 functions provided by Wine.

	 What that means is that we've already set up the frame pointer.  */

      if (frame_pointer_needed
	  && !(crtl->drap_reg && crtl->stack_realign_needed))
	{
	  rtx push, mov;

	  /* We've decided to use the frame pointer already set up.
	     Describe this to the unwinder by pretending that both
	     push and mov insns happen right here.

	     Putting the unwind info here at the end of the ms_hook
	     is done so that we can make absolutely certain we get
	     the required byte sequence at the start of the function,
	     rather than relying on an assembler that can produce
	     the exact encoding required.

	     However it does mean (in the unpatched case) that we have
	     a 1 insn window where the asynchronous unwind info is
	     incorrect.  However, if we placed the unwind info at
	     its correct location we would have incorrect unwind info
	     in the patched case.  Which is probably all moot since
	     I don't expect Wine generates dwarf2 unwind info for the
	     system libraries that use this feature.  */

	  insn = emit_insn (gen_blockage ());

	  push = gen_push (hard_frame_pointer_rtx);
	  mov = gen_rtx_SET (hard_frame_pointer_rtx,
			     stack_pointer_rtx);
	  RTX_FRAME_RELATED_P (push) = 1;
	  RTX_FRAME_RELATED_P (mov) = 1;

	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, push, mov)));

	  /* Note that gen_push incremented m->fs.cfa_offset, even
	     though we didn't emit the push insn here.  */
	  m->fs.cfa_reg = hard_frame_pointer_rtx;
	  m->fs.fp_offset = m->fs.cfa_offset;
	  m->fs.fp_valid = true;
	}
      else
	{
	  /* The frame pointer is not needed so pop %ebp again.
	     This leaves us with a pristine state.  */
	  emit_insn (gen_pop (hard_frame_pointer_rtx));
	}
    }

  /* The first insn of a function that accepts its static chain on the
     stack is to push the register that would be filled in by a direct
     call.  This insn will be skipped by the trampoline.  */
  else if (ix86_static_chain_on_stack)
    {
      static_chain = ix86_static_chain (cfun->decl, false);
      insn = emit_insn (gen_push (static_chain));
      emit_insn (gen_blockage ());

      /* We don't want to interpret this push insn as a register save,
	 only as a stack adjustment.  The real copy of the register as
	 a save will be done later, if needed.  */
      t = plus_constant (Pmode, stack_pointer_rtx, -UNITS_PER_WORD);
      t = gen_rtx_SET (stack_pointer_rtx, t);
      add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Emit prologue code to adjust stack alignment and set up the DRAP,
     in case DRAP is needed and stack realignment is really needed
     after reload.  */
  if (stack_realign_drap)
    {
      int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;

      /* Can't use DRAP in interrupt function.  */
      if (cfun->machine->func_type != TYPE_NORMAL)
	sorry ("Dynamic Realign Argument Pointer (DRAP) not supported "
	       "in interrupt service routine.  This may be worked "
	       "around by avoiding functions with aggregate return.");

      /* Only need to push the parameter pointer reg if it is
	 callee-saved (i.e. not in call_used_regs).  */
      if (!call_used_regs[REGNO (crtl->drap_reg)])
	{
	  /* Push arg pointer reg */
	  insn = emit_insn (gen_push (crtl->drap_reg));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* Grab the argument pointer.  */
      t = plus_constant (Pmode, stack_pointer_rtx, m->fs.sp_offset);
      insn = emit_insn (gen_rtx_SET (crtl->drap_reg, t));
      RTX_FRAME_RELATED_P (insn) = 1;
      m->fs.cfa_reg = crtl->drap_reg;
      m->fs.cfa_offset = 0;

      /* Align the stack.  */
      insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
					stack_pointer_rtx,
					GEN_INT (-align_bytes)));
      RTX_FRAME_RELATED_P (insn) = 1;

      /* Replicate the return address on the stack so that the return
	 address can be reached via the (argp - 1) slot.  This is
	 needed to implement the macro RETURN_ADDR_RTX and the
	 intrinsic function expand_builtin_return_addr, etc.  */
      t = plus_constant (Pmode, crtl->drap_reg, -UNITS_PER_WORD);
      t = gen_frame_mem (word_mode, t);
      insn = emit_insn (gen_push (t));
      RTX_FRAME_RELATED_P (insn) = 1;

      /* For the purposes of frame and register save area addressing,
	 we've started over with a new frame.  */
      m->fs.sp_offset = INCOMING_FRAME_SP_OFFSET;
      m->fs.realigned = true;

      if (static_chain)
	{
	  /* Replicate the static chain on the stack so that the static
	     chain can be reached via the (argp - 2) slot.  This is
	     needed for a nested function with stack realignment.  */
	  insn = emit_insn (gen_push (static_chain));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }

  int_registers_saved = (frame.nregs == 0);
  sse_registers_saved = (frame.nsseregs == 0);
  save_stub_call_needed = (m->call_ms2sysv);
  gcc_assert (sse_registers_saved || !save_stub_call_needed);

  if (frame_pointer_needed && !m->fs.fp_valid)
    {
      /* Note: AT&T enter does NOT have reversed args.  Enter is probably
         slower on all targets.  Also sdb didn't like it.  */
      insn = emit_insn (gen_push (hard_frame_pointer_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;

      /* Push registers now, before setting the frame pointer
	 on SEH target.  */
      if (!int_registers_saved
	  && TARGET_SEH
	  && !frame.save_regs_using_mov)
	{
	  ix86_emit_save_regs ();
	  int_registers_saved = true;
	  gcc_assert (m->fs.sp_offset == frame.reg_save_offset);
	}

      if (m->fs.sp_offset == frame.hard_frame_pointer_offset)
	{
	  insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  if (m->fs.cfa_reg == stack_pointer_rtx)
	    m->fs.cfa_reg = hard_frame_pointer_rtx;
	  m->fs.fp_offset = m->fs.sp_offset;
	  m->fs.fp_valid = true;
	}
    }

  if (!int_registers_saved)
    {
      /* If saving registers via PUSH, do so now.  */
      if (!frame.save_regs_using_mov)
	{
	  ix86_emit_save_regs ();
	  int_registers_saved = true;
	  gcc_assert (m->fs.sp_offset == frame.reg_save_offset);
	}

      /* When using the red zone we may start register saving before
	 allocating the stack frame, saving one cycle of the prologue.
	 However, avoid doing this if we have to probe the stack; at
	 least on x86_64 the stack probe can turn into a call that
	 clobbers a red zone location.  */
      else if (ix86_using_red_zone ()
	       && (! TARGET_STACK_PROBE
		   || frame.stack_pointer_offset < CHECK_STACK_LIMIT))
	{
	  ix86_emit_save_regs_using_mov (frame.reg_save_offset);
	  int_registers_saved = true;
	}
    }

  if (stack_realign_fp)
    {
      int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
      gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);

      /* Record last valid frame pointer offset.  */
      m->fs.sp_realigned_fp_last = frame.reg_save_offset;

      /* The computation of the size of the re-aligned stack frame means
	 that we must allocate the size of the register save area before
	 performing the actual alignment.  Otherwise we cannot guarantee
	 that there's enough storage above the realignment point.  */
      allocate = frame.reg_save_offset - m->fs.sp_offset
		 + frame.stack_realign_allocate;
      if (allocate)
        pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
				   GEN_INT (-allocate), -1, false);
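      /* With, say, 24 bytes of register save area left to allocate and
	 frame.stack_realign_allocate == 8 (numbers illustrative), the
	 stack pointer drops by 32 here so that the AND below cannot
	 clip storage needed by the saves.  */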

      /* Align the stack.  */
      insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
					stack_pointer_rtx,
					GEN_INT (-align_bytes)));
      m->fs.sp_offset = ROUND_UP (m->fs.sp_offset, align_bytes);
      m->fs.sp_realigned_offset
	= m->fs.sp_offset - frame.stack_realign_allocate;
      /* The stack pointer may no longer be equal to CFA - m->fs.sp_offset.
	 Beyond this point, stack access should be done via choose_baseaddr or
	 by using sp_valid_at and fp_valid_at to determine the correct base
	 register.  Henceforth, any CFA offset should be thought of as logical
	 and not physical.  */
      gcc_assert (m->fs.sp_realigned_offset >= m->fs.sp_realigned_fp_last);
      gcc_assert (m->fs.sp_realigned_offset == frame.stack_realign_offset);
      m->fs.sp_realigned = true;

      /* SEH unwind emit doesn't currently support REG_CFA_EXPRESSION, which
	 is needed to describe where a register is saved using a realigned
	 stack pointer, so we need to invalidate the stack pointer for that
	 target.  */
      if (TARGET_SEH)
	m->fs.sp_valid = false;

      /* If SP offset is non-immediate after allocation of the stack frame,
	 then emit SSE saves or stub call prior to allocating the rest of the
	 stack frame.  This is less efficient for the out-of-line stub because
	 we can't combine allocations across the call barrier, but it's better
	 than using a scratch register.  */
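      /* (On x86-64 an address displacement must fit in a sign-extended
	 32-bit immediate, which is exactly what x86_64_immediate_operand
	 checks here.)  */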
      else if (!x86_64_immediate_operand (GEN_INT (frame.stack_pointer_offset
						   - m->fs.sp_realigned_offset),
					  Pmode))
	{
	  if (!sse_registers_saved)
	    {
	      ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);
	      sse_registers_saved = true;
	    }
	  else if (save_stub_call_needed)
	    {
	      ix86_emit_outlined_ms2sysv_save (frame);
	      save_stub_call_needed = false;
	    }
	}
    }

  allocate = frame.stack_pointer_offset - m->fs.sp_offset;

  if (flag_stack_usage_info)
    {
      /* We start to count from ARG_POINTER.  */
      HOST_WIDE_INT stack_size = frame.stack_pointer_offset;

      /* If it was realigned, take into account the fake frame.  */
      if (stack_realign_drap)
	{
	  if (ix86_static_chain_on_stack)
	    stack_size += UNITS_PER_WORD;

	  if (!call_used_regs[REGNO (crtl->drap_reg)])
	    stack_size += UNITS_PER_WORD;

	  /* This over-estimates by 1 minimal-stack-alignment-unit but
	     mitigates that by counting in the new return address slot.  */
	  current_function_dynamic_stack_size
	    += crtl->stack_alignment_needed / BITS_PER_UNIT;
	}

      current_function_static_stack_size = stack_size;
    }

  /* On SEH targets with a very large frame size, allocate an area in
     which to save the SSE registers (as the very large allocation
     won't be described by the unwind info).  */
  if (TARGET_SEH
      && frame.stack_pointer_offset > SEH_MAX_FRAME_SIZE
      && !sse_registers_saved)
    {
      HOST_WIDE_INT sse_size
	= frame.sse_reg_save_offset - frame.reg_save_offset;

      gcc_assert (int_registers_saved);

      /* No need to do stack checking as the area will be immediately
	 written.  */
      pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
			         GEN_INT (-sse_size), -1,
				 m->fs.cfa_reg == stack_pointer_rtx);
      allocate -= sse_size;
      ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);
      sse_registers_saved = true;
    }

  /* The stack has already been decremented by the instruction calling
     us, so probe if the size is non-negative to preserve the
     protection area.  */
  if (allocate >= 0
      && (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
	  || flag_stack_clash_protection))
    {
      if (flag_stack_clash_protection)
	{
	  ix86_adjust_stack_and_probe_stack_clash (allocate,
						   int_registers_saved);
	  allocate = 0;
	}
      else if (STACK_CHECK_MOVING_SP)
	{
	  if (!(crtl->is_leaf && !cfun->calls_alloca
		&& allocate <= get_probe_interval ()))
	    {
	      ix86_adjust_stack_and_probe (allocate, int_registers_saved);
	      allocate = 0;
	    }
	}
      else
	{
	  HOST_WIDE_INT size = allocate;

	  if (TARGET_64BIT && size >= HOST_WIDE_INT_C (0x80000000))
	    size = 0x80000000 - get_stack_check_protect () - 1;
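	  /* The probe sequences address the stack with 32-bit
	     displacements, so the range is clamped to just under 2GB
	     minus the protection area.  */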

	  if (TARGET_STACK_PROBE)
	    {
	      if (crtl->is_leaf && !cfun->calls_alloca)
		{
		  if (size > get_probe_interval ())
		    ix86_emit_probe_stack_range (0, size, int_registers_saved);
		}
	      else
		ix86_emit_probe_stack_range (0,
					     size + get_stack_check_protect (),
					     int_registers_saved);
	    }
	  else
	    {
	      if (crtl->is_leaf && !cfun->calls_alloca)
		{
		  if (size > get_probe_interval ()
		      && size > get_stack_check_protect ())
		    ix86_emit_probe_stack_range (get_stack_check_protect (),
						 (size
						  - get_stack_check_protect ()),
						 int_registers_saved);
		}
	      else
		ix86_emit_probe_stack_range (get_stack_check_protect (), size,
					     int_registers_saved);
	    }
	}
    }

  if (allocate == 0)
    ;
  else if (!ix86_target_stack_probe ()
	   || frame.stack_pointer_offset < CHECK_STACK_LIMIT)
    {
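      /* No probing is required here: the stack pointer is adjusted in
	 one immediate operation, effectively "subq $allocate, %rsp".  */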
      pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
			         GEN_INT (-allocate), -1,
			         m->fs.cfa_reg == stack_pointer_rtx);
    }
  else
    {
      rtx eax = gen_rtx_REG (Pmode, AX_REG);
      rtx r10 = NULL;
      rtx (*adjust_stack_insn)(rtx, rtx, rtx);
      const bool sp_is_cfa_reg = (m->fs.cfa_reg == stack_pointer_rtx);
      bool eax_live = ix86_eax_live_at_start_p ();
      bool r10_live = false;

      if (TARGET_64BIT)
        r10_live = (DECL_STATIC_CHAIN (current_function_decl) != 0);
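      /* On 64-bit targets %r10 carries the static chain at function
	 entry, so it must survive the allocation worker call below.  */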

      if (eax_live)
	{
	  insn = emit_insn (gen_push (eax));
	  allocate -= UNITS_PER_WORD;
	  /* Note that SEH directives need to continue tracking the stack
	     pointer even after the frame pointer has been set up.  */
	  if (sp_is_cfa_reg || TARGET_SEH)
	    {
	      if (sp_is_cfa_reg)
		m->fs.cfa_offset += UNITS_PER_WORD;
	      RTX_FRAME_RELATED_P (insn) = 1;
	      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			    gen_rtx_SET (stack_pointer_rtx,
					 plus_constant (Pmode, stack_pointer_rtx,
							-UNITS_PER_WORD)));
	    }
	}

      if (r10_live)
	{
	  r10 = gen_rtx_REG (Pmode, R10_REG);
	  insn = emit_insn (gen_push (r10));
	  allocate -= UNITS_PER_WORD;
	  if (sp_is_cfa_reg || TARGET_SEH)
	    {
	      if (sp_is_cfa_reg)
		m->fs.cfa_offset += UNITS_PER_WORD;
	      RTX_FRAME_RELATED_P (insn) = 1;
	      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			    gen_rtx_SET (stack_pointer_rtx,
					 plus_constant (Pmode, stack_pointer_rtx,
							-UNITS_PER_WORD)));
	    }
	}

      emit_move_insn (eax, GEN_INT (allocate));
      emit_insn (ix86_gen_allocate_stack_worker (eax, eax));
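      /* The worker (e.g. ___chkstk_ms on Windows targets) only probes
	 the pages; it preserves %rax and does not itself adjust %rsp,
	 which is why the explicit subtraction below is still needed.  */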

      /* Use the fact that AX still contains ALLOCATE.  */
      adjust_stack_insn = (Pmode == DImode
			   ? gen_pro_epilogue_adjust_stack_di_sub
			   : gen_pro_epilogue_adjust_stack_si_sub);

      insn = emit_insn (adjust_stack_insn (stack_pointer_rtx,
					   stack_pointer_rtx, eax));

      if (sp_is_cfa_reg || TARGET_SEH)
	{
	  if (sp_is_cfa_reg)
	    m->fs.cfa_offset += allocate;
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_SET (stack_pointer_rtx,
				     plus_constant (Pmode, stack_pointer_rtx,
						    -allocate)));
	}
      m->fs.sp_offset += allocate;

      /* Use stack_pointer_rtx for relative addressing so that the
	 code also works for a realigned stack.  */
      if (r10_live && eax_live)
        {
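	  /* After the subtraction, %rsp + %rax addresses the two save
	     slots: r10 was pushed last, so it sits lowest, with eax one
	     word above it.  */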
	  t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, eax);
	  emit_move_insn (gen_rtx_REG (word_mode, R10_REG),
			  gen_frame_mem (word_mode, t));
	  t = plus_constant (Pmode, t, UNITS_PER_WORD);
	  emit_move_insn (gen_rtx_REG (word_mode, AX_REG),
			  gen_frame_mem (word_mode, t));
	}
      else if (eax_live || r10_live)
	{
	  t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, eax);
	  emit_move_insn (gen_rtx_REG (word_mode,
				       (eax_live ? AX_REG : R10_REG)),
			  gen_frame_mem (word_mode, t));
	}
    }
  gcc_assert (m->fs.sp_offset == frame.stack_pointer_offset);

  /* If we haven't already set up the frame pointer, do so now.  */
  if (frame_pointer_needed && !m->fs.fp_valid)
    {
      insn = ix86_gen_add3 (hard_frame_pointer_rtx, stack_pointer_rtx,
			    GEN_INT (frame.stack_pointer_offset
				     - frame.hard_frame_pointer_offset));
      insn = emit_insn (insn);
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);

      if (m->fs.cfa_reg == stack_pointer_rtx)
	m->fs.cfa_reg = hard_frame_pointer_rtx;
      m->fs.fp_offset = frame.hard_frame_pointer_offset;
      m->fs.fp_valid = true;
    }

  if (!int_registers_saved)
    ix86_emit_save_regs_using_mov (frame.reg_save_offset);
  if (!sse_registers_saved)
    ix86_emit_save_sse_regs_using_mov (frame.sse_reg_save_offset);
  else if (save_stub_call_needed)
    ix86_emit_outlined_ms2sysv_save (frame);

  /* For mcount profiling in 32-bit PIC mode we need to emit SET_GOT
     in the prologue.  */
  if (!TARGET_64BIT && pic_offset_table_rtx && crtl->profile && !flag_fentry)
    {
      rtx pic = gen_rtx_REG (Pmode, REAL_PIC_OFFSET_TABLE_REGNUM);
      insn = emit_insn (gen_set_got (pic));
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL_RTX);
      emit_insn (gen_prologue_use (pic));
      /* Delete an already-emitted SET_GOT if one exists and is
	 allocated to REAL_PIC_OFFSET_TABLE_REGNUM.  */
      ix86_elim_entry_set_got (pic);
    }

  if (crtl->drap_reg && !crtl->stack_realign_needed)
    {
      /* vDRAP was set up, but after reload it turns out that stack
	 realignment isn't necessary; emit the prologue code that sets
	 up DRAP without the stack realignment adjustment.  */
      t = choose_baseaddr (0, NULL);
      emit_insn (gen_rtx_SET (crtl->drap_reg, t));
    }

  /* Prevent instructions from being scheduled into the register save
     push sequence when the red zone area is accessed through the frame
     pointer.  The offset between the frame pointer and the stack
     pointer is calculated relative to the value of the stack pointer
     at the end of the function prologue, and moving instructions that
     access the red zone via the frame pointer into the push sequence
     violates this assumption.  */
  if (frame_pointer_needed && frame.red_zone_size)
    emit_insn (gen_memory_blockage ());

  /* SEH requires that the prologue end within 256 bytes of the start of
     the function.  Prevent instruction sc