diff gcc/config/arm/arm_neon.h @ 145:1830386684a0
gcc-9.2.0
author    anatofuz
date      Thu, 13 Feb 2020 11:34:05 +0900
parents   84e7813d76e9
children  (none)
--- a/gcc/config/arm/arm_neon.h	Thu Oct 25 07:37:49 2018 +0900
+++ b/gcc/config/arm/arm_neon.h	Thu Feb 13 11:34:05 2020 +0900
@@ -1,6 +1,6 @@
 /* ARM NEON intrinsics include file.
 
-   Copyright (C) 2006-2018 Free Software Foundation, Inc.
+   Copyright (C) 2006-2020 Free Software Foundation, Inc.
    Contributed by CodeSourcery.
 
    This file is part of GCC.
@@ -39,6 +39,7 @@
 #endif
 
 #include <arm_fp16.h>
+#include <arm_bf16.h>
 #include <stdint.h>
 
 typedef __simd64_int8_t int8x8_t;
@@ -83,6 +84,9 @@
 
 typedef float float32_t;
 
+typedef __simd128_bfloat16_t bfloat16x8_t;
+typedef __simd64_bfloat16_t bfloat16x4_t;
+
 /* The Poly types are user visible and live in their own world, keep them
    that way.  */
 typedef __builtin_neon_poly8 poly8_t;
@@ -16938,37 +16942,32 @@
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vsha1h_u32 (uint32_t __hash_e)
 {
-  uint32x4_t __t = vdupq_n_u32 (0);
-  __t = vsetq_lane_u32 (__hash_e, __t, 0);
-  __t = __builtin_arm_crypto_sha1h (__t);
-  return vgetq_lane_u32 (__t, 0);
+  return vgetq_lane_u32 (__builtin_arm_crypto_sha1h (vdupq_n_u32 (__hash_e)),
+                         0);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vsha1cq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
 {
-  uint32x4_t __t = vdupq_n_u32 (0);
-  __t = vsetq_lane_u32 (__hash_e, __t, 0);
-  return __builtin_arm_crypto_sha1c (__hash_abcd, __t, __wk);
+  return __builtin_arm_crypto_sha1c (__hash_abcd, vdupq_n_u32 (__hash_e),
+                                     __wk);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vsha1pq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
 {
-  uint32x4_t __t = vdupq_n_u32 (0);
-  __t = vsetq_lane_u32 (__hash_e, __t, 0);
-  return __builtin_arm_crypto_sha1p (__hash_abcd, __t, __wk);
+  return __builtin_arm_crypto_sha1p (__hash_abcd, vdupq_n_u32 (__hash_e),
+                                     __wk);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vsha1mq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
 {
-  uint32x4_t __t = vdupq_n_u32 (0);
-  __t = vsetq_lane_u32 (__hash_e, __t, 0);
-  return __builtin_arm_crypto_sha1m (__hash_abcd, __t, __wk);
+  return __builtin_arm_crypto_sha1m (__hash_abcd, vdupq_n_u32 (__hash_e),
+                                     __wk);
 }
 
 __extension__ extern __inline uint32x4_t
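
The rewritten SHA-1 helpers above now feed the scalar E value through vdupq_n_u32 instead of inserting it into a zeroed vector with vsetq_lane_u32; the underlying crypto instructions only read lane 0, so the result is unchanged. As a rough illustration of how these intrinsics are called (not part of this changeset; assumes a toolchain configured for the Armv8 Crypto extension, e.g. -mfpu=crypto-neon-fp-armv8; names are illustrative):

#include <arm_neon.h>

/* Fixed rotate of the E working variable between SHA-1 round groups.
   vsha1h_u32 takes and returns a plain uint32_t.  */
uint32_t
sha1_rotate_e (uint32_t hash_e)
{
  return vsha1h_u32 (hash_e);
}

/* Four SHA-1 rounds with the "choose" function: abcd holds the A..D
   working variables, hash_e the E variable, wk the pre-added message
   schedule words.  */
uint32x4_t
sha1_choose_rounds (uint32x4_t abcd, uint32_t hash_e, uint32x4_t wk)
{
  return vsha1cq_u32 (abcd, hash_e, wk);
}
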
@@ -18106,63 +18105,63 @@
 
 __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlal_low_u32 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
+vfmlal_low_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
 {
   return __builtin_neon_vfmal_lowv2sf (__r, __a, __b);
 }
 
 __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlsl_low_u32 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
+vfmlsl_low_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
 {
   return __builtin_neon_vfmsl_lowv2sf (__r, __a, __b);
 }
 
 __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlal_high_u32 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
+vfmlal_high_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
 {
   return __builtin_neon_vfmal_highv2sf (__r, __a, __b);
 }
 
 __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlsl_high_u32 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
+vfmlsl_high_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
 {
   return __builtin_neon_vfmsl_highv2sf (__r, __a, __b);
 }
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlalq_low_u32 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
+vfmlalq_low_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
 {
   return __builtin_neon_vfmal_lowv4sf (__r, __a, __b);
 }
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlslq_low_u32 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
+vfmlslq_low_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
 {
   return __builtin_neon_vfmsl_lowv4sf (__r, __a, __b);
 }
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlalq_high_u32 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
+vfmlalq_high_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
 {
   return __builtin_neon_vfmal_highv4sf (__r, __a, __b);
 }
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlslq_high_u32 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
+vfmlslq_high_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
 {
   return __builtin_neon_vfmsl_highv4sf (__r, __a, __b);
 }
 
 __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlal_lane_low_u32 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
+vfmlal_lane_low_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
                      const int __index)
 {
   __builtin_arm_lane_check (4, __index);
@@ -18171,7 +18170,7 @@
 
 __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlal_lane_high_u32 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
+vfmlal_lane_high_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
                       const int __index)
 {
   __builtin_arm_lane_check (4, __index);
@@ -18180,7 +18179,7 @@
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlalq_laneq_low_u32 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
+vfmlalq_laneq_low_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
                        const int __index)
 {
   __builtin_arm_lane_check (8, __index);
@@ -18189,7 +18188,7 @@
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlalq_lane_low_u32 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
+vfmlalq_lane_low_f16 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
                       const int __index)
 {
   __builtin_arm_lane_check (4, __index);
@@ -18198,7 +18197,7 @@
 
 __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlal_laneq_low_u32 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
+vfmlal_laneq_low_f16 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
                       const int __index)
 {
   __builtin_arm_lane_check (8, __index);
@@ -18207,7 +18206,7 @@
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlalq_laneq_high_u32 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
+vfmlalq_laneq_high_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
                         const int __index)
 {
   __builtin_arm_lane_check (8, __index);
@@ -18216,7 +18215,7 @@
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlalq_lane_high_u32 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
+vfmlalq_lane_high_f16 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
                        const int __index)
 {
   __builtin_arm_lane_check (4, __index);
@@ -18225,7 +18224,7 @@
 
 __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlal_laneq_high_u32 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
+vfmlal_laneq_high_f16 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
                        const int __index)
 {
   __builtin_arm_lane_check (8, __index);
@@ -18234,7 +18233,7 @@
 
 __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlsl_lane_low_u32 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
+vfmlsl_lane_low_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
                      const int __index)
 {
   __builtin_arm_lane_check (4, __index);
@@ -18243,7 +18242,7 @@
 
 __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlsl_lane_high_u32 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
+vfmlsl_lane_high_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
                       const int __index)
 {
   __builtin_arm_lane_check (4, __index);
@@ -18252,7 +18251,7 @@
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlslq_laneq_low_u32 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
+vfmlslq_laneq_low_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
                        const int __index)
 {
   __builtin_arm_lane_check (8, __index);
@@ -18261,7 +18260,7 @@
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlslq_lane_low_u32 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
+vfmlslq_lane_low_f16 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
                       const int __index)
 {
   __builtin_arm_lane_check (4, __index);
@@ -18270,7 +18269,7 @@
 
 __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlsl_laneq_low_u32 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
+vfmlsl_laneq_low_f16 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
                       const int __index)
 {
   __builtin_arm_lane_check (8, __index);
@@ -18279,7 +18278,7 @@
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlslq_laneq_high_u32 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
+vfmlslq_laneq_high_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
                         const int __index)
 {
   __builtin_arm_lane_check (8, __index);
@@ -18288,7 +18287,7 @@
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlslq_lane_high_u32 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
+vfmlslq_lane_high_f16 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
                        const int __index)
 {
   __builtin_arm_lane_check (4, __index);
@@ -18297,7 +18296,7 @@
 
 __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vfmlsl_laneq_high_u32 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
+vfmlsl_laneq_high_f16 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
                        const int __index)
 {
   __builtin_arm_lane_check (8, __index);
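
The hunks above only rename the FP16 fused multiply-accumulate (FMLAL/FMLSL) intrinsics from the misleading _u32 suffix to _f16; the underlying builtins, and therefore the semantics (float16 products widened and accumulated into float32 lanes), are unchanged. A small sketch with the new names (illustrative only, not part of this changeset; assumes a target with the FP16 FML extension, e.g. -march=armv8.2-a+fp16fml):

#include <arm_neon.h>

/* Accumulate widened float16 products into a float32x2_t accumulator:
   the low intrinsic consumes elements 0-1 of a and b, the high one
   elements 2-3.  */
float32x2_t
fp16_fma_step (float32x2_t acc, float16x4_t a, float16x4_t b)
{
  acc = vfmlal_low_f16 (acc, a, b);
  acc = vfmlal_high_f16 (acc, a, b);
  return acc;
}
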
@@ -18307,6 +18306,491 @@
 #pragma GCC pop_options
 #endif
 
+/* AdvSIMD Complex numbers intrinsics.  */
+#if __ARM_ARCH >= 8
+#pragma GCC push_options
+#pragma GCC target ("arch=armv8.3-a")
+
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#pragma GCC push_options
+#pragma GCC target ("+fp16")
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcadd_rot90_f16 (float16x4_t __a, float16x4_t __b)
+{
+  return __builtin_neon_vcadd90v4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcaddq_rot90_f16 (float16x8_t __a, float16x8_t __b)
+{
+  return __builtin_neon_vcadd90v8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcadd_rot270_f16 (float16x4_t __a, float16x4_t __b)
+{
+  return __builtin_neon_vcadd90v4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcaddq_rot270_f16 (float16x8_t __a, float16x8_t __b)
+{
+  return __builtin_neon_vcadd90v8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b)
+{
+  return __builtin_neon_vcmla0v4hf (__r, __a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b)
+{
+  return __builtin_neon_vcmla0v8hf (__r, __a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_lane_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b,
+                const int __index)
+{
+  return __builtin_neon_vcmla_lane0v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_laneq_f16 (float16x4_t __r, float16x4_t __a, float16x8_t __b,
+                 const int __index)
+{
+  return __builtin_neon_vcmla_laneq0v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_lane_f16 (float16x8_t __r, float16x8_t __a, float16x4_t __b,
+                 const int __index)
+{
+  return __builtin_neon_vcmlaq_lane0v8hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_laneq_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b,
+                  const int __index)
+{
+  return __builtin_neon_vcmla_lane0v8hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot90_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b)
+{
+  return __builtin_neon_vcmla90v4hf (__r, __a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot90_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b)
+{
+  return __builtin_neon_vcmla90v8hf (__r, __a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot90_lane_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b,
+                      const int __index)
+{
+  return __builtin_neon_vcmla_lane90v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot90_laneq_f16 (float16x4_t __r, float16x4_t __a, float16x8_t __b,
+                       const int __index)
+{
+  return __builtin_neon_vcmla_laneq90v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot90_lane_f16 (float16x8_t __r, float16x8_t __a, float16x4_t __b,
+                       const int __index)
+{
+  return __builtin_neon_vcmlaq_lane90v8hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot90_laneq_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b,
+                        const int __index)
+{
+  return __builtin_neon_vcmla_lane90v8hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot180_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b)
+{
+  return __builtin_neon_vcmla180v4hf (__r, __a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot180_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b)
+{
+  return __builtin_neon_vcmla180v8hf (__r, __a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot180_lane_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b,
+                       const int __index)
+{
+  return __builtin_neon_vcmla_lane180v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot180_laneq_f16 (float16x4_t __r, float16x4_t __a, float16x8_t __b,
+                        const int __index)
+{
+  return __builtin_neon_vcmla_laneq180v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot180_lane_f16 (float16x8_t __r, float16x8_t __a, float16x4_t __b,
+                        const int __index)
+{
+  return __builtin_neon_vcmlaq_lane180v8hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot180_laneq_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b,
+                         const int __index)
+{
+  return __builtin_neon_vcmla_lane180v8hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot270_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b)
+{
+  return __builtin_neon_vcmla270v4hf (__r, __a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot270_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b)
+{
+  return __builtin_neon_vcmla270v8hf (__r, __a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot270_lane_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b,
+                       const int __index)
+{
+  return __builtin_neon_vcmla_lane270v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot270_laneq_f16 (float16x4_t __r, float16x4_t __a, float16x8_t __b,
+                        const int __index)
+{
+  return __builtin_neon_vcmla_laneq270v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot270_lane_f16 (float16x8_t __r, float16x8_t __a, float16x4_t __b,
+                        const int __index)
+{
+  return __builtin_neon_vcmlaq_lane270v8hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot270_laneq_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b,
+                         const int __index)
+{
+  return __builtin_neon_vcmla_lane270v8hf (__r, __a, __b, __index);
+}
+
+#pragma GCC pop_options
+#endif
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcadd_rot90_f32 (float32x2_t __a, float32x2_t __b)
+{
+  return __builtin_neon_vcadd90v2sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcaddq_rot90_f32 (float32x4_t __a, float32x4_t __b)
+{
+  return __builtin_neon_vcadd90v4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcadd_rot270_f32 (float32x2_t __a, float32x2_t __b)
+{
+  return __builtin_neon_vcadd90v2sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcaddq_rot270_f32 (float32x4_t __a, float32x4_t __b)
+{
+  return __builtin_neon_vcadd90v4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b)
+{
+  return __builtin_neon_vcmla0v2sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b)
+{
+  return __builtin_neon_vcmla0v4sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_lane_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b,
+                const int __index)
+{
+  return __builtin_neon_vcmla_lane0v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_laneq_f32 (float32x2_t __r, float32x2_t __a, float32x4_t __b,
+                 const int __index)
+{
+  return __builtin_neon_vcmla_laneq0v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_lane_f32 (float32x4_t __r, float32x4_t __a, float32x2_t __b,
+                 const int __index)
+{
+  return __builtin_neon_vcmlaq_lane0v4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_laneq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b,
+                  const int __index)
+{
+  return __builtin_neon_vcmla_lane0v4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot90_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b)
+{
+  return __builtin_neon_vcmla90v2sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot90_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b)
+{
+  return __builtin_neon_vcmla90v4sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot90_lane_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b,
+                      const int __index)
+{
+  return __builtin_neon_vcmla_lane90v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot90_laneq_f32 (float32x2_t __r, float32x2_t __a, float32x4_t __b,
+                       const int __index)
+{
+  return __builtin_neon_vcmla_laneq90v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot90_lane_f32 (float32x4_t __r, float32x4_t __a, float32x2_t __b,
+                       const int __index)
+{
+  return __builtin_neon_vcmlaq_lane90v4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot90_laneq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b,
+                        const int __index)
+{
+  return __builtin_neon_vcmla_lane90v4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot180_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b)
+{
+  return __builtin_neon_vcmla180v2sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot180_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b)
+{
+  return __builtin_neon_vcmla180v4sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot180_lane_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b,
+                       const int __index)
+{
+  return __builtin_neon_vcmla_lane180v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot180_laneq_f32 (float32x2_t __r, float32x2_t __a, float32x4_t __b,
+                        const int __index)
+{
+  return __builtin_neon_vcmla_laneq180v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot180_lane_f32 (float32x4_t __r, float32x4_t __a, float32x2_t __b,
+                        const int __index)
+{
+  return __builtin_neon_vcmlaq_lane180v4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot180_laneq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b,
+                         const int __index)
+{
+  return __builtin_neon_vcmla_lane180v4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot270_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b)
+{
+  return __builtin_neon_vcmla270v2sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot270_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b)
+{
+  return __builtin_neon_vcmla270v4sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot270_lane_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b,
+                       const int __index)
+{
+  return __builtin_neon_vcmla_lane270v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot270_laneq_f32 (float32x2_t __r, float32x2_t __a, float32x4_t __b,
+                        const int __index)
+{
+  return __builtin_neon_vcmla_laneq270v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot270_lane_f32 (float32x4_t __r, float32x4_t __a, float32x2_t __b,
+                        const int __index)
+{
+  return __builtin_neon_vcmlaq_lane270v4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot270_laneq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b,
+                         const int __index)
+{
+  return __builtin_neon_vcmla_lane270v4sf (__r, __a, __b, __index);
+}
+
+
+/* AdvSIMD Matrix Multiply-Accumulate and Dot Product intrinsics.  */
+#pragma GCC push_options
+#pragma GCC target ("arch=armv8.2-a+i8mm")
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vusdot_s32 (int32x2_t __r, uint8x8_t __a, int8x8_t __b)
+{
+  return __builtin_neon_usdotv8qi_ssus (__r, __a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vusdot_lane_s32 (int32x2_t __r, uint8x8_t __a,
+                 int8x8_t __b, const int __index)
+{
+  return __builtin_neon_usdot_lanev8qi_ssuss (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vusdotq_lane_s32 (int32x4_t __r, uint8x16_t __a,
+                  int8x8_t __b, const int __index)
+{
+  return __builtin_neon_usdot_lanev16qi_ssuss (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsudot_lane_s32 (int32x2_t __r, int8x8_t __a,
+                 uint8x8_t __b, const int __index)
+{
+  return __builtin_neon_sudot_lanev8qi_sssus (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsudotq_lane_s32 (int32x4_t __r, int8x16_t __a,
+                  uint8x8_t __b, const int __index)
+{
+  return __builtin_neon_sudot_lanev16qi_sssus (__r, __a, __b, __index);
+}
+
+#pragma GCC pop_options
+
+#pragma GCC pop_options
+#endif
+
 #ifdef __cplusplus
 }
 #endif
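
The added Armv8.3-A complex-number intrinsics compose a complex multiply-accumulate from a 0-degree step and a 90-degree rotated step, while the i8mm block provides mixed-sign dot products. A hedged usage sketch (illustrative only, not part of this changeset; the first helper assumes an armv8.3-a target with the complex-number extension, the second an armv8.2-a+i8mm target):

#include <arm_neon.h>

/* acc += a * b for one complex value per float32x2_t, stored as
   { real, imag }.  The plain vcmla step accumulates Re(a) * b and the
   rot90 step accumulates i * Im(a) * b, which together form the full
   complex multiply-accumulate.  */
float32x2_t
complex_mla (float32x2_t acc, float32x2_t a, float32x2_t b)
{
  acc = vcmla_f32 (acc, a, b);
  acc = vcmla_rot90_f32 (acc, a, b);
  return acc;
}

/* Mixed-sign dot product: each 32-bit lane of acc accumulates four
   unsigned-by-signed byte products from the matching group of a and b.  */
int32x2_t
usdot_step (int32x2_t acc, uint8x8_t a, int8x8_t b)
{
  return vusdot_s32 (acc, a, b);
}
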