From 1baf4ed878639536c50a7aab9e7be64da43356fd Mon Sep 17 00:00:00 2001
From: Jonathan Wright
Date: Tue, 16 Feb 2021 23:59:22 +0000
Subject: [PATCH] aarch64: Use RTL builtins for FP ml[as][q]_lane intrinsics

Rewrite floating-point vml[as][q]_lane Neon intrinsics to use RTL
builtins rather than relying on the GCC vector extensions. Using RTL
builtins allows control over the emission of fmla/fmls instructions
(which we don't want here).

With this commit, the code generated by these intrinsics changes from
a fused multiply-add/subtract instruction to an fmul followed by an
fadd/fsub instruction. If the programmer really wants fmla/fmls
instructions, they can use the vfm[as] intrinsics.

gcc/ChangeLog:

2021-02-16  Jonathan Wright

	* config/aarch64/aarch64-simd-builtins.def: Add float_ml[as]_lane
	builtin generator macros.
	* config/aarch64/aarch64-simd.md (*aarch64_mul3_elt<mode>):
	Rename to...
	(mul_lane<mode>3): This, and re-order arguments.
	(aarch64_float_mla_lane<mode>): Define.
	(aarch64_float_mls_lane<mode>): Define.
	* config/aarch64/arm_neon.h (vmla_lane_f32): Use RTL builtin
	instead of GCC vector extensions.
	(vmlaq_lane_f32): Likewise.
	(vmls_lane_f32): Likewise.
	(vmlsq_lane_f32): Likewise.
---
 gcc/config/aarch64/aarch64-simd-builtins.def |  2 +
 gcc/config/aarch64/aarch64-simd.md           | 58 +++++++++++++++++---
 gcc/config/aarch64/arm_neon.h                |  8 +--
 3 files changed, 55 insertions(+), 13 deletions(-)

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 2a2fc2076b1..8e4b4edc8a4 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -672,6 +672,8 @@
   BUILTIN_VDQF_DF (TERNOP, float_mls, 0, FP)
   BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
   BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
+  BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
+  BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
 
   /* Implemented by aarch64_simd_bsl<mode>.  */
   BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 0f96cd0bd51..bdee49f74f4 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -718,18 +718,18 @@
 }
 )
 
-(define_insn "*aarch64_mul3_elt<mode>"
+(define_insn "mul_lane<mode>3"
  [(set (match_operand:VMUL 0 "register_operand" "=w")
-    (mult:VMUL
-      (vec_duplicate:VMUL
-	  (vec_select:<VEL>
-	    (match_operand:VMUL 1 "register_operand" "<h_con>")
-	    (parallel [(match_operand:SI 2 "immediate_operand")])))
-      (match_operand:VMUL 3 "register_operand" "w")))]
+       (mult:VMUL
+	 (vec_duplicate:VMUL
+	   (vec_select:<VEL>
+	     (match_operand:VMUL 2 "register_operand" "<h_con>")
+	     (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+	 (match_operand:VMUL 1 "register_operand" "w")))]
   "TARGET_SIMD"
   {
-    operands[2] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[2]));
-    return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]";
+    operands[3] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[3]));
+    return "<f>mul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
   }
   [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
 )
@@ -2702,6 +2702,46 @@
 }
 )
 
+(define_expand "aarch64_float_mla_lane<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(plus:VDQSF
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (vec_select:<VEL>
+		(match_operand:V2SF 3 "register_operand")
+		(parallel [(match_operand:SI 4 "immediate_operand")])))
+	    (match_operand:VDQSF 2 "register_operand"))
+	  (match_operand:VDQSF 1 "register_operand")))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
+				    operands[3], operands[4]));
+    emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
+(define_expand "aarch64_float_mls_lane<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(minus:VDQSF
+	  (match_operand:VDQSF 1 "register_operand")
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (vec_select:<VEL>
+		(match_operand:V2SF 3 "register_operand")
+		(parallel [(match_operand:SI 4 "immediate_operand")])))
+	    (match_operand:VDQSF 2 "register_operand"))))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
+				    operands[3], operands[4]));
+    emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
 (define_insn "fma<mode>4"
   [(set (match_operand:VHSDF 0 "register_operand" "=w")
 	(fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 0227cadb7e8..5328d447a42 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -20378,7 +20378,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
 	       float32x2_t __c, const int __lane)
 {
-  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mla_lanev2sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x4_t
@@ -20462,7 +20462,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
 		float32x2_t __c, const int __lane)
 {
-  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mla_lanev4sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x8_t
@@ -20576,7 +20576,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
 	       float32x2_t __c, const int __lane)
 {
-  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mls_lanev2sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x4_t
@@ -20660,7 +20660,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
 		float32x2_t __c, const int __lane)
 {
-  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mls_lanev4sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x8_t
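
To illustrate the codegen change described above: the following is a minimal
sketch, not part of the patch. The function name mla_lane_example is
hypothetical; it assumes a compiler with this patch applied, targeting
aarch64 at -O2.

    #include <arm_neon.h>

    /* Computes acc + (x * y[0]) in each lane.  Before this patch, the
       vector-extension implementation of vmla_lane_f32 allowed GCC to
       contract the expression into a single fused fmla; with the RTL
       builtin it emits fmul (multiply by lane) followed by fadd.  */
    float32x2_t
    mla_lane_example (float32x2_t acc, float32x2_t x, float32x2_t y)
    {
      return vmla_lane_f32 (acc, x, y, 0);
    }

If the fused operation is actually wanted, writing
vfma_lane_f32 (acc, x, y, 0) instead continues to emit fmla.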