aarch64: Use RTL builtins for FP ml[as][q]_lane intrinsics

Rewrite floating-point vml[as][q]_lane Neon intrinsics to use RTL
builtins rather than relying on the GCC vector extensions. Using RTL
builtins allows control over the emission of fmla/fmls instructions
(which we don't want here).

With this commit, the code generated by these intrinsics changes from
a fused multiply-add/subtract instruction to an fmul followed by an
fadd/fsub instruction. If the programmer really wants fmla/fmls
instructions, they can use the vfm[as] intrinsics.
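
As an illustration (not part of the patch), take a call such as the
one below; the assembly on either side is a hand-written sketch of the
expected code generation, not captured compiler output:

    #include <arm_neon.h>

    float32x2_t
    mla_example (float32x2_t acc, float32x2_t x, float32x2_t y)
    {
      /* acc + x * y[0], via the lane intrinsic.  */
      return vmla_lane_f32 (acc, x, y, 0);
    }

Previously this could be emitted as a single fused instruction,
roughly:

    fmla    v0.2s, v1.2s, v2.s[0]

whereas after this commit it becomes an unfused pair, roughly:

    fmul    v1.2s, v1.2s, v2.s[0]
    fadd    v0.2s, v0.2s, v1.2s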

gcc/ChangeLog:

2021-02-16  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add
	float_ml[as]_lane builtin generator macros.
	* config/aarch64/aarch64-simd.md (*aarch64_mul3_elt<mode>):
	Rename to...
	(mul_lane<mode>3): This, and re-order arguments.
	(aarch64_float_mla_lane<mode>): Define.
	(aarch64_float_mls_lane<mode>): Define.
	* config/aarch64/arm_neon.h (vmla_lane_f32): Use RTL builtin
	instead of GCC vector extensions.
	(vmlaq_lane_f32): Likewise.
	(vmls_lane_f32): Likewise.
	(vmlsq_lane_f32): Likewise.
Commit: 1baf4ed878
Parent: b0d9aac899
Author: Jonathan Wright
Date:   2021-02-16 23:59:22 +00:00

3 changed files with 55 additions and 13 deletions

--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -672,6 +672,8 @@
 BUILTIN_VDQF_DF (TERNOP, float_mls, 0, FP)
 BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
 BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
+BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
+BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
 
 /* Implemented by aarch64_simd_bsl<mode>. */
 BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
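
(For context: the VDQSF iterator covers the V2SF and V4SF modes, so
each generator line above yields one builtin per mode. The names below
are inferred from the intrinsic bodies later in this patch rather than
quoted from generated code:

    float_mla_lane  ->  __builtin_aarch64_float_mla_lanev2sf,
                        __builtin_aarch64_float_mla_lanev4sf
    float_mls_lane  ->  __builtin_aarch64_float_mls_lanev2sf,
                        __builtin_aarch64_float_mls_lanev4sf)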

--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -718,18 +718,18 @@
 }
 )
 
-(define_insn "*aarch64_mul3_elt<mode>"
+(define_insn "mul_lane<mode>3"
   [(set (match_operand:VMUL 0 "register_operand" "=w")
-	(mult:VMUL
-	  (vec_duplicate:VMUL
-	    (vec_select:<VEL>
-	      (match_operand:VMUL 1 "register_operand" "<h_con>")
-	      (parallel [(match_operand:SI 2 "immediate_operand")])))
-	  (match_operand:VMUL 3 "register_operand" "w")))]
+	(mult:VMUL
+	  (vec_duplicate:VMUL
+	    (vec_select:<VEL>
+	      (match_operand:VMUL 2 "register_operand" "<h_con>")
+	      (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+	  (match_operand:VMUL 1 "register_operand" "w")))]
   "TARGET_SIMD"
   {
-    operands[2] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[2]));
-    return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]";
+    operands[3] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[3]));
+    return "<f>mul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
   }
   [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
 )
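
A note on the rename and operand re-ordering: the pattern now uses the
operand order expected by its generator function, which the new
expanders below call directly as gen_mul_lane<mode>3 (dest,
multiplicand, lane vector, lane index). For V4SF, the emitted
instruction has the form (a sketch with registers filled in):

    fmul    v0.4s, v1.4s, v2.s[lane]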
@@ -2702,6 +2702,46 @@
 }
 )
 
+(define_expand "aarch64_float_mla_lane<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(plus:VDQSF
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (vec_select:<VEL>
+		(match_operand:V2SF 3 "register_operand")
+		(parallel [(match_operand:SI 4 "immediate_operand")])))
+	    (match_operand:VDQSF 2 "register_operand"))
+	  (match_operand:VDQSF 1 "register_operand")))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
+				    operands[3], operands[4]));
+    emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
+(define_expand "aarch64_float_mls_lane<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(minus:VDQSF
+	  (match_operand:VDQSF 1 "register_operand")
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (vec_select:<VEL>
+		(match_operand:V2SF 3 "register_operand")
+		(parallel [(match_operand:SI 4 "immediate_operand")])))
+	    (match_operand:VDQSF 2 "register_operand"))))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
+				    operands[3], operands[4]));
+    emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
 (define_insn "fma<mode>4"
   [(set (match_operand:VHSDF 0 "register_operand" "=w")
     (fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
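
A note on the two expanders added above: each one lowers the builtin
into two separate insns rather than one fused operation. A C-level
sketch of the mla case (illustrative only, names are descriptive):

    /* aarch64_float_mla_lane<mode>: result = a + b * c[lane].  */
    scratch = b * c[lane];    /* gen_mul_lane<mode>3  ->  fmul  */
    result  = a + scratch;    /* gen_add<mode>3       ->  fadd  */

Because the multiply and the add are distinct insns from the moment of
expansion, no fused multiply-add is ever requested, which is the
behaviour the commit message asks for.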

--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -20378,7 +20378,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
 	       float32x2_t __c, const int __lane)
 {
-  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mla_lanev2sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x4_t
@@ -20462,7 +20462,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
 		float32x2_t __c, const int __lane)
 {
-  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mla_lanev4sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x8_t
@@ -20576,7 +20576,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
 	       float32x2_t __c, const int __lane)
 {
-  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mls_lanev2sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x4_t
@@ -20660,7 +20660,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
 		float32x2_t __c, const int __lane)
 {
-  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mls_lanev4sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x8_t
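
Finally, as the commit message notes, code that does want fused
fmla/fmls generation can use the vfm[as] intrinsics instead. A usage
sketch, assuming the standard ACLE spellings vfma_lane_f32 and
vfms_lane_f32:

    /* Fused forms: expected to remain fmla/fmls.  */
    float32x2_t r1 = vfma_lane_f32 (a, b, c, 0);
    float32x2_t r2 = vfms_lane_f32 (a, b, c, 0);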