aarch64: Reimplement vshrn_high_n* intrinsics using builtins

This patch reimplements the vshrn_high_n* intrinsics that generate the
SHRN2 instruction.
It is a vec_concat of the narrowing shift with the bottom part of the
destination register, so we need a little-endian and a big-endian version and an expander to
pick between them.

gcc/ChangeLog:

	* config/aarch64/aarch64-simd-builtins.def (shrn2): Define
	builtin.
	* config/aarch64/aarch64-simd.md (aarch64_shrn2<mode>_insn_le):
	Define.
	(aarch64_shrn2<mode>_insn_be): Likewise.
	(aarch64_shrn2<mode>): Likewise.
	* config/aarch64/arm_neon.h (vshrn_high_n_s16): Reimplement
	using builtins.
	(vshrn_high_n_s32): Likewise.
	(vshrn_high_n_s64): Likewise.
	(vshrn_high_n_u16): Likewise.
	(vshrn_high_n_u32): Likewise.
	(vshrn_high_n_u64): Likewise.
This commit is contained in:
Kyrylo Tkachov 2021-01-25 09:50:54 +00:00
parent fdb904a182
commit d61ca09ec9
3 changed files with 85 additions and 84 deletions

View file

@ -191,6 +191,9 @@
/* Implemented by aarch64_shrn<mode>.  */
BUILTIN_VQN (SHIFTIMM, shrn, 0, NONE)
/* Implemented by aarch64_shrn2<mode>. */
BUILTIN_VQN (SHIFTACC, shrn2, 0, NONE)
/* Implemented by aarch64_<su>mlsl<mode>. */
BUILTIN_VD_BHSI (TERNOP, smlsl, 0, NONE)
BUILTIN_VD_BHSI (TERNOPU, umlsl, 0, NONE)

View file

@ -1728,6 +1728,49 @@
}
)
(define_insn "aarch64_shrn2<mode>_insn_le"
[(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
(vec_concat:<VNARROWQ2>
(match_operand:<VNARROWQ> 1 "register_operand" "0")
(truncate:<VNARROWQ>
(lshiftrt:VQN (match_operand:VQN 2 "register_operand" "w")
(match_operand:VQN 3 "aarch64_simd_rshift_imm")))))]
"TARGET_SIMD && !BYTES_BIG_ENDIAN"
"shrn2\\t%0.<V2ntype>, %2.<Vtype>, %3"
[(set_attr "type" "neon_shift_imm_narrow_q")]
)
;; Big-endian SHRN2: same operation as the _le pattern, but the vec_concat
;; operand order is swapped because lane ordering inside the RTL vector is
;; reversed on big-endian targets.
(define_insn "aarch64_shrn2<mode>_insn_be"
[(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
(vec_concat:<VNARROWQ2>
(truncate:<VNARROWQ>
(lshiftrt:VQN (match_operand:VQN 2 "register_operand" "w")
(match_operand:VQN 3 "aarch64_simd_rshift_imm")))
(match_operand:<VNARROWQ> 1 "register_operand" "0")))]
"TARGET_SIMD && BYTES_BIG_ENDIAN"
"shrn2\\t%0.<V2ntype>, %2.<Vtype>, %3"
[(set_attr "type" "neon_shift_imm_narrow_q")]
)
;; Expander behind the shrn2 builtin: duplicates the scalar shift amount
;; (operand 3) into a const vector, then dispatches to the _be or _le insn
;; pattern since the vec_concat operand order depends on endianness.
(define_expand "aarch64_shrn2<mode>"
[(match_operand:<VNARROWQ2> 0 "register_operand")
(match_operand:<VNARROWQ> 1 "register_operand")
(match_operand:VQN 2 "register_operand")
(match_operand:SI 3 "aarch64_simd_shift_imm_offset_<vn_mode>")]
"TARGET_SIMD"
{
operands[3] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
INTVAL (operands[3]));
if (BYTES_BIG_ENDIAN)
emit_insn (gen_aarch64_shrn2<mode>_insn_be (operands[0], operands[1],
operands[2], operands[3]));
else
emit_insn (gen_aarch64_shrn2<mode>_insn_le (operands[0], operands[1],
operands[2], operands[3]));
DONE;
}
)
;; For quads.

View file

@ -9809,95 +9809,50 @@ vrsqrteq_u32 (uint32x4_t __a)
return __result;
}
/* Removed inline-asm implementation of vshrn_high_n_s16: combine A with a
   zero upper half, then let the SHRN2 asm overwrite that upper half with
   the narrowing shift of B.  */
#define vshrn_high_n_s16(a, b, c) \
__extension__ \
({ \
int16x8_t b_ = (b); \
int8x8_t a_ = (a); \
int8x16_t result = vcombine_s8 \
(a_, vcreate_s8 \
(__AARCH64_UINT64_C (0x0))); \
__asm__ ("shrn2 %0.16b,%1.8h,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
: /* No clobbers */); \
result; \
})
/* vshrn_high_n_s16: SHRN2 — narrowing right shift of __b by __c forms the
   high half of the result; __a is the low half.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
{
return __builtin_aarch64_shrn2v8hi (__a, __b, __c);
}
/* Removed inline-asm implementation of vshrn_high_n_s32; see the
   builtin-based inline function that replaces it.  */
#define vshrn_high_n_s32(a, b, c) \
__extension__ \
({ \
int32x4_t b_ = (b); \
int16x4_t a_ = (a); \
int16x8_t result = vcombine_s16 \
(a_, vcreate_s16 \
(__AARCH64_UINT64_C (0x0))); \
__asm__ ("shrn2 %0.8h,%1.4s,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
: /* No clobbers */); \
result; \
})
/* vshrn_high_n_s32: SHRN2 — narrowing right shift of __b by __c forms the
   high half of the result; __a is the low half.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
{
return __builtin_aarch64_shrn2v4si (__a, __b, __c);
}
/* Removed inline-asm implementation of vshrn_high_n_s64; see the
   builtin-based inline function that replaces it.  */
#define vshrn_high_n_s64(a, b, c) \
__extension__ \
({ \
int64x2_t b_ = (b); \
int32x2_t a_ = (a); \
int32x4_t result = vcombine_s32 \
(a_, vcreate_s32 \
(__AARCH64_UINT64_C (0x0))); \
__asm__ ("shrn2 %0.4s,%1.2d,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
: /* No clobbers */); \
result; \
})
/* vshrn_high_n_s64: SHRN2 — narrowing right shift of __b by __c forms the
   high half of the result; __a is the low half.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
{
return __builtin_aarch64_shrn2v2di (__a, __b, __c);
}
/* Removed inline-asm implementation of vshrn_high_n_u16; see the
   builtin-based inline function that replaces it.  */
#define vshrn_high_n_u16(a, b, c) \
__extension__ \
({ \
uint16x8_t b_ = (b); \
uint8x8_t a_ = (a); \
uint8x16_t result = vcombine_u8 \
(a_, vcreate_u8 \
(__AARCH64_UINT64_C (0x0))); \
__asm__ ("shrn2 %0.16b,%1.8h,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
: /* No clobbers */); \
result; \
})
/* vshrn_high_n_u16: SHRN2 on unsigned elements.  The builtin is declared
   on signed vector types, so the arguments and result are cast; the bit
   pattern is unchanged (plain logical shift + truncate).  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
{
return (uint8x16_t)
__builtin_aarch64_shrn2v8hi ((int8x8_t) __a, (int16x8_t) __b, __c);
}
/* Removed inline-asm implementation of vshrn_high_n_u32; see the
   builtin-based inline function that replaces it.  */
#define vshrn_high_n_u32(a, b, c) \
__extension__ \
({ \
uint32x4_t b_ = (b); \
uint16x4_t a_ = (a); \
uint16x8_t result = vcombine_u16 \
(a_, vcreate_u16 \
(__AARCH64_UINT64_C (0x0))); \
__asm__ ("shrn2 %0.8h,%1.4s,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
: /* No clobbers */); \
result; \
})
/* vshrn_high_n_u32: SHRN2 on unsigned elements; casts adapt the
   signed-typed builtin, leaving the bit pattern unchanged.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
{
return (uint16x8_t)
__builtin_aarch64_shrn2v4si ((int16x4_t) __a, (int32x4_t) __b, __c);
}
/* Removed inline-asm implementation of vshrn_high_n_u64; see the
   builtin-based inline function that replaces it.  */
#define vshrn_high_n_u64(a, b, c) \
__extension__ \
({ \
uint64x2_t b_ = (b); \
uint32x2_t a_ = (a); \
uint32x4_t result = vcombine_u32 \
(a_, vcreate_u32 \
(__AARCH64_UINT64_C (0x0))); \
__asm__ ("shrn2 %0.4s,%1.2d,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
: /* No clobbers */); \
result; \
})
/* vshrn_high_n_u64: SHRN2 on unsigned elements; casts adapt the
   signed-typed builtin, leaving the bit pattern unchanged.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
{
return (uint32x4_t)
__builtin_aarch64_shrn2v2di ((int32x2_t) __a, (int64x2_t) __b, __c);
}
#define vsli_n_p8(a, b, c) \
__extension__ \