[ARM][PATCH]: Add support for MVE ACLE intrinsics polymorphic variants for +mve.fp option.
For the following MVE ACLE intrinsics, polymorphic variant supports only +mve option, support for +mve.fp is missing. vabavq_p_s16, vabavq_p_s32, vabavq_p_s8, vabavq_p_u16, vabavq_p_u32, vabavq_p_u8, vabavq_s16, vabavq_s32, vabavq_s8, vabavq_u16, vabavq_u32, vabavq_u8, vaddlvaq_p_s32, vaddlvaq_p_u32, vaddlvaq_s32, vaddlvaq_u32, vaddlvq_p_s32, vaddlvq_p_u32, vaddlvq_u32, vaddvaq_p_s16, vaddvaq_p_s32, vaddvaq_p_s8, vaddvaq_p_u16, vaddvaq_p_u32, vaddvaq_p_u8, vaddvaq_s16, vaddvaq_s32, vaddvaq_s8, vaddvaq_u16, vaddvaq_u32, vaddvaq_u8, vaddvq_p_s16, vaddvq_p_s32, vaddvq_p_s8, vaddvq_p_u16, vaddvq_p_u32, vaddvq_p_u8, vaddvq_s8, vaddvq_u16, vaddvq_u32, vaddvq_u8, vcmpcsq_m_n_u16, vcmpcsq_m_n_u32, vcmpcsq_m_n_u8, vcmpcsq_m_u16, vcmpcsq_m_u32, vcmpcsq_m_u8, vcmpcsq_n_u16, vcmpcsq_n_u32, vcmpcsq_n_u8, vcmpcsq_u16, vcmpcsq_u32, vcmpcsq_u8, vcmpeqq_n_f16, vcmpeqq_n_f32, vcmpgeq_m_n_s16, vcmpgeq_m_n_s32, vcmpgeq_m_n_s8, vcmpgtq_m_n_f16, vcmpgtq_m_n_f32, vcmpgtq_n_f16, vcmpgtq_n_f32, vcmphiq_m_n_u16, vcmphiq_m_n_u32, vcmphiq_m_n_u8, vcmphiq_m_u16, vcmphiq_m_u32, vcmphiq_m_u8, vcmphiq_n_u16, vcmphiq_n_u32, vcmphiq_n_u8, vcmphiq_u16, vcmphiq_u32, vcmphiq_u8, vcmpleq_m_n_f16, vcmpleq_m_n_f32, vcmpleq_n_f16, vcmpleq_n_f32, vcmpltq_m_n_f16, vcmpltq_m_n_f32, vcmpneq_m_n_f16, vcmpneq_m_n_f32, vcmpneq_n_f16, vcmpneq_n_f32, vmaxavq_p_s16, vmaxavq_p_s32, vmaxavq_p_s8, vmaxavq_s16, vmaxavq_s32, vmaxavq_s8, vmaxq_x_s16, vmaxq_x_s32, vmaxq_x_s8, vmaxq_x_u16, vmaxq_x_u32, vmaxq_x_u8, vmaxvq_p_s16, vmaxvq_p_s32, vmaxvq_p_s8, vmaxvq_p_u16, vmaxvq_p_u32, vmaxvq_p_u8, vmaxvq_s16, vmaxvq_s32, vmaxvq_s8, vmaxvq_u16, vmaxvq_u32, vmaxvq_u8, vminavq_p_s16, vminavq_p_s32, vminavq_p_s8, vminavq_s16, vminavq_s32, vminavq_s8, vminq_x_s16, vminq_x_s32, vminq_x_s8, vminq_x_u16, vminq_x_u32, vminq_x_u8, vminvq_p_s16, vminvq_p_s32, vminvq_p_s8, vminvq_p_u16, vminvq_p_u32, vminvq_p_u8, vminvq_s16, vminvq_s32, vminvq_s8, vminvq_u16, vminvq_u32, vminvq_u8, vmladavaq_p_s16, vmladavaq_p_s32, vmladavaq_p_s8, 
vmladavaq_p_u16, vmladavaq_p_u32, vmladavaq_p_u8, vmladavaq_s16, vmladavaq_s32, vmladavaq_s8, vmladavaq_u16, vmladavaq_u32, vmladavaq_u8, vmladavaxq_s16, vmladavaxq_s32, vmladavaxq_s8, vmladavq_p_s16, vmladavq_p_s32, vmladavq_p_s8, vmladavq_p_u16, vmladavq_p_u32, vmladavq_p_u8, vmladavq_s16, vmladavq_s32, vmladavq_s8, vmladavq_u16, vmladavq_u32, vmladavq_u8, vmladavxq_p_s16, vmladavxq_p_s32, vmladavxq_p_s8, vmladavxq_s16, vmladavxq_s32, vmladavxq_s8, vmlaldavaq_s16, vmlaldavaq_s32, vmlaldavaq_u16, vmlaldavaq_u32, vmlaldavaxq_s16, vmlaldavaxq_s32, vmlaldavq_p_s16, vmlaldavq_p_s32, vmlaldavq_p_u16, vmlaldavq_p_u32, vmlaldavq_s16, vmlaldavq_s32, vmlaldavq_u16, vmlaldavq_u32, vmlaldavxq_p_s16, vmlaldavxq_p_s32, vmlsdavaq_s16, vmlsdavaq_s32, vmlsdavaq_s8, vmlsdavaxq_s16, vmlsdavaxq_s32, vmlsdavaxq_s8, vmlsdavq_p_s16, vmlsdavq_p_s32, vmlsdavq_p_s8, vmlsdavq_s16, vmlsdavq_s32, vmlsdavq_s8, vmlsdavxq_p_s16, vmlsdavxq_p_s32, vmlsdavxq_p_s8, vmlsdavxq_s16, vmlsdavxq_s32, vmlsdavxq_s8, vmlsldavaq_s16, vmlsldavaq_s32, vmlsldavaxq_s16, vmlsldavaxq_s32, vmlsldavq_p_s16, vmlsldavq_p_s32, vmlsldavq_s16, vmlsldavq_s32, vmlsldavxq_p_s16, vmlsldavxq_p_s32, vmlsldavxq_s16, vmlsldavxq_s32, vmovlbq_x_s16, vmovlbq_x_s8, vmovlbq_x_u16, vmovlbq_x_u8, vmovltq_x_s16, vmovltq_x_s8, vmovltq_x_u16, vmovltq_x_u8, vmulhq_x_s16, vmulhq_x_s32, vmulhq_x_s8, vmulhq_x_u16, vmulhq_x_u32, vmulhq_x_u8, vmullbq_int_x_s16, vmullbq_int_x_s32, vmullbq_int_x_s8, vmullbq_int_x_u16, vmullbq_int_x_u32, vmullbq_int_x_u8, vmullbq_poly_x_p16, vmullbq_poly_x_p8, vmulltq_int_x_s16, vmulltq_int_x_s32, vmulltq_int_x_s8, vmulltq_int_x_u16, vmulltq_int_x_u32, vmulltq_int_x_u8, vmulltq_poly_x_p16, vmulltq_poly_x_p8, vrmlaldavhaq_s32, vrmlaldavhaq_u32, vrmlaldavhaxq_s32, vrmlaldavhq_p_s32, vrmlaldavhq_p_u32, vrmlaldavhq_s32, vrmlaldavhq_u32, vrmlaldavhxq_p_s32, vrmlaldavhxq_s32, vrmlsldavhaq_s32, vrmlsldavhaxq_s32, vrmlsldavhq_p_s32, vrmlsldavhq_s32, vrmlsldavhxq_p_s32, vrmlsldavhxq_s32, vstrbq_p_s16, vstrbq_p_s32, 
vstrbq_p_s8, vstrbq_p_u16, vstrbq_p_u32, vstrbq_p_u8, vstrbq_s16, vstrbq_s32, vstrbq_s8, vstrbq_scatter_offset_p_s16, vstrbq_scatter_offset_p_s32, vstrbq_scatter_offset_p_s8, vstrbq_scatter_offset_p_u16, vstrbq_scatter_offset_p_u32, vstrbq_scatter_offset_p_u8, vstrbq_scatter_offset_s16, vstrbq_scatter_offset_s32, vstrbq_scatter_offset_s8, vstrbq_scatter_offset_u16, vstrbq_scatter_offset_u32, vstrbq_scatter_offset_u8, vstrbq_u16, vstrbq_u32, vstrbq_u8, vstrdq_scatter_base_p_s64, vstrdq_scatter_base_p_u64, vstrdq_scatter_base_s64, vstrdq_scatter_base_u64, vstrdq_scatter_offset_p_s64, vstrdq_scatter_offset_p_u64, vstrdq_scatter_offset_s64, vstrdq_scatter_offset_u64, vstrdq_scatter_shifted_offset_p_s64, vstrdq_scatter_shifted_offset_p_u64, vstrdq_scatter_shifted_offset_s64, vstrdq_scatter_shifted_offset_u64. This patch adds the support for MVE ACLE intrinsics polymorphic variants with +mve.fp option. Please refer to M-profile Vector Extension (MVE) intrinsics [1] for more details. [1] https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics 2020-03-31 Srinath Parvathaneni <srinath.parvathaneni@arm.com> * config/arm/arm_mve.h (vaddlvq): Move the polymorphic variant to the common section of both MVE Integer and MVE Floating Point. (vaddvq): Likewise. (vaddlvq_p): Likewise. (vaddvaq): Likewise. (vaddvq_p): Likewise. (vcmpcsq): Likewise. (vmlsdavxq): Likewise. (vmlsdavq): Likewise. (vmladavxq): Likewise. (vmladavq): Likewise. (vminvq): Likewise. (vminavq): Likewise. (vmaxvq): Likewise. (vmaxavq): Likewise. (vmlaldavq): Likewise. (vcmphiq): Likewise. (vaddlvaq): Likewise. (vrmlaldavhq): Likewise. (vrmlaldavhxq): Likewise. (vrmlsldavhq): Likewise. (vrmlsldavhxq): Likewise. (vmlsldavxq): Likewise. (vmlsldavq): Likewise. (vabavq): Likewise. (vrmlaldavhaq): Likewise. (vcmpgeq_m_n): Likewise. (vmlsdavxq_p): Likewise. (vmlsdavq_p): Likewise. (vmlsdavaxq): Likewise. (vmlsdavaq): Likewise. (vaddvaq_p): Likewise. (vcmpcsq_m_n): Likewise. 
(vcmpcsq_m): Likewise. (vmladavxq_p): Likewise. (vmladavq_p): Likewise. (vmladavaxq): Likewise. (vmladavaq): Likewise. (vminvq_p): Likewise. (vminavq_p): Likewise. (vmaxvq_p): Likewise. (vmaxavq_p): Likewise. (vcmphiq_m): Likewise. (vaddlvaq_p): Likewise. (vmlaldavaq): Likewise. (vmlaldavaxq): Likewise. (vmlaldavq_p): Likewise. (vmlaldavxq_p): Likewise. (vmlsldavaq): Likewise. (vmlsldavaxq): Likewise. (vmlsldavq_p): Likewise. (vmlsldavxq_p): Likewise. (vrmlaldavhaxq): Likewise. (vrmlaldavhq_p): Likewise. (vrmlaldavhxq_p): Likewise. (vrmlsldavhaq): Likewise. (vrmlsldavhaxq): Likewise. (vrmlsldavhq_p): Likewise. (vrmlsldavhxq_p): Likewise. (vabavq_p): Likewise. (vmladavaq_p): Likewise. (vstrbq_scatter_offset): Likewise. (vstrbq_p): Likewise. (vstrbq_scatter_offset_p): Likewise. (vstrdq_scatter_base_p): Likewise. (vstrdq_scatter_base): Likewise. (vstrdq_scatter_offset_p): Likewise. (vstrdq_scatter_offset): Likewise. (vstrdq_scatter_shifted_offset_p): Likewise. (vstrdq_scatter_shifted_offset): Likewise. (vmaxq_x): Likewise. (vminq_x): Likewise. (vmovlbq_x): Likewise. (vmovltq_x): Likewise. (vmulhq_x): Likewise. (vmullbq_int_x): Likewise. (vmullbq_poly_x): Likewise. (vmulltq_int_x): Likewise. (vmulltq_poly_x): Likewise. (vstrbq): Likewise. gcc/testsuite/ChangeLog: 2020-03-31 Srinath Parvathaneni <srinath.parvathaneni@arm.com> * gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u16.c: Modify. * gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_n_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_n_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f32.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vcmpgtq_n_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgtq_n_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_n_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_n_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_n_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_n_f32.c: Likewise.
This commit is contained in:
parent
a27c534794
commit
e81d0d9ec7
25 changed files with 781 additions and 686 deletions
|
@@ -1,3 +1,86 @@
|
|||
2020-03-31 Srinath Parvathaneni <srinath.parvathaneni@arm.com>
|
||||
|
||||
* config/arm/arm_mve.h (vaddlvq): Move the polymorphic variant to the
|
||||
common section of both MVE Integer and MVE Floating Point.
|
||||
(vaddvq): Likewise.
|
||||
(vaddlvq_p): Likewise.
|
||||
(vaddvaq): Likewise.
|
||||
(vaddvq_p): Likewise.
|
||||
(vcmpcsq): Likewise.
|
||||
(vmlsdavxq): Likewise.
|
||||
(vmlsdavq): Likewise.
|
||||
(vmladavxq): Likewise.
|
||||
(vmladavq): Likewise.
|
||||
(vminvq): Likewise.
|
||||
(vminavq): Likewise.
|
||||
(vmaxvq): Likewise.
|
||||
(vmaxavq): Likewise.
|
||||
(vmlaldavq): Likewise.
|
||||
(vcmphiq): Likewise.
|
||||
(vaddlvaq): Likewise.
|
||||
(vrmlaldavhq): Likewise.
|
||||
(vrmlaldavhxq): Likewise.
|
||||
(vrmlsldavhq): Likewise.
|
||||
(vrmlsldavhxq): Likewise.
|
||||
(vmlsldavxq): Likewise.
|
||||
(vmlsldavq): Likewise.
|
||||
(vabavq): Likewise.
|
||||
(vrmlaldavhaq): Likewise.
|
||||
(vcmpgeq_m_n): Likewise.
|
||||
(vmlsdavxq_p): Likewise.
|
||||
(vmlsdavq_p): Likewise.
|
||||
(vmlsdavaxq): Likewise.
|
||||
(vmlsdavaq): Likewise.
|
||||
(vaddvaq_p): Likewise.
|
||||
(vcmpcsq_m_n): Likewise.
|
||||
(vcmpcsq_m): Likewise.
|
||||
(vmladavxq_p): Likewise.
|
||||
(vmladavq_p): Likewise.
|
||||
(vmladavaxq): Likewise.
|
||||
(vmladavaq): Likewise.
|
||||
(vminvq_p): Likewise.
|
||||
(vminavq_p): Likewise.
|
||||
(vmaxvq_p): Likewise.
|
||||
(vmaxavq_p): Likewise.
|
||||
(vcmphiq_m): Likewise.
|
||||
(vaddlvaq_p): Likewise.
|
||||
(vmlaldavaq): Likewise.
|
||||
(vmlaldavaxq): Likewise.
|
||||
(vmlaldavq_p): Likewise.
|
||||
(vmlaldavxq_p): Likewise.
|
||||
(vmlsldavaq): Likewise.
|
||||
(vmlsldavaxq): Likewise.
|
||||
(vmlsldavq_p): Likewise.
|
||||
(vmlsldavxq_p): Likewise.
|
||||
(vrmlaldavhaxq): Likewise.
|
||||
(vrmlaldavhq_p): Likewise.
|
||||
(vrmlaldavhxq_p): Likewise.
|
||||
(vrmlsldavhaq): Likewise.
|
||||
(vrmlsldavhaxq): Likewise.
|
||||
(vrmlsldavhq_p): Likewise.
|
||||
(vrmlsldavhxq_p): Likewise.
|
||||
(vabavq_p): Likewise.
|
||||
(vmladavaq_p): Likewise.
|
||||
(vstrbq_scatter_offset): Likewise.
|
||||
(vstrbq_p): Likewise.
|
||||
(vstrbq_scatter_offset_p): Likewise.
|
||||
(vstrdq_scatter_base_p): Likewise.
|
||||
(vstrdq_scatter_base): Likewise.
|
||||
(vstrdq_scatter_offset_p): Likewise.
|
||||
(vstrdq_scatter_offset): Likewise.
|
||||
(vstrdq_scatter_shifted_offset_p): Likewise.
|
||||
(vstrdq_scatter_shifted_offset): Likewise.
|
||||
(vmaxq_x): Likewise.
|
||||
(vminq_x): Likewise.
|
||||
(vmovlbq_x): Likewise.
|
||||
(vmovltq_x): Likewise.
|
||||
(vmulhq_x): Likewise.
|
||||
(vmullbq_int_x): Likewise.
|
||||
(vmullbq_poly_x): Likewise.
|
||||
(vmulltq_int_x): Likewise.
|
||||
(vmulltq_poly_x): Likewise.
|
||||
(vstrbq): Likewise.
|
||||
|
||||
2020-03-31 Jakub Jelinek <jakub@redhat.com>
|
||||
|
||||
PR target/94368
|
||||
|
|
File diff suppressed because it is too large
Load diff
|
@@ -1,3 +1,28 @@
|
|||
2020-03-31 Srinath Parvathaneni <srinath.parvathaneni@arm.com>
|
||||
|
||||
* gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u16.c: Modify.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpcsq_m_n_u8.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpeqq_n_f16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpeqq_n_f32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpgeq_m_n_s8.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpgtq_m_n_f32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpgtq_n_f16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpgtq_n_f32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpleq_m_n_f32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpleq_n_f16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpleq_n_f32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpltq_m_n_f32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpneq_m_n_f32.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpneq_n_f16.c: Likewise.
|
||||
* gcc.target/arm/mve/intrinsics/vcmpneq_n_f32.c: Likewise.
|
||||
|
||||
2020-03-31 Jakub Jelinek <jakub@redhat.com>
|
||||
|
||||
PR target/94368
|
||||
|
|
|
@@ -16,7 +16,7 @@ foo (uint16x8_t a, uint16_t b, mve_pred16_t p)
|
|||
mve_pred16_t
|
||||
foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p)
|
||||
{
|
||||
return vcmpcsq_m_n (a, b, p);
|
||||
return vcmpcsq_m (a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
|
|
|
@@ -16,7 +16,7 @@ foo (uint32x4_t a, uint32_t b, mve_pred16_t p)
|
|||
mve_pred16_t
|
||||
foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p)
|
||||
{
|
||||
return vcmpcsq_m_n (a, b, p);
|
||||
return vcmpcsq_m (a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
|
|
|
@@ -16,7 +16,7 @@ foo (uint8x16_t a, uint8_t b, mve_pred16_t p)
|
|||
mve_pred16_t
|
||||
foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vcmpcsq_m_n (a, b, p);
|
||||
return vcmpcsq_m (a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
|
|
|
@@ -15,7 +15,7 @@ foo (float16x8_t a, float16_t b)
|
|||
mve_pred16_t
|
||||
foo1 (float16x8_t a, float16_t b)
|
||||
{
|
||||
return vcmpeqq_n (a, b);
|
||||
return vcmpeqq (a, b);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vcmp.f16" } } */
|
||||
|
|
|
@@ -15,7 +15,7 @@ foo (float32x4_t a, float32_t b)
|
|||
mve_pred16_t
|
||||
foo1 (float32x4_t a, float32_t b)
|
||||
{
|
||||
return vcmpeqq_n (a, b);
|
||||
return vcmpeqq (a, b);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vcmp.f32" } } */
|
||||
|
|
|
@@ -16,7 +16,7 @@ foo (int16x8_t a, int16_t b, mve_pred16_t p)
|
|||
mve_pred16_t
|
||||
foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
|
||||
{
|
||||
return vcmpgeq_m_n (a, b, p);
|
||||
return vcmpgeq_m (a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
|
|
|
@@ -16,7 +16,7 @@ foo (int32x4_t a, int32_t b, mve_pred16_t p)
|
|||
mve_pred16_t
|
||||
foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
|
||||
{
|
||||
return vcmpgeq_m_n (a, b, p);
|
||||
return vcmpgeq_m (a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
|
|
|
@@ -16,7 +16,7 @@ foo (int8x16_t a, int8_t b, mve_pred16_t p)
|
|||
mve_pred16_t
|
||||
foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
|
||||
{
|
||||
return vcmpgeq_m_n (a, b, p);
|
||||
return vcmpgeq_m (a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
|
|
|
@@ -16,7 +16,7 @@ foo (float16x8_t a, float16_t b, mve_pred16_t p)
|
|||
mve_pred16_t
|
||||
foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
|
||||
{
|
||||
return vcmpgtq_m_n (a, b, p);
|
||||
return vcmpgtq_m (a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
|
|
|
@@ -16,7 +16,7 @@ foo (float32x4_t a, float32_t b, mve_pred16_t p)
|
|||
mve_pred16_t
|
||||
foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
|
||||
{
|
||||
return vcmpgtq_m_n (a, b, p);
|
||||
return vcmpgtq_m (a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
|
|
|
@@ -15,7 +15,7 @@ foo (float16x8_t a, float16_t b)
|
|||
mve_pred16_t
|
||||
foo1 (float16x8_t a, float16_t b)
|
||||
{
|
||||
return vcmpgtq_n (a, b);
|
||||
return vcmpgtq (a, b);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vcmp.f16" } } */
|
||||
|
|
|
@@ -15,7 +15,7 @@ foo (float32x4_t a, float32_t b)
|
|||
mve_pred16_t
|
||||
foo1 (float32x4_t a, float32_t b)
|
||||
{
|
||||
return vcmpgtq_n (a, b);
|
||||
return vcmpgtq (a, b);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vcmp.f32" } } */
|
||||
|
|
|
@@ -16,7 +16,7 @@ foo (float16x8_t a, float16_t b, mve_pred16_t p)
|
|||
mve_pred16_t
|
||||
foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
|
||||
{
|
||||
return vcmpleq_m_n (a, b, p);
|
||||
return vcmpleq_m (a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
|
|
|
@@ -16,7 +16,7 @@ foo (float32x4_t a, float32_t b, mve_pred16_t p)
|
|||
mve_pred16_t
|
||||
foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
|
||||
{
|
||||
return vcmpleq_m_n (a, b, p);
|
||||
return vcmpleq_m (a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
|
|
|
@@ -15,7 +15,7 @@ foo (float16x8_t a, float16_t b)
|
|||
mve_pred16_t
|
||||
foo1 (float16x8_t a, float16_t b)
|
||||
{
|
||||
return vcmpleq_n (a, b);
|
||||
return vcmpleq (a, b);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vcmp.f16" } } */
|
||||
|
|
|
@@ -15,7 +15,7 @@ foo (float32x4_t a, float32_t b)
|
|||
mve_pred16_t
|
||||
foo1 (float32x4_t a, float32_t b)
|
||||
{
|
||||
return vcmpleq_n (a, b);
|
||||
return vcmpleq (a, b);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vcmp.f32" } } */
|
||||
|
|
|
@@ -16,7 +16,7 @@ foo (float16x8_t a, float16_t b, mve_pred16_t p)
|
|||
mve_pred16_t
|
||||
foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
|
||||
{
|
||||
return vcmpltq_m_n (a, b, p);
|
||||
return vcmpltq_m (a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
|
|
|
@@ -16,7 +16,7 @@ foo (float32x4_t a, float32_t b, mve_pred16_t p)
|
|||
mve_pred16_t
|
||||
foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
|
||||
{
|
||||
return vcmpltq_m_n (a, b, p);
|
||||
return vcmpltq_m (a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
|
|
|
@@ -16,7 +16,7 @@ foo (float16x8_t a, float16_t b, mve_pred16_t p)
|
|||
mve_pred16_t
|
||||
foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
|
||||
{
|
||||
return vcmpneq_m_n (a, b, p);
|
||||
return vcmpneq_m (a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
|
|
|
@@ -16,7 +16,7 @@ foo (float32x4_t a, float32_t b, mve_pred16_t p)
|
|||
mve_pred16_t
|
||||
foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
|
||||
{
|
||||
return vcmpneq_m_n (a, b, p);
|
||||
return vcmpneq_m (a, b, p);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vpst" } } */
|
||||
|
|
|
@@ -15,7 +15,7 @@ foo (float16x8_t a, float16_t b)
|
|||
mve_pred16_t
|
||||
foo1 (float16x8_t a, float16_t b)
|
||||
{
|
||||
return vcmpneq_n (a, b);
|
||||
return vcmpneq (a, b);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vcmp.f16" } } */
|
||||
|
|
|
@@ -15,7 +15,7 @@ foo (float32x4_t a, float32_t b)
|
|||
mve_pred16_t
|
||||
foo1 (float32x4_t a, float32_t b)
|
||||
{
|
||||
return vcmpneq_n (a, b);
|
||||
return vcmpneq (a, b);
|
||||
}
|
||||
|
||||
/* { dg-final { scan-assembler "vcmp.f32" } } */
|
||||
|
|
Loading…
Add table
Reference in a new issue