rs6000.c (vspltis_constant): Correct for little-endian.

gcc/
	* config/rs6000/rs6000.c (vspltis_constant): Correct for little-endian.
	(gen_easy_altivec_constant): Likewise.
	* config/rs6000/predicates.md (easy_vector_constant_add_self,
	easy_vector_constant_msb): Likewise.
gcc/testsuite/
	* gcc.target/powerpc/altivec-consts.c: Correct for little-endian.
	Add scan-assembler-not "lvx".
	* gcc.target/powerpc/le-altivec-consts.c: New.

From-SVN: r200357
Alan Modra <amodra@gmail.com>, 2013-06-24 10:22:34 +09:30 (committed by Alan Modra)
commit 8345965a52, parent dab7667132
6 changed files with 580 additions and 122 deletions
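The underlying issue: GCC numbers CONST_VECTOR elements in memory order, so the element that must hold the vspltisb/vspltish/vspltisw immediate is the last element on a big-endian target but element 0 on a little-endian one; the hunks below replace the hard-coded "nunits - 1" with a BYTES_BIG_ENDIAN selection. A small standalone sketch of the byte layout involved (illustrative only, plain C, not part of the patch):

/* Illustration only: vspltisw 15 puts the 32-bit value 15 in every word of
   the vector.  Viewed as 16 bytes, the nonzero byte sits at offsets
   3, 7, 11, 15 on a big-endian target but at 0, 4, 8, 12 on a little-endian
   one, which is why the anchor element flips between nunits - 1 and 0.  */
#include <stdio.h>

int main (void)
{
  unsigned int v[4] = { 15, 15, 15, 15 };
  unsigned char *b = (unsigned char *) v;
  int i;

  for (i = 0; i < 16; i++)
    if (b[i] != 0)
      printf ("immediate byte at offset %d\n", i);
  return 0;
}

On a big-endian machine this prints offsets 3, 7, 11 and 15; on a little-endian machine it prints 0, 4, 8 and 12.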

gcc/ChangeLog

@@ -1,3 +1,10 @@
+2013-06-24  Alan Modra  <amodra@gmail.com>
+
+        * config/rs6000/rs6000.c (vspltis_constant): Correct for little-endian.
+        (gen_easy_altivec_constant): Likewise.
+        * config/rs6000/predicates.md (easy_vector_constant_add_self,
+        easy_vector_constant_msb): Likewise.
+
 2013-06-23  Jakub Jelinek  <jakub@redhat.com>
 
         PR target/57688

gcc/config/rs6000/predicates.md

@@ -527,9 +527,11 @@
             (match_test "easy_altivec_constant (op, mode)")))
 {
   HOST_WIDE_INT val;
+  int elt;
   if (mode == V2DImode || mode == V2DFmode)
     return 0;
-  val = const_vector_elt_as_int (op, GET_MODE_NUNITS (mode) - 1);
+  elt = BYTES_BIG_ENDIAN ? GET_MODE_NUNITS (mode) - 1 : 0;
+  val = const_vector_elt_as_int (op, elt);
   val = ((val & 0xff) ^ 0x80) - 0x80;
   return EASY_VECTOR_15_ADD_SELF (val);
 })
@@ -541,9 +543,11 @@
             (match_test "easy_altivec_constant (op, mode)")))
 {
   HOST_WIDE_INT val;
+  int elt;
   if (mode == V2DImode || mode == V2DFmode)
     return 0;
-  val = const_vector_elt_as_int (op, GET_MODE_NUNITS (mode) - 1);
+  elt = BYTES_BIG_ENDIAN ? GET_MODE_NUNITS (mode) - 1 : 0;
+  val = const_vector_elt_as_int (op, elt);
   return EASY_VECTOR_MSB (val, GET_MODE_INNER (mode));
 })

gcc/config/rs6000/rs6000.c

@@ -4657,7 +4657,7 @@ vspltis_constant (rtx op, unsigned step, unsigned copies)
   bitsize = GET_MODE_BITSIZE (inner);
   mask = GET_MODE_MASK (inner);
-  val = const_vector_elt_as_int (op, nunits - 1);
+  val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
   splat_val = val;
   msb_val = val > 0 ? 0 : -1;
@@ -4697,7 +4697,7 @@ vspltis_constant (rtx op, unsigned step, unsigned copies)
   for (i = 0; i < nunits - 1; ++i)
     {
       HOST_WIDE_INT desired_val;
-      if (((i + 1) & (step - 1)) == 0)
+      if (((BYTES_BIG_ENDIAN ? i + 1 : i) & (step - 1)) == 0)
        desired_val = val;
       else
        desired_val = msb_val;
@@ -4782,13 +4782,13 @@ gen_easy_altivec_constant (rtx op)
 {
   enum machine_mode mode = GET_MODE (op);
   int nunits = GET_MODE_NUNITS (mode);
-  rtx last = CONST_VECTOR_ELT (op, nunits - 1);
+  rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
   unsigned step = nunits / 4;
   unsigned copies = 1;
   /* Start with a vspltisw. */
   if (vspltis_constant (op, step, copies))
-    return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));
+    return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
   /* Then try with a vspltish. */
   if (step == 1)
@@ -4797,7 +4797,7 @@ gen_easy_altivec_constant (rtx op)
     step >>= 1;
   if (vspltis_constant (op, step, copies))
-    return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));
+    return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
   /* And finally a vspltisb. */
   if (step == 1)
@@ -4806,7 +4806,7 @@ gen_easy_altivec_constant (rtx op)
     step >>= 1;
   if (vspltis_constant (op, step, copies))
-    return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));
+    return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
   gcc_unreachable ();
 }

gcc/testsuite/ChangeLog

@@ -1,3 +1,9 @@
+2013-06-24  Alan Modra  <amodra@gmail.com>
+
+        * gcc.target/powerpc/altivec-consts.c: Correct for little-endian.
+        Add scan-assembler-not "lvx".
+        * gcc.target/powerpc/le-altivec-consts.c: New.
+
 2013-06-23  Paolo Carlini  <paolo.carlini@oracle.com>
 
         * g++.dg/cpp0x/sfinae47.C: New.

gcc/testsuite/gcc.target/powerpc/altivec-consts.c

@@ -11,31 +11,24 @@ typedef __attribute__ ((vector_size (16))) unsigned char v16qi;
 typedef __attribute__ ((vector_size (16))) unsigned short v8hi;
 typedef __attribute__ ((vector_size (16))) unsigned int v4si;
-char w[16] __attribute__((aligned(16)));
-/* Emulate the vspltis? instructions on a 16-byte array of chars. */
+typedef __attribute__((aligned(16))) char c16[16];
+typedef __attribute__((aligned(16))) short s8[8];
+typedef __attribute__((aligned(16))) int i4[4];
-void vspltisb (char *v, int val)
-{
-  int i;
-  for (i = 0; i < 16; i++)
-    v[i] = val;
-}
+#define V16QI(V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16) \
+  v16qi v = {V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16}; \
+  static c16 w = {V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16}; \
+  check_v16qi (v, w);
-void vspltish (char *v, int val)
-{
-  int i;
-  for (i = 0; i < 16; i += 2)
-    v[i] = val >> 7, v[i + 1] = val;
-}
+#define V8HI(V1,V2,V3,V4,V5,V6,V7,V8) \
+  v8hi v = {V1,V2,V3,V4,V5,V6,V7,V8}; \
+  static s8 w = {V1,V2,V3,V4,V5,V6,V7,V8}; \
+  check_v8hi (v, w);
-void vspltisw (char *v, int val)
-{
-  int i;
-  for (i = 0; i < 16; i += 4)
-    v[i] = v[i + 1] = v[i + 2] = val >> 7, v[i + 3] = val;
-}
+#define V4SI(V1,V2,V3,V4) \
+  v4si v = {V1,V2,V3,V4}; \
+  static i4 w = {V1,V2,V3,V4}; \
+  check_v4si (v, w);
 /* Use three different check functions for each mode-instruction pair.
@@ -48,13 +41,13 @@ void __attribute__ ((noinline)) check_v16qi (v16qi v1, char *v2)
     abort ();
 }
-void __attribute__ ((noinline)) check_v8hi (v8hi v1, char *v2)
+void __attribute__ ((noinline)) check_v8hi (v8hi v1, short *v2)
 {
   if (memcmp (&v1, v2, 16))
     abort ();
 }
-void __attribute__ ((noinline)) check_v4si (v4si v1, char *v2)
+void __attribute__ ((noinline)) check_v4si (v4si v1, int *v2)
 {
   if (memcmp (&v1, v2, 16))
     abort ();
@@ -65,72 +58,52 @@ void __attribute__ ((noinline)) check_v4si (v4si v1, char *v2)
 void v16qi_vspltisb ()
 {
-  v16qi v = { 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15 };
-  vspltisb (w, 15);
-  check_v16qi (v, w);
+  V16QI (15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15);
 }
 void v16qi_vspltisb_neg ()
 {
-  v16qi v = { -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5 };
-  vspltisb (w, -5);
-  check_v16qi (v, w);
+  V16QI (-5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5);
 }
 void v16qi_vspltisb_addself ()
 {
-  v16qi v = { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30 };
-  vspltisb (w, 30);
-  check_v16qi (v, w);
+  V16QI (30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30);
 }
 void v16qi_vspltisb_neg_addself ()
 {
-  v16qi v = { -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24 };
-  vspltisb (w, -24);
-  check_v16qi (v, w);
+  V16QI (-24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24);
 }
 void v16qi_vspltish ()
 {
-  v16qi v = { 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15 };
-  vspltish (w, 15);
-  check_v16qi (v, w);
+  V16QI (0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15);
 }
 void v16qi_vspltish_addself ()
 {
-  v16qi v = { 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30 };
-  vspltish (w, 30);
-  check_v16qi (v, w);
+  V16QI (0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30);
 }
 void v16qi_vspltish_neg ()
 {
-  v16qi v = { -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5 };
-  vspltish (w, -5);
-  check_v16qi (v, w);
+  V16QI (-1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5);
 }
 void v16qi_vspltisw ()
 {
-  v16qi v = { 0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0, 15 };
-  vspltisw (w, 15);
-  check_v16qi (v, w);
+  V16QI (0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0, 15);
 }
 void v16qi_vspltisw_addself ()
 {
-  v16qi v = { 0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0, 30 };
-  vspltisw (w, 30);
-  check_v16qi (v, w);
+  V16QI (0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0, 30);
 }
 void v16qi_vspltisw_neg ()
 {
-  v16qi v = { -1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1, -5 };
-  vspltisw (w, -5);
-  check_v16qi (v, w);
+  V16QI (-1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1, -5);
 }
@@ -138,144 +111,104 @@ void v16qi_vspltisw_neg ()
 void v8hi_vspltisb ()
 {
-  v8hi v = { 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F };
-  vspltisb (w, 15);
-  check_v8hi (v, w);
+  V8HI (0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F);
 }
 void v8hi_vspltisb_addself ()
 {
-  v8hi v = { 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E };
-  vspltisb (w, 30);
-  check_v8hi (v, w);
+  V8HI (0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E);
 }
 void v8hi_vspltisb_neg ()
 {
-  v8hi v = { 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB };
-  vspltisb (w, -5);
-  check_v8hi (v, w);
+  V8HI (0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB);
 }
 void v8hi_vspltish ()
 {
-  v8hi v = { 15, 15, 15, 15, 15, 15, 15, 15 };
-  vspltish (w, 15);
-  check_v8hi (v, w);
+  V8HI (15, 15, 15, 15, 15, 15, 15, 15);
 }
 void v8hi_vspltish_neg ()
 {
-  v8hi v = { -5, -5, -5, -5, -5, -5, -5, -5 };
-  vspltish (w, -5);
-  check_v8hi (v, w);
+  V8HI (-5, -5, -5, -5, -5, -5, -5, -5);
 }
 void v8hi_vspltish_addself ()
 {
-  v8hi v = { 30, 30, 30, 30, 30, 30, 30, 30 };
-  vspltish (w, 30);
-  check_v8hi (v, w);
+  V8HI (30, 30, 30, 30, 30, 30, 30, 30);
 }
 void v8hi_vspltish_neg_addself ()
 {
-  v8hi v = { -24, -24, -24, -24, -24, -24, -24, -24 };
-  vspltish (w, -24);
-  check_v8hi (v, w);
+  V8HI (-24, -24, -24, -24, -24, -24, -24, -24);
 }
 void v8hi_vspltisw ()
 {
-  v8hi v = { 0, 15, 0, 15, 0, 15, 0, 15 };
-  vspltisw (w, 15);
-  check_v8hi (v, w);
+  V8HI (0, 15, 0, 15, 0, 15, 0, 15);
 }
 void v8hi_vspltisw_addself ()
 {
-  v8hi v = { 0, 30, 0, 30, 0, 30, 0, 30 };
-  vspltisw (w, 30);
-  check_v8hi (v, w);
+  V8HI (0, 30, 0, 30, 0, 30, 0, 30);
 }
 void v8hi_vspltisw_neg ()
 {
-  v8hi v = { -1, -5, -1, -5, -1, -5, -1, -5 };
-  vspltisw (w, -5);
-  check_v8hi (v, w);
+  V8HI (-1, -5, -1, -5, -1, -5, -1, -5);
 }
 /* V4SI tests. */
 void v4si_vspltisb ()
 {
-  v4si v = { 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F };
-  vspltisb (w, 15);
-  check_v4si (v, w);
+  V4SI (0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F);
 }
 void v4si_vspltisb_addself ()
 {
-  v4si v = { 0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E };
-  vspltisb (w, 30);
-  check_v4si (v, w);
+  V4SI (0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E);
 }
 void v4si_vspltisb_neg ()
 {
-  v4si v = { 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB };
-  vspltisb (w, -5);
-  check_v4si (v, w);
+  V4SI (0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB);
 }
 void v4si_vspltish ()
 {
-  v4si v = { 0x000F000F, 0x000F000F, 0x000F000F, 0x000F000F };
-  vspltish (w, 15);
-  check_v4si (v, w);
+  V4SI (0x000F000F, 0x000F000F, 0x000F000F, 0x000F000F);
 }
 void v4si_vspltish_addself ()
 {
-  v4si v = { 0x001E001E, 0x001E001E, 0x001E001E, 0x001E001E };
-  vspltish (w, 30);
-  check_v4si (v, w);
+  V4SI (0x001E001E, 0x001E001E, 0x001E001E, 0x001E001E);
 }
 void v4si_vspltish_neg ()
 {
-  v4si v = { 0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB };
-  vspltish (w, -5);
-  check_v4si (v, w);
+  V4SI (0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB);
 }
 void v4si_vspltisw ()
 {
-  v4si v = { 15, 15, 15, 15 };
-  vspltisw (w, 15);
-  check_v4si (v, w);
+  V4SI (15, 15, 15, 15);
 }
 void v4si_vspltisw_neg ()
 {
-  v4si v = { -5, -5, -5, -5 };
-  vspltisw (w, -5);
-  check_v4si (v, w);
+  V4SI (-5, -5, -5, -5);
 }
 void v4si_vspltisw_addself ()
 {
-  v4si v = { 30, 30, 30, 30 };
-  vspltisw (w, 30);
-  check_v4si (v, w);
+  V4SI (30, 30, 30, 30);
 }
 void v4si_vspltisw_neg_addself ()
 {
-  v4si v = { -24, -24, -24, -24 };
-  vspltisw (w, -24);
-  check_v4si (v, w);
+  V4SI (-24, -24, -24, -24);
 }
@@ -316,3 +249,5 @@ int main ()
   v4si_vspltisw_neg_addself ();
   return 0;
 }
+
+/* { dg-final { scan-assembler-not "lvx" { target { ! powerpc*le-*-* } } } } */
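The new le-altivec-consts.c that follows repeats these tests with the element values permuted for little-endian byte order: a vector of halfwords all equal to 15 reads as 0, 15, 0, 15, ... byte-by-byte on big-endian but as 15, 0, 15, 0, ... on little-endian, which is why its V16QI expectations differ from altivec-consts.c above. A standalone check of that layout (illustrative only, not part of the testsuite):

/* Illustration only: print the byte view of a splatted halfword vector.
   Expected output is 0 15 0 15 ... on big-endian and 15 0 15 0 ... on
   little-endian, matching the differing V16QI expectations in
   altivec-consts.c and le-altivec-consts.c.  */
#include <stdio.h>

int main (void)
{
  unsigned short h[8] = { 15, 15, 15, 15, 15, 15, 15, 15 };
  unsigned char *b = (unsigned char *) h;
  int i;

  for (i = 0; i < 16; i++)
    printf ("%u ", b[i]);
  printf ("\n");
  return 0;
}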

gcc/testsuite/gcc.target/powerpc/le-altivec-consts.c

@@ -0,0 +1,506 @@
/* { dg-do run { target { powerpc*-*-* && vmx_hw } } } */
/* { dg-do compile { target { powerpc*-*-* && { ! vmx_hw } } } } */
/* { dg-require-effective-target powerpc_altivec_ok } */
/* { dg-options "-maltivec -mabi=altivec -O2" } */
/* Check that "easy" AltiVec constants are correctly synthesized. */
extern void abort (void);
typedef __attribute__ ((vector_size (16))) unsigned char v16qi;
typedef __attribute__ ((vector_size (16))) unsigned short v8hi;
typedef __attribute__ ((vector_size (16))) unsigned int v4si;
typedef __attribute__((aligned(16))) char c16[16];
typedef __attribute__((aligned(16))) short s8[8];
typedef __attribute__((aligned(16))) int i4[4];
#define V16QI(V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16) \
v16qi v = {V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16}; \
static c16 w = {V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16}; \
check_v16qi (v, w);
#define V8HI(V1,V2,V3,V4,V5,V6,V7,V8) \
v8hi v = {V1,V2,V3,V4,V5,V6,V7,V8}; \
static s8 w = {V1,V2,V3,V4,V5,V6,V7,V8}; \
check_v8hi (v, w);
#define V4SI(V1,V2,V3,V4) \
v4si v = {V1,V2,V3,V4}; \
static i4 w = {V1,V2,V3,V4}; \
check_v4si (v, w);
/* Use three different check functions for each mode-instruction pair.
The callers have no typecasting and no addressable vectors, to make
the test more robust. */
void __attribute__ ((noinline)) check_v16qi (v16qi v1, char *v2)
{
if (memcmp (&v1, v2, 16))
abort ();
}
void __attribute__ ((noinline)) check_v8hi (v8hi v1, short *v2)
{
if (memcmp (&v1, v2, 16))
abort ();
}
void __attribute__ ((noinline)) check_v4si (v4si v1, int *v2)
{
if (memcmp (&v1, v2, 16))
abort ();
}
/* V16QI tests. */
void v16qi_vspltisb ()
{
V16QI (15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15);
}
void v16qi_vspltisb_neg ()
{
V16QI (-5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5);
}
void v16qi_vspltisb_addself ()
{
V16QI (30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30);
}
void v16qi_vspltisb_neg_addself ()
{
V16QI (-24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24);
}
void v16qi_vspltish ()
{
V16QI (15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0);
}
void v16qi_vspltish_addself ()
{
V16QI (30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0);
}
void v16qi_vspltish_neg ()
{
V16QI (-5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1);
}
void v16qi_vspltisw ()
{
V16QI (15, 0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0);
}
void v16qi_vspltisw_addself ()
{
V16QI (30, 0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0);
}
void v16qi_vspltisw_neg ()
{
V16QI (-5, -1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1);
}
/* V8HI tests. */
void v8hi_vspltisb ()
{
V8HI (0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F);
}
void v8hi_vspltisb_addself ()
{
V8HI (0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E);
}
void v8hi_vspltisb_neg ()
{
V8HI (0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB);
}
void v8hi_vspltish ()
{
V8HI (15, 15, 15, 15, 15, 15, 15, 15);
}
void v8hi_vspltish_neg ()
{
V8HI (-5, -5, -5, -5, -5, -5, -5, -5);
}
void v8hi_vspltish_addself ()
{
V8HI (30, 30, 30, 30, 30, 30, 30, 30);
}
void v8hi_vspltish_neg_addself ()
{
V8HI (-24, -24, -24, -24, -24, -24, -24, -24);
}
void v8hi_vspltisw ()
{
V8HI (15, 0, 15, 0, 15, 0, 15, 0);
}
void v8hi_vspltisw_addself ()
{
V8HI (30, 0, 30, 0, 30, 0, 30, 0);
}
void v8hi_vspltisw_neg ()
{
V8HI (-5, -1, -5, -1, -5, -1, -5, -1);
}
/* V4SI tests. */
void v4si_vspltisb ()
{
V4SI (0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F);
}
void v4si_vspltisb_addself ()
{
V4SI (0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E);
}
void v4si_vspltisb_neg ()
{
V4SI (0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB);
}
void v4si_vspltish ()
{
V4SI (0x000F000F, 0x000F000F, 0x000F000F, 0x000F000F);
}
void v4si_vspltish_addself ()
{
V4SI (0x001E001E, 0x001E001E, 0x001E001E, 0x001E001E);
}
void v4si_vspltish_neg ()
{
V4SI (0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB);
}
void v4si_vspltisw ()
{
V4SI (15, 15, 15, 15);
}
void v4si_vspltisw_neg ()
{
V4SI (-5, -5, -5, -5);
}
void v4si_vspltisw_addself ()
{
V4SI (30, 30, 30, 30);
}
void v4si_vspltisw_neg_addself ()
{
V4SI (-24, -24, -24, -24);
}
int main ()
{
v16qi_vspltisb ();
v16qi_vspltisb_neg ();
v16qi_vspltisb_addself ();
v16qi_vspltisb_neg_addself ();
v16qi_vspltish ();
v16qi_vspltish_addself ();
v16qi_vspltish_neg ();
v16qi_vspltisw ();
v16qi_vspltisw_addself ();
v16qi_vspltisw_neg ();
v8hi_vspltisb ();
v8hi_vspltisb_addself ();
v8hi_vspltisb_neg ();
v8hi_vspltish ();
v8hi_vspltish_neg ();
v8hi_vspltish_addself ();
v8hi_vspltish_neg_addself ();
v8hi_vspltisw ();
v8hi_vspltisw_addself ();
v8hi_vspltisw_neg ();
v4si_vspltisb ();
v4si_vspltisb_addself ();
v4si_vspltisb_neg ();
v4si_vspltish ();
v4si_vspltish_addself ();
v4si_vspltish_neg ();
v4si_vspltisw ();
v4si_vspltisw_neg ();
v4si_vspltisw_addself ();
v4si_vspltisw_neg_addself ();
return 0;
}
/* { dg-final { scan-assembler-not "lvx" { target { powerpc*le-*-* } } } } */