rs6000: Correct definition of macro of fixed point efficient unaligned

Macro TARGET_EFFICIENT_OVERLAPPING_UNALIGNED is used in rs6000-string.cc
to guard the platforms which are efficient on fixed point unaligned
load/store.  It was originally defined by TARGET_EFFICIENT_UNALIGNED_VSX,
which is enabled from P8 onward and can be disabled by the -mno-vsx
option, so the definition is improper.  This patch corrects it and calls
slow_unaligned_access to judge whether fixed point unaligned load/store
is efficient or not.

gcc/
	* config/rs6000/rs6000.h (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED):
	Remove.
	* config/rs6000/rs6000-string.cc (select_block_compare_mode):
	Replace TARGET_EFFICIENT_OVERLAPPING_UNALIGNED with
	targetm.slow_unaligned_access.
	(expand_block_compare_gpr): Likewise.
	(expand_block_compare): Likewise.
	(expand_strncmp_gpr_sequence): Likewise.

gcc/testsuite/
	* gcc.target/powerpc/block-cmp-1.c: New.
	* gcc.target/powerpc/block-cmp-2.c: New.
This commit is contained in:
Haochen Gui 2023-12-27 10:30:06 +08:00
parent f2d47aa70e
commit 78bd9e2560
4 changed files with 32 additions and 10 deletions

View file

@ -305,7 +305,7 @@ select_block_compare_mode (unsigned HOST_WIDE_INT offset,
else if (bytes == GET_MODE_SIZE (QImode))
return QImode;
else if (bytes < GET_MODE_SIZE (SImode)
&& TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
&& !targetm.slow_unaligned_access (SImode, align * BITS_PER_UNIT)
&& offset >= GET_MODE_SIZE (SImode) - bytes)
/* This matches the case where we have SImode and 3 bytes
and offset >= 1 and permits us to move back one and overlap
@ -313,7 +313,7 @@ select_block_compare_mode (unsigned HOST_WIDE_INT offset,
unwanted bytes off of the input. */
return SImode;
else if (word_mode_ok && bytes < UNITS_PER_WORD
&& TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
&& !targetm.slow_unaligned_access (word_mode, align * BITS_PER_UNIT)
&& offset >= UNITS_PER_WORD-bytes)
/* Similarly, if we can use DImode it will get matched here and
can do an overlapping read that ends at the end of the block. */
@ -1749,7 +1749,8 @@ expand_block_compare_gpr(unsigned HOST_WIDE_INT bytes, unsigned int base_align,
load_mode_size = GET_MODE_SIZE (load_mode);
if (bytes >= load_mode_size)
cmp_bytes = load_mode_size;
else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
else if (!targetm.slow_unaligned_access (load_mode,
align * BITS_PER_UNIT))
{
/* Move this load back so it doesn't go past the end.
P8/P9 can do this efficiently. */
@ -1987,7 +1988,8 @@ expand_block_compare (rtx operands[])
if (!CONST_INT_P (align_rtx))
return false;
unsigned int base_align = UINTVAL (align_rtx) / BITS_PER_UNIT;
unsigned int align_by_bits = UINTVAL (align_rtx);
unsigned int base_align = align_by_bits / BITS_PER_UNIT;
gcc_assert (GET_MODE (target) == SImode);
@ -2026,7 +2028,7 @@ expand_block_compare (rtx operands[])
/* The code generated for p7 and older is not faster than glibc
memcmp if alignment is small and length is not short, so bail
out to avoid those conditions. */
if (!TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
if (targetm.slow_unaligned_access (word_mode, align_by_bits)
&& ((base_align == 1 && bytes > 16)
|| (base_align == 2 && bytes > 32)))
return false;
@ -2168,7 +2170,8 @@ expand_strncmp_gpr_sequence (unsigned HOST_WIDE_INT bytes_to_compare,
load_mode_size = GET_MODE_SIZE (load_mode);
if (bytes_to_compare >= load_mode_size)
cmp_bytes = load_mode_size;
else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
else if (!targetm.slow_unaligned_access (load_mode,
align * BITS_PER_UNIT))
{
/* Move this load back so it doesn't go past the end.
P8/P9 can do this efficiently. */

View file

@ -485,10 +485,6 @@ extern int rs6000_vector_align[];
#define TARGET_NO_SF_SUBREG TARGET_DIRECT_MOVE_64BIT
#define TARGET_ALLOW_SF_SUBREG (!TARGET_DIRECT_MOVE_64BIT)
/* This wants to be set for p8 and newer. On p7, overlapping unaligned
loads are slow. */
#define TARGET_EFFICIENT_OVERLAPPING_UNALIGNED TARGET_EFFICIENT_UNALIGNED_VSX
/* Byte/char syncs were added as phased in for ISA 2.06B, but are not present
in power7, so conditionalize them on p8 features. TImode syncs need quad
memory support. */

View file

@ -0,0 +1,11 @@
/* { dg-do compile } */
/* { dg-options "-O2 -mdejagnu-cpu=power8 -mno-vsx" } */
/* { dg-final { scan-assembler-not {\mb[l]? memcmp\M} } } */
/* Test that it can still expand memcmpsi inline instead of calling the
   library on P8 with VSX disabled.  */
/* Compare a fixed 20-byte block; the constant length lets the rs6000
   backend expand cmpmemsi inline (GPR sequence) rather than emitting a
   call to memcmp, which the scan-assembler-not directive above checks.  */
int foo (const char* s1, const char* s2)
{
return __builtin_memcmp (s1, s2, 20);
}

View file

@ -0,0 +1,12 @@
/* { dg-do compile } */
/* { dg-require-effective-target opt_mstrict_align } */
/* { dg-options "-O2 -mstrict-align" } */
/* { dg-final { scan-assembler-times {\mb[l]? memcmp\M} 1 } } */
/* Test that it calls the library for block memory compare when
   strict-align is set.  The flag causes rs6000_slow_unaligned_access
   to return true.  */
/* Compare a fixed 20-byte block; under -mstrict-align unaligned access
   is reported slow, so the backend must NOT expand inline and instead
   emits a memcmp call, which the scan-assembler-times directive above
   checks.  */
int foo (const char* s1, const char* s2)
{
return __builtin_memcmp (s1, s2, 20);
}