Allow functions with the AVX target attribute to inline functions compiled for a lower target.

Allow functions with the AVX target attribute to inline functions compiled for a
lower target.  This bug was reported here:
http://gcc.gnu.org/ml/gcc-patches/2013-06/msg01620.html

2013-07-02  Sriraman Tallam  <tmsriram@google.com>

	* config/i386/i386.c (gate_insert_vzeroupper): Check if target
	ISA is AVX.
	(ix86_option_override_internal): Turn on all -mavx target flags by
	default as they are dependent on AVX anyway.

	* gcc.target/i386/avx-inline.c: New test.

From-SVN: r200627
This commit is contained in:
Sriraman Tallam 2013-07-03 00:03:06 +00:00 committed by Sriraman Tallam
parent 4b04355371
commit 786fb9b6c4
4 changed files with 52 additions and 29 deletions

View file

@@ -1,3 +1,10 @@
2013-07-02 Sriraman Tallam <tmsriram@google.com>
* config/i386/i386.c (gate_insert_vzeroupper): Check if target
ISA is AVX.
(ix86_option_override_internal): Turn on all -mavx target flags by
default as they are dependent on AVX anyway.
2013-07-02  Cary Coutant  <ccoutant@google.com>

	* dwarf2out.c (loc_checksum): Call hash_loc_operands for a

View file

@@ -2578,7 +2578,7 @@ static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
 static bool
 gate_insert_vzeroupper (void)
 {
-  return TARGET_VZEROUPPER;
+  return TARGET_AVX && TARGET_VZEROUPPER;
 }

 static unsigned int
@@ -3936,34 +3936,26 @@ ix86_option_override_internal (bool main_args_p)
 #endif
     }

-  if (TARGET_AVX)
-    {
-      /* When not optimize for size, enable vzeroupper optimization for
-	 TARGET_AVX with -fexpensive-optimizations and split 32-byte
-	 AVX unaligned load/store.  */
-      if (!optimize_size)
-	{
-	  if (flag_expensive_optimizations
-	      && !(target_flags_explicit & MASK_VZEROUPPER))
-	    target_flags |= MASK_VZEROUPPER;
-	  if ((x86_avx256_split_unaligned_load & ix86_tune_mask)
-	      && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_LOAD))
-	    target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD;
-	  if ((x86_avx256_split_unaligned_store & ix86_tune_mask)
-	      && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_STORE))
-	    target_flags |= MASK_AVX256_SPLIT_UNALIGNED_STORE;
-	  /* Enable 128-bit AVX instruction generation
-	     for the auto-vectorizer.  */
-	  if (TARGET_AVX128_OPTIMAL
-	      && !(target_flags_explicit & MASK_PREFER_AVX128))
-	    target_flags |= MASK_PREFER_AVX128;
-	}
-    }
-  else
-    {
-      /* Disable vzeroupper pass if TARGET_AVX is disabled.  */
-      target_flags &= ~MASK_VZEROUPPER;
-    }
+  /* When not optimize for size, enable vzeroupper optimization for
+     TARGET_AVX with -fexpensive-optimizations and split 32-byte
+     AVX unaligned load/store.  */
+  if (!optimize_size)
+    {
+      if (flag_expensive_optimizations
+	  && !(target_flags_explicit & MASK_VZEROUPPER))
+	target_flags |= MASK_VZEROUPPER;
+      if ((x86_avx256_split_unaligned_load & ix86_tune_mask)
+	  && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_LOAD))
+	target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD;
+      if ((x86_avx256_split_unaligned_store & ix86_tune_mask)
+	  && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_STORE))
+	target_flags |= MASK_AVX256_SPLIT_UNALIGNED_STORE;
+      /* Enable 128-bit AVX instruction generation
+	 for the auto-vectorizer.  */
+      if (TARGET_AVX128_OPTIMAL
+	  && !(target_flags_explicit & MASK_PREFER_AVX128))
+	target_flags |= MASK_PREFER_AVX128;
+    }

   if (ix86_recip_name)
     {

View file

@@ -1,3 +1,7 @@
2013-07-02 Sriraman Tallam <tmsriram@google.com>
* gcc.target/i386/avx-inline.c: New test.
2013-07-02  Maciej W. Rozycki  <macro@codesourcery.com>

	* gcc.target/mips/call-1.c: Accept JALRS and JALR.

View file

@@ -0,0 +1,20 @@
/* Check if avx target functions can inline lower target functions. */
/* { dg-do compile } */
/* { dg-options "-O0 -mno-avx -mno-sse3" } */
/* Callee built for the "sse3" target — a lower ISA than the caller's
   "avx".  always_inline forces the compiler to inline it (or error out),
   so this checks that the AVX caller is allowed to inline it.  */
__attribute__((always_inline,target("sse3")))
inline int callee ()
{
  return 0;
}
/* Caller built for the "avx" target; inlining callee () here must be
   accepted even though callee targets only SSE3.  */
__attribute__((target("avx")))
inline int caller ()
{
  return callee ();
}
/* Entry point: just invokes caller () so the target-attribute chain is
   actually compiled.  */
int main ()
{
  return caller ();
}