diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 82ca731e229..48b8b4c4564 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,10 @@
+2013-07-02  Sriraman Tallam
+
+	* config/i386/i386.c (gate_insert_vzeroupper): Check if target
+	ISA is AVX.
+	(ix86_option_override_internal): Turn on all -mavx target flags by
+	default as they are dependent on AVX anyway.
+
 2013-07-02  Cary Coutant
 
 	* dwarf2out.c (loc_checksum): Call hash_loc_operands for a
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 3dbaddfa1bb..6a431877231 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -2578,7 +2578,7 @@ static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
 static bool
 gate_insert_vzeroupper (void)
 {
-  return TARGET_VZEROUPPER;
+  return TARGET_AVX && TARGET_VZEROUPPER;
 }
 
 static unsigned int
@@ -3936,34 +3936,26 @@ ix86_option_override_internal (bool main_args_p)
 #endif
     }
 
-  if (TARGET_AVX)
-    {
-      /* When not optimize for size, enable vzeroupper optimization for
-         TARGET_AVX with -fexpensive-optimizations and split 32-byte
-         AVX unaligned load/store.  */
-      if (!optimize_size)
-        {
-          if (flag_expensive_optimizations
-              && !(target_flags_explicit & MASK_VZEROUPPER))
-            target_flags |= MASK_VZEROUPPER;
-          if ((x86_avx256_split_unaligned_load & ix86_tune_mask)
-              && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_LOAD))
-            target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD;
-          if ((x86_avx256_split_unaligned_store & ix86_tune_mask)
-              && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_STORE))
-            target_flags |= MASK_AVX256_SPLIT_UNALIGNED_STORE;
-          /* Enable 128-bit AVX instruction generation
-             for the auto-vectorizer.  */
-          if (TARGET_AVX128_OPTIMAL
-              && !(target_flags_explicit & MASK_PREFER_AVX128))
-            target_flags |= MASK_PREFER_AVX128;
-        }
-    }
-  else
-    {
-      /* Disable vzeroupper pass if TARGET_AVX is disabled.  */
-      target_flags &= ~MASK_VZEROUPPER;
-    }
+  /* When not optimize for size, enable vzeroupper optimization for
+     TARGET_AVX with -fexpensive-optimizations and split 32-byte
+     AVX unaligned load/store.  */
+  if (!optimize_size)
+    {
+      if (flag_expensive_optimizations
+          && !(target_flags_explicit & MASK_VZEROUPPER))
+        target_flags |= MASK_VZEROUPPER;
+      if ((x86_avx256_split_unaligned_load & ix86_tune_mask)
+          && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_LOAD))
+        target_flags |= MASK_AVX256_SPLIT_UNALIGNED_LOAD;
+      if ((x86_avx256_split_unaligned_store & ix86_tune_mask)
+          && !(target_flags_explicit & MASK_AVX256_SPLIT_UNALIGNED_STORE))
+        target_flags |= MASK_AVX256_SPLIT_UNALIGNED_STORE;
+      /* Enable 128-bit AVX instruction generation
+         for the auto-vectorizer.  */
+      if (TARGET_AVX128_OPTIMAL
+          && !(target_flags_explicit & MASK_PREFER_AVX128))
+        target_flags |= MASK_PREFER_AVX128;
+    }
 
   if (ix86_recip_name)
     {
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 5b0acaf62d9..e0c5efcba49 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,7 @@
+2013-07-02  Sriraman Tallam
+
+	* gcc.target/i386/avx-inline.c: New test.
+
 2013-07-02  Maciej W. Rozycki
 
 	* gcc.target/mips/call-1.c: Accept JALRS and JALR.
diff --git a/gcc/testsuite/gcc.target/i386/avx-inline.c b/gcc/testsuite/gcc.target/i386/avx-inline.c
new file mode 100644
index 00000000000..05df95e0524
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/avx-inline.c
@@ -0,0 +1,20 @@
+/* Check if avx target functions can inline lower target functions.  */
+/* { dg-do compile } */
+/* { dg-options "-O0 -mno-avx -mno-sse3" } */
+
+__attribute__((always_inline,target("sse3")))
+inline int callee ()
+{
+  return 0;
+}
+
+__attribute__((target("avx")))
+inline int caller ()
+{
+  return callee ();
+}
+
+int main ()
+{
+  return caller ();
+}