re PR tree-optimization/68306 (ICE: in vectorizable_store, at tree-vect-stmts.c:5651)

2015-11-13  Richard Biener  <rguenther@suse.de>

	PR tree-optimization/68306
	* tree-vect-data-refs.c (verify_data_ref_alignment): Move
	loop related checks ...
	(vect_verify_datarefs_alignment): ... here.
	(vect_slp_analyze_and_verify_node_alignment): Compute and
	verify alignment of the single DR that matters.
	* tree-vect-stmts.c (vectorizable_store): Add an assert.
	(vectorizable_load): Add a comment.
	* tree-vect-slp.c (vect_analyze_slp_cost_1): Fix DR used
	for determining load cost.

	* gcc.dg/pr68306.c: Adjust.
	* gcc.dg/pr68306-2.c: New testcase.
	* gcc.dg/pr68306-3.c: Likewise.

From-SVN: r230310

gcc/ChangeLog

@@ -1,3 +1,16 @@
+2015-11-13  Richard Biener  <rguenther@suse.de>
+
+	PR tree-optimization/68306
+	* tree-vect-data-refs.c (verify_data_ref_alignment): Move
+	loop related checks ...
+	(vect_verify_datarefs_alignment): ... here.
+	(vect_slp_analyze_and_verify_node_alignment): Compute and
+	verify alignment of the single DR that matters.
+	* tree-vect-stmts.c (vectorizable_store): Add an assert.
+	(vectorizable_load): Add a comment.
+	* tree-vect-slp.c (vect_analyze_slp_cost_1): Fix DR used
+	for determining load cost.
+
 2015-11-13  Ilya Enkovich  <enkovich.gnu@gmail.com>
 
 	* tree-vect-loop.c (vect_determine_vectorization_factor): Check

gcc/testsuite/ChangeLog

@@ -1,3 +1,10 @@
+2015-11-13  Richard Biener  <rguenther@suse.de>
+
+	PR tree-optimization/68306
+	* gcc.dg/pr68306.c: Adjust.
+	* gcc.dg/pr68306-2.c: New testcase.
+	* gcc.dg/pr68306-3.c: Likewise.
+
 2015-11-13  Ilya Enkovich  <enkovich.gnu@gmail.com>
 
 	* g++.dg/vect/simd-bool-comparison-1.cc: New test.

gcc/testsuite/gcc.dg/pr68306-2.c

@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+/* { dg-additional-options "-mno-sse -mno-mmx" { target x86_64-*-* } } */
+
+struct {
+  int tz_minuteswest;
+  int tz_dsttime;
+} a, b;
+void fn1() {
+  b.tz_minuteswest = a.tz_minuteswest;
+  b.tz_dsttime = a.tz_dsttime;
+}

gcc/testsuite/gcc.dg/pr68306-3.c

@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+/* { dg-additional-options "-mno-sse -mno-mmx" { target x86_64-*-* } } */
+/* { dg-additional-options "-mno-altivec -mno-vsx" { target powerpc*-*-* } } */
+
+extern void fn2();
+struct {
+  unsigned qp_num;
+  unsigned starting_psn;
+  void *private_data;
+} a;
+struct {
+  unsigned id;
+  unsigned qpn;
+  unsigned psn;
+} b;
+void fn1() {
+  a.qp_num = b.qpn;
+  a.starting_psn = b.psn;
+  fn2(b.id);
+}

gcc/testsuite/gcc.dg/pr68306.c

@@ -1,5 +1,6 @@
 /* { dg-do compile } */
 /* { dg-options "-O3" } */
+/* { dg-additional-options "-mno-sse -mno-mmx" { target x86_64-*-* } } */
 
 enum powerpc_pmc_type { PPC_PMC_IBM };
 struct {

gcc/tree-vect-data-refs.c

@@ -916,22 +916,8 @@ vect_update_misalignment_for_peel (struct data_reference *dr,
 static bool
 verify_data_ref_alignment (data_reference_p dr)
 {
-  enum dr_alignment_support supportable_dr_alignment;
-  gimple *stmt = DR_STMT (dr);
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
-
-  /* For interleaving, only the alignment of the first access matters.  */
-  if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
-      && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
-    return true;
-
-  /* Strided accesses perform only component accesses, alignment is
-     irrelevant for them.  */
-  if (STMT_VINFO_STRIDED_P (stmt_info)
-      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
-    return true;
-
-  supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
+  enum dr_alignment_support supportable_dr_alignment
+    = vect_supportable_dr_alignment (dr, false);
   if (!supportable_dr_alignment)
     {
       if (dump_enabled_p ())
@@ -977,6 +963,18 @@ vect_verify_datarefs_alignment (loop_vec_info vinfo)
 
       if (!STMT_VINFO_RELEVANT_P (stmt_info))
 	continue;
+
+      /* For interleaving, only the alignment of the first access matters.  */
+      if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
+	  && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+	return true;
+
+      /* Strided accesses perform only component accesses, alignment is
+	 irrelevant for them.  */
+      if (STMT_VINFO_STRIDED_P (stmt_info)
+	  && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
+	return true;
+
       if (! verify_data_ref_alignment (dr))
 	return false;
     }
@@ -2100,28 +2098,22 @@ vect_analyze_data_refs_alignment (loop_vec_info vinfo)
 static bool
 vect_slp_analyze_and_verify_node_alignment (slp_tree node)
 {
-  unsigned i;
-  gimple *stmt;
-  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
+  /* We vectorize from the first scalar stmt in the node unless
+     the node is permuted in which case we start from the first
+     element in the group.  */
+  gimple *first_stmt = SLP_TREE_SCALAR_STMTS (node)[0];
+  if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
+    first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt));
+
+  data_reference_p dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
+  if (! vect_compute_data_ref_alignment (dr)
+      || ! verify_data_ref_alignment (dr))
     {
-      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
-
-      /* Strided accesses perform only component accesses, misalignment
-	 information is irrelevant for them.  */
-      if (STMT_VINFO_STRIDED_P (stmt_info)
-	  && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
-	continue;
-
-      data_reference_p dr = STMT_VINFO_DATA_REF (stmt_info);
-      if (! vect_compute_data_ref_alignment (dr)
-	  || ! verify_data_ref_alignment (dr))
-	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: bad data alignment in basic "
-			     "block.\n");
-	  return false;
-	}
+      if (dump_enabled_p ())
+	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+			 "not vectorized: bad data alignment in basic "
+			 "block.\n");
+      return false;
     }
 
   return true;

gcc/tree-vect-slp.c

@@ -1429,6 +1429,13 @@ vect_analyze_slp_cost_1 (slp_instance instance, slp_tree node,
 	{
 	  int i;
 	  gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
+	  /* If the load is permuted then the alignment is determined by
+	     the first group element not by the first scalar stmt DR.  */
+	  if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
+	    {
+	      stmt = GROUP_FIRST_ELEMENT (stmt_info);
+	      stmt_info = vinfo_for_stmt (stmt);
+	    }
 	  vect_model_load_cost (stmt_info, ncopies_for_cost, false,
 				node, prologue_cost_vec, body_cost_vec);
 	  /* If the load is permuted record the cost for the permutation.

gcc/tree-vect-stmts.c

@@ -5464,6 +5464,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 	 group.  */
       vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
       first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
+      gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
       first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
       op = gimple_assign_rhs1 (first_stmt);
     }
@@ -6658,9 +6659,9 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
   if (grouped_load)
     {
       first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
-      if (slp
-	  && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
-	  && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
+      /* For BB vectorization we directly vectorize a subchain
+	 without permutation.  */
+      if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
 	first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
 
       /* Check if the chain of loads is already vectorized.  */