[AArch64] Add branch-cost to cpu tuning information.

* gcc/config/aarch64/aarch64-protos.h (struct cpu_branch_cost): New.
        (tune_params): Add field branch_costs.
	(aarch64_branch_cost): Declare.
	* gcc/config/aarch64/aarch64.c (generic_branch_cost): New.
	(generic_tunings): Set field cpu_branch_cost to generic_branch_cost.
	(cortexa53_tunings): Likewise.
	(cortexa57_tunings): Likewise.
	(thunderx_tunings): Likewise.
	(xgene1_tunings): Likewise.
	(aarch64_branch_cost): Define.
	* gcc/config/aarch64/aarch64.h (BRANCH_COST): Redefine.

From-SVN: r222805
This commit is contained in:
Matthew Wahab 2015-05-05 14:01:28 +00:00 committed by Matthew Wahab
parent 6987c38487
commit b9066f5ae8
4 changed files with 54 additions and 1 deletions

View file

@ -1,3 +1,17 @@
2015-05-05 Matthew Wahab <matthew.wahab@arm.com>
* gcc/config/aarch64/aarch64-protos.h (struct cpu_branch_cost): New.
(tune_params): Add field branch_costs.
(aarch64_branch_cost): Declare.
* gcc/config/aarch64/aarch64.c (generic_branch_cost): New.
(generic_tunings): Set field cpu_branch_cost to generic_branch_cost.
(cortexa53_tunings): Likewise.
(cortexa57_tunings): Likewise.
(thunderx_tunings): Likewise.
(xgene1_tunings): Likewise.
(aarch64_branch_cost): Define.
* gcc/config/aarch64/aarch64.h (BRANCH_COST): Redefine.
2015-05-05 Uros Bizjak <ubizjak@gmail.com>
* config/i386/i386.c: Use HOST_WIDE_INT_1 instead of (HOST_WIDE_INT) 1

View file

@ -162,12 +162,20 @@ struct cpu_vector_cost
const int cond_not_taken_branch_cost; /* Cost of not taken branch. */
};
/* Branch costs.  Per-CPU costs returned by aarch64_branch_cost and
   used to implement the BRANCH_COST target macro.  */
struct cpu_branch_cost
{
const int predictable; /* Cost when the branch is predictable, or when
			  optimizing for size (!speed_p).  */
const int unpredictable; /* Cost when the branch is unpredictable and
			    we are optimizing for speed.  */
};
struct tune_params
{
const struct cpu_cost_table *const insn_extra_cost;
const struct cpu_addrcost_table *const addr_cost;
const struct cpu_regmove_cost *const regmove_cost;
const struct cpu_vector_cost *const vec_costs;
const struct cpu_branch_cost *const branch_costs;
const int memmov_cost;
const int issue_rate;
const unsigned int fuseable_ops;
@ -184,6 +192,7 @@ struct tune_params
HOST_WIDE_INT aarch64_initial_elimination_offset (unsigned, unsigned);
int aarch64_get_condition_code (rtx);
bool aarch64_bitmask_imm (HOST_WIDE_INT val, machine_mode);
int aarch64_branch_cost (bool, bool);
enum aarch64_symbol_type
aarch64_classify_symbolic_expression (rtx, enum aarch64_symbol_context);
bool aarch64_const_vec_all_same_int_p (rtx, HOST_WIDE_INT);

View file

@ -340,12 +340,20 @@ static const struct cpu_vector_cost xgene1_vector_cost =
#define AARCH64_FUSE_ADRP_LDR (1 << 3)
#define AARCH64_FUSE_CMP_BRANCH (1 << 4)
/* Generic costs for branch instructions.  Predictable and unpredictable
   branches are costed identically; see aarch64_branch_cost for how the
   two fields are selected.  */
static const struct cpu_branch_cost generic_branch_cost =
{
2, /* Predictable.  */
2 /* Unpredictable.  */
};
static const struct tune_params generic_tunings =
{
&cortexa57_extra_costs,
&generic_addrcost_table,
&generic_regmove_cost,
&generic_vector_cost,
&generic_branch_cost,
4, /* memmov_cost */
2, /* issue_rate */
AARCH64_FUSE_NOTHING, /* fuseable_ops */
@ -365,6 +373,7 @@ static const struct tune_params cortexa53_tunings =
&generic_addrcost_table,
&cortexa53_regmove_cost,
&generic_vector_cost,
&generic_branch_cost,
4, /* memmov_cost */
2, /* issue_rate */
(AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
@ -385,6 +394,7 @@ static const struct tune_params cortexa57_tunings =
&cortexa57_addrcost_table,
&cortexa57_regmove_cost,
&cortexa57_vector_cost,
&generic_branch_cost,
4, /* memmov_cost */
3, /* issue_rate */
(AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
@ -405,6 +415,7 @@ static const struct tune_params thunderx_tunings =
&generic_addrcost_table,
&thunderx_regmove_cost,
&generic_vector_cost,
&generic_branch_cost,
6, /* memmov_cost */
2, /* issue_rate */
AARCH64_FUSE_CMP_BRANCH, /* fuseable_ops */
@ -424,6 +435,7 @@ static const struct tune_params xgene1_tunings =
&xgene1_addrcost_table,
&xgene1_regmove_cost,
&xgene1_vector_cost,
&generic_branch_cost,
6, /* memmov_cost */
4, /* issue_rate */
AARCH64_FUSE_NOTHING, /* fuseable_ops */
@ -5409,6 +5421,23 @@ aarch64_address_cost (rtx x,
return cost;
}
/* Return the cost of a branch.  SPEED_P is true when the compiler is
   optimizing for speed rather than size; PREDICTABLE_P is true when the
   branch is expected to be well predicted.  Implements BRANCH_COST.  */
int
aarch64_branch_cost (bool speed_p, bool predictable_p)
{
  const struct cpu_branch_cost *costs = aarch64_tune_params->branch_costs;

  /* The higher "unpredictable" cost applies only when we are optimizing
     for speed and the branch is not predictable; predictable branches,
     and all branches when optimizing for size, use the cheaper cost.  */
  if (speed_p && !predictable_p)
    return costs->unpredictable;

  return costs->predictable;
}
/* Return true if the RTX X in mode MODE is a zero or sign extract
usable in an ADD or SUB (extended register) instruction. */
static bool

View file

@ -827,7 +827,8 @@ do { \
#define TRAMPOLINE_SECTION text_section
/* To start with. */
#define BRANCH_COST(SPEED_P, PREDICTABLE_P) 2
#define BRANCH_COST(SPEED_P, PREDICTABLE_P) \
(aarch64_branch_cost (SPEED_P, PREDICTABLE_P))
/* Assembly output. */