Diffstat (limited to 'gcc/tree-vect-loop.c')
-rw-r--r--  gcc/tree-vect-loop.c | 54
1 file changed, 36 insertions(+), 18 deletions(-)
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 83cd35b308c..02e00dd1820 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -41,6 +41,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
+#include "target.h"
/* Loop Vectorization Pass.
@@ -1265,6 +1266,15 @@ vect_analyze_loop_form (struct loop *loop)
}
+/* Get cost by calling cost target builtin. */
+
+static inline
+int vect_get_cost (enum vect_cost_for_stmt type_of_cost)
+{
+ return targetm.vectorize.builtin_vectorization_cost (type_of_cost);
+}
+
+
/* Function vect_analyze_loop_operations.
Scan the loop stmts and make sure they are all vectorizable. */
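The new helper simply forwards to the targetm.vectorize.builtin_vectorization_cost hook, so each back end can report its own relative costs instead of the fixed TARG_*_COST macros removed below. For illustration only, a hook implementation might look like the following sketch; the function name and the specific cost numbers are assumptions chosen for the example, not values taken from this patch.

/* Illustrative sketch of a builtin_vectorization_cost hook.  The cost
   values below are assumptions for the example; a real back end would
   tune them to its microarchitecture.  */
static int
example_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost)
{
  switch (type_of_cost)
    {
    case cond_branch_not_taken:
      return 2;   /* guard branch that falls through  */
    case cond_branch_taken:
      return 3;   /* taken guard branches are the most expensive  */
    default:
      return 1;   /* scalar_stmt, vector_stmt, scalar_to_vec,
                     vec_to_scalar, loads and stores, ...  */
    }
}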
@@ -2227,7 +2237,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
|| LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
- vec_outside_cost += TARG_COND_TAKEN_BRANCH_COST;
+ vec_outside_cost += vect_get_cost (cond_branch_taken);
/* Count statements in scalar loop. Using this as scalar cost for a single
iteration for now.
@@ -2296,8 +2306,8 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
branch per peeled loop. Even if scalar loop iterations are known,
vector iterations are not known since peeled prologue iterations are
not known. Hence guards remain the same. */
- peel_guard_costs += 2 * (TARG_COND_TAKEN_BRANCH_COST
- + TARG_COND_NOT_TAKEN_BRANCH_COST);
+ peel_guard_costs += 2 * (vect_get_cost (cond_branch_taken)
+ + vect_get_cost (cond_branch_not_taken));
}
else
{
@@ -2323,8 +2333,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
/* If peeled iterations are known but number of scalar loop
iterations are unknown, count a taken branch per peeled loop. */
- peel_guard_costs += 2 * TARG_COND_TAKEN_BRANCH_COST;
-
+ peel_guard_costs += 2 * vect_get_cost (cond_branch_taken);
}
else
{
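As a worked example of the guard accounting above, using the illustrative hook costs from the earlier sketch (cond_branch_taken = 3, cond_branch_not_taken = 2, both assumptions): when neither the prologue nor the epilogue peel count is known, peel_guard_costs accumulates 2 * (3 + 2) = 10, whereas when the peeled iteration counts are known but the scalar iteration count is not, it accumulates only 2 * 3 = 6 for the two taken branches.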
@@ -2399,16 +2408,16 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
/* Cost model check occurs at versioning. */
if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
|| LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
- scalar_outside_cost += TARG_COND_NOT_TAKEN_BRANCH_COST;
+ scalar_outside_cost += vect_get_cost (cond_branch_not_taken);
else
{
/* Cost model check occurs at prologue generation. */
if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
- scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST
- + TARG_COND_NOT_TAKEN_BRANCH_COST;
+ scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken)
+ + vect_get_cost (cond_branch_not_taken);
/* Cost model check occurs at epilogue generation. */
else
- scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST;
+ scalar_outside_cost += 2 * vect_get_cost (cond_branch_taken);
}
}
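Continuing with the same illustrative numbers for the runtime cost-model check: if the check is folded into the versioning test, the scalar path pays only one not-taken branch (2); if it is emitted with the prologue (peeling amount unknown, LOOP_PEELING_FOR_ALIGNMENT < 0), it pays 2 * 3 + 2 = 8; if it is emitted with the epilogue, it pays 2 * 3 = 6.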
@@ -2518,7 +2527,8 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
/* Cost of reduction op inside loop. */
- STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) += ncopies * TARG_VEC_STMT_COST;
+ STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
+ += ncopies * vect_get_cost (vector_stmt);
stmt = STMT_VINFO_STMT (stmt_info);
@@ -2558,7 +2568,7 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
code = gimple_assign_rhs_code (orig_stmt);
/* Add in cost for initial definition. */
- outer_cost += TARG_SCALAR_TO_VEC_COST;
+ outer_cost += vect_get_cost (scalar_to_vec);
/* Determine cost of epilogue code.
@@ -2568,7 +2578,8 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
if (!nested_in_vect_loop_p (loop, orig_stmt))
{
if (reduc_code != ERROR_MARK)
- outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST;
+ outer_cost += vect_get_cost (vector_stmt)
+ + vect_get_cost (vec_to_scalar);
else
{
int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
@@ -2585,12 +2596,14 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
&& optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
/* Final reduction via vector shifts and the reduction operator. Also
requires scalar extract. */
- outer_cost += ((exact_log2(nelements) * 2) * TARG_VEC_STMT_COST
- + TARG_VEC_TO_SCALAR_COST);
+ outer_cost += ((exact_log2(nelements) * 2)
+ * vect_get_cost (vector_stmt)
+ + vect_get_cost (vec_to_scalar));
else
/* Use extracts and reduction op for final reduction. For N elements,
we have N extracts and N-1 reduction ops. */
- outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST);
+ outer_cost += ((nelements + nelements - 1)
+ * vect_get_cost (vector_stmt));
}
}
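The two epilogue strategies diverge quickly as the vector widens. For example, with nelements = 8 and the illustrative costs of 1 for both vector_stmt and vec_to_scalar (assumptions, as above): the shift-based reduction costs (exact_log2 (8) * 2) * 1 + 1 = 7, while the extract-based fallback costs (8 + 8 - 1) * 1 = 15, matching the "N extracts and N-1 reduction ops" comment.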
@@ -2613,9 +2626,11 @@ static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
{
/* loop cost for vec_loop. */
- STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST;
+ STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info)
+ = ncopies * vect_get_cost (vector_stmt);
/* prologue cost for vec_init and vec_step. */
- STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_SCALAR_TO_VEC_COST;
+ STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info)
+ = 2 * vect_get_cost (scalar_to_vec);
if (vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
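With the same illustrative costs, an induction with ncopies = 2 would be reported as inside_cost = 2 * 1 = 2 for the per-iteration vector statements and outside_cost = 2 * 1 = 2 for the one-time vec_init and vec_step prologue statements.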
@@ -3028,12 +3043,15 @@ get_initial_def_for_reduction (gimple stmt, tree init_val,
*adjustment_def = init_val;
}
- if (code == MULT_EXPR || code == BIT_AND_EXPR)
+ if (code == MULT_EXPR)
{
real_init_val = dconst1;
int_init_val = 1;
}
+ if (code == BIT_AND_EXPR)
+ int_init_val = -1;
+
if (SCALAR_FLOAT_TYPE_P (scalar_type))
def_for_init = build_real (scalar_type, real_init_val);
else
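The last hunk separates the initial values for the reduction accumulator: multiplication keeps 1 as its neutral element, but an AND reduction now starts from -1 (all bits set), since starting from 1 would wrongly clear every bit above bit 0. A minimal sketch of the idea in isolation, with a hypothetical helper name:

/* Illustrative only: neutral starting values for a few reduction codes,
   mirroring the fix above.  The function name is hypothetical.  */
static int
example_neutral_int_value (enum tree_code code)
{
  switch (code)
    {
    case MULT_EXPR:
      return 1;    /* x * 1 == x  */
    case BIT_AND_EXPR:
      return -1;   /* x & ~0 == x; using 1 would drop the high bits  */
    default:
      return 0;    /* PLUS_EXPR, BIT_IOR_EXPR, BIT_XOR_EXPR, ...  */
    }
}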