 gcc/ChangeLog                                       |  46
 gcc/testsuite/gcc.dg/vect/vect-outer-1-big-array.c  |   4
 gcc/testsuite/gcc.dg/vect/vect-outer-1.c            |   4
 gcc/testsuite/gcc.dg/vect/vect-outer-1a-big-array.c |   4
 gcc/testsuite/gcc.dg/vect/vect-outer-1a.c           |   4
 gcc/testsuite/gcc.dg/vect/vect-outer-1b-big-array.c |   4
 gcc/testsuite/gcc.dg/vect/vect-outer-1b.c           |   4
 gcc/testsuite/gcc.dg/vect/vect-outer-2b.c           |   4
 gcc/testsuite/gcc.dg/vect/vect-outer-3b.c           |   4
 gcc/tree-vect-data-refs.c                           |  86
 gcc/tree-vect-loop.c                                |  12
 gcc/tree-vect-slp.c                                 |  50
 gcc/tree-vect-stmts.c                               | 100
 gcc/tree-vectorizer.h                               |  24
 14 files changed, 198 insertions(+), 152 deletions(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index d346e435a24..dfb564c85c8 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,49 @@
+2012-04-10 Michael Matz <matz@suse.de>
+
+ * tree-vectorizer.h (_loop_vec_info.strided_stores): Rename to
+ grouped_stores.
+ (LOOP_VINFO_STRIDED_STORES): Rename to LOOP_VINFO_GROUPED_STORES.
+ (struct _bb_vec_info.strided_stores): Rename to grouped_stores.
+ (BB_VINFO_STRIDED_STORES): Rename to BB_VINFO_GROUPED_STORES.
+ (STMT_VINFO_STRIDED_ACCESS): Rename to STMT_VINFO_GROUPED_ACCESS.
+ (vect_strided_store_supported): Rename to vect_grouped_store_supported.
+ (vect_strided_load_supported): Rename to vect_grouped_load_supported.
+ (vect_transform_strided_load): Rename to vect_transform_grouped_load.
+ (vect_record_strided_load_vectors): Rename to
+ vect_record_grouped_load_vectors.
+ * tree-vect-data-refs.c (vect_update_misalignment_for_peel):
+ Rename use of above macros.
+ (vect_verify_datarefs_alignment): Ditto.
+ (vector_alignment_reachable_p): Ditto.
+ (vect_peeling_hash_get_lowest_cost): Ditto.
+ (vect_enhance_data_refs_alignment): Ditto.
+ (vect_analyze_group_access): Ditto and rename stride to groupsize.
+ (vect_analyze_data_ref_access): Rename "strided" to "grouped".
+ (vect_strided_store_supported): Rename to vect_grouped_store_supported.
+ (vect_strided_load_supported): Rename to vect_grouped_load_supported.
+ (vect_transform_strided_load): Rename to vect_transform_grouped_load.
+ (vect_record_strided_load_vectors): Rename to
+ vect_record_grouped_load_vectors.
+ * tree-vect-loop.c (new_loop_vec_info): Rename use of above macros.
+ (destroy_loop_vec_info): Ditto.
+ (vect_transform_loop): Ditto and rename strided_store to grouped_store.
+ * tree-vect-slp.c (vect_build_slp_tree): Rename use of above macros.
+ (vect_analyze_slp): Ditto.
+ (new_bb_vec_info): Ditto.
+ (destroy_bb_vec_info): Ditto.
+ (vect_schedule_slp_instance): Ditto and rename strided_store to
+ grouped_store.
+ * tree-vect-stmts.c (vect_cost_strided_group_size): Rename to
+ vect_cost_group_size.
+ (vect_model_store_cost): Rename use of above macros and call
+ to vect_cost_strided_group_size.
+ (vect_model_load_cost): Ditto.
+ (vectorizable_store): Ditto, rename strided_store to grouped_store
+ and calls to renamed tree-vectorizer.h functions.
+ (vectorizable_load): Ditto.
+ (vect_transform_stmt): Rename use of above macros and strided_store
+ to grouped_store.
+
2012-04-10 Jan Hubicka <jh@suse.cz>
* cgraph.h: Remove misleading comment on ipa-ref.h.
diff --git a/gcc/testsuite/gcc.dg/vect/vect-outer-1-big-array.c b/gcc/testsuite/gcc.dg/vect/vect-outer-1-big-array.c
index 804c3867bd0..b896faa7f95 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-outer-1-big-array.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-outer-1-big-array.c
@@ -22,6 +22,6 @@ foo (){
}
/* { dg-final { scan-tree-dump-times "OUTER LOOP VECTORIZED" 1 "vect" { xfail *-*-* } } } */
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 1 "vect" { xfail vect_multiple_sizes } } } */
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 2 "vect" { target vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 1 "vect" { xfail vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 2 "vect" { target vect_multiple_sizes } } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-outer-1.c b/gcc/testsuite/gcc.dg/vect/vect-outer-1.c
index 2ce8f8ebac8..2abcb179458 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-outer-1.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-outer-1.c
@@ -22,6 +22,6 @@ foo (){
}
/* { dg-final { scan-tree-dump-times "OUTER LOOP VECTORIZED" 1 "vect" { xfail *-*-* } } } */
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 1 "vect" { xfail vect_multiple_sizes } } } */
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 2 "vect" { target vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 1 "vect" { xfail vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 2 "vect" { target vect_multiple_sizes } } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-outer-1a-big-array.c b/gcc/testsuite/gcc.dg/vect/vect-outer-1a-big-array.c
index 9b418fabaa9..0a53c25484a 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-outer-1a-big-array.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-outer-1a-big-array.c
@@ -20,6 +20,6 @@ foo (){
}
/* { dg-final { scan-tree-dump-times "OUTER LOOP VECTORIZED" 1 "vect" { xfail *-*-* } } } */
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 1 "vect" { xfail vect_multiple_sizes } } } */
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 2 "vect" { target vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 1 "vect" { xfail vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 2 "vect" { target vect_multiple_sizes } } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-outer-1a.c b/gcc/testsuite/gcc.dg/vect/vect-outer-1a.c
index a9b786e235c..acd504c9e0b 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-outer-1a.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-outer-1a.c
@@ -20,6 +20,6 @@ foo (){
}
/* { dg-final { scan-tree-dump-times "OUTER LOOP VECTORIZED" 1 "vect" { xfail *-*-* } } } */
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 1 "vect" { xfail vect_multiple_sizes } } } */
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 2 "vect" { target vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 1 "vect" { xfail vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 2 "vect" { target vect_multiple_sizes } } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-outer-1b-big-array.c b/gcc/testsuite/gcc.dg/vect/vect-outer-1b-big-array.c
index 48b7180784e..551c89fba38 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-outer-1b-big-array.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-outer-1b-big-array.c
@@ -22,6 +22,6 @@ foo (){
}
/* { dg-final { scan-tree-dump-times "OUTER LOOP VECTORIZED" 1 "vect" { xfail *-*-* } } } */
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 1 "vect" { xfail vect_multiple_sizes } } } */
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 2 "vect" { target vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 1 "vect" { xfail vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 2 "vect" { target vect_multiple_sizes } } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-outer-1b.c b/gcc/testsuite/gcc.dg/vect/vect-outer-1b.c
index 815758c766f..c475a5e443e 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-outer-1b.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-outer-1b.c
@@ -22,6 +22,6 @@ foo (){
}
/* { dg-final { scan-tree-dump-times "OUTER LOOP VECTORIZED" 1 "vect" { xfail *-*-* } } } */
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 1 "vect" { xfail vect_multiple_sizes } } } */
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 2 "vect" { target vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 1 "vect" { xfail vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 2 "vect" { target vect_multiple_sizes } } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-outer-2b.c b/gcc/testsuite/gcc.dg/vect/vect-outer-2b.c
index cb62881f004..2b3351626ec 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-outer-2b.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-outer-2b.c
@@ -37,6 +37,6 @@ int main (void)
return 0;
}
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 1 "vect" { xfail vect_multiple_sizes } } } */
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 2 "vect" { target vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 1 "vect" { xfail vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 2 "vect" { target vect_multiple_sizes } } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-outer-3b.c b/gcc/testsuite/gcc.dg/vect/vect-outer-3b.c
index fda8727bf68..4a86af28d8a 100644
--- a/gcc/testsuite/gcc.dg/vect/vect-outer-3b.c
+++ b/gcc/testsuite/gcc.dg/vect/vect-outer-3b.c
@@ -49,6 +49,6 @@ int main (void)
}
/* { dg-final { scan-tree-dump-times "OUTER LOOP VECTORIZED" 1 "vect" { xfail *-*-* } } } */
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 2 "vect" { xfail vect_multiple_sizes } } } */
-/* { dg-final { scan-tree-dump-times "strided access in outer loop" 4 "vect" { target vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 2 "vect" { xfail vect_multiple_sizes } } } */
+/* { dg-final { scan-tree-dump-times "grouped access in outer loop" 4 "vect" { target vect_multiple_sizes } } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
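The testsuite updates above are mechanical, but the terminology they track is worth pinning down. A minimal sketch in C (illustrative only, not part of the patch or testsuite) of the difference the rename captures: a grouped access is a set of interleaved element accesses that jointly cover the step, whereas a merely strided access need not be part of any group:

/* Grouped (interleaved) accesses: the two stores jointly cover the
   step of 2 elements, forming a group of size 2 with no gap.  */
void
grouped (int *restrict a, const int *restrict b, int n)
{
  for (int i = 0; i < n; i++)
    {
      a[2 * i] = b[i];         /* first element of the group */
      a[2 * i + 1] = -b[i];    /* second element of the group */
    }
}

/* Strided but not grouped: a single access with step 2; half the
   elements are never touched.  The old name lumped this in with the
   interleaved case above.  */
void
strided (int *restrict a, const int *restrict b, int n)
{
  for (int i = 0; i < n; i++)
    a[2 * i] = b[i];
}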
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index d9d1abfd480..4d550a41aba 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -605,7 +605,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
}
/* When vectorizing a basic block unknown dependence can still mean
- strided access. */
+ grouped access. */
if (vect_check_interleaving (dra, drb))
return false;
@@ -1000,9 +1000,9 @@ vect_update_misalignment_for_peel (struct data_reference *dr,
/* For interleaved data accesses the step in the loop must be multiplied by
the size of the interleaving group. */
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
- if (STMT_VINFO_STRIDED_ACCESS (peel_stmt_info))
+ if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
dr_peel_size *= GROUP_SIZE (peel_stmt_info);
/* It can be assumed that the data refs with the same alignment as dr_peel
@@ -1062,7 +1062,7 @@ vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
/* For interleaving, only the alignment of the first access matters.
Skip statements marked as not vectorizable. */
- if ((STMT_VINFO_STRIDED_ACCESS (stmt_info)
+ if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
|| !STMT_VINFO_VECTORIZABLE (stmt_info))
continue;
@@ -1103,7 +1103,7 @@ vector_alignment_reachable_p (struct data_reference *dr)
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
/* For interleaved access we peel only if number of iterations in
the prolog loop ({VF - misalignment}), is a multiple of the
@@ -1288,7 +1288,7 @@ vect_peeling_hash_get_lowest_cost (void **slot, void *data)
stmt_info = vinfo_for_stmt (stmt);
/* For interleaving, only the alignment of the first access
matters. */
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
@@ -1503,7 +1503,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
/* For interleaving, only the alignment of the first access
matters. */
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
@@ -1745,7 +1745,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
members of the group, therefore we divide the number of iterations
by the group size. */
stmt_info = vinfo_for_stmt (DR_STMT (dr0));
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
npeel /= GROUP_SIZE (stmt_info);
if (vect_print_dump_info (REPORT_DETAILS))
@@ -1764,7 +1764,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
stmt_info = vinfo_for_stmt (stmt);
/* For interleaving, only the alignment of the first access
matters. */
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
@@ -1846,7 +1846,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
/* For interleaving, only the alignment of the first access
matters. */
if (aligned_access_p (dr)
- || (STMT_VINFO_STRIDED_ACCESS (stmt_info)
+ || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt))
continue;
@@ -2041,9 +2041,9 @@ vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
}
-/* Analyze groups of strided accesses: check that DR belongs to a group of
- strided accesses of legal size, step, etc. Detect gaps, single element
- interleaving, and other special cases. Set strided access info.
+/* Analyze groups of accesses: check that DR belongs to a group of
+ accesses of legal size, step, etc. Detect gaps, single element
+ interleaving, and other special cases. Set grouped access info.
Collect groups of strided stores for further use in SLP analysis. */
static bool
@@ -2057,16 +2057,16 @@ vect_analyze_group_access (struct data_reference *dr)
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
- HOST_WIDE_INT stride, last_accessed_element = 1;
+ HOST_WIDE_INT groupsize, last_accessed_element = 1;
bool slp_impossible = false;
struct loop *loop = NULL;
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
- /* For interleaving, STRIDE is STEP counted in elements, i.e., the size of the
- interleaving group (including gaps). */
- stride = dr_step / type_size;
+ /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
+ size of the interleaving group (including gaps). */
+ groupsize = dr_step / type_size;
/* Not consecutive access is possible only if it is a part of interleaving. */
if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
@@ -2078,11 +2078,11 @@ vect_analyze_group_access (struct data_reference *dr)
size. The size of the group must be a power of 2. */
if (DR_IS_READ (dr)
&& (dr_step % type_size) == 0
- && stride > 0
- && exact_log2 (stride) != -1)
+ && groupsize > 0
+ && exact_log2 (groupsize) != -1)
{
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
- GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
+ GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
if (vect_print_dump_info (REPORT_DR_DETAILS))
{
fprintf (vect_dump, "Detected single element interleaving ");
@@ -2239,9 +2239,9 @@ vect_analyze_group_access (struct data_reference *dr)
{
slp_impossible = true;
/* There is a gap after the last load in the group. This gap is a
- difference between the stride and the number of elements. When
- there is no gap, this difference should be 0. */
- GROUP_GAP (vinfo_for_stmt (stmt)) = stride - count;
+ difference between the groupsize and the number of elements.
+ When there is no gap, this difference should be 0. */
+ GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - count;
}
else
{
@@ -2265,27 +2265,27 @@ vect_analyze_group_access (struct data_reference *dr)
return false;
}
- if (stride == 0)
- stride = count;
+ if (groupsize == 0)
+ groupsize = count;
- GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
+ GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "Detected interleaving of size %d", (int)stride);
+ fprintf (vect_dump, "Detected interleaving of size %d", (int)groupsize);
/* SLP: create an SLP data structure for every interleaving group of
stores for further analysis in vect_analyse_slp. */
if (DR_IS_WRITE (dr) && !slp_impossible)
{
if (loop_vinfo)
- VEC_safe_push (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo),
+ VEC_safe_push (gimple, heap, LOOP_VINFO_GROUPED_STORES (loop_vinfo),
stmt);
if (bb_vinfo)
- VEC_safe_push (gimple, heap, BB_VINFO_STRIDED_STORES (bb_vinfo),
+ VEC_safe_push (gimple, heap, BB_VINFO_GROUPED_STORES (bb_vinfo),
stmt);
}
/* There is a gap in the end of the group. */
- if (stride - last_accessed_element > 0 && loop_vinfo)
+ if (groupsize - last_accessed_element > 0 && loop_vinfo)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "Data access with gaps requires scalar "
@@ -2307,7 +2307,7 @@ vect_analyze_group_access (struct data_reference *dr)
/* Analyze the access pattern of the data-reference DR.
In case of non-consecutive accesses call vect_analyze_group_access() to
- analyze groups of strided accesses. */
+ analyze groups of accesses. */
static bool
vect_analyze_data_ref_access (struct data_reference *dr)
@@ -2372,7 +2372,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
if (loop && nested_in_vect_loop_p (loop, stmt))
{
if (vect_print_dump_info (REPORT_ALIGNMENT))
- fprintf (vect_dump, "strided access in outer loop.");
+ fprintf (vect_dump, "grouped access in outer loop.");
return false;
}
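With the rename, the arithmetic in vect_analyze_group_access reads naturally: the group size is the data-ref step counted in elements, and the gap is whatever the group's statements leave untouched. A runnable back-of-envelope sketch (all values invented for illustration):

#include <stdio.h>

int
main (void)
{
  /* for (i) { sum0 += a[4*i]; sum1 += a[4*i+1]; }  on int a[]:  */
  long dr_step = 16;                      /* bytes advanced per iteration */
  long type_size = sizeof (int);          /* 4 */
  long groupsize = dr_step / type_size;   /* 4 elements, gaps included */
  long count = 2;                         /* accesses actually present */
  long gap = groupsize - count;           /* 2: a[4*i+2], a[4*i+3] unused */
  printf ("groupsize=%ld gap=%ld\n", groupsize, gap);
  return 0;
}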
@@ -3792,13 +3792,13 @@ vect_create_destination_var (tree scalar_dest, tree vectype)
return vec_dest;
}
-/* Function vect_strided_store_supported.
+/* Function vect_grouped_store_supported.
Returns TRUE if interleave high and interleave low permutations
are supported, and FALSE otherwise. */
bool
-vect_strided_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
+vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
enum machine_mode mode = TYPE_MODE (vectype);
@@ -3806,7 +3806,7 @@ vect_strided_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
if (exact_log2 (count) == -1)
{
if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "the size of the group of strided accesses"
+ fprintf (vect_dump, "the size of the group of accesses"
" is not a power of 2");
return false;
}
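vect_grouped_store_supported only asks whether the needed permutations exist; the permutations themselves interleave per-element vectors back into consecutive store order. A scalar model of one interleave-high/interleave-low step for a group of size 2 (an assumed layout, not GCC's internal representation):

#include <stdio.h>

/* Interleave-low alternates the low halves of two input vectors,
   interleave-high the high halves.  */
static void
interleave (const int *a, const int *b, int n, int *lo, int *hi)
{
  for (int i = 0; i < n / 2; i++)
    {
      lo[2 * i] = a[i];
      lo[2 * i + 1] = b[i];
      hi[2 * i] = a[n / 2 + i];
      hi[2 * i + 1] = b[n / 2 + i];
    }
}

int
main (void)
{
  int a[4] = { 0, 2, 4, 6 };  /* values destined for out[2*i] */
  int b[4] = { 1, 3, 5, 7 };  /* values destined for out[2*i+1] */
  int lo[4], hi[4];
  interleave (a, b, 4, lo, hi);  /* lo = 0 1 2 3, hi = 4 5 6 7 */
  for (int i = 0; i < 4; i++)
    printf ("%d %d\n", lo[i], hi[i]);
  return 0;
}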
@@ -4243,13 +4243,13 @@ vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
}
-/* Function vect_strided_load_supported.
+/* Function vect_grouped_load_supported.
Returns TRUE if even and odd permutations are supported,
and FALSE otherwise. */
bool
-vect_strided_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
+vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
enum machine_mode mode = TYPE_MODE (vectype);
@@ -4257,7 +4257,7 @@ vect_strided_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
if (exact_log2 (count) == -1)
{
if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "the size of the group of strided accesses"
+ fprintf (vect_dump, "the size of the group of accesses"
" is not a power of 2");
return false;
}
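The load-side query is the mirror image: extract-even and extract-odd permutes de-interleave what a grouped load brings in. A scalar model under the same assumed layout:

#include <stdio.h>

static void
extract_even_odd (const int *chain, int n, int *even, int *odd)
{
  for (int i = 0; i < n / 2; i++)
    {
      even[i] = chain[2 * i];      /* elements for the a[2*i] uses */
      odd[i] = chain[2 * i + 1];   /* elements for the a[2*i+1] uses */
    }
}

int
main (void)
{
  int mem[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };  /* two loaded vectors */
  int even[4], odd[4];
  extract_even_odd (mem, 8, even, odd);  /* even = 0 2 4 6, odd = 1 3 5 7 */
  for (int i = 0; i < 4; i++)
    printf ("%d %d\n", even[i], odd[i]);
  return 0;
}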
@@ -4442,7 +4442,7 @@ vect_permute_load_chain (VEC(tree,heap) *dr_chain,
}
-/* Function vect_transform_strided_load.
+/* Function vect_transform_grouped_load.
Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
to perform their permutation and ascribe the result vectorized statements to
@@ -4450,7 +4450,7 @@ vect_permute_load_chain (VEC(tree,heap) *dr_chain,
*/
void
-vect_transform_strided_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
+vect_transform_grouped_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
gimple_stmt_iterator *gsi)
{
VEC(tree,heap) *result_chain = NULL;
@@ -4460,16 +4460,16 @@ vect_transform_strided_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
vectors, that are ready for vector computation. */
result_chain = VEC_alloc (tree, heap, size);
vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
- vect_record_strided_load_vectors (stmt, result_chain);
+ vect_record_grouped_load_vectors (stmt, result_chain);
VEC_free (tree, heap, result_chain);
}
-/* RESULT_CHAIN contains the output of a group of strided loads that were
+/* RESULT_CHAIN contains the output of a group of interleaved loads that were
generated as part of the vectorization of STMT. Assign the statement
for each vector to the associated scalar statement. */
void
-vect_record_strided_load_vectors (gimple stmt, VEC(tree,heap) *result_chain)
+vect_record_grouped_load_vectors (gimple stmt, VEC(tree,heap) *result_chain)
{
gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
gimple next_stmt, new_stmt;
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 3df0e4b61ba..91a98295759 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -847,7 +847,7 @@ new_loop_vec_info (struct loop *loop)
LOOP_VINFO_MAY_ALIAS_DDRS (res) =
VEC_alloc (ddr_p, heap,
PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
- LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
+ LOOP_VINFO_GROUPED_STORES (res) = VEC_alloc (gimple, heap, 10);
LOOP_VINFO_REDUCTIONS (res) = VEC_alloc (gimple, heap, 10);
LOOP_VINFO_REDUCTION_CHAINS (res) = VEC_alloc (gimple, heap, 10);
LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
@@ -923,7 +923,7 @@ destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
vect_free_slp_instance (instance);
VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
- VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));
+ VEC_free (gimple, heap, LOOP_VINFO_GROUPED_STORES (loop_vinfo));
VEC_free (gimple, heap, LOOP_VINFO_REDUCTIONS (loop_vinfo));
VEC_free (gimple, heap, LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo));
@@ -5221,7 +5221,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
int i;
tree ratio = NULL;
int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
- bool strided_store;
+ bool grouped_store;
bool slp_scheduled = false;
unsigned int nunits;
tree cond_expr = NULL_TREE;
@@ -5460,11 +5460,11 @@ vect_transform_loop (loop_vec_info loop_vinfo)
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "transform statement.");
- strided_store = false;
- is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL);
+ grouped_store = false;
+ is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
if (is_store)
{
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
/* Interleaving. If IS_STORE is TRUE, the vectorization of the
interleaving chain was completed - free all the stores in
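The grouped_store flag threaded through vect_transform_loop exists because an interleaving chain is vectorized all at once, when its last store is reached. A hedged example (not from the testsuite) of such a chain:

/* Both stores belong to one interleaving chain; the vectorizer only
   emits code when it reaches the last member, so is_store becomes
   true there and the earlier store is skipped at transform time.  */
void
chain (int *restrict a, const int *restrict b, int n)
{
  for (int i = 0; i < n; i++)
    {
      a[2 * i] = b[i];       /* transform deferred */
      a[2 * i + 1] = -b[i];  /* whole chain vectorized here */
    }
}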
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index 014fa988722..e189c5071f0 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -651,7 +651,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
|| rhs_code != REALPART_EXPR)
&& (first_stmt_code != REALPART_EXPR
|| rhs_code != IMAGPART_EXPR)
- && !(STMT_VINFO_STRIDED_ACCESS (vinfo_for_stmt (stmt))
+ && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
&& (first_stmt_code == ARRAY_REF
|| first_stmt_code == INDIRECT_REF
|| first_stmt_code == COMPONENT_REF
@@ -704,8 +704,8 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
}
}
- /* Strided store or load. */
- if (STMT_VINFO_STRIDED_ACCESS (vinfo_for_stmt (stmt)))
+ /* Grouped store or load. */
+ if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
{
if (REFERENCE_CLASS_P (lhs))
{
@@ -729,7 +729,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
{
if (vect_print_dump_info (REPORT_SLP))
{
- fprintf (vect_dump, "Build SLP failed: strided "
+ fprintf (vect_dump, "Build SLP failed: grouped "
"loads have gaps ");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
@@ -815,19 +815,19 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
stop_recursion = true;
continue;
}
- } /* Strided access. */
+ } /* Grouped access. */
else
{
if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
{
- /* Not strided load. */
+ /* Not a grouped load. */
if (vect_print_dump_info (REPORT_SLP))
{
- fprintf (vect_dump, "Build SLP failed: not strided load ");
+ fprintf (vect_dump, "Build SLP failed: not grouped load ");
print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
- /* FORNOW: Not strided loads are not supported. */
+ /* FORNOW: Non-grouped loads are not supported. */
vect_free_oprnd_info (&oprnds_info);
return false;
}
@@ -884,7 +884,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
*inside_cost += SLP_TREE_INSIDE_OF_LOOP_COST (*node);
*outside_cost += SLP_TREE_OUTSIDE_OF_LOOP_COST (*node);
- /* Strided loads were reached - stop the recursion. */
+ /* Grouped loads were reached - stop the recursion. */
if (stop_recursion)
{
VEC_safe_push (slp_tree, heap, *loads, *node);
@@ -1109,7 +1109,7 @@ vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
/* Check if the required load permutation is supported.
LOAD_PERMUTATION contains a list of indices of the loads.
- In SLP this permutation is relative to the order of strided stores that are
+ In SLP this permutation is relative to the order of grouped stores that are
the base of the SLP instance. */
static bool
@@ -1138,7 +1138,7 @@ vect_supported_load_permutation_p (slp_instance slp_instn, int group_size,
/* In case of reduction every load permutation is allowed, since the order
of the reduction statements is not important (as opposed to the case of
- strided stores). The only condition we need to check is that all the
+ grouped stores). The only condition we need to check is that all the
load nodes are of the same size and have the same permutation (and then
rearrange all the nodes of the SLP instance according to this
permutation). */
@@ -1444,7 +1444,7 @@ vect_find_last_store_in_slp_instance (slp_instance instance)
}
-/* Analyze an SLP instance starting from a group of strided stores. Call
+/* Analyze an SLP instance starting from an interleaving group of stores. Call
vect_build_slp_tree to build a tree of packed stmts if possible.
Return FALSE if it's impossible to SLP any stmt in the loop. */
@@ -1517,7 +1517,7 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
return false;
}
- /* Create a node (a root of the SLP tree) for the packed strided stores. */
+ /* Create a node (a root of the SLP tree) for the packed grouped stores. */
scalar_stmts = VEC_alloc (gimple, heap, group_size);
next = stmt;
if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
@@ -1635,7 +1635,7 @@ bool
vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
unsigned int i;
- VEC (gimple, heap) *strided_stores, *reductions = NULL, *reduc_chains = NULL;
+ VEC (gimple, heap) *grouped_stores, *reductions = NULL, *reduc_chains = NULL;
gimple first_element;
bool ok = false;
@@ -1644,15 +1644,15 @@ vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
if (loop_vinfo)
{
- strided_stores = LOOP_VINFO_STRIDED_STORES (loop_vinfo);
+ grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
}
else
- strided_stores = BB_VINFO_STRIDED_STORES (bb_vinfo);
+ grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);
- /* Find SLP sequences starting from groups of strided stores. */
- FOR_EACH_VEC_ELT (gimple, strided_stores, i, first_element)
+ /* Find SLP sequences starting from grouped stores. */
+ FOR_EACH_VEC_ELT (gimple, grouped_stores, i, first_element)
if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element))
ok = true;
@@ -1810,7 +1810,7 @@ new_bb_vec_info (basic_block bb)
set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
}
- BB_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
+ BB_VINFO_GROUPED_STORES (res) = VEC_alloc (gimple, heap, 10);
BB_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 2);
bb->aux = res;
@@ -1844,7 +1844,7 @@ destroy_bb_vec_info (bb_vec_info bb_vinfo)
free_data_refs (BB_VINFO_DATAREFS (bb_vinfo));
free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
- VEC_free (gimple, heap, BB_VINFO_STRIDED_STORES (bb_vinfo));
+ VEC_free (gimple, heap, BB_VINFO_GROUPED_STORES (bb_vinfo));
VEC_free (slp_instance, heap, BB_VINFO_SLP_INSTANCES (bb_vinfo));
free (bb_vinfo);
bb->aux = NULL;
@@ -2859,7 +2859,7 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance,
unsigned int vectorization_factor)
{
gimple stmt;
- bool strided_store, is_store;
+ bool grouped_store, is_store;
gimple_stmt_iterator si;
stmt_vec_info stmt_info;
unsigned int vec_stmts_size, nunits, group_size;
@@ -2919,7 +2919,7 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance,
/* Loads should be inserted before the first load. */
if (SLP_INSTANCE_FIRST_LOAD_STMT (instance)
- && STMT_VINFO_STRIDED_ACCESS (stmt_info)
+ && STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& !REFERENCE_CLASS_P (gimple_get_lhs (stmt))
&& SLP_INSTANCE_LOAD_PERMUTATION (instance))
si = gsi_for_stmt (SLP_INSTANCE_FIRST_LOAD_STMT (instance));
@@ -2929,7 +2929,7 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance,
si = gsi_for_stmt (stmt);
/* Stores should be inserted just before the last store. */
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& REFERENCE_CLASS_P (gimple_get_lhs (stmt)))
{
gimple last_store = vect_find_last_store_in_slp_instance (instance);
@@ -2941,14 +2941,14 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance,
/* Mark the first element of the reduction chain as reduction to properly
transform the node. In the analysis phase only the last element of the
chain is marked as reduction. */
- if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_STRIDED_ACCESS (stmt_info)
+ if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) == stmt)
{
STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
}
- is_store = vect_transform_stmt (stmt, &si, &strided_store, node, instance);
+ is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
return is_store;
}
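The grouped_stores vectors renamed here are exactly what seeds SLP. A minimal basic-block example (mine, not from the testsuite) whose four consecutive stores form one group and therefore one SLP instance root:

/* Four consecutive stores in straight-line code: one grouped store,
   recorded in BB_VINFO_GROUPED_STORES and used as the SLP tree root.  */
void
slp_seed (int *restrict out, const int *restrict in)
{
  out[0] = in[0] + 1;
  out[1] = in[1] + 2;
  out[2] = in[2] + 3;
  out[3] = in[3] + 4;
}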
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index dabb63d7a92..968e4ed1cd7 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -851,14 +851,14 @@ vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
stmt_vinfo_set_outside_of_loop_cost (stmt_info, NULL, outside_cost);
}
-/* Function vect_cost_strided_group_size
+/* Function vect_cost_group_size
- For strided load or store, return the group_size only if it is the first
+ For grouped load or store, return the group_size only if it is the first
load or store of a group, else return 1. This ensures that group size is
only returned once per group. */
static int
-vect_cost_strided_group_size (stmt_vec_info stmt_info)
+vect_cost_group_size (stmt_vec_info stmt_info)
{
gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
@@ -871,8 +871,8 @@ vect_cost_strided_group_size (stmt_vec_info stmt_info)
/* Function vect_model_store_cost
- Models cost for stores. In the case of strided accesses, one access
- has the overhead of the strided access attributed to it. */
+ Models cost for stores. In the case of grouped accesses, one access
+ has the overhead of the grouped access attributed to it. */
void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
@@ -891,8 +891,8 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
if (dt == vect_constant_def || dt == vect_external_def)
outside_cost = vect_get_stmt_cost (scalar_to_vec);
- /* Strided access? */
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
+ /* Grouped access? */
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
if (slp_node)
{
@@ -902,12 +902,12 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
else
{
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
- group_size = vect_cost_strided_group_size (stmt_info);
+ group_size = vect_cost_group_size (stmt_info);
}
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
}
- /* Not a strided access. */
+ /* Not a grouped access. */
else
{
group_size = 1;
@@ -915,7 +915,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
}
/* We assume that the cost of a single store-lanes instruction is
- equivalent to the cost of GROUP_SIZE separate stores. If a strided
+ equivalent to the cost of GROUP_SIZE separate stores. If a grouped
access is instead being provided by a permute-and-store operation,
include the cost of the permutes. */
if (!store_lanes_p && group_size > 1)
@@ -987,8 +987,8 @@ vect_get_store_cost (struct data_reference *dr, int ncopies,
/* Function vect_model_load_cost
- Models cost for loads. In the case of strided accesses, the last access
- has the overhead of the strided access attributed to it. Since unaligned
+ Models cost for loads. In the case of grouped accesses, the last access
+ has the overhead of the grouped access attributed to it. Since unaligned
accesses are supported for loads, we also account for the costs of the
access scheme chosen. */
@@ -1005,14 +1005,14 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, bool load_lanes_p,
if (PURE_SLP_STMT (stmt_info))
return;
- /* Strided accesses? */
+ /* Grouped accesses? */
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && first_stmt && !slp_node)
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
{
- group_size = vect_cost_strided_group_size (stmt_info);
+ group_size = vect_cost_group_size (stmt_info);
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
}
- /* Not a strided access. */
+ /* Not a grouped access. */
else
{
group_size = 1;
@@ -1020,7 +1020,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, bool load_lanes_p,
}
/* We assume that the cost of a single load-lanes instruction is
- equivalent to the cost of GROUP_SIZE separate loads. If a strided
+ equivalent to the cost of GROUP_SIZE separate loads. If a grouped
access is instead being provided by a load-and-permute operation,
include the cost of the permutes. */
if (!load_lanes_p && group_size > 1)
@@ -1036,7 +1036,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, bool load_lanes_p,
/* The loads themselves. */
vect_get_load_cost (first_dr, ncopies,
- ((!STMT_VINFO_STRIDED_ACCESS (stmt_info)) || group_size > 1
+ ((!STMT_VINFO_GROUPED_ACCESS (stmt_info)) || group_size > 1
|| slp_node),
&inside_cost, &outside_cost);
@@ -1109,7 +1109,7 @@ vect_get_load_cost (struct data_reference *dr, int ncopies,
/* Unaligned software pipeline has a load of an address, an initial
load, and possibly a mask operation to "prime" the loop. However,
- if this is an access in a group of loads, which provide strided
+ if this is an access in a group of loads, which provide grouped
access, then the above cost should only be considered for one
access in the group. Inside the loop, there is a load op
and a realignment op. */
@@ -3692,7 +3692,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
int ncopies;
int j;
gimple next_stmt, first_stmt = NULL;
- bool strided_store = false;
+ bool grouped_store = false;
bool store_lanes_p = false;
unsigned int group_size, i;
VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
@@ -3777,16 +3777,16 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
return false;
}
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
- strided_store = true;
+ grouped_store = true;
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
if (!slp && !PURE_SLP_STMT (stmt_info))
{
group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
if (vect_store_lanes_supported (vectype, group_size))
store_lanes_p = true;
- else if (!vect_strided_store_supported (vectype, group_size))
+ else if (!vect_grouped_store_supported (vectype, group_size))
return false;
}
@@ -3820,7 +3820,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
/** Transform. **/
- if (strided_store)
+ if (grouped_store)
{
first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
@@ -3842,7 +3842,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (slp)
{
- strided_store = false;
+ grouped_store = false;
/* VEC_NUM is the number of vect stmts to be created for this
group. */
vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
@@ -3887,7 +3887,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
vector stmt by a factor VF/nunits. For more details see documentation in
vect_get_vec_def_for_copy_stmt. */
- /* In case of interleaving (non-unit strided access):
+ /* In case of interleaving (non-unit grouped access):
S1: &base + 2 = x2
S2: &base = x0
@@ -3943,7 +3943,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
used as an input to vect_permute_store_chain(), and OPRNDS as
an input to vect_get_vec_def_for_stmt_copy() for the next copy.
- If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
+ If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
OPRNDS are of size 1. */
next_stmt = first_stmt;
for (i = 0; i < group_size; i++)
@@ -3980,7 +3980,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
DR_CHAIN is then used as an input to vect_permute_store_chain(),
and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
next copy.
- If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
+ If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
OPRNDS are of size 1. */
for (i = 0; i < group_size; i++)
{
@@ -4018,7 +4018,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
else
{
new_stmt = NULL;
- if (strided_store)
+ if (grouped_store)
{
result_chain = VEC_alloc (tree, heap, group_size);
/* Permute. */
@@ -4038,8 +4038,8 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (slp)
vec_oprnd = VEC_index (tree, vec_oprnds, i);
- else if (strided_store)
- /* For strided stores vectorized defs are interleaved in
+ else if (grouped_store)
+ /* For grouped stores vectorized defs are interleaved in
vect_permute_store_chain(). */
vec_oprnd = VEC_index (tree, result_chain, i);
@@ -4208,7 +4208,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
tree realignment_token = NULL_TREE;
gimple phi = NULL;
VEC(tree,heap) *dr_chain = NULL;
- bool strided_load = false;
+ bool grouped_load = false;
bool load_lanes_p = false;
gimple first_stmt;
bool inv_p;
@@ -4305,9 +4305,9 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
}
/* Check if the load is a part of an interleaving chain. */
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
- strided_load = true;
+ grouped_load = true;
/* FORNOW */
gcc_assert (! nested_in_vect_loop && !STMT_VINFO_GATHER_P (stmt_info));
@@ -4317,14 +4317,14 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
if (vect_load_lanes_supported (vectype, group_size))
load_lanes_p = true;
- else if (!vect_strided_load_supported (vectype, group_size))
+ else if (!vect_grouped_load_supported (vectype, group_size))
return false;
}
}
if (negative)
{
- gcc_assert (!strided_load && !STMT_VINFO_GATHER_P (stmt_info));
+ gcc_assert (!grouped_load && !STMT_VINFO_GATHER_P (stmt_info));
alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
if (alignment_support_scheme != dr_aligned
&& alignment_support_scheme != dr_unaligned_supported)
@@ -4525,7 +4525,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
return true;
}
- if (strided_load)
+ if (grouped_load)
{
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
if (slp
@@ -4545,7 +4545,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
/* VEC_NUM is the number of vect stmts to be created for this group. */
if (slp)
{
- strided_load = false;
+ grouped_load = false;
vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
slp_perm = true;
@@ -4603,7 +4603,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
information we recorded in RELATED_STMT field is used to vectorize
stmt S2. */
- /* In case of interleaving (non-unit strided access):
+ /* In case of interleaving (non-unit grouped access):
S1: x2 = &base + 2
S2: x0 = &base
@@ -4629,7 +4629,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
corresponds to the order of scalar stmts in the interleaving chain - see
the documentation of vect_permute_load_chain()).
The generation of permutation stmts and recording them in
- STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
+ STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
In case of both multiple types and interleaving, the vector loads and
permutation stmts above are created for every copy. The result vector
@@ -4715,7 +4715,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
TYPE_SIZE_UNIT (aggr_type));
- if (strided_load || slp_perm)
+ if (grouped_load || slp_perm)
dr_chain = VEC_alloc (tree, heap, vec_num);
if (load_lanes_p)
@@ -4741,7 +4741,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
}
/* Record the mapping between SSA_NAMEs and statements. */
- vect_record_strided_load_vectors (stmt, dr_chain);
+ vect_record_grouped_load_vectors (stmt, dr_chain);
}
else
{
@@ -4896,7 +4896,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (inv_p && !bb_vinfo)
{
gimple_stmt_iterator gsi2 = *gsi;
- gcc_assert (!strided_load);
+ gcc_assert (!grouped_load);
gsi_next (&gsi2);
new_temp = vect_init_vector (stmt, scalar_dest,
vectype, &gsi2);
@@ -4912,8 +4912,8 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
}
/* Collect vector loads and later create their permutation in
- vect_transform_strided_load (). */
- if (strided_load || slp_perm)
+ vect_transform_grouped_load (). */
+ if (grouped_load || slp_perm)
VEC_quick_push (tree, dr_chain, new_temp);
/* Store vector loads in the corresponding SLP_NODE. */
@@ -4937,10 +4937,10 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
}
else
{
- if (strided_load)
+ if (grouped_load)
{
if (!load_lanes_p)
- vect_transform_strided_load (stmt, dr_chain, group_size, gsi);
+ vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
*vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
}
else
@@ -5494,7 +5494,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
bool
vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
- bool *strided_store, slp_tree slp_node,
+ bool *grouped_store, slp_tree slp_node,
slp_instance slp_node_instance)
{
bool is_store = false;
@@ -5541,13 +5541,13 @@ vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
case store_vec_info_type:
done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
gcc_assert (done);
- if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
+ if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
{
/* In case of interleaving, the whole chain is vectorized when the
last store in the chain is reached. Store stmts before the last
one are skipped, and there vec_stmt_info shouldn't be freed
meanwhile. */
- *strided_store = true;
+ *grouped_store = true;
if (STMT_VINFO_VEC_STMT (stmt_info))
is_store = true;
}
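The cost comments touched in vect_model_store_cost and vect_model_load_cost encode one tradeoff: a store/load-lanes instruction is costed like GROUP_SIZE separate accesses, while the permute fallback additionally pays for its permutes. A toy calculation (unit costs and permute count invented for illustration):

#include <stdio.h>

int
main (void)
{
  int group_size = 4, ncopies = 1;
  int access_cost = 1, perm_cost = 1;   /* invented unit costs */
  /* Store-lanes: modeled as GROUP_SIZE separate stores.  */
  int lanes = ncopies * group_size * access_cost;
  /* Permute fallback: the same stores plus interleaving permutes;
     the exact permute count depends on the chain, 4 is illustrative.  */
  int nperms = 4;
  int fallback = lanes + ncopies * nperms * perm_cost;
  printf ("store-lanes=%d, permute-and-store=%d\n", lanes, fallback);
  return 0;
}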
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index 5f031b52f2e..6804fdce3f0 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -253,9 +253,9 @@ typedef struct _loop_vec_info {
/* All interleaving chains of stores in the loop, represented by the first
stmt in the chain. */
- VEC(gimple, heap) *strided_stores;
+ VEC(gimple, heap) *grouped_stores;
- /* All SLP instances in the loop. This is a subset of the set of STRIDED_STORES
+ /* All SLP instances in the loop. This is a subset of the set of GROUPED_STORES
of the loop. */
VEC(slp_instance, heap) *slp_instances;
@@ -273,7 +273,7 @@ typedef struct _loop_vec_info {
/* Hash table used to choose the best peeling option. */
htab_t peeling_htab;
- /* When we have strided data accesses with gaps, we may introduce invalid
+ /* When we have grouped data accesses with gaps, we may introduce invalid
memory accesses. We peel the last iteration of the loop to prevent
this. */
bool peeling_for_gaps;
@@ -300,7 +300,7 @@ typedef struct _loop_vec_info {
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts
#define LOOP_VINFO_LOC(L) (L)->loop_line_number
#define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs
-#define LOOP_VINFO_STRIDED_STORES(L) (L)->strided_stores
+#define LOOP_VINFO_GROUPED_STORES(L) (L)->grouped_stores
#define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
#define LOOP_VINFO_REDUCTIONS(L) (L)->reductions
@@ -338,10 +338,10 @@ typedef struct _bb_vec_info {
basic_block bb;
/* All interleaving chains of stores in the basic block, represented by the
first stmt in the chain. */
- VEC(gimple, heap) *strided_stores;
+ VEC(gimple, heap) *grouped_stores;
/* All SLP instances in the basic block. This is a subset of the set of
- STRIDED_STORES of the basic block. */
+ GROUPED_STORES of the basic block. */
VEC(slp_instance, heap) *slp_instances;
/* All data references in the basic block. */
@@ -352,7 +352,7 @@ typedef struct _bb_vec_info {
} *bb_vec_info;
#define BB_VINFO_BB(B) (B)->bb
-#define BB_VINFO_STRIDED_STORES(B) (B)->strided_stores
+#define BB_VINFO_GROUPED_STORES(B) (B)->grouped_stores
#define BB_VINFO_SLP_INSTANCES(B) (B)->slp_instances
#define BB_VINFO_DATAREFS(B) (B)->datarefs
#define BB_VINFO_DDRS(B) (B)->ddrs
@@ -578,7 +578,7 @@ typedef struct _stmt_vec_info {
#define STMT_VINFO_GROUP_GAP(S) (S)->gap
#define STMT_VINFO_GROUP_SAME_DR_STMT(S) (S)->same_dr_stmt
#define STMT_VINFO_GROUP_READ_WRITE_DEPENDENCE(S) (S)->read_write_dep
-#define STMT_VINFO_STRIDED_ACCESS(S) ((S)->first_element != NULL && (S)->data_ref_info)
+#define STMT_VINFO_GROUPED_ACCESS(S) ((S)->first_element != NULL && (S)->data_ref_info)
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
#define GROUP_FIRST_ELEMENT(S) (S)->first_element
@@ -881,18 +881,18 @@ extern tree vect_create_data_ref_ptr (gimple, tree, struct loop *, tree,
gimple *, bool, bool *);
extern tree bump_vector_ptr (tree, gimple, gimple_stmt_iterator *, gimple, tree);
extern tree vect_create_destination_var (tree, tree);
-extern bool vect_strided_store_supported (tree, unsigned HOST_WIDE_INT);
+extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT);
-extern bool vect_strided_load_supported (tree, unsigned HOST_WIDE_INT);
+extern bool vect_grouped_load_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT);
extern void vect_permute_store_chain (VEC(tree,heap) *,unsigned int, gimple,
gimple_stmt_iterator *, VEC(tree,heap) **);
extern tree vect_setup_realignment (gimple, gimple_stmt_iterator *, tree *,
enum dr_alignment_support, tree,
struct loop **);
-extern void vect_transform_strided_load (gimple, VEC(tree,heap) *, int,
+extern void vect_transform_grouped_load (gimple, VEC(tree,heap) *, int,
gimple_stmt_iterator *);
-extern void vect_record_strided_load_vectors (gimple, VEC(tree,heap) *);
+extern void vect_record_grouped_load_vectors (gimple, VEC(tree,heap) *);
extern int vect_get_place_in_interleaving_chain (gimple, gimple);
extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
extern tree vect_create_addr_base_for_vector_ref (gimple, gimple_seq *,
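Finally, the renamed predicate is deliberately narrower than "has a chain head": reduction chains also link statements through first_element but carry no data reference. A compilable model (my own struct, not GCC's stmt_vec_info) of the distinction that lets vect_schedule_slp_instance single out reduction chains via GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info):

#include <stdbool.h>
#include <stddef.h>

struct stmt_info_model
{
  void *first_element;   /* head of interleaving / reduction chain */
  void *data_ref_info;   /* non-NULL only for memory accesses */
};

/* Chained AND a real data reference: reduction chains have a chain
   head but no data ref, so they are deliberately excluded.  */
static bool
grouped_access_p (const struct stmt_info_model *s)
{
  return s->first_element != NULL && s->data_ref_info != NULL;
}

int
main (void)
{
  struct stmt_info_model load_in_group = { &load_in_group, &load_in_group };
  struct stmt_info_model reduc_chain_head = { &reduc_chain_head, NULL };
  return (grouped_access_p (&load_in_group)        /* true */
          && !grouped_access_p (&reduc_chain_head)) /* false: no data ref */
         ? 0 : 1;
}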