summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndre Vehreschild <vehre@gmx.de>2015-06-22 10:07:00 +0200
committerAndre Vehreschild <vehre@gmx.de>2015-06-22 10:07:00 +0200
commit2a0988848179762499e2f8e2bc0fe6cdbf0b19c6 (patch)
tree6066a710508eb678bf6f316f7f13ce5c05da1b8c
parent483c93d548f5cda1bd05d58471e1e7ea9e159218 (diff)
parentef6580d9ae921f92df5c08ef7b454dcd567f71ab (diff)
downloadgcc-2a0988848179762499e2f8e2bc0fe6cdbf0b19c6.tar.gz
Merge branch 'vehre/pr58586' into vehre/pr66035
-rw-r--r--gcc/ChangeLog114
-rw-r--r--gcc/DATESTAMP2
-rw-r--r--gcc/ada/ChangeLog39
-rw-r--r--gcc/ada/gcc-interface/decl.c137
-rw-r--r--gcc/ada/gcc-interface/gigi.h49
-rw-r--r--gcc/ada/gcc-interface/misc.c2
-rw-r--r--gcc/ada/gcc-interface/trans.c109
-rw-r--r--gcc/ada/gcc-interface/utils.c99
-rw-r--r--gcc/auto-inc-dec.c34
-rw-r--r--gcc/c-family/ChangeLog5
-rw-r--r--gcc/c-family/c-common.c6
-rw-r--r--gcc/combine.c6
-rw-r--r--gcc/common.opt2
-rw-r--r--gcc/config/aarch64/aarch64.md12
-rw-r--r--gcc/config/arm/arm.c58
-rw-r--r--gcc/config/arm/arm.opt4
-rw-r--r--gcc/config/i386/i386.c8
-rw-r--r--gcc/config/sh/sh.c22
-rw-r--r--gcc/config/vax/vax.md6
-rw-r--r--gcc/cp/ChangeLog32
-rw-r--r--gcc/cp/call.c10
-rw-r--r--gcc/cp/constexpr.c8
-rw-r--r--gcc/cp/decl.c14
-rw-r--r--gcc/cp/pt.c36
-rw-r--r--gcc/cp/semantics.c4
-rw-r--r--gcc/cp/typeck.c15
-rw-r--r--gcc/df-core.c5
-rw-r--r--gcc/df-scan.c10
-rw-r--r--gcc/dominance.c6
-rw-r--r--gcc/expr.c21
-rw-r--r--gcc/fold-const.c4
-rw-r--r--gcc/fortran/ChangeLog7
-rw-r--r--gcc/fortran/resolve.c13
-rw-r--r--gcc/genattrtab.c6
-rw-r--r--gcc/gimple-match-head.c18
-rw-r--r--gcc/ifcvt.c10
-rw-r--r--gcc/internal-fn.c40
-rw-r--r--gcc/ipa-devirt.c7
-rw-r--r--gcc/ipa-icf.c127
-rw-r--r--gcc/ipa-icf.h14
-rw-r--r--gcc/ipa-polymorphic-call.c20
-rw-r--r--gcc/loop-iv.c10
-rw-r--r--gcc/lra-lives.c14
-rw-r--r--gcc/lra.c5
-rw-r--r--gcc/lto-streamer-out.c7
-rw-r--r--gcc/modulo-sched.c4
-rw-r--r--gcc/omega.c6
-rw-r--r--gcc/optabs.c9
-rw-r--r--gcc/reload1.c6
-rw-r--r--gcc/sel-sched-ir.c48
-rw-r--r--gcc/simplify-rtx.c5
-rw-r--r--gcc/testsuite/ChangeLog128
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-nsdmi8.C15
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/lambda/lambda-rep1.C14
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/constexpr-empty1.C6
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/var-templ31.C8
-rw-r--r--gcc/testsuite/g++.dg/diagnostic/inhibit-warn-1.C32
-rw-r--r--gcc/testsuite/g++.dg/diagnostic/inhibit-warn-2.C36
-rw-r--r--gcc/testsuite/g++.dg/ipa/pr65908.C27
-rw-r--r--gcc/testsuite/g++.dg/overload/pmf3.C70
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h19
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecpe.c154
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecps.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vreinterpret.c741
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrev.c200
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrshl.c627
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrshr_n.c504
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrshrn_n.c143
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsqrte.c157
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsqrts.c118
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsra_n.c553
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vset_lane.c99
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl_n.c96
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshll_n.c56
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshr_n.c95
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshrn_n.c70
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vsra_n.c117
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vst1_lane.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vstX_lane.c578
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vtbX.c289
-rw-r--r--gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vtst.c120
-rw-r--r--gcc/testsuite/gcc.target/aarch64/fmovd-zero-mem.c (renamed from gcc/testsuite/gcc.target/aarch64/fmovd-zero.c)2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/fmovd-zero-reg.c11
-rw-r--r--gcc/testsuite/gcc.target/aarch64/fmovf-zero-mem.c (renamed from gcc/testsuite/gcc.target/aarch64/fmovf-zero.c)2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/fmovf-zero-reg.c11
-rw-r--r--gcc/testsuite/gcc.target/aarch64/fmovld-zero-mem.c10
-rw-r--r--gcc/testsuite/gcc.target/aarch64/fmovld-zero-reg.c11
-rw-r--r--gcc/testsuite/gcc.target/aarch64/pr62308.c6
-rw-r--r--gcc/testsuite/gcc.target/arm/flip-thumb.c24
-rw-r--r--gcc/testsuite/gfortran.dg/gomp/omp_parallel_1.f9037
-rw-r--r--gcc/testsuite/gnat.dg/specs/debug1.ads2
-rw-r--r--gcc/tree-if-conv.c24
-rw-r--r--gcc/tree-loop-distribution.c8
-rw-r--r--gcc/tree-predcom.c6
-rw-r--r--gcc/tree-ssa-alias.c23
-rw-r--r--gcc/tree-ssa-ifcombine.c6
-rw-r--r--gcc/tree-ssa-loop-ivopts.c8
-rw-r--r--gcc/tree-ssa-loop-niter.c10
-rw-r--r--gcc/tree-ssa-phiopt.c8
-rw-r--r--gcc/tree-ssa-sccvn.c6
-rw-r--r--gcc/tree-vect-slp.c5
-rw-r--r--gcc/tree-vect-stmts.c6
-rw-r--r--gcc/tree-vrp.c30
103 files changed, 6121 insertions, 672 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index e567ed0db71..14f3121a82d 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,117 @@
+2015-06-22 Christian Bruel <christian.bruel@st.com>
+
+ PR target/52144
+ * config/arm/arm.c (add_attribute, arm_insert_attributes): New functions
+ (TARGET_INSERT_ATTRIBUTES): Define.
+ (thumb_flipper): New var.
+ * config/arm/arm.opt (-mflip-thumb): New switch.
+
+2015-06-22 Jan Hubicka <hubicka@ucw.cz>
+ Martin Liska <mliska@suse.cz>
+
+ PR ipa/65908
+ * ipa-icf.c (sem_item::target_supports_symbol_aliases): Remove
+ construction of arg_types.
+ (sem_function::sem_function): Likewise.
+ (sem_function::~sem_function): Remove destruction of arg_types.
+ (sem_function::compatible_parm_types_p): New function.
+ (sem_function::equals_wpa): Reorg matching of return values
+ and parameter types.
+ (sem_function::equals_private): Reorg matching of argument types.
+ (sem_function::parse_tree_args): Remove.
+ * ipa-icf.h (init_wpa): Do not call it.
+ (parse_tree_args): Remove.
+ (compatible_parm_types_p): Declare.
+ (result_type): Remove.
+ (arg_types): Remove.
+
+2015-06-22 Jan Hubicka <hubicka@ucw.cz>
+
+ PR ipa/66351
+ * ipa-polymorphic-call.c
+ (ipa_polymorphic_call_context::get_dynamic_type): Fix thinko when
+ initializing alias oracle; fix formatting; set base_alias_set if it
+ is known.
+
+2015-06-22 Mikhail Maltsev <maltsevm@gmail.com>
+
+ * auto-inc-dec.c (reverse_mem, reverse_inc): Remove.
+ (parse_add_or_inc): Use std::swap instead of reverse_{mem,inc}.
+ (find_inc): Likewise.
+ * combine.c (combine_simplify_rtx): Use std::swap instead of manually
+ swapping.
+ * df-core.c (df_worklist_dataflow_doublequeue): Likewise.
+ * df-scan.c (df_swap_refs): Remove.
+ (df_sort_and_compress_refs): Use std::swap instead of df_swap_refs.
+ * dominance.c (link_roots): Use std::swap instead of manually swapping.
+ * expr.c (expand_expr_real_2, do_store_flag): Likewise.
+ * fold-const.c (fold_relational_const): Likewise.
+ * genattrtab.c (simplify_test_exp): Likewise.
+ * gimple-match-head.c (gimple_resimplify2, gimple_resimplify3,
+ gimple_simplify): Likewise.
+ * ifcvt.c (noce_try_abs, find_if_header): Likewise.
+ * internal-fn.c (expand_addsub_overflow, expand_mul_overflow): Likewise.
+ * ipa-devirt.c (add_type_duplicate): Likewise.
+ * loop-iv.c (get_biv_step_1, iv_number_of_iterations): Likewise.
+ * lra-lives.c (lra_setup_reload_pseudo_preferenced_hard_reg): Likewise.
+ * lra.c (lra_create_copy): Likewise.
+ * lto-streamer-out.c (DFS::DFS): Likewise.
+ * modulo-sched.c (get_sched_window): Likewise.
+ * omega.c (omega_pretty_print_problem): Likewise.
+ * optabs.c (prepare_float_lib_cmp, expand_mult_highpart): Likewise.
+ * reload1.c (reloads_unique_chain_p): Likewise.
+ * sel-sched-ir.c (exchange_lv_sets, exchange_av_sets): Remove.
+ (exchange_data_sets): Move logic from exchange_{av,lv}_sets here and
+ use std::swap.
+ * simplify-rtx.c (simplify_unary_operation_1): Use std::swap instead of
+ manually swapping.
+ * tree-if-conv.c (is_cond_scalar_reduction, predicate_scalar_phi,
+ predicate_mem_writes): Likewise.
+ * tree-loop-distribution.c (pg_add_dependence_edges): Likewise.
+ * tree-predcom.c (combine_chains): Likewise.
+ * tree-ssa-alias.c (nonoverlapping_component_refs_p,
+ refs_may_alias_p_1): Likewise.
+ * tree-ssa-ifcombine.c (recognize_if_then_else): Likewise.
+ * tree-ssa-loop-ivopts.c (extract_cond_operands): Likewise.
+ * tree-ssa-loop-niter.c (refine_bounds_using_guard,
+ number_of_iterations_cond): Likewise.
+ * tree-ssa-phiopt.c (tree_ssa_phiopt_worker): Likewise.
+ * tree-ssa-sccvn.c (vn_nary_op_compute_hash): Likewise.
+ * tree-vect-slp.c (vect_build_slp_tree): Likewise.
+ * tree-vect-stmts.c (supportable_widening_operation): Likewise.
+ * tree-vrp.c (extract_range_from_binary_expr_1,
+ extract_range_from_unary_expr_1): Likewise.
+
+2015-06-20 Marek Polacek <polacek@redhat.com>
+
+ * common.opt (fsanitize-undefined-trap-on-error): Add Driver.
+
+2015-06-19 Kaz Kojima <kkojima@gcc.gnu.org>
+
+ PR target/66591
+ * config/sh/sh.c (prepare_move_operands): Replace subreg
+ index term with R0 for base and index addressing.
+
+2015-06-19 Jim Wilson <jim.wilson@linaro.org>
+
+ * config/aarch64/aarch64.md (mov<mode>:GPF): Don't call force_reg if
+ op1 is an fp zero.
+ (movsf_aarch64): Change condition from register_operand to
+ aarch64_reg_or_fp_zero for op1. Change type for alternative 6 to
+ load1. Change type for alternative 7 to store1.
+ (movdf_aarch64): Likewise.
+
+2015-06-19 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * config/vax/vax.md: Adjust sign/zero extend patterns to
+ handle SUBREGs in operands[1].
+
+2015-06-19 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * config/i386/i386.c (ix86_function_versions): Use std::swap instead
+ of manually swapping.
+ (expand_vec_perm_interleave2): Likewise.
+
2015-06-19 Ilya Enkovich <enkovich.gnu@gmail.com>
* tree-chkp.c (chkp_compute_bounds_for_assignment): Don't
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 102da99173d..08541fb7100 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20150619
+20150622
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index 47aaea51e4f..801be514716 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,42 @@
+2015-06-19 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/gigi.h (record_builtin_type): Adjust comment.
+ (tree create_type_decl): Likewise.
+ (create_var_decl_1): Add artificial_p and debug_info_p parameters.
+ (create_var_decl): Likewise.
+ (create_true_var_decl): Delete.
+ (create_subprog_decl): Add debug_info_p parameter.
+ * gcc-interface/decl.c (gnat_to_gnu_entity): Add artificial_p local
+ variable and use it throughout. Remove DECL_ARTIFICIAL settings.
+ <E_Variable>: Adjust calls to create_var_decl and create_var_decl_1.
+ Remove DECL_IGNORED_P settings.
+ <E_Enumeration_Type>: Likewise.
+ <E_Record_Type>: Likewise.
+ <E_Subprogram_Type>: Likewise. Remove artificial_flag local variable.
+ Adjust call to create_subprog_decl.
+ (get_minimal_subprog_decl): Likewise.
+ (elaborate_expression_1): Adjust call to create_var_decl.
+ * gcc-interface/trans.c (gigi): Adjust calls to create_var_decl and
+ create_subprog_decl. Remove DECL_ARTIFICIAL & DECL_IGNORED_P settings.
+ * gcc-interface/utils.c (maybe_pad_type): Likewise.
+ (record_builtin_type): Adjust comment.
+ (create_type_stub_decl): Remove obsolete comment.
+ (create_var_decl_1): Add artificial_p and debug_info_p parameters.
+ Set DECL_ARTIFICIAL and DECL_IGNORED_P accordingly.
+ (create_subprog_decl): Add debug_info_p parameter. Set DECL_IGNORED_P
+ accordingly.
+
+2015-06-19 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/misc.c (LANG_HOOKS_WARN_UNUSED_GLOBAL_DECL): Define.
+
+2015-06-19 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/decl.c (gnat_to_gnu_entity) <E_Function>: Make sure
+ the size of a padding type built around the return type is positive.
+ * gcc-interface/trans.c (gnat_to_gnu) <N_Simple_Return_Statement>:
+ Use INIT_EXPR instead of MODIFY_EXPR to assign to the return object.
+
2015-06-17 Andrew MacLeod <amacleod@redhat.com>
* gcc-interface/cuintp.c: Do not include input.h, line-map.h or is-a.h.
diff --git a/gcc/ada/gcc-interface/decl.c b/gcc/ada/gcc-interface/decl.c
index a17eab6dbfb..af2d11ed50a 100644
--- a/gcc/ada/gcc-interface/decl.c
+++ b/gcc/ada/gcc-interface/decl.c
@@ -244,6 +244,8 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
const Entity_Kind kind = Ekind (gnat_entity);
/* True if this is a type. */
const bool is_type = IN (kind, Type_Kind);
+ /* True if this is an artificial entity. */
+ const bool artificial_p = !Comes_From_Source (gnat_entity);
/* True if debug info is requested for this entity. */
const bool debug_info_p = Needs_Debug_Info (gnat_entity);
/* True if this entity is to be considered as imported. */
@@ -1348,8 +1350,8 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
tree gnu_new_var
= create_var_decl (create_concat_name (gnat_entity, "ALIGN"),
NULL_TREE, gnu_new_type, NULL_TREE, false,
- false, false, false, NULL, gnat_entity);
- DECL_ARTIFICIAL (gnu_new_var) = 1;
+ false, false, false, true, debug_info_p,
+ NULL, gnat_entity);
/* Initialize the aligned field if we have an initializer. */
if (gnu_expr)
@@ -1389,12 +1391,15 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
just above, we have nothing to do here. */
if (!TYPE_IS_THIN_POINTER_P (gnu_type))
{
+ /* This variable is a GNAT encoding used by Workbench: let it
+ go through the debugging information but mark it as
+ artificial: users are not interested in it. */
tree gnu_unc_var
= create_var_decl (concat_name (gnu_entity_name, "UNC"),
NULL_TREE, gnu_type, gnu_expr,
const_flag, Is_Public (gnat_entity),
imported_p || !definition, static_p,
- NULL, gnat_entity);
+ true, debug_info_p, NULL, gnat_entity);
gnu_expr = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_unc_var);
TREE_CONSTANT (gnu_expr) = 1;
@@ -1448,7 +1453,8 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
= create_var_decl_1 (gnu_entity_name, gnu_ext_name, gnu_type,
gnu_expr, const_flag, Is_Public (gnat_entity),
imported_p || !definition, static_p,
- !renamed_obj, attr_list, gnat_entity);
+ artificial_p, debug_info_p, !renamed_obj,
+ attr_list, gnat_entity);
DECL_BY_REF_P (gnu_decl) = used_by_ref;
DECL_POINTS_TO_READONLY_P (gnu_decl) = used_by_ref && inner_const_flag;
DECL_CAN_NEVER_BE_NULL_P (gnu_decl) = Can_Never_Be_Null (gnat_entity);
@@ -1497,19 +1503,13 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
|| Is_Aliased (Etype (gnat_entity))))
{
tree gnu_corr_var
- = create_true_var_decl (gnu_entity_name, gnu_ext_name, gnu_type,
- gnu_expr, true, Is_Public (gnat_entity),
- !definition, static_p, attr_list,
- gnat_entity);
+ = create_var_decl_1 (gnu_entity_name, gnu_ext_name, gnu_type,
+ gnu_expr, true, Is_Public (gnat_entity),
+ !definition, static_p, artificial_p,
+ debug_info_p, false, attr_list,
+ gnat_entity);
SET_DECL_CONST_CORRESPONDING_VAR (gnu_decl, gnu_corr_var);
-
- /* As debugging information will be generated for the variable,
- do not generate debugging information for the constant. */
- if (debug_info_p)
- DECL_IGNORED_P (gnu_decl) = 1;
- else
- DECL_IGNORED_P (gnu_corr_var) = 1;
}
/* If this is a constant, even if we don't need a true variable, we
@@ -1618,12 +1618,12 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
{
tree gnu_value
= UI_To_gnu (Enumeration_Rep (gnat_literal), gnu_type);
+ /* Do not generate debug info for individual enumerators. */
tree gnu_literal
= create_var_decl (get_entity_name (gnat_literal), NULL_TREE,
gnu_type, gnu_value, true, false, false,
+ false, !Comes_From_Source (gnat_literal),
false, NULL, gnat_literal);
- /* Do not generate debug info for individual enumerators. */
- DECL_IGNORED_P (gnu_literal) = 1;
save_gnu_tree (gnat_literal, gnu_literal, false);
gnu_list
= tree_cons (DECL_NAME (gnu_literal), gnu_value, gnu_list);
@@ -1731,12 +1731,12 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
SET_TYPE_RM_MIN_VALUE
(gnu_type, elaborate_expression (Type_Low_Bound (gnat_entity),
gnat_entity, "L", definition, true,
- Needs_Debug_Info (gnat_entity)));
+ debug_info_p));
SET_TYPE_RM_MAX_VALUE
(gnu_type, elaborate_expression (Type_High_Bound (gnat_entity),
gnat_entity, "U", definition, true,
- Needs_Debug_Info (gnat_entity)));
+ debug_info_p));
TYPE_BIASED_REPRESENTATION_P (gnu_type)
= Has_Biased_Representation (gnat_entity);
@@ -1911,12 +1911,12 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
SET_TYPE_RM_MIN_VALUE
(gnu_type, elaborate_expression (Type_Low_Bound (gnat_entity),
gnat_entity, "L", definition, true,
- Needs_Debug_Info (gnat_entity)));
+ debug_info_p));
SET_TYPE_RM_MAX_VALUE
(gnu_type, elaborate_expression (Type_High_Bound (gnat_entity),
gnat_entity, "U", definition, true,
- Needs_Debug_Info (gnat_entity)));
+ debug_info_p));
/* Inherit our alias set from what we're a subtype of, as for
integer subtypes. */
@@ -2215,8 +2215,7 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
}
create_type_decl (create_concat_name (gnat_entity, "XUA"), tem,
- !Comes_From_Source (gnat_entity), debug_info_p,
- gnat_entity);
+ artificial_p, debug_info_p, gnat_entity);
/* Give the fat pointer type a name. If this is a packed array, tell
the debugger how to interpret the underlying bits. */
@@ -2225,8 +2224,7 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
else
gnat_name = gnat_entity;
create_type_decl (create_concat_name (gnat_name, "XUP"), gnu_fat_type,
- !Comes_From_Source (gnat_entity), debug_info_p,
- gnat_entity);
+ artificial_p, debug_info_p, gnat_entity);
/* Create the type to be designated by thin pointers: a record type for
the array and its template. We used to shift the fields to have the
@@ -2672,8 +2670,8 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
gnu_decl
= create_type_decl (gnu_entity_name, gnu_type,
!Comes_From_Source (Etype (gnat_entity))
- && !Comes_From_Source (gnat_entity),
- debug_info_p, gnat_entity);
+ && artificial_p, debug_info_p,
+ gnat_entity);
/* Save it as our equivalent in case the call below elaborates
this type again. */
@@ -3174,7 +3172,7 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
components_to_record (gnu_type, Component_List (record_definition),
gnu_field_list, packed, definition, false,
all_rep, is_unchecked_union,
- !Comes_From_Source (gnat_entity), debug_info_p,
+ artificial_p, debug_info_p,
false, OK_To_Reorder_Components (gnat_entity),
all_rep ? NULL_TREE : bitsize_zero_node, NULL);
@@ -3605,8 +3603,8 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
= create_var_decl (create_concat_name (gnat_entity,
"XVZ"),
NULL_TREE, sizetype, gnu_size_unit,
- false, false, false, false, NULL,
- gnat_entity);
+ false, false, false, false, true,
+ debug_info_p, NULL, gnat_entity);
}
gnu_variant_list.release ();
@@ -3665,8 +3663,8 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
= build_pointer_type
(make_dummy_type (Directly_Designated_Type (gnat_entity)));
gnu_decl = create_type_decl (gnu_entity_name, gnu_type,
- !Comes_From_Source (gnat_entity),
- debug_info_p, gnat_entity);
+ artificial_p, debug_info_p,
+ gnat_entity);
this_made_decl = true;
gnu_type = TREE_TYPE (gnu_decl);
save_gnu_tree (gnat_entity, gnu_decl, false);
@@ -3920,8 +3918,8 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
process_attributes (&gnu_type, &attr_list, false, gnat_entity);
gnu_decl = create_type_decl (gnu_entity_name, gnu_type,
- !Comes_From_Source (gnat_entity),
- debug_info_p, gnat_entity);
+ artificial_p, debug_info_p,
+ gnat_entity);
this_made_decl = true;
gnu_type = TREE_TYPE (gnu_decl);
save_gnu_tree (gnat_entity, gnu_decl, false);
@@ -4104,7 +4102,6 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
|| imported_p
|| (Convention (gnat_entity) == Convention_Intrinsic
&& Has_Pragma_Inline_Always (gnat_entity)));
- bool artificial_flag = !Comes_From_Source (gnat_entity);
/* The semantics of "pure" in Ada essentially matches that of "const"
in the back-end. In particular, both properties are orthogonal to
the "nothrow" property if the EH circuitry is explicit in the
@@ -4242,12 +4239,23 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
if (CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_return_type)))
{
tree orig_type = gnu_return_type;
+ tree max_return_size
+ = max_size (TYPE_SIZE (gnu_return_type), true);
+
+ /* If the size overflows to 0, set it to an arbitrary positive
+ value so that assignments in the type are preserved. Their
+ actual size is independent of this positive value. */
+ if (TREE_CODE (max_return_size) == INTEGER_CST
+ && TREE_OVERFLOW (max_return_size)
+ && integer_zerop (max_return_size))
+ {
+ max_return_size = copy_node (bitsize_unit_node);
+ TREE_OVERFLOW (max_return_size) = 1;
+ }
gnu_return_type
- = maybe_pad_type (gnu_return_type,
- max_size (TYPE_SIZE (gnu_return_type),
- true),
- 0, gnat_entity, false, false, definition,
+ = maybe_pad_type (gnu_return_type, max_return_size, 0,
+ gnat_entity, false, false, definition,
true);
/* Declare it now since it will never be declared otherwise.
@@ -4600,7 +4608,8 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
gnu_decl
= create_var_decl (gnu_entity_name, gnu_ext_name, gnu_type,
gnu_address, false, Is_Public (gnat_entity),
- extern_flag, false, NULL, gnat_entity);
+ extern_flag, false, artificial_p,
+ debug_info_p, NULL, gnat_entity);
DECL_BY_REF_P (gnu_decl) = 1;
}
@@ -4608,7 +4617,7 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
{
process_attributes (&gnu_type, &attr_list, false, gnat_entity);
gnu_decl
- = create_type_decl (gnu_entity_name, gnu_type, artificial_flag,
+ = create_type_decl (gnu_entity_name, gnu_type, artificial_p,
debug_info_p, gnat_entity);
}
else
@@ -4616,8 +4625,8 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
gnu_decl
= create_subprog_decl (gnu_entity_name, gnu_ext_name, gnu_type,
gnu_param_list, inline_status,
- public_flag, extern_flag, artificial_flag,
- attr_list, gnat_entity);
+ public_flag, extern_flag, artificial_p,
+ debug_info_p, attr_list, gnat_entity);
/* This is unrelated to the stub built right above. */
DECL_STUBBED_P (gnu_decl)
= Convention (gnat_entity) == Convention_Stubbed;
@@ -5009,8 +5018,8 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
if (!gnu_decl)
gnu_decl = create_type_decl (gnu_entity_name, gnu_type,
- !Comes_From_Source (gnat_entity),
- debug_info_p, gnat_entity);
+ artificial_p, debug_info_p,
+ gnat_entity);
else
{
TREE_TYPE (gnu_decl) = gnu_type;
@@ -5174,29 +5183,6 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
Set_RM_Size (gnat_entity, annotate_value (rm_size (gnu_type)));
}
- /* If we really have a ..._DECL node, set a couple of flags on it. But we
- cannot do so if we are reusing the ..._DECL node made for an equivalent
- type or an alias or a renamed object as the predicates don't apply to it
- but to GNAT_ENTITY. */
- if (DECL_P (gnu_decl)
- && !(is_type && gnat_equiv_type != gnat_entity)
- && !Present (Alias (gnat_entity))
- && !(Present (Renamed_Object (gnat_entity)) && saved))
- {
- /* ??? DECL_ARTIFICIAL, and possibly DECL_IGNORED_P below, should
- be set before calling rest_of_decl_compilation above (through
- create_var_decl_1). This is because rest_of_decl_compilation
- calls the debugging backend and will create a DIE without
- DW_AT_artificial.
-
- This is currently causing gnat.dg/specs/debug1.ads to FAIL. */
- if (!Comes_From_Source (gnat_entity))
- DECL_ARTIFICIAL (gnu_decl) = 1;
-
- if (!debug_info_p)
- DECL_IGNORED_P (gnu_decl) = 1;
- }
-
/* If we haven't already, associate the ..._DECL node that we just made with
the input GNAT entity node. */
if (!saved)
@@ -5385,7 +5371,8 @@ get_minimal_subprog_decl (Entity_Id gnat_entity)
return
create_subprog_decl (gnu_entity_name, gnu_ext_name, void_ftype, NULL_TREE,
- is_disabled, true, true, true, attr_list, gnat_entity);
+ is_disabled, true, true, true, false, attr_list,
+ gnat_entity);
}
/* Return whether the E_Subprogram_Type/E_Function/E_Procedure GNAT_ENTITY is
@@ -6250,14 +6237,10 @@ elaborate_expression_1 (tree gnu_expr, Entity_Id gnat_entity, const char *s,
new variable must not be tagged "external", as we used to do here as
soon as DEFINITION was false. */
tree gnu_decl
- = create_var_decl_1 (create_concat_name (gnat_entity, s), NULL_TREE,
- TREE_TYPE (gnu_expr), gnu_expr, true,
- expr_public_p, !definition && expr_global_p,
- expr_global_p, !need_debug, NULL, gnat_entity);
-
- /* Whether or not gnat_entity comes from source, this variable is a
- compilation artifact. */
- DECL_ARTIFICIAL (gnu_decl) = 1;
+ = create_var_decl (create_concat_name (gnat_entity, s), NULL_TREE,
+ TREE_TYPE (gnu_expr), gnu_expr, true,
+ expr_public_p, !definition && expr_global_p,
+ expr_global_p, true, need_debug, NULL, gnat_entity);
/* Using this variable at debug time (if need_debug is true) requires a
proper location. The back-end will compute a location for this
diff --git a/gcc/ada/gcc-interface/gigi.h b/gcc/ada/gcc-interface/gigi.h
index b85f3512d40..118ce33ccd0 100644
--- a/gcc/ada/gcc-interface/gigi.h
+++ b/gcc/ada/gcc-interface/gigi.h
@@ -599,7 +599,7 @@ extern void build_dummy_unc_pointer_types (Entity_Id gnat_desig_type,
tree gnu_desig_type);
/* Record TYPE as a builtin type for Ada. NAME is the name of the type.
- ARTIFICIAL_P is true if it's a type that was generated by the compiler. */
+ ARTIFICIAL_P is true if the type was generated by the compiler. */
extern void record_builtin_type (const char *name, tree type,
bool artificial_p);
@@ -660,10 +660,10 @@ extern tree create_range_type (tree type, tree min, tree max);
extern tree create_type_stub_decl (tree type_name, tree type);
/* Return a TYPE_DECL node. TYPE_NAME gives the name of the type and TYPE
- is a ..._TYPE node giving its data type. ARTIFICIAL_P is true if this
- is a declaration that was generated by the compiler. DEBUG_INFO_P is
- true if we need to write debug information about this type. GNAT_NODE
- is used for the position of the decl. */
+ is a ..._TYPE node giving its data type. ARTIFICIAL_P is true if the
+ declaration was generated by the compiler. DEBUG_INFO_P is true if we
+ need to write debug information about this type. GNAT_NODE is used for
+ the position of the decl. */
extern tree create_type_decl (tree type_name, tree type, bool artificial_p,
bool debug_info_p, Node_Id gnat_node);
@@ -686,32 +686,28 @@ extern tree create_type_decl (tree type_name, tree type, bool artificial_p,
STATIC_FLAG is only relevant when not at top level. In that case
it indicates whether to always allocate storage to the variable.
+ ARTIFICIAL_P is true if the variable was generated by the compiler.
+
+ DEBUG_INFO_P is true if we need to write debug information for it.
+
GNAT_NODE is used for the position of the decl. */
extern tree
create_var_decl_1 (tree var_name, tree asm_name, tree type, tree var_init,
bool const_flag, bool public_flag, bool extern_flag,
- bool static_flag, bool const_decl_allowed_p,
- struct attrib *attr_list, Node_Id gnat_node);
+ bool static_flag, bool artificial_p, bool debug_info_p,
+ bool const_decl_allowed_p, struct attrib *attr_list,
+ Node_Id gnat_node);
/* Wrapper around create_var_decl_1 for cases where we don't care whether
a VAR or a CONST decl node is created. */
#define create_var_decl(var_name, asm_name, type, var_init, \
const_flag, public_flag, extern_flag, \
- static_flag, attr_list, gnat_node) \
+ static_flag, artificial_p, debug_info_p,\
+ attr_list, gnat_node) \
create_var_decl_1 (var_name, asm_name, type, var_init, \
const_flag, public_flag, extern_flag, \
- static_flag, true, attr_list, gnat_node)
-
-/* Wrapper around create_var_decl_1 for cases where a VAR_DECL node is
- required. The primary intent is for DECL_CONST_CORRESPONDING_VARs, which
- must be VAR_DECLs and on which we want TREE_READONLY set to have them
- possibly assigned to a readonly data section. */
-#define create_true_var_decl(var_name, asm_name, type, var_init, \
- const_flag, public_flag, extern_flag, \
- static_flag, attr_list, gnat_node) \
- create_var_decl_1 (var_name, asm_name, type, var_init, \
- const_flag, public_flag, extern_flag, \
- static_flag, false, attr_list, gnat_node)
+ static_flag, artificial_p, debug_info_p, \
+ true, attr_list, gnat_node)
/* Return a FIELD_DECL node. FIELD_NAME is the field's name, FIELD_TYPE is
its type and RECORD_TYPE is the type of the enclosing record. If SIZE is
@@ -739,14 +735,19 @@ extern tree create_label_decl (tree label_name, Node_Id gnat_node);
node), PARAM_DECL_LIST is the list of the subprogram arguments (a list of
PARM_DECL nodes chained through the DECL_CHAIN field).
- INLINE_STATUS, PUBLIC_FLAG, EXTERN_FLAG, ARTIFICIAL_FLAG and ATTR_LIST are
- used to set the appropriate fields in the FUNCTION_DECL. GNAT_NODE is
- used for the position of the decl. */
+ INLINE_STATUS, PUBLIC_FLAG, EXTERN_FLAG and ATTR_LIST are used to set the
+ appropriate fields in the FUNCTION_DECL.
+
+ ARTIFICIAL_P is true if the subprogram was generated by the compiler.
+
+ DEBUG_INFO_P is true if we need to write debug information for it.
+
+ GNAT_NODE is used for the position of the decl. */
extern tree create_subprog_decl (tree subprog_name, tree asm_name,
tree subprog_type, tree param_decl_list,
enum inline_status_t inline_status,
bool public_flag, bool extern_flag,
- bool artificial_flag,
+ bool artificial_p, bool debug_info_p,
struct attrib *attr_list, Node_Id gnat_node);
/* Process the attributes in ATTR_LIST for NODE, which is either a DECL or
diff --git a/gcc/ada/gcc-interface/misc.c b/gcc/ada/gcc-interface/misc.c
index cc1f92345cc..55d40dd5527 100644
--- a/gcc/ada/gcc-interface/misc.c
+++ b/gcc/ada/gcc-interface/misc.c
@@ -1002,6 +1002,8 @@ gnat_init_ts (void)
#define LANG_HOOKS_DEEP_UNSHARING true
#undef LANG_HOOKS_INIT_TS
#define LANG_HOOKS_INIT_TS gnat_init_ts
+#undef LANG_HOOKS_WARN_UNUSED_GLOBAL_DECL
+#define LANG_HOOKS_WARN_UNUSED_GLOBAL_DECL hook_bool_const_tree_false
struct lang_hooks lang_hooks = LANG_HOOKS_INITIALIZER;
diff --git a/gcc/ada/gcc-interface/trans.c b/gcc/ada/gcc-interface/trans.c
index 85a77ea5faf..bf15955e4df 100644
--- a/gcc/ada/gcc-interface/trans.c
+++ b/gcc/ada/gcc-interface/trans.c
@@ -390,16 +390,14 @@ gigi (Node_Id gnat_root,
gcc_assert (t == boolean_false_node);
t = create_var_decl (get_entity_name (gnat_literal), NULL_TREE,
boolean_type_node, t, true, false, false, false,
- NULL, gnat_literal);
- DECL_IGNORED_P (t) = 1;
+ true, false, NULL, gnat_literal);
save_gnu_tree (gnat_literal, t, false);
gnat_literal = Next_Literal (gnat_literal);
t = UI_To_gnu (Enumeration_Rep (gnat_literal), boolean_type_node);
gcc_assert (t == boolean_true_node);
t = create_var_decl (get_entity_name (gnat_literal), NULL_TREE,
boolean_type_node, t, true, false, false, false,
- NULL, gnat_literal);
- DECL_IGNORED_P (t) = 1;
+ true, false, NULL, gnat_literal);
save_gnu_tree (gnat_literal, t, false);
void_ftype = build_function_type_list (void_type_node, NULL_TREE);
@@ -412,7 +410,8 @@ gigi (Node_Id gnat_root,
memory. */
malloc_decl
= create_subprog_decl (get_identifier ("__gnat_malloc"), NULL_TREE,
- ftype, NULL_TREE, is_disabled, true, true, true,
+ ftype,
+ NULL_TREE, is_disabled, true, true, true, false,
NULL, Empty);
DECL_IS_MALLOC (malloc_decl) = 1;
@@ -422,8 +421,8 @@ gigi (Node_Id gnat_root,
build_function_type_list (void_type_node,
ptr_type_node,
NULL_TREE),
- NULL_TREE, is_disabled, true, true, true, NULL,
- Empty);
+ NULL_TREE, is_disabled, true, true, true, false,
+ NULL, Empty);
/* This is used for 64-bit multiplication with overflow checking. */
int64_type = gnat_type_for_size (64, 0);
@@ -431,8 +430,8 @@ gigi (Node_Id gnat_root,
= create_subprog_decl (get_identifier ("__gnat_mulv64"), NULL_TREE,
build_function_type_list (int64_type, int64_type,
int64_type, NULL_TREE),
- NULL_TREE, is_disabled, true, true, true, NULL,
- Empty);
+ NULL_TREE, is_disabled, true, true, true, false,
+ NULL, Empty);
/* Name of the _Parent field in tagged record types. */
parent_name_id = get_identifier (Get_Name_String (Name_uParent));
@@ -453,16 +452,14 @@ gigi (Node_Id gnat_root,
= create_subprog_decl
(get_identifier ("system__soft_links__get_jmpbuf_address_soft"),
NULL_TREE, build_function_type_list (jmpbuf_ptr_type, NULL_TREE),
- NULL_TREE, is_disabled, true, true, true, NULL, Empty);
- DECL_IGNORED_P (get_jmpbuf_decl) = 1;
+ NULL_TREE, is_disabled, true, true, true, false, NULL, Empty);
set_jmpbuf_decl
= create_subprog_decl
(get_identifier ("system__soft_links__set_jmpbuf_address_soft"),
NULL_TREE, build_function_type_list (void_type_node, jmpbuf_ptr_type,
NULL_TREE),
- NULL_TREE, is_disabled, true, true, true, NULL, Empty);
- DECL_IGNORED_P (set_jmpbuf_decl) = 1;
+ NULL_TREE, is_disabled, true, true, true, false, NULL, Empty);
/* setjmp returns an integer and has one operand, which is a pointer to
a jmpbuf. */
@@ -471,7 +468,7 @@ gigi (Node_Id gnat_root,
(get_identifier ("__builtin_setjmp"), NULL_TREE,
build_function_type_list (integer_type_node, jmpbuf_ptr_type,
NULL_TREE),
- NULL_TREE, is_disabled, true, true, true, NULL, Empty);
+ NULL_TREE, is_disabled, true, true, true, false, NULL, Empty);
DECL_BUILT_IN_CLASS (setjmp_decl) = BUILT_IN_NORMAL;
DECL_FUNCTION_CODE (setjmp_decl) = BUILT_IN_SETJMP;
@@ -481,7 +478,7 @@ gigi (Node_Id gnat_root,
= create_subprog_decl
(get_identifier ("__builtin_update_setjmp_buf"), NULL_TREE,
build_function_type_list (void_type_node, jmpbuf_ptr_type, NULL_TREE),
- NULL_TREE, is_disabled, true, true, true, NULL, Empty);
+ NULL_TREE, is_disabled, true, true, true, false, NULL, Empty);
DECL_BUILT_IN_CLASS (update_setjmp_buf_decl) = BUILT_IN_NORMAL;
DECL_FUNCTION_CODE (update_setjmp_buf_decl) = BUILT_IN_UPDATE_SETJMP_BUF;
@@ -490,29 +487,28 @@ gigi (Node_Id gnat_root,
begin_handler_decl
= create_subprog_decl (get_identifier ("__gnat_begin_handler"), NULL_TREE,
- ftype, NULL_TREE, is_disabled, true, true, true,
+ ftype, NULL_TREE,
+ is_disabled, true, true, true, false,
NULL, Empty);
- DECL_IGNORED_P (begin_handler_decl) = 1;
end_handler_decl
= create_subprog_decl (get_identifier ("__gnat_end_handler"), NULL_TREE,
- ftype, NULL_TREE, is_disabled, true, true, true,
+ ftype, NULL_TREE,
+ is_disabled, true, true, true, false,
NULL, Empty);
- DECL_IGNORED_P (end_handler_decl) = 1;
unhandled_except_decl
= create_subprog_decl (get_identifier ("__gnat_unhandled_except_handler"),
- NULL_TREE,
- ftype, NULL_TREE, is_disabled, true, true, true,
+ NULL_TREE, ftype, NULL_TREE,
+ is_disabled, true, true, true, false,
NULL, Empty);
- DECL_IGNORED_P (unhandled_except_decl) = 1;
reraise_zcx_decl
= create_subprog_decl (get_identifier ("__gnat_reraise_zcx"), NULL_TREE,
- ftype, NULL_TREE, is_disabled, true, true, true,
+ ftype, NULL_TREE,
+ is_disabled, true, true, true, false,
NULL, Empty);
/* Indicate that these never return. */
- DECL_IGNORED_P (reraise_zcx_decl) = 1;
TREE_THIS_VOLATILE (reraise_zcx_decl) = 1;
TREE_SIDE_EFFECTS (reraise_zcx_decl) = 1;
TREE_TYPE (reraise_zcx_decl)
@@ -530,7 +526,7 @@ gigi (Node_Id gnat_root,
build_pointer_type
(unsigned_char_type_node),
integer_type_node, NULL_TREE),
- NULL_TREE, is_disabled, true, true, true, NULL, Empty);
+ NULL_TREE, is_disabled, true, true, true, false, NULL, Empty);
TREE_THIS_VOLATILE (decl) = 1;
TREE_SIDE_EFFECTS (decl) = 1;
TREE_TYPE (decl)
@@ -561,15 +557,14 @@ gigi (Node_Id gnat_root,
(get_identifier ("system__soft_links__get_gnat_exception"), NULL_TREE,
build_function_type_list (build_pointer_type (except_type_node),
NULL_TREE),
- NULL_TREE, is_disabled, true, true, true, NULL, Empty);
- DECL_IGNORED_P (get_excptr_decl) = 1;
+ NULL_TREE, is_disabled, true, true, true, false, NULL, Empty);
set_exception_parameter_decl
= create_subprog_decl
(get_identifier ("__gnat_set_exception_parameter"), NULL_TREE,
build_function_type_list (void_type_node, ptr_type_node, ptr_type_node,
NULL_TREE),
- NULL_TREE, is_disabled, true, true, true, NULL, Empty);
+ NULL_TREE, is_disabled, true, true, true, false, NULL, Empty);
raise_nodefer_decl
= create_subprog_decl
@@ -577,7 +572,7 @@ gigi (Node_Id gnat_root,
build_function_type_list (void_type_node,
build_pointer_type (except_type_node),
NULL_TREE),
- NULL_TREE, is_disabled, true, true, true, NULL, Empty);
+ NULL_TREE, is_disabled, true, true, true, false, NULL, Empty);
/* Indicate that it never returns. */
TREE_THIS_VOLATILE (raise_nodefer_decl) = 1;
@@ -625,20 +620,23 @@ gigi (Node_Id gnat_root,
others_decl
= create_var_decl (get_identifier ("OTHERS"),
get_identifier ("__gnat_others_value"),
- unsigned_char_type_node,
- NULL_TREE, true, false, true, false, NULL, Empty);
+ unsigned_char_type_node, NULL_TREE,
+ true, false, true, false, true, false,
+ NULL, Empty);
all_others_decl
= create_var_decl (get_identifier ("ALL_OTHERS"),
get_identifier ("__gnat_all_others_value"),
- unsigned_char_type_node,
- NULL_TREE, true, false, true, false, NULL, Empty);
+ unsigned_char_type_node, NULL_TREE,
+ true, false, true, false, true, false,
+ NULL, Empty);
unhandled_others_decl
= create_var_decl (get_identifier ("UNHANDLED_OTHERS"),
get_identifier ("__gnat_unhandled_others_value"),
- unsigned_char_type_node,
- NULL_TREE, true, false, true, false, NULL, Empty);
+ unsigned_char_type_node, NULL_TREE,
+ true, false, true, false, true, false,
+ NULL, Empty);
main_identifier_node = get_identifier ("main");
@@ -750,7 +748,8 @@ build_raise_check (int check, enum exception_info_kind kind)
result
= create_subprog_decl (get_identifier (Name_Buffer),
NULL_TREE, ftype, NULL_TREE,
- is_disabled, true, true, true, NULL, Empty);
+ is_disabled, true, true, true, false,
+ NULL, Empty);
/* Indicate that it never returns. */
TREE_THIS_VOLATILE (result) = 1;
@@ -3664,7 +3663,8 @@ Subprogram_Body_to_gnu (Node_Id gnat_node)
gnu_return_var
= create_var_decl (get_identifier ("RETVAL"), NULL_TREE,
gnu_return_type, NULL_TREE, false, false,
- false, false, NULL, gnat_subprog_id);
+ false, false, true, false,
+ NULL, gnat_subprog_id);
TREE_VALUE (gnu_return_var_elmt) = gnu_return_var;
}
@@ -4068,10 +4068,7 @@ create_temporary (const char *prefix, tree type)
{
tree gnu_temp = create_var_decl (create_tmp_var_name (prefix), NULL_TREE,
type, NULL_TREE, false, false, false, false,
- NULL, Empty);
- DECL_ARTIFICIAL (gnu_temp) = 1;
- DECL_IGNORED_P (gnu_temp) = 1;
-
+ true, false, NULL, Empty);
return gnu_temp;
}
@@ -4847,8 +4844,8 @@ Handled_Sequence_Of_Statements_to_gnu (Node_Id gnat_node)
= create_var_decl (get_identifier ("JMPBUF_SAVE"), NULL_TREE,
jmpbuf_ptr_type,
build_call_n_expr (get_jmpbuf_decl, 0),
- false, false, false, false, NULL, gnat_node);
- DECL_ARTIFICIAL (gnu_jmpsave_decl) = 1;
+ false, false, false, false, true, false,
+ NULL, gnat_node);
/* The __builtin_setjmp receivers will immediately reinstall it. Now
because of the unstructured form of EH used by setjmp_longjmp, there
@@ -4859,8 +4856,8 @@ Handled_Sequence_Of_Statements_to_gnu (Node_Id gnat_node)
= create_var_decl (get_identifier ("JMP_BUF"), NULL_TREE,
jmpbuf_type,
NULL_TREE,
- false, false, false, false, NULL, gnat_node);
- DECL_ARTIFICIAL (gnu_jmpbuf_decl) = 1;
+ false, false, false, false, true, false,
+ NULL, gnat_node);
set_block_jmpbuf_decl (gnu_jmpbuf_decl);
@@ -4917,7 +4914,7 @@ Handled_Sequence_Of_Statements_to_gnu (Node_Id gnat_node)
create_var_decl (get_identifier ("EXCEPT_PTR"), NULL_TREE,
build_pointer_type (except_type_node),
build_call_n_expr (get_excptr_decl, 0),
- false, false, false, false,
+ false, false, false, false, true, false,
NULL, gnat_node));
/* Generate code for each handler. The N_Exception_Handler case does the
@@ -5163,10 +5160,11 @@ Exception_Handler_to_gnu_zcx (Node_Id gnat_node)
= build_call_expr (builtin_decl_explicit (BUILT_IN_EH_POINTER),
1, integer_zero_node);
prev_gnu_incoming_exc_ptr = gnu_incoming_exc_ptr;
- gnu_incoming_exc_ptr = create_var_decl (get_identifier ("EXPTR"), NULL_TREE,
- ptr_type_node, gnu_current_exc_ptr,
- false, false, false, false,
- NULL, gnat_node);
+ gnu_incoming_exc_ptr
+ = create_var_decl (get_identifier ("EXPTR"), NULL_TREE,
+ ptr_type_node, gnu_current_exc_ptr,
+ false, false, false, false, true, true,
+ NULL, gnat_node);
add_stmt_with_node (build_call_n_expr (begin_handler_decl, 1,
gnu_incoming_exc_ptr),
@@ -5212,8 +5210,8 @@ Compilation_Unit_to_gnu (Node_Id gnat_node)
tree gnu_elab_proc_decl
= create_subprog_decl
(create_concat_name (gnat_unit_entity, body_p ? "elabb" : "elabs"),
- NULL_TREE, void_ftype, NULL_TREE, is_disabled, true, false, true, NULL,
- gnat_unit);
+ NULL_TREE, void_ftype, NULL_TREE, is_disabled, true, false, true, true,
+ NULL, gnat_unit);
struct elab_info *info;
vec_safe_push (gnu_elab_proc_stack, gnu_elab_proc_decl);
@@ -6127,7 +6125,7 @@ gnat_to_gnu (Node_Id gnat_node)
(Entity (Prefix (gnat_node)),
attr == Attr_Elab_Body ? "elabb" : "elabs"),
NULL_TREE, void_ftype, NULL_TREE, is_disabled,
- true, true, true, NULL, gnat_node);
+ true, true, true, true, NULL, gnat_node);
gnu_result = Attribute_to_gnu (gnat_node, &gnu_result_type, attr);
}
@@ -6861,7 +6859,7 @@ gnat_to_gnu (Node_Id gnat_node)
tree gnu_ret_deref
= build_unary_op (INDIRECT_REF, TREE_TYPE (gnu_ret_val),
gnu_ret_obj);
- gnu_result = build2 (MODIFY_EXPR, void_type_node,
+ gnu_result = build2 (INIT_EXPR, void_type_node,
gnu_ret_deref, gnu_ret_val);
add_stmt_with_node (gnu_result, gnat_node);
gnu_ret_val = NULL_TREE;
@@ -7087,7 +7085,8 @@ gnat_to_gnu (Node_Id gnat_node)
deallocated. */
gnu_expr = create_var_decl (get_identifier ("SAVED_EXPTR"), NULL_TREE,
ptr_type_node, gnu_incoming_exc_ptr,
- false, false, false, false, NULL, gnat_node);
+ false, false, false, false, true, true,
+ NULL, gnat_node);
add_stmt (build_binary_op (MODIFY_EXPR, NULL_TREE, gnu_incoming_exc_ptr,
convert (ptr_type_node, integer_zero_node)));
diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c
index e4b96d7b120..fbdf4733833 100644
--- a/gcc/ada/gcc-interface/utils.c
+++ b/gcc/ada/gcc-interface/utils.c
@@ -1377,8 +1377,25 @@ maybe_pad_type (tree type, tree size, unsigned int align,
&& !(TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
&& DECL_IGNORED_P (TYPE_NAME (type))))
{
- tree marker = make_node (RECORD_TYPE);
tree name = TYPE_IDENTIFIER (record);
+ tree size_unit = TYPE_SIZE_UNIT (record);
+
+ /* A variable that holds the size is required even with no encoding since
+ it will be referenced by debugging information attributes. At global
+ level, we need a single variable across all translation units. */
+ if (size
+ && TREE_CODE (size) != INTEGER_CST
+ && (definition || global_bindings_p ()))
+ {
+ size_unit
+ = create_var_decl (concat_name (name, "XVZ"), NULL_TREE, sizetype,
+ size_unit, true, global_bindings_p (),
+ !definition && global_bindings_p (), false,
+ true, true, NULL, gnat_entity);
+ TYPE_SIZE_UNIT (record) = size_unit;
+ }
+
+ tree marker = make_node (RECORD_TYPE);
tree orig_name = TYPE_IDENTIFIER (type);
TYPE_NAME (marker) = concat_name (name, "XVS");
@@ -1388,14 +1405,9 @@ maybe_pad_type (tree type, tree size, unsigned int align,
marker, NULL_TREE, NULL_TREE,
0, 0),
0, true);
+ TYPE_SIZE_UNIT (marker) = size_unit;
add_parallel_type (record, marker);
-
- if (definition && size && TREE_CODE (size) != INTEGER_CST)
- TYPE_SIZE_UNIT (marker)
- = create_var_decl (concat_name (name, "XVZ"), NULL_TREE, sizetype,
- TYPE_SIZE_UNIT (record), false, false, false,
- false, NULL, gnat_entity);
}
rest_of_record_type_compilation (record);
@@ -1537,7 +1549,7 @@ relate_alias_sets (tree gnu_new_type, tree gnu_old_type, enum alias_set_op op)
}
/* Record TYPE as a builtin type for Ada. NAME is the name of the type.
- ARTIFICIAL_P is true if it's a type that was generated by the compiler. */
+ ARTIFICIAL_P is true if the type was generated by the compiler. */
void
record_builtin_type (const char *name, tree type, bool artificial_p)
@@ -2241,9 +2253,6 @@ create_range_type (tree type, tree min, tree max)
tree
create_type_stub_decl (tree type_name, tree type)
{
- /* Using a named TYPE_DECL ensures that a type name marker is emitted in
- STABS while setting DECL_ARTIFICIAL ensures that no DW_TAG_typedef is
- emitted in DWARF. */
tree type_decl = build_decl (input_location, TYPE_DECL, type_name, type);
DECL_ARTIFICIAL (type_decl) = 1;
TYPE_ARTIFICIAL (type) = 1;
@@ -2251,10 +2260,10 @@ create_type_stub_decl (tree type_name, tree type)
}
/* Return a TYPE_DECL node. TYPE_NAME gives the name of the type and TYPE
- is a ..._TYPE node giving its data type. ARTIFICIAL_P is true if this
- is a declaration that was generated by the compiler. DEBUG_INFO_P is
- true if we need to write debug information about this type. GNAT_NODE
- is used for the position of the decl. */
+ is a ..._TYPE node giving its data type. ARTIFICIAL_P is true if the
+ declaration was generated by the compiler. DEBUG_INFO_P is true if we
+ need to write debug information about this type. GNAT_NODE is used for
+ the position of the decl. */
tree
create_type_decl (tree type_name, tree type, bool artificial_p,
@@ -2322,13 +2331,18 @@ create_type_decl (tree type_name, tree type, bool artificial_p,
STATIC_FLAG is only relevant when not at top level. In that case
it indicates whether to always allocate storage to the variable.
+ ARTIFICIAL_P is true if the variable was generated by the compiler.
+
+ DEBUG_INFO_P is true if we need to write debug information for it.
+
GNAT_NODE is used for the position of the decl. */
tree
create_var_decl_1 (tree var_name, tree asm_name, tree type, tree var_init,
bool const_flag, bool public_flag, bool extern_flag,
- bool static_flag, bool const_decl_allowed_p,
- struct attrib *attr_list, Node_Id gnat_node)
+ bool static_flag, bool artificial_p, bool debug_info_p,
+ bool const_decl_allowed_p, struct attrib *attr_list,
+ Node_Id gnat_node)
{
/* Whether the object has static storage duration, either explicitly or by
virtue of being declared at the global level. */
@@ -2379,10 +2393,14 @@ create_var_decl_1 (tree var_name, tree asm_name, tree type, tree var_init,
if (var_init && !init_const && global_bindings_p ())
Check_Elaboration_Code_Allowed (gnat_node);
- DECL_INITIAL (var_decl) = var_init;
- TREE_READONLY (var_decl) = const_flag;
+ /* Attach the initializer, if any. */
+ DECL_INITIAL (var_decl) = var_init;
+
+ /* Directly set some flags. */
+ DECL_ARTIFICIAL (var_decl) = artificial_p;
DECL_EXTERNAL (var_decl) = extern_flag;
TREE_CONSTANT (var_decl) = constant_p;
+ TREE_READONLY (var_decl) = const_flag;
/* We need to allocate static storage for an object with static storage
duration if it isn't external. */
@@ -2402,14 +2420,18 @@ create_var_decl_1 (tree var_name, tree asm_name, tree type, tree var_init,
&& !have_global_bss_p ())
DECL_COMMON (var_decl) = 1;
- /* For an external constant whose initializer is not absolute, do not emit
- debug info. In DWARF this would mean a global relocation in a read-only
- section which runs afoul of the PE-COFF run-time relocation mechanism. */
- if (extern_flag
- && constant_p
- && var_init
- && initializer_constant_valid_p (var_init, TREE_TYPE (var_init))
- != null_pointer_node)
+ /* Do not emit debug info for a CONST_DECL if optimization isn't enabled,
+ since we will create an associated variable. Likewise for an external
+ constant whose initializer is not absolute, because this would mean a
+ global relocation in a read-only section which runs afoul of the PE-COFF
+ run-time relocation mechanism. */
+ if (!debug_info_p
+ || (TREE_CODE (var_decl) == CONST_DECL && !optimize)
+ || (extern_flag
+ && constant_p
+ && var_init
+ && initializer_constant_valid_p (var_init, TREE_TYPE (var_init))
+ != null_pointer_node))
DECL_IGNORED_P (var_decl) = 1;
if (TYPE_VOLATILE (type))
@@ -3023,15 +3045,21 @@ create_label_decl (tree label_name, Node_Id gnat_node)
node), PARAM_DECL_LIST is the list of the subprogram arguments (a list of
PARM_DECL nodes chained through the DECL_CHAIN field).
- INLINE_STATUS, PUBLIC_FLAG, EXTERN_FLAG, ARTIFICIAL_FLAG and ATTR_LIST are
- used to set the appropriate fields in the FUNCTION_DECL. GNAT_NODE is
- used for the position of the decl. */
+ INLINE_STATUS, PUBLIC_FLAG, EXTERN_FLAG and ATTR_LIST are used to set the
+ appropriate fields in the FUNCTION_DECL.
+
+ ARTIFICIAL_P is true if the subprogram was generated by the compiler.
+
+ DEBUG_INFO_P is true if we need to write debug information for it.
+
+ GNAT_NODE is used for the position of the decl. */
tree
create_subprog_decl (tree subprog_name, tree asm_name, tree subprog_type,
tree param_decl_list, enum inline_status_t inline_status,
- bool public_flag, bool extern_flag, bool artificial_flag,
- struct attrib *attr_list, Node_Id gnat_node)
+ bool public_flag, bool extern_flag, bool artificial_p,
+ bool debug_info_p, struct attrib *attr_list,
+ Node_Id gnat_node)
{
tree subprog_decl = build_decl (input_location, FUNCTION_DECL, subprog_name,
subprog_type);
@@ -3039,7 +3067,7 @@ create_subprog_decl (tree subprog_name, tree asm_name, tree subprog_type,
TREE_TYPE (subprog_type));
DECL_ARGUMENTS (subprog_decl) = param_decl_list;
- DECL_ARTIFICIAL (subprog_decl) = artificial_flag;
+ DECL_ARTIFICIAL (subprog_decl) = artificial_p;
DECL_EXTERNAL (subprog_decl) = extern_flag;
switch (inline_status)
@@ -3062,13 +3090,16 @@ create_subprog_decl (tree subprog_name, tree asm_name, tree subprog_type,
case is_enabled:
DECL_DECLARED_INLINE_P (subprog_decl) = 1;
- DECL_NO_INLINE_WARNING_P (subprog_decl) = artificial_flag;
+ DECL_NO_INLINE_WARNING_P (subprog_decl) = artificial_p;
break;
default:
gcc_unreachable ();
}
+ if (!debug_info_p)
+ DECL_IGNORED_P (subprog_decl) = 1;
+
TREE_PUBLIC (subprog_decl) = public_flag;
TREE_READONLY (subprog_decl) = TYPE_READONLY (subprog_type);
TREE_THIS_VOLATILE (subprog_decl) = TYPE_VOLATILE (subprog_type);
diff --git a/gcc/auto-inc-dec.c b/gcc/auto-inc-dec.c
index 3241ed7c1e2..df52229322d 100644
--- a/gcc/auto-inc-dec.c
+++ b/gcc/auto-inc-dec.c
@@ -767,28 +767,6 @@ get_next_ref (int regno, basic_block bb, rtx_insn **next_array)
}
-/* Reverse the operands in a mem insn. */
-
-static void
-reverse_mem (void)
-{
- rtx tmp = mem_insn.reg1;
- mem_insn.reg1 = mem_insn.reg0;
- mem_insn.reg0 = tmp;
-}
-
-
-/* Reverse the operands in a inc insn. */
-
-static void
-reverse_inc (void)
-{
- rtx tmp = inc_insn.reg1;
- inc_insn.reg1 = inc_insn.reg0;
- inc_insn.reg0 = tmp;
-}
-
-
/* Return true if INSN is of a form "a = b op c" where a and b are
regs. op is + if c is a reg and +|- if c is a const. Fill in
INC_INSN with what is found.
@@ -857,7 +835,7 @@ parse_add_or_inc (rtx_insn *insn, bool before_mem)
{
/* Reverse the two operands and turn *_ADD into *_INC since
a = c + a. */
- reverse_inc ();
+ std::swap (inc_insn.reg0, inc_insn.reg1);
inc_insn.form = before_mem ? FORM_PRE_INC : FORM_POST_INC;
return true;
}
@@ -1017,7 +995,7 @@ find_inc (bool first_try)
find this. Only try it once though. */
if (first_try && !mem_insn.reg1_is_const)
{
- reverse_mem ();
+ std::swap (mem_insn.reg0, mem_insn.reg1);
return find_inc (false);
}
else
@@ -1118,7 +1096,7 @@ find_inc (bool first_try)
return false;
if (!rtx_equal_p (mem_insn.reg0, inc_insn.reg0))
- reverse_inc ();
+ std::swap (inc_insn.reg0, inc_insn.reg1);
}
other_insn
@@ -1168,7 +1146,7 @@ find_inc (bool first_try)
/* See comment above on find_inc (false) call. */
if (first_try)
{
- reverse_mem ();
+ std::swap (mem_insn.reg0, mem_insn.reg1);
return find_inc (false);
}
else
@@ -1187,7 +1165,7 @@ find_inc (bool first_try)
{
/* We know that mem_insn.reg0 must equal inc_insn.reg1
or else we would not have found the inc insn. */
- reverse_mem ();
+ std::swap (mem_insn.reg0, mem_insn.reg1);
if (!rtx_equal_p (mem_insn.reg0, inc_insn.reg0))
{
/* See comment above on find_inc (false) call. */
@@ -1226,7 +1204,7 @@ find_inc (bool first_try)
{
if (first_try)
{
- reverse_mem ();
+ std::swap (mem_insn.reg0, mem_insn.reg1);
return find_inc (false);
}
else
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 33cf7690bb6..39de58a9ea5 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,8 @@
+2015-06-22 Mikhail Maltsev <maltsevm@gmail.com>
+
+ * c-common.c (scalar_to_vector): Use std::swap instead of manually
+ swapping.
+
2015-06-17 Andrew MacLeod <amacleod@redhat.com>
* array-notation-common.c: Do not include input.h, line-map.h or is-a.h.
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index dc2bf00ebbc..c39a36deb30 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -12605,11 +12605,9 @@ scalar_to_vector (location_t loc, enum tree_code code, tree op0, tree op1,
/* What about UNLT_EXPR? */
if (TREE_CODE (type0) == VECTOR_TYPE)
{
- tree tmp;
ret = stv_secondarg;
- /* Swap TYPE0 with TYPE1 and OP0 with OP1 */
- tmp = type0; type0 = type1; type1 = tmp;
- tmp = op0; op0 = op1; op1 = tmp;
+ std::swap (type0, type1);
+ std::swap (op0, op1);
}
if (TREE_CODE (type0) == INTEGER_TYPE
diff --git a/gcc/combine.c b/gcc/combine.c
index 171e51dc4c7..f3802d7379a 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -5684,11 +5684,7 @@ combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
/* Make sure we pass the constant operand if any as the second
one if this is a commutative operation. */
if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
- {
- rtx tem = inner_op0;
- inner_op0 = inner_op1;
- inner_op1 = tem;
- }
+ std::swap (inner_op0, inner_op1);
inner = simplify_binary_operation (code == MINUS ? PLUS
: code == DIV ? MULT
: code,
diff --git a/gcc/common.opt b/gcc/common.opt
index 32b416a324f..dd49ae31880 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -918,7 +918,7 @@ Common Report
This switch is deprecated; use -fsanitize-recover= instead
fsanitize-undefined-trap-on-error
-Common Report Var(flag_sanitize_undefined_trap_on_error) Init(0)
+Common Driver Report Var(flag_sanitize_undefined_trap_on_error) Init(0)
Use trap instead of a library function for undefined behavior sanitization
fasynchronous-unwind-tables
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 1efe57c91b1..d3f5d5b2063 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -986,7 +986,9 @@
FAIL;
}
- if (GET_CODE (operands[0]) == MEM)
+ if (GET_CODE (operands[0]) == MEM
+ && ! (GET_CODE (operands[1]) == CONST_DOUBLE
+ && aarch64_float_const_zero_rtx_p (operands[1])))
operands[1] = force_reg (<MODE>mode, operands[1]);
"
)
@@ -995,7 +997,7 @@
[(set (match_operand:SF 0 "nonimmediate_operand" "=w, ?r,w,w ,w,m,r,m ,r")
(match_operand:SF 1 "general_operand" "?rY, w,w,Ufc,m,w,m,rY,r"))]
"TARGET_FLOAT && (register_operand (operands[0], SFmode)
- || register_operand (operands[1], SFmode))"
+ || aarch64_reg_or_fp_zero (operands[1], SFmode))"
"@
fmov\\t%s0, %w1
fmov\\t%w0, %s1
@@ -1007,14 +1009,14 @@
str\\t%w1, %0
mov\\t%w0, %w1"
[(set_attr "type" "f_mcr,f_mrc,fmov,fconsts,\
- f_loads,f_stores,f_loads,f_stores,mov_reg")]
+ f_loads,f_stores,load1,store1,mov_reg")]
)
(define_insn "*movdf_aarch64"
[(set (match_operand:DF 0 "nonimmediate_operand" "=w, ?r,w,w ,w,m,r,m ,r")
(match_operand:DF 1 "general_operand" "?rY, w,w,Ufc,m,w,m,rY,r"))]
"TARGET_FLOAT && (register_operand (operands[0], DFmode)
- || register_operand (operands[1], DFmode))"
+ || aarch64_reg_or_fp_zero (operands[1], DFmode))"
"@
fmov\\t%d0, %x1
fmov\\t%x0, %d1
@@ -1026,7 +1028,7 @@
str\\t%x1, %0
mov\\t%x0, %x1"
[(set_attr "type" "f_mcr,f_mrc,fmov,fconstd,\
- f_loadd,f_stored,f_loadd,f_stored,mov_reg")]
+ f_loadd,f_stored,load1,store1,mov_reg")]
)
(define_expand "movtf"
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index e79a36939d0..ced4231cd8c 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -84,6 +84,7 @@
#include "tm-constrs.h"
#include "rtl-iter.h"
#include "sched-int.h"
+#include "tree.h"
/* Forward definitions of types. */
typedef struct minipool_node Mnode;
@@ -218,6 +219,7 @@ static void arm_encode_section_info (tree, rtx, int);
static void arm_file_end (void);
static void arm_file_start (void);
+static void arm_insert_attributes (tree, tree *);
static void arm_setup_incoming_varargs (cumulative_args_t, machine_mode,
tree, int *, int);
@@ -376,6 +378,9 @@ static const struct attribute_spec arm_attribute_table[] =
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table
+#undef TARGET_INSERT_ATTRIBUTES
+#define TARGET_INSERT_ATTRIBUTES arm_insert_attributes
+
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START arm_file_start
#undef TARGET_ASM_FILE_END
@@ -2792,6 +2797,10 @@ arm_option_params_internal (struct gcc_options *opts)
? 1 : current_tune->max_insns_skipped;
}
+/* True if -mflip-thumb should next add an attribute for the default
+ mode, false if it should next add an attribute for the opposite mode. */
+static GTY(()) bool thumb_flipper;
+
/* Options after initial target override. */
static GTY(()) tree init_optimize;
@@ -3369,6 +3378,9 @@ arm_option_override (void)
options. */
target_option_default_node = target_option_current_node
= build_target_option_node (&global_options);
+
+ /* Init initial mode for testing. */
+ thumb_flipper = TARGET_THUMB;
}
static void
@@ -29459,6 +29471,52 @@ arm_valid_target_attribute_tree (tree args, struct gcc_options *opts,
return build_target_option_node (opts);
}
+static void
+add_attribute (const char * mode, tree *attributes)
+{
+ size_t len = strlen (mode);
+ tree value = build_string (len, mode);
+
+ TREE_TYPE (value) = build_array_type (char_type_node,
+ build_index_type (size_int (len)));
+
+ *attributes = tree_cons (get_identifier ("target"),
+ build_tree_list (NULL_TREE, value),
+ *attributes);
+}
+
+/* For testing. Insert thumb or arm modes alternatively on functions. */
+
+static void
+arm_insert_attributes (tree fndecl, tree * attributes)
+{
+ const char *mode;
+
+ if (! TARGET_FLIP_THUMB)
+ return;
+
+ if (TREE_CODE (fndecl) != FUNCTION_DECL || DECL_EXTERNAL(fndecl)
+ || DECL_BUILT_IN (fndecl) || DECL_ARTIFICIAL (fndecl))
+ return;
+
+ /* Nested definitions must inherit mode. */
+ if (current_function_decl)
+ {
+ mode = TARGET_THUMB ? "thumb" : "arm";
+ add_attribute (mode, attributes);
+ return;
+ }
+
+ /* If there is already a setting don't change it. */
+ if (lookup_attribute ("target", *attributes) != NULL)
+ return;
+
+ mode = thumb_flipper ? "thumb" : "arm";
+ add_attribute (mode, attributes);
+
+ thumb_flipper = !thumb_flipper;
+}
+
/* Hook to validate attribute((target("string"))). */
static bool
diff --git a/gcc/config/arm/arm.opt b/gcc/config/arm/arm.opt
index 59e5385803e..c9095b9eeab 100644
--- a/gcc/config/arm/arm.opt
+++ b/gcc/config/arm/arm.opt
@@ -122,6 +122,10 @@ Enum(float_abi_type) String(softfp) Value(ARM_FLOAT_ABI_SOFTFP)
EnumValue
Enum(float_abi_type) String(hard) Value(ARM_FLOAT_ABI_HARD)
+mflip-thumb
+Target Report Var(TARGET_FLIP_THUMB) Undocumented
+Switch ARM/Thumb modes on alternating functions for compiler testing
+
mfp16-format=
Target RejectNegative Joined Enum(arm_fp16_format_type) Var(arm_fp16_format) Init(ARM_FP16_FORMAT_NONE)
Specify the __fp16 floating-point format
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index bd548a969fb..24fccfca53d 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -34971,9 +34971,7 @@ ix86_function_versions (tree fn1, tree fn2)
{
if (attr2 != NULL_TREE)
{
- tree tem = fn1;
- fn1 = fn2;
- fn2 = tem;
+ std::swap (fn1, fn2);
attr1 = attr2;
}
error_at (DECL_SOURCE_LOCATION (fn2),
@@ -47990,9 +47988,7 @@ expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
{
/* Attempt to increase the likelihood that dfinal
shuffle will be intra-lane. */
- char tmph = nonzero_halves[0];
- nonzero_halves[0] = nonzero_halves[1];
- nonzero_halves[1] = tmph;
+ std::swap (nonzero_halves[0], nonzero_halves[1]);
}
/* vperm2f128 or vperm2i128. */
diff --git a/gcc/config/sh/sh.c b/gcc/config/sh/sh.c
index e5fcd7683ef..6f03206ccb9 100644
--- a/gcc/config/sh/sh.c
+++ b/gcc/config/sh/sh.c
@@ -1775,10 +1775,14 @@ prepare_move_operands (rtx operands[], machine_mode mode)
target/55212.
We split possible load/store to two move insns via r0 so as to
shorten R0 live range. It will make some codes worse but will
- win on avarage for LRA. */
+ win on average for LRA.
+ Also when base+index addressing is used and the index term is
+ a subreg, LRA assumes that more hard registers can be available
+ in some situation. It isn't the case for SH in the problematic
+ case. We can pre-allocate R0 for that index term to avoid
+ the issue. See PR target/66591. */
else if (sh_lra_p ()
&& TARGET_SH1 && ! TARGET_SH2A
- && (mode == QImode || mode == HImode)
&& ((REG_P (operands[0]) && MEM_P (operands[1]))
|| (REG_P (operands[1]) && MEM_P (operands[0]))))
{
@@ -1786,7 +1790,8 @@ prepare_move_operands (rtx operands[], machine_mode mode)
rtx reg = operands[load_p ? 0 : 1];
rtx adr = XEXP (operands[load_p ? 1 : 0], 0);
- if (REGNO (reg) >= FIRST_PSEUDO_REGISTER
+ if ((mode == QImode || mode == HImode)
+ && REGNO (reg) >= FIRST_PSEUDO_REGISTER
&& GET_CODE (adr) == PLUS
&& REG_P (XEXP (adr, 0))
&& (REGNO (XEXP (adr, 0)) >= FIRST_PSEUDO_REGISTER)
@@ -1798,6 +1803,17 @@ prepare_move_operands (rtx operands[], machine_mode mode)
emit_move_insn (r0_rtx, operands[1]);
operands[1] = r0_rtx;
}
+ if (REGNO (reg) >= FIRST_PSEUDO_REGISTER
+ && GET_CODE (adr) == PLUS
+ && REG_P (XEXP (adr, 0))
+ && (REGNO (XEXP (adr, 0)) >= FIRST_PSEUDO_REGISTER)
+ && SUBREG_P (XEXP (adr, 1))
+ && REG_P (SUBREG_REG (XEXP (adr, 1))))
+ {
+ rtx r0_rtx = gen_rtx_REG (GET_MODE (XEXP (adr, 1)), R0_REG);
+ emit_move_insn (r0_rtx, XEXP (adr, 1));
+ XEXP (adr, 1) = r0_rtx;
+ }
}
}
diff --git a/gcc/config/vax/vax.md b/gcc/config/vax/vax.md
index 44d162f891a..d5caa156370 100644
--- a/gcc/config/vax/vax.md
+++ b/gcc/config/vax/vax.md
@@ -780,7 +780,7 @@
(match_operand:SI 3 "general_operand" "g"))]
"(INTVAL (operands[1]) == 8 || INTVAL (operands[1]) == 16)
&& INTVAL (operands[2]) % INTVAL (operands[1]) == 0
- && (REG_P (operands[0])
+ && (!MEM_P (operands[0])
|| ! mode_dependent_address_p (XEXP (operands[0], 0),
MEM_ADDR_SPACE (operands[0])))"
"*
@@ -809,7 +809,7 @@
(match_operand:SI 3 "const_int_operand" "n")))]
"(INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
&& INTVAL (operands[3]) % INTVAL (operands[2]) == 0
- && (REG_P (operands[1])
+ && (!MEM_P (operands[1])
|| ! mode_dependent_address_p (XEXP (operands[1], 0),
MEM_ADDR_SPACE (operands[1])))"
"*
@@ -837,7 +837,7 @@
(match_operand:SI 3 "const_int_operand" "n")))]
"(INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
&& INTVAL (operands[3]) % INTVAL (operands[2]) == 0
- && (REG_P (operands[1])
+ && (!MEM_P (operands[1])
|| ! mode_dependent_address_p (XEXP (operands[1], 0),
MEM_ADDR_SPACE (operands[1])))"
"*
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index eb8d97ab1e8..7b3cd2ce720 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,35 @@
+2015-06-22 Mikhail Maltsev <maltsevm@gmail.com>
+
+ * pt.c (maybe_adjust_types_for_deduction): Use std::swap instead of
+ manually swapping.
+ * semantics.c (finish_omp_atomic): Likewise.
+ * typeck.c (cp_build_array_ref): Likewise.
+
+2015-06-20 Mikhail Maltsev <maltsevm@gmail.com>
+
+ PR c++/65882
+ * call.c (build_new_op_1): Check tf_warning flag in all cases.
+
+2015-06-19 Jason Merrill <jason@redhat.com>
+
+ PR c++/66585
+ * pt.c (instantiate_class_template_1): Clear
+ cp_unevaluated_operand and c_inhibit_evaluation_warnings.
+
+ PR c++/65880
+ * decl.c (build_ptrmemfunc_type): Check TYPE_GET_PTRMEMFUNC_TYPE after
+ cv-qualifiers.
+ * typeck.c (merge_types): build_ptrmemfunc_type before applying
+ quals and attributes.
+
+ PR c++/65973
+ * constexpr.c (build_constexpr_constructor_member_initializers):
+ Handle an empty STATEMENT_LIST.
+
+ PR c++/65843
+ * pt.c (tsubst_copy_and_build): Register a capture proxy in
+ local_specializations.
+
2015-06-17 Jason Merrill <jason@redhat.com>
PR c++/66001
diff --git a/gcc/cp/call.c b/gcc/cp/call.c
index 5d1891d2c15..ba5da4c36da 100644
--- a/gcc/cp/call.c
+++ b/gcc/cp/call.c
@@ -5640,8 +5640,9 @@ build_new_op_1 (location_t loc, enum tree_code code, int flags, tree arg1,
case TRUTH_ORIF_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
- warn_logical_operator (loc, code, boolean_type_node,
- code_orig_arg1, arg1, code_orig_arg2, arg2);
+ if (complain & tf_warning)
+ warn_logical_operator (loc, code, boolean_type_node,
+ code_orig_arg1, arg1, code_orig_arg2, arg2);
/* Fall through. */
case GT_EXPR:
case LT_EXPR:
@@ -5649,8 +5650,9 @@ build_new_op_1 (location_t loc, enum tree_code code, int flags, tree arg1,
case LE_EXPR:
case EQ_EXPR:
case NE_EXPR:
- if ((code_orig_arg1 == BOOLEAN_TYPE)
- ^ (code_orig_arg2 == BOOLEAN_TYPE))
+ if ((complain & tf_warning)
+ && ((code_orig_arg1 == BOOLEAN_TYPE)
+ ^ (code_orig_arg2 == BOOLEAN_TYPE)))
maybe_warn_bool_compare (loc, code, arg1, arg2);
/* Fall through. */
case PLUS_EXPR:
diff --git a/gcc/cp/constexpr.c b/gcc/cp/constexpr.c
index 56885883d72..f6e2bc8508c 100644
--- a/gcc/cp/constexpr.c
+++ b/gcc/cp/constexpr.c
@@ -537,16 +537,16 @@ build_constexpr_constructor_member_initializers (tree type, tree body)
body = TREE_OPERAND (body, 0);
if (TREE_CODE (body) == STATEMENT_LIST)
{
- tree_stmt_iterator i = tsi_start (body);
- while (true)
+ for (tree_stmt_iterator i = tsi_start (body);
+ !tsi_end_p (i); tsi_next (&i))
{
body = tsi_stmt (i);
if (TREE_CODE (body) == BIND_EXPR)
break;
- tsi_next (&i);
}
}
- body = BIND_EXPR_BODY (body);
+ if (TREE_CODE (body) == BIND_EXPR)
+ body = BIND_EXPR_BODY (body);
if (TREE_CODE (body) == CLEANUP_POINT_EXPR)
{
body = TREE_OPERAND (body, 0);
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index c102a4ee717..515c2d33e10 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -8224,13 +8224,6 @@ build_ptrmemfunc_type (tree type)
if (type == error_mark_node)
return type;
- /* If a canonical type already exists for this type, use it. We use
- this method instead of type_hash_canon, because it only does a
- simple equality check on the list of field members. */
-
- if ((t = TYPE_GET_PTRMEMFUNC_TYPE (type)))
- return t;
-
/* Make sure that we always have the unqualified pointer-to-member
type first. */
if (cp_cv_quals quals = cp_type_quals (type))
@@ -8239,6 +8232,13 @@ build_ptrmemfunc_type (tree type)
return cp_build_qualified_type (unqual, quals);
}
+ /* If a canonical type already exists for this type, use it. We use
+ this method instead of type_hash_canon, because it only does a
+ simple equality check on the list of field members. */
+
+ if ((t = TYPE_GET_PTRMEMFUNC_TYPE (type)))
+ return t;
+
t = make_node (RECORD_TYPE);
/* Let the front end know this is a pointer to member function. */
diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c
index ccce90dba95..8800af819ec 100644
--- a/gcc/cp/pt.c
+++ b/gcc/cp/pt.c
@@ -9215,12 +9215,20 @@ instantiate_class_template_1 (tree type)
it now. */
push_deferring_access_checks (dk_no_deferred);
+ int saved_unevaluated_operand = cp_unevaluated_operand;
+ int saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;
+
fn_context = decl_function_context (TYPE_MAIN_DECL (type));
/* Also avoid push_to_top_level for a lambda in an NSDMI. */
if (!fn_context && LAMBDA_TYPE_P (type) && TYPE_CLASS_SCOPE_P (type))
fn_context = error_mark_node;
if (!fn_context)
push_to_top_level ();
+ else
+ {
+ cp_unevaluated_operand = 0;
+ c_inhibit_evaluation_warnings = 0;
+ }
/* Use #pragma pack from the template context. */
saved_maximum_field_alignment = maximum_field_alignment;
maximum_field_alignment = TYPE_PRECISION (pattern);
@@ -9636,6 +9644,14 @@ instantiate_class_template_1 (tree type)
}
}
+ if (fn_context)
+ {
+ /* Restore these before substituting into the lambda capture
+ initializers. */
+ cp_unevaluated_operand = saved_unevaluated_operand;
+ c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings;
+ }
+
if (tree expr = CLASSTYPE_LAMBDA_EXPR (type))
{
tree decl = lambda_function (type);
@@ -15665,7 +15681,11 @@ tsubst_copy_and_build (tree t,
r = build_cxx_call (wrap, 0, NULL, tf_warning_or_error);
}
else if (outer_automatic_var_p (r))
- r = process_outer_var_ref (r, complain);
+ {
+ r = process_outer_var_ref (r, complain);
+ if (is_capture_proxy (r))
+ register_local_specialization (r, t);
+ }
if (TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE)
/* If the original type was a reference, we'll be wrapped in
@@ -16525,15 +16545,11 @@ maybe_adjust_types_for_deduction (unification_kind_t strict,
break;
case DEDUCE_CONV:
- {
- /* Swap PARM and ARG throughout the remainder of this
- function; the handling is precisely symmetric since PARM
- will initialize ARG rather than vice versa. */
- tree* temp = parm;
- parm = arg;
- arg = temp;
- break;
- }
+ /* Swap PARM and ARG throughout the remainder of this
+ function; the handling is precisely symmetric since PARM
+ will initialize ARG rather than vice versa. */
+ std::swap (parm, arg);
+ break;
case DEDUCE_EXACT:
/* Core issue #873: Do the DR606 thing (see below) for these cases,
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index 9b5f050dfa4..aeb5f7ba298 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -6870,9 +6870,7 @@ finish_omp_atomic (enum tree_code code, enum tree_code opcode, tree lhs,
bool swapped = false;
if (rhs1 && cp_tree_equal (lhs, rhs))
{
- tree tem = rhs;
- rhs = rhs1;
- rhs1 = tem;
+ std::swap (rhs, rhs1);
swapped = !commutative_tree_code (opcode);
}
if (rhs1 && !cp_tree_equal (lhs, rhs1))
diff --git a/gcc/cp/typeck.c b/gcc/cp/typeck.c
index c33ffd57a0b..5b3fdfbf76b 100644
--- a/gcc/cp/typeck.c
+++ b/gcc/cp/typeck.c
@@ -786,15 +786,16 @@ merge_types (tree t1, tree t2)
int quals = cp_type_quals (t1);
if (code1 == POINTER_TYPE)
- t1 = build_pointer_type (target);
+ {
+ t1 = build_pointer_type (target);
+ if (TREE_CODE (target) == METHOD_TYPE)
+ t1 = build_ptrmemfunc_type (t1);
+ }
else
t1 = cp_build_reference_type (target, TYPE_REF_IS_RVALUE (t1));
t1 = build_type_attribute_variant (t1, attributes);
t1 = cp_build_qualified_type (t1, quals);
- if (TREE_CODE (target) == METHOD_TYPE)
- t1 = build_ptrmemfunc_type (t1);
-
return t1;
}
@@ -3178,11 +3179,7 @@ cp_build_array_ref (location_t loc, tree array, tree idx,
/* Put the integer in IND to simplify error checking. */
if (TREE_CODE (TREE_TYPE (ar)) == INTEGER_TYPE)
- {
- tree temp = ar;
- ar = ind;
- ind = temp;
- }
+ std::swap (ar, ind);
if (ar == error_mark_node || ind == error_mark_node)
return error_mark_node;
diff --git a/gcc/df-core.c b/gcc/df-core.c
index 68d18497133..ea1f16842a2 100644
--- a/gcc/df-core.c
+++ b/gcc/df-core.c
@@ -1043,10 +1043,7 @@ df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
bitmap_iterator bi;
unsigned int index;
- /* Swap pending and worklist. */
- bitmap temp = worklist;
- worklist = pending;
- pending = temp;
+ std::swap (pending, worklist);
EXECUTE_IF_SET_IN_BITMAP (worklist, 0, index, bi)
{
diff --git a/gcc/df-scan.c b/gcc/df-scan.c
index 07e6ad5de51..ef9cdbbbe32 100644
--- a/gcc/df-scan.c
+++ b/gcc/df-scan.c
@@ -2133,14 +2133,6 @@ df_ref_ptr_compare (const void *r1, const void *r2)
return df_ref_compare (*(const df_ref *) r1, *(const df_ref *) r2);
}
-static void
-df_swap_refs (vec<df_ref, va_heap> *ref_vec, int i, int j)
-{
- df_ref tmp = (*ref_vec)[i];
- (*ref_vec)[i] = (*ref_vec)[j];
- (*ref_vec)[j] = tmp;
-}
-
/* Sort and compress a set of refs. */
static void
@@ -2160,7 +2152,7 @@ df_sort_and_compress_refs (vec<df_ref, va_heap> *ref_vec)
df_ref r0 = (*ref_vec)[0];
df_ref r1 = (*ref_vec)[1];
if (df_ref_compare (r0, r1) > 0)
- df_swap_refs (ref_vec, 0, 1);
+ std::swap ((*ref_vec)[0], (*ref_vec)[1]);
}
else
{
diff --git a/gcc/dominance.c b/gcc/dominance.c
index b39cd799225..a9e042ef4b5 100644
--- a/gcc/dominance.c
+++ b/gcc/dominance.c
@@ -484,11 +484,7 @@ link_roots (struct dom_info *di, TBB v, TBB w)
di->path_min[s] = di->path_min[w];
di->set_size[v] += di->set_size[w];
if (di->set_size[v] < 2 * di->set_size[w])
- {
- TBB tmp = s;
- s = di->set_child[v];
- di->set_child[v] = tmp;
- }
+ std::swap (di->set_child[v], s);
/* Merge all subtrees. */
while (s)
diff --git a/gcc/expr.c b/gcc/expr.c
index 4eb1ab32c2f..78904c24bf2 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -8426,11 +8426,7 @@ expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
Thus the following special case checks need only
check the second operand. */
if (TREE_CODE (treeop0) == INTEGER_CST)
- {
- tree t1 = treeop0;
- treeop0 = treeop1;
- treeop1 = t1;
- }
+ std::swap (treeop0, treeop1);
/* First, check if we have a multiplication of one signed and one
unsigned operand. */
@@ -8555,11 +8551,7 @@ expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
def0 = get_def_for_expr (treeop1, NEGATE_EXPR);
/* Swap operands if the 2nd operand is fed by a negate. */
if (def0)
- {
- tree tem = treeop0;
- treeop0 = treeop1;
- treeop1 = tem;
- }
+ std::swap (treeop0, treeop1);
}
def2 = get_def_for_expr (treeop2, NEGATE_EXPR);
@@ -8606,11 +8598,7 @@ expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
Thus the following special case checks need only
check the second operand. */
if (TREE_CODE (treeop0) == INTEGER_CST)
- {
- tree t1 = treeop0;
- treeop0 = treeop1;
- treeop1 = t1;
- }
+ std::swap (treeop0, treeop1);
/* Attempt to return something suitable for generating an
indexed address, for machines that support that. */
@@ -10910,7 +10898,6 @@ do_store_flag (sepops ops, rtx target, machine_mode mode)
{
enum rtx_code code;
tree arg0, arg1, type;
- tree tem;
machine_mode operand_mode;
int unsignedp;
rtx op0, op1;
@@ -11033,7 +11020,7 @@ do_store_flag (sepops ops, rtx target, machine_mode mode)
if (TREE_CODE (arg0) == REAL_CST || TREE_CODE (arg0) == INTEGER_CST
|| TREE_CODE (arg0) == FIXED_CST)
{
- tem = arg0; arg0 = arg1; arg1 = tem;
+ std::swap (arg0, arg1);
code = swap_condition (code);
}
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index e61d9463462..60aa2104960 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -15664,9 +15664,7 @@ fold_relational_const (enum tree_code code, tree type, tree op0, tree op1)
if (code == LE_EXPR || code == GT_EXPR)
{
- tree tem = op0;
- op0 = op1;
- op1 = tem;
+ std::swap (op0, op1);
code = swap_tree_comparison (code);
}
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index ee6b190c9c5..148bc80cb13 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,10 @@
+2015-06-19 Mikael Morin <mikael@gcc.gnu.org>
+
+ PR fortran/66549
+ * resolve.c (resolve_global_procedure): Don't save and restore
+ OpenMP state around the call to gfc_resolve.
+ (gfc_resolve): Save OpenMP state on entry and restore it on return.
+
2015-06-17 Andrew MacLeod <amacleod@redhat.com>
* convert.c: Do not include input.h, line-map.h or is-a.h.
diff --git a/gcc/fortran/resolve.c b/gcc/fortran/resolve.c
index 6f0779db41a..7fb6d4c197c 100644
--- a/gcc/fortran/resolve.c
+++ b/gcc/fortran/resolve.c
@@ -2384,14 +2384,11 @@ resolve_global_procedure (gfc_symbol *sym, locus *where,
if (!gsym->ns->resolved)
{
gfc_dt_list *old_dt_list;
- struct gfc_omp_saved_state old_omp_state;
/* Stash away derived types so that the backend_decls do not
get mixed up. */
old_dt_list = gfc_derived_types;
gfc_derived_types = NULL;
- /* And stash away openmp state. */
- gfc_omp_save_and_clear_state (&old_omp_state);
gfc_resolve (gsym->ns);
@@ -2401,8 +2398,6 @@ resolve_global_procedure (gfc_symbol *sym, locus *where,
/* Restore the derived types of this namespace. */
gfc_derived_types = old_dt_list;
- /* And openmp state. */
- gfc_omp_restore_state (&old_omp_state);
}
/* Make sure that translation for the gsymbol occurs before
@@ -15226,6 +15221,7 @@ gfc_resolve (gfc_namespace *ns)
{
gfc_namespace *old_ns;
code_stack *old_cs_base;
+ struct gfc_omp_saved_state old_omp_state;
if (ns->resolved)
return;
@@ -15234,6 +15230,11 @@ gfc_resolve (gfc_namespace *ns)
old_ns = gfc_current_ns;
old_cs_base = cs_base;
+ /* As gfc_resolve can be called during resolution of an OpenMP construct
+ body, we should clear any state associated to it, so that say NS's
+ DO loops are not interpreted as OpenMP loops. */
+ gfc_omp_save_and_clear_state (&old_omp_state);
+
resolve_types (ns);
component_assignment_level = 0;
resolve_codes (ns);
@@ -15243,4 +15244,6 @@ gfc_resolve (gfc_namespace *ns)
ns->resolved = 1;
gfc_run_passes (ns);
+
+ gfc_omp_restore_state (&old_omp_state);
}
diff --git a/gcc/genattrtab.c b/gcc/genattrtab.c
index 2ec02c366eb..85ec738a9a4 100644
--- a/gcc/genattrtab.c
+++ b/gcc/genattrtab.c
@@ -2532,11 +2532,7 @@ simplify_test_exp (rtx exp, int insn_code, int insn_index)
&& compute_alternative_mask (right, IOR))
{
if (GET_CODE (left) == IOR)
- {
- rtx tem = left;
- left = right;
- right = tem;
- }
+ std::swap (left, right);
newexp = attr_rtx (IOR,
attr_rtx (AND, left, XEXP (right, 0)),
diff --git a/gcc/gimple-match-head.c b/gcc/gimple-match-head.c
index 86ba78bab80..7b1f1ace8f9 100644
--- a/gcc/gimple-match-head.c
+++ b/gcc/gimple-match-head.c
@@ -190,9 +190,7 @@ gimple_resimplify2 (gimple_seq *seq,
|| commutative_tree_code (*res_code))
&& tree_swap_operands_p (res_ops[0], res_ops[1], false))
{
- tree tem = res_ops[0];
- res_ops[0] = res_ops[1];
- res_ops[1] = tem;
+ std::swap (res_ops[0], res_ops[1]);
if (TREE_CODE_CLASS ((enum tree_code) *res_code) == tcc_comparison)
*res_code = swap_tree_comparison (*res_code);
canonicalized = true;
@@ -262,9 +260,7 @@ gimple_resimplify3 (gimple_seq *seq,
&& commutative_ternary_tree_code (*res_code)
&& tree_swap_operands_p (res_ops[0], res_ops[1], false))
{
- tree tem = res_ops[0];
- res_ops[0] = res_ops[1];
- res_ops[1] = tem;
+ std::swap (res_ops[0], res_ops[1]);
canonicalized = true;
}
@@ -427,9 +423,7 @@ gimple_simplify (enum tree_code code, tree type,
|| TREE_CODE_CLASS (code) == tcc_comparison)
&& tree_swap_operands_p (op0, op1, false))
{
- tree tem = op0;
- op0 = op1;
- op1 = tem;
+ std::swap (op0, op1);
if (TREE_CODE_CLASS (code) == tcc_comparison)
code = swap_tree_comparison (code);
}
@@ -462,11 +456,7 @@ gimple_simplify (enum tree_code code, tree type,
generation. */
if (commutative_ternary_tree_code (code)
&& tree_swap_operands_p (op0, op1, false))
- {
- tree tem = op0;
- op0 = op1;
- op1 = tem;
- }
+ std::swap (op0, op1);
code_helper rcode;
tree ops[3] = {};
diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c
index 648965c4208..7d4ad934003 100644
--- a/gcc/ifcvt.c
+++ b/gcc/ifcvt.c
@@ -2065,7 +2065,7 @@ noce_try_abs (struct noce_if_info *if_info)
negate = 0;
else if (GET_CODE (b) == NEG && rtx_equal_p (XEXP (b, 0), a))
{
- c = a; a = b; b = c;
+ std::swap (a, b);
negate = 1;
}
else if (GET_CODE (a) == NOT && rtx_equal_p (XEXP (a, 0), b))
@@ -2075,7 +2075,7 @@ noce_try_abs (struct noce_if_info *if_info)
}
else if (GET_CODE (b) == NOT && rtx_equal_p (XEXP (b, 0), a))
{
- c = a; a = b; b = c;
+ std::swap (a, b);
negate = 1;
one_cmpl = true;
}
@@ -3390,11 +3390,7 @@ find_if_header (basic_block test_bb, int pass)
if (then_edge->flags & EDGE_FALLTHRU)
;
else if (else_edge->flags & EDGE_FALLTHRU)
- {
- edge e = else_edge;
- else_edge = then_edge;
- then_edge = e;
- }
+ std::swap (then_edge, else_edge);
else
/* Otherwise this must be a multiway branch of some sort. */
return NULL;
diff --git a/gcc/internal-fn.c b/gcc/internal-fn.c
index 64ce8e34f4d..c398c125bcf 100644
--- a/gcc/internal-fn.c
+++ b/gcc/internal-fn.c
@@ -526,14 +526,10 @@ expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
/* PLUS_EXPR is commutative, if operand signedness differs,
canonicalize to the first operand being signed and second
unsigned to simplify following code. */
- rtx tem = op1;
- op1 = op0;
- op0 = tem;
- tree t = arg1;
- arg1 = arg0;
- arg0 = t;
- uns0_p = 0;
- uns1_p = 1;
+ std::swap (op0, op1);
+ std::swap (arg0, arg1);
+ uns0_p = false;
+ uns1_p = true;
}
/* u1 +- u2 -> ur */
@@ -674,9 +670,7 @@ expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
int pos_neg0 = get_range_pos_neg (arg0);
if (pos_neg0 != 3 && pos_neg == 3)
{
- rtx tem = op1;
- op1 = op0;
- op0 = tem;
+ std::swap (op0, op1);
pos_neg = pos_neg0;
}
}
@@ -781,22 +775,14 @@ expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
do_compare_rtx_and_jump will be just folded. Otherwise try
to use range info if available. */
if (code == PLUS_EXPR && CONST_INT_P (op0))
- {
- rtx tem = op0;
- op0 = op1;
- op1 = tem;
- }
+ std::swap (op0, op1);
else if (CONST_INT_P (op1))
;
else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
{
pos_neg = get_range_pos_neg (arg0);
if (pos_neg != 3)
- {
- rtx tem = op0;
- op0 = op1;
- op1 = tem;
- }
+ std::swap (op0, op1);
}
if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
pos_neg = get_range_pos_neg (arg1);
@@ -1023,14 +1009,10 @@ expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
/* Multiplication is commutative, if operand signedness differs,
canonicalize to the first operand being signed and second
unsigned to simplify following code. */
- rtx tem = op1;
- op1 = op0;
- op0 = tem;
- tree t = arg1;
- arg1 = arg0;
- arg0 = t;
- uns0_p = 0;
- uns1_p = 1;
+ std::swap (op0, op1);
+ std::swap (arg0, arg1);
+ uns0_p = false;
+ uns1_p = true;
}
int pos_neg0 = get_range_pos_neg (arg0);
diff --git a/gcc/ipa-devirt.c b/gcc/ipa-devirt.c
index 6fba692577f..1bf4b1acb6b 100644
--- a/gcc/ipa-devirt.c
+++ b/gcc/ipa-devirt.c
@@ -1846,12 +1846,7 @@ add_type_duplicate (odr_type val, tree type)
}
if (prevail)
- {
- tree tmp = type;
-
- type = val->type;
- val->type = tmp;
- }
+ std::swap (val->type, type);
val->types_set->add (type);
diff --git a/gcc/ipa-icf.c b/gcc/ipa-icf.c
index e998fbf216c..9a8133faa2c 100644
--- a/gcc/ipa-icf.c
+++ b/gcc/ipa-icf.c
@@ -259,7 +259,6 @@ sem_item::target_supports_symbol_aliases_p (void)
sem_function::sem_function (bitmap_obstack *stack): sem_item (FUNC, stack),
m_checker (NULL), m_compared_func (NULL)
{
- arg_types.create (0);
bb_sizes.create (0);
bb_sorted.create (0);
}
@@ -271,7 +270,6 @@ sem_function::sem_function (cgraph_node *node, hashval_t hash,
sem_item (FUNC, node, hash, stack),
m_checker (NULL), m_compared_func (NULL)
{
- arg_types.create (0);
bb_sizes.create (0);
bb_sorted.create (0);
}
@@ -281,7 +279,6 @@ sem_function::~sem_function ()
for (unsigned i = 0; i < bb_sorted.length (); i++)
delete (bb_sorted[i]);
- arg_types.release ();
bb_sizes.release ();
bb_sorted.release ();
}
@@ -581,6 +578,30 @@ sem_function::param_used_p (unsigned int i)
return ipa_is_param_used (IPA_NODE_REF (get_node ()), i);
}
+/* Perform additional check needed to match types function parameters that are
+ used. Unlike for normal decls it matters if type is TYPE_RESTRICT and we
+ make an assumption that REFERENCE_TYPE parameters are always non-NULL. */
+
+bool
+sem_function::compatible_parm_types_p (tree parm1, tree parm2)
+{
+ /* Be sure that parameters are TBAA compatible. */
+ if (!func_checker::compatible_types_p (parm1, parm2))
+ return return_false_with_msg ("parameter type is not compatible");
+
+ if (POINTER_TYPE_P (parm1)
+ && (TYPE_RESTRICT (parm1) != TYPE_RESTRICT (parm2)))
+ return return_false_with_msg ("argument restrict flag mismatch");
+
+ /* nonnull_arg_p implies non-zero range to REFERENCE types. */
+ if (POINTER_TYPE_P (parm1)
+ && TREE_CODE (parm1) != TREE_CODE (parm2)
+ && opt_for_fn (decl, flag_delete_null_pointer_checks))
+ return return_false_with_msg ("pointer wrt reference mismatch");
+
+ return true;
+}
+
/* Fast equality function based on knowledge known in WPA. */
bool
@@ -593,9 +614,6 @@ sem_function::equals_wpa (sem_item *item,
m_compared_func = static_cast<sem_function *> (item);
- if (arg_types.length () != m_compared_func->arg_types.length ())
- return return_false_with_msg ("different number of arguments");
-
if (cnode->thunk.thunk_p != cnode2->thunk.thunk_p)
return return_false_with_msg ("thunk_p mismatch");
@@ -684,38 +702,40 @@ sem_function::equals_wpa (sem_item *item,
}
/* Result type checking. */
- if (!func_checker::compatible_types_p (result_type,
- m_compared_func->result_type))
+ if (!func_checker::compatible_types_p
+ (TREE_TYPE (TREE_TYPE (decl)),
+ TREE_TYPE (TREE_TYPE (m_compared_func->decl))))
return return_false_with_msg ("result types are different");
/* Checking types of arguments. */
- for (unsigned i = 0; i < arg_types.length (); i++)
+ tree list1 = TYPE_ARG_TYPES (TREE_TYPE (decl)),
+ list2 = TYPE_ARG_TYPES (TREE_TYPE (m_compared_func->decl));
+ for (unsigned i = 0; list1 && list2;
+ list1 = TREE_CHAIN (list1), list2 = TREE_CHAIN (list2), i++)
{
+ tree parm1 = TREE_VALUE (list1);
+ tree parm2 = TREE_VALUE (list2);
+
/* This guard is here for function pointer with attributes (pr59927.c). */
- if (!arg_types[i] || !m_compared_func->arg_types[i])
+ if (!parm1 || !parm2)
return return_false_with_msg ("NULL argument type");
- /* We always need to match types so we are sure the callin conventions
- are compatible. */
- if (!func_checker::compatible_types_p (arg_types[i],
- m_compared_func->arg_types[i]))
- return return_false_with_msg ("argument type is different");
+ /* Verify that types are compatible to ensure that both functions
+ have same calling conventions. */
+ if (!types_compatible_p (parm1, parm2))
+ return return_false_with_msg ("parameter types are not compatible");
- /* On used arguments we need to do a bit more of work. */
if (!param_used_p (i))
continue;
- if (POINTER_TYPE_P (arg_types[i])
- && (TYPE_RESTRICT (arg_types[i])
- != TYPE_RESTRICT (m_compared_func->arg_types[i])))
- return return_false_with_msg ("argument restrict flag mismatch");
- /* nonnull_arg_p implies non-zero range to REFERENCE types. */
- if (POINTER_TYPE_P (arg_types[i])
- && TREE_CODE (arg_types[i])
- != TREE_CODE (m_compared_func->arg_types[i])
- && opt_for_fn (decl, flag_delete_null_pointer_checks))
- return return_false_with_msg ("pointer wrt reference mismatch");
+
+ /* Perform additional checks for used parameters. */
+ if (!compatible_parm_types_p (parm1, parm2))
+ return false;
}
+ if (list1 || list2)
+ return return_false_with_msg ("Mismatched number of parameters");
+
if (node->num_references () != item->node->num_references ())
return return_false_with_msg ("different number of references");
@@ -922,11 +942,23 @@ sem_function::equals_private (sem_item *item)
false,
&refs_set,
&m_compared_func->refs_set);
- for (arg1 = DECL_ARGUMENTS (decl),
- arg2 = DECL_ARGUMENTS (m_compared_func->decl);
- arg1; arg1 = DECL_CHAIN (arg1), arg2 = DECL_CHAIN (arg2))
- if (!m_checker->compare_decl (arg1, arg2))
- return return_false ();
+ arg1 = DECL_ARGUMENTS (decl);
+ arg2 = DECL_ARGUMENTS (m_compared_func->decl);
+ for (unsigned i = 0;
+ arg1 && arg2; arg1 = DECL_CHAIN (arg1), arg2 = DECL_CHAIN (arg2), i++)
+ {
+ if (!types_compatible_p (TREE_TYPE (arg1), TREE_TYPE (arg2)))
+ return return_false_with_msg ("argument types are not compatible");
+ if (!param_used_p (i))
+ continue;
+ /* Perform additional checks for used parameters. */
+ if (!compatible_parm_types_p (TREE_TYPE (arg1), TREE_TYPE (arg2)))
+ return false;
+ if (!m_checker->compare_decl (arg1, arg2))
+ return return_false ();
+ }
+ if (arg1 || arg2)
+ return return_false_with_msg ("Mismatched number of arguments");
if (!dyn_cast <cgraph_node *> (node)->has_gimple_body_p ())
return true;
@@ -1439,8 +1471,6 @@ sem_function::init (void)
hstate.add_flag (cnode->thunk.add_pointer_bounds_args);
gcode_hash = hstate.end ();
}
-
- parse_tree_args ();
}
/* Accumulate to HSTATE a hash of expression EXP.
@@ -1691,37 +1721,6 @@ sem_function::parse (cgraph_node *node, bitmap_obstack *stack)
return f;
}
-/* Parses function arguments and result type. */
-
-void
-sem_function::parse_tree_args (void)
-{
- tree result;
-
- if (arg_types.exists ())
- arg_types.release ();
-
- arg_types.create (4);
- tree fnargs = DECL_ARGUMENTS (decl);
-
- for (tree parm = fnargs; parm; parm = DECL_CHAIN (parm))
- arg_types.safe_push (DECL_ARG_TYPE (parm));
-
- /* Function result type. */
- result = DECL_RESULT (decl);
- result_type = result ? TREE_TYPE (result) : NULL;
-
- /* During WPA, we can get arguments by following method. */
- if (!fnargs)
- {
- tree type = TYPE_ARG_TYPES (TREE_TYPE (decl));
- for (tree parm = type; parm; parm = TREE_CHAIN (parm))
- arg_types.safe_push (TYPE_CANONICAL (TREE_VALUE (parm)));
-
- result_type = TREE_TYPE (TREE_TYPE (decl));
- }
-}
-
/* For given basic blocks BB1 and BB2 (from functions FUNC1 and FUNC),
return true if phi nodes are semantically equivalent in these blocks . */
diff --git a/gcc/ipa-icf.h b/gcc/ipa-icf.h
index a3b9ab93678..ee35ee27ac2 100644
--- a/gcc/ipa-icf.h
+++ b/gcc/ipa-icf.h
@@ -292,7 +292,6 @@ public:
inline virtual void init_wpa (void)
{
- parse_tree_args ();
}
virtual void init (void);
@@ -310,9 +309,6 @@ public:
dump_function_to_file (decl, file, TDF_DETAILS);
}
- /* Parses function arguments and result type. */
- void parse_tree_args (void);
-
/* Returns cgraph_node. */
inline cgraph_node *get_node (void)
{
@@ -329,15 +325,13 @@ public:
semantic function item. */
static sem_function *parse (cgraph_node *node, bitmap_obstack *stack);
+ /* Perform additional checks needed to match types of used function
+ paramters. */
+ bool compatible_parm_types_p (tree, tree);
+
/* Exception handling region tree. */
eh_region region_tree;
- /* Result type tree node. */
- tree result_type;
-
- /* Array of argument tree types. */
- vec <tree> arg_types;
-
/* Number of function arguments. */
unsigned int arg_count;
diff --git a/gcc/ipa-polymorphic-call.c b/gcc/ipa-polymorphic-call.c
index 0036565a844..ac8c78ea8f1 100644
--- a/gcc/ipa-polymorphic-call.c
+++ b/gcc/ipa-polymorphic-call.c
@@ -1574,13 +1574,15 @@ ipa_polymorphic_call_context::get_dynamic_type (tree instance,
tree base_ref = get_ref_base_and_extent
(ref_exp, &offset2, &size, &max_size);
- /* Finally verify that what we found looks like read from OTR_OBJECT
- or from INSTANCE with offset OFFSET. */
+ /* Finally verify that what we found looks like read from
+ OTR_OBJECT or from INSTANCE with offset OFFSET. */
if (base_ref
&& ((TREE_CODE (base_ref) == MEM_REF
&& ((offset2 == instance_offset
&& TREE_OPERAND (base_ref, 0) == instance)
- || (!offset2 && TREE_OPERAND (base_ref, 0) == otr_object)))
+ || (!offset2
+ && TREE_OPERAND (base_ref, 0)
+ == otr_object)))
|| (DECL_P (instance) && base_ref == instance
&& offset2 == instance_offset)))
{
@@ -1608,9 +1610,17 @@ ipa_polymorphic_call_context::get_dynamic_type (tree instance,
/* We look for vtbl pointer read. */
ao.size = POINTER_SIZE;
ao.max_size = ao.size;
+ /* We are looking for stores to vptr pointer within the instance of
+ outer type.
+ TODO: The vptr pointer type is globally known, we probably should
+ keep it and do that even when otr_type is unknown. */
if (otr_type)
- ao.ref_alias_set
- = get_deref_alias_set (TREE_TYPE (BINFO_VTABLE (TYPE_BINFO (otr_type))));
+ {
+ ao.base_alias_set
+ = get_alias_set (outer_type ? outer_type : otr_type);
+ ao.ref_alias_set
+ = get_alias_set (TREE_TYPE (BINFO_VTABLE (TYPE_BINFO (otr_type))));
+ }
if (dump_file)
{
diff --git a/gcc/loop-iv.c b/gcc/loop-iv.c
index 141b43672ce..c0d6a1d3f3f 100644
--- a/gcc/loop-iv.c
+++ b/gcc/loop-iv.c
@@ -664,7 +664,7 @@ get_biv_step_1 (df_ref def, rtx reg,
rtx *outer_step)
{
rtx set, rhs, op0 = NULL_RTX, op1 = NULL_RTX;
- rtx next, nextr, tmp;
+ rtx next, nextr;
enum rtx_code code;
rtx_insn *insn = DF_REF_INSN (def);
df_ref next_def;
@@ -694,9 +694,7 @@ get_biv_step_1 (df_ref def, rtx reg,
op1 = XEXP (rhs, 1);
if (code == PLUS && CONSTANT_P (op0))
- {
- tmp = op0; op0 = op1; op1 = tmp;
- }
+ std::swap (op0, op1);
if (!simple_reg_p (op0)
|| !CONSTANT_P (op1))
@@ -2347,7 +2345,7 @@ iv_number_of_iterations (struct loop *loop, rtx_insn *insn, rtx condition,
struct niter_desc *desc)
{
rtx op0, op1, delta, step, bound, may_xform, tmp, tmp0, tmp1;
- struct rtx_iv iv0, iv1, tmp_iv;
+ struct rtx_iv iv0, iv1;
rtx assumption, may_not_xform;
enum rtx_code cond;
machine_mode mode, comp_mode;
@@ -2410,7 +2408,7 @@ iv_number_of_iterations (struct loop *loop, rtx_insn *insn, rtx condition,
case GT:
case GEU:
case GTU:
- tmp_iv = iv0; iv0 = iv1; iv1 = tmp_iv;
+ std::swap (iv0, iv1);
cond = swap_condition (cond);
break;
case NE:
diff --git a/gcc/lra-lives.c b/gcc/lra-lives.c
index 8bf217f6865..16e1f386b3e 100644
--- a/gcc/lra-lives.c
+++ b/gcc/lra-lives.c
@@ -572,16 +572,10 @@ lra_setup_reload_pseudo_preferenced_hard_reg (int regno,
&& (lra_reg_info[regno].preferred_hard_regno_profit2
> lra_reg_info[regno].preferred_hard_regno_profit1))
{
- int temp;
-
- temp = lra_reg_info[regno].preferred_hard_regno1;
- lra_reg_info[regno].preferred_hard_regno1
- = lra_reg_info[regno].preferred_hard_regno2;
- lra_reg_info[regno].preferred_hard_regno2 = temp;
- temp = lra_reg_info[regno].preferred_hard_regno_profit1;
- lra_reg_info[regno].preferred_hard_regno_profit1
- = lra_reg_info[regno].preferred_hard_regno_profit2;
- lra_reg_info[regno].preferred_hard_regno_profit2 = temp;
+ std::swap (lra_reg_info[regno].preferred_hard_regno1,
+ lra_reg_info[regno].preferred_hard_regno2);
+ std::swap (lra_reg_info[regno].preferred_hard_regno_profit1,
+ lra_reg_info[regno].preferred_hard_regno_profit2);
}
if (lra_dump_file != NULL)
{
diff --git a/gcc/lra.c b/gcc/lra.c
index 15e65f7b02f..5d1f429658f 100644
--- a/gcc/lra.c
+++ b/gcc/lra.c
@@ -1377,11 +1377,8 @@ lra_create_copy (int regno1, int regno2, int freq)
regno1_dest_p = true;
if (regno1 > regno2)
{
- int temp = regno2;
-
+ std::swap (regno1, regno2);
regno1_dest_p = false;
- regno2 = regno1;
- regno1 = temp;
}
cp = new lra_copy ();
copy_vec.safe_push (cp);
diff --git a/gcc/lto-streamer-out.c b/gcc/lto-streamer-out.c
index 4671702d249..363da4c02d7 100644
--- a/gcc/lto-streamer-out.c
+++ b/gcc/lto-streamer-out.c
@@ -608,11 +608,8 @@ DFS::DFS (struct output_block *ob, tree expr, bool ref_p, bool this_ref_p,
}
}
for (unsigned i = 0; i < scc_entry_len; ++i)
- {
- scc_entry tem = sccstack[first + i];
- sccstack[first + i] = sccstack[first + entry_start + i];
- sccstack[first + entry_start + i] = tem;
- }
+ std::swap (sccstack[first + i],
+ sccstack[first + entry_start + i]);
if (scc_entry_len == 1)
; /* We already sorted SCC deterministically in hash_scc. */
diff --git a/gcc/modulo-sched.c b/gcc/modulo-sched.c
index db76095fb65..60b39f81d18 100644
--- a/gcc/modulo-sched.c
+++ b/gcc/modulo-sched.c
@@ -2006,9 +2006,7 @@ get_sched_window (partial_schedule_ptr ps, ddg_node_ptr u_node,
node close to its successors. */
if (pss_not_empty && count_succs >= count_preds)
{
- int tmp = end;
- end = start;
- start = tmp;
+ std::swap (start, end);
step = -1;
}
diff --git a/gcc/omega.c b/gcc/omega.c
index 4654df8cccd..829520a338a 100644
--- a/gcc/omega.c
+++ b/gcc/omega.c
@@ -547,11 +547,7 @@ omega_pretty_print_problem (FILE *file, omega_pb pb)
else
{
if (pb->geqs[e].coef[v1] == 1)
- {
- v3 = v2;
- v2 = v1;
- v1 = v3;
- }
+ std::swap (v1, v2);
/* Relation is v1 <= v2 or v1 < v2. */
po[v1][v2] = ((pb->geqs[e].coef[0] == 0) ? le : lt);
diff --git a/gcc/optabs.c b/gcc/optabs.c
index 94739a945bc..491341b8887 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -4383,8 +4383,7 @@ prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
if (code_to_optab (swapped)
&& (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
{
- rtx tmp;
- tmp = x; x = y; y = tmp;
+ std::swap (x, y);
comparison = swapped;
break;
}
@@ -6967,11 +6966,7 @@ expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
if (BYTES_BIG_ENDIAN)
- {
- optab t = tab1;
- tab1 = tab2;
- tab2 = t;
- }
+ std::swap (tab1, tab2);
break;
default:
gcc_unreachable ();
diff --git a/gcc/reload1.c b/gcc/reload1.c
index a86084a8e29..57837a1fc90 100644
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -5612,11 +5612,7 @@ reloads_unique_chain_p (int r1, int r2)
/* The following loop assumes that r1 is the reload that feeds r2. */
if (r1 > r2)
- {
- int tmp = r2;
- r2 = r1;
- r1 = tmp;
- }
+ std::swap (r1, r2);
for (i = 0; i < n_reloads; i ++)
/* Look for input reloads that aren't our two */
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index bb55e3ddfa8..f9b7655af12 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -4414,51 +4414,17 @@ free_data_sets (basic_block bb)
free_av_set (bb);
}
-/* Exchange lv sets of TO and FROM. */
-static void
-exchange_lv_sets (basic_block to, basic_block from)
-{
- {
- regset to_lv_set = BB_LV_SET (to);
-
- BB_LV_SET (to) = BB_LV_SET (from);
- BB_LV_SET (from) = to_lv_set;
- }
-
- {
- bool to_lv_set_valid_p = BB_LV_SET_VALID_P (to);
-
- BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
- BB_LV_SET_VALID_P (from) = to_lv_set_valid_p;
- }
-}
-
-
-/* Exchange av sets of TO and FROM. */
-static void
-exchange_av_sets (basic_block to, basic_block from)
-{
- {
- av_set_t to_av_set = BB_AV_SET (to);
-
- BB_AV_SET (to) = BB_AV_SET (from);
- BB_AV_SET (from) = to_av_set;
- }
-
- {
- int to_av_level = BB_AV_LEVEL (to);
-
- BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
- BB_AV_LEVEL (from) = to_av_level;
- }
-}
-
/* Exchange data sets of TO and FROM. */
void
exchange_data_sets (basic_block to, basic_block from)
{
- exchange_lv_sets (to, from);
- exchange_av_sets (to, from);
+ /* Exchange lv sets of TO and FROM. */
+ std::swap (BB_LV_SET (from), BB_LV_SET (to));
+ std::swap (BB_LV_SET_VALID_P (from), BB_LV_SET_VALID_P (to));
+
+ /* Exchange av sets of TO and FROM. */
+ std::swap (BB_AV_SET (from), BB_AV_SET (to));
+ std::swap (BB_AV_LEVEL (from), BB_AV_LEVEL (to));
}
/* Copy data sets of FROM to TO. */
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index a5731e83595..521fecf566e 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -944,10 +944,7 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
- {
- rtx tem = in2;
- in2 = in1; in1 = tem;
- }
+ std::swap (in1, in2);
return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
mode, in1, in2);
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 1ca4b686ddc..666acc1b2d4 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,131 @@
+2015-06-22 Christian Bruel <christian.bruel@st.com>
+
+ PR target/52144
+ * gcc.target/arm/flip-thumb.c: New test.
+
+2015-06-22 Jan Hubicka <hubicka@ucw.cz>
+ Martin Liska <mliska@suse.cz>
+
+ PR ipa/65908
+ * g++.dg/ipa/pr65908.C: New testcase.
+
+2015-06-20 Mikhail Maltsev <maltsevm@gmail.com>
+
+ PR c++/65882
+ * g++.dg/diagnostic/inhibit-warn-1.C: New test.
+ * g++.dg/diagnostic/inhibit-warn-2.C: New test.
+
+2015-06-19 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gnat.dg/specs/debug1.ads: Adjust.
+
+2015-06-19 Jim Wilson <jim.wilson@linaro.org>
+
+ * gcc.target/aarch64/fmovd-zero-mem.c: New.
+ * gcc.target/aarch64/fmovd-zero-reg.c: New.
+ * gcc.target/aarch64/fmovf-zero-mem.c: New.
+ * gcc.target/aarch64/fmovf-zero-reg.c: New.
+ * gcc.target/aarch64/fmovld-zero-mem.c: New.
+	* gcc.target/aarch64/fmovld-zero-reg.c: New.
+ * gcc.target/aarch64/fmovd-zero.c: Delete.
+ * gcc.target/aarch64/fmovf-zero.c: Delete.
+
+2015-06-19 James Greenhalgh <james.greenhalgh@arm.com>
+
+ * gcc.target/vax/bswapdi-1.c: New.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vtst.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vtbX.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vstX_lane.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vst1_lane.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vsra_n.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vshrn_n.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vshl_n.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vshll_n.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vset_lane.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vrsra_n.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vrsqrts.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vrsqrte.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vrshrn_n.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vrshr_n.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vshr_n.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vrshl.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vrev.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vreinterpret.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/vrecps.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h
+ (_ARM_FPSCR): Add FZ field.
+ (clean_results): Force FZ=1 on AArch64.
+ * gcc.target/aarch64/advsimd-intrinsics/vrecpe.c: New file.
+
+2015-06-19 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/aarch64/pr62308.c: New test.
+
+2015-06-19 Mikael Morin <mikael@gcc.gnu.org>
+
+ PR fortran/66549
+ * gfortran.dg/gomp/omp_parallel_1.f90: New file.
+
2015-06-19 Ilya Enkovich <enkovich.gnu@gmail.com>
* gcc.target/i386/mpx/pr66581.c: New test.
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-nsdmi8.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-nsdmi8.C
new file mode 100644
index 00000000000..8c0adfad45f
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-nsdmi8.C
@@ -0,0 +1,15 @@
+// PR c++/66585
+// { dg-do compile { target c++11 } }
+
+class A {
+ template <typename, typename> using _Requires = int;
+
+public:
+ template <typename _Functor, typename = _Requires<_Functor, void>>
+ A(_Functor);
+};
+template <class T> class B {
+ A f = [](T) {};
+};
+
+B<int> a;
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-rep1.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-rep1.C
new file mode 100644
index 00000000000..a35060b0a52
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-rep1.C
@@ -0,0 +1,14 @@
+// PR c++/65843
+// { dg-do compile { target c++11 } }
+
+template<class T>
+void test(T b)
+{
+ const int a = b;
+ [&] () { return a, a; }();
+}
+
+int main() {
+ test(1);
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/cpp1y/constexpr-empty1.C b/gcc/testsuite/g++.dg/cpp1y/constexpr-empty1.C
new file mode 100644
index 00000000000..5be44eabdd0
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1y/constexpr-empty1.C
@@ -0,0 +1,6 @@
+// PR c++/65973
+// { dg-do compile { target c++14 } }
+
+class foo {
+ constexpr foo() noexcept { __func__; };
+};
diff --git a/gcc/testsuite/g++.dg/cpp1y/var-templ31.C b/gcc/testsuite/g++.dg/cpp1y/var-templ31.C
new file mode 100644
index 00000000000..e2bc59bd489
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1y/var-templ31.C
@@ -0,0 +1,8 @@
+// PR c++/66061
+// { dg-do compile { target c++14 } }
+
+template<int...>
+int x = 1;
+
+template<int n, int... m>
+int x<n, m...> = 1;
diff --git a/gcc/testsuite/g++.dg/diagnostic/inhibit-warn-1.C b/gcc/testsuite/g++.dg/diagnostic/inhibit-warn-1.C
new file mode 100644
index 00000000000..5655eb44fd3
--- /dev/null
+++ b/gcc/testsuite/g++.dg/diagnostic/inhibit-warn-1.C
@@ -0,0 +1,32 @@
+// PR c++/65882
+// { dg-do compile { target c++11 } }
+// { dg-options "-Wbool-compare" }
+
+// Check that we don't ICE because of reentering error reporting routines while
+// evaluating template parameters
+
+template<typename>
+struct type_function {
+ static constexpr bool value = false;
+};
+
+template<bool>
+struct dependent_type {
+ typedef int type;
+};
+
+template<typename T>
+typename dependent_type<(5 > type_function<T>::value)>::type
+bar();
+
+template<typename T>
+typename dependent_type<(5 > type_function<T>::value)>::type
+foo()
+{
+ return bar<int>();
+}
+
+int main()
+{
+ foo<int>();
+}
diff --git a/gcc/testsuite/g++.dg/diagnostic/inhibit-warn-2.C b/gcc/testsuite/g++.dg/diagnostic/inhibit-warn-2.C
new file mode 100644
index 00000000000..cb16b4cef8d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/diagnostic/inhibit-warn-2.C
@@ -0,0 +1,36 @@
+// PR c++/65882
+// PR c++/66467
+// { dg-do compile }
+
+template <bool>
+struct A
+{
+ typedef int type;
+};
+
+struct B
+{
+ static const int value = 0;
+};
+
+template <class>
+struct C
+{
+ typedef int type;
+};
+
+template <class>
+struct F : B {};
+
+class D
+{
+ template <class Expr>
+ typename A<F<typename C<Expr>::type>::value || B::value>::type
+ operator=(Expr); // { dg-message "declared" }
+};
+
+void fn1()
+{
+ D opt;
+ opt = 0; // { dg-error "private" }
+}
diff --git a/gcc/testsuite/g++.dg/ipa/pr65908.C b/gcc/testsuite/g++.dg/ipa/pr65908.C
new file mode 100644
index 00000000000..38730bd0b2d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ipa/pr65908.C
@@ -0,0 +1,27 @@
+// PR ipa/65908
+// { dg-do compile }
+// { dg-options "-O2" }
+// { dg-additional-options "-fPIC" { target fpic } }
+
+class A
+{
+ A (A &);
+};
+class B
+{
+ const A &m_fn1 () const;
+};
+class C
+{
+ A m_fn2 () const;
+};
+A
+C::m_fn2 () const
+{
+ throw 0;
+}
+const A &
+B::m_fn1 () const
+{
+ throw 0;
+}
diff --git a/gcc/testsuite/g++.dg/overload/pmf3.C b/gcc/testsuite/g++.dg/overload/pmf3.C
new file mode 100644
index 00000000000..a71f554441b
--- /dev/null
+++ b/gcc/testsuite/g++.dg/overload/pmf3.C
@@ -0,0 +1,70 @@
+// PR c++/65880
+
+class Test
+{
+ public:
+ Test();
+ ~Test();
+
+ bool barl(void);
+
+ private:
+ bool fool(bool (Test::* const *fms)(void));
+ bool foo(void);
+ bool bar(void);
+};
+
+Test::Test()
+{
+}
+
+Test::~Test()
+{
+}
+
+bool Test::fool(bool (Test::* const *fms)(void))
+{
+ bool retval = false;
+
+ int i = 0;
+ bool (Test::*f)(void) = fms[i++];
+
+ while (f) {
+ retval = (this->*f)();
+ if (retval) break;
+ f = fms[i++];
+ }
+
+ return retval;
+}
+
+
+bool Test::barl(void)
+{
+ static bool (Test::* const fms[])(void) = {
+ &Test::foo,
+ &Test::bar,
+ 0
+ };
+
+
+
+ return fool(fms);
+}
+
+
+bool Test::foo(void)
+{
+ return false;
+}
+
+bool Test::bar(void)
+{
+ return true;
+}
+
+int main(int argc, const char *argv[])
+{
+ Test t;
+ return t.barl();
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h
index 1742e996418..4e728d5572c 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h
@@ -235,7 +235,8 @@ extern ARRAY(expected, hfloat, 64, 2);
typedef union {
struct {
- int _xxx:25;
+ int _xxx:24;
+ unsigned int FZ:1;
unsigned int DN:1;
unsigned int AHP:1;
unsigned int QC:1;
@@ -258,7 +259,8 @@ typedef union {
unsigned int QC:1;
unsigned int AHP:1;
unsigned int DN:1;
- int _dnm:25;
+ unsigned int FZ:1;
+ int _dnm:24;
} b;
unsigned int word;
} _ARM_FPSCR;
@@ -395,10 +397,15 @@ static void clean_results (void)
#if defined(__aarch64__)
/* On AArch64, make sure to return DefaultNaN to have the same
results as on AArch32. */
- _ARM_FPSCR _afpscr_for_dn;
- asm volatile ("mrs %0,fpcr" : "=r" (_afpscr_for_dn));
- _afpscr_for_dn.b.DN = 1;
- asm volatile ("msr fpcr,%0" : : "r" (_afpscr_for_dn));
+ _ARM_FPSCR _afpscr;
+ asm volatile ("mrs %0,fpcr" : "=r" (_afpscr));
+ _afpscr.b.DN = 1;
+
+ /* On AArch64, make sure to flush to zero by default, as on
+ AArch32. */
+ _afpscr.b.FZ = 1;
+
+ asm volatile ("msr fpcr,%0" : : "r" (_afpscr));
#endif
}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecpe.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecpe.c
new file mode 100644
index 00000000000..55b45b7a3b9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecpe.c
@@ -0,0 +1,154 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+#include <math.h>
+
+/* Expected results with positive input. */
+VECT_VAR_DECL(expected_positive,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_positive,uint,32,4) [] = { 0xbf000000, 0xbf000000,
+ 0xbf000000, 0xbf000000 };
+VECT_VAR_DECL(expected_positive,hfloat,32,2) [] = { 0x3f068000, 0x3f068000 };
+VECT_VAR_DECL(expected_positive,hfloat,32,4) [] = { 0x3c030000, 0x3c030000,
+ 0x3c030000, 0x3c030000 };
+
+/* Expected results with negative input. */
+VECT_VAR_DECL(expected_negative,uint,32,2) [] = { 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_negative,uint,32,4) [] = { 0xee800000, 0xee800000,
+ 0xee800000, 0xee800000 };
+VECT_VAR_DECL(expected_negative,hfloat,32,2) [] = { 0xbdcc8000, 0xbdcc8000 };
+VECT_VAR_DECL(expected_negative,hfloat,32,4) [] = { 0xbc030000, 0xbc030000,
+ 0xbc030000, 0xbc030000 };
+
+/* Expected results with FP special values (NaN, infinity). */
+VECT_VAR_DECL(expected_fp1,hfloat,32,2) [] = { 0x7fc00000, 0x7fc00000 };
+VECT_VAR_DECL(expected_fp1,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Expected results with FP special values (zero, large value). */
+VECT_VAR_DECL(expected_fp2,hfloat,32,2) [] = { 0x7f800000, 0x7f800000 };
+VECT_VAR_DECL(expected_fp2,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Expected results with FP special values (-0, -infinity). */
+VECT_VAR_DECL(expected_fp3,hfloat,32,2) [] = { 0xff800000, 0xff800000 };
+VECT_VAR_DECL(expected_fp3,hfloat,32,4) [] = { 0x80000000, 0x80000000,
+ 0x80000000, 0x80000000 };
+
+/* Expected results with FP special large negative value. */
+VECT_VAR_DECL(expected_fp4,hfloat,32,2) [] = { 0x80000000, 0x80000000 };
+
+#define TEST_MSG "VRECPE/VRECPEQ"
+void exec_vrecpe(void)
+{
+ int i;
+
+ /* Basic test: y=vrecpe(x), then store the result. */
+#define TEST_VRECPE(Q, T1, T2, W, N) \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vrecpe##Q##_##T2##W(VECT_VAR(vector, T1, W, N)); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N))
+
+ /* No need for 64 bits variants. */
+ DECL_VARIABLE(vector, uint, 32, 2);
+ DECL_VARIABLE(vector, uint, 32, 4);
+ DECL_VARIABLE(vector, float, 32, 2);
+ DECL_VARIABLE(vector, float, 32, 4);
+
+ DECL_VARIABLE(vector_res, uint, 32, 2);
+ DECL_VARIABLE(vector_res, uint, 32, 4);
+ DECL_VARIABLE(vector_res, float, 32, 2);
+ DECL_VARIABLE(vector_res, float, 32, 4);
+
+ clean_results ();
+
+ /* Choose init value arbitrarily, positive. */
+ VDUP(vector, , uint, u, 32, 2, 0x12345678);
+ VDUP(vector, , float, f, 32, 2, 1.9f);
+ VDUP(vector, q, uint, u, 32, 4, 0xABCDEF10);
+ VDUP(vector, q, float, f, 32, 4, 125.0f);
+
+ /* Apply the operator. */
+ TEST_VRECPE(, uint, u, 32, 2);
+ TEST_VRECPE(, float, f, 32, 2);
+ TEST_VRECPE(q, uint, u, 32, 4);
+ TEST_VRECPE(q, float, f, 32, 4);
+
+#define CMT " (positive input)"
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_positive, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_positive, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_positive, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_positive, CMT);
+
+  /* Choose init value arbitrarily, negative. */
+ VDUP(vector, , uint, u, 32, 2, 0xFFFFFFFF);
+ VDUP(vector, , float, f, 32, 2, -10.0f);
+ VDUP(vector, q, uint, u, 32, 4, 0x89081234);
+ VDUP(vector, q, float, f, 32, 4, -125.0f);
+
+ /* Apply the operator. */
+ TEST_VRECPE(, uint, u, 32, 2);
+ TEST_VRECPE(, float, f, 32, 2);
+ TEST_VRECPE(q, uint, u, 32, 4);
+ TEST_VRECPE(q, float, f, 32, 4);
+
+#undef CMT
+#define CMT " (negative input)"
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_negative, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_negative, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_negative, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_negative, CMT);
+
+ /* Test FP variants with special input values (NaN, infinity). */
+ VDUP(vector, , float, f, 32, 2, NAN);
+ VDUP(vector, q, float, f, 32, 4, HUGE_VALF);
+
+ /* Apply the operator. */
+ TEST_VRECPE(, float, f, 32, 2);
+ TEST_VRECPE(q, float, f, 32, 4);
+
+#undef CMT
+#define CMT " FP special (NaN, infinity)"
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_fp1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_fp1, CMT);
+
+ /* Test FP variants with special input values (zero, large value). */
+ VDUP(vector, , float, f, 32, 2, 0.0f);
+ VDUP(vector, q, float, f, 32, 4, 8.97229e37f /*9.0e37f*/);
+
+ /* Apply the operator. */
+ TEST_VRECPE(, float, f, 32, 2);
+ TEST_VRECPE(q, float, f, 32, 4);
+
+#undef CMT
+#define CMT " FP special (zero, large value)"
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_fp2, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_fp2, CMT);
+
+ /* Test FP variants with special input values (-0, -infinity). */
+ VDUP(vector, , float, f, 32, 2, -0.0f);
+ VDUP(vector, q, float, f, 32, 4, -HUGE_VALF);
+
+ /* Apply the operator. */
+ TEST_VRECPE(, float, f, 32, 2);
+ TEST_VRECPE(q, float, f, 32, 4);
+
+#undef CMT
+#define CMT " FP special (-0, -infinity)"
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_fp3, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_fp3, CMT);
+
+ /* Test FP variants with special input values (large negative value). */
+ VDUP(vector, , float, f, 32, 2, -9.0e37f);
+
+ /* Apply the operator. */
+ TEST_VRECPE(, float, f, 32, 2);
+
+#undef CMT
+#define CMT " FP special (large negative value)"
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_fp4, CMT);
+}
+
+int main (void)
+{
+ exec_vrecpe ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecps.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecps.c
new file mode 100644
index 00000000000..0e41947f6d5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrecps.c
@@ -0,0 +1,117 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+#include <math.h>
+
+/* Expected results with positive input. */
+VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0xc2e19eb7, 0xc2e19eb7 };
+VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0xc1db851f, 0xc1db851f,
+ 0xc1db851f, 0xc1db851f };
+
+/* Expected results with FP special values (NaN). */
+VECT_VAR_DECL(expected_fp1,hfloat,32,2) [] = { 0x7fc00000, 0x7fc00000 };
+VECT_VAR_DECL(expected_fp1,hfloat,32,4) [] = { 0x7fc00000, 0x7fc00000,
+ 0x7fc00000, 0x7fc00000 };
+
+/* Expected results with FP special values (infinity, 0) and normal
+ values. */
+VECT_VAR_DECL(expected_fp2,hfloat,32,2) [] = { 0xff800000, 0xff800000 };
+VECT_VAR_DECL(expected_fp2,hfloat,32,4) [] = { 0x40000000, 0x40000000,
+ 0x40000000, 0x40000000 };
+
+/* Expected results with FP special values (infinity, 0). */
+VECT_VAR_DECL(expected_fp3,hfloat,32,2) [] = { 0x40000000, 0x40000000 };
+VECT_VAR_DECL(expected_fp3,hfloat,32,4) [] = { 0x40000000, 0x40000000,
+ 0x40000000, 0x40000000 };
+
+#define TEST_MSG "VRECPS/VRECPSQ"
+void exec_vrecps(void)
+{
+ int i;
+
+ /* Basic test: y=vrecps(x), then store the result. */
+#define TEST_VRECPS(Q, T1, T2, W, N) \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vrecps##Q##_##T2##W(VECT_VAR(vector, T1, W, N), \
+ VECT_VAR(vector2, T1, W, N)); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N))
+
+ /* No need for integer variants. */
+ DECL_VARIABLE(vector, float, 32, 2);
+ DECL_VARIABLE(vector, float, 32, 4);
+
+ DECL_VARIABLE(vector2, float, 32, 2);
+ DECL_VARIABLE(vector2, float, 32, 4);
+
+ DECL_VARIABLE(vector_res, float, 32, 2);
+ DECL_VARIABLE(vector_res, float, 32, 4);
+
+ clean_results ();
+
+ /* Choose init value arbitrarily. */
+ VDUP(vector, , float, f, 32, 2, 12.9f);
+ VDUP(vector, q, float, f, 32, 4, 9.2f);
+
+ VDUP(vector2, , float, f, 32, 2, 8.9f);
+ VDUP(vector2, q, float, f, 32, 4, 3.2f);
+
+ /* Apply the operator. */
+ TEST_VRECPS(, float, f, 32, 2);
+ TEST_VRECPS(q, float, f, 32, 4);
+
+#define CMT " (positive input)"
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected, CMT);
+
+
+ /* Test FP variants with special input values (NaN). */
+ VDUP(vector, , float, f, 32, 2, NAN);
+ VDUP(vector2, q, float, f, 32, 4, NAN);
+
+ /* Apply the operator. */
+ TEST_VRECPS(, float, f, 32, 2);
+ TEST_VRECPS(q, float, f, 32, 4);
+
+#undef CMT
+#define CMT " FP special (NaN)"
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_fp1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_fp1, CMT);
+
+
+ /* Test FP variants with special input values (infinity, 0). */
+ VDUP(vector, , float, f, 32, 2, HUGE_VALF);
+ VDUP(vector, q, float, f, 32, 4, 0.0f);
+ VDUP(vector2, q, float, f, 32, 4, 3.2f); /* Restore a normal value. */
+
+ /* Apply the operator. */
+ TEST_VRECPS(, float, f, 32, 2);
+ TEST_VRECPS(q, float, f, 32, 4);
+
+#undef CMT
+#define CMT " FP special (infinity, 0) and normal value"
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_fp2, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_fp2, CMT);
+
+
+ /* Test FP variants with only special input values (infinity, 0). */
+ VDUP(vector, , float, f, 32, 2, HUGE_VALF);
+ VDUP(vector, q, float, f, 32, 4, 0.0f);
+ VDUP(vector2, , float, f, 32, 2, 0.0f);
+ VDUP(vector2, q, float, f, 32, 4, HUGE_VALF);
+
+  /* Apply the operator. */
+ TEST_VRECPS(, float, f, 32, 2);
+ TEST_VRECPS(q, float, f, 32, 4);
+
+#undef CMT
+#define CMT " FP special (infinity, 0)"
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_fp3, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_fp3, CMT);
+}
+
+int main (void)
+{
+ exec_vrecps ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vreinterpret.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vreinterpret.c
new file mode 100644
index 00000000000..9e45e25cc3a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vreinterpret.c
@@ -0,0 +1,741 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results for vreinterpret_s8_xx. */
+VECT_VAR_DECL(expected_s8_1,int,8,8) [] = { 0xf0, 0xff, 0xf1, 0xff,
+ 0xf2, 0xff, 0xf3, 0xff };
+VECT_VAR_DECL(expected_s8_2,int,8,8) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xf1, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_s8_3,int,8,8) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_s8_4,int,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7 };
+VECT_VAR_DECL(expected_s8_5,int,8,8) [] = { 0xf0, 0xff, 0xf1, 0xff,
+ 0xf2, 0xff, 0xf3, 0xff };
+VECT_VAR_DECL(expected_s8_6,int,8,8) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xf1, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_s8_7,int,8,8) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_s8_8,int,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7 };
+VECT_VAR_DECL(expected_s8_9,int,8,8) [] = { 0xf0, 0xff, 0xf1, 0xff,
+ 0xf2, 0xff, 0xf3, 0xff };
+
+/* Expected results for vreinterpret_s16_xx. */
+VECT_VAR_DECL(expected_s16_1,int,16,4) [] = { 0xf1f0, 0xf3f2, 0xf5f4, 0xf7f6 };
+VECT_VAR_DECL(expected_s16_2,int,16,4) [] = { 0xfff0, 0xffff, 0xfff1, 0xffff };
+VECT_VAR_DECL(expected_s16_3,int,16,4) [] = { 0xfff0, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_s16_4,int,16,4) [] = { 0xf1f0, 0xf3f2, 0xf5f4, 0xf7f6 };
+VECT_VAR_DECL(expected_s16_5,int,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+VECT_VAR_DECL(expected_s16_6,int,16,4) [] = { 0xfff0, 0xffff, 0xfff1, 0xffff };
+VECT_VAR_DECL(expected_s16_7,int,16,4) [] = { 0xfff0, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_s16_8,int,16,4) [] = { 0xf1f0, 0xf3f2, 0xf5f4, 0xf7f6 };
+VECT_VAR_DECL(expected_s16_9,int,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+
+/* Expected results for vreinterpret_s32_xx. */
+VECT_VAR_DECL(expected_s32_1,int,32,2) [] = { 0xf3f2f1f0, 0xf7f6f5f4 };
+VECT_VAR_DECL(expected_s32_2,int,32,2) [] = { 0xfff1fff0, 0xfff3fff2 };
+VECT_VAR_DECL(expected_s32_3,int,32,2) [] = { 0xfffffff0, 0xffffffff };
+VECT_VAR_DECL(expected_s32_4,int,32,2) [] = { 0xf3f2f1f0, 0xf7f6f5f4 };
+VECT_VAR_DECL(expected_s32_5,int,32,2) [] = { 0xfff1fff0, 0xfff3fff2 };
+VECT_VAR_DECL(expected_s32_6,int,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_s32_7,int,32,2) [] = { 0xfffffff0, 0xffffffff };
+VECT_VAR_DECL(expected_s32_8,int,32,2) [] = { 0xf3f2f1f0, 0xf7f6f5f4 };
+VECT_VAR_DECL(expected_s32_9,int,32,2) [] = { 0xfff1fff0, 0xfff3fff2 };
+
+/* Expected results for vreinterpret_s64_xx. */
+VECT_VAR_DECL(expected_s64_1,int,64,1) [] = { 0xf7f6f5f4f3f2f1f0 };
+VECT_VAR_DECL(expected_s64_2,int,64,1) [] = { 0xfff3fff2fff1fff0 };
+VECT_VAR_DECL(expected_s64_3,int,64,1) [] = { 0xfffffff1fffffff0 };
+VECT_VAR_DECL(expected_s64_4,int,64,1) [] = { 0xf7f6f5f4f3f2f1f0 };
+VECT_VAR_DECL(expected_s64_5,int,64,1) [] = { 0xfff3fff2fff1fff0 };
+VECT_VAR_DECL(expected_s64_6,int,64,1) [] = { 0xfffffff1fffffff0 };
+VECT_VAR_DECL(expected_s64_7,int,64,1) [] = { 0xfffffffffffffff0 };
+VECT_VAR_DECL(expected_s64_8,int,64,1) [] = { 0xf7f6f5f4f3f2f1f0 };
+VECT_VAR_DECL(expected_s64_9,int,64,1) [] = { 0xfff3fff2fff1fff0 };
+
+/* Expected results for vreinterpret_u8_xx. */
+VECT_VAR_DECL(expected_u8_1,uint,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7 };
+VECT_VAR_DECL(expected_u8_2,uint,8,8) [] = { 0xf0, 0xff, 0xf1, 0xff,
+ 0xf2, 0xff, 0xf3, 0xff };
+VECT_VAR_DECL(expected_u8_3,uint,8,8) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xf1, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_u8_4,uint,8,8) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_u8_5,uint,8,8) [] = { 0xf0, 0xff, 0xf1, 0xff,
+ 0xf2, 0xff, 0xf3, 0xff };
+VECT_VAR_DECL(expected_u8_6,uint,8,8) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xf1, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_u8_7,uint,8,8) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_u8_8,uint,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7 };
+VECT_VAR_DECL(expected_u8_9,uint,8,8) [] = { 0xf0, 0xff, 0xf1, 0xff,
+ 0xf2, 0xff, 0xf3, 0xff };
+
+/* Expected results for vreinterpret_u16_xx. */
+VECT_VAR_DECL(expected_u16_1,uint,16,4) [] = { 0xf1f0, 0xf3f2, 0xf5f4, 0xf7f6 };
+VECT_VAR_DECL(expected_u16_2,uint,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+VECT_VAR_DECL(expected_u16_3,uint,16,4) [] = { 0xfff0, 0xffff, 0xfff1, 0xffff };
+VECT_VAR_DECL(expected_u16_4,uint,16,4) [] = { 0xfff0, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_u16_5,uint,16,4) [] = { 0xf1f0, 0xf3f2, 0xf5f4, 0xf7f6 };
+VECT_VAR_DECL(expected_u16_6,uint,16,4) [] = { 0xfff0, 0xffff, 0xfff1, 0xffff };
+VECT_VAR_DECL(expected_u16_7,uint,16,4) [] = { 0xfff0, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_u16_8,uint,16,4) [] = { 0xf1f0, 0xf3f2, 0xf5f4, 0xf7f6 };
+VECT_VAR_DECL(expected_u16_9,uint,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+
+/* Expected results for vreinterpret_u32_xx. */
+VECT_VAR_DECL(expected_u32_1,uint,32,2) [] = { 0xf3f2f1f0, 0xf7f6f5f4 };
+VECT_VAR_DECL(expected_u32_2,uint,32,2) [] = { 0xfff1fff0, 0xfff3fff2 };
+VECT_VAR_DECL(expected_u32_3,uint,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_u32_4,uint,32,2) [] = { 0xfffffff0, 0xffffffff };
+VECT_VAR_DECL(expected_u32_5,uint,32,2) [] = { 0xf3f2f1f0, 0xf7f6f5f4 };
+VECT_VAR_DECL(expected_u32_6,uint,32,2) [] = { 0xfff1fff0, 0xfff3fff2 };
+VECT_VAR_DECL(expected_u32_7,uint,32,2) [] = { 0xfffffff0, 0xffffffff };
+VECT_VAR_DECL(expected_u32_8,uint,32,2) [] = { 0xf3f2f1f0, 0xf7f6f5f4 };
+VECT_VAR_DECL(expected_u32_9,uint,32,2) [] = { 0xfff1fff0, 0xfff3fff2 };
+
+/* Expected results for vreinterpret_u64_xx. */
+VECT_VAR_DECL(expected_u64_1,uint,64,1) [] = { 0xf7f6f5f4f3f2f1f0 };
+VECT_VAR_DECL(expected_u64_2,uint,64,1) [] = { 0xfff3fff2fff1fff0 };
+VECT_VAR_DECL(expected_u64_3,uint,64,1) [] = { 0xfffffff1fffffff0 };
+VECT_VAR_DECL(expected_u64_4,uint,64,1) [] = { 0xfffffffffffffff0 };
+VECT_VAR_DECL(expected_u64_5,uint,64,1) [] = { 0xf7f6f5f4f3f2f1f0 };
+VECT_VAR_DECL(expected_u64_6,uint,64,1) [] = { 0xfff3fff2fff1fff0 };
+VECT_VAR_DECL(expected_u64_7,uint,64,1) [] = { 0xfffffff1fffffff0 };
+VECT_VAR_DECL(expected_u64_8,uint,64,1) [] = { 0xf7f6f5f4f3f2f1f0 };
+VECT_VAR_DECL(expected_u64_9,uint,64,1) [] = { 0xfff3fff2fff1fff0 };
+
+/* Expected results for vreinterpret_p8_xx. */
+VECT_VAR_DECL(expected_p8_1,poly,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7 };
+VECT_VAR_DECL(expected_p8_2,poly,8,8) [] = { 0xf0, 0xff, 0xf1, 0xff,
+ 0xf2, 0xff, 0xf3, 0xff };
+VECT_VAR_DECL(expected_p8_3,poly,8,8) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xf1, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_p8_4,poly,8,8) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_p8_5,poly,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7 };
+VECT_VAR_DECL(expected_p8_6,poly,8,8) [] = { 0xf0, 0xff, 0xf1, 0xff,
+ 0xf2, 0xff, 0xf3, 0xff };
+VECT_VAR_DECL(expected_p8_7,poly,8,8) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xf1, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_p8_8,poly,8,8) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_p8_9,poly,8,8) [] = { 0xf0, 0xff, 0xf1, 0xff,
+ 0xf2, 0xff, 0xf3, 0xff };
+
+/* Expected results for vreinterpret_p16_xx. */
+VECT_VAR_DECL(expected_p16_1,poly,16,4) [] = { 0xf1f0, 0xf3f2, 0xf5f4, 0xf7f6 };
+VECT_VAR_DECL(expected_p16_2,poly,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+VECT_VAR_DECL(expected_p16_3,poly,16,4) [] = { 0xfff0, 0xffff, 0xfff1, 0xffff };
+VECT_VAR_DECL(expected_p16_4,poly,16,4) [] = { 0xfff0, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_p16_5,poly,16,4) [] = { 0xf1f0, 0xf3f2, 0xf5f4, 0xf7f6 };
+VECT_VAR_DECL(expected_p16_6,poly,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+VECT_VAR_DECL(expected_p16_7,poly,16,4) [] = { 0xfff0, 0xffff, 0xfff1, 0xffff };
+VECT_VAR_DECL(expected_p16_8,poly,16,4) [] = { 0xfff0, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_p16_9,poly,16,4) [] = { 0xf1f0, 0xf3f2, 0xf5f4, 0xf7f6 };
+
+/* Expected results for vreinterpretq_s8_xx. */
+VECT_VAR_DECL(expected_q_s8_1,int,8,16) [] = { 0xf0, 0xff, 0xf1, 0xff,
+ 0xf2, 0xff, 0xf3, 0xff,
+ 0xf4, 0xff, 0xf5, 0xff,
+ 0xf6, 0xff, 0xf7, 0xff };
+VECT_VAR_DECL(expected_q_s8_2,int,8,16) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xf1, 0xff, 0xff, 0xff,
+ 0xf2, 0xff, 0xff, 0xff,
+ 0xf3, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_q_s8_3,int,8,16) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xf1, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_q_s8_4,int,8,16) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb,
+ 0xfc, 0xfd, 0xfe, 0xff };
+VECT_VAR_DECL(expected_q_s8_5,int,8,16) [] = { 0xf0, 0xff, 0xf1, 0xff,
+ 0xf2, 0xff, 0xf3, 0xff,
+ 0xf4, 0xff, 0xf5, 0xff,
+ 0xf6, 0xff, 0xf7, 0xff };
+VECT_VAR_DECL(expected_q_s8_6,int,8,16) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xf1, 0xff, 0xff, 0xff,
+ 0xf2, 0xff, 0xff, 0xff,
+ 0xf3, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_q_s8_7,int,8,16) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xf1, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_q_s8_8,int,8,16) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb,
+ 0xfc, 0xfd, 0xfe, 0xff };
+VECT_VAR_DECL(expected_q_s8_9,int,8,16) [] = { 0xf0, 0xff, 0xf1, 0xff,
+ 0xf2, 0xff, 0xf3, 0xff,
+ 0xf4, 0xff, 0xf5, 0xff,
+ 0xf6, 0xff, 0xf7, 0xff };
+
+/* Expected results for vreinterpretq_s16_xx. */
+VECT_VAR_DECL(expected_q_s16_1,int,16,8) [] = { 0xf1f0, 0xf3f2,
+ 0xf5f4, 0xf7f6,
+ 0xf9f8, 0xfbfa,
+ 0xfdfc, 0xfffe };
+VECT_VAR_DECL(expected_q_s16_2,int,16,8) [] = { 0xfff0, 0xffff,
+ 0xfff1, 0xffff,
+ 0xfff2, 0xffff,
+ 0xfff3, 0xffff };
+VECT_VAR_DECL(expected_q_s16_3,int,16,8) [] = { 0xfff0, 0xffff,
+ 0xffff, 0xffff,
+ 0xfff1, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_q_s16_4,int,16,8) [] = { 0xf1f0, 0xf3f2,
+ 0xf5f4, 0xf7f6,
+ 0xf9f8, 0xfbfa,
+ 0xfdfc, 0xfffe };
+VECT_VAR_DECL(expected_q_s16_5,int,16,8) [] = { 0xfff0, 0xfff1,
+ 0xfff2, 0xfff3,
+ 0xfff4, 0xfff5,
+ 0xfff6, 0xfff7 };
+VECT_VAR_DECL(expected_q_s16_6,int,16,8) [] = { 0xfff0, 0xffff,
+ 0xfff1, 0xffff,
+ 0xfff2, 0xffff,
+ 0xfff3, 0xffff };
+VECT_VAR_DECL(expected_q_s16_7,int,16,8) [] = { 0xfff0, 0xffff,
+ 0xffff, 0xffff,
+ 0xfff1, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_q_s16_8,int,16,8) [] = { 0xf1f0, 0xf3f2,
+ 0xf5f4, 0xf7f6,
+ 0xf9f8, 0xfbfa,
+ 0xfdfc, 0xfffe };
+VECT_VAR_DECL(expected_q_s16_9,int,16,8) [] = { 0xfff0, 0xfff1,
+ 0xfff2, 0xfff3,
+ 0xfff4, 0xfff5,
+ 0xfff6, 0xfff7 };
+
+/* Expected results for vreinterpretq_s32_xx. */
+VECT_VAR_DECL(expected_q_s32_1,int,32,4) [] = { 0xf3f2f1f0, 0xf7f6f5f4,
+ 0xfbfaf9f8, 0xfffefdfc };
+VECT_VAR_DECL(expected_q_s32_2,int,32,4) [] = { 0xfff1fff0, 0xfff3fff2,
+ 0xfff5fff4, 0xfff7fff6 };
+VECT_VAR_DECL(expected_q_s32_3,int,32,4) [] = { 0xfffffff0, 0xffffffff,
+ 0xfffffff1, 0xffffffff };
+VECT_VAR_DECL(expected_q_s32_4,int,32,4) [] = { 0xf3f2f1f0, 0xf7f6f5f4,
+ 0xfbfaf9f8, 0xfffefdfc };
+VECT_VAR_DECL(expected_q_s32_5,int,32,4) [] = { 0xfff1fff0, 0xfff3fff2,
+ 0xfff5fff4, 0xfff7fff6 };
+VECT_VAR_DECL(expected_q_s32_6,int,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0xfffffff2, 0xfffffff3 };
+VECT_VAR_DECL(expected_q_s32_7,int,32,4) [] = { 0xfffffff0, 0xffffffff,
+ 0xfffffff1, 0xffffffff };
+VECT_VAR_DECL(expected_q_s32_8,int,32,4) [] = { 0xf3f2f1f0, 0xf7f6f5f4,
+ 0xfbfaf9f8, 0xfffefdfc };
+VECT_VAR_DECL(expected_q_s32_9,int,32,4) [] = { 0xfff1fff0, 0xfff3fff2,
+ 0xfff5fff4, 0xfff7fff6 };
+
+/* Expected results for vreinterpretq_s64_xx. */
+VECT_VAR_DECL(expected_q_s64_1,int,64,2) [] = { 0xf7f6f5f4f3f2f1f0,
+ 0xfffefdfcfbfaf9f8 };
+VECT_VAR_DECL(expected_q_s64_2,int,64,2) [] = { 0xfff3fff2fff1fff0,
+ 0xfff7fff6fff5fff4 };
+VECT_VAR_DECL(expected_q_s64_3,int,64,2) [] = { 0xfffffff1fffffff0,
+ 0xfffffff3fffffff2 };
+VECT_VAR_DECL(expected_q_s64_4,int,64,2) [] = { 0xf7f6f5f4f3f2f1f0,
+ 0xfffefdfcfbfaf9f8 };
+VECT_VAR_DECL(expected_q_s64_5,int,64,2) [] = { 0xfff3fff2fff1fff0,
+ 0xfff7fff6fff5fff4 };
+VECT_VAR_DECL(expected_q_s64_6,int,64,2) [] = { 0xfffffff1fffffff0,
+ 0xfffffff3fffffff2 };
+VECT_VAR_DECL(expected_q_s64_7,int,64,2) [] = { 0xfffffffffffffff0,
+ 0xfffffffffffffff1 };
+VECT_VAR_DECL(expected_q_s64_8,int,64,2) [] = { 0xf7f6f5f4f3f2f1f0,
+ 0xfffefdfcfbfaf9f8 };
+VECT_VAR_DECL(expected_q_s64_9,int,64,2) [] = { 0xfff3fff2fff1fff0,
+ 0xfff7fff6fff5fff4 };
+
+/* Expected results for vreinterpretq_u8_xx. */
+VECT_VAR_DECL(expected_q_u8_1,uint,8,16) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb,
+ 0xfc, 0xfd, 0xfe, 0xff };
+VECT_VAR_DECL(expected_q_u8_2,uint,8,16) [] = { 0xf0, 0xff, 0xf1, 0xff,
+ 0xf2, 0xff, 0xf3, 0xff,
+ 0xf4, 0xff, 0xf5, 0xff,
+ 0xf6, 0xff, 0xf7, 0xff };
+VECT_VAR_DECL(expected_q_u8_3,uint,8,16) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xf1, 0xff, 0xff, 0xff,
+ 0xf2, 0xff, 0xff, 0xff,
+ 0xf3, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_q_u8_4,uint,8,16) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xf1, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_q_u8_5,uint,8,16) [] = { 0xf0, 0xff, 0xf1, 0xff,
+ 0xf2, 0xff, 0xf3, 0xff,
+ 0xf4, 0xff, 0xf5, 0xff,
+ 0xf6, 0xff, 0xf7, 0xff };
+VECT_VAR_DECL(expected_q_u8_6,uint,8,16) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xf1, 0xff, 0xff, 0xff,
+ 0xf2, 0xff, 0xff, 0xff,
+ 0xf3, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_q_u8_7,uint,8,16) [] = { 0xf0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xf1, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_q_u8_8,uint,8,16) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb,
+ 0xfc, 0xfd, 0xfe, 0xff };
+VECT_VAR_DECL(expected_q_u8_9,uint,8,16) [] = { 0xf0, 0xff, 0xf1, 0xff,
+ 0xf2, 0xff, 0xf3, 0xff,
+ 0xf4, 0xff, 0xf5, 0xff,
+ 0xf6, 0xff, 0xf7, 0xff };
+
+/* Expected results for vreinterpretq_u16_xx. */
+VECT_VAR_DECL(expected_q_u16_1,uint,16,8) [] = { 0xf1f0, 0xf3f2,
+ 0xf5f4, 0xf7f6,
+ 0xf9f8, 0xfbfa,
+ 0xfdfc, 0xfffe };
+VECT_VAR_DECL(expected_q_u16_2,uint,16,8) [] = { 0xfff0, 0xfff1,
+ 0xfff2, 0xfff3,
+ 0xfff4, 0xfff5,
+ 0xfff6, 0xfff7 };
+VECT_VAR_DECL(expected_q_u16_3,uint,16,8) [] = { 0xfff0, 0xffff,
+ 0xfff1, 0xffff,
+ 0xfff2, 0xffff,
+ 0xfff3, 0xffff };
+VECT_VAR_DECL(expected_q_u16_4,uint,16,8) [] = { 0xfff0, 0xffff,
+ 0xffff, 0xffff,
+ 0xfff1, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_q_u16_5,uint,16,8) [] = { 0xf1f0, 0xf3f2,
+ 0xf5f4, 0xf7f6,
+ 0xf9f8, 0xfbfa,
+ 0xfdfc, 0xfffe };
+VECT_VAR_DECL(expected_q_u16_6,uint,16,8) [] = { 0xfff0, 0xffff,
+ 0xfff1, 0xffff,
+ 0xfff2, 0xffff,
+ 0xfff3, 0xffff };
+VECT_VAR_DECL(expected_q_u16_7,uint,16,8) [] = { 0xfff0, 0xffff,
+ 0xffff, 0xffff,
+ 0xfff1, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_q_u16_8,uint,16,8) [] = { 0xf1f0, 0xf3f2,
+ 0xf5f4, 0xf7f6,
+ 0xf9f8, 0xfbfa,
+ 0xfdfc, 0xfffe };
+VECT_VAR_DECL(expected_q_u16_9,uint,16,8) [] = { 0xfff0, 0xfff1,
+ 0xfff2, 0xfff3,
+ 0xfff4, 0xfff5,
+ 0xfff6, 0xfff7 };
+
+/* Expected results for vreinterpretq_u32_xx. */
+VECT_VAR_DECL(expected_q_u32_1,uint,32,4) [] = { 0xf3f2f1f0, 0xf7f6f5f4,
+ 0xfbfaf9f8, 0xfffefdfc };
+VECT_VAR_DECL(expected_q_u32_2,uint,32,4) [] = { 0xfff1fff0, 0xfff3fff2,
+ 0xfff5fff4, 0xfff7fff6 };
+VECT_VAR_DECL(expected_q_u32_3,uint,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0xfffffff2, 0xfffffff3 };
+VECT_VAR_DECL(expected_q_u32_4,uint,32,4) [] = { 0xfffffff0, 0xffffffff,
+ 0xfffffff1, 0xffffffff };
+VECT_VAR_DECL(expected_q_u32_5,uint,32,4) [] = { 0xf3f2f1f0, 0xf7f6f5f4,
+ 0xfbfaf9f8, 0xfffefdfc };
+VECT_VAR_DECL(expected_q_u32_6,uint,32,4) [] = { 0xfff1fff0, 0xfff3fff2,
+ 0xfff5fff4, 0xfff7fff6 };
+VECT_VAR_DECL(expected_q_u32_7,uint,32,4) [] = { 0xfffffff0, 0xffffffff,
+ 0xfffffff1, 0xffffffff };
+VECT_VAR_DECL(expected_q_u32_8,uint,32,4) [] = { 0xf3f2f1f0, 0xf7f6f5f4,
+ 0xfbfaf9f8, 0xfffefdfc };
+VECT_VAR_DECL(expected_q_u32_9,uint,32,4) [] = { 0xfff1fff0, 0xfff3fff2,
+ 0xfff5fff4, 0xfff7fff6 };
+
+/* Expected results for vreinterpretq_u64_xx. */
+VECT_VAR_DECL(expected_q_u64_1,uint,64,2) [] = { 0xf7f6f5f4f3f2f1f0,
+ 0xfffefdfcfbfaf9f8 };
+VECT_VAR_DECL(expected_q_u64_2,uint,64,2) [] = { 0xfff3fff2fff1fff0,
+ 0xfff7fff6fff5fff4 };
+VECT_VAR_DECL(expected_q_u64_3,uint,64,2) [] = { 0xfffffff1fffffff0,
+ 0xfffffff3fffffff2 };
+VECT_VAR_DECL(expected_q_u64_4,uint,64,2) [] = { 0xfffffffffffffff0,
+ 0xfffffffffffffff1 };
+VECT_VAR_DECL(expected_q_u64_5,uint,64,2) [] = { 0xf7f6f5f4f3f2f1f0,
+ 0xfffefdfcfbfaf9f8 };
+VECT_VAR_DECL(expected_q_u64_6,uint,64,2) [] = { 0xfff3fff2fff1fff0,
+ 0xfff7fff6fff5fff4 };
+VECT_VAR_DECL(expected_q_u64_7,uint,64,2) [] = { 0xfffffff1fffffff0,
+ 0xfffffff3fffffff2 };
+VECT_VAR_DECL(expected_q_u64_8,uint,64,2) [] = { 0xf7f6f5f4f3f2f1f0,
+ 0xfffefdfcfbfaf9f8 };
+VECT_VAR_DECL(expected_q_u64_9,uint,64,2) [] = { 0xfff3fff2fff1fff0,
+ 0xfff7fff6fff5fff4 };
+
+/* Expected results for vreinterpret_f32_xx. */
+VECT_VAR_DECL(expected_f32_1,hfloat,32,2) [] = { 0xf3f2f1f0, 0xf7f6f5f4 };
+VECT_VAR_DECL(expected_f32_2,hfloat,32,2) [] = { 0xfff1fff0, 0xfff3fff2 };
+VECT_VAR_DECL(expected_f32_3,hfloat,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_f32_4,hfloat,32,2) [] = { 0xfffffff0, 0xffffffff };
+VECT_VAR_DECL(expected_f32_5,hfloat,32,2) [] = { 0xf3f2f1f0, 0xf7f6f5f4 };
+VECT_VAR_DECL(expected_f32_6,hfloat,32,2) [] = { 0xfff1fff0, 0xfff3fff2 };
+VECT_VAR_DECL(expected_f32_7,hfloat,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_f32_8,hfloat,32,2) [] = { 0xfffffff0, 0xffffffff };
+VECT_VAR_DECL(expected_f32_9,hfloat,32,2) [] = { 0xf3f2f1f0, 0xf7f6f5f4 };
+VECT_VAR_DECL(expected_f32_10,hfloat,32,2) [] = { 0xfff1fff0, 0xfff3fff2 };
+
+/* Expected results for vreinterpretq_f32_xx. */
+VECT_VAR_DECL(expected_q_f32_1,hfloat,32,4) [] = { 0xf3f2f1f0, 0xf7f6f5f4,
+ 0xfbfaf9f8, 0xfffefdfc };
+VECT_VAR_DECL(expected_q_f32_2,hfloat,32,4) [] = { 0xfff1fff0, 0xfff3fff2,
+ 0xfff5fff4, 0xfff7fff6 };
+VECT_VAR_DECL(expected_q_f32_3,hfloat,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0xfffffff2, 0xfffffff3 };
+VECT_VAR_DECL(expected_q_f32_4,hfloat,32,4) [] = { 0xfffffff0, 0xffffffff,
+ 0xfffffff1, 0xffffffff };
+VECT_VAR_DECL(expected_q_f32_5,hfloat,32,4) [] = { 0xf3f2f1f0, 0xf7f6f5f4,
+ 0xfbfaf9f8, 0xfffefdfc };
+VECT_VAR_DECL(expected_q_f32_6,hfloat,32,4) [] = { 0xfff1fff0, 0xfff3fff2,
+ 0xfff5fff4, 0xfff7fff6 };
+VECT_VAR_DECL(expected_q_f32_7,hfloat,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0xfffffff2, 0xfffffff3 };
+VECT_VAR_DECL(expected_q_f32_8,hfloat,32,4) [] = { 0xfffffff0, 0xffffffff,
+ 0xfffffff1, 0xffffffff };
+VECT_VAR_DECL(expected_q_f32_9,hfloat,32,4) [] = { 0xf3f2f1f0, 0xf7f6f5f4,
+ 0xfbfaf9f8, 0xfffefdfc };
+VECT_VAR_DECL(expected_q_f32_10,hfloat,32,4) [] = { 0xfff1fff0, 0xfff3fff2,
+ 0xfff5fff4, 0xfff7fff6 };
+
+/* Expected results for vreinterpretq_xx_f32. */
+VECT_VAR_DECL(expected_xx_f32_1,int,8,8) [] = { 0x0, 0x0, 0x80, 0xc1,
+ 0x0, 0x0, 0x70, 0xc1 };
+VECT_VAR_DECL(expected_xx_f32_2,int,16,4) [] = { 0x0, 0xc180, 0x0, 0xc170 };
+VECT_VAR_DECL(expected_xx_f32_3,int,32,2) [] = { 0xc1800000, 0xc1700000 };
+VECT_VAR_DECL(expected_xx_f32_4,int,64,1) [] = { 0xc1700000c1800000 };
+VECT_VAR_DECL(expected_xx_f32_5,uint,8,8) [] = { 0x0, 0x0, 0x80, 0xc1,
+ 0x0, 0x0, 0x70, 0xc1 };
+VECT_VAR_DECL(expected_xx_f32_6,uint,16,4) [] = { 0x0, 0xc180, 0x0, 0xc170 };
+VECT_VAR_DECL(expected_xx_f32_7,uint,32,2) [] = { 0xc1800000, 0xc1700000 };
+VECT_VAR_DECL(expected_xx_f32_8,uint,64,1) [] = { 0xc1700000c1800000 };
+VECT_VAR_DECL(expected_xx_f32_9,poly,8,8) [] = { 0x0, 0x0, 0x80, 0xc1,
+ 0x0, 0x0, 0x70, 0xc1 };
+VECT_VAR_DECL(expected_xx_f32_10,poly,16,4) [] = { 0x0, 0xc180, 0x0, 0xc170 };
+
+/* Expected results for vreinterpretq_xx_f32. */
+VECT_VAR_DECL(expected_q_xx_f32_1,int,8,16) [] = { 0x0, 0x0, 0x80, 0xc1,
+ 0x0, 0x0, 0x70, 0xc1,
+ 0x0, 0x0, 0x60, 0xc1,
+ 0x0, 0x0, 0x50, 0xc1 };
+VECT_VAR_DECL(expected_q_xx_f32_2,int,16,8) [] = { 0x0, 0xc180, 0x0, 0xc170,
+ 0x0, 0xc160, 0x0, 0xc150 };
+VECT_VAR_DECL(expected_q_xx_f32_3,int,32,4) [] = { 0xc1800000, 0xc1700000,
+ 0xc1600000, 0xc1500000 };
+VECT_VAR_DECL(expected_q_xx_f32_4,int,64,2) [] = { 0xc1700000c1800000,
+ 0xc1500000c1600000 };
+VECT_VAR_DECL(expected_q_xx_f32_5,uint,8,16) [] = { 0x0, 0x0, 0x80, 0xc1,
+ 0x0, 0x0, 0x70, 0xc1,
+ 0x0, 0x0, 0x60, 0xc1,
+ 0x0, 0x0, 0x50, 0xc1 };
+VECT_VAR_DECL(expected_q_xx_f32_6,uint,16,8) [] = { 0x0, 0xc180, 0x0, 0xc170,
+ 0x0, 0xc160, 0x0, 0xc150 };
+VECT_VAR_DECL(expected_q_xx_f32_7,uint,32,4) [] = { 0xc1800000, 0xc1700000,
+ 0xc1600000, 0xc1500000 };
+VECT_VAR_DECL(expected_q_xx_f32_8,uint,64,2) [] = { 0xc1700000c1800000,
+ 0xc1500000c1600000 };
+VECT_VAR_DECL(expected_q_xx_f32_9,poly,8,16) [] = { 0x0, 0x0, 0x80, 0xc1,
+ 0x0, 0x0, 0x70, 0xc1,
+ 0x0, 0x0, 0x60, 0xc1,
+ 0x0, 0x0, 0x50, 0xc1 };
+VECT_VAR_DECL(expected_q_xx_f32_10,poly,16,8) [] = { 0x0, 0xc180, 0x0, 0xc170,
+ 0x0, 0xc160, 0x0, 0xc150 };
+
+#define TEST_MSG "VREINTERPRET/VREINTERPRETQ"
+
+void exec_vreinterpret (void)
+{
+ int i;
+
+ /* Basic test: y=vreinterpret(x), then store the result. */
+#define TEST_VREINTERPRET(Q, T1, T2, W, N, TS1, TS2, WS, NS, EXPECTED) \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vreinterpret##Q##_##T2##W##_##TS2##WS(VECT_VAR(vector, TS1, WS, NS)); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N)); \
+ CHECK(TEST_MSG, T1, W, N, PRIx##W, EXPECTED, "");
+
+#define TEST_VREINTERPRET_POLY(Q, T1, T2, W, N, TS1, TS2, WS, NS, EXPECTED) \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vreinterpret##Q##_##T2##W##_##TS2##WS(VECT_VAR(vector, TS1, WS, NS)); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N)); \
+ CHECK(TEST_MSG, T1, W, N, PRIx##W, EXPECTED, "");
+
+#define TEST_VREINTERPRET_FP(Q, T1, T2, W, N, TS1, TS2, WS, NS, EXPECTED) \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vreinterpret##Q##_##T2##W##_##TS2##WS(VECT_VAR(vector, TS1, WS, NS)); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N)); \
+ CHECK_FP(TEST_MSG, T1, W, N, PRIx##W, EXPECTED, "");
+
+ DECL_VARIABLE_ALL_VARIANTS(vector);
+ DECL_VARIABLE_ALL_VARIANTS(vector_res);
+
+ clean_results ();
+
+
+ /* Initialize input "vector" from "buffer". */
+ TEST_MACRO_ALL_VARIANTS_2_5(VLOAD, vector, buffer);
+ VLOAD(vector, buffer, , float, f, 32, 2);
+ VLOAD(vector, buffer, q, float, f, 32, 4);
+
+ /* vreinterpret_s8_xx. */
+ TEST_VREINTERPRET(, int, s, 8, 8, int, s, 16, 4, expected_s8_1);
+ TEST_VREINTERPRET(, int, s, 8, 8, int, s, 32, 2, expected_s8_2);
+ TEST_VREINTERPRET(, int, s, 8, 8, int, s, 64, 1, expected_s8_3);
+ TEST_VREINTERPRET(, int, s, 8, 8, uint, u, 8, 8, expected_s8_4);
+ TEST_VREINTERPRET(, int, s, 8, 8, uint, u, 16, 4, expected_s8_5);
+ TEST_VREINTERPRET(, int, s, 8, 8, uint, u, 32, 2, expected_s8_6);
+ TEST_VREINTERPRET(, int, s, 8, 8, uint, u, 64, 1, expected_s8_7);
+ TEST_VREINTERPRET(, int, s, 8, 8, poly, p, 8, 8, expected_s8_8);
+ TEST_VREINTERPRET(, int, s, 8, 8, poly, p, 16, 4, expected_s8_9);
+
+ /* vreinterpret_s16_xx. */
+ TEST_VREINTERPRET(, int, s, 16, 4, int, s, 8, 8, expected_s16_1);
+ TEST_VREINTERPRET(, int, s, 16, 4, int, s, 32, 2, expected_s16_2);
+ TEST_VREINTERPRET(, int, s, 16, 4, int, s, 64, 1, expected_s16_3);
+ TEST_VREINTERPRET(, int, s, 16, 4, uint, u, 8, 8, expected_s16_4);
+ TEST_VREINTERPRET(, int, s, 16, 4, uint, u, 16, 4, expected_s16_5);
+ TEST_VREINTERPRET(, int, s, 16, 4, uint, u, 32, 2, expected_s16_6);
+ TEST_VREINTERPRET(, int, s, 16, 4, uint, u, 64, 1, expected_s16_7);
+ TEST_VREINTERPRET(, int, s, 16, 4, poly, p, 8, 8, expected_s16_8);
+ TEST_VREINTERPRET(, int, s, 16, 4, poly, p, 16, 4, expected_s16_9);
+
+ /* vreinterpret_s32_xx. */
+ TEST_VREINTERPRET(, int, s, 32, 2, int, s, 8, 8, expected_s32_1);
+ TEST_VREINTERPRET(, int, s, 32, 2, int, s, 16, 4, expected_s32_2);
+ TEST_VREINTERPRET(, int, s, 32, 2, int, s, 64, 1, expected_s32_3);
+ TEST_VREINTERPRET(, int, s, 32, 2, uint, u, 8, 8, expected_s32_4);
+ TEST_VREINTERPRET(, int, s, 32, 2, uint, u, 16, 4, expected_s32_5);
+ TEST_VREINTERPRET(, int, s, 32, 2, uint, u, 32, 2, expected_s32_6);
+ TEST_VREINTERPRET(, int, s, 32, 2, uint, u, 64, 1, expected_s32_7);
+ TEST_VREINTERPRET(, int, s, 32, 2, poly, p, 8, 8, expected_s32_8);
+ TEST_VREINTERPRET(, int, s, 32, 2, poly, p, 16, 4, expected_s32_9);
+
+ /* vreinterpret_s64_xx. */
+ TEST_VREINTERPRET(, int, s, 64, 1, int, s, 8, 8, expected_s64_1);
+ TEST_VREINTERPRET(, int, s, 64, 1, int, s, 16, 4, expected_s64_2);
+ TEST_VREINTERPRET(, int, s, 64, 1, int, s, 32, 2, expected_s64_3);
+ TEST_VREINTERPRET(, int, s, 64, 1, uint, u, 8, 8, expected_s64_4);
+ TEST_VREINTERPRET(, int, s, 64, 1, uint, u, 16, 4, expected_s64_5);
+ TEST_VREINTERPRET(, int, s, 64, 1, uint, u, 32, 2, expected_s64_6);
+ TEST_VREINTERPRET(, int, s, 64, 1, uint, u, 64, 1, expected_s64_7);
+ TEST_VREINTERPRET(, int, s, 64, 1, poly, p, 8, 8, expected_s64_8);
+ TEST_VREINTERPRET(, int, s, 64, 1, poly, p, 16, 4, expected_s64_9);
+
+ /* vreinterpret_u8_xx. */
+ TEST_VREINTERPRET(, uint, u, 8, 8, int, s, 8, 8, expected_u8_1);
+ TEST_VREINTERPRET(, uint, u, 8, 8, int, s, 16, 4, expected_u8_2);
+ TEST_VREINTERPRET(, uint, u, 8, 8, int, s, 32, 2, expected_u8_3);
+ TEST_VREINTERPRET(, uint, u, 8, 8, int, s, 64, 1, expected_u8_4);
+ TEST_VREINTERPRET(, uint, u, 8, 8, uint, u, 16, 4, expected_u8_5);
+ TEST_VREINTERPRET(, uint, u, 8, 8, uint, u, 32, 2, expected_u8_6);
+ TEST_VREINTERPRET(, uint, u, 8, 8, uint, u, 64, 1, expected_u8_7);
+ TEST_VREINTERPRET(, uint, u, 8, 8, poly, p, 8, 8, expected_u8_8);
+ TEST_VREINTERPRET(, uint, u, 8, 8, poly, p, 16, 4, expected_u8_9);
+
+ /* vreinterpret_u16_xx. */
+ TEST_VREINTERPRET(, uint, u, 16, 4, int, s, 8, 8, expected_u16_1);
+ TEST_VREINTERPRET(, uint, u, 16, 4, int, s, 16, 4, expected_u16_2);
+ TEST_VREINTERPRET(, uint, u, 16, 4, int, s, 32, 2, expected_u16_3);
+ TEST_VREINTERPRET(, uint, u, 16, 4, int, s, 64, 1, expected_u16_4);
+ TEST_VREINTERPRET(, uint, u, 16, 4, uint, u, 8, 8, expected_u16_5);
+ TEST_VREINTERPRET(, uint, u, 16, 4, uint, u, 32, 2, expected_u16_6);
+ TEST_VREINTERPRET(, uint, u, 16, 4, uint, u, 64, 1, expected_u16_7);
+ TEST_VREINTERPRET(, uint, u, 16, 4, poly, p, 8, 8, expected_u16_8);
+ TEST_VREINTERPRET(, uint, u, 16, 4, poly, p, 16, 4, expected_u16_9);
+
+ /* vreinterpret_u32_xx. */
+ TEST_VREINTERPRET(, uint, u, 32, 2, int, s, 8, 8, expected_u32_1);
+ TEST_VREINTERPRET(, uint, u, 32, 2, int, s, 16, 4, expected_u32_2);
+ TEST_VREINTERPRET(, uint, u, 32, 2, int, s, 32, 2, expected_u32_3);
+ TEST_VREINTERPRET(, uint, u, 32, 2, int, s, 64, 1, expected_u32_4);
+ TEST_VREINTERPRET(, uint, u, 32, 2, uint, u, 8, 8, expected_u32_5);
+ TEST_VREINTERPRET(, uint, u, 32, 2, uint, u, 16, 4, expected_u32_6);
+ TEST_VREINTERPRET(, uint, u, 32, 2, uint, u, 64, 1, expected_u32_7);
+ TEST_VREINTERPRET(, uint, u, 32, 2, poly, p, 8, 8, expected_u32_8);
+ TEST_VREINTERPRET(, uint, u, 32, 2, poly, p, 16, 4, expected_u32_9);
+
+ /* vreinterpret_u64_xx. */
+ TEST_VREINTERPRET(, uint, u, 64, 1, int, s, 8, 8, expected_u64_1);
+ TEST_VREINTERPRET(, uint, u, 64, 1, int, s, 16, 4, expected_u64_2);
+ TEST_VREINTERPRET(, uint, u, 64, 1, int, s, 32, 2, expected_u64_3);
+ TEST_VREINTERPRET(, uint, u, 64, 1, int, s, 64, 1, expected_u64_4);
+ TEST_VREINTERPRET(, uint, u, 64, 1, uint, u, 8, 8, expected_u64_5);
+ TEST_VREINTERPRET(, uint, u, 64, 1, uint, u, 16, 4, expected_u64_6);
+ TEST_VREINTERPRET(, uint, u, 64, 1, uint, u, 32, 2, expected_u64_7);
+ TEST_VREINTERPRET(, uint, u, 64, 1, poly, p, 8, 8, expected_u64_8);
+ TEST_VREINTERPRET(, uint, u, 64, 1, poly, p, 16, 4, expected_u64_9);
+
+ /* vreinterpret_p8_xx. */
+ TEST_VREINTERPRET_POLY(, poly, p, 8, 8, int, s, 8, 8, expected_p8_1);
+ TEST_VREINTERPRET_POLY(, poly, p, 8, 8, int, s, 16, 4, expected_p8_2);
+ TEST_VREINTERPRET_POLY(, poly, p, 8, 8, int, s, 32, 2, expected_p8_3);
+ TEST_VREINTERPRET_POLY(, poly, p, 8, 8, int, s, 64, 1, expected_p8_4);
+ TEST_VREINTERPRET_POLY(, poly, p, 8, 8, uint, u, 8, 8, expected_p8_5);
+ TEST_VREINTERPRET_POLY(, poly, p, 8, 8, uint, u, 16, 4, expected_p8_6);
+ TEST_VREINTERPRET_POLY(, poly, p, 8, 8, uint, u, 32, 2, expected_p8_7);
+ TEST_VREINTERPRET_POLY(, poly, p, 8, 8, uint, u, 64, 1, expected_p8_8);
+ TEST_VREINTERPRET_POLY(, poly, p, 8, 8, poly, p, 16, 4, expected_p8_9);
+
+ /* vreinterpret_p16_xx. */
+ TEST_VREINTERPRET_POLY(, poly, p, 16, 4, int, s, 8, 8, expected_p16_1);
+ TEST_VREINTERPRET_POLY(, poly, p, 16, 4, int, s, 16, 4, expected_p16_2);
+ TEST_VREINTERPRET_POLY(, poly, p, 16, 4, int, s, 32, 2, expected_p16_3);
+ TEST_VREINTERPRET_POLY(, poly, p, 16, 4, int, s, 64, 1, expected_p16_4);
+ TEST_VREINTERPRET_POLY(, poly, p, 16, 4, uint, u, 8, 8, expected_p16_5);
+ TEST_VREINTERPRET_POLY(, poly, p, 16, 4, uint, u, 16, 4, expected_p16_6);
+ TEST_VREINTERPRET_POLY(, poly, p, 16, 4, uint, u, 32, 2, expected_p16_7);
+ TEST_VREINTERPRET_POLY(, poly, p, 16, 4, uint, u, 64, 1, expected_p16_8);
+ TEST_VREINTERPRET_POLY(, poly, p, 16, 4, poly, p, 8, 8, expected_p16_9);
+
+ /* vreinterpretq_s8_xx. */
+ TEST_VREINTERPRET(q, int, s, 8, 16, int, s, 16, 8, expected_q_s8_1);
+ TEST_VREINTERPRET(q, int, s, 8, 16, int, s, 32, 4, expected_q_s8_2);
+ TEST_VREINTERPRET(q, int, s, 8, 16, int, s, 64, 2, expected_q_s8_3);
+ TEST_VREINTERPRET(q, int, s, 8, 16, uint, u, 8, 16, expected_q_s8_4);
+ TEST_VREINTERPRET(q, int, s, 8, 16, uint, u, 16, 8, expected_q_s8_5);
+ TEST_VREINTERPRET(q, int, s, 8, 16, uint, u, 32, 4, expected_q_s8_6);
+ TEST_VREINTERPRET(q, int, s, 8, 16, uint, u, 64, 2, expected_q_s8_7);
+ TEST_VREINTERPRET(q, int, s, 8, 16, poly, p, 8, 16, expected_q_s8_8);
+ TEST_VREINTERPRET(q, int, s, 8, 16, poly, p, 16, 8, expected_q_s8_9);
+
+ /* vreinterpretq_s16_xx. */
+ TEST_VREINTERPRET(q, int, s, 16, 8, int, s, 8, 16, expected_q_s16_1);
+ TEST_VREINTERPRET(q, int, s, 16, 8, int, s, 32, 4, expected_q_s16_2);
+ TEST_VREINTERPRET(q, int, s, 16, 8, int, s, 64, 2, expected_q_s16_3);
+ TEST_VREINTERPRET(q, int, s, 16, 8, uint, u, 8, 16, expected_q_s16_4);
+ TEST_VREINTERPRET(q, int, s, 16, 8, uint, u, 16, 8, expected_q_s16_5);
+ TEST_VREINTERPRET(q, int, s, 16, 8, uint, u, 32, 4, expected_q_s16_6);
+ TEST_VREINTERPRET(q, int, s, 16, 8, uint, u, 64, 2, expected_q_s16_7);
+ TEST_VREINTERPRET(q, int, s, 16, 8, poly, p, 8, 16, expected_q_s16_8);
+ TEST_VREINTERPRET(q, int, s, 16, 8, poly, p, 16, 8, expected_q_s16_9);
+
+ /* vreinterpretq_s32_xx. */
+ TEST_VREINTERPRET(q, int, s, 32, 4, int, s, 8, 16, expected_q_s32_1);
+ TEST_VREINTERPRET(q, int, s, 32, 4, int, s, 16, 8, expected_q_s32_2);
+ TEST_VREINTERPRET(q, int, s, 32, 4, int, s, 64, 2, expected_q_s32_3);
+ TEST_VREINTERPRET(q, int, s, 32, 4, uint, u, 8, 16, expected_q_s32_4);
+ TEST_VREINTERPRET(q, int, s, 32, 4, uint, u, 16, 8, expected_q_s32_5);
+ TEST_VREINTERPRET(q, int, s, 32, 4, uint, u, 32, 4, expected_q_s32_6);
+ TEST_VREINTERPRET(q, int, s, 32, 4, uint, u, 64, 2, expected_q_s32_7);
+ TEST_VREINTERPRET(q, int, s, 32, 4, poly, p, 8, 16, expected_q_s32_8);
+ TEST_VREINTERPRET(q, int, s, 32, 4, poly, p, 16, 8, expected_q_s32_9);
+
+ /* vreinterpretq_s64_xx. */
+ TEST_VREINTERPRET(q, int, s, 64, 2, int, s, 8, 16, expected_q_s64_1);
+ TEST_VREINTERPRET(q, int, s, 64, 2, int, s, 16, 8, expected_q_s64_2);
+ TEST_VREINTERPRET(q, int, s, 64, 2, int, s, 32, 4, expected_q_s64_3);
+ TEST_VREINTERPRET(q, int, s, 64, 2, uint, u, 8, 16, expected_q_s64_4);
+ TEST_VREINTERPRET(q, int, s, 64, 2, uint, u, 16, 8, expected_q_s64_5);
+ TEST_VREINTERPRET(q, int, s, 64, 2, uint, u, 32, 4, expected_q_s64_6);
+ TEST_VREINTERPRET(q, int, s, 64, 2, uint, u, 64, 2, expected_q_s64_7);
+ TEST_VREINTERPRET(q, int, s, 64, 2, poly, p, 8, 16, expected_q_s64_8);
+ TEST_VREINTERPRET(q, int, s, 64, 2, poly, p, 16, 8, expected_q_s64_9);
+
+ /* vreinterpretq_u8_xx. */
+ TEST_VREINTERPRET(q, uint, u, 8, 16, int, s, 8, 16, expected_q_u8_1);
+ TEST_VREINTERPRET(q, uint, u, 8, 16, int, s, 16, 8, expected_q_u8_2);
+ TEST_VREINTERPRET(q, uint, u, 8, 16, int, s, 32, 4, expected_q_u8_3);
+ TEST_VREINTERPRET(q, uint, u, 8, 16, int, s, 64, 2, expected_q_u8_4);
+ TEST_VREINTERPRET(q, uint, u, 8, 16, uint, u, 16, 8, expected_q_u8_5);
+ TEST_VREINTERPRET(q, uint, u, 8, 16, uint, u, 32, 4, expected_q_u8_6);
+ TEST_VREINTERPRET(q, uint, u, 8, 16, uint, u, 64, 2, expected_q_u8_7);
+ TEST_VREINTERPRET(q, uint, u, 8, 16, poly, p, 8, 16, expected_q_u8_8);
+ TEST_VREINTERPRET(q, uint, u, 8, 16, poly, p, 16, 8, expected_q_u8_9);
+
+ /* vreinterpretq_u16_xx. */
+ TEST_VREINTERPRET(q, uint, u, 16, 8, int, s, 8, 16, expected_q_u16_1);
+ TEST_VREINTERPRET(q, uint, u, 16, 8, int, s, 16, 8, expected_q_u16_2);
+ TEST_VREINTERPRET(q, uint, u, 16, 8, int, s, 32, 4, expected_q_u16_3);
+ TEST_VREINTERPRET(q, uint, u, 16, 8, int, s, 64, 2, expected_q_u16_4);
+ TEST_VREINTERPRET(q, uint, u, 16, 8, uint, u, 8, 16, expected_q_u16_5);
+ TEST_VREINTERPRET(q, uint, u, 16, 8, uint, u, 32, 4, expected_q_u16_6);
+ TEST_VREINTERPRET(q, uint, u, 16, 8, uint, u, 64, 2, expected_q_u16_7);
+ TEST_VREINTERPRET(q, uint, u, 16, 8, poly, p, 8, 16, expected_q_u16_8);
+ TEST_VREINTERPRET(q, uint, u, 16, 8, poly, p, 16, 8, expected_q_u16_9);
+
+ /* vreinterpretq_u32_xx. */
+ TEST_VREINTERPRET(q, uint, u, 32, 4, int, s, 8, 16, expected_q_u32_1);
+ TEST_VREINTERPRET(q, uint, u, 32, 4, int, s, 16, 8, expected_q_u32_2);
+ TEST_VREINTERPRET(q, uint, u, 32, 4, int, s, 32, 4, expected_q_u32_3);
+ TEST_VREINTERPRET(q, uint, u, 32, 4, int, s, 64, 2, expected_q_u32_4);
+ TEST_VREINTERPRET(q, uint, u, 32, 4, uint, u, 8, 16, expected_q_u32_5);
+ TEST_VREINTERPRET(q, uint, u, 32, 4, uint, u, 16, 8, expected_q_u32_6);
+ TEST_VREINTERPRET(q, uint, u, 32, 4, uint, u, 64, 2, expected_q_u32_7);
+ TEST_VREINTERPRET(q, uint, u, 32, 4, poly, p, 8, 16, expected_q_u32_8);
+ TEST_VREINTERPRET(q, uint, u, 32, 4, poly, p, 16, 8, expected_q_u32_9);
+
+ /* vreinterpretq_u64_xx. */
+ TEST_VREINTERPRET(q, uint, u, 64, 2, int, s, 8, 16, expected_q_u64_1);
+ TEST_VREINTERPRET(q, uint, u, 64, 2, int, s, 16, 8, expected_q_u64_2);
+ TEST_VREINTERPRET(q, uint, u, 64, 2, int, s, 32, 4, expected_q_u64_3);
+ TEST_VREINTERPRET(q, uint, u, 64, 2, int, s, 64, 2, expected_q_u64_4);
+ TEST_VREINTERPRET(q, uint, u, 64, 2, uint, u, 8, 16, expected_q_u64_5);
+ TEST_VREINTERPRET(q, uint, u, 64, 2, uint, u, 16, 8, expected_q_u64_6);
+ TEST_VREINTERPRET(q, uint, u, 64, 2, uint, u, 32, 4, expected_q_u64_7);
+ TEST_VREINTERPRET(q, uint, u, 64, 2, poly, p, 8, 16, expected_q_u64_8);
+ TEST_VREINTERPRET(q, uint, u, 64, 2, poly, p, 16, 8, expected_q_u64_9);
+
+ /* vreinterpret_f32_xx. */
+ TEST_VREINTERPRET_FP(, float, f, 32, 2, int, s, 8, 8, expected_f32_1);
+ TEST_VREINTERPRET_FP(, float, f, 32, 2, int, s, 16, 4, expected_f32_2);
+ TEST_VREINTERPRET_FP(, float, f, 32, 2, int, s, 32, 2, expected_f32_3);
+ TEST_VREINTERPRET_FP(, float, f, 32, 2, int, s, 64, 1, expected_f32_4);
+ TEST_VREINTERPRET_FP(, float, f, 32, 2, uint, u, 8, 8, expected_f32_5);
+ TEST_VREINTERPRET_FP(, float, f, 32, 2, uint, u, 16, 4, expected_f32_6);
+ TEST_VREINTERPRET_FP(, float, f, 32, 2, uint, u, 32, 2, expected_f32_7);
+ TEST_VREINTERPRET_FP(, float, f, 32, 2, uint, u, 64, 1, expected_f32_8);
+ TEST_VREINTERPRET_FP(, float, f, 32, 2, poly, p, 8, 8, expected_f32_9);
+ TEST_VREINTERPRET_FP(, float, f, 32, 2, poly, p, 16, 4, expected_f32_10);
+
+ /* vreinterpretq_f32_xx. */
+ TEST_VREINTERPRET_FP(q, float, f, 32, 4, int, s, 8, 16, expected_q_f32_1);
+ TEST_VREINTERPRET_FP(q, float, f, 32, 4, int, s, 16, 8, expected_q_f32_2);
+ TEST_VREINTERPRET_FP(q, float, f, 32, 4, int, s, 32, 4, expected_q_f32_3);
+ TEST_VREINTERPRET_FP(q, float, f, 32, 4, int, s, 64, 2, expected_q_f32_4);
+ TEST_VREINTERPRET_FP(q, float, f, 32, 4, uint, u, 8, 16, expected_q_f32_5);
+ TEST_VREINTERPRET_FP(q, float, f, 32, 4, uint, u, 16, 8, expected_q_f32_6);
+ TEST_VREINTERPRET_FP(q, float, f, 32, 4, uint, u, 32, 4, expected_q_f32_7);
+ TEST_VREINTERPRET_FP(q, float, f, 32, 4, uint, u, 64, 2, expected_q_f32_8);
+ TEST_VREINTERPRET_FP(q, float, f, 32, 4, poly, p, 8, 16, expected_q_f32_9);
+ TEST_VREINTERPRET_FP(q, float, f, 32, 4, poly, p, 16, 8, expected_q_f32_10);
+
+ /* vreinterpret_xx_f32. */
+ TEST_VREINTERPRET(, int, s, 8, 8, float, f, 32, 2, expected_xx_f32_1);
+ TEST_VREINTERPRET(, int, s, 16, 4, float, f, 32, 2, expected_xx_f32_2);
+ TEST_VREINTERPRET(, int, s, 32, 2, float, f, 32, 2, expected_xx_f32_3);
+ TEST_VREINTERPRET(, int, s, 64, 1, float, f, 32, 2, expected_xx_f32_4);
+ TEST_VREINTERPRET(, uint, u, 8, 8, float, f, 32, 2, expected_xx_f32_5);
+ TEST_VREINTERPRET(, uint, u, 16, 4, float, f, 32, 2, expected_xx_f32_6);
+ TEST_VREINTERPRET(, uint, u, 32, 2, float, f, 32, 2, expected_xx_f32_7);
+ TEST_VREINTERPRET(, uint, u, 64, 1, float, f, 32, 2, expected_xx_f32_8);
+ TEST_VREINTERPRET_POLY(, poly, p, 8, 8, float, f, 32, 2, expected_xx_f32_9);
+ TEST_VREINTERPRET_POLY(, poly, p, 16, 4, float, f, 32, 2, expected_xx_f32_10);
+
+ /* vreinterpretq_xx_f32. */
+ TEST_VREINTERPRET(q, int, s, 8, 16, float, f, 32, 4, expected_q_xx_f32_1);
+ TEST_VREINTERPRET(q, int, s, 16, 8, float, f, 32, 4, expected_q_xx_f32_2);
+ TEST_VREINTERPRET(q, int, s, 32, 4, float, f, 32, 4, expected_q_xx_f32_3);
+ TEST_VREINTERPRET(q, int, s, 64, 2, float, f, 32, 4, expected_q_xx_f32_4);
+ TEST_VREINTERPRET(q, uint, u, 8, 16, float, f, 32, 4, expected_q_xx_f32_5);
+ TEST_VREINTERPRET(q, uint, u, 16, 8, float, f, 32, 4, expected_q_xx_f32_6);
+ TEST_VREINTERPRET(q, uint, u, 32, 4, float, f, 32, 4, expected_q_xx_f32_7);
+ TEST_VREINTERPRET(q, uint, u, 64, 2, float, f, 32, 4, expected_q_xx_f32_8);
+ TEST_VREINTERPRET_POLY(q, poly, p, 8, 16, float, f, 32, 4, expected_q_xx_f32_9);
+ TEST_VREINTERPRET_POLY(q, poly, p, 16, 8, float, f, 32, 4, expected_q_xx_f32_10);
+}
+
+int main (void)
+{
+ exec_vreinterpret ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrev.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrev.c
new file mode 100644
index 00000000000..3b574da403a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrev.c
@@ -0,0 +1,200 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results for vrev16. */
+VECT_VAR_DECL(expected_vrev16,int,8,8) [] = { 0xf1, 0xf0, 0xf3, 0xf2,
+ 0xf5, 0xf4, 0xf7, 0xf6 };
+VECT_VAR_DECL(expected_vrev16,uint,8,8) [] = { 0xf1, 0xf0, 0xf3, 0xf2,
+ 0xf5, 0xf4, 0xf7, 0xf6 };
+VECT_VAR_DECL(expected_vrev16,poly,8,8) [] = { 0xf1, 0xf0, 0xf3, 0xf2,
+ 0xf5, 0xf4, 0xf7, 0xf6 };
+VECT_VAR_DECL(expected_vrev16,int,8,16) [] = { 0xf1, 0xf0, 0xf3, 0xf2,
+ 0xf5, 0xf4, 0xf7, 0xf6,
+ 0xf9, 0xf8, 0xfb, 0xfa,
+ 0xfd, 0xfc, 0xff, 0xfe };
+VECT_VAR_DECL(expected_vrev16,uint,8,16) [] = { 0xf1, 0xf0, 0xf3, 0xf2,
+ 0xf5, 0xf4, 0xf7, 0xf6,
+ 0xf9, 0xf8, 0xfb, 0xfa,
+ 0xfd, 0xfc, 0xff, 0xfe };
+VECT_VAR_DECL(expected_vrev16,poly,8,16) [] = { 0xf1, 0xf0, 0xf3, 0xf2,
+ 0xf5, 0xf4, 0xf7, 0xf6,
+ 0xf9, 0xf8, 0xfb, 0xfa,
+ 0xfd, 0xfc, 0xff, 0xfe };
+
+/* Expected results for vrev32. */
+VECT_VAR_DECL(expected_vrev32,int,8,8) [] = { 0xf3, 0xf2, 0xf1, 0xf0,
+ 0xf7, 0xf6, 0xf5, 0xf4 };
+VECT_VAR_DECL(expected_vrev32,int,16,4) [] = { 0xfff1, 0xfff0, 0xfff3, 0xfff2 };
+VECT_VAR_DECL(expected_vrev32,uint,8,8) [] = { 0xf3, 0xf2, 0xf1, 0xf0,
+ 0xf7, 0xf6, 0xf5, 0xf4 };
+VECT_VAR_DECL(expected_vrev32,uint,16,4) [] = { 0xfff1, 0xfff0, 0xfff3, 0xfff2 };
+VECT_VAR_DECL(expected_vrev32,poly,8,8) [] = { 0xf3, 0xf2, 0xf1, 0xf0,
+ 0xf7, 0xf6, 0xf5, 0xf4 };
+VECT_VAR_DECL(expected_vrev32,poly,16,4) [] = { 0xfff1, 0xfff0, 0xfff3, 0xfff2 };
+VECT_VAR_DECL(expected_vrev32,int,8,16) [] = { 0xf3, 0xf2, 0xf1, 0xf0,
+ 0xf7, 0xf6, 0xf5, 0xf4,
+ 0xfb, 0xfa, 0xf9, 0xf8,
+ 0xff, 0xfe, 0xfd, 0xfc };
+VECT_VAR_DECL(expected_vrev32,int,16,8) [] = { 0xfff1, 0xfff0, 0xfff3, 0xfff2,
+ 0xfff5, 0xfff4, 0xfff7, 0xfff6 };
+VECT_VAR_DECL(expected_vrev32,uint,8,16) [] = { 0xf3, 0xf2, 0xf1, 0xf0,
+ 0xf7, 0xf6, 0xf5, 0xf4,
+ 0xfb, 0xfa, 0xf9, 0xf8,
+ 0xff, 0xfe, 0xfd, 0xfc };
+VECT_VAR_DECL(expected_vrev32,uint,16,8) [] = { 0xfff1, 0xfff0, 0xfff3, 0xfff2,
+ 0xfff5, 0xfff4, 0xfff7, 0xfff6 };
+VECT_VAR_DECL(expected_vrev32,poly,8,16) [] = { 0xf3, 0xf2, 0xf1, 0xf0,
+ 0xf7, 0xf6, 0xf5, 0xf4,
+ 0xfb, 0xfa, 0xf9, 0xf8,
+ 0xff, 0xfe, 0xfd, 0xfc };
+VECT_VAR_DECL(expected_vrev32,poly,16,8) [] = { 0xfff1, 0xfff0, 0xfff3, 0xfff2,
+ 0xfff5, 0xfff4, 0xfff7, 0xfff6 };
+
+/* Expected results for vrev64. */
+VECT_VAR_DECL(expected_vrev64,int,8,8) [] = { 0xf7, 0xf6, 0xf5, 0xf4,
+ 0xf3, 0xf2, 0xf1, 0xf0 };
+VECT_VAR_DECL(expected_vrev64,int,16,4) [] = { 0xfff3, 0xfff2, 0xfff1, 0xfff0 };
+VECT_VAR_DECL(expected_vrev64,int,32,2) [] = { 0xfffffff1, 0xfffffff0 };
+VECT_VAR_DECL(expected_vrev64,uint,8,8) [] = { 0xf7, 0xf6, 0xf5, 0xf4, 0xf3,
+ 0xf2, 0xf1, 0xf0 };
+VECT_VAR_DECL(expected_vrev64,uint,16,4) [] = { 0xfff3, 0xfff2, 0xfff1, 0xfff0 };
+VECT_VAR_DECL(expected_vrev64,uint,32,2) [] = { 0xfffffff1, 0xfffffff0 };
+VECT_VAR_DECL(expected_vrev64,poly,8,8) [] = { 0xf7, 0xf6, 0xf5, 0xf4,
+ 0xf3, 0xf2, 0xf1, 0xf0 };
+VECT_VAR_DECL(expected_vrev64,poly,16,4) [] = { 0xfff3, 0xfff2, 0xfff1, 0xfff0 };
+VECT_VAR_DECL(expected_vrev64,hfloat,32,2) [] = { 0xc1700000, 0xc1800000 };
+VECT_VAR_DECL(expected_vrev64,int,8,16) [] = { 0xf7, 0xf6, 0xf5, 0xf4,
+ 0xf3, 0xf2, 0xf1, 0xf0,
+ 0xff, 0xfe, 0xfd, 0xfc,
+ 0xfb, 0xfa, 0xf9, 0xf8 };
+VECT_VAR_DECL(expected_vrev64,int,16,8) [] = { 0xfff3, 0xfff2, 0xfff1, 0xfff0,
+ 0xfff7, 0xfff6, 0xfff5, 0xfff4 };
+VECT_VAR_DECL(expected_vrev64,int,32,4) [] = { 0xfffffff1, 0xfffffff0,
+ 0xfffffff3, 0xfffffff2 };
+VECT_VAR_DECL(expected_vrev64,uint,8,16) [] = { 0xf7, 0xf6, 0xf5, 0xf4,
+ 0xf3, 0xf2, 0xf1, 0xf0,
+ 0xff, 0xfe, 0xfd, 0xfc,
+ 0xfb, 0xfa, 0xf9, 0xf8 };
+VECT_VAR_DECL(expected_vrev64,uint,16,8) [] = { 0xfff3, 0xfff2, 0xfff1, 0xfff0,
+ 0xfff7, 0xfff6, 0xfff5, 0xfff4 };
+VECT_VAR_DECL(expected_vrev64,uint,32,4) [] = { 0xfffffff1, 0xfffffff0,
+ 0xfffffff3, 0xfffffff2 };
+VECT_VAR_DECL(expected_vrev64,poly,8,16) [] = { 0xf7, 0xf6, 0xf5, 0xf4,
+ 0xf3, 0xf2, 0xf1, 0xf0,
+ 0xff, 0xfe, 0xfd, 0xfc,
+ 0xfb, 0xfa, 0xf9, 0xf8 };
+VECT_VAR_DECL(expected_vrev64,poly,16,8) [] = { 0xfff3, 0xfff2, 0xfff1, 0xfff0,
+ 0xfff7, 0xfff6, 0xfff5, 0xfff4 };
+VECT_VAR_DECL(expected_vrev64,hfloat,32,4) [] = { 0xc1700000, 0xc1800000,
+ 0xc1500000, 0xc1600000 };
+
+void exec_vrev (void)
+{
+  /* Exercise vrev16/32/64 on all supported type combos: y=vrev(x), store y, compare. */
+#define TEST_VREV(Q, T1, T2, W, N, W2) \
+  VECT_VAR(vector_res, T1, W, N) = \
+    vrev##W2##Q##_##T2##W(VECT_VAR(vector, T1, W, N)); \
+  vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), VECT_VAR(vector_res, T1, W, N))
+
+  DECL_VARIABLE_ALL_VARIANTS(vector); /* input operands */
+  DECL_VARIABLE_ALL_VARIANTS(vector_res); /* per-variant results */
+
+  clean_results ();
+
+  /* Initialize input "vector" from "buffer"; floats are loaded separately below. */
+  TEST_MACRO_ALL_VARIANTS_2_5(VLOAD, vector, buffer);
+  VLOAD(vector, buffer, , float, f, 32, 2);
+  VLOAD(vector, buffer, q, float, f, 32, 4);
+
+  /* Check vrev in each of the existing combinations. */
+#define TEST_MSG "VREV16"
+  TEST_VREV(, int, s, 8, 8, 16);
+  TEST_VREV(, uint, u, 8, 8, 16);
+  TEST_VREV(, poly, p, 8, 8, 16);
+  TEST_VREV(q, int, s, 8, 16, 16);
+  TEST_VREV(q, uint, u, 8, 16, 16);
+  TEST_VREV(q, poly, p, 8, 16, 16);
+
+  CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_vrev16, "");
+  CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_vrev16, "");
+  CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_vrev16, "");
+  CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_vrev16, "");
+  CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_vrev16, "");
+  CHECK(TEST_MSG, poly, 8, 16, PRIx8, expected_vrev16, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VREV32"
+  TEST_VREV(, int, s, 8, 8, 32);
+  TEST_VREV(, int, s, 16, 4, 32);
+  TEST_VREV(, uint, u, 8, 8, 32);
+  TEST_VREV(, uint, u, 16, 4, 32);
+  TEST_VREV(, poly, p, 8, 8, 32);
+  TEST_VREV(, poly, p, 16, 4, 32);
+  TEST_VREV(q, int, s, 8, 16, 32);
+  TEST_VREV(q, int, s, 16, 8, 32);
+  TEST_VREV(q, uint, u, 8, 16, 32);
+  TEST_VREV(q, uint, u, 16, 8, 32);
+  TEST_VREV(q, poly, p, 8, 16, 32);
+  TEST_VREV(q, poly, p, 16, 8, 32);
+
+  CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_vrev32, "");
+  CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_vrev32, "");
+  CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_vrev32, "");
+  CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_vrev32, "");
+  CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_vrev32, "");
+  CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_vrev32, "");
+  CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_vrev32, "");
+  CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_vrev32, "");
+  CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_vrev32, "");
+  CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_vrev32, "");
+  CHECK(TEST_MSG, poly, 8, 16, PRIx8, expected_vrev32, "");
+  CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_vrev32, "");
+
+#undef TEST_MSG
+#define TEST_MSG "VREV64"
+  TEST_VREV(, int, s, 8, 8, 64);
+  TEST_VREV(, int, s, 16, 4, 64);
+  TEST_VREV(, int, s, 32, 2, 64);
+  TEST_VREV(, uint, u, 8, 8, 64);
+  TEST_VREV(, uint, u, 16, 4, 64);
+  TEST_VREV(, uint, u, 32, 2, 64);
+  TEST_VREV(, poly, p, 8, 8, 64);
+  TEST_VREV(, poly, p, 16, 4, 64);
+  TEST_VREV(q, int, s, 8, 16, 64);
+  TEST_VREV(q, int, s, 16, 8, 64);
+  TEST_VREV(q, int, s, 32, 4, 64);
+  TEST_VREV(q, uint, u, 8, 16, 64);
+  TEST_VREV(q, uint, u, 16, 8, 64);
+  TEST_VREV(q, uint, u, 32, 4, 64);
+  TEST_VREV(q, poly, p, 8, 16, 64);
+  TEST_VREV(q, poly, p, 16, 8, 64);
+
+  CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_vrev64, "");
+  CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_vrev64, "");
+  CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_vrev64, "");
+  CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_vrev64, "");
+  CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_vrev64, "");
+  CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_vrev64, "");
+  CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_vrev64, "");
+  CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_vrev64, "");
+  CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_vrev64, "");
+  CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_vrev64, "");
+  CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_vrev64, "");
+  CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_vrev64, "");
+  CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_vrev64, "");
+  CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_vrev64, "");
+  CHECK(TEST_MSG, poly, 8, 16, PRIx8, expected_vrev64, "");
+  CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_vrev64, "");
+  /* vrev64 additionally covers float32 vectors; compare with CHECK_FP. */
+  TEST_VREV(, float, f, 32, 2, 64);
+  TEST_VREV(q, float, f, 32, 4, 64);
+  CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_vrev64, "");
+  CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_vrev64, "");
+}
+
+int main (void)
+{
+  exec_vrev (); /* Run every vrev16/32/64 check; CHECK macros report mismatches.  */
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrshl.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrshl.c
new file mode 100644
index 00000000000..d970fbda3f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrshl.c
@@ -0,0 +1,627 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results with input=0. */
+VECT_VAR_DECL(expected_0,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_0,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_0,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,64,2) [] = { 0x0, 0x0 };
+
+/* Expected results with input=0 and negative shift amount. */
+VECT_VAR_DECL(expected_0_sh_neg,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_sh_neg,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_sh_neg,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_sh_neg,int,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_0_sh_neg,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_sh_neg,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_sh_neg,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_sh_neg,uint,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_0_sh_neg,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_sh_neg,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_sh_neg,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_sh_neg,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_sh_neg,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_sh_neg,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_sh_neg,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0_sh_neg,uint,64,2) [] = { 0x0, 0x0 };
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0xe0, 0xe2, 0xe4, 0xe6,
+ 0xe8, 0xea, 0xec, 0xee };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xff80, 0xff88, 0xff90, 0xff98 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xfffff000, 0xfffff100 };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0xfffffffffffffffe };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0xe0, 0xe2, 0xe4, 0xe6,
+ 0xe8, 0xea, 0xec, 0xee };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0xff80, 0xff88, 0xff90, 0xff98 };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xfffff000, 0xfffff100 };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0x1ffffffffffffffe };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0x0, 0x1000, 0x2000, 0x3000,
+ 0x4000, 0x5000, 0x6000, 0x7000 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0x0, 0x8000000000000000 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0x0, 0x1000, 0x2000, 0x3000,
+ 0x4000, 0x5000, 0x6000, 0x7000 };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0x0, 0x8000000000000000 };
+
+/* Expected results with negative shift amount. */
+VECT_VAR_DECL(expected_sh_neg,int,8,8) [] = { 0xf8, 0xf9, 0xf9, 0xfa,
+ 0xfa, 0xfb, 0xfb, 0xfc };
+VECT_VAR_DECL(expected_sh_neg,int,16,4) [] = { 0xfffc, 0xfffc, 0xfffd, 0xfffd };
+VECT_VAR_DECL(expected_sh_neg,int,32,2) [] = { 0xfffffffe, 0xfffffffe };
+VECT_VAR_DECL(expected_sh_neg,int,64,1) [] = { 0xffffffffffffffff };
+VECT_VAR_DECL(expected_sh_neg,uint,8,8) [] = { 0x78, 0x79, 0x79, 0x7a,
+ 0x7a, 0x7b, 0x7b, 0x7c };
+VECT_VAR_DECL(expected_sh_neg,uint,16,4) [] = { 0x3ffc, 0x3ffc, 0x3ffd, 0x3ffd };
+VECT_VAR_DECL(expected_sh_neg,uint,32,2) [] = { 0x1ffffffe, 0x1ffffffe };
+VECT_VAR_DECL(expected_sh_neg,uint,64,1) [] = { 0xfffffffffffffff };
+VECT_VAR_DECL(expected_sh_neg,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_sh_neg,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_sh_neg,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_sh_neg,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_sh_neg,uint,8,16) [] = { 0x2, 0x2, 0x2, 0x2,
+ 0x2, 0x2, 0x2, 0x2,
+ 0x2, 0x2, 0x2, 0x2,
+ 0x2, 0x2, 0x2, 0x2 };
+VECT_VAR_DECL(expected_sh_neg,uint,16,8) [] = { 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20 };
+VECT_VAR_DECL(expected_sh_neg,uint,32,4) [] = { 0x80000, 0x80000,
+ 0x80000, 0x80000 };
+VECT_VAR_DECL(expected_sh_neg,uint,64,2) [] = { 0x100000000000, 0x100000000000 };
+
+/* Expected results with max input value shifted by -1 to test
+ round_const. */
+VECT_VAR_DECL(expected_max_sh_minus1,int,8,8) [] = { 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40 };
+VECT_VAR_DECL(expected_max_sh_minus1,int,16,4) [] = { 0x4000, 0x4000,
+ 0x4000, 0x4000 };
+VECT_VAR_DECL(expected_max_sh_minus1,int,32,2) [] = { 0x40000000, 0x40000000 };
+VECT_VAR_DECL(expected_max_sh_minus1,int,64,1) [] = { 0x4000000000000000 };
+VECT_VAR_DECL(expected_max_sh_minus1,uint,8,8) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected_max_sh_minus1,uint,16,4) [] = { 0x8000, 0x8000,
+ 0x8000, 0x8000 };
+VECT_VAR_DECL(expected_max_sh_minus1,uint,32,2) [] = { 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_max_sh_minus1,uint,64,1) [] = { 0x8000000000000000 };
+VECT_VAR_DECL(expected_max_sh_minus1,int,8,16) [] = { 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40 };
+VECT_VAR_DECL(expected_max_sh_minus1,int,16,8) [] = { 0x4000, 0x4000,
+ 0x4000, 0x4000,
+ 0x4000, 0x4000,
+ 0x4000, 0x4000 };
+VECT_VAR_DECL(expected_max_sh_minus1,int,32,4) [] = { 0x40000000, 0x40000000,
+ 0x40000000, 0x40000000 };
+VECT_VAR_DECL(expected_max_sh_minus1,int,64,2) [] = { 0x4000000000000000,
+ 0x4000000000000000 };
+VECT_VAR_DECL(expected_max_sh_minus1,uint,8,16) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected_max_sh_minus1,uint,16,8) [] = { 0x8000, 0x8000,
+ 0x8000, 0x8000,
+ 0x8000, 0x8000,
+ 0x8000, 0x8000 };
+VECT_VAR_DECL(expected_max_sh_minus1,uint,32,4) [] = { 0x80000000, 0x80000000,
+ 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_max_sh_minus1,uint,64,2) [] = { 0x8000000000000000,
+ 0x8000000000000000 };
+
+/* Expected results with max input value shifted by -3 to test
+ round_const. */
+VECT_VAR_DECL(expected_max_sh_minus3,int,8,8) [] = { 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10 };
+VECT_VAR_DECL(expected_max_sh_minus3,int,16,4) [] = { 0x1000, 0x1000,
+ 0x1000, 0x1000 };
+VECT_VAR_DECL(expected_max_sh_minus3,int,32,2) [] = { 0x10000000, 0x10000000 };
+VECT_VAR_DECL(expected_max_sh_minus3,int,64,1) [] = { 0x1000000000000000 };
+VECT_VAR_DECL(expected_max_sh_minus3,uint,8,8) [] = { 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20 };
+VECT_VAR_DECL(expected_max_sh_minus3,uint,16,4) [] = { 0x2000, 0x2000,
+ 0x2000, 0x2000 };
+VECT_VAR_DECL(expected_max_sh_minus3,uint,32,2) [] = { 0x20000000, 0x20000000 };
+VECT_VAR_DECL(expected_max_sh_minus3,uint,64,1) [] = { 0x2000000000000000 };
+VECT_VAR_DECL(expected_max_sh_minus3,int,8,16) [] = { 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10 };
+VECT_VAR_DECL(expected_max_sh_minus3,int,16,8) [] = { 0x1000, 0x1000,
+ 0x1000, 0x1000,
+ 0x1000, 0x1000,
+ 0x1000, 0x1000 };
+VECT_VAR_DECL(expected_max_sh_minus3,int,32,4) [] = { 0x10000000, 0x10000000,
+ 0x10000000, 0x10000000 };
+VECT_VAR_DECL(expected_max_sh_minus3,int,64,2) [] = { 0x1000000000000000,
+ 0x1000000000000000 };
+VECT_VAR_DECL(expected_max_sh_minus3,uint,8,16) [] = { 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20 };
+VECT_VAR_DECL(expected_max_sh_minus3,uint,16,8) [] = { 0x2000, 0x2000,
+ 0x2000, 0x2000,
+ 0x2000, 0x2000,
+ 0x2000, 0x2000 };
+VECT_VAR_DECL(expected_max_sh_minus3,uint,32,4) [] = { 0x20000000, 0x20000000,
+ 0x20000000, 0x20000000 };
+VECT_VAR_DECL(expected_max_sh_minus3,uint,64,2) [] = { 0x2000000000000000,
+ 0x2000000000000000 };
+
+/* Expected results with max input and negative shift equal to the element width. */
+VECT_VAR_DECL(expected_max_sh_minus_width,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_minus_width,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_minus_width,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_minus_width,int,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_max_sh_minus_width,uint,8,8) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_sh_minus_width,uint,16,4) [] = { 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_sh_minus_width,uint,32,2) [] = { 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_sh_minus_width,uint,64,1) [] = { 0x1 };
+VECT_VAR_DECL(expected_max_sh_minus_width,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_minus_width,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_minus_width,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_minus_width,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_minus_width,uint,8,16) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_sh_minus_width,uint,16,8) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_sh_minus_width,uint,32,4) [] = { 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_sh_minus_width,uint,64,2) [] = { 0x1, 0x1 };
+
+/* Expected results with large shift amount. */
+VECT_VAR_DECL(expected_max_sh_large,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large,int,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_max_sh_large,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large,uint,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_max_sh_large,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large,uint,64,2) [] = { 0x0, 0x0 };
+
+/* Expected results with large negative shift amount. */
+VECT_VAR_DECL(expected_max_sh_large_neg,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large_neg,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large_neg,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large_neg,int,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_max_sh_large_neg,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large_neg,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large_neg,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large_neg,uint,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_max_sh_large_neg,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large_neg,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large_neg,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large_neg,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large_neg,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_large_neg,uint,16,8) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_sh_large_neg,uint,32,4) [] = { 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_sh_large_neg,uint,64,2) [] = { 0x1, 0x1 };
+
+#define TEST_MSG "VRSHL/VRSHLQ"
+void exec_vrshl (void)
+{
+ /* Basic test: v3=vrshl(v1,v2), then store the result. */
+#define TEST_VRSHL(T3, Q, T1, T2, W, N) \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vrshl##Q##_##T2##W(VECT_VAR(vector, T1, W, N), \
+ VECT_VAR(vector_shift, T3, W, N)); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), VECT_VAR(vector_res, T1, W, N))
+
+ DECL_VARIABLE_ALL_VARIANTS(vector);
+ DECL_VARIABLE_ALL_VARIANTS(vector_res);
+
+ DECL_VARIABLE_SIGNED_VARIANTS(vector_shift);
+
+ clean_results ();
+
+ /* Fill input vector with 0, to check behavior on limits. */
+ VDUP(vector, , int, s, 8, 8, 0);
+ VDUP(vector, , int, s, 16, 4, 0);
+ VDUP(vector, , int, s, 32, 2, 0);
+ VDUP(vector, , int, s, 64, 1, 0);
+ VDUP(vector, , uint, u, 8, 8, 0);
+ VDUP(vector, , uint, u, 16, 4, 0);
+ VDUP(vector, , uint, u, 32, 2, 0);
+ VDUP(vector, , uint, u, 64, 1, 0);
+ VDUP(vector, q, int, s, 8, 16, 0);
+ VDUP(vector, q, int, s, 16, 8, 0);
+ VDUP(vector, q, int, s, 32, 4, 0);
+ VDUP(vector, q, int, s, 64, 2, 0);
+ VDUP(vector, q, uint, u, 8, 16, 0);
+ VDUP(vector, q, uint, u, 16, 8, 0);
+ VDUP(vector, q, uint, u, 32, 4, 0);
+ VDUP(vector, q, uint, u, 64, 2, 0);
+
+ /* Choose init value arbitrarily, will be used as shift amount. */
+  /* Use shift amounts equal to the element width minus one to check
+     behavior on limits. */
+ VDUP(vector_shift, , int, s, 8, 8, 7);
+ VDUP(vector_shift, , int, s, 16, 4, 15);
+ VDUP(vector_shift, , int, s, 32, 2, 31);
+ VDUP(vector_shift, , int, s, 64, 1, 63);
+ VDUP(vector_shift, q, int, s, 8, 16, 7);
+ VDUP(vector_shift, q, int, s, 16, 8, 15);
+ VDUP(vector_shift, q, int, s, 32, 4, 31);
+ VDUP(vector_shift, q, int, s, 64, 2, 63);
+
+ TEST_MACRO_ALL_VARIANTS_1_5(TEST_VRSHL, int);
+
+#define CMT " (with input = 0)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_0, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_0, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_0, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_0, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_0, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_0, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_0, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_0, CMT);
+
+
+ /* Use negative shift amounts. */
+ VDUP(vector_shift, , int, s, 8, 8, -1);
+ VDUP(vector_shift, , int, s, 16, 4, -2);
+ VDUP(vector_shift, , int, s, 32, 2, -3);
+ VDUP(vector_shift, , int, s, 64, 1, -4);
+ VDUP(vector_shift, q, int, s, 8, 16, -7);
+ VDUP(vector_shift, q, int, s, 16, 8, -11);
+ VDUP(vector_shift, q, int, s, 32, 4, -13);
+ VDUP(vector_shift, q, int, s, 64, 2, -20);
+
+ TEST_MACRO_ALL_VARIANTS_1_5(TEST_VRSHL, int);
+
+#undef CMT
+#define CMT " (input 0 and negative shift amount)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_0_sh_neg, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_0_sh_neg, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_0_sh_neg, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_0_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_0_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_0_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_0_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_0_sh_neg, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_0_sh_neg, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_0_sh_neg, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_0_sh_neg, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_0_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_0_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_0_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_0_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_0_sh_neg, CMT);
+
+
+ /* Test again, with predefined input values. */
+ TEST_MACRO_ALL_VARIANTS_2_5(VLOAD, vector, buffer);
+
+ /* Choose init value arbitrarily, will be used as shift amount. */
+ VDUP(vector_shift, , int, s, 8, 8, 1);
+ VDUP(vector_shift, , int, s, 16, 4, 3);
+ VDUP(vector_shift, , int, s, 32, 2, 8);
+ VDUP(vector_shift, , int, s, 64, 1, -3);
+ VDUP(vector_shift, q, int, s, 8, 16, 10);
+ VDUP(vector_shift, q, int, s, 16, 8, 12);
+ VDUP(vector_shift, q, int, s, 32, 4, 32);
+ VDUP(vector_shift, q, int, s, 64, 2, 63);
+
+ TEST_MACRO_ALL_VARIANTS_1_5(TEST_VRSHL, int);
+
+#undef CMT
+#define CMT ""
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected, CMT);
+
+
+ /* Use negative shift amounts. */
+ VDUP(vector_shift, , int, s, 8, 8, -1);
+ VDUP(vector_shift, , int, s, 16, 4, -2);
+ VDUP(vector_shift, , int, s, 32, 2, -3);
+ VDUP(vector_shift, , int, s, 64, 1, -4);
+ VDUP(vector_shift, q, int, s, 8, 16, -7);
+ VDUP(vector_shift, q, int, s, 16, 8, -11);
+ VDUP(vector_shift, q, int, s, 32, 4, -13);
+ VDUP(vector_shift, q, int, s, 64, 2, -20);
+
+ TEST_MACRO_ALL_VARIANTS_1_5(TEST_VRSHL, int);
+
+#undef CMT
+#define CMT " (negative shift amount)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_sh_neg, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_sh_neg, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_sh_neg, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_sh_neg, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_sh_neg, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_sh_neg, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_sh_neg, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_sh_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_sh_neg, CMT);
+
+ /* Fill input vector with max value, to check behavior on limits. */
+ VDUP(vector, , int, s, 8, 8, 0x7F);
+ VDUP(vector, , int, s, 16, 4, 0x7FFF);
+ VDUP(vector, , int, s, 32, 2, 0x7FFFFFFF);
+ VDUP(vector, , int, s, 64, 1, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector, , uint, u, 8, 8, 0xFF);
+ VDUP(vector, , uint, u, 16, 4, 0xFFFF);
+ VDUP(vector, , uint, u, 32, 2, 0xFFFFFFFF);
+ VDUP(vector, , uint, u, 64, 1, 0xFFFFFFFFFFFFFFFFULL);
+ VDUP(vector, q, int, s, 8, 16, 0x7F);
+ VDUP(vector, q, int, s, 16, 8, 0x7FFF);
+ VDUP(vector, q, int, s, 32, 4, 0x7FFFFFFF);
+ VDUP(vector, q, int, s, 64, 2, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector, q, uint, u, 8, 16, 0xFF);
+ VDUP(vector, q, uint, u, 16, 8, 0xFFFF);
+ VDUP(vector, q, uint, u, 32, 4, 0xFFFFFFFF);
+ VDUP(vector, q, uint, u, 64, 2, 0xFFFFFFFFFFFFFFFFULL);
+
+ /* Use -1 shift amount to check overflow with round_const. */
+ VDUP(vector_shift, , int, s, 8, 8, -1);
+ VDUP(vector_shift, , int, s, 16, 4, -1);
+ VDUP(vector_shift, , int, s, 32, 2, -1);
+ VDUP(vector_shift, , int, s, 64, 1, -1);
+ VDUP(vector_shift, q, int, s, 8, 16, -1);
+ VDUP(vector_shift, q, int, s, 16, 8, -1);
+ VDUP(vector_shift, q, int, s, 32, 4, -1);
+ VDUP(vector_shift, q, int, s, 64, 2, -1);
+
+ TEST_MACRO_ALL_VARIANTS_1_5(TEST_VRSHL, int);
+
+#undef CMT
+#define CMT " (max input, shift by -1)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_sh_minus1, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_sh_minus1, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_sh_minus1, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max_sh_minus1, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_sh_minus1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_sh_minus1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_sh_minus1, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max_sh_minus1, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max_sh_minus1, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max_sh_minus1, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max_sh_minus1, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max_sh_minus1, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max_sh_minus1, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max_sh_minus1, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max_sh_minus1, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max_sh_minus1, CMT);
+
+ /* Use -3 shift amount to check overflow with round_const. */
+ VDUP(vector_shift, , int, s, 8, 8, -3);
+ VDUP(vector_shift, , int, s, 16, 4, -3);
+ VDUP(vector_shift, , int, s, 32, 2, -3);
+ VDUP(vector_shift, , int, s, 64, 1, -3);
+ VDUP(vector_shift, q, int, s, 8, 16, -3);
+ VDUP(vector_shift, q, int, s, 16, 8, -3);
+ VDUP(vector_shift, q, int, s, 32, 4, -3);
+ VDUP(vector_shift, q, int, s, 64, 2, -3);
+
+ TEST_MACRO_ALL_VARIANTS_1_5(TEST_VRSHL, int);
+
+#undef CMT
+#define CMT " (check rounding constant: max input, shift by -3)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_sh_minus3, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_sh_minus3, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_sh_minus3, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max_sh_minus3, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_sh_minus3, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_sh_minus3, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_sh_minus3, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max_sh_minus3, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max_sh_minus3, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max_sh_minus3, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max_sh_minus3, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max_sh_minus3, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max_sh_minus3, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max_sh_minus3, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max_sh_minus3, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max_sh_minus3, CMT);
+
+
+ /* Use negative shift amount as large as input vector width. */
+ VDUP(vector_shift, , int, s, 8, 8, -8);
+ VDUP(vector_shift, , int, s, 16, 4, -16);
+ VDUP(vector_shift, , int, s, 32, 2, -32);
+ VDUP(vector_shift, , int, s, 64, 1, -64);
+ VDUP(vector_shift, q, int, s, 8, 16, -8);
+ VDUP(vector_shift, q, int, s, 16, 8, -16);
+ VDUP(vector_shift, q, int, s, 32, 4, -32);
+ VDUP(vector_shift, q, int, s, 64, 2, -64);
+
+ TEST_MACRO_ALL_VARIANTS_1_5(TEST_VRSHL, int);
+
+#undef CMT
+#define CMT " (max input, right shift by vector width)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_sh_minus_width, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_sh_minus_width, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_sh_minus_width, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max_sh_minus_width, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_sh_minus_width, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_sh_minus_width, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_sh_minus_width, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max_sh_minus_width, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max_sh_minus_width, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max_sh_minus_width, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max_sh_minus_width, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max_sh_minus_width, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max_sh_minus_width, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max_sh_minus_width, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max_sh_minus_width, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max_sh_minus_width, CMT);
+
+
+ /* Test large shift amount. */
+ VDUP(vector_shift, , int, s, 8, 8, 10);
+ VDUP(vector_shift, , int, s, 16, 4, 20);
+ VDUP(vector_shift, , int, s, 32, 2, 33);
+ VDUP(vector_shift, , int, s, 64, 1, 65);
+ VDUP(vector_shift, q, int, s, 8, 16, 9);
+ VDUP(vector_shift, q, int, s, 16, 8, 16);
+ VDUP(vector_shift, q, int, s, 32, 4, 32);
+ VDUP(vector_shift, q, int, s, 64, 2, 64);
+
+ TEST_MACRO_ALL_VARIANTS_1_5(TEST_VRSHL, int);
+
+#undef CMT
+#define CMT " (max input, large shift amount)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_sh_large, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_sh_large, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_sh_large, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max_sh_large, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_sh_large, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_sh_large, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_sh_large, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max_sh_large, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max_sh_large, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max_sh_large, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max_sh_large, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max_sh_large, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max_sh_large, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max_sh_large, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max_sh_large, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max_sh_large, CMT);
+
+
+ /* Test large negative shift amount. */
+ VDUP(vector_shift, , int, s, 8, 8, -10);
+ VDUP(vector_shift, , int, s, 16, 4, -20);
+ VDUP(vector_shift, , int, s, 32, 2, -33);
+ VDUP(vector_shift, , int, s, 64, 1, -65);
+ VDUP(vector_shift, q, int, s, 8, 16, -9);
+ VDUP(vector_shift, q, int, s, 16, 8, -16);
+ VDUP(vector_shift, q, int, s, 32, 4, -32);
+ VDUP(vector_shift, q, int, s, 64, 2, -64);
+
+ TEST_MACRO_ALL_VARIANTS_1_5(TEST_VRSHL, int);
+
+#undef CMT
+#define CMT " (max input, large negative shift amount)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_sh_large_neg, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_sh_large_neg, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_sh_large_neg, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max_sh_large_neg, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_sh_large_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_sh_large_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_sh_large_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max_sh_large_neg, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max_sh_large_neg, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max_sh_large_neg, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max_sh_large_neg, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max_sh_large_neg, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max_sh_large_neg, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max_sh_large_neg, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max_sh_large_neg, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max_sh_large_neg, CMT);
+}
+
+int main (void)
+{
+ exec_vrshl ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrshr_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrshr_n.c
new file mode 100644
index 00000000000..6f9ef5a9d84
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrshr_n.c
@@ -0,0 +1,504 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0xf8, 0xf9, 0xf9, 0xfa,
+ 0xfa, 0xfb, 0xfb, 0xfc };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xfffffffc, 0xfffffffc };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0x3c, 0x3c, 0x3d, 0x3d,
+ 0x3d, 0x3d, 0x3e, 0x3e };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0x1ffe, 0x1ffe, 0x1ffe, 0x1ffe };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0x8000000, 0x8000000 };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0x80000000 };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0xf8, 0xf9, 0xf9, 0xfa,
+ 0xfa, 0xfb, 0xfb, 0xfc,
+ 0xfc, 0xfd, 0xfd, 0xfe,
+ 0xfe, 0xff, 0xff, 0x0 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0xfffffffc, 0xfffffffc,
+ 0xfffffffd, 0xfffffffd };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0x3c, 0x3c, 0x3d, 0x3d,
+ 0x3d, 0x3d, 0x3e, 0x3e,
+ 0x3e, 0x3e, 0x3f, 0x3f,
+ 0x3f, 0x3f, 0x40, 0x40 };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0x1ffe, 0x1ffe, 0x1ffe, 0x1ffe,
+ 0x1fff, 0x1fff, 0x1fff, 0x1fff };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0x8000000, 0x8000000,
+ 0x8000000, 0x8000000 };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0x80000000, 0x80000000 };
+
+/* Expected results with maximum input and max shift amount. */
+VECT_VAR_DECL(expected_max_sh_max,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_max,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_max,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_max,int,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_max_sh_max,uint,8,8) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_sh_max,uint,16,4) [] = { 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_sh_max,uint,32,2) [] = { 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_sh_max,uint,64,1) [] = { 0x1 };
+VECT_VAR_DECL(expected_max_sh_max,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_max,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_max,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_max,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_sh_max,uint,8,16) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_sh_max,uint,16,8) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_sh_max,uint,32,4) [] = { 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_sh_max,uint,64,2) [] = { 0x1, 0x1 };
+
+/* Expected results with maximum input and shift by 1. */
+VECT_VAR_DECL(expected_max_sh_1,int,8,8) [] = { 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40 };
+VECT_VAR_DECL(expected_max_sh_1,int,16,4) [] = { 0x4000, 0x4000,
+ 0x4000, 0x4000 };
+VECT_VAR_DECL(expected_max_sh_1,int,32,2) [] = { 0x40000000, 0x40000000 };
+VECT_VAR_DECL(expected_max_sh_1,int,64,1) [] = { 0x4000000000000000 };
+VECT_VAR_DECL(expected_max_sh_1,uint,8,8) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected_max_sh_1,uint,16,4) [] = { 0x8000, 0x8000,
+ 0x8000, 0x8000 };
+VECT_VAR_DECL(expected_max_sh_1,uint,32,2) [] = { 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_max_sh_1,uint,64,1) [] = { 0x8000000000000000 };
+VECT_VAR_DECL(expected_max_sh_1,int,8,16) [] = { 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40 };
+VECT_VAR_DECL(expected_max_sh_1,int,16,8) [] = { 0x4000, 0x4000,
+ 0x4000, 0x4000,
+ 0x4000, 0x4000,
+ 0x4000, 0x4000 };
+VECT_VAR_DECL(expected_max_sh_1,int,32,4) [] = { 0x40000000, 0x40000000,
+ 0x40000000, 0x40000000 };
+VECT_VAR_DECL(expected_max_sh_1,int,64,2) [] = { 0x4000000000000000,
+ 0x4000000000000000 };
+VECT_VAR_DECL(expected_max_sh_1,uint,8,16) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected_max_sh_1,uint,16,8) [] = { 0x8000, 0x8000,
+ 0x8000, 0x8000,
+ 0x8000, 0x8000,
+ 0x8000, 0x8000 };
+VECT_VAR_DECL(expected_max_sh_1,uint,32,4) [] = { 0x80000000, 0x80000000,
+ 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_max_sh_1,uint,64,2) [] = { 0x8000000000000000,
+ 0x8000000000000000 };
+
+/* Expected results with maximum input and shift by 3. */
+VECT_VAR_DECL(expected_max_sh_3,int,8,8) [] = { 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10 };
+VECT_VAR_DECL(expected_max_sh_3,int,16,4) [] = { 0x1000, 0x1000,
+ 0x1000, 0x1000 };
+VECT_VAR_DECL(expected_max_sh_3,int,32,2) [] = { 0x10000000, 0x10000000 };
+VECT_VAR_DECL(expected_max_sh_3,int,64,1) [] = { 0x1000000000000000 };
+VECT_VAR_DECL(expected_max_sh_3,uint,8,8) [] = { 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20 };
+VECT_VAR_DECL(expected_max_sh_3,uint,16,4) [] = { 0x2000, 0x2000,
+ 0x2000, 0x2000 };
+VECT_VAR_DECL(expected_max_sh_3,uint,32,2) [] = { 0x20000000, 0x20000000 };
+VECT_VAR_DECL(expected_max_sh_3,uint,64,1) [] = { 0x2000000000000000 };
+VECT_VAR_DECL(expected_max_sh_3,int,8,16) [] = { 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10 };
+VECT_VAR_DECL(expected_max_sh_3,int,16,8) [] = { 0x1000, 0x1000,
+ 0x1000, 0x1000,
+ 0x1000, 0x1000,
+ 0x1000, 0x1000 };
+VECT_VAR_DECL(expected_max_sh_3,int,32,4) [] = { 0x10000000, 0x10000000,
+ 0x10000000, 0x10000000 };
+VECT_VAR_DECL(expected_max_sh_3,int,64,2) [] = { 0x1000000000000000,
+ 0x1000000000000000 };
+VECT_VAR_DECL(expected_max_sh_3,uint,8,16) [] = { 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20 };
+VECT_VAR_DECL(expected_max_sh_3,uint,16,8) [] = { 0x2000, 0x2000,
+ 0x2000, 0x2000,
+ 0x2000, 0x2000,
+ 0x2000, 0x2000 };
+VECT_VAR_DECL(expected_max_sh_3,uint,32,4) [] = { 0x20000000, 0x20000000,
+ 0x20000000, 0x20000000 };
+VECT_VAR_DECL(expected_max_sh_3,uint,64,2) [] = { 0x2000000000000000,
+ 0x2000000000000000 };
+
+/* Expected results with max negative input (for signed types), shift
+ by 1. */
+VECT_VAR_DECL(expected_max_neg_sh_1,int,8,8) [] = { 0xc0, 0xc0, 0xc0, 0xc0,
+ 0xc0, 0xc0, 0xc0, 0xc0 };
+VECT_VAR_DECL(expected_max_neg_sh_1,int,16,4) [] = { 0xc000, 0xc000,
+ 0xc000, 0xc000 };
+VECT_VAR_DECL(expected_max_neg_sh_1,int,32,2) [] = { 0xc0000000, 0xc0000000 };
+VECT_VAR_DECL(expected_max_neg_sh_1,int,64,1) [] = { 0xc000000000000000 };
+VECT_VAR_DECL(expected_max_neg_sh_1,uint,8,8) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected_max_neg_sh_1,uint,16,4) [] = { 0x8000, 0x8000,
+ 0x8000, 0x8000 };
+VECT_VAR_DECL(expected_max_neg_sh_1,uint,32,2) [] = { 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_max_neg_sh_1,uint,64,1) [] = { 0x8000000000000000 };
+VECT_VAR_DECL(expected_max_neg_sh_1,int,8,16) [] = { 0xc0, 0xc0, 0xc0, 0xc0,
+ 0xc0, 0xc0, 0xc0, 0xc0,
+ 0xc0, 0xc0, 0xc0, 0xc0,
+ 0xc0, 0xc0, 0xc0, 0xc0 };
+VECT_VAR_DECL(expected_max_neg_sh_1,int,16,8) [] = { 0xc000, 0xc000,
+ 0xc000, 0xc000,
+ 0xc000, 0xc000,
+ 0xc000, 0xc000 };
+VECT_VAR_DECL(expected_max_neg_sh_1,int,32,4) [] = { 0xc0000000, 0xc0000000,
+ 0xc0000000, 0xc0000000 };
+VECT_VAR_DECL(expected_max_neg_sh_1,int,64,2) [] = { 0xc000000000000000,
+ 0xc000000000000000 };
+VECT_VAR_DECL(expected_max_neg_sh_1,uint,8,16) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected_max_neg_sh_1,uint,16,8) [] = { 0x8000, 0x8000,
+ 0x8000, 0x8000,
+ 0x8000, 0x8000,
+ 0x8000, 0x8000 };
+VECT_VAR_DECL(expected_max_neg_sh_1,uint,32,4) [] = { 0x80000000, 0x80000000,
+ 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_max_neg_sh_1,uint,64,2) [] = { 0x8000000000000000,
+ 0x8000000000000000 };
+
+/* Expected results with max negative input (for signed types), shift
+ by 3. */
+VECT_VAR_DECL(expected_max_neg_sh_3,int,8,8) [] = { 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0 };
+VECT_VAR_DECL(expected_max_neg_sh_3,int,16,4) [] = { 0xf000, 0xf000,
+ 0xf000, 0xf000 };
+VECT_VAR_DECL(expected_max_neg_sh_3,int,32,2) [] = { 0xf0000000, 0xf0000000 };
+VECT_VAR_DECL(expected_max_neg_sh_3,int,64,1) [] = { 0xf000000000000000 };
+VECT_VAR_DECL(expected_max_neg_sh_3,uint,8,8) [] = { 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20 };
+VECT_VAR_DECL(expected_max_neg_sh_3,uint,16,4) [] = { 0x2000, 0x2000,
+ 0x2000, 0x2000 };
+VECT_VAR_DECL(expected_max_neg_sh_3,uint,32,2) [] = { 0x20000000, 0x20000000 };
+VECT_VAR_DECL(expected_max_neg_sh_3,uint,64,1) [] = { 0x2000000000000000 };
+VECT_VAR_DECL(expected_max_neg_sh_3,int,8,16) [] = { 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0 };
+VECT_VAR_DECL(expected_max_neg_sh_3,int,16,8) [] = { 0xf000, 0xf000,
+ 0xf000, 0xf000,
+ 0xf000, 0xf000,
+ 0xf000, 0xf000 };
+VECT_VAR_DECL(expected_max_neg_sh_3,int,32,4) [] = { 0xf0000000, 0xf0000000,
+ 0xf0000000, 0xf0000000 };
+VECT_VAR_DECL(expected_max_neg_sh_3,int,64,2) [] = { 0xf000000000000000,
+ 0xf000000000000000 };
+VECT_VAR_DECL(expected_max_neg_sh_3,uint,8,16) [] = { 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20 };
+VECT_VAR_DECL(expected_max_neg_sh_3,uint,16,8) [] = { 0x2000, 0x2000,
+ 0x2000, 0x2000,
+ 0x2000, 0x2000,
+ 0x2000, 0x2000 };
+VECT_VAR_DECL(expected_max_neg_sh_3,uint,32,4) [] = { 0x20000000, 0x20000000,
+ 0x20000000, 0x20000000 };
+VECT_VAR_DECL(expected_max_neg_sh_3,uint,64,2) [] = { 0x2000000000000000,
+ 0x2000000000000000 };
+
+#define TEST_MSG "VRSHR_N"
+void exec_vrshr_n (void)
+{
+ /* Basic test: y=vrshr_n(x,v), then store the result. */
+#define TEST_VRSHR_N(Q, T1, T2, W, N, V) \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vrshr##Q##_n_##T2##W(VECT_VAR(vector, T1, W, N), \
+ V); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), VECT_VAR(vector_res, T1, W, N))
+
+ DECL_VARIABLE_ALL_VARIANTS(vector);
+ DECL_VARIABLE_ALL_VARIANTS(vector_res);
+
+ clean_results ();
+
+ /* Initialize input "vector" from "buffer". */
+ TEST_MACRO_ALL_VARIANTS_2_5(VLOAD, vector, buffer);
+
+ /* Choose shift amount arbitrarily. */
+ TEST_VRSHR_N(, int, s, 8, 8, 1);
+ TEST_VRSHR_N(, int, s, 16, 4, 12);
+ TEST_VRSHR_N(, int, s, 32, 2, 2);
+ TEST_VRSHR_N(, int, s, 64, 1, 32);
+ TEST_VRSHR_N(, uint, u, 8, 8, 2);
+ TEST_VRSHR_N(, uint, u, 16, 4, 3);
+ TEST_VRSHR_N(, uint, u, 32, 2, 5);
+ TEST_VRSHR_N(, uint, u, 64, 1, 33);
+
+ TEST_VRSHR_N(q, int, s, 8, 16, 1);
+ TEST_VRSHR_N(q, int, s, 16, 8, 12);
+ TEST_VRSHR_N(q, int, s, 32, 4, 2);
+ TEST_VRSHR_N(q, int, s, 64, 2, 32);
+ TEST_VRSHR_N(q, uint, u, 8, 16, 2);
+ TEST_VRSHR_N(q, uint, u, 16, 8, 3);
+ TEST_VRSHR_N(q, uint, u, 32, 4, 5);
+ TEST_VRSHR_N(q, uint, u, 64, 2, 33);
+
+#define CMT ""
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected, CMT);
+
+
+ /* Use maximum positive input value. */
+ VDUP(vector, , int, s, 8, 8, 0x7F);
+ VDUP(vector, , int, s, 16, 4, 0x7FFF);
+ VDUP(vector, , int, s, 32, 2, 0x7FFFFFFF);
+ VDUP(vector, , int, s, 64, 1, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector, , uint, u, 8, 8, 0xFF);
+ VDUP(vector, , uint, u, 16, 4, 0xFFFF);
+ VDUP(vector, , uint, u, 32, 2, 0xFFFFFFFF);
+ VDUP(vector, , uint, u, 64, 1, 0xFFFFFFFFFFFFFFFFULL);
+ VDUP(vector, q, int, s, 8, 16, 0x7F);
+ VDUP(vector, q, int, s, 16, 8, 0x7FFF);
+ VDUP(vector, q, int, s, 32, 4, 0x7FFFFFFF);
+ VDUP(vector, q, int, s, 64, 2, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector, q, uint, u, 8, 16, 0xFF);
+ VDUP(vector, q, uint, u, 16, 8, 0xFFFF);
+ VDUP(vector, q, uint, u, 32, 4, 0xFFFFFFFF);
+ VDUP(vector, q, uint, u, 64, 2, 0xFFFFFFFFFFFFFFFFULL);
+
+ /* Use max shift amount, to exercise saturation. */
+ TEST_VRSHR_N(, int, s, 8, 8, 8);
+ TEST_VRSHR_N(, int, s, 16, 4, 16);
+ TEST_VRSHR_N(, int, s, 32, 2, 32);
+ TEST_VRSHR_N(, int, s, 64, 1, 64);
+ TEST_VRSHR_N(, uint, u, 8, 8, 8);
+ TEST_VRSHR_N(, uint, u, 16, 4, 16);
+ TEST_VRSHR_N(, uint, u, 32, 2, 32);
+ TEST_VRSHR_N(, uint, u, 64, 1, 64);
+ TEST_VRSHR_N(q, int, s, 8, 16, 8);
+ TEST_VRSHR_N(q, int, s, 16, 8, 16);
+ TEST_VRSHR_N(q, int, s, 32, 4, 32);
+ TEST_VRSHR_N(q, int, s, 64, 2, 64);
+ TEST_VRSHR_N(q, uint, u, 8, 16, 8);
+ TEST_VRSHR_N(q, uint, u, 16, 8, 16);
+ TEST_VRSHR_N(q, uint, u, 32, 4, 32);
+ TEST_VRSHR_N(q, uint, u, 64, 2, 64);
+
+#undef CMT
+#define CMT " (overflow test: max shift amount, max positive input)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_sh_max, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_sh_max, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_sh_max, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max_sh_max, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_sh_max, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_sh_max, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_sh_max, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max_sh_max, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max_sh_max, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max_sh_max, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max_sh_max, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max_sh_max, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max_sh_max, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max_sh_max, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max_sh_max, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max_sh_max, CMT);
+
+
+ /* Use 1 as shift amount, to exercise saturation. */
+ TEST_VRSHR_N(, int, s, 8, 8, 1);
+ TEST_VRSHR_N(, int, s, 16, 4, 1);
+ TEST_VRSHR_N(, int, s, 32, 2, 1);
+ TEST_VRSHR_N(, int, s, 64, 1, 1);
+ TEST_VRSHR_N(, uint, u, 8, 8, 1);
+ TEST_VRSHR_N(, uint, u, 16, 4, 1);
+ TEST_VRSHR_N(, uint, u, 32, 2, 1);
+ TEST_VRSHR_N(, uint, u, 64, 1, 1);
+ TEST_VRSHR_N(q, int, s, 8, 16, 1);
+ TEST_VRSHR_N(q, int, s, 16, 8, 1);
+ TEST_VRSHR_N(q, int, s, 32, 4, 1);
+ TEST_VRSHR_N(q, int, s, 64, 2, 1);
+ TEST_VRSHR_N(q, uint, u, 8, 16, 1);
+ TEST_VRSHR_N(q, uint, u, 16, 8, 1);
+ TEST_VRSHR_N(q, uint, u, 32, 4, 1);
+ TEST_VRSHR_N(q, uint, u, 64, 2, 1);
+
+#undef CMT
+#define CMT " (overflow test: shift by 1, with max input)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_sh_1, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_sh_1, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_sh_1, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max_sh_1, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max_sh_1, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max_sh_1, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max_sh_1, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max_sh_1, CMT);
+
+
+ /* Use 3 as shift amount, to exercise saturation. */
+ TEST_VRSHR_N(, int, s, 8, 8, 3);
+ TEST_VRSHR_N(, int, s, 16, 4, 3);
+ TEST_VRSHR_N(, int, s, 32, 2, 3);
+ TEST_VRSHR_N(, int, s, 64, 1, 3);
+ TEST_VRSHR_N(, uint, u, 8, 8, 3);
+ TEST_VRSHR_N(, uint, u, 16, 4, 3);
+ TEST_VRSHR_N(, uint, u, 32, 2, 3);
+ TEST_VRSHR_N(, uint, u, 64, 1, 3);
+ TEST_VRSHR_N(q, int, s, 8, 16, 3);
+ TEST_VRSHR_N(q, int, s, 16, 8, 3);
+ TEST_VRSHR_N(q, int, s, 32, 4, 3);
+ TEST_VRSHR_N(q, int, s, 64, 2, 3);
+ TEST_VRSHR_N(q, uint, u, 8, 16, 3);
+ TEST_VRSHR_N(q, uint, u, 16, 8, 3);
+ TEST_VRSHR_N(q, uint, u, 32, 4, 3);
+ TEST_VRSHR_N(q, uint, u, 64, 2, 3);
+
+#undef CMT
+#define CMT " (overflow test: shift by 3, with max input)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_sh_3, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_sh_3, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_sh_3, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max_sh_3, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max_sh_3, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max_sh_3, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max_sh_3, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max_sh_3, CMT);
+
+
+ /* Use minimum negative input for signed types. */
+ VDUP(vector, , int, s, 8, 8, 0x80);
+ VDUP(vector, , int, s, 16, 4, 0x8000);
+ VDUP(vector, , int, s, 32, 2, 0x80000000);
+ VDUP(vector, , int, s, 64, 1, 0x8000000000000000LL);
+ VDUP(vector, , uint, u, 8, 8, 0xFF);
+ VDUP(vector, , uint, u, 16, 4, 0xFFFF);
+ VDUP(vector, , uint, u, 32, 2, 0xFFFFFFFF);
+ VDUP(vector, , uint, u, 64, 1, 0xFFFFFFFFFFFFFFFFULL);
+ VDUP(vector, q, int, s, 8, 16, 0x80);
+ VDUP(vector, q, int, s, 16, 8, 0x8000);
+ VDUP(vector, q, int, s, 32, 4, 0x80000000);
+ VDUP(vector, q, int, s, 64, 2, 0x8000000000000000LL);
+ VDUP(vector, q, uint, u, 8, 16, 0xFF);
+ VDUP(vector, q, uint, u, 16, 8, 0xFFFF);
+ VDUP(vector, q, uint, u, 32, 4, 0xFFFFFFFF);
+ VDUP(vector, q, uint, u, 64, 2, 0xFFFFFFFFFFFFFFFFULL);
+
+
+ /* Use 1 as shift amount, to exercise saturation code. */
+ TEST_VRSHR_N(, int, s, 8, 8, 1);
+ TEST_VRSHR_N(, int, s, 16, 4, 1);
+ TEST_VRSHR_N(, int, s, 32, 2, 1);
+ TEST_VRSHR_N(, int, s, 64, 1, 1);
+ TEST_VRSHR_N(, uint, u, 8, 8, 1);
+ TEST_VRSHR_N(, uint, u, 16, 4, 1);
+ TEST_VRSHR_N(, uint, u, 32, 2, 1);
+ TEST_VRSHR_N(, uint, u, 64, 1, 1);
+ TEST_VRSHR_N(q, int, s, 8, 16, 1);
+ TEST_VRSHR_N(q, int, s, 16, 8, 1);
+ TEST_VRSHR_N(q, int, s, 32, 4, 1);
+ TEST_VRSHR_N(q, int, s, 64, 2, 1);
+ TEST_VRSHR_N(q, uint, u, 8, 16, 1);
+ TEST_VRSHR_N(q, uint, u, 16, 8, 1);
+ TEST_VRSHR_N(q, uint, u, 32, 4, 1);
+ TEST_VRSHR_N(q, uint, u, 64, 2, 1);
+
+#undef CMT
+#define CMT " (overflow test: shift by 1, with negative input)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_neg_sh_1, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_neg_sh_1, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_neg_sh_1, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max_neg_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_neg_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_neg_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_neg_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max_neg_sh_1, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max_neg_sh_1, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max_neg_sh_1, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max_neg_sh_1, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max_neg_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max_neg_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max_neg_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max_neg_sh_1, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max_neg_sh_1, CMT);
+
+
+ /* Use 3 as shift amount, to exercise saturation code. */
+ TEST_VRSHR_N(, int, s, 8, 8, 3);
+ TEST_VRSHR_N(, int, s, 16, 4, 3);
+ TEST_VRSHR_N(, int, s, 32, 2, 3);
+ TEST_VRSHR_N(, int, s, 64, 1, 3);
+ TEST_VRSHR_N(, uint, u, 8, 8, 3);
+ TEST_VRSHR_N(, uint, u, 16, 4, 3);
+ TEST_VRSHR_N(, uint, u, 32, 2, 3);
+ TEST_VRSHR_N(, uint, u, 64, 1, 3);
+ TEST_VRSHR_N(q, int, s, 8, 16, 3);
+ TEST_VRSHR_N(q, int, s, 16, 8, 3);
+ TEST_VRSHR_N(q, int, s, 32, 4, 3);
+ TEST_VRSHR_N(q, int, s, 64, 2, 3);
+ TEST_VRSHR_N(q, uint, u, 8, 16, 3);
+ TEST_VRSHR_N(q, uint, u, 16, 8, 3);
+ TEST_VRSHR_N(q, uint, u, 32, 4, 3);
+ TEST_VRSHR_N(q, uint, u, 64, 2, 3);
+
+#undef CMT
+#define CMT " (overflow test: shift by 3, with negative input)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_neg_sh_3, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_neg_sh_3, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_neg_sh_3, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max_neg_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_neg_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_neg_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_neg_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max_neg_sh_3, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max_neg_sh_3, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max_neg_sh_3, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max_neg_sh_3, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max_neg_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max_neg_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max_neg_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max_neg_sh_3, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max_neg_sh_3, CMT);
+}
+
+int main (void)
+{
+ exec_vrshr_n ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrshrn_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrshrn_n.c
new file mode 100644
index 00000000000..a2b40b8ae13
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrshrn_n.c
@@ -0,0 +1,143 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results with input=0. */
+VECT_VAR_DECL(expected_0,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_0,uint,32,2) [] = { 0x0, 0x0 };
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0xf8, 0xf9, 0xf9, 0xfa,
+ 0xfa, 0xfb, 0xfb, 0xfc };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xfff8, 0xfff9, 0xfff9, 0xfffa };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xfffffffc, 0xfffffffc };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0xfc, 0xfc, 0xfd, 0xfd,
+ 0xfd, 0xfd, 0xfe, 0xfe };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0xfffe, 0xfffe, 0xfffe, 0xfffe };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xfffffffe, 0xfffffffe };
+
+/* Expected results with large shift amount. */
+VECT_VAR_DECL(expected_sh_large,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_sh_large,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_sh_large,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_sh_large,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_sh_large,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_sh_large,uint,32,2) [] = { 0x0, 0x0 };
+
+#define TEST_MSG "VRSHRN_N"
+void exec_vrshrn_n (void)
+{
+ /* Basic test: v2=vrshrn_n(v1,v), then store the result. */
+#define TEST_VRSHRN_N(T1, T2, W, N, W2, V) \
+ VECT_VAR(vector_res, T1, W2, N) = \
+ vrshrn_n_##T2##W(VECT_VAR(vector, T1, W, N), \
+ V); \
+ vst1_##T2##W2(VECT_VAR(result, T1, W2, N), VECT_VAR(vector_res, T1, W2, N))
+
+ /* vector is twice as large as vector_res. */
+ DECL_VARIABLE(vector, int, 16, 8);
+ DECL_VARIABLE(vector, int, 32, 4);
+ DECL_VARIABLE(vector, int, 64, 2);
+ DECL_VARIABLE(vector, uint, 16, 8);
+ DECL_VARIABLE(vector, uint, 32, 4);
+ DECL_VARIABLE(vector, uint, 64, 2);
+
+ DECL_VARIABLE(vector_res, int, 8, 8);
+ DECL_VARIABLE(vector_res, int, 16, 4);
+ DECL_VARIABLE(vector_res, int, 32, 2);
+ DECL_VARIABLE(vector_res, uint, 8, 8);
+ DECL_VARIABLE(vector_res, uint, 16, 4);
+ DECL_VARIABLE(vector_res, uint, 32, 2);
+
+ clean_results ();
+
+ /* Fill input vector with 0, to check behavior on limits. */
+ VDUP(vector, q, int, s, 16, 8, 0);
+ VDUP(vector, q, int, s, 32, 4, 0);
+ VDUP(vector, q, int, s, 64, 2, 0);
+ VDUP(vector, q, uint, u, 16, 8, 0);
+ VDUP(vector, q, uint, u, 32, 4, 0);
+ VDUP(vector, q, uint, u, 64, 2, 0);
+
+ /* Choose shift amount arbitrarily. */
+ TEST_VRSHRN_N(int, s, 16, 8, 8, 1);
+ TEST_VRSHRN_N(int, s, 32, 4, 16, 1);
+ TEST_VRSHRN_N(int, s, 64, 2, 32, 2);
+ TEST_VRSHRN_N(uint, u, 16, 8, 8, 2);
+ TEST_VRSHRN_N(uint, u, 32, 4, 16, 3);
+ TEST_VRSHRN_N(uint, u, 64, 2, 32, 3);
+
+#define CMT " (with input = 0)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_0, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_0, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_0, CMT);
+
+
+ /* Test again, with predefined input values. */
+ VLOAD(vector, buffer, q, int, s, 16, 8);
+ VLOAD(vector, buffer, q, int, s, 32, 4);
+ VLOAD(vector, buffer, q, int, s, 64, 2);
+ VLOAD(vector, buffer, q, uint, u, 16, 8);
+ VLOAD(vector, buffer, q, uint, u, 32, 4);
+ VLOAD(vector, buffer, q, uint, u, 64, 2);
+
+ /* Choose shift amount arbitrarily. */
+ TEST_VRSHRN_N(int, s, 16, 8, 8, 1);
+ TEST_VRSHRN_N(int, s, 32, 4, 16, 1);
+ TEST_VRSHRN_N(int, s, 64, 2, 32, 2);
+ TEST_VRSHRN_N(uint, u, 16, 8, 8, 2);
+ TEST_VRSHRN_N(uint, u, 32, 4, 16, 3);
+ TEST_VRSHRN_N(uint, u, 64, 2, 32, 3);
+
+#undef CMT
+#define CMT ""
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+
+
+  /* Fill input with arbitrary values.  */
+ VDUP(vector, q, int, s, 16, 8, 30);
+ VDUP(vector, q, int, s, 32, 4, 0);
+ VDUP(vector, q, int, s, 64, 2, 0);
+ VDUP(vector, q, uint, u, 16, 8, 0xFFF0);
+ VDUP(vector, q, uint, u, 32, 4, 0xFFFFFFF0);
+ VDUP(vector, q, uint, u, 64, 2, 0);
+
+ /* Choose large shift amount arbitrarily. */
+ TEST_VRSHRN_N(int, s, 16, 8, 8, 7);
+ TEST_VRSHRN_N(int, s, 32, 4, 16, 14);
+ TEST_VRSHRN_N(int, s, 64, 2, 32, 31);
+ TEST_VRSHRN_N(uint, u, 16, 8, 8, 7);
+ TEST_VRSHRN_N(uint, u, 32, 4, 16, 16);
+ TEST_VRSHRN_N(uint, u, 64, 2, 32, 3);
+
+#undef CMT
+#define CMT " (with large shift amount)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_sh_large, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_sh_large, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_sh_large, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_sh_large, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_sh_large, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_sh_large, CMT);
+}
+
+int main (void)
+{
+ exec_vrshrn_n ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsqrte.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsqrte.c
new file mode 100644
index 00000000000..0291ec00c5e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsqrte.c
@@ -0,0 +1,157 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+#include <math.h>
+
+/* Expected results. */
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0x9c800000, 0x9c800000,
+ 0x9c800000, 0x9c800000 };
+VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0x3e498000, 0x3e498000 };
+VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0x3e700000, 0x3e700000,
+ 0x3e700000, 0x3e700000 };
+
+/* Expected results with large uint #1. */
+VECT_VAR_DECL(expected_1,uint,32,2) [] = { 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_1,uint,32,4) [] = { 0xae800000, 0xae800000,
+ 0xae800000, 0xae800000 };
+
+/* Expected results with large uint #2. */
+VECT_VAR_DECL(expected_2,uint,32,2) [] = { 0xb4800000, 0xb4800000 };
+VECT_VAR_DECL(expected_2,uint,32,4) [] = { 0xed000000, 0xed000000,
+ 0xed000000, 0xed000000 };
+
+/* Expected results with FP special input values (NaNs, ...).  */
+VECT_VAR_DECL(expected_fp1,hfloat,32,2) [] = { 0x7fc00000, 0x7fc00000 };
+VECT_VAR_DECL(expected_fp1,hfloat,32,4) [] = { 0x7f800000, 0x7f800000,
+ 0x7f800000, 0x7f800000 };
+
+/* Expected results with FP special input values
+ (negative, infinity). */
+VECT_VAR_DECL(expected_fp2,hfloat,32,2) [] = { 0x7fc00000, 0x7fc00000 };
+VECT_VAR_DECL(expected_fp2,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Expected results with FP special input values
+ (-0, -infinity). */
+VECT_VAR_DECL(expected_fp3,hfloat,32,2) [] = { 0xff800000, 0xff800000 };
+VECT_VAR_DECL(expected_fp3,hfloat,32,4) [] = { 0x7fc00000, 0x7fc00000,
+ 0x7fc00000, 0x7fc00000 };
+
+#define TEST_MSG "VRSQRTE/VRSQRTEQ"
+void exec_vrsqrte(void)
+{
+ int i;
+
+ /* Basic test: y=vrsqrte(x), then store the result. */
+#define TEST_VRSQRTE(Q, T1, T2, W, N) \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vrsqrte##Q##_##T2##W(VECT_VAR(vector, T1, W, N)); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N))
+
+ DECL_VARIABLE(vector, uint, 32, 2);
+ DECL_VARIABLE(vector, float, 32, 2);
+ DECL_VARIABLE(vector, uint, 32, 4);
+ DECL_VARIABLE(vector, float, 32, 4);
+
+ DECL_VARIABLE(vector_res, uint, 32, 2);
+ DECL_VARIABLE(vector_res, float, 32, 2);
+ DECL_VARIABLE(vector_res, uint, 32, 4);
+ DECL_VARIABLE(vector_res, float, 32, 4);
+
+ clean_results ();
+
+ /* Choose init value arbitrarily. */
+ VDUP(vector, , uint, u, 32, 2, 0x12345678);
+ VDUP(vector, , float, f, 32, 2, 25.799999f);
+ VDUP(vector, q, uint, u, 32, 4, 0xABCDEF10);
+ VDUP(vector, q, float, f, 32, 4, 18.2f);
+
+ /* Apply the operator. */
+ TEST_VRSQRTE(, uint, u, 32, 2);
+ TEST_VRSQRTE(, float, f, 32, 2);
+ TEST_VRSQRTE(q, uint, u, 32, 4);
+ TEST_VRSQRTE(q, float, f, 32, 4);
+
+#define CMT ""
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected, CMT);
+
+
+ /* Don't test FP variants with negative inputs. */
+ /* Use input with various values of bits 30 and 31. */
+ VDUP(vector, , uint, u, 32, 2, 0xFFFFFFFF);
+ VDUP(vector, q, uint, u, 32, 4, 0x89081234);
+
+ /* Apply the operator. */
+ TEST_VRSQRTE(, uint, u, 32, 2);
+ TEST_VRSQRTE(q, uint, u, 32, 4);
+
+#undef CMT
+#define CMT " (large uint #1)"
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_1, CMT);
+
+
+ /* Choose init value arbitrarily. */
+ VDUP(vector, , uint, u, 32, 2, 0x80000000);
+ VDUP(vector, q, uint, u, 32, 4, 0x4ABCDEF0);
+
+ /* Apply the operator. */
+ TEST_VRSQRTE(, uint, u, 32, 2);
+ TEST_VRSQRTE(q, uint, u, 32, 4);
+
+#undef CMT
+#define CMT " (large uint #2)"
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_2, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_2, CMT);
+
+
+ /* Test FP variants with special input values (NaNs, ...). */
+ VDUP(vector, , float, f, 32, 2, NAN);
+ VDUP(vector, q, float, f, 32, 4, 0.0f);
+
+ /* Apply the operator. */
+ TEST_VRSQRTE(, float, f, 32, 2);
+ TEST_VRSQRTE(q, float, f, 32, 4);
+
+#undef CMT
+#define CMT " FP special (NaN, 0)"
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_fp1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_fp1, CMT);
+
+
+ /* Test FP variants with special input values (negative, infinity). */
+ VDUP(vector, , float, f, 32, 2, -1.0f);
+ VDUP(vector, q, float, f, 32, 4, HUGE_VALF);
+
+ /* Apply the operator. */
+ TEST_VRSQRTE(, float, f, 32, 2);
+ TEST_VRSQRTE(q, float, f, 32, 4);
+
+#undef CMT
+#define CMT " FP special (negative, infinity)"
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_fp2, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_fp2, CMT);
+
+ /* Test FP variants with special input values (-0, -infinity). */
+ VDUP(vector, , float, f, 32, 2, -0.0f);
+ VDUP(vector, q, float, f, 32, 4, -HUGE_VALF);
+
+ /* Apply the operator. */
+ TEST_VRSQRTE(, float, f, 32, 2);
+ TEST_VRSQRTE(q, float, f, 32, 4);
+
+#undef CMT
+#define CMT " FP special (-0, -infinity)"
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_fp3, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_fp3, CMT);
+}
+
+int main (void)
+{
+ exec_vrsqrte ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsqrts.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsqrts.c
new file mode 100644
index 00000000000..4531026dc4c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsqrts.c
@@ -0,0 +1,118 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+#include <math.h>
+
+/* Expected results. */
+VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0xc2796b84, 0xc2796b84 };
+VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0xc0e4a3d8, 0xc0e4a3d8,
+ 0xc0e4a3d8, 0xc0e4a3d8 };
+
+/* Expected results with input=NaN. */
+VECT_VAR_DECL(expected_nan,hfloat,32,2) [] = { 0x7fc00000, 0x7fc00000 };
+VECT_VAR_DECL(expected_nan,hfloat,32,4) [] = { 0x7fc00000, 0x7fc00000,
+ 0x7fc00000, 0x7fc00000 };
+
+/* Expected results with FP special input values (infinity, 0).  */
+VECT_VAR_DECL(expected_fp1,hfloat,32,2) [] = { 0xff800000, 0xff800000 };
+VECT_VAR_DECL(expected_fp1,hfloat,32,4) [] = { 0x3fc00000, 0x3fc00000,
+ 0x3fc00000, 0x3fc00000 };
+
+/* Expected results with only FP special input values (infinity,
+ 0). */
+VECT_VAR_DECL(expected_fp2,hfloat,32,2) [] = { 0x3fc00000, 0x3fc00000 };
+VECT_VAR_DECL(expected_fp2,hfloat,32,4) [] = { 0x3fc00000, 0x3fc00000,
+ 0x3fc00000, 0x3fc00000 };
+
+#define TEST_MSG "VRSQRTS/VRSQRTSQ"
+void exec_vrsqrts(void)
+{
+ int i;
+
+ /* Basic test: y=vrsqrts(x), then store the result. */
+#define TEST_VRSQRTS(Q, T1, T2, W, N) \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vrsqrts##Q##_##T2##W(VECT_VAR(vector, T1, W, N), \
+ VECT_VAR(vector2, T1, W, N)); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N))
+
+ /* No need for integer variants. */
+ DECL_VARIABLE(vector, float, 32, 2);
+ DECL_VARIABLE(vector, float, 32, 4);
+
+ DECL_VARIABLE(vector2, float, 32, 2);
+ DECL_VARIABLE(vector2, float, 32, 4);
+
+ DECL_VARIABLE(vector_res, float, 32, 2);
+ DECL_VARIABLE(vector_res, float, 32, 4);
+
+ clean_results ();
+
+ /* Choose init value arbitrarily. */
+ VDUP(vector, , float, f, 32, 2, 12.9f);
+ VDUP(vector, q, float, f, 32, 4, 9.1f);
+
+ VDUP(vector2, , float, f, 32, 2, 9.9f);
+ VDUP(vector2, q, float, f, 32, 4, 1.9f);
+
+ /* Apply the operator. */
+ TEST_VRSQRTS(, float, f, 32, 2);
+ TEST_VRSQRTS(q, float, f, 32, 4);
+
+#define CMT ""
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected, CMT);
+
+
+ /* Test FP variants with special input values (NaN). */
+ VDUP(vector, , float, f, 32, 2, NAN);
+ VDUP(vector2, q, float, f, 32, 4, NAN);
+
+ /* Apply the operator. */
+ TEST_VRSQRTS(, float, f, 32, 2);
+ TEST_VRSQRTS(q, float, f, 32, 4);
+
+#undef CMT
+#define CMT " FP special (NAN) and normal values"
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_nan, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_nan, CMT);
+
+
+ /* Test FP variants with special input values (infinity, 0). */
+ VDUP(vector, , float, f, 32, 2, HUGE_VALF);
+ VDUP(vector, q, float, f, 32, 4, 0.0f);
+ /* Restore a normal value in vector2. */
+ VDUP(vector2, q, float, f, 32, 4, 3.2f);
+
+ /* Apply the operator. */
+ TEST_VRSQRTS(, float, f, 32, 2);
+ TEST_VRSQRTS(q, float, f, 32, 4);
+
+#undef CMT
+#define CMT " FP special (infinity, 0) and normal values"
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_fp1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_fp1, CMT);
+
+
+ /* Test FP variants with only special input values (infinity, 0). */
+ VDUP(vector, , float, f, 32, 2, HUGE_VALF);
+ VDUP(vector, q, float, f, 32, 4, 0.0f);
+ VDUP(vector2, , float, f, 32, 2, -0.0f);
+ VDUP(vector2, q, float, f, 32, 4, HUGE_VALF);
+
+ /* Apply the operator. */
+ TEST_VRSQRTS(, float, f, 32, 2);
+ TEST_VRSQRTS(q, float, f, 32, 4);
+
+#undef CMT
+#define CMT " only FP special (infinity, 0)"
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_fp2, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_fp2, CMT);
+}
+
+int main (void)
+{
+ exec_vrsqrts ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsra_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsra_n.c
new file mode 100644
index 00000000000..a9eda2287dc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vrsra_n.c
@@ -0,0 +1,553 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0xf9, 0xfa, 0xfb, 0xfc,
+ 0xfd, 0xfe, 0xff, 0x0 };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xfffffffd, 0xfffffffe };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0xfffffffffffffff0 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0x5, 0x6, 0x7, 0x8,
+ 0x9, 0xa, 0xb, 0xc };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0xfffd, 0xfffe, 0xffff, 0x0 };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xfffffff4, 0xfffffff5 };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0xfffffffffffffff0 };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0xf9, 0xfa, 0xfb, 0xfc,
+ 0xfd, 0xfe, 0xff, 0x0,
+ 0x1, 0x2, 0x3, 0x4,
+ 0x5, 0x6, 0x7, 0x8 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
+ 0xfff4, 0xfff5, 0xfff6, 0xfff7 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0xfffffffd, 0xfffffffe,
+ 0xffffffff, 0x0 };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0xfffffffffffffff0, 0xfffffffffffffff1 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0x5, 0x6, 0x7, 0x8,
+ 0x9, 0xa, 0xb, 0xc,
+ 0xd, 0xe, 0xf, 0x10,
+ 0x11, 0x12, 0x13, 0x14 };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0xfffd, 0xfffe, 0xffff, 0x0,
+ 0x1, 0x2, 0x3, 0x4 };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0xfffffff4, 0xfffffff5,
+ 0xfffffff6, 0xfffffff7 };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0xfffffffffffffff0,
+ 0xfffffffffffffff1 };
+
+/* Expected results with max input and shift by 1. */
+VECT_VAR_DECL(expected_max_sh1,int,8,8) [] = { 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40 };
+VECT_VAR_DECL(expected_max_sh1,int,16,4) [] = { 0x4000, 0x4000, 0x4000, 0x4000 };
+VECT_VAR_DECL(expected_max_sh1,int,32,2) [] = { 0x40000000, 0x40000000 };
+VECT_VAR_DECL(expected_max_sh1,int,64,1) [] = { 0x4000000000000000 };
+VECT_VAR_DECL(expected_max_sh1,uint,8,8) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected_max_sh1,uint,16,4) [] = { 0x8000, 0x8000,
+ 0x8000, 0x8000 };
+VECT_VAR_DECL(expected_max_sh1,uint,32,2) [] = { 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_max_sh1,uint,64,1) [] = { 0x8000000000000000 };
+VECT_VAR_DECL(expected_max_sh1,int,8,16) [] = { 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40 };
+VECT_VAR_DECL(expected_max_sh1,int,16,8) [] = { 0x4000, 0x4000, 0x4000, 0x4000,
+ 0x4000, 0x4000, 0x4000, 0x4000 };
+VECT_VAR_DECL(expected_max_sh1,int,32,4) [] = { 0x40000000, 0x40000000,
+ 0x40000000, 0x40000000 };
+VECT_VAR_DECL(expected_max_sh1,int,64,2) [] = { 0x4000000000000000,
+ 0x4000000000000000 };
+VECT_VAR_DECL(expected_max_sh1,uint,8,16) [] = { 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80 };
+VECT_VAR_DECL(expected_max_sh1,uint,16,8) [] = { 0x8000, 0x8000,
+ 0x8000, 0x8000,
+ 0x8000, 0x8000,
+ 0x8000, 0x8000 };
+VECT_VAR_DECL(expected_max_sh1,uint,32,4) [] = { 0x80000000, 0x80000000,
+ 0x80000000, 0x80000000 };
+VECT_VAR_DECL(expected_max_sh1,uint,64,2) [] = { 0x8000000000000000,
+ 0x8000000000000000 };
+
+/* Expected results with max input and shift by 3. */
+VECT_VAR_DECL(expected_max_sh3,int,8,8) [] = { 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10 };
+VECT_VAR_DECL(expected_max_sh3,int,16,4) [] = { 0x1000, 0x1000, 0x1000, 0x1000 };
+VECT_VAR_DECL(expected_max_sh3,int,32,2) [] = { 0x10000000, 0x10000000 };
+VECT_VAR_DECL(expected_max_sh3,int,64,1) [] = { 0x1000000000000000 };
+VECT_VAR_DECL(expected_max_sh3,uint,8,8) [] = { 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20 };
+VECT_VAR_DECL(expected_max_sh3,uint,16,4) [] = { 0x2000, 0x2000,
+ 0x2000, 0x2000 };
+VECT_VAR_DECL(expected_max_sh3,uint,32,2) [] = { 0x20000000, 0x20000000 };
+VECT_VAR_DECL(expected_max_sh3,uint,64,1) [] = { 0x2000000000000000 };
+VECT_VAR_DECL(expected_max_sh3,int,8,16) [] = { 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10 };
+VECT_VAR_DECL(expected_max_sh3,int,16,8) [] = { 0x1000, 0x1000, 0x1000, 0x1000,
+ 0x1000, 0x1000, 0x1000, 0x1000 };
+VECT_VAR_DECL(expected_max_sh3,int,32,4) [] = { 0x10000000, 0x10000000,
+ 0x10000000, 0x10000000 };
+VECT_VAR_DECL(expected_max_sh3,int,64,2) [] = { 0x1000000000000000,
+ 0x1000000000000000 };
+VECT_VAR_DECL(expected_max_sh3,uint,8,16) [] = { 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20 };
+VECT_VAR_DECL(expected_max_sh3,uint,16,8) [] = { 0x2000, 0x2000,
+ 0x2000, 0x2000,
+ 0x2000, 0x2000,
+ 0x2000, 0x2000 };
+VECT_VAR_DECL(expected_max_sh3,uint,32,4) [] = { 0x20000000, 0x20000000,
+ 0x20000000, 0x20000000 };
+VECT_VAR_DECL(expected_max_sh3,uint,64,2) [] = { 0x2000000000000000,
+ 0x2000000000000000 };
+
+/* Expected results with max input and shift by type width. */
+VECT_VAR_DECL(expected_max_shmax,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_shmax,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_shmax,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_shmax,int,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_max_shmax,uint,8,8) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_shmax,uint,16,4) [] = { 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_shmax,uint,32,2) [] = { 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_shmax,uint,64,1) [] = { 0x1 };
+VECT_VAR_DECL(expected_max_shmax,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_shmax,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_shmax,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_shmax,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_max_shmax,uint,8,16) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_shmax,uint,16,8) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_shmax,uint,32,4) [] = { 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_max_shmax,uint,64,2) [] = { 0x1, 0x1 };
+
+/* Expected results with min negative input and shift by 1. */
+VECT_VAR_DECL(expected_min_sh1,int,8,8) [] = { 0xc0, 0xc0, 0xc0, 0xc0,
+ 0xc0, 0xc0, 0xc0, 0xc0 };
+VECT_VAR_DECL(expected_min_sh1,int,16,4) [] = { 0xc000, 0xc000, 0xc000, 0xc000 };
+VECT_VAR_DECL(expected_min_sh1,int,32,2) [] = { 0xc0000000, 0xc0000000 };
+VECT_VAR_DECL(expected_min_sh1,int,64,1) [] = { 0xc000000000000000 };
+VECT_VAR_DECL(expected_min_sh1,uint,8,8) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_sh1,uint,16,4) [] = { 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_sh1,uint,32,2) [] = { 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_sh1,uint,64,1) [] = { 0x1 };
+VECT_VAR_DECL(expected_min_sh1,int,8,16) [] = { 0xc0, 0xc0, 0xc0, 0xc0,
+ 0xc0, 0xc0, 0xc0, 0xc0,
+ 0xc0, 0xc0, 0xc0, 0xc0,
+ 0xc0, 0xc0, 0xc0, 0xc0 };
+VECT_VAR_DECL(expected_min_sh1,int,16,8) [] = { 0xc000, 0xc000, 0xc000, 0xc000,
+ 0xc000, 0xc000, 0xc000, 0xc000 };
+VECT_VAR_DECL(expected_min_sh1,int,32,4) [] = { 0xc0000000, 0xc0000000,
+ 0xc0000000, 0xc0000000 };
+VECT_VAR_DECL(expected_min_sh1,int,64,2) [] = { 0xc000000000000000,
+ 0xc000000000000000 };
+VECT_VAR_DECL(expected_min_sh1,uint,8,16) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_sh1,uint,16,8) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_sh1,uint,32,4) [] = { 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_sh1,uint,64,2) [] = { 0x1, 0x1 };
+
+/* Expected results with min negative input and shift by 3. */
+VECT_VAR_DECL(expected_min_sh3,int,8,8) [] = { 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0 };
+VECT_VAR_DECL(expected_min_sh3,int,16,4) [] = { 0xf000, 0xf000, 0xf000, 0xf000 };
+VECT_VAR_DECL(expected_min_sh3,int,32,2) [] = { 0xf0000000, 0xf0000000 };
+VECT_VAR_DECL(expected_min_sh3,int,64,1) [] = { 0xf000000000000000 };
+VECT_VAR_DECL(expected_min_sh3,uint,8,8) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_sh3,uint,16,4) [] = { 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_sh3,uint,32,2) [] = { 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_sh3,uint,64,1) [] = { 0x1 };
+VECT_VAR_DECL(expected_min_sh3,int,8,16) [] = { 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0 };
+VECT_VAR_DECL(expected_min_sh3,int,16,8) [] = { 0xf000, 0xf000, 0xf000, 0xf000,
+ 0xf000, 0xf000, 0xf000, 0xf000 };
+VECT_VAR_DECL(expected_min_sh3,int,32,4) [] = { 0xf0000000, 0xf0000000,
+ 0xf0000000, 0xf0000000 };
+VECT_VAR_DECL(expected_min_sh3,int,64,2) [] = { 0xf000000000000000,
+ 0xf000000000000000 };
+VECT_VAR_DECL(expected_min_sh3,uint,8,16) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_sh3,uint,16,8) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_sh3,uint,32,4) [] = { 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_sh3,uint,64,2) [] = { 0x1, 0x1 };
+
+/* Expected results with min negative input and shift by type width. */
+VECT_VAR_DECL(expected_min_shmax,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_min_shmax,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_min_shmax,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_min_shmax,int,64,1) [] = { 0x0 };
+VECT_VAR_DECL(expected_min_shmax,uint,8,8) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_shmax,uint,16,4) [] = { 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_shmax,uint,32,2) [] = { 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_shmax,uint,64,1) [] = { 0x1 };
+VECT_VAR_DECL(expected_min_shmax,int,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_min_shmax,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_min_shmax,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_min_shmax,int,64,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_min_shmax,uint,8,16) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_shmax,uint,16,8) [] = { 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_shmax,uint,32,4) [] = { 0x1, 0x1, 0x1, 0x1 };
+VECT_VAR_DECL(expected_min_shmax,uint,64,2) [] = { 0x1, 0x1 };
+
+#define TEST_MSG "VRSRA_N"
+void exec_vrsra_n (void)
+{
+ /* Basic test: y=vrsra_n(x,v), then store the result. */
+#define TEST_VRSRA_N(Q, T1, T2, W, N, V) \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vrsra##Q##_n_##T2##W(VECT_VAR(vector, T1, W, N), \
+ VECT_VAR(vector2, T1, W, N), \
+ V); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), VECT_VAR(vector_res, T1, W, N))
+
+ DECL_VARIABLE_ALL_VARIANTS(vector);
+ DECL_VARIABLE_ALL_VARIANTS(vector2);
+ DECL_VARIABLE_ALL_VARIANTS(vector_res);
+
+ clean_results ();
+
+ /* Initialize input "vector" from "buffer". */
+ TEST_MACRO_ALL_VARIANTS_2_5(VLOAD, vector, buffer);
+
+ /* Choose arbitrary initialization values. */
+ VDUP(vector2, , int, s, 8, 8, 0x11);
+ VDUP(vector2, , int, s, 16, 4, 0x22);
+ VDUP(vector2, , int, s, 32, 2, 0x33);
+ VDUP(vector2, , int, s, 64, 1, 0x44);
+ VDUP(vector2, , uint, u, 8, 8, 0x55);
+ VDUP(vector2, , uint, u, 16, 4, 0x66);
+ VDUP(vector2, , uint, u, 32, 2, 0x77);
+ VDUP(vector2, , uint, u, 64, 1, 0x88);
+
+ VDUP(vector2, q, int, s, 8, 16, 0x11);
+ VDUP(vector2, q, int, s, 16, 8, 0x22);
+ VDUP(vector2, q, int, s, 32, 4, 0x33);
+ VDUP(vector2, q, int, s, 64, 2, 0x44);
+ VDUP(vector2, q, uint, u, 8, 16, 0x55);
+ VDUP(vector2, q, uint, u, 16, 8, 0x66);
+ VDUP(vector2, q, uint, u, 32, 4, 0x77);
+ VDUP(vector2, q, uint, u, 64, 2, 0x88);
+
+ /* Choose shift amount arbitrarily. */
+ TEST_VRSRA_N(, int, s, 8, 8, 1);
+ TEST_VRSRA_N(, int, s, 16, 4, 12);
+ TEST_VRSRA_N(, int, s, 32, 2, 2);
+ TEST_VRSRA_N(, int, s, 64, 1, 32);
+ TEST_VRSRA_N(, uint, u, 8, 8, 2);
+ TEST_VRSRA_N(, uint, u, 16, 4, 3);
+ TEST_VRSRA_N(, uint, u, 32, 2, 5);
+ TEST_VRSRA_N(, uint, u, 64, 1, 33);
+
+ TEST_VRSRA_N(q, int, s, 8, 16, 1);
+ TEST_VRSRA_N(q, int, s, 16, 8, 12);
+ TEST_VRSRA_N(q, int, s, 32, 4, 2);
+ TEST_VRSRA_N(q, int, s, 64, 2, 32);
+ TEST_VRSRA_N(q, uint, u, 8, 16, 2);
+ TEST_VRSRA_N(q, uint, u, 16, 8, 3);
+ TEST_VRSRA_N(q, uint, u, 32, 4, 5);
+ TEST_VRSRA_N(q, uint, u, 64, 2, 33);
+
+#define CMT ""
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected, CMT);
+
+
+ /* Initialize the accumulator with 0. */
+ VDUP(vector, , int, s, 8, 8, 0);
+ VDUP(vector, , int, s, 16, 4, 0);
+ VDUP(vector, , int, s, 32, 2, 0);
+ VDUP(vector, , int, s, 64, 1, 0);
+ VDUP(vector, , uint, u, 8, 8, 0);
+ VDUP(vector, , uint, u, 16, 4, 0);
+ VDUP(vector, , uint, u, 32, 2, 0);
+ VDUP(vector, , uint, u, 64, 1, 0);
+ VDUP(vector, q, int, s, 8, 16, 0);
+ VDUP(vector, q, int, s, 16, 8, 0);
+ VDUP(vector, q, int, s, 32, 4, 0);
+ VDUP(vector, q, int, s, 64, 2, 0);
+ VDUP(vector, q, uint, u, 8, 16, 0);
+ VDUP(vector, q, uint, u, 16, 8, 0);
+ VDUP(vector, q, uint, u, 32, 4, 0);
+ VDUP(vector, q, uint, u, 64, 2, 0);
+
+ /* Initialize with max values to check overflow. */
+ VDUP(vector2, , int, s, 8, 8, 0x7F);
+ VDUP(vector2, , int, s, 16, 4, 0x7FFF);
+ VDUP(vector2, , int, s, 32, 2, 0x7FFFFFFF);
+ VDUP(vector2, , int, s, 64, 1, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector2, , uint, u, 8, 8, 0xFF);
+ VDUP(vector2, , uint, u, 16, 4, 0xFFFF);
+ VDUP(vector2, , uint, u, 32, 2, 0xFFFFFFFF);
+ VDUP(vector2, , uint, u, 64, 1, 0xFFFFFFFFFFFFFFFFULL);
+ VDUP(vector2, q, int, s, 8, 16, 0x7F);
+ VDUP(vector2, q, int, s, 16, 8, 0x7FFF);
+ VDUP(vector2, q, int, s, 32, 4, 0x7FFFFFFF);
+ VDUP(vector2, q, int, s, 64, 2, 0x7FFFFFFFFFFFFFFFLL);
+ VDUP(vector2, q, uint, u, 8, 16, 0xFF);
+ VDUP(vector2, q, uint, u, 16, 8, 0xFFFF);
+ VDUP(vector2, q, uint, u, 32, 4, 0xFFFFFFFF);
+ VDUP(vector2, q, uint, u, 64, 2, 0xFFFFFFFFFFFFFFFFULL);
+
+ /* Shift by 1 to check overflow with rounding constant. */
+ TEST_VRSRA_N(, int, s, 8, 8, 1);
+ TEST_VRSRA_N(, int, s, 16, 4, 1);
+ TEST_VRSRA_N(, int, s, 32, 2, 1);
+ TEST_VRSRA_N(, int, s, 64, 1, 1);
+ TEST_VRSRA_N(, uint, u, 8, 8, 1);
+ TEST_VRSRA_N(, uint, u, 16, 4, 1);
+ TEST_VRSRA_N(, uint, u, 32, 2, 1);
+ TEST_VRSRA_N(, uint, u, 64, 1, 1);
+ TEST_VRSRA_N(q, int, s, 8, 16, 1);
+ TEST_VRSRA_N(q, int, s, 16, 8, 1);
+ TEST_VRSRA_N(q, int, s, 32, 4, 1);
+ TEST_VRSRA_N(q, int, s, 64, 2, 1);
+ TEST_VRSRA_N(q, uint, u, 8, 16, 1);
+ TEST_VRSRA_N(q, uint, u, 16, 8, 1);
+ TEST_VRSRA_N(q, uint, u, 32, 4, 1);
+ TEST_VRSRA_N(q, uint, u, 64, 2, 1);
+
+#undef CMT
+#define CMT " (checking overflow: shift by 1, max input)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max_sh1, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max_sh1, CMT);
+
+
+ /* Shift by 3 to check overflow with rounding constant. */
+ TEST_VRSRA_N(, int, s, 8, 8, 3);
+ TEST_VRSRA_N(, int, s, 16, 4, 3);
+ TEST_VRSRA_N(, int, s, 32, 2, 3);
+ TEST_VRSRA_N(, int, s, 64, 1, 3);
+ TEST_VRSRA_N(, uint, u, 8, 8, 3);
+ TEST_VRSRA_N(, uint, u, 16, 4, 3);
+ TEST_VRSRA_N(, uint, u, 32, 2, 3);
+ TEST_VRSRA_N(, uint, u, 64, 1, 3);
+ TEST_VRSRA_N(q, int, s, 8, 16, 3);
+ TEST_VRSRA_N(q, int, s, 16, 8, 3);
+ TEST_VRSRA_N(q, int, s, 32, 4, 3);
+ TEST_VRSRA_N(q, int, s, 64, 2, 3);
+ TEST_VRSRA_N(q, uint, u, 8, 16, 3);
+ TEST_VRSRA_N(q, uint, u, 16, 8, 3);
+ TEST_VRSRA_N(q, uint, u, 32, 4, 3);
+ TEST_VRSRA_N(q, uint, u, 64, 2, 3);
+
+#undef CMT
+#define CMT " (checking overflow: shift by 3, max input)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max_sh3, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max_sh3, CMT);
+
+
+ /* Shift by max to check overflow with rounding constant. */
+ TEST_VRSRA_N(, int, s, 8, 8, 8);
+ TEST_VRSRA_N(, int, s, 16, 4, 16);
+ TEST_VRSRA_N(, int, s, 32, 2, 32);
+ TEST_VRSRA_N(, int, s, 64, 1, 64);
+ TEST_VRSRA_N(, uint, u, 8, 8, 8);
+ TEST_VRSRA_N(, uint, u, 16, 4, 16);
+ TEST_VRSRA_N(, uint, u, 32, 2, 32);
+ TEST_VRSRA_N(, uint, u, 64, 1, 64);
+ TEST_VRSRA_N(q, int, s, 8, 16, 8);
+ TEST_VRSRA_N(q, int, s, 16, 8, 16);
+ TEST_VRSRA_N(q, int, s, 32, 4, 32);
+ TEST_VRSRA_N(q, int, s, 64, 2, 64);
+ TEST_VRSRA_N(q, uint, u, 8, 16, 8);
+ TEST_VRSRA_N(q, uint, u, 16, 8, 16);
+ TEST_VRSRA_N(q, uint, u, 32, 4, 32);
+ TEST_VRSRA_N(q, uint, u, 64, 2, 64);
+
+#undef CMT
+#define CMT " (checking overflow: shift by max, max input)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_max_shmax, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_max_shmax, CMT);
+
+
+ /* Initialize with min values to check overflow. */
+ VDUP(vector2, , int, s, 8, 8, 0x80);
+ VDUP(vector2, , int, s, 16, 4, 0x8000);
+ VDUP(vector2, , int, s, 32, 2, 0x80000000);
+  VDUP(vector2, , int, s, 64, 1, 0x8000000000000000ULL);
+ VDUP(vector2, q, int, s, 8, 16, 0x80);
+ VDUP(vector2, q, int, s, 16, 8, 0x8000);
+ VDUP(vector2, q, int, s, 32, 4, 0x80000000);
+ VDUP(vector2, q, int, s, 64, 2, 0x8000000000000000ULL);
+
+ /* Shift by 1 to check overflow with rounding constant. */
+ TEST_VRSRA_N(, int, s, 8, 8, 1);
+ TEST_VRSRA_N(, int, s, 16, 4, 1);
+ TEST_VRSRA_N(, int, s, 32, 2, 1);
+ TEST_VRSRA_N(, int, s, 64, 1, 1);
+ TEST_VRSRA_N(q, int, s, 8, 16, 1);
+ TEST_VRSRA_N(q, int, s, 16, 8, 1);
+ TEST_VRSRA_N(q, int, s, 32, 4, 1);
+ TEST_VRSRA_N(q, int, s, 64, 2, 1);
+
+#undef CMT
+#define CMT " (checking overflow: shift by 1, min negative input)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_min_sh1, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_min_sh1, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_min_sh1, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_min_sh1, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_min_sh1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_min_sh1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_min_sh1, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_min_sh1, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_min_sh1, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_min_sh1, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_min_sh1, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_min_sh1, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_min_sh1, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_min_sh1, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_min_sh1, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_min_sh1, CMT);
+
+
+ /* Shift by 3 to check overflow with rounding constant. */
+ TEST_VRSRA_N(, int, s, 8, 8, 3);
+ TEST_VRSRA_N(, int, s, 16, 4, 3);
+ TEST_VRSRA_N(, int, s, 32, 2, 3);
+ TEST_VRSRA_N(, int, s, 64, 1, 3);
+ TEST_VRSRA_N(q, int, s, 8, 16, 3);
+ TEST_VRSRA_N(q, int, s, 16, 8, 3);
+ TEST_VRSRA_N(q, int, s, 32, 4, 3);
+ TEST_VRSRA_N(q, int, s, 64, 2, 3);
+
+#undef CMT
+#define CMT " (checking overflow: shift by 3, min negative input)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_min_sh3, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_min_sh3, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_min_sh3, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_min_sh3, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_min_sh3, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_min_sh3, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_min_sh3, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_min_sh3, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_min_sh3, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_min_sh3, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_min_sh3, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_min_sh3, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_min_sh3, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_min_sh3, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_min_sh3, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_min_sh3, CMT);
+
+
+ /* Shift by max to check overflow with rounding constant. */
+ TEST_VRSRA_N(, int, s, 8, 8, 8);
+ TEST_VRSRA_N(, int, s, 16, 4, 16);
+ TEST_VRSRA_N(, int, s, 32, 2, 32);
+ TEST_VRSRA_N(, int, s, 64, 1, 64);
+ TEST_VRSRA_N(q, int, s, 8, 16, 8);
+ TEST_VRSRA_N(q, int, s, 16, 8, 16);
+ TEST_VRSRA_N(q, int, s, 32, 4, 32);
+ TEST_VRSRA_N(q, int, s, 64, 2, 64);
+
+#undef CMT
+#define CMT " (checking overflow: shift by max, min negative input)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_min_shmax, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected_min_shmax, CMT);
+}
+
+int main (void)
+{
+ exec_vrsra_n ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vset_lane.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vset_lane.c
new file mode 100644
index 00000000000..51594068364
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vset_lane.c
@@ -0,0 +1,99 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0x11 };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0x22 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xfffffff0, 0x33 };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0x44 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0x55, 0xf7 };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0xfff0, 0xfff1, 0x66, 0xfff3 };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xfffffff0, 0x77 };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0x88 };
+VECT_VAR_DECL(expected,poly,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0x55, 0xf7 };
+VECT_VAR_DECL(expected,poly,16,4) [] = { 0xfff0, 0xfff1, 0x66, 0xfff3 };
+VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0xc1800000, 0x4204cccd };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb,
+ 0xfc, 0xfd, 0xfe, 0x99 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
+ 0xfff4, 0xaa, 0xfff6, 0xfff7 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0xfffffff2, 0xbb };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0xfffffffffffffff0, 0xcc };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb,
+ 0xfc, 0xfd, 0xdd, 0xff };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
+ 0xfff4, 0xfff5, 0xee, 0xfff7 };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0xff, 0xfffffff3 };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0xfffffffffffffff0, 0x11 };
+VECT_VAR_DECL(expected,poly,8,16) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb,
+ 0xfc, 0xfd, 0xdd, 0xff };
+VECT_VAR_DECL(expected,poly,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
+ 0xfff4, 0xfff5, 0xee, 0xfff7 };
+VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0xc1800000, 0xc1700000,
+ 0xc1600000, 0x41333333 };
+
+#define TEST_MSG "VSET_LANE/VSET_LANEQ"
+void exec_vset_lane (void)
+{
+ /* vec=vset_lane(val, vec, lane), then store the result. */
+#define TEST_VSET_LANE(Q, T1, T2, W, N, V, L) \
+ VECT_VAR(vector, T1, W, N) = \
+ vset##Q##_lane_##T2##W(V, \
+ VECT_VAR(vector, T1, W, N), \
+ L); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), VECT_VAR(vector, T1, W, N))
+
+ DECL_VARIABLE_ALL_VARIANTS(vector);
+
+ clean_results ();
+
+ /* Initialize input "vector" from "buffer". */
+ TEST_MACRO_ALL_VARIANTS_2_5(VLOAD, vector, buffer);
+ VLOAD(vector, buffer, , float, f, 32, 2);
+ VLOAD(vector, buffer, q, float, f, 32, 4);
+
+ /* Choose value and lane arbitrarily. */
+ TEST_VSET_LANE(, int, s, 8, 8, 0x11, 7);
+ TEST_VSET_LANE(, int, s, 16, 4, 0x22, 3);
+ TEST_VSET_LANE(, int, s, 32, 2, 0x33, 1);
+ TEST_VSET_LANE(, int, s, 64, 1, 0x44, 0);
+ TEST_VSET_LANE(, uint, u, 8, 8, 0x55, 6);
+ TEST_VSET_LANE(, uint, u, 16, 4, 0x66, 2);
+ TEST_VSET_LANE(, uint, u, 32, 2, 0x77, 1);
+ TEST_VSET_LANE(, uint, u, 64, 1, 0x88, 0);
+ TEST_VSET_LANE(, poly, p, 8, 8, 0x55, 6);
+ TEST_VSET_LANE(, poly, p, 16, 4, 0x66, 2);
+ TEST_VSET_LANE(, float, f, 32, 2, 33.2f, 1);
+
+ TEST_VSET_LANE(q, int, s, 8, 16, 0x99, 15);
+ TEST_VSET_LANE(q, int, s, 16, 8, 0xAA, 5);
+ TEST_VSET_LANE(q, int, s, 32, 4, 0xBB, 3);
+ TEST_VSET_LANE(q, int, s, 64, 2, 0xCC, 1);
+ TEST_VSET_LANE(q, uint, u, 8, 16, 0xDD, 14);
+ TEST_VSET_LANE(q, uint, u, 16, 8, 0xEE, 6);
+ TEST_VSET_LANE(q, uint, u, 32, 4, 0xFF, 2);
+ TEST_VSET_LANE(q, uint, u, 64, 2, 0x11, 1);
+ TEST_VSET_LANE(q, poly, p, 8, 16, 0xDD, 14);
+ TEST_VSET_LANE(q, poly, p, 16, 8, 0xEE, 6);
+ TEST_VSET_LANE(q, float, f, 32, 4, 11.2f, 3);
+
+ CHECK_RESULTS(TEST_MSG, "");
+}
+
+int main (void)
+{
+ exec_vset_lane ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl_n.c
new file mode 100644
index 00000000000..d807ebbfdbc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl_n.c
@@ -0,0 +1,96 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0xe0, 0xe2, 0xe4, 0xe6,
+ 0xe8, 0xea, 0xec, 0xee };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xffe0, 0xffe2, 0xffe4, 0xffe6 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xffffff80, 0xffffff88 };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0xffffffffffffffc0 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0xc0, 0xc4, 0xc8, 0xcc,
+ 0xd0, 0xd4, 0xd8, 0xdc };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0xff00, 0xff10, 0xff20, 0xff30 };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xffffff80, 0xffffff88 };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0xffffffffffffffe0 };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0x0, 0x20, 0x40, 0x60,
+ 0x80, 0xa0, 0xc0, 0xe0,
+ 0x0, 0x20, 0x40, 0x60,
+ 0x80, 0xa0, 0xc0, 0xe0 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0xffe0, 0xffe2, 0xffe4, 0xffe6,
+ 0xffe8, 0xffea, 0xffec, 0xffee };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0xffffffc0, 0xffffffc4,
+ 0xffffffc8, 0xffffffcc };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0xffffffffffffffc0, 0xffffffffffffffc4 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0xc0, 0xc4, 0xc8, 0xcc,
+ 0xd0, 0xd4, 0xd8, 0xdc,
+ 0xe0, 0xe4, 0xe8, 0xec,
+ 0xf0, 0xf4, 0xf8, 0xfc };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0xff80, 0xff88, 0xff90, 0xff98,
+ 0xffa0, 0xffa8, 0xffb0, 0xffb8 };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0xffffffc0, 0xffffffc4,
+ 0xffffffc8, 0xffffffcc };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0xffffffffffffffe0,
+ 0xffffffffffffffe2 };
+
+#define TEST_MSG "VSHL_N"
+void exec_vshl_n (void)
+{
+ /* Basic test: v2=vshl_n(v1,v), then store the result. */
+#define TEST_VSHL_N(Q, T1, T2, W, N, V) \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vshl##Q##_n_##T2##W(VECT_VAR(vector, T1, W, N), \
+ V); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), VECT_VAR(vector_res, T1, W, N))
+
+ DECL_VARIABLE_ALL_VARIANTS(vector);
+ DECL_VARIABLE_ALL_VARIANTS(vector_res);
+
+ clean_results ();
+
+ /* Initialize input "vector" from "buffer". */
+ TEST_MACRO_ALL_VARIANTS_2_5(VLOAD, vector, buffer);
+
+ /* Choose shift amount arbitrarily. */
+ TEST_VSHL_N(, int, s, 8, 8, 1);
+ TEST_VSHL_N(, int, s, 16, 4, 1);
+ TEST_VSHL_N(, int, s, 32, 2, 3);
+ TEST_VSHL_N(, int, s, 64, 1, 2);
+ TEST_VSHL_N(, uint, u, 8, 8, 2);
+ TEST_VSHL_N(, uint, u, 16, 4, 4);
+ TEST_VSHL_N(, uint, u, 32, 2, 3);
+ TEST_VSHL_N(, uint, u, 64, 1, 1);
+
+ TEST_VSHL_N(q, int, s, 8, 16, 5);
+ TEST_VSHL_N(q, int, s, 16, 8, 1);
+ TEST_VSHL_N(q, int, s, 32, 4, 2);
+ TEST_VSHL_N(q, int, s, 64, 2, 2);
+ TEST_VSHL_N(q, uint, u, 8, 16, 2);
+ TEST_VSHL_N(q, uint, u, 16, 8, 3);
+ TEST_VSHL_N(q, uint, u, 32, 4, 2);
+ TEST_VSHL_N(q, uint, u, 64, 2, 1);
+
+#define CMT ""
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected, CMT);
+}
+
+int main (void)
+{
+ exec_vshl_n ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshll_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshll_n.c
new file mode 100644
index 00000000000..07bc904d0d1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshll_n.c
@@ -0,0 +1,56 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,16,8) [] = { 0xffe0, 0xffe2, 0xffe4, 0xffe6,
+ 0xffe8, 0xffea, 0xffec, 0xffee };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0xffffffe0, 0xffffffe2,
+ 0xffffffe4, 0xffffffe6 };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0xffffffffffffff80, 0xffffffffffffff88 };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0x3c0, 0x3c4, 0x3c8, 0x3cc,
+ 0x3d0, 0x3d4, 0x3d8, 0x3dc };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0xfff00, 0xfff10, 0xfff20, 0xfff30 };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0x7ffffff80, 0x7ffffff88 };
+
+#define TEST_MSG "VSHLL_N"
+void exec_vshll_n (void)
+{
+ /* Basic test: v2=vshll_n(v1,v), then store the result. */
+#define TEST_VSHLL_N(T1, T2, W, W2, N, V) \
+ VECT_VAR(vector_res, T1, W2, N) = \
+ vshll##_n_##T2##W(VECT_VAR(vector, T1, W, N), \
+ V); \
+ vst1q##_##T2##W2(VECT_VAR(result, T1, W2, N), VECT_VAR(vector_res, T1, W2, N))
+
+ DECL_VARIABLE_ALL_VARIANTS(vector);
+ DECL_VARIABLE_ALL_VARIANTS(vector_res);
+
+ clean_results ();
+
+ /* Initialize input "vector" from "buffer". */
+ TEST_MACRO_ALL_VARIANTS_2_5(VLOAD, vector, buffer);
+
+ /* Choose shift amount arbitrarily. */
+ TEST_VSHLL_N(int, s, 8, 16, 8, 1);
+ TEST_VSHLL_N(int, s, 16, 32, 4, 1);
+ TEST_VSHLL_N(int, s, 32, 64, 2, 3);
+ TEST_VSHLL_N(uint, u, 8, 16, 8, 2);
+ TEST_VSHLL_N(uint, u, 16, 32, 4, 4);
+ TEST_VSHLL_N(uint, u, 32, 64, 2, 3);
+
+#undef CMT
+#define CMT ""
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected, CMT);
+}
+
+int main (void)
+{
+ exec_vshll_n ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshr_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshr_n.c
new file mode 100644
index 00000000000..122ce41fb5c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshr_n.c
@@ -0,0 +1,95 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0xf8, 0xf8, 0xf9, 0xf9,
+ 0xfa, 0xfa, 0xfb, 0xfb };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xfffffffc, 0xfffffffc };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0xffffffffffffffff };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0x3c, 0x3c, 0x3c, 0x3c,
+ 0x3d, 0x3d, 0x3d, 0x3d };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0x1ffe, 0x1ffe, 0x1ffe, 0x1ffe };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0x7ffffff, 0x7ffffff };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0x7fffffff };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0xf8, 0xf8, 0xf9, 0xf9,
+ 0xfa, 0xfa, 0xfb, 0xfb,
+ 0xfc, 0xfc, 0xfd, 0xfd,
+ 0xfe, 0xfe, 0xff, 0xff };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0xfffffffc, 0xfffffffc,
+ 0xfffffffc, 0xfffffffc };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0xffffffffffffffff, 0xffffffffffffffff };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0x3c, 0x3c, 0x3c, 0x3c,
+ 0x3d, 0x3d, 0x3d, 0x3d,
+ 0x3e, 0x3e, 0x3e, 0x3e,
+ 0x3f, 0x3f, 0x3f, 0x3f };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0x1ffe, 0x1ffe, 0x1ffe, 0x1ffe,
+ 0x1ffe, 0x1ffe, 0x1ffe, 0x1ffe };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0x7ffffff, 0x7ffffff,
+ 0x7ffffff, 0x7ffffff };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0x7fffffff, 0x7fffffff };
+
+#define TEST_MSG "VSHR_N"
+void exec_vshr_n (void)
+{
+ /* Basic test: y=vshr_n(x,v), then store the result. */
+#define TEST_VSHR_N(Q, T1, T2, W, N, V) \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vshr##Q##_n_##T2##W(VECT_VAR(vector, T1, W, N), \
+ V); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), VECT_VAR(vector_res, T1, W, N))
+
+ DECL_VARIABLE_ALL_VARIANTS(vector);
+ DECL_VARIABLE_ALL_VARIANTS(vector_res);
+
+ clean_results ();
+
+ /* Initialize input "vector" from "buffer". */
+ TEST_MACRO_ALL_VARIANTS_2_5(VLOAD, vector, buffer);
+
+ /* Choose shift amount arbitrarily. */
+ TEST_VSHR_N(, int, s, 8, 8, 1);
+ TEST_VSHR_N(, int, s, 16, 4, 12);
+ TEST_VSHR_N(, int, s, 32, 2, 2);
+ TEST_VSHR_N(, int, s, 64, 1, 32);
+ TEST_VSHR_N(, uint, u, 8, 8, 2);
+ TEST_VSHR_N(, uint, u, 16, 4, 3);
+ TEST_VSHR_N(, uint, u, 32, 2, 5);
+ TEST_VSHR_N(, uint, u, 64, 1, 33);
+
+ TEST_VSHR_N(q, int, s, 8, 16, 1);
+ TEST_VSHR_N(q, int, s, 16, 8, 12);
+ TEST_VSHR_N(q, int, s, 32, 4, 2);
+ TEST_VSHR_N(q, int, s, 64, 2, 32);
+ TEST_VSHR_N(q, uint, u, 8, 16, 2);
+ TEST_VSHR_N(q, uint, u, 16, 8, 3);
+ TEST_VSHR_N(q, uint, u, 32, 4, 5);
+ TEST_VSHR_N(q, uint, u, 64, 2, 33);
+
+#define CMT ""
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected, CMT);
+}
+
+int main (void)
+{
+ exec_vshr_n ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshrn_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshrn_n.c
new file mode 100644
index 00000000000..6d2f4dd5189
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshrn_n.c
@@ -0,0 +1,70 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0xf8, 0xf8, 0xf9, 0xf9,
+ 0xfa, 0xfa, 0xfb, 0xfb };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xfff8, 0xfff8, 0xfff9, 0xfff9 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xfffffffc, 0xfffffffc };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0xfc, 0xfc, 0xfc, 0xfc,
+ 0xfd, 0xfd, 0xfd, 0xfd };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0xfffe, 0xfffe, 0xfffe, 0xfffe };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xfffffffe, 0xfffffffe };
+
+#define TEST_MSG "VSHRN_N"
+void exec_vshrn_n (void)
+{
+ /* Basic test: y=vshrn_n(x,v), then store the result. */
+#define TEST_VSHRN_N(T1, T2, W, W2, N, V) \
+ VECT_VAR(vector_res, T1, W2, N) = \
+ vshrn_n_##T2##W(VECT_VAR(vector, T1, W, N), \
+ V); \
+ vst1_##T2##W2(VECT_VAR(result, T1, W2, N), VECT_VAR(vector_res, T1, W2, N))
+
+ /* vector is twice as large as vector_res. */
+ DECL_VARIABLE(vector, int, 16, 8);
+ DECL_VARIABLE(vector, int, 32, 4);
+ DECL_VARIABLE(vector, int, 64, 2);
+ DECL_VARIABLE(vector, uint, 16, 8);
+ DECL_VARIABLE(vector, uint, 32, 4);
+ DECL_VARIABLE(vector, uint, 64, 2);
+
+ DECL_VARIABLE(vector_res, int, 8, 8);
+ DECL_VARIABLE(vector_res, int, 16, 4);
+ DECL_VARIABLE(vector_res, int, 32, 2);
+ DECL_VARIABLE(vector_res, uint, 8, 8);
+ DECL_VARIABLE(vector_res, uint, 16, 4);
+ DECL_VARIABLE(vector_res, uint, 32, 2);
+
+ clean_results ();
+
+ VLOAD(vector, buffer, q, int, s, 16, 8);
+ VLOAD(vector, buffer, q, int, s, 32, 4);
+ VLOAD(vector, buffer, q, int, s, 64, 2);
+ VLOAD(vector, buffer, q, uint, u, 16, 8);
+ VLOAD(vector, buffer, q, uint, u, 32, 4);
+ VLOAD(vector, buffer, q, uint, u, 64, 2);
+
+ /* Choose shift amount arbitrarily. */
+ TEST_VSHRN_N(int, s, 16, 8, 8, 1);
+ TEST_VSHRN_N(int, s, 32, 16, 4, 1);
+ TEST_VSHRN_N(int, s, 64, 32, 2, 2);
+ TEST_VSHRN_N(uint, u, 16, 8, 8, 2);
+ TEST_VSHRN_N(uint, u, 32, 16, 4, 3);
+ TEST_VSHRN_N(uint, u, 64, 32, 2, 3);
+
+#define CMT ""
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, CMT);
+}
+
+int main (void)
+{
+ exec_vshrn_n ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vsra_n.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vsra_n.c
new file mode 100644
index 00000000000..3c00497d18d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vsra_n.c
@@ -0,0 +1,117 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0xf8, 0xf9, 0xfa, 0xfb,
+ 0xfc, 0xfd, 0xfe, 0xff };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xfffffffc, 0xfffffffd };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0xfffffffffffffff0 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0x5, 0x6, 0x7, 0x8,
+ 0x9, 0xa, 0xb, 0xc };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0xfffc, 0xfffd, 0xfffe, 0xffff };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xfffffff3, 0xfffffff4 };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0xfffffffffffffff0 };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0xf8, 0xf9, 0xfa, 0xfb,
+ 0xfc, 0xfd, 0xfe, 0xff,
+ 0x0, 0x1, 0x2, 0x3,
+ 0x4, 0x5, 0x6, 0x7 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
+ 0xfff4, 0xfff5, 0xfff6, 0xfff7 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0xfffffffc, 0xfffffffd,
+ 0xfffffffe, 0xffffffff };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0xfffffffffffffff0,
+ 0xfffffffffffffff1 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0x5, 0x6, 0x7, 0x8,
+ 0x9, 0xa, 0xb, 0xc,
+ 0xd, 0xe, 0xf, 0x10,
+ 0x11, 0x12, 0x13, 0x14 };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0xfffc, 0xfffd, 0xfffe, 0xffff,
+ 0x0, 0x1, 0x2, 0x3 };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0xfffffff3, 0xfffffff4,
+ 0xfffffff5, 0xfffffff6 };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0xfffffffffffffff0,
+ 0xfffffffffffffff1 };
+
+#define TEST_MSG "VSRA_N"
+void exec_vsra_n (void)
+{
+ /* Basic test: y=vsra_n(x,v), then store the result. */
+#define TEST_VSRA_N(Q, T1, T2, W, N, V) \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vsra##Q##_n_##T2##W(VECT_VAR(vector, T1, W, N), \
+ VECT_VAR(vector2, T1, W, N), \
+ V); \
+ vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), VECT_VAR(vector_res, T1, W, N))
+
+ DECL_VARIABLE_ALL_VARIANTS(vector);
+ DECL_VARIABLE_ALL_VARIANTS(vector2);
+ DECL_VARIABLE_ALL_VARIANTS(vector_res);
+
+ clean_results ();
+
+ /* Initialize input "vector" from "buffer". */
+ TEST_MACRO_ALL_VARIANTS_2_5(VLOAD, vector, buffer);
+
+ /* Choose arbitrary initialization values. */
+ VDUP(vector2, , int, s, 8, 8, 0x11);
+ VDUP(vector2, , int, s, 16, 4, 0x22);
+ VDUP(vector2, , int, s, 32, 2, 0x33);
+ VDUP(vector2, , int, s, 64, 1, 0x44);
+ VDUP(vector2, , uint, u, 8, 8, 0x55);
+ VDUP(vector2, , uint, u, 16, 4, 0x66);
+ VDUP(vector2, , uint, u, 32, 2, 0x77);
+ VDUP(vector2, , uint, u, 64, 1, 0x88);
+
+ VDUP(vector2, q, int, s, 8, 16, 0x11);
+ VDUP(vector2, q, int, s, 16, 8, 0x22);
+ VDUP(vector2, q, int, s, 32, 4, 0x33);
+ VDUP(vector2, q, int, s, 64, 2, 0x44);
+ VDUP(vector2, q, uint, u, 8, 16, 0x55);
+ VDUP(vector2, q, uint, u, 16, 8, 0x66);
+ VDUP(vector2, q, uint, u, 32, 4, 0x77);
+ VDUP(vector2, q, uint, u, 64, 2, 0x88);
+
+ /* Choose shift amount arbitrarily. */
+ TEST_VSRA_N(, int, s, 8, 8, 1);
+ TEST_VSRA_N(, int, s, 16, 4, 12);
+ TEST_VSRA_N(, int, s, 32, 2, 2);
+ TEST_VSRA_N(, int, s, 64, 1, 32);
+ TEST_VSRA_N(, uint, u, 8, 8, 2);
+ TEST_VSRA_N(, uint, u, 16, 4, 3);
+ TEST_VSRA_N(, uint, u, 32, 2, 5);
+ TEST_VSRA_N(, uint, u, 64, 1, 33);
+
+ TEST_VSRA_N(q, int, s, 8, 16, 1);
+ TEST_VSRA_N(q, int, s, 16, 8, 12);
+ TEST_VSRA_N(q, int, s, 32, 4, 2);
+ TEST_VSRA_N(q, int, s, 64, 2, 32);
+ TEST_VSRA_N(q, uint, u, 8, 16, 2);
+ TEST_VSRA_N(q, uint, u, 16, 8, 3);
+ TEST_VSRA_N(q, uint, u, 32, 4, 5);
+ TEST_VSRA_N(q, uint, u, 64, 2, 33);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected, "");
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected, "");
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected, "");
+ CHECK(TEST_MSG, int, 64, 1, PRIx64, expected, "");
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, "");
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, "");
+ CHECK(TEST_MSG, uint, 64, 1, PRIx64, expected, "");
+ CHECK(TEST_MSG, int, 8, 16, PRIx8, expected, "");
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected, "");
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected, "");
+ CHECK(TEST_MSG, int, 64, 2, PRIx64, expected, "");
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected, "");
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected, "");
+ CHECK(TEST_MSG, uint, 64, 2, PRIx64, expected, "");
+}
+
+int main (void)
+{
+ exec_vsra_n ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vst1_lane.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vst1_lane.c
new file mode 100644
index 00000000000..08583b88cf3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vst1_lane.c
@@ -0,0 +1,93 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0xf7, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0xfff3, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0xfffffff1, 0x33333333 };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0xfffffffffffffff0 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0xf6, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0xfff2, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xfffffff0, 0x33333333 };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0xfffffffffffffff0 };
+VECT_VAR_DECL(expected,poly,8,8) [] = { 0xf6, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,4) [] = { 0xfff2, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0xc1700000, 0x33333333 };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0xff, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0xfff5, 0x3333, 0x3333, 0x3333,
+ 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0xfffffff1, 0x33333333,
+ 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0xfffffffffffffff1, 0x3333333333333333 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0xfa, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0xfff4, 0x3333, 0x3333, 0x3333,
+ 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0xfffffff3, 0x33333333,
+ 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0xfffffffffffffff0,
+ 0x3333333333333333 };
+VECT_VAR_DECL(expected,poly,8,16) [] = { 0xfa, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,8) [] = { 0xfff4, 0x3333, 0x3333, 0x3333,
+ 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0xc1700000, 0x33333333,
+ 0x33333333, 0x33333333 };
+
+#define TEST_MSG "VST1_LANE/VST1_LANEQ"
+void exec_vst1_lane (void)
+{
+#define TEST_VST1_LANE(Q, T1, T2, W, N, L) \
+ VECT_VAR(vector, T1, W, N) = \
+ vld1##Q##_##T2##W(VECT_VAR(buffer, T1, W, N)); \
+ vst1##Q##_lane_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector, T1, W, N), L)
+
+ DECL_VARIABLE_ALL_VARIANTS(vector);
+
+ clean_results ();
+
+ /* Choose lane arbitrarily. */
+ TEST_VST1_LANE(, int, s, 8, 8, 7);
+ TEST_VST1_LANE(, int, s, 16, 4, 3);
+ TEST_VST1_LANE(, int, s, 32, 2, 1);
+ TEST_VST1_LANE(, int, s, 64, 1, 0);
+ TEST_VST1_LANE(, uint, u, 8, 8, 6);
+ TEST_VST1_LANE(, uint, u, 16, 4, 2);
+ TEST_VST1_LANE(, uint, u, 32, 2, 0);
+ TEST_VST1_LANE(, uint, u, 64, 1, 0);
+ TEST_VST1_LANE(, poly, p, 8, 8, 6);
+ TEST_VST1_LANE(, poly, p, 16, 4, 2);
+ TEST_VST1_LANE(, float, f, 32, 2, 1);
+
+ TEST_VST1_LANE(q, int, s, 8, 16, 15);
+ TEST_VST1_LANE(q, int, s, 16, 8, 5);
+ TEST_VST1_LANE(q, int, s, 32, 4, 1);
+ TEST_VST1_LANE(q, int, s, 64, 2, 1);
+ TEST_VST1_LANE(q, uint, u, 8, 16, 10);
+ TEST_VST1_LANE(q, uint, u, 16, 8, 4);
+ TEST_VST1_LANE(q, uint, u, 32, 4, 3);
+ TEST_VST1_LANE(q, uint, u, 64, 2, 0);
+ TEST_VST1_LANE(q, poly, p, 8, 16, 10);
+ TEST_VST1_LANE(q, poly, p, 16, 8, 4);
+ TEST_VST1_LANE(q, float, f, 32, 4, 1);
+
+ CHECK_RESULTS(TEST_MSG, "");
+}
+
+int main (void)
+{
+ exec_vst1_lane ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vstX_lane.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vstX_lane.c
new file mode 100644
index 00000000000..680e5bdd5f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vstX_lane.c
@@ -0,0 +1,578 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results for vst2, chunk 0. */
+VECT_VAR_DECL(expected_st2_0,int,8,8) [] = { 0xf0, 0xf1, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,int,16,4) [] = { 0xfff0, 0xfff1, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,int,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_st2_0,uint,8,8) [] = { 0xf0, 0xf1, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,uint,16,4) [] = { 0xfff0, 0xfff1, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,uint,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_st2_0,poly,8,8) [] = { 0xf0, 0xf1, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,poly,16,4) [] = { 0xfff0, 0xfff1, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,hfloat,32,2) [] = { 0xc1800000, 0xc1700000 };
+VECT_VAR_DECL(expected_st2_0,int,16,8) [] = { 0xfff0, 0xfff1, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,int,32,4) [] = { 0xfffffff0, 0xfffffff1, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,uint,16,8) [] = { 0xfff0, 0xfff1, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,uint,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,poly,16,8) [] = { 0xfff0, 0xfff1, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,hfloat,32,4) [] = { 0xc1800000, 0xc1700000,
+ 0x0, 0x0 };
+
+/* Expected results for vst2, chunk 1. */
+VECT_VAR_DECL(expected_st2_1,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,hfloat,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Expected results for vst3, chunk 0. */
+VECT_VAR_DECL(expected_st3_0,int,8,8) [] = { 0xf0, 0xf1, 0xf2, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_0,int,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0x0 };
+VECT_VAR_DECL(expected_st3_0,int,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_st3_0,uint,8,8) [] = { 0xf0, 0xf1, 0xf2, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_0,uint,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0x0 };
+VECT_VAR_DECL(expected_st3_0,uint,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_st3_0,poly,8,8) [] = { 0xf0, 0xf1, 0xf2, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_0,poly,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0x0 };
+VECT_VAR_DECL(expected_st3_0,hfloat,32,2) [] = { 0xc1800000, 0xc1700000 };
+VECT_VAR_DECL(expected_st3_0,int,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_0,int,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0xfffffff2, 0x0 };
+VECT_VAR_DECL(expected_st3_0,uint,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_0,uint,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0xfffffff2, 0x0 };
+VECT_VAR_DECL(expected_st3_0,poly,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_0,hfloat,32,4) [] = { 0xc1800000, 0xc1700000,
+ 0xc1600000, 0x0 };
+
+/* Expected results for vst3, chunk 1. */
+VECT_VAR_DECL(expected_st3_1,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,int,32,2) [] = { 0xfffffff2, 0x0 };
+VECT_VAR_DECL(expected_st3_1,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,uint,32,2) [] = { 0xfffffff2, 0x0 };
+VECT_VAR_DECL(expected_st3_1,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,hfloat,32,2) [] = { 0xc1600000, 0x0 };
+VECT_VAR_DECL(expected_st3_1,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Expected results for vst3, chunk 2. */
+VECT_VAR_DECL(expected_st3_2,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,hfloat,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Expected results for vst4, chunk 0. */
+VECT_VAR_DECL(expected_st4_0,int,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_0,int,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+VECT_VAR_DECL(expected_st4_0,int,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_st4_0,uint,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_0,uint,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+VECT_VAR_DECL(expected_st4_0,uint,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_st4_0,poly,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_0,poly,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+VECT_VAR_DECL(expected_st4_0,hfloat,32,2) [] = { 0xc1800000, 0xc1700000 };
+VECT_VAR_DECL(expected_st4_0,int,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_0,int,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0xfffffff2, 0xfffffff3 };
+VECT_VAR_DECL(expected_st4_0,uint,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_0,uint,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0xfffffff2, 0xfffffff3 };
+VECT_VAR_DECL(expected_st4_0,poly,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_0,hfloat,32,4) [] = { 0xc1800000, 0xc1700000,
+ 0xc1600000, 0xc1500000 };
+
+/* Expected results for vst4, chunk 1. */
+VECT_VAR_DECL(expected_st4_1,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,int,32,2) [] = { 0xfffffff2, 0xfffffff3 };
+VECT_VAR_DECL(expected_st4_1,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,uint,32,2) [] = { 0xfffffff2, 0xfffffff3 };
+VECT_VAR_DECL(expected_st4_1,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,hfloat,32,2) [] = { 0xc1600000, 0xc1500000 };
+VECT_VAR_DECL(expected_st4_1,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Expected results for vst4, chunk 2. */
+VECT_VAR_DECL(expected_st4_2,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,hfloat,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Expected results for vst4, chunk 3. */
+VECT_VAR_DECL(expected_st4_3,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,hfloat,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Declare additional input buffers as needed. */
+/* Input buffers for vld2_lane. */
+VECT_VAR_DECL_INIT(buffer_vld2_lane, int, 8, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, int, 16, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, int, 32, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, int, 64, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, uint, 8, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, uint, 16, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, uint, 32, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, uint, 64, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, poly, 8, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, poly, 16, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, float, 32, 2);
+
+/* Input buffers for vld3_lane. */
+VECT_VAR_DECL_INIT(buffer_vld3_lane, int, 8, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, int, 16, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, int, 32, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, int, 64, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, uint, 8, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, uint, 16, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, uint, 32, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, uint, 64, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, poly, 8, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, poly, 16, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, float, 32, 3);
+
+/* Input buffers for vld4_lane. */
+VECT_VAR_DECL_INIT(buffer_vld4_lane, int, 8, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, int, 16, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, int, 32, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, int, 64, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, uint, 8, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, uint, 16, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, uint, 32, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, uint, 64, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, poly, 8, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, poly, 16, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, float, 32, 4);
+
+void exec_vstX_lane (void)
+{
+ /* In this case, input variables are arrays of vectors. */
+#define DECL_VSTX_LANE(T1, W, N, X) \
+ VECT_ARRAY_TYPE(T1, W, N, X) VECT_ARRAY_VAR(vector, T1, W, N, X); \
+ VECT_ARRAY_TYPE(T1, W, N, X) VECT_ARRAY_VAR(vector_src, T1, W, N, X); \
+ VECT_VAR_DECL(result_bis_##X, T1, W, N)[X * N]
+
+ /* We need to use a temporary result buffer (result_bis), because
+ the one used for other tests is not large enough. A subset of the
+ result data is moved from result_bis to result, and it is this
+ subset which is used to check the actual behaviour. The next
+ macro enables to move another chunk of data from result_bis to
+ result. */
+ /* We also use another extra input buffer (buffer_src), which we
+     fill with 0xAA, and which is used to load a vector from which we
+ read a given lane. */
+#define TEST_VSTX_LANE(Q, T1, T2, W, N, X, L) \
+ memset (VECT_VAR(buffer_src, T1, W, N), 0xAA, \
+ sizeof(VECT_VAR(buffer_src, T1, W, N))); \
+ memset (VECT_VAR(result_bis_##X, T1, W, N), 0, \
+ sizeof(VECT_VAR(result_bis_##X, T1, W, N))); \
+ \
+ VECT_ARRAY_VAR(vector_src, T1, W, N, X) = \
+ vld##X##Q##_##T2##W(VECT_VAR(buffer_src, T1, W, N)); \
+ \
+ VECT_ARRAY_VAR(vector, T1, W, N, X) = \
+ /* Use dedicated init buffer, of size X. */ \
+ vld##X##Q##_lane_##T2##W(VECT_VAR(buffer_vld##X##_lane, T1, W, X), \
+ VECT_ARRAY_VAR(vector_src, T1, W, N, X), \
+ L); \
+ vst##X##Q##_lane_##T2##W(VECT_VAR(result_bis_##X, T1, W, N), \
+ VECT_ARRAY_VAR(vector, T1, W, N, X), \
+ L); \
+ memcpy(VECT_VAR(result, T1, W, N), VECT_VAR(result_bis_##X, T1, W, N), \
+ sizeof(VECT_VAR(result, T1, W, N)));
+
+ /* Overwrite "result" with the contents of "result_bis"[Y]. */
+#define TEST_EXTRA_CHUNK(T1, W, N, X, Y) \
+ memcpy(VECT_VAR(result, T1, W, N), \
+ &(VECT_VAR(result_bis_##X, T1, W, N)[Y*N]), \
+ sizeof(VECT_VAR(result, T1, W, N)));
+
+ /* We need all variants in 64 bits, but there is no 64x2 variant,
+ nor 128 bits vectors of int8/uint8/poly8. */
+#define DECL_ALL_VSTX_LANE(X) \
+ DECL_VSTX_LANE(int, 8, 8, X); \
+ DECL_VSTX_LANE(int, 16, 4, X); \
+ DECL_VSTX_LANE(int, 32, 2, X); \
+ DECL_VSTX_LANE(uint, 8, 8, X); \
+ DECL_VSTX_LANE(uint, 16, 4, X); \
+ DECL_VSTX_LANE(uint, 32, 2, X); \
+ DECL_VSTX_LANE(poly, 8, 8, X); \
+ DECL_VSTX_LANE(poly, 16, 4, X); \
+ DECL_VSTX_LANE(float, 32, 2, X); \
+ DECL_VSTX_LANE(int, 16, 8, X); \
+ DECL_VSTX_LANE(int, 32, 4, X); \
+ DECL_VSTX_LANE(uint, 16, 8, X); \
+ DECL_VSTX_LANE(uint, 32, 4, X); \
+ DECL_VSTX_LANE(poly, 16, 8, X); \
+ DECL_VSTX_LANE(float, 32, 4, X)
+
+#define DUMMY_ARRAY(V, T, W, N, L) VECT_VAR_DECL(V,T,W,N)[N*L]
+
+ /* Use the same lanes regardless of the size of the array (X), for
+ simplicity. */
+#define TEST_ALL_VSTX_LANE(X) \
+ TEST_VSTX_LANE(, int, s, 8, 8, X, 7); \
+ TEST_VSTX_LANE(, int, s, 16, 4, X, 2); \
+ TEST_VSTX_LANE(, int, s, 32, 2, X, 0); \
+ TEST_VSTX_LANE(, float, f, 32, 2, X, 0); \
+ TEST_VSTX_LANE(, uint, u, 8, 8, X, 4); \
+ TEST_VSTX_LANE(, uint, u, 16, 4, X, 3); \
+ TEST_VSTX_LANE(, uint, u, 32, 2, X, 1); \
+ TEST_VSTX_LANE(, poly, p, 8, 8, X, 4); \
+ TEST_VSTX_LANE(, poly, p, 16, 4, X, 3); \
+ TEST_VSTX_LANE(q, int, s, 16, 8, X, 6); \
+ TEST_VSTX_LANE(q, int, s, 32, 4, X, 2); \
+ TEST_VSTX_LANE(q, uint, u, 16, 8, X, 5); \
+ TEST_VSTX_LANE(q, uint, u, 32, 4, X, 0); \
+ TEST_VSTX_LANE(q, poly, p, 16, 8, X, 5); \
+ TEST_VSTX_LANE(q, float, f, 32, 4, X, 2)
+
+#define TEST_ALL_EXTRA_CHUNKS(X, Y) \
+ TEST_EXTRA_CHUNK(int, 8, 8, X, Y); \
+ TEST_EXTRA_CHUNK(int, 16, 4, X, Y); \
+ TEST_EXTRA_CHUNK(int, 32, 2, X, Y); \
+ TEST_EXTRA_CHUNK(uint, 8, 8, X, Y); \
+ TEST_EXTRA_CHUNK(uint, 16, 4, X, Y); \
+ TEST_EXTRA_CHUNK(uint, 32, 2, X, Y); \
+ TEST_EXTRA_CHUNK(poly, 8, 8, X, Y); \
+ TEST_EXTRA_CHUNK(poly, 16, 4, X, Y); \
+ TEST_EXTRA_CHUNK(float, 32, 2, X, Y); \
+ TEST_EXTRA_CHUNK(int, 16, 8, X, Y); \
+ TEST_EXTRA_CHUNK(int, 32, 4, X, Y); \
+ TEST_EXTRA_CHUNK(uint, 16, 8, X, Y); \
+ TEST_EXTRA_CHUNK(uint, 32, 4, X, Y); \
+ TEST_EXTRA_CHUNK(poly, 16, 8, X, Y); \
+ TEST_EXTRA_CHUNK(float, 32, 4, X, Y)
+
+ /* Declare the temporary buffers / variables. */
+ DECL_ALL_VSTX_LANE(2);
+ DECL_ALL_VSTX_LANE(3);
+ DECL_ALL_VSTX_LANE(4);
+
+ /* Define dummy input arrays, large enough for x4 vectors. */
+ DUMMY_ARRAY(buffer_src, int, 8, 8, 4);
+ DUMMY_ARRAY(buffer_src, int, 16, 4, 4);
+ DUMMY_ARRAY(buffer_src, int, 32, 2, 4);
+ DUMMY_ARRAY(buffer_src, uint, 8, 8, 4);
+ DUMMY_ARRAY(buffer_src, uint, 16, 4, 4);
+ DUMMY_ARRAY(buffer_src, uint, 32, 2, 4);
+ DUMMY_ARRAY(buffer_src, poly, 8, 8, 4);
+ DUMMY_ARRAY(buffer_src, poly, 16, 4, 4);
+ DUMMY_ARRAY(buffer_src, float, 32, 2, 4);
+ DUMMY_ARRAY(buffer_src, int, 16, 8, 4);
+ DUMMY_ARRAY(buffer_src, int, 32, 4, 4);
+ DUMMY_ARRAY(buffer_src, uint, 16, 8, 4);
+ DUMMY_ARRAY(buffer_src, uint, 32, 4, 4);
+ DUMMY_ARRAY(buffer_src, poly, 16, 8, 4);
+ DUMMY_ARRAY(buffer_src, float, 32, 4, 4);
+
+ /* Check vst2_lane/vst2q_lane. */
+ clean_results ();
+#define TEST_MSG "VST2_LANE/VST2Q_LANE"
+ TEST_ALL_VSTX_LANE(2);
+
+#define CMT " (chunk 0)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st2_0, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st2_0, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st2_0, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st2_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st2_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st2_0, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st2_0, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st2_0, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st2_0, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st2_0, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st2_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st2_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st2_0, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st2_0, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st2_0, CMT);
+
+ TEST_ALL_EXTRA_CHUNKS(2, 1);
+#undef CMT
+#define CMT " (chunk 1)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st2_1, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st2_1, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st2_1, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st2_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st2_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st2_1, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st2_1, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st2_1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st2_1, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st2_1, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st2_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st2_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st2_1, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st2_1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st2_1, CMT);
+
+
+ /* Check vst3_lane/vst3q_lane. */
+ clean_results ();
+#undef TEST_MSG
+#define TEST_MSG "VST3_LANE/VST3Q_LANE"
+ TEST_ALL_VSTX_LANE(3);
+
+#undef CMT
+#define CMT " (chunk 0)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st3_0, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st3_0, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st3_0, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st3_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st3_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st3_0, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st3_0, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st3_0, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st3_0, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st3_0, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st3_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st3_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st3_0, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st3_0, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st3_0, CMT);
+
+ TEST_ALL_EXTRA_CHUNKS(3, 1);
+
+#undef CMT
+#define CMT " (chunk 1)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st3_1, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st3_1, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st3_1, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st3_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st3_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st3_1, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st3_1, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st3_1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st3_1, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st3_1, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st3_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st3_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st3_1, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st3_1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st3_1, CMT);
+
+ TEST_ALL_EXTRA_CHUNKS(3, 2);
+
+#undef CMT
+#define CMT " (chunk 2)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st3_2, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st3_2, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st3_2, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st3_2, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st3_2, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st3_2, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st3_2, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st3_2, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st3_2, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st3_2, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st3_2, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st3_2, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st3_2, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st3_2, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st3_2, CMT);
+
+
+ /* Check vst4_lane/vst4q_lane. */
+ clean_results ();
+#undef TEST_MSG
+#define TEST_MSG "VST4_LANE/VST4Q_LANE"
+ TEST_ALL_VSTX_LANE(4);
+
+#undef CMT
+#define CMT " (chunk 0)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st4_0, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st4_0, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st4_0, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st4_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st4_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st4_0, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st4_0, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st4_0, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st4_0, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st4_0, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st4_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st4_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st4_0, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st4_0, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st4_0, CMT);
+
+ TEST_ALL_EXTRA_CHUNKS(4, 1);
+
+#undef CMT
+#define CMT " (chunk 1)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st4_1, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st4_1, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st4_1, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st4_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st4_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st4_1, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st4_1, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st4_1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st4_1, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st4_1, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st4_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st4_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st4_1, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st4_1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st4_1, CMT);
+
+ TEST_ALL_EXTRA_CHUNKS(4, 2);
+
+#undef CMT
+#define CMT " (chunk 2)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st4_2, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st4_2, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st4_2, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st4_2, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st4_2, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st4_2, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st4_2, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st4_2, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st4_2, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st4_2, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st4_2, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st4_2, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st4_2, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st4_2, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st4_2, CMT);
+
+ TEST_ALL_EXTRA_CHUNKS(4, 3);
+
+#undef CMT
+#define CMT " (chunk 3)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st4_3, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st4_3, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st4_3, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st4_3, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st4_3, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st4_3, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st4_3, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st4_3, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st4_3, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st4_3, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st4_3, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st4_3, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st4_3, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st4_3, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st4_3, CMT);
+}
+
+int main (void)
+{
+ exec_vstX_lane ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vtbX.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vtbX.c
new file mode 100644
index 00000000000..0557efd6e7e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vtbX.c
@@ -0,0 +1,289 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results for vtbl1. */
+VECT_VAR_DECL(expected_vtbl1,int,8,8) [] = { 0x0, 0xf2, 0xf2, 0xf2,
+ 0x0, 0x0, 0xf2, 0xf2 };
+VECT_VAR_DECL(expected_vtbl1,uint,8,8) [] = { 0x0, 0xf3, 0xf3, 0xf3,
+ 0x0, 0x0, 0xf3, 0xf3 };
+VECT_VAR_DECL(expected_vtbl1,poly,8,8) [] = { 0x0, 0xf3, 0xf3, 0xf3,
+ 0x0, 0x0, 0xf3, 0xf3 };
+
+/* Expected results for vtbl2. */
+VECT_VAR_DECL(expected_vtbl2,int,8,8) [] = { 0xf6, 0xf3, 0xf3, 0xf3,
+ 0x0, 0x0, 0xf3, 0xf3 };
+VECT_VAR_DECL(expected_vtbl2,uint,8,8) [] = { 0xf6, 0xf5, 0xf5, 0xf5,
+ 0x0, 0x0, 0xf5, 0xf5 };
+VECT_VAR_DECL(expected_vtbl2,poly,8,8) [] = { 0xf6, 0xf5, 0xf5, 0xf5,
+ 0x0, 0x0, 0xf5, 0xf5 };
+
+/* Expected results for vtbl3. */
+VECT_VAR_DECL(expected_vtbl3,int,8,8) [] = { 0xf8, 0xf4, 0xf4, 0xf4,
+ 0xff, 0x0, 0xf4, 0xf4 };
+VECT_VAR_DECL(expected_vtbl3,uint,8,8) [] = { 0xf8, 0xf7, 0xf7, 0xf7,
+ 0xff, 0x0, 0xf7, 0xf7 };
+VECT_VAR_DECL(expected_vtbl3,poly,8,8) [] = { 0xf8, 0xf7, 0xf7, 0xf7,
+ 0xff, 0x0, 0xf7, 0xf7 };
+
+/* Expected results for vtbl4. */
+VECT_VAR_DECL(expected_vtbl4,int,8,8) [] = { 0xfa, 0xf5, 0xf5, 0xf5,
+ 0x3, 0x0, 0xf5, 0xf5 };
+VECT_VAR_DECL(expected_vtbl4,uint,8,8) [] = { 0xfa, 0xf9, 0xf9, 0xf9,
+ 0x3, 0x0, 0xf9, 0xf9 };
+VECT_VAR_DECL(expected_vtbl4,poly,8,8) [] = { 0xfa, 0xf9, 0xf9, 0xf9,
+ 0x3, 0x0, 0xf9, 0xf9 };
+
+/* Expected results for vtbx1. */
+VECT_VAR_DECL(expected_vtbx1,int,8,8) [] = { 0x33, 0xf2, 0xf2, 0xf2,
+ 0x33, 0x33, 0xf2, 0xf2 };
+VECT_VAR_DECL(expected_vtbx1,uint,8,8) [] = { 0xcc, 0xf3, 0xf3, 0xf3,
+ 0xcc, 0xcc, 0xf3, 0xf3 };
+VECT_VAR_DECL(expected_vtbx1,poly,8,8) [] = { 0xcc, 0xf3, 0xf3, 0xf3,
+ 0xcc, 0xcc, 0xf3, 0xf3 };
+
+/* Expected results for vtbx2. */
+VECT_VAR_DECL(expected_vtbx2,int,8,8) [] = { 0xf6, 0xf3, 0xf3, 0xf3,
+ 0x33, 0x33, 0xf3, 0xf3 };
+VECT_VAR_DECL(expected_vtbx2,uint,8,8) [] = { 0xf6, 0xf5, 0xf5, 0xf5,
+ 0xcc, 0xcc, 0xf5, 0xf5 };
+VECT_VAR_DECL(expected_vtbx2,poly,8,8) [] = { 0xf6, 0xf5, 0xf5, 0xf5,
+ 0xcc, 0xcc, 0xf5, 0xf5 };
+
+/* Expected results for vtbx3. */
+VECT_VAR_DECL(expected_vtbx3,int,8,8) [] = { 0xf8, 0xf4, 0xf4, 0xf4,
+ 0xff, 0x33, 0xf4, 0xf4 };
+VECT_VAR_DECL(expected_vtbx3,uint,8,8) [] = { 0xf8, 0xf7, 0xf7, 0xf7,
+ 0xff, 0xcc, 0xf7, 0xf7 };
+VECT_VAR_DECL(expected_vtbx3,poly,8,8) [] = { 0xf8, 0xf7, 0xf7, 0xf7,
+ 0xff, 0xcc, 0xf7, 0xf7 };
+
+/* Expected results for vtbx4. */
+VECT_VAR_DECL(expected_vtbx4,int,8,8) [] = { 0xfa, 0xf5, 0xf5, 0xf5,
+ 0x3, 0x33, 0xf5, 0xf5 };
+VECT_VAR_DECL(expected_vtbx4,uint,8,8) [] = { 0xfa, 0xf9, 0xf9, 0xf9,
+ 0x3, 0xcc, 0xf9, 0xf9 };
+VECT_VAR_DECL(expected_vtbx4,poly,8,8) [] = { 0xfa, 0xf9, 0xf9, 0xf9,
+ 0x3, 0xcc, 0xf9, 0xf9 };
+
+void exec_vtbX (void)
+{
+ int i;
+
+ /* In this case, input variables are arrays of vectors. */
+#define DECL_VTBX(T1, W, N, X) \
+ VECT_ARRAY_TYPE(T1, W, N, X) VECT_ARRAY_VAR(table_vector, T1, W, N, X)
+
+ /* The vtbl1 variant is different from vtbl{2,3,4} because it takes a
+ vector as 1st param, instead of an array of vectors. */
+#define TEST_VTBL1(T1, T2, T3, W, N) \
+ VECT_VAR(table_vector, T1, W, N) = \
+ vld1##_##T2##W((T1##W##_t *)lookup_table); \
+ \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vtbl1_##T2##W(VECT_VAR(table_vector, T1, W, N), \
+ VECT_VAR(vector, T3, W, N)); \
+ vst1_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N));
+
+#define TEST_VTBLX(T1, T2, T3, W, N, X) \
+ VECT_ARRAY_VAR(table_vector, T1, W, N, X) = \
+ vld##X##_##T2##W((T1##W##_t *)lookup_table); \
+ \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vtbl##X##_##T2##W(VECT_ARRAY_VAR(table_vector, T1, W, N, X), \
+ VECT_VAR(vector, T3, W, N)); \
+ vst1_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N));
+
+ /* We need to define a lookup table. */
+ uint8_t lookup_table[32];
+
+ DECL_VARIABLE(vector, int, 8, 8);
+ DECL_VARIABLE(vector, uint, 8, 8);
+ DECL_VARIABLE(vector, poly, 8, 8);
+ DECL_VARIABLE(vector_res, int, 8, 8);
+ DECL_VARIABLE(vector_res, uint, 8, 8);
+ DECL_VARIABLE(vector_res, poly, 8, 8);
+
+ /* For vtbl1. */
+ DECL_VARIABLE(table_vector, int, 8, 8);
+ DECL_VARIABLE(table_vector, uint, 8, 8);
+ DECL_VARIABLE(table_vector, poly, 8, 8);
+
+ /* For vtbx*. */
+ DECL_VARIABLE(default_vector, int, 8, 8);
+ DECL_VARIABLE(default_vector, uint, 8, 8);
+ DECL_VARIABLE(default_vector, poly, 8, 8);
+
+ /* We need only 8 bits variants. */
+#define DECL_ALL_VTBLX(X) \
+ DECL_VTBX(int, 8, 8, X); \
+ DECL_VTBX(uint, 8, 8, X); \
+ DECL_VTBX(poly, 8, 8, X)
+
+#define TEST_ALL_VTBL1() \
+ TEST_VTBL1(int, s, int, 8, 8); \
+ TEST_VTBL1(uint, u, uint, 8, 8); \
+ TEST_VTBL1(poly, p, uint, 8, 8)
+
+#define TEST_ALL_VTBLX(X) \
+ TEST_VTBLX(int, s, int, 8, 8, X); \
+ TEST_VTBLX(uint, u, uint, 8, 8, X); \
+ TEST_VTBLX(poly, p, uint, 8, 8, X)
+
+ /* Declare the temporary buffers / variables. */
+ DECL_ALL_VTBLX(2);
+ DECL_ALL_VTBLX(3);
+ DECL_ALL_VTBLX(4);
+
+ /* Fill the lookup table. */
+ for (i=0; i<32; i++) {
+ lookup_table[i] = i-15;
+ }
+
+ /* Choose init value arbitrarily, will be used as table index. */
+ VDUP(vector, , int, s, 8, 8, 1);
+ VDUP(vector, , uint, u, 8, 8, 2);
+ VDUP(vector, , poly, p, 8, 8, 2);
+
+ /* To ensure coverage, add some indexes larger than 8,16 and 32
+ except: lane 0 (by 6), lane 1 (by 8) and lane 2 (by 9). */
+ VSET_LANE(vector, , int, s, 8, 8, 0, 10);
+ VSET_LANE(vector, , int, s, 8, 8, 4, 20);
+ VSET_LANE(vector, , int, s, 8, 8, 5, 40);
+ VSET_LANE(vector, , uint, u, 8, 8, 0, 10);
+ VSET_LANE(vector, , uint, u, 8, 8, 4, 20);
+ VSET_LANE(vector, , uint, u, 8, 8, 5, 40);
+ VSET_LANE(vector, , poly, p, 8, 8, 0, 10);
+ VSET_LANE(vector, , poly, p, 8, 8, 4, 20);
+ VSET_LANE(vector, , poly, p, 8, 8, 5, 40);
+
+
+ /* Check vtbl1. */
+ clean_results ();
+#define TEST_MSG "VTBL1"
+ TEST_ALL_VTBL1();
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_vtbl1, "");
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_vtbl1, "");
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_vtbl1, "");
+
+ /* Check vtbl2. */
+ clean_results ();
+#undef TEST_MSG
+#define TEST_MSG "VTBL2"
+ TEST_ALL_VTBLX(2);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_vtbl2, "");
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_vtbl2, "");
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_vtbl2, "");
+
+ /* Check vtbl3. */
+ clean_results ();
+#undef TEST_MSG
+#define TEST_MSG "VTBL3"
+ TEST_ALL_VTBLX(3);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_vtbl3, "");
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_vtbl3, "");
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_vtbl3, "");
+
+ /* Check vtbl4. */
+ clean_results ();
+#undef TEST_MSG
+#define TEST_MSG "VTBL4"
+ TEST_ALL_VTBLX(4);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_vtbl4, "");
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_vtbl4, "");
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_vtbl4, "");
+
+
+ /* Now test VTBX. */
+
+ /* The vtbx1 variant is different from vtbx{2,3,4} because it takes a
+ vector as 1st param, instead of an array of vectors. */
+#define TEST_VTBX1(T1, T2, T3, W, N) \
+ VECT_VAR(table_vector, T1, W, N) = \
+ vld1##_##T2##W((T1##W##_t *)lookup_table); \
+ \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vtbx1_##T2##W(VECT_VAR(default_vector, T1, W, N), \
+ VECT_VAR(table_vector, T1, W, N), \
+ VECT_VAR(vector, T3, W, N)); \
+ vst1_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N));
+
+#define TEST_VTBXX(T1, T2, T3, W, N, X) \
+ VECT_ARRAY_VAR(table_vector, T1, W, N, X) = \
+ vld##X##_##T2##W((T1##W##_t *)lookup_table); \
+ \
+ VECT_VAR(vector_res, T1, W, N) = \
+ vtbx##X##_##T2##W(VECT_VAR(default_vector, T1, W, N), \
+ VECT_ARRAY_VAR(table_vector, T1, W, N, X), \
+ VECT_VAR(vector, T3, W, N)); \
+ vst1_##T2##W(VECT_VAR(result, T1, W, N), \
+ VECT_VAR(vector_res, T1, W, N));
+
+#define TEST_ALL_VTBX1() \
+ TEST_VTBX1(int, s, int, 8, 8); \
+ TEST_VTBX1(uint, u, uint, 8, 8); \
+ TEST_VTBX1(poly, p, uint, 8, 8)
+
+#define TEST_ALL_VTBXX(X) \
+ TEST_VTBXX(int, s, int, 8, 8, X); \
+ TEST_VTBXX(uint, u, uint, 8, 8, X); \
+ TEST_VTBXX(poly, p, uint, 8, 8, X)
+
+ /* Choose init value arbitrarily, will be used as default value. */
+ VDUP(default_vector, , int, s, 8, 8, 0x33);
+ VDUP(default_vector, , uint, u, 8, 8, 0xCC);
+ VDUP(default_vector, , poly, p, 8, 8, 0xCC);
+
+ /* Check vtbx1. */
+ clean_results ();
+#undef TEST_MSG
+#define TEST_MSG "VTBX1"
+ TEST_ALL_VTBX1();
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_vtbx1, "");
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_vtbx1, "");
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_vtbx1, "");
+
+ /* Check vtbx2. */
+ clean_results ();
+#undef TEST_MSG
+#define TEST_MSG "VTBX2"
+ TEST_ALL_VTBXX(2);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_vtbx2, "");
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_vtbx2, "");
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_vtbx2, "");
+
+ /* Check vtbx3. */
+ clean_results ();
+#undef TEST_MSG
+#define TEST_MSG "VTBX3"
+ TEST_ALL_VTBXX(3);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_vtbx3, "");
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_vtbx3, "");
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_vtbx3, "");
+
+ /* Check vtbx4. */
+ clean_results ();
+#undef TEST_MSG
+#define TEST_MSG "VTBX4"
+ TEST_ALL_VTBXX(4);
+
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_vtbx4, "");
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_vtbx4, "");
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_vtbx4, "");
+}
+
+int main (void)
+{
+ exec_vtbX ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vtst.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vtst.c
new file mode 100644
index 00000000000..7f965407d67
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vtst.c
@@ -0,0 +1,120 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results with signed input. */
+VECT_VAR_DECL(expected_signed,uint,8,8) [] = { 0x0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_signed,uint,16,4) [] = { 0x0, 0xffff, 0x0, 0xffff };
+VECT_VAR_DECL(expected_signed,uint,32,2) [] = { 0x0, 0xffffffff };
+VECT_VAR_DECL(expected_signed,uint,8,16) [] = { 0x0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_signed,uint,16,8) [] = { 0x0, 0xffff, 0x0, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_signed,uint,32,4) [] = { 0x0, 0xffffffff,
+ 0x0, 0xffffffff };
+
+/* Expected results with unsigned input. */
+VECT_VAR_DECL(expected_unsigned,uint,8,8) [] = { 0x0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_unsigned,uint,16,4) [] = { 0x0, 0xffff, 0x0, 0xffff };
+VECT_VAR_DECL(expected_unsigned,uint,32,2) [] = { 0x0, 0xffffffff };
+VECT_VAR_DECL(expected_unsigned,uint,8,16) [] = { 0x0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_unsigned,uint,16,8) [] = { 0x0, 0xffff,
+ 0x0, 0xffff,
+ 0xffff, 0xffff,
+ 0xffff, 0xffff };
+VECT_VAR_DECL(expected_unsigned,uint,32,4) [] = { 0x0, 0xffffffff,
+ 0x0, 0xffffffff };
+
+#ifndef INSN_NAME
+#define INSN_NAME vtst
+#define TEST_MSG "VTST/VTSTQ"
+#endif
+
+/* We can't use the standard ref_v_binary_op.c template because vtst
+ has no 64 bits variant, and outputs are always of uint type. */
+#define FNNAME1(NAME) void exec_ ## NAME (void)
+#define FNNAME(NAME) FNNAME1(NAME)
+
+FNNAME (INSN_NAME)
+{
+ /* Basic test: y=OP(x,x), then store the result. */
+#define TEST_BINARY_OP1(INSN, Q, T1, T2, W, N) \
+ VECT_VAR(vector_res, uint, W, N) = \
+ INSN##Q##_##T2##W(VECT_VAR(vector, T1, W, N), \
+ VECT_VAR(vector2, T1, W, N)); \
+ vst1##Q##_u##W(VECT_VAR(result, uint, W, N), \
+ VECT_VAR(vector_res, uint, W, N))
+
+#define TEST_BINARY_OP(INSN, Q, T1, T2, W, N) \
+ TEST_BINARY_OP1(INSN, Q, T1, T2, W, N) \
+
+ DECL_VARIABLE_ALL_VARIANTS(vector);
+ DECL_VARIABLE_ALL_VARIANTS(vector2);
+ DECL_VARIABLE_UNSIGNED_VARIANTS(vector_res);
+
+
+ clean_results ();
+
+ /* Initialize input "vector" from "buffer". */
+ TEST_MACRO_ALL_VARIANTS_2_5(VLOAD, vector, buffer);
+
+ /* Choose init value arbitrarily, will be used as comparison
+ value. */
+ VDUP(vector2, , int, s, 8, 8, 15);
+ VDUP(vector2, , int, s, 16, 4, 5);
+ VDUP(vector2, , int, s, 32, 2, 1);
+ VDUP(vector2, , uint, u, 8, 8, 15);
+ VDUP(vector2, , uint, u, 16, 4, 5);
+ VDUP(vector2, , uint, u, 32, 2, 1);
+ VDUP(vector2, q, int, s, 8, 16, 15);
+ VDUP(vector2, q, int, s, 16, 8, 5);
+ VDUP(vector2, q, int, s, 32, 4, 1);
+ VDUP(vector2, q, uint, u, 8, 16, 15);
+ VDUP(vector2, q, uint, u, 16, 8, 5);
+ VDUP(vector2, q, uint, u, 32, 4, 1);
+
+#define TEST_MACRO_NO64BIT_VARIANT_1_5(MACRO, VAR, T1, T2) \
+ MACRO(VAR, , T1, T2, 8, 8); \
+ MACRO(VAR, , T1, T2, 16, 4); \
+ MACRO(VAR, , T1, T2, 32, 2); \
+ MACRO(VAR, q, T1, T2, 8, 16); \
+ MACRO(VAR, q, T1, T2, 16, 8); \
+ MACRO(VAR, q, T1, T2, 32, 4)
+
+ /* Split the test, as both signed and unsigned variants output their
+ result in an unsigned form (thus the same output variable is used
+ in these tests). */
+ TEST_MACRO_NO64BIT_VARIANT_1_5(TEST_BINARY_OP, INSN_NAME, int, s);
+
+#define CMT " (signed input)"
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_signed, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_signed, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_signed, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_signed, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_signed, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_signed, CMT);
+
+ TEST_MACRO_NO64BIT_VARIANT_1_5(TEST_BINARY_OP, INSN_NAME, uint, u);
+
+#undef CMT
+#define CMT " (unsigned input)"
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_unsigned, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_unsigned, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_unsigned, CMT);
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_unsigned, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_unsigned, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_unsigned, CMT);
+}
+
+int main (void)
+{
+ exec_vtst ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/fmovd-zero.c b/gcc/testsuite/gcc.target/aarch64/fmovd-zero-mem.c
index 7e4590afe61..9245c482265 100644
--- a/gcc/testsuite/gcc.target/aarch64/fmovd-zero.c
+++ b/gcc/testsuite/gcc.target/aarch64/fmovd-zero-mem.c
@@ -7,4 +7,4 @@ foo (double *output)
*output = 0.0;
}
-/* { dg-final { scan-assembler "fmov\\td\[0-9\]+, xzr" } } */
+/* { dg-final { scan-assembler "str\\txzr, \\\[x0\\\]" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/fmovd-zero-reg.c b/gcc/testsuite/gcc.target/aarch64/fmovd-zero-reg.c
new file mode 100644
index 00000000000..0a3e5940297
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fmovd-zero-reg.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+void bar (double);
+void
+foo (void)
+{
+ bar (0.0);
+}
+
+/* { dg-final { scan-assembler "fmov\\td0, xzr" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/fmovf-zero.c b/gcc/testsuite/gcc.target/aarch64/fmovf-zero-mem.c
index 5050ac31038..518eff0626f 100644
--- a/gcc/testsuite/gcc.target/aarch64/fmovf-zero.c
+++ b/gcc/testsuite/gcc.target/aarch64/fmovf-zero-mem.c
@@ -7,4 +7,4 @@ foo (float *output)
*output = 0.0;
}
-/* { dg-final { scan-assembler "fmov\\ts\[0-9\]+, wzr" } } */
+/* { dg-final { scan-assembler "str\\twzr, \\\[x0\\\]" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/fmovf-zero-reg.c b/gcc/testsuite/gcc.target/aarch64/fmovf-zero-reg.c
new file mode 100644
index 00000000000..4213450d6e5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fmovf-zero-reg.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+void bar (float);
+void
+foo (void)
+{
+ bar (0.0);
+}
+
+/* { dg-final { scan-assembler "fmov\\ts0, wzr" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/fmovld-zero-mem.c b/gcc/testsuite/gcc.target/aarch64/fmovld-zero-mem.c
new file mode 100644
index 00000000000..e649404ae07
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fmovld-zero-mem.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+void
+foo (long double *output)
+{
+ *output = 0.0;
+}
+
+/* { dg-final { scan-assembler "stp\\txzr, xzr, \\\[x0\\\]" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/fmovld-zero-reg.c b/gcc/testsuite/gcc.target/aarch64/fmovld-zero-reg.c
new file mode 100644
index 00000000000..ca602cb381f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/fmovld-zero-reg.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+void bar (long double);
+void
+foo (void)
+{
+ bar (0.0);
+}
+
+/* { dg-final { scan-assembler "movi\\tv0\.2d, #0" } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/pr62308.c b/gcc/testsuite/gcc.target/aarch64/pr62308.c
new file mode 100644
index 00000000000..1cf6e212dca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/pr62308.c
@@ -0,0 +1,6 @@
+/* { dg-do compile } */
+/* { dg-options "-mbig-endian" } */
+
+typedef int __attribute__((vector_size(16))) v4si;
+struct S2823 {v4si a;int b[0];};
+void checkx2823 (struct S2823 args){};
diff --git a/gcc/testsuite/gcc.target/arm/flip-thumb.c b/gcc/testsuite/gcc.target/arm/flip-thumb.c
new file mode 100644
index 00000000000..05f6bb7b423
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/flip-thumb.c
@@ -0,0 +1,24 @@
+/* Check -mflip-thumb. */
+/* { dg-do compile } */
+/* { dg-options "-O2 -mflip-thumb -mno-restrict-it" } */
+/* { dg-final { scan-assembler ".arm" } } */
+/* { dg-final { scan-assembler-times ".thumb_func" 1} } */
+
+int
+foo(int a)
+{
+ return a ? 1 : 5;
+}
+
+int
+bar(int a)
+{
+ return a ? 1 : 5;
+}
+
+/* { dg-final { scan-assembler-times "ite" 1 { target { arm_thumb2_ok } } } } */
+
+
+
+
+
diff --git a/gcc/testsuite/gfortran.dg/gomp/omp_parallel_1.f90 b/gcc/testsuite/gfortran.dg/gomp/omp_parallel_1.f90
new file mode 100644
index 00000000000..4bcb5631b7c
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/omp_parallel_1.f90
@@ -0,0 +1,37 @@
+! { dg-do compile }
+! { dg-additional-options "-fdump-tree-original" }
+!
+! PR fortran/66549
+! The resolution of CVN in the middle CLWF's OpenMP construct was
+! making the DO loop (wrongly) interpreted as an OpenMP-managed loop, leading
+! to an ICE.
+!
+! Contributed by Andrew Benson <abensonca@gmail.com>.
+
+module smfa
+ type :: sgc
+ contains
+ procedure :: sla => sa
+ end type sgc
+ class(sgc), pointer :: sg_
+ double precision, allocatable, dimension(:) :: vni
+contains
+ double precision function sa(self,i)
+ class(sgc), intent(in ) :: self
+ end function sa
+ subroutine cvn(sg_,vn)
+ class(sgc), intent(inout) :: sg_
+ double precision, intent( out), dimension(:) :: vn
+ integer :: i
+ do i=1,2
+ vn(i)= sg_%sla(i)
+ end do
+ end subroutine cvn
+ subroutine clwf()
+ !$omp parallel
+ call cvn(sg_,vni)
+ !$omp end parallel
+ end subroutine clwf
+end module smfa
+
+! { dg-final { scan-tree-dump-times "#pragma\\s+omp\\s+parallel\\n" 1 "original" } }
diff --git a/gcc/testsuite/gnat.dg/specs/debug1.ads b/gcc/testsuite/gnat.dg/specs/debug1.ads
index 92e9184e473..de0a7b90798 100644
--- a/gcc/testsuite/gnat.dg/specs/debug1.ads
+++ b/gcc/testsuite/gnat.dg/specs/debug1.ads
@@ -11,4 +11,4 @@ package Debug1 is
end Debug1;
--- { dg-final { scan-assembler-times "DW_AT_artificial" 17 } }
+-- { dg-final { scan-assembler-times "DW_AT_artificial" 15 } }
diff --git a/gcc/tree-if-conv.c b/gcc/tree-if-conv.c
index a5e17b85b08..e0ac936d72c 100644
--- a/gcc/tree-if-conv.c
+++ b/gcc/tree-if-conv.c
@@ -1510,11 +1510,7 @@ is_cond_scalar_reduction (gimple phi, gimple *reduc, tree arg_0, tree arg_1,
/* Make R_OP1 to hold reduction variable. */
if (r_op2 == PHI_RESULT (header_phi)
&& reduction_op == PLUS_EXPR)
- {
- tree tmp = r_op1;
- r_op1 = r_op2;
- r_op2 = tmp;
- }
+ std::swap (r_op1, r_op2);
else if (r_op1 != PHI_RESULT (header_phi))
return false;
@@ -1708,11 +1704,7 @@ predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
second_edge = EDGE_PRED (bb, 1);
cond = bb_predicate (first_edge->src);
if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
- {
- edge tmp_edge = first_edge;
- first_edge = second_edge;
- second_edge = tmp_edge;
- }
+ std::swap (first_edge, second_edge);
if (EDGE_COUNT (first_edge->src->succs) > 1)
{
cond = bb_predicate (second_edge->src);
@@ -1795,11 +1787,7 @@ predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
/* Put element with max number of occurences to the end of ARGS. */
if (max_ind != -1 && max_ind +1 != (int) args_len)
- {
- tree tmp = args[args_len - 1];
- args[args_len - 1] = args[max_ind];
- args[max_ind] = tmp;
- }
+ std::swap (args[args_len - 1], args[max_ind]);
/* Handle one special case when number of arguments with different values
is equal 2 and one argument has the only occurrence. Such PHI can be
@@ -2174,11 +2162,7 @@ predicate_mem_writes (loop_p loop)
lhs = ifc_temp_var (type, unshare_expr (lhs), &gsi);
rhs = ifc_temp_var (type, unshare_expr (rhs), &gsi);
if (swap)
- {
- tree tem = lhs;
- lhs = rhs;
- rhs = tem;
- }
+ std::swap (lhs, rhs);
cond = force_gimple_operand_gsi_1 (&gsi, unshare_expr (cond),
is_gimple_condexpr, NULL_TREE,
true, GSI_SAME_STMT);
diff --git a/gcc/tree-loop-distribution.c b/gcc/tree-loop-distribution.c
index 96fbc3620f5..19523b941d2 100644
--- a/gcc/tree-loop-distribution.c
+++ b/gcc/tree-loop-distribution.c
@@ -1360,9 +1360,7 @@ pg_add_dependence_edges (struct graph *rdg, vec<loop_p> loops, int dir,
if (rdg_vertex_for_stmt (rdg, DR_STMT (dr1))
> rdg_vertex_for_stmt (rdg, DR_STMT (dr2)))
{
- data_reference_p tem = dr1;
- dr1 = dr2;
- dr2 = tem;
+ std::swap (dr1, dr2);
this_dir = -this_dir;
}
ddr = initialize_data_dependence_relation (dr1, dr2, loops);
@@ -1373,9 +1371,7 @@ pg_add_dependence_edges (struct graph *rdg, vec<loop_p> loops, int dir,
{
if (DDR_REVERSED_P (ddr))
{
- data_reference_p tem = dr1;
- dr1 = dr2;
- dr2 = tem;
+ std::swap (dr1, dr2);
this_dir = -this_dir;
}
/* Known dependences can still be unordered througout the
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index 341bcc77a8e..eadad7e0ad2 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -2301,11 +2301,7 @@ combine_chains (chain_p ch1, chain_p ch2)
}
if (swap)
- {
- chain_p tmp = ch1;
- ch1 = ch2;
- ch2 = tmp;
- }
+ std::swap (ch1, ch2);
new_chain = XCNEW (struct chain);
new_chain->type = CT_COMBINATION;
diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index d9aae421fbc..a1f0beeae63 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -953,11 +953,7 @@ nonoverlapping_component_refs_p (const_tree x, const_tree y)
if (fieldsx.length () == 2)
{
if (ncr_compar (&fieldsx[0], &fieldsx[1]) == 1)
- {
- const_tree tem = fieldsx[0];
- fieldsx[0] = fieldsx[1];
- fieldsx[1] = tem;
- }
+ std::swap (fieldsx[0], fieldsx[1]);
}
else
fieldsx.qsort (ncr_compar);
@@ -965,11 +961,7 @@ nonoverlapping_component_refs_p (const_tree x, const_tree y)
if (fieldsy.length () == 2)
{
if (ncr_compar (&fieldsy[0], &fieldsy[1]) == 1)
- {
- const_tree tem = fieldsy[0];
- fieldsy[0] = fieldsy[1];
- fieldsy[1] = tem;
- }
+ std::swap (fieldsy[0], fieldsy[1]);
}
else
fieldsy.qsort (ncr_compar);
@@ -1426,13 +1418,10 @@ refs_may_alias_p_1 (ao_ref *ref1, ao_ref *ref2, bool tbaa_p)
/* Canonicalize the pointer-vs-decl case. */
if (ind1_p && var2_p)
{
- HOST_WIDE_INT tmp1;
- tree tmp2;
- ao_ref *tmp3;
- tmp1 = offset1; offset1 = offset2; offset2 = tmp1;
- tmp1 = max_size1; max_size1 = max_size2; max_size2 = tmp1;
- tmp2 = base1; base1 = base2; base2 = tmp2;
- tmp3 = ref1; ref1 = ref2; ref2 = tmp3;
+ std::swap (offset1, offset2);
+ std::swap (max_size1, max_size2);
+ std::swap (base1, base2);
+ std::swap (ref1, ref2);
var1_p = true;
ind1_p = false;
var2_p = false;
diff --git a/gcc/tree-ssa-ifcombine.c b/gcc/tree-ssa-ifcombine.c
index 650624e49f3..f55a4528d49 100644
--- a/gcc/tree-ssa-ifcombine.c
+++ b/gcc/tree-ssa-ifcombine.c
@@ -98,11 +98,7 @@ recognize_if_then_else (basic_block cond_bb,
t = EDGE_SUCC (cond_bb, 0);
e = EDGE_SUCC (cond_bb, 1);
if (!(t->flags & EDGE_TRUE_VALUE))
- {
- edge tmp = t;
- t = e;
- e = tmp;
- }
+ std::swap (t, e);
if (!(t->flags & EDGE_TRUE_VALUE)
|| !(e->flags & EDGE_FALSE_VALUE))
return false;
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index cb5f054720d..cab5acfc8d5 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -1485,8 +1485,8 @@ extract_cond_operands (struct ivopts_data *data, gimple stmt,
/* The objects returned when COND has constant operands. */
static struct iv const_iv;
static tree zero;
- tree *op0 = &zero, *op1 = &zero, *tmp_op;
- struct iv *iv0 = &const_iv, *iv1 = &const_iv, *tmp_iv;
+ tree *op0 = &zero, *op1 = &zero;
+ struct iv *iv0 = &const_iv, *iv1 = &const_iv;
bool ret = false;
if (gimple_code (stmt) == GIMPLE_COND)
@@ -1517,8 +1517,8 @@ extract_cond_operands (struct ivopts_data *data, gimple stmt,
if (integer_zerop (iv0->step))
{
/* Control variable may be on the other side. */
- tmp_op = op0; op0 = op1; op1 = tmp_op;
- tmp_iv = iv0; iv0 = iv1; iv1 = tmp_iv;
+ std::swap (op0, op1);
+ std::swap (iv0, iv1);
}
ret = !integer_zerop (iv0->step) && integer_zerop (iv1->step);
diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c
index 64c9a73e03f..213d72e3dc4 100644
--- a/gcc/tree-ssa-loop-niter.c
+++ b/gcc/tree-ssa-loop-niter.c
@@ -73,8 +73,6 @@ along with GCC; see the file COPYING3. If not see
#include "wide-int-print.h"
-#define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
-
/* The maximum number of dominator BBs we search for conditions
of loop header copies we use for simplifying a conditional
expression. */
@@ -301,7 +299,7 @@ refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
tree c0, enum tree_code cmp, tree c1,
bounds *bnds)
{
- tree varc0, varc1, tmp, ctype;
+ tree varc0, varc1, ctype;
mpz_t offc0, offc1, loffx, loffy, bnd;
bool lbound = false;
bool no_wrap = nowrap_type_p (type);
@@ -371,7 +369,7 @@ refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
if (operand_equal_p (varx, varc1, 0))
{
- tmp = varc0; varc0 = varc1; varc1 = tmp;
+ std::swap (varc0, varc1);
mpz_swap (offc0, offc1);
cmp = swap_tree_comparison (cmp);
}
@@ -385,7 +383,7 @@ refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
if (cmp == GT_EXPR || cmp == GE_EXPR)
{
- tmp = varx; varx = vary; vary = tmp;
+ std::swap (varx, vary);
mpz_swap (offc0, offc1);
mpz_swap (loffx, loffy);
cmp = swap_tree_comparison (cmp);
@@ -1365,7 +1363,7 @@ number_of_iterations_cond (struct loop *loop,
if (code == GE_EXPR || code == GT_EXPR
|| (code == NE_EXPR && integer_zerop (iv0->step)))
{
- SWAP (iv0, iv1);
+ std::swap (iv0, iv1);
code = swap_tree_comparison (code);
}
diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c
index 098a02868e2..d2a5cee2329 100644
--- a/gcc/tree-ssa-phiopt.c
+++ b/gcc/tree-ssa-phiopt.c
@@ -237,12 +237,8 @@ tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
;
else if (EDGE_SUCC (bb2, 0)->dest == bb1)
{
- basic_block bb_tmp = bb1;
- edge e_tmp = e1;
- bb1 = bb2;
- bb2 = bb_tmp;
- e1 = e2;
- e2 = e_tmp;
+ std::swap (bb1, bb2);
+ std::swap (e1, e2);
}
else if (do_store_elim
&& EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index 3db42b03bbb..ccfa6b603ec 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -2389,11 +2389,7 @@ vn_nary_op_compute_hash (const vn_nary_op_t vno1)
if (vno1->length == 2
&& commutative_tree_code (vno1->opcode)
&& tree_swap_operands_p (vno1->op[0], vno1->op[1], false))
- {
- tree temp = vno1->op[0];
- vno1->op[0] = vno1->op[1];
- vno1->op[1] = temp;
- }
+ std::swap (vno1->op[0], vno1->op[1]);
hstate.add_int (vno1->opcode);
for (i = 0; i < vno1->length; ++i)
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index 47d8a42474e..91ddc0fc444 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -1124,9 +1124,8 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
for (j = 0; j < group_size; ++j)
if (!matches[j])
{
- gimple tem = oprnds_info[0]->def_stmts[j];
- oprnds_info[0]->def_stmts[j] = oprnds_info[1]->def_stmts[j];
- oprnds_info[1]->def_stmts[j] = tem;
+ std::swap (oprnds_info[0]->def_stmts[j],
+ oprnds_info[1]->def_stmts[j]);
dump_printf (MSG_NOTE, "%d ", j);
}
dump_printf (MSG_NOTE, "\n");
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 9760d9a471f..0b9c8d6e1f9 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -8259,11 +8259,7 @@ supportable_widening_operation (enum tree_code code, gimple stmt,
}
if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
- {
- enum tree_code ctmp = c1;
- c1 = c2;
- c2 = ctmp;
- }
+ std::swap (c1, c2);
if (code == FIX_TRUNC_EXPR)
{
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index a7e66f14f2b..fdaebe4c2fc 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -2914,33 +2914,17 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
prod3. */
/* min0min1 > max0max1 */
if (wi::gts_p (prod0, prod3))
- {
- vrp_int tmp = prod3;
- prod3 = prod0;
- prod0 = tmp;
- }
+ std::swap (prod0, prod3);
/* min0max1 > max0min1 */
if (wi::gts_p (prod1, prod2))
- {
- vrp_int tmp = prod2;
- prod2 = prod1;
- prod1 = tmp;
- }
+ std::swap (prod1, prod2);
if (wi::gts_p (prod0, prod1))
- {
- vrp_int tmp = prod1;
- prod1 = prod0;
- prod0 = tmp;
- }
+ std::swap (prod0, prod1);
if (wi::gts_p (prod2, prod3))
- {
- vrp_int tmp = prod3;
- prod3 = prod2;
- prod2 = tmp;
- }
+ std::swap (prod2, prod3);
/* diff = max - min. */
prod2 = prod3 - prod0;
@@ -3723,11 +3707,7 @@ extract_range_from_unary_expr_1 (value_range_t *vr,
{
/* If the range was reversed, swap MIN and MAX. */
if (cmp == 1)
- {
- tree t = min;
- min = max;
- max = t;
- }
+ std::swap (min, max);
}
cmp = compare_values (min, max);