Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog  387
-rw-r--r--  gcc/DATESTAMP  2
-rw-r--r--  gcc/builtins.c  13
-rw-r--r--  gcc/c-typeck.c  2
-rw-r--r--  gcc/cgraphunit.c  15
-rw-r--r--  gcc/config/arm/arm-protos.h  3
-rw-r--r--  gcc/config/arm/arm.c  278
-rw-r--r--  gcc/config/arm/arm.md  4
-rw-r--r--  gcc/config/arm/cirrus.md  10
-rw-r--r--  gcc/config/arm/fpa.md  4
-rw-r--r--  gcc/config/arm/iwmmxt.md  4
-rw-r--r--  gcc/config/arm/neon.md  2
-rw-r--r--  gcc/config/arm/vfp.md  19
-rw-r--r--  gcc/config/avr/avr.c  327
-rw-r--r--  gcc/config/avr/avr.md  29
-rw-r--r--  gcc/config/i386/i386-protos.h  1
-rw-r--r--  gcc/config/i386/i386.c  46
-rw-r--r--  gcc/config/i386/i386.md  24
-rw-r--r--  gcc/config/i386/mingw32.h  3
-rw-r--r--  gcc/config/m32c/m32c-protos.h  2
-rw-r--r--  gcc/config/m32c/m32c.c  24
-rw-r--r--  gcc/config/m32c/m32c.h  2
-rw-r--r--  gcc/config/mmix/mmix.c  2
-rw-r--r--  gcc/config/spu/spu-protos.h  1
-rw-r--r--  gcc/config/spu/spu.c  73
-rw-r--r--  gcc/config/spu/spu.h  11
-rw-r--r--  gcc/cp/ChangeLog  31
-rw-r--r--  gcc/cp/call.c  6
-rw-r--r--  gcc/cp/decl.c  6
-rw-r--r--  gcc/cp/error.c  4
-rw-r--r--  gcc/cp/lex.c  2
-rw-r--r--  gcc/cp/name-lookup.c  9
-rw-r--r--  gcc/cp/name-lookup.h  1
-rw-r--r--  gcc/cp/pt.c  39
-rw-r--r--  gcc/cp/typeck2.c  6
-rw-r--r--  gcc/doc/extend.texi  8
-rw-r--r--  gcc/explow.c  7
-rw-r--r--  gcc/expr.c  4
-rw-r--r--  gcc/fold-const.c  12
-rw-r--r--  gcc/fortran/ChangeLog  27
-rw-r--r--  gcc/fortran/decl.c  42
-rw-r--r--  gcc/fortran/module.c  2
-rw-r--r--  gcc/fortran/resolve.c  99
-rw-r--r--  gcc/ggc.h  8
-rw-r--r--  gcc/gimple-fold.c  2
-rw-r--r--  gcc/gimplify.c  2
-rw-r--r--  gcc/go/gofrontend/lex.cc  8
-rw-r--r--  gcc/graphite-clast-to-gimple.c  8
-rw-r--r--  gcc/graphite-sese-to-poly.c  7
-rw-r--r--  gcc/stmt.c  5
-rw-r--r--  gcc/testsuite/ChangeLog  49
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/decltype32.C  12
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/initlist57.C  8
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/variadic-unresolved.C  12
-rw-r--r--  gcc/testsuite/g++.dg/diagnostic/expr1.C  9
-rw-r--r--  gcc/testsuite/g++.dg/ext/attr-used-1.C  17
-rw-r--r--  gcc/testsuite/g++.dg/opt/life1.C  2
-rw-r--r--  gcc/testsuite/g++.old-deja/g++.brendan/README  1
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/vrp61.c  2
-rw-r--r--  gcc/testsuite/gcc.target/arm/pr43597.c  28
-rw-r--r--  gcc/testsuite/gfortran.dg/coarray_26.f90  53
-rw-r--r--  gcc/testsuite/gfortran.dg/common_13.f90  11
-rw-r--r--  gcc/testsuite/gfortran.dg/func_result_7.f90  11
-rw-r--r--  gcc/tree-cfg.c  18
-rw-r--r--  gcc/tree-chrec.c  20
-rw-r--r--  gcc/tree-chrec.h  2
-rw-r--r--  gcc/tree-loop-distribution.c  11
-rw-r--r--  gcc/tree-mudflap.c  22
-rw-r--r--  gcc/tree-predcom.c  11
-rw-r--r--  gcc/tree-profile.c  3
-rw-r--r--  gcc/tree-scalar-evolution.c  2
-rw-r--r--  gcc/tree-ssa-address.c  10
-rw-r--r--  gcc/tree-ssa-forwprop.c  47
-rw-r--r--  gcc/tree-ssa-loop-ivopts.c  2
-rw-r--r--  gcc/tree-ssa-loop-manip.c  6
-rw-r--r--  gcc/tree-ssa-pre.c  4
-rw-r--r--  gcc/tree-ssa-sccvn.c  9
-rw-r--r--  gcc/tree-ssa-structalias.c  2
-rw-r--r--  gcc/tree-vrp.c  553
-rw-r--r--  gcc/tree.c  3
-rw-r--r--  gcc/tree.h  19
-rw-r--r--  gcc/varasm.c  14
82 files changed, 1824 insertions, 782 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index b600d256a6e..f96f6902042 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,7 +1,202 @@
-2011-08-12 David Li <davidxl@google.com>
+2011-08-17 Richard Guenther <rguenther@suse.de>
+
+ * tree.h (convert_to_ptrofftype_loc): New function.
+ (convert_to_ptrofftype): Define.
+ * builtins.c (expand_builtin_bzero): Use size_type_node.
+ (fold_builtin_bzero): Likewise.
+ (std_gimplify_va_arg_expr): Build the BIT_AND_EXPR on the pointer.
+ * c-typeck.c (build_unary_op): Use convert_to_ptrofftype_loc.
+ * cgraphunit.c (thunk_adjust): Use fold_build_pointer_plus_loc.
+ (cgraph_redirect_edge_call_stmt_to_callee): Use size_int.
+ * expr.c (expand_expr_addr_expr_1): Use fold_build_pointer_plus.
+ * fold-const.c (build_range_check): Negate using the original
+ type.
+ (fold_unary_loc): Use fold_build_pointer_plus_loc.
+ * gimple-fold.c (gimple_adjust_this_by_delta): Use
+ convert_to_ptrofftype.
+ * gimplify.c (gimplify_self_mod_expr): Likewise.
+ * graphite-clast-to-gimple.c (clast_to_gcc_expression): Likewise.
+ (graphite_create_new_loop_guard): Likewise.
+ * graphite-sese-to-poly.c (my_long_long): Remove.
+ (scop_ivs_can_be_represented): Adjust.
+ * tree-cfg.c (verify_gimple_assign_unary): Use ptrofftype_p.
+ * tree-chrec.c (chrec_fold_plus_1): Use fold_build_pointer_plus.
+ * tree-loop-distribution.c (build_size_arg_loc): Use
+ size_type_node.
+ (generate_memset_zero): Simplify.
+ * tree-mudflap.c: Use fold_convert, not convert.
+ * tree-predcom.c (suitable_reference_p): Expand DR_OFFSET in
+ its own type.
+ (determine_offset): Likewise for DR_STEP.
+ (valid_initializer_p): Likewise.
+ * tree-profile.c (prepare_instrumented_value): Convert the pointer
+ to an integer type of same size.
+ * tree-scalar-evolution.c (interpret_rhs_expr): Do not refer
+ to sizetype without need.
+ * tree-ssa-address.c (tree_mem_ref_addr): Likewise.
+ * tree-ssa-loop-ivopts.c (find_bivs): Use convert_to_ptrofftype.
+ * tree-ssa-loop-manip.c (create_iv): Likewise.
+ (determine_exit_conditions): Adjust comment.
+ * tree-ssa-pre.c (create_expression_by_pieces): Use
+ convert_to_ptrofftype.
+ * tree-ssa-structalias.c (get_constraint_for_1): Likewise.
+ * varasm.c (array_size_for_constructor): Compute using double_ints.
+
+2011-08-16 Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
+
+ * config/spu/spu.c (spu_emit_branch_or_set): Avoid reverse tests
+ when generating an integer result where possible. Short-cut
+ comparison against 0 also for QImode.
+
+2011-08-16 Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
+
+ * config/spu/spu.h (LEGITIMIZE_RELOAD_ADDRESS): New macro.
+ * config/spu/spu-protos.h (spu_legitimize_reload_address): Add
+ prototype.
+ * config/spu/spu.c (spu_legitimize_reload_address): New function.
+ (spu_legitimate_address_p): Do not check displacement if the base
+ is an eliminable stack register.
+
+2011-08-16 Anatoly Sokolov <aesok@post.ru>
+
+ * config/m32c/m32c.h (PREFERRED_RELOAD_CLASS,
+ PREFERRED_OUTPUT_RELOAD_CLASS): Remove macro.
+ * config/m32c/m32c-protos.h (m32c_preferred_reload_class,
+ m32c_preferred_output_reload_class): Remove.
+ * config/m32c/m32c.c (m32c_preferred_reload_class): Make static.
+ Change rclass argument and return types to reg_class_t. Use
+ reg_class_subset_p instead of class_sizes.
+ (m32c_preferred_output_reload_class): Make static. Change rclass
+ argument and return types to reg_class_t.
+ (TARGET_PREFERRED_RELOAD_CLASS,
+ TARGET_PREFERRED_OUTPUT_RELOAD_CLASS): Define.
+
+2011-08-16 Kai Tietz <ktietz@redhat.com>
+
+ * config/i386/mingw32.h (GOMP_SELF_SPEC): Add -pthread option.
+
+2011-08-16 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/50082
+ * tree-ssa-forwprop.c (combine_cond_expr_cond): Handle overflow
+ warnings here, instead of ...
+ (ssa_forward_propagate_and_combine): ... here.
+ (forward_propagate_into_comparison_1): Adjust.
+ (forward_propagate_into_comparison): Likewise.
+ (forward_propagate_into_gimple_cond): Likewise.
+ (forward_propagate_into_cond): Likewise.
+
+2011-08-16 Andreas Schwab <schwab@redhat.com>
+
+ * ggc.h (ggc_alloc_rtvec_sized): Use ggc_alloc_zone_rtvec_def
+ instead of ggc_alloc_zone_vec_rtvec_def.
+
+2011-08-16 Richard Guenther <rguenther@suse.de>
+
+ * tree.h (ptrofftype_p): New helper function.
+ * tree-cfg.c (verify_expr): Use ptrofftype_p for POINTER_PLUS_EXPR
+ offset verification.
+ (verify_gimple_assign_binary): Likewise.
+ * tree.c (build2_stat): Likewise.
+ * tree-chrec.c (chrec_fold_plus_poly_poly): Likewise.
+ (reset_evolution_in_loop): Likewise.
+ * tree-chrec.h (build_polynomial_chrec): Likewise.
+
+2011-08-16 Liang Wang <lwang1@marvell.com>
+
+ * ggc.h (ggc_alloc_rtvec_sized): Change arguments of
+ ggc_alloc_zone_vec_rtvec_def.
+
+2011-08-16 Richard Guenther <rguenther@suse.de>
+
+ * tree-vrp.c (extract_range_from_multiplicative_op_1): New
+ helper factored out from ...
+ (extract_range_from_binary_expr_1): ... here. Re-structure
+ to avoid lumping together handling of too-different tree codes.
+
+2011-08-15 Richard Henderson <rth@redhat.com>
+
+ PR middle-end/50006
+ * explow.c (allocate_dynamic_stack_space): Move suppress_reg_args_size
+ setting out to include allocate_stack named pattern as well.
+ * builtins.c (expand_builtin_apply): Add ARG_SIZE 0 note.
+ * stmt.c (expand_stack_restore): Likewise.
+
+2011-08-15 Richard Guenther <rguenther@suse.de>
+
+ PR middle-end/50082
+ * fold-const.c (maybe_canonicalize_comparison_1): Properly
+ convert the modified operand to the other operand type.
+ (fold_comparison): Call maybe_canonicalize_comparison_1 with
+ useless conversions stripped from comparison operands.
+
+2011-08-15 Richard Guenther <rguenther@suse.de>
+
+ * tree-vrp.c (value_range_nonnegative_p): Fix anti-range case.
+ (extract_range_from_unary_expr_1): Restructure.
+
+2011-08-15 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/50058
+ * tree-ssa-sccvn.c (vn_reference_lookup_3): Relax aggregate
+ copy matching.
+
+2011-08-15 Ramana Radhakrishnan <ramana.radhakrishnan@linaro.org>
+
+ PR target/50022
+ * config/arm/arm.c (output_move_double): Add 2 parameters
+ to count the number of insns emitted and whether to emit or not.
+ Use the flag to decide when to emit and count number of instructions
+ that will be emitted.
+ Handle case where output_move_double might be called for calculating
+ lengths with an invalid constant.
+ (arm_count_output_move_double_insns): Define.
+ * config/arm/arm-protos.h (arm_count_output_move_double_insns): Declare.
+ (output_move_double): Adjust prototype.
+ * config/arm/vfp.md ("*movdi_vfp"): Adjust call to
+ output_move_double.
+ ("*movdi_vfp_cortexa8"): Likewise and add attribute
+ for ce_count.
+ * config/arm/arm.md ("*arm_movdi"): Adjust call to output_move_double.
+ ("*movdf_soft_insn"): Likewise.
+ * config/arm/cirrus.md ("*cirrus_arm_movdi"): Likewise.
+ ("*cirrus_thumb2_movdi"): Likewise.
+ ("*thumb2_cirrus_movdf_hard_insn"): Likewise.
+ ("*cirrus_movdf_hard_insn"): Likewise.
+ * config/arm/neon.md (*neon_mov<mode> VD): Likewise.
+ * config/arm/iwmmxt.md ("*iwmmxt_arm_movdi"): Likewise.
+ ("mov<mode>_internal VMMX"): Likewise.
+ * config/arm/fpa.md (*movdf_fpa, *thumb2_movdf_fpa): Likewise.
+
+2011-08-14 Uros Bizjak <ubizjak@gmail.com>
+
+ * config/i386/i386.c (ix86_expand_round_sse4): New function.
+ * config/i386/i386-protos.h (ix86_expand_round_sse4): New prototype.
+ * config/i386/i386.md (round<mode>2): Use ix86_expand_round_sse4
+ for TARGET_ROUND.
+
+ (rint<mode>2): Simplify TARGET_ROUND check.
+ (floor<mode>2): Ditto.
+ (ceil<mode>2): Ditto.
+ (btrunc<mode>2): Ditto.
+
+2011-08-14 Anatoly Sokolov <aesok@post.ru>
+
+ * config/mmix/mmix.c (TARGET_PREFERRED_OUTPUT_RELOAD_CLASS): Redefine
+ as mmix_preferred_output_reload_class.
+
+2011-08-14 Georg-Johann Lay <avr@gjlay.de>
- * cp/class.c (update_vtable_entry_for_fn): Set
- LOST_PRIMARY bit properly.
+ PR target/49903
+ * config/avr/avr.md (UNSPEC_IDENTITY): New c_enum.
+ (branch_unspec): New insn.
+ (branch): Beauty farm.
+ * config/avr/avr.c (compare_condition): Use JUMP_P. Test SET_SRC
+ to be IF_THEN_ELSE.
+ (avr_compare_pattern, avr_reorg_remove_redundant_compare):
+ New static functions.
+ (avr_reorg): Use them. Use next_real_insn instead of NEXT_INSN.
+ Use CONST_INT_P. Beauty.
2011-08-12 Richard Henderson <rth@redhat.com>
@@ -20,98 +215,97 @@
2011-08-12 Diego Novillo <dnovillo@google.com>
* data-streamer.h (streamer_write_zero): Rename from output_zero.
- (streamer_write_uhwi): Rename from lto_output_uleb128.
- (streamer_write_hwi): Rename from output_sleb128.
- (streamer_write_string): Rename from lto_output_string.
- (streamer_string_index): Rename from lto_string_index.
- (streamer_write_string_with_length): Rename from
- lto_output_string_with_length.
- (streamer_write_uhwi_stream): Rename from lto_output_uleb128_stream.
- (streamer_write_hwi_stream): Rename from lto_output_sleb128_stream.
- (streamer_read_string): Rename from lto_input_string.
- (streamer_read_indexed_string): Rename from input_string_internal.
- (streamer_read_uhwi): Rename from lto_input_uleb128.
- (streamer_read_hwi): Rename from lto_input_sleb128.
- (streamer_write_hwi_in_range): Rename from lto_output_int_in_range.
- (streamer_read_hwi_in_range): Rename from lto_input_int_in_range.
- (streamer_write_enum): Rename from lto_output_enum.
- (streamer_read_enum): Rename from lto_input_enum.
- (streamer_write_record_start): Rename from output_record_start.
- (streamer_read_record_start): Rename from input_record_start.
- (streamer_write_bitpack): Rename from lto_output_bitpack.
- (streamer_read_bitpack): Rename from lto_input_bitpack.
- (streamer_write_char_stream): Rename from lto_output_1_stream.
- (streamer_read_uchar): Rename from lto_input_1_unsigned.
- * tree-streamer.h (streamer_cache_d): Rename from lto_streamer_cache_d.
- (streamer_handle_as_builtin_p): Rename from lto_stream_as_builtin_p.
- (streamer_read_string_cst): Rename from input_string_cst.
- (streamer_read_chain): Rename from lto_input_chain.
- (streamer_alloc_tree): Rename from lto_materialize_tree.
- (streamer_read_tree_body): Rename from lto_input_tree_pointers.
- (streamer_get_pickled_tree): Rename from lto_get_pickled_tree.
- (streamer_get_builtin_tree): Rename from lto_get_builtin_tree.
- (streamer_read_integer_cst): Rename from lto_input_integer_cst.
- (streamer_read_tree_bitfields): Rename from tree_read_bitfields.
- (streamer_write_chain): Rename from lto_output_chain.
- (streamer_write_tree_header): Rename from lto_output_tree_header.
- (streamer_pack_tree_bitfields): Rename from pack_value_fields.
- (streamer_write_tree_body): Rename from lto_output_tree_pointers.
- (streamer_write_integer_cst): Rename from lto_output_integer_cst.
- (streamer_write_builtin): Rename from lto_output_builtin_tree.
- (streamer_check_handled_ts_structures): Rename from
- check_handled_ts_structures.
- (streamer_tree_cache_insert): Rename from lto_streamer_cache_insert.
- (streamer_tree_cache_insert_at): Rename from
- lto_streamer_cache_insert_at.
- (streamer_tree_cache_append): Rename from lto_streamer_cache_append.
- (streamer_tree_cache_lookup): Rename from lto_streamer_cache_lookup.
- (streamer_tree_cache_get): Rename from lto_streamer_cache_get.
- (streamer_tree_cache_create): Rename from lto_streamer_cache_create.
- (streamer_tree_cache_delete): Rename from lto_streamer_cache_delete.
- * tree-streamer-out.c (write_string_cst): Rename from output_string_cst.
- (write_identifier): Rename from output_identifier.
- (write_ts_common_tree_pointers): Rename from
- lto_output_ts_common_tree_pointers.
- (write_ts_vector_tree_pointers): Rename from
- lto_output_ts_vector_tree_pointers.
- (write_ts_complex_tree_pointers): Rename from
- lto_output_ts_complex_tree_pointers.
- (write_ts_decl_minimal_tree_pointers): Rename from
- lto_output_ts_decl_minimal_tree_pointers.
- (write_ts_decl_common_tree_pointers): Rename from
- lto_output_ts_decl_common_tree_pointers.
- (write_ts_decl_non_common_tree_pointers): Rename from
- lto_output_ts_decl_non_common_tree_pointers.
- (write_ts_decl_with_vis_tree_pointers): Rename from
- lto_output_ts_decl_with_vis_tree_pointers.
- (write_ts_field_decl_tree_pointers): Rename from
- lto_output_ts_field_decl_tree_pointers.
- (write_ts_function_decl_tree_pointers): Rename from
- lto_output_ts_function_decl_tree_pointers.
- (write_ts_type_common_tree_pointers): Rename from
- lto_output_ts_type_common_tree_pointers.
- (write_ts_type_non_common_tree_pointers): Rename from
- lto_output_ts_type_non_common_tree_pointers.
- (write_ts_list_tree_pointers): Rename from
- lto_output_ts_list_tree_pointers.
- (write_ts_vec_tree_pointers): Rename from
- lto_output_ts_vec_tree_pointers.
- (write_ts_exp_tree_pointers): Rename from
- lto_output_ts_exp_tree_pointers.
- (write_ts_block_tree_pointers): Rename from
- lto_output_ts_block_tree_pointers.
- (write_ts_binfo_tree_pointers): Rename from
- lto_output_ts_binfo_tree_pointers.
- (write_ts_constructor_tree_pointers): Rename from
- lto_output_ts_constructor_tree_pointers.
- (write_ts_target_option): Rename from
- lto_output_ts_target_option.
- (write_ts_translation_unit_decl_tree_pointers): Rename from
- lto_output_ts_translation_unit_decl_tree_pointers.
- * tree-streamer.c (streamer_tree_cache_add_to_node_array):
- Rename from lto_streamer_cache_add_to_node_array.
- (streamer_tree_cache_insert_1): Rename from lto_streamer_cache_insert_1.
- (record_common_node): Rename from lto_record_common_node.
+ (streamer_write_uhwi): Rename from lto_output_uleb128.
+ (streamer_write_hwi): Rename from output_sleb128.
+ (streamer_write_string): Rename from lto_output_string.
+ (streamer_string_index): Rename from lto_string_index.
+ (streamer_write_string_with_length): Rename from
+ lto_output_string_with_length.
+ (streamer_write_uhwi_stream): Rename from lto_output_uleb128_stream.
+ (streamer_write_hwi_stream): Rename from lto_output_sleb128_stream.
+ (streamer_read_string): Rename from lto_input_string.
+ (streamer_read_indexed_string): Rename from input_string_internal.
+ (streamer_read_uhwi): Rename from lto_input_uleb128.
+ (streamer_read_hwi): Rename from lto_input_sleb128.
+ (streamer_write_hwi_in_range): Rename from lto_output_int_in_range.
+ (streamer_read_hwi_in_range): Rename from lto_input_int_in_range.
+ (streamer_write_enum): Rename from lto_output_enum.
+ (streamer_read_enum): Rename from lto_input_enum.
+ (streamer_write_record_start): Rename from output_record_start.
+ (streamer_read_record_start): Rename from input_record_start.
+ (streamer_write_bitpack): Rename from lto_output_bitpack.
+ (streamer_read_bitpack): Rename from lto_input_bitpack.
+ (streamer_write_char_stream): Rename from lto_output_1_stream.
+ (streamer_read_uchar): Rename from lto_input_1_unsigned.
+ * tree-streamer.h (streamer_cache_d): Rename from lto_streamer_cache_d.
+ (streamer_handle_as_builtin_p): Rename from lto_stream_as_builtin_p.
+ (streamer_read_string_cst): Rename from input_string_cst.
+ (streamer_read_chain): Rename from lto_input_chain.
+ (streamer_alloc_tree): Rename from lto_materialize_tree.
+ (streamer_read_tree_body): Rename from lto_input_tree_pointers.
+ (streamer_get_pickled_tree): Rename from lto_get_pickled_tree.
+ (streamer_get_builtin_tree): Rename from lto_get_builtin_tree.
+ (streamer_read_integer_cst): Rename from lto_input_integer_cst.
+ (streamer_read_tree_bitfields): Rename from tree_read_bitfields.
+ (streamer_write_chain): Rename from lto_output_chain.
+ (streamer_write_tree_header): Rename from lto_output_tree_header.
+ (streamer_pack_tree_bitfields): Rename from pack_value_fields.
+ (streamer_write_tree_body): Rename from lto_output_tree_pointers.
+ (streamer_write_integer_cst): Rename from lto_output_integer_cst.
+ (streamer_write_builtin): Rename from lto_output_builtin_tree.
+ (streamer_check_handled_ts_structures): Rename from
+ check_handled_ts_structures.
+ (streamer_tree_cache_insert): Rename from lto_streamer_cache_insert.
+ (streamer_tree_cache_insert_at): Rename from
+ lto_streamer_cache_insert_at.
+ (streamer_tree_cache_append): Rename from lto_streamer_cache_append.
+ (streamer_tree_cache_lookup): Rename from lto_streamer_cache_lookup.
+ (streamer_tree_cache_get): Rename from lto_streamer_cache_get.
+ (streamer_tree_cache_create): Rename from lto_streamer_cache_create.
+ (streamer_tree_cache_delete): Rename from lto_streamer_cache_delete.
+ * tree-streamer-out.c (write_string_cst): Rename from output_string_cst.
+ (write_identifier): Rename from output_identifier.
+ (write_ts_common_tree_pointers): Rename from
+ lto_output_ts_common_tree_pointers.
+ (write_ts_vector_tree_pointers): Rename from
+ lto_output_ts_vector_tree_pointers.
+ (write_ts_complex_tree_pointers): Rename from
+ lto_output_ts_complex_tree_pointers.
+ (write_ts_decl_minimal_tree_pointers): Rename from
+ lto_output_ts_decl_minimal_tree_pointers.
+ (write_ts_decl_common_tree_pointers): Rename from
+ lto_output_ts_decl_common_tree_pointers.
+ (write_ts_decl_non_common_tree_pointers): Rename from
+ lto_output_ts_decl_non_common_tree_pointers.
+ (write_ts_decl_with_vis_tree_pointers): Rename from
+ lto_output_ts_decl_with_vis_tree_pointers.
+ (write_ts_field_decl_tree_pointers): Rename from
+ lto_output_ts_field_decl_tree_pointers.
+ (write_ts_function_decl_tree_pointers): Rename from
+ lto_output_ts_function_decl_tree_pointers.
+ (write_ts_type_common_tree_pointers): Rename from
+ lto_output_ts_type_common_tree_pointers.
+ (write_ts_type_non_common_tree_pointers): Rename from
+ lto_output_ts_type_non_common_tree_pointers.
+ (write_ts_list_tree_pointers): Rename from
+ lto_output_ts_list_tree_pointers.
+ (write_ts_vec_tree_pointers): Rename from
+ lto_output_ts_vec_tree_pointers.
+ (write_ts_exp_tree_pointers): Rename from
+ lto_output_ts_exp_tree_pointers.
+ (write_ts_block_tree_pointers): Rename from
+ lto_output_ts_block_tree_pointers.
+ (write_ts_binfo_tree_pointers): Rename from
+ lto_output_ts_binfo_tree_pointers.
+ (write_ts_constructor_tree_pointers): Rename from
+ lto_output_ts_constructor_tree_pointers.
+ (write_ts_target_option): Rename from lto_output_ts_target_option.
+ (write_ts_translation_unit_decl_tree_pointers): Rename from
+ lto_output_ts_translation_unit_decl_tree_pointers.
+ * tree-streamer.c (streamer_tree_cache_add_to_node_array):
+ Rename from lto_streamer_cache_add_to_node_array.
+ (streamer_tree_cache_insert_1): Rename from lto_streamer_cache_insert_1.
+ (record_common_node): Rename from lto_record_common_node.
* streamer-hooks.h (bitpack_d, lto_streamer_cache_d): Remove forward
declarations.
@@ -126,8 +320,7 @@
2011-08-12 Nick Clifton <nickc@redhat.com>
- * builtins.c (expand_builtin_memcmp): Do not use cmpstrnsi
- pattern.
+ * builtins.c (expand_builtin_memcmp): Do not use cmpstrnsi pattern.
* doc/md.texi (cmpstrn): Note that the comparison stops if both
fetched bytes are zero.
(cmpstr): Likewise.
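
The leading Richard Guenther entry above repeatedly replaces ad-hoc sizetype conversions with a single convert_to_ptrofftype helper before POINTER_PLUS_EXPRs are built. As a standalone analogy only (plain C, not GCC internals; the names below are illustrative), the idea is to funnel every offset through one canonical pointer-offset type before doing pointer arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: one canonical "pointer offset" type, and a helper that
   every offset passes through before it is added to a pointer.  The GCC
   helpers do the analogous conversion on 'tree' nodes, not C values.  */
typedef uintptr_t ptroff_t;

static ptroff_t
to_ptrofftype (long long off)
{
  return (ptroff_t) off;
}

static void *
pointer_plus (void *base, ptroff_t off)
{
  return (char *) base + off;
}

int
main (void)
{
  int buf[4] = { 10, 20, 30, 40 };
  int *p = pointer_plus (buf, to_ptrofftype (2 * (long long) sizeof (int)));
  printf ("%d\n", *p);  /* prints 30 */
  return 0;
}
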
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 972110728e8..5bf258bd131 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20110813
+20110817
diff --git a/gcc/builtins.c b/gcc/builtins.c
index e0afc908d5a..1f263073fe5 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -1680,6 +1680,7 @@ expand_builtin_apply (rtx function, rtx arguments, rtx argsize)
else
#endif
emit_stack_restore (SAVE_BLOCK, old_stack_level);
+ fixup_args_size_notes (call_insn, get_last_insn(), 0);
OK_DEFER_POP;
@@ -3630,7 +3631,8 @@ expand_builtin_bzero (tree exp)
calling bzero instead of memset. */
return expand_builtin_memset_args (dest, integer_zero_node,
- fold_convert_loc (loc, sizetype, size),
+ fold_convert_loc (loc,
+ size_type_node, size),
const0_rtx, VOIDmode, exp);
}
@@ -4224,11 +4226,10 @@ std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
gimplify_and_add (t, pre_p);
- t = fold_convert (sizetype, valist_tmp);
t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
- fold_convert (TREE_TYPE (valist),
- fold_build2 (BIT_AND_EXPR, sizetype, t,
- size_int (-boundary))));
+ fold_build2 (BIT_AND_EXPR, TREE_TYPE (valist),
+ valist_tmp,
+ build_int_cst (TREE_TYPE (valist), -boundary)));
gimplify_and_add (t, pre_p);
}
else
@@ -7968,7 +7969,7 @@ fold_builtin_bzero (location_t loc, tree dest, tree size, bool ignore)
calling bzero instead of memset. */
return fold_builtin_memset (loc, dest, integer_zero_node,
- fold_convert_loc (loc, sizetype, size),
+ fold_convert_loc (loc, size_type_node, size),
void_type_node, ignore);
}
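
The std_gimplify_va_arg_expr hunk above now builds the align-to-boundary mask directly on the pointer value instead of round-tripping through sizetype. A standalone sketch of that computation (illustrative C, not the gimplification code itself):

#include <stdint.h>
#include <stdio.h>

/* Round a pointer up to a power-of-two BOUNDARY: add BOUNDARY-1, then mask
   with -BOUNDARY, all in the pointer-sized integer itself.  This mirrors the
   BIT_AND_EXPR that is now applied directly to the valist value.  */
static void *
align_up (void *p, uintptr_t boundary)
{
  uintptr_t v = (uintptr_t) p;
  v = (v + boundary - 1) & -boundary;   /* -boundary == ~(boundary - 1) */
  return (void *) v;
}

int
main (void)
{
  char buf[64];
  void *p = align_up (buf + 3, 16);
  printf ("%p -> %p\n", (void *) (buf + 3), p);
  return 0;
}
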
diff --git a/gcc/c-typeck.c b/gcc/c-typeck.c
index 7577f4f830f..bd932dba54a 100644
--- a/gcc/c-typeck.c
+++ b/gcc/c-typeck.c
@@ -3652,7 +3652,7 @@ build_unary_op (location_t location,
}
inc = c_size_in_bytes (TREE_TYPE (argtype));
- inc = fold_convert_loc (location, sizetype, inc);
+ inc = convert_to_ptrofftype_loc (location, inc);
}
else if (FRACT_MODE_P (TYPE_MODE (argtype)))
{
diff --git a/gcc/cgraphunit.c b/gcc/cgraphunit.c
index 93664f9d8a4..5e368f87208 100644
--- a/gcc/cgraphunit.c
+++ b/gcc/cgraphunit.c
@@ -1478,7 +1478,6 @@ thunk_adjust (gimple_stmt_iterator * bsi,
tree vtabletmp;
tree vtabletmp2;
tree vtabletmp3;
- tree offsettmp;
if (!vtable_entry_type)
{
@@ -1527,15 +1526,10 @@ thunk_adjust (gimple_stmt_iterator * bsi,
mark_symbols_for_renaming (stmt);
find_referenced_vars_in (stmt);
- /* Cast to sizetype. */
- offsettmp = create_tmp_var (sizetype, "offset");
- stmt = gimple_build_assign (offsettmp, fold_convert (sizetype, vtabletmp3));
- gsi_insert_after (bsi, stmt, GSI_NEW_STMT);
- mark_symbols_for_renaming (stmt);
- find_referenced_vars_in (stmt);
-
/* Adjust the `this' pointer. */
- ptr = fold_build_pointer_plus_loc (input_location, ptr, offsettmp);
+ ptr = fold_build_pointer_plus_loc (input_location, ptr, vtabletmp3);
+ ptr = force_gimple_operand_gsi (bsi, ptr, true, NULL_TREE, false,
+ GSI_CONTINUE_LINKING);
}
if (!this_adjusting
@@ -2417,8 +2411,7 @@ cgraph_redirect_edge_call_stmt_to_callee (struct cgraph_edge *e)
gsi = gsi_for_stmt (e->call_stmt);
gsi_computed = true;
gimple_adjust_this_by_delta (&gsi,
- build_int_cst (sizetype,
- e->indirect_info->thunk_delta));
+ size_int (e->indirect_info->thunk_delta));
e->indirect_info->thunk_delta = 0;
}
diff --git a/gcc/config/arm/arm-protos.h b/gcc/config/arm/arm-protos.h
index 2f7c508cfc5..235370483a1 100644
--- a/gcc/config/arm/arm-protos.h
+++ b/gcc/config/arm/arm-protos.h
@@ -131,8 +131,9 @@ extern const char *output_mov_long_double_arm_from_fpa (rtx *);
extern const char *output_mov_long_double_arm_from_arm (rtx *);
extern const char *output_mov_double_fpa_from_arm (rtx *);
extern const char *output_mov_double_arm_from_fpa (rtx *);
-extern const char *output_move_double (rtx *);
+extern const char *output_move_double (rtx *, bool, int *count);
extern const char *output_move_quad (rtx *);
+extern int arm_count_output_move_double_insns (rtx *);
extern const char *output_move_vfp (rtx *operands);
extern const char *output_move_neon (rtx *operands);
extern int arm_attr_length_move_neon (rtx);
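
The prototype change above turns output_move_double into a routine that can either emit its assembly or merely report how many instructions it would emit, so that length attributes can reuse the same logic. A minimal standalone sketch of that emit-or-count idiom (illustrative only, not the ARM code that follows):

#include <stdio.h>

/* Illustrative emit-or-count idiom: one routine serves both the output
   template (EMIT true) and the length computation (EMIT false, COUNT set),
   in the same way output_move_double is used in the ARM patch below.  */
static const char *
emit_move_pair (int have_pair_insn, int emit, int *count)
{
  if (count)
    *count = have_pair_insn ? 1 : 2;

  if (!emit)
    return "";

  return have_pair_insn
         ? "ldrd\tr0, [r2]"
         : "ldr\tr0, [r2]\n\tldr\tr1, [r2, #4]";
}

static int
count_move_pair_insns (int have_pair_insn)
{
  int count;
  emit_move_pair (have_pair_insn, 0, &count);
  return count;
}

int
main (void)
{
  printf ("%s\n", emit_move_pair (1, 1, NULL));
  printf ("%d insns\n", count_move_pair_insns (0));
  return 0;
}
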
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index afa989e2ba2..fc2fd474b7c 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -13284,11 +13284,24 @@ output_mov_double_arm_from_fpa (rtx *operands)
/* Output a move between double words. It must be REG<-MEM
or MEM<-REG. */
const char *
-output_move_double (rtx *operands)
+output_move_double (rtx *operands, bool emit, int *count)
{
enum rtx_code code0 = GET_CODE (operands[0]);
enum rtx_code code1 = GET_CODE (operands[1]);
rtx otherops[3];
+ if (count)
+ *count = 1;
+
+ /* The only case when this might happen is when
+ you are looking at the length of a DImode instruction
+ that has an invalid constant in it. */
+ if (code0 == REG && code1 != MEM)
+ {
+ gcc_assert (!emit);
+ *count = 2;
+ return "";
+ }
+
if (code0 == REG)
{
@@ -13301,35 +13314,49 @@ output_move_double (rtx *operands)
switch (GET_CODE (XEXP (operands[1], 0)))
{
case REG:
- if (TARGET_LDRD
- && !(fix_cm3_ldrd && reg0 == REGNO(XEXP (operands[1], 0))))
- output_asm_insn ("ldr%(d%)\t%0, [%m1]", operands);
- else
- output_asm_insn ("ldm%(ia%)\t%m1, %M0", operands);
+
+ if (emit)
+ {
+ if (TARGET_LDRD
+ && !(fix_cm3_ldrd && reg0 == REGNO(XEXP (operands[1], 0))))
+ output_asm_insn ("ldr%(d%)\t%0, [%m1]", operands);
+ else
+ output_asm_insn ("ldm%(ia%)\t%m1, %M0", operands);
+ }
break;
case PRE_INC:
gcc_assert (TARGET_LDRD);
- output_asm_insn ("ldr%(d%)\t%0, [%m1, #8]!", operands);
+ if (emit)
+ output_asm_insn ("ldr%(d%)\t%0, [%m1, #8]!", operands);
+
break;
case PRE_DEC:
- if (TARGET_LDRD)
- output_asm_insn ("ldr%(d%)\t%0, [%m1, #-8]!", operands);
- else
- output_asm_insn ("ldm%(db%)\t%m1!, %M0", operands);
+ if (emit)
+ {
+ if (TARGET_LDRD)
+ output_asm_insn ("ldr%(d%)\t%0, [%m1, #-8]!", operands);
+ else
+ output_asm_insn ("ldm%(db%)\t%m1!, %M0", operands);
+ }
break;
case POST_INC:
- if (TARGET_LDRD)
- output_asm_insn ("ldr%(d%)\t%0, [%m1], #8", operands);
- else
- output_asm_insn ("ldm%(ia%)\t%m1!, %M0", operands);
+
+ if (emit)
+ {
+ if (TARGET_LDRD)
+ output_asm_insn ("ldr%(d%)\t%0, [%m1], #8", operands);
+ else
+ output_asm_insn ("ldm%(ia%)\t%m1!, %M0", operands);
+ }
break;
case POST_DEC:
gcc_assert (TARGET_LDRD);
- output_asm_insn ("ldr%(d%)\t%0, [%m1], #-8", operands);
+ if (emit)
+ output_asm_insn ("ldr%(d%)\t%0, [%m1], #-8", operands);
break;
case PRE_MODIFY:
@@ -13347,8 +13374,13 @@ output_move_double (rtx *operands)
if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
{
/* Registers overlap so split out the increment. */
- output_asm_insn ("add%?\t%1, %1, %2", otherops);
- output_asm_insn ("ldr%(d%)\t%0, [%1] @split", otherops);
+ if (emit)
+ {
+ output_asm_insn ("add%?\t%1, %1, %2", otherops);
+ output_asm_insn ("ldr%(d%)\t%0, [%1] @split", otherops);
+ }
+ if (count)
+ *count = 2;
}
else
{
@@ -13359,11 +13391,20 @@ output_move_double (rtx *operands)
|| GET_CODE (otherops[2]) != CONST_INT
|| (INTVAL (otherops[2]) > -256
&& INTVAL (otherops[2]) < 256))
- output_asm_insn ("ldr%(d%)\t%0, [%1, %2]!", otherops);
+ {
+ if (emit)
+ output_asm_insn ("ldr%(d%)\t%0, [%1, %2]!", otherops);
+ }
else
{
- output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
- output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
+ if (emit)
+ {
+ output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
+ output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
+ }
+ if (count)
+ *count = 2;
+
}
}
}
@@ -13376,11 +13417,19 @@ output_move_double (rtx *operands)
|| GET_CODE (otherops[2]) != CONST_INT
|| (INTVAL (otherops[2]) > -256
&& INTVAL (otherops[2]) < 256))
- output_asm_insn ("ldr%(d%)\t%0, [%1], %2", otherops);
+ {
+ if (emit)
+ output_asm_insn ("ldr%(d%)\t%0, [%1], %2", otherops);
+ }
else
{
- output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
- output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
+ if (emit)
+ {
+ output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
+ output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
+ }
+ if (count)
+ *count = 2;
}
}
break;
@@ -13393,12 +13442,19 @@ output_move_double (rtx *operands)
/* Use the second register of the pair to avoid problematic
overlap. */
otherops[1] = operands[1];
- output_asm_insn ("adr%?\t%0, %1", otherops);
+ if (emit)
+ output_asm_insn ("adr%?\t%0, %1", otherops);
operands[1] = otherops[0];
- if (TARGET_LDRD)
- output_asm_insn ("ldr%(d%)\t%0, [%1]", operands);
- else
- output_asm_insn ("ldm%(ia%)\t%1, %M0", operands);
+ if (emit)
+ {
+ if (TARGET_LDRD)
+ output_asm_insn ("ldr%(d%)\t%0, [%1]", operands);
+ else
+ output_asm_insn ("ldm%(ia%)\t%1, %M0", operands);
+ }
+
+ if (count)
+ *count = 2;
break;
/* ??? This needs checking for thumb2. */
@@ -13417,17 +13473,20 @@ output_move_double (rtx *operands)
switch ((int) INTVAL (otherops[2]))
{
case -8:
- output_asm_insn ("ldm%(db%)\t%1, %M0", otherops);
+ if (emit)
+ output_asm_insn ("ldm%(db%)\t%1, %M0", otherops);
return "";
case -4:
if (TARGET_THUMB2)
break;
- output_asm_insn ("ldm%(da%)\t%1, %M0", otherops);
+ if (emit)
+ output_asm_insn ("ldm%(da%)\t%1, %M0", otherops);
return "";
case 4:
if (TARGET_THUMB2)
break;
- output_asm_insn ("ldm%(ib%)\t%1, %M0", otherops);
+ if (emit)
+ output_asm_insn ("ldm%(ib%)\t%1, %M0", otherops);
return "";
}
}
@@ -13455,34 +13514,50 @@ output_move_double (rtx *operands)
if (reg_overlap_mentioned_p (operands[0], otherops[2])
|| (fix_cm3_ldrd && reg0 == REGNO (otherops[1])))
{
- output_asm_insn ("add%?\t%0, %1, %2", otherops);
- output_asm_insn ("ldr%(d%)\t%0, [%1]", operands);
+ if (emit)
+ {
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ output_asm_insn ("ldr%(d%)\t%0, [%1]", operands);
+ }
+ if (count)
+ *count = 2;
}
else
{
otherops[0] = operands[0];
- output_asm_insn ("ldr%(d%)\t%0, [%1, %2]", otherops);
+ if (emit)
+ output_asm_insn ("ldr%(d%)\t%0, [%1, %2]", otherops);
}
return "";
}
if (GET_CODE (otherops[2]) == CONST_INT)
+ {
+ if (emit)
+ {
+ if (!(const_ok_for_arm (INTVAL (otherops[2]))))
+ output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
+ else
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ }
+
+ }
+ else
{
- if (!(const_ok_for_arm (INTVAL (otherops[2]))))
- output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
- else
+ if (emit)
output_asm_insn ("add%?\t%0, %1, %2", otherops);
}
- else
- output_asm_insn ("add%?\t%0, %1, %2", otherops);
}
else
- output_asm_insn ("sub%?\t%0, %1, %2", otherops);
+ {
+ if (emit)
+ output_asm_insn ("sub%?\t%0, %1, %2", otherops);
+ }
if (TARGET_LDRD)
return "ldr%(d%)\t%0, [%1]";
-
- return "ldm%(ia%)\t%1, %M0";
+
+ return "ldm%(ia%)\t%1, %M0";
}
else
{
@@ -13490,13 +13565,24 @@ output_move_double (rtx *operands)
/* Take care of overlapping base/data reg. */
if (reg_mentioned_p (operands[0], operands[1]))
{
- output_asm_insn ("ldr%?\t%0, %1", otherops);
- output_asm_insn ("ldr%?\t%0, %1", operands);
+ if (emit)
+ {
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ }
+ if (count)
+ *count = 2;
+
}
else
{
- output_asm_insn ("ldr%?\t%0, %1", operands);
- output_asm_insn ("ldr%?\t%0, %1", otherops);
+ if (emit)
+ {
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
+ }
+ if (count)
+ *count = 2;
}
}
}
@@ -13510,34 +13596,45 @@ output_move_double (rtx *operands)
switch (GET_CODE (XEXP (operands[0], 0)))
{
case REG:
- if (TARGET_LDRD)
- output_asm_insn ("str%(d%)\t%1, [%m0]", operands);
- else
- output_asm_insn ("stm%(ia%)\t%m0, %M1", operands);
+ if (emit)
+ {
+ if (TARGET_LDRD)
+ output_asm_insn ("str%(d%)\t%1, [%m0]", operands);
+ else
+ output_asm_insn ("stm%(ia%)\t%m0, %M1", operands);
+ }
break;
case PRE_INC:
gcc_assert (TARGET_LDRD);
- output_asm_insn ("str%(d%)\t%1, [%m0, #8]!", operands);
+ if (emit)
+ output_asm_insn ("str%(d%)\t%1, [%m0, #8]!", operands);
break;
case PRE_DEC:
- if (TARGET_LDRD)
- output_asm_insn ("str%(d%)\t%1, [%m0, #-8]!", operands);
- else
- output_asm_insn ("stm%(db%)\t%m0!, %M1", operands);
+ if (emit)
+ {
+ if (TARGET_LDRD)
+ output_asm_insn ("str%(d%)\t%1, [%m0, #-8]!", operands);
+ else
+ output_asm_insn ("stm%(db%)\t%m0!, %M1", operands);
+ }
break;
case POST_INC:
- if (TARGET_LDRD)
- output_asm_insn ("str%(d%)\t%1, [%m0], #8", operands);
- else
- output_asm_insn ("stm%(ia%)\t%m0!, %M1", operands);
+ if (emit)
+ {
+ if (TARGET_LDRD)
+ output_asm_insn ("str%(d%)\t%1, [%m0], #8", operands);
+ else
+ output_asm_insn ("stm%(ia%)\t%m0!, %M1", operands);
+ }
break;
case POST_DEC:
gcc_assert (TARGET_LDRD);
- output_asm_insn ("str%(d%)\t%1, [%m0], #-8", operands);
+ if (emit)
+ output_asm_insn ("str%(d%)\t%1, [%m0], #-8", operands);
break;
case PRE_MODIFY:
@@ -13555,19 +13652,35 @@ output_move_double (rtx *operands)
{
if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
{
- output_asm_insn ("str%?\t%0, [%1, %2]!", otherops);
- output_asm_insn ("str%?\t%H0, [%1, #4]", otherops);
+ if (emit)
+ {
+ output_asm_insn ("str%?\t%0, [%1, %2]!", otherops);
+ output_asm_insn ("str%?\t%H0, [%1, #4]", otherops);
+ }
+ if (count)
+ *count = 2;
}
else
{
- output_asm_insn ("str%?\t%H0, [%1, #4]", otherops);
- output_asm_insn ("str%?\t%0, [%1], %2", otherops);
+ if (emit)
+ {
+ output_asm_insn ("str%?\t%H0, [%1, #4]", otherops);
+ output_asm_insn ("str%?\t%0, [%1], %2", otherops);
+ }
+ if (count)
+ *count = 2;
}
}
else if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
- output_asm_insn ("str%(d%)\t%0, [%1, %2]!", otherops);
+ {
+ if (emit)
+ output_asm_insn ("str%(d%)\t%0, [%1, %2]!", otherops);
+ }
else
- output_asm_insn ("str%(d%)\t%0, [%1], %2", otherops);
+ {
+ if (emit)
+ output_asm_insn ("str%(d%)\t%0, [%1], %2", otherops);
+ }
break;
case PLUS:
@@ -13577,19 +13690,22 @@ output_move_double (rtx *operands)
switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
{
case -8:
- output_asm_insn ("stm%(db%)\t%m0, %M1", operands);
+ if (emit)
+ output_asm_insn ("stm%(db%)\t%m0, %M1", operands);
return "";
case -4:
if (TARGET_THUMB2)
break;
- output_asm_insn ("stm%(da%)\t%m0, %M1", operands);
+ if (emit)
+ output_asm_insn ("stm%(da%)\t%m0, %M1", operands);
return "";
case 4:
if (TARGET_THUMB2)
break;
- output_asm_insn ("stm%(ib%)\t%m0, %M1", operands);
+ if (emit)
+ output_asm_insn ("stm%(ib%)\t%m0, %M1", operands);
return "";
}
}
@@ -13602,7 +13718,8 @@ output_move_double (rtx *operands)
{
otherops[0] = operands[1];
otherops[1] = XEXP (XEXP (operands[0], 0), 0);
- output_asm_insn ("str%(d%)\t%0, [%1, %2]", otherops);
+ if (emit)
+ output_asm_insn ("str%(d%)\t%0, [%1, %2]", otherops);
return "";
}
/* Fall through */
@@ -13610,8 +13727,14 @@ output_move_double (rtx *operands)
default:
otherops[0] = adjust_address (operands[0], SImode, 4);
otherops[1] = operands[1];
- output_asm_insn ("str%?\t%1, %0", operands);
- output_asm_insn ("str%?\t%H1, %0", otherops);
+ if (emit)
+ {
+ output_asm_insn ("str%?\t%1, %0", operands);
+ output_asm_insn ("str%?\t%H1, %0", otherops);
+ }
+ if (count)
+ *count = 2;
+
}
}
@@ -24205,4 +24328,13 @@ arm_attr_length_push_multi(rtx parallel_op, rtx first_op)
return 4;
}
+/* Compute the number of instructions emitted by output_move_double. */
+int
+arm_count_output_move_double_insns (rtx *operands)
+{
+ int count;
+ output_move_double (operands, false, &count);
+ return count;
+}
+
#include "gt-arm.h"
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index 3d4dcfa42a5..4cbd5e563ad 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -4981,7 +4981,7 @@
case 2:
return \"#\";
default:
- return output_move_double (operands);
+ return output_move_double (operands, true, NULL);
}
"
[(set_attr "length" "8,12,16,8,8")
@@ -6341,7 +6341,7 @@
case 2:
return \"#\";
default:
- return output_move_double (operands);
+ return output_move_double (operands, true, NULL);
}
"
[(set_attr "length" "8,12,16,8,8")
diff --git a/gcc/config/arm/cirrus.md b/gcc/config/arm/cirrus.md
index f08da0bdca4..bfd2bb85263 100644
--- a/gcc/config/arm/cirrus.md
+++ b/gcc/config/arm/cirrus.md
@@ -1,4 +1,4 @@
-;; Cirrus EP9312 "Maverick" ARM floating point co-processor description.
+;; CIRRUS EP9312 "Maverick" ARM floating point co-processor description.
;; Copyright (C) 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
;; Contributed by Red Hat.
;; Written by Aldy Hernandez (aldyh@redhat.com)
@@ -379,7 +379,7 @@
return \"#\";
case 1:
case 2:
- return output_move_double (operands);
+ return output_move_double (operands, true, NULL);
case 3: return \"cfmv64lr%?\\t%V0, %Q1\;cfmv64hr%?\\t%V0, %R1\";
case 4: return \"cfmvr64l%?\\t%Q0, %V1\;cfmvr64h%?\\t%R0, %V1\";
@@ -439,7 +439,7 @@
case 0: return \"ldm%?ia\\t%m1, %M0\\t%@ double\";
case 1: return \"stm%?ia\\t%m0, %M1\\t%@ double\";
case 2: return \"#\";
- case 3: case 4: return output_move_double (operands);
+ case 3: case 4: return output_move_double (operands, true, NULL);
case 5: return \"cfcpyd%?\\t%V0, %V1\";
case 6: return \"cfldrd%?\\t%V0, %1\";
case 7: return \"cfmvdlr\\t%V0, %Q1\;cfmvdhr%?\\t%V0, %R1\";
@@ -466,7 +466,7 @@
case 0:
case 1:
case 2:
- return (output_move_double (operands));
+ return (output_move_double (operands, true, NULL));
case 3: return \"cfmv64lr%?\\t%V0, %Q1\;cfmv64hr%?\\t%V0, %R1\";
case 4: return \"cfmvr64l%?\\t%Q0, %V1\;cfmvr64h%?\\t%R0, %V1\";
@@ -522,7 +522,7 @@
{
case 0: return \"ldm%?ia\\t%m1, %M0\\t%@ double\";
case 1: return \"stm%?ia\\t%m0, %M1\\t%@ double\";
- case 2: case 3: case 4: return output_move_double (operands);
+ case 2: case 3: case 4: return output_move_double (operands, true, NULL);
case 5: return \"cfcpyd%?\\t%V0, %V1\";
case 6: return \"cfldrd%?\\t%V0, %1\";
case 7: return \"cfmvdlr\\t%V0, %Q1\;cfmvdhr%?\\t%V0, %R1\";
diff --git a/gcc/config/arm/fpa.md b/gcc/config/arm/fpa.md
index 6e6dd8d43c1..f2113a98458 100644
--- a/gcc/config/arm/fpa.md
+++ b/gcc/config/arm/fpa.md
@@ -567,7 +567,7 @@
case 0: return \"ldm%(ia%)\\t%m1, %M0\\t%@ double\";
case 1: return \"stm%(ia%)\\t%m0, %M1\\t%@ double\";
case 2: return \"#\";
- case 3: case 4: return output_move_double (operands);
+ case 3: case 4: return output_move_double (operands, true, NULL);
case 5: return \"mvf%?d\\t%0, %1\";
case 6: return \"mnf%?d\\t%0, #%N1\";
case 7: return \"ldf%?d\\t%0, %1\";
@@ -657,7 +657,7 @@
default:
case 0: return \"ldm%(ia%)\\t%m1, %M0\\t%@ double\";
case 1: return \"stm%(ia%)\\t%m0, %M1\\t%@ double\";
- case 2: case 3: case 4: return output_move_double (operands);
+ case 2: case 3: case 4: return output_move_double (operands, true, NULL);
case 5: return \"mvf%?d\\t%0, %1\";
case 6: return \"mnf%?d\\t%0, #%N1\";
case 7: return \"ldf%?d\\t%0, %1\";
diff --git a/gcc/config/arm/iwmmxt.md b/gcc/config/arm/iwmmxt.md
index 7f13ae49b9b..bc0b80defc9 100644
--- a/gcc/config/arm/iwmmxt.md
+++ b/gcc/config/arm/iwmmxt.md
@@ -76,7 +76,7 @@
switch (which_alternative)
{
default:
- return output_move_double (operands);
+ return output_move_double (operands, true, NULL);
case 0:
return \"#\";
case 3:
@@ -173,7 +173,7 @@
case 3: return \"tmrrc%?\\t%Q0, %R0, %1\";
case 4: return \"tmcrr%?\\t%0, %Q1, %R1\";
case 5: return \"#\";
- default: return output_move_double (operands);
+ default: return output_move_double (operands, true, NULL);
}"
[(set_attr "predicable" "yes")
(set_attr "length" "4, 4, 4,4,4,8, 8,8")
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index 1f9ea676dec..24dd9419bec 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -190,7 +190,7 @@
case 2: gcc_unreachable ();
case 4: return "vmov\t%Q0, %R0, %P1 @ <mode>";
case 5: return "vmov\t%P0, %Q1, %R1 @ <mode>";
- default: return output_move_double (operands);
+ default: return output_move_double (operands, true, NULL);
}
}
[(set_attr "neon_type" "neon_int_1,*,neon_vmov,*,neon_mrrc,neon_mcr_2_mcrr,*,*,*")
diff --git a/gcc/config/arm/vfp.md b/gcc/config/arm/vfp.md
index 3311ae09e92..991b5174cf8 100644
--- a/gcc/config/arm/vfp.md
+++ b/gcc/config/arm/vfp.md
@@ -150,7 +150,7 @@
case 4:
case 5:
case 6:
- return output_move_double (operands);
+ return output_move_double (operands, true, NULL);
case 7:
return \"fmdrr%?\\t%P0, %Q1, %R1\\t%@ int\";
case 8:
@@ -199,7 +199,7 @@
case 4:
case 5:
case 6:
- return output_move_double (operands);
+ return output_move_double (operands, true, NULL);
case 7:
return \"fmdrr%?\\t%P0, %Q1, %R1\\t%@ int\";
case 8:
@@ -213,10 +213,19 @@
}
"
[(set_attr "type" "*,*,*,*,load2,load2,store2,r_2_f,f_2_r,ffarithd,f_loadd,f_stored")
- (set_attr "length" "4,8,12,16,8,8,8,4,4,4,4,4")
+ (set (attr "length") (cond [(eq_attr "alternative" "1") (const_int 8)
+ (eq_attr "alternative" "2") (const_int 12)
+ (eq_attr "alternative" "3") (const_int 16)
+ (eq_attr "alternative" "4,5,6")
+ (symbol_ref
+ "arm_count_output_move_double_insns (operands) \
+ * 4")]
+ (const_int 4)))
(set_attr "predicable" "yes")
(set_attr "pool_range" "*,*,*,*,1020,4096,*,*,*,*,1020,*")
(set_attr "neg_pool_range" "*,*,*,*,1008,0,*,*,*,*,1008,*")
+ (set (attr "ce_count")
+ (symbol_ref "get_attr_length (insn) / 4"))
(set_attr "arch" "t2,any,any,any,a,t2,any,any,any,any,any,any")]
)
@@ -427,7 +436,7 @@
case 3: case 4:
return output_move_vfp (operands);
case 5: case 6:
- return output_move_double (operands);
+ return output_move_double (operands, true, NULL);
case 7:
if (TARGET_VFP_SINGLE)
return \"fcpys%?\\t%0, %1\;fcpys%?\\t%p0, %p1\";
@@ -473,7 +482,7 @@
case 3: case 4:
return output_move_vfp (operands);
case 5: case 6: case 8:
- return output_move_double (operands);
+ return output_move_double (operands, true, NULL);
case 7:
if (TARGET_VFP_SINGLE)
return \"fcpys%?\\t%0, %1\;fcpys%?\\t%p0, %p1\";
diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
index 76542a87dc0..6bb236cb584 100644
--- a/gcc/config/avr/avr.c
+++ b/gcc/config/avr/avr.c
@@ -2947,15 +2947,17 @@ static RTX_CODE
compare_condition (rtx insn)
{
rtx next = next_real_insn (insn);
- RTX_CODE cond = UNKNOWN;
- if (next && GET_CODE (next) == JUMP_INSN)
+
+ if (next && JUMP_P (next))
{
rtx pat = PATTERN (next);
rtx src = SET_SRC (pat);
- rtx t = XEXP (src, 0);
- cond = GET_CODE (t);
+
+ if (IF_THEN_ELSE == GET_CODE (src))
+ return GET_CODE (XEXP (src, 0));
}
- return cond;
+
+ return UNKNOWN;
}
/* Returns nonzero if INSN is a tst insn that only tests the sign. */
@@ -6046,82 +6048,265 @@ avr_normalize_condition (RTX_CODE condition)
}
}
-/* This function optimizes conditional jumps. */
+/* Helper function for `avr_reorg'. */
+
+static rtx
+avr_compare_pattern (rtx insn)
+{
+ rtx pattern = single_set (insn);
+
+ if (pattern
+ && NONJUMP_INSN_P (insn)
+ && SET_DEST (pattern) == cc0_rtx
+ && GET_CODE (SET_SRC (pattern)) == COMPARE)
+ {
+ return pattern;
+ }
+
+ return NULL_RTX;
+}
+
+/* Helper function for `avr_reorg'. */
+
+/* Expansion of switch/case decision trees leads to code like
+
+ cc0 = compare (Reg, Num)
+ if (cc0 == 0)
+ goto L1
+
+ cc0 = compare (Reg, Num)
+ if (cc0 > 0)
+ goto L2
+
+ The second comparison is superfluous and can be deleted.
+ The second jump condition can be transformed from a
+ "difficult" one to a "simple" one because "cc0 > 0" and
+ "cc0 >= 0" will have the same effect here.
+
+ This function relies on the way switch/case is being expanded
+ as a binary decision tree. For example code see PR 49903.
+
+ Return TRUE if optimization performed.
+ Return FALSE if nothing changed.
+
+ INSN1 is a comparison, i.e. avr_compare_pattern != 0.
+
+ We don't want to do this in text peephole because it is
+ tedious to work out jump offsets there and the second comparison
+ might have been transformed by `avr_reorg'.
+
+ RTL peephole won't do because peephole2 does not scan across
+ basic blocks. */
+
+static bool
+avr_reorg_remove_redundant_compare (rtx insn1)
+{
+ rtx comp1, ifelse1, xcond1, branch1;
+ rtx comp2, ifelse2, xcond2, branch2, insn2;
+ enum rtx_code code;
+ rtx jump, target, cond;
+
+ /* Look out for: compare1 - branch1 - compare2 - branch2 */
+
+ branch1 = next_nonnote_nondebug_insn (insn1);
+ if (!branch1 || !JUMP_P (branch1))
+ return false;
+
+ insn2 = next_nonnote_nondebug_insn (branch1);
+ if (!insn2 || !avr_compare_pattern (insn2))
+ return false;
+
+ branch2 = next_nonnote_nondebug_insn (insn2);
+ if (!branch2 || !JUMP_P (branch2))
+ return false;
+
+ comp1 = avr_compare_pattern (insn1);
+ comp2 = avr_compare_pattern (insn2);
+ xcond1 = single_set (branch1);
+ xcond2 = single_set (branch2);
+
+ if (!comp1 || !comp2
+ || !rtx_equal_p (comp1, comp2)
+ || !xcond1 || SET_DEST (xcond1) != pc_rtx
+ || !xcond2 || SET_DEST (xcond2) != pc_rtx
+ || IF_THEN_ELSE != GET_CODE (SET_SRC (xcond1))
+ || IF_THEN_ELSE != GET_CODE (SET_SRC (xcond2)))
+ {
+ return false;
+ }
+
+ comp1 = SET_SRC (comp1);
+ ifelse1 = SET_SRC (xcond1);
+ ifelse2 = SET_SRC (xcond2);
+
+ /* comp<n> is COMPARE now and ifelse<n> is IF_THEN_ELSE. */
+
+ if (EQ != GET_CODE (XEXP (ifelse1, 0))
+ || !REG_P (XEXP (comp1, 0))
+ || !CONST_INT_P (XEXP (comp1, 1))
+ || XEXP (ifelse1, 2) != pc_rtx
+ || XEXP (ifelse2, 2) != pc_rtx
+ || LABEL_REF != GET_CODE (XEXP (ifelse1, 1))
+ || LABEL_REF != GET_CODE (XEXP (ifelse2, 1))
+ || !COMPARISON_P (XEXP (ifelse2, 0))
+ || cc0_rtx != XEXP (XEXP (ifelse1, 0), 0)
+ || cc0_rtx != XEXP (XEXP (ifelse2, 0), 0)
+ || const0_rtx != XEXP (XEXP (ifelse1, 0), 1)
+ || const0_rtx != XEXP (XEXP (ifelse2, 0), 1))
+ {
+ return false;
+ }
+
+ /* We filtered the insn sequence to look like
+
+ (set (cc0)
+ (compare (reg:M N)
+ (const_int VAL)))
+ (set (pc)
+ (if_then_else (eq (cc0)
+ (const_int 0))
+ (label_ref L1)
+ (pc)))
+
+ (set (cc0)
+ (compare (reg:M N)
+ (const_int VAL)))
+ (set (pc)
+ (if_then_else (CODE (cc0)
+ (const_int 0))
+ (label_ref L2)
+ (pc)))
+ */
+
+ code = GET_CODE (XEXP (ifelse2, 0));
+
+ /* Map GT/GTU to GE/GEU which is easier for AVR.
+ The first two instructions compare/branch on EQ
+ so we may replace the difficult
+
+ if (x == VAL) goto L1;
+ if (x > VAL) goto L2;
+
+ with easy
+
+ if (x == VAL) goto L1;
+ if (x >= VAL) goto L2;
+
+ Similarly, replace LE/LEU by LT/LTU. */
+
+ switch (code)
+ {
+ case EQ:
+ case LT: case LTU:
+ case GE: case GEU:
+ break;
+
+ case LE: case LEU:
+ case GT: case GTU:
+ code = avr_normalize_condition (code);
+ break;
+
+ default:
+ return false;
+ }
+
+ /* Wrap the branches into UNSPECs so they won't be changed or
+ optimized in the remainder. */
+
+ target = XEXP (XEXP (ifelse1, 1), 0);
+ cond = XEXP (ifelse1, 0);
+ jump = emit_jump_insn_after (gen_branch_unspec (target, cond), insn1);
+
+ JUMP_LABEL (jump) = JUMP_LABEL (branch1);
+
+ target = XEXP (XEXP (ifelse2, 1), 0);
+ cond = gen_rtx_fmt_ee (code, VOIDmode, cc0_rtx, const0_rtx);
+ jump = emit_jump_insn_after (gen_branch_unspec (target, cond), insn2);
+
+ JUMP_LABEL (jump) = JUMP_LABEL (branch2);
+
+ /* The comparisons in insn1 and insn2 are exactly the same;
+ insn2 is superfluous so delete it. */
+
+ delete_insn (insn2);
+ delete_insn (branch1);
+ delete_insn (branch2);
+
+ return true;
+}
+
+
+/* Implement `TARGET_MACHINE_DEPENDENT_REORG'. */
+/* Optimize conditional jumps. */
static void
avr_reorg (void)
{
- rtx insn, pattern;
+ rtx insn = get_insns();
- for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ for (insn = next_real_insn (insn); insn; insn = next_real_insn (insn))
{
- if (! (GET_CODE (insn) == INSN
- || GET_CODE (insn) == CALL_INSN
- || GET_CODE (insn) == JUMP_INSN)
- || !single_set (insn))
- continue;
+ rtx pattern = avr_compare_pattern (insn);
+
+ if (!pattern)
+ continue;
- pattern = PATTERN (insn);
+ if (optimize
+ && avr_reorg_remove_redundant_compare (insn))
+ {
+ continue;
+ }
- if (GET_CODE (pattern) == PARALLEL)
- pattern = XVECEXP (pattern, 0, 0);
- if (GET_CODE (pattern) == SET
- && SET_DEST (pattern) == cc0_rtx
- && compare_diff_p (insn))
+ if (compare_diff_p (insn))
{
- if (GET_CODE (SET_SRC (pattern)) == COMPARE)
- {
- /* Now we work under compare insn. */
-
- pattern = SET_SRC (pattern);
- if (true_regnum (XEXP (pattern,0)) >= 0
- && true_regnum (XEXP (pattern,1)) >= 0 )
- {
- rtx x = XEXP (pattern,0);
- rtx next = next_real_insn (insn);
- rtx pat = PATTERN (next);
- rtx src = SET_SRC (pat);
- rtx t = XEXP (src,0);
- PUT_CODE (t, swap_condition (GET_CODE (t)));
- XEXP (pattern,0) = XEXP (pattern,1);
- XEXP (pattern,1) = x;
- INSN_CODE (next) = -1;
- }
- else if (true_regnum (XEXP (pattern, 0)) >= 0
- && XEXP (pattern, 1) == const0_rtx)
- {
- /* This is a tst insn, we can reverse it. */
- rtx next = next_real_insn (insn);
- rtx pat = PATTERN (next);
- rtx src = SET_SRC (pat);
- rtx t = XEXP (src,0);
+ /* Now we work under compare insn with difficult branch. */
+
+ rtx next = next_real_insn (insn);
+ rtx pat = PATTERN (next);
+
+ pattern = SET_SRC (pattern);
+
+ if (true_regnum (XEXP (pattern, 0)) >= 0
+ && true_regnum (XEXP (pattern, 1)) >= 0)
+ {
+ rtx x = XEXP (pattern, 0);
+ rtx src = SET_SRC (pat);
+ rtx t = XEXP (src,0);
+ PUT_CODE (t, swap_condition (GET_CODE (t)));
+ XEXP (pattern, 0) = XEXP (pattern, 1);
+ XEXP (pattern, 1) = x;
+ INSN_CODE (next) = -1;
+ }
+ else if (true_regnum (XEXP (pattern, 0)) >= 0
+ && XEXP (pattern, 1) == const0_rtx)
+ {
+ /* This is a tst insn, we can reverse it. */
+ rtx src = SET_SRC (pat);
+ rtx t = XEXP (src,0);
- PUT_CODE (t, swap_condition (GET_CODE (t)));
- XEXP (pattern, 1) = XEXP (pattern, 0);
- XEXP (pattern, 0) = const0_rtx;
- INSN_CODE (next) = -1;
- INSN_CODE (insn) = -1;
- }
- else if (true_regnum (XEXP (pattern,0)) >= 0
- && GET_CODE (XEXP (pattern,1)) == CONST_INT)
- {
- rtx x = XEXP (pattern,1);
- rtx next = next_real_insn (insn);
- rtx pat = PATTERN (next);
- rtx src = SET_SRC (pat);
- rtx t = XEXP (src,0);
- enum machine_mode mode = GET_MODE (XEXP (pattern, 0));
-
- if (avr_simplify_comparison_p (mode, GET_CODE (t), x))
- {
- XEXP (pattern, 1) = gen_int_mode (INTVAL (x) + 1, mode);
- PUT_CODE (t, avr_normalize_condition (GET_CODE (t)));
- INSN_CODE (next) = -1;
- INSN_CODE (insn) = -1;
- }
- }
- }
- }
+ PUT_CODE (t, swap_condition (GET_CODE (t)));
+ XEXP (pattern, 1) = XEXP (pattern, 0);
+ XEXP (pattern, 0) = const0_rtx;
+ INSN_CODE (next) = -1;
+ INSN_CODE (insn) = -1;
+ }
+ else if (true_regnum (XEXP (pattern, 0)) >= 0
+ && CONST_INT_P (XEXP (pattern, 1)))
+ {
+ rtx x = XEXP (pattern, 1);
+ rtx src = SET_SRC (pat);
+ rtx t = XEXP (src,0);
+ enum machine_mode mode = GET_MODE (XEXP (pattern, 0));
+
+ if (avr_simplify_comparison_p (mode, GET_CODE (t), x))
+ {
+ XEXP (pattern, 1) = gen_int_mode (INTVAL (x) + 1, mode);
+ PUT_CODE (t, avr_normalize_condition (GET_CODE (t)));
+ INSN_CODE (next) = -1;
+ INSN_CODE (insn) = -1;
+ }
+ }
+ }
}
}
diff --git a/gcc/config/avr/avr.md b/gcc/config/avr/avr.md
index 356b5095438..ad0febc333e 100644
--- a/gcc/config/avr/avr.md
+++ b/gcc/config/avr/avr.md
@@ -56,6 +56,7 @@
UNSPEC_FMULS
UNSPEC_FMULSU
UNSPEC_COPYSIGN
+ UNSPEC_IDENTITY
])
(define_c_enum "unspecv"
@@ -3339,16 +3340,36 @@
(define_insn "branch"
[(set (pc)
(if_then_else (match_operator 1 "simple_comparison_operator"
- [(cc0)
- (const_int 0)])
+ [(cc0)
+ (const_int 0)])
(label_ref (match_operand 0 "" ""))
(pc)))]
""
- "*
- return ret_cond_branch (operands[1], avr_jump_mode (operands[0],insn), 0);"
+ {
+ return ret_cond_branch (operands[1], avr_jump_mode (operands[0], insn), 0);
+ }
[(set_attr "type" "branch")
(set_attr "cc" "clobber")])
+
+;; Same as above but wrap SET_SRC so that this branch won't be transformed
+;; or optimized in the remainder.
+
+(define_insn "branch_unspec"
+ [(set (pc)
+ (unspec [(if_then_else (match_operator 1 "simple_comparison_operator"
+ [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc))
+ ] UNSPEC_IDENTITY))]
+ ""
+ {
+ return ret_cond_branch (operands[1], avr_jump_mode (operands[0], insn), 0);
+ }
+ [(set_attr "type" "branch")
+ (set_attr "cc" "none")])
+
;; ****************************************************************
;; AVR does not have following conditional jumps: LE,LEU,GT,GTU.
;; Convert them all to proper jumps.
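
The avr.c comment above describes the back-to-back compare/branch pairs produced when a switch is expanded as a binary decision tree (PR 49903). A source-level example of the kind of code that produces such pairs (assumed shape, for illustration only):

/* A switch expanded as a binary decision tree emits, per tree node,
   something like
       if (x == VAL) goto L1;
       if (x >  VAL) goto L2;
   Because the second test only runs when x != VAL, the pass can drop the
   second compare and branch on the AVR-friendlier ">=" instead of ">".  */
int
classify (unsigned char x)
{
  switch (x)
    {
    case 3:  return 1;
    case 7:  return 2;
    case 11: return 3;
    case 19: return 4;
    default: return 0;
    }
}
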
diff --git a/gcc/config/i386/i386-protos.h b/gcc/config/i386/i386-protos.h
index c3eb150fbb9..7deeae732ae 100644
--- a/gcc/config/i386/i386-protos.h
+++ b/gcc/config/i386/i386-protos.h
@@ -174,6 +174,7 @@ extern void ix86_expand_lfloorceil (rtx, rtx, bool);
extern void ix86_expand_rint (rtx, rtx);
extern void ix86_expand_floorceil (rtx, rtx, bool);
extern void ix86_expand_floorceildf_32 (rtx, rtx, bool);
+extern void ix86_expand_round_sse4 (rtx, rtx);
extern void ix86_expand_round (rtx, rtx);
extern void ix86_expand_rounddf_32 (rtx, rtx);
extern void ix86_expand_trunc (rtx, rtx);
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index fedb2cacc71..fe6ccbeb59c 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -32676,6 +32676,52 @@ ix86_expand_round (rtx operand0, rtx operand1)
emit_move_insn (operand0, res);
}
+
+/* Expand SSE sequence for computing round
+ from OP1 storing into OP0 using sse4 round insn. */
+void
+ix86_expand_round_sse4 (rtx op0, rtx op1)
+{
+ enum machine_mode mode = GET_MODE (op0);
+ rtx e1, e2, e3, res, half, mask;
+ const struct real_format *fmt;
+ REAL_VALUE_TYPE pred_half, half_minus_pred_half;
+ rtx (*gen_round) (rtx, rtx, rtx);
+
+ switch (mode)
+ {
+ case SFmode:
+ gen_round = gen_sse4_1_roundsf2;
+ break;
+ case DFmode:
+ gen_round = gen_sse4_1_rounddf2;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ /* e1 = fabs(op1) */
+ e1 = ix86_expand_sse_fabs (op1, &mask);
+
+ /* load nextafter (0.5, 0.0) */
+ fmt = REAL_MODE_FORMAT (mode);
+ real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
+ REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
+
+ /* e2 = e1 + 0.5 */
+ half = force_reg (mode, const_double_from_real_value (pred_half, mode));
+ e2 = expand_simple_binop (mode, PLUS, e1, half, NULL_RTX, 0, OPTAB_DIRECT);
+
+ /* e3 = trunc(e2) */
+ e3 = gen_reg_rtx (mode);
+ emit_insn (gen_round (e3, e2, GEN_INT (ROUND_TRUNC)));
+
+ /* res = copysign (e3, op1) */
+ res = gen_reg_rtx (mode);
+ ix86_sse_copysign_to_positive (res, e3, op1, mask);
+
+ emit_move_insn (op0, res);
+}
/* Table of valid machine attributes. */
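
ix86_expand_round_sse4 above emits an SSE4.1 truncating-round sequence for round(): add the predecessor of 0.5 to |x|, truncate, then restore the sign. The same math written as plain scalar C, for illustration of the emitted sequence only:

#include <math.h>
#include <stdio.h>

/* Scalar model of the emitted sequence: e1 = fabs(x); e2 = e1 + pred_half;
   e3 = trunc(e2); res = copysign(e3, x).  Using pred_half = nextafter(0.5, 0.0)
   keeps values just below 0.5 (e.g. 0.49999999999999994) from rounding up.  */
static double
round_away_from_zero (double x)
{
  double pred_half = nextafter (0.5, 0.0);
  return copysign (trunc (fabs (x) + pred_half), x);
}

int
main (void)
{
  printf ("%g %g %g\n",
          round_away_from_zero (2.5),                    /* 3  */
          round_away_from_zero (-2.5),                   /* -3 */
          round_away_from_zero (0.49999999999999994));   /* 0  */
  return 0;
}
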
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index e61b0f4a03e..e7ae3970511 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -14394,11 +14394,11 @@
if (SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH
&& !flag_trapping_math)
{
- if (!TARGET_ROUND && optimize_insn_for_size_p ())
- FAIL;
if (TARGET_ROUND)
emit_insn (gen_sse4_1_round<mode>2
(operands[0], operands[1], GEN_INT (ROUND_MXCSR)));
+ else if (optimize_insn_for_size_p ())
+ FAIL;
else
ix86_expand_rint (operand0, operand1);
}
@@ -14431,7 +14431,12 @@
if (SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH
&& !flag_trapping_math && !flag_rounding_math)
{
- if (TARGET_64BIT || (<MODE>mode != DFmode))
+ if (TARGET_ROUND)
+ {
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+ ix86_expand_round_sse4 (operands[0], operands[1]);
+ }
+ else if (TARGET_64BIT || (<MODE>mode != DFmode))
ix86_expand_round (operands[0], operands[1]);
else
ix86_expand_rounddf_32 (operands[0], operands[1]);
@@ -14663,14 +14668,13 @@
&& !flag_trapping_math)"
{
if (SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH
- && !flag_trapping_math
- && (TARGET_ROUND || optimize_insn_for_speed_p ()))
+ && !flag_trapping_math)
{
- if (!TARGET_ROUND && optimize_insn_for_size_p ())
- FAIL;
if (TARGET_ROUND)
emit_insn (gen_sse4_1_round<mode>2
(operands[0], operands[1], GEN_INT (ROUND_FLOOR)));
+ else if (optimize_insn_for_size_p ())
+ FAIL;
else if (TARGET_64BIT || (<MODE>mode != DFmode))
ix86_expand_floorceil (operand0, operand1, true);
else
@@ -14922,8 +14926,7 @@
&& !flag_trapping_math)"
{
if (SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH
- && !flag_trapping_math
- && (TARGET_ROUND || optimize_insn_for_speed_p ()))
+ && !flag_trapping_math)
{
if (TARGET_ROUND)
emit_insn (gen_sse4_1_round<mode>2
@@ -15179,8 +15182,7 @@
&& !flag_trapping_math)"
{
if (SSE_FLOAT_MODE_P (<MODE>mode) && TARGET_SSE_MATH
- && !flag_trapping_math
- && (TARGET_ROUND || optimize_insn_for_speed_p ()))
+ && !flag_trapping_math)
{
if (TARGET_ROUND)
emit_insn (gen_sse4_1_round<mode>2
diff --git a/gcc/config/i386/mingw32.h b/gcc/config/i386/mingw32.h
index 3c835c68690..7cb280eda32 100644
--- a/gcc/config/i386/mingw32.h
+++ b/gcc/config/i386/mingw32.h
@@ -189,7 +189,8 @@ do { \
/* mingw32 uses the -mthreads option to enable thread support. */
#undef GOMP_SELF_SPECS
-#define GOMP_SELF_SPECS "%{fopenmp: -mthreads}"
+#define GOMP_SELF_SPECS "%{fopenmp|ftree-parallelize-loops=*: " \
+ "-mthreads -pthread}"
/* mingw32 atexit function is safe to use in shared libraries. Use it
to register C++ static destructors. */
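In spec syntax, %{a|b: X} substitutes X when either option is present, and "=*" matches any option argument. A hypothetical stand-alone spec in the same style (MY_SELF_SPECS is made up), adding only -pthread:

#define MY_SELF_SPECS "%{fopenmp|ftree-parallelize-loops=*: -pthread}"

With the change above, compiling with -ftree-parallelize-loops=4 on mingw32 now behaves as if -mthreads -pthread had been passed explicitly, just as -fopenmp previously implied -mthreads.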
diff --git a/gcc/config/m32c/m32c-protos.h b/gcc/config/m32c/m32c-protos.h
index e858e150a06..eb242e0018f 100644
--- a/gcc/config/m32c/m32c-protos.h
+++ b/gcc/config/m32c/m32c-protos.h
@@ -66,8 +66,6 @@ int m32c_limit_reload_class (enum machine_mode, int);
int m32c_modes_tieable_p (enum machine_mode, enum machine_mode);
bool m32c_mov_ok (rtx *, enum machine_mode);
char * m32c_output_compare (rtx, rtx *);
-int m32c_preferred_output_reload_class (rtx, int);
-int m32c_preferred_reload_class (rtx, int);
int m32c_prepare_move (rtx *, enum machine_mode);
int m32c_prepare_shift (rtx *, int, int);
int m32c_reg_ok_for_base_p (rtx, int);
diff --git a/gcc/config/m32c/m32c.c b/gcc/config/m32c/m32c.c
index 4aeeb3d9ebb..a967e217135 100644
--- a/gcc/config/m32c/m32c.c
+++ b/gcc/config/m32c/m32c.c
@@ -729,12 +729,16 @@ m32c_regno_ok_for_base_p (int regno)
#define DEBUG_RELOAD 0
-/* Implements PREFERRED_RELOAD_CLASS. In general, prefer general
+/* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
registers of the appropriate size. */
-int
-m32c_preferred_reload_class (rtx x, int rclass)
+
+#undef TARGET_PREFERRED_RELOAD_CLASS
+#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
+
+static reg_class_t
+m32c_preferred_reload_class (rtx x, reg_class_t rclass)
{
- int newclass = rclass;
+ reg_class_t newclass = rclass;
#if DEBUG_RELOAD
fprintf (stderr, "\npreferred_reload_class for %s is ",
@@ -759,7 +763,7 @@ m32c_preferred_reload_class (rtx x, int rclass)
else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
newclass = SI_REGS;
else if (GET_MODE_SIZE (GET_MODE (x)) > 4
- && ~class_contents[rclass][0] & 0x000f)
+ && ! reg_class_subset_p (R03_REGS, rclass))
newclass = DI_REGS;
rclass = reduce_class (rclass, newclass, rclass);
@@ -779,9 +783,13 @@ m32c_preferred_reload_class (rtx x, int rclass)
return rclass;
}
-/* Implements PREFERRED_OUTPUT_RELOAD_CLASS. */
-int
-m32c_preferred_output_reload_class (rtx x, int rclass)
+/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
+
+#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
+#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
+
+static reg_class_t
+m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
return m32c_preferred_reload_class (x, rclass);
}
diff --git a/gcc/config/m32c/m32c.h b/gcc/config/m32c/m32c.h
index e7b388b6f00..8239aa0a3ff 100644
--- a/gcc/config/m32c/m32c.h
+++ b/gcc/config/m32c/m32c.h
@@ -417,8 +417,6 @@ enum reg_class
#define REGNO_OK_FOR_BASE_P(NUM) m32c_regno_ok_for_base_p (NUM)
#define REGNO_OK_FOR_INDEX_P(NUM) 0
-#define PREFERRED_RELOAD_CLASS(X,CLASS) m32c_preferred_reload_class (X, CLASS)
-#define PREFERRED_OUTPUT_RELOAD_CLASS(X,CLASS) m32c_preferred_output_reload_class (X, CLASS)
#define LIMIT_RELOAD_CLASS(MODE,CLASS) \
(enum reg_class) m32c_limit_reload_class (MODE, CLASS)
diff --git a/gcc/config/mmix/mmix.c b/gcc/config/mmix/mmix.c
index f249a66081c..29826b5c1b9 100644
--- a/gcc/config/mmix/mmix.c
+++ b/gcc/config/mmix/mmix.c
@@ -260,7 +260,7 @@ static void mmix_conditional_register_usage (void);
#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mmix_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
-#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS mmix_preferred_reload_class
+#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS mmix_preferred_output_reload_class
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P mmix_legitimate_address_p
diff --git a/gcc/config/spu/spu-protos.h b/gcc/config/spu/spu-protos.h
index cb5cc241539..9485f384f1f 100644
--- a/gcc/config/spu/spu-protos.h
+++ b/gcc/config/spu/spu-protos.h
@@ -76,6 +76,7 @@ extern void spu_builtin_insert (rtx ops[]);
extern void spu_builtin_promote (rtx ops[]);
extern void spu_expand_sign_extend (rtx ops[]);
extern void spu_expand_vector_init (rtx target, rtx vals);
+extern rtx spu_legitimize_reload_address (rtx, enum machine_mode, int, int);
#endif /* RTX_CODE */
extern void spu_init_expanders (void);
diff --git a/gcc/config/spu/spu.c b/gcc/config/spu/spu.c
index c6db6c3b3b0..b8e3fb35da3 100644
--- a/gcc/config/spu/spu.c
+++ b/gcc/config/spu/spu.c
@@ -982,6 +982,27 @@ spu_emit_branch_or_set (int is_set, rtx cmp, rtx operands[])
}
}
+ /* However, if we generate an integer result, performing a reverse test
+ would require an extra negation, so avoid that where possible. */
+ if (GET_CODE (op1) == CONST_INT && is_set == 1)
+ {
+ HOST_WIDE_INT val = INTVAL (op1) + 1;
+ if (trunc_int_for_mode (val, GET_MODE (op0)) == val)
+ switch (code)
+ {
+ case LE:
+ op1 = GEN_INT (val);
+ code = LT;
+ break;
+ case LEU:
+ op1 = GEN_INT (val);
+ code = LTU;
+ break;
+ default:
+ break;
+ }
+ }
+
comp_mode = SImode;
op_mode = GET_MODE (op0);
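A plain C self-check of the constant rewrite above (the numbers are hypothetical; the trunc_int_for_mode guard in the hunk is exactly what rules out the boundary case):

#include <assert.h>
#include <limits.h>

int
main (void)
{
  /* For a signed 8-bit operand, x <= 7 and x < 8 agree everywhere, so the
     comparison code may be weakened from LE to LT with the constant bumped.  */
  for (int x = SCHAR_MIN; x <= SCHAR_MAX; x++)
    assert ((x <= 7) == (x < 8));

  /* x <= SCHAR_MAX has no such rewrite: SCHAR_MAX + 1 does not survive
     truncation to the mode, so the transform is skipped there.  */
  return 0;
}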
@@ -1113,7 +1134,8 @@ spu_emit_branch_or_set (int is_set, rtx cmp, rtx operands[])
if (is_set == 0 && op1 == const0_rtx
&& (GET_MODE (op0) == SImode
- || GET_MODE (op0) == HImode) && scode == SPU_EQ)
+ || GET_MODE (op0) == HImode
+ || GET_MODE (op0) == QImode) && scode == SPU_EQ)
{
/* Don't need to set a register with the result when we are
comparing against zero and branching. */
@@ -3803,8 +3825,14 @@ spu_legitimate_address_p (enum machine_mode mode,
if (GET_CODE (op0) == REG
&& INT_REG_OK_FOR_BASE_P (op0, reg_ok_strict)
&& GET_CODE (op1) == CONST_INT
- && INTVAL (op1) >= -0x2000
- && INTVAL (op1) <= 0x1fff
+ && ((INTVAL (op1) >= -0x2000 && INTVAL (op1) <= 0x1fff)
+ /* If virtual registers are involved, the displacement will
+ change later on anyway, so checking would be premature.
+ Reload will make sure the final displacement after
+ register elimination is OK. */
+ || op0 == arg_pointer_rtx
+ || op0 == frame_pointer_rtx
+ || op0 == virtual_stack_vars_rtx)
&& (!aligned || (INTVAL (op1) & 15) == 0))
return TRUE;
if (GET_CODE (op0) == REG
@@ -3877,6 +3905,45 @@ spu_addr_space_legitimize_address (rtx x, rtx oldx, enum machine_mode mode,
return spu_legitimize_address (x, oldx, mode);
}
+/* Reload reg + const_int for out-of-range displacements. */
+rtx
+spu_legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
+ int opnum, int type)
+{
+ bool removed_and = false;
+
+ if (GET_CODE (ad) == AND
+ && CONST_INT_P (XEXP (ad, 1))
+ && INTVAL (XEXP (ad, 1)) == (HOST_WIDE_INT) - 16)
+ {
+ ad = XEXP (ad, 0);
+ removed_and = true;
+ }
+
+ if (GET_CODE (ad) == PLUS
+ && REG_P (XEXP (ad, 0))
+ && CONST_INT_P (XEXP (ad, 1))
+ && !(INTVAL (XEXP (ad, 1)) >= -0x2000
+ && INTVAL (XEXP (ad, 1)) <= 0x1fff))
+ {
+ /* Unshare the sum. */
+ ad = copy_rtx (ad);
+
+ /* Reload the displacement. */
+ push_reload (XEXP (ad, 1), NULL_RTX, &XEXP (ad, 1), NULL,
+ BASE_REG_CLASS, GET_MODE (ad), VOIDmode, 0, 0,
+ opnum, (enum reload_type) type);
+
+ /* Add back AND for alignment if we stripped it. */
+ if (removed_and)
+ ad = gen_rtx_AND (GET_MODE (ad), ad, GEN_INT (-16));
+
+ return ad;
+ }
+
+ return NULL_RTX;
+}
+
/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
struct attribute_spec.handler. */
static tree
diff --git a/gcc/config/spu/spu.h b/gcc/config/spu/spu.h
index c69cf7efc4e..d89bf49f2d2 100644
--- a/gcc/config/spu/spu.h
+++ b/gcc/config/spu/spu.h
@@ -390,6 +390,17 @@ targetm.resolve_overloaded_builtin = spu_resolve_overloaded_builtin; \
#define MAX_REGS_PER_ADDRESS 2
+#define LEGITIMIZE_RELOAD_ADDRESS(AD, MODE, OPNUM, TYPE, IND, WIN) \
+do { \
+ rtx new_rtx = spu_legitimize_reload_address (AD, MODE, OPNUM, \
+ (int)(TYPE)); \
+ if (new_rtx) \
+ { \
+ (AD) = new_rtx; \
+ goto WIN; \
+ } \
+} while (0)
+
/* Costs */
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index bb6d614fb3c..4d6c353a178 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,34 @@
+2011-08-16 Jason Merrill <jason@redhat.com>
+
+ PR c++/50086
+ * pt.c (unify_pack_expansion): Correct overloaded unification
+ logic.
+
+ * pt.c (instantiate_class_template_1): If DECL_PRESERVE_P is set
+ on a member function or static data member, call mark_used.
+
+ PR c++/50054
+ * typeck2.c (cxx_incomplete_type_diagnostic): Handle
+ init_list_type_node.
+
+2011-08-13 Jason Merrill <jason@redhat.com>
+
+ PR c++/50075
+ * name-lookup.c (local_bindings_p): New.
+ * name-lookup.h: Declare it.
+ * lex.c (unqualified_name_lookup_error): Use it.
+
+ PR c++/50059
+ * error.c (dump_expr): Handle MODIFY_EXPR properly.
+
+ * decl.c (grok_reference_init): Handle constexpr here.
+ * call.c (initialize_reference): Not here.
+
+2011-08-12 David Li <davidxl@google.com>
+
+ * class.c (update_vtable_entry_for_fn): Set
+ LOST_PRIMARY bit properly.
+
2011-08-12 Jason Merrill <jason@redhat.com>
PR c++/50034
diff --git a/gcc/cp/call.c b/gcc/cp/call.c
index e8fb68d833a..d2700cbeac8 100644
--- a/gcc/cp/call.c
+++ b/gcc/cp/call.c
@@ -8820,12 +8820,6 @@ initialize_reference (tree type, tree expr, tree decl, tree *cleanup,
(build_pointer_type (base_conv_type), expr,
complain));
expr = build_nop (type, expr);
- if (DECL_DECLARED_CONSTEXPR_P (decl))
- {
- expr = cxx_constant_value (expr);
- DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl)
- = reduced_constant_expression_p (expr);
- }
}
}
else
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 1db074809c8..c125f05478e 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -4597,6 +4597,12 @@ grok_reference_init (tree decl, tree type, tree init, tree *cleanup)
explicitly); we need to allow the temporary to be initialized
first. */
tmp = initialize_reference (type, init, decl, cleanup, tf_warning_or_error);
+ if (DECL_DECLARED_CONSTEXPR_P (decl))
+ {
+ tmp = cxx_constant_value (tmp);
+ DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl)
+ = reduced_constant_expression_p (tmp);
+ }
if (tmp == error_mark_node)
return NULL_TREE;
diff --git a/gcc/cp/error.c b/gcc/cp/error.c
index d435bbe9fce..598ddf10e5d 100644
--- a/gcc/cp/error.c
+++ b/gcc/cp/error.c
@@ -1868,6 +1868,10 @@ dump_expr (tree t, int flags)
case INIT_EXPR:
case MODIFY_EXPR:
+ dump_binary_op (assignment_operator_name_info[(int)NOP_EXPR].name,
+ t, flags);
+ break;
+
case PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
diff --git a/gcc/cp/lex.c b/gcc/cp/lex.c
index 691a2ec1311..c11e3b31561 100644
--- a/gcc/cp/lex.c
+++ b/gcc/cp/lex.c
@@ -456,7 +456,7 @@ unqualified_name_lookup_error (tree name)
}
/* Prevent repeated error messages by creating a VAR_DECL with
this NAME in the innermost block scope. */
- if (current_function_decl)
+ if (local_bindings_p ())
{
tree decl;
decl = build_decl (input_location,
diff --git a/gcc/cp/name-lookup.c b/gcc/cp/name-lookup.c
index 1afd9edffec..64456b49699 100644
--- a/gcc/cp/name-lookup.c
+++ b/gcc/cp/name-lookup.c
@@ -1608,6 +1608,15 @@ namespace_bindings_p (void)
return b->kind == sk_namespace;
}
+/* True if the innermost non-class scope is a block scope. */
+
+bool
+local_bindings_p (void)
+{
+ cp_binding_level *b = innermost_nonclass_level ();
+ return b->kind < sk_function_parms || b->kind == sk_omp;
+}
+
/* True if the current level needs to have a BLOCK made. */
bool
diff --git a/gcc/cp/name-lookup.h b/gcc/cp/name-lookup.h
index 5974dce3288..a37afdb9b33 100644
--- a/gcc/cp/name-lookup.h
+++ b/gcc/cp/name-lookup.h
@@ -292,6 +292,7 @@ extern bool kept_level_p (void);
extern bool global_bindings_p (void);
extern bool toplevel_bindings_p (void);
extern bool namespace_bindings_p (void);
+extern bool local_bindings_p (void);
extern bool template_parm_scope_p (void);
extern scope_kind innermost_scope_kind (void);
extern cp_binding_level *begin_scope (scope_kind, tree);
diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c
index 10fdceda462..9ab110aa225 100644
--- a/gcc/cp/pt.c
+++ b/gcc/cp/pt.c
@@ -8675,6 +8675,9 @@ instantiate_class_template_1 (tree type)
--processing_template_decl;
set_current_access_from_decl (r);
finish_member_declaration (r);
+ /* Instantiate members marked with attribute used. */
+ if (r != error_mark_node && DECL_PRESERVE_P (r))
+ mark_used (r);
}
else
{
@@ -8724,6 +8727,9 @@ instantiate_class_template_1 (tree type)
/*init_const_expr_p=*/false,
/*asmspec_tree=*/NULL_TREE,
/*flags=*/0);
+ /* Instantiate members marked with attribute used. */
+ if (r != error_mark_node && DECL_PRESERVE_P (r))
+ mark_used (r);
}
else if (TREE_CODE (r) == FIELD_DECL)
{
@@ -15428,7 +15434,6 @@ unify_pack_expansion (tree tparms, tree targs, tree packed_parms,
tree arg = TREE_VEC_ELT (packed_args, i);
tree arg_expr = NULL_TREE;
int arg_strict = strict;
- bool skip_arg_p = false;
if (call_args_p)
{
@@ -15471,19 +15476,15 @@ unify_pack_expansion (tree tparms, tree targs, tree packed_parms,
if (resolve_overloaded_unification
(tparms, targs, parm, arg,
(unification_kind_t) strict,
- sub_strict, explain_p)
- != 0)
- return 1;
- skip_arg_p = true;
+ sub_strict, explain_p))
+ goto unified;
+ return unify_overload_resolution_failure (explain_p, arg);
}
- if (!skip_arg_p)
- {
- arg_expr = arg;
- arg = unlowered_expr_type (arg);
- if (arg == error_mark_node)
- return 1;
- }
+ arg_expr = arg;
+ arg = unlowered_expr_type (arg);
+ if (arg == error_mark_node)
+ return unify_invalid (explain_p);
}
arg_strict = sub_strict;
@@ -15494,16 +15495,14 @@ unify_pack_expansion (tree tparms, tree targs, tree packed_parms,
&parm, &arg, arg_expr);
}
- if (!skip_arg_p)
- {
- /* For deduction from an init-list we need the actual list. */
- if (arg_expr && BRACE_ENCLOSED_INITIALIZER_P (arg_expr))
- arg = arg_expr;
- RECUR_AND_CHECK_FAILURE (tparms, targs, parm, arg, arg_strict,
- explain_p);
- }
+ /* For deduction from an init-list we need the actual list. */
+ if (arg_expr && BRACE_ENCLOSED_INITIALIZER_P (arg_expr))
+ arg = arg_expr;
+ RECUR_AND_CHECK_FAILURE (tparms, targs, parm, arg, arg_strict,
+ explain_p);
}
+ unified:
/* For each parameter pack, collect the deduced value. */
for (pack = packs; pack; pack = TREE_CHAIN (pack))
{
diff --git a/gcc/cp/typeck2.c b/gcc/cp/typeck2.c
index 07881387eba..79aa354ad02 100644
--- a/gcc/cp/typeck2.c
+++ b/gcc/cp/typeck2.c
@@ -450,6 +450,12 @@ cxx_incomplete_type_diagnostic (const_tree value, const_tree type,
break;
case LANG_TYPE:
+ if (type == init_list_type_node)
+ {
+ emit_diagnostic (diag_kind, input_location, 0,
+ "invalid use of brace-enclosed initializer list");
+ break;
+ }
gcc_assert (type == unknown_type_node);
if (value && TREE_CODE (value) == COMPONENT_REF)
goto bad_member;
diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
index 49a81253593..786c18ddae2 100644
--- a/gcc/doc/extend.texi
+++ b/gcc/doc/extend.texi
@@ -3647,6 +3647,10 @@ for the function even if it appears that the function is not referenced.
This is useful, for example, when the function is referenced only in
inline assembly.
+When applied to a member function of a C++ class template, the
+attribute also means that the function will be instantiated if the
+class itself is instantiated.
+
@item version_id
@cindex @code{version_id} attribute
This IA64 HP-UX attribute, attached to a global variable or function, renames a
@@ -4457,6 +4461,10 @@ variable.
This attribute, attached to a variable, means that the variable must be
emitted even if it appears that the variable is not referenced.
+When applied to a static data member of a C++ class template, the
+attribute also means that the member will be instantiated if the
+class itself is instantiated.
+
@item vector_size (@var{bytes})
This attribute specifies the vector size for the variable, measured in
bytes. For example, the declaration:
diff --git a/gcc/explow.c b/gcc/explow.c
index ed2f621f7af..ecf29f2186c 100644
--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -1380,6 +1380,9 @@ allocate_dynamic_stack_space (rtx size, unsigned size_align,
else if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
probe_stack_range (STACK_CHECK_PROTECT, size);
+ /* Don't let anti_adjust_stack emit notes. */
+ suppress_reg_args_size = true;
+
/* Perform the required allocation from the stack. Some systems do
this differently than simply incrementing/decrementing from the
stack pointer, such as acquiring the space by calling malloc(). */
@@ -1430,7 +1433,6 @@ allocate_dynamic_stack_space (rtx size, unsigned size_align,
}
saved_stack_pointer_delta = stack_pointer_delta;
- suppress_reg_args_size = true;
if (flag_stack_check && STACK_CHECK_MOVING_SP)
anti_adjust_stack_and_probe (size, false);
@@ -1441,13 +1443,14 @@ allocate_dynamic_stack_space (rtx size, unsigned size_align,
The constant size alloca should preserve
crtl->preferred_stack_boundary alignment. */
stack_pointer_delta = saved_stack_pointer_delta;
- suppress_reg_args_size = false;
#ifdef STACK_GROWS_DOWNWARD
emit_move_insn (target, virtual_stack_dynamic_rtx);
#endif
}
+ suppress_reg_args_size = false;
+
/* Finish up the split stack handling. */
if (final_label != NULL_RTX)
{
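For orientation, a hypothetical example of code that reaches allocate_dynamic_stack_space, where the REG_ARGS_SIZE bookkeeping moved above matters because a call is issued while the stack pointer is adjusted (alloca.h is a glibc header; a C99 VLA exercises the same path):

#include <alloca.h>
#include <string.h>

void
fill (unsigned n)
{
  char *p = alloca (n);   /* dynamic stack allocation */
  memset (p, 0, n);       /* call made after the adjustment */
}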
diff --git a/gcc/expr.c b/gcc/expr.c
index 997eb3e0223..98e6cff2f20 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -7231,9 +7231,7 @@ expand_expr_addr_expr_1 (tree exp, rtx target, enum machine_mode tmode,
{
tree tem = TREE_OPERAND (exp, 0);
if (!integer_zerop (TREE_OPERAND (exp, 1)))
- tem = build2 (POINTER_PLUS_EXPR, TREE_TYPE (TREE_OPERAND (exp, 1)),
- tem,
- double_int_to_tree (sizetype, mem_ref_offset (exp)));
+ tem = fold_build_pointer_plus (tem, TREE_OPERAND (exp, 1));
return expand_expr (tem, target, tmode, modifier);
}
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 6abce5924e9..a73b1e6a9ba 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -4218,8 +4218,7 @@ build_range_check (location_t loc, tree type, tree exp, int in_p,
{
if (value != 0 && !TREE_OVERFLOW (value))
{
- low = fold_convert_loc (loc, sizetype, low);
- low = fold_build1_loc (loc, NEGATE_EXPR, sizetype, low);
+ low = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (low), low);
return build_range_check (loc, type,
fold_build_pointer_plus_loc (loc, exp, low),
1, build_int_cst (etype, 0), value);
@@ -7862,10 +7861,8 @@ fold_unary_loc (location_t loc, enum tree_code code, tree type, tree op0)
tree arg00 = TREE_OPERAND (arg0, 0);
tree arg01 = TREE_OPERAND (arg0, 1);
- return fold_build2_loc (loc,
- TREE_CODE (arg0), type,
- fold_convert_loc (loc, type, arg00),
- fold_convert_loc (loc, sizetype, arg01));
+ return fold_build_pointer_plus_loc
+ (loc, fold_convert_loc (loc, type, arg00), arg01);
}
/* Convert (T1)(~(T2)X) into ~(T1)X if T1 and T2 are integral types
@@ -8446,6 +8443,7 @@ maybe_canonicalize_comparison_1 (location_t loc, enum tree_code code, tree type,
cst0, build_int_cst (TREE_TYPE (cst0), 1));
if (code0 != INTEGER_CST)
t = fold_build2_loc (loc, code0, TREE_TYPE (arg0), TREE_OPERAND (arg0, 0), t);
+ t = fold_convert (TREE_TYPE (arg1), t);
/* If swapping might yield to a more canonical form, do so. */
if (swap)
@@ -8935,7 +8933,7 @@ fold_comparison (location_t loc, enum tree_code code, tree type,
return fold_build2_loc (loc, cmp_code, type, variable1, const2);
}
- tem = maybe_canonicalize_comparison (loc, code, type, op0, op1);
+ tem = maybe_canonicalize_comparison (loc, code, type, arg0, arg1);
if (tem)
return tem;
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index 2ead4fadebb..d7f4b6d37d5 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,30 @@
+2011-08-17 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/50070
+ * resolve.c (resolve_fl_variable): Reject non-constant character lengths
+ in COMMON variables.
+
+2011-08-16 Tobias Burnus <burnus@net-b.de>
+ Dominique Dhumieres <dominiq@lps.ens.fr>
+
+ PR fortran/50094
+ * resolve.c (resolve_symbol): Fix stupid typo.
+
+2011-08-15 Tobias Burnus <burnus@net-b.de>
+
+ * resolve.c (resolve_symbol): Fix coarray result-var check.
+
+2011-08-14 Steven G. Kargl <kargl@gcc.gnu.org>
+
+ * module.c (use_iso_fortran_env_module): Spell 'referrenced' correctly.
+
+2011-08-14 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/50073
+ * decl.c (check_function_name): New function, separated off from
+ 'variable_decl' and slightly extended.
+ (variable_decl,attr_decl1): Call it.
+
2011-08-08 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
* Make-lang.in (gfortran$(exeext)): Add $(EXTRA_GCC_LIBS).
diff --git a/gcc/fortran/decl.c b/gcc/fortran/decl.c
index 661bb14486f..18e2651c81d 100644
--- a/gcc/fortran/decl.c
+++ b/gcc/fortran/decl.c
@@ -1729,6 +1729,30 @@ match_pointer_init (gfc_expr **init, int procptr)
}
+static gfc_try
+check_function_name (char *name)
+{
+ /* In functions that have a RESULT variable defined, the function name always
+ refers to function calls. Therefore, the name is not allowed to appear in
+ specification statements. When checking this, be careful about
+ 'hidden' procedure pointer results ('ppr@'). */
+
+ if (gfc_current_state () == COMP_FUNCTION)
+ {
+ gfc_symbol *block = gfc_current_block ();
+ if (block && block->result && block->result != block
+ && strcmp (block->result->name, "ppr@") != 0
+ && strcmp (block->name, name) == 0)
+ {
+ gfc_error ("Function name '%s' not allowed at %C", name);
+ return FAILURE;
+ }
+ }
+
+ return SUCCESS;
+}
+
+
/* Match a variable name with an optional initializer. When this
subroutine is called, a variable is expected to be parsed next.
Depending on what is happening at the moment, updates either the
@@ -1935,17 +1959,9 @@ variable_decl (int elem)
goto cleanup;
}
}
-
- /* In functions that have a RESULT variable defined, the function
- name always refers to function calls. Therefore, the name is
- not allowed to appear in specification statements. */
- if (gfc_current_state () == COMP_FUNCTION
- && gfc_current_block () != NULL
- && gfc_current_block ()->result != NULL
- && gfc_current_block ()->result != gfc_current_block ()
- && strcmp (gfc_current_block ()->name, name) == 0)
+
+ if (check_function_name (name) == FAILURE)
{
- gfc_error ("Function name '%s' not allowed at %C", name);
m = MATCH_ERROR;
goto cleanup;
}
@@ -5995,6 +6011,12 @@ attr_decl1 (void)
if (find_special (name, &sym, false))
return MATCH_ERROR;
+ if (check_function_name (name) == FAILURE)
+ {
+ m = MATCH_ERROR;
+ goto cleanup;
+ }
+
var_locus = gfc_current_locus;
/* Deal with possible array specification for certain attributes. */
diff --git a/gcc/fortran/module.c b/gcc/fortran/module.c
index b62ad8d08e0..aef340464c5 100644
--- a/gcc/fortran/module.c
+++ b/gcc/fortran/module.c
@@ -5577,7 +5577,7 @@ use_iso_fortran_env_module (void)
u->found = 1;
if (gfc_notify_std (symbol[i].standard, "The symbol '%s', "
- "referrenced at %C, is not in the selected "
+ "referenced at %C, is not in the selected "
"standard", symbol[i].name) == FAILURE)
continue;
diff --git a/gcc/fortran/resolve.c b/gcc/fortran/resolve.c
index 6245666f620..7557ab8891d 100644
--- a/gcc/fortran/resolve.c
+++ b/gcc/fortran/resolve.c
@@ -10169,15 +10169,22 @@ resolve_fl_variable (gfc_symbol *sym, int mp_flag)
if (!gfc_is_constant_expr (e)
&& !(e->expr_type == EXPR_VARIABLE
- && e->symtree->n.sym->attr.flavor == FL_PARAMETER)
- && sym->ns->proc_name
- && (sym->ns->proc_name->attr.flavor == FL_MODULE
- || sym->ns->proc_name->attr.is_main_program)
- && !sym->attr.use_assoc)
- {
- gfc_error ("'%s' at %L must have constant character length "
- "in this context", sym->name, &sym->declared_at);
- return FAILURE;
+ && e->symtree->n.sym->attr.flavor == FL_PARAMETER))
+ {
+ if (!sym->attr.use_assoc && sym->ns->proc_name
+ && (sym->ns->proc_name->attr.flavor == FL_MODULE
+ || sym->ns->proc_name->attr.is_main_program))
+ {
+ gfc_error ("'%s' at %L must have constant character length "
+ "in this context", sym->name, &sym->declared_at);
+ return FAILURE;
+ }
+ if (sym->attr.in_common)
+ {
+ gfc_error ("COMMON variable '%s' at %L must have constant "
+ "character length", sym->name, &sym->declared_at);
+ return FAILURE;
+ }
}
}
@@ -12246,29 +12253,41 @@ resolve_symbol (gfc_symbol *sym)
/* F2008, C542. */
if (sym->ts.type == BT_DERIVED && sym->attr.dummy
&& sym->attr.intent == INTENT_OUT && sym->attr.lock_comp)
- gfc_error ("Dummy argument '%s' at %L of LOCK_TYPE shall not be "
- "INTENT(OUT)", sym->name, &sym->declared_at);
+ {
+ gfc_error ("Dummy argument '%s' at %L of LOCK_TYPE shall not be "
+ "INTENT(OUT)", sym->name, &sym->declared_at);
+ return;
+ }
- /* F2008, C526. */
+ /* F2008, C525. */
if (((sym->ts.type == BT_DERIVED && sym->ts.u.derived->attr.coarray_comp)
|| sym->attr.codimension)
- && sym->attr.result)
- gfc_error ("Function result '%s' at %L shall not be a coarray or have "
- "a coarray component", sym->name, &sym->declared_at);
+ && (sym->attr.result || sym->result == sym))
+ {
+ gfc_error ("Function result '%s' at %L shall not be a coarray or have "
+ "a coarray component", sym->name, &sym->declared_at);
+ return;
+ }
/* F2008, C524. */
if (sym->attr.codimension && sym->ts.type == BT_DERIVED
&& sym->ts.u.derived->ts.is_iso_c)
- gfc_error ("Variable '%s' at %L of TYPE(C_PTR) or TYPE(C_FUNPTR) "
- "shall not be a coarray", sym->name, &sym->declared_at);
+ {
+ gfc_error ("Variable '%s' at %L of TYPE(C_PTR) or TYPE(C_FUNPTR) "
+ "shall not be a coarray", sym->name, &sym->declared_at);
+ return;
+ }
/* F2008, C525. */
if (sym->ts.type == BT_DERIVED && sym->ts.u.derived->attr.coarray_comp
&& (sym->attr.codimension || sym->attr.pointer || sym->attr.dimension
|| sym->attr.allocatable))
- gfc_error ("Variable '%s' at %L with coarray component "
- "shall be a nonpointer, nonallocatable scalar",
- sym->name, &sym->declared_at);
+ {
+ gfc_error ("Variable '%s' at %L with coarray component "
+ "shall be a nonpointer, nonallocatable scalar",
+ sym->name, &sym->declared_at);
+ return;
+ }
/* F2008, C526. The function-result case was handled above. */
if (sym->attr.codimension
@@ -12277,32 +12296,46 @@ resolve_symbol (gfc_symbol *sym)
|| sym->ns->proc_name->attr.flavor == FL_MODULE
|| sym->ns->proc_name->attr.is_main_program
|| sym->attr.function || sym->attr.result || sym->attr.use_assoc))
- gfc_error ("Variable '%s' at %L is a coarray and is not ALLOCATABLE, SAVE "
- "nor a dummy argument", sym->name, &sym->declared_at);
+ {
+ gfc_error ("Variable '%s' at %L is a coarray and is not ALLOCATABLE, SAVE "
+ "nor a dummy argument", sym->name, &sym->declared_at);
+ return;
+ }
/* F2008, C528. */ /* FIXME: sym->as check due to PR 43412. */
else if (sym->attr.codimension && !sym->attr.allocatable
&& sym->as && sym->as->cotype == AS_DEFERRED)
- gfc_error ("Coarray variable '%s' at %L shall not have codimensions with "
- "deferred shape", sym->name, &sym->declared_at);
+ {
+ gfc_error ("Coarray variable '%s' at %L shall not have codimensions with "
+ "deferred shape", sym->name, &sym->declared_at);
+ return;
+ }
else if (sym->attr.codimension && sym->attr.allocatable
&& (sym->as->type != AS_DEFERRED || sym->as->cotype != AS_DEFERRED))
- gfc_error ("Allocatable coarray variable '%s' at %L must have "
- "deferred shape", sym->name, &sym->declared_at);
-
+ {
+ gfc_error ("Allocatable coarray variable '%s' at %L must have "
+ "deferred shape", sym->name, &sym->declared_at);
+ return;
+ }
/* F2008, C541. */
if (((sym->ts.type == BT_DERIVED && sym->ts.u.derived->attr.coarray_comp)
|| (sym->attr.codimension && sym->attr.allocatable))
&& sym->attr.dummy && sym->attr.intent == INTENT_OUT)
- gfc_error ("Variable '%s' at %L is INTENT(OUT) and can thus not be an "
- "allocatable coarray or have coarray components",
- sym->name, &sym->declared_at);
+ {
+ gfc_error ("Variable '%s' at %L is INTENT(OUT) and can thus not be an "
+ "allocatable coarray or have coarray components",
+ sym->name, &sym->declared_at);
+ return;
+ }
if (sym->attr.codimension && sym->attr.dummy
&& sym->ns->proc_name && sym->ns->proc_name->attr.is_bind_c)
- gfc_error ("Coarray dummy variable '%s' at %L not allowed in BIND(C) "
- "procedure '%s'", sym->name, &sym->declared_at,
- sym->ns->proc_name->name);
+ {
+ gfc_error ("Coarray dummy variable '%s' at %L not allowed in BIND(C) "
+ "procedure '%s'", sym->name, &sym->declared_at,
+ sym->ns->proc_name->name);
+ return;
+ }
switch (sym->attr.flavor)
{
diff --git a/gcc/ggc.h b/gcc/ggc.h
index 7f2144c0f12..30eca66c302 100644
--- a/gcc/ggc.h
+++ b/gcc/ggc.h
@@ -1,7 +1,7 @@
/* Garbage collection for the GNU compiler.
Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007,
- 2008, 2009, 2010 Free Software Foundation, Inc.
+ 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
This file is part of GCC.
@@ -266,9 +266,9 @@ extern struct alloc_zone tree_zone;
extern struct alloc_zone tree_id_zone;
#define ggc_alloc_rtvec_sized(NELT) \
- (ggc_alloc_zone_vec_rtvec_def (sizeof (rtx), \
- sizeof (struct rtvec_def) + ((NELT) - 1), \
- &rtl_zone))
+ ggc_alloc_zone_rtvec_def (sizeof (struct rtvec_def) \
+ + ((NELT) - 1) * sizeof (rtx), \
+ &rtl_zone)
#if defined (GGC_ZONE) && !defined (GENERATOR_FILE)
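A stand-alone model of the size request the corrected macro now makes (the struct is illustrative, not GCC's rtvec_def): a header that already embeds one element slot needs sizeof (header) plus NELT - 1 further element-sized slots:

#include <stdio.h>

struct vec { int num_elem; void *elem[1]; };   /* illustrative layout */

int
main (void)
{
  unsigned nelt = 4;
  size_t bytes = sizeof (struct vec) + (nelt - 1) * sizeof (void *);
  printf ("%zu bytes for %u elements\n", bytes, nelt);
  return 0;
}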
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index cd988b9c1db..12b2d4e4a4b 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -1436,7 +1436,7 @@ gimple_adjust_this_by_delta (gimple_stmt_iterator *gsi, tree delta)
tree parm, tmp;
gimple new_stmt;
- delta = fold_convert (sizetype, delta);
+ delta = convert_to_ptrofftype (delta);
gcc_assert (gimple_call_num_args (call_stmt) >= 1);
parm = gimple_call_arg (call_stmt, 0);
gcc_assert (POINTER_TYPE_P (TREE_TYPE (parm)));
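Based on its uses throughout this patch, convert_to_ptrofftype converts an offset to the type expected as the second operand of a POINTER_PLUS_EXPR before the pointer arithmetic is built. A hedged sketch of the new tree.h helpers (the bodies are an assumption, not quoted from the patch; on most targets the pointer-offset type is sizetype):

static inline tree
convert_to_ptrofftype_loc (location_t loc, tree t)
{
  return fold_convert_loc (loc, sizetype, t);
}
#define convert_to_ptrofftype(t) convert_to_ptrofftype_loc (UNKNOWN_LOCATION, t)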
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index 2668aa6e032..85033a99865 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -2208,7 +2208,7 @@ gimplify_self_mod_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
/* For POINTERs increment, use POINTER_PLUS_EXPR. */
if (POINTER_TYPE_P (TREE_TYPE (lhs)))
{
- rhs = fold_convert_loc (loc, sizetype, rhs);
+ rhs = convert_to_ptrofftype_loc (loc, rhs);
if (arith_code == MINUS_EXPR)
rhs = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (rhs), rhs);
arith_code = POINTER_PLUS_EXPR;
diff --git a/gcc/go/gofrontend/lex.cc b/gcc/go/gofrontend/lex.cc
index 9f26911a16c..167c7ddffc0 100644
--- a/gcc/go/gofrontend/lex.cc
+++ b/gcc/go/gofrontend/lex.cc
@@ -518,9 +518,7 @@ Lex::require_line()
source_location
Lex::location() const
{
- source_location location;
- LINEMAP_POSITION_FOR_COLUMN(location, line_table, this->lineoff_ + 1);
- return location;
+ return linemap_position_for_column (line_table, this->lineoff_ + 1);
}
// Get a location slightly before the current one. This is used for
@@ -529,9 +527,7 @@ Lex::location() const
source_location
Lex::earlier_location(int chars) const
{
- source_location location;
- LINEMAP_POSITION_FOR_COLUMN(location, line_table, this->lineoff_ + 1 - chars);
- return location;
+ return linemap_position_for_column (line_table, this->lineoff_ + 1 - chars);
}
// Get the next token.
diff --git a/gcc/graphite-clast-to-gimple.c b/gcc/graphite-clast-to-gimple.c
index 5f7a7479e7a..abf88778f9e 100644
--- a/gcc/graphite-clast-to-gimple.c
+++ b/gcc/graphite-clast-to-gimple.c
@@ -346,7 +346,7 @@ clast_to_gcc_expression (tree type, struct clast_expr *e, ivs_params_p ip)
tree name = clast_name_to_gcc (t->var, ip);
if (POINTER_TYPE_P (TREE_TYPE (name)) != POINTER_TYPE_P (type))
- name = fold_convert (sizetype, name);
+ name = convert_to_ptrofftype (name);
name = fold_convert (type, name);
return name;
@@ -357,7 +357,7 @@ clast_to_gcc_expression (tree type, struct clast_expr *e, ivs_params_p ip)
tree name = clast_name_to_gcc (t->var, ip);
if (POINTER_TYPE_P (TREE_TYPE (name)) != POINTER_TYPE_P (type))
- name = fold_convert (sizetype, name);
+ name = convert_to_ptrofftype (name);
name = fold_convert (type, name);
@@ -369,7 +369,7 @@ clast_to_gcc_expression (tree type, struct clast_expr *e, ivs_params_p ip)
tree cst = gmp_cst_to_tree (type, t->val);
if (POINTER_TYPE_P (TREE_TYPE (name)) != POINTER_TYPE_P (type))
- name = fold_convert (sizetype, name);
+ name = convert_to_ptrofftype (name);
name = fold_convert (type, name);
@@ -1064,7 +1064,7 @@ graphite_create_new_loop_guard (edge entry_edge, struct clast_for *stmt,
else
{
tree one = (POINTER_TYPE_P (*type)
- ? size_one_node
+ ? convert_to_ptrofftype (integer_one_node)
: fold_convert (*type, integer_one_node));
/* Adding +1 and using LT_EXPR helps with loop latches that have a
loop iteration count of "PARAMETER - 1". For PARAMETER == 0 this becomes
diff --git a/gcc/graphite-sese-to-poly.c b/gcc/graphite-sese-to-poly.c
index 7e23c9d69de..206df46bd19 100644
--- a/gcc/graphite-sese-to-poly.c
+++ b/gcc/graphite-sese-to-poly.c
@@ -3219,9 +3219,6 @@ rewrite_commutative_reductions_out_of_ssa (scop_p scop)
}
}
-/* Java does not initialize long_long_integer_type_node. */
-#define my_long_long (long_long_integer_type_node ? long_long_integer_type_node : ssizetype)
-
/* Can all ivs be represented by a signed integer?
As CLooG might generate negative values in its expressions, signed loop ivs
are required in the backend. */
@@ -3246,7 +3243,7 @@ scop_ivs_can_be_represented (scop_p scop)
tree type = TREE_TYPE (res);
if (TYPE_UNSIGNED (type)
- && TYPE_PRECISION (type) >= TYPE_PRECISION (my_long_long))
+ && TYPE_PRECISION (type) >= TYPE_PRECISION (long_long_integer_type_node))
return false;
}
}
@@ -3254,8 +3251,6 @@ scop_ivs_can_be_represented (scop_p scop)
return true;
}
-#undef my_long_long
-
/* Builds the polyhedral representation for a SESE region. */
void
diff --git a/gcc/stmt.c b/gcc/stmt.c
index 2fb4b18dd55..be21a5708cf 100644
--- a/gcc/stmt.c
+++ b/gcc/stmt.c
@@ -2016,10 +2016,13 @@ expand_stack_save (void)
void
expand_stack_restore (tree var)
{
- rtx sa = expand_normal (var);
+ rtx prev, sa = expand_normal (var);
sa = convert_memory_address (Pmode, sa);
+
+ prev = get_last_insn ();
emit_stack_restore (SAVE_BLOCK, sa);
+ fixup_args_size_notes (prev, get_last_insn (), 0);
}
/* Do the insertion of a case label into case_list. The labels are
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 65097a3abde..955764bd935 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,52 @@
+2011-08-17 Tom de Vries <tom@codesourcery.com>
+
+ PR target/43597
+ * gcc.target/arm/pr43597.c: New test.
+
+2011-08-17 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/50070
+ * gfortran.dg/common_13.f90: New.
+
+2011-08-16 Jason Merrill <jason@redhat.com>
+
+ PR c++/50086
+ * g++.dg/cpp0x/variadic-unresolved.C: New.
+
+ * g++.old-deja/g++.brendan/README: Add R.I.P.
+
+ * g++.dg/ext/attr-used-1.C: New.
+
+ PR c++/50054
+ * g++.dg/cpp0x/initlist57.C: New.
+
+2011-08-15 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR testsuite/50085
+ * g++.dg/opt/life1.C: Only run on Linux.
+
+2011-08-15 Tobias Burnus <burnus@net-b.de>
+
+ * gfortran.dg/coarray_26.f90: New.
+
+2011-08-15 Hans-Peter Nilsson <hp@axis.com>
+
+ * gcc.dg/tree-ssa/vrp61.c: Use -fdump-tree-vrp1-nouid instead of
+ -fdump-tree-vrp1.
+
+2011-08-14 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/50073
+ * gfortran.dg/func_result_7.f90: New.
+
+2011-08-13 Jason Merrill <jason@redhat.com>
+
+ PR c++/50075
+ * g++.dg/cpp0x/decltype32.C: New.
+
+ PR c++/50059
+ * g++.dg/diagnostic/expr1.C: New.
+
2011-08-12 David Li <davidxl@google.com>
* g++.dg/abi/vbase15.C: New test.
diff --git a/gcc/testsuite/g++.dg/cpp0x/decltype32.C b/gcc/testsuite/g++.dg/cpp0x/decltype32.C
new file mode 100644
index 00000000000..66731cc947d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/decltype32.C
@@ -0,0 +1,12 @@
+// PR c++/50075
+// { dg-options -std=c++0x }
+
+template <typename T>
+auto make_array(const T& il) -> // { dg-error "not declared" }
+decltype(make_array(il))
+{ }
+
+int main()
+{
+ int z = make_array(1); // { dg-error "no match" }
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/initlist57.C b/gcc/testsuite/g++.dg/cpp0x/initlist57.C
new file mode 100644
index 00000000000..d945a468988
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/initlist57.C
@@ -0,0 +1,8 @@
+// PR c++/50054
+// { dg-options -std=c++0x }
+
+void g( const int& (a)[1] ) {} // { dg-error "array of references" }
+
+int main () {
+ g( { 1, 2 } ); // { dg-error "initializer list" }
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/variadic-unresolved.C b/gcc/testsuite/g++.dg/cpp0x/variadic-unresolved.C
new file mode 100644
index 00000000000..a8463de1980
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/variadic-unresolved.C
@@ -0,0 +1,12 @@
+// PR c++/50086
+// { dg-options -std=c++0x }
+
+template<typename T> void tfun();
+template<typename T> void fun1(T);
+template<typename... Types> void fun2(Types... args);
+
+int main()
+{
+ fun1(tfun<int>); // ok
+ fun2(tfun<int>); // error: unresolved overloaded function type
+}
diff --git a/gcc/testsuite/g++.dg/diagnostic/expr1.C b/gcc/testsuite/g++.dg/diagnostic/expr1.C
new file mode 100644
index 00000000000..5651030d4ca
--- /dev/null
+++ b/gcc/testsuite/g++.dg/diagnostic/expr1.C
@@ -0,0 +1,9 @@
+// PR c++/50059
+
+int i;
+struct A { };
+void f(A);
+void g()
+{
+ f(i = 0); // { dg-error "i = 0" }
+}
diff --git a/gcc/testsuite/g++.dg/ext/attr-used-1.C b/gcc/testsuite/g++.dg/ext/attr-used-1.C
new file mode 100644
index 00000000000..6754c7ffb50
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ext/attr-used-1.C
@@ -0,0 +1,17 @@
+// Attribute used on a member function or static data member
+// of a template should cause them to be instantiated along
+// with the class itself.
+
+// { dg-final { scan-assembler "_ZN1AIiE1fEv" } }
+// { dg-final { scan-assembler "_ZN1AIiE1tE" } }
+
+template <class T> struct A
+{
+ void f() __attribute ((used));
+ static T t __attribute ((used));
+};
+
+template <class T> void A<T>::f() { }
+template <class T> T A<T>::t;
+
+A<int> a;
diff --git a/gcc/testsuite/g++.dg/opt/life1.C b/gcc/testsuite/g++.dg/opt/life1.C
index 42fd7444693..dd840f9abec 100644
--- a/gcc/testsuite/g++.dg/opt/life1.C
+++ b/gcc/testsuite/g++.dg/opt/life1.C
@@ -1,6 +1,6 @@
// This testcase did not set up the pic register on IA-32 due
// to bug in calculate_global_regs_live EH edge handling.
-// { dg-do compile { target { { i?86-*-* x86_64-*-* } && ia32 } } }
+// { dg-do compile { target { { i?86-*-linux* x86_64-*-linux* } && ia32 } } }
// { dg-require-effective-target fpic }
// { dg-options "-O2 -fPIC" }
diff --git a/gcc/testsuite/g++.old-deja/g++.brendan/README b/gcc/testsuite/g++.old-deja/g++.brendan/README
index 93febfb3517..c547c1bdda2 100644
--- a/gcc/testsuite/g++.old-deja/g++.brendan/README
+++ b/gcc/testsuite/g++.old-deja/g++.brendan/README
@@ -1,3 +1,4 @@
+R.I.P. Brendan Patrick Kehoe, December 3, 1970 - July 19, 2011.
abstract - abstract functions
alignof - gcc alignof builtin
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp61.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp61.c
index 93bcbc9151d..33eb44bb1f5 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp61.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp61.c
@@ -1,5 +1,5 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -fdump-tree-vrp1" } */
+/* { dg-options "-O2 -fdump-tree-vrp1-nouid" } */
int f (int x, int y)
{
diff --git a/gcc/testsuite/gcc.target/arm/pr43597.c b/gcc/testsuite/gcc.target/arm/pr43597.c
new file mode 100644
index 00000000000..af382ba72df
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/pr43597.c
@@ -0,0 +1,28 @@
+/* { dg-do assemble } */
+/* { dg-options "-Os -save-temps -mthumb" } */
+/* { dg-require-effective-target arm_thumb2_ok } */
+
+extern int bar ();
+extern void bar2 (int);
+
+int
+foo4 ()
+{
+ int result = 0;
+ int f = -1;
+ f = bar ();
+ if (f < 0)
+ {
+ result = 1;
+ goto bail;
+ }
+ bar ();
+ bail:
+ bar2 (f);
+ return result;
+}
+
+/* { dg-final { scan-assembler-times "sub" 1 } } */
+/* { dg-final { scan-assembler-times "cmp" 0 } } */
+/* { dg-final { object-size text <= 30 } } */
+/* { dg-final { cleanup-saved-temps "pr43597" } } */
diff --git a/gcc/testsuite/gfortran.dg/coarray_26.f90 b/gcc/testsuite/gfortran.dg/coarray_26.f90
new file mode 100644
index 00000000000..06ff4cf79f7
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/coarray_26.f90
@@ -0,0 +1,53 @@
+! { dg-do compile }
+! { dg-options "-fcoarray=single" }
+!
+! Coarray declaration constraint checks
+!
+
+function foo3a() result(res)
+ implicit none
+ integer :: res
+ codimension :: res[*] ! { dg-error "CODIMENSION attribute conflicts with RESULT" }
+end
+
+function foo2a() result(res)
+ integer :: res[*] ! { dg-error "CODIMENSION attribute conflicts with RESULT" }
+end
+
+function fooa() result(res) ! { dg-error "shall not be a coarray or have a coarray component" }
+ implicit none
+ type t
+ integer, allocatable :: A[:]
+ end type t
+ type(t):: res
+end
+
+function foo3() ! { dg-error "shall not be a coarray or have a coarray component" }
+ implicit none
+ integer :: foo3
+ codimension :: foo3[*]
+end
+
+function foo2() ! { dg-error "shall not be a coarray or have a coarray component" }
+ implicit none
+ integer :: foo2[*]
+end
+
+function foo() ! { dg-error "shall not be a coarray or have a coarray component" }
+ type t
+ integer, allocatable :: A[:]
+ end type t
+ type(t):: foo
+end
+
+subroutine test()
+ use iso_c_binding
+ implicit none
+ type(c_ptr), save :: caf[*] ! { dg-error "shall not be a coarray" }
+end subroutine test
+
+subroutine test2()
+ use iso_c_binding
+ implicit none
+ type(c_funptr), save :: caf[*] ! { dg-error "shall not be a coarray" }
+end subroutine test2
diff --git a/gcc/testsuite/gfortran.dg/common_13.f90 b/gcc/testsuite/gfortran.dg/common_13.f90
new file mode 100644
index 00000000000..07c78f1a73c
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/common_13.f90
@@ -0,0 +1,11 @@
+! { dg-do compile }
+!
+! PR 50070: Segmentation fault at size_binop_loc in fold-const.c
+!
+! Contributed by Vittorio Zecca <zeccav@gmail.com>
+
+subroutine sub
+ common n,z ! { dg-error "must have constant character length" }
+ integer :: n
+ character(len=n) :: z
+end
diff --git a/gcc/testsuite/gfortran.dg/func_result_7.f90 b/gcc/testsuite/gfortran.dg/func_result_7.f90
new file mode 100644
index 00000000000..9a982f1e6fd
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/func_result_7.f90
@@ -0,0 +1,11 @@
+! { dg-do compile }
+!
+! PR 50073: gfortran must not accept function name when result name is present
+!
+! Contributed by Vittorio Zecca <zeccav@gmail.com>
+
+function fun() result(f)
+ pointer fun ! { dg-error "not allowed" }
+ dimension fun(1) ! { dg-error "not allowed" }
+ f=0
+end
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index 12079081914..e42f7e9bb32 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -2772,13 +2772,11 @@ verify_expr (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
error ("invalid operand to pointer plus, first operand is not a pointer");
return t;
}
- /* Check to make sure the second operand is an integer with type of
- sizetype. */
- if (!useless_type_conversion_p (sizetype,
- TREE_TYPE (TREE_OPERAND (t, 1))))
+ /* Check to make sure the second operand is a ptrofftype. */
+ if (!ptrofftype_p (TREE_TYPE (TREE_OPERAND (t, 1))))
{
error ("invalid operand to pointer plus, second operand is not an "
- "integer with type of sizetype");
+ "integer type of appropriate width");
return t;
}
/* FALLTHROUGH */
@@ -3248,17 +3246,17 @@ verify_gimple_assign_unary (gimple stmt)
{
/* Allow conversions between integral types and pointers only if
there is no sign or zero extension involved.
- For targets were the precision of sizetype doesn't match that
+ For targets where the precision of ptrofftype doesn't match that
of pointers we need to allow arbitrary conversions from and
- to sizetype. */
+ to ptrofftype. */
if ((POINTER_TYPE_P (lhs_type)
&& INTEGRAL_TYPE_P (rhs1_type)
&& (TYPE_PRECISION (lhs_type) >= TYPE_PRECISION (rhs1_type)
- || rhs1_type == sizetype))
+ || ptrofftype_p (rhs1_type)))
|| (POINTER_TYPE_P (rhs1_type)
&& INTEGRAL_TYPE_P (lhs_type)
&& (TYPE_PRECISION (rhs1_type) >= TYPE_PRECISION (lhs_type)
- || lhs_type == sizetype)))
+ || ptrofftype_p (sizetype))))
return false;
/* Allow conversion from integer to offset type and vice versa. */
@@ -3525,7 +3523,7 @@ verify_gimple_assign_binary (gimple stmt)
do_pointer_plus_expr_check:
if (!POINTER_TYPE_P (rhs1_type)
|| !useless_type_conversion_p (lhs_type, rhs1_type)
- || !useless_type_conversion_p (sizetype, rhs2_type))
+ || !ptrofftype_p (rhs2_type))
{
error ("type mismatch in pointer plus expression");
debug_generic_stmt (lhs_type);
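The reworded error reflects the relaxed invariant: the offset operand of POINTER_PLUS_EXPR may now be any integer type of pointer-offset width, not sizetype in particular. A hedged sketch of what ptrofftype_p presumably checks (an assumption based on its uses here, not quoted from this patch):

static inline bool
ptrofftype_p (tree type)
{
  return (INTEGRAL_TYPE_P (type)
          && TYPE_PRECISION (type) == TYPE_PRECISION (sizetype)
          && TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));
}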
diff --git a/gcc/tree-chrec.c b/gcc/tree-chrec.c
index f9bebee7fe6..fbd61c08ea6 100644
--- a/gcc/tree-chrec.c
+++ b/gcc/tree-chrec.c
@@ -95,14 +95,14 @@ chrec_fold_plus_poly_poly (enum tree_code code,
tree left, right;
struct loop *loop0 = get_chrec_loop (poly0);
struct loop *loop1 = get_chrec_loop (poly1);
- tree rtype = code == POINTER_PLUS_EXPR ? sizetype : type;
+ tree rtype = code == POINTER_PLUS_EXPR ? chrec_type (poly1) : type;
gcc_assert (poly0);
gcc_assert (poly1);
gcc_assert (TREE_CODE (poly0) == POLYNOMIAL_CHREC);
gcc_assert (TREE_CODE (poly1) == POLYNOMIAL_CHREC);
if (POINTER_TYPE_P (chrec_type (poly0)))
- gcc_assert (chrec_type (poly1) == sizetype);
+ gcc_assert (ptrofftype_p (chrec_type (poly1)));
else
gcc_assert (chrec_type (poly0) == chrec_type (poly1));
gcc_assert (type == chrec_type (poly0));
@@ -262,8 +262,6 @@ static tree
chrec_fold_plus_1 (enum tree_code code, tree type,
tree op0, tree op1)
{
- tree op1_type = code == POINTER_PLUS_EXPR ? sizetype : type;
-
if (automatically_generated_chrec_p (op0)
|| automatically_generated_chrec_p (op1))
return chrec_fold_automatically_generated_operands (op0, op1);
@@ -327,9 +325,15 @@ chrec_fold_plus_1 (enum tree_code code, tree type,
&& size < PARAM_VALUE (PARAM_SCEV_MAX_EXPR_SIZE))
return build2 (code, type, op0, op1);
else if (size < PARAM_VALUE (PARAM_SCEV_MAX_EXPR_SIZE))
- return fold_build2 (code, type,
- fold_convert (type, op0),
- fold_convert (op1_type, op1));
+ {
+ if (code == POINTER_PLUS_EXPR)
+ return fold_build_pointer_plus (fold_convert (type, op0),
+ op1);
+ else
+ return fold_build2 (code, type,
+ fold_convert (type, op0),
+ fold_convert (type, op1));
+ }
else
return chrec_dont_know;
}
@@ -831,7 +835,7 @@ reset_evolution_in_loop (unsigned loop_num,
struct loop *loop = get_loop (loop_num);
if (POINTER_TYPE_P (chrec_type (chrec)))
- gcc_assert (sizetype == chrec_type (new_evol));
+ gcc_assert (ptrofftype_p (chrec_type (new_evol)));
else
gcc_assert (chrec_type (chrec) == chrec_type (new_evol));
diff --git a/gcc/tree-chrec.h b/gcc/tree-chrec.h
index 9b971bde1af..bf9bff0f999 100644
--- a/gcc/tree-chrec.h
+++ b/gcc/tree-chrec.h
@@ -145,7 +145,7 @@ build_polynomial_chrec (unsigned loop_num,
/* Types of left and right sides of a chrec should be compatible. */
if (POINTER_TYPE_P (TREE_TYPE (left)))
- gcc_assert (sizetype == TREE_TYPE (right));
+ gcc_assert (ptrofftype_p (TREE_TYPE (right)));
else
gcc_assert (TREE_TYPE (left) == TREE_TYPE (right));
diff --git a/gcc/tree-loop-distribution.c b/gcc/tree-loop-distribution.c
index 165431184ab..f1db27a007d 100644
--- a/gcc/tree-loop-distribution.c
+++ b/gcc/tree-loop-distribution.c
@@ -242,9 +242,10 @@ build_size_arg_loc (location_t loc, tree nb_iter, tree op,
gimple_seq *stmt_list)
{
gimple_seq stmts;
- tree x = size_binop_loc (loc, MULT_EXPR,
- fold_convert_loc (loc, sizetype, nb_iter),
- TYPE_SIZE_UNIT (TREE_TYPE (op)));
+ tree x = fold_build2_loc (loc, MULT_EXPR, size_type_node,
+ fold_convert_loc (loc, size_type_node, nb_iter),
+ fold_convert_loc (loc, size_type_node,
+ TYPE_SIZE_UNIT (TREE_TYPE (op))));
x = force_gimple_operand (x, &stmts, true, NULL);
gimple_seq_add_seq (stmt_list, stmts);
@@ -275,9 +276,7 @@ generate_memset_zero (gimple stmt, tree op0, tree nb_iter,
addr_base = fold_convert_loc (loc, sizetype, addr_base);
/* Test for a negative stride, iterating over every element. */
- if (integer_zerop (size_binop (PLUS_EXPR,
- TYPE_SIZE_UNIT (TREE_TYPE (op0)),
- fold_convert (sizetype, DR_STEP (dr)))))
+ if (tree_int_cst_sgn (DR_STEP (dr)) == -1)
{
addr_base = size_binop_loc (loc, MINUS_EXPR, addr_base,
fold_convert_loc (loc, sizetype, nb_bytes));
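An illustrative source-level view of the negative-stride case the new sign test recognizes (hypothetical example, not from the testsuite): the data reference steps backwards through memory, yet the loop is still a single memset starting at the lowest address:

void
clear_down (char *a, int n)
{
  int i;
  for (i = n - 1; i >= 0; i--)
    a[i] = 0;   /* DR_STEP is -1; for n > 0 this is one memset (a, 0, n) */
}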
diff --git a/gcc/tree-mudflap.c b/gcc/tree-mudflap.c
index 26ef23e15f7..f9b45994917 100644
--- a/gcc/tree-mudflap.c
+++ b/gcc/tree-mudflap.c
@@ -851,7 +851,7 @@ mf_xform_derefs_1 (gimple_stmt_iterator *iter, tree *tp,
limit = fold_build2_loc (location, MINUS_EXPR, mf_uintptr_type,
fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type,
- convert (mf_uintptr_type, addr),
+ fold_convert (mf_uintptr_type, addr),
size),
integer_one_node);
}
@@ -897,20 +897,17 @@ mf_xform_derefs_1 (gimple_stmt_iterator *iter, tree *tp,
return;
bpu = bitsize_int (BITS_PER_UNIT);
- ofs = convert (bitsizetype, TREE_OPERAND (t, 2));
+ ofs = fold_convert (bitsizetype, TREE_OPERAND (t, 2));
rem = size_binop_loc (location, TRUNC_MOD_EXPR, ofs, bpu);
- ofs = fold_convert_loc (location,
- sizetype,
- size_binop_loc (location,
- TRUNC_DIV_EXPR, ofs, bpu));
+ ofs = size_binop_loc (location, TRUNC_DIV_EXPR, ofs, bpu);
- size = convert (bitsizetype, TREE_OPERAND (t, 1));
+ size = fold_convert (bitsizetype, TREE_OPERAND (t, 1));
size = size_binop_loc (location, PLUS_EXPR, size, rem);
size = size_binop_loc (location, CEIL_DIV_EXPR, size, bpu);
- size = convert (sizetype, size);
+ size = fold_convert (sizetype, size);
addr = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
- addr = convert (ptr_type_node, addr);
+ addr = fold_convert (ptr_type_node, addr);
addr = fold_build_pointer_plus_loc (location, addr, ofs);
base = addr;
@@ -1049,7 +1046,8 @@ mx_register_decls (tree decl, gimple_seq seq, location_t location)
/* Variable-sized objects should have sizes already been
gimplified when we got here. */
- size = convert (size_type_node, TYPE_SIZE_UNIT (TREE_TYPE (decl)));
+ size = fold_convert (size_type_node,
+ TYPE_SIZE_UNIT (TREE_TYPE (decl)));
gcc_assert (is_gimple_val (size));
@@ -1233,11 +1231,11 @@ mudflap_register_call (tree obj, tree object_size, tree varname)
tree arg, call_stmt;
arg = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (obj)), obj);
- arg = convert (ptr_type_node, arg);
+ arg = fold_convert (ptr_type_node, arg);
call_stmt = build_call_expr (mf_register_fndecl, 4,
arg,
- convert (size_type_node, object_size),
+ fold_convert (size_type_node, object_size),
/* __MF_TYPE_STATIC */
build_int_cst (integer_type_node, 4),
varname);
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index ac2314778a8..7dfb480e9e1 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -618,11 +618,12 @@ suitable_reference_p (struct data_reference *a, enum ref_step_type *ref_step)
static void
aff_combination_dr_offset (struct data_reference *dr, aff_tree *offset)
{
+ tree type = TREE_TYPE (DR_OFFSET (dr));
aff_tree delta;
- tree_to_aff_combination_expand (DR_OFFSET (dr), sizetype, offset,
+ tree_to_aff_combination_expand (DR_OFFSET (dr), type, offset,
&name_expansions);
- aff_combination_const (&delta, sizetype, tree_to_double_int (DR_INIT (dr)));
+ aff_combination_const (&delta, type, tree_to_double_int (DR_INIT (dr)));
aff_combination_add (offset, &delta);
}
@@ -667,7 +668,7 @@ determine_offset (struct data_reference *a, struct data_reference *b,
aff_combination_scale (&baseb, double_int_minus_one);
aff_combination_add (&diff, &baseb);
- tree_to_aff_combination_expand (DR_STEP (a), sizetype,
+ tree_to_aff_combination_expand (DR_STEP (a), TREE_TYPE (DR_STEP (a)),
&step, &name_expansions);
return aff_combination_constant_multiple_p (&diff, &step, off);
}
@@ -1050,8 +1051,8 @@ valid_initializer_p (struct data_reference *ref,
aff_combination_scale (&base, double_int_minus_one);
aff_combination_add (&diff, &base);
- tree_to_aff_combination_expand (DR_STEP (root), sizetype, &step,
- &name_expansions);
+ tree_to_aff_combination_expand (DR_STEP (root), TREE_TYPE (DR_STEP (root)),
+ &step, &name_expansions);
if (!aff_combination_constant_multiple_p (&diff, &step, &off))
return false;
diff --git a/gcc/tree-profile.c b/gcc/tree-profile.c
index cd9b49d4304..98f7d678792 100644
--- a/gcc/tree-profile.c
+++ b/gcc/tree-profile.c
@@ -241,7 +241,8 @@ prepare_instrumented_value (gimple_stmt_iterator *gsi, histogram_value value)
{
tree val = value->hvalue.value;
if (POINTER_TYPE_P (TREE_TYPE (val)))
- val = fold_convert (sizetype, val);
+ val = fold_convert (build_nonstandard_integer_type
+ (TYPE_PRECISION (TREE_TYPE (val)), 1), val);
return force_gimple_operand_gsi (gsi, fold_convert (gcov_type_node, val),
true, NULL_TREE, true, GSI_SAME_STMT);
}
diff --git a/gcc/tree-scalar-evolution.c b/gcc/tree-scalar-evolution.c
index 10aaba4e5b9..646b4f1c568 100644
--- a/gcc/tree-scalar-evolution.c
+++ b/gcc/tree-scalar-evolution.c
@@ -1727,7 +1727,7 @@ interpret_rhs_expr (struct loop *loop, gimple at_stmt,
chrec1 = analyze_scalar_evolution (loop, rhs1);
chrec2 = analyze_scalar_evolution (loop, rhs2);
chrec1 = chrec_convert (type, chrec1, at_stmt);
- chrec2 = chrec_convert (sizetype, chrec2, at_stmt);
+ chrec2 = chrec_convert (TREE_TYPE (rhs2), chrec2, at_stmt);
res = chrec_fold_plus (type, chrec1, chrec2);
break;
diff --git a/gcc/tree-ssa-address.c b/gcc/tree-ssa-address.c
index e4e944bf815..34479b33ae1 100644
--- a/gcc/tree-ssa-address.c
+++ b/gcc/tree-ssa-address.c
@@ -279,7 +279,8 @@ tree_mem_ref_addr (tree type, tree mem_ref)
if (act_elem)
{
if (step)
- act_elem = fold_build2 (MULT_EXPR, sizetype, act_elem, step);
+ act_elem = fold_build2 (MULT_EXPR, TREE_TYPE (act_elem),
+ act_elem, step);
addr_off = act_elem;
}
@@ -287,16 +288,17 @@ tree_mem_ref_addr (tree type, tree mem_ref)
if (act_elem)
{
if (addr_off)
- addr_off = fold_build2 (PLUS_EXPR, sizetype, addr_off, act_elem);
+ addr_off = fold_build2 (PLUS_EXPR, TREE_TYPE (addr_off),
+ addr_off, act_elem);
else
addr_off = act_elem;
}
if (offset && !integer_zerop (offset))
{
- offset = fold_convert (sizetype, offset);
if (addr_off)
- addr_off = fold_build2 (PLUS_EXPR, sizetype, addr_off, offset);
+ addr_off = fold_build2 (PLUS_EXPR, TREE_TYPE (addr_off), addr_off,
+ fold_convert (TREE_TYPE (addr_off), offset));
else
addr_off = offset;
}
diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c
index afbe525b99d..00121796613 100644
--- a/gcc/tree-ssa-forwprop.c
+++ b/gcc/tree-ssa-forwprop.c
@@ -369,16 +369,20 @@ rhs_to_tree (tree type, gimple stmt)
considered simplified. */
static tree
-combine_cond_expr_cond (location_t loc, enum tree_code code, tree type,
+combine_cond_expr_cond (gimple stmt, enum tree_code code, tree type,
tree op0, tree op1, bool invariant_only)
{
tree t;
gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);
- t = fold_binary_loc (loc, code, type, op0, op1);
+ fold_defer_overflow_warnings ();
+ t = fold_binary_loc (gimple_location (stmt), code, type, op0, op1);
if (!t)
- return NULL_TREE;
+ {
+ fold_undefer_overflow_warnings (false, NULL, 0);
+ return NULL_TREE;
+ }
/* Require that we got a boolean type out if we put one in. */
gcc_assert (TREE_CODE (TREE_TYPE (t)) == TREE_CODE (type));
@@ -388,7 +392,12 @@ combine_cond_expr_cond (location_t loc, enum tree_code code, tree type,
/* Bail out if we required an invariant but didn't get one. */
if (!t || (invariant_only && !is_gimple_min_invariant (t)))
- return NULL_TREE;
+ {
+ fold_undefer_overflow_warnings (false, NULL, 0);
+ return NULL_TREE;
+ }
+
+ fold_undefer_overflow_warnings (!gimple_no_warning_p (stmt), stmt, 0);
return t;
}
@@ -398,7 +407,7 @@ combine_cond_expr_cond (location_t loc, enum tree_code code, tree type,
were no simplifying combines. */
static tree
-forward_propagate_into_comparison_1 (location_t loc,
+forward_propagate_into_comparison_1 (gimple stmt,
enum tree_code code, tree type,
tree op0, tree op1)
{
@@ -414,7 +423,7 @@ forward_propagate_into_comparison_1 (location_t loc,
if (def_stmt && can_propagate_from (def_stmt))
{
rhs0 = rhs_to_tree (TREE_TYPE (op1), def_stmt);
- tmp = combine_cond_expr_cond (loc, code, type,
+ tmp = combine_cond_expr_cond (stmt, code, type,
rhs0, op1, !single_use0_p);
if (tmp)
return tmp;
@@ -428,7 +437,7 @@ forward_propagate_into_comparison_1 (location_t loc,
if (def_stmt && can_propagate_from (def_stmt))
{
rhs1 = rhs_to_tree (TREE_TYPE (op0), def_stmt);
- tmp = combine_cond_expr_cond (loc, code, type,
+ tmp = combine_cond_expr_cond (stmt, code, type,
op0, rhs1, !single_use1_p);
if (tmp)
return tmp;
@@ -438,7 +447,7 @@ forward_propagate_into_comparison_1 (location_t loc,
/* If that wasn't successful either, try both operands. */
if (rhs0 != NULL_TREE
&& rhs1 != NULL_TREE)
- tmp = combine_cond_expr_cond (loc, code, type,
+ tmp = combine_cond_expr_cond (stmt, code, type,
rhs0, rhs1,
!(single_use0_p && single_use1_p));
@@ -460,7 +469,7 @@ forward_propagate_into_comparison (gimple_stmt_iterator *gsi)
tree rhs2 = gimple_assign_rhs2 (stmt);
/* Combine the comparison with defining statements. */
- tmp = forward_propagate_into_comparison_1 (gimple_location (stmt),
+ tmp = forward_propagate_into_comparison_1 (stmt,
gimple_assign_rhs_code (stmt),
TREE_TYPE
(gimple_assign_lhs (stmt)),
@@ -491,7 +500,6 @@ forward_propagate_into_comparison (gimple_stmt_iterator *gsi)
static int
forward_propagate_into_gimple_cond (gimple stmt)
{
- location_t loc = gimple_location (stmt);
tree tmp;
enum tree_code code = gimple_cond_code (stmt);
bool cfg_changed = false;
@@ -502,7 +510,7 @@ forward_propagate_into_gimple_cond (gimple stmt)
if (TREE_CODE_CLASS (gimple_cond_code (stmt)) != tcc_comparison)
return 0;
- tmp = forward_propagate_into_comparison_1 (loc, code,
+ tmp = forward_propagate_into_comparison_1 (stmt, code,
boolean_type_node,
rhs1, rhs2);
if (tmp)
@@ -541,13 +549,12 @@ static int
forward_propagate_into_cond (gimple_stmt_iterator *gsi_p)
{
gimple stmt = gsi_stmt (*gsi_p);
- location_t loc = gimple_location (stmt);
tree tmp = NULL_TREE;
tree cond = gimple_assign_rhs1 (stmt);
/* We can do tree combining on SSA_NAME and comparison expressions. */
if (COMPARISON_CLASS_P (cond))
- tmp = forward_propagate_into_comparison_1 (loc, TREE_CODE (cond),
+ tmp = forward_propagate_into_comparison_1 (stmt, TREE_CODE (cond),
boolean_type_node,
TREE_OPERAND (cond, 0),
TREE_OPERAND (cond, 1));
@@ -559,7 +566,7 @@ forward_propagate_into_cond (gimple_stmt_iterator *gsi_p)
return 0;
rhs0 = gimple_assign_rhs1 (def_stmt);
- tmp = combine_cond_expr_cond (loc, NE_EXPR, boolean_type_node, rhs0,
+ tmp = combine_cond_expr_cond (stmt, NE_EXPR, boolean_type_node, rhs0,
build_int_cst (TREE_TYPE (rhs0), 0),
false);
}
@@ -2440,27 +2447,18 @@ ssa_forward_propagate_and_combine (void)
{
/* In this case the entire COND_EXPR is in rhs1. */
int did_something;
- fold_defer_overflow_warnings ();
did_something = forward_propagate_into_cond (&gsi);
stmt = gsi_stmt (gsi);
if (did_something == 2)
cfg_changed = true;
- fold_undefer_overflow_warnings
- (!TREE_NO_WARNING (rhs1) && did_something, stmt,
- WARN_STRICT_OVERFLOW_CONDITIONAL);
changed = did_something != 0;
}
else if (TREE_CODE_CLASS (code) == tcc_comparison)
{
- bool no_warning = gimple_no_warning_p (stmt);
int did_something;
- fold_defer_overflow_warnings ();
did_something = forward_propagate_into_comparison (&gsi);
if (did_something == 2)
cfg_changed = true;
- fold_undefer_overflow_warnings
- (!no_warning && changed,
- stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
changed = did_something != 0;
}
else if (code == BIT_AND_EXPR
@@ -2489,12 +2487,9 @@ ssa_forward_propagate_and_combine (void)
case GIMPLE_COND:
{
int did_something;
- fold_defer_overflow_warnings ();
did_something = forward_propagate_into_gimple_cond (stmt);
if (did_something == 2)
cfg_changed = true;
- fold_undefer_overflow_warnings
- (did_something, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
changed = did_something != 0;
break;
}
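The tree-ssa-forwprop.c changes above move the fold_defer_overflow_warnings / fold_undefer_overflow_warnings pairing out of the per-statement loops and into combine_cond_expr_cond, so every exit path of the combining helper pops exactly one deferral. A standalone sketch of that pairing discipline, with hypothetical names and a simple depth counter standing in for the real warning machinery:

    #include <stdbool.h>
    #include <stdio.h>

    static int defer_depth;

    static void defer_warnings (void) { ++defer_depth; }

    static void
    undefer_warnings (bool emit, const char *msg)
    {
      --defer_depth;                       /* every defer is popped exactly once */
      if (emit && defer_depth == 0 && msg)
        fprintf (stderr, "warning: %s\n", msg);
    }

    /* Try to fold "a < b"; suppress the folder's overflow warning unless
       folding succeeded and the caller's statement may warn.  */
    static bool
    combine_cond_sketch (int a, int b, bool stmt_may_warn, int *result)
    {
      defer_warnings ();
      if (a == 0 && b == 0)                /* "no simplification" path still undefers */
        {
          undefer_warnings (false, NULL);
          return false;
        }
      *result = a < b;
      undefer_warnings (stmt_may_warn,
                        "assuming signed overflow does not occur");
      return true;
    }

    int
    main (void)
    {
      int r;
      if (combine_cond_sketch (1, 2, true, &r))
        printf ("%d\n", r);                /* prints 1 */
      return 0;
    }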
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index 79fff3f4eae..6cf14383f6c 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -1035,7 +1035,7 @@ find_bivs (struct ivopts_data *data)
if (step)
{
if (POINTER_TYPE_P (type))
- step = fold_convert (sizetype, step);
+ step = convert_to_ptrofftype (step);
else
step = fold_convert (type, step);
}
diff --git a/gcc/tree-ssa-loop-manip.c b/gcc/tree-ssa-loop-manip.c
index 0cec7872b81..f6e2e9c937f 100644
--- a/gcc/tree-ssa-loop-manip.c
+++ b/gcc/tree-ssa-loop-manip.c
@@ -100,9 +100,9 @@ create_iv (tree base, tree step, tree var, struct loop *loop,
{
if (TREE_CODE (base) == ADDR_EXPR)
mark_addressable (TREE_OPERAND (base, 0));
- step = fold_convert (sizetype, step);
+ step = convert_to_ptrofftype (step);
if (incr_op == MINUS_EXPR)
- step = fold_build1 (NEGATE_EXPR, sizetype, step);
+ step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
incr_op = POINTER_PLUS_EXPR;
}
/* Gimplify the step if necessary. We put the computations in front of the
@@ -705,7 +705,7 @@ determine_exit_conditions (struct loop *loop, struct tree_niter_desc *desc,
enum tree_code cmp = desc->cmp;
tree cond = boolean_true_node, assum;
- /* For pointers, do the arithmetics in the type of step (sizetype). */
+ /* For pointers, do the arithmetic in the type of step. */
base = fold_convert (type, base);
bound = fold_convert (type, bound);
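In create_iv above, a decreasing pointer induction variable is now expressed as a POINTER_PLUS of the step negated in the step's own offset type rather than in sizetype. A small standalone sketch of that rewrite, using ptrdiff_t as the offset type and a hypothetical helper name:

    #include <stddef.h>
    #include <stdio.h>

    static const int *
    advance_iv (const int *p, ptrdiff_t step, int decreasing)
    {
      ptrdiff_t off = decreasing ? -step : step;  /* NEGATE_EXPR in step's type */
      return p + off;                             /* POINTER_PLUS_EXPR */
    }

    int
    main (void)
    {
      int a[8];
      const int *p = &a[4];
      printf ("%td\n", advance_iv (p, 3, 1) - a);  /* prints 1 */
      return 0;
    }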
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index a50c837db41..d784bac6818 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -3099,12 +3099,12 @@ create_expression_by_pieces (basic_block block, pre_expr expr,
stmts, domstmt);
if (!genop1 || !genop2)
return NULL_TREE;
- /* Ensure op2 is a sizetype for POINTER_PLUS_EXPR. It
+ /* Ensure op2 is a ptrofftype for POINTER_PLUS_EXPR. It
may be a constant with the wrong type. */
if (nary->opcode == POINTER_PLUS_EXPR)
{
genop1 = fold_convert (nary->type, genop1);
- genop2 = fold_convert (sizetype, genop2);
+ genop2 = convert_to_ptrofftype (genop2);
}
else
{
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index d65b9ebe9c2..4ccc0a29fd5 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -1485,12 +1485,11 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_)
may fail when comparing types for compatibility. But we really
don't care here - further lookups with the rewritten operands
will simply fail if we messed up types too badly. */
- if (j == 0 && i == 0
+ if (j == 0 && i >= 0
&& VEC_index (vn_reference_op_s, lhs_ops, 0)->opcode == MEM_REF
- && VEC_index (vn_reference_op_s, vr->operands, i)->opcode == MEM_REF
- && tree_int_cst_equal
- (VEC_index (vn_reference_op_s, lhs_ops, 0)->op0,
- VEC_index (vn_reference_op_s, vr->operands, i)->op0))
+ && VEC_index (vn_reference_op_s, lhs_ops, 0)->off != -1
+ && (VEC_index (vn_reference_op_s, lhs_ops, 0)->off
+ == VEC_index (vn_reference_op_s, vr->operands, i)->off))
i--, j--;
/* i now points to the first additional op.
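The tree-ssa-sccvn.c hunk now matches the two MEM_REF operands through their precomputed constant byte offsets (the off field, with -1 meaning "not constant") instead of comparing the offset trees with tree_int_cst_equal. A standalone sketch of that test with an illustrative structure, not the real vn_reference_op_s layout:

    #include <stdbool.h>
    #include <stdio.h>

    struct ref_op_sketch
    {
      long off;   /* constant byte offset, or -1 when unknown */
    };

    static bool
    mem_ref_offsets_match (const struct ref_op_sketch *lhs,
                           const struct ref_op_sketch *rhs)
    {
      /* Only a known offset on the left-hand side can match.  */
      return lhs->off != -1 && lhs->off == rhs->off;
    }

    int
    main (void)
    {
      struct ref_op_sketch a = { 8 }, b = { 8 }, c = { -1 };
      printf ("%d %d\n", mem_ref_offsets_match (&a, &b),
              mem_ref_offsets_match (&c, &b));   /* prints "1 0" */
      return 0;
    }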
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index 9bfbad63f70..79d485289c1 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -3260,7 +3260,7 @@ get_constraint_for_1 (tree t, VEC (ce_s, heap) **results, bool address_p,
{
struct constraint_expr cs;
varinfo_t vi, curr;
- tree off = double_int_to_tree (sizetype, mem_ref_offset (t));
+ tree off = convert_to_ptrofftype (TREE_OPERAND (t, 1));
get_constraint_for_ptr_offset (TREE_OPERAND (t, 0), off, results);
do_deref (results);
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index df7a9a251ca..df19cbbfdd1 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -1398,16 +1398,14 @@ range_includes_zero_p (value_range_t *vr)
static inline bool
value_range_nonnegative_p (value_range_t *vr)
{
+ /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
+ which would return a useful value should be encoded as a
+ VR_RANGE. */
if (vr->type == VR_RANGE)
{
int result = compare_values (vr->min, integer_zero_node);
return (result == 0 || result == 1);
}
- else if (vr->type == VR_ANTI_RANGE)
- {
- int result = compare_values (vr->max, integer_zero_node);
- return result == -1;
- }
return false;
}
@@ -2183,6 +2181,158 @@ zero_nonzero_bits_from_vr (value_range_t *vr,
return true;
}
+/* Helper to extract a value-range *VR for a multiplicative operation
+ *VR0 CODE *VR1. */
+
+static void
+extract_range_from_multiplicative_op_1 (value_range_t *vr,
+ enum tree_code code,
+ value_range_t *vr0, value_range_t *vr1)
+{
+ enum value_range_type type;
+ tree val[4];
+ size_t i;
+ tree min, max;
+ bool sop;
+ int cmp;
+
+ /* Multiplications, divisions and shifts are a bit tricky to handle,
+ depending on the mix of signs we have in the two ranges, we
+ need to operate on different values to get the minimum and
+ maximum values for the new range. One approach is to figure
+ out all the variations of range combinations and do the
+ operations.
+
+ However, this involves several calls to compare_values and it
+ is pretty convoluted. It's simpler to do the 4 operations
+ (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
+ MAX1) and then figure the smallest and largest values to form
+ the new range. */
+ gcc_assert (code == MULT_EXPR
+ || code == TRUNC_DIV_EXPR
+ || code == FLOOR_DIV_EXPR
+ || code == CEIL_DIV_EXPR
+ || code == EXACT_DIV_EXPR
+ || code == ROUND_DIV_EXPR
+ || code == RSHIFT_EXPR);
+ gcc_assert ((vr0->type == VR_RANGE
+ || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
+ && vr0->type == vr1->type);
+
+ type = vr0->type;
+
+ /* Compute the 4 cross operations. */
+ sop = false;
+ val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
+ if (val[0] == NULL_TREE)
+ sop = true;
+
+ if (vr1->max == vr1->min)
+ val[1] = NULL_TREE;
+ else
+ {
+ val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
+ if (val[1] == NULL_TREE)
+ sop = true;
+ }
+
+ if (vr0->max == vr0->min)
+ val[2] = NULL_TREE;
+ else
+ {
+ val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
+ if (val[2] == NULL_TREE)
+ sop = true;
+ }
+
+ if (vr0->min == vr0->max || vr1->min == vr1->max)
+ val[3] = NULL_TREE;
+ else
+ {
+ val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
+ if (val[3] == NULL_TREE)
+ sop = true;
+ }
+
+ if (sop)
+ {
+ set_value_range_to_varying (vr);
+ return;
+ }
+
+ /* Set MIN to the minimum of VAL[i] and MAX to the maximum
+ of VAL[i]. */
+ min = val[0];
+ max = val[0];
+ for (i = 1; i < 4; i++)
+ {
+ if (!is_gimple_min_invariant (min)
+ || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
+ || !is_gimple_min_invariant (max)
+ || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
+ break;
+
+ if (val[i])
+ {
+ if (!is_gimple_min_invariant (val[i])
+ || (TREE_OVERFLOW (val[i])
+ && !is_overflow_infinity (val[i])))
+ {
+ /* If we found an overflowed value, set MIN and MAX
+ to it so that we set the resulting range to
+ VARYING. */
+ min = max = val[i];
+ break;
+ }
+
+ if (compare_values (val[i], min) == -1)
+ min = val[i];
+
+ if (compare_values (val[i], max) == 1)
+ max = val[i];
+ }
+ }
+
+ /* If either MIN or MAX overflowed, then set the resulting range to
+ VARYING. But we do accept an overflow infinity
+ representation. */
+ if (min == NULL_TREE
+ || !is_gimple_min_invariant (min)
+ || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
+ || max == NULL_TREE
+ || !is_gimple_min_invariant (max)
+ || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
+ {
+ set_value_range_to_varying (vr);
+ return;
+ }
+
+ /* We punt if:
+ 1) [-INF, +INF]
+ 2) [-INF, +-INF(OVF)]
+ 3) [+-INF(OVF), +INF]
+ 4) [+-INF(OVF), +-INF(OVF)]
+ We learn nothing when we have INF and INF(OVF) on both sides.
+ Note that we do accept [-INF, -INF] and [+INF, +INF] without
+ overflow. */
+ if ((vrp_val_is_min (min) || is_overflow_infinity (min))
+ && (vrp_val_is_max (max) || is_overflow_infinity (max)))
+ {
+ set_value_range_to_varying (vr);
+ return;
+ }
+
+ cmp = compare_values (min, max);
+ if (cmp == -2 || cmp == 1)
+ {
+ /* If the new range has its limits swapped around (MIN > MAX),
+ then the operation caused one of them to wrap around, mark
+ the new range VARYING. */
+ set_value_range_to_varying (vr);
+ }
+ else
+ set_value_range (vr, type, min, max, NULL);
+}
/* Extract range information from a binary operation CODE based on
the ranges of each of its operands, *VR0 and *VR1 with resulting
@@ -2195,9 +2345,16 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
{
value_range_t vr0 = *vr0_, vr1 = *vr1_;
enum value_range_type type;
- tree min, max;
+ tree min = NULL_TREE, max = NULL_TREE;
int cmp;
+ if (!INTEGRAL_TYPE_P (expr_type)
+ && !POINTER_TYPE_P (expr_type))
+ {
+ set_value_range_to_varying (vr);
+ return;
+ }
+
/* Not all binary expressions can be applied to ranges in a
meaningful way. Handle only arithmetic operations. */
if (code != PLUS_EXPR
@@ -2309,9 +2466,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
/* For integer ranges, apply the operation to each end of the
range and see what we end up with. */
- if (code == PLUS_EXPR
- || code == MIN_EXPR
- || code == MAX_EXPR)
+ if (code == PLUS_EXPR)
{
/* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to
VR_VARYING. It would take more effort to compute a precise
@@ -2322,32 +2477,21 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
this point. */
if (vr0.type == VR_ANTI_RANGE)
{
- if (code == PLUS_EXPR)
- {
- set_value_range_to_varying (vr);
- return;
- }
- /* For MIN_EXPR and MAX_EXPR with two VR_ANTI_RANGEs,
- the resulting VR_ANTI_RANGE is the same - intersection
- of the two ranges. */
- min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min);
- max = vrp_int_const_binop (MIN_EXPR, vr0.max, vr1.max);
- }
- else
- {
- /* For operations that make the resulting range directly
- proportional to the original ranges, apply the operation to
- the same end of each range. */
- min = vrp_int_const_binop (code, vr0.min, vr1.min);
- max = vrp_int_const_binop (code, vr0.max, vr1.max);
+ set_value_range_to_varying (vr);
+ return;
}
+ /* For operations that make the resulting range directly
+ proportional to the original ranges, apply the operation to
+ the same end of each range. */
+ min = vrp_int_const_binop (code, vr0.min, vr1.min);
+ max = vrp_int_const_binop (code, vr0.max, vr1.max);
+
/* If both additions overflowed the range kind is still correct.
This happens regularly with subtracting something in unsigned
arithmetic.
??? See PR30318 for all the cases we do not handle. */
- if (code == PLUS_EXPR
- && (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
+ if ((TREE_OVERFLOW (min) && !is_overflow_infinity (min))
&& (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
{
min = build_int_cst_wide (TREE_TYPE (min),
@@ -2358,18 +2502,28 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
TREE_INT_CST_HIGH (max));
}
}
- else if (code == MULT_EXPR
- || code == TRUNC_DIV_EXPR
- || code == FLOOR_DIV_EXPR
- || code == CEIL_DIV_EXPR
- || code == EXACT_DIV_EXPR
- || code == ROUND_DIV_EXPR
- || code == RSHIFT_EXPR)
+ else if (code == MIN_EXPR
+ || code == MAX_EXPR)
+ {
+ if (vr0.type == VR_ANTI_RANGE)
+ {
+ /* For MIN_EXPR and MAX_EXPR with two VR_ANTI_RANGEs,
+ the resulting VR_ANTI_RANGE is the same - intersection
+ of the two ranges. */
+ min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min);
+ max = vrp_int_const_binop (MIN_EXPR, vr0.max, vr1.max);
+ }
+ else
+ {
+ /* For operations that make the resulting range directly
+ proportional to the original ranges, apply the operation to
+ the same end of each range. */
+ min = vrp_int_const_binop (code, vr0.min, vr1.min);
+ max = vrp_int_const_binop (code, vr0.max, vr1.max);
+ }
+ }
+ else if (code == MULT_EXPR)
{
- tree val[4];
- size_t i;
- bool sop;
-
/* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
drop to VR_VARYING. It would take more effort to compute a
precise range for such a case. For example, if we have
@@ -2378,14 +2532,18 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
we cannot claim that the product is in ~[0,0]. Note that we
are guaranteed to have vr0.type == vr1.type at this
point. */
- if (code == MULT_EXPR
- && vr0.type == VR_ANTI_RANGE
+ if (vr0.type == VR_ANTI_RANGE
&& !TYPE_OVERFLOW_UNDEFINED (expr_type))
{
set_value_range_to_varying (vr);
return;
}
+ extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
+ return;
+ }
+ else if (code == RSHIFT_EXPR)
+ {
/* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
then drop to VR_VARYING. Outside of this range we get undefined
behavior from the shift operation. We cannot even trust
@@ -2404,12 +2562,16 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
}
}
- else if ((code == TRUNC_DIV_EXPR
- || code == FLOOR_DIV_EXPR
- || code == CEIL_DIV_EXPR
- || code == EXACT_DIV_EXPR
- || code == ROUND_DIV_EXPR)
- && (vr0.type != VR_RANGE || symbolic_range_p (&vr0)))
+ extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
+ return;
+ }
+ else if (code == TRUNC_DIV_EXPR
+ || code == FLOOR_DIV_EXPR
+ || code == CEIL_DIV_EXPR
+ || code == EXACT_DIV_EXPR
+ || code == ROUND_DIV_EXPR)
+ {
+ if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
{
/* For division, if op1 has VR_RANGE but op0 does not, something
can be deduced just from that range. Say [min, max] / [4, max]
@@ -2431,12 +2593,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
/* For divisions, if flag_non_call_exceptions is true, we must
not eliminate a division by zero. */
- if ((code == TRUNC_DIV_EXPR
- || code == FLOOR_DIV_EXPR
- || code == CEIL_DIV_EXPR
- || code == EXACT_DIV_EXPR
- || code == ROUND_DIV_EXPR)
- && cfun->can_throw_non_call_exceptions
+ if (cfun->can_throw_non_call_exceptions
&& (vr1.type != VR_RANGE
|| symbolic_range_p (&vr1)
|| range_includes_zero_p (&vr1)))
@@ -2448,12 +2605,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
/* For divisions, if op0 is VR_RANGE, we can deduce a range
even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
include 0. */
- if ((code == TRUNC_DIV_EXPR
- || code == FLOOR_DIV_EXPR
- || code == CEIL_DIV_EXPR
- || code == EXACT_DIV_EXPR
- || code == ROUND_DIV_EXPR)
- && vr0.type == VR_RANGE
+ if (vr0.type == VR_RANGE
&& (vr1.type != VR_RANGE
|| symbolic_range_p (&vr1)
|| range_includes_zero_p (&vr1)))
@@ -2461,7 +2613,6 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
int cmp;
- sop = false;
min = NULL_TREE;
max = NULL_TREE;
if (TYPE_UNSIGNED (expr_type)
@@ -2500,96 +2651,10 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
return;
}
}
-
- /* Multiplications and divisions are a bit tricky to handle,
- depending on the mix of signs we have in the two ranges, we
- need to operate on different values to get the minimum and
- maximum values for the new range. One approach is to figure
- out all the variations of range combinations and do the
- operations.
-
- However, this involves several calls to compare_values and it
- is pretty convoluted. It's simpler to do the 4 operations
- (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP
- MAX1) and then figure the smallest and largest values to form
- the new range. */
else
{
- gcc_assert ((vr0.type == VR_RANGE
- || (code == MULT_EXPR && vr0.type == VR_ANTI_RANGE))
- && vr0.type == vr1.type);
-
- /* Compute the 4 cross operations. */
- sop = false;
- val[0] = vrp_int_const_binop (code, vr0.min, vr1.min);
- if (val[0] == NULL_TREE)
- sop = true;
-
- if (vr1.max == vr1.min)
- val[1] = NULL_TREE;
- else
- {
- val[1] = vrp_int_const_binop (code, vr0.min, vr1.max);
- if (val[1] == NULL_TREE)
- sop = true;
- }
-
- if (vr0.max == vr0.min)
- val[2] = NULL_TREE;
- else
- {
- val[2] = vrp_int_const_binop (code, vr0.max, vr1.min);
- if (val[2] == NULL_TREE)
- sop = true;
- }
-
- if (vr0.min == vr0.max || vr1.min == vr1.max)
- val[3] = NULL_TREE;
- else
- {
- val[3] = vrp_int_const_binop (code, vr0.max, vr1.max);
- if (val[3] == NULL_TREE)
- sop = true;
- }
-
- if (sop)
- {
- set_value_range_to_varying (vr);
- return;
- }
-
- /* Set MIN to the minimum of VAL[i] and MAX to the maximum
- of VAL[i]. */
- min = val[0];
- max = val[0];
- for (i = 1; i < 4; i++)
- {
- if (!is_gimple_min_invariant (min)
- || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
- || !is_gimple_min_invariant (max)
- || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
- break;
-
- if (val[i])
- {
- if (!is_gimple_min_invariant (val[i])
- || (TREE_OVERFLOW (val[i])
- && !is_overflow_infinity (val[i])))
- {
- /* If we found an overflowed value, set MIN and MAX
- to it so that we set the resulting range to
- VARYING. */
- min = max = val[i];
- break;
- }
-
- if (compare_values (val[i], min) == -1)
- min = val[i];
-
- if (compare_values (val[i], max) == 1)
- max = val[i];
- }
- }
+ extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
+ return;
}
}
else if (code == TRUNC_MOD_EXPR)
@@ -2735,11 +2800,6 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
else
max = min = NULL_TREE;
}
- else
- {
- set_value_range_to_varying (vr);
- return;
- }
}
else
gcc_unreachable ();
@@ -2826,61 +2886,51 @@ extract_range_from_unary_expr_1 (value_range_t *vr,
value_range_t *vr0_, tree op0_type)
{
value_range_t vr0 = *vr0_;
- tree min, max;
- int cmp;
- /* If VR0 is UNDEFINED, so is the result. */
- if (vr0.type == VR_UNDEFINED)
- {
- set_value_range_to_undefined (vr);
- return;
- }
-
- /* Refuse to operate on certain unary expressions for which we
- cannot easily determine a resulting range. */
- if (code == FIX_TRUNC_EXPR
- || code == FLOAT_EXPR
- || code == CONJ_EXPR)
+ /* VRP only operates on integral and pointer types. */
+ if (!(INTEGRAL_TYPE_P (op0_type)
+ || POINTER_TYPE_P (op0_type))
+ || !(INTEGRAL_TYPE_P (type)
+ || POINTER_TYPE_P (type)))
{
set_value_range_to_varying (vr);
return;
}
- /* Refuse to operate on symbolic ranges, or if neither operand is
- a pointer or integral type. */
- if ((!INTEGRAL_TYPE_P (op0_type)
- && !POINTER_TYPE_P (op0_type))
- || (vr0.type != VR_VARYING
- && symbolic_range_p (&vr0)))
- {
- set_value_range_to_varying (vr);
- return;
- }
-
- /* If the expression involves pointers, we are only interested in
- determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
- if (POINTER_TYPE_P (type) || POINTER_TYPE_P (op0_type))
+ /* If VR0 is UNDEFINED, so is the result. */
+ if (vr0.type == VR_UNDEFINED)
{
- if (range_is_nonnull (&vr0))
- set_value_range_to_nonnull (vr, type);
- else if (range_is_null (&vr0))
- set_value_range_to_null (vr, type);
- else
- set_value_range_to_varying (vr);
+ set_value_range_to_undefined (vr);
return;
}
- /* Handle unary expressions on integer ranges. */
- if (CONVERT_EXPR_CODE_P (code)
- && INTEGRAL_TYPE_P (type)
- && INTEGRAL_TYPE_P (op0_type))
+ if (CONVERT_EXPR_CODE_P (code))
{
tree inner_type = op0_type;
tree outer_type = type;
+ /* If the expression evaluates to a pointer, we are only interested in
+ determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
+ if (POINTER_TYPE_P (type))
+ {
+ if (CONVERT_EXPR_CODE_P (code))
+ {
+ if (range_is_nonnull (&vr0))
+ set_value_range_to_nonnull (vr, type);
+ else if (range_is_null (&vr0))
+ set_value_range_to_null (vr, type);
+ else
+ set_value_range_to_varying (vr);
+ }
+ else
+ set_value_range_to_varying (vr);
+ return;
+ }
+
/* If VR0 is varying and we increase the type precision, assume
a full range for the following transformation. */
if (vr0.type == VR_VARYING
+ && INTEGRAL_TYPE_P (inner_type)
&& TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
{
vr0.type = VR_RANGE;
@@ -2933,20 +2983,7 @@ extract_range_from_unary_expr_1 (value_range_t *vr,
set_value_range_to_varying (vr);
return;
}
-
- /* Conversion of a VR_VARYING value to a wider type can result
- in a usable range. So wait until after we've handled conversions
- before dropping the result to VR_VARYING if we had a source
- operand that is VR_VARYING. */
- if (vr0.type == VR_VARYING)
- {
- set_value_range_to_varying (vr);
- return;
- }
-
- /* Apply the operation to each end of the range and see what we end
- up with. */
- if (code == NEGATE_EXPR)
+ else if (code == NEGATE_EXPR)
{
/* -X is simply 0 - X, so re-use existing code that also handles
anti-ranges fine. */
@@ -2955,17 +2992,35 @@ extract_range_from_unary_expr_1 (value_range_t *vr,
extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
return;
}
- else if (code == ABS_EXPR
- && !TYPE_UNSIGNED (type))
+ else if (code == ABS_EXPR)
{
+ tree min, max;
+ int cmp;
+
+ /* Pass through vr0 in the easy cases. */
+ if (TYPE_UNSIGNED (type)
+ || value_range_nonnegative_p (&vr0))
+ {
+ copy_value_range (vr, &vr0);
+ return;
+ }
+
+ /* For the remaining varying or symbolic ranges we can't do anything
+ useful. */
+ if (vr0.type == VR_VARYING
+ || symbolic_range_p (&vr0))
+ {
+ set_value_range_to_varying (vr);
+ return;
+ }
+
/* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
useful range. */
if (!TYPE_OVERFLOW_UNDEFINED (type)
&& ((vr0.type == VR_RANGE
&& vrp_val_is_min (vr0.min))
|| (vr0.type == VR_ANTI_RANGE
- && !vrp_val_is_min (vr0.min)
- && !range_includes_zero_p (&vr0))))
+ && !vrp_val_is_min (vr0.min))))
{
set_value_range_to_varying (vr);
return;
@@ -3077,6 +3132,18 @@ extract_range_from_unary_expr_1 (value_range_t *vr,
max = t;
}
}
+
+ cmp = compare_values (min, max);
+ if (cmp == -2 || cmp == 1)
+ {
+ /* If the new range has its limits swapped around (MIN > MAX),
+ then the operation caused one of them to wrap around, mark
+ the new range VARYING. */
+ set_value_range_to_varying (vr);
+ }
+ else
+ set_value_range (vr, vr0.type, min, max, NULL);
+ return;
}
else if (code == BIT_NOT_EXPR)
{
@@ -3088,69 +3155,15 @@ extract_range_from_unary_expr_1 (value_range_t *vr,
type, &minusone, &vr0);
return;
}
- else
+ else if (code == PAREN_EXPR)
{
- /* Otherwise, operate on each end of the range. */
- min = fold_unary_to_constant (code, type, vr0.min);
- max = fold_unary_to_constant (code, type, vr0.max);
-
- if (needs_overflow_infinity (type))
- {
- gcc_assert (code != NEGATE_EXPR && code != ABS_EXPR);
-
- /* If both sides have overflowed, we don't know
- anything. */
- if ((is_overflow_infinity (vr0.min)
- || TREE_OVERFLOW (min))
- && (is_overflow_infinity (vr0.max)
- || TREE_OVERFLOW (max)))
- {
- set_value_range_to_varying (vr);
- return;
- }
-
- if (is_overflow_infinity (vr0.min))
- min = vr0.min;
- else if (TREE_OVERFLOW (min))
- {
- if (supports_overflow_infinity (type))
- min = (tree_int_cst_sgn (min) >= 0
- ? positive_overflow_infinity (TREE_TYPE (min))
- : negative_overflow_infinity (TREE_TYPE (min)));
- else
- {
- set_value_range_to_varying (vr);
- return;
- }
- }
-
- if (is_overflow_infinity (vr0.max))
- max = vr0.max;
- else if (TREE_OVERFLOW (max))
- {
- if (supports_overflow_infinity (type))
- max = (tree_int_cst_sgn (max) >= 0
- ? positive_overflow_infinity (TREE_TYPE (max))
- : negative_overflow_infinity (TREE_TYPE (max)));
- else
- {
- set_value_range_to_varying (vr);
- return;
- }
- }
- }
+ copy_value_range (vr, &vr0);
+ return;
}
- cmp = compare_values (min, max);
- if (cmp == -2 || cmp == 1)
- {
- /* If the new range has its limits swapped around (MIN > MAX),
- then the operation caused one of them to wrap around, mark
- the new range VARYING. */
- set_value_range_to_varying (vr);
- }
- else
- set_value_range (vr, vr0.type, min, max, NULL);
+ /* For unhandled operations fall back to varying. */
+ set_value_range_to_varying (vr);
+ return;
}
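The new extract_range_from_multiplicative_op_1 helper factored out above implements the usual interval rule: for MULT_EXPR (and the division and shift codes it also handles) the bounds of the result come from the four endpoint combinations MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1, with overflow or infinities forcing VR_VARYING. A standalone worked sketch of just the cross-product step for multiplication:

    #include <stdio.h>

    /* Range of [a,b] * [c,d]: take min and max of the four endpoint
       products (overflow handling omitted in this sketch). */
    static void
    mult_range (long a, long b, long c, long d, long *lo, long *hi)
    {
      long v[4] = { a * c, a * d, b * c, b * d };
      *lo = *hi = v[0];
      for (int i = 1; i < 4; i++)
        {
          if (v[i] < *lo) *lo = v[i];
          if (v[i] > *hi) *hi = v[i];
        }
    }

    int
    main (void)
    {
      long lo, hi;
      mult_range (-2, 3, 4, 5, &lo, &hi);
      printf ("[%ld, %ld]\n", lo, hi);   /* prints [-10, 15] */
      return 0;
    }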
diff --git a/gcc/tree.c b/gcc/tree.c
index d20751a9c7b..3eaf2f90ea2 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -3784,8 +3784,7 @@ build2_stat (enum tree_code code, tree tt, tree arg0, tree arg1 MEM_STAT_DECL)
if (code == POINTER_PLUS_EXPR && arg0 && arg1 && tt)
gcc_assert (POINTER_TYPE_P (tt) && POINTER_TYPE_P (TREE_TYPE (arg0))
- && INTEGRAL_TYPE_P (TREE_TYPE (arg1))
- && useless_type_conversion_p (sizetype, TREE_TYPE (arg1)));
+ && ptrofftype_p (TREE_TYPE (arg1)));
t = make_node_stat (code PASS_MEM_STAT);
TREE_TYPE (t) = tt;
diff --git a/gcc/tree.h b/gcc/tree.h
index c8d292a3d4f..820431fae0b 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -5313,6 +5313,25 @@ truth_value_p (enum tree_code code)
|| code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
}
+/* Return whether TYPE is a type suitable for an offset for
+ a POINTER_PLUS_EXPR. */
+static inline bool
+ptrofftype_p (tree type)
+{
+ return (INTEGRAL_TYPE_P (type)
+ && TYPE_PRECISION (type) == TYPE_PRECISION (sizetype)
+ && TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));
+}
+
+/* Return OFF converted to a pointer offset type suitable as offset for
+ POINTER_PLUS_EXPR. Use location LOC for this conversion. */
+static inline tree
+convert_to_ptrofftype_loc (location_t loc, tree off)
+{
+ return fold_convert_loc (loc, sizetype, off);
+}
+#define convert_to_ptrofftype(t) convert_to_ptrofftype_loc (UNKNOWN_LOCATION, t)
+
/* Build and fold a POINTER_PLUS_EXPR at LOC offsetting PTR by OFF. */
static inline tree
fold_build_pointer_plus_loc (location_t loc, tree ptr, tree off)
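The new ptrofftype_p predicate in tree.h accepts any integral type whose precision and signedness match sizetype, and convert_to_ptrofftype_loc converts an offset to such a type before it is used in a POINTER_PLUS_EXPR. As a loose user-level analogy (size_t standing in for sizetype; the macro below is purely illustrative, not a GCC interface), the same check can be written against size_t:

    #include <stddef.h>
    #include <stdio.h>

    /* "Pointer-offset type": same width and same signedness as size_t. */
    #define PTROFFTYPE_P(T) \
      (sizeof (T) == sizeof (size_t) && ((T) -1 > 0) == ((size_t) -1 > 0))

    int
    main (void)
    {
      printf ("size_t:    %d\n", PTROFFTYPE_P (size_t));     /* 1 */
      printf ("ptrdiff_t: %d\n", PTROFFTYPE_P (ptrdiff_t));  /* 0: signed */
      printf ("int:       %d\n", PTROFFTYPE_P (int));        /* 0: signed, narrower on LP64 */
      return 0;
    }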
diff --git a/gcc/varasm.c b/gcc/varasm.c
index 88aea9bb354..ca568136cb3 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -4628,9 +4628,10 @@ output_constant (tree exp, unsigned HOST_WIDE_INT size, unsigned int align)
static unsigned HOST_WIDE_INT
array_size_for_constructor (tree val)
{
- tree max_index, i;
+ tree max_index;
unsigned HOST_WIDE_INT cnt;
tree index, value, tmp;
+ double_int i;
/* This code used to attempt to handle string constants that are not
arrays of single-bytes, but nothing else does, so there's no point in
@@ -4652,14 +4653,15 @@ array_size_for_constructor (tree val)
/* Compute the total number of array elements. */
tmp = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (val)));
- i = size_binop (MINUS_EXPR, fold_convert (sizetype, max_index),
- fold_convert (sizetype, tmp));
- i = size_binop (PLUS_EXPR, i, size_one_node);
+ i = double_int_sub (tree_to_double_int (max_index), tree_to_double_int (tmp));
+ i = double_int_add (i, double_int_one);
/* Multiply by the array element unit size to find number of bytes. */
- i = size_binop (MULT_EXPR, i, TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (val))));
+ i = double_int_mul (i, tree_to_double_int
+ (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (val)))));
- return tree_low_cst (i, 1);
+ gcc_assert (double_int_fits_in_uhwi_p (i));
+ return i.low;
}
/* Other datastructures + helpers for output_constructor. */
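The varasm.c change computes the constructor size as (max_index - min_index + 1) * element_size in double_int and asserts that the result fits an unsigned HOST_WIDE_INT, instead of folding sizetype trees. A standalone sketch of the same computation, using GCC's 128-bit integer extension as a stand-in for double_int and plain integers for the tree operands:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t
    array_size_sketch (int64_t min_index, int64_t max_index, uint64_t elt_size)
    {
      /* Element count, then byte size, computed in a wider type.  */
      __uint128_t n = (__uint128_t) ((__int128) max_index - (__int128) min_index) + 1;
      __uint128_t bytes = n * elt_size;
      assert (bytes <= UINT64_MAX);   /* double_int_fits_in_uhwi_p analogue */
      return (uint64_t) bytes;
    }

    int
    main (void)
    {
      /* Indexes 0..9 of a 4-byte element type: 40 bytes.  */
      printf ("%llu\n", (unsigned long long) array_size_sketch (0, 9, 4));
      return 0;
    }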