diff options
author | bstarynk <bstarynk@138bc75d-0d04-0410-961f-82ee72b054a4> | 2016-02-10 20:16:25 +0000 |
---|---|---|
committer | bstarynk <bstarynk@138bc75d-0d04-0410-961f-82ee72b054a4> | 2016-02-10 20:16:25 +0000 |
commit | efdfa4676cd3506381bd987f7365767bb05c934a (patch) | |
tree | e8666879ccfb59066bec1568f2ac1b5ce42e75a1 /gcc | |
parent | eb76579392e0d61b9f33c90fdd8b620e563d0a12 (diff) | |
download | gcc-efdfa4676cd3506381bd987f7365767bb05c934a.tar.gz |
2016-02-10 Basile Starynkevitch <basile@starynkevitch.net>
{{merging with even more of GCC 6, using subversion 1.9
svn merge -r227701:227820 ^/trunk
}}
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/melt-branch@233307 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc')
171 files changed, 4110 insertions, 2025 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog index abb1d6d0e53..cf5461be6d2 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,665 @@ +2015-09-16 Eric Botcazou <ebotcazou@adacore.com> + + * tree-ssa-sccvn.c (ao_ref_init_from_vn_reference): Use offset_int for + offset and size computations instead of HOST_WIDE_INT. + +2015-09-16 Richard Biener <rguenther@suse.de> + + PR middle-end/67442 + * fold-const.c (extract_muldiv_1): Properly extend multiplication + result before builting a tree via wide_int_to_tree. + +2015-09-16 Mikhail Maltsev <maltsevm@gmail.com> + + * Makefile.in: Add memory-block.cc + (pool_allocator::initialize): Use fixed block size. + (pool_allocator::release): Use memory_block_pool. + (pool_allocator::allocate): Likewise. + * asan.c (asan_mem_ref_pool): Adjust to use common block size in all + object pools. + * cfg.c (initialize_original_copy_tables): Likewise. + * cselib.c (elt_list_pool, elt_loc_list_pool, + cselib_val_pool): Likewise. + * df-problems.c (df_chain_alloc): Likewise. + * df-scan.c (df_scan_alloc): Likewise. + * dse.c (cse_store_info_pool, rtx_store_info_pool, + read_info_type_pool, insn_info_type_pool, bb_info_pool, + group_info_pool, deferred_change_pool): Likewise. + * et-forest.c (et_nodes, et_occurrences): Likewise. + * ipa-cp.c (ipcp_cst_values_pool, ipcp_sources_pool, + ipcp_agg_lattice_pool): Likewise. + * ipa-inline-analysis.c (edge_predicate_pool): Likewise. + * ipa-profile.c (histogram_pool): Likewise. + * ipa-prop.c (ipa_refdesc_pool): Likewise. + * ira-build.c (live_range_pool, allocno_pool, object_pool, + initiate_cost_vectors, pref_pool, copy_pool): Likewise. + * ira-color.c (update_cost_record_pool): Likewise. + * lra-lives.c (lra_live_range_pool): Likewise. + * lra.c (lra_insn_reg_pool, lra_copy_pool): Likewise. + * memory-block.cc: New file. + * memory-block.h: New file. + * regcprop.c (queued_debug_insn_change_pool): Use common block size. + * sched-deps.c (sched_deps_init): Likewise. 
+ * sel-sched-ir.c (sched_lists_pool): Likewise. + * stmt.c (expand_case, expand_sjlj_dispatch_table): Likewise. + * tree-sra.c (access_pool): Likewise. + * tree-ssa-math-opts.c (pass_cse_reciprocals::execute): Likewise. + * tree-ssa-pre.c (pre_expr_pool, bitmap_set_pool): Likewise. + * tree-ssa-reassoc.c (operand_entry_pool): Likewise. + * tree-ssa-sccvn.c (allocate_vn_table): Likewise. + * tree-ssa-strlen.c (strinfo_pool): Likewise. + * tree-ssa-structalias.c (variable_info_pool): Likewise. + * var-tracking.c (attrs_def_pool, var_pool, valvar_pool, + location_chain_pool, shared_hash_pool, loc_exp_dep_pool): Likewise. + +2015-09-15 Max Filippov <jcmvbkbc@gmail.com> + + * config/xtensa/xtensa.h (DWARF_ALT_FRAME_RETURN_COLUMN): New + definition. + (DWARF_FRAME_REGISTERS): Reserve space for one extra register in + call0 ABI. + +2015-09-15 Max Filippov <jcmvbkbc@gmail.com> + + * config/xtensa/xtensa.c (xtensa_call_tls_desc): Use a10 or a2 + to pass TLS call argument, according to current ABI. + * config/xtensa/xtensa.md (tls_call pattern): Use callx8 or + callx0 for TLS call, according to current ABI. + +2015-09-15 Eric Botcazou <ebotcazou@adacore.com> + + * tree-eh.c (lower_try_finally_dup_block): Clear location information + on stack restore statements. + (decide_copy_try_finally): Do not consider a stack restore statement as + coming from sources. + +2015-09-15 Uros Bizjak <ubizjak@gmail.com> + + * config/alpha/alpha.c (alpha_expand_block_clear): Use + HOST_WIDE_INT_M1U instead of ~(HOST_WIDE_INT)0 when shifting. + +2015-09-15 Jeff Law <law@redhat.com> + + PR tree-optimization/47679 + * tree-ssa-dom.c (expr_hash_elt): Now a class with ctors/dtors, + methods and private members. + (avail_exprs_stack): Similarly. Change type of global + from a pair of expr_hash_elt_t to the new class. + (expr_elt_hasher::hash): Corresponding changes. + (expr_elt_hasher::equal): Similarly. + (avail_expr_hash): Similarly. + (pass_dominator::execute): Similarly. 
+ (dom_opt_dom_walker::thread_across_edge): Similarly. + (record_cond): Similarly. + (dom_opt_dom_walker::before_dom_children): Similarly. + (dom_opt_dom_walker::after_dom_children): Similarly. + (lookup_avail_expr): Likewise. + (initialize_hash_element): Now a expr_hash_elt constructor. + (initialize_hash_element_from_expr): Similarly. + (free_expr_hash_elt_contents): Now a dtor for class expr_hash_elt. + (free_expr_hash_elt): Call dtor for the element. + (remove_local_expressions_from_table): Now the "pop_to_marker" + method in the available_exprs_stack class. + (avail_expr_stack::record_expr): Method factored out. + (print_expr_hash_elt): Now a method in the expr_hash_elt class. + Fix formatting. + (hashable_expr_equal_p): Fix formatting. + +2015-09-15 David Malcolm <dmalcolm@redhat.com> + + * input.h (location_get_source_line): Drop "expanded_location" + param in favor of a file and line number. + * input.c (location_get_source_line): Likewise. + (dump_location_info): Update for change in signature of + location_get_source_line. + * diagnostic.c (diagnostic_print_caret_line): Likewise. + +2015-09-15 Eric Botcazou <ebotcazou@adacore.com> + + * defaults.h (STACK_OLD_CHECK_PROTECT): Adjust for -fno-exceptions. + Bump to 4KB for SJLJ exceptions. + (STACK_CHECK_PROTECT): Likewise. Bump to 8KB for SJLJ exceptions. + * doc/tm.texi.in (STACK_CHECK_PROTECT): Adjust. + * doc/tm.texi: Regenerate. + +2015-09-15 Kyrylo Tkachov <kyrylo.tkachov@arm.com> + + * config/arm/arm.c (arm_gen_constant): Use HOST_WIDE_INT_M1U instead + of -1 when shifting. Change type of val to unsigned HOST_WIDE_INT. + Update prototype. + +2015-09-15 Richard Biener <rguenther@suse.de> + + PR tree-optimization/67470 + * tree-ssa-loop-im.c (execute_sm_if_changed): Preserve PHI + structure for PHI hoisting by inserting a forwarder block + if appropriate. + +2015-09-15 Christian Bruel <christian.bruel@st.com> + + * config/arm/arm.c (TARGET_OPTION_PRINT): Define. + (arm_option_print): New function. 
+ +2015-09-15 Christian Bruel <christian.bruel@st.com> + + PR target/52144 + * config/arm/arm.c (arm_option_params_internal): Remove opts parameter. + * config/arm/arm-c.c (arm_cpu_builtins): Declare static. + Remove flags parameter. + * config/arm/arm.h (TARGET_32BIT_P, TARGET_ARM_QBIT_P) + (TARGET_ARM_SAT_P, TARGET_IDIV_P, TARGET_HAVE_LDREX_P) + (TARGET_HAVE_LDREXBH_P, TARGET_HAVE_LDREXD_P, TARGET_DSP_MULTIPLY_P) + (TARGET_ARM_FEATURE_LDREX_P, TARGET_INT_SIMD_P): Redefine macros with... + (TARGET_ARM_SAT, TARGET_IDIV, TARGET_HAVE_LDREX) + (TARGET_HAVE_LDREXBH, TARGET_HAVE_LDREXD, TARGET_ARM_FEATURE_LDREX) + (TARGET_DSP_MULTIPLY, TARGET_INT_SIMD): Redefined macros. + * gcc/config/arm/arm-protos.h (arm_cpu_builtins): Remove declaration. + +2015-09-15 Alan Lawrence <alan.lawrence@arm.com> + + * config/aarch64/aarch64.h (AARCH64_VALID_SIMD_DREG_MODE): New. + + * config/aarch64/aarch64.c (aarch64_array_mode_supported_p): Add + AARCH64_VALID_SIMD_DREG_MODE. + +2015-09-15 Alan Lawrence <alan.lawrence@arm.com> + + * config/aarch64/aarch64-simd.md (aarch64_ld2r<mode>, + aarch64_ld3r<mode>, aarch64_ld4r<mode>): Combine together, making... + (aarch64_simd_ld<VSTRUCT:nregs>r<VALLDIF:mode>): ...this. + (aarch64_ld2_lane<mode>, aarch64_ld3_lane<mode>, + aarch64_ld4_lane<mode>): Combine together, making... + (aarch64_ld<VSTRUCT:nregs>_lane<VALLDIF:mode>): ...this. + (aarch64_st2_lane<mode>, aarch64_st3_lane<mode>, + aarch64_st4_lane<mode>): Combine together, making... + (aarch64_st<VSTRUCT:nregs>_lane<VALLDIF:mode>): ...this. + * config/aarch64/iterators.md (nregs): Add comment. + +2015-09-15 Alan Lawrence <alan.lawrence@arm.com> + + * config/aarch64/aarch64-simd.md (aarch64_simd_ld2r<mode>): + Change operand mode from <V_TWO_ELEM> to BLK. + (aarch64_vec_load_lanesoi_lane<mode>): Likewise. + (aarch64_vec_store_lanesoi_lane<mode): Likewise + (aarch64_ld2r<mode>): Generate MEM rtx with BLKmode, call set_mem_size. + (aarch64_ld2_lane<mode>): Likewise. 
+ (aarch64_st2_lane<VQ:mode>): Likewise. + * config/aarch64/iterators.md (V_TWO_ELEM): Remove. + +2015-09-15 Alan Lawrence <alan.lawrence@arm.com> + + * config/aarch64/aarch64-simd.md (aarch64_simd_ld4r<mode>): + Change operand mode from <V_FOUR_ELEM> to BLK. + (aarch64_vec_load_lanesxi_lane<mode>): Likewise. + (aarch64_vec_store_lanesxi_lane<mode): Likewise. + (aarch64_ld4r<mode>): Generate MEM rtx with BLKmode, call set_mem_size. + (aarch64_ld4_lane<mode>): Likewise. + (aarch64_st4_lane<mode>): Likewise. + * config/aarch64/iterators.md (V_FOUR_ELEM): Remove. + +2015-09-15 Richard Biener <rguenther@suse.de> + + PR middle-end/67563 + * gimple-fold.c (gimplify_and_update_call_from_tree): Do not + transfer EH info from old to new stmt. + (replace_call_with_value): Likewise. + (replace_call_with_call_and_fold): Likewise. + (gimple_fold_builtin_memory_op): Likewise. + (gimple_fold_builtin_memset): Likewise. + (gimple_fold_builtin_stpcpy): Likewise. + (gimple_fold_call): Likewise. + +2015-09-15 Alan Lawrence <alan.lawrence@arm.com> + + * config/aarch64/aarch64.c (aarch64_simd_attr_length_rglist): Update + comment. + * config/aarch64/aarch64-builtins.c (ei_UP): Remove. + (aarch64_simd_intEI_type_node): Likewise. + (aarch64_simd_builtin_std_type): Remove EImode case. + (aarch64_init_simd_builtin_types): Don't create/add intEI_type_node. + * config/aarch64/aarch64-modes.def: Remove EImode. + +2015-09-15 Alan Lawrence <alan.lawrence@arm.com> + + * config/aarch64/aarch64-simd.md (aarch64_simd_ld3r<mode>): + Change operand mode from <V_THREE_ELEM> to BLK. + (aarch64_vec_load_lanesci_lane<mode>): Likewise. + (aarch64_vec_store_lanesci_lane<mode>): Likewise. + (aarch64_ld3r<mode>): Generate MEM rtx with BLKmode, call set_mem_size. + (aarch64_ld3_lane<mode>): Likewise. + (aarch64_st3_lane<mode>): Likewise. + * config/aarch64/iterators.md (V_THREE_ELEM): Remove. 
+ +2015-09-15 Alan Lawrence <alan.lawrence@arm.com> + + * config/aarch64/aarch64-simd.md + (aarch64_ld2<mode>_dreg VD & DX, aarch64_st2<mode>_dreg VD & DX ): + Change all TImode operands to BLKmode. + (aarch64_ld3<mode>_dreg VD & DX, aarch64_st3<mode>_dreg VD & DX): + Change all EImode operands to BLKmode. + (aarch64_ld4<mode>_dreg VD & DX, aarch64_st4<mode>_dreg VD & DX): + Change all OImode operands to BLKmode. + + (aarch64_ld<VSTRUCT:nregs><VDC:mode>): Generate MEM rtx with BLKmode + and call set_mem_size. + (aarch64_st<VSTRUCT:nregs><VDC:mode>): Likewise. + + * config/aarch64/iterators.md (VSTRUCT_DREG): Remove. + +2015-09-15 Alan Lawrence <alan.lawrence@arm.com> + + * config/aarch64/aarch64-simd.md (vec_store_lanesoi_lane<mode>): Rename + to... + (aarch64_vec_store_lanesoi_lane<mode>): ...this. + + (vec_store_lanesci_lane<mode>): Rename to... + (aarch64_vec_store_lanesci_lane<mode>): ...this. + + (vec_store_lanesxi_lane<mode>): Rename to... + (aarch64_vec_store_lanesxi_lane<mode>): ...this. + + (aarch64_st2_lane<mode>, aarch64_st3_lane<mode>, + aarch64_st4_lane<mode>): Follow renaming. + +2015-09-15 Andreas Krebbel <krebbel@linux.vnet.ibm.com> + + * config/s390/s390.c (s390_const_operand_ok): Add missing + brackets. + +2015-09-15 Richard Biener <rguenther@suse.de> + + PR lto/67568 + * lto-streamer.h (lto_location_cache::current_sysp): Properly + initialize. + * lto-streamer-out.c (clear_line_info): Likewise. + +2015-09-15 Richard Biener <rguenther@suse.de> + + * doc/match-and-simplify.texi: Fix wording. + +2015-09-15 Bin Cheng <bin.cheng@arm.com> + + * tree-ssa-loop-ivopts.c (get_shiftadd_cost): Strip + unnecessary type conversion in op1. + +2015-09-14 Segher Boessenkool <segher@kernel.crashing.org> + + * shrink-wrap.c (requires_stack_frame_p): Fix formatting. + (dup_block_and_redirect): Delete function. + (can_dup_for_shrink_wrapping): New function. + (fix_fake_fallthrough_edge): New function. + (try_shrink_wrapping): Rewrite function. 
+ (convert_to_simple_return): Call fix_fake_fallthrough_edge. + +2015-09-14 Rich Felker <dalias@libc.org> + + * configure.ac: Change target pattern for sh TLS support + test from "sh[34]-*-*" to "sh[123456789lbe]*-*-*". + * configure: Regenerate. + +2015-09-14 Jeff Law <law@redhat.com> + + PR tree-optimization/47679 + * tree-ssa-dom.c (avail_expr_hash): Pass a pointer to a real + type rather than void *. + +2015-09-14 Manuel López-Ibáñez <manu@gcc.gnu.org> + + PR fortran/67460 + * diagnostic.c (diagnostic_initialize): Do not set + some_warnings_are_errors. + (diagnostic_finish): Use DK_WERROR count instead. + (diagnostic_report_diagnostic): Do not set + some_warnings_are_errors. + * diagnostic.h (struct diagnostic_context): Remove + some_warnings_are_errors. + +2015-09-14 Richard Sandiford <richard.sandiford@arm.com> + + * config/sparc/predicates.md (const_all_ones_operand): Use + CONSTM1_RTX to simplify definition. + +2015-09-14 Oleg Endo <olegendo@gcc.gnu.org> + + PR target/67061 + * config/sh/sh-protos.h (sh_find_set_of_reg): Simplify for-loop. + Handle call insns. + +2015-09-14 Chung-Lin Tang <cltang@codesourcery.com> + + * lto-wrapper.c (merge_and_complain): Add OPT_fdiagnostics_show_caret, + OPT_fdiagnostics_show_option, OPT_fdiagnostics_show_location_, and + OPT_fshow_column to handled saved option cases. + (append_compiler_options): Do not skip the above added options. + +2015-09-14 Ramana Radhakrishnan <ramana.radhakrishnan@arm.com> + + PR target/63304 + * config/aarch64/aarch64.c (aarch64_expand_mov_immediate): Handle + nopcrelative_literal_loads. + (aarch64_classify_address): Likewise. + (aarch64_constant_pool_reload_icode): Define. + (aarch64_secondary_reload): Handle secondary reloads for + literal pools. + (aarch64_override_options): Handle nopcrelative_literal_loads. + (aarch64_classify_symbol): Handle nopcrelative_literal_loads. + * config/aarch64/aarch64.md (aarch64_reload_movcp<GPF_TF:mode><P:mode>): + Define. 
+ (aarch64_reload_movcp<VALL:mode><P:mode>): Likewise. + * config/aarch64/aarch64.opt (mpc-relative-literal-loads): New option. + * config/aarch64/predicates.md (aarch64_constant_pool_symref): New + predicate. + * doc/invoke.texi (mpc-relative-literal-loads): Document. + +2015-09-14 John David Anglin <danglin@gcc.gnu.org> + + PR middle-end/67401 + * optabs.c (expand_atomic_compare_and_swap): Move result of emitting + sync_compare_and_swap_optab libcall to target_oval. + +2015-09-14 Marek Polacek <polacek@redhat.com> + + * rtlanal.c (split_double): Cast to unsigned when shifting a negative + value. + * sched-int.h (UNKNOWN_DEP_COST): Likewise. + +2015-09-11 Mark Wielaard <mjw@redhat.com> + + PR c/28901 + * toplev.c (check_global_declaration): Check and use + warn_unused_const_variable. + * doc/invoke.texi (Warning Options): Add -Wunused-const-variable. + (-Wunused-variable): Remove non-constant. For C implies + -Wunused-const-variable. + (-Wunused-const-variable): New. + +2015-09-14 Richard Biener <rguenther@suse.de> + + * doc/match-and-simplify.texi: Update for changed syntax + of inner ifs and the new switch expression. + +2015-09-14 Yuri Rumyantsev <ysrumyan@gmail.com> + + * config/i386/haswell.md: New file describing Haswell pipeline. + * config/i386/i386.c (processor_alias_table): Use CPU_HASWELL for + haswell-like processors. + (ix86_reassociation_width): Increase reassociation width for 64-bit + Haswell processor family. + * config/i386/i386.md: Introduce haswell cpu and include new md file. + +2015-09-14 Richard Biener <rguenther@suse.de> + + * doc/match-and-simplify.texi: Fixup some formatting issues + and document the 's' flag. + +2015-09-13 Olivier Hainque <hainque@adacore.com> + Eric Botcazou <ebotcazou@adacore.com> + + * config.gcc (visium-*-*): Enable --with-cpu option, accept gr5 and + gr6 as possible values, defaulting to gr5. Set target_cpu_default2. + * config/visium/visium.h (OPTION_DEFAULT_SPECS): Define. + (TARGET_CPU_gr5): Likewise. 
+ (TARGET_CPU_gr6): Likewise. + (MULTILIB_DEFAULTS): Likewise. + * config/visium/t-visium (MULTILIB_OPTIONS): Request distinct variants + for mcpu=gr5 and mcpu=gr6. + (MULTILIB_DIRNAMES): Adjust accordingly. + +2015-09-13 Trevor Saunders <tbsaunde+gcc@tbsaunde.org> + + * tree-ssa-loop-im.c (mem_ref_loc_p): Remove typedef. + (mem_ref_p): Likewise. + (outermost_indep_loop): Adjust. + (mem_ref_in_stmt): Likewise. + (determine_max_movement): Likewise. + (mem_ref_alloc): Likewise. + (record_mem_ref_loc): Likewise. + (set_ref_stored_in_loop): Likewise. + (mark_ref_stored): Likewise. + (gather_mem_refs_stmt): Likewise. + (mem_refs_may_alias_p): Likewise. + (for_all_locs_in_loop): Likewise. + (struct rewrite_mem_ref_loc): Likewise. + (rewrite_mem_refs): Likewise. + (struct first_mem_ref_loc_1): Likewise. + (first_mem_ref_loc): Likewise. + (struct sm_set_flag_if_changed): Likewise. + (execute_sm_if_changed_flag_set): Likewise. + (execute_sm): Likewise. + (hoist_memory_references): + (struct ref_always_accessed): Likewise. + (ref_always_accessed_p): Likewise. + (refs_independent_p): Likewise. + (record_dep_loop): Likewise. + (ref_indep_loop_p_1): Likewise. + (ref_indep_loop_p_2): Likewise. + (ref_indep_loop_p): Likewise. + (can_sm_ref_p): Likewise. + (find_refs_for_sm): Likewise. + (tree_ssa_lim_finalize): Likewise. + +2015-09-13 Trevor Saunders <tbsaunde+gcc@tbsaunde.org> + + * dwarf2out.c (dw_attr_ref): Remove typedef. + (dw_line_info_ref): Likewise. + (pubname_ref): Likewise. + (dw_ranges_ref): Likewise. + (dw_ranges_by_label_ref): Likewise. + (comdat_type_node_ref): Likewise. + (get_AT): Adjust. + (get_AT_low_pc): Likewise. + (get_AT_hi_pc): Likewise. + (get_AT_string): Likewise. + (get_AT_flag): Likewise. + (get_AT_unsigned): Likewise. + (get_AT_ref): Likewise. + (get_AT_file): Likewise. + (remove_AT): Likewise. + (print_die): Likewise. + (check_die): Likewise. + (die_checksum): Likewise. + (attr_checksum_ordered): Likewise. + (struct checksum_attributes): Likewise. 
+ (collect_checksum_attributes): Likewise. + (die_checksum_ordered): Likewise. + (same_die_p): Likewise. + (is_declaration_die): Likewise. + (clone_die): Likewise. + (clone_as_declaration): Likewise. + (copy_declaration_context): Likewise. + (break_out_comdat_types): Likewise. + (copy_decls_walk): Likewise. + (output_location_lists): Likewise. + (external_ref_hasher::hash): Likewise. + (optimize_external_refs_1): Likewise. + (build_abbrev_table): Likewise. + (size_of_die): Likewise. + (unmark_all_dies): Likewise. + (size_of_pubnames): Likewise. + (output_die_abbrevs): Likewise. + (output_die): Likewise. + (output_pubnames): Likewise. + (add_ranges_num): Likewise. + (add_ranges_by_labels): Likewise. + (add_high_low_attributes): Likewise. + (gen_producer_string): Likewise. + (dwarf2out_set_name): Likewise. + (new_line_info_table): Likewise. + (prune_unused_types_walk_attribs): Likewise. + (prune_unused_types_update_strings): Likewise. + (prune_unused_types): Likewise. + (resolve_addr): Likewise. + (optimize_location_lists_1): Likewise. + (index_location_lists): Likewise. + (dwarf2out_finish): Likewise. + +2015-09-13 Trevor Saunders <tbsaunde+gcc@tbsaunde.org> + + * dwarf2cfi.c (dw_trace_info_ref): Remove typedef. + +2015-09-13 Trevor Saunders <tbsaunde+gcc@tbsaunde.org> + + * tree-vrp.c (struct assert_locus_d): Rename to assert_locus. + (dump_asserts_for): Adjust. + (register_new_assert_for): Likewise. + (process_assert_insertions): Likewise. + (insert_range_assertions): Likewise. + +2015-09-13 Trevor Saunders <tbsaunde+gcc@tbsaunde.org> + + * tree-ssa-ter.c (temp_expr_table_d): Rename to temp_expr_table + and remove typedef. + (new_temp_expr_table): Adjust. + (free_temp_expr_table): Likewise. + (version_to_be_replaced_p): Likewise. + (make_dependent_on_partition): Likewise. + (add_to_partition_kill_list): Likewise. + (remove_from_partition_kill_list): Likewise. + (add_dependence): Likewise. + (finished_with_expr): Likewise. + (process_replaceable): Likewise. 
+ (kill_expr): Likewise. + (kill_virtual_exprs): Likewise. + (mark_replaceable): Likewise. + (find_replaceable_in_bb): Likewise. + (find_replaceable_exprs): Likewise. + (debug_ter): Likewise. + +2015-09-13 Trevor Saunders <tbsaunde+gcc@tbsaunde.org> + + * bt-load.c (struct btr_def_group): Rename from btr_def_group_s. + (struct btr_user): Rename from btr_user_s. + (struct btr_def): Rename from btr_def_s. + (find_btr_def_group): Adjust. + (add_btr_def): Likewise. + (new_btr_user): Likewise. + (note_other_use_this_block): Likewise. + (compute_defs_uses_and_gen): Likewise. + (link_btr_uses): Likewise. + (build_btr_def_use_webs): Likewise. + (block_at_edge_of_live_range_p): Likewise. + (btr_def_live_range): Likewise. + (combine_btr_defs): Likewise. + (move_btr_def): Likewise. + (migrate_btr_def): Likewise. + (migrate_btr_defs): Likewise. + +2015-09-13 Trevor Saunders <tbsaunde+gcc@tbsaunde.org> + + * var-tracking.c (shared_hash_def): Rename to shared_hash. + (shared_hash): Remove typedef. + (struct dataflow_set): Adjust. + (shared_hash_unshare): Likewise. + (dataflow_set_merge): Likewise. + (vt_initialize): Likewise. + (vt_finalize): Likewise. + +2015-09-13 Trevor Saunders <tbsaunde+gcc@tbsaunde.org> + + * var-tracking.c (struct location_chain): Rename from + location_chain_def. + (struct variable_part): Adjust. + (variable_htab_free): Likewise. + (unshare_variable): Likewise. + (get_init_value): Likewise. + (get_addr_from_local_cache): Likewise. + (drop_overlapping_mem_locs): Likewise. + (val_reset): Likewise. + (struct variable_union_info): Likewise. + (variable_union): Likewise. + (find_loc_in_1pdv): Likewise. + (insert_into_intersection): Likewise. + (intersect_loc_chains): Likewise. + (canonicalize_loc_order_check): Likewise. + (canonicalize_values_mark): Likewise. + (canonicalize_values_star): Likewise. + (canonicalize_vars_star): Likewise. + (variable_merge_over_cur): Likewise. + (remove_duplicate_values): Likewise. + (variable_post_merge_new_vals): Likewise. 
+ (variable_post_merge_perm_vals): Likewise. + (find_mem_expr_in_1pdv): Likewise. + (dataflow_set_preserve_mem_locs): Likewise. + (dataflow_set_remove_mem_locs): Likewise. + (variable_part_different_p): Likewise. + (onepart_variable_different_p): Likewise. + (find_src_set_src): Likewise. + (dump_var): Likewise. + (set_slot_part): Likewise. + (clobber_slot_part): Likewise. + (delete_slot_part): Likewise. + (vt_expand_var_loc_chain): Likewise. + (emit_note_insn_var_location): Likewise. + (vt_finalize): Likewise. + +2015-09-13 Trevor Saunders <tbsaunde+gcc@tbsaunde.org> + + * dse.c (store_info_t): Remove typedef. + (group_info_t): Likewise. + (const_group_info_t): Likewise. + (deferred_change_t): Likewise. + (get_group_info): Adjust. + (free_store_info): Likewise. + (canon_address): Likewise. + (clear_rhs_from_active_local_stores): Likewise. + (record_store): Likewise. + (replace_read): Likewise. + (check_mem_read_rtx): Likewise. + (scan_insn): Likewise. + (remove_useless_values): Likewise. + (dse_step1): Likewise. + (dse_step2_init): Likewise. + (dse_step2_nospill): Likewise. + (scan_stores_nospill): Likewise. + (scan_reads_nospill): Likewise. + (dse_step3_exit_block_scan): Likewise. + (dse_step3): Likewise. + (dse_step5_nospill): Likewise. + (dse_step6): Likewise. + +2015-09-13 Trevor Saunders <tbsaunde+gcc@tbsaunde.org> + + * alias.c (alias_set_entry_d): Rename to alias_set_entry. + (alias_set_entry): Remove typedef. + (alias_set_subset_of): Adjust. + (alias_sets_conflict_p): Likewise. + (init_alias_set_entry): Likewise. + (get_alias_set): Likewise. + (new_alias_set): Likewise. + (record_alias_subset): Likewise. + +2015-09-13 Gerald Pfeifer <gerald@pfeifer.com> + + * doc/install.texi (Downloading the source): Mark up + contrib/download_prerequisites properly and drop leading "./". + +2015-09-12 Trevor Saunders <tbsaunde+gcc@tbsaunde.org> + + * config/arc/arc.h: Remove define of STRUCT_VALUE. + * config/lm32/lm32.h: Likewise. + * config/mep/mep.h: Likewise. 
+ * config/visium/visium.h: Likewise. + * system.h: Poison STRUCT_VALUE macro. + +2015-09-12 John David Anglin <danglin@gcc.gnu.org> + + * config/pa/pa.c (pa_output_move_double): Enhance to handle HIGH + CONSTANT_P operands. + +2015-09-11 David S. Miller <davem@davemloft.net> + + * config/sparc/constraints.md: Make "U" constraint a real register + constraint. + * config/sparc/sparc.c (TARGET_LRA_P): Define. + (D_MODES, DF_MODES): Add missing cast. + (TF_MODES, TF_MODES_NO_S): Include T_MODE. + (OF_MODES, OF_MODES_NO_S): Include O_MODE. + (sparc_register_move_cost): Decrease Niagara/UltrsSPARC memory + cost to 8. + * config/sparc/sparc.h (PROMOTE_MODE): Define. + * config/sparc/sparc.md (*movsi_lo_sum, *movsi_high): Do not + provide these insn when flag_pic. + 2015-09-11 Jeff Law <law@redhat.com> PR tree-optimization/47679 @@ -10,31 +672,31 @@ 2015-09-11 Aditya Kumar <aditya.k7@samsung.com> - * graphite-optimize-isl.c (disable_tiling): Remove. + * graphite-optimize-isl.c (disable_tiling): Remove. (get_schedule_for_band): Do not use disable_tiling. (get_prevector_map): Delete function. (enable_polly_vector): Remove. - (get_schedule_for_band_list): Remove dead code. + (get_schedule_for_band_list): Remove dead code. 2015-09-11 Aditya Kumar <aditya.k7@samsung.com> - * graphite-optimize-isl.c (get_tile_map): Refactor. - (get_schedule_for_band): Same. - (getScheduleForBand): Same. - (get_prevector_map): Same. - (get_schedule_for_band_list): Same. - (get_schedule_map): Same. - (get_single_map): Same. - (apply_schedule_map_to_scop): Same. - (optimize_isl): Same. + * graphite-optimize-isl.c (get_tile_map): Refactor. + (get_schedule_for_band): Same. + (getScheduleForBand): Same. + (get_prevector_map): Same. + (get_schedule_for_band_list): Same. + (get_schedule_map): Same. + (get_single_map): Same. + (apply_schedule_map_to_scop): Same. + (optimize_isl): Same. 
2015-09-10 Ramana Radhakrishnan <ramana.radhakrishnan@arm.com> PR target/63304 - * config/aarch64/aarch.md (mov<mode>:GPF_F16): Use GPF_TF_F16. - (movtf): Delete. - * config/aarch64/iterators.md (GPF_TF_F16): New. - (GPF_F16): Delete. + * config/aarch64/aarch.md (mov<mode>:GPF_F16): Use GPF_TF_F16. + (movtf): Delete. + * config/aarch64/iterators.md (GPF_TF_F16): New. + (GPF_F16): Delete. 2015-09-10 Nathan Sidwell <nathan@acm.org> @@ -228,7 +890,7 @@ (compute_deps): Same. 2015-09-08 Aditya Kumar <hiraditya@msn.com> - Sebastian Pop <s.pop@samsung.com> + Sebastian Pop <s.pop@samsung.com> * graphite-isl-ast-to-gimple.c (gcc_expression_from_isl_ast_expr_id): Return the parameter if it was saved in corresponding @@ -9996,7 +10658,7 @@ * tree-vect-slp.c (vect_build_slp_tree_1): Init vectype. -2015-06-24 Renlin Li <renlin.li@arm.com> +2015-06-24 Renlin Li <renlin.li@arm.com> * config/aarch64/aarch64.h(TARGET_CPU_CPP_BUILTINS): Add __ARM_ALIGN_MAX_PWR, __ARM_ALIGN_MAX_STACK_PWR. diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP index 68e1defad14..d6110d2fc11 100644 --- a/gcc/DATESTAMP +++ b/gcc/DATESTAMP @@ -1 +1 @@ -20150911 +20150916 diff --git a/gcc/Makefile.in b/gcc/Makefile.in index 0343d7a4ac0..c12fc9b49a4 100644 --- a/gcc/Makefile.in +++ b/gcc/Makefile.in @@ -1515,7 +1515,7 @@ OBJS = \ # Objects in libcommon.a, potentially used by all host binaries and with # no target dependencies. OBJS-libcommon = diagnostic.o diagnostic-color.o pretty-print.o intl.o \ - vec.o input.o version.o hash-table.o ggc-none.o + vec.o input.o version.o hash-table.o ggc-none.o memory-block.o # Objects in libcommon-target.a, used by drivers and by the core # compiler and containing target-dependent code. 
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog index 60a7e692d37..d11227b0839 100644 --- a/gcc/ada/ChangeLog +++ b/gcc/ada/ChangeLog @@ -1,3 +1,34 @@ +2015-09-14 Pierre-Marie de Rodat <derodat@adacore.com> + + * gcc-interface/misc.c (gnat_post_options): Issue a warning if + generating STABS debugging information when not the default. + +2015-09-14 Eric Botcazou <ebotcazou@adacore.com> + + * gcc-interface/gigi.h (ref_filename): Delete. + (Sloc_to_locus): Add clear_column parameter defaulting to false. + (build_call_raise): Adjust comment. + (build_call_raise_range): Move around. + * gcc-interface/trans.c (ref_filename): Delete. + (gigi): Fix formatting. + (block_end_locus_sink): Delete. + (Sloc_to_locus1): Tidy up and reformat. Rename into... + (Sloc_to_locus): ...this. Add default for clear_column parameter. + (set_expr_location_from_node1): Rename into... + (set_expr_location_from_node): ...this. + (set_end_locus_from_node): Move around. Adjust for renaming. + (Handled_Sequence_Of_Statements_to_gnu): Likewise. + (add_cleanup): Likewise. + * gcc-interface/utils2.c (expand_sloc): New static function. + (build_call_raise): Call it. + (build_call_raise_column): Likewise. + (build_call_raise_range): Likewise. Move around. + +2015-09-14 Eric Botcazou <ebotcazou@adacore.com> + + * gcc-interface/utils2.c (gnat_rewrite_reference) <COMPOUND_EXPR>: Add + another acceptable pattern for the RHS. + 2015-07-24 Micahel Darling <darlingm@gmail.com> PR other/66259 diff --git a/gcc/ada/gcc-interface/decl.c b/gcc/ada/gcc-interface/decl.c index 971c0669969..cb3d7788346 100644 --- a/gcc/ada/gcc-interface/decl.c +++ b/gcc/ada/gcc-interface/decl.c @@ -6241,7 +6241,7 @@ elaborate_expression_1 (tree gnu_expr, Entity_Id gnat_entity, const char *s, Returning the variable ensures the caller will use it in generated code. Note that there is no need for a location if the debug info contains an integer constant. 
- FIXME: when the encoding-based debug scheme is dropped, move this + TODO: when the encoding-based debug scheme is dropped, move this condition to the top-level IF block: we will not need to create a variable anymore in such cases, then. */ if (use_variable || (need_debug && !TREE_CONSTANT (gnu_expr))) diff --git a/gcc/ada/gcc-interface/gigi.h b/gcc/ada/gcc-interface/gigi.h index 67977b01f87..9420fd83f75 100644 --- a/gcc/ada/gcc-interface/gigi.h +++ b/gcc/ada/gcc-interface/gigi.h @@ -227,9 +227,6 @@ extern Node_Id error_gnat_node; types with representation information. */ extern bool type_annotate_only; -/* Current file name without path. */ -extern const char *ref_filename; - /* This structure must be kept synchronized with Call_Back_End. */ struct File_Info_Type { @@ -288,9 +285,10 @@ extern int gnat_gimplify_expr (tree *expr_p, gimple_seq *pre_p, extern void process_type (Entity_Id gnat_entity); /* Convert SLOC into LOCUS. Return true if SLOC corresponds to a source code - location and false if it doesn't. In the former case, set the Gigi global - variable REF_FILENAME to the simple debug file name as given by sinput. */ -extern bool Sloc_to_locus (Source_Ptr Sloc, location_t *locus); + location and false if it doesn't. If CLEAR_COLUMN is true, set the column + information to 0. */ +extern bool Sloc_to_locus (Source_Ptr Sloc, location_t *locus, + bool clear_column = false); /* Post an error message. MSG is the error message, properly annotated. NODE is the node at which to post the error and the node to use for the @@ -874,27 +872,23 @@ extern tree build_compound_expr (tree result_type, tree stmt_operand, this doesn't fold the call, hence it will always return a CALL_EXPR. */ extern tree build_call_n_expr (tree fndecl, int n, ...); -/* Call a function that raises an exception and pass the line number and file - name, if requested. MSG says which exception function to call. 
- - GNAT_NODE is the gnat node conveying the source location for which the - error should be signaled, or Empty in which case the error is signaled on - the current ref_file_name/input_line. - - KIND says which kind of exception this is for - (N_Raise_{Constraint,Storage,Program}_Error). */ +/* Build a call to a function that raises an exception and passes file name + and line number, if requested. MSG says which exception function to call. + GNAT_NODE is the node conveying the source location for which the error + should be signaled, or Empty in which case the error is signaled for the + current location. KIND says which kind of exception node this is for, + among N_Raise_{Constraint,Storage,Program}_Error. */ extern tree build_call_raise (int msg, Node_Id gnat_node, char kind); -/* Similar to build_call_raise, for an index or range check exception as - determined by MSG, with extra information generated of the form - "INDEX out of range FIRST..LAST". */ -extern tree build_call_raise_range (int msg, Node_Id gnat_node, - tree index, tree first, tree last); - /* Similar to build_call_raise, with extra information about the column where the check failed. */ extern tree build_call_raise_column (int msg, Node_Id gnat_node); +/* Similar to build_call_raise_column, for an index or range check exception, + with extra information of the form "INDEX out of range FIRST..LAST". */ +extern tree build_call_raise_range (int msg, Node_Id gnat_node, + tree index, tree first, tree last); + /* Return a CONSTRUCTOR of TYPE whose elements are V. This is not the same as build_constructor in the language-independent tree.c. 
*/ extern tree gnat_build_constructor (tree type, vec<constructor_elt, va_gc> *v); diff --git a/gcc/ada/gcc-interface/misc.c b/gcc/ada/gcc-interface/misc.c index 5b2d8b3d55b..da91c6f326d 100644 --- a/gcc/ada/gcc-interface/misc.c +++ b/gcc/ada/gcc-interface/misc.c @@ -268,6 +268,13 @@ gnat_post_options (const char **pfilename ATTRIBUTE_UNUSED) if (!global_options_set.x_flag_diagnostics_show_caret) global_dc->show_caret = false; + /* Warn only if STABS is not the default: we don't want to emit a warning if + the user did not use a -gstabs option. */ + if (PREFERRED_DEBUGGING_TYPE != DBX_DEBUG && write_symbols == DBX_DEBUG) + warning (0, "STABS debugging information for Ada is obsolete and not " + "supported anymore"); + + /* Copy global settings to local versions. */ optimize = global_options.x_optimize; optimize_size = global_options.x_optimize_size; flag_compare_debug = global_options.x_flag_compare_debug; @@ -658,7 +665,7 @@ gnat_get_array_descr_info (const_tree type, struct array_descr_info *info) info->ndimensions = i; convention_fortran_p = TYPE_CONVENTION_FORTRAN_P (type); - /* TODO: For row major ordering, we probably want to emit nothing and + /* TODO: for row major ordering, we probably want to emit nothing and instead specify it as the default in Dw_TAG_compile_unit. */ info->ordering = (convention_fortran_p ? array_descr_ordering_column_major diff --git a/gcc/ada/gcc-interface/trans.c b/gcc/ada/gcc-interface/trans.c index 413550a5211..fea8e15c5d3 100644 --- a/gcc/ada/gcc-interface/trans.c +++ b/gcc/ada/gcc-interface/trans.c @@ -75,13 +75,6 @@ instead. */ #define ALLOCA_THRESHOLD 1000 -/* In configurations where blocks have no end_locus attached, just - sink assignments into a dummy global. */ -#ifndef BLOCK_SOURCE_END_LOCATION -static location_t block_end_locus_sink; -#define BLOCK_SOURCE_END_LOCATION(BLOCK) block_end_locus_sink -#endif - /* Pointers to front-end tables accessed through macros. 
*/ struct Node *Nodes_Ptr; struct Flags *Flags_Ptr; @@ -104,10 +97,6 @@ Node_Id error_gnat_node; types with representation information. */ bool type_annotate_only; -/* Current filename without path. */ -const char *ref_filename; - - /* List of N_Validate_Unchecked_Conversion nodes in the unit. */ static vec<Node_Id> gnat_validate_uc_list; @@ -255,11 +244,9 @@ static tree extract_values (tree, tree); static tree pos_to_constructor (Node_Id, tree, Entity_Id); static void validate_unchecked_conversion (Node_Id); static tree maybe_implicit_deref (tree); -static void set_expr_location_from_node (tree, Node_Id); -static void set_expr_location_from_node1 (tree, Node_Id, bool); -static bool Sloc_to_locus1 (Source_Ptr, location_t *, bool); -static bool set_end_locus_from_node (tree, Node_Id); +static void set_expr_location_from_node (tree, Node_Id, bool = false); static void set_gnu_expr_location_from_node (tree, Node_Id); +static bool set_end_locus_from_node (tree, Node_Id); static int lvalue_required_p (Node_Id, tree, bool, bool, bool); static tree build_raise_check (int, enum exception_info_kind); static tree create_init_temporary (const char *, tree, tree *, Node_Id); @@ -5014,7 +5001,7 @@ Handled_Sequence_Of_Statements_to_gnu (Node_Id gnat_node) implicit transient block does not incorrectly inherit the slocs of a decision, which would otherwise confuse control flow based coverage analysis tools. */ - set_expr_location_from_node1 (gnu_result, gnat_node, true); + set_expr_location_from_node (gnu_result, gnat_node, true); } else gnu_result = gnu_inner_block; @@ -7772,7 +7759,7 @@ add_decl_expr (tree gnu_decl, Entity_Id gnat_entity) add_stmt_with_node (gnu_stmt, gnat_entity); /* If this is a variable and an initializer is attached to it, it must be - valid for the context. Similar to init_const in create_var_decl_1. */ + valid for the context. Similar to init_const in create_var_decl. 
*/ if (TREE_CODE (gnu_decl) == VAR_DECL && (gnu_init = DECL_INITIAL (gnu_decl)) != NULL_TREE && (!gnat_types_compatible_p (type, TREE_TYPE (gnu_init)) @@ -7840,7 +7827,7 @@ static void add_cleanup (tree gnu_cleanup, Node_Id gnat_node) { if (Present (gnat_node)) - set_expr_location_from_node1 (gnu_cleanup, gnat_node, true); + set_expr_location_from_node (gnu_cleanup, gnat_node, true); append_to_statement_list (gnu_cleanup, ¤t_stmt_group->cleanups); } @@ -9507,12 +9494,11 @@ maybe_implicit_deref (tree exp) } /* Convert SLOC into LOCUS. Return true if SLOC corresponds to a source code - location and false if it doesn't. In the former case, set the Gigi global - variable REF_FILENAME to the simple debug file name as given by sinput. - If clear_column is true, set column information to 0. */ + location and false if it doesn't. If CLEAR_COLUMN is true, set the column + information to 0. */ -static bool -Sloc_to_locus1 (Source_Ptr Sloc, location_t *locus, bool clear_column) +bool +Sloc_to_locus (Source_Ptr Sloc, location_t *locus, bool clear_column) { if (Sloc == No_Location) return false; @@ -9522,59 +9508,37 @@ Sloc_to_locus1 (Source_Ptr Sloc, location_t *locus, bool clear_column) *locus = BUILTINS_LOCATION; return false; } - else - { - Source_File_Index file = Get_Source_File_Index (Sloc); - Logical_Line_Number line = Get_Logical_Line_Number (Sloc); - Column_Number column = (clear_column ? 0 : Get_Column_Number (Sloc)); - line_map_ordinary *map = LINEMAPS_ORDINARY_MAP_AT (line_table, file - 1); - /* We can have zero if pragma Source_Reference is in effect. */ - if (line < 1) - line = 1; + Source_File_Index file = Get_Source_File_Index (Sloc); + Logical_Line_Number line = Get_Logical_Line_Number (Sloc); + Column_Number column = (clear_column ? 0 : Get_Column_Number (Sloc)); + line_map_ordinary *map = LINEMAPS_ORDINARY_MAP_AT (line_table, file - 1); - /* Translate the location. 
*/ - *locus = linemap_position_for_line_and_column (map, line, column); - } + /* We can have zero if pragma Source_Reference is in effect. */ + if (line < 1) + line = 1; - ref_filename - = IDENTIFIER_POINTER - (get_identifier - (Get_Name_String (Debug_Source_Name (Get_Source_File_Index (Sloc)))));; + /* Translate the location. */ + *locus = linemap_position_for_line_and_column (map, line, column); return true; } -/* Similar to the above, not clearing the column information. */ - -bool -Sloc_to_locus (Source_Ptr Sloc, location_t *locus) -{ - return Sloc_to_locus1 (Sloc, locus, false); -} - /* Similar to set_expr_location, but start with the Sloc of GNAT_NODE and - don't do anything if it doesn't correspond to a source location. */ + don't do anything if it doesn't correspond to a source location. And, + if CLEAR_COLUMN is true, set the column information to 0. */ static void -set_expr_location_from_node1 (tree node, Node_Id gnat_node, bool clear_column) +set_expr_location_from_node (tree node, Node_Id gnat_node, bool clear_column) { location_t locus; - if (!Sloc_to_locus1 (Sloc (gnat_node), &locus, clear_column)) + if (!Sloc_to_locus (Sloc (gnat_node), &locus, clear_column)) return; SET_EXPR_LOCATION (node, locus); } -/* Similar to the above, not clearing the column information. */ - -static void -set_expr_location_from_node (tree node, Node_Id gnat_node) -{ - set_expr_location_from_node1 (node, gnat_node, false); -} - /* More elaborate version of set_expr_location_from_node to be used in more general contexts, for example the result of the translation of a generic GNAT node. */ @@ -9609,6 +9573,65 @@ set_gnu_expr_location_from_node (tree node, Node_Id gnat_node) break; } } + +/* Set the end_locus information for GNU_NODE, if any, from an explicit end + location associated with GNAT_NODE or GNAT_NODE itself, whichever makes + most sense. Return true if a sensible assignment was performed. 
*/ + +static bool +set_end_locus_from_node (tree gnu_node, Node_Id gnat_node) +{ + Node_Id gnat_end_label; + location_t end_locus; + + /* Pick the GNAT node of which we'll take the sloc to assign to the GCC node + end_locus when there is one. We consider only GNAT nodes with a possible + End_Label attached. If the End_Label actually was unassigned, fallback + on the original node. We'd better assign an explicit sloc associated with + the outer construct in any case. */ + + switch (Nkind (gnat_node)) + { + case N_Package_Body: + case N_Subprogram_Body: + case N_Block_Statement: + gnat_end_label = End_Label (Handled_Statement_Sequence (gnat_node)); + break; + + case N_Package_Declaration: + gnat_end_label = End_Label (Specification (gnat_node)); + break; + + default: + return false; + } + + if (Present (gnat_end_label)) + gnat_node = gnat_end_label; + + /* Some expanded subprograms have neither an End_Label nor a Sloc + attached. Notify that to callers. For a block statement with no + End_Label, clear column information, so that the tree for a + transient block does not receive the sloc of a source condition. */ + if (!Sloc_to_locus (Sloc (gnat_node), &end_locus, + No (gnat_end_label) + && (Nkind (gnat_node) == N_Block_Statement))) + return false; + + switch (TREE_CODE (gnu_node)) + { + case BIND_EXPR: + BLOCK_SOURCE_END_LOCATION (BIND_EXPR_BLOCK (gnu_node)) = end_locus; + return true; + + case FUNCTION_DECL: + DECL_STRUCT_FUNCTION (gnu_node)->function_end_locus = end_locus; + return true; + + default: + return false; + } +} /* Return a colon-separated list of encodings contained in encoded Ada name. */ @@ -9679,65 +9702,6 @@ post_error_ne_num (const char *msg, Node_Id node, Entity_Id ent, int num) post_error_ne (msg, node, ent); } -/* Set the end_locus information for GNU_NODE, if any, from an explicit end - location associated with GNAT_NODE or GNAT_NODE itself, whichever makes - most sense. Return true if a sensible assignment was performed. 
*/ - -static bool -set_end_locus_from_node (tree gnu_node, Node_Id gnat_node) -{ - Node_Id gnat_end_label = Empty; - location_t end_locus; - - /* Pick the GNAT node of which we'll take the sloc to assign to the GCC node - end_locus when there is one. We consider only GNAT nodes with a possible - End_Label attached. If the End_Label actually was unassigned, fallback - on the original node. We'd better assign an explicit sloc associated with - the outer construct in any case. */ - - switch (Nkind (gnat_node)) - { - case N_Package_Body: - case N_Subprogram_Body: - case N_Block_Statement: - gnat_end_label = End_Label (Handled_Statement_Sequence (gnat_node)); - break; - - case N_Package_Declaration: - gnat_end_label = End_Label (Specification (gnat_node)); - break; - - default: - return false; - } - - gnat_node = Present (gnat_end_label) ? gnat_end_label : gnat_node; - - /* Some expanded subprograms have neither an End_Label nor a Sloc - attached. Notify that to callers. For a block statement with no - End_Label, clear column information, so that the tree for a - transient block does not receive the sloc of a source condition. */ - - if (!Sloc_to_locus1 (Sloc (gnat_node), &end_locus, - No (gnat_end_label) && - (Nkind (gnat_node) == N_Block_Statement))) - return false; - - switch (TREE_CODE (gnu_node)) - { - case BIND_EXPR: - BLOCK_SOURCE_END_LOCATION (BIND_EXPR_BLOCK (gnu_node)) = end_locus; - return true; - - case FUNCTION_DECL: - DECL_STRUCT_FUNCTION (gnu_node)->function_end_locus = end_locus; - return true; - - default: - return false; - } -} - /* Similar to post_error_ne, but T is a GCC tree representing the number to write. If T represents a constant, the text inside curly brackets in MSG will be output (presumably including a '^'). 
Otherwise it will not diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c index 00328399c5b..f54f2f02694 100644 --- a/gcc/ada/gcc-interface/utils.c +++ b/gcc/ada/gcc-interface/utils.c @@ -5278,7 +5278,7 @@ builtin_decl_for (tree name) heavily inspired from the "C" family implementation, with chunks copied verbatim from there. - Two obvious TODO candidates are + Two obvious improvement candidates are: o Use a more efficient name/decl mapping scheme o Devise a middle-end infrastructure to avoid having to copy pieces between front-ends. */ @@ -5627,7 +5627,7 @@ handle_pure_attribute (tree *node, tree name, tree ARG_UNUSED (args), { if (TREE_CODE (*node) == FUNCTION_DECL) DECL_PURE_P (*node) = 1; - /* ??? TODO: Support types. */ + /* TODO: support types. */ else { warning (OPT_Wattributes, "%qs attribute ignored", diff --git a/gcc/ada/gcc-interface/utils2.c b/gcc/ada/gcc-interface/utils2.c index 6a998d3ada9..70737a9990b 100644 --- a/gcc/ada/gcc-interface/utils2.c +++ b/gcc/ada/gcc-interface/utils2.c @@ -1754,25 +1754,58 @@ build_call_n_expr (tree fndecl, int n, ...) return fn; } -/* Call a function that raises an exception and pass the line number and file - name, if requested. MSG says which exception function to call. +/* Expand the SLOC of GNAT_NODE, if present, into tree location information + pointed to by FILENAME, LINE and COL. Fall back to the current location + if GNAT_NODE is absent or has no SLOC. */ - GNAT_NODE is the gnat node conveying the source location for which the - error should be signaled, or Empty in which case the error is signaled on - the current ref_file_name/input_line. 
+static void +expand_sloc (Node_Id gnat_node, tree *filename, tree *line, tree *col) +{ + const char *str; + int line_number, column_number; + + if (Debug_Flag_NN || Exception_Locations_Suppressed) + { + str = ""; + line_number = 0; + column_number = 0; + } + else if (Present (gnat_node) && Sloc (gnat_node) != No_Location) + { + str = Get_Name_String + (Debug_Source_Name (Get_Source_File_Index (Sloc (gnat_node)))); + line_number = Get_Logical_Line_Number (Sloc (gnat_node)); + column_number = Get_Column_Number (Sloc (gnat_node)); + } + else + { + str = lbasename (LOCATION_FILE (input_location)); + line_number = LOCATION_LINE (input_location); + column_number = LOCATION_COLUMN (input_location); + } - KIND says which kind of exception this is for - (N_Raise_{Constraint,Storage,Program}_Error). */ + const int len = strlen (str); + *filename = build_string (len, str); + TREE_TYPE (*filename) = build_array_type (unsigned_char_type_node, + build_index_type (size_int (len))); + *line = build_int_cst (NULL_TREE, line_number); + if (col) + *col = build_int_cst (NULL_TREE, column_number); +} + +/* Build a call to a function that raises an exception and passes file name + and line number, if requested. MSG says which exception function to call. + GNAT_NODE is the node conveying the source location for which the error + should be signaled, or Empty in which case the error is signaled for the + current location. KIND says which kind of exception node this is for, + among N_Raise_{Constraint,Storage,Program}_Error. */ tree build_call_raise (int msg, Node_Id gnat_node, char kind) { tree fndecl = gnat_raise_decls[msg]; tree label = get_exception_label (kind); - tree filename; - int line_number; - const char *str; - int len; + tree filename, line; /* If this is to be done as a goto, handle that case. 
*/ if (label) @@ -1780,8 +1813,7 @@ build_call_raise (int msg, Node_Id gnat_node, char kind) Entity_Id local_raise = Get_Local_Raise_Call_Entity (); tree gnu_result = build1 (GOTO_EXPR, void_type_node, label); - /* If Local_Raise is present, generate - Local_Raise (exception'Identity); */ + /* If Local_Raise is present, build Local_Raise (Exception'Identity). */ if (Present (local_raise)) { tree gnu_local_raise @@ -1792,138 +1824,63 @@ build_call_raise (int msg, Node_Id gnat_node, char kind) = build_call_n_expr (gnu_local_raise, 1, build_unary_op (ADDR_EXPR, NULL_TREE, gnu_exception_entity)); - - gnu_result = build2 (COMPOUND_EXPR, void_type_node, - gnu_call, gnu_result);} + gnu_result + = build2 (COMPOUND_EXPR, void_type_node, gnu_call, gnu_result); + } return gnu_result; } - str - = (Debug_Flag_NN || Exception_Locations_Suppressed) - ? "" - : (gnat_node != Empty && Sloc (gnat_node) != No_Location) - ? IDENTIFIER_POINTER - (get_identifier (Get_Name_String - (Debug_Source_Name - (Get_Source_File_Index (Sloc (gnat_node)))))) - : ref_filename; - - len = strlen (str); - filename = build_string (len, str); - line_number - = (gnat_node != Empty && Sloc (gnat_node) != No_Location) - ? Get_Logical_Line_Number (Sloc(gnat_node)) - : LOCATION_LINE (input_location); - - TREE_TYPE (filename) = build_array_type (unsigned_char_type_node, - build_index_type (size_int (len))); + expand_sloc (gnat_node, &filename, &line, NULL); return build_call_n_expr (fndecl, 2, build1 (ADDR_EXPR, build_pointer_type (unsigned_char_type_node), filename), - build_int_cst (NULL_TREE, line_number)); + line); } -/* Similar to build_call_raise, for an index or range check exception as - determined by MSG, with extra information generated of the form - "INDEX out of range FIRST..LAST". */ +/* Similar to build_call_raise, with extra information about the column + where the check failed. 
*/ tree -build_call_raise_range (int msg, Node_Id gnat_node, - tree index, tree first, tree last) +build_call_raise_column (int msg, Node_Id gnat_node) { tree fndecl = gnat_raise_decls_ext[msg]; - tree filename; - int line_number, column_number; - const char *str; - int len; - - str - = (Debug_Flag_NN || Exception_Locations_Suppressed) - ? "" - : (gnat_node != Empty && Sloc (gnat_node) != No_Location) - ? IDENTIFIER_POINTER - (get_identifier (Get_Name_String - (Debug_Source_Name - (Get_Source_File_Index (Sloc (gnat_node)))))) - : ref_filename; - - len = strlen (str); - filename = build_string (len, str); - if (gnat_node != Empty && Sloc (gnat_node) != No_Location) - { - line_number = Get_Logical_Line_Number (Sloc (gnat_node)); - column_number = Get_Column_Number (Sloc (gnat_node)); - } - else - { - line_number = LOCATION_LINE (input_location); - column_number = 0; - } + tree filename, line, col; - TREE_TYPE (filename) = build_array_type (unsigned_char_type_node, - build_index_type (size_int (len))); + expand_sloc (gnat_node, &filename, &line, &col); return - build_call_n_expr (fndecl, 6, + build_call_n_expr (fndecl, 3, build1 (ADDR_EXPR, build_pointer_type (unsigned_char_type_node), filename), - build_int_cst (NULL_TREE, line_number), - build_int_cst (NULL_TREE, column_number), - convert (integer_type_node, index), - convert (integer_type_node, first), - convert (integer_type_node, last)); + line, col); } -/* Similar to build_call_raise, with extra information about the column - where the check failed. */ +/* Similar to build_call_raise_column, for an index or range check exception , + with extra information of the form "INDEX out of range FIRST..LAST". 
*/ tree -build_call_raise_column (int msg, Node_Id gnat_node) +build_call_raise_range (int msg, Node_Id gnat_node, + tree index, tree first, tree last) { tree fndecl = gnat_raise_decls_ext[msg]; - tree filename; - int line_number, column_number; - const char *str; - int len; - - str - = (Debug_Flag_NN || Exception_Locations_Suppressed) - ? "" - : (gnat_node != Empty && Sloc (gnat_node) != No_Location) - ? IDENTIFIER_POINTER - (get_identifier (Get_Name_String - (Debug_Source_Name - (Get_Source_File_Index (Sloc (gnat_node)))))) - : ref_filename; - - len = strlen (str); - filename = build_string (len, str); - if (gnat_node != Empty && Sloc (gnat_node) != No_Location) - { - line_number = Get_Logical_Line_Number (Sloc (gnat_node)); - column_number = Get_Column_Number (Sloc (gnat_node)); - } - else - { - line_number = LOCATION_LINE (input_location); - column_number = 0; - } + tree filename, line, col; - TREE_TYPE (filename) = build_array_type (unsigned_char_type_node, - build_index_type (size_int (len))); + expand_sloc (gnat_node, &filename, &line, &col); return - build_call_n_expr (fndecl, 3, + build_call_n_expr (fndecl, 6, build1 (ADDR_EXPR, build_pointer_type (unsigned_char_type_node), filename), - build_int_cst (NULL_TREE, line_number), - build_int_cst (NULL_TREE, column_number)); + line, col, + convert (integer_type_node, index), + convert (integer_type_node, first), + convert (integer_type_node, last)); } /* qsort comparer for the bit positions of two constructor elements @@ -2807,7 +2764,9 @@ gnat_rewrite_reference (tree ref, rewrite_fn func, void *data, tree *init) gcc_assert (*init == NULL_TREE); *init = TREE_OPERAND (ref, 0); /* We expect only the pattern built in Call_to_gnu. 
*/ - gcc_assert (DECL_P (TREE_OPERAND (ref, 1))); + gcc_assert (DECL_P (TREE_OPERAND (ref, 1)) + || (TREE_CODE (TREE_OPERAND (ref, 1)) == COMPONENT_REF + && DECL_P (TREE_OPERAND (TREE_OPERAND (ref, 1), 0)))); return TREE_OPERAND (ref, 1); case CALL_EXPR: diff --git a/gcc/alias.c b/gcc/alias.c index f12d9d12a2b..1c58547df72 100644 --- a/gcc/alias.c +++ b/gcc/alias.c @@ -134,7 +134,7 @@ along with GCC; see the file COPYING3. If not see struct alias_set_hash : int_hash <int, INT_MIN, INT_MIN + 1> {}; -struct GTY(()) alias_set_entry_d { +struct GTY(()) alias_set_entry { /* The alias set number, as stored in MEM_ALIAS_SET. */ alias_set_type alias_set; @@ -158,7 +158,6 @@ struct GTY(()) alias_set_entry_d { /* Nonzero if is_pointer or if one of childs have has_pointer set. */ bool has_pointer; }; -typedef struct alias_set_entry_d *alias_set_entry; static int rtx_equal_for_memref_p (const_rtx, const_rtx); static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT); @@ -167,7 +166,7 @@ static int base_alias_check (rtx, rtx, rtx, rtx, machine_mode, machine_mode); static rtx find_base_value (rtx); static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx); -static alias_set_entry get_alias_set_entry (alias_set_type); +static alias_set_entry *get_alias_set_entry (alias_set_type); static tree decl_for_component_ref (tree); static int write_dependence_p (const_rtx, const_rtx, machine_mode, rtx, @@ -288,7 +287,7 @@ static bool copying_arguments; /* The splay-tree used to store the various alias set entries. */ -static GTY (()) vec<alias_set_entry, va_gc> *alias_sets; +static GTY (()) vec<alias_set_entry *, va_gc> *alias_sets; /* Build a decomposed reference object for querying the alias-oracle from the MEM rtx and store it in *REF. @@ -395,7 +394,7 @@ rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p) /* Returns a pointer to the alias set entry for ALIAS_SET, if there is such an entry, or NULL otherwise. 
*/ -static inline alias_set_entry +static inline alias_set_entry * get_alias_set_entry (alias_set_type alias_set) { return (*alias_sets)[alias_set]; @@ -417,7 +416,7 @@ mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2) bool alias_set_subset_of (alias_set_type set1, alias_set_type set2) { - alias_set_entry ase2; + alias_set_entry *ase2; /* Everything is a subset of the "aliases everything" set. */ if (set2 == 0) @@ -453,7 +452,7 @@ alias_set_subset_of (alias_set_type set1, alias_set_type set2) get_alias_set for more details. */ if (ase2 && ase2->has_pointer) { - alias_set_entry ase1 = get_alias_set_entry (set1); + alias_set_entry *ase1 = get_alias_set_entry (set1); if (ase1 && ase1->is_pointer) { @@ -477,8 +476,8 @@ alias_set_subset_of (alias_set_type set1, alias_set_type set2) int alias_sets_conflict_p (alias_set_type set1, alias_set_type set2) { - alias_set_entry ase1; - alias_set_entry ase2; + alias_set_entry *ase1; + alias_set_entry *ase2; /* The easy case. */ if (alias_sets_must_conflict_p (set1, set2)) @@ -808,10 +807,10 @@ alias_ptr_types_compatible_p (tree t1, tree t2) /* Create emptry alias set entry. */ -alias_set_entry +alias_set_entry * init_alias_set_entry (alias_set_type set) { - alias_set_entry ase = ggc_alloc<alias_set_entry_d> (); + alias_set_entry *ase = ggc_alloc<alias_set_entry> (); ase->alias_set = set; ase->children = NULL; ase->has_zero_child = false; @@ -1057,7 +1056,7 @@ get_alias_set (tree t) /* We treat pointer types specially in alias_set_subset_of. 
*/ if (POINTER_TYPE_P (t) && set) { - alias_set_entry ase = get_alias_set_entry (set); + alias_set_entry *ase = get_alias_set_entry (set); if (!ase) ase = init_alias_set_entry (set); ase->is_pointer = true; @@ -1075,8 +1074,8 @@ new_alias_set (void) if (flag_strict_aliasing) { if (alias_sets == 0) - vec_safe_push (alias_sets, (alias_set_entry) 0); - vec_safe_push (alias_sets, (alias_set_entry) 0); + vec_safe_push (alias_sets, (alias_set_entry *) NULL); + vec_safe_push (alias_sets, (alias_set_entry *) NULL); return alias_sets->length () - 1; } else @@ -1099,8 +1098,8 @@ new_alias_set (void) void record_alias_subset (alias_set_type superset, alias_set_type subset) { - alias_set_entry superset_entry; - alias_set_entry subset_entry; + alias_set_entry *superset_entry; + alias_set_entry *subset_entry; /* It is possible in complex type situations for both sets to be the same, in which case we can ignore this operation. */ diff --git a/gcc/alloc-pool.h b/gcc/alloc-pool.h index 03bde635bf5..70105ba4ebc 100644 --- a/gcc/alloc-pool.h +++ b/gcc/alloc-pool.h @@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see #ifndef ALLOC_POOL_H #define ALLOC_POOL_H +#include "memory-block.h" extern void dump_alloc_pool_statistics (void); @@ -95,18 +96,53 @@ struct pool_usage: public mem_usage extern mem_alloc_description<pool_usage> pool_allocator_usage; +#if 0 +/* If a pool with custom block size is needed, one might use the following + template. An instance of this template can be used as a parameter for + instantiating base_pool_allocator template: + + typedef custom_block_allocator <128*1024> huge_block_allocator; + ... + static base_pool_allocator <huge_block_allocator> + value_pool ("value", 16384); + + Right now it's not used anywhere in the code, and is given here as an + example). 
*/ + +template <size_t BlockSize> +class custom_block_allocator +{ +public: + static const size_t block_size = BlockSize; + + static inline void * + allocate () ATTRIBUTE_MALLOC + { + return XNEWVEC (char, BlockSize); + } + + static inline void + release (void *block) + { + XDELETEVEC (block); + } +}; +#endif + /* Generic pool allocator. */ -class pool_allocator + +template <typename TBlockAllocator> +class base_pool_allocator { public: - /* Default constructor for pool allocator called NAME. Each block - has NUM elements. */ - pool_allocator (const char *name, size_t num, size_t size CXX_MEM_STAT_INFO); - ~pool_allocator (); + /* Default constructor for pool allocator called NAME. */ + base_pool_allocator (const char *name, size_t size CXX_MEM_STAT_INFO); + ~base_pool_allocator (); void release (); void release_if_empty (); void *allocate () ATTRIBUTE_MALLOC; void remove (void *object); + size_t num_elts_current (); private: struct allocation_pool_list @@ -151,7 +187,7 @@ private: }; /* Align X to 8. */ - size_t + static inline size_t align_eight (size_t x) { return (((x+7) >> 3) << 3); @@ -180,8 +216,6 @@ private: size_t m_blocks_allocated; /* List of blocks that are used to allocate new objects. */ allocation_pool_list *m_block_list; - /* The number of elements in a block. */ - size_t m_block_size; /* Size of a pool elements in bytes. */ size_t m_elt_size; /* Size in bytes that should be allocated for each element. 
*/ @@ -192,24 +226,24 @@ private: mem_location m_location; }; +template <typename TBlockAllocator> inline -pool_allocator::pool_allocator (const char *name, size_t num, - size_t size MEM_STAT_DECL): - m_name (name), m_id (0), m_elts_per_block (num), m_returned_free_list (NULL), +base_pool_allocator <TBlockAllocator>::base_pool_allocator ( + const char *name, size_t size MEM_STAT_DECL): + m_name (name), m_id (0), m_elts_per_block (0), m_returned_free_list (NULL), m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0), - m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL), - m_block_size (0), m_size (size), m_initialized (false), - m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {} + m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL), m_size (size), + m_initialized (false), m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {} /* Initialize a pool allocator. */ +template <typename TBlockAllocator> inline void -pool_allocator::initialize () +base_pool_allocator <TBlockAllocator>::initialize () { gcc_checking_assert (!m_initialized); m_initialized = true; - size_t header_size; size_t size = m_size; gcc_checking_assert (m_name); @@ -218,15 +252,12 @@ pool_allocator::initialize () if (size < sizeof (allocation_pool_list*)) size = sizeof (allocation_pool_list*); - /* Now align the size to a multiple of 4. */ + /* Now align the size to a multiple of 8. */ size = align_eight (size); /* Add the aligned size of ID. */ size += offsetof (allocation_object, u.data); - /* Um, we can't really allocate 0 elements per block. */ - gcc_checking_assert (m_elts_per_block); - m_elt_size = size; if (GATHER_STATISTICS) @@ -239,9 +270,10 @@ pool_allocator::initialize () } /* List header size should be a multiple of 8. 
*/ - header_size = align_eight (sizeof (allocation_pool_list)); + size_t header_size = align_eight (sizeof (allocation_pool_list)); - m_block_size = (size * m_elts_per_block) + header_size; + m_elts_per_block = (TBlockAllocator::block_size - header_size) / size; + gcc_checking_assert (m_elts_per_block != 0); #ifdef ENABLE_CHECKING /* Increase the last used ID and use it for this pool. @@ -255,8 +287,9 @@ pool_allocator::initialize () } /* Free all memory allocated for the given memory pool. */ +template <typename TBlockAllocator> inline void -pool_allocator::release () +base_pool_allocator <TBlockAllocator>::release () { if (!m_initialized) return; @@ -267,7 +300,7 @@ pool_allocator::release () for (block = m_block_list; block != NULL; block = next_block) { next_block = block->next; - free (block); + TBlockAllocator::release (block); } if (GATHER_STATISTICS) @@ -285,21 +318,24 @@ pool_allocator::release () m_block_list = NULL; } -void -inline pool_allocator::release_if_empty () +template <typename TBlockAllocator> +inline void +base_pool_allocator <TBlockAllocator>::release_if_empty () { if (m_elts_free == m_elts_allocated) release (); } -inline pool_allocator::~pool_allocator () +template <typename TBlockAllocator> +inline base_pool_allocator <TBlockAllocator>::~base_pool_allocator () { release (); } /* Allocates one element from the pool specified. */ +template <typename TBlockAllocator> inline void* -pool_allocator::allocate () +base_pool_allocator <TBlockAllocator>::allocate () { if (!m_initialized) initialize (); @@ -327,7 +363,7 @@ pool_allocator::allocate () allocation_pool_list *block_header; /* Make the block. */ - block = XNEWVEC (char, m_block_size); + block = reinterpret_cast<char *> (TBlockAllocator::allocate ()); block_header = (allocation_pool_list*) block; block += align_eight (sizeof (allocation_pool_list)); @@ -378,8 +414,9 @@ pool_allocator::allocate () } /* Puts PTR back on POOL's free list. 
*/ +template <typename TBlockAllocator> inline void -pool_allocator::remove (void *object) +base_pool_allocator <TBlockAllocator>::remove (void *object) { gcc_checking_assert (m_initialized); @@ -412,15 +449,28 @@ pool_allocator::remove (void *object) } } +/* Number of elements currently active (not returned to pool). Used for cheap + consistency checks. */ +template <typename TBlockAllocator> +inline size_t +base_pool_allocator <TBlockAllocator>::num_elts_current () +{ + return m_elts_allocated - m_elts_free; +} + +/* Specialization of base_pool_allocator which should be used in most cases. + Another specialization may be needed, if object size is greater than + memory_block_pool::block_size (64 KB). */ +typedef base_pool_allocator <memory_block_pool> pool_allocator; + /* Type based memory pool allocator. */ template <typename T> class object_allocator { public: - /* Default constructor for pool allocator called NAME. Each block - has NUM elements. */ - object_allocator (const char *name, size_t num CXX_MEM_STAT_INFO): - m_allocator (name, num, sizeof (T) PASS_MEM_STAT) {} + /* Default constructor for pool allocator called NAME. */ + object_allocator (const char *name CXX_MEM_STAT_INFO): + m_allocator (name, sizeof (T) PASS_MEM_STAT) {} inline void release () @@ -448,6 +498,12 @@ public: m_allocator.remove (object); } + inline size_t + num_elts_current () + { + return m_allocator.num_elts_current (); + } + private: pool_allocator m_allocator; }; diff --git a/gcc/asan.c b/gcc/asan.c index 4f5adaa60a1..7c243cdc451 100644 --- a/gcc/asan.c +++ b/gcc/asan.c @@ -350,7 +350,7 @@ struct asan_mem_ref HOST_WIDE_INT access_size; }; -object_allocator <asan_mem_ref> asan_mem_ref_pool ("asan_mem_ref", 10); +object_allocator <asan_mem_ref> asan_mem_ref_pool ("asan_mem_ref"); /* Initializes an instance of asan_mem_ref. 
*/ diff --git a/gcc/bt-load.c b/gcc/bt-load.c index 5d8b752bd2c..9b1d366bf00 100644 --- a/gcc/bt-load.c +++ b/gcc/bt-load.c @@ -51,18 +51,20 @@ along with GCC; see the file COPYING3. If not see #include "rtl-iter.h" #include "fibonacci_heap.h" +struct btr_def; + /* Target register optimizations - these are performed after reload. */ -typedef struct btr_def_group_s +struct btr_def_group { - struct btr_def_group_s *next; + btr_def_group *next; rtx src; - struct btr_def_s *members; -} *btr_def_group; + btr_def *members; +}; -typedef struct btr_user_s +struct btr_user { - struct btr_user_s *next; + btr_user *next; basic_block bb; int luid; rtx_insn *insn; @@ -74,7 +76,7 @@ typedef struct btr_user_s int n_reaching_defs; int first_reaching_def; char other_use_this_block; -} *btr_user; +}; /* btr_def structs appear on three lists: 1. A list of all btr_def structures (head is @@ -85,10 +87,10 @@ typedef struct btr_user_s group (head is in a BTR_DEF_GROUP struct, linked by NEXT_THIS_GROUP field). */ -typedef struct btr_def_s +struct btr_def { - struct btr_def_s *next_this_bb; - struct btr_def_s *next_this_group; + btr_def *next_this_bb; + btr_def *next_this_group; basic_block bb; int luid; rtx_insn *insn; @@ -98,8 +100,8 @@ typedef struct btr_def_s source (i.e. a label), group links together all the insns with the same source. For other branch register setting insns, group is NULL. */ - btr_def_group group; - btr_user uses; + btr_def_group *group; + btr_user *uses; /* If this def has a reaching use which is not a simple use in a branch instruction, then has_ambiguous_use will be true, and we will not attempt to migrate this definition. */ @@ -119,38 +121,38 @@ typedef struct btr_def_s to clear out trs_live_at_end again. 
*/ char own_end; bitmap live_range; -} *btr_def; +}; -typedef fibonacci_heap <long, btr_def_s> btr_heap_t; -typedef fibonacci_node <long, btr_def_s> btr_heap_node_t; +typedef fibonacci_heap <long, btr_def> btr_heap_t; +typedef fibonacci_node <long, btr_def> btr_heap_node_t; static int issue_rate; static int basic_block_freq (const_basic_block); static int insn_sets_btr_p (const rtx_insn *, int, int *); -static void find_btr_def_group (btr_def_group *, btr_def); -static btr_def add_btr_def (btr_heap_t *, basic_block, int, rtx_insn *, - unsigned int, int, btr_def_group *); -static btr_user new_btr_user (basic_block, int, rtx_insn *); +static void find_btr_def_group (btr_def_group **, btr_def *); +static btr_def *add_btr_def (btr_heap_t *, basic_block, int, rtx_insn *, + unsigned int, int, btr_def_group **); +static btr_user *new_btr_user (basic_block, int, rtx_insn *); static void dump_hard_reg_set (HARD_REG_SET); static void dump_btrs_live (int); -static void note_other_use_this_block (unsigned int, btr_user); -static void compute_defs_uses_and_gen (btr_heap_t *, btr_def *,btr_user *, +static void note_other_use_this_block (unsigned int, btr_user *); +static void compute_defs_uses_and_gen (btr_heap_t *, btr_def **, btr_user **, sbitmap *, sbitmap *, HARD_REG_SET *); static void compute_kill (sbitmap *, sbitmap *, HARD_REG_SET *); static void compute_out (sbitmap *bb_out, sbitmap *, sbitmap *, int); -static void link_btr_uses (btr_def *, btr_user *, sbitmap *, sbitmap *, int); +static void link_btr_uses (btr_def **, btr_user **, sbitmap *, sbitmap *, int); static void build_btr_def_use_webs (btr_heap_t *); -static int block_at_edge_of_live_range_p (int, btr_def); -static void clear_btr_from_live_range (btr_def def); -static void add_btr_to_live_range (btr_def, int); +static int block_at_edge_of_live_range_p (int, btr_def *); +static void clear_btr_from_live_range (btr_def *def); +static void add_btr_to_live_range (btr_def *, int); static void augment_live_range 
(bitmap, HARD_REG_SET *, basic_block, basic_block, int); static int choose_btr (HARD_REG_SET); -static void combine_btr_defs (btr_def, HARD_REG_SET *); -static void btr_def_live_range (btr_def, HARD_REG_SET *); -static void move_btr_def (basic_block, int, btr_def, bitmap, HARD_REG_SET *); -static int migrate_btr_def (btr_def, int); +static void combine_btr_defs (btr_def *, HARD_REG_SET *); +static void btr_def_live_range (btr_def *, HARD_REG_SET *); +static void move_btr_def (basic_block, int, btr_def *, bitmap, HARD_REG_SET *); +static int migrate_btr_def (btr_def *, int); static void migrate_btr_defs (enum reg_class, int); static int can_move_up (const_basic_block, const rtx_insn *, int); static void note_btr_set (rtx, const_rtx, void *); @@ -257,11 +259,11 @@ insn_sets_btr_p (const rtx_insn *insn, int check_const, int *regno) to in the list starting with *ALL_BTR_DEF_GROUPS. If no such group exists, create one. Add def to the group. */ static void -find_btr_def_group (btr_def_group *all_btr_def_groups, btr_def def) +find_btr_def_group (btr_def_group **all_btr_def_groups, btr_def *def) { if (insn_sets_btr_p (def->insn, 1, NULL)) { - btr_def_group this_group; + btr_def_group *this_group; rtx def_src = SET_SRC (single_set (def->insn)); /* ?? This linear search is an efficiency concern, particularly @@ -274,7 +276,7 @@ find_btr_def_group (btr_def_group *all_btr_def_groups, btr_def def) if (!this_group) { - this_group = XOBNEW (&migrate_btrl_obstack, struct btr_def_group_s); + this_group = XOBNEW (&migrate_btrl_obstack, btr_def_group); this_group->src = def_src; this_group->members = NULL; this_group->next = *all_btr_def_groups; @@ -291,13 +293,13 @@ find_btr_def_group (btr_def_group *all_btr_def_groups, btr_def def) /* Create a new target register definition structure, for a definition in block BB, instruction INSN, and insert it into ALL_BTR_DEFS. Return the new definition. 
*/ -static btr_def +static btr_def * add_btr_def (btr_heap_t *all_btr_defs, basic_block bb, int insn_luid, rtx_insn *insn, unsigned int dest_reg, int other_btr_uses_before_def, - btr_def_group *all_btr_def_groups) + btr_def_group **all_btr_def_groups) { - btr_def this_def = XOBNEW (&migrate_btrl_obstack, struct btr_def_s); + btr_def *this_def = XOBNEW (&migrate_btrl_obstack, btr_def); this_def->bb = bb; this_def->luid = insn_luid; this_def->insn = insn; @@ -325,7 +327,7 @@ add_btr_def (btr_heap_t *all_btr_defs, basic_block bb, int insn_luid, /* Create a new target register user structure, for a use in block BB, instruction INSN. Return the new user. */ -static btr_user +static btr_user * new_btr_user (basic_block bb, int insn_luid, rtx_insn *insn) { /* This instruction reads target registers. We need @@ -334,7 +336,7 @@ new_btr_user (basic_block bb, int insn_luid, rtx_insn *insn) */ rtx *usep = find_btr_use (PATTERN (insn)); rtx use; - btr_user user = NULL; + btr_user *user = NULL; if (usep) { @@ -348,7 +350,7 @@ new_btr_user (basic_block bb, int insn_luid, rtx_insn *insn) usep = NULL; } use = usep ? *usep : NULL_RTX; - user = XOBNEW (&migrate_btrl_obstack, struct btr_user_s); + user = XOBNEW (&migrate_btrl_obstack, btr_user); user->bb = bb; user->luid = insn_luid; user->insn = insn; @@ -395,9 +397,9 @@ dump_btrs_live (int bb) If any of them use the same register, set their other_use_this_block flag. 
*/ static void -note_other_use_this_block (unsigned int regno, btr_user users_this_bb) +note_other_use_this_block (unsigned int regno, btr_user *users_this_bb) { - btr_user user; + btr_user *user; for (user = users_this_bb; user != NULL; user = user->next) if (user->use && REGNO (user->use) == regno) @@ -405,7 +407,7 @@ note_other_use_this_block (unsigned int regno, btr_user users_this_bb) } struct defs_uses_info { - btr_user users_this_bb; + btr_user *users_this_bb; HARD_REG_SET btrs_written_in_block; HARD_REG_SET btrs_live_in_block; sbitmap bb_gen; @@ -438,8 +440,8 @@ note_btr_set (rtx dest, const_rtx set ATTRIBUTE_UNUSED, void *data) } static void -compute_defs_uses_and_gen (btr_heap_t *all_btr_defs, btr_def *def_array, - btr_user *use_array, sbitmap *btr_defset, +compute_defs_uses_and_gen (btr_heap_t *all_btr_defs, btr_def **def_array, + btr_user **use_array, sbitmap *btr_defset, sbitmap *bb_gen, HARD_REG_SET *btrs_written) { /* Scan the code building up the set of all defs and all uses. 
@@ -450,7 +452,7 @@ compute_defs_uses_and_gen (btr_heap_t *all_btr_defs, btr_def *def_array, */ int i; int insn_luid = 0; - btr_def_group all_btr_def_groups = NULL; + btr_def_group *all_btr_def_groups = NULL; defs_uses_info info; bitmap_vector_clear (bb_gen, last_basic_block_for_fn (cfun)); @@ -458,7 +460,7 @@ compute_defs_uses_and_gen (btr_heap_t *all_btr_defs, btr_def *def_array, { basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i); int reg; - btr_def defs_this_bb = NULL; + btr_def *defs_this_bb = NULL; rtx_insn *insn; rtx_insn *last; int can_throw = 0; @@ -485,7 +487,7 @@ compute_defs_uses_and_gen (btr_heap_t *all_btr_defs, btr_def *def_array, if (insn_sets_btr_p (insn, 0, ®no)) { - btr_def def = add_btr_def ( + btr_def *def = add_btr_def ( all_btr_defs, bb, insn_luid, insn, regno, TEST_HARD_REG_BIT (info.btrs_live_in_block, regno), &all_btr_def_groups); @@ -505,7 +507,7 @@ compute_defs_uses_and_gen (btr_heap_t *all_btr_defs, btr_def *def_array, else if (cfun->has_nonlocal_label && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE) { - btr_user user; + btr_user *user; /* Do the equivalent of calling note_other_use_this_block for every target register. 
*/ @@ -521,7 +523,7 @@ compute_defs_uses_and_gen (btr_heap_t *all_btr_defs, btr_def *def_array, { if (find_btr_use (PATTERN (insn))) { - btr_user user = new_btr_user (bb, insn_luid, insn); + btr_user *user = new_btr_user (bb, insn_luid, insn); use_array[insn_uid] = user; if (user->use) @@ -655,7 +657,7 @@ compute_out (sbitmap *bb_out, sbitmap *bb_gen, sbitmap *bb_kill, int max_uid) } static void -link_btr_uses (btr_def *def_array, btr_user *use_array, sbitmap *bb_out, +link_btr_uses (btr_def **def_array, btr_user **use_array, sbitmap *bb_out, sbitmap *btr_defset, int max_uid) { int i; @@ -678,8 +680,8 @@ link_btr_uses (btr_def *def_array, btr_user *use_array, sbitmap *bb_out, { int insn_uid = INSN_UID (insn); - btr_def def = def_array[insn_uid]; - btr_user user = use_array[insn_uid]; + btr_def *def = def_array[insn_uid]; + btr_user *user = use_array[insn_uid]; if (def != NULL) { /* Remove all reaching defs of regno except @@ -716,7 +718,7 @@ link_btr_uses (btr_def *def_array, btr_user *use_array, sbitmap *bb_out, } EXECUTE_IF_SET_IN_BITMAP (reaching_defs_of_reg, 0, uid, sbi) { - btr_def def = def_array[uid]; + btr_def *def = def_array[uid]; /* We now know that def reaches user. */ @@ -770,8 +772,8 @@ static void build_btr_def_use_webs (btr_heap_t *all_btr_defs) { const int max_uid = get_max_uid (); - btr_def *def_array = XCNEWVEC (btr_def, max_uid); - btr_user *use_array = XCNEWVEC (btr_user, max_uid); + btr_def **def_array = XCNEWVEC (btr_def *, max_uid); + btr_user **use_array = XCNEWVEC (btr_user *, max_uid); sbitmap *btr_defset = sbitmap_vector_alloc ( (last_btr - first_btr) + 1, max_uid); sbitmap *bb_gen = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), @@ -808,14 +810,14 @@ build_btr_def_use_webs (btr_heap_t *all_btr_defs) live range of the definition DEF, AND there are other live ranges of the same target register that include BB. 
*/ static int -block_at_edge_of_live_range_p (int bb, btr_def def) +block_at_edge_of_live_range_p (int bb, btr_def *def) { if (def->other_btr_uses_before_def && BASIC_BLOCK_FOR_FN (cfun, bb) == def->bb) return 1; else if (def->other_btr_uses_after_use) { - btr_user user; + btr_user *user; for (user = def->uses; user != NULL; user = user->next) if (BASIC_BLOCK_FOR_FN (cfun, bb) == user->bb) return 1; @@ -832,7 +834,7 @@ block_at_edge_of_live_range_p (int bb, btr_def def) to remove the target register from the live set of these blocks only if they do not contain other live ranges for the same register. */ static void -clear_btr_from_live_range (btr_def def) +clear_btr_from_live_range (btr_def *def) { unsigned bb; bitmap_iterator bi; @@ -860,7 +862,7 @@ clear_btr_from_live_range (btr_def def) If OWN_END is set, also show that the register is live from our definitions at the end of the basic block where it is defined. */ static void -add_btr_to_live_range (btr_def def, int own_end) +add_btr_to_live_range (btr_def *def, int own_end) { unsigned bb; bitmap_iterator bi; @@ -1003,11 +1005,11 @@ choose_btr (HARD_REG_SET used_btrs) in this live range, but ignore the live range represented by DEF when calculating this set. */ static void -btr_def_live_range (btr_def def, HARD_REG_SET *btrs_live_in_range) +btr_def_live_range (btr_def *def, HARD_REG_SET *btrs_live_in_range) { if (!def->live_range) { - btr_user user; + btr_user *user; def->live_range = BITMAP_ALLOC (NULL); @@ -1050,9 +1052,9 @@ btr_def_live_range (btr_def def, HARD_REG_SET *btrs_live_in_range) group that are dominated by DEF, provided that there is a target register available to allocate to the merged web. 
*/ static void -combine_btr_defs (btr_def def, HARD_REG_SET *btrs_live_in_range) +combine_btr_defs (btr_def *def, HARD_REG_SET *btrs_live_in_range) { - btr_def other_def; + btr_def *other_def; for (other_def = def->group->members; other_def != NULL; @@ -1070,7 +1072,7 @@ combine_btr_defs (btr_def def, HARD_REG_SET *btrs_live_in_range) int btr; HARD_REG_SET combined_btrs_live; bitmap combined_live_range = BITMAP_ALLOC (NULL); - btr_user user; + btr_user *user; if (other_def->live_range == NULL) { @@ -1100,7 +1102,7 @@ combine_btr_defs (btr_def def, HARD_REG_SET *btrs_live_in_range) user = other_def->uses; while (user != NULL) { - btr_user next = user->next; + btr_user *next = user->next; user->next = def->uses; def->uses = user; @@ -1139,7 +1141,7 @@ combine_btr_defs (btr_def def, HARD_REG_SET *btrs_live_in_range) If this new position means that other defs in the same group can be combined with DEF then combine them. */ static void -move_btr_def (basic_block new_def_bb, int btr, btr_def def, bitmap live_range, +move_btr_def (basic_block new_def_bb, int btr, btr_def *def, bitmap live_range, HARD_REG_SET *btrs_live_in_range) { /* We can move the instruction. @@ -1154,7 +1156,7 @@ move_btr_def (basic_block new_def_bb, int btr, btr_def def, bitmap live_range, rtx btr_rtx; rtx_insn *new_insn; machine_mode btr_mode; - btr_user user; + btr_user *user; rtx set; if (dump_file) @@ -1264,7 +1266,7 @@ can_move_up (const_basic_block bb, const rtx_insn *insn, int n_insns) MIN_COST, but we may be able to reduce it further). Return zero if no further migration is possible. 
*/ static int -migrate_btr_def (btr_def def, int min_cost) +migrate_btr_def (btr_def *def, int min_cost) { bitmap live_range; HARD_REG_SET btrs_live_in_range; @@ -1273,7 +1275,7 @@ migrate_btr_def (btr_def def, int min_cost) basic_block attempt; int give_up = 0; int def_moved = 0; - btr_user user; + btr_user *user; int def_latency; if (dump_file) @@ -1432,7 +1434,7 @@ migrate_btr_defs (enum reg_class btr_class, int allow_callee_save) while (!all_btr_defs.empty ()) { int min_cost = -all_btr_defs.min_key (); - btr_def def = all_btr_defs.extract_min (); + btr_def *def = all_btr_defs.extract_min (); if (migrate_btr_def (def, min_cost)) { all_btr_defs.insert (-def->cost, def); diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog index 3acc84f433b..9a631698c1c 100644 --- a/gcc/c-family/ChangeLog +++ b/gcc/c-family/ChangeLog @@ -1,3 +1,31 @@ +2015-09-16 Mikhail Maltsev <maltsevm@gmail.com> + + * c-format.c (check_format_arg): Adjust to use common block size in all + object pools. + +2015-09-15 David Malcolm <dmalcolm@redhat.com> + + * c-format.c (location_from_offset): Update for change in + signature of location_get_source_line. + * c-indentation.c (get_visual_column): Likewise. + (line_contains_hash_if): Likewise. + +2015-09-14 Marek Polacek <polacek@redhat.com> + + * c-opts.c (c_common_post_options): Set C++ standard earlier, before + setting various warnings. + +2015-09-14 Marek Polacek <polacek@redhat.com> + + * c-common.c (warn_for_sign_compare): Cast to unsigned when shifting + a negative value. + +2015-09-11 Mark Wielaard <mjw@redhat.com> + + PR c/28901 + * c.opt (Wunused-variable): Option from common.opt. + (Wunused-const-variable): New option. 
+ 2015-09-09 Paolo Carlini <paolo.carlini@oracle.com> PR c++/53184 diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c index 02866235119..534ff38de4a 100644 --- a/gcc/c-family/c-common.c +++ b/gcc/c-family/c-common.c @@ -12142,7 +12142,7 @@ warn_for_sign_compare (location_t location, if (bits < TYPE_PRECISION (result_type) && bits < HOST_BITS_PER_LONG && unsignedp) { - mask = (~ (HOST_WIDE_INT) 0) << bits; + mask = (~ (unsigned HOST_WIDE_INT) 0) << bits; if ((mask & constant) != mask) { if (constant == 0) diff --git a/gcc/c-family/c-format.c b/gcc/c-family/c-format.c index 4bc31476d5a..728f0838e83 100644 --- a/gcc/c-family/c-format.c +++ b/gcc/c-family/c-format.c @@ -132,7 +132,7 @@ location_from_offset (location_t loc, int offset) expanded_location s = expand_location_to_spelling_point (loc); int line_width; - const char *line = location_get_source_line (s, &line_width); + const char *line = location_get_source_line (s.file, s.line, &line_width); if (line == NULL) return loc; line += s.column - 1 ; @@ -1687,8 +1687,7 @@ check_format_arg (void *ctx, tree format_tree, will decrement it if it finds there are extra arguments, but this way need not adjust it for every return. 
*/ res->number_other++; - object_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool", - 10); + object_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool"); check_format_info_main (res, info, format_chars, format_length, params, arg_num, fwt_pool); } diff --git a/gcc/c-family/c-indentation.c b/gcc/c-family/c-indentation.c index fdfe0a93412..dd3522360b4 100644 --- a/gcc/c-family/c-indentation.c +++ b/gcc/c-family/c-indentation.c @@ -45,7 +45,8 @@ get_visual_column (expanded_location exploc, unsigned int *first_nws = NULL) { int line_len; - const char *line = location_get_source_line (exploc, &line_len); + const char *line = location_get_source_line (exploc.file, exploc.line, + &line_len); if (!line) return false; unsigned int vis_column = 0; @@ -84,13 +85,8 @@ get_visual_column (expanded_location exploc, static bool line_contains_hash_if (const char *file, int line_num) { - expanded_location exploc; - exploc.file = file; - exploc.line = line_num; - exploc.column = 1; - int line_len; - const char *line = location_get_source_line (exploc, &line_len); + const char *line = location_get_source_line (file, line_num, &line_len); if (!line) return false; diff --git a/gcc/c-family/c-opts.c b/gcc/c-family/c-opts.c index 3239a853546..f358b62a908 100644 --- a/gcc/c-family/c-opts.c +++ b/gcc/c-family/c-opts.c @@ -800,6 +800,10 @@ c_common_post_options (const char **pfilename) && flag_no_builtin) flag_tree_loop_distribute_patterns = 0; + /* Set C++ standard to C++14 if not specified on the command line. */ + if (c_dialect_cxx () && cxx_dialect == cxx_unset) + set_std_cxx14 (/*ISO*/false); + /* -Woverlength-strings is off by default, but is enabled by -Wpedantic. It is never enabled in C++, as the minimum limit is not normative in that standard. */ @@ -887,10 +891,6 @@ c_common_post_options (const char **pfilename) if (flag_abi_version == 0) flag_abi_version = 10; - /* Set C++ standard to C++14 if not specified on the command line. 
*/ - if (c_dialect_cxx () && cxx_dialect == cxx_unset) - set_std_cxx14 (/*ISO*/false); - if (cxx_dialect >= cxx11) { /* If we're allowing C++0x constructs, don't warn about C++98 diff --git a/gcc/c-family/c.opt b/gcc/c-family/c.opt index d519d7a000b..47ba0704da8 100644 --- a/gcc/c-family/c.opt +++ b/gcc/c-family/c.opt @@ -912,6 +912,14 @@ Wunused-result C ObjC C++ ObjC++ Var(warn_unused_result) Init(1) Warning Warn if a caller of a function, marked with attribute warn_unused_result, does not use its return value +Wunused-variable +C ObjC C++ ObjC++ LangEnabledBy(C ObjC C++ ObjC++,Wunused) +; documented in common.opt + +Wunused-const-variable +C ObjC C++ ObjC++ Var(warn_unused_const_variable) Warning LangEnabledBy(C ObjC,Wunused-variable) +Warn when a const variable is unused + Wvariadic-macros C ObjC C++ ObjC++ CPP(warn_variadic_macros) CppReason(CPP_W_VARIADIC_MACROS) Var(cpp_warn_variadic_macros) Init(0) Warning LangEnabledBy(C ObjC C++ ObjC++,Wpedantic || Wtraditional) Warn about using variadic macros diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog index 325686a4b1c..27659e231c3 100644 --- a/gcc/c/ChangeLog +++ b/gcc/c/ChangeLog @@ -1,3 +1,28 @@ +2015-09-15 Marek Polacek <polacek@redhat.com> + + PR c/67580 + * c-decl.c (tag_exists_p): New function. + * c-parser.c (c_parser_declaration_or_fndef): Give a hint when + struct/union/enum keywords are missing. + * c-tree.h (tag_exists_p): Declare. + +2015-09-15 Marek Polacek <polacek@redhat.com> + + * c-decl.c (lookup_label): Return NULL_TREE instead of 0. + (lookup_tag): Change the type of THISLEVEL_ONLY to bool. + Return NULL_TREE instead of 0. + (lookup_name): Return NULL_TREE instead of 0. + (lookup_name_in_scope): Likewise. + (shadow_tag_warned): Use true instead of 1 and NULL_TREE instead of 0. + (parser_xref_tag): Use false instead of 0. + (start_struct): Use true instead of 1. + (start_enum): Use true instead of 1 and NULL_TREE instead of 0. 
+ +2015-09-14 Marek Polacek <polacek@redhat.com> + + * c-typeck.c (set_nonincremental_init_from_string): Use + HOST_WIDE_INT_M1U when shifting a negative value. + 2015-09-09 Mark Wielaard <mjw@redhat.com> * c-typeck.c (build_binary_op): Check and warn when nonnull arg diff --git a/gcc/c/c-decl.c b/gcc/c/c-decl.c index 5e5b6d7dfc2..a110226d01f 100644 --- a/gcc/c/c-decl.c +++ b/gcc/c/c-decl.c @@ -3474,7 +3474,7 @@ lookup_label (tree name) if (current_function_scope == 0) { error ("label %qE referenced outside of any function", name); - return 0; + return NULL_TREE; } /* Use a label already defined or ref'd with this name, but not if @@ -3811,14 +3811,14 @@ c_check_switch_jump_warnings (struct c_spot_bindings *switch_bindings, If the wrong kind of type is found, an error is reported. */ static tree -lookup_tag (enum tree_code code, tree name, int thislevel_only, +lookup_tag (enum tree_code code, tree name, bool thislevel_only, location_t *ploc) { struct c_binding *b = I_TAG_BINDING (name); - int thislevel = 0; + bool thislevel = false; if (!b || !b->decl) - return 0; + return NULL_TREE; /* We only care about whether it's in this level if thislevel_only was set or it might be a type clash. */ @@ -3830,11 +3830,11 @@ lookup_tag (enum tree_code code, tree name, int thislevel_only, file scope is created.) */ if (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) - thislevel = 1; + thislevel = true; } if (thislevel_only && !thislevel) - return 0; + return NULL_TREE; if (TREE_CODE (b->decl) != code) { @@ -3856,6 +3856,18 @@ lookup_tag (enum tree_code code, tree name, int thislevel_only, return b->decl; } +/* Return true if a definition exists for NAME with code CODE. 
*/ + +bool +tag_exists_p (enum tree_code code, tree name) +{ + struct c_binding *b = I_TAG_BINDING (name); + + if (b == NULL || b->decl == NULL_TREE) + return false; + return TREE_CODE (b->decl) == code; +} + /* Print an error message now for a recent invalid struct, union or enum cross reference. We don't print them immediately because they are not invalid @@ -3885,7 +3897,7 @@ lookup_name (tree name) maybe_record_typedef_use (b->decl); return b->decl; } - return 0; + return NULL_TREE; } /* Similar to `lookup_name' but look only at the indicated scope. */ @@ -3898,7 +3910,7 @@ lookup_name_in_scope (tree name, struct c_scope *scope) for (b = I_SYMBOL_BINDING (name); b; b = b->shadowed) if (B_IN_SCOPE (b, scope)) return b->decl; - return 0; + return NULL_TREE; } /* Create the predefined scalar types of C, @@ -4138,9 +4150,9 @@ shadow_tag_warned (const struct c_declspecs *declspecs, int warned) else { pending_invalid_xref = 0; - t = lookup_tag (code, name, 1, NULL); + t = lookup_tag (code, name, true, NULL); - if (t == 0) + if (t == NULL_TREE) { t = make_node (code); pushtag (input_location, name, t); @@ -7082,7 +7094,7 @@ parser_xref_tag (location_t loc, enum tree_code code, tree name) /* If a cross reference is requested, look up the type already defined for this tag and return it. */ - ref = lookup_tag (code, name, 0, &refloc); + ref = lookup_tag (code, name, false, &refloc); /* If this is the right type of tag, return what we found. (This reference will be shadowed by shadow_tag later if appropriate.) If this is the wrong type of tag, do not return it. 
If it was the @@ -7186,7 +7198,7 @@ start_struct (location_t loc, enum tree_code code, tree name, location_t refloc = UNKNOWN_LOCATION; if (name != NULL_TREE) - ref = lookup_tag (code, name, 1, &refloc); + ref = lookup_tag (code, name, true, &refloc); if (ref && TREE_CODE (ref) == code) { if (TYPE_SIZE (ref)) @@ -7905,9 +7917,9 @@ start_enum (location_t loc, struct c_enum_contents *the_enum, tree name) forward reference. */ if (name != NULL_TREE) - enumtype = lookup_tag (ENUMERAL_TYPE, name, 1, &enumloc); + enumtype = lookup_tag (ENUMERAL_TYPE, name, true, &enumloc); - if (enumtype == 0 || TREE_CODE (enumtype) != ENUMERAL_TYPE) + if (enumtype == NULL_TREE || TREE_CODE (enumtype) != ENUMERAL_TYPE) { enumtype = make_node (ENUMERAL_TYPE); pushtag (loc, name, enumtype); diff --git a/gcc/c/c-parser.c b/gcc/c/c-parser.c index 52eec97152b..691baef15d7 100644 --- a/gcc/c/c-parser.c +++ b/gcc/c/c-parser.c @@ -1539,8 +1539,16 @@ c_parser_declaration_or_fndef (c_parser *parser, bool fndef_ok, || c_parser_peek_2nd_token (parser)->type == CPP_MULT) && (!nested || !lookup_name (c_parser_peek_token (parser)->value))) { - error_at (here, "unknown type name %qE", - c_parser_peek_token (parser)->value); + tree name = c_parser_peek_token (parser)->value; + error_at (here, "unknown type name %qE", name); + /* Give a hint to the user. This is not C++ with its implicit + typedef. */ + if (tag_exists_p (RECORD_TYPE, name)) + inform (here, "use %<struct%> keyword to refer to the type"); + else if (tag_exists_p (UNION_TYPE, name)) + inform (here, "use %<union%> keyword to refer to the type"); + else if (tag_exists_p (ENUMERAL_TYPE, name)) + inform (here, "use %<enum%> keyword to refer to the type"); /* Parse declspecs normally to get a correct pointer type, but avoid a further "fails to be a type name" error. 
Refuse nested functions diff --git a/gcc/c/c-tree.h b/gcc/c/c-tree.h index 28b58c636e4..cf66c22d699 100644 --- a/gcc/c/c-tree.h +++ b/gcc/c/c-tree.h @@ -701,6 +701,7 @@ extern tree c_omp_reduction_lookup (tree, tree); extern tree c_check_omp_declare_reduction_r (tree *, int *, void *); extern void c_pushtag (location_t, tree, tree); extern void c_bind (location_t, tree, bool); +extern bool tag_exists_p (enum tree_code, tree); /* In c-errors.c */ extern void pedwarn_c90 (location_t, int opt, const char *, ...) diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c index 4108f27ab7c..3b2623140e3 100644 --- a/gcc/c/c-typeck.c +++ b/gcc/c/c-typeck.c @@ -8276,7 +8276,7 @@ set_nonincremental_init_from_string (tree str, { if (val[0] & (((HOST_WIDE_INT) 1) << (bitpos - 1))) { - val[0] |= ((HOST_WIDE_INT) -1) << bitpos; + val[0] |= HOST_WIDE_INT_M1U << bitpos; val[1] = -1; } } @@ -8287,8 +8287,7 @@ set_nonincremental_init_from_string (tree str, } else if (val[1] & (((HOST_WIDE_INT) 1) << (bitpos - 1 - HOST_BITS_PER_WIDE_INT))) - val[1] |= ((HOST_WIDE_INT) -1) - << (bitpos - HOST_BITS_PER_WIDE_INT); + val[1] |= HOST_WIDE_INT_M1U << (bitpos - HOST_BITS_PER_WIDE_INT); } value = wide_int_to_tree (type, diff --git a/gcc/cfg.c b/gcc/cfg.c index c99849265cf..2bc785731f7 100644 --- a/gcc/cfg.c +++ b/gcc/cfg.c @@ -1052,7 +1052,7 @@ void initialize_original_copy_tables (void) { original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry> - ("original_copy", 10); + ("original_copy"); bb_original = new hash_table<bb_copy_hasher> (10); bb_copy = new hash_table<bb_copy_hasher> (10); loop_copy = new hash_table<bb_copy_hasher> (10); diff --git a/gcc/config.gcc b/gcc/config.gcc index 5712547ed10..75807f59277 100644 --- a/gcc/config.gcc +++ b/gcc/config.gcc @@ -3346,6 +3346,9 @@ if test x$with_cpu = x ; then ;; esac ;; + visium-*-*) + with_cpu=gr5 + ;; esac # Avoid overriding --with-cpu-32 and --with-cpu-64 values. 
@@ -4295,6 +4298,16 @@ case "${target}" in ;; esac ;; + visium-*-*) + supported_defaults="cpu" + case $with_cpu in + "" | gr5 | gr6) + ;; + *) echo "Unknown cpu used in --with-cpu=$with_cpu" 1>&2 + exit 1 + ;; + esac + ;; esac # Set some miscellaneous flags for particular targets. @@ -4449,6 +4462,9 @@ case ${target} in ;; esac ;; + visium-*-*) + target_cpu_default2="TARGET_CPU_$with_cpu" + ;; esac t= diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c index 5a0426348ee..c86f47d374f 100644 --- a/gcc/config/aarch64/aarch64-builtins.c +++ b/gcc/config/aarch64/aarch64-builtins.c @@ -75,7 +75,6 @@ #define v2di_UP V2DImode #define v2df_UP V2DFmode #define ti_UP TImode -#define ei_UP EImode #define oi_UP OImode #define ci_UP CImode #define xi_UP XImode @@ -449,7 +448,6 @@ static struct aarch64_simd_type_info aarch64_simd_types [] = { static tree aarch64_fp16_type_node = NULL_TREE; static tree aarch64_simd_intOI_type_node = NULL_TREE; -static tree aarch64_simd_intEI_type_node = NULL_TREE; static tree aarch64_simd_intCI_type_node = NULL_TREE; static tree aarch64_simd_intXI_type_node = NULL_TREE; @@ -523,8 +521,6 @@ aarch64_simd_builtin_std_type (enum machine_mode mode, return QUAL_TYPE (TI); case OImode: return aarch64_simd_intOI_type_node; - case EImode: - return aarch64_simd_intEI_type_node; case CImode: return aarch64_simd_intCI_type_node; case XImode: @@ -641,15 +637,11 @@ aarch64_init_simd_builtin_types (void) #define AARCH64_BUILD_SIGNED_TYPE(mode) \ make_signed_type (GET_MODE_PRECISION (mode)); aarch64_simd_intOI_type_node = AARCH64_BUILD_SIGNED_TYPE (OImode); - aarch64_simd_intEI_type_node = AARCH64_BUILD_SIGNED_TYPE (EImode); aarch64_simd_intCI_type_node = AARCH64_BUILD_SIGNED_TYPE (CImode); aarch64_simd_intXI_type_node = AARCH64_BUILD_SIGNED_TYPE (XImode); #undef AARCH64_BUILD_SIGNED_TYPE tdecl = add_builtin_type - ("__builtin_aarch64_simd_ei" , aarch64_simd_intEI_type_node); - TYPE_NAME (aarch64_simd_intEI_type_node) = 
tdecl; - tdecl = add_builtin_type ("__builtin_aarch64_simd_oi" , aarch64_simd_intOI_type_node); TYPE_NAME (aarch64_simd_intOI_type_node) = tdecl; tdecl = add_builtin_type diff --git a/gcc/config/aarch64/aarch64-modes.def b/gcc/config/aarch64/aarch64-modes.def index 3160bef1105..3bf3b2dea3c 100644 --- a/gcc/config/aarch64/aarch64-modes.def +++ b/gcc/config/aarch64/aarch64-modes.def @@ -50,9 +50,8 @@ VECTOR_MODE (FLOAT, DF, 1); /* V1DF. */ /* Oct Int: 256-bit integer mode needed for 32-byte vector arguments. */ INT_MODE (OI, 32); -/* Opaque integer modes for 3, 6 or 8 Neon double registers (2 is - TImode). */ -INT_MODE (EI, 24); +/* Opaque integer modes for 3 or 4 Neon q-registers / 6 or 8 Neon d-registers + (2 d-regs = 1 q-reg = TImode). */ INT_MODE (CI, 48); INT_MODE (XI, 64); diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md index a4eaecae2a0..dbe52591b59 100644 --- a/gcc/config/aarch64/aarch64-simd.md +++ b/gcc/config/aarch64/aarch64-simd.md @@ -3928,7 +3928,7 @@ (define_insn "aarch64_simd_ld2r<mode>" [(set (match_operand:OI 0 "register_operand" "=w") - (unspec:OI [(match_operand:<V_TWO_ELEM> 1 "aarch64_simd_struct_operand" "Utv") + (unspec:OI [(match_operand:BLK 1 "aarch64_simd_struct_operand" "Utv") (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY) ] UNSPEC_LD2_DUP))] "TARGET_SIMD" @@ -3938,7 +3938,7 @@ (define_insn "aarch64_vec_load_lanesoi_lane<mode>" [(set (match_operand:OI 0 "register_operand" "=w") - (unspec:OI [(match_operand:<V_TWO_ELEM> 1 "aarch64_simd_struct_operand" "Utv") + (unspec:OI [(match_operand:BLK 1 "aarch64_simd_struct_operand" "Utv") (match_operand:OI 2 "register_operand" "0") (match_operand:SI 3 "immediate_operand" "i") (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY) ] @@ -3981,9 +3981,9 @@ ) ;; RTL uses GCC vector extension indices, so flip only for assembly. 
-(define_insn "vec_store_lanesoi_lane<mode>" - [(set (match_operand:<V_TWO_ELEM> 0 "aarch64_simd_struct_operand" "=Utv") - (unspec:<V_TWO_ELEM> [(match_operand:OI 1 "register_operand" "w") +(define_insn "aarch64_vec_store_lanesoi_lane<mode>" + [(set (match_operand:BLK 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:BLK [(match_operand:OI 1 "register_operand" "w") (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY) (match_operand:SI 2 "immediate_operand" "i")] UNSPEC_ST2_LANE))] @@ -4026,7 +4026,7 @@ (define_insn "aarch64_simd_ld3r<mode>" [(set (match_operand:CI 0 "register_operand" "=w") - (unspec:CI [(match_operand:<V_THREE_ELEM> 1 "aarch64_simd_struct_operand" "Utv") + (unspec:CI [(match_operand:BLK 1 "aarch64_simd_struct_operand" "Utv") (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY) ] UNSPEC_LD3_DUP))] "TARGET_SIMD" @@ -4036,7 +4036,7 @@ (define_insn "aarch64_vec_load_lanesci_lane<mode>" [(set (match_operand:CI 0 "register_operand" "=w") - (unspec:CI [(match_operand:<V_THREE_ELEM> 1 "aarch64_simd_struct_operand" "Utv") + (unspec:CI [(match_operand:BLK 1 "aarch64_simd_struct_operand" "Utv") (match_operand:CI 2 "register_operand" "0") (match_operand:SI 3 "immediate_operand" "i") (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] @@ -4079,12 +4079,12 @@ ) ;; RTL uses GCC vector extension indices, so flip only for assembly. 
-(define_insn "vec_store_lanesci_lane<mode>" - [(set (match_operand:<V_THREE_ELEM> 0 "aarch64_simd_struct_operand" "=Utv") - (unspec:<V_THREE_ELEM> [(match_operand:CI 1 "register_operand" "w") - (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY) - (match_operand:SI 2 "immediate_operand" "i")] - UNSPEC_ST3_LANE))] +(define_insn "aarch64_vec_store_lanesci_lane<mode>" + [(set (match_operand:BLK 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:BLK [(match_operand:CI 1 "register_operand" "w") + (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY) + (match_operand:SI 2 "immediate_operand" "i")] + UNSPEC_ST3_LANE))] "TARGET_SIMD" { operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2]))); @@ -4124,7 +4124,7 @@ (define_insn "aarch64_simd_ld4r<mode>" [(set (match_operand:XI 0 "register_operand" "=w") - (unspec:XI [(match_operand:<V_FOUR_ELEM> 1 "aarch64_simd_struct_operand" "Utv") + (unspec:XI [(match_operand:BLK 1 "aarch64_simd_struct_operand" "Utv") (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY) ] UNSPEC_LD4_DUP))] "TARGET_SIMD" @@ -4134,7 +4134,7 @@ (define_insn "aarch64_vec_load_lanesxi_lane<mode>" [(set (match_operand:XI 0 "register_operand" "=w") - (unspec:XI [(match_operand:<V_FOUR_ELEM> 1 "aarch64_simd_struct_operand" "Utv") + (unspec:XI [(match_operand:BLK 1 "aarch64_simd_struct_operand" "Utv") (match_operand:XI 2 "register_operand" "0") (match_operand:SI 3 "immediate_operand" "i") (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] @@ -4177,12 +4177,12 @@ ) ;; RTL uses GCC vector extension indices, so flip only for assembly. 
-(define_insn "vec_store_lanesxi_lane<mode>" - [(set (match_operand:<V_FOUR_ELEM> 0 "aarch64_simd_struct_operand" "=Utv") - (unspec:<V_FOUR_ELEM> [(match_operand:XI 1 "register_operand" "w") - (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY) - (match_operand:SI 2 "immediate_operand" "i")] - UNSPEC_ST4_LANE))] +(define_insn "aarch64_vec_store_lanesxi_lane<mode>" + [(set (match_operand:BLK 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:BLK [(match_operand:XI 1 "register_operand" "w") + (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY) + (match_operand:SI 2 "immediate_operand" "i")] + UNSPEC_ST4_LANE))] "TARGET_SIMD" { operands[2] = GEN_INT (ENDIAN_LANE_N (<MODE>mode, INTVAL (operands[2]))); @@ -4381,42 +4381,18 @@ FAIL; }) -(define_expand "aarch64_ld2r<mode>" - [(match_operand:OI 0 "register_operand" "=w") - (match_operand:DI 1 "register_operand" "w") - (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] - "TARGET_SIMD" -{ - machine_mode mode = <V_TWO_ELEM>mode; - rtx mem = gen_rtx_MEM (mode, operands[1]); - - emit_insn (gen_aarch64_simd_ld2r<mode> (operands[0], mem)); - DONE; -}) - -(define_expand "aarch64_ld3r<mode>" - [(match_operand:CI 0 "register_operand" "=w") +(define_expand "aarch64_ld<VSTRUCT:nregs>r<VALLDIF:mode>" + [(match_operand:VSTRUCT 0 "register_operand" "=w") (match_operand:DI 1 "register_operand" "w") (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] "TARGET_SIMD" { - machine_mode mode = <V_THREE_ELEM>mode; - rtx mem = gen_rtx_MEM (mode, operands[1]); + rtx mem = gen_rtx_MEM (BLKmode, operands[1]); + set_mem_size (mem, GET_MODE_SIZE (GET_MODE_INNER (<VALLDIF:MODE>mode)) + * <VSTRUCT:nregs>); - emit_insn (gen_aarch64_simd_ld3r<mode> (operands[0], mem)); - DONE; -}) - -(define_expand "aarch64_ld4r<mode>" - [(match_operand:XI 0 "register_operand" "=w") - (match_operand:DI 1 "register_operand" "w") - (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] - "TARGET_SIMD" -{ - machine_mode mode = <V_FOUR_ELEM>mode; - rtx mem = gen_rtx_MEM 
(mode, operands[1]); - - emit_insn (gen_aarch64_simd_ld4r<mode> (operands[0],mem)); + emit_insn (gen_aarch64_simd_ld<VSTRUCT:nregs>r<VALLDIF:mode> (operands[0], + mem)); DONE; }) @@ -4425,8 +4401,9 @@ (subreg:OI (vec_concat:<VRL2> (vec_concat:<VDBL> - (unspec:VD [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")] - UNSPEC_LD2) + (unspec:VD + [(match_operand:BLK 1 "aarch64_simd_struct_operand" "Utv")] + UNSPEC_LD2) (vec_duplicate:VD (const_int 0))) (vec_concat:<VDBL> (unspec:VD [(match_dup 1)] @@ -4442,8 +4419,9 @@ (subreg:OI (vec_concat:<VRL2> (vec_concat:<VDBL> - (unspec:DX [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")] - UNSPEC_LD2) + (unspec:DX + [(match_operand:BLK 1 "aarch64_simd_struct_operand" "Utv")] + UNSPEC_LD2) (const_int 0)) (vec_concat:<VDBL> (unspec:DX [(match_dup 1)] @@ -4460,8 +4438,9 @@ (vec_concat:<VRL3> (vec_concat:<VRL2> (vec_concat:<VDBL> - (unspec:VD [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")] - UNSPEC_LD3) + (unspec:VD + [(match_operand:BLK 1 "aarch64_simd_struct_operand" "Utv")] + UNSPEC_LD3) (vec_duplicate:VD (const_int 0))) (vec_concat:<VDBL> (unspec:VD [(match_dup 1)] @@ -4482,8 +4461,9 @@ (vec_concat:<VRL3> (vec_concat:<VRL2> (vec_concat:<VDBL> - (unspec:DX [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")] - UNSPEC_LD3) + (unspec:DX + [(match_operand:BLK 1 "aarch64_simd_struct_operand" "Utv")] + UNSPEC_LD3) (const_int 0)) (vec_concat:<VDBL> (unspec:DX [(match_dup 1)] @@ -4504,8 +4484,9 @@ (vec_concat:<VRL4> (vec_concat:<VRL2> (vec_concat:<VDBL> - (unspec:VD [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")] - UNSPEC_LD4) + (unspec:VD + [(match_operand:BLK 1 "aarch64_simd_struct_operand" "Utv")] + UNSPEC_LD4) (vec_duplicate:VD (const_int 0))) (vec_concat:<VDBL> (unspec:VD [(match_dup 1)] @@ -4531,8 +4512,9 @@ (vec_concat:<VRL4> (vec_concat:<VRL2> (vec_concat:<VDBL> - (unspec:DX [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")] - UNSPEC_LD4) + (unspec:DX + [(match_operand:BLK 1 
"aarch64_simd_struct_operand" "Utv")] + UNSPEC_LD4) (const_int 0)) (vec_concat:<VDBL> (unspec:DX [(match_dup 1)] @@ -4558,8 +4540,8 @@ (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] "TARGET_SIMD" { - machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode; - rtx mem = gen_rtx_MEM (mode, operands[1]); + rtx mem = gen_rtx_MEM (BLKmode, operands[1]); + set_mem_size (mem, <VSTRUCT:nregs> * 8); emit_insn (gen_aarch64_ld<VSTRUCT:nregs><VDC:mode>_dreg (operands[0], mem)); DONE; @@ -4593,62 +4575,26 @@ DONE; }) -(define_expand "aarch64_ld2_lane<mode>" - [(match_operand:OI 0 "register_operand" "=w") +(define_expand "aarch64_ld<VSTRUCT:nregs>_lane<VALLDIF:mode>" + [(match_operand:VSTRUCT 0 "register_operand" "=w") (match_operand:DI 1 "register_operand" "w") - (match_operand:OI 2 "register_operand" "0") + (match_operand:VSTRUCT 2 "register_operand" "0") (match_operand:SI 3 "immediate_operand" "i") (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] "TARGET_SIMD" { - machine_mode mode = <V_TWO_ELEM>mode; - rtx mem = gen_rtx_MEM (mode, operands[1]); - - emit_insn (gen_aarch64_vec_load_lanesoi_lane<mode> (operands[0], - mem, - operands[2], - operands[3])); + rtx mem = gen_rtx_MEM (BLKmode, operands[1]); + set_mem_size (mem, GET_MODE_SIZE (GET_MODE_INNER (<VALLDIF:MODE>mode)) + * <VSTRUCT:nregs>); + + aarch64_simd_lane_bounds (operands[3], 0, + GET_MODE_NUNITS (<VALLDIF:MODE>mode), + NULL); + emit_insn (gen_aarch64_vec_load_lanes<VSTRUCT:mode>_lane<VALLDIF:mode> ( + operands[0], mem, operands[2], operands[3])); DONE; }) -(define_expand "aarch64_ld3_lane<mode>" - [(match_operand:CI 0 "register_operand" "=w") - (match_operand:DI 1 "register_operand" "w") - (match_operand:CI 2 "register_operand" "0") - (match_operand:SI 3 "immediate_operand" "i") - (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] - "TARGET_SIMD" -{ - machine_mode mode = <V_THREE_ELEM>mode; - rtx mem = gen_rtx_MEM (mode, operands[1]); - - emit_insn (gen_aarch64_vec_load_lanesci_lane<mode> (operands[0], - mem, - 
operands[2], - operands[3])); - DONE; -}) - -(define_expand "aarch64_ld4_lane<mode>" - [(match_operand:XI 0 "register_operand" "=w") - (match_operand:DI 1 "register_operand" "w") - (match_operand:XI 2 "register_operand" "0") - (match_operand:SI 3 "immediate_operand" "i") - (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] - "TARGET_SIMD" -{ - machine_mode mode = <V_FOUR_ELEM>mode; - rtx mem = gen_rtx_MEM (mode, operands[1]); - - emit_insn (gen_aarch64_vec_load_lanesxi_lane<mode> (operands[0], - mem, - operands[2], - operands[3])); - DONE; -}) - - - ;; Expanders for builtins to extract vector registers from large ;; opaque integer modes. @@ -4791,8 +4737,8 @@ ) (define_insn "aarch64_st2<mode>_dreg" - [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv") - (unspec:TI [(match_operand:OI 1 "register_operand" "w") + [(set (match_operand:BLK 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:BLK [(match_operand:OI 1 "register_operand" "w") (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] UNSPEC_ST2))] "TARGET_SIMD" @@ -4801,8 +4747,8 @@ ) (define_insn "aarch64_st2<mode>_dreg" - [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv") - (unspec:TI [(match_operand:OI 1 "register_operand" "w") + [(set (match_operand:BLK 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:BLK [(match_operand:OI 1 "register_operand" "w") (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] UNSPEC_ST2))] "TARGET_SIMD" @@ -4811,8 +4757,8 @@ ) (define_insn "aarch64_st3<mode>_dreg" - [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv") - (unspec:EI [(match_operand:CI 1 "register_operand" "w") + [(set (match_operand:BLK 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:BLK [(match_operand:CI 1 "register_operand" "w") (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] UNSPEC_ST3))] "TARGET_SIMD" @@ -4821,8 +4767,8 @@ ) (define_insn "aarch64_st3<mode>_dreg" - [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv") - (unspec:EI [(match_operand:CI 1 "register_operand" 
"w") + [(set (match_operand:BLK 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:BLK [(match_operand:CI 1 "register_operand" "w") (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] UNSPEC_ST3))] "TARGET_SIMD" @@ -4831,8 +4777,8 @@ ) (define_insn "aarch64_st4<mode>_dreg" - [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv") - (unspec:OI [(match_operand:XI 1 "register_operand" "w") + [(set (match_operand:BLK 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:BLK [(match_operand:XI 1 "register_operand" "w") (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] UNSPEC_ST4))] "TARGET_SIMD" @@ -4841,8 +4787,8 @@ ) (define_insn "aarch64_st4<mode>_dreg" - [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv") - (unspec:OI [(match_operand:XI 1 "register_operand" "w") + [(set (match_operand:BLK 0 "aarch64_simd_struct_operand" "=Utv") + (unspec:BLK [(match_operand:XI 1 "register_operand" "w") (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] UNSPEC_ST4))] "TARGET_SIMD" @@ -4856,8 +4802,8 @@ (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)] "TARGET_SIMD" { - machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode; - rtx mem = gen_rtx_MEM (mode, operands[0]); + rtx mem = gen_rtx_MEM (BLKmode, operands[0]); + set_mem_size (mem, <VSTRUCT:nregs> * 8); emit_insn (gen_aarch64_st<VSTRUCT:nregs><VDC:mode>_dreg (mem, operands[1])); DONE; @@ -4876,45 +4822,19 @@ DONE; }) -(define_expand "aarch64_st2_lane<mode>" - [(match_operand:DI 0 "register_operand" "r") - (match_operand:OI 1 "register_operand" "w") - (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY) - (match_operand:SI 2 "immediate_operand")] - "TARGET_SIMD" -{ - machine_mode mode = <V_TWO_ELEM>mode; - rtx mem = gen_rtx_MEM (mode, operands[0]); - - emit_insn (gen_vec_store_lanesoi_lane<mode> (mem, operands[1], operands[2])); - DONE; -}) - -(define_expand "aarch64_st3_lane<mode>" +(define_expand "aarch64_st<VSTRUCT:nregs>_lane<VALLDIF:mode>" [(match_operand:DI 0 "register_operand" "r") - (match_operand:CI 1 "register_operand" 
"w") - (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY) - (match_operand:SI 2 "immediate_operand")] - "TARGET_SIMD" -{ - machine_mode mode = <V_THREE_ELEM>mode; - rtx mem = gen_rtx_MEM (mode, operands[0]); - - emit_insn (gen_vec_store_lanesci_lane<mode> (mem, operands[1], operands[2])); - DONE; -}) - -(define_expand "aarch64_st4_lane<mode>" - [(match_operand:DI 0 "register_operand" "r") - (match_operand:XI 1 "register_operand" "w") + (match_operand:VSTRUCT 1 "register_operand" "w") (unspec:VALLDIF [(const_int 0)] UNSPEC_VSTRUCTDUMMY) (match_operand:SI 2 "immediate_operand")] "TARGET_SIMD" { - machine_mode mode = <V_FOUR_ELEM>mode; - rtx mem = gen_rtx_MEM (mode, operands[0]); + rtx mem = gen_rtx_MEM (BLKmode, operands[0]); + set_mem_size (mem, GET_MODE_SIZE (GET_MODE_INNER (<VALLDIF:MODE>mode)) + * <VSTRUCT:nregs>); - emit_insn (gen_vec_store_lanesxi_lane<mode> (mem, operands[1], operands[2])); + emit_insn (gen_aarch64_vec_store_lanes<VSTRUCT:mode>_lane<VALLDIF:mode> ( + mem, operands[1], operands[2])); DONE; }) diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c index b2a481b4c29..bbac271488f 100644 --- a/gcc/config/aarch64/aarch64.c +++ b/gcc/config/aarch64/aarch64.c @@ -677,7 +677,8 @@ aarch64_array_mode_supported_p (machine_mode mode, unsigned HOST_WIDE_INT nelems) { if (TARGET_SIMD - && AARCH64_VALID_SIMD_QREG_MODE (mode) + && (AARCH64_VALID_SIMD_QREG_MODE (mode) + || AARCH64_VALID_SIMD_DREG_MODE (mode)) && (nelems >= 2 && nelems <= 4)) return true; @@ -1734,11 +1735,27 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm) aarch64_emit_move (dest, base); return; } + mem = force_const_mem (ptr_mode, imm); gcc_assert (mem); + + /* If we aren't generating PC relative literals, then + we need to expand the literal pool access carefully. + This is something that needs to be done in a number + of places, so could well live as a separate function. 
*/ + if (nopcrelative_literal_loads) + { + gcc_assert (can_create_pseudo_p ()); + base = gen_reg_rtx (ptr_mode); + aarch64_expand_mov_immediate (base, XEXP (mem, 0)); + mem = gen_rtx_MEM (ptr_mode, base); + } + if (mode != ptr_mode) mem = gen_rtx_ZERO_EXTEND (mode, mem); + emit_insn (gen_rtx_SET (dest, mem)); + return; case SYMBOL_SMALL_TLSGD: @@ -3854,9 +3871,10 @@ aarch64_classify_address (struct aarch64_address_info *info, rtx sym, addend; split_const (x, &sym, &addend); - return (GET_CODE (sym) == LABEL_REF - || (GET_CODE (sym) == SYMBOL_REF - && CONSTANT_POOL_ADDRESS_P (sym))); + return ((GET_CODE (sym) == LABEL_REF + || (GET_CODE (sym) == SYMBOL_REF + && CONSTANT_POOL_ADDRESS_P (sym) + && !nopcrelative_literal_loads))); } return false; @@ -5039,12 +5057,69 @@ aarch64_legitimize_reload_address (rtx *x_p, } +/* Return the reload icode required for a constant pool in mode. */ +static enum insn_code +aarch64_constant_pool_reload_icode (machine_mode mode) +{ + switch (mode) + { + case SFmode: + return CODE_FOR_aarch64_reload_movcpsfdi; + + case DFmode: + return CODE_FOR_aarch64_reload_movcpdfdi; + + case TFmode: + return CODE_FOR_aarch64_reload_movcptfdi; + + case V8QImode: + return CODE_FOR_aarch64_reload_movcpv8qidi; + + case V16QImode: + return CODE_FOR_aarch64_reload_movcpv16qidi; + + case V4HImode: + return CODE_FOR_aarch64_reload_movcpv4hidi; + + case V8HImode: + return CODE_FOR_aarch64_reload_movcpv8hidi; + + case V2SImode: + return CODE_FOR_aarch64_reload_movcpv2sidi; + + case V4SImode: + return CODE_FOR_aarch64_reload_movcpv4sidi; + + case V2DImode: + return CODE_FOR_aarch64_reload_movcpv2didi; + + case V2DFmode: + return CODE_FOR_aarch64_reload_movcpv2dfdi; + + default: + gcc_unreachable (); + } + + gcc_unreachable (); +} static reg_class_t aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x, reg_class_t rclass, machine_mode mode, secondary_reload_info *sri) { + + /* If we have to disable direct literal pool loads and stores because the + 
function is too big, then we need a scratch register. */ + if (MEM_P (x) && GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x) + && (SCALAR_FLOAT_MODE_P (GET_MODE (x)) + || targetm.vector_mode_supported_p (GET_MODE (x))) + && nopcrelative_literal_loads) + { + sri->icode = aarch64_constant_pool_reload_icode (mode); + return NO_REGS; + } + /* Without the TARGET_SIMD instructions we cannot move a Q register to a Q register directly. We need a scratch. */ if (REG_P (x) && (mode == TFmode || mode == TImode) && mode == GET_MODE (x) @@ -7693,6 +7768,24 @@ aarch64_override_options_after_change_1 (struct gcc_options *opts) if (opts->x_align_functions <= 0) opts->x_align_functions = aarch64_tune_params.function_align; } + + /* If nopcrelative_literal_loads is set on the command line, this + implies that the user asked for PC relative literal loads. */ + if (nopcrelative_literal_loads == 1) + nopcrelative_literal_loads = 0; + + /* If it is not set on the command line, we default to no + pc relative literal loads. */ + if (nopcrelative_literal_loads == 2) + nopcrelative_literal_loads = 1; + + /* In the tiny memory model it makes no sense + to disallow non PC relative literal pool loads + as many other things will break anyway. */ + if (nopcrelative_literal_loads + && (aarch64_cmodel == AARCH64_CMODEL_TINY + || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)) + nopcrelative_literal_loads = 0; } /* 'Unpack' up the internal tuning structs and update the options @@ -8884,7 +8977,16 @@ aarch64_classify_symbol (rtx x, rtx offset, if (GET_CODE (x) == SYMBOL_REF) { if (aarch64_cmodel == AARCH64_CMODEL_LARGE) - return SYMBOL_FORCE_TO_MEM; + { + /* This is alright even in PIC code as the constant + pool reference is always PC relative and within + the same translation unit. 
*/ + if (nopcrelative_literal_loads + && CONSTANT_POOL_ADDRESS_P (x)) + return SYMBOL_SMALL_ABSOLUTE; + else + return SYMBOL_FORCE_TO_MEM; + } if (aarch64_tls_symbol_p (x)) return aarch64_classify_tls_symbol (x); @@ -10476,7 +10578,7 @@ aarch64_simd_attr_length_move (rtx_insn *insn) } /* Compute and return the length of aarch64_simd_reglist<mode>, where <mode> is - one of VSTRUCT modes: OI, CI, EI, or XI. */ + one of VSTRUCT modes: OI, CI, or XI. */ int aarch64_simd_attr_length_rglist (enum machine_mode mode) { diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h index 9669e014882..5a8db763222 100644 --- a/gcc/config/aarch64/aarch64.h +++ b/gcc/config/aarch64/aarch64.h @@ -872,6 +872,12 @@ extern enum aarch64_code_model aarch64_cmodel; (aarch64_cmodel == AARCH64_CMODEL_TINY \ || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC) +/* Modes valid for AdvSIMD D registers, i.e. that fit in half a Q register. */ +#define AARCH64_VALID_SIMD_DREG_MODE(MODE) \ + ((MODE) == V2SImode || (MODE) == V4HImode || (MODE) == V8QImode \ + || (MODE) == V2SFmode || (MODE) == V4HFmode || (MODE) == DImode \ + || (MODE) == DFmode) + /* Modes valid for AdvSIMD Q registers. */ #define AARCH64_VALID_SIMD_QREG_MODE(MODE) \ ((MODE) == V4SImode || (MODE) == V8HImode || (MODE) == V16QImode \ diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md index 5a005b572c8..88ba72e3ac7 100644 --- a/gcc/config/aarch64/aarch64.md +++ b/gcc/config/aarch64/aarch64.md @@ -4415,6 +4415,32 @@ ;; ------------------------------------------------------------------- ;; Reload support ;; ------------------------------------------------------------------- +;; Reload Scalar Floating point modes from constant pool. +;; The AArch64 port doesn't have __int128 constant move support. 
+(define_expand "aarch64_reload_movcp<GPF_TF:mode><P:mode>" + [(set (match_operand:GPF_TF 0 "register_operand" "=w") + (mem:GPF_TF (match_operand 1 "aarch64_constant_pool_symref" "S"))) + (clobber (match_operand:P 2 "register_operand" "=&r"))] + "TARGET_FLOAT && nopcrelative_literal_loads" + { + aarch64_expand_mov_immediate (operands[2], XEXP (operands[1], 0)); + emit_move_insn (operands[0], gen_rtx_MEM (<GPF_TF:MODE>mode, operands[2])); + DONE; + } +) + +;; Reload Vector modes from constant pool. +(define_expand "aarch64_reload_movcp<VALL:mode><P:mode>" + [(set (match_operand:VALL 0 "register_operand" "=w") + (mem:VALL (match_operand 1 "aarch64_constant_pool_symref" "S"))) + (clobber (match_operand:P 2 "register_operand" "=&r"))] + "TARGET_FLOAT && nopcrelative_literal_loads" + { + aarch64_expand_mov_immediate (operands[2], XEXP (operands[1], 0)); + emit_move_insn (operands[0], gen_rtx_MEM (<VALL:MODE>mode, operands[2])); + DONE; + } +) (define_expand "aarch64_reload_mov<mode>" [(set (match_operand:TX 0 "register_operand" "=w") diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt index 8642bdb74f3..a1ce58d4ea8 100644 --- a/gcc/config/aarch64/aarch64.opt +++ b/gcc/config/aarch64/aarch64.opt @@ -144,3 +144,7 @@ Enum(aarch64_abi) String(ilp32) Value(AARCH64_ABI_ILP32) EnumValue Enum(aarch64_abi) String(lp64) Value(AARCH64_ABI_LP64) + +mpc-relative-literal-loads +Target Report Save Var(nopcrelative_literal_loads) Init(2) Save +PC relative literal loads. diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md index ff698001d68..38c5a2424e4 100644 --- a/gcc/config/aarch64/iterators.md +++ b/gcc/config/aarch64/iterators.md @@ -44,6 +44,9 @@ ;; Double vector modes. (define_mode_iterator VDF [V2SF V4HF]) +;; Iterator for all scalar floating point modes (SF, DF and TF) +(define_mode_iterator GPF_TF [SF DF TF]) + ;; Integer vector modes. 
(define_mode_iterator VDQ_I [V8QI V16QI V4HI V8HI V2SI V4SI V2DI]) @@ -574,6 +577,9 @@ (define_mode_attr Vendreg [(OI "T") (CI "U") (XI "V")]) +;; This is both the number of Q-Registers needed to hold the corresponding +;; opaque large integer mode, and the number of elements touched by the +;; ld..._lane and st..._lane operations. (define_mode_attr nregs [(OI "2") (CI "3") (XI "4")]) (define_mode_attr VRL2 [(V8QI "V32QI") (V4HI "V16HI") @@ -591,37 +597,6 @@ (V2SI "V16SI") (V2SF "V16SF") (DI "V8DI") (DF "V8DF")]) -(define_mode_attr VSTRUCT_DREG [(OI "TI") (CI "EI") (XI "OI")]) - -;; Mode of pair of elements for each vector mode, to define transfer -;; size for structure lane/dup loads and stores. -(define_mode_attr V_TWO_ELEM [(V8QI "HI") (V16QI "HI") - (V4HI "SI") (V8HI "SI") - (V2SI "V2SI") (V4SI "V2SI") - (DI "V2DI") (V2DI "V2DI") - (V2SF "V2SF") (V4SF "V2SF") - (V4HF "SF") (V8HF "SF") - (DF "V2DI") (V2DF "V2DI")]) - -;; Similar, for three elements. -(define_mode_attr V_THREE_ELEM [(V8QI "BLK") (V16QI "BLK") - (V4HI "BLK") (V8HI "BLK") - (V2SI "BLK") (V4SI "BLK") - (DI "EI") (V2DI "EI") - (V2SF "BLK") (V4SF "BLK") - (V4HF "BLK") (V8HF "BLK") - (DF "EI") (V2DF "EI")]) - -;; Similar, for four elements. 
-(define_mode_attr V_FOUR_ELEM [(V8QI "SI") (V16QI "SI") - (V4HI "V4HI") (V8HI "V4HI") - (V2SI "V4SI") (V4SI "V4SI") - (DI "OI") (V2DI "OI") - (V2SF "V4SF") (V4SF "V4SF") - (V4HF "V4HF") (V8HF "V4HF") - (DF "OI") (V2DF "OI")]) - - ;; Mode for atomic operation suffixes (define_mode_attr atomic_sfx [(QI "b") (HI "h") (SI "") (DI "")]) diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md index 39792097220..7b852a43ff5 100644 --- a/gcc/config/aarch64/predicates.md +++ b/gcc/config/aarch64/predicates.md @@ -362,3 +362,7 @@ (define_predicate "aarch64_simd_shift_imm_bitsize_di" (and (match_code "const_int") (match_test "IN_RANGE (INTVAL (op), 0, 64)"))) + +(define_predicate "aarch64_constant_pool_symref" + (and (match_code "symbol_ref") + (match_test "CONSTANT_POOL_ADDRESS_P (op)"))) diff --git a/gcc/config/alpha/alpha.c b/gcc/config/alpha/alpha.c index 32bb36eec33..cae819fa782 100644 --- a/gcc/config/alpha/alpha.c +++ b/gcc/config/alpha/alpha.c @@ -4057,10 +4057,10 @@ alpha_expand_block_clear (rtx operands[]) mem = adjust_address (orig_dst, mode, ofs - inv_alignofs); set_mem_alias_set (mem, 0); - mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8)); + mask = ~(HOST_WIDE_INT_M1U << (inv_alignofs * 8)); if (bytes < alignofs) { - mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8); + mask |= HOST_WIDE_INT_M1U << ((inv_alignofs + bytes) * 8); ofs += bytes; bytes = 0; } @@ -4206,7 +4206,7 @@ alpha_expand_block_clear (rtx operands[]) mem = adjust_address (orig_dst, DImode, ofs); set_mem_alias_set (mem, 0); - mask = ~(HOST_WIDE_INT)0 << (bytes * 8); + mask = HOST_WIDE_INT_M1U << (bytes * 8); tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask), NULL_RTX, 1, OPTAB_WIDEN); @@ -4222,7 +4222,7 @@ alpha_expand_block_clear (rtx operands[]) mem = adjust_address (orig_dst, SImode, ofs); set_mem_alias_set (mem, 0); - mask = ~(HOST_WIDE_INT)0 << (bytes * 8); + mask = HOST_WIDE_INT_M1U << (bytes * 8); tmp = expand_binop (SImode, and_optab, mem, GEN_INT 
(mask), NULL_RTX, 1, OPTAB_WIDEN); diff --git a/gcc/config/arc/arc.h b/gcc/config/arc/arc.h index 874b118421d..e8baf5b8d79 100644 --- a/gcc/config/arc/arc.h +++ b/gcc/config/arc/arc.h @@ -896,10 +896,6 @@ arc_return_addr_rtx(COUNT,FRAME) /* Tell GCC to use RETURN_IN_MEMORY. */ #define DEFAULT_PCC_STRUCT_RETURN 0 -/* Register in which address to store a structure value - is passed to a function, or 0 to use `invisible' first argument. */ -#define STRUCT_VALUE 0 - /* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, the stack pointer does not matter. The value is tested only in functions that have frame pointers. diff --git a/gcc/config/arm/arm-c.c b/gcc/config/arm/arm-c.c index 9bf3973f290..4754a15f52c 100644 --- a/gcc/config/arm/arm-c.c +++ b/gcc/config/arm/arm-c.c @@ -54,23 +54,20 @@ arm_lang_object_attributes_init (void) #pragma GCC target, we need to adjust the macros dynamically. */ static void -def_or_undef_macro(struct cpp_reader* pfile, const char *name, bool def_p) +def_or_undef_macro(struct cpp_reader* pfile, const char *name, bool def_p) { if (def_p) - cpp_define (pfile, name); - else - cpp_undef (pfile, name); -} + cpp_define (pfile, name); + else + cpp_undef (pfile, name); +} -void -arm_cpu_builtins (struct cpp_reader* pfile, int flags) +static void +arm_cpu_builtins (struct cpp_reader* pfile) { - def_or_undef_macro (pfile, "__ARM_FEATURE_DSP", - TARGET_DSP_MULTIPLY_P (flags)); - def_or_undef_macro (pfile, "__ARM_FEATURE_QBIT", - TARGET_ARM_QBIT_P (flags)); - def_or_undef_macro (pfile, "__ARM_FEATURE_SAT", - TARGET_ARM_SAT_P (flags)); + def_or_undef_macro (pfile, "__ARM_FEATURE_DSP", TARGET_DSP_MULTIPLY); + def_or_undef_macro (pfile, "__ARM_FEATURE_QBIT", TARGET_ARM_QBIT); + def_or_undef_macro (pfile, "__ARM_FEATURE_SAT", TARGET_ARM_SAT); if (TARGET_CRYPTO) builtin_define ("__ARM_FEATURE_CRYPTO"); if (unaligned_access) @@ -78,19 +75,19 @@ arm_cpu_builtins (struct cpp_reader* pfile, int flags) if (TARGET_CRC32) builtin_define 
("__ARM_FEATURE_CRC32"); - def_or_undef_macro (pfile, "__ARM_32BIT_STATE", TARGET_32BIT_P (flags)); + def_or_undef_macro (pfile, "__ARM_32BIT_STATE", TARGET_32BIT); - if (TARGET_ARM_FEATURE_LDREX_P (flags)) + if (TARGET_ARM_FEATURE_LDREX) builtin_define_with_int_value ("__ARM_FEATURE_LDREX", - TARGET_ARM_FEATURE_LDREX_P (flags)); + TARGET_ARM_FEATURE_LDREX); else cpp_undef (pfile, "__ARM_FEATURE_LDREX"); def_or_undef_macro (pfile, "__ARM_FEATURE_CLZ", - ((TARGET_ARM_ARCH >= 5 && !TARGET_THUMB_P (flags)) + ((TARGET_ARM_ARCH >= 5 && !TARGET_THUMB) || TARGET_ARM_ARCH_ISA_THUMB >=2)); - def_or_undef_macro (pfile, "__ARM_FEATURE_SIMD32", TARGET_INT_SIMD_P (flags)); + def_or_undef_macro (pfile, "__ARM_FEATURE_SIMD32", TARGET_INT_SIMD); builtin_define_with_int_value ("__ARM_SIZEOF_MINIMAL_ENUM", flag_short_enums ? 1 : 4); @@ -108,12 +105,12 @@ arm_cpu_builtins (struct cpp_reader* pfile, int flags) builtin_define ("__ARM_ARCH_ISA_ARM"); builtin_define ("__APCS_32__"); - def_or_undef_macro (pfile, "__thumb__", TARGET_THUMB_P (flags)); - def_or_undef_macro (pfile, "__thumb2__", TARGET_THUMB2_P (flags)); + def_or_undef_macro (pfile, "__thumb__", TARGET_THUMB); + def_or_undef_macro (pfile, "__thumb2__", TARGET_THUMB2); if (TARGET_BIG_END) - def_or_undef_macro (pfile, "__THUMBEB__", TARGET_THUMB_P (flags)); + def_or_undef_macro (pfile, "__THUMBEB__", TARGET_THUMB); else - def_or_undef_macro (pfile, "__THUMBEL__", TARGET_THUMB_P (flags)); + def_or_undef_macro (pfile, "__THUMBEL__", TARGET_THUMB); if (TARGET_ARM_ARCH_ISA_THUMB) builtin_define_with_int_value ("__ARM_ARCH_ISA_THUMB", @@ -181,8 +178,8 @@ arm_cpu_builtins (struct cpp_reader* pfile, int flags) builtin_define ("__ARM_EABI__"); } - def_or_undef_macro (pfile, "__ARM_ARCH_EXT_IDIV__", TARGET_IDIV_P (flags)); - def_or_undef_macro (pfile, "__ARM_FEATURE_IDIV", TARGET_IDIV_P (flags)); + def_or_undef_macro (pfile, "__ARM_ARCH_EXT_IDIV__", TARGET_IDIV); + def_or_undef_macro (pfile, "__ARM_FEATURE_IDIV", TARGET_IDIV); 
def_or_undef_macro (pfile, "__ARM_ASM_SYNTAX_UNIFIED__", inline_asm_unified); } @@ -193,7 +190,7 @@ arm_cpu_cpp_builtins (struct cpp_reader * pfile) builtin_assert ("cpu=arm"); builtin_assert ("machine=arm"); - arm_cpu_builtins (pfile, target_flags); + arm_cpu_builtins (pfile); } /* Hook to validate the current #pragma GCC target and set the arch custom @@ -245,7 +242,8 @@ arm_pragma_target_parse (tree args, tree pop_target) cpp_opts->warn_unused_macros = 0; /* Update macros. */ - arm_cpu_builtins (parse_in, cur_opt->x_target_flags); + gcc_assert (cur_opt->x_target_flags == target_flags); + arm_cpu_builtins (parse_in); cpp_opts->warn_unused_macros = saved_warn_unused_macros; } diff --git a/gcc/config/arm/arm-protos.h b/gcc/config/arm/arm-protos.h index 8df312f3c67..d3d7216488e 100644 --- a/gcc/config/arm/arm-protos.h +++ b/gcc/config/arm/arm-protos.h @@ -340,7 +340,6 @@ extern const char *arm_rewrite_selected_cpu (const char *name); extern void arm_lang_object_attributes_init (void); extern void arm_register_target_pragmas (void); extern void arm_cpu_cpp_builtins (struct cpp_reader *); -extern void arm_cpu_builtins (struct cpp_reader *, int); extern bool arm_is_constant_pool_ref (rtx); diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c index 5f3180d38ce..62a63abc68f 100644 --- a/gcc/config/arm/arm.c +++ b/gcc/config/arm/arm.c @@ -95,7 +95,7 @@ static int arm_compute_static_chain_stack_bytes (void); static arm_stack_offsets *arm_get_frame_offsets (void); static void arm_add_gc_roots (void); static int arm_gen_constant (enum rtx_code, machine_mode, rtx, - HOST_WIDE_INT, rtx, rtx, int, int); + unsigned HOST_WIDE_INT, rtx, rtx, int, int); static unsigned bit_count (unsigned long); static unsigned feature_count (const arm_feature_set*); static int arm_address_register_rtx_p (rtx, int); @@ -245,6 +245,7 @@ static tree arm_build_builtin_va_list (void); static void arm_expand_builtin_va_start (tree, rtx); static tree arm_gimplify_va_arg_expr (tree, tree, gimple_seq 
*, gimple_seq *); static void arm_option_override (void); +static void arm_option_print (FILE *, int, struct cl_target_option *); static void arm_set_current_function (tree); static bool arm_can_inline_p (tree, tree); static bool arm_valid_target_attribute_p (tree, tree, tree, int); @@ -405,6 +406,9 @@ static const struct attribute_spec arm_attribute_table[] = #undef TARGET_OPTION_OVERRIDE #define TARGET_OPTION_OVERRIDE arm_option_override +#undef TARGET_OPTION_PRINT +#define TARGET_OPTION_PRINT arm_option_print + #undef TARGET_COMP_TYPE_ATTRIBUTES #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes @@ -2751,15 +2755,14 @@ arm_option_check_internal (struct gcc_options *opts) error ("-mslow-flash-data only supports non-pic code on armv7-m targets"); } -/* Set params depending on attributes and optimization options. */ +/* Recompute the global settings depending on target attribute options. */ + static void -arm_option_params_internal (struct gcc_options *opts) +arm_option_params_internal (void) { - int flags = opts->x_target_flags; - - /* If we are not using the default (ARM mode) section anchor offset + /* If we are not using the default (ARM mode) section anchor offset ranges, then set the correct ranges now. */ - if (TARGET_THUMB1_P (flags)) + if (TARGET_THUMB1) { /* Thumb-1 LDR instructions cannot have negative offsets. Permissible positive offset ranges are 5-bit (for byte loads), @@ -2769,7 +2772,7 @@ arm_option_params_internal (struct gcc_options *opts) targetm.min_anchor_offset = 0; targetm.max_anchor_offset = 127; } - else if (TARGET_THUMB2_P (flags)) + else if (TARGET_THUMB2) { /* The minimum is set such that the total size of the block for a particular anchor is 248 + 1 + 4095 bytes, which is @@ -2790,14 +2793,13 @@ arm_option_params_internal (struct gcc_options *opts) max_insns_skipped = 6; /* For THUMB2, we limit the conditional sequence to one IT block. */ - if (TARGET_THUMB2_P (flags)) - max_insns_skipped = opts->x_arm_restrict_it ? 
1 : 4; + if (TARGET_THUMB2) + max_insns_skipped = arm_restrict_it ? 1 : 4; } else /* When -mrestrict-it is in use tone down the if-conversion. */ - max_insns_skipped - = (TARGET_THUMB2_P (opts->x_target_flags) && opts->x_arm_restrict_it) - ? 1 : current_tune->max_insns_skipped; + max_insns_skipped = (TARGET_THUMB2 && arm_restrict_it) + ? 1 : current_tune->max_insns_skipped; } /* True if -mflip-thumb should next add an attribute for the default @@ -3385,7 +3387,7 @@ arm_option_override (void) arm_option_override_internal (&global_options, &global_options_set); arm_option_check_internal (&global_options); - arm_option_params_internal (&global_options); + arm_option_params_internal (); /* Register global variables with the garbage collector. */ arm_add_gc_roots (); @@ -4227,8 +4229,8 @@ emit_constant_insn (rtx cond, rtx pattern) static int arm_gen_constant (enum rtx_code code, machine_mode mode, rtx cond, - HOST_WIDE_INT val, rtx target, rtx source, int subtargets, - int generate) + unsigned HOST_WIDE_INT val, rtx target, rtx source, + int subtargets, int generate) { int can_invert = 0; int can_negate = 0; @@ -4598,7 +4600,7 @@ arm_gen_constant (enum rtx_code code, machine_mode mode, rtx cond, mvn r0, r0, asl #12 mvn r0, r0, lsr #12 */ if (set_sign_bit_copies > 8 - && (val & (-1 << (32 - set_sign_bit_copies))) == val) + && (val & (HOST_WIDE_INT_M1U << (32 - set_sign_bit_copies))) == val) { if (generate) { @@ -29482,7 +29484,20 @@ arm_set_current_function (tree fndecl) = save_target_globals_default_opts (); } - arm_option_params_internal (&global_options); + arm_option_params_internal (); +} + +/* Implement TARGET_OPTION_PRINT. */ + +static void +arm_option_print (FILE *file, int indent, struct cl_target_option *ptr) +{ + int flags = ptr->x_target_flags; + + fprintf (file, "%*sselected arch %s\n", indent, "", + TARGET_THUMB2_P (flags) ? "thumb2" : + TARGET_THUMB_P (flags) ? "thumb1" : + "arm"); } /* Hook to determine if one function can safely inline another. 
*/ @@ -29501,7 +29516,7 @@ arm_can_inline_p (tree caller ATTRIBUTE_UNUSED, tree callee ATTRIBUTE_UNUSED) go over the list. */ static bool -arm_valid_target_attribute_rec (tree args, struct gcc_options *opts) +arm_valid_target_attribute_rec (tree args, struct gcc_options *opts) { if (TREE_CODE (args) == TREE_LIST) { diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h index f7a9d638673..87c9f904894 100644 --- a/gcc/config/arm/arm.h +++ b/gcc/config/arm/arm.h @@ -160,8 +160,6 @@ extern void (*arm_lang_output_object_attributes_hook)(void); #define TARGET_THUMB1 (TARGET_THUMB && !arm_arch_thumb2) /* Arm or Thumb-2 32-bit code. */ #define TARGET_32BIT (TARGET_ARM || arm_arch_thumb2) -#define TARGET_32BIT_P(flags) (TARGET_ARM_P (flags) \ - || arm_arch_thumb2) /* 32-bit Thumb-2 code. */ #define TARGET_THUMB2 (TARGET_THUMB && arm_arch_thumb2) /* Thumb-1 only. */ @@ -220,23 +218,18 @@ extern void (*arm_lang_output_object_attributes_hook)(void); (TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP \ && ARM_FPU_FSET_HAS (arm_fpu_desc->features, FPU_FL_NEON)) - /* Q-bit is present. */ -#define TARGET_ARM_QBIT_P(flags) \ - (TARGET_32BIT_P (flags) && arm_arch5e && (arm_arch_notm || arm_arch7)) -#define TARGET_ARM_QBIT TARGET_ARM_QBIT_P(target_flags) +#define TARGET_ARM_QBIT \ + (TARGET_32BIT && arm_arch5e && (arm_arch_notm || arm_arch7)) /* Saturation operation, e.g. SSAT. */ -#define TARGET_ARM_SAT_P(flags) \ - (TARGET_32BIT_P (flags) && arm_arch6 && (arm_arch_notm || arm_arch7)) -#define TARGET_ARM_SAT TARGET_ARM_SAT_P(target_flags) +#define TARGET_ARM_SAT \ + (TARGET_32BIT && arm_arch6 && (arm_arch_notm || arm_arch7)) /* "DSP" multiply instructions, eg. SMULxy. 
*/ -#define TARGET_DSP_MULTIPLY_P(flags) \ - (TARGET_32BIT_P (flags) && arm_arch5e && (arm_arch_notm || arm_arch7em)) -#define TARGET_DSP_MULTIPLY TARGET_DSP_MULTIPLY_P(target_flags) +#define TARGET_DSP_MULTIPLY \ + (TARGET_32BIT && arm_arch5e && (arm_arch_notm || arm_arch7em)) /* Integer SIMD instructions, and extend-accumulate instructions. */ -#define TARGET_INT_SIMD_P(flags) \ - (TARGET_32BIT_P (flags) && arm_arch6 && (arm_arch_notm || arm_arch7em)) -#define TARGET_INT_SIMD TARGET_INT_SIMD_P(target_flags) +#define TARGET_INT_SIMD \ + (TARGET_32BIT && arm_arch6 && (arm_arch_notm || arm_arch7em)) /* Should MOVW/MOVT be used in preference to a constant pool. */ #define TARGET_USE_MOVT \ @@ -259,30 +252,21 @@ extern void (*arm_lang_output_object_attributes_hook)(void); #define TARGET_HAVE_MEMORY_BARRIER (TARGET_HAVE_DMB || TARGET_HAVE_DMB_MCR) /* Nonzero if this chip supports ldrex and strex */ -#define TARGET_HAVE_LDREX_P(flags) ((arm_arch6 && TARGET_ARM_P (flags)) \ - || arm_arch7) -#define TARGET_HAVE_LDREX TARGET_HAVE_LDREX_P (target_flags) +#define TARGET_HAVE_LDREX ((arm_arch6 && TARGET_ARM) || arm_arch7) /* Nonzero if this chip supports ldrex{bh} and strex{bh}. */ -#define TARGET_HAVE_LDREXBH_P(flags) ((arm_arch6k && TARGET_ARM_P (flags)) \ - || arm_arch7) -#define TARGET_HAVE_LDREXBH TARGET_HAVE_LDREXBH_P (target_flags) +#define TARGET_HAVE_LDREXBH ((arm_arch6k && TARGET_ARM) || arm_arch7) /* Nonzero if this chip supports ldrexd and strexd. */ -#define TARGET_HAVE_LDREXD_P(flags) (((arm_arch6k && TARGET_ARM_P (flags)) \ - || arm_arch7) && arm_arch_notm) -#define TARGET_HAVE_LDREXD TARGET_HAVE_LDREXD_P (target_flags) - +#define TARGET_HAVE_LDREXD (((arm_arch6k && TARGET_ARM) \ + || arm_arch7) && arm_arch_notm) /* Nonzero if this chip supports load-acquire and store-release. */ #define TARGET_HAVE_LDACQ (TARGET_ARM_ARCH >= 8) /* Nonzero if integer division instructions supported. 
*/ -#define TARGET_IDIV_P(flags) ((TARGET_ARM_P (flags) && arm_arch_arm_hwdiv) \ - || (TARGET_THUMB2_P (flags) \ - && arm_arch_thumb_hwdiv)) -#define TARGET_IDIV TARGET_IDIV_P (target_flags) - +#define TARGET_IDIV ((TARGET_ARM && arm_arch_arm_hwdiv) \ + || (TARGET_THUMB2 && arm_arch_thumb_hwdiv)) /* Nonzero if disallow volatile memory access in IT block. */ #define TARGET_NO_VOLATILE_CE (arm_arch_no_volatile_ce) @@ -2220,11 +2204,6 @@ extern int making_const_table; | (TARGET_HAVE_LDREXBH ? 3 : 0) \ | (TARGET_HAVE_LDREXD ? 8 : 0)) -#define TARGET_ARM_FEATURE_LDREX_P(flags) \ - ((TARGET_HAVE_LDREX_P (flags) ? 4 : 0) \ - | (TARGET_HAVE_LDREXBH_P (flags) ? 3 : 0) \ - | (TARGET_HAVE_LDREXD_P (flags) ? 8 : 0)) - /* Set as a bit mask indicating the available widths of hardware floating point types. Where bit 1 indicates 16-bit support, bit 2 indicates 32-bit support, bit 3 indicates 64-bit support. */ diff --git a/gcc/config/i386/haswell.md b/gcc/config/i386/haswell.md new file mode 100644 index 00000000000..2bb0ac9873c --- /dev/null +++ b/gcc/config/i386/haswell.md @@ -0,0 +1,618 @@ +;; Scheduling for Haswell and derived processors. +;; Copyright (C) 2004-2015 Free Software Foundation, Inc. +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. +;; +;; GCC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; <http://www.gnu.org/licenses/>. */ + +;; The scheduling description in this file is based on core2.md. 
+;; The major difference from the CORE2 pipeline is that HASWELL has +;; two MU for load and one MU for store. +(define_automaton "haswell_decoder,haswell_core,haswell_idiv,haswell_fdiv,haswell_ssediv,haswell_load,haswell_store") + +;; The CPU domain, used for HASWELL bypass latencies +(define_attr "hsw_domain" "int,float,simd" + (cond [(eq_attr "type" "fmov,fop,fsgn,fmul,fdiv,fpspc,fcmov,fcmp,fxch,fistp,fisttp,frndint") + (const_string "float") + (eq_attr "type" "sselog,sselog1,sseiadd,sseiadd1,sseishft,sseishft1,sseimul, + sse,ssemov,sseadd,sseadd1,ssemul,ssecmp,ssecomi,ssecvt, + ssecvt1,sseicvt,ssediv,sseins,ssemuladd,sse4arg") + (cond [(eq_attr "mode" "V4DF,V8SF,V2DF,V4SF,SF,DF") + (const_string "float") + (eq_attr "mode" "SI") + (const_string "int")] + (const_string "simd")) + (eq_attr "type" "mmx,mmxmov,mmxadd,mmxmul,mmxcmp,mmxcvt,mmxshft") + (const_string "simd")] + (const_string "int"))) + +(define_cpu_unit "hsw_decoder0" "haswell_decoder") +(define_cpu_unit "hsw_decoder1" "haswell_decoder") +(define_cpu_unit "hsw_decoder2" "haswell_decoder") +(define_cpu_unit "hsw_decoder3" "haswell_decoder") + +;; We first wish to find an instruction for hsw_decoder0, so exclude +;; other hsw_decoders from being reserved until hsw_decoder0 is +;; reserved. +(presence_set "hsw_decoder1" "hsw_decoder0") +(presence_set "hsw_decoder2" "hsw_decoder0") +(presence_set "hsw_decoder3" "hsw_decoder0") + +;; Most instructions can be decoded on any of the three decoders. +(define_reservation "hsw_decodern" "(hsw_decoder0|hsw_decoder1|hsw_decoder2|hsw_decoder3)") + +;; The out-of-order core has eight pipelines. These are similar to the +;; Pentium Pro's five pipelines. Port 2,3 are responsible for memory loads, +;; port 7 for store address calculations, port 4 for memory stores, and +;; ports 0, 1, 5 and 6 for everything else. 
+ +(define_cpu_unit "hsw_p0,hsw_p1,hsw_p5,hsw_p6" "haswell_core") +(define_cpu_unit "hsw_p2,hsw_p3" "haswell_load") +(define_cpu_unit "hsw_p4,hsw_p7" "haswell_store") +(define_cpu_unit "hsw_idiv" "haswell_idiv") +(define_cpu_unit "hsw_fdiv" "haswell_fdiv") +(define_cpu_unit "hsw_ssediv" "haswell_ssediv") + +(define_reservation "hsw_p0156" "hsw_p0|hsw_p1|hsw_p5|hsw_p6") +(define_reservation "hsw_p0p1p5p6" "hsw_p0+hsw_p1+hsw_p5+hsw_p6") +(define_reservation "hsw_p23" "hsw_p2|hsw_p3") +(define_reservation "hsw_p4p7" "hsw_p4+hsw_p7") +(define_reservation "hsw_p237" "hsw_p2|hsw_p3|hsw_p7") +(define_reservation "hsw_p015" "hsw_p0|hsw_p1|hsw_p5") +(define_reservation "hsw_p01" "hsw_p0|hsw_p1") + +(define_insn_reservation "hsw_complex_insn" 6 + (and (eq_attr "cpu" "haswell") + (eq_attr "type" "other,multi,str")) + "hsw_decoder0") + +(define_insn_reservation "hsw_call" 1 + (and (eq_attr "cpu" "haswell") + (eq_attr "type" "call,callv")) + "hsw_decoder0") + +;; imov with memory operands does not use the integer units. +;; imovx always decodes to one uop, and also doesn't use the integer +;; units if it has memory operands. 
+(define_insn_reservation "hsw_imov" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "imov,imovx"))) + "hsw_decodern,hsw_p0156") + +(define_insn_reservation "hsw_imov_load" 2 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (eq_attr "type" "imov,imovx"))) + "hsw_decodern,hsw_p23") + +(define_insn_reservation "hsw_imov_store" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "store") + (eq_attr "type" "imov"))) + "hsw_decodern,hsw_p4+(hsw_p2|hsw_p3|hsw_p7)") + +(define_insn_reservation "hsw_icmov" 2 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "icmov"))) + "hsw_decodern,hsw_p0156,hsw_p0156") + +(define_insn_reservation "hsw_icmov_load" 2 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (eq_attr "type" "icmov"))) + "hsw_decodern,hsw_p23+hsw_p0156,hsw_p0156") + +(define_insn_reservation "hsw_push_reg" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "store") + (eq_attr "type" "push"))) + "hsw_decodern,hsw_p4+hsw_p237") + +(define_insn_reservation "hsw_push_mem" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "both") + (eq_attr "type" "push"))) + "hsw_decodern,hsw_p4+hsw_p237,hsw_p237") + +;; Consider lea latency as having 2 components. 
+(define_insn_reservation "hsw_lea" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "lea"))) + "hsw_decodern,hsw_p1|hsw_p5") + +(define_insn_reservation "hsw_shift_rotate" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "ishift,ishift1,rotate,rotate1"))) + "hsw_decodern,hsw_p0|hsw_p6") + +(define_insn_reservation "hsw_shift_rotate_mem" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "!none") + (eq_attr "type" "ishift,ishift1,rotate,rotate1"))) + "hsw_decodern,(hsw_p0|hsw_p6)+hsw_p237+hsw_p4") + +(define_insn_reservation "hsw_branch" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "ibr"))) + "hsw_decodern,hsw_p6") + +(define_insn_reservation "hsw_indirect_branch" 2 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "!none") + (eq_attr "type" "ibr"))) + "hsw_decoder0,hsw_p23+hsw_p6") + +(define_insn_reservation "hsw_leave" 4 + (and (eq_attr "cpu" "haswell") + (eq_attr "type" "leave")) + "hsw_decoder0,hsw_p23+hsw_p0156,hsw_p0156") + +;; imul and imulx with two/three operands only execute on port 1. +(define_insn_reservation "hsw_imul" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "imul"))) + "hsw_decodern,hsw_p1") + +(define_insn_reservation "hsw_imul_mem" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "!none") + (eq_attr "type" "imul"))) + "hsw_decodern,hsw_p23+hsw_p1") + +(define_insn_reservation "hsw_imulx" 4 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "imulx"))) + "hsw_decodern,hsw_p0156,hsw_p0156") + +(define_insn_reservation "hsw_imulx_mem" 4 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "!none") + (eq_attr "type" "imulx"))) + "hsw_decodern,hsw_p23+hsw_p0156,(hsw_p0|hsw_p6|hsw_p6)") + + +;; div and idiv are very similar, so we model them the same. +;; Use the same latency for all QI,HI and SI modes. 
+(define_insn_reservation "hsw_idiv" 23 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "idiv"))) + "hsw_decoder0,(hsw_p0p1p5p6+hsw_idiv)*9") + +(define_insn_reservation "hsw_idiv_load" 23 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (eq_attr "type" "idiv"))) + "hsw_decoder0,hsw_p23+hsw_p0+hsw_idiv,(hsw_p0p1p5p6+hsw_idiv)*9") + +;; x87 floating point operations. + +(define_insn_reservation "hsw_fxch" 0 + (and (eq_attr "cpu" "haswell") + (eq_attr "type" "fxch")) + "hsw_decodern") + +(define_insn_reservation "hsw_fop" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none,unknown") + (eq_attr "type" "fop"))) + "hsw_decodern,hsw_p1") + +(define_insn_reservation "hsw_fop_load" 5 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (eq_attr "type" "fop"))) + "hsw_decodern,hsw_p23+hsw_p1,hsw_p1") + +(define_insn_reservation "hsw_fop_store" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "store") + (eq_attr "type" "fop"))) + "hsw_decodern,hsw_p0,hsw_p0,hsw_p0+hsw_p4+hsw_p3") + +(define_insn_reservation "hsw_fop_both" 5 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "both") + (eq_attr "type" "fop"))) + "hsw_decodern,hsw_p2+hsw_p0,hsw_p0+hsw_p4+hsw_p3") + +(define_insn_reservation "hsw_fsgn" 1 + (and (eq_attr "cpu" "haswell") + (eq_attr "type" "fsgn")) + "hsw_decodern,hsw_p0") + +(define_insn_reservation "hsw_fistp" 7 + (and (eq_attr "cpu" "haswell") + (eq_attr "type" "fistp")) + "hsw_decoder0,hsw_p1+hsw_p4+hsw_p23") + +(define_insn_reservation "hsw_fcmov" 2 + (and (eq_attr "cpu" "haswell") + (eq_attr "type" "fcmov")) + "hsw_decoder0,hsw_p0+hsw_p5,hsw_p0") + +(define_insn_reservation "hsw_fcmp" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "fcmp"))) + "hsw_decodern,hsw_p1") + +(define_insn_reservation "hsw_fcmp_load" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (eq_attr "type" "fcmp"))) + 
"hsw_decodern,hsw_p23+hsw_p1") + +(define_insn_reservation "hsw_fmov" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "fmov"))) + "hsw_decodern,hsw_p01") + +(define_insn_reservation "hsw_fmov_load" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (and (eq_attr "mode" "!XF") + (eq_attr "type" "fmov")))) + "hsw_decodern,hsw_p23") + +(define_insn_reservation "hsw_fmov_XF_load" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (and (eq_attr "mode" "XF") + (eq_attr "type" "fmov")))) + "hsw_decodern,(hsw_p23+hsw_p0)*2") + +(define_insn_reservation "hsw_fmov_store" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "store") + (and (eq_attr "mode" "!XF") + (eq_attr "type" "fmov")))) + "hsw_decodern,hsw_p4p7") + +(define_insn_reservation "hsw_fmov_XF_store" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "store") + (and (eq_attr "mode" "XF") + (eq_attr "type" "fmov")))) + "hsw_decodern,hsw_p4p7,hsw_p4p7") + +(define_insn_reservation "hsw_fmul" 4 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "fmul"))) + "hsw_decodern,hsw_p01") + +(define_insn_reservation "hsw_fmul_load" 4 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (eq_attr "type" "fmul"))) + "hsw_decodern,hsw_p23+hsw_p01") + +;; fdiv latencies depend on the mode of the operands. XFmode gives +;; a latency of 38 cycles, DFmode gives 32, and SFmode gives latency 18. +;; Division by a power of 2 takes only 9 cycles, but we cannot model +;; that. Throughput is equal to latency - 1, which we model using the +;; hsw_div automaton. 
+(define_insn_reservation "hsw_fdiv_SF" 18 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (and (eq_attr "mode" "SF") + (eq_attr "type" "fdiv,fpspc")))) + "hsw_decodern,hsw_p0+hsw_fdiv,hsw_fdiv*16") + +(define_insn_reservation "hsw_fdiv_SF_load" 19 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (and (eq_attr "mode" "SF") + (eq_attr "type" "fdiv,fpspc")))) + "hsw_decodern,hsw_p23+hsw_p0+hsw_fdiv,hsw_fdiv*16") + +(define_insn_reservation "hsw_fdiv_DF" 32 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (and (eq_attr "mode" "DF") + (eq_attr "type" "fdiv,fpspc")))) + "hsw_decodern,hsw_p0+hsw_fdiv,hsw_fdiv*30") + +(define_insn_reservation "hsw_fdiv_DF_load" 33 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (and (eq_attr "mode" "DF") + (eq_attr "type" "fdiv,fpspc")))) + "hsw_decodern,hsw_p23+hsw_p0+hsw_fdiv,hsw_fdiv*30") + +(define_insn_reservation "hsw_fdiv_XF" 38 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (and (eq_attr "mode" "XF") + (eq_attr "type" "fdiv,fpspc")))) + "hsw_decodern,hsw_p0+hsw_fdiv,hsw_fdiv*36") + +(define_insn_reservation "hsw_fdiv_XF_load" 39 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (and (eq_attr "mode" "XF") + (eq_attr "type" "fdiv,fpspc")))) + "hsw_decodern,hsw_p2+hsw_p0+hsw_fdiv,hsw_fdiv*36") + +;; MMX instructions. 
+ +(define_insn_reservation "hsw_mmx_add" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "mmxadd,sseiadd"))) + "hsw_decodern,hsw_p1|hsw_p5") + +(define_insn_reservation "hsw_mmx_add_load" 2 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (eq_attr "type" "mmxadd,sseiadd"))) + "hsw_decodern,hsw_p23+(hsw_p1|hsw_p5)") + +(define_insn_reservation "hsw_mmx_shft" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "mmxshft"))) + "hsw_decodern,hsw_p0") + +(define_insn_reservation "hsw_mmx_shft_load" 2 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (eq_attr "type" "mmxshft"))) + "hsw_decodern,hsw_p23+hsw_p0") + +(define_insn_reservation "hsw_mmx_sse_shft" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (and (eq_attr "type" "sseishft") + (eq_attr "length_immediate" "!0")))) + "hsw_decodern,hsw_p01") + +(define_insn_reservation "hsw_mmx_sse_shft_load" 2 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (and (eq_attr "type" "sseishft") + (eq_attr "length_immediate" "!0")))) + "hsw_decodern,hsw_p01+hsw_p23") + +(define_insn_reservation "hsw_mmx_sse_shft1" 2 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (and (eq_attr "type" "sseishft") + (eq_attr "length_immediate" "0")))) + "hsw_decodern,hsw_p01") + +(define_insn_reservation "hsw_mmx_sse_shft1_load" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (and (eq_attr "type" "sseishft") + (eq_attr "length_immediate" "0")))) + "hsw_decodern,hsw_p01+hsw_p23") + +(define_insn_reservation "hsw_mmx_mul" 5 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "mmxmul,sseimul"))) + "hsw_decodern,hsw_p01") + +(define_insn_reservation "hsw_mmx_mul_load" 5 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "mmxmul,sseimul"))) + "hsw_decodern,hsw_p23+hsw_p01") + +(define_insn_reservation 
"hsw_sse_mmxcvt" 4 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "mode" "DI") + (eq_attr "type" "mmxcvt"))) + "hsw_decodern,hsw_p1") + +;; (define_insn_reservation "hsw_sse_mmxshft" 2 +;; (and (eq_attr "cpu" "haswell") +;; (and (eq_attr "mode" "TI") +;; (eq_attr "type" "mmxshft"))) +;; "hsw_decodern,hsw_p01") + +;; The sfence instruction. +(define_insn_reservation "hsw_sse_sfence" 2 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "unknown") + (eq_attr "type" "sse"))) + "hsw_decoder0,hsw_p23+hsw_p4") + +(define_insn_reservation "hsw_sse_SFDF" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "mode" "SF,DF") + (eq_attr "type" "sse"))) + "hsw_decodern,hsw_p01") + +(define_insn_reservation "hsw_sse_V4SF" 4 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "mode" "V4SF") + (eq_attr "type" "sse"))) + "hsw_decodern,hsw_p01") + +(define_insn_reservation "hsw_sse_V8SF" 4 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "mode" "V8SF,V4DF") + (eq_attr "type" "sse"))) + "hsw_decodern,hsw_p01") + +(define_insn_reservation "hsw_sse_addcmp" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "sseadd1,ssecmp,ssecomi"))) + "hsw_decodern,hsw_p01") + +(define_insn_reservation "hsw_sse_addcmp_load" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (eq_attr "type" "sseadd1,ssecmp,ssecomi"))) + "hsw_decodern,hsw_p23+hsw_p01") + +(define_insn_reservation "hsw_sse_logic" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "sselog,sselog1"))) + "hsw_decodern,hsw_p015") + +(define_insn_reservation "hsw_sse_logic_load" 2 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (eq_attr "type" "sselog,sselog1"))) + "hsw_decodern,hsw_p015+hsw_p23") + +(define_insn_reservation "hsw_sse_add" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "sseadd"))) + "hsw_decodern,hsw_p1|hsw_p5") + +(define_insn_reservation "hsw_sse_add_load" 3 + (and (eq_attr 
"cpu" "haswell") + (and (eq_attr "memory" "load") + (eq_attr "type" "sseadd"))) + "hsw_decodern,(hsw_p1|hsw_p5)+hsw_p23") + +(define_insn_reservation "hsw_sse_mul" 5 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "ssemul"))) + "hsw_decodern,hsw_p0") + +(define_insn_reservation "hsw_sse_mul_load" 5 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (eq_attr "type" "ssemul"))) + "hsw_decodern,hsw_p0+hsw_p23") +;; Use skylake pipeline. +(define_insn_reservation "hsw_sse_muladd" 5 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "ssemuladd"))) + "hsw_decodern,hsw_p01") + +(define_insn_reservation "hsw_sse_muladd_load" 5 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (eq_attr "type" "ssemuladd"))) + "hsw_decodern,hsw_p01+hsw_p23") + +(define_insn_reservation "hsw_sse_div_SF" 18 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (and (eq_attr "mode" "SF,V4SF,V8SF") + (eq_attr "type" "ssediv")))) + "hsw_decodern,hsw_p0,hsw_ssediv*14") + +(define_insn_reservation "hsw_sse_div_SF_load" 18 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (and (eq_attr "mode" "SF,V4SF,V8SF") + (eq_attr "type" "ssediv")))) + "hsw_decodern,(hsw_p23+hsw_p0),hsw_ssediv*14") + +(define_insn_reservation "hsw_sse_div_DF" 28 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (and (eq_attr "mode" "DF,V2DF,V4DF") + (eq_attr "type" "ssediv")))) + "hsw_decodern,hsw_p0,hsw_ssediv*20") + +(define_insn_reservation "hsw_sse_div_DF_load" 28 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (and (eq_attr "mode" "DF,V2DF,V4DF") + (eq_attr "type" "ssediv")))) + "hsw_decodern,(hsw_p23+hsw_p0),hsw_ssediv*20") + +(define_insn_reservation "hsw_sse_icvt" 4 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "sseicvt"))) + "hsw_decodern,hsw_p1") + +(define_insn_reservation "hsw_sse_icvt_load" 4 + (and 
(eq_attr "cpu" "haswell") + (and (eq_attr "memory" "!none") + (eq_attr "type" "sseicvt"))) + "hsw_decodern,hsw_p23+hsw_p1") + + +(define_insn_reservation "hsw_sse_icvt_SI" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (and (eq_attr "mode" "SI") + (eq_attr "type" "sseicvt")))) + "hsw_decodern,hsw_p1") + +(define_insn_reservation "hsw_sse_icvt_SI_load" 3 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "!none") + (and (eq_attr "mode" "SI") + (eq_attr "type" "sseicvt")))) + "hsw_decodern,hsw_p23+hsw_p1") + +(define_insn_reservation "hsw_sse_mov" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none") + (eq_attr "type" "ssemov"))) + "hsw_decodern,hsw_p015") + +(define_insn_reservation "hsw_sse_mov_load" 2 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (eq_attr "type" "ssemov"))) + "hsw_decodern,hsw_p23") + +(define_insn_reservation "hsw_sse_mov_store" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "store") + (eq_attr "type" "ssemov"))) + "hsw_decodern,hsw_p4p7") + +(define_insn_reservation "hsw_insn" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "none,unknown") + (eq_attr "type" "alu,alu1,negnot,incdec,icmp,test,setcc,sseishft1,mmx,mmxcmp"))) + "hsw_decodern,hsw_p0156") + +(define_insn_reservation "hsw_insn_load" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "load") + (eq_attr "type" "alu,alu1,negnot,incdec,icmp,test,setcc,pop,sseishft1,mmx,mmxcmp"))) + "hsw_decodern,hsw_p23+hsw_p0156") + +(define_insn_reservation "hsw_insn_store" 1 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "store") + (eq_attr "type" "alu,alu1,negnot,incdec,icmp,test,setcc,sseishft1,mmx,mmxcmp"))) + "hsw_decodern,hsw_p0156+hsw_p4p7") + +;; read-modify-store instructions produce 4 uops so they have to be +;; decoded on hsw_decoder0 as well. 
+(define_insn_reservation "hsw_insn_both" 4 + (and (eq_attr "cpu" "haswell") + (and (eq_attr "memory" "both") + (eq_attr "type" "alu,alu1,negnot,incdec,icmp,test,setcc,pop,sseishft1,mmx,mmxcmp"))) + "hsw_decodern,hsw_p23+hsw_p0156+hsw_p4p7") diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c index d78f4e7f175..00e7006ec9a 100644 --- a/gcc/config/i386/i386.c +++ b/gcc/config/i386/i386.c @@ -3345,10 +3345,10 @@ ix86_option_override_internal (bool main_args_p, PTA_IVYBRIDGE}, {"core-avx-i", PROCESSOR_SANDYBRIDGE, CPU_NEHALEM, PTA_IVYBRIDGE}, - {"haswell", PROCESSOR_HASWELL, CPU_NEHALEM, PTA_HASWELL}, - {"core-avx2", PROCESSOR_HASWELL, CPU_NEHALEM, PTA_HASWELL}, - {"broadwell", PROCESSOR_HASWELL, CPU_NEHALEM, PTA_BROADWELL}, - {"skylake", PROCESSOR_HASWELL, CPU_NEHALEM, PTA_SKYLAKE}, + {"haswell", PROCESSOR_HASWELL, CPU_HASWELL, PTA_HASWELL}, + {"core-avx2", PROCESSOR_HASWELL, CPU_HASWELL, PTA_HASWELL}, + {"broadwell", PROCESSOR_HASWELL, CPU_HASWELL, PTA_BROADWELL}, + {"skylake", PROCESSOR_HASWELL, CPU_HASWELL, PTA_SKYLAKE}, {"bonnell", PROCESSOR_BONNELL, CPU_ATOM, PTA_BONNELL}, {"atom", PROCESSOR_BONNELL, CPU_ATOM, PTA_BONNELL}, {"silvermont", PROCESSOR_SILVERMONT, CPU_SLM, PTA_SILVERMONT}, @@ -51732,7 +51732,7 @@ ix86_reassociation_width (unsigned int, machine_mode mode) if (INTEGRAL_MODE_P (mode) && TARGET_REASSOC_INT_TO_PARALLEL) return 2; else if (FLOAT_MODE_P (mode) && TARGET_REASSOC_FP_TO_PARALLEL) - return 2; + return ((TARGET_64BIT && ix86_tune == PROCESSOR_HASWELL)? 4 : 2); else return 1; } diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md index 7017913afe2..7808705d49c 100644 --- a/gcc/config/i386/i386.md +++ b/gcc/config/i386/i386.md @@ -407,8 +407,8 @@ ;; Processor type. 
(define_attr "cpu" "none,pentium,pentiumpro,geode,k6,athlon,k8,core2,nehalem, - atom,slm,generic,amdfam10,bdver1,bdver2,bdver3,bdver4, - btver2" + atom,slm,haswell,generic,amdfam10,bdver1,bdver2,bdver3, + bdver4,btver2" (const (symbol_ref "ix86_schedule"))) ;; A basic instruction type. Refinements due to arguments to be @@ -1166,6 +1166,7 @@ (include "atom.md") (include "slm.md") (include "core2.md") +(include "haswell.md") ;; Operand and operator predicates and constraints diff --git a/gcc/config/lm32/lm32.h b/gcc/config/lm32/lm32.h index 986383f91b0..c65538adcd8 100644 --- a/gcc/config/lm32/lm32.h +++ b/gcc/config/lm32/lm32.h @@ -302,8 +302,6 @@ enum reg_class LM32_NUM_INTS ((MODE) == BLKmode ? \ int_size_in_bytes (TYPE) : GET_MODE_SIZE (MODE)) -#define STRUCT_VALUE 0 - /*---------------------------*/ /* Function entry and exit. */ /*---------------------------*/ diff --git a/gcc/config/mep/mep.h b/gcc/config/mep/mep.h index f7322cb7c56..4d335b05adf 100644 --- a/gcc/config/mep/mep.h +++ b/gcc/config/mep/mep.h @@ -499,8 +499,6 @@ typedef struct #define DEFAULT_PCC_STRUCT_RETURN 0 -#define STRUCT_VALUE 0 - #define FUNCTION_OK_FOR_SIBCALL(DECL) mep_function_ok_for_sibcall(DECL) /* Prologue and epilogues are all handled via RTL. */ diff --git a/gcc/config/pa/pa.c b/gcc/config/pa/pa.c index 7cf540c7785..e16e58d14ef 100644 --- a/gcc/config/pa/pa.c +++ b/gcc/config/pa/pa.c @@ -2464,6 +2464,7 @@ pa_output_move_double (rtx *operands) enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1; rtx latehalf[2]; rtx addreg0 = 0, addreg1 = 0; + int highonly = 0; /* First classify both operands. 
*/ @@ -2674,7 +2675,14 @@ pa_output_move_double (rtx *operands) else if (optype1 == OFFSOP) latehalf[1] = adjust_address_nv (operands[1], SImode, 4); else if (optype1 == CNSTOP) - split_double (operands[1], &operands[1], &latehalf[1]); + { + if (GET_CODE (operands[1]) == HIGH) + { + operands[1] = XEXP (operands[1], 0); + highonly = 1; + } + split_double (operands[1], &operands[1], &latehalf[1]); + } else latehalf[1] = operands[1]; @@ -2727,8 +2735,11 @@ pa_output_move_double (rtx *operands) if (addreg1) output_asm_insn ("ldo 4(%0),%0", &addreg1); - /* Do that word. */ - output_asm_insn (pa_singlemove_string (latehalf), latehalf); + /* Do high-numbered word. */ + if (highonly) + output_asm_insn ("ldil L'%1,%0", latehalf); + else + output_asm_insn (pa_singlemove_string (latehalf), latehalf); /* Undo the adds we just did. */ if (addreg0) diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c index d276ab21ab3..ad3132f5cc6 100644 --- a/gcc/config/s390/s390.c +++ b/gcc/config/s390/s390.c @@ -728,7 +728,7 @@ s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl) HOST_WIDE_INT_PRINT_DEC ".." 
HOST_WIDE_INT_PRINT_DEC ")", argnum, decl, - -(HOST_WIDE_INT)1 << (bitwidth - 1), + -((HOST_WIDE_INT)1 << (bitwidth - 1)), ((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1); return false; } diff --git a/gcc/config/sh/sh-protos.h b/gcc/config/sh/sh-protos.h index 3e4211be4de..bb7003c18cd 100644 --- a/gcc/config/sh/sh-protos.h +++ b/gcc/config/sh/sh-protos.h @@ -192,18 +192,19 @@ sh_find_set_of_reg (rtx reg, rtx_insn* insn, F stepfunc, if (!REG_P (reg) || insn == NULL_RTX) return result; - rtx_insn* previnsn = insn; - - for (result.insn = stepfunc (insn); result.insn != NULL_RTX; - previnsn = result.insn, result.insn = stepfunc (result.insn)) + for (rtx_insn* i = stepfunc (insn); i != NULL_RTX; i = stepfunc (i)) { - if (BARRIER_P (result.insn)) + if (BARRIER_P (i)) break; - if (!NONJUMP_INSN_P (result.insn)) - continue; - if (reg_set_p (reg, result.insn)) + if (!INSN_P (i) || DEBUG_INSN_P (i)) + continue; + if (reg_set_p (reg, i)) { - result.set_rtx = set_of (reg, result.insn); + if (CALL_P (i)) + break; + + result.insn = i; + result.set_rtx = set_of (reg, i); if (result.set_rtx == NULL_RTX || GET_CODE (result.set_rtx) != SET) break; @@ -226,12 +227,6 @@ sh_find_set_of_reg (rtx reg, rtx_insn* insn, F stepfunc, } } - /* If the loop above stopped at the first insn in the list, - result.insn will be null. Use the insn from the previous iteration - in this case. */ - if (result.insn == NULL) - result.insn = previnsn; - if (result.set_src != NULL) gcc_assert (result.insn != NULL && result.set_rtx != NULL); diff --git a/gcc/config/sparc/predicates.md b/gcc/config/sparc/predicates.md index 88537c64f0c..aa45f8ec66f 100644 --- a/gcc/config/sparc/predicates.md +++ b/gcc/config/sparc/predicates.md @@ -27,31 +27,9 @@ ;; Return true if the integer representation of OP is ;; all-ones. 
(define_predicate "const_all_ones_operand" - (match_code "const_int,const_double,const_vector") -{ - if (GET_CODE (op) == CONST_INT && INTVAL (op) == -1) - return true; -#if HOST_BITS_PER_WIDE_INT == 32 - if (GET_CODE (op) == CONST_DOUBLE - && GET_MODE (op) == VOIDmode - && CONST_DOUBLE_HIGH (op) == ~(HOST_WIDE_INT)0 - && CONST_DOUBLE_LOW (op) == ~(HOST_WIDE_INT)0) - return true; -#endif - if (GET_CODE (op) == CONST_VECTOR) - { - int i, num_elem = CONST_VECTOR_NUNITS (op); - - for (i = 0; i < num_elem; i++) - { - rtx n = CONST_VECTOR_ELT (op, i); - if (! const_all_ones_operand (n, mode)) - return false; - } - return true; - } - return false; -}) + (and (match_code "const_int,const_double,const_vector") + (match_test "INTEGRAL_MODE_P (GET_MODE (op))") + (match_test "op == CONSTM1_RTX (GET_MODE (op))"))) ;; Return true if OP is the integer constant 4096. (define_predicate "const_4096_operand" diff --git a/gcc/config/visium/t-visium b/gcc/config/visium/t-visium index e06141c349b..46234f05b78 100644 --- a/gcc/config/visium/t-visium +++ b/gcc/config/visium/t-visium @@ -17,5 +17,7 @@ # along with GCC; see the file COPYING3. If not see # <http://www.gnu.org/licenses/>. -MULTILIB_OPTIONS = mcpu=gr6 -MULTILIB_DIRNAMES = gr6 +# The compiler defaults to -mcpu=gr5 but this may be overridden via --with-cpu +# at configure time so the -mcpu setting must be symmetrical. +MULTILIB_OPTIONS = mcpu=gr5/mcpu=gr6 +MULTILIB_DIRNAMES = gr5 gr6 diff --git a/gcc/config/visium/visium.h b/gcc/config/visium/visium.h index a2ab61c540b..c5b65b4be34 100644 --- a/gcc/config/visium/visium.h +++ b/gcc/config/visium/visium.h @@ -1075,14 +1075,6 @@ struct visium_args If not defined, this defaults to the value 1. */ #define DEFAULT_PCC_STRUCT_RETURN 0 -/* `STRUCT_VALUE' - - If the structure value address is not passed in a register, define - `STRUCT_VALUE' as an expression returning an RTX for the place - where the address is passed. 
If it returns 0, the address is - passed as an "invisible" first argument. */ -#define STRUCT_VALUE 0 - /* Caller-Saves Register Allocation If you enable it, GNU CC can save registers around function calls. @@ -1735,3 +1727,19 @@ extern int visium_indent_opcode; visium_indent_opcode = 0; \ } \ } while (0) + +/* Configure-time default values for common options. */ +#define OPTION_DEFAULT_SPECS { "cpu", "%{!mcpu=*:-mcpu=%(VALUE)}" } + +/* Values of TARGET_CPU_DEFAULT specified via --with-cpu. */ +#define TARGET_CPU_gr5 0 +#define TARGET_CPU_gr6 1 + +/* Default -mcpu multilib for above values. */ +#if TARGET_CPU_DEFAULT == TARGET_CPU_gr5 +#define MULTILIB_DEFAULTS { "mcpu=gr5" } +#elif TARGET_CPU_DEFAULT == TARGET_CPU_gr6 +#define MULTILIB_DEFAULTS { "mcpu=gr6" } +#else +#error Unrecognized value in TARGET_CPU_DEFAULT +#endif diff --git a/gcc/config/xtensa/xtensa.c b/gcc/config/xtensa/xtensa.c index 1910061fdb5..1e1ac6baebd 100644 --- a/gcc/config/xtensa/xtensa.c +++ b/gcc/config/xtensa/xtensa.c @@ -1874,23 +1874,23 @@ xtensa_tls_module_base (void) static rtx_insn * xtensa_call_tls_desc (rtx sym, rtx *retp) { - rtx fn, arg, a10; + rtx fn, arg, a_io; rtx_insn *call_insn, *insns; start_sequence (); fn = gen_reg_rtx (Pmode); arg = gen_reg_rtx (Pmode); - a10 = gen_rtx_REG (Pmode, 10); + a_io = gen_rtx_REG (Pmode, WINDOW_SIZE + 2); emit_insn (gen_tls_func (fn, sym)); emit_insn (gen_tls_arg (arg, sym)); - emit_move_insn (a10, arg); - call_insn = emit_call_insn (gen_tls_call (a10, fn, sym, const1_rtx)); - use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), a10); + emit_move_insn (a_io, arg); + call_insn = emit_call_insn (gen_tls_call (a_io, fn, sym, const1_rtx)); + use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), a_io); insns = get_insns (); end_sequence (); - *retp = a10; + *retp = a_io; return insns; } diff --git a/gcc/config/xtensa/xtensa.h b/gcc/config/xtensa/xtensa.h index ebc8792f90a..01d93e98b6c 100644 --- a/gcc/config/xtensa/xtensa.h +++ b/gcc/config/xtensa/xtensa.h @@ 
-813,7 +813,9 @@ typedef struct xtensa_args for debugging. */ #define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, 0) #define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (0) -#define DWARF_FRAME_REGISTERS 16 +#define DWARF_ALT_FRAME_RETURN_COLUMN 16 +#define DWARF_FRAME_REGISTERS (DWARF_ALT_FRAME_RETURN_COLUMN \ + + (TARGET_WINDOWED_ABI ? 0 : 1)) #define EH_RETURN_DATA_REGNO(N) ((N) < 2 ? (N) + 2 : INVALID_REGNUM) #define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \ (flag_pic \ diff --git a/gcc/config/xtensa/xtensa.md b/gcc/config/xtensa/xtensa.md index 3a3a9029a31..a4228da2bb4 100644 --- a/gcc/config/xtensa/xtensa.md +++ b/gcc/config/xtensa/xtensa.md @@ -1883,7 +1883,12 @@ UNSPEC_TLS_CALL)) (match_operand 3 "" "i")))] "TARGET_THREADPTR && HAVE_AS_TLS" - "callx8.tls %1, %2@TLSCALL" +{ + if (TARGET_WINDOWED_ABI) + return "callx8.tls %1, %2@TLSCALL"; + else + return "callx0.tls %1, %2@TLSCALL"; +} [(set_attr "type" "call") (set_attr "mode" "none") (set_attr "length" "3")]) diff --git a/gcc/configure b/gcc/configure index 846c996342f..6fb11a7c407 100755 --- a/gcc/configure +++ b/gcc/configure @@ -23977,7 +23977,7 @@ foo: .long 25 tls_first_minor=14 tls_as_opt="-m64 -Aesame --fatal-warnings" ;; - sh-*-* | sh[34]-*-*) + sh-*-* | sh[123456789lbe]*-*-*) conftest_s=' .section ".tdata","awT",@progbits foo: .long 25 diff --git a/gcc/configure.ac b/gcc/configure.ac index 34c43d54228..a6e078a998a 100644 --- a/gcc/configure.ac +++ b/gcc/configure.ac @@ -3325,7 +3325,7 @@ foo: .long 25 tls_first_minor=14 tls_as_opt="-m64 -Aesame --fatal-warnings" ;; - sh-*-* | sh[34]-*-*) + sh-*-* | sh[123456789lbe]*-*-*) conftest_s=' .section ".tdata","awT",@progbits foo: .long 25 diff --git a/gcc/coretypes.h b/gcc/coretypes.h index 17e2b40c825..41bb58e7a4b 100644 --- a/gcc/coretypes.h +++ b/gcc/coretypes.h @@ -225,9 +225,16 @@ struct basic_block_def; typedef struct basic_block_def *basic_block; typedef const struct basic_block_def *const_basic_block; -#define obstack_chunk_alloc xmalloc 
-#define obstack_chunk_free free -#define OBSTACK_CHUNK_SIZE 0 +#if !defined (GENERATOR_FILE) +# define OBSTACK_CHUNK_SIZE memory_block_pool::block_size +# define obstack_chunk_alloc mempool_obstack_chunk_alloc +# define obstack_chunk_free mempool_obstack_chunk_free +#else +# define OBSTACK_CHUNK_SIZE 0 +# define obstack_chunk_alloc xmalloc +# define obstack_chunk_free free +#endif + #define gcc_obstack_init(OBSTACK) \ obstack_specify_allocation ((OBSTACK), OBSTACK_CHUNK_SIZE, 0, \ obstack_chunk_alloc, \ @@ -328,6 +335,7 @@ typedef unsigned char uchar; #include "hash-set.h" #include "input.h" #include "is-a.h" +#include "memory-block.h" #endif /* GENERATOR_FILE && !USED_FOR_TARGET */ #endif /* coretypes.h */ diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog index a9952fcae75..c264f489f80 100644 --- a/gcc/cp/ChangeLog +++ b/gcc/cp/ChangeLog @@ -1,3 +1,21 @@ +2015-09-14 Jason Merrill <jason@redhat.com> + + PR c++/44282 + * mangle.c (write_CV_qualifiers_for_type): Also warn about regparm + mangling with lower -fabi-version. + +2015-09-14 Paolo Carlini <paolo.carlini@oracle.com> + + PR c++/51911 + * parser.c (cp_parser_new_expression): Enforce 5.3.4/2 (as amended + per the spirit of DR 1467). + +2015-09-11 Mark Wielaard <mjw@redhat.com> + + PR c/28901 + * cp-objcp-common.c (cxx_warn_unused_global_decl): Remove hard-coded + VAR_P TREE_READONLY override. + 2015-09-10 Paolo Carlini <paolo.carlini@oracle.com> PR c++/67318 diff --git a/gcc/cp/cp-objcp-common.c b/gcc/cp/cp-objcp-common.c index 2cab89c1552..808defdb986 100644 --- a/gcc/cp/cp-objcp-common.c +++ b/gcc/cp/cp-objcp-common.c @@ -62,10 +62,6 @@ cxx_warn_unused_global_decl (const_tree decl) if (DECL_IN_SYSTEM_HEADER (decl)) return false; - /* Const variables take the place of #defines in C++. 
*/ - if (VAR_P (decl) && TREE_READONLY (decl)) - return false; - return true; } diff --git a/gcc/cp/mangle.c b/gcc/cp/mangle.c index 4518f20cc8e..d87ae30f051 100644 --- a/gcc/cp/mangle.c +++ b/gcc/cp/mangle.c @@ -2196,7 +2196,7 @@ write_CV_qualifiers_for_type (const tree type) We don't do this with classes and enums because their attributes are part of their definitions, not something added on. */ - if (abi_version_at_least (10) && !OVERLOAD_TYPE_P (type)) + if (!OVERLOAD_TYPE_P (type)) { auto_vec<tree> vec; for (tree a = TYPE_ATTRIBUTES (type); a; a = TREE_CHAIN (a)) @@ -2207,31 +2207,34 @@ write_CV_qualifiers_for_type (const tree type) && !is_attribute_p ("abi_tag", name)) vec.safe_push (a); } - vec.qsort (attr_strcmp); - while (!vec.is_empty()) + if (abi_version_crosses (10) && !vec.is_empty ()) + G.need_abi_warning = true; + if (abi_version_at_least (10)) { - tree a = vec.pop(); - const attribute_spec *as - = lookup_attribute_spec (get_attribute_name (a)); - - write_char ('U'); - write_unsigned_number (strlen (as->name)); - write_string (as->name); - if (TREE_VALUE (a)) + vec.qsort (attr_strcmp); + while (!vec.is_empty()) { - write_char ('I'); - for (tree args = TREE_VALUE (a); args; - args = TREE_CHAIN (args)) + tree a = vec.pop(); + const attribute_spec *as + = lookup_attribute_spec (get_attribute_name (a)); + + write_char ('U'); + write_unsigned_number (strlen (as->name)); + write_string (as->name); + if (TREE_VALUE (a)) { - tree arg = TREE_VALUE (args); - write_template_arg (arg); + write_char ('I'); + for (tree args = TREE_VALUE (a); args; + args = TREE_CHAIN (args)) + { + tree arg = TREE_VALUE (args); + write_template_arg (arg); + } + write_char ('E'); } - write_char ('E'); - } - ++num_qualifiers; - if (abi_version_crosses (10)) - G.need_abi_warning = true; + ++num_qualifiers; + } } } diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c index 2e8e34eaf5e..ab6d7f1f2ba 100644 --- a/gcc/cp/parser.c +++ b/gcc/cp/parser.c @@ -7591,8 +7591,9 @@ 
cp_parser_new_expression (cp_parser* parser) type = cp_parser_new_type_id (parser, &nelts); /* If the next token is a `(' or '{', then we have a new-initializer. */ - if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN) - || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) + cp_token *token = cp_lexer_peek_token (parser->lexer); + if (token->type == CPP_OPEN_PAREN + || token->type == CPP_OPEN_BRACE) initializer = cp_parser_new_initializer (parser); else initializer = NULL; @@ -7601,6 +7602,21 @@ cp_parser_new_expression (cp_parser* parser) expression. */ if (cp_parser_non_integral_constant_expression (parser, NIC_NEW)) ret = error_mark_node; + /* 5.3.4/2: "If the auto type-specifier appears in the type-specifier-seq + of a new-type-id or type-id of a new-expression, the new-expression shall + contain a new-initializer of the form ( assignment-expression )". + Additionally, consistently with the spirit of DR 1467, we want to accept + 'new auto { 2 }' too. */ + else if (type_uses_auto (type) + && (vec_safe_length (initializer) != 1 + || (BRACE_ENCLOSED_INITIALIZER_P ((*initializer)[0]) + && CONSTRUCTOR_NELTS ((*initializer)[0]) != 1))) + { + error_at (token->location, + "initialization of new-expression for type %<auto%> " + "requires exactly one element"); + ret = error_mark_node; + } else { /* Create a representation of the new-expression. */ diff --git a/gcc/cselib.c b/gcc/cselib.c index 214995927f5..4264394b072 100644 --- a/gcc/cselib.c +++ b/gcc/cselib.c @@ -246,11 +246,11 @@ static unsigned int cfa_base_preserved_regno = INVALID_REGNUM; each time memory is invalidated. 
*/ static cselib_val *first_containing_mem = &dummy_val; -static object_allocator<elt_list> elt_list_pool ("elt_list", 10); -static object_allocator<elt_loc_list> elt_loc_list_pool ("elt_loc_list", 10); -static object_allocator<cselib_val> cselib_val_pool ("cselib_val_list", 10); +static object_allocator<elt_list> elt_list_pool ("elt_list"); +static object_allocator<elt_loc_list> elt_loc_list_pool ("elt_loc_list"); +static object_allocator<cselib_val> cselib_val_pool ("cselib_val_list"); -static pool_allocator value_pool ("value", 100, RTX_CODE_SIZE (VALUE)); +static pool_allocator value_pool ("value", RTX_CODE_SIZE (VALUE)); /* If nonnull, cselib will call this function before freeing useless VALUEs. A VALUE is deemed useless if its "locs" field is null. */ diff --git a/gcc/defaults.h b/gcc/defaults.h index d4d3a565c17..9ca9df2e117 100644 --- a/gcc/defaults.h +++ b/gcc/defaults.h @@ -1406,9 +1406,11 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see #define STACK_OLD_CHECK_PROTECT STACK_CHECK_PROTECT #else #define STACK_OLD_CHECK_PROTECT \ - (targetm_common.except_unwind_info (&global_options) == UI_SJLJ \ + (!global_options.x_flag_exceptions \ ? 75 * UNITS_PER_WORD \ - : 8 * 1024) + : targetm_common.except_unwind_info (&global_options) == UI_SJLJ \ + ? 4 * 1024 \ + : 8 * 1024) #endif /* Minimum amount of stack required to recover from an anticipated stack @@ -1416,9 +1418,11 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see of stack required to propagate an exception. */ #ifndef STACK_CHECK_PROTECT #define STACK_CHECK_PROTECT \ - (targetm_common.except_unwind_info (&global_options) == UI_SJLJ \ - ? 75 * UNITS_PER_WORD \ - : 12 * 1024) + (!global_options.x_flag_exceptions \ + ? 4 * 1024 \ + : targetm_common.except_unwind_info (&global_options) == UI_SJLJ \ + ? 
8 * 1024 \ + : 12 * 1024) #endif /* Make the maximum frame size be the largest we can and still only need diff --git a/gcc/df-problems.c b/gcc/df-problems.c index d4b5d76662e..0ab533fe0b1 100644 --- a/gcc/df-problems.c +++ b/gcc/df-problems.c @@ -1997,8 +1997,7 @@ static void df_chain_alloc (bitmap all_blocks ATTRIBUTE_UNUSED) { df_chain_remove_problem (); - df_chain->block_pool = new object_allocator<df_link> ("df_chain_block pool", - 50); + df_chain->block_pool = new object_allocator<df_link> ("df_chain_block pool"); df_chain->optional_p = true; } diff --git a/gcc/df-scan.c b/gcc/df-scan.c index 259c9591307..eea93df1a31 100644 --- a/gcc/df-scan.c +++ b/gcc/df-scan.c @@ -133,8 +133,6 @@ static const unsigned int copy_all = copy_defs | copy_uses | copy_eq_uses it gets run. It also has no need for the iterative solver. ----------------------------------------------------------------------------*/ -#define SCAN_PROBLEM_DATA_BLOCK_SIZE 512 - /* Problem data for the scanning dataflow function. 
*/ struct df_scan_problem_data { @@ -253,17 +251,17 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED) df_scan->computed = true; problem_data->ref_base_pool = new object_allocator<df_base_ref> - ("df_scan ref base", SCAN_PROBLEM_DATA_BLOCK_SIZE); + ("df_scan ref base"); problem_data->ref_artificial_pool = new object_allocator<df_artificial_ref> - ("df_scan ref artificial", SCAN_PROBLEM_DATA_BLOCK_SIZE); + ("df_scan ref artificial"); problem_data->ref_regular_pool = new object_allocator<df_regular_ref> - ("df_scan ref regular", SCAN_PROBLEM_DATA_BLOCK_SIZE); + ("df_scan ref regular"); problem_data->insn_pool = new object_allocator<df_insn_info> - ("df_scan insn", SCAN_PROBLEM_DATA_BLOCK_SIZE); + ("df_scan insn"); problem_data->reg_pool = new object_allocator<df_reg_info> - ("df_scan reg", SCAN_PROBLEM_DATA_BLOCK_SIZE); + ("df_scan reg"); problem_data->mw_reg_pool = new object_allocator<df_mw_hardreg> - ("df_scan mw_reg", SCAN_PROBLEM_DATA_BLOCK_SIZE / 16); + ("df_scan mw_reg"); bitmap_obstack_initialize (&problem_data->reg_bitmaps); bitmap_obstack_initialize (&problem_data->insn_bitmaps); diff --git a/gcc/diagnostic.c b/gcc/diagnostic.c index 01a8e35d73b..a1ffe72ea95 100644 --- a/gcc/diagnostic.c +++ b/gcc/diagnostic.c @@ -137,7 +137,6 @@ diagnostic_initialize (diagnostic_context *context, int n_opts) new (context->printer) pretty_printer (); memset (context->diagnostic_count, 0, sizeof context->diagnostic_count); - context->some_warnings_are_errors = false; context->warning_as_error_requested = false; context->n_opts = n_opts; context->classify_diagnostic = XNEWVEC (diagnostic_t, n_opts); @@ -204,7 +203,7 @@ void diagnostic_finish (diagnostic_context *context) { /* Some of the errors may actually have been warnings. */ - if (context->some_warnings_are_errors) + if (diagnostic_kind_count (context, DK_WERROR)) { /* -Werror was given. 
*/ if (context->warning_as_error_requested) @@ -378,7 +377,8 @@ diagnostic_print_caret_line (diagnostic_context * context, int cmax = MAX (xloc1.column, xloc2.column); int line_width; - const char *line = location_get_source_line (xloc1, &line_width); + const char *line = location_get_source_line (xloc1.file, xloc1.line, + &line_width); if (line == NULL || cmax > line_width) return; @@ -861,9 +861,6 @@ diagnostic_report_diagnostic (diagnostic_context *context, return false; } - if (orig_diag_kind == DK_WARNING && diagnostic->kind == DK_ERROR) - context->some_warnings_are_errors = true; - context->lock++; if (diagnostic->kind == DK_ICE || diagnostic->kind == DK_ICE_NOBT) diff --git a/gcc/diagnostic.h b/gcc/diagnostic.h index 1b9b7d42865..7fcb6a8cd0e 100644 --- a/gcc/diagnostic.h +++ b/gcc/diagnostic.h @@ -66,10 +66,6 @@ struct diagnostic_context /* The number of times we have issued diagnostics. */ int diagnostic_count[DK_LAST_DIAGNOSTIC_KIND]; - /* True if we should display the "warnings are being tread as error" - message, usually displayed once per compiler run. */ - bool some_warnings_are_errors; - /* True if it has been requested that warnings be treated as errors. */ bool warning_as_error_requested; diff --git a/gcc/doc/install.texi b/gcc/doc/install.texi index c18dbdd9a74..1fd773e8e20 100644 --- a/gcc/doc/install.texi +++ b/gcc/doc/install.texi @@ -566,7 +566,7 @@ components of the binutils you intend to build alongside the compiler Likewise the GMP, MPFR and MPC libraries can be automatically built together with GCC. You may simply run the -./contrib/download_prerequisites script in the GCC source directory +@command{contrib/download_prerequisites} script in the GCC source directory to set up everything. 
Otherwise unpack the GMP, MPFR and/or MPC source distributions in the directory containing the GCC sources and rename diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi index 25775cf9305..b12d9d56feb 100644 --- a/gcc/doc/invoke.texi +++ b/gcc/doc/invoke.texi @@ -290,6 +290,7 @@ Objective-C and Objective-C++ Dialects}. -Wunsuffixed-float-constants -Wunused -Wunused-function @gol -Wunused-label -Wunused-local-typedefs -Wunused-parameter @gol -Wno-unused-result -Wunused-value @gol -Wunused-variable @gol +-Wunused-const-variable @gol -Wunused-but-set-parameter -Wunused-but-set-variable @gol -Wuseless-cast -Wvariadic-macros -Wvector-operation-performance @gol -Wvla -Wvolatile-register-var -Wwrite-strings @gol @@ -4147,9 +4148,20 @@ its return value. The default is @option{-Wunused-result}. @item -Wunused-variable @opindex Wunused-variable @opindex Wno-unused-variable -Warn whenever a local variable or non-constant static variable is unused -aside from its declaration. -This warning is enabled by @option{-Wall}. +Warn whenever a local or static variable is unused aside from its +declaration. This option implies @option{-Wunused-const-variable} for C, +but not for C++. This warning is enabled by @option{-Wall}. + +To suppress this warning use the @code{unused} attribute +(@pxref{Variable Attributes}). + +@item -Wunused-const-variable +@opindex Wunused-const-variable +@opindex Wno-unused-const-variable +Warn whenever a constant static variable is unused aside from its declaration. +This warning is enabled by @option{-Wunused-variable} for C, but not for C++. +In C++ this is normally not an error since const variables take the place of +@code{#define}s in C++. To suppress this warning use the @code{unused} attribute (@pxref{Variable Attributes}). @@ -12442,6 +12454,14 @@ for @var{string} in this option are not guaranteed to be consistent across releases. This option is only intended to be useful when developing GCC. 
+ +@item -mpc-relative-literal-loads +@opindex mpcrelativeliteralloads +Enable PC relative literal loads. If this option is used, literal +pools are assumed to have a range of up to 1MiB and an appropriate +instruction sequence is used. This option has no impact when used +with @option{-mcmodel=tiny}. + @end table @subsubsection @option{-march} and @option{-mcpu} Feature Modifiers diff --git a/gcc/doc/match-and-simplify.texi b/gcc/doc/match-and-simplify.texi index 876483f681f..c5c2b7ec3c7 100644 --- a/gcc/doc/match-and-simplify.texi +++ b/gcc/doc/match-and-simplify.texi @@ -118,8 +118,8 @@ be a valid GIMPLE operand (so you cannot generate expressions in C code). @smallexample (simplify (trunc_mod integer_zerop@@0 @@1) - (if (!integer_zerop (@@1))) - @@0) + (if (!integer_zerop (@@1)) + @@0)) @end smallexample Here @code{@@0} captures the first operand of the trunc_mod expression @@ -130,9 +130,11 @@ can be unconstrained or capture expresions or predicates. This example introduces an optional operand of simplify, the if-expression. This condition is evaluated after the expression matched in the IL and is required to evaluate to true -to enable the replacement expression. The expression operand -of the @code{if} is a standard C expression which may contain references -to captures. +to enable the replacement expression in the second operand +position. The expression operand of the @code{if} is a standard C +expression which may contain references to captures. The @code{if} +has an optional third operand which may contain the replacement +expression that is enabled when the condition evaluates to false. A @code{if} expression can be used to specify a common condition for multiple simplify patterns, avoiding the need @@ -149,8 +151,48 @@ to repeat that multiple times: (negate @@1))) @end smallexample +Note that @code{if}s in outer position do not have the optional +else clause but instead have multiple then clauses. + Ifs can be nested. 
+There exists a @code{switch} expression which can be used to +chain conditions avoiding nesting @code{if}s too much: + +@smallexample +(simplify + (simple_comparison @@0 REAL_CST@@1) + (switch + /* a CMP (-0) -> a CMP 0 */ + (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@@1))) + (cmp @@0 @{ build_real (TREE_TYPE (@@1), dconst0); @})) + /* x != NaN is always true, other ops are always false. */ + (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@@1)) + && ! HONOR_SNANS (@@1)) + @{ constant_boolean_node (cmp == NE_EXPR, type); @}))) +@end smallexample + +Is equal to + +@smallexample +(simplify + (simple_comparison @@0 REAL_CST@@1) + (switch + /* a CMP (-0) -> a CMP 0 */ + (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@@1))) + (cmp @@0 @{ build_real (TREE_TYPE (@@1), dconst0); @}) + /* x != NaN is always true, other ops are always false. */ + (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@@1)) + && ! HONOR_SNANS (@@1)) + @{ constant_boolean_node (cmp == NE_EXPR, type); @})))) +@end smallexample + +which has the second @code{if} in the else operand of the first. +The @code{switch} expression takes @code{if} expressions as +operands (which may not have else clauses) and as a last operand +a replacement expression which should be enabled by default if +no other condition evaluated to true. + Captures can also be used for capturing results of sub-expressions. @smallexample @@ -186,20 +228,36 @@ preprocessor directives. (bit_and @@1 @@0)) @end smallexample -Here we introduce flags on match expressions. There is currently -a single flag, @code{c}, which denotes that the expression should +Here we introduce flags on match expressions. The flag used +above, @code{c}, denotes that the expression should be also matched commutated. 
Thus the above match expression is really the following four match expressions: +@smallexample (bit_and integral_op_p@@0 (bit_ior (bit_not @@0) @@1)) (bit_and (bit_ior (bit_not @@0) @@1) integral_op_p@@0) (bit_and integral_op_p@@0 (bit_ior @@1 (bit_not @@0))) (bit_and (bit_ior @@1 (bit_not @@0)) integral_op_p@@0) +@end smallexample Usual canonicalizations you know from GENERIC expressions are applied before matching, so for example constant operands always come second in commutative expressions. +The second supported flag is @code{s} which tells the code +generator to fail the pattern if the expression marked with +@code{s} does have more than one use. For example in + +@smallexample +(simplify + (pointer_plus (pointer_plus:s @@0 @@1) @@3) + (pointer_plus @@0 (plus @@1 @@3))) +@end smallexample + +this avoids the association if @code{(pointer_plus @@0 @@1)} is +used outside of the matched expression and thus it would stay +live and not trivially removed by dead code elimination. + More features exist to avoid too much repetition. @smallexample @@ -291,17 +349,17 @@ with a @code{?}: @smallexample (simplify - (eq (convert@@0 @@1) (convert? @@2)) + (eq (convert@@0 @@1) (convert@? @@2)) (eq @@1 (convert @@2))) @end smallexample which will match both @code{(eq (convert @@1) (convert @@2))} and @code{(eq (convert @@1) @@2)}. The optional converts are supposed to be all either present or not, thus -@code{(eq (convert? @@1) (convert? @@2))} will result in two +@code{(eq (convert@? @@1) (convert@? @@2))} will result in two patterns only. If you want to match all four combinations you have access to two additional conditional converts as in -@code{(eq (convert1? @@1) (convert2? @@2))}. +@code{(eq (convert1@? @@1) (convert2@? @@2))}. 
Predicates available from the GCC middle-end need to be made available explicitely via @code{define_predicates}: diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi index d548d96b234..65fb8656a18 100644 --- a/gcc/doc/tm.texi +++ b/gcc/doc/tm.texi @@ -3360,10 +3360,10 @@ default value of this macro is zero. @defmac STACK_CHECK_PROTECT The number of bytes of stack needed to recover from a stack overflow, for -languages where such a recovery is supported. The default value of 75 words +languages where such a recovery is supported. The default value of 4KB/8KB with the @code{setjmp}/@code{longjmp}-based exception handling mechanism and -8192 bytes with other exception handling mechanisms should be adequate for -most machines. +8KB/12KB with other exception handling mechanisms should be adequate for most +architectures and operating systems. @end defmac The following macros are relevant only if neither STACK_CHECK_BUILTIN diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in index 9bef4a59bed..8fe193c4f25 100644 --- a/gcc/doc/tm.texi.in +++ b/gcc/doc/tm.texi.in @@ -2946,10 +2946,10 @@ default value of this macro is zero. @defmac STACK_CHECK_PROTECT The number of bytes of stack needed to recover from a stack overflow, for -languages where such a recovery is supported. The default value of 75 words +languages where such a recovery is supported. The default value of 4KB/8KB with the @code{setjmp}/@code{longjmp}-based exception handling mechanism and -8192 bytes with other exception handling mechanisms should be adequate for -most machines. +8KB/12KB with other exception handling mechanisms should be adequate for most +architectures and operating systems. 
@end defmac The following macros are relevant only if neither STACK_CHECK_BUILTIN diff --git a/gcc/dse.c b/gcc/dse.c index ff26fc0cb05..bb229713f48 100644 --- a/gcc/dse.c +++ b/gcc/dse.c @@ -307,12 +307,9 @@ lowpart_bitmask (int n) return mask >> (HOST_BITS_PER_WIDE_INT - n); } -typedef struct store_info *store_info_t; -static object_allocator<store_info> cse_store_info_pool ("cse_store_info_pool", - 100); +static object_allocator<store_info> cse_store_info_pool ("cse_store_info_pool"); -static object_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool", - 100); +static object_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool"); /* This structure holds information about a load. These are only built for rtx bases. */ @@ -337,8 +334,7 @@ struct read_info_type }; typedef struct read_info_type *read_info_t; -static object_allocator<read_info_type> read_info_type_pool - ("read_info_pool", 100); +static object_allocator<read_info_type> read_info_type_pool ("read_info_pool"); /* One of these records is created for each insn. */ @@ -400,7 +396,7 @@ struct insn_info_type But it could also contain clobbers. Insns that contain more than one mem set are not deletable, but each of those mems are here in order to provide info to delete other insns. */ - store_info_t store_rec; + store_info *store_rec; /* The linked list of mem uses in this insn. Only the reads from rtx bases are listed here. The reads to cselib bases are @@ -427,8 +423,7 @@ struct insn_info_type }; typedef struct insn_info_type *insn_info_t; -static object_allocator<insn_info_type> insn_info_type_pool - ("insn_info_pool", 100); +static object_allocator<insn_info_type> insn_info_type_pool ("insn_info_pool"); /* The linked list of stores that are under consideration in this basic block. 
*/ @@ -495,7 +490,7 @@ struct dse_bb_info_type typedef struct dse_bb_info_type *bb_info_t; static object_allocator<dse_bb_info_type> dse_bb_info_type_pool - ("bb_info_pool", 100); + ("bb_info_pool"); /* Table to hold all bb_infos. */ static bb_info_t *bb_table; @@ -564,17 +559,14 @@ struct group_info int *offset_map_n, *offset_map_p; int offset_map_size_n, offset_map_size_p; }; -typedef struct group_info *group_info_t; -typedef const struct group_info *const_group_info_t; -static object_allocator<group_info> group_info_pool - ("rtx_group_info_pool", 100); +static object_allocator<group_info> group_info_pool ("rtx_group_info_pool"); /* Index into the rtx_group_vec. */ static int rtx_group_next_id; -static vec<group_info_t> rtx_group_vec; +static vec<group_info *> rtx_group_vec; /* This structure holds the set of changes that are being deferred @@ -591,15 +583,13 @@ struct deferred_change struct deferred_change *next; }; -typedef struct deferred_change *deferred_change_t; - static object_allocator<deferred_change> deferred_change_pool - ("deferred_change_pool", 10); + ("deferred_change_pool"); -static deferred_change_t deferred_change_list = NULL; +static deferred_change *deferred_change_list = NULL; /* The group that holds all of the clear_alias_sets. */ -static group_info_t clear_alias_group; +static group_info *clear_alias_group; /* The modes of the clear_alias_sets. */ static htab_t clear_alias_mode_table; @@ -680,11 +670,11 @@ static hash_table<invariant_group_base_hasher> *rtx_group_table; /* Get the GROUP for BASE. Add a new group if it is not there. */ -static group_info_t +static group_info * get_group_info (rtx base) { struct group_info tmp_gi; - group_info_t gi; + group_info *gi; group_info **slot; if (base) @@ -693,7 +683,7 @@ get_group_info (rtx base) if necessary. 
*/ tmp_gi.rtx_base = base; slot = rtx_group_table->find_slot (&tmp_gi, INSERT); - gi = (group_info_t) *slot; + gi = *slot; } else { @@ -790,17 +780,17 @@ dse_step0 (void) static void free_store_info (insn_info_t insn_info) { - store_info_t store_info = insn_info->store_rec; - while (store_info) + store_info *cur = insn_info->store_rec; + while (cur) { - store_info_t next = store_info->next; - if (store_info->is_large) - BITMAP_FREE (store_info->positions_needed.large.bmap); - if (store_info->cse_base) - cse_store_info_pool.remove (store_info); + store_info *next = cur->next; + if (cur->is_large) + BITMAP_FREE (cur->positions_needed.large.bmap); + if (cur->cse_base) + cse_store_info_pool.remove (cur); else - rtx_store_info_pool.remove (store_info); - store_info = next; + rtx_store_info_pool.remove (cur); + cur = next; } insn_info->cannot_delete = true; @@ -1015,7 +1005,7 @@ can_escape (tree expr) OFFSET and WIDTH. */ static void -set_usage_bits (group_info_t group, HOST_WIDE_INT offset, HOST_WIDE_INT width, +set_usage_bits (group_info *group, HOST_WIDE_INT offset, HOST_WIDE_INT width, tree expr) { HOST_WIDE_INT i; @@ -1240,7 +1230,7 @@ canon_address (rtx mem, if (ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (mem)) && const_or_frame_p (address)) { - group_info_t group = get_group_info (address); + group_info *group = get_group_info (address); if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " gid=%d offset=%d \n", @@ -1276,7 +1266,7 @@ clear_rhs_from_active_local_stores (void) while (ptr) { - store_info_t store_info = ptr->store_rec; + store_info *store_info = ptr->store_rec; /* Skip the clobbers. */ while (!store_info->is_set) store_info = store_info->next; @@ -1292,7 +1282,7 @@ clear_rhs_from_active_local_stores (void) /* Mark byte POS bytes from the beginning of store S_INFO as unneeded. 
*/ static inline void -set_position_unneeded (store_info_t s_info, int pos) +set_position_unneeded (store_info *s_info, int pos) { if (__builtin_expect (s_info->is_large, false)) { @@ -1307,7 +1297,7 @@ set_position_unneeded (store_info_t s_info, int pos) /* Mark the whole store S_INFO as unneeded. */ static inline void -set_all_positions_unneeded (store_info_t s_info) +set_all_positions_unneeded (store_info *s_info) { if (__builtin_expect (s_info->is_large, false)) { @@ -1323,7 +1313,7 @@ set_all_positions_unneeded (store_info_t s_info) /* Return TRUE if any bytes from S_INFO store are needed. */ static inline bool -any_positions_needed_p (store_info_t s_info) +any_positions_needed_p (store_info *s_info) { if (__builtin_expect (s_info->is_large, false)) return (s_info->positions_needed.large.count @@ -1337,7 +1327,7 @@ any_positions_needed_p (store_info_t s_info) store are needed. */ static inline bool -all_positions_needed_p (store_info_t s_info, int start, int width) +all_positions_needed_p (store_info *s_info, int start, int width) { if (__builtin_expect (s_info->is_large, false)) { @@ -1355,7 +1345,7 @@ all_positions_needed_p (store_info_t s_info, int start, int width) } -static rtx get_stored_val (store_info_t, machine_mode, HOST_WIDE_INT, +static rtx get_stored_val (store_info *, machine_mode, HOST_WIDE_INT, HOST_WIDE_INT, basic_block, bool); @@ -1371,7 +1361,7 @@ record_store (rtx body, bb_info_t bb_info) HOST_WIDE_INT width = 0; alias_set_type spill_alias_set; insn_info_t insn_info = bb_info->last_insn; - store_info_t store_info = NULL; + store_info *store_info = NULL; int group_id; cselib_val *base = NULL; insn_info_t ptr, last, redundant_reason; @@ -1467,7 +1457,7 @@ record_store (rtx body, bb_info_t bb_info) /* In the restrictive case where the base is a constant or the frame pointer we can do global analysis. 
*/ - group_info_t group + group_info *group = rtx_group_vec[group_id]; tree expr = MEM_EXPR (mem); @@ -1537,7 +1527,7 @@ record_store (rtx body, bb_info_t bb_info) mem_addr = base->val_rtx; else { - group_info_t group + group_info *group = rtx_group_vec[group_id]; mem_addr = group->canon_base_addr; } @@ -1552,7 +1542,7 @@ record_store (rtx body, bb_info_t bb_info) while (ptr) { insn_info_t next = ptr->next_local_store; - store_info_t s_info = ptr->store_rec; + struct store_info *s_info = ptr->store_rec; bool del = true; /* Skip the clobbers. We delete the active insn if this insn @@ -1722,7 +1712,7 @@ dump_insn_info (const char * start, insn_info_t insn_info) static rtx find_shift_sequence (int access_size, - store_info_t store_info, + store_info *store_info, machine_mode read_mode, int shift, bool speed, bool require_cst) { @@ -1854,7 +1844,7 @@ look_for_hardregs (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data) if not successful. If REQUIRE_CST is true, return always constant. */ static rtx -get_stored_val (store_info_t store_info, machine_mode read_mode, +get_stored_val (store_info *store_info, machine_mode read_mode, HOST_WIDE_INT read_begin, HOST_WIDE_INT read_end, basic_block bb, bool require_cst) { @@ -1954,7 +1944,7 @@ get_stored_val (store_info_t store_info, machine_mode read_mode, went ok. */ static bool -replace_read (store_info_t store_info, insn_info_t store_insn, +replace_read (store_info *store_info, insn_info_t store_insn, read_info_t read_info, insn_info_t read_insn, rtx *loc, bitmap regs_live) { @@ -2029,7 +2019,7 @@ replace_read (store_info_t store_info, insn_info_t store_insn, if (validate_change (read_insn->insn, loc, read_reg, 0)) { - deferred_change_t change = deferred_change_pool.allocate (); + deferred_change *change = deferred_change_pool.allocate (); /* Insert this right before the store insn where it will be safe from later insns that might change it before the read. 
*/ @@ -2150,7 +2140,7 @@ check_mem_read_rtx (rtx *loc, bb_info_t bb_info) mem_addr = base->val_rtx; else { - group_info_t group + group_info *group = rtx_group_vec[group_id]; mem_addr = group->canon_base_addr; } @@ -2176,7 +2166,7 @@ check_mem_read_rtx (rtx *loc, bb_info_t bb_info) while (i_ptr) { - store_info_t store_info = i_ptr->store_rec; + store_info *store_info = i_ptr->store_rec; /* Skip the clobbers. */ while (!store_info->is_set) @@ -2218,7 +2208,7 @@ check_mem_read_rtx (rtx *loc, bb_info_t bb_info) while (i_ptr) { bool remove = false; - store_info_t store_info = i_ptr->store_rec; + store_info *store_info = i_ptr->store_rec; /* Skip the clobbers. */ while (!store_info->is_set) @@ -2302,7 +2292,7 @@ check_mem_read_rtx (rtx *loc, bb_info_t bb_info) while (i_ptr) { bool remove = false; - store_info_t store_info = i_ptr->store_rec; + store_info *store_info = i_ptr->store_rec; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " processing cselib load against insn %d\n", @@ -2532,7 +2522,7 @@ scan_insn (bb_info_t bb_info, rtx_insn *insn) /* If the frame is read, the frame related stores are killed. */ else if (insn_info->frame_read) { - store_info_t store_info = i_ptr->store_rec; + store_info *store_info = i_ptr->store_rec; /* Skip the clobbers. */ while (!store_info->is_set) @@ -2659,7 +2649,7 @@ remove_useless_values (cselib_val *base) while (insn_info) { - store_info_t store_info = insn_info->store_rec; + store_info *store_info = insn_info->store_rec; bool del = false; /* If ANY of the store_infos match the cselib group that is @@ -2756,7 +2746,7 @@ dse_step1 (void) insn_info_t i_ptr = active_local_stores; while (i_ptr) { - store_info_t store_info = i_ptr->store_rec; + store_info *store_info = i_ptr->store_rec; /* Skip the clobbers. 
*/ while (!store_info->is_set) @@ -2766,7 +2756,7 @@ dse_step1 (void) else if (store_info->group_id >= 0) { - group_info_t group + group_info *group = rtx_group_vec[store_info->group_id]; if (group->frame_related && !i_ptr->cannot_delete) delete_dead_store_insn (i_ptr); @@ -2780,7 +2770,7 @@ dse_step1 (void) replace_read. Cselib is finished with this block. */ while (deferred_change_list) { - deferred_change_t next = deferred_change_list->next; + deferred_change *next = deferred_change_list->next; /* There is no reason to validate this change. That was done earlier. */ @@ -2797,7 +2787,7 @@ dse_step1 (void) { if (ptr->contains_cselib_groups) { - store_info_t s_info = ptr->store_rec; + store_info *s_info = ptr->store_rec; while (s_info && !s_info->is_set) s_info = s_info->next; if (s_info @@ -2818,7 +2808,7 @@ dse_step1 (void) } else { - store_info_t s_info; + store_info *s_info; /* Free at least positions_needed bitmaps. */ for (s_info = ptr->store_rec; s_info; s_info = s_info->next) @@ -2854,7 +2844,7 @@ static void dse_step2_init (void) { unsigned int i; - group_info_t group; + group_info *group; FOR_EACH_VEC_ELT (rtx_group_vec, i, group) { @@ -2905,7 +2895,7 @@ static bool dse_step2_nospill (void) { unsigned int i; - group_info_t group; + group_info *group; /* Position 0 is unused because 0 is used in the maps to mean unused. */ current_position = 1; @@ -2954,7 +2944,7 @@ dse_step2_nospill (void) there, return 0. */ static int -get_bitmap_index (group_info_t group_info, HOST_WIDE_INT offset) +get_bitmap_index (group_info *group_info, HOST_WIDE_INT offset) { if (offset < 0) { @@ -2976,12 +2966,12 @@ get_bitmap_index (group_info_t group_info, HOST_WIDE_INT offset) may be NULL. 
*/ static void -scan_stores_nospill (store_info_t store_info, bitmap gen, bitmap kill) +scan_stores_nospill (store_info *store_info, bitmap gen, bitmap kill) { while (store_info) { HOST_WIDE_INT i; - group_info_t group_info + group_info *group_info = rtx_group_vec[store_info->group_id]; if (group_info->process_globally) for (i = store_info->begin; i < store_info->end; i++) @@ -3003,7 +2993,7 @@ scan_stores_nospill (store_info_t store_info, bitmap gen, bitmap kill) may be NULL. */ static void -scan_stores_spill (store_info_t store_info, bitmap gen, bitmap kill) +scan_stores_spill (store_info *store_info, bitmap gen, bitmap kill) { while (store_info) { @@ -3031,7 +3021,7 @@ scan_reads_nospill (insn_info_t insn_info, bitmap gen, bitmap kill) { read_info_t read_info = insn_info->read_rec; int i; - group_info_t group; + group_info *group; /* If this insn reads the frame, kill all the frame related stores. */ if (insn_info->frame_read) @@ -3239,7 +3229,7 @@ dse_step3_exit_block_scan (bb_info_t bb_info) if (stores_off_frame_dead_at_return) { unsigned int i; - group_info_t group; + group_info *group; FOR_EACH_VEC_ELT (rtx_group_vec, i, group) { @@ -3320,7 +3310,7 @@ dse_step3 (bool for_spills) if (!all_ones) { unsigned int j; - group_info_t group; + group_info *group; all_ones = BITMAP_ALLOC (&dse_bitmap_obstack); FOR_EACH_VEC_ELT (rtx_group_vec, j, group) @@ -3524,7 +3514,7 @@ dse_step5_nospill (void) && (!insn_info->cannot_delete) && (!bitmap_empty_p (v))) { - store_info_t store_info = insn_info->store_rec; + store_info *store_info = insn_info->store_rec; /* Try to delete the current insn. 
*/ deleted = true; @@ -3538,7 +3528,7 @@ dse_step5_nospill (void) else { HOST_WIDE_INT i; - group_info_t group_info + group_info *group_info = rtx_group_vec[store_info->group_id]; for (i = store_info->begin; i < store_info->end; i++) @@ -3624,7 +3614,7 @@ dse_step6 (void) && INSN_P (insn_info->insn) && !insn_info->cannot_delete) { - store_info_t s_info = insn_info->store_rec; + store_info *s_info = insn_info->store_rec; while (s_info && !s_info->is_set) s_info = s_info->next; diff --git a/gcc/dwarf2cfi.c b/gcc/dwarf2cfi.c index 138bae045dc..19ec9a54b92 100644 --- a/gcc/dwarf2cfi.c +++ b/gcc/dwarf2cfi.c @@ -160,9 +160,6 @@ typedef struct } dw_trace_info; -typedef dw_trace_info *dw_trace_info_ref; - - /* Hashtable helpers. */ struct trace_info_hasher : nofree_ptr_hash <dw_trace_info> @@ -186,7 +183,7 @@ trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b) /* The variables making up the pseudo-cfg, as described above. */ static vec<dw_trace_info> trace_info; -static vec<dw_trace_info_ref> trace_work_list; +static vec<dw_trace_info *> trace_work_list; static hash_table<trace_info_hasher> *trace_index; /* A vector of call frame insns for the CIE. */ diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c index b6ab869e0d1..7d098d8cb0e 100644 --- a/gcc/dwarf2out.c +++ b/gcc/dwarf2out.c @@ -2550,14 +2550,7 @@ const struct gcc_debug_hooks dwarf2_lineno_debug_hooks = typedef long int dw_offset; -/* Define typedefs here to avoid circular dependencies. */ - -typedef struct dw_attr_struct *dw_attr_ref; -typedef struct dw_line_info_struct *dw_line_info_ref; -typedef struct pubname_struct *pubname_ref; -typedef struct dw_ranges_struct *dw_ranges_ref; -typedef struct dw_ranges_by_label_struct *dw_ranges_by_label_ref; -typedef struct comdat_type_struct *comdat_type_node_ref; +struct comdat_type_node; /* The entries in the line_info table more-or-less mirror the opcodes that are used in the real dwarf line table. 
Arrays of these entries @@ -2596,7 +2589,7 @@ typedef struct GTY(()) dw_line_info_struct { } dw_line_info_entry; -typedef struct GTY(()) dw_line_info_table_struct { +struct GTY(()) dw_line_info_table { /* The label that marks the end of this section. */ const char *end_label; @@ -2610,9 +2603,7 @@ typedef struct GTY(()) dw_line_info_table_struct { bool in_use; vec<dw_line_info_entry, va_gc> *entries; -} dw_line_info_table; - -typedef dw_line_info_table *dw_line_info_table_p; +}; /* Each DIE attribute has a field specifying the attribute kind, @@ -2634,7 +2625,7 @@ typedef struct GTY((chain_circular ("%h.die_sib"), for_user)) die_struct { union die_symbol_or_type_node { const char * GTY ((tag ("0"))) die_symbol; - comdat_type_node_ref GTY ((tag ("1"))) die_type_node; + comdat_type_node *GTY ((tag ("1"))) die_type_node; } GTY ((desc ("%0.comdat_type_p"))) die_id; vec<dw_attr_node, va_gc> *die_attr; @@ -2680,7 +2671,7 @@ typedef struct GTY(()) pubname_struct { pubname_entry; -struct GTY(()) dw_ranges_struct { +struct GTY(()) dw_ranges { /* If this is positive, it's a block number, otherwise it's a bitwise-negated index into dw_ranges_by_label. */ int num; @@ -2696,21 +2687,20 @@ typedef struct GTY(()) macinfo_struct { macinfo_entry; -struct GTY(()) dw_ranges_by_label_struct { +struct GTY(()) dw_ranges_by_label { const char *begin; const char *end; }; /* The comdat type node structure. */ -typedef struct GTY(()) comdat_type_struct +struct GTY(()) comdat_type_node { dw_die_ref root_die; dw_die_ref type_die; dw_die_ref skeleton_die; char signature[DWARF_TYPE_SIGNATURE_SIZE]; - struct comdat_type_struct *next; -} -comdat_type_node; + comdat_type_node *next; +}; /* A list of DIEs for which we can't determine ancestry (parent_die field) just yet. 
Later in dwarf2out_finish we will fill in the @@ -2985,7 +2975,7 @@ static GTY(()) dw_line_info_table *text_section_line_info; static GTY(()) dw_line_info_table *cold_text_section_line_info; /* The set of all non-default tables of line number info. */ -static GTY(()) vec<dw_line_info_table_p, va_gc> *separate_line_info; +static GTY(()) vec<dw_line_info_table *, va_gc> *separate_line_info; /* A flag to tell pubnames/types export if there is an info section to refer to. */ @@ -3010,7 +3000,7 @@ static GTY (()) vec<macinfo_entry, va_gc> *macinfo_table; && !macinfo_table->is_empty ()) /* Array of dies for which we should generate .debug_ranges info. */ -static GTY ((length ("ranges_table_allocated"))) dw_ranges_ref ranges_table; +static GTY ((length ("ranges_table_allocated"))) dw_ranges *ranges_table; /* Number of elements currently allocated for ranges_table. */ static GTY(()) unsigned ranges_table_allocated; @@ -3020,7 +3010,7 @@ static GTY(()) unsigned ranges_table_in_use; /* Array of pairs of labels referenced in ranges_table. */ static GTY ((length ("ranges_by_label_allocated"))) - dw_ranges_by_label_ref ranges_by_label; + dw_ranges_by_label *ranges_by_label; /* Number of elements currently allocated for ranges_by_label. 
*/ static GTY(()) unsigned ranges_by_label_allocated; @@ -3081,38 +3071,38 @@ static const char *dwarf_attr_name (unsigned); static const char *dwarf_form_name (unsigned); static tree decl_ultimate_origin (const_tree); static tree decl_class_context (tree); -static void add_dwarf_attr (dw_die_ref, dw_attr_ref); -static inline enum dw_val_class AT_class (dw_attr_ref); -static inline unsigned int AT_index (dw_attr_ref); +static void add_dwarf_attr (dw_die_ref, dw_attr_node *); +static inline enum dw_val_class AT_class (dw_attr_node *); +static inline unsigned int AT_index (dw_attr_node *); static void add_AT_flag (dw_die_ref, enum dwarf_attribute, unsigned); -static inline unsigned AT_flag (dw_attr_ref); +static inline unsigned AT_flag (dw_attr_node *); static void add_AT_int (dw_die_ref, enum dwarf_attribute, HOST_WIDE_INT); -static inline HOST_WIDE_INT AT_int (dw_attr_ref); +static inline HOST_WIDE_INT AT_int (dw_attr_node *); static void add_AT_unsigned (dw_die_ref, enum dwarf_attribute, unsigned HOST_WIDE_INT); -static inline unsigned HOST_WIDE_INT AT_unsigned (dw_attr_ref); +static inline unsigned HOST_WIDE_INT AT_unsigned (dw_attr_node *); static void add_AT_double (dw_die_ref, enum dwarf_attribute, HOST_WIDE_INT, unsigned HOST_WIDE_INT); static inline void add_AT_vec (dw_die_ref, enum dwarf_attribute, unsigned int, unsigned int, unsigned char *); static void add_AT_data8 (dw_die_ref, enum dwarf_attribute, unsigned char *); static void add_AT_string (dw_die_ref, enum dwarf_attribute, const char *); -static inline const char *AT_string (dw_attr_ref); -static enum dwarf_form AT_string_form (dw_attr_ref); +static inline const char *AT_string (dw_attr_node *); +static enum dwarf_form AT_string_form (dw_attr_node *); static void add_AT_die_ref (dw_die_ref, enum dwarf_attribute, dw_die_ref); static void add_AT_specification (dw_die_ref, dw_die_ref); -static inline dw_die_ref AT_ref (dw_attr_ref); -static inline int AT_ref_external (dw_attr_ref); -static inline void 
set_AT_ref_external (dw_attr_ref, int); +static inline dw_die_ref AT_ref (dw_attr_node *); +static inline int AT_ref_external (dw_attr_node *); +static inline void set_AT_ref_external (dw_attr_node *, int); static void add_AT_fde_ref (dw_die_ref, enum dwarf_attribute, unsigned); static void add_AT_loc (dw_die_ref, enum dwarf_attribute, dw_loc_descr_ref); -static inline dw_loc_descr_ref AT_loc (dw_attr_ref); +static inline dw_loc_descr_ref AT_loc (dw_attr_node *); static void add_AT_loc_list (dw_die_ref, enum dwarf_attribute, dw_loc_list_ref); -static inline dw_loc_list_ref AT_loc_list (dw_attr_ref); +static inline dw_loc_list_ref AT_loc_list (dw_attr_node *); static addr_table_entry *add_addr_table_entry (void *, enum ate_kind); static void remove_addr_table_entry (addr_table_entry *); static void add_AT_addr (dw_die_ref, enum dwarf_attribute, rtx, bool); -static inline rtx AT_addr (dw_attr_ref); +static inline rtx AT_addr (dw_attr_node *); static void add_AT_lbl_id (dw_die_ref, enum dwarf_attribute, const char *); static void add_AT_lineptr (dw_die_ref, enum dwarf_attribute, const char *); static void add_AT_macptr (dw_die_ref, enum dwarf_attribute, const char *); @@ -3120,8 +3110,8 @@ static void add_AT_offset (dw_die_ref, enum dwarf_attribute, unsigned HOST_WIDE_INT); static void add_AT_range_list (dw_die_ref, enum dwarf_attribute, unsigned long, bool); -static inline const char *AT_lbl (dw_attr_ref); -static dw_attr_ref get_AT (dw_die_ref, enum dwarf_attribute); +static inline const char *AT_lbl (dw_attr_node *); +static dw_attr_node *get_AT (dw_die_ref, enum dwarf_attribute); static const char *get_AT_low_pc (dw_die_ref); static const char *get_AT_hi_pc (dw_die_ref); static const char *get_AT_string (dw_die_ref, enum dwarf_attribute); @@ -3148,12 +3138,12 @@ static void print_die (dw_die_ref, FILE *); static dw_die_ref push_new_compile_unit (dw_die_ref, dw_die_ref); static dw_die_ref pop_compile_unit (dw_die_ref); static void loc_checksum (dw_loc_descr_ref, 
struct md5_ctx *); -static void attr_checksum (dw_attr_ref, struct md5_ctx *, int *); +static void attr_checksum (dw_attr_node *, struct md5_ctx *, int *); static void die_checksum (dw_die_ref, struct md5_ctx *, int *); static void checksum_sleb128 (HOST_WIDE_INT, struct md5_ctx *); static void checksum_uleb128 (unsigned HOST_WIDE_INT, struct md5_ctx *); static void loc_checksum_ordered (dw_loc_descr_ref, struct md5_ctx *); -static void attr_checksum_ordered (enum dwarf_tag, dw_attr_ref, +static void attr_checksum_ordered (enum dwarf_tag, dw_attr_node *, struct md5_ctx *, int *); struct checksum_attributes; static void collect_checksum_attributes (struct checksum_attributes *, dw_die_ref); @@ -3162,7 +3152,7 @@ static void checksum_die_context (dw_die_ref, struct md5_ctx *); static void generate_type_signature (dw_die_ref, comdat_type_node *); static int same_loc_p (dw_loc_descr_ref, dw_loc_descr_ref, int *); static int same_dw_val_p (const dw_val_node *, const dw_val_node *, int *); -static int same_attr_p (dw_attr_ref, dw_attr_ref, int *); +static int same_attr_p (dw_attr_node *, dw_attr_node *, int *); static int same_die_p (dw_die_ref, dw_die_ref, int *); static int same_die_p_wrap (dw_die_ref, dw_die_ref); static void compute_section_prefix (dw_die_ref); @@ -3198,8 +3188,8 @@ static void unmark_dies (dw_die_ref); static void unmark_all_dies (dw_die_ref); static unsigned long size_of_pubnames (vec<pubname_entry, va_gc> *); static unsigned long size_of_aranges (void); -static enum dwarf_form value_format (dw_attr_ref); -static void output_value_format (dw_attr_ref); +static enum dwarf_form value_format (dw_attr_node *); +static void output_value_format (dw_attr_node *); static void output_abbrev_section (void); static void output_die_abbrevs (unsigned long, dw_die_ref); static void output_die_symbol (dw_die_ref); @@ -3357,8 +3347,8 @@ static void prune_unused_types_walk_attribs (dw_die_ref); static void prune_unused_types_prune (dw_die_ref); static void 
prune_unused_types (void); static int maybe_emit_file (struct dwarf_file_data *fd); -static inline const char *AT_vms_delta1 (dw_attr_ref); -static inline const char *AT_vms_delta2 (dw_attr_ref); +static inline const char *AT_vms_delta1 (dw_attr_node *); +static inline const char *AT_vms_delta2 (dw_attr_node *); static inline void add_AT_vms_delta (dw_die_ref, enum dwarf_attribute, const char *, const char *); static void append_entry_to_tmpl_value_parm_die_table (dw_die_ref, tree); @@ -3800,7 +3790,7 @@ decl_class_context (tree decl) /* Add an attribute/value pair to a DIE. */ static inline void -add_dwarf_attr (dw_die_ref die, dw_attr_ref attr) +add_dwarf_attr (dw_die_ref die, dw_attr_node *attr) { /* Maybe this should be an assert? */ if (die == NULL) @@ -3811,7 +3801,7 @@ add_dwarf_attr (dw_die_ref die, dw_attr_ref attr) } static inline enum dw_val_class -AT_class (dw_attr_ref a) +AT_class (dw_attr_node *a) { return a->dw_attr_val.val_class; } @@ -3822,7 +3812,7 @@ AT_class (dw_attr_ref a) pruning. 
*/ static inline unsigned int -AT_index (dw_attr_ref a) +AT_index (dw_attr_node *a) { if (AT_class (a) == dw_val_class_str) return a->dw_attr_val.v.val_str->index; @@ -3846,7 +3836,7 @@ add_AT_flag (dw_die_ref die, enum dwarf_attribute attr_kind, unsigned int flag) } static inline unsigned -AT_flag (dw_attr_ref a) +AT_flag (dw_attr_node *a) { gcc_assert (a && AT_class (a) == dw_val_class_flag); return a->dw_attr_val.v.val_flag; @@ -3867,7 +3857,7 @@ add_AT_int (dw_die_ref die, enum dwarf_attribute attr_kind, HOST_WIDE_INT int_va } static inline HOST_WIDE_INT -AT_int (dw_attr_ref a) +AT_int (dw_attr_node *a) { gcc_assert (a && AT_class (a) == dw_val_class_const); return a->dw_attr_val.v.val_int; @@ -3889,7 +3879,7 @@ add_AT_unsigned (dw_die_ref die, enum dwarf_attribute attr_kind, } static inline unsigned HOST_WIDE_INT -AT_unsigned (dw_attr_ref a) +AT_unsigned (dw_attr_node *a) { gcc_assert (a && AT_class (a) == dw_val_class_unsigned_const); return a->dw_attr_val.v.val_unsigned; @@ -4064,7 +4054,7 @@ add_AT_string (dw_die_ref die, enum dwarf_attribute attr_kind, const char *str) } static inline const char * -AT_string (dw_attr_ref a) +AT_string (dw_attr_node *a) { gcc_assert (a && AT_class (a) == dw_val_class_str); return a->dw_attr_val.v.val_str->str; @@ -4134,7 +4124,7 @@ find_string_form (struct indirect_string_node *node) output inline in DIE or out-of-line in .debug_str section. */ static enum dwarf_form -AT_string_form (dw_attr_ref a) +AT_string_form (dw_attr_node *a) { gcc_assert (a && AT_class (a) == dw_val_class_str); return find_string_form (a->dw_attr_val.v.val_str); @@ -4167,7 +4157,7 @@ add_AT_die_ref (dw_die_ref die, enum dwarf_attribute attr_kind, dw_die_ref targ_ /* Change DIE reference REF to point to NEW_DIE instead. 
*/ static inline void -change_AT_die_ref (dw_attr_ref ref, dw_die_ref new_die) +change_AT_die_ref (dw_attr_node *ref, dw_die_ref new_die) { gcc_assert (ref->dw_attr_val.val_class == dw_val_class_die_ref); ref->dw_attr_val.v.val_die_ref.die = new_die; @@ -4186,14 +4176,14 @@ add_AT_specification (dw_die_ref die, dw_die_ref targ_die) } static inline dw_die_ref -AT_ref (dw_attr_ref a) +AT_ref (dw_attr_node *a) { gcc_assert (a && AT_class (a) == dw_val_class_die_ref); return a->dw_attr_val.v.val_die_ref.die; } static inline int -AT_ref_external (dw_attr_ref a) +AT_ref_external (dw_attr_node *a) { if (a && AT_class (a) == dw_val_class_die_ref) return a->dw_attr_val.v.val_die_ref.external; @@ -4202,7 +4192,7 @@ AT_ref_external (dw_attr_ref a) } static inline void -set_AT_ref_external (dw_attr_ref a, int i) +set_AT_ref_external (dw_attr_node *a, int i) { gcc_assert (a && AT_class (a) == dw_val_class_die_ref); a->dw_attr_val.v.val_die_ref.external = i; @@ -4237,7 +4227,7 @@ add_AT_loc (dw_die_ref die, enum dwarf_attribute attr_kind, dw_loc_descr_ref loc } static inline dw_loc_descr_ref -AT_loc (dw_attr_ref a) +AT_loc (dw_attr_node *a) { gcc_assert (a && AT_class (a) == dw_val_class_loc); return a->dw_attr_val.v.val_loc; @@ -4257,14 +4247,14 @@ add_AT_loc_list (dw_die_ref die, enum dwarf_attribute attr_kind, dw_loc_list_ref } static inline dw_loc_list_ref -AT_loc_list (dw_attr_ref a) +AT_loc_list (dw_attr_node *a) { gcc_assert (a && AT_class (a) == dw_val_class_loc_list); return a->dw_attr_val.v.val_loc_list; } static inline dw_loc_list_ref * -AT_loc_list_ptr (dw_attr_ref a) +AT_loc_list_ptr (dw_attr_node *a) { gcc_assert (a && AT_class (a) == dw_val_class_loc_list); return &a->dw_attr_val.v.val_loc_list; @@ -4444,7 +4434,7 @@ add_AT_addr (dw_die_ref die, enum dwarf_attribute attr_kind, rtx addr, /* Get the RTX from to an address DIE attribute. 
*/ static inline rtx -AT_addr (dw_attr_ref a) +AT_addr (dw_attr_node *a) { gcc_assert (a && AT_class (a) == dw_val_class_addr); return a->dw_attr_val.v.val_addr; @@ -4468,7 +4458,7 @@ add_AT_file (dw_die_ref die, enum dwarf_attribute attr_kind, /* Get the dwarf_file_data from a file DIE attribute. */ static inline struct dwarf_file_data * -AT_file (dw_attr_ref a) +AT_file (dw_attr_node *a) { gcc_assert (a && AT_class (a) == dw_val_class_file); return a->dw_attr_val.v.val_file; @@ -4586,7 +4576,7 @@ add_AT_range_list (dw_die_ref die, enum dwarf_attribute attr_kind, /* Return the start label of a delta attribute. */ static inline const char * -AT_vms_delta1 (dw_attr_ref a) +AT_vms_delta1 (dw_attr_node *a) { gcc_assert (a && (AT_class (a) == dw_val_class_vms_delta)); return a->dw_attr_val.v.val_vms_delta.lbl1; @@ -4595,14 +4585,14 @@ AT_vms_delta1 (dw_attr_ref a) /* Return the end label of a delta attribute. */ static inline const char * -AT_vms_delta2 (dw_attr_ref a) +AT_vms_delta2 (dw_attr_node *a) { gcc_assert (a && (AT_class (a) == dw_val_class_vms_delta)); return a->dw_attr_val.v.val_vms_delta.lbl2; } static inline const char * -AT_lbl (dw_attr_ref a) +AT_lbl (dw_attr_node *a) { gcc_assert (a && (AT_class (a) == dw_val_class_lbl_id || AT_class (a) == dw_val_class_lineptr @@ -4613,10 +4603,10 @@ AT_lbl (dw_attr_ref a) /* Get the attribute of type attr_kind. */ -static dw_attr_ref +static dw_attr_node * get_AT (dw_die_ref die, enum dwarf_attribute attr_kind) { - dw_attr_ref a; + dw_attr_node *a; unsigned ix; dw_die_ref spec = NULL; @@ -4660,7 +4650,7 @@ get_die_parent (dw_die_ref die) static inline const char * get_AT_low_pc (dw_die_ref die) { - dw_attr_ref a = get_AT (die, DW_AT_low_pc); + dw_attr_node *a = get_AT (die, DW_AT_low_pc); return a ? 
AT_lbl (a) : NULL; } @@ -4672,7 +4662,7 @@ get_AT_low_pc (dw_die_ref die) static inline const char * get_AT_hi_pc (dw_die_ref die) { - dw_attr_ref a = get_AT (die, DW_AT_high_pc); + dw_attr_node *a = get_AT (die, DW_AT_high_pc); return a ? AT_lbl (a) : NULL; } @@ -4683,7 +4673,7 @@ get_AT_hi_pc (dw_die_ref die) static inline const char * get_AT_string (dw_die_ref die, enum dwarf_attribute attr_kind) { - dw_attr_ref a = get_AT (die, attr_kind); + dw_attr_node *a = get_AT (die, attr_kind); return a ? AT_string (a) : NULL; } @@ -4694,7 +4684,7 @@ get_AT_string (dw_die_ref die, enum dwarf_attribute attr_kind) static inline int get_AT_flag (dw_die_ref die, enum dwarf_attribute attr_kind) { - dw_attr_ref a = get_AT (die, attr_kind); + dw_attr_node *a = get_AT (die, attr_kind); return a ? AT_flag (a) : 0; } @@ -4705,7 +4695,7 @@ get_AT_flag (dw_die_ref die, enum dwarf_attribute attr_kind) static inline unsigned get_AT_unsigned (dw_die_ref die, enum dwarf_attribute attr_kind) { - dw_attr_ref a = get_AT (die, attr_kind); + dw_attr_node *a = get_AT (die, attr_kind); return a ? AT_unsigned (a) : 0; } @@ -4713,7 +4703,7 @@ get_AT_unsigned (dw_die_ref die, enum dwarf_attribute attr_kind) static inline dw_die_ref get_AT_ref (dw_die_ref die, enum dwarf_attribute attr_kind) { - dw_attr_ref a = get_AT (die, attr_kind); + dw_attr_node *a = get_AT (die, attr_kind); return a ? AT_ref (a) : NULL; } @@ -4721,7 +4711,7 @@ get_AT_ref (dw_die_ref die, enum dwarf_attribute attr_kind) static inline struct dwarf_file_data * get_AT_file (dw_die_ref die, enum dwarf_attribute attr_kind) { - dw_attr_ref a = get_AT (die, attr_kind); + dw_attr_node *a = get_AT (die, attr_kind); return a ? AT_file (a) : NULL; } @@ -4777,7 +4767,7 @@ is_ada (void) static bool remove_AT (dw_die_ref die, enum dwarf_attribute attr_kind) { - dw_attr_ref a; + dw_attr_node *a; unsigned ix; if (! die) @@ -5588,7 +5578,7 @@ print_dw_val (dw_val_node *val, bool recurse, FILE *outfile) /* Likewise, for a DIE attribute. 
*/ static void -print_attribute (dw_attr_ref a, bool recurse, FILE *outfile) +print_attribute (dw_attr_node *a, bool recurse, FILE *outfile) { print_dw_val (&a->dw_attr_val, recurse, outfile); } @@ -5635,7 +5625,7 @@ print_loc_descr (dw_loc_descr_ref loc, FILE *outfile) static void print_die (dw_die_ref die, FILE *outfile) { - dw_attr_ref a; + dw_attr_node *a; dw_die_ref c; unsigned ix; @@ -5724,7 +5714,7 @@ static void check_die (dw_die_ref die) { unsigned ix; - dw_attr_ref a; + dw_attr_node *a; bool inline_found = false; int n_location = 0, n_low_pc = 0, n_high_pc = 0, n_artificial = 0; int n_decl_line = 0, n_decl_file = 0; @@ -5830,7 +5820,7 @@ loc_checksum (dw_loc_descr_ref loc, struct md5_ctx *ctx) /* Calculate the checksum of an attribute. */ static void -attr_checksum (dw_attr_ref at, struct md5_ctx *ctx, int *mark) +attr_checksum (dw_attr_node *at, struct md5_ctx *ctx, int *mark) { dw_loc_descr_ref loc; rtx r; @@ -5916,7 +5906,7 @@ static void die_checksum (dw_die_ref die, struct md5_ctx *ctx, int *mark) { dw_die_ref c; - dw_attr_ref a; + dw_attr_node *a; unsigned ix; /* To avoid infinite recursion. */ @@ -6050,7 +6040,7 @@ loc_checksum_ordered (dw_loc_descr_ref loc, struct md5_ctx *ctx) /* Calculate the checksum of an attribute. 
*/ static void -attr_checksum_ordered (enum dwarf_tag tag, dw_attr_ref at, +attr_checksum_ordered (enum dwarf_tag tag, dw_attr_node *at, struct md5_ctx *ctx, int *mark) { dw_loc_descr_ref loc; @@ -6073,7 +6063,7 @@ attr_checksum_ordered (enum dwarf_tag tag, dw_attr_ref at, || (at->dw_attr == DW_AT_friend && tag == DW_TAG_friend)) { - dw_attr_ref name_attr = get_AT (target_die, DW_AT_name); + dw_attr_node *name_attr = get_AT (target_die, DW_AT_name); if (name_attr != NULL) { @@ -6205,53 +6195,53 @@ attr_checksum_ordered (enum dwarf_tag tag, dw_attr_ref at, struct checksum_attributes { - dw_attr_ref at_name; - dw_attr_ref at_type; - dw_attr_ref at_friend; - dw_attr_ref at_accessibility; - dw_attr_ref at_address_class; - dw_attr_ref at_allocated; - dw_attr_ref at_artificial; - dw_attr_ref at_associated; - dw_attr_ref at_binary_scale; - dw_attr_ref at_bit_offset; - dw_attr_ref at_bit_size; - dw_attr_ref at_bit_stride; - dw_attr_ref at_byte_size; - dw_attr_ref at_byte_stride; - dw_attr_ref at_const_value; - dw_attr_ref at_containing_type; - dw_attr_ref at_count; - dw_attr_ref at_data_location; - dw_attr_ref at_data_member_location; - dw_attr_ref at_decimal_scale; - dw_attr_ref at_decimal_sign; - dw_attr_ref at_default_value; - dw_attr_ref at_digit_count; - dw_attr_ref at_discr; - dw_attr_ref at_discr_list; - dw_attr_ref at_discr_value; - dw_attr_ref at_encoding; - dw_attr_ref at_endianity; - dw_attr_ref at_explicit; - dw_attr_ref at_is_optional; - dw_attr_ref at_location; - dw_attr_ref at_lower_bound; - dw_attr_ref at_mutable; - dw_attr_ref at_ordering; - dw_attr_ref at_picture_string; - dw_attr_ref at_prototyped; - dw_attr_ref at_small; - dw_attr_ref at_segment; - dw_attr_ref at_string_length; - dw_attr_ref at_threads_scaled; - dw_attr_ref at_upper_bound; - dw_attr_ref at_use_location; - dw_attr_ref at_use_UTF8; - dw_attr_ref at_variable_parameter; - dw_attr_ref at_virtuality; - dw_attr_ref at_visibility; - dw_attr_ref at_vtable_elem_location; + dw_attr_node *at_name; 
+ dw_attr_node *at_type; + dw_attr_node *at_friend; + dw_attr_node *at_accessibility; + dw_attr_node *at_address_class; + dw_attr_node *at_allocated; + dw_attr_node *at_artificial; + dw_attr_node *at_associated; + dw_attr_node *at_binary_scale; + dw_attr_node *at_bit_offset; + dw_attr_node *at_bit_size; + dw_attr_node *at_bit_stride; + dw_attr_node *at_byte_size; + dw_attr_node *at_byte_stride; + dw_attr_node *at_const_value; + dw_attr_node *at_containing_type; + dw_attr_node *at_count; + dw_attr_node *at_data_location; + dw_attr_node *at_data_member_location; + dw_attr_node *at_decimal_scale; + dw_attr_node *at_decimal_sign; + dw_attr_node *at_default_value; + dw_attr_node *at_digit_count; + dw_attr_node *at_discr; + dw_attr_node *at_discr_list; + dw_attr_node *at_discr_value; + dw_attr_node *at_encoding; + dw_attr_node *at_endianity; + dw_attr_node *at_explicit; + dw_attr_node *at_is_optional; + dw_attr_node *at_location; + dw_attr_node *at_lower_bound; + dw_attr_node *at_mutable; + dw_attr_node *at_ordering; + dw_attr_node *at_picture_string; + dw_attr_node *at_prototyped; + dw_attr_node *at_small; + dw_attr_node *at_segment; + dw_attr_node *at_string_length; + dw_attr_node *at_threads_scaled; + dw_attr_node *at_upper_bound; + dw_attr_node *at_use_location; + dw_attr_node *at_use_UTF8; + dw_attr_node *at_variable_parameter; + dw_attr_node *at_virtuality; + dw_attr_node *at_visibility; + dw_attr_node *at_vtable_elem_location; }; /* Collect the attributes that we will want to use for the checksum. */ @@ -6259,7 +6249,7 @@ struct checksum_attributes static void collect_checksum_attributes (struct checksum_attributes *attrs, dw_die_ref die) { - dw_attr_ref a; + dw_attr_node *a; unsigned ix; FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a) @@ -6483,7 +6473,7 @@ die_checksum_ordered (dw_die_ref die, struct md5_ctx *ctx, int *mark) /* Checksum the child DIEs. 
*/ c = die->die_child; if (c) do { - dw_attr_ref name_attr; + dw_attr_node *name_attr; c = c->die_sib; name_attr = get_AT (c, DW_AT_name); @@ -6686,7 +6676,7 @@ same_dw_val_p (const dw_val_node *v1, const dw_val_node *v2, int *mark) /* Do the attributes look the same? */ static int -same_attr_p (dw_attr_ref at1, dw_attr_ref at2, int *mark) +same_attr_p (dw_attr_node *at1, dw_attr_node *at2, int *mark) { if (at1->dw_attr != at2->dw_attr) return 0; @@ -6705,7 +6695,7 @@ static int same_die_p (dw_die_ref die1, dw_die_ref die2, int *mark) { dw_die_ref c1, c2; - dw_attr_ref a1; + dw_attr_node *a1; unsigned ix; /* To avoid infinite recursion. */ @@ -7145,7 +7135,7 @@ break_out_includes (dw_die_ref die) static int is_declaration_die (dw_die_ref die) { - dw_attr_ref a; + dw_attr_node *a; unsigned ix; FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a) @@ -7228,7 +7218,7 @@ static dw_die_ref clone_die (dw_die_ref die) { dw_die_ref clone; - dw_attr_ref a; + dw_attr_node *a; unsigned ix; clone = ggc_cleared_alloc<die_node> (); @@ -7260,7 +7250,7 @@ clone_as_declaration (dw_die_ref die) { dw_die_ref clone; dw_die_ref decl; - dw_attr_ref a; + dw_attr_node *a; unsigned ix; /* If the DIE is already a declaration, just clone it. */ @@ -7420,7 +7410,7 @@ copy_declaration_context (dw_die_ref unit, dw_die_ref die) { unsigned ix; dw_die_ref c; - dw_attr_ref a; + dw_attr_node *a; /* The original DIE will be changed to a declaration, and must be moved to be a child of the original declaration DIE. */ @@ -7627,7 +7617,7 @@ break_out_comdat_types (dw_die_ref die) if (should_move_die_to_comdat (c)) { dw_die_ref replacement; - comdat_type_node_ref type_node; + comdat_type_node *type_node; /* Break out nested types into their own type units. 
*/ break_out_comdat_types (c); @@ -7712,7 +7702,7 @@ static void copy_decls_walk (dw_die_ref unit, dw_die_ref die, decl_hash_type *decl_table) { dw_die_ref c; - dw_attr_ref a; + dw_attr_node *a; unsigned ix; FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a) @@ -7837,7 +7827,7 @@ static void output_location_lists (dw_die_ref die) { dw_die_ref c; - dw_attr_ref a; + dw_attr_node *a; unsigned ix; FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a) @@ -7887,7 +7877,7 @@ external_ref_hasher::hash (const external_ref *r) { /* We have a type signature; use a subset of the bits as the hash. The 8-byte signature is at least as large as hashval_t. */ - comdat_type_node_ref type_node = die->die_id.die_type_node; + comdat_type_node *type_node = die->die_id.die_type_node; memcpy (&h, type_node->signature, sizeof (h)); } return h; @@ -7929,7 +7919,7 @@ static void optimize_external_refs_1 (dw_die_ref die, external_ref_hash_type *map) { dw_die_ref c; - dw_attr_ref a; + dw_attr_node *a; unsigned ix; struct external_ref *ref_p; @@ -8017,7 +8007,7 @@ build_abbrev_table (dw_die_ref die, external_ref_hash_type *extern_map) unsigned long abbrev_id; unsigned int n_alloc; dw_die_ref c; - dw_attr_ref a; + dw_attr_node *a; unsigned ix; /* Scan the DIE references, and replace any that refer to @@ -8041,7 +8031,7 @@ build_abbrev_table (dw_die_ref die, external_ref_hash_type *extern_map) for (abbrev_id = 1; abbrev_id < abbrev_die_table_in_use; ++abbrev_id) { dw_die_ref abbrev = abbrev_die_table[abbrev_id]; - dw_attr_ref die_a, abbrev_a; + dw_attr_node *die_a, *abbrev_a; unsigned ix; bool ok = true; @@ -8113,7 +8103,7 @@ static unsigned long size_of_die (dw_die_ref die) { unsigned long size = 0; - dw_attr_ref a; + dw_attr_node *a; unsigned ix; enum dwarf_form form; @@ -8352,7 +8342,7 @@ static void unmark_all_dies (dw_die_ref die) { dw_die_ref c; - dw_attr_ref a; + dw_attr_node *a; unsigned ix; if (!die->die_mark) @@ -8406,7 +8396,7 @@ size_of_pubnames (vec<pubname_entry, va_gc> *names) { unsigned long 
size; unsigned i; - pubname_ref p; + pubname_entry *p; int space_for_flags = (debug_generate_pub_sections == 2) ? 1 : 0; size = DWARF_PUBNAMES_HEADER_SIZE; @@ -8456,7 +8446,7 @@ size_of_aranges (void) /* Select the encoding of an attribute value. */ static enum dwarf_form -value_format (dw_attr_ref a) +value_format (dw_attr_node *a) { switch (AT_class (a)) { @@ -8647,7 +8637,7 @@ value_format (dw_attr_ref a) /* Output the encoding of an attribute value. */ static void -output_value_format (dw_attr_ref a) +output_value_format (dw_attr_node *a) { enum dwarf_form form = value_format (a); @@ -8660,7 +8650,7 @@ static void output_die_abbrevs (unsigned long abbrev_id, dw_die_ref abbrev) { unsigned ix; - dw_attr_ref a_attr; + dw_attr_node *a_attr; dw2_asm_output_data_uleb128 (abbrev_id, "(abbrev code)"); dw2_asm_output_data_uleb128 (abbrev->die_tag, "(TAG: %s)", @@ -8835,7 +8825,7 @@ output_loc_list (dw_loc_list_ref list_head) indirect reference. */ static void -output_range_list_offset (dw_attr_ref a) +output_range_list_offset (dw_attr_node *a) { const char *name = dwarf_attr_name (a->dw_attr); @@ -8855,7 +8845,7 @@ output_range_list_offset (dw_attr_ref a) /* Output the offset into the debug_loc section. */ static void -output_loc_list_offset (dw_attr_ref a) +output_loc_list_offset (dw_attr_node *a) { char *sym = AT_loc_list (a)->ll_symbol; @@ -8871,7 +8861,7 @@ output_loc_list_offset (dw_attr_ref a) /* Output an attribute's index or value appropriately. 
*/ static void -output_attr_index_or_value (dw_attr_ref a) +output_attr_index_or_value (dw_attr_node *a) { const char *name = dwarf_attr_name (a->dw_attr); @@ -8914,7 +8904,7 @@ output_signature (const char *sig, const char *name) static void output_die (dw_die_ref die) { - dw_attr_ref a; + dw_attr_node *a; dw_die_ref c; unsigned long size; unsigned ix; @@ -9081,7 +9071,7 @@ output_die (dw_die_ref die) { if (AT_ref (a)->comdat_type_p) { - comdat_type_node_ref type_node = + comdat_type_node *type_node = AT_ref (a)->die_id.die_type_node; gcc_assert (type_node); @@ -9642,7 +9632,7 @@ output_pubnames (vec<pubname_entry, va_gc> *names) { unsigned i; unsigned long pubnames_length = size_of_pubnames (names); - pubname_ref pub; + pubname_entry *pub; if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4) dw2_asm_output_data (4, 0xffffffff, @@ -9679,7 +9669,7 @@ output_pubnames (vec<pubname_entry, va_gc> *names) the skeleton DIE (if there is one). */ if (pub->die->comdat_type_p && names == pubtype_table) { - comdat_type_node_ref type_node = pub->die->die_id.die_type_node; + comdat_type_node *type_node = pub->die->die_id.die_type_node; if (type_node != NULL) die_offset = (type_node->skeleton_die != NULL @@ -9811,10 +9801,10 @@ add_ranges_num (int num) if (in_use == ranges_table_allocated) { ranges_table_allocated += RANGES_TABLE_INCREMENT; - ranges_table = GGC_RESIZEVEC (struct dw_ranges_struct, ranges_table, + ranges_table = GGC_RESIZEVEC (dw_ranges, ranges_table, ranges_table_allocated); memset (ranges_table + ranges_table_in_use, 0, - RANGES_TABLE_INCREMENT * sizeof (struct dw_ranges_struct)); + RANGES_TABLE_INCREMENT * sizeof (dw_ranges)); } ranges_table[in_use].num = num; @@ -9847,12 +9837,10 @@ add_ranges_by_labels (dw_die_ref die, const char *begin, const char *end, if (in_use == ranges_by_label_allocated) { ranges_by_label_allocated += RANGES_TABLE_INCREMENT; - ranges_by_label = GGC_RESIZEVEC (struct dw_ranges_by_label_struct, - ranges_by_label, + ranges_by_label = 
GGC_RESIZEVEC (dw_ranges_by_label, ranges_by_label, ranges_by_label_allocated); memset (ranges_by_label + ranges_by_label_in_use, 0, - RANGES_TABLE_INCREMENT - * sizeof (struct dw_ranges_by_label_struct)); + RANGES_TABLE_INCREMENT * sizeof (dw_ranges_by_label)); } ranges_by_label[in_use].begin = begin; @@ -19787,7 +19775,7 @@ add_high_low_attributes (tree stmt, dw_die_ref die) { tree chain, superblock = NULL_TREE; dw_die_ref pdie; - dw_attr_ref attr = NULL; + dw_attr_node *attr = NULL; if (inlined_function_outer_scope_p (stmt)) { @@ -19805,7 +19793,7 @@ add_high_low_attributes (tree stmt, dw_die_ref die) BLOCK_SAME_RANGE (chain); chain = BLOCK_SUPERCONTEXT (chain)) { - dw_attr_ref new_attr; + dw_attr_node *new_attr; pdie = pdie->die_parent; if (pdie == NULL) @@ -20057,8 +20045,6 @@ gen_ptr_to_mbr_type_die (tree type, dw_die_ref context_die) context_die); } -typedef const char *dchar_p; /* For DEF_VEC_P. */ - static char *producer_string; /* Return a heap allocated producer string including command line options @@ -20068,7 +20054,7 @@ static char * gen_producer_string (void) { size_t j; - auto_vec<dchar_p> switches; + auto_vec<const char *> switches; const char *language_string = lang_hooks.name; char *producer, *tail; const char *p; @@ -22183,7 +22169,7 @@ static void dwarf2out_set_name (tree decl, tree name) { dw_die_ref die; - dw_attr_ref attr; + dw_attr_node *attr; const char *dname; die = TYPE_SYMTAB_DIE (decl); @@ -22443,7 +22429,7 @@ new_line_info_table (void) { dw_line_info_table *table; - table = ggc_cleared_alloc<dw_line_info_table_struct> (); + table = ggc_cleared_alloc<dw_line_info_table> (); table->file_num = 1; table->line_num = 1; table->is_stmt = DWARF_LINE_DEFAULT_IS_STMT_START; @@ -23554,7 +23540,7 @@ prune_unmark_dies (dw_die_ref die) static void prune_unused_types_walk_attribs (dw_die_ref die) { - dw_attr_ref a; + dw_attr_node *a; unsigned ix; FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a) @@ -23761,7 +23747,7 @@ prune_unused_types_walk (dw_die_ref 
die) static void prune_unused_types_update_strings (dw_die_ref die) { - dw_attr_ref a; + dw_attr_node *a; unsigned ix; FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a) @@ -23829,7 +23815,7 @@ prune_unused_types (void) unsigned int i; limbo_die_node *node; comdat_type_node *ctnode; - pubname_ref pub; + pubname_entry *pub; dw_die_ref base_type; #if ENABLE_ASSERT_CHECKING @@ -24440,7 +24426,7 @@ static void resolve_addr (dw_die_ref die) { dw_die_ref c; - dw_attr_ref a; + dw_attr_node *a; dw_loc_list_ref *curr, *start, loc; unsigned ix; @@ -25063,7 +25049,7 @@ static void optimize_location_lists_1 (dw_die_ref die, loc_list_hash_type *htab) { dw_die_ref c; - dw_attr_ref a; + dw_attr_node *a; unsigned ix; dw_loc_list_struct **slot; @@ -25092,7 +25078,7 @@ static void index_location_lists (dw_die_ref die) { dw_die_ref c; - dw_attr_ref a; + dw_attr_node *a; unsigned ix; FOR_EACH_VEC_SAFE_ELT (die->die_attr, ix, a) @@ -25202,7 +25188,7 @@ dwarf2out_finish (const char *filename) /* PCH might result in DW_AT_producer string being restored from the header compilation, so always fill it with empty string initially and overwrite only here. */ - dw_attr_ref producer = get_AT (comp_unit_die (), DW_AT_producer); + dw_attr_node *producer = get_AT (comp_unit_die (), DW_AT_producer); producer_string = gen_producer_string (); producer->dw_attr_val.v.val_str->refcount--; producer->dw_attr_val.v.val_str = find_AT_string (producer_string); diff --git a/gcc/et-forest.c b/gcc/et-forest.c index 1931285158a..4f919d45563 100644 --- a/gcc/et-forest.c +++ b/gcc/et-forest.c @@ -54,8 +54,8 @@ struct et_occ depth. */ }; -static object_allocator<et_node> et_nodes ("et_nodes pool", 300); -static object_allocator<et_occ> et_occurrences ("et_occ pool", 300); +static object_allocator<et_node> et_nodes ("et_nodes pool"); +static object_allocator<et_occ> et_occurrences ("et_occ pool"); /* Changes depth of OCC to D. 
*/ diff --git a/gcc/fold-const.c b/gcc/fold-const.c index e9366e2de6e..fd1c87ed5ad 100644 --- a/gcc/fold-const.c +++ b/gcc/fold-const.c @@ -6166,8 +6166,12 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type, && ((sign == UNSIGNED && tcode != MULT_EXPR) || sign == SIGNED)) overflow_p = true; if (!overflow_p) - return fold_build2 (tcode, ctype, fold_convert (ctype, op0), - wide_int_to_tree (ctype, mul)); + { + mul = wide_int::from (mul, TYPE_PRECISION (ctype), + TYPE_SIGN (TREE_TYPE (op1))); + return fold_build2 (tcode, ctype, fold_convert (ctype, op0), + wide_int_to_tree (ctype, mul)); + } } /* If these operations "cancel" each other, we have the main diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c index c79f9b3f436..ac9f46c860f 100644 --- a/gcc/gimple-fold.c +++ b/gcc/gimple-fold.c @@ -548,7 +548,7 @@ gimplify_and_update_call_from_tree (gimple_stmt_iterator *si_p, tree expr) unlink_stmt_vdef (stmt); release_defs (stmt); } - gsi_replace (si_p, gimple_build_nop (), true); + gsi_replace (si_p, gimple_build_nop (), false); return; } } @@ -589,7 +589,7 @@ replace_call_with_value (gimple_stmt_iterator *gsi, tree val) unlink_stmt_vdef (stmt); release_ssa_name (vdef); } - gsi_replace (gsi, repl, true); + gsi_replace (gsi, repl, false); } /* Replace the call at *GSI with the new call REPL and fold that @@ -608,7 +608,7 @@ replace_call_with_call_and_fold (gimple_stmt_iterator *gsi, gimple repl) gimple_set_vuse (repl, gimple_vuse (stmt)); SSA_NAME_DEF_STMT (gimple_vdef (repl)) = repl; } - gsi_replace (gsi, repl, true); + gsi_replace (gsi, repl, false); fold_stmt (gsi); } @@ -655,7 +655,7 @@ gimple_fold_builtin_memory_op (gimple_stmt_iterator *gsi, unlink_stmt_vdef (stmt); release_ssa_name (vdef); } - gsi_replace (gsi, repl, true); + gsi_replace (gsi, repl, false); return true; } @@ -668,7 +668,7 @@ gimple_fold_builtin_memory_op (gimple_stmt_iterator *gsi, release_ssa_name (gimple_vdef (stmt)); if (!lhs) { - gsi_replace (gsi, gimple_build_nop (), 
true); + gsi_replace (gsi, gimple_build_nop (), false); return true; } goto done; @@ -750,7 +750,7 @@ gimple_fold_builtin_memory_op (gimple_stmt_iterator *gsi, SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt; if (!lhs) { - gsi_replace (gsi, new_stmt, true); + gsi_replace (gsi, new_stmt, false); return true; } gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); @@ -1030,7 +1030,7 @@ gimple_fold_builtin_memory_op (gimple_stmt_iterator *gsi, SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt; if (!lhs) { - gsi_replace (gsi, new_stmt, true); + gsi_replace (gsi, new_stmt, false); return true; } gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); @@ -1048,7 +1048,7 @@ done: dest = force_gimple_operand_gsi (gsi, dest, false, NULL_TREE, true, GSI_SAME_STMT); gimple repl = gimple_build_assign (lhs, dest); - gsi_replace (gsi, repl, true); + gsi_replace (gsi, repl, false); return true; } @@ -1130,7 +1130,7 @@ gimple_fold_builtin_memset (gimple_stmt_iterator *gsi, tree c, tree len) if (gimple_call_lhs (stmt)) { gimple asgn = gimple_build_assign (gimple_call_lhs (stmt), dest); - gsi_replace (gsi, asgn, true); + gsi_replace (gsi, asgn, false); } else { @@ -1980,7 +1980,7 @@ gimple_fold_builtin_stpcpy (gimple_stmt_iterator *gsi) gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); gassign *ret = gimple_build_assign (gimple_call_lhs (stmt), POINTER_PLUS_EXPR, dest, tem); - gsi_replace (gsi, ret, true); + gsi_replace (gsi, ret, false); /* Finally fold the memcpy call. 
*/ gimple_stmt_iterator gsi2 = *gsi; gsi_prev (&gsi2); @@ -3062,7 +3062,7 @@ gimple_fold_call (gimple_stmt_iterator *gsi, bool inplace) && tree_int_cst_le (gimple_call_arg (stmt, 1), gimple_call_arg (stmt, 2)))) { - gsi_replace (gsi, gimple_build_nop (), true); + gsi_replace (gsi, gimple_build_nop (), false); unlink_stmt_vdef (stmt); release_defs (stmt); return true; diff --git a/gcc/go/gofrontend/MERGE b/gcc/go/gofrontend/MERGE index ef21b544a5f..bcaabac4b9c 100644 --- a/gcc/go/gofrontend/MERGE +++ b/gcc/go/gofrontend/MERGE @@ -1,4 +1,4 @@ -aea4360ca9c37f8e929f177ae7e42593ee62aa79 +01a574c1b2bb244be764b6a18aab980ca0aef43c The first line of this file holds the git revision number of the last merge done from the gofrontend repository. diff --git a/gcc/go/gofrontend/expressions.cc b/gcc/go/gofrontend/expressions.cc index dc37cf0b01e..1c329b83050 100644 --- a/gcc/go/gofrontend/expressions.cc +++ b/gcc/go/gofrontend/expressions.cc @@ -5307,6 +5307,14 @@ Binary_expression::do_determine_type(const Type_context* context) || this->op_ == OPERATOR_GT || this->op_ == OPERATOR_GE); + // For constant expressions, the context of the result is not useful in + // determining the types of the operands. It is only legal to use abstract + // boolean, numeric, and string constants as operands where it is legal to + // use non-abstract boolean, numeric, and string constants, respectively. + // Any issues with the operation will be resolved in the check_types pass. 
+ bool is_constant_expr = (this->left_->is_constant() + && this->right_->is_constant()); + Type_context subcontext(*context); if (is_comparison) @@ -5351,7 +5359,8 @@ Binary_expression::do_determine_type(const Type_context* context) subcontext.type = subcontext.type->make_non_abstract_type(); } - this->left_->determine_type(&subcontext); + if (!is_constant_expr) + this->left_->determine_type(&subcontext); if (is_shift_op) { @@ -5371,7 +5380,8 @@ Binary_expression::do_determine_type(const Type_context* context) subcontext.may_be_abstract = false; } - this->right_->determine_type(&subcontext); + if (!is_constant_expr) + this->right_->determine_type(&subcontext); if (is_comparison) { @@ -5396,7 +5406,8 @@ Binary_expression::check_operator_type(Operator op, Type* type, Type* otype, { case OPERATOR_OROR: case OPERATOR_ANDAND: - if (!type->is_boolean_type()) + if (!type->is_boolean_type() + || !otype->is_boolean_type()) { error_at(location, "expected boolean type"); return false; @@ -5431,10 +5442,8 @@ Binary_expression::check_operator_type(Operator op, Type* type, Type* otype, case OPERATOR_PLUS: case OPERATOR_PLUSEQ: - if (type->integer_type() == NULL - && type->float_type() == NULL - && type->complex_type() == NULL - && !type->is_string_type()) + if ((!type->is_numeric_type() && !type->is_string_type()) + || (!otype->is_numeric_type() && !otype->is_string_type())) { error_at(location, "expected integer, floating, complex, or string type"); @@ -5448,9 +5457,7 @@ Binary_expression::check_operator_type(Operator op, Type* type, Type* otype, case OPERATOR_MULTEQ: case OPERATOR_DIV: case OPERATOR_DIVEQ: - if (type->integer_type() == NULL - && type->float_type() == NULL - && type->complex_type() == NULL) + if (!type->is_numeric_type() || !otype->is_numeric_type()) { error_at(location, "expected integer, floating, or complex type"); return false; @@ -5467,7 +5474,7 @@ Binary_expression::check_operator_type(Operator op, Type* type, Type* otype, case OPERATOR_XOREQ: case 
OPERATOR_BITCLEAR: case OPERATOR_BITCLEAREQ: - if (type->integer_type() == NULL) + if (type->integer_type() == NULL || otype->integer_type() == NULL) { error_at(location, "expected integer type"); return false; @@ -6878,11 +6885,6 @@ Builtin_call_expression::do_flatten(Gogo*, Named_object*, Statement_inserter* inserter) { Location loc = this->location(); - if (this->is_erroneous_call()) - { - go_assert(saw_errors()); - return Expression::make_error(loc); - } switch (this->code_) { @@ -8057,6 +8059,13 @@ Builtin_call_expression::do_get_backend(Translate_context* context) { Gogo* gogo = context->gogo(); Location location = this->location(); + + if (this->is_erroneous_call()) + { + go_assert(saw_errors()); + return gogo->backend()->error_expression(); + } + switch (this->code_) { case BUILTIN_INVALID: diff --git a/gcc/input.c b/gcc/input.c index 59cab5cebef..e7302a42589 100644 --- a/gcc/input.c +++ b/gcc/input.c @@ -684,27 +684,27 @@ read_line_num (fcache *c, size_t line_num, return read_next_line (c, line, line_len); } -/* Return the physical source line that corresponds to xloc in a +/* Return the physical source line that corresponds to FILE_PATH/LINE in a buffer that is statically allocated. The newline is replaced by the null character. Note that the line can contain several null characters, so LINE_LEN, if non-null, points to the actual length of the line. 
*/ const char * -location_get_source_line (expanded_location xloc, +location_get_source_line (const char *file_path, int line, int *line_len) { static char *buffer; static ssize_t len; - if (xloc.line == 0) + if (line == 0) return NULL; - fcache *c = lookup_or_add_file_to_cache_tab (xloc.file); + fcache *c = lookup_or_add_file_to_cache_tab (file_path); if (c == NULL) return NULL; - bool read = read_line_num (c, xloc.line, &buffer, &len); + bool read = read_line_num (c, line, &buffer, &len); if (read && line_len) *line_len = len; @@ -971,7 +971,9 @@ dump_location_info (FILE *stream) /* Beginning of a new source line: draw the line. */ int line_size; - const char *line_text = location_get_source_line (exploc, &line_size); + const char *line_text = location_get_source_line (exploc.file, + exploc.line, + &line_size); if (!line_text) break; fprintf (stream, diff --git a/gcc/input.h b/gcc/input.h index 5ba4d3b46f5..07d8544ed6f 100644 --- a/gcc/input.h +++ b/gcc/input.h @@ -38,7 +38,7 @@ extern char builtins_location_check[(BUILTINS_LOCATION extern bool is_location_from_builtin_token (source_location); extern expanded_location expand_location (source_location); -extern const char *location_get_source_line (expanded_location xloc, +extern const char *location_get_source_line (const char *file_path, int line, int *line_size); extern expanded_location expand_location_to_spelling_point (source_location); extern source_location expansion_point_location_if_in_system_header (source_location); diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c index 8de7e56847e..69a181d2004 100644 --- a/gcc/ipa-cp.c +++ b/gcc/ipa-cp.c @@ -276,16 +276,16 @@ public: /* Allocation pools for values and their sources in ipa-cp. 
*/ object_allocator<ipcp_value<tree> > ipcp_cst_values_pool - ("IPA-CP constant values", 32); + ("IPA-CP constant values"); object_allocator<ipcp_value<ipa_polymorphic_call_context> > - ipcp_poly_ctx_values_pool ("IPA-CP polymorphic contexts", 32); + ipcp_poly_ctx_values_pool ("IPA-CP polymorphic contexts"); object_allocator<ipcp_value_source<tree> > ipcp_sources_pool - ("IPA-CP value sources", 64); + ("IPA-CP value sources"); object_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool - ("IPA_CP aggregate lattices", 32); + ("IPA_CP aggregate lattices"); /* Maximal count found in program. */ diff --git a/gcc/ipa-inline-analysis.c b/gcc/ipa-inline-analysis.c index 3a8f0eced9d..4822329bfca 100644 --- a/gcc/ipa-inline-analysis.c +++ b/gcc/ipa-inline-analysis.c @@ -143,7 +143,7 @@ vec<inline_edge_summary_t> inline_edge_summary_vec; vec<edge_growth_cache_entry> edge_growth_cache; /* Edge predicates goes here. */ -static object_allocator<predicate> edge_predicate_pool ("edge predicates", 10); +static object_allocator<predicate> edge_predicate_pool ("edge predicates"); /* Return true predicate (tautology). We represent it by empty list of clauses. */ diff --git a/gcc/ipa-profile.c b/gcc/ipa-profile.c index 1b929c6d8ef..382897cf1a7 100644 --- a/gcc/ipa-profile.c +++ b/gcc/ipa-profile.c @@ -87,8 +87,7 @@ struct histogram_entry duplicate entries. */ vec<histogram_entry *> histogram; -static object_allocator<histogram_entry> histogram_pool - ("IPA histogram", 10); +static object_allocator<histogram_entry> histogram_pool ("IPA histogram"); /* Hashtable support for storing SSA names hashed by their SSA_NAME_VAR. */ diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c index c862cfff8d9..8e0f182ea62 100644 --- a/gcc/ipa-prop.c +++ b/gcc/ipa-prop.c @@ -95,7 +95,7 @@ struct ipa_cst_ref_desc /* Allocation pool for reference descriptions. 
*/ static object_allocator<ipa_cst_ref_desc> ipa_refdesc_pool - ("IPA-PROP ref descriptions", 32); + ("IPA-PROP ref descriptions"); /* Return true if DECL_FUNCTION_SPECIFIC_OPTIMIZATION of the decl associated with NODE should prevent us from analyzing it for the purposes of IPA-CP. */ diff --git a/gcc/ira-build.c b/gcc/ira-build.c index 9f0d7db828a..f49591c6e53 100644 --- a/gcc/ira-build.c +++ b/gcc/ira-build.c @@ -420,9 +420,9 @@ rebuild_regno_allocno_maps (void) /* Pools for allocnos, allocno live ranges and objects. */ -static object_allocator<live_range> live_range_pool ("live ranges", 100); -static object_allocator<ira_allocno> allocno_pool ("allocnos", 100); -static object_allocator<ira_object> object_pool ("objects", 100); +static object_allocator<live_range> live_range_pool ("live ranges"); +static object_allocator<ira_allocno> allocno_pool ("allocnos"); +static object_allocator<ira_object> object_pool ("objects"); /* Vec containing references to all created allocnos. It is a container of array allocnos. */ @@ -1170,7 +1170,7 @@ finish_allocnos (void) /* Pools for allocno preferences. */ -static object_allocator <ira_allocno_pref> pref_pool ("prefs", 100); +static object_allocator <ira_allocno_pref> pref_pool ("prefs"); /* Vec containing references to all created preferences. It is a container of array ira_prefs. */ @@ -1357,7 +1357,7 @@ finish_prefs (void) /* Pools for copies. */ -static object_allocator<ira_allocno_copy> copy_pool ("copies", 100); +static object_allocator<ira_allocno_copy> copy_pool ("copies"); /* Vec containing references to all created copies. It is a container of array ira_copies. 
*/ @@ -1630,8 +1630,7 @@ initiate_cost_vectors (void) { aclass = ira_allocno_classes[i]; cost_vector_pool[aclass] = new pool_allocator - ("cost vectors", 100, - sizeof (int) * (ira_class_hard_regs_num[aclass])); + ("cost vectors", sizeof (int) * (ira_class_hard_regs_num[aclass])); } } diff --git a/gcc/ira-color.c b/gcc/ira-color.c index 74d2c2ed608..e6533c6654d 100644 --- a/gcc/ira-color.c +++ b/gcc/ira-color.c @@ -1157,7 +1157,7 @@ setup_profitable_hard_regs (void) /* Pool for update cost records. */ static object_allocator<update_cost_record> update_cost_record_pool - ("update cost records", 100); + ("update cost records"); /* Return new update cost record with given params. */ static struct update_cost_record * diff --git a/gcc/lra-lives.c b/gcc/lra-lives.c index 1da5204da45..253bc181827 100644 --- a/gcc/lra-lives.c +++ b/gcc/lra-lives.c @@ -107,8 +107,7 @@ static sparseset unused_set, dead_set; static bitmap_head temp_bitmap; /* Pool for pseudo live ranges. */ -static object_allocator<lra_live_range> lra_live_range_pool - ("live ranges", 100); +static object_allocator<lra_live_range> lra_live_range_pool ("live ranges"); /* Free live range list LR. */ static void diff --git a/gcc/lra.c b/gcc/lra.c index a836cab630f..bdbfe513553 100644 --- a/gcc/lra.c +++ b/gcc/lra.c @@ -533,7 +533,7 @@ lra_update_dups (lra_insn_recog_data_t id, signed char *nops) insns. */ /* Pools for insn reg info. */ -object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs", 100); +object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs"); /* Create LRA insn related info about a reference to REGNO in INSN with TYPE (in/out/inout), biggest reference mode MODE, flag that it is @@ -744,7 +744,7 @@ free_insn_recog_data (lra_insn_recog_data_t data) } /* Pools for copies. */ -static object_allocator<lra_copy> lra_copy_pool ("lra copies", 100); +static object_allocator<lra_copy> lra_copy_pool ("lra copies"); /* Finish LRA data about all insns. 
*/ static void diff --git a/gcc/lto-streamer-out.c b/gcc/lto-streamer-out.c index b6bc515201d..d8a7ced5142 100644 --- a/gcc/lto-streamer-out.c +++ b/gcc/lto-streamer-out.c @@ -66,6 +66,7 @@ clear_line_info (struct output_block *ob) ob->current_file = NULL; ob->current_line = 0; ob->current_col = 0; + ob->current_sysp = false; } diff --git a/gcc/lto-streamer.h b/gcc/lto-streamer.h index 21c41c5a621..5aae9e9bfa7 100644 --- a/gcc/lto-streamer.h +++ b/gcc/lto-streamer.h @@ -320,7 +320,7 @@ public: struct data_in *data_in); lto_location_cache () : loc_cache (), accepted_length (0), current_file (NULL), current_line (0), - current_col (0), current_loc (UNKNOWN_LOCATION) + current_col (0), current_sysp (false), current_loc (UNKNOWN_LOCATION) { gcc_assert (!current_cache); current_cache = this; diff --git a/gcc/lto-wrapper.c b/gcc/lto-wrapper.c index d59bd8c6635..150d36845d6 100644 --- a/gcc/lto-wrapper.c +++ b/gcc/lto-wrapper.c @@ -232,6 +232,10 @@ merge_and_complain (struct cl_decoded_option **decoded_options, break; /* Fallthru. */ + case OPT_fdiagnostics_show_caret: + case OPT_fdiagnostics_show_option: + case OPT_fdiagnostics_show_location_: + case OPT_fshow_column: case OPT_fPIC: case OPT_fpic: case OPT_fPIE: @@ -479,6 +483,10 @@ append_compiler_options (obstack *argv_obstack, struct cl_decoded_option *opts, on any CL_TARGET flag and a few selected others. */ switch (option->opt_index) { + case OPT_fdiagnostics_show_caret: + case OPT_fdiagnostics_show_option: + case OPT_fdiagnostics_show_location_: + case OPT_fshow_column: case OPT_fPIC: case OPT_fpic: case OPT_fPIE: diff --git a/gcc/memory-block.cc b/gcc/memory-block.cc new file mode 100644 index 00000000000..8470c7adf73 --- /dev/null +++ b/gcc/memory-block.cc @@ -0,0 +1,64 @@ +/* Shared pool of memory blocks for pool allocators. + Copyright (C) 2015 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "memory-block.h" +#include "obstack.h" + +/* Global singleton-like instance. */ +memory_block_pool memory_block_pool::instance; + +memory_block_pool::memory_block_pool () : m_blocks (NULL) {} + +/* Return all blocks from free list to the OS. */ +void +memory_block_pool::clear_free_list () +{ + while (m_blocks) + { + block_list *next = m_blocks->m_next; + XDELETEVEC (m_blocks); + m_blocks = next; + } +} + +/* Allocate a chunk for obstack. Use the pool if requested chunk size matches + the size of blocks in the pool. */ +void * +mempool_obstack_chunk_alloc (size_t size) +{ + if (size == memory_block_pool::block_size) + return memory_block_pool::allocate (); + else + return XNEWVEC (char, size); +} + +/* Free previously allocated obstack chunk. */ +void +mempool_obstack_chunk_free (void *chunk) +{ + size_t size = (reinterpret_cast<_obstack_chunk *> (chunk)->limit + - reinterpret_cast<char *> (chunk)); + if (size == memory_block_pool::block_size) + memory_block_pool::release (chunk); + else + XDELETEVEC (chunk); +} diff --git a/gcc/memory-block.h b/gcc/memory-block.h new file mode 100644 index 00000000000..1a495eaea0e --- /dev/null +++ b/gcc/memory-block.h @@ -0,0 +1,75 @@ +/* Shared pool of memory blocks for pool allocators. + Copyright (C) 2015 Free Software Foundation, Inc. 
+ +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + + +#ifndef MEMORY_BLOCK_H +#define MEMORY_BLOCK_H + +/* Shared pool which allows other memory pools to reuse each others' allocated + memory blocks instead of calling free/malloc again. */ +class memory_block_pool +{ +public: + /* Blocks have fixed size. This is necessary for sharing. */ + static const size_t block_size = 64 * 1024; + + memory_block_pool (); + + static inline void *allocate () ATTRIBUTE_MALLOC; + static inline void release (void *); + void clear_free_list (); + +private: + /* memory_block_pool singleton instance, defined in memory-block.cc. */ + static memory_block_pool instance; + + struct block_list + { + block_list *m_next; + }; + + /* Free list. */ + block_list *m_blocks; +}; + +/* Allocate a single block. Reuse a previously returned block, if possible. */ +inline void * +memory_block_pool::allocate () +{ + if (instance.m_blocks == NULL) + return XNEWVEC (char, block_size); + + void *result = instance.m_blocks; + instance.m_blocks = instance.m_blocks->m_next; + return result; +} + +/* Return UNCAST_BLOCK to the pool. 
*/ +inline void +memory_block_pool::release (void *uncast_block) +{ + block_list *block = new (uncast_block) block_list; + block->m_next = instance.m_blocks; + instance.m_blocks = block; +} + +extern void *mempool_obstack_chunk_alloc (size_t) ATTRIBUTE_MALLOC; +extern void mempool_obstack_chunk_free (void *); + +#endif /* MEMORY_BLOCK_H */ diff --git a/gcc/optabs.c b/gcc/optabs.c index 79c6f06b991..c2a9b1ce9ef 100644 --- a/gcc/optabs.c +++ b/gcc/optabs.c @@ -7521,9 +7521,10 @@ expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval, if (libfunc != NULL) { rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0)); - target_oval = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL, - mode, 3, addr, ptr_mode, - expected, mode, desired, mode); + rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL, + mode, 3, addr, ptr_mode, + expected, mode, desired, mode); + emit_move_insn (target_oval, target); /* Compute the boolean return value only if requested. */ if (ptarget_bool) diff --git a/gcc/regcprop.c b/gcc/regcprop.c index 97433f04eaf..6f7d01e6af4 100644 --- a/gcc/regcprop.c +++ b/gcc/regcprop.c @@ -75,7 +75,7 @@ struct value_data }; static object_allocator<queued_debug_insn_change> queued_debug_insn_change_pool - ("debug insn changes pool", 256); + ("debug insn changes pool"); static bool skip_debug_insn_p; diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c index ef98f4bfb5c..b1be4db0dc1 100644 --- a/gcc/rtlanal.c +++ b/gcc/rtlanal.c @@ -5805,9 +5805,9 @@ split_double (rtx value, rtx *first, rtx *second) if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32) { if (l[0] & ((long) 1 << 31)) - l[0] |= ((long) (-1) << 32); + l[0] |= ((unsigned long) (-1) << 32); if (l[1] & ((long) 1 << 31)) - l[1] |= ((long) (-1) << 32); + l[1] |= ((unsigned long) (-1) << 32); } #endif diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c index 8a40eea4f99..9683055a857 100644 --- a/gcc/sched-deps.c +++ b/gcc/sched-deps.c @@ -4059,14 +4059,10 @@ sched_deps_init 
(bool global_p) if (global_p) { - dl_pool = new object_allocator<_deps_list> ("deps_list", - /* Allocate lists for one block at a time. */ - insns_in_block); - dn_pool = new object_allocator<_dep_node> ("dep_node", - /* Allocate nodes for one block at a time. - We assume that average insn has - 5 producers. */ - 5 * insns_in_block); + dl_pool = new object_allocator<_deps_list> ("deps_list"); + /* Allocate lists for one block at a time. */ + dn_pool = new object_allocator<_dep_node> ("dep_node"); + /* Allocate nodes for one block at a time. */ } } diff --git a/gcc/sched-int.h b/gcc/sched-int.h index 61825ce5689..800262c4a4a 100644 --- a/gcc/sched-int.h +++ b/gcc/sched-int.h @@ -240,7 +240,7 @@ struct _dep int cost:20; }; -#define UNKNOWN_DEP_COST (-1<<19) +#define UNKNOWN_DEP_COST ((int) ((unsigned int) -1 << 19)) typedef struct _dep dep_def; typedef dep_def *dep_t; diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c index 998828593e0..8ea4dce8bc7 100644 --- a/gcc/sel-sched-ir.c +++ b/gcc/sel-sched-ir.c @@ -59,7 +59,7 @@ vec<sel_region_bb_info_def> sel_region_bb_info = vNULL; /* A pool for allocating all lists. */ -object_allocator<_list_node> sched_lists_pool ("sel-sched-lists", 500); +object_allocator<_list_node> sched_lists_pool ("sel-sched-lists"); /* This contains information about successors for compute_av_set. */ struct succs_info current_succs; diff --git a/gcc/shrink-wrap.c b/gcc/shrink-wrap.c index d10795a0b3a..138759432ee 100644 --- a/gcc/shrink-wrap.c +++ b/gcc/shrink-wrap.c @@ -91,8 +91,7 @@ requires_stack_frame_p (rtx_insn *insn, HARD_REG_SET prologue_used, if (!REG_P (dreg)) continue; - add_to_hard_reg_set (&hardregs, GET_MODE (dreg), - REGNO (dreg)); + add_to_hard_reg_set (&hardregs, GET_MODE (dreg), REGNO (dreg)); } if (hard_reg_set_intersect_p (hardregs, prologue_used)) return true; @@ -463,414 +462,469 @@ prepare_shrink_wrap (basic_block entry_block) } } -/* Create a copy of BB instructions and insert at BEFORE. 
Redirect - preds of BB to COPY_BB if they don't appear in NEED_PROLOGUE. */ -static void -dup_block_and_redirect (basic_block bb, basic_block copy_bb, rtx_insn *before, - bitmap_head *need_prologue) +/* Return whether we can duplicate basic block BB for shrink wrapping. We + cannot if the block cannot be duplicated at all, or if any of its incoming + edges are complex and come from a block that does not require a prologue + (we cannot redirect such edges), or if the block is too big to copy. + PRO is the basic block before which we would put the prologue, MAX_SIZE is + the maximum size block we allow to be copied. */ + +static bool +can_dup_for_shrink_wrapping (basic_block bb, basic_block pro, unsigned max_size) { - edge_iterator ei; - edge e; - rtx_insn *insn = BB_END (bb); + if (!can_duplicate_block_p (bb)) + return false; - /* We know BB has a single successor, so there is no need to copy a - simple jump at the end of BB. */ - if (simplejump_p (insn)) - insn = PREV_INSN (insn); + edge e; + edge_iterator ei; + FOR_EACH_EDGE (e, ei, bb->preds) + if (e->flags & EDGE_COMPLEX + && !dominated_by_p (CDI_DOMINATORS, e->src, pro)) + return false; - start_sequence (); - duplicate_insn_chain (BB_HEAD (bb), insn); - if (dump_file) - { - unsigned count = 0; - for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) - if (active_insn_p (insn)) - ++count; - fprintf (dump_file, "Duplicating bb %d to bb %d, %u active insns.\n", - bb->index, copy_bb->index, count); - } - insn = get_insns (); - end_sequence (); - emit_insn_before (insn, before); + unsigned size = 0; - /* Redirect all the paths that need no prologue into copy_bb. 
*/ - for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei));) - if (!bitmap_bit_p (need_prologue, e->src->index)) + rtx_insn *insn; + FOR_BB_INSNS (bb, insn) + if (NONDEBUG_INSN_P (insn)) { - int freq = EDGE_FREQUENCY (e); - copy_bb->count += e->count; - copy_bb->frequency += EDGE_FREQUENCY (e); - e->dest->count -= e->count; - if (e->dest->count < 0) - e->dest->count = 0; - e->dest->frequency -= freq; - if (e->dest->frequency < 0) - e->dest->frequency = 0; - redirect_edge_and_branch_force (e, copy_bb); - continue; + size += get_attr_min_length (insn); + if (size > max_size) + return false; } - else - ei_next (&ei); + + return true; } +/* If the source of edge E has more than one successor, the verifier for + branch probabilities gets confused by the fake edges we make where + simple_return statements will be inserted later (because those are not + marked as fallthrough edges). Fix this by creating an extra block just + for that fallthrough. */ + +static edge +fix_fake_fallthrough_edge (edge e) +{ + if (EDGE_COUNT (e->src->succs) <= 1) + return e; + + basic_block old_bb = e->src; + rtx_insn *end = BB_END (old_bb); + rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end); + basic_block new_bb = create_basic_block (note, note, old_bb); + BB_COPY_PARTITION (new_bb, old_bb); + BB_END (old_bb) = end; + + redirect_edge_succ (e, new_bb); + e->flags |= EDGE_FALLTHRU; + e->flags &= ~EDGE_FAKE; + + return make_edge (new_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE); +} /* Try to perform a kind of shrink-wrapping, making sure the prologue/epilogue is emitted only around those parts of the - function that require it. */ + function that require it. + + There will be exactly one prologue, and it will be executed either + zero or one time, on any path. Depending on where the prologue is + placed, some of the basic blocks can be reached via both paths with + and without a prologue. Such blocks will be duplicated here, and the + edges changed to match. 
+ + Paths that go to the exit without going through the prologue will use + a simple_return instead of the epilogue. We maximize the number of + those, making sure to only duplicate blocks that can be duplicated. + If the prologue can then still be placed in multiple locations, we + place it as early as possible. + + An example, where we duplicate blocks with control flow (legend: + _B_egin, _R_eturn and _S_imple_return; edges without arrowhead should + be taken to point down or to the right, to simplify the diagram; here, + block 3 needs a prologue, the rest does not): + + + B B + | | + 2 2 + |\ |\ + | 3 becomes | 3 + |/ | \ + 4 7 4 + |\ |\ |\ + | 5 | 8 | 5 + |/ |/ |/ + 6 9 6 + | | | + R S R + + + (bb 4 is duplicated to 7, and so on; the prologue is inserted on the + edge 2->3). + + Another example, where part of a loop is duplicated (again, bb 3 is + the only block that needs a prologue): + + + B 3<-- B ->3<-- + | | | | | | | + | v | becomes | | v | + 2---4--- 2---5-- 4--- + | | | + R S R + + + (bb 4 is duplicated to 5; the prologue is inserted on the edge 5->3). + + ENTRY_EDGE is the edge where the prologue will be placed, possibly + changed by this function. ORIG_ENTRY_EDGE is the edge where it + would be placed without shrink-wrapping. BB_WITH is a bitmap that, + if we do shrink-wrap, will on return contain the interesting blocks + that run with prologue. PROLOGUE_SEQ is the prologue we will insert. */ void try_shrink_wrapping (edge *entry_edge, edge orig_entry_edge, - bitmap_head *bb_flags, rtx_insn *prologue_seq) + bitmap_head *bb_with, rtx_insn *prologue_seq) { - edge e; - edge_iterator ei; - bool nonempty_prologue = false; - unsigned max_grow_size; - rtx_insn *seq; + /* If we cannot shrink-wrap, are told not to shrink-wrap, or it makes + no sense to shrink-wrap: then do not shrink-wrap! 
*/ + + if (!SHRINK_WRAPPING_ENABLED) + return; + + if (crtl->profile && !targetm.profile_before_prologue ()) + return; - for (seq = prologue_seq; seq; seq = NEXT_INSN (seq)) - if (!NOTE_P (seq) || NOTE_KIND (seq) != NOTE_INSN_PROLOGUE_END) + if (crtl->calls_eh_return) + return; + + bool empty_prologue = true; + for (rtx_insn *insn = prologue_seq; insn; insn = NEXT_INSN (insn)) + if (!(NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_PROLOGUE_END)) { - nonempty_prologue = true; + empty_prologue = false; break; } + if (empty_prologue) + return; + + /* Move some code down to expose more shrink-wrapping opportunities. */ + + basic_block entry = (*entry_edge)->dest; + prepare_shrink_wrap (entry); + + if (dump_file) + fprintf (dump_file, "Attempting shrink-wrapping optimization.\n"); + + /* Compute the registers set and used in the prologue. */ + + HARD_REG_SET prologue_clobbered, prologue_used; + CLEAR_HARD_REG_SET (prologue_clobbered); + CLEAR_HARD_REG_SET (prologue_used); + for (rtx_insn *insn = prologue_seq; insn; insn = NEXT_INSN (insn)) + if (NONDEBUG_INSN_P (insn)) + { + HARD_REG_SET this_used; + CLEAR_HARD_REG_SET (this_used); + note_uses (&PATTERN (insn), record_hard_reg_uses, &this_used); + AND_COMPL_HARD_REG_SET (this_used, prologue_clobbered); + IOR_HARD_REG_SET (prologue_used, this_used); + note_stores (PATTERN (insn), record_hard_reg_sets, &prologue_clobbered); + } - if (SHRINK_WRAPPING_ENABLED - && (targetm.profile_before_prologue () || !crtl->profile) - && nonempty_prologue && !crtl->calls_eh_return) + /* Find out what registers are set up by the prologue; any use of these + cannot happen before the prologue. 
*/ + + struct hard_reg_set_container set_up_by_prologue; + CLEAR_HARD_REG_SET (set_up_by_prologue.set); + add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, STACK_POINTER_REGNUM); + add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, ARG_POINTER_REGNUM); + if (frame_pointer_needed) + add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, + HARD_FRAME_POINTER_REGNUM); + if (pic_offset_table_rtx + && (unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM) + add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, + PIC_OFFSET_TABLE_REGNUM); + if (crtl->drap_reg) + add_to_hard_reg_set (&set_up_by_prologue.set, + GET_MODE (crtl->drap_reg), + REGNO (crtl->drap_reg)); + if (targetm.set_up_by_prologue) + targetm.set_up_by_prologue (&set_up_by_prologue); + + /* We will insert the prologue before the basic block PRO. PRO should + dominate all basic blocks that need the prologue to be executed + before them. First, make PRO the "tightest wrap" possible. */ + + calculate_dominance_info (CDI_DOMINATORS); + + basic_block pro = 0; + + basic_block bb; + edge e; + edge_iterator ei; + FOR_EACH_BB_FN (bb, cfun) { - HARD_REG_SET prologue_clobbered, prologue_used, live_on_edge; - struct hard_reg_set_container set_up_by_prologue; - rtx_insn *p_insn; - vec<basic_block> vec; - basic_block bb; - bitmap_head bb_antic_flags; - bitmap_head bb_on_list; - bitmap_head bb_tail; + rtx_insn *insn; + FOR_BB_INSNS (bb, insn) + if (NONDEBUG_INSN_P (insn) + && requires_stack_frame_p (insn, prologue_used, + set_up_by_prologue.set)) + { + if (dump_file) + fprintf (dump_file, "Block %d needs the prologue.\n", bb->index); + pro = nearest_common_dominator (CDI_DOMINATORS, pro, bb); + break; + } + } + /* If nothing needs a prologue, just put it at the start. This really + shouldn't happen, but we cannot fix it here. 
*/ + + if (pro == 0) + { if (dump_file) - fprintf (dump_file, "Attempting shrink-wrapping optimization.\n"); + fprintf(dump_file, "Nothing needs a prologue, but it isn't empty; " + "putting it at the start.\n"); + pro = entry; + } - /* Compute the registers set and used in the prologue. */ - CLEAR_HARD_REG_SET (prologue_clobbered); - CLEAR_HARD_REG_SET (prologue_used); - for (p_insn = prologue_seq; p_insn; p_insn = NEXT_INSN (p_insn)) - { - HARD_REG_SET this_used; - if (!NONDEBUG_INSN_P (p_insn)) - continue; - - CLEAR_HARD_REG_SET (this_used); - note_uses (&PATTERN (p_insn), record_hard_reg_uses, - &this_used); - AND_COMPL_HARD_REG_SET (this_used, prologue_clobbered); - IOR_HARD_REG_SET (prologue_used, this_used); - note_stores (PATTERN (p_insn), record_hard_reg_sets, - &prologue_clobbered); - } + if (dump_file) + fprintf (dump_file, "After wrapping required blocks, PRO is now %d\n", + pro->index); - prepare_shrink_wrap ((*entry_edge)->dest); - - bitmap_initialize (&bb_antic_flags, &bitmap_default_obstack); - bitmap_initialize (&bb_on_list, &bitmap_default_obstack); - bitmap_initialize (&bb_tail, &bitmap_default_obstack); - - /* Find the set of basic blocks that require a stack frame, - and blocks that are too big to be duplicated. 
*/ - - vec.create (n_basic_blocks_for_fn (cfun)); - - CLEAR_HARD_REG_SET (set_up_by_prologue.set); - add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, - STACK_POINTER_REGNUM); - add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, ARG_POINTER_REGNUM); - if (frame_pointer_needed) - add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, - HARD_FRAME_POINTER_REGNUM); - if (pic_offset_table_rtx - && (unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM) - add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, - PIC_OFFSET_TABLE_REGNUM); - if (crtl->drap_reg) - add_to_hard_reg_set (&set_up_by_prologue.set, - GET_MODE (crtl->drap_reg), - REGNO (crtl->drap_reg)); - if (targetm.set_up_by_prologue) - targetm.set_up_by_prologue (&set_up_by_prologue); - - /* We don't use a different max size depending on - optimize_bb_for_speed_p because increasing shrink-wrapping - opportunities by duplicating tail blocks can actually result - in an overall decrease in code size. */ - max_grow_size = get_uncond_jump_length (); - max_grow_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS); - - FOR_EACH_BB_FN (bb, cfun) - { - rtx_insn *insn; - unsigned size = 0; + /* Now see if we can put the prologue at the start of PRO. Putting it + there might require duplicating a block that cannot be duplicated; + if so, try again with the immediate dominator of PRO, and so on. - FOR_BB_INSNS (bb, insn) - if (NONDEBUG_INSN_P (insn)) - { - if (requires_stack_frame_p (insn, prologue_used, - set_up_by_prologue.set)) - { - if (bb == (*entry_edge)->dest) - goto fail_shrinkwrap; - bitmap_set_bit (bb_flags, bb->index); - vec.quick_push (bb); - break; - } - else if (size <= max_grow_size) - { - size += get_attr_min_length (insn); - if (size > max_grow_size) - bitmap_set_bit (&bb_on_list, bb->index); - } - } - } + The blocks that need duplicating are those reachable from PRO but + not dominated by it. 
We keep in BB_WITH a bitmap of the blocks + reachable from PRO that we already found, and in VEC a stack of + those we still need to consider (to find successors). */ - /* Blocks that really need a prologue, or are too big for tails. */ - bitmap_ior_into (&bb_on_list, bb_flags); + bitmap_set_bit (bb_with, pro->index); - /* For every basic block that needs a prologue, mark all blocks - reachable from it, so as to ensure they are also seen as - requiring a prologue. */ - while (!vec.is_empty ()) - { - basic_block tmp_bb = vec.pop (); + vec<basic_block> vec; + vec.create (n_basic_blocks_for_fn (cfun)); + vec.quick_push (pro); - FOR_EACH_EDGE (e, ei, tmp_bb->succs) - if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun) - && bitmap_set_bit (bb_flags, e->dest->index)) - vec.quick_push (e->dest); - } + unsigned max_grow_size = get_uncond_jump_length (); + max_grow_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS); - /* Find the set of basic blocks that need no prologue, have a - single successor, can be duplicated, meet a max size - requirement, and go to the exit via like blocks. */ - vec.quick_push (EXIT_BLOCK_PTR_FOR_FN (cfun)); - while (!vec.is_empty ()) - { - basic_block tmp_bb = vec.pop (); + while (!vec.is_empty () && pro != entry) + { + basic_block bb = vec.pop (); + if (!can_dup_for_shrink_wrapping (bb, pro, max_grow_size)) + while (!dominated_by_p (CDI_DOMINATORS, bb, pro)) + { + gcc_assert (pro != entry); - FOR_EACH_EDGE (e, ei, tmp_bb->preds) - if (single_succ_p (e->src) - && !bitmap_bit_p (&bb_on_list, e->src->index) - && can_duplicate_block_p (e->src)) - { - edge pe; - edge_iterator pei; - - /* If there is predecessor of e->src which doesn't - need prologue and the edge is complex, - we might not be able to redirect the branch - to a copy of e->src. 
*/ - FOR_EACH_EDGE (pe, pei, e->src->preds) - if ((pe->flags & EDGE_COMPLEX) != 0 - && !bitmap_bit_p (bb_flags, pe->src->index)) - break; - if (pe == NULL && bitmap_set_bit (&bb_tail, e->src->index)) - vec.quick_push (e->src); - } - } + pro = get_immediate_dominator (CDI_DOMINATORS, pro); + + bitmap_set_bit (bb_with, pro->index); + vec.quick_push (pro); + } + + FOR_EACH_EDGE (e, ei, bb->succs) + if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun) + && bitmap_set_bit (bb_with, e->dest->index)) + vec.quick_push (e->dest); + } + + vec.release (); + + if (dump_file) + fprintf (dump_file, "Avoiding non-duplicatable blocks, PRO is now %d\n", + pro->index); + + /* If we can move PRO back without having to duplicate more blocks, do so. + We can move back to a block PRE if every path from PRE will eventually + need a prologue, that is, PRO is a post-dominator of PRE. */ + + if (pro != entry) + { + calculate_dominance_info (CDI_POST_DOMINATORS); - /* Now walk backwards from every block that is marked as needing - a prologue to compute the bb_antic_flags bitmap. Exclude - tail blocks; They can be duplicated to be used on paths not - needing a prologue. 
*/ - bitmap_clear (&bb_on_list); - bitmap_and_compl (&bb_antic_flags, bb_flags, &bb_tail); - FOR_EACH_BB_FN (bb, cfun) + while (pro != entry) { - if (!bitmap_bit_p (&bb_antic_flags, bb->index)) - continue; - FOR_EACH_EDGE (e, ei, bb->preds) - if (!bitmap_bit_p (&bb_antic_flags, e->src->index) - && bitmap_set_bit (&bb_on_list, e->src->index)) - vec.quick_push (e->src); + basic_block pre = get_immediate_dominator (CDI_DOMINATORS, pro); + if (dominated_by_p (CDI_POST_DOMINATORS, pre, pro)) + pro = pre; + else + break; } - while (!vec.is_empty ()) - { - basic_block tmp_bb = vec.pop (); - bool all_set = true; - bitmap_clear_bit (&bb_on_list, tmp_bb->index); - FOR_EACH_EDGE (e, ei, tmp_bb->succs) - if (!bitmap_bit_p (&bb_antic_flags, e->dest->index)) - { - all_set = false; - break; - } + free_dominance_info (CDI_POST_DOMINATORS); + } - if (all_set) - { - bitmap_set_bit (&bb_antic_flags, tmp_bb->index); - FOR_EACH_EDGE (e, ei, tmp_bb->preds) - if (!bitmap_bit_p (&bb_antic_flags, e->src->index) - && bitmap_set_bit (&bb_on_list, e->src->index)) - vec.quick_push (e->src); - } - } - /* Find exactly one edge that leads to a block in ANTIC from - a block that isn't. */ - if (!bitmap_bit_p (&bb_antic_flags, (*entry_edge)->dest->index)) - FOR_EACH_BB_FN (bb, cfun) + if (dump_file) + fprintf (dump_file, "Bumping back to anticipatable blocks, PRO is now %d\n", + pro->index); + + /* If there is more than one predecessor of PRO not dominated by PRO, fail. + Also find that single edge that leads to PRO. 
*/ + + bool multi = false; + edge the_edge = 0; + FOR_EACH_EDGE (e, ei, pro->preds) + if (!dominated_by_p (CDI_DOMINATORS, e->src, pro)) + { + if (the_edge) + multi = true; + else + the_edge = e; + } + + if (multi) + { + the_edge = orig_entry_edge; + + if (dump_file) + fprintf (dump_file, "More than one candidate edge.\n"); + } + + if (dump_file) + fprintf (dump_file, "Found candidate edge for shrink-wrapping, %d->%d.\n", + the_edge->src->index, the_edge->dest->index); + + *entry_edge = the_edge; + + /* Compute what fraction of the frequency and count of the blocks that run + both with and without prologue are for running with prologue. This gives + the correct answer for reducible flow graphs; for irreducible flow graphs + our profile is messed up beyond repair anyway. */ + + int num = (*entry_edge)->probability; + int den = REG_BR_PROB_BASE; + + if (*entry_edge == orig_entry_edge) + goto out; + + /* Test whether the prologue is known to clobber any register + (other than FP or SP) which are live on the edge. */ + + HARD_REG_SET live_on_edge; + CLEAR_HARD_REG_BIT (prologue_clobbered, STACK_POINTER_REGNUM); + if (frame_pointer_needed) + CLEAR_HARD_REG_BIT (prologue_clobbered, HARD_FRAME_POINTER_REGNUM); + REG_SET_TO_HARD_REG_SET (live_on_edge, + df_get_live_in ((*entry_edge)->dest)); + if (hard_reg_set_intersect_p (live_on_edge, prologue_clobbered)) + { + *entry_edge = orig_entry_edge; + if (dump_file) + fprintf (dump_file, + "Shrink-wrapping aborted due to clobber.\n"); + goto out; + } + + /* All is okay, so do it. */ + + crtl->shrink_wrapped = true; + if (dump_file) + fprintf (dump_file, "Performing shrink-wrapping.\n"); + + /* Copy the blocks that can run both with and without prologue. The + originals run with prologue, the copies without. Store a pointer to + the copy in the ->aux field of the original. 
*/ + + FOR_EACH_BB_FN (bb, cfun) + if (bitmap_bit_p (bb_with, bb->index) + && !dominated_by_p (CDI_DOMINATORS, bb, pro)) + { + basic_block dup = duplicate_block (bb, 0, 0); + + bb->aux = dup; + + if (JUMP_P (BB_END (dup)) && !any_condjump_p (BB_END (dup))) + emit_barrier_after_bb (dup); + + if (EDGE_COUNT (dup->succs) == 0) + emit_barrier_after_bb (dup); + + if (dump_file) + fprintf (dump_file, "Duplicated %d to %d\n", bb->index, dup->index); + + bb->frequency = RDIV (num * bb->frequency, den); + dup->frequency -= bb->frequency; + bb->count = RDIV (num * bb->count, den); + dup->count -= bb->count; + } + + /* Change ENTRY_EDGE, if its src is duplicated. Do this first, before + the redirects have had a chance to create new blocks on the edge we + want to use for the prologue, which makes us not find it. */ + + gcc_assert (!dominated_by_p (CDI_DOMINATORS, (*entry_edge)->src, pro)); + + if (bitmap_bit_p (bb_with, (*entry_edge)->src->index)) + { + basic_block src = (basic_block) (*entry_edge)->src->aux; + FOR_EACH_EDGE (e, ei, src->succs) + if (e->dest == pro) + *entry_edge = e; + } + + /* Now change the edges to point to the copies, where appropriate. 
*/ + + FOR_EACH_BB_FN (bb, cfun) + if (!dominated_by_p (CDI_DOMINATORS, bb, pro)) + { + basic_block src = bb; + if (bitmap_bit_p (bb_with, bb->index)) + src = (basic_block) bb->aux; + + FOR_EACH_EDGE (e, ei, src->succs) { - if (!bitmap_bit_p (&bb_antic_flags, bb->index)) + if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)) continue; - FOR_EACH_EDGE (e, ei, bb->preds) - if (!bitmap_bit_p (&bb_antic_flags, e->src->index)) - { - if (*entry_edge != orig_entry_edge) - { - *entry_edge = orig_entry_edge; - if (dump_file) - fprintf (dump_file, "More than one candidate edge.\n"); - goto fail_shrinkwrap; - } - if (dump_file) - fprintf (dump_file, "Found candidate edge for " - "shrink-wrapping, %d->%d.\n", e->src->index, - e->dest->index); - *entry_edge = e; - } - } - if (*entry_edge != orig_entry_edge) - { - /* Test whether the prologue is known to clobber any register - (other than FP or SP) which are live on the edge. */ - CLEAR_HARD_REG_BIT (prologue_clobbered, STACK_POINTER_REGNUM); - if (frame_pointer_needed) - CLEAR_HARD_REG_BIT (prologue_clobbered, HARD_FRAME_POINTER_REGNUM); - REG_SET_TO_HARD_REG_SET (live_on_edge, - df_get_live_in ((*entry_edge)->dest)); - if (hard_reg_set_intersect_p (live_on_edge, prologue_clobbered)) - { - *entry_edge = orig_entry_edge; - if (dump_file) - fprintf (dump_file, - "Shrink-wrapping aborted due to clobber.\n"); - } - } - if (*entry_edge != orig_entry_edge) - { - crtl->shrink_wrapped = true; - if (dump_file) - fprintf (dump_file, "Performing shrink-wrapping.\n"); - - /* Find tail blocks reachable from both blocks needing a - prologue and blocks not needing a prologue. 
*/ - if (!bitmap_empty_p (&bb_tail)) - FOR_EACH_BB_FN (bb, cfun) + if (bitmap_bit_p (bb_with, e->dest->index) + && !dominated_by_p (CDI_DOMINATORS, e->dest, pro)) { - bool some_pro, some_no_pro; - if (!bitmap_bit_p (&bb_tail, bb->index)) - continue; - some_pro = some_no_pro = false; - FOR_EACH_EDGE (e, ei, bb->preds) - { - if (bitmap_bit_p (bb_flags, e->src->index)) - some_pro = true; - else - some_no_pro = true; - } - if (some_pro && some_no_pro) - vec.quick_push (bb); - else - bitmap_clear_bit (&bb_tail, bb->index); + if (dump_file) + fprintf (dump_file, "Redirecting edge %d->%d to %d\n", + e->src->index, e->dest->index, + ((basic_block) e->dest->aux)->index); + redirect_edge_and_branch_force (e, (basic_block) e->dest->aux); } - /* Find the head of each tail. */ - while (!vec.is_empty ()) - { - basic_block tbb = vec.pop (); + else if (e->flags & EDGE_FALLTHRU + && bitmap_bit_p (bb_with, bb->index)) + force_nonfallthru (e); + } + } - if (!bitmap_bit_p (&bb_tail, tbb->index)) - continue; + /* Also redirect the function entry edge if necessary. */ - while (single_succ_p (tbb)) - { - tbb = single_succ (tbb); - bitmap_clear_bit (&bb_tail, tbb->index); - } - } - /* Now duplicate the tails. */ - if (!bitmap_empty_p (&bb_tail)) - FOR_EACH_BB_REVERSE_FN (bb, cfun) - { - basic_block copy_bb, tbb; - int eflags; - - if (!bitmap_clear_bit (&bb_tail, bb->index)) - continue; - - /* Create a copy of BB, instructions and all, for - use on paths that don't need a prologue. - Ideal placement of the copy is on a fall-thru edge - or after a block that would jump to the copy. */ - FOR_EACH_EDGE (e, ei, bb->preds) - if (!bitmap_bit_p (bb_flags, e->src->index) - && single_succ_p (e->src)) - break; - if (e) - { - /* Make sure we insert after any barriers. */ - rtx_insn *end = get_last_bb_insn (e->src); - copy_bb = create_basic_block (NEXT_INSN (end), - NULL_RTX, e->src); - BB_COPY_PARTITION (copy_bb, e->src); - } - else - { - /* Otherwise put the copy at the end of the function. 
*/ - copy_bb = create_basic_block (NULL_RTX, NULL_RTX, - EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb); - BB_COPY_PARTITION (copy_bb, bb); - } - - rtx_note *insert_point = emit_note_after (NOTE_INSN_DELETED, - BB_END (copy_bb)); - emit_barrier_after (BB_END (copy_bb)); - - tbb = bb; - while (1) - { - dup_block_and_redirect (tbb, copy_bb, insert_point, - bb_flags); - tbb = single_succ (tbb); - if (tbb == EXIT_BLOCK_PTR_FOR_FN (cfun)) - break; - e = split_block (copy_bb, PREV_INSN (insert_point)); - copy_bb = e->dest; - } - - /* Quiet verify_flow_info by (ab)using EDGE_FAKE. - We have yet to add a simple_return to the tails, - as we'd like to first convert_jumps_to_returns in - case the block is no longer used after that. */ - eflags = EDGE_FAKE; - if (CALL_P (PREV_INSN (insert_point)) - && SIBLING_CALL_P (PREV_INSN (insert_point))) - eflags = EDGE_SIBCALL | EDGE_ABNORMAL; - make_single_succ_edge (copy_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), - eflags); - - /* verify_flow_info doesn't like a note after a - sibling call. */ - delete_insn (insert_point); - if (bitmap_empty_p (&bb_tail)) - break; - } - } + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) + if (bitmap_bit_p (bb_with, e->dest->index) + && !dominated_by_p (CDI_DOMINATORS, e->dest, pro)) + { + basic_block split_bb = split_edge (e); + e = single_succ_edge (split_bb); + redirect_edge_and_branch_force (e, (basic_block) e->dest->aux); + } - fail_shrinkwrap: - bitmap_clear (&bb_tail); - bitmap_clear (&bb_antic_flags); - bitmap_clear (&bb_on_list); - vec.release (); - } + /* Change all the exits that should get a simple_return to FAKE. + They will be converted later. 
*/ + + FOR_EACH_BB_FN (bb, cfun) + if (!bitmap_bit_p (bb_with, bb->index)) + FOR_EACH_EDGE (e, ei, bb->succs) + if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)) + { + e = fix_fake_fallthrough_edge (e); + + e->flags &= ~EDGE_FALLTHRU; + if (!(e->flags & EDGE_SIBCALL)) + e->flags |= EDGE_FAKE; + + emit_barrier_after_bb (e->src); + } + +out: + free_dominance_info (CDI_DOMINATORS); } /* If we're allowed to generate a simple return instruction, then by @@ -1018,6 +1072,8 @@ convert_to_simple_return (edge entry_edge, edge orig_entry_edge, && (e->flags & EDGE_FAKE) != 0 && !bitmap_bit_p (&bb_flags, e->src->index)) { + e = fix_fake_fallthrough_edge (e); + emit_return_into_block (true, e->src); e->flags &= ~(EDGE_FALLTHRU | EDGE_FAKE); } diff --git a/gcc/stmt.c b/gcc/stmt.c index 9d33cbe03e4..134d78e751b 100644 --- a/gcc/stmt.c +++ b/gcc/stmt.c @@ -1138,7 +1138,7 @@ expand_case (gswitch *stmt) struct case_node *case_list = 0; /* A pool for case nodes. */ - object_allocator<case_node> case_node_pool ("struct case_node pool", 100); + object_allocator<case_node> case_node_pool ("struct case_node pool"); /* An ERROR_MARK occurs for various reasons including invalid data type. ??? Can this still happen, with GIMPLE and all? */ @@ -1314,8 +1314,7 @@ expand_sjlj_dispatch_table (rtx dispatch_index, { /* Similar to expand_case, but much simpler. 
*/ struct case_node *case_list = 0; - object_allocator<case_node> case_node_pool ("struct sjlj_case pool", - ncases); + object_allocator<case_node> case_node_pool ("struct sjlj_case pool"); tree index_expr = make_tree (index_type, dispatch_index); tree minval = build_int_cst (index_type, 0); tree maxval = CASE_LOW (dispatch_table.last ()); diff --git a/gcc/system.h b/gcc/system.h index 1cc5d408df0..3189bda8e5f 100644 --- a/gcc/system.h +++ b/gcc/system.h @@ -956,7 +956,7 @@ extern void fancy_abort (const char *, int, const char *) ATTRIBUTE_NORETURN; EXTRA_ADDRESS_CONSTRAINT CONST_DOUBLE_OK_FOR_CONSTRAINT_P \ CALLER_SAVE_PROFITABLE LARGEST_EXPONENT_IS_NORMAL \ ROUND_TOWARDS_ZERO SF_SIZE DF_SIZE XF_SIZE TF_SIZE LIBGCC2_TF_CEXT \ - LIBGCC2_LONG_DOUBLE_TYPE_SIZE + LIBGCC2_LONG_DOUBLE_TYPE_SIZE STRUCT_VALUE /* Hooks that are no longer used. */ #pragma GCC poison LANG_HOOKS_FUNCTION_MARK LANG_HOOKS_FUNCTION_FREE \ diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index f75a579bc04..945db718bd0 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,101 @@ +2015-09-16 Eric Botcazou <ebotcazou@adacore.com> + + * gnat.dg/opt49.adb: New test. + +2015-09-16 Richard Biener <rguenther@suse.de> + + PR middle-end/67442 + * gcc.dg/torture/pr67442.c: New testcase. + +2015-09-15 Eric Botcazou <ebotcazou@adacore.com> + + * gnat.dg/array24.adb: New test. + * gnat.dg/blocklocs.adb: Delete. + +2015-09-15 Marek Polacek <polacek@redhat.com> + + PR c/67580 + * gcc.dg/pr67580.c: New test. + +2015-09-15 Richard Biener <rguenther@suse.de> + + PR tree-optimization/67470 + * gcc.dg/torture/pr67470.c: New testcase. + +2015-09-15 Alan Lawrence <alan.lawrence@arm.com> + + * gcc.target/aarch64/vect_int32x2x4_1.c: New. + +2015-09-15 Richard Biener <rguenther@suse.de> + + PR middle-end/67563 + * gcc.dg/pr67563.c: New testcase. + +2015-09-14 Manuel López-Ibáñez <manu@gcc.gnu.org> + + PR fortran/67460 + * gfortran.dg/pr67460.f90: New test. 
+ +2015-09-14 Uros Bizjak <ubizjak@gmail.com> + + * gcc.dg/gomp/dump-new-function-3.c (dg-final): Also scan for $loopfn. + * gcc.dg/gomp/notify-new-function-3.c (dg-final): Ditto. + +2015-09-14 Paolo Carlini <paolo.carlini@oracle.com> + + PR c++/51911 + * g++.dg/cpp0x/new-auto1.C: New. + +2015-09-11 Mark Wielaard <mjw@redhat.com> + + PR c/28901 + * g++.dg/warn/unused-variable-1.C: New test. + * g++.dg/warn/unused-variable-2.C: Likewise. + * gcc.dg/unused-4.c: Adjust warning for static const. + * gcc.dg/unused-variable-1.c: New test. + * gcc.dg/unused-variable-2.c: Likewise. + +2015-09-13 David Edelsohn <dje.gcc@gmail.com> + + * gfortran.dg/private_type_3.f90: Require visibility. + * gfortran.dg/module_variable_2.f90: Same. + * gfortran.dg/nested_forall_1.f: Same. + * gfortran.dg/elemental_dependency_4.f90: Same. + * gfortran.dg/bind_c_usage_25.f90: Same. + * gfortran.dg/access_spec_1.f90: Same. + * gfortran.dg/public_private_module_2.f90: Same. + * gfortran.dg/host_assoc_variable_1.f90: Same. + * gfortran.dg/public_private_module_6.f90: Same. + * gfortran.dg/module_variable_1.f90: Same. + * gfortran.dg/pr37286.f90: Same. + * gfortran.dg/internal_pack_7.f90: Same. + * gfortran.dg/submodule_10.f08: Same. + * gfortran.dg/allocatable_function_8.f90: Same. + * gfortran.dg/merge_init_expr_2.f90: Same. + * gfortran.dg/class_37.f03: Same. + * gfortran.dg/vect/fast-math-vect-8.f90: Same. + * gfortran.dg/typebound_call_20.f03: Same. + * gfortran.dg/proc_ptr_result_3.f90: Same. + * gfortran.dg/pr32535.f90: Same. + * gfortran.dg/typebound_proc_19.f90: Same. + * gfortran.dg/initialization_10.f90: Same. + * gfortran.dg/bind_c_usage_8.f03: Same. + * gfortran.dg/pr61335.f90: Same. + * gfortran.dg/elemental_dependency_5.f90: Same. + * gfortran.dg/proc_ptr_3.f90: Same. + * gfortran.dg/debug/pr46756.f: XFAIL on AIX. + +2015-09-13 Eric Botcazou <ebotcazou@adacore.com> + + * gcc.dg/torture/type-generic-1.c: Pass -DUNSAFE for Visium. + * gcc.dg/Wno-frame-address.c: Skip for Visium. 
+ * gcc.dg/loop-8.c: Likewise. + +2015-09-12 Eric Botcazou <ebotcazou@adacore.com> + + PR ada/66965 + * gnat.dg/specs/addr1.ads: Remove. + 2015-09-11 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> * gcc.dg/pie-link.c: Add -pie to dg-options. @@ -115,7 +213,7 @@ * gcc.dg/gomp/pr67495.c: New test. 2015-09-09 Aditya Kumar <hiraditya@msn.com> - Sebastian Pop <s.pop@samsung.com> + Sebastian Pop <s.pop@samsung.com> PR tree-optimization/53852 * gcc.dg/graphite/uns-interchange-12.c: Adjust pattern to pass with @@ -125,7 +223,7 @@ * gcc.dg/graphite/uns-interchange-mvt.c: Same. 2015-09-08 Aditya Kumar <hiraditya@msn.com> - Sebastian Pop <s.pop@samsung.com> + Sebastian Pop <s.pop@samsung.com> * gcc.dg/graphite/block-0.c: Modifed test case to match current output. * gcc.dg/graphite/block-1.c: Same. @@ -380,7 +478,8 @@ * lib/ubsan-dg.exp: Likewise. 2015-09-01 Kenneth Zadeck <zadeck@naturalbridge.com> - * gcc.c-torture/execute/ieee/20000320-1.c Fixed misplaced test case. + + * gcc.c-torture/execute/ieee/20000320-1.c Fixed misplaced test case. 2015-09-01 Matthew Fortune <matthew.fortune@imgtec.com> Andrew Bennett <andrew.bennett@imgtec.com> @@ -468,7 +567,7 @@ * gcc.target/powerpc/swaps-p8-19.c: New test. -2015-08-29 Jerry DeLisle <jvdelisle@gcc.gnu.org> +2015-08-29 Jerry DeLisle <jvdelisle@gcc.gnu.org> PR fortran/67367 * gfortran.dg/read_dir.f90: New test. May fail on some platforms. @@ -1038,8 +1137,8 @@ PR tree-optimization/67221 * gcc.dg/torture/pr67221.c: New testcase. -2015-08-17 Mike Stump <mikestump@comcast.net> - Kyrylo Tkachov <kyrylo.tkachov@arm.com> +2015-08-17 Mike Stump <mikestump@comcast.net> + Kyrylo Tkachov <kyrylo.tkachov@arm.com> * gcc.target/arm/memcpy-aligned-1.c: New test. 
diff --git a/gcc/testsuite/g++.dg/abi/mangle-regparm1a.C b/gcc/testsuite/g++.dg/abi/mangle-regparm1a.C new file mode 100644 index 00000000000..bfa6c9b0039 --- /dev/null +++ b/gcc/testsuite/g++.dg/abi/mangle-regparm1a.C @@ -0,0 +1,21 @@ +// { dg-do run { target { { i?86-*-* x86_64-*-* } && ia32 } } } +// { dg-options "-fabi-version=8 -Wabi -save-temps" } +// { dg-final { scan-assembler "_Z18IndirectExternCallIPFviiEiEvT_T0_S3_" } } + +template <typename F, typename T> +void IndirectExternCall(F f, T t1, T t2) { // { dg-warning "mangled name" } + typedef F (*WrapF)(F); + f (t1, t2); +} + +__attribute__((regparm(3), stdcall)) +void regparm_func (int i, int j) +{ + if (i != 24 || j != 42) + __builtin_abort(); +} + +int main() +{ + IndirectExternCall (regparm_func, 24, 42); +} diff --git a/gcc/testsuite/g++.dg/cpp0x/new-auto1.C b/gcc/testsuite/g++.dg/cpp0x/new-auto1.C new file mode 100644 index 00000000000..be09f94dd69 --- /dev/null +++ b/gcc/testsuite/g++.dg/cpp0x/new-auto1.C @@ -0,0 +1,10 @@ +// PR c++/51911 +// { dg-do compile { target c++11 } } + +#include <initializer_list> + +auto foo1 = new auto { 3, 4, 5 }; // { dg-error "22:initialization of new-expression for type 'auto'" } +auto bar1 = new auto { 2 }; + +auto foo2 = new auto ( 3, 4, 5 ); // { dg-error "22:initialization of new-expression for type 'auto'" } +auto bar2 = new auto ( 2 ); diff --git a/gcc/testsuite/g++.dg/warn/unused-variable-1.C b/gcc/testsuite/g++.dg/warn/unused-variable-1.C new file mode 100644 index 00000000000..cf531c01f18 --- /dev/null +++ b/gcc/testsuite/g++.dg/warn/unused-variable-1.C @@ -0,0 +1,7 @@ +/* { dg-do compile } */ +/* { dg-options "-Wunused-variable" } */ + +static int a = 0; /* { dg-warning "defined but not used" } */ +static const int b = 0; /* Unlike C, this doesn't cause a warning in C++. */ +static int c __attribute__ ((unused)) = 0; +static const char rcsid[] = "version-string"; /* Likewise. 
*/ diff --git a/gcc/testsuite/g++.dg/warn/unused-variable-2.C b/gcc/testsuite/g++.dg/warn/unused-variable-2.C new file mode 100644 index 00000000000..b608fbce038 --- /dev/null +++ b/gcc/testsuite/g++.dg/warn/unused-variable-2.C @@ -0,0 +1,7 @@ +/* { dg-do compile } */ +/* { dg-options "-Wunused-variable -Wunused-const-variable" } */ + +static int a = 0; /* { dg-warning "defined but not used" } */ +static const int b = 0; /* { dg-warning "defined but not used" } */ +static int c __attribute__ ((unused)) = 0; +static const char rcsid[] __attribute__ ((unused)) = "version-string"; diff --git a/gcc/testsuite/gcc.dg/Wno-frame-address.c b/gcc/testsuite/gcc.dg/Wno-frame-address.c index cef924ea0a3..d464ad6f1e1 100644 --- a/gcc/testsuite/gcc.dg/Wno-frame-address.c +++ b/gcc/testsuite/gcc.dg/Wno-frame-address.c @@ -1,5 +1,5 @@ /* { dg-do compile } */ -/* { dg-skip-if "Cannot access arbitrary stack frames." { arm*-*-* } } */ +/* { dg-skip-if "Cannot access arbitrary stack frames" { arm*-*-* visium-*-* } } */ /* { dg-options "-Werror" } */ /* Verify that -Wframe-address is not enabled by default by enabling diff --git a/gcc/testsuite/gcc.dg/gomp/dump-new-function-3.c b/gcc/testsuite/gcc.dg/gomp/dump-new-function-3.c index 1854179eded..87707546791 100644 --- a/gcc/testsuite/gcc.dg/gomp/dump-new-function-3.c +++ b/gcc/testsuite/gcc.dg/gomp/dump-new-function-3.c @@ -10,4 +10,4 @@ foo (int *__restrict a, int *__restrict b, int *__restrict c) } /* Check that new function does not end up in gimple dump. 
*/ -/* { dg-final { scan-tree-dump-not "foo\\._loopfn\\.0" "gimple" } } */ +/* { dg-final { scan-tree-dump-not "foo\\.\[\\\$_\]loopfn\\.0" "gimple" } } */ diff --git a/gcc/testsuite/gcc.dg/gomp/notify-new-function-3.c b/gcc/testsuite/gcc.dg/gomp/notify-new-function-3.c index f173b8e2367..a8f24b15dc1 100644 --- a/gcc/testsuite/gcc.dg/gomp/notify-new-function-3.c +++ b/gcc/testsuite/gcc.dg/gomp/notify-new-function-3.c @@ -11,4 +11,4 @@ foo (int *__restrict a, int *__restrict b, int *__restrict c) /* Check for new function notification in ompexpssa dump. */ -/* { dg-final { scan-tree-dump-times "Added new ssa gimple function foo\\._loopfn\\.0 to callgraph" 1 "ompexpssa" } } */ +/* { dg-final { scan-tree-dump-times "Added new ssa gimple function foo\\.\[\\\$_\]loopfn\\.0 to callgraph" 1 "ompexpssa" } } */ diff --git a/gcc/testsuite/gcc.dg/loop-8.c b/gcc/testsuite/gcc.dg/loop-8.c index bedfa5d1ac4..529f5c8f3dd 100644 --- a/gcc/testsuite/gcc.dg/loop-8.c +++ b/gcc/testsuite/gcc.dg/loop-8.c @@ -1,5 +1,6 @@ /* { dg-do compile } */ /* { dg-options "-O1 -fdump-rtl-loop2_invariant" } */ +/* { dg-skip-if "unexpected IV" { "visium-*-*" } { "*" } { "" } } */ void f (int *a, int *b) diff --git a/gcc/testsuite/gcc.dg/pr67563.c b/gcc/testsuite/gcc.dg/pr67563.c new file mode 100644 index 00000000000..34a78a23a93 --- /dev/null +++ b/gcc/testsuite/gcc.dg/pr67563.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -fexceptions" } */ + +static void +emit_package (int p1) +{ + int a; + int b[0]; + a = __fprintf_chk (0, 0, ""); +} +void emit_lua () { emit_package (0); } diff --git a/gcc/testsuite/gcc.dg/pr67580.c b/gcc/testsuite/gcc.dg/pr67580.c new file mode 100644 index 00000000000..90e4b1b113f --- /dev/null +++ b/gcc/testsuite/gcc.dg/pr67580.c @@ -0,0 +1,31 @@ +/* PR c/67580 */ +/* { dg-do compile } */ + +struct S { int s; }; +union U { int s; }; +enum E { A }; + +void +f (void) +{ + S s; /* { dg-error "unknown type name" } */ +/* { dg-message "use .struct. 
keyword to refer to the type" "" { target *-*-* } 11 } */ + U u; /* { dg-error "unknown type name" } */ +/* { dg-message "use .union. keyword to refer to the type" "" { target *-*-* } 13 } */ + E e; /* { dg-error "unknown type name" } */ +/* { dg-message "use .enum. keyword to refer to the type" "" { target *-*-* } 15 } */ +} + +void +g (void) +{ + struct T { int i; }; + union V { int i; }; + enum F { J }; + T t; /* { dg-error "unknown type name" } */ +/* { dg-message "use .struct. keyword to refer to the type" "" { target *-*-* } 25 } */ + V v; /* { dg-error "unknown type name" } */ +/* { dg-message "use .union. keyword to refer to the type" "" { target *-*-* } 27 } */ + F f; /* { dg-error "unknown type name" } */ +/* { dg-message "use .enum. keyword to refer to the type" "" { target *-*-* } 29 } */ +} diff --git a/gcc/testsuite/gcc.dg/torture/pr67442.c b/gcc/testsuite/gcc.dg/torture/pr67442.c new file mode 100644 index 00000000000..bc214d62d4d --- /dev/null +++ b/gcc/testsuite/gcc.dg/torture/pr67442.c @@ -0,0 +1,12 @@ +/* { dg-do run } */ + +short foo[100]; + +int main() +{ + short* bar = &foo[50]; + short i = 1; + short j = 1; + short value = bar[8 - i * 2 * j]; + return value; +} diff --git a/gcc/testsuite/gcc.dg/torture/pr67470.c b/gcc/testsuite/gcc.dg/torture/pr67470.c new file mode 100644 index 00000000000..29a23c2ee9b --- /dev/null +++ b/gcc/testsuite/gcc.dg/torture/pr67470.c @@ -0,0 +1,30 @@ +/* { dg-do run } */ + +int a, b, *c, d, e; + +void abort (void); + +int +main () +{ + int f, *g, **h = &g; + for (; b;) + { + c = &a; + for (e = 0; e < 1; e++) + *h = 0; + for (; d; d++) + if (f) + *c = 0; + else + { + *c = e = 0; + *h = &a; + } + + if (a && !g) + abort (); + + } + return 0; +} diff --git a/gcc/testsuite/gcc.dg/torture/type-generic-1.c b/gcc/testsuite/gcc.dg/torture/type-generic-1.c index f6949cff136..38978185827 100644 --- a/gcc/testsuite/gcc.dg/torture/type-generic-1.c +++ b/gcc/testsuite/gcc.dg/torture/type-generic-1.c @@ -3,7 +3,7 @@ /* { dg-do 
run } */ /* { dg-skip-if "No Inf/NaN support" { spu-*-* } } */ -/* { dg-options "-DUNSAFE" { target tic6x*-*-* } } */ +/* { dg-options "-DUNSAFE" { target tic6x*-*-* visium-*-* } } */ /* { dg-add-options ieee } */ #include "../tg-tests.h" diff --git a/gcc/testsuite/gcc.dg/unused-4.c b/gcc/testsuite/gcc.dg/unused-4.c index 99e845f45a3..53236008007 100644 --- a/gcc/testsuite/gcc.dg/unused-4.c +++ b/gcc/testsuite/gcc.dg/unused-4.c @@ -1,6 +1,6 @@ /* { dg-do compile } */ /* { dg-options "-Wunused -O3" } */ -static const int i = 0; +static const int i = 0; /* { dg-warning "defined but not used" } */ static void f() { } /* { dg-warning "defined but not used" } */ static inline void g() { } diff --git a/gcc/testsuite/gcc.dg/unused-variable-1.c b/gcc/testsuite/gcc.dg/unused-variable-1.c new file mode 100644 index 00000000000..cb86c3bc8d2 --- /dev/null +++ b/gcc/testsuite/gcc.dg/unused-variable-1.c @@ -0,0 +1,7 @@ +/* { dg-do compile } */ +/* { dg-options "-Wunused-variable" } */ + +static int a = 0; /* { dg-warning "defined but not used" } */ +static const int b = 0; /* { dg-warning "defined but not used" } */ +static int c __attribute__ ((unused)) = 0; +static const char rcsid[] __attribute__ ((unused)) = "version-string"; diff --git a/gcc/testsuite/gcc.dg/unused-variable-2.c b/gcc/testsuite/gcc.dg/unused-variable-2.c new file mode 100644 index 00000000000..0496466a45f --- /dev/null +++ b/gcc/testsuite/gcc.dg/unused-variable-2.c @@ -0,0 +1,7 @@ +/* { dg-do compile } */ +/* { dg-options "-Wunused-variable -Wno-unused-const-variable" } */ + +static int a = 0; /* { dg-warning "defined but not used" } */ +static const int b = 0; +static int c __attribute__ ((unused)) = 0; +static const char rcsid[] = "version-string"; diff --git a/gcc/testsuite/gcc.target/aarch64/vect_int32x2x4_1.c b/gcc/testsuite/gcc.target/aarch64/vect_int32x2x4_1.c new file mode 100644 index 00000000000..734cfd61bda --- /dev/null +++ b/gcc/testsuite/gcc.target/aarch64/vect_int32x2x4_1.c @@ -0,0 +1,22 @@ 
+/* { dg-do compile } */ +/* { dg-options "-O3 -fdump-rtl-expand" } */ + +#include <arm_neon.h> + +uint32x2x4_t +test_1 (uint32x2x4_t a, uint32x2x4_t b) +{ + uint32x2x4_t result; + + for (unsigned index = 0; index < 4; ++index) + result.val[index] = a.val[index] + b.val[index]; + + return result; +} + +/* Should not use the stack in expand. */ +/* { dg-final { scan-rtl-dump-not "virtual-stack-vars" "expand" } } */ +/* Should not have to modify the stack pointer. */ +/* { dg-final { scan-assembler-not "\t(add|sub).*sp" } } */ +/* Should not have to store or load anything. */ +/* { dg-final { scan-assembler-not "\t(ld|st)\[rp\]" } } */ diff --git a/gcc/testsuite/gfortran.dg/access_spec_1.f90 b/gcc/testsuite/gfortran.dg/access_spec_1.f90 index 8bebd113130..90c74cd3c48 100644 --- a/gcc/testsuite/gfortran.dg/access_spec_1.f90 +++ b/gcc/testsuite/gfortran.dg/access_spec_1.f90 @@ -1,4 +1,5 @@ ! { dg-do compile } +! { dg-require-visibility "" } ! PR fortran/31472 ! Access specifications: Valid Fortran 2003 code module mod diff --git a/gcc/testsuite/gfortran.dg/allocatable_function_8.f90 b/gcc/testsuite/gfortran.dg/allocatable_function_8.f90 index 48f6dd21657..fc4b05e0c0a 100644 --- a/gcc/testsuite/gfortran.dg/allocatable_function_8.f90 +++ b/gcc/testsuite/gfortran.dg/allocatable_function_8.f90 @@ -1,4 +1,5 @@ ! { dg-do run } +! { dg-require-visibility "" } ! Test the fix for PR61459 and PR58883. ! ! Contributed by John Wingate <johnww@tds.net> diff --git a/gcc/testsuite/gfortran.dg/bind_c_usage_25.f90 b/gcc/testsuite/gfortran.dg/bind_c_usage_25.f90 index ae3cf07fcb8..a50be36dffa 100644 --- a/gcc/testsuite/gfortran.dg/bind_c_usage_25.f90 +++ b/gcc/testsuite/gfortran.dg/bind_c_usage_25.f90 @@ -1,5 +1,6 @@ ! { dg-do compile } ! { dg-options "-Wno-c-binding-type" } +! { dg-require-visibility "" } ! ! That's a copy of "bind_c_usage_8.f03", "bind_c_dts_4.f03", ! 
"bind_c_implicit_vars.f03" and "c_kind_tests_2.f03" diff --git a/gcc/testsuite/gfortran.dg/bind_c_usage_8.f03 b/gcc/testsuite/gfortran.dg/bind_c_usage_8.f03 index 15843b5c9d6..04812422ead 100644 --- a/gcc/testsuite/gfortran.dg/bind_c_usage_8.f03 +++ b/gcc/testsuite/gfortran.dg/bind_c_usage_8.f03 @@ -1,5 +1,6 @@ ! { dg-do compile } ! { dg-options "-Wc-binding-type" } +! { dg-require-visibility "" } ! This should compile, though there is a warning about the type of len ! (return variable of strlen()) for being implicit. ! PR fortran/32797 diff --git a/gcc/testsuite/gfortran.dg/class_37.f03 b/gcc/testsuite/gfortran.dg/class_37.f03 index 1d75999626f..04731642ddc 100644 --- a/gcc/testsuite/gfortran.dg/class_37.f03 +++ b/gcc/testsuite/gfortran.dg/class_37.f03 @@ -1,4 +1,5 @@ ! { dg-do compile } +! { dg-require-visibility "" } ! Test fix for PR47082, in which an ICE in the ALLOCATE at line 248. ! ! Contributed by Salvatore Filippone <salvatore.filippone@uniroma2.it> diff --git a/gcc/testsuite/gfortran.dg/debug/pr46756.f b/gcc/testsuite/gfortran.dg/debug/pr46756.f index fab06e3949d..ffe6a02cb50 100644 --- a/gcc/testsuite/gfortran.dg/debug/pr46756.f +++ b/gcc/testsuite/gfortran.dg/debug/pr46756.f @@ -1,6 +1,8 @@ C PR debug/46756, reduced from ../20010519-1.f C { dg-do compile } C { dg-options "-O -fcompare-debug" } +C { dg-xfail-if "compare-debug" { powerpc-ibm-aix* } { "*" } { "" } } + LOGICAL QDISK,QDW,QCMPCT LOGICAL LNOMA,LRAISE,LSCI,LBIG ASSIGN 801 TO I800 ! { dg-warning "Deleted feature: ASSIGN" "Deleted feature: ASSIGN" } diff --git a/gcc/testsuite/gfortran.dg/elemental_dependency_4.f90 b/gcc/testsuite/gfortran.dg/elemental_dependency_4.f90 index 9aa2f88dfdd..79dd7ceff31 100644 --- a/gcc/testsuite/gfortran.dg/elemental_dependency_4.f90 +++ b/gcc/testsuite/gfortran.dg/elemental_dependency_4.f90 @@ -1,5 +1,6 @@ ! { dg-do run } ! { dg-additional-options "-fdump-tree-original" } +! { dg-require-visibility "" } ! ! 
Tests the fix for PR64952, in which the assignment to 'array' should ! have generated a temporary because of the references to the lhs in diff --git a/gcc/testsuite/gfortran.dg/elemental_dependency_5.f90 b/gcc/testsuite/gfortran.dg/elemental_dependency_5.f90 index 42e92692d02..71a8c480db6 100644 --- a/gcc/testsuite/gfortran.dg/elemental_dependency_5.f90 +++ b/gcc/testsuite/gfortran.dg/elemental_dependency_5.f90 @@ -1,4 +1,5 @@ ! { dg-do run } +! { dg-require-visibility "" } ! ! Tests the fix for PR64952. ! diff --git a/gcc/testsuite/gfortran.dg/host_assoc_variable_1.f90 b/gcc/testsuite/gfortran.dg/host_assoc_variable_1.f90 index 57231157c18..55a97c57c38 100644 --- a/gcc/testsuite/gfortran.dg/host_assoc_variable_1.f90 +++ b/gcc/testsuite/gfortran.dg/host_assoc_variable_1.f90 @@ -1,4 +1,5 @@ ! { dg-do compile } +! { dg-require-visibility "" } ! This tests that PR32760, in its various manifestations is fixed. ! ! Contributed by Harald Anlauf <anlauf@gmx.de> diff --git a/gcc/testsuite/gfortran.dg/initialization_10.f90 b/gcc/testsuite/gfortran.dg/initialization_10.f90 index d8e82d519b8..b99c8ff0c6a 100644 --- a/gcc/testsuite/gfortran.dg/initialization_10.f90 +++ b/gcc/testsuite/gfortran.dg/initialization_10.f90 @@ -1,4 +1,5 @@ ! { dg-do compile } +! { dg-require-visibility "" } ! ! PR fortran/32867 - nested initialization expression not simplified ! diff --git a/gcc/testsuite/gfortran.dg/internal_pack_7.f90 b/gcc/testsuite/gfortran.dg/internal_pack_7.f90 index 967835e5f56..1241510b814 100644 --- a/gcc/testsuite/gfortran.dg/internal_pack_7.f90 +++ b/gcc/testsuite/gfortran.dg/internal_pack_7.f90 @@ -1,5 +1,6 @@ ! { dg-do compile } ! { dg-options "-fdump-tree-original" } +! { dg-require-visibility "" } ! ! Test the fix for PR43072, in which unnecessary calls to ! internal PACK/UNPACK were being generated. 
diff --git a/gcc/testsuite/gfortran.dg/merge_init_expr_2.f90 b/gcc/testsuite/gfortran.dg/merge_init_expr_2.f90 index 71bc3a5b083..c761a47cccb 100644 --- a/gcc/testsuite/gfortran.dg/merge_init_expr_2.f90 +++ b/gcc/testsuite/gfortran.dg/merge_init_expr_2.f90 @@ -1,5 +1,6 @@ ! { dg-do compile } ! { dg-options "-fdump-tree-original" } +! { dg-require-visibility "" } ! ! PR fortran/56649 ! MERGE was not properly compile-time simplified diff --git a/gcc/testsuite/gfortran.dg/module_variable_1.f90 b/gcc/testsuite/gfortran.dg/module_variable_1.f90 index fcf6df8d4ab..38b915e6bef 100644 --- a/gcc/testsuite/gfortran.dg/module_variable_1.f90 +++ b/gcc/testsuite/gfortran.dg/module_variable_1.f90 @@ -1,5 +1,6 @@ ! { dg-do compile } ! { dg-options "-Wall" } +! { dg-require-visibility "" } module foo integer, private :: i ! { dg-warning "Unused PRIVATE" } integer, private :: j = 0 diff --git a/gcc/testsuite/gfortran.dg/module_variable_2.f90 b/gcc/testsuite/gfortran.dg/module_variable_2.f90 index ed5b903ca26..f28760ebf62 100644 --- a/gcc/testsuite/gfortran.dg/module_variable_2.f90 +++ b/gcc/testsuite/gfortran.dg/module_variable_2.f90 @@ -1,5 +1,6 @@ ! { dg-do compile } ! { dg-options "-Wall -fmodule-private" } +! { dg-require-visibility "" } module bar integer :: i ! { dg-warning "Unused PRIVATE" } diff --git a/gcc/testsuite/gfortran.dg/nested_forall_1.f b/gcc/testsuite/gfortran.dg/nested_forall_1.f index bf93b6b81c8..5cb8ee0febf 100644 --- a/gcc/testsuite/gfortran.dg/nested_forall_1.f +++ b/gcc/testsuite/gfortran.dg/nested_forall_1.f @@ -1,4 +1,5 @@ ! { dg-do compile } +! { dg-require-visibility "" } ! ! PR fortran/35820 ! diff --git a/gcc/testsuite/gfortran.dg/pr32535.f90 b/gcc/testsuite/gfortran.dg/pr32535.f90 index e16882103da..4cfe94156d6 100644 --- a/gcc/testsuite/gfortran.dg/pr32535.f90 +++ b/gcc/testsuite/gfortran.dg/pr32535.f90 @@ -1,4 +1,5 @@ ! { dg-do compile } +! { dg-require-visibility "" } ! ! 
PR32535: namelist with private items contained in sub-sub-procedure of a module rejected ! diff --git a/gcc/testsuite/gfortran.dg/pr37286.f90 b/gcc/testsuite/gfortran.dg/pr37286.f90 index 607fca496e5..f84de049a6a 100644 --- a/gcc/testsuite/gfortran.dg/pr37286.f90 +++ b/gcc/testsuite/gfortran.dg/pr37286.f90 @@ -1,4 +1,5 @@ ! { dg-do compile } +! { dg-require-visibility "" } module general_rand implicit none diff --git a/gcc/testsuite/gfortran.dg/pr61335.f90 b/gcc/testsuite/gfortran.dg/pr61335.f90 index c1dff97c9a5..c4954ca4b4d 100644 --- a/gcc/testsuite/gfortran.dg/pr61335.f90 +++ b/gcc/testsuite/gfortran.dg/pr61335.f90 @@ -1,4 +1,5 @@ ! { dg-do run } +! { dg-require-visibility "" } ! { dg-additional-options "-fbounds-check" } MODULE cp_units diff --git a/gcc/testsuite/gfortran.dg/pr67460.f90 b/gcc/testsuite/gfortran.dg/pr67460.f90 new file mode 100644 index 00000000000..ede55e1229b --- /dev/null +++ b/gcc/testsuite/gfortran.dg/pr67460.f90 @@ -0,0 +1,24 @@ +! Bogus "all warnings being treated as errors" +! { dg-do compile } +! { dg-options "-std=f2003 -Werror" } +MODULE btree_i8_k_sp2d_v + TYPE btree_node + INTEGER id + TYPE(btree_node_p), DIMENSION(:), POINTER :: subtrees + TYPE(btree_node), POINTER :: parent + END TYPE btree_node + TYPE btree_node_p + TYPE(btree_node), POINTER :: node + END TYPE btree_node_p +CONTAINS + RECURSIVE SUBROUTINE btree_verify_node (tree, node, level, nids, lastv,& + count, num_nodes, max_leaf_level, min_leaf_level, printing) + TYPE(btree_node), INTENT(IN) :: node + INTEGER :: branch + IF (ASSOCIATED (node%subtrees(branch)%node)) THEN + IF (node%subtrees(branch)%node%parent%id .NE. 
node%id) THEN + WRITE(*,*)'foo' + ENDIF + ENDIF + END SUBROUTINE btree_verify_node +END MODULE btree_i8_k_sp2d_v diff --git a/gcc/testsuite/gfortran.dg/private_type_3.f90 b/gcc/testsuite/gfortran.dg/private_type_3.f90 index 89ffa638d00..84bacd5956d 100644 --- a/gcc/testsuite/gfortran.dg/private_type_3.f90 +++ b/gcc/testsuite/gfortran.dg/private_type_3.f90 @@ -1,5 +1,6 @@ ! { dg-do compile } ! { dg-options "-O0" } +! { dg-require-visibility "" } ! Tests the fix for PR24207 and the problems associated ! with the fix for PR21986. In two cases, use associated ! public symbols were taking on the default private access diff --git a/gcc/testsuite/gfortran.dg/proc_ptr_3.f90 b/gcc/testsuite/gfortran.dg/proc_ptr_3.f90 index b69ae9c10e0..1b146819f84 100644 --- a/gcc/testsuite/gfortran.dg/proc_ptr_3.f90 +++ b/gcc/testsuite/gfortran.dg/proc_ptr_3.f90 @@ -1,4 +1,5 @@ ! { dg-do run } +! { dg-require-visibility "" } ! ! PROCEDURE POINTERS without the PROCEDURE statement ! diff --git a/gcc/testsuite/gfortran.dg/proc_ptr_result_3.f90 b/gcc/testsuite/gfortran.dg/proc_ptr_result_3.f90 index 6e2e5244e91..3ed899ce45c 100644 --- a/gcc/testsuite/gfortran.dg/proc_ptr_result_3.f90 +++ b/gcc/testsuite/gfortran.dg/proc_ptr_result_3.f90 @@ -1,4 +1,5 @@ -!{ dg-do run } +! { dg-do run } +! { dg-require-visibility "" } ! ! PR 36704: Procedure pointer as function result ! diff --git a/gcc/testsuite/gfortran.dg/public_private_module_2.f90 b/gcc/testsuite/gfortran.dg/public_private_module_2.f90 index 4c72b2cb55d..e84429e1003 100644 --- a/gcc/testsuite/gfortran.dg/public_private_module_2.f90 +++ b/gcc/testsuite/gfortran.dg/public_private_module_2.f90 @@ -1,8 +1,9 @@ ! { dg-do compile } ! { dg-options "-O2" } +! { dg-require-visibility "" } ! ! PR fortran/52751 (top, "module mod") -! PR fortran/40973 (bottom, "module m" +! PR fortran/40973 (bottom, "module m") ! ! Ensure that (only) those module variables and procedures which are PRIVATE ! and have no C-binding label are optimized away. 
diff --git a/gcc/testsuite/gfortran.dg/public_private_module_6.f90 b/gcc/testsuite/gfortran.dg/public_private_module_6.f90 index 75b1a972c23..b9145af113e 100644 --- a/gcc/testsuite/gfortran.dg/public_private_module_6.f90 +++ b/gcc/testsuite/gfortran.dg/public_private_module_6.f90 @@ -1,5 +1,6 @@ ! { dg-do compile } ! { dg-options "-O1" } +! { dg-require-visibility "" } ! ! PR fortran/54221 ! diff --git a/gcc/testsuite/gfortran.dg/submodule_10.f08 b/gcc/testsuite/gfortran.dg/submodule_10.f08 index 4671e393ddc..e956b2905c3 100644 --- a/gcc/testsuite/gfortran.dg/submodule_10.f08 +++ b/gcc/testsuite/gfortran.dg/submodule_10.f08 @@ -1,4 +1,5 @@ ! { dg-do compile } +! { dg-require-visibility "" } ! ! Checks that PRIVATE enities are visible to submodules. ! diff --git a/gcc/testsuite/gfortran.dg/typebound_call_20.f03 b/gcc/testsuite/gfortran.dg/typebound_call_20.f03 index 8ee7302c546..3936dd87955 100644 --- a/gcc/testsuite/gfortran.dg/typebound_call_20.f03 +++ b/gcc/testsuite/gfortran.dg/typebound_call_20.f03 @@ -1,4 +1,5 @@ ! { dg-do run } +! { dg-require-visibility "" } ! ! PR 47565: [4.6 Regression][OOP] Segfault with TBP ! diff --git a/gcc/testsuite/gfortran.dg/typebound_proc_19.f90 b/gcc/testsuite/gfortran.dg/typebound_proc_19.f90 index b9068b65dd6..1661ff90973 100644 --- a/gcc/testsuite/gfortran.dg/typebound_proc_19.f90 +++ b/gcc/testsuite/gfortran.dg/typebound_proc_19.f90 @@ -1,4 +1,5 @@ ! { dg-do compile } +! { dg-require-visibility "" } ! ! PR fortran/47399 ! diff --git a/gcc/testsuite/gfortran.dg/vect/fast-math-vect-8.f90 b/gcc/testsuite/gfortran.dg/vect/fast-math-vect-8.f90 index a2dbbe18b17..fe2c9e3a4fe 100644 --- a/gcc/testsuite/gfortran.dg/vect/fast-math-vect-8.f90 +++ b/gcc/testsuite/gfortran.dg/vect/fast-math-vect-8.f90 @@ -1,5 +1,6 @@ ! { dg-do compile } ! { dg-require-effective-target vect_float } +! 
{ dg-require-visibility "" } module solv_cap diff --git a/gcc/testsuite/gnat.dg/array24.adb b/gcc/testsuite/gnat.dg/array24.adb new file mode 100644 index 00000000000..c6974365fed --- /dev/null +++ b/gcc/testsuite/gnat.dg/array24.adb @@ -0,0 +1,11 @@ +-- { dg-do compile } +-- { dg-options "-fdump-tree-optimized" } + +procedure Array24 (N : Natural) is + S : String (1 .. N); + pragma Volatile (S); +begin + S := (others => '0'); +end; + +-- { dg-final { scan-tree-dump-not "builtin_unwind_resume" "optimized" } } diff --git a/gcc/testsuite/gnat.dg/blocklocs.adb b/gcc/testsuite/gnat.dg/blocklocs.adb deleted file mode 100644 index 20ff7b30135..00000000000 --- a/gcc/testsuite/gnat.dg/blocklocs.adb +++ /dev/null @@ -1,26 +0,0 @@ --- { dg-do compile { target *-*-linux* } } --- { dg-options "-gdwarf-2" } - -procedure Blocklocs (Choice : Integer; N : in out Integer) is -begin - if Choice > 0 then - declare -- line 7 - S : String (1 .. N * 2); - pragma Volatile (S); - begin - S := (others => 'B'); - end; -- line 12 - else - declare -- line 14 - S : String (1 .. N ); - pragma Volatile (S); - begin - S := (others => '1'); - end; -- line 19 - end if; -end; - --- { dg-final { scan-assembler "loc 1 7" } } --- { dg-final { scan-assembler "loc 1 12" } } --- { dg-final { scan-assembler "loc 1 14" } } --- { dg-final { scan-assembler "loc 1 19" } } diff --git a/gcc/testsuite/gnat.dg/opt49.adb b/gcc/testsuite/gnat.dg/opt49.adb new file mode 100644 index 00000000000..4b91973a19d --- /dev/null +++ b/gcc/testsuite/gnat.dg/opt49.adb @@ -0,0 +1,31 @@ +-- { dg-do run } +-- { dg-options "-O -fstack-check" } + +procedure Opt49 is + + function Ident (I : Integer) return Integer; + pragma No_Inline (Ident); + + function Ident (I : Integer) return Integer is + begin + return I; + end; + + Int_0 : Integer := Ident (0); + Int_4 : Integer := Ident (4); + + A : array (-4 .. Int_4) of Integer; + +begin + A := (-4 , -3 , -2 , -1 , 100 , 1 , 2 , 3 , 4); + A (-4 .. Int_0) := A (Int_0 .. 
4); + if A /= (100 , 1 , 2 , 3 , 4 , 1 , 2 , 3 , 4) then + raise Program_Error; + end if; + + A := (-4 , -3 , -2 , -1 , 100 , 1 , 2 , 3 , 4); + A (Int_0 .. 4) := A (-4 .. Int_0); + if A /= (-4 , -3 , -2 , -1 , -4 , -3 , -2 , -1 , 100) then + raise Program_Error; + end if; +end; diff --git a/gcc/testsuite/gnat.dg/specs/addr1.ads b/gcc/testsuite/gnat.dg/specs/addr1.ads deleted file mode 100644 index bcb833bec69..00000000000 --- a/gcc/testsuite/gnat.dg/specs/addr1.ads +++ /dev/null @@ -1,35 +0,0 @@ --- { dg-do compile } - -with Interfaces; use Interfaces; - -package Addr1 is - - type Arr is array (Integer range <>) of Unsigned_16; - - type Rec1 is record - I1, I2: Integer; - end record; - - type Rec2 is record - I1, I2: Integer; - end record; - for Rec2'Size use 64; - - A: Arr (1 .. 4); - - Obj1: Rec1; - for Obj1'Address use A'Address; -- { dg-bogus "(alignment|erroneous)" } - - Obj2: Rec2; - for Obj2'Address use A'Address; -- { dg-bogus "(alignment|erroneous)" "" { xfail mips*-*-* } } - - Obj3: Rec1; - for Obj3'Address use A(1)'Address; -- { dg-bogus "(alignment|erroneous)" } - - Obj4: Rec1; - for Obj4'Address use A(2)'Address; -- { dg-warning "(alignment|erroneous)" } - - Obj5: Rec1; - for Obj5'Address use A(3)'Address; -- { dg-bogus "(alignment|erroneous)" } - -end Addr1; diff --git a/gcc/toplev.c b/gcc/toplev.c index 981a7cc107c..3bfbe066953 100644 --- a/gcc/toplev.c +++ b/gcc/toplev.c @@ -521,10 +521,9 @@ check_global_declaration (tree decl) /* Warn about static fns or vars defined but not used. */ if (((warn_unused_function && TREE_CODE (decl) == FUNCTION_DECL) - /* We don't warn about "static const" variables because the - "rcs_id" idiom uses that construction. */ - || (warn_unused_variable - && TREE_CODE (decl) == VAR_DECL && ! TREE_READONLY (decl))) + || (((warn_unused_variable && ! TREE_READONLY (decl)) + || (warn_unused_const_variable && TREE_READONLY (decl))) + && TREE_CODE (decl) == VAR_DECL)) && ! DECL_IN_SYSTEM_HEADER (decl) && ! 
snode->referred_to_p (/*include_self=*/false) /* This TREE_USED check is needed in addition to referred_to_p @@ -551,7 +550,9 @@ check_global_declaration (tree decl) warning_at (DECL_SOURCE_LOCATION (decl), (TREE_CODE (decl) == FUNCTION_DECL) ? OPT_Wunused_function - : OPT_Wunused_variable, + : (TREE_READONLY (decl) + ? OPT_Wunused_const_variable + : OPT_Wunused_variable), "%qD defined but not used", decl); } diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c index c1ca468fa25..1a55d22db32 100644 --- a/gcc/tree-eh.c +++ b/gcc/tree-eh.c @@ -915,7 +915,12 @@ lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state, for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); - if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION) + /* We duplicate __builtin_stack_restore at -O0 in the hope of eliminating + it on the EH paths. When it is not eliminated, make it transparent in + the debug info. */ + if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE)) + gimple_set_location (stmt, UNKNOWN_LOCATION); + else if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION) { tree block = gimple_block (stmt); gimple_set_location (stmt, loc); @@ -1604,8 +1609,12 @@ decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally) for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi)) { + /* Duplicate __builtin_stack_restore in the hope of eliminating it + on the EH paths and, consequently, useless cleanups. */ gimple stmt = gsi_stmt (gsi); - if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt)) + if (!is_gimple_debug (stmt) + && !gimple_clobber_p (stmt) + && !gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE)) return false; } return true; diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c index 8b3a0adf7cc..94fea2e3cac 100644 --- a/gcc/tree-sra.c +++ b/gcc/tree-sra.c @@ -277,7 +277,7 @@ typedef struct access *access_p; /* Alloc pool for allocating access structures. 
*/ -static object_allocator<struct access> access_pool ("SRA accesses", 16); +static object_allocator<struct access> access_pool ("SRA accesses"); /* A structure linking lhs and rhs accesses from an aggregate assignment. They are used to propagate subaccesses from rhs to lhs as long as they don't @@ -289,7 +289,7 @@ struct assign_link }; /* Alloc pool for allocating assign link structures. */ -static object_allocator<assign_link> assign_link_pool ("SRA links", 16); +static object_allocator<assign_link> assign_link_pool ("SRA links"); /* Base (tree) -> Vector (vec<access_p> *) map. */ static hash_map<tree, auto_vec<access_p> > *base_access_vec; diff --git a/gcc/tree-ssa-dom.c b/gcc/tree-ssa-dom.c index 10ae5f86d41..fd6ade224c3 100644 --- a/gcc/tree-ssa-dom.c +++ b/gcc/tree-ssa-dom.c @@ -121,28 +121,46 @@ struct edge_info marker. */ typedef struct expr_hash_elt * expr_hash_elt_t; -static vec<std::pair<expr_hash_elt_t, expr_hash_elt_t> > avail_exprs_stack; /* Structure for entries in the expression hash table. */ -struct expr_hash_elt -{ - /* The value (lhs) of this expression. */ - tree lhs; - +class expr_hash_elt +{ + public: + expr_hash_elt (gimple, tree); + expr_hash_elt (tree); + expr_hash_elt (struct hashable_expr *, tree); + expr_hash_elt (class expr_hash_elt &); + ~expr_hash_elt (); + void print (FILE *); + tree vop (void) { return m_vop; } + tree lhs (void) { return m_lhs; } + struct hashable_expr *expr (void) { return &m_expr; } + expr_hash_elt *stamp (void) { return m_stamp; } + hashval_t hash (void) { return m_hash; } + + private: /* The expression (rhs) we want to record. */ - struct hashable_expr expr; + struct hashable_expr m_expr; + + /* The value (lhs) of this expression. */ + tree m_lhs; /* The virtual operand associated with the nearest dominating stmt loading from or storing to expr. */ - tree vop; + tree m_vop; /* The hash value for RHS. 
*/ - hashval_t hash; + hashval_t m_hash; /* A unique stamp, typically the address of the hash element itself, used in removing entries from the table. */ - struct expr_hash_elt *stamp; + struct expr_hash_elt *m_stamp; + + /* We should never be making assignments between objects in this class. + Though it might allow us to exploit C++11 move semantics if we + defined the move constructor and move assignment operator. */ + expr_hash_elt& operator=(const expr_hash_elt&); }; /* Hashtable helpers. */ @@ -158,25 +176,56 @@ struct expr_elt_hasher : pointer_hash <expr_hash_elt> static inline void remove (value_type &); }; +/* This class defines a unwindable AVAIL_EXPRs, built on top of the + available expression hash table. + + Essentially it's just a stack of available expression value pairs with + a special marker (NULL, NULL) to indicate unwind points. */ + +class avail_exprs_stack +{ + public: + /* We need access to the AVAIL_EXPR hash table so that we can + remove entries from the hash table when unwinding the stack. */ + avail_exprs_stack (hash_table<expr_elt_hasher> *table) + { m_stack.create (20); m_avail_exprs = table; } + ~avail_exprs_stack (void) { m_stack.release (); } + + /* Push the unwinding marker onto the stack. */ + void push_marker (void) { record_expr (NULL, NULL, 'M'); } + + /* Restore the AVAIL_EXPRs table to its state when the last marker + was pushed. */ + void pop_to_marker (); + + /* Record a single available expression that can be unwound. 
*/ + void record_expr (expr_hash_elt_t, expr_hash_elt_t, char); + + private: + vec<std::pair<expr_hash_elt_t, expr_hash_elt_t> > m_stack; + hash_table<expr_elt_hasher> *m_avail_exprs; +}; + + inline hashval_t expr_elt_hasher::hash (const value_type &p) { - return p->hash; + return p->hash (); } inline bool expr_elt_hasher::equal (const value_type &p1, const compare_type &p2) { - const struct hashable_expr *expr1 = &p1->expr; - const struct expr_hash_elt *stamp1 = p1->stamp; - const struct hashable_expr *expr2 = &p2->expr; - const struct expr_hash_elt *stamp2 = p2->stamp; + const struct hashable_expr *expr1 = p1->expr (); + const struct expr_hash_elt *stamp1 = p1->stamp (); + const struct hashable_expr *expr2 = p2->expr (); + const struct expr_hash_elt *stamp2 = p2->stamp (); /* This case should apply only when removing entries from the table. */ if (stamp1 == stamp2) return true; - if (p1->hash != p2->hash) + if (p1->hash () != p2->hash ()) return false; /* In case of a collision, both RHS have to be identical and have the @@ -207,6 +256,7 @@ static hash_table<expr_elt_hasher> *avail_exprs; /* Unwindable const/copy equivalences. */ static const_and_copies *const_and_copies; +static avail_exprs_stack *avail_exprs_stack; /* Track whether or not we have changed the control flow graph. */ static bool cfg_altered; @@ -231,7 +281,7 @@ static struct opt_stats_d opt_stats; /* Local functions. 
*/ static void optimize_stmt (basic_block, gimple_stmt_iterator); static tree lookup_avail_expr (gimple, bool); -static hashval_t avail_expr_hash (const void *); +static hashval_t avail_expr_hash (class expr_hash_elt *); static void htab_statistics (FILE *, const hash_table<expr_elt_hasher> &); static void record_cond (cond_equivalence *); @@ -240,19 +290,16 @@ static void record_equivalences_from_phis (basic_block); static void record_equivalences_from_incoming_edge (basic_block); static void eliminate_redundant_computations (gimple_stmt_iterator *); static void record_equivalences_from_stmt (gimple, int); -static void remove_local_expressions_from_table (void); static edge single_incoming_edge_ignoring_loop_edges (basic_block); /* Given a statement STMT, initialize the hash table element pointed to by ELEMENT. */ -static void -initialize_hash_element (gimple stmt, tree lhs, - struct expr_hash_elt *element) +expr_hash_elt::expr_hash_elt (gimple stmt, tree orig_lhs) { enum gimple_code code = gimple_code (stmt); - struct hashable_expr *expr = &element->expr; + struct hashable_expr *expr = this->expr (); if (code == GIMPLE_ASSIGN) { @@ -342,17 +389,17 @@ initialize_hash_element (gimple stmt, tree lhs, expr->kind = EXPR_PHI; expr->ops.phi.nargs = nargs; expr->ops.phi.args = XCNEWVEC (tree, nargs); - for (i = 0; i < nargs; i++) expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i); + } else gcc_unreachable (); - element->lhs = lhs; - element->vop = gimple_vuse (stmt); - element->hash = avail_expr_hash (element); - element->stamp = element; + m_lhs = orig_lhs; + m_vop = gimple_vuse (stmt); + m_hash = avail_expr_hash (this); + m_stamp = this; } /* Given a conditional expression COND as a tree, initialize @@ -385,16 +432,50 @@ initialize_expr_from_cond (tree cond, struct hashable_expr *expr) /* Given a hashable_expr expression EXPR and an LHS, initialize the hash table element pointed to by ELEMENT. 
*/ -static void -initialize_hash_element_from_expr (struct hashable_expr *expr, - tree lhs, - struct expr_hash_elt *element) -{ - element->expr = *expr; - element->lhs = lhs; - element->vop = NULL_TREE; - element->hash = avail_expr_hash (element); - element->stamp = element; +expr_hash_elt::expr_hash_elt (struct hashable_expr *orig, tree orig_lhs) +{ + m_expr = *orig; + m_lhs = orig_lhs; + m_vop = NULL_TREE; + m_hash = avail_expr_hash (this); + m_stamp = this; +} + +expr_hash_elt::expr_hash_elt (class expr_hash_elt &old_elt) +{ + m_expr = old_elt.m_expr; + m_lhs = old_elt.m_lhs; + m_vop = old_elt.m_vop; + m_hash = old_elt.m_hash; + m_stamp = this; + + /* Now deep copy the malloc'd space for CALL and PHI args. */ + if (old_elt.m_expr.kind == EXPR_CALL) + { + size_t nargs = old_elt.m_expr.ops.call.nargs; + size_t i; + + m_expr.ops.call.args = XCNEWVEC (tree, nargs); + for (i = 0; i < nargs; i++) + m_expr.ops.call.args[i] = old_elt.m_expr.ops.call.args[i]; + } + else if (old_elt.m_expr.kind == EXPR_PHI) + { + size_t nargs = old_elt.m_expr.ops.phi.nargs; + size_t i; + + m_expr.ops.phi.args = XCNEWVEC (tree, nargs); + for (i = 0; i < nargs; i++) + m_expr.ops.phi.args[i] = old_elt.m_expr.ops.phi.args[i]; + } +} + +expr_hash_elt::~expr_hash_elt () +{ + if (m_expr.kind == EXPR_CALL) + free (m_expr.ops.call.args); + else if (m_expr.kind == EXPR_PHI) + free (m_expr.ops.phi.args); } /* Compare two hashable_expr structures for equivalence. They are @@ -404,7 +485,7 @@ initialize_hash_element_from_expr (struct hashable_expr *expr, static bool hashable_expr_equal_p (const struct hashable_expr *expr0, - const struct hashable_expr *expr1) + const struct hashable_expr *expr1) { tree type0 = expr0->type; tree type1 = expr1->type; @@ -641,51 +722,51 @@ add_hashable_expr (const struct hashable_expr *expr, hash &hstate) /* Print a diagnostic dump of an expression hash table entry. 
*/ -static void -print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element) +void +expr_hash_elt::print (FILE *stream) { fprintf (stream, "STMT "); - if (element->lhs) + if (m_lhs) { - print_generic_expr (stream, element->lhs, 0); + print_generic_expr (stream, m_lhs, 0); fprintf (stream, " = "); } - switch (element->expr.kind) + switch (m_expr.kind) { case EXPR_SINGLE: - print_generic_expr (stream, element->expr.ops.single.rhs, 0); + print_generic_expr (stream, m_expr.ops.single.rhs, 0); break; case EXPR_UNARY: - fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op)); - print_generic_expr (stream, element->expr.ops.unary.opnd, 0); + fprintf (stream, "%s ", get_tree_code_name (m_expr.ops.unary.op)); + print_generic_expr (stream, m_expr.ops.unary.opnd, 0); break; case EXPR_BINARY: - print_generic_expr (stream, element->expr.ops.binary.opnd0, 0); - fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op)); - print_generic_expr (stream, element->expr.ops.binary.opnd1, 0); + print_generic_expr (stream, m_expr.ops.binary.opnd0, 0); + fprintf (stream, " %s ", get_tree_code_name (m_expr.ops.binary.op)); + print_generic_expr (stream, m_expr.ops.binary.opnd1, 0); break; case EXPR_TERNARY: - fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op)); - print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0); + fprintf (stream, " %s <", get_tree_code_name (m_expr.ops.ternary.op)); + print_generic_expr (stream, m_expr.ops.ternary.opnd0, 0); fputs (", ", stream); - print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0); + print_generic_expr (stream, m_expr.ops.ternary.opnd1, 0); fputs (", ", stream); - print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0); + print_generic_expr (stream, m_expr.ops.ternary.opnd2, 0); fputs (">", stream); break; case EXPR_CALL: { size_t i; - size_t nargs = element->expr.ops.call.nargs; + size_t nargs = m_expr.ops.call.nargs; gcall *fn_from; - fn_from = 
element->expr.ops.call.fn_from; + fn_from = m_expr.ops.call.fn_from; if (gimple_call_internal_p (fn_from)) fputs (internal_fn_name (gimple_call_internal_fn (fn_from)), stream); @@ -694,7 +775,7 @@ print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element) fprintf (stream, " ("); for (i = 0; i < nargs; i++) { - print_generic_expr (stream, element->expr.ops.call.args[i], 0); + print_generic_expr (stream, m_expr.ops.call.args[i], 0); if (i + 1 < nargs) fprintf (stream, ", "); } @@ -705,12 +786,12 @@ print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element) case EXPR_PHI: { size_t i; - size_t nargs = element->expr.ops.phi.nargs; + size_t nargs = m_expr.ops.phi.nargs; fprintf (stream, "PHI <"); for (i = 0; i < nargs; i++) { - print_generic_expr (stream, element->expr.ops.phi.args[i], 0); + print_generic_expr (stream, m_expr.ops.phi.args[i], 0); if (i + 1 < nargs) fprintf (stream, ", "); } @@ -719,34 +800,22 @@ print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element) break; } - if (element->vop) + if (m_vop) { fprintf (stream, " with "); - print_generic_expr (stream, element->vop, 0); + print_generic_expr (stream, m_vop, 0); } fprintf (stream, "\n"); } -/* Delete variable sized pieces of the expr_hash_elt ELEMENT. */ - -static void -free_expr_hash_elt_contents (struct expr_hash_elt *element) -{ - if (element->expr.kind == EXPR_CALL) - free (element->expr.ops.call.args); - else if (element->expr.kind == EXPR_PHI) - free (element->expr.ops.phi.args); -} - /* Delete an expr_hash_elt and reclaim its storage. */ static void free_expr_hash_elt (void *elt) { - struct expr_hash_elt *element = ((struct expr_hash_elt *)elt); - free_expr_hash_elt_contents (element); - free (element); + class expr_hash_elt *element = ((class expr_hash_elt *)elt); + delete element; } /* Allocate an EDGE_INFO for edge E and attach it to E. @@ -1163,7 +1232,7 @@ pass_dominator::execute (function *fun) /* Create our hash tables. 
*/ avail_exprs = new hash_table<expr_elt_hasher> (1024); - avail_exprs_stack.create (20); + avail_exprs_stack = new class avail_exprs_stack (avail_exprs); const_and_copies = new class const_and_copies (); need_eh_cleanup = BITMAP_ALLOC (NULL); need_noreturn_fixup.create (0); @@ -1286,7 +1355,7 @@ pass_dominator::execute (function *fun) /* Free asserted bitmaps and stacks. */ BITMAP_FREE (need_eh_cleanup); need_noreturn_fixup.release (); - avail_exprs_stack.release (); + delete avail_exprs_stack; delete const_and_copies; /* Free the value-handle array. */ @@ -1354,14 +1423,13 @@ canonicalize_comparison (gcond *condstmt) /* Remove all the expressions in LOCALS from TABLE, stopping when there are LIMIT entries left in LOCALs. */ -static void -remove_local_expressions_from_table (void) +void +avail_exprs_stack::pop_to_marker () { /* Remove all the expressions made available in this block. */ - while (avail_exprs_stack.length () > 0) + while (m_stack.length () > 0) { - std::pair<expr_hash_elt_t, expr_hash_elt_t> victim - = avail_exprs_stack.pop (); + std::pair<expr_hash_elt_t, expr_hash_elt_t> victim = m_stack.pop (); expr_hash_elt **slot; if (victim.first == NULL) @@ -1373,10 +1441,10 @@ remove_local_expressions_from_table (void) if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "<<<< "); - print_expr_hash_elt (dump_file, victim.first); + victim.first->print (dump_file); } - slot = avail_exprs->find_slot (victim.first, NO_INSERT); + slot = m_avail_exprs->find_slot (victim.first, NO_INSERT); gcc_assert (slot && *slot == victim.first); if (victim.second != NULL) { @@ -1384,10 +1452,25 @@ remove_local_expressions_from_table (void) *slot = victim.second; } else - avail_exprs->clear_slot (slot); + m_avail_exprs->clear_slot (slot); + } +} + +void +avail_exprs_stack::record_expr (class expr_hash_elt *elt1, + class expr_hash_elt *elt2, + char type) +{ + if (elt1 && dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "%c>>> ", type); + 
elt1->print (dump_file); } + + m_stack.safe_push (std::pair<expr_hash_elt_t, expr_hash_elt_t> (elt1, elt2)); } + /* A trivial wrapper so that we can present the generic jump threading code with a simple API for simplifying statements. */ static tree @@ -1522,8 +1605,7 @@ dom_opt_dom_walker::thread_across_edge (edge e) /* Push a marker on both stacks so we can unwind the tables back to their current state. */ - avail_exprs_stack.safe_push - (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL)); + avail_exprs_stack->push_marker (); const_and_copies->push_marker (); /* Traversing E may result in equivalences we can utilize. */ @@ -1536,12 +1618,12 @@ dom_opt_dom_walker::thread_across_edge (edge e) simplify_stmt_for_jump_threading); /* And restore the various tables to their state before - we threaded this edge. + we threaded this edge. XXX The code in tree-ssa-threadedge.c will restore the state of the const_and_copies table. We we just have to restore the expression table. */ - remove_local_expressions_from_table (); + avail_exprs_stack->pop_to_marker (); } /* PHI nodes can create equivalences too. 
@@ -1699,24 +1781,15 @@ htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab) static void record_cond (cond_equivalence *p) { - struct expr_hash_elt *element = XCNEW (struct expr_hash_elt); + class expr_hash_elt *element = new expr_hash_elt (&p->cond, p->value); expr_hash_elt **slot; - initialize_hash_element_from_expr (&p->cond, p->value, element); - - slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT); + slot = avail_exprs->find_slot_with_hash (element, element->hash (), INSERT); if (*slot == NULL) { *slot = element; - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, "1>>> "); - print_expr_hash_elt (dump_file, element); - } - - avail_exprs_stack.safe_push - (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element, NULL)); + avail_exprs_stack->record_expr (element, NULL, '1'); } else free_expr_hash_elt (element); @@ -2121,8 +2194,7 @@ dom_opt_dom_walker::before_dom_children (basic_block bb) /* Push a marker on the stacks of local information so that we know how far to unwind when we finalize this block. */ - avail_exprs_stack.safe_push - (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL)); + avail_exprs_stack->push_marker (); const_and_copies->push_marker (); record_equivalences_from_incoming_edge (bb); @@ -2133,11 +2205,10 @@ dom_opt_dom_walker::before_dom_children (basic_block bb) /* Create equivalences from redundant PHIs. PHIs are only truly redundant when they exist in the same block, so push another marker and unwind right afterwards. 
*/ - avail_exprs_stack.safe_push - (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL)); + avail_exprs_stack->push_marker (); for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) eliminate_redundant_computations (&gsi); - remove_local_expressions_from_table (); + avail_exprs_stack->pop_to_marker (); for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) optimize_stmt (bb, gsi); @@ -2188,7 +2259,7 @@ dom_opt_dom_walker::after_dom_children (basic_block bb) } /* These remove expressions local to BB from the tables. */ - remove_local_expressions_from_table (); + avail_exprs_stack->pop_to_marker (); const_and_copies->pop_to_marker (); } @@ -2730,7 +2801,6 @@ lookup_avail_expr (gimple stmt, bool insert) { expr_hash_elt **slot; tree lhs; - struct expr_hash_elt element; /* Get LHS of phi, assignment, or call; else NULL_TREE. */ if (gimple_code (stmt) == GIMPLE_PHI) @@ -2738,52 +2808,42 @@ lookup_avail_expr (gimple stmt, bool insert) else lhs = gimple_get_lhs (stmt); - initialize_hash_element (stmt, lhs, &element); + class expr_hash_elt element (stmt, lhs); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "LKUP "); - print_expr_hash_elt (dump_file, &element); + element.print (dump_file); } /* Don't bother remembering constant assignments and copy operations. Constants and copy operations are handled by the constant/copy propagator in optimize_stmt. */ - if (element.expr.kind == EXPR_SINGLE - && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME - || is_gimple_min_invariant (element.expr.ops.single.rhs))) + if (element.expr()->kind == EXPR_SINGLE + && (TREE_CODE (element.expr()->ops.single.rhs) == SSA_NAME + || is_gimple_min_invariant (element.expr()->ops.single.rhs))) return NULL_TREE; /* Finally try to find the expression in the main expression hash table. */ slot = avail_exprs->find_slot (&element, (insert ? 
INSERT : NO_INSERT)); if (slot == NULL) { - free_expr_hash_elt_contents (&element); return NULL_TREE; } else if (*slot == NULL) { - struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt); - *element2 = element; - element2->stamp = element2; + class expr_hash_elt *element2 = new expr_hash_elt (element); *slot = element2; - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, "2>>> "); - print_expr_hash_elt (dump_file, element2); - } - - avail_exprs_stack.safe_push - (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element2, NULL)); + avail_exprs_stack->record_expr (element2, NULL, '2'); return NULL_TREE; } /* If we found a redundant memory operation do an alias walk to check if we can re-use it. */ - if (gimple_vuse (stmt) != (*slot)->vop) + if (gimple_vuse (stmt) != (*slot)->vop ()) { - tree vuse1 = (*slot)->vop; + tree vuse1 = (*slot)->vop (); tree vuse2 = gimple_vuse (stmt); /* If we have a load of a register and a candidate in the hash with vuse1 then try to reach its stmt by walking @@ -2799,30 +2859,21 @@ lookup_avail_expr (gimple stmt, bool insert) { if (insert) { - struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt); - *element2 = element; - element2->stamp = element2; + class expr_hash_elt *element2 = new expr_hash_elt (element); /* Insert the expr into the hash by replacing the current entry and recording the value to restore in the avail_exprs_stack. */ - avail_exprs_stack.safe_push (std::make_pair (element2, *slot)); + avail_exprs_stack->record_expr (element2, *slot, '2'); *slot = element2; - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, "2>>> "); - print_expr_hash_elt (dump_file, *slot); - } } return NULL_TREE; } } - free_expr_hash_elt_contents (&element); - /* Extract the LHS of the assignment so that it can be used as the current definition of another variable. 
*/ - lhs = (*slot)->lhs; + lhs = (*slot)->lhs (); lhs = dom_valueize (lhs); @@ -2841,9 +2892,9 @@ lookup_avail_expr (gimple stmt, bool insert) its operands. */ static hashval_t -avail_expr_hash (const void *p) +avail_expr_hash (class expr_hash_elt *p) { - const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr; + const struct hashable_expr *expr = p->expr (); inchash::hash hstate; inchash::add_hashable_expr (expr, hstate); diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c index b85d9cb5956..f67b57d7f27 100644 --- a/gcc/tree-ssa-loop-im.c +++ b/gcc/tree-ssa-loop-im.c @@ -102,16 +102,16 @@ static hash_map<gimple, lim_aux_data *> *lim_aux_data_map; /* Description of a memory reference location. */ -typedef struct mem_ref_loc +struct mem_ref_loc { tree *ref; /* The reference itself. */ gimple stmt; /* The statement in that it occurs. */ -} *mem_ref_loc_p; +}; /* Description of a memory reference. */ -typedef struct im_mem_ref +struct im_mem_ref { unsigned id; /* ID assigned to the memory reference (its index in memory_accesses.refs_list) */ @@ -138,7 +138,7 @@ typedef struct im_mem_ref If it is only loaded, then it is independent on all stores in the loop. */ bitmap_head dep_loop; /* The complement of INDEP_LOOP. */ -} *mem_ref_p; +}; /* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first to record (in)dependence against stores in the loop and its subloops, the @@ -181,7 +181,7 @@ static struct hash_table<mem_ref_hasher> *refs; /* The list of memory references. */ - vec<mem_ref_p> refs_list; + vec<im_mem_ref *> refs_list; /* The set of memory references accessed in each loop. */ vec<bitmap_head> refs_in_loop; @@ -200,7 +200,7 @@ static struct static bitmap_obstack lim_bitmap_obstack; static obstack mem_ref_obstack; -static bool ref_indep_loop_p (struct loop *, mem_ref_p); +static bool ref_indep_loop_p (struct loop *, im_mem_ref *); /* Minimum cost of an expensive expression. 
*/ #define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE)) @@ -537,7 +537,7 @@ stmt_cost (gimple stmt) instead. */ static struct loop * -outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref) +outermost_indep_loop (struct loop *outer, struct loop *loop, im_mem_ref *ref) { struct loop *aloop; @@ -590,13 +590,13 @@ simple_mem_ref_in_stmt (gimple stmt, bool *is_store) /* Returns the memory reference contained in STMT. */ -static mem_ref_p +static im_mem_ref * mem_ref_in_stmt (gimple stmt) { bool store; tree *mem = simple_mem_ref_in_stmt (stmt, &store); hashval_t hash; - mem_ref_p ref; + im_mem_ref *ref; if (!mem) return NULL; @@ -790,7 +790,7 @@ determine_max_movement (gimple stmt, bool must_preserve_exec) if (gimple_vuse (stmt)) { - mem_ref_p ref = mem_ref_in_stmt (stmt); + im_mem_ref *ref = mem_ref_in_stmt (stmt); if (ref) { @@ -1420,10 +1420,10 @@ memref_free (struct im_mem_ref *mem) /* Allocates and returns a memory reference description for MEM whose hash value is HASH and id is ID. */ -static mem_ref_p +static im_mem_ref * mem_ref_alloc (tree mem, unsigned hash, unsigned id) { - mem_ref_p ref = XOBNEW (&mem_ref_obstack, struct im_mem_ref); + im_mem_ref *ref = XOBNEW (&mem_ref_obstack, struct im_mem_ref); ao_ref_init (&ref->mem, mem); ref->id = id; ref->hash = hash; @@ -1439,7 +1439,7 @@ mem_ref_alloc (tree mem, unsigned hash, unsigned id) description REF. The reference occurs in statement STMT. */ static void -record_mem_ref_loc (mem_ref_p ref, gimple stmt, tree *loc) +record_mem_ref_loc (im_mem_ref *ref, gimple stmt, tree *loc) { mem_ref_loc aref; aref.stmt = stmt; @@ -1451,7 +1451,7 @@ record_mem_ref_loc (mem_ref_p ref, gimple stmt, tree *loc) necessary. Return whether a bit was changed. 
*/ static bool -set_ref_stored_in_loop (mem_ref_p ref, struct loop *loop) +set_ref_stored_in_loop (im_mem_ref *ref, struct loop *loop) { if (!ref->stored) ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack); @@ -1461,7 +1461,7 @@ set_ref_stored_in_loop (mem_ref_p ref, struct loop *loop) /* Marks reference REF as stored in LOOP. */ static void -mark_ref_stored (mem_ref_p ref, struct loop *loop) +mark_ref_stored (im_mem_ref *ref, struct loop *loop) { while (loop != current_loops->tree_root && set_ref_stored_in_loop (ref, loop)) @@ -1479,7 +1479,7 @@ gather_mem_refs_stmt (struct loop *loop, gimple stmt) tree *mem = NULL; hashval_t hash; im_mem_ref **slot; - mem_ref_p ref; + im_mem_ref *ref; bool is_stored; unsigned id; @@ -1505,7 +1505,7 @@ gather_mem_refs_stmt (struct loop *loop, gimple stmt) slot = memory_accesses.refs->find_slot_with_hash (*mem, hash, INSERT); if (*slot) { - ref = (mem_ref_p) *slot; + ref = *slot; id = ref->id; } else @@ -1625,7 +1625,7 @@ analyze_memory_references (void) tree_to_aff_combination_expand. */ static bool -mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2, +mem_refs_may_alias_p (im_mem_ref *mem1, im_mem_ref *mem2, hash_map<tree, name_expansion *> **ttae_cache) { /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same @@ -1679,10 +1679,10 @@ find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_) template <typename FN> static bool -for_all_locs_in_loop (struct loop *loop, mem_ref_p ref, FN fn) +for_all_locs_in_loop (struct loop *loop, im_mem_ref *ref, FN fn) { unsigned i; - mem_ref_loc_p loc; + mem_ref_loc *loc; /* Search for the cluster of locs in the accesses_in_loop vector which is sorted after postorder index of the loop father. 
*/ @@ -1696,7 +1696,7 @@ for_all_locs_in_loop (struct loop *loop, mem_ref_p ref, FN fn) while (i > 0) { --i; - mem_ref_loc_p l = &ref->accesses_in_loop[i]; + mem_ref_loc *l = &ref->accesses_in_loop[i]; if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt))) break; if (fn (l)) @@ -1705,7 +1705,7 @@ for_all_locs_in_loop (struct loop *loop, mem_ref_p ref, FN fn) for (i = loc - ref->accesses_in_loop.address (); i < ref->accesses_in_loop.length (); ++i) { - mem_ref_loc_p l = &ref->accesses_in_loop[i]; + mem_ref_loc *l = &ref->accesses_in_loop[i]; if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt))) break; if (fn (l)) @@ -1720,12 +1720,12 @@ for_all_locs_in_loop (struct loop *loop, mem_ref_p ref, FN fn) struct rewrite_mem_ref_loc { rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {} - bool operator () (mem_ref_loc_p loc); + bool operator () (mem_ref_loc *loc); tree tmp_var; }; bool -rewrite_mem_ref_loc::operator () (mem_ref_loc_p loc) +rewrite_mem_ref_loc::operator () (mem_ref_loc *loc) { *loc->ref = tmp_var; update_stmt (loc->stmt); @@ -1735,7 +1735,7 @@ rewrite_mem_ref_loc::operator () (mem_ref_loc_p loc) /* Rewrites all references to REF in LOOP by variable TMP_VAR. 
*/ static void -rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var) +rewrite_mem_refs (struct loop *loop, im_mem_ref *ref, tree tmp_var) { for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var)); } @@ -1744,13 +1744,13 @@ rewrite_mem_refs (struct loop *loop, mem_ref_p ref, tree tmp_var) struct first_mem_ref_loc_1 { - first_mem_ref_loc_1 (mem_ref_loc_p *locp_) : locp (locp_) {} - bool operator () (mem_ref_loc_p loc); - mem_ref_loc_p *locp; + first_mem_ref_loc_1 (mem_ref_loc **locp_) : locp (locp_) {} + bool operator () (mem_ref_loc *loc); + mem_ref_loc **locp; }; bool -first_mem_ref_loc_1::operator () (mem_ref_loc_p loc) +first_mem_ref_loc_1::operator () (mem_ref_loc *loc) { *locp = loc; return true; @@ -1758,10 +1758,10 @@ first_mem_ref_loc_1::operator () (mem_ref_loc_p loc) /* Returns the first reference location to REF in LOOP. */ -static mem_ref_loc_p -first_mem_ref_loc (struct loop *loop, mem_ref_p ref) +static mem_ref_loc * +first_mem_ref_loc (struct loop *loop, im_mem_ref *ref) { - mem_ref_loc_p locp = NULL; + mem_ref_loc *locp = NULL; for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp)); return locp; } @@ -1839,6 +1839,23 @@ execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag) if (loop_has_only_one_exit) ex = split_block_after_labels (ex->dest); + else + { + for (gphi_iterator gpi = gsi_start_phis (ex->dest); + !gsi_end_p (gpi); gsi_next (&gpi)) + { + gphi *phi = gpi.phi (); + if (virtual_operand_p (gimple_phi_result (phi))) + continue; + + /* When the destination has a non-virtual PHI node with multiple + predecessors make sure we preserve the PHI structure by + forcing a forwarder block so that hoisting of that PHI will + still work. 
*/ + split_edge (ex); + break; + } + } old_dest = ex->dest; new_bb = split_edge (ex); @@ -1916,12 +1933,12 @@ execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag) struct sm_set_flag_if_changed { sm_set_flag_if_changed (tree flag_) : flag (flag_) {} - bool operator () (mem_ref_loc_p loc); + bool operator () (mem_ref_loc *loc); tree flag; }; bool -sm_set_flag_if_changed::operator () (mem_ref_loc_p loc) +sm_set_flag_if_changed::operator () (mem_ref_loc *loc) { /* Only set the flag for writes. */ if (is_gimple_assign (loc->stmt) @@ -1938,7 +1955,7 @@ sm_set_flag_if_changed::operator () (mem_ref_loc_p loc) set, set an appropriate flag indicating the store. */ static tree -execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref) +execute_sm_if_changed_flag_set (struct loop *loop, im_mem_ref *ref) { tree flag; char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag"); @@ -1953,7 +1970,7 @@ execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref) to the reference from the temporary variable are emitted to exits. */ static void -execute_sm (struct loop *loop, vec<edge> exits, mem_ref_p ref) +execute_sm (struct loop *loop, vec<edge> exits, im_mem_ref *ref) { tree tmp_var, store_flag = NULL_TREE; unsigned i; @@ -2029,7 +2046,7 @@ static void hoist_memory_references (struct loop *loop, bitmap mem_refs, vec<edge> exits) { - mem_ref_p ref; + im_mem_ref *ref; unsigned i; bitmap_iterator bi; @@ -2044,13 +2061,13 @@ struct ref_always_accessed { ref_always_accessed (struct loop *loop_, bool stored_p_) : loop (loop_), stored_p (stored_p_) {} - bool operator () (mem_ref_loc_p loc); + bool operator () (mem_ref_loc *loc); struct loop *loop; bool stored_p; }; bool -ref_always_accessed::operator () (mem_ref_loc_p loc) +ref_always_accessed::operator () (mem_ref_loc *loc) { struct loop *must_exec; @@ -2082,7 +2099,7 @@ ref_always_accessed::operator () (mem_ref_loc_p loc) make sure REF is always stored to in LOOP. 
*/ static bool -ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p) +ref_always_accessed_p (struct loop *loop, im_mem_ref *ref, bool stored_p) { return for_all_locs_in_loop (loop, ref, ref_always_accessed (loop, stored_p)); @@ -2091,7 +2108,7 @@ ref_always_accessed_p (struct loop *loop, mem_ref_p ref, bool stored_p) /* Returns true if REF1 and REF2 are independent. */ static bool -refs_independent_p (mem_ref_p ref1, mem_ref_p ref2) +refs_independent_p (im_mem_ref *ref1, im_mem_ref *ref2) { if (ref1 == ref2) return true; @@ -2118,7 +2135,7 @@ refs_independent_p (mem_ref_p ref1, mem_ref_p ref2) and its super-loops. */ static void -record_dep_loop (struct loop *loop, mem_ref_p ref, bool stored_p) +record_dep_loop (struct loop *loop, im_mem_ref *ref, bool stored_p) { /* We can propagate dependent-in-loop bits up the loop hierarchy to all outer loops. */ @@ -2131,12 +2148,12 @@ record_dep_loop (struct loop *loop, mem_ref_p ref, bool stored_p) LOOP. */ static bool -ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref, bool stored_p) +ref_indep_loop_p_1 (struct loop *loop, im_mem_ref *ref, bool stored_p) { bitmap refs_to_check; unsigned i; bitmap_iterator bi; - mem_ref_p aref; + im_mem_ref *aref; if (stored_p) refs_to_check = &memory_accesses.refs_in_loop[loop->num]; @@ -2160,7 +2177,7 @@ ref_indep_loop_p_1 (struct loop *loop, mem_ref_p ref, bool stored_p) LOOP. Wrapper over ref_indep_loop_p_1, caching its results. */ static bool -ref_indep_loop_p_2 (struct loop *loop, mem_ref_p ref, bool stored_p) +ref_indep_loop_p_2 (struct loop *loop, im_mem_ref *ref, bool stored_p) { stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num)); @@ -2212,7 +2229,7 @@ ref_indep_loop_p_2 (struct loop *loop, mem_ref_p ref, bool stored_p) LOOP. 
*/ static bool -ref_indep_loop_p (struct loop *loop, mem_ref_p ref) +ref_indep_loop_p (struct loop *loop, im_mem_ref *ref) { gcc_checking_assert (MEM_ANALYZABLE (ref)); @@ -2222,7 +2239,7 @@ ref_indep_loop_p (struct loop *loop, mem_ref_p ref) /* Returns true if we can perform store motion of REF from LOOP. */ static bool -can_sm_ref_p (struct loop *loop, mem_ref_p ref) +can_sm_ref_p (struct loop *loop, im_mem_ref *ref) { tree base; @@ -2268,7 +2285,7 @@ find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm) bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num]; unsigned i; bitmap_iterator bi; - mem_ref_p ref; + im_mem_ref *ref; EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi) { @@ -2494,7 +2511,7 @@ tree_ssa_lim_finalize (void) { basic_block bb; unsigned i; - mem_ref_p ref; + im_mem_ref *ref; free_aux_for_edges (); diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c index 723a9f7f5bc..ae14e8b7981 100644 --- a/gcc/tree-ssa-loop-ivopts.c +++ b/gcc/tree-ssa-loop-ivopts.c @@ -3889,6 +3889,7 @@ get_shiftadd_cost (tree expr, machine_mode mode, comp_cost cost0, if (!(m >= 0 && m < maxm)) return false; + STRIP_NOPS (op1); mult_in_op1 = operand_equal_p (op1, mult, 0); as_cost = add_cost (speed, mode) + shift_cost (speed, mode, m); diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c index eae53580ec3..c8d0d331e9c 100644 --- a/gcc/tree-ssa-math-opts.c +++ b/gcc/tree-ssa-math-opts.c @@ -547,8 +547,7 @@ pass_cse_reciprocals::execute (function *fun) basic_block bb; tree arg; - occ_pool = new object_allocator<occurrence> - ("dominators for recip", n_basic_blocks_for_fn (fun) / 3 + 1); + occ_pool = new object_allocator<occurrence> ("dominators for recip"); memset (&reciprocal_stats, 0, sizeof (reciprocal_stats)); calculate_dominance_info (CDI_DOMINATORS); diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c index 697958def0c..f38cb2b8eb5 100644 --- a/gcc/tree-ssa-pre.c +++ b/gcc/tree-ssa-pre.c @@ -349,7 
+349,7 @@ clear_expression_ids (void) expressions.release (); } -static object_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes", 30); +static object_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes"); /* Given an SSA_NAME NAME, get or create a pre_expr to represent it. */ @@ -488,7 +488,7 @@ static unsigned int get_expr_value_id (pre_expr); /* We can add and remove elements and entries to and from sets and hash tables, so we use alloc pools for them. */ -static object_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets", 30); +static object_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets"); static bitmap_obstack grand_bitmap_obstack; /* Set of blocks with statements that have had their EH properties changed. */ diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c index efb813c3efa..51934c02475 100644 --- a/gcc/tree-ssa-reassoc.c +++ b/gcc/tree-ssa-reassoc.c @@ -209,8 +209,8 @@ typedef struct operand_entry unsigned int count; } *operand_entry_t; -static object_allocator<operand_entry> operand_entry_pool ("operand entry pool", - 30); +static object_allocator<operand_entry> operand_entry_pool + ("operand entry pool"); /* This is used to assign a unique ID to each struct operand_entry so that qsort results are identical on different hosts. 
*/ diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c index aea6acc0451..f7904e2d3ce 100644 --- a/gcc/tree-ssa-sccvn.c +++ b/gcc/tree-ssa-sccvn.c @@ -953,9 +953,9 @@ ao_ref_init_from_vn_reference (ao_ref *ref, unsigned i; tree base = NULL_TREE; tree *op0_p = &base; - HOST_WIDE_INT offset = 0; - HOST_WIDE_INT max_size; - HOST_WIDE_INT size = -1; + offset_int offset = 0; + offset_int max_size; + offset_int size = -1; tree size_tree = NULL_TREE; alias_set_type base_alias_set = -1; @@ -971,15 +971,11 @@ ao_ref_init_from_vn_reference (ao_ref *ref, if (mode == BLKmode) size_tree = TYPE_SIZE (type); else - size = GET_MODE_BITSIZE (mode); - } - if (size_tree != NULL_TREE) - { - if (!tree_fits_uhwi_p (size_tree)) - size = -1; - else - size = tree_to_uhwi (size_tree); + size = int (GET_MODE_BITSIZE (mode)); } + if (size_tree != NULL_TREE + && TREE_CODE (size_tree) == INTEGER_CST) + size = wi::to_offset (size_tree); /* Initially, maxsize is the same as the accessed element size. In the following it will only grow (or become -1). */ @@ -1034,7 +1030,7 @@ ao_ref_init_from_vn_reference (ao_ref *ref, /* And now the usual component-reference style ops. */ case BIT_FIELD_REF: - offset += tree_to_shwi (op->op1); + offset += wi::to_offset (op->op1); break; case COMPONENT_REF: @@ -1043,15 +1039,16 @@ ao_ref_init_from_vn_reference (ao_ref *ref, /* We do not have a complete COMPONENT_REF tree here so we cannot use component_ref_field_offset. Do the interesting parts manually. 
*/ + tree this_offset = DECL_FIELD_OFFSET (field); - if (op->op1 - || !tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) + if (op->op1 || TREE_CODE (this_offset) != INTEGER_CST) max_size = -1; else { - offset += (tree_to_uhwi (DECL_FIELD_OFFSET (field)) - * BITS_PER_UNIT); - offset += TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (field)); + offset_int woffset = wi::lshift (wi::to_offset (this_offset), + LOG2_BITS_PER_UNIT); + woffset += wi::to_offset (DECL_FIELD_BIT_OFFSET (field)); + offset += woffset; } break; } @@ -1059,17 +1056,18 @@ ao_ref_init_from_vn_reference (ao_ref *ref, case ARRAY_RANGE_REF: case ARRAY_REF: /* We recorded the lower bound and the element size. */ - if (!tree_fits_shwi_p (op->op0) - || !tree_fits_shwi_p (op->op1) - || !tree_fits_shwi_p (op->op2)) + if (TREE_CODE (op->op0) != INTEGER_CST + || TREE_CODE (op->op1) != INTEGER_CST + || TREE_CODE (op->op2) != INTEGER_CST) max_size = -1; else { - HOST_WIDE_INT hindex = tree_to_shwi (op->op0); - hindex -= tree_to_shwi (op->op1); - hindex *= tree_to_shwi (op->op2); - hindex *= BITS_PER_UNIT; - offset += hindex; + offset_int woffset + = wi::sext (wi::to_offset (op->op0) - wi::to_offset (op->op1), + TYPE_PRECISION (TREE_TYPE (op->op0))); + woffset *= wi::to_offset (op->op2); + woffset = wi::lshift (woffset, LOG2_BITS_PER_UNIT); + offset += woffset; } break; @@ -1102,9 +1100,6 @@ ao_ref_init_from_vn_reference (ao_ref *ref, ref->ref = NULL_TREE; ref->base = base; - ref->offset = offset; - ref->size = size; - ref->max_size = max_size; ref->ref_alias_set = set; if (base_alias_set != -1) ref->base_alias_set = base_alias_set; @@ -1113,6 +1108,30 @@ ao_ref_init_from_vn_reference (ao_ref *ref, /* We discount volatiles from value-numbering elsewhere. 
*/ ref->volatile_p = false; + if (!wi::fits_shwi_p (size) || wi::neg_p (size)) + { + ref->offset = 0; + ref->size = -1; + ref->max_size = -1; + return true; + } + + ref->size = size.to_shwi (); + + if (!wi::fits_shwi_p (offset)) + { + ref->offset = 0; + ref->max_size = -1; + return true; + } + + ref->offset = offset.to_shwi (); + + if (!wi::fits_shwi_p (max_size) || wi::neg_p (max_size)) + ref->max_size = -1; + else + ref->max_size = max_size.to_shwi (); + return true; } @@ -4146,9 +4165,9 @@ allocate_vn_table (vn_tables_t table) table->references = new vn_reference_table_type (23); gcc_obstack_init (&table->nary_obstack); - table->phis_pool = new object_allocator<vn_phi_s> ("VN phis", 30); + table->phis_pool = new object_allocator<vn_phi_s> ("VN phis"); table->references_pool = new object_allocator<vn_reference_s> - ("VN references", 30); + ("VN references"); } /* Free a value number table. */ diff --git a/gcc/tree-ssa-strlen.c b/gcc/tree-ssa-strlen.c index cfe4dd9b31b..87f48bc3f44 100644 --- a/gcc/tree-ssa-strlen.c +++ b/gcc/tree-ssa-strlen.c @@ -113,8 +113,7 @@ typedef struct strinfo_struct } *strinfo; /* Pool for allocating strinfo_struct entries. */ -static object_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool", - 64); +static object_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool"); /* Vector mapping positive string indexes to strinfo, for the current basic block. The first pointer in the vector is special, diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c index 615eb9f104a..5323ce6d139 100644 --- a/gcc/tree-ssa-structalias.c +++ b/gcc/tree-ssa-structalias.c @@ -323,7 +323,7 @@ static inline bool type_can_have_subvars (const_tree); /* Pool of variable info structures. */ static object_allocator<variable_info> variable_info_pool - ("Variable info pool", 30); + ("Variable info pool"); /* Map varinfo to final pt_solution. 
*/ static hash_map<varinfo_t, pt_solution *> *final_solutions; @@ -523,7 +523,7 @@ struct constraint /* List of constraints that we use to build the constraint graph from. */ static vec<constraint_t> constraints; -static object_allocator<constraint> constraint_pool ("Constraint pool", 30); +static object_allocator<constraint> constraint_pool ("Constraint pool"); /* The constraint graph is represented as an array of bitmaps containing successor nodes. */ diff --git a/gcc/tree-ssa-ter.c b/gcc/tree-ssa-ter.c index f7ca95bb5af..17686a9d581 100644 --- a/gcc/tree-ssa-ter.c +++ b/gcc/tree-ssa-ter.c @@ -162,7 +162,7 @@ along with GCC; see the file COPYING3. If not see /* Temporary Expression Replacement (TER) table information. */ -typedef struct temp_expr_table_d +struct temp_expr_table { var_map map; bitmap *partition_dependencies; /* Partitions expr is dependent on. */ @@ -174,7 +174,7 @@ typedef struct temp_expr_table_d bitmap new_replaceable_dependencies; /* Holding place for pending dep's. */ int *num_in_part; /* # of ssa_names in a partition. */ int *call_cnt; /* Call count at definition. */ -} *temp_expr_table_p; +}; /* Used to indicate a dependency on VDEFs. */ #define VIRTUAL_PARTITION(table) (table->virtual_partition) @@ -183,19 +183,18 @@ typedef struct temp_expr_table_d static bitmap_obstack ter_bitmap_obstack; #ifdef ENABLE_CHECKING -extern void debug_ter (FILE *, temp_expr_table_p); +extern void debug_ter (FILE *, temp_expr_table *); #endif /* Create a new TER table for MAP. */ -static temp_expr_table_p +static temp_expr_table * new_temp_expr_table (var_map map) { - temp_expr_table_p t; unsigned x; - t = XNEW (struct temp_expr_table_d); + temp_expr_table *t = XNEW (struct temp_expr_table); t->map = map; t->partition_dependencies = XCNEWVEC (bitmap, num_ssa_names + 1); @@ -229,7 +228,7 @@ new_temp_expr_table (var_map map) vector. 
*/ static bitmap -free_temp_expr_table (temp_expr_table_p t) +free_temp_expr_table (temp_expr_table *t) { bitmap ret = NULL; @@ -264,7 +263,7 @@ free_temp_expr_table (temp_expr_table_p t) /* Return TRUE if VERSION is to be replaced by an expression in TAB. */ static inline bool -version_to_be_replaced_p (temp_expr_table_p tab, int version) +version_to_be_replaced_p (temp_expr_table *tab, int version) { if (!tab->replaceable_expressions) return false; @@ -276,7 +275,7 @@ version_to_be_replaced_p (temp_expr_table_p tab, int version) the expression table */ static inline void -make_dependent_on_partition (temp_expr_table_p tab, int version, int p) +make_dependent_on_partition (temp_expr_table *tab, int version, int p) { if (!tab->partition_dependencies[version]) tab->partition_dependencies[version] = BITMAP_ALLOC (&ter_bitmap_obstack); @@ -288,7 +287,7 @@ make_dependent_on_partition (temp_expr_table_p tab, int version, int p) /* Add VER to the kill list for P. TAB is the expression table */ static inline void -add_to_partition_kill_list (temp_expr_table_p tab, int p, int ver) +add_to_partition_kill_list (temp_expr_table *tab, int p, int ver) { if (!tab->kill_list[p]) { @@ -303,7 +302,7 @@ add_to_partition_kill_list (temp_expr_table_p tab, int p, int ver) table. */ static inline void -remove_from_partition_kill_list (temp_expr_table_p tab, int p, int version) +remove_from_partition_kill_list (temp_expr_table *tab, int p, int version) { gcc_checking_assert (tab->kill_list[p]); bitmap_clear_bit (tab->kill_list[p], version); @@ -321,7 +320,7 @@ remove_from_partition_kill_list (temp_expr_table_p tab, int p, int version) expression table. */ static void -add_dependence (temp_expr_table_p tab, int version, tree var) +add_dependence (temp_expr_table *tab, int version, tree var) { int i; bitmap_iterator bi; @@ -372,7 +371,7 @@ add_dependence (temp_expr_table_p tab, int version, tree var) expression from consideration as well by freeing the decl uid bitmap. 
*/ static void -finished_with_expr (temp_expr_table_p tab, int version, bool free_expr) +finished_with_expr (temp_expr_table *tab, int version, bool free_expr) { unsigned i; bitmap_iterator bi; @@ -444,7 +443,7 @@ ter_is_replaceable_p (gimple stmt) /* Create an expression entry for a replaceable expression. */ static void -process_replaceable (temp_expr_table_p tab, gimple stmt, int call_cnt) +process_replaceable (temp_expr_table *tab, gimple stmt, int call_cnt) { tree var, def, basevar; int version; @@ -493,7 +492,7 @@ process_replaceable (temp_expr_table_p tab, gimple stmt, int call_cnt) from consideration, making it not replaceable. */ static inline void -kill_expr (temp_expr_table_p tab, int partition) +kill_expr (temp_expr_table *tab, int partition) { unsigned version; @@ -513,7 +512,7 @@ kill_expr (temp_expr_table_p tab, int partition) partitions. */ static inline void -kill_virtual_exprs (temp_expr_table_p tab) +kill_virtual_exprs (temp_expr_table *tab) { kill_expr (tab, VIRTUAL_PARTITION (tab)); } @@ -524,7 +523,7 @@ kill_virtual_exprs (temp_expr_table_p tab) MORE_REPLACING is true, accumulate the pending partition dependencies. */ static void -mark_replaceable (temp_expr_table_p tab, tree var, bool more_replacing) +mark_replaceable (temp_expr_table *tab, tree var, bool more_replacing) { int version = SSA_NAME_VERSION (var); @@ -572,7 +571,7 @@ find_ssaname_in_store (gimple, tree, tree t, void *data) be replaced by their expressions. Results are stored in the table TAB. */ static void -find_replaceable_in_bb (temp_expr_table_p tab, basic_block bb) +find_replaceable_in_bb (temp_expr_table *tab, basic_block bb) { gimple_stmt_iterator bsi; gimple stmt; @@ -712,7 +711,7 @@ bitmap find_replaceable_exprs (var_map map) { basic_block bb; - temp_expr_table_p table; + temp_expr_table *table; bitmap ret; bitmap_obstack_initialize (&ter_bitmap_obstack); @@ -755,7 +754,7 @@ dump_replaceable_exprs (FILE *f, bitmap expr) table being debugged. 
*/ DEBUG_FUNCTION void -debug_ter (FILE *f, temp_expr_table_p t) +debug_ter (FILE *f, temp_expr_table *t) { unsigned x, y; bitmap_iterator bi; diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c index d85481e0103..89eaef70c41 100644 --- a/gcc/tree-vrp.c +++ b/gcc/tree-vrp.c @@ -126,7 +126,7 @@ static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code, SSA name may have more than one assertion associated with it, these locations are kept in a linked list attached to the corresponding SSA name. */ -struct assert_locus_d +struct assert_locus { /* Basic block where the assertion would be inserted. */ basic_block bb; @@ -148,11 +148,9 @@ struct assert_locus_d tree expr; /* Next node in the linked list. */ - struct assert_locus_d *next; + assert_locus *next; }; -typedef struct assert_locus_d *assert_locus_t; - /* If bit I is present, it means that SSA name N_i has a list of assertions that should be inserted in the IL. */ static bitmap need_assert_for; @@ -160,7 +158,7 @@ static bitmap need_assert_for; /* Array of locations lists where to insert assertions. ASSERTS_FOR[I] holds a list of ASSERT_LOCUS_T nodes that describe where ASSERT_EXPRs for SSA name N_I should be inserted. */ -static assert_locus_t *asserts_for; +static assert_locus **asserts_for; /* Value range array. After propagation, VR_VALUE[I] holds the range of values that SSA name N_I may take. 
*/ @@ -4897,7 +4895,7 @@ void debug_all_asserts (void); void dump_asserts_for (FILE *file, tree name) { - assert_locus_t loc; + assert_locus *loc; fprintf (file, "Assertions to be inserted for "); print_generic_expr (file, name, 0); @@ -4979,7 +4977,7 @@ register_new_assert_for (tree name, tree expr, edge e, gimple_stmt_iterator si) { - assert_locus_t n, loc, last_loc; + assert_locus *n, *loc, *last_loc; basic_block dest_bb; gcc_checking_assert (bb == NULL || e == NULL); @@ -5054,7 +5052,7 @@ register_new_assert_for (tree name, tree expr, /* If we didn't find an assertion already registered for NAME COMP_CODE VAL, add a new one at the end of the list of assertions associated with NAME. */ - n = XNEW (struct assert_locus_d); + n = XNEW (struct assert_locus); n->bb = dest_bb; n->e = e; n->si = si; @@ -6333,7 +6331,7 @@ find_assert_locations (void) indicated by LOC. Return true if we made any edge insertions. */ static bool -process_assert_insertions_for (tree name, assert_locus_t loc) +process_assert_insertions_for (tree name, assert_locus *loc) { /* Build the comparison expression NAME_i COMP_CODE VAL. 
*/ gimple stmt; @@ -6401,12 +6399,12 @@ process_assert_insertions (void) EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) { - assert_locus_t loc = asserts_for[i]; + assert_locus *loc = asserts_for[i]; gcc_assert (loc); while (loc) { - assert_locus_t next = loc->next; + assert_locus *next = loc->next; update_edges_p |= process_assert_insertions_for (ssa_name (i), loc); free (loc); loc = next; @@ -6458,7 +6456,7 @@ static void insert_range_assertions (void) { need_assert_for = BITMAP_ALLOC (NULL); - asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names); + asserts_for = XCNEWVEC (assert_locus *, num_ssa_names); calculate_dominance_info (CDI_DOMINATORS); diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c index a31a1376007..003be574969 100644 --- a/gcc/var-tracking.c +++ b/gcc/var-tracking.c @@ -264,10 +264,10 @@ typedef struct attrs_def } *attrs; /* Structure for chaining the locations. */ -typedef struct location_chain_def +struct location_chain { /* Next element in the chain. */ - struct location_chain_def *next; + location_chain *next; /* The location (REG, MEM or VALUE). */ rtx loc; @@ -277,7 +277,7 @@ typedef struct location_chain_def /* Initialized? */ enum var_init_status init; -} *location_chain; +}; /* A vector of loc_exp_dep holds the active dependencies of a one-part DV on VALUEs, i.e., the VALUEs expanded so as to form the current @@ -337,7 +337,7 @@ struct onepart_aux typedef struct variable_part_def { /* Chain of locations of the part. */ - location_chain loc_chain; + location_chain *loc_chain; /* Location which was last emitted to location list. */ rtx cur_loc; @@ -525,14 +525,14 @@ typedef struct emit_note_data_def /* Structure holding a refcounted hash table. If refcount > 1, it must be first unshared before modified. */ -typedef struct shared_hash_def +struct shared_hash { /* Reference count. */ int refcount; /* Actual hash table. */ variable_table_type *htab; -} *shared_hash; +}; /* Structure holding the IN or OUT set for a basic block. 
*/ typedef struct dataflow_set_def @@ -544,10 +544,10 @@ typedef struct dataflow_set_def attrs regs[FIRST_PSEUDO_REGISTER]; /* Variable locations. */ - shared_hash vars; + shared_hash *vars; /* Vars that is being traversed. */ - shared_hash traversed_vars; + shared_hash *traversed_vars; } dataflow_set; /* The structure (one for each basic block) containing the information @@ -576,28 +576,27 @@ typedef struct variable_tracking_info_def } *variable_tracking_info; /* Alloc pool for struct attrs_def. */ -object_allocator<attrs_def> attrs_def_pool ("attrs_def pool", 1024); +object_allocator<attrs_def> attrs_def_pool ("attrs_def pool"); /* Alloc pool for struct variable_def with MAX_VAR_PARTS entries. */ static pool_allocator var_pool - ("variable_def pool", 64, sizeof (variable_def) + + ("variable_def pool", sizeof (variable_def) + (MAX_VAR_PARTS - 1) * sizeof (((variable)NULL)->var_part[0])); /* Alloc pool for struct variable_def with a single var_part entry. */ static pool_allocator valvar_pool - ("small variable_def pool", 256, sizeof (variable_def)); + ("small variable_def pool", sizeof (variable_def)); -/* Alloc pool for struct location_chain_def. */ -static object_allocator<location_chain_def> location_chain_def_pool - ("location_chain_def pool", 1024); +/* Alloc pool for struct location_chain. */ +static object_allocator<location_chain> location_chain_pool + ("location_chain pool"); -/* Alloc pool for struct shared_hash_def. */ -static object_allocator<shared_hash_def> shared_hash_def_pool - ("shared_hash_def pool", 256); +/* Alloc pool for struct shared_hash. */ +static object_allocator<shared_hash> shared_hash_pool ("shared_hash pool"); /* Alloc pool for struct loc_exp_dep_s for NOT_ONEPART variables. */ -object_allocator<loc_exp_dep> loc_exp_dep_pool ("loc_exp_dep pool", 64); +object_allocator<loc_exp_dep> loc_exp_dep_pool ("loc_exp_dep pool"); /* Changed variables, notes will be emitted for them. 
*/ static variable_table_type *changed_variables; @@ -611,7 +610,7 @@ static bool emit_notes; static variable_table_type *dropped_values; /* Empty shared hashtable. */ -static shared_hash empty_shared_hash; +static shared_hash *empty_shared_hash; /* Scratch register bitmap used by cselib_expand_value_rtx. */ static bitmap scratch_regs = NULL; @@ -663,7 +662,7 @@ static void dataflow_set_clear (dataflow_set *); static void dataflow_set_copy (dataflow_set *, dataflow_set *); static int variable_union_info_cmp_pos (const void *, const void *); static void dataflow_set_union (dataflow_set *, dataflow_set *); -static location_chain find_loc_in_1pdv (rtx, variable, variable_table_type *); +static location_chain *find_loc_in_1pdv (rtx, variable, variable_table_type *); static bool canon_value_cmp (rtx, rtx); static int loc_cmp (rtx, rtx); static bool variable_part_different_p (variable_part *, variable_part *); @@ -1435,7 +1434,7 @@ variable_htab_free (void *elem) { int i; variable var = (variable) elem; - location_chain node, next; + location_chain *node, *next; gcc_checking_assert (var->refcount > 0); @@ -1571,7 +1570,7 @@ attrs_list_mpdv_union (attrs *dstp, attrs src, attrs src2) /* Return true if VARS is shared. */ static inline bool -shared_hash_shared (shared_hash vars) +shared_hash_shared (shared_hash *vars) { return vars->refcount > 1; } @@ -1579,7 +1578,7 @@ shared_hash_shared (shared_hash vars) /* Return the hash table for VARS. */ static inline variable_table_type * -shared_hash_htab (shared_hash vars) +shared_hash_htab (shared_hash *vars) { return vars->htab; } @@ -1587,7 +1586,7 @@ shared_hash_htab (shared_hash vars) /* Return true if VAR is shared, or maybe because VARS is shared. */ static inline bool -shared_var_p (variable var, shared_hash vars) +shared_var_p (variable var, shared_hash *vars) { /* Don't count an entry in the changed_variables table as a duplicate. 
*/ return ((var->refcount > 1 + (int) var->in_changed_variables) @@ -1596,10 +1595,10 @@ shared_var_p (variable var, shared_hash vars) /* Copy variables into a new hash table. */ -static shared_hash -shared_hash_unshare (shared_hash vars) +static shared_hash * +shared_hash_unshare (shared_hash *vars) { - shared_hash new_vars = new shared_hash_def; + shared_hash *new_vars = new shared_hash; gcc_assert (vars->refcount > 1); new_vars->refcount = 1; new_vars->htab = new variable_table_type (vars->htab->elements () + 3); @@ -1610,8 +1609,8 @@ shared_hash_unshare (shared_hash vars) /* Increment reference counter on VARS and return it. */ -static inline shared_hash -shared_hash_copy (shared_hash vars) +static inline shared_hash * +shared_hash_copy (shared_hash *vars) { vars->refcount++; return vars; @@ -1621,7 +1620,7 @@ shared_hash_copy (shared_hash vars) anymore. */ static void -shared_hash_destroy (shared_hash vars) +shared_hash_destroy (shared_hash *vars) { gcc_checking_assert (vars->refcount > 0); if (--vars->refcount == 0) @@ -1635,7 +1634,7 @@ shared_hash_destroy (shared_hash vars) INSERT, insert it if not already present. */ static inline variable_def ** -shared_hash_find_slot_unshare_1 (shared_hash *pvars, decl_or_value dv, +shared_hash_find_slot_unshare_1 (shared_hash **pvars, decl_or_value dv, hashval_t dvhash, enum insert_option ins) { if (shared_hash_shared (*pvars)) @@ -1644,7 +1643,7 @@ shared_hash_find_slot_unshare_1 (shared_hash *pvars, decl_or_value dv, } static inline variable_def ** -shared_hash_find_slot_unshare (shared_hash *pvars, decl_or_value dv, +shared_hash_find_slot_unshare (shared_hash **pvars, decl_or_value dv, enum insert_option ins) { return shared_hash_find_slot_unshare_1 (pvars, dv, dv_htab_hash (dv), ins); @@ -1655,7 +1654,7 @@ shared_hash_find_slot_unshare (shared_hash *pvars, decl_or_value dv, return NULL. 
*/ static inline variable_def ** -shared_hash_find_slot_1 (shared_hash vars, decl_or_value dv, hashval_t dvhash) +shared_hash_find_slot_1 (shared_hash *vars, decl_or_value dv, hashval_t dvhash) { return shared_hash_htab (vars)->find_slot_with_hash (dv, dvhash, shared_hash_shared (vars) @@ -1663,7 +1662,7 @@ shared_hash_find_slot_1 (shared_hash vars, decl_or_value dv, hashval_t dvhash) } static inline variable_def ** -shared_hash_find_slot (shared_hash vars, decl_or_value dv) +shared_hash_find_slot (shared_hash *vars, decl_or_value dv) { return shared_hash_find_slot_1 (vars, dv, dv_htab_hash (dv)); } @@ -1671,14 +1670,14 @@ shared_hash_find_slot (shared_hash vars, decl_or_value dv) /* Return slot for DV only if it is already present in the hash table. */ static inline variable_def ** -shared_hash_find_slot_noinsert_1 (shared_hash vars, decl_or_value dv, +shared_hash_find_slot_noinsert_1 (shared_hash *vars, decl_or_value dv, hashval_t dvhash) { return shared_hash_htab (vars)->find_slot_with_hash (dv, dvhash, NO_INSERT); } static inline variable_def ** -shared_hash_find_slot_noinsert (shared_hash vars, decl_or_value dv) +shared_hash_find_slot_noinsert (shared_hash *vars, decl_or_value dv) { return shared_hash_find_slot_noinsert_1 (vars, dv, dv_htab_hash (dv)); } @@ -1687,13 +1686,13 @@ shared_hash_find_slot_noinsert (shared_hash vars, decl_or_value dv) table. 
*/ static inline variable -shared_hash_find_1 (shared_hash vars, decl_or_value dv, hashval_t dvhash) +shared_hash_find_1 (shared_hash *vars, decl_or_value dv, hashval_t dvhash) { return shared_hash_htab (vars)->find_with_hash (dv, dvhash); } static inline variable -shared_hash_find (shared_hash vars, decl_or_value dv) +shared_hash_find (shared_hash *vars, decl_or_value dv) { return shared_hash_find_1 (vars, dv, dv_htab_hash (dv)); } @@ -1738,8 +1737,8 @@ unshare_variable (dataflow_set *set, variable_def **slot, variable var, for (i = 0; i < var->n_var_parts; i++) { - location_chain node; - location_chain *nextp; + location_chain *node; + location_chain **nextp; if (i == 0 && var->onepart) { @@ -1756,9 +1755,9 @@ unshare_variable (dataflow_set *set, variable_def **slot, variable var, nextp = &new_var->var_part[i].loc_chain; for (node = var->var_part[i].loc_chain; node; node = node->next) { - location_chain new_lc; + location_chain *new_lc; - new_lc = new location_chain_def; + new_lc = new location_chain; new_lc->next = NULL; if (node->init > initialized) new_lc->init = node->init; @@ -1882,7 +1881,7 @@ get_init_value (dataflow_set *set, rtx loc, decl_or_value dv) { for (i = 0; i < var->n_var_parts && ret_val == VAR_INIT_STATUS_UNKNOWN; i++) { - location_chain nextp; + location_chain *nextp; for (nextp = var->var_part[i].loc_chain; nextp; nextp = nextp->next) if (rtx_equal_p (nextp->loc, loc)) { @@ -2069,7 +2068,7 @@ get_addr_from_local_cache (dataflow_set *set, rtx const loc) rtx x; decl_or_value dv; variable var; - location_chain l; + location_chain *l; gcc_checking_assert (GET_CODE (loc) == VALUE); @@ -2246,7 +2245,7 @@ drop_overlapping_mem_locs (variable_def **slot, overlapping_mems *coms) if (var->onepart == ONEPART_VALUE) { - location_chain loc, *locp; + location_chain *loc, **locp; bool changed = false; rtx cur_loc; @@ -2514,7 +2513,7 @@ static void val_reset (dataflow_set *set, decl_or_value dv) { variable var = shared_hash_find (set->vars, dv) ; - 
location_chain node; + location_chain *node; rtx cval; if (!var || !var->n_var_parts) @@ -2695,7 +2694,7 @@ dataflow_set_copy (dataflow_set *dst, dataflow_set *src) struct variable_union_info { /* Node of the location chain. */ - location_chain lc; + location_chain *lc; /* The sum of positions in the input chains. */ int pos; @@ -2765,7 +2764,7 @@ variable_union (variable src, dataflow_set *set) entries are in canonical order. */ if (src->onepart) { - location_chain *nodep, dnode, snode; + location_chain **nodep, *dnode, *snode; gcc_assert (src->n_var_parts == 1 && dst->n_var_parts == 1); @@ -2784,7 +2783,7 @@ variable_union (variable src, dataflow_set *set) if (r > 0) { - location_chain nnode; + location_chain *nnode; if (shared_var_p (dst, set->vars)) { @@ -2794,7 +2793,7 @@ variable_union (variable src, dataflow_set *set) goto restart_onepart_unshared; } - *nodep = nnode = new location_chain_def; + *nodep = nnode = new location_chain; nnode->loc = snode->loc; nnode->init = snode->init; if (!snode->set_src || MEM_P (snode->set_src)) @@ -2852,7 +2851,7 @@ variable_union (variable src, dataflow_set *set) for (k--; k >= 0; k--) { - location_chain node, node2; + location_chain *node, *node2; if (i >= 0 && j >= 0 && VAR_PART_OFFSET (src, i) == VAR_PART_OFFSET (dst, j)) @@ -2901,7 +2900,7 @@ variable_union (variable src, dataflow_set *set) if (dst_l == 1) { /* The most common case, much simpler, no qsort is needed. */ - location_chain dstnode = dst->var_part[j].loc_chain; + location_chain *dstnode = dst->var_part[j].loc_chain; dst->var_part[k].loc_chain = dstnode; VAR_PART_OFFSET (dst, k) = VAR_PART_OFFSET (dst, j); node2 = dstnode; @@ -2911,10 +2910,10 @@ variable_union (variable src, dataflow_set *set) && REGNO (dstnode->loc) == REGNO (node->loc)) || rtx_equal_p (dstnode->loc, node->loc))) { - location_chain new_node; + location_chain *new_node; /* Copy the location from SRC. 
*/ - new_node = new location_chain_def; + new_node = new location_chain; new_node->loc = node->loc; new_node->init = node->init; if (!node->set_src || MEM_P (node->set_src)) @@ -2966,10 +2965,10 @@ variable_union (variable src, dataflow_set *set) } if (jj >= dst_l) /* The location has not been found. */ { - location_chain new_node; + location_chain *new_node; /* Copy the location from SRC. */ - new_node = new location_chain_def; + new_node = new location_chain; new_node->loc = node->loc; new_node->init = node->init; if (!node->set_src || MEM_P (node->set_src)) @@ -3057,15 +3056,15 @@ variable_union (variable src, dataflow_set *set) && VAR_PART_OFFSET (src, i) > VAR_PART_OFFSET (dst, j)) || j < 0) { - location_chain *nextp; + location_chain **nextp; /* Copy the chain from SRC. */ nextp = &dst->var_part[k].loc_chain; for (node = src->var_part[i].loc_chain; node; node = node->next) { - location_chain new_lc; + location_chain *new_lc; - new_lc = new location_chain_def; + new_lc = new location_chain; new_lc->next = NULL; new_lc->init = node->init; if (!node->set_src || MEM_P (node->set_src)) @@ -3087,7 +3086,7 @@ variable_union (variable src, dataflow_set *set) if (flag_var_tracking_uninit) for (i = 0; i < src->n_var_parts && i < dst->n_var_parts; i++) { - location_chain node, node2; + location_chain *node, *node2; for (node = src->var_part[i].loc_chain; node; node = node->next) for (node2 = dst->var_part[i].loc_chain; node2; node2 = node2->next) if (rtx_equal_p (node->loc, node2->loc)) @@ -3184,10 +3183,10 @@ dv_changed_p (decl_or_value dv) any values recursively mentioned in the location lists. VARS must be in star-canonical form. */ -static location_chain +static location_chain * find_loc_in_1pdv (rtx loc, variable var, variable_table_type *vars) { - location_chain node; + location_chain *node; enum rtx_code loc_code; if (!var) @@ -3268,10 +3267,10 @@ struct dfset_merge loc_cmp order, and it is maintained as such. 
*/ static void -insert_into_intersection (location_chain *nodep, rtx loc, +insert_into_intersection (location_chain **nodep, rtx loc, enum var_init_status status) { - location_chain node; + location_chain *node; int r; for (node = *nodep; node; nodep = &node->next, node = *nodep) @@ -3283,7 +3282,7 @@ insert_into_intersection (location_chain *nodep, rtx loc, else if (r > 0) break; - node = new location_chain_def; + node = new location_chain; node->loc = loc; node->set_src = NULL; @@ -3298,16 +3297,16 @@ insert_into_intersection (location_chain *nodep, rtx loc, DSM->dst. */ static void -intersect_loc_chains (rtx val, location_chain *dest, struct dfset_merge *dsm, - location_chain s1node, variable s2var) +intersect_loc_chains (rtx val, location_chain **dest, struct dfset_merge *dsm, + location_chain *s1node, variable s2var) { dataflow_set *s1set = dsm->cur; dataflow_set *s2set = dsm->src; - location_chain found; + location_chain *found; if (s2var) { - location_chain s2node; + location_chain *s2node; gcc_checking_assert (s2var->onepart); @@ -3580,7 +3579,7 @@ canonicalize_loc_order_check (variable_def **slot, dataflow_set *data ATTRIBUTE_UNUSED) { variable var = *slot; - location_chain node, next; + location_chain *node, *next; #ifdef ENABLE_RTL_CHECKING int i; @@ -3617,7 +3616,7 @@ canonicalize_values_mark (variable_def **slot, dataflow_set *set) variable var = *slot; decl_or_value dv = var->dv; rtx val; - location_chain node; + location_chain *node; if (!dv_is_value_p (dv)) return 1; @@ -3655,7 +3654,7 @@ canonicalize_values_star (variable_def **slot, dataflow_set *set) { variable var = *slot; decl_or_value dv = var->dv; - location_chain node; + location_chain *node; decl_or_value cdv; rtx val, cval; variable_def **cslot; @@ -3876,12 +3875,12 @@ canonicalize_vars_star (variable_def **slot, dataflow_set *set) { variable var = *slot; decl_or_value dv = var->dv; - location_chain node; + location_chain *node; rtx cval; decl_or_value cdv; variable_def **cslot; variable 
cvar; - location_chain cnode; + location_chain *cnode; if (!var->onepart || var->onepart == ONEPART_VALUE) return 1; @@ -3939,7 +3938,7 @@ variable_merge_over_cur (variable s1var, struct dfset_merge *dsm) onepart_enum_t onepart = s1var->onepart; rtx val; hashval_t dvhash; - location_chain node, *nodep; + location_chain *node, **nodep; /* If the incoming onepart variable has an empty location list, then the intersection will be just as empty. For other variables, @@ -4030,7 +4029,7 @@ variable_merge_over_cur (variable s1var, struct dfset_merge *dsm) nodep = &dvar->var_part[0].loc_chain; while ((node = *nodep)) { - location_chain *nextp = &node->next; + location_chain **nextp = &node->next; if (GET_CODE (node->loc) == REG) { @@ -4226,7 +4225,7 @@ dataflow_set_merge (dataflow_set *dst, dataflow_set *src2) dataflow_set_init (dst); dst->stack_adjust = cur.stack_adjust; shared_hash_destroy (dst->vars); - dst->vars = new shared_hash_def; + dst->vars = new shared_hash; dst->vars->refcount = 1; dst->vars->htab = new variable_table_type (MAX (src1_elems, src2_elems)); @@ -4338,7 +4337,7 @@ dataflow_set_equiv_regs (dataflow_set *set) static void remove_duplicate_values (variable var) { - location_chain node, *nodep; + location_chain *node, **nodep; gcc_assert (var->onepart); gcc_assert (var->n_var_parts == 1); @@ -4388,7 +4387,7 @@ variable_post_merge_new_vals (variable_def **slot, dfset_post_merge *dfpm) { dataflow_set *set = dfpm->set; variable var = *slot; - location_chain node; + location_chain *node; if (!var->onepart || !var->n_var_parts) return 1; @@ -4524,7 +4523,7 @@ variable_post_merge_perm_vals (variable_def **pslot, dfset_post_merge *dfpm) { dataflow_set *set = dfpm->set; variable pvar = *pslot, var; - location_chain pnode; + location_chain *pnode; decl_or_value dv; attrs att; @@ -4602,13 +4601,13 @@ dataflow_post_merge_adjust (dataflow_set *set, dataflow_set **permp) location list of a one-part variable or value VAR, or in that of any values recursively mentioned 
in the location lists. */ -static location_chain +static location_chain * find_mem_expr_in_1pdv (tree expr, rtx val, variable_table_type *vars) { - location_chain node; + location_chain *node; decl_or_value dv; variable var; - location_chain where = NULL; + location_chain *where = NULL; if (!val) return NULL; @@ -4682,7 +4681,7 @@ dataflow_set_preserve_mem_locs (variable_def **slot, dataflow_set *set) if (var->onepart == ONEPART_VDECL || var->onepart == ONEPART_DEXPR) { tree decl = dv_as_decl (var->dv); - location_chain loc, *locp; + location_chain *loc, **locp; bool changed = false; if (!var->n_var_parts) @@ -4721,7 +4720,7 @@ dataflow_set_preserve_mem_locs (variable_def **slot, dataflow_set *set) rtx old_loc = loc->loc; if (GET_CODE (old_loc) == VALUE) { - location_chain mem_node + location_chain *mem_node = find_mem_expr_in_1pdv (decl, loc->loc, shared_hash_htab (set->vars)); @@ -4790,7 +4789,7 @@ dataflow_set_remove_mem_locs (variable_def **slot, dataflow_set *set) if (var->onepart == ONEPART_VALUE) { - location_chain loc, *locp; + location_chain *loc, **locp; bool changed = false; rtx cur_loc; @@ -4883,7 +4882,7 @@ dataflow_set_clear_at_call (dataflow_set *set, rtx_insn *call_insn) static bool variable_part_different_p (variable_part *vp1, variable_part *vp2) { - location_chain lc1, lc2; + location_chain *lc1, *lc2; for (lc1 = vp1->loc_chain; lc1; lc1 = lc1->next) { @@ -4909,7 +4908,7 @@ variable_part_different_p (variable_part *vp1, variable_part *vp2) static bool onepart_variable_different_p (variable var1, variable var2) { - location_chain lc1, lc2; + location_chain *lc1, *lc2; if (var1 == var2) return false; @@ -6591,7 +6590,7 @@ find_src_set_src (dataflow_set *set, rtx src) tree decl = NULL_TREE; /* The variable being copied around. */ rtx set_src = NULL_RTX; /* The value for "decl" stored in "src". 
*/ variable var; - location_chain nextp; + location_chain *nextp; int i; bool found; @@ -7179,7 +7178,7 @@ static void dump_var (variable var) { int i; - location_chain node; + location_chain *node; if (dv_is_decl_p (var->dv)) { @@ -7499,8 +7498,8 @@ set_slot_part (dataflow_set *set, rtx loc, variable_def **slot, enum var_init_status initialized, rtx set_src) { int pos; - location_chain node, next; - location_chain *nextp; + location_chain *node, *next; + location_chain **nextp; variable var; onepart_enum_t onepart; @@ -7727,7 +7726,7 @@ set_slot_part (dataflow_set *set, rtx loc, variable_def **slot, } /* Add the location to the beginning. */ - node = new location_chain_def; + node = new location_chain; node->loc = loc; node->init = initialized; node->set_src = set_src; @@ -7780,7 +7779,7 @@ clobber_slot_part (dataflow_set *set, rtx loc, variable_def **slot, if (pos >= 0) { - location_chain node, next; + location_chain *node, *next; /* Remove the register locations from the dataflow set. 
*/ next = var->var_part[pos].loc_chain; @@ -7860,8 +7859,8 @@ delete_slot_part (dataflow_set *set, rtx loc, variable_def **slot, if (pos >= 0) { - location_chain node, next; - location_chain *nextp; + location_chain *node, *next; + location_chain **nextp; bool changed; rtx cur_loc; @@ -8220,7 +8219,7 @@ vt_expand_var_loc_chain (variable var, bitmap regs, void *data, bool *pendrecp) { struct expand_loc_callback_data *elcd = (struct expand_loc_callback_data *) data; - location_chain loc, next; + location_chain *loc, *next; rtx result = NULL; int first_child, result_first_child, last_child; bool pending_recursion; @@ -8577,7 +8576,7 @@ emit_note_insn_var_location (variable_def **varp, emit_note_data *data) HOST_WIDE_INT offsets[MAX_VAR_PARTS]; rtx loc[MAX_VAR_PARTS]; tree decl; - location_chain lc; + location_chain *lc; gcc_checking_assert (var->onepart == NOT_ONEPART || var->onepart == ONEPART_VDECL); @@ -8946,7 +8945,7 @@ process_changed_values (variable_table_type *htab) static void emit_notes_for_changes (rtx_insn *insn, enum emit_note_where where, - shared_hash vars) + shared_hash *vars) { emit_note_data data; variable_table_type *htab = shared_hash_htab (vars); @@ -9859,7 +9858,7 @@ vt_initialize (void) alloc_aux_for_blocks (sizeof (struct variable_tracking_info_def)); - empty_shared_hash = new shared_hash_def; + empty_shared_hash = new shared_hash; empty_shared_hash->refcount = 1; empty_shared_hash->htab = new variable_table_type (1); changed_variables = new variable_table_type (10); @@ -10219,8 +10218,8 @@ vt_finalize (void) changed_variables = NULL; attrs_def_pool.release (); var_pool.release (); - location_chain_def_pool.release (); - shared_hash_def_pool.release (); + location_chain_pool.release (); + shared_hash_pool.release (); if (MAY_HAVE_DEBUG_INSNS) { |