author     bviyer <bviyer@138bc75d-0d04-0410-961f-82ee72b054a4>  2012-09-13 18:00:10 +0000
committer  bviyer <bviyer@138bc75d-0d04-0410-961f-82ee72b054a4>  2012-09-13 18:00:10 +0000
commit     5833974a387381edadca641685500d08f60bb825 (patch)
tree       a2a8c0b4fa5b9921052a4576fb1ed9a2d7bfedd0 /gcc
parent     53d8b18f0f6422454370ca38755dd79c577691ed (diff)
parent     8873e58c710666be83748edc9c1b08c4b5436f8c (diff)
download   gcc-5833974a387381edadca641685500d08f60bb825.tar.gz
Merged with trunk at revision 191228.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/cilkplus@191274 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog | 582
-rw-r--r--  gcc/DATESTAMP | 2
-rw-r--r--  gcc/Makefile.in | 7
-rw-r--r--  gcc/ada/ChangeLog | 11
-rw-r--r--  gcc/ada/gcc-interface/decl.c | 13
-rw-r--r--  gcc/ada/gcc-interface/trans.c | 7
-rw-r--r--  gcc/ada/gcc-interface/utils.c | 11
-rw-r--r--  gcc/alias.c | 22
-rw-r--r--  gcc/bitmap.c | 31
-rw-r--r--  gcc/builtins.c | 18
-rw-r--r--  gcc/c-family/c-common.c | 2
-rw-r--r--  gcc/c-family/c-pragma.c | 15
-rw-r--r--  gcc/c/c-decl.c | 8
-rw-r--r--  gcc/c/c-tree.h | 4
-rw-r--r--  gcc/c/c-typeck.c | 6
-rw-r--r--  gcc/cgraph.c | 5
-rw-r--r--  gcc/combine.c | 10
-rw-r--r--  gcc/common.opt | 6
-rw-r--r--  gcc/config.gcc | 16
-rw-r--r--  gcc/config/alpha/predicates.md | 40
-rw-r--r--  gcc/config/arm/arm.c | 498
-rw-r--r--  gcc/config/arm/arm.h | 8
-rw-r--r--  gcc/config/arm/arm.md | 207
-rw-r--r--  gcc/config/arm/neon.md | 31
-rw-r--r--  gcc/config/arm/predicates.md | 24
-rw-r--r--  gcc/config/arm/thumb2.md | 18
-rw-r--r--  gcc/config/arm/vec-common.md | 2
-rw-r--r--  gcc/config/avr/avr-mcus.def | 2
-rw-r--r--  gcc/config/darwin.c | 4
-rw-r--r--  gcc/config/i386/cpuid.h | 54
-rw-r--r--  gcc/config/i386/driver-i386.c | 23
-rw-r--r--  gcc/config/i386/i386.c | 2
-rw-r--r--  gcc/config/i386/i386.md | 8
-rw-r--r--  gcc/config/m68k/m68k.c | 3
-rw-r--r--  gcc/config/mips/gnu-user64.h | 10
-rw-r--r--  gcc/config/mips/mips.c | 4
-rw-r--r--  gcc/config/mips/mips.h | 6
-rw-r--r--  gcc/config/mips/mti-linux.h | 43
-rw-r--r--  gcc/config/mips/t-mti-linux | 24
-rw-r--r--  gcc/config/mmix/constraints.md | 112
-rw-r--r--  gcc/config/mmix/mmix-protos.h | 4
-rw-r--r--  gcc/config/mmix/mmix.c | 93
-rw-r--r--  gcc/config/mmix/mmix.h | 21
-rw-r--r--  gcc/config/mmix/mmix.md | 4
-rw-r--r--  gcc/config/mmix/predicates.md | 12
-rw-r--r--  gcc/config/moxie/moxie.c | 20
-rw-r--r--  gcc/config/openbsd-stdint.h | 3
-rw-r--r--  gcc/config/pa/pa.c | 46
-rw-r--r--  gcc/config/rs6000/aix43.h | 6
-rw-r--r--  gcc/config/rs6000/aix51.h | 6
-rw-r--r--  gcc/config/rs6000/aix52.h | 6
-rw-r--r--  gcc/config/rs6000/aix53.h | 6
-rw-r--r--  gcc/config/rs6000/aix61.h | 6
-rw-r--r--  gcc/config/rs6000/freebsd64.h | 4
-rw-r--r--  gcc/config/rs6000/linux64.h | 4
-rw-r--r--  gcc/config/rs6000/rs6000-c.c | 7
-rw-r--r--  gcc/config/rs6000/rs6000.c | 23
-rw-r--r--  gcc/config/rs6000/rs6000.h | 4
-rw-r--r--  gcc/config/rs6000/rs6000.md | 50
-rw-r--r--  gcc/config/score/score.c | 27
-rw-r--r--  gcc/config/sh/newlib.h | 4
-rw-r--r--  gcc/config/sh/predicates.md | 5
-rw-r--r--  gcc/config/sh/sh.c | 32
-rw-r--r--  gcc/config/sh/sh.h | 34
-rw-r--r--  gcc/config/sh/sh.md | 55
-rw-r--r--  gcc/config/v850/predicates.md | 2
-rw-r--r--  gcc/config/v850/v850.c | 114
-rw-r--r--  gcc/config/v850/v850.h | 10
-rw-r--r--  gcc/config/v850/v850.md | 2
-rwxr-xr-x  gcc/configure | 4
-rw-r--r--  gcc/configure.ac | 4
-rw-r--r--  gcc/cp/ChangeLog | 56
-rw-r--r--  gcc/cp/call.c | 16
-rw-r--r--  gcc/cp/class.c | 8
-rw-r--r--  gcc/cp/cp-tree.h | 6
-rw-r--r--  gcc/cp/decl.c | 43
-rw-r--r--  gcc/cp/except.c | 7
-rw-r--r--  gcc/cp/init.c | 36
-rw-r--r--  gcc/cp/method.c | 5
-rw-r--r--  gcc/cp/name-lookup.c | 20
-rw-r--r--  gcc/cp/optimize.c | 3
-rw-r--r--  gcc/cp/parser.c | 77
-rw-r--r--  gcc/cp/pt.c | 43
-rw-r--r--  gcc/cp/rtti.c | 8
-rw-r--r--  gcc/cp/semantics.c | 58
-rw-r--r--  gcc/cp/typeck.c | 18
-rw-r--r--  gcc/doc/invoke.texi | 16
-rw-r--r--  gcc/doc/sourcebuild.texi | 8
-rw-r--r--  gcc/dojump.c | 3
-rw-r--r--  gcc/double-int.c | 44
-rw-r--r--  gcc/double-int.h | 60
-rw-r--r--  gcc/dwarf2cfi.c | 58
-rw-r--r--  gcc/dwarf2out.c | 87
-rw-r--r--  gcc/emit-rtl.c | 4
-rw-r--r--  gcc/except.c | 16
-rw-r--r--  gcc/expmed.c | 10
-rw-r--r--  gcc/expr.c | 65
-rw-r--r--  gcc/expr.h | 9
-rw-r--r--  gcc/fixed-value.c | 29
-rw-r--r--  gcc/fold-const.c | 132
-rw-r--r--  gcc/fortran/ChangeLog | 19
-rw-r--r--  gcc/fortran/array.c | 13
-rw-r--r--  gcc/fortran/simplify.c | 3
-rw-r--r--  gcc/fortran/trans-intrinsic.c | 7
-rw-r--r--  gcc/fortran/trans-openmp.c | 5
-rw-r--r--  gcc/fwprop.c | 2
-rw-r--r--  gcc/gcc.c | 2
-rw-r--r--  gcc/gcse.c | 8
-rw-r--r--  gcc/genautomata.c | 5
-rw-r--r--  gcc/genextract.c | 2
-rw-r--r--  gcc/genopinit.c | 2
-rw-r--r--  gcc/gimple-fold.c | 79
-rw-r--r--  gcc/gimple-low.c | 2
-rw-r--r--  gcc/gimple-ssa-strength-reduction.c | 106
-rw-r--r--  gcc/gimple.c | 1195
-rw-r--r--  gcc/gimple.h | 3
-rw-r--r--  gcc/go/gofrontend/expressions.cc | 47
-rw-r--r--  gcc/go/gofrontend/gogo-tree.cc | 35
-rw-r--r--  gcc/graphite-scop-detection.c | 20
-rw-r--r--  gcc/graphite-sese-to-poly.c | 2
-rw-r--r--  gcc/graphite.c | 4
-rw-r--r--  gcc/ipa-inline-analysis.c | 8
-rw-r--r--  gcc/ipa-prop.c | 57
-rw-r--r--  gcc/ipa-split.c | 4
-rw-r--r--  gcc/ira.c | 2
-rw-r--r--  gcc/java/class.c | 7
-rw-r--r--  gcc/java/expr.c | 5
-rw-r--r--  gcc/loop-iv.c | 2
-rw-r--r--  gcc/lto-cgraph.c | 198
-rw-r--r--  gcc/lto-streamer.c | 11
-rw-r--r--  gcc/lto-streamer.h | 86
-rw-r--r--  gcc/lto/ChangeLog | 78
-rw-r--r--  gcc/lto/lto-partition.c | 467
-rw-r--r--  gcc/lto/lto-partition.h | 3
-rw-r--r--  gcc/lto/lto.c | 1239
-rw-r--r--  gcc/objc/objc-next-runtime-abi-02.c | 14
-rw-r--r--  gcc/optabs.c | 14
-rw-r--r--  gcc/opts-common.c | 8
-rw-r--r--  gcc/params.def | 13
-rw-r--r--  gcc/passes.c | 36
-rw-r--r--  gcc/read-rtl.c | 15
-rw-r--r--  gcc/ree.c | 7
-rw-r--r--  gcc/reload1.c | 9
-rw-r--r--  gcc/rtl.h | 5
-rw-r--r--  gcc/sel-sched-ir.c | 20
-rw-r--r--  gcc/simplify-rtx.c | 63
-rw-r--r--  gcc/stmt.c | 181
-rw-r--r--  gcc/stor-layout.c | 7
-rw-r--r--  gcc/symtab.c | 2
-rw-r--r--  gcc/system.h | 5
-rw-r--r--  gcc/testsuite/ChangeLog | 136
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/constexpr-virtual2.C | 24
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/constexpr-virtual3.C | 42
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/implicit14.C | 26
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/lambda/lambda-mangle4.C | 13
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/sfinae40.C | 21
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/sfinae41.C | 17
-rw-r--r--  gcc/testsuite/g++.dg/tree-ssa/pr54515.C | 19
-rw-r--r--  gcc/testsuite/gcc.dg/54455.c | 25
-rw-r--r--  gcc/testsuite/gcc.dg/pr44194-1.c | 4
-rw-r--r--  gcc/testsuite/gcc.dg/pr52558-1.c | 22
-rw-r--r--  gcc/testsuite/gcc.dg/pr52558-2.c | 23
-rw-r--r--  gcc/testsuite/gcc.dg/simulate-thread/speculative-store-2.c | 74
-rw-r--r--  gcc/testsuite/gcc.dg/simulate-thread/speculative-store-3.c | 71
-rw-r--r--  gcc/testsuite/gcc.dg/simulate-thread/speculative-store-4.c | 54
-rw-r--r--  gcc/testsuite/gcc.dg/tm/memopt-16.c | 43
-rw-r--r--  gcc/testsuite/gcc.dg/tm/reg-promotion.c | 2
-rw-r--r--  gcc/testsuite/gcc.dg/torture/pr54520.c | 15
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/forwprop-20.c | 70
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/forwprop-21.c | 13
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/forwprop-22.c | 18
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/strlen-1.c | 17
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/vrp80-2.c | 38
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/vrp80.c | 33
-rw-r--r--  gcc/testsuite/gcc.target/arm/neon-vfma-1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/neon-vfms-1.c | 12
-rw-r--r--  gcc/testsuite/gcc.target/arm/neon-vmla-1.c | 4
-rw-r--r--  gcc/testsuite/gcc.target/arm/neon-vmls-1.c | 4
-rw-r--r--  gcc/testsuite/gcc.target/arm/neon-vset_lanes8.c | 7
-rw-r--r--  gcc/testsuite/gcc.target/arm/pr48252.c | 19
-rw-r--r--  gcc/testsuite/gcc.target/arm/pr50318-1.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/arm/pr51835.c | 3
-rw-r--r--  gcc/testsuite/gcc.target/arm/smlaltb-1.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/arm/smlaltt-1.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/sh/pr54089-3.c | 40
-rw-r--r--  gcc/testsuite/gfortran.dg/array_section_3.f90 | 17
-rw-r--r--  gcc/testsuite/gfortran.dg/bound_simplification_3.f90 | 23
-rw-r--r--  gcc/testsuite/gfortran.dg/coarray_10.f90 | 6
-rw-r--r--  gcc/testsuite/gfortran.dg/coarray_28.f90 | 10
-rw-r--r--  gcc/testsuite/gfortran.dg/promotion_2.f90 | 16
-rw-r--r--  gcc/testsuite/lib/target-supports.exp | 78
-rw-r--r--  gcc/trans-mem.c | 13
-rw-r--r--  gcc/tree-affine.c | 73
-rw-r--r--  gcc/tree-call-cdce.c | 4
-rw-r--r--  gcc/tree-cfg.c | 6
-rw-r--r--  gcc/tree-data-ref.c | 20
-rw-r--r--  gcc/tree-dfa.c | 78
-rw-r--r--  gcc/tree-diagnostic.c | 2
-rw-r--r--  gcc/tree-emutls.c | 28
-rw-r--r--  gcc/tree-flow-inline.h | 4
-rw-r--r--  gcc/tree-inline.c | 1
-rw-r--r--  gcc/tree-object-size.c | 9
-rw-r--r--  gcc/tree-pass.h | 6
-rw-r--r--  gcc/tree-predcom.c | 17
-rw-r--r--  gcc/tree-pretty-print.c | 8
-rw-r--r--  gcc/tree-sra.c | 43
-rw-r--r--  gcc/tree-ssa-address.c | 28
-rw-r--r--  gcc/tree-ssa-alias.c | 73
-rw-r--r--  gcc/tree-ssa-alias.h | 2
-rw-r--r--  gcc/tree-ssa-ccp.c | 156
-rw-r--r--  gcc/tree-ssa-coalesce.c | 4
-rw-r--r--  gcc/tree-ssa-dom.c | 6
-rw-r--r--  gcc/tree-ssa-forwprop.c | 266
-rw-r--r--  gcc/tree-ssa-loop-im.c | 11
-rw-r--r--  gcc/tree-ssa-loop-ivopts.c | 36
-rw-r--r--  gcc/tree-ssa-loop-niter.c | 72
-rw-r--r--  gcc/tree-ssa-math-opts.c | 6
-rw-r--r--  gcc/tree-ssa-phiopt.c | 4
-rw-r--r--  gcc/tree-ssa-pre.c | 1028
-rw-r--r--  gcc/tree-ssa-reassoc.c | 4
-rw-r--r--  gcc/tree-ssa-sccvn.c | 57
-rw-r--r--  gcc/tree-ssa-structalias.c | 125
-rw-r--r--  gcc/tree-ssa-threadupdate.c | 14
-rw-r--r--  gcc/tree-ssa.c | 9
-rw-r--r--  gcc/tree-switch-conversion.c | 28
-rw-r--r--  gcc/tree-vect-generic.c | 5
-rw-r--r--  gcc/tree-vect-loop-manip.c | 6
-rw-r--r--  gcc/tree-vectorizer.h | 2
-rw-r--r--  gcc/tree-vrp.c | 416
-rw-r--r--  gcc/tree.c | 54
-rw-r--r--  gcc/tree.h | 15
-rw-r--r--  gcc/var-tracking.c | 32
-rw-r--r--  gcc/varasm.c | 36
-rw-r--r--  gcc/vec.h | 167
235 files changed, 6820 insertions, 4824 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 270af179cd0..345ea6a6287 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,7 +1,578 @@
+2012-09-12 Ganesh Gopalasubramanian <Ganesh.Gopalasubramanian@amd.com>
+
+ * config/i386/i386.md: Comments on fma4 instruction selection
+ now reflect the requirement for a register-pressure-based cost
+ model.
+
+ * config/i386/driver-i386.c (host_detect_local_cpu): The fma4
+ flag is set/reset as informed by the cpuid flag.
+
+ * config/i386/i386.c (processor_alias_table): The fma4 flag is
+ enabled for bdver2.
+
+2012-09-12 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/54489
+ * tree-ssa-pre.c: Include domwalk.h.
+ (in_fre): Remove.
+ (sccvn_valnum_from_value_id): New function.
+ (debug_bitmap_sets_for): Simplify.
+ (get_representative_for): Properly initialize the SCCVN valnum.
+ (create_expression_by_pieces): Likewise.
+ (insert_into_preds_of_block): Likewise.
+ (can_PRE_operation): Remove.
+ (make_values_for_phi): Simplify.
+ (compute_avail): Likewise.
+ (do_SCCVN_insertion): Remove.
+ (eliminate_avail, eliminate_push_avail, eliminate_insert):
+ New functions.
+ (eliminate): Split and perform a domwalk.
+ (eliminate_bb): Former eliminate part that is now dom-enter.
+ (eliminate_leave_block): New function.
+ (fini_eliminate): Likewise.
+ (init_pre): Simplify.
+ (fini_pre): Likewise.
+ (execute_pre): Fold into do_pre and do_fre.
+ (do_pre): Consume execute_pre.
+ (do_fre): Likewise.
+ * Makefile.in (tree-ssa-pre.o): Add domwalk.h dependency.
+
+2012-09-12 Diego Novillo <dnovillo@google.com>
+
+ * vec.h: Remove compatibility notes for previous distinction
+ between vectors of objects and vectors of pointers.
+
+2012-09-12 Christian Bruel <christian.bruel@st.com>
+
+ * config/sh/newlib.h (NO_IMPLICIT_EXTERN_C): Define.
+
+2012-09-12 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * rtl.h (FFS, FLOAT, ABS, PC): Don't undef.
+ * system.h (FFS, FLOAT, ABS, PC): Undef.
+
+2012-09-12 Richard Guenther <rguenther@suse.de>
+
+ PR middle-end/54553
+ * common.opt (finline): Mark with Optimization.
+
+2012-09-12 Jakub Jelinek <jakub@redhat.com>
+
+ * config.gcc: Obsolete picochip-*.
+
+2012-09-12 Nathan Froyd <froydnj@gcc.gnu.org>
+ Hans-Peter Nilsson <hp@bitrange.com>
+
+ * config/mmix/mmix.h (MMIX_REG_OK_STRICT): Delete.
+ (REG_CLASS_FROM_LETTER, CONST_OK_FOR_LETTER_P): Delete.
+ (CONST_DOUBLE_OK_FOR_LETTER_P, EXTRA_CONSTRAINT): Delete.
+ * config/mmix/mmix-protos.h (mmix_intval): Declare.
+ (mmix_const_ok_for_letter_p, mmix_extra_constraint): Delete.
+ (mmix_const_double_ok_for_letter_p): Delete.
+ * config/mmix/constraints.md: New file.
+ * config/mmix/mmix.md: Include it.
+ (iordi3): Fix typo; use "I" instead of undefined "H" constraint.
+ ("*call_real"): Update comment about not using the "p" constraint.
+ * config/mmix/predicates.md (mmix_reg_or_8bit_operand): Use
+ satisfies_constraint_I.
+ (mmix_address_operand): New predicate.
+ (mmix_symbolic_or_address_operand): Use it instead of address_operand.
+ * config/mmix/mmix.c: #include tm-constrs.h.
+ (mmix_intval): Delete declaration. Make non-static.
+ (mmix_const_ok_for_letter_p, mmix_extra_constraint): Delete.
+ (mmix_const_double_ok_for_letter_p): Delete.
+ (mmix_legitimate_address_p): Use satisfies_constraint_I.
+ (mmix_print_operand_address): Likewise.
+ (mmix_emit_sp_add): Adjust to use insn_const_int_ok_for_constraint
+ when matching "L" constraint.
+
+2012-09-11 Steven Bosscher <steven@gcc.gnu.org>
+
+ * tree.h (expand_case): Move prototype ...
+ * expr.h (expand_case): ...here.
+ (expand_sjlj_dispatch_table): New prototype.
+ * stmt.c: Include pointer-set.h instead of bitmap.h.
+ (expand_case): Use a pointer set instead of a bitmap for
+ already-seen labels. Fold label values here.
+ (add_case_node): Don't fold label values here.
+ (expand_sjlj_dispatch_table): New function.
+ * except.c (sjlj_emit_dispatch_table): Use it.
+
+2012-09-11 Marc Glisse <marc.glisse@inria.fr>
+
+ * tree-ssa-forwprop.c (simplify_vector_constructor): New function.
+ (ssa_forward_propagate_and_combine): Call it.
+
+2012-09-11 Diego Novillo <dnovillo@google.com>
+
+ * var-tracking.c (vt_add_function_parameter): Adjust for VEC changes.
+
+2012-09-11 Dominique Dhumieres <dominiq@lps.ens.fr>
+
+ * config/darwin.c (darwin_asm_named_section): Adjust for VEC changes.
+ (darwin_asm_dwarf_section): Likewise.
+
+2012-09-11 Martin Jambor <mjambor@suse.cz>
+
+ * dwarf2out.c (dwarf2out_abstract_function): Do not change cfun.
+ (premark_used_types): New parameter fun, use it instead of cfun.
+ (gen_subprogram_die): Use DECL_STRUCT_FUNCTION (decl) instead of cfun,
+ also pass it to premark_used_types.
+
+2012-09-11 Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
+
+ * doc/sourcebuild.texi (arm_neon_v2_ok): Adjust command line.
+
+2012-09-11 Tobias Burnus <burnus@net-b.de>
+
+ * doc/sourcebuild.texi (arm_neon_v2_ok): Fix @anchor.
+
+2012-09-11 Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
+ Matthew Gretton-Dann <matthew.gretton-dann@arm.com>
+
+ * config/arm/neon.md (fma<VCVTF:mode>4): New pattern.
+ (*fmsub<VCVTF:mode>4): Likewise.
+ * doc/sourcebuild.texi (arm_neon_v2_ok, arm_neon_v2_hw): Document it.
+
+2012-09-11 Aldy Hernandez <aldyh@redhat.com>
+
+ PR middle-end/54149
+ * tree-ssa-loop-im.c (execute_sm_if_changed_flag_set): Only set
+ flag for writes.
+
+2012-09-11 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
+
+ PR tree-optimization/54492
+ * doc/invoke.texi (max-slsr-cand-scan): New description.
+ * gimple-ssa-strength-reduction.c (find_basis_for_candidate): Limit
+ the time spent searching for a basis.
+ * params.def (PARAM_MAX_SLSR_CANDIDATE_SCAN): New param.
+
+2012-09-11 Richard Guenther <rguenther@suse.de>
+
+ * gimple.h (gimple_register_type): Remove.
+ (print_gimple_types_stats): Adjust prototype.
+ * lto-streamer.h (print_lto_report): Likewise.
+ * lto-streamer.c (print_lto_report): Adjust.
+ * gimple.c (gimple_types, type_hash_cache, enum gtc_mode,
+ struct type_pair_d, lookup_type_pair, struct sccs,
+ next_dfs_num, gtc_next_dfs_num, struct gimple_type_leader_entry_s,
+ gimple_type_leader, gimple_lookup_type_leader, compare_type_names_p,
+ gtc_visit, gimple_types_compatible_p_1, gimple_types_compatible_p,
+ visit, iterative_hash_name, struct type_hash_pair,
+ type_hash_pair_compare, iterative_hash_gimple_type, gimple_type_hash,
+ gimple_type_eq, gimple_register_type_1, gimple_register_type):
+ Move to lto/lto.c.
+ (print_gimple_types_stats): Adjust.
+ (free_gimple_type_tables): Likewise.
+
+2012-09-11 Richard Guenther <rguenther@suse.de>
+
+ * graphite-scop-detection.c (move_sd_regions): Adjust for VEC changes.
+ (scopdet_basic_block_info): Likewise.
+ (build_scops_1): Likewise.
+ (limit_scops): Likewise.
+
+2012-09-11 Richard Guenther <rguenther@suse.de>
+
+ PR middle-end/54515
+ * gimple.c (get_base_address): Do not return NULL_TREE apart
+ from for WITH_SIZE_EXPR.
+ * gimple-fold.c (canonicalize_constructor_val): Do not call
+ get_base_address when not necessary.
+
+2012-09-10 Andrew Pinski <apinski@cavium.com>
+
+ PR tree-opt/54362
+ * trans-mem.c (thread_private_new_memory): Handle COND_EXPR also.
+
+2012-09-10 Maxim Kuvyrkov <maxim@codesourcery.com>
+
+ * config/m68k/m68k.c (m68k_sched_dfa_post_advance_cycle): Support
+ starting scheduling from a pre-existing state.
+
+2012-09-10 Diego Novillo <dnovillo@google.com>
+
+ * vec.h (vec_t::quick_push): Remove overload that accepts 'T *'.
+ Update all users.
+ (vec_t::safe_push): Likewise.
+ (vec_t::quick_insert): Likewise.
+ (vec_t::lower_bound): Likewise.
+ (vec_t::safe_insert): Likewise.
+ (vec_t::replace): Change second argument to 'T &'.
+
+2012-09-10 Maciej W. Rozycki <macro@codesourcery.com>
+
+ * config/rs6000/rs6000.md: Move a splitter next to its insn.
+
+2012-09-10 Oleg Endo <olegendo@gcc.gnu.org>
+
+ PR target/54089
+ * config/sh/sh.h (SH_DYNAMIC_SHIFT_COST): Set always to 1 if
+ dynamic shifts are available.
+ (SHIFT_COUNT_TRUNCATED): Always define to 0. Correct comment.
+ * config/sh/sh.c (ashl_lshr_seq, ext_ashl_lshr_seq): Add comments.
+ * config/sh/predicates.md (shift_count_operand): Allow
+ arith_reg_operand even if TARGET_DYNSHIFT is false.
+ * config/sh/sh.md (ashlsi3, lshrsi3): Expand library call patterns
+ if needed.
+ (ashlsi3_d_call, lshrsi3_d_call): New insns.
+
+2012-09-10 Richard Sandiford <rdsandiford@googlemail.com>
+
+ * ira.c (setup_pressure_classes): Handle synonymous classes.
+
+2012-09-10 Marc Glisse <marc.glisse@inria.fr>
+
+ * tree-ssa-forwprop.c (simplify_bitfield_ref): New function.
+ (ssa_forward_propagate_and_combine): Call it.
+
+2012-09-10 Steve Ellcey <sellcey@mips.com>
+
+ * config.gcc: Add mips*-mti-linux* target
+ Handle with_synci like other options.
+ * config/mips/gnu-user64.h (LINUX64_DRIVER_SELF_SPECS): New.
+ (DRIVER_SELF_SPECS): Define in terms of LINUX64_DRIVER_SELF_SPECS.
+ * config/mips/mips.h (MIPS_ISA_SYNCI_SPEC): New.
+ * config/mips/mti-linux.h: New file.
+ * config/mips/t-mti-linux: New file.
+
+2012-09-10 Marc Glisse <marc.glisse@inria.fr>
+
+ * tree-ssa-forwprop.c (simplify_permutation): Handle CONSTRUCTOR.
+
+2012-09-10 Martin Jambor <mjambor@suse.cz>
+
+ * params.def (PARAM_IPA_MAX_AGG_ITEMS): New parameter.
+ * ipa-prop.c: Include params.h.
+ (IPA_MAX_AFF_JF_ITEMS): Removed.
+ (determine_known_aggregate_parts): Use param value of
+ PARAM_IPA_MAX_AGG_ITEMS instead of IPA_MAX_AFF_JF_ITEMS.
+ * Makefile.in (ipa-prop.o): Add PARAMS_H dependency.
+
+2012-09-10 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/54520
+ * tree-ssa-threadupdate.c (def_split_header_continue_p):
+ Properly consider sub-loops.
+
+2012-09-10 Richard Henderson <rth@redhat.com>
+
+ * config/alpha/predicates.md (small_symbolic_operand): Disallow
+ large offsets.
+
+2012-09-10 Georg-Johann Lay <avr@gjlay.de>
+
+ PR target/54536
+ * config/avr/avr-mcus.def (at90usb1287): Set LIBRARY_NAME to "usb1287".
+
+2012-09-10 Jan Hubicka <jh@suse.cz>
+
+ * lto-cgraph.c (compute_ltrans_boundary): Do not care about aliases.
+ * symtab.c (symtab_make_decl_local): Remove user defined visibility
+ when making symbol local.
+
+2012-09-09 Mark Kettenis <kettenis@gnu.org>
+
+ * config/openbsd-stdint.h (INTMAX_TYPE, UINTMAX_TYPE): Define.
+
+2012-09-09 Jan Hubicka <jh@suse.cz>
+
+ * passes.c (ipa_write_summaries_1): Set state;
+ do not call compute_ltrans_boundary.
+ (ipa_write_optimization_summaries): Likewise.
+ (ipa_write_summaries): Call compute_ltrans_boundary here.
+ * lto-streamer.h (lto_symtab_encoder_d): NODES are allocated in heap.
+ (compute_ltrans_boundary): Update prototype.
+
+2012-09-09 Ulrich Drepper <drepper@gmail.com>
+
+ * config/i386/cpuid.h: Define signature_*_e[bcd]x macros for
+ matching results of level 0 calls to __cpuid to processor
+ manufacturers.
+ * config/i386/driver-i386.c (vendor_signatures): Removed.
+ (processor_signatures): Removed.
+ (host_detect_local_cpu): Replace uses of now-removed SIG_*
+ constants with the new signature_*_ebx constants.
+
+2012-09-08 Jan Hubicka <jh@suse.cz>
+
+ Replace cgraph_node_set and varpool_node_set by symtab_node_encoder
+ in partitioning.
+ * tree-pass.h (cgraph_node_set_def, varpool_node_set_def): Remove
+ forward declaration.
+ (lto_symtab_encoder_d): Forward declare.
+ (ipa_write_optimization_summaries): Update.
+ * lto-cgraph.c (lto_symtab_encoder_new): Do not initialize
+ body, initializer and in_partition.
+ (lto_symtab_encoder_delete): Update.
+ (lto_symtab_encoder_encode): Update.
+ (lto_symtab_encoder_lookup): Move inline.
+ (lto_symtab_encoder_delete_node): New function.
+ (lto_symtab_encoder_encode_body_p, lto_set_symtab_encoder_encode_body,
+ lto_symtab_encoder_encode_initializer_p,
+ lto_set_symtab_encoder_encode_initializer,
+ lto_symtab_encoder_in_partition_p,
+ lto_set_symtab_encoder_in_partition): Update.
+ (compute_ltrans_boundary): Take encoder as an input.
+ * passes.c (ipa_write_summaries_1): Update.
+ (ipa_write_summaries_1): Update.
+ (ipa_write_summaries): Update.
+ (ipa_write_optimization_summaries): Update.
+ * lto-streamer.c (print_lto_report): Report number of cgraph nodes.
+ * lto-streamer.h (lto_stats_d): Replace num_output_cgraph_nodes by
+ num_output_symtab_nodes.
+ (lto_encoder_entry): New structure.
+ (struct lto_symtab_encoder_d): Reorg.
+ (lto_symtab_encoder_delete_node): Declare.
+ (lto_symtab_encoder_lookup): Bring inline.
+ (compute_ltrans_boundary): Update.
+ (lto_symtab_encoder_size): Update.
+ (lsei_node, lsei_cgraph_node, lsei_varpool_node): Update.
+ (lto_symtab_encoder_deref): Update.
+
+2012-09-08 John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
+
+ * config/pa/pa.c (hppa_rtx_costs): Update costs for large
+ integer modes.
+
+2012-09-08 Andi Kleen <ak@linux.intel.com>
+
+ * gcc/lto/lto.c (do_whole_program_analysis): Fix last broken patch.
+
+2012-09-08 Andi Kleen <ak@linux.intel.com>
+
+ * gcc/common.opt (-fmem-report-wpa): Add.
+ * gcc/doc/invoke.texi (-fmem-report-wpa): Document.
+ * gcc/lto/lto.c (do_whole_program_analysis): Run mem_report
+ when mem_report_wpa is set.
+
+2012-09-07 Anthony Green <green@moxielogic.com>
+
+ * config/moxie/moxie.c (moxie_expand_prologue): Optimize prologue
+ for functions with large static stack requirements.
+ (moxie_expand_epilogue): Use $r12 instead of $r5 for pulling saved
+ values off of the stack.
+
+2012-09-07 Nick Clifton <nickc@redhat.com>
+
+ * config/v850/v850.h (DBX_DEBUGGING_INFO): Define.
+ (ASM_GENERATE_INTERNAL_LABEL): Define if not already provided.
+ * config/v850/v850.c (compute_register_save_size): Always include
+ the link pointer.
+ (increment_stack): New function - emits insns to increment or
+ decrement the stack pointer.
+ (expand_prologue, expand_epilogue): Use it.
+ (expand_prologue): Set the function stack size, if requested.
+ (v850_debug_unwind_info): New function.
+ (TARGET_DEBUG_UNWIND_INFO): Define.
+
+2012-09-07 Richard Earnshaw <rearnsha@arm.com>
+
+ PR tree-ssa/54295
+ * tree-ssa-math-opts.c (widening_mult_conversion_strippable_p):
+ Sign-extension of a zero-extended value can be simplified to
+ just zero-extension.
+
+2012-09-07 Richard Guenther <rguenther@suse.de>
+
+ PR middle-end/53667
+ * tree-ssa-structalias.c (handle_rhs_call): Properly clobber
+ EAF_NOESCAPED arguments. Transitively close non-EAF_DIRECT
+ arguments separately.
+
+2012-09-07 Steven Bosscher <steven@gcc.gnu.org>
+
+ * bitmap.c (bitmap_last_set_bit): Rewrite to return the correct bit.
+
+ * graphite.c (print_global_statistics): Use EDGE_COUNT instead
+ of VEC_length.
+ (print_graphite_scop_statistics): Likewise.
+ * graphite-scop-detection.c (get_bb_type): Use single_succ_p.
+ (print_graphite_scop_statistics): Use EDGE_COUNT, not VEC_length.
+ (canonicalize_loop_closed_ssa): Use single_pred_p.
+
+ * alias.c (reg_seen): Make this an sbitmap.
+ (record_set, init_alias_analysis): Update.
+
+ * tree-ssa-coalesce.c (ssa_conflicts_dump): Fix dumping.
+
+2012-09-07 Tom de Vries <tom@codesourcery.com>
+
+ PR tree-optimization/53986
+ * tree-vrp.c (extract_range_from_multiplicative_op_1): Allow
+ LSHIFT_EXPR.
+ (extract_range_from_binary_expr_1): Handle LSHIFT with constant
+ range as shift amount.
+
+2012-09-07 Segher Boessenkool <segher@kernel.crashing.org>
+
+ * config/rs6000/aix43.h (RS6000_CALL_GLUE): Delete.
+ * config/rs6000/aix51.h (RS6000_CALL_GLUE): Delete.
+ * config/rs6000/aix52.h (RS6000_CALL_GLUE): Delete.
+ * config/rs6000/aix53.h (RS6000_CALL_GLUE): Delete.
+ * config/rs6000/aix61.h (RS6000_CALL_GLUE): Delete.
+ * config/rs6000/freebsd64.h (RS6000_CALL_GLUE): Delete.
+ * config/rs6000/linux64.h (RS6000_CALL_GLUE): Delete.
+ * config/rs6000/rs6000.c (print_operand) ['.']: Delete.
+ * config/rs6000/rs6000.h (RS6000_CALL_GLUE): Delete.
+ * config/rs6000/rs6000.md (tls_gd_aix<TLSmode:tls_abi_suffix>):
+ Replace %. with nop.
+ (tls_gd_call_aix<TLSmode:tls_abi_suffix>): Ditto.
+ (tls_ld_aix<TLSmode:tls_abi_suffix>): Ditto.
+ (tls_ld_call_aix<TLSmode:tls_abi_suffix>): Ditto.
+ (call_nonlocal_aix32): Ditto.
+ (call_nonlocal_aix64): Ditto.
+ (call_value_nonlocal_aix32): Ditto.
+ (call_value_nonlocal_aix64): Ditto.
+
+2012-09-06 Andi Kleen <ak@linux.intel.com>
+
+ * doc/invoke.texi (-ffat-lto-objects): Clarify that gcc-ar
+ et al. should be used.
+
+2012-09-06 Andi Kleen <ak@linux.intel.com>
+
+ * gcc/lto-streamer.h (res_pair): Add.
+ (lto_file_decl_data): Replace resolutions with respairs.
+ Add max_index.
+ * gcc/lto/lto.c (lto_resolution_read): Remove max_index. Add rp.
+ Initialize respairs.
+ (lto_file_finalize): Set up resolutions vector lazily from respairs.
+
+2012-09-06 Lawrence Crowl <crowl@google.com>
+
+ * double-int.h (double_int::operator &=): New.
+ (double_int::operator ^=): New.
+ (double_int::operator |=): New.
+ (double_int::mul_with_sign): Modify overflow parameter to bool*.
+ (double_int::add_with_sign): New.
+ (double_int::ule): New.
+ (double_int::sle): New.
+ (binary double_int::operator *): Remove parameter name.
+ (binary double_int::operator +): Likewise.
+ (binary double_int::operator -): Likewise.
+ (binary double_int::operator &): Likewise.
+ (double_int::operator |): Likewise.
+ (double_int::operator ^): Likewise.
+ (double_int::and_not): Likewise.
+ (double_int::from_shwi): Tidy formatting.
+ (double_int::from_uhwi): Likewise.
+ (double_int::from_uhwi): Likewise.
+ * double-int.c (double_int::mul_with_sign): Modify overflow parameter
+ to bool*.
+ (double_int::add_with_sign): New.
+ (double_int::ule): New.
+ (double_int::sle): New.
+ * builtins.c: Modify to use the new double_int interface.
+ * cgraph.c: Likewise.
+ * combine.c: Likewise.
+ * dwarf2out.c: Likewise.
+ * emit-rtl.c: Likewise.
+ * expmed.c: Likewise.
+ * expr.c: Likewise.
+ * fixed-value.c: Likewise.
+ * fold-const.c: Likewise.
+ * gimple-fold.c: Likewise.
+ * gimple-ssa-strength-reduction.c: Likewise.
+ * simplify-rtx.c: Likewise.
+ * ipa-prop.c: Likewise.
+ * loop-iv.c: Likewise.
+ * optabs.c: Likewise.
+ * stor-layout.c: Likewise.
+ * tree-affine.c: Likewise.
+ * tree-cfg.c: Likewise.
+ * tree-dfa.c: Likewise.
+ * tree-flow-inline.h: Likewise.
+ * tree-object-size.c: Likewise.
+ * tree-predcom.c: Likewise.
+ * tree-pretty-print.c: Likewise.
+ * tree-sra.c: Likewise.
+ * tree-ssa-address.c: Likewise.
+ * tree-ssa-alias.c: Likewise.
+ * tree-ssa-ccp.c: Likewise.
+ * tree-ssa-forwprop.c: Likewise.
+ * tree-ssa-loop-ivopts.c: Likewise.
+ * tree-ssa-loop-niter.c: Likewise.
+ * tree-ssa-phiopt.c: Likewise.
+ * tree-ssa-pre.c: Likewise.
+ * tree-ssa-sccvn.c: Likewise.
+ * tree-ssa-structalias.c: Likewise.
+ * tree-ssa.c: Likewise.
+ * tree-switch-conversion.c: Likewise.
+ * tree-vect-loop-manip.c: Likewise.
+ * tree-vrp.c: Likewise.
+ * tree.h: Likewise.
+ * tree.c: Likewise.
+ * varasm.c: Likewise.
+
+2012-09-06 Uros Bizjak <ubizjak@gmail.com>
+
+ * configure.ac (hle prefixes): Remove .code64 directive.
+ * configure: Regenerated.
+
+2012-09-06 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * config/arm/arm.c: Use CONST_INT_P, CONST_DOUBLE_P, REG_P, MEM_P,
+ LABEL_P, JUMP_P, CALL_P, NOTE_P, BARRIER_P consistently.
+ * config/arm/arm.h: Use REG_P, MEM_P consistently.
+ * config/arm/arm.md: Use CONST_INT_P, REG_P, MEM_P, CONST_DOUBLE_P
+ consistently.
+ * config/arm/neon.md: Use REG_P consistently.
+ * config/arm/predicates.md: Use CONST_INT_P, REG_P, MEM_P consistently.
+ * config/arm/thumb2.md: Use CONST_INT_P, REG_P consistently.
+ * config/arm/vec-common.md: Use REG_P consistently.
+
+2012-09-06 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/54498
+ * tree-ssa-alias.h (get_continuation_for_phi): Add flag to
+ abort when reaching an already visited region.
+ * tree-ssa-alias.c (maybe_skip_until): Likewise. And do it.
+ (get_continuation_for_phi_1): Likewise.
+ (walk_non_aliased_vuses): When we translated the reference,
+ abort when we re-visit a region.
+ * tree-ssa-pre.c (translate_vuse_through_block): Adjust.
+
+2012-09-06 David Edelsohn <dje.gcc@gmail.com>
+
+ * config/rs6000/rs6000.c (rs6000_xcoff_asm_named_section): Add TLS
+ section.
+ * config/rs6000/rs6000.c (rs6000_debug_address_cost): Add new
+ arguments to TARGET_ADDRESS_COST call.
+
+2012-09-06 Richard Guenther <rguenther@suse.de>
+
+ * tree.h (MOVE_NONTEMPORAL): Remove.
+ * tree-pretty-print.c (dump_generic_node): Remove
+ MOVE_NONTEMPORAL handling.
+ * expr.c (expand_expr_real_1): Likewise.
+
+2012-09-06 Richard Guenther <rguenther@suse.de>
+
+ * passes.c (execute_function_todo): Call compute_may_aliases
+ only if flag_tree_pta is set.
+
+2012-09-06 Andrew Pinski <apinski@cavium.com>
+
+ PR tree-opt/54494
+ * tree-inline.c (remap_gimple_op_r): Copy TREE_SIDE_EFFECTS also.
+
+2012-09-06 Jakub Jelinek <jakub@redhat.com>
+
+ PR rtl-optimization/54455
+ * sel-sched-ir.c (maybe_tidy_empty_bb): Give up if previous fallthru
+ bb ends up with asm goto referencing bb's label.
+
+2012-09-06 Chen Liqin <liqin.gcc@gmail.com>
+
+ * config/score/score.c: Remove TARGET_LEGITIMIZE_ADDRESS define and
+ score_legitimize_address function, use compiler default code instead.
+
2012-09-05 Jan Hubicka <jh@suse.cz>
- * ipa-inline.c (want_inline_small_function_p): Use INLINE_HINT_loop_iterations
- hint.
+ * ipa-inline.c (want_inline_small_function_p): Use
+ INLINE_HINT_loop_iterations hint.
2012-09-05 Andrew Pinski <apinski@cavium.com>
@@ -78,8 +649,7 @@
2012-09-05 Diego Novillo <dnovillo@google.com>
PR bootstrap/54484
- * vec.h (vec_t::lower_bound): Fix spelling of LESSTHAN
- argument.
+ * vec.h (vec_t::lower_bound): Fix spelling of LESSTHAN argument.
2012-09-05 Jakub Jelinek <jakub@redhat.com>
@@ -362,7 +932,7 @@
* config/sh/sh.md (cbranchsi4): Remove TARGET_CBRANCHDI4 check and
always invoke expand_cbranchsi4.
-2012-09-03 Andi Kleen <ak@linux.intel.com>
+2012-09-03 Andi Kleen <ak@linux.intel.com>
* tree-ssa-sccvn.c (vn_reference_fold_indirect): Initialize
addr_offset always.
@@ -14555,7 +15125,7 @@
* cgraphunit.c (cgraph_analyze_function): Use gimple_has_body_p.
2012-05-02 Kirill Yukhin <kirill.yukhin@intel.com>
- Andi Kleen <ak@linux.intel.com>
+ Andi Kleen <ak@linux.intel.com>
* coretypes.h (MEMMODEL_MASK): New.
* builtins.c (get_memmodel): Add val. Call target.memmodel_check
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index abe4c3a7b99..a239852583d 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20120905
+20120912
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index e45cbc65423..0cba9d2df47 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -2250,7 +2250,8 @@ tree-ssa-dse.o : tree-ssa-dse.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
tree-ssa-forwprop.o : tree-ssa-forwprop.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
$(TM_H) $(TREE_H) $(TM_P_H) $(BASIC_BLOCK_H) $(CFGLOOP_H) \
$(TREE_FLOW_H) $(TREE_PASS_H) $(DIAGNOSTIC_H) \
- langhooks.h $(FLAGS_H) $(GIMPLE_H) $(GIMPLE_PRETTY_PRINT_H) $(EXPR_H)
+ langhooks.h $(FLAGS_H) $(GIMPLE_H) $(GIMPLE_PRETTY_PRINT_H) $(EXPR_H) \
+ $(TREE_VECTORIZER_H)
tree-ssa-phiprop.o : tree-ssa-phiprop.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
$(TM_H) $(TREE_H) $(TM_P_H) $(BASIC_BLOCK_H) \
$(TREE_FLOW_H) $(TREE_PASS_H) $(DIAGNOSTIC_H) \
@@ -2322,7 +2323,7 @@ tree-ssa-pre.o : tree-ssa-pre.c $(TREE_FLOW_H) $(CONFIG_H) \
$(TM_H) coretypes.h $(TREE_PASS_H) $(FLAGS_H) langhooks.h \
$(CFGLOOP_H) alloc-pool.h $(BASIC_BLOCK_H) $(BITMAP_H) $(HASH_TABLE_H) \
$(GIMPLE_H) $(TREE_INLINE_H) tree-iterator.h tree-ssa-sccvn.h $(PARAMS_H) \
- $(DBGCNT_H) tree-scalar-evolution.h $(GIMPLE_PRETTY_PRINT_H)
+ $(DBGCNT_H) tree-scalar-evolution.h $(GIMPLE_PRETTY_PRINT_H) domwalk.h
tree-ssa-sccvn.o : tree-ssa-sccvn.c $(TREE_FLOW_H) $(CONFIG_H) \
$(SYSTEM_H) $(TREE_H) $(DIAGNOSTIC_H) \
$(TM_H) coretypes.h dumpfile.h $(FLAGS_H) $(CFGLOOP_H) \
@@ -2857,7 +2858,7 @@ ipa-prop.o : ipa-prop.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
$(TREE_FLOW_H) $(TM_H) $(TREE_PASS_H) $(FLAGS_H) $(TREE_H) \
$(TREE_INLINE_H) $(GIMPLE_H) \
$(GIMPLE_PRETTY_PRINT_H) $(LTO_STREAMER_H) \
- $(DATA_STREAMER_H) $(TREE_STREAMER_H)
+ $(DATA_STREAMER_H) $(TREE_STREAMER_H) $(PARAMS_H)
ipa-ref.o : ipa-ref.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
langhooks.h $(GGC_H) $(TARGET_H) $(CGRAPH_H) $(TREE_H) $(TARGET_H) \
$(TREE_FLOW_H) $(TM_H) $(TREE_PASS_H) $(FLAGS_H) $(TREE_H) $(GGC_H)
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index 06259ebe1fc..bf3aa685691 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,14 @@
+2012-09-10 Diego Novillo <dnovillo@google.com>
+
+ * gcc-interface/decl.c (build_subst_list): Adjust call to VEC_safe_push.
+ (build_variant_list): Likewise.
+ * gcc-interface/utils.c (convert): Adjust calls to VEC_quick_push.
+
+2012-09-07 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/trans.c (Loop_Statement_to_gnu): Revert to using
+ size_type_node instead of sizetype.
+
2012-08-19 Eric Botcazou <ebotcazou@adacore.com>
* layout.adb (Set_Elem_Alignment): Cap the alignment of access types
diff --git a/gcc/ada/gcc-interface/decl.c b/gcc/ada/gcc-interface/decl.c
index cb0f074d7d2..9e14d8af1bf 100644
--- a/gcc/ada/gcc-interface/decl.c
+++ b/gcc/ada/gcc-interface/decl.c
@@ -7507,9 +7507,8 @@ build_subst_list (Entity_Id gnat_subtype, Entity_Id gnat_type, bool definition)
(Node (gnat_value), gnat_subtype,
get_entity_name (gnat_discrim),
definition, true, false));
- subst_pair *s = VEC_safe_push (subst_pair, heap, gnu_list, NULL);
- s->discriminant = gnu_field;
- s->replacement = replacement;
+ subst_pair s = {gnu_field, replacement};
+ VEC_safe_push (subst_pair, heap, gnu_list, s);
}
return gnu_list;
@@ -7541,14 +7540,10 @@ build_variant_list (tree qual_union_type, VEC(subst_pair,heap) *subst_list,
still be accessed. */
if (!integer_zerop (qual))
{
- variant_desc *v;
tree variant_type = TREE_TYPE (gnu_field), variant_subpart;
+ variant_desc v = {variant_type, gnu_field, qual, NULL_TREE};
- v = VEC_safe_push (variant_desc, heap, gnu_list, NULL);
- v->type = variant_type;
- v->field = gnu_field;
- v->qual = qual;
- v->new_type = NULL_TREE;
+ VEC_safe_push (variant_desc, heap, gnu_list, v);
/* Recurse on the variant subpart of the variant, if any. */
variant_subpart = get_variant_part (variant_type);
diff --git a/gcc/ada/gcc-interface/trans.c b/gcc/ada/gcc-interface/trans.c
index cd35cd1b123..4d8dac90afc 100644
--- a/gcc/ada/gcc-interface/trans.c
+++ b/gcc/ada/gcc-interface/trans.c
@@ -2417,14 +2417,15 @@ Loop_Statement_to_gnu (Node_Id gnat_node)
/* Otherwise, use the do-while form with the help of a special
induction variable in the unsigned version of the base type
- or the unsigned version of sizetype, whichever is the
+ or the unsigned version of the size type, whichever is the
largest, in order to have wrap-around arithmetics for it. */
else
{
- if (TYPE_PRECISION (gnu_base_type) > TYPE_PRECISION (sizetype))
+ if (TYPE_PRECISION (gnu_base_type)
+ > TYPE_PRECISION (size_type_node))
gnu_base_type = gnat_unsigned_type (gnu_base_type);
else
- gnu_base_type = sizetype;
+ gnu_base_type = size_type_node;
gnu_first = convert (gnu_base_type, gnu_first);
gnu_last = convert (gnu_base_type, gnu_last);
diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c
index 4cca41bbf39..d9121c1931e 100644
--- a/gcc/ada/gcc-interface/utils.c
+++ b/gcc/ada/gcc-interface/utils.c
@@ -4615,16 +4615,14 @@ convert (tree type, tree expr)
FOR_EACH_CONSTRUCTOR_ELT(e, idx, index, value)
{
- constructor_elt *elt;
/* We expect only simple constructors. */
if (!SAME_FIELD_P (index, efield))
break;
/* The field must be the same. */
if (!SAME_FIELD_P (efield, field))
break;
- elt = VEC_quick_push (constructor_elt, v, NULL);
- elt->index = field;
- elt->value = convert (TREE_TYPE (field), value);
+ constructor_elt elt = {field, convert (TREE_TYPE (field), value)};
+ VEC_quick_push (constructor_elt, v, elt);
/* If packing has made this field a bitfield and the input
value couldn't be emitted statically any more, we need to
@@ -4690,9 +4688,8 @@ convert (tree type, tree expr)
v = VEC_alloc (constructor_elt, gc, len);
FOR_EACH_CONSTRUCTOR_VALUE (e, ix, value)
{
- constructor_elt *elt = VEC_quick_push (constructor_elt, v, NULL);
- elt->index = NULL_TREE;
- elt->value = value;
+ constructor_elt elt = {NULL_TREE, value};
+ VEC_quick_push (constructor_elt, v, elt);
}
expr = copy_node (expr);
TREE_TYPE (expr) = type;
diff --git a/gcc/alias.c b/gcc/alias.c
index 6afb6b7ae9a..865a8496781 100644
--- a/gcc/alias.c
+++ b/gcc/alias.c
@@ -855,8 +855,8 @@ new_alias_set (void)
if (flag_strict_aliasing)
{
if (alias_sets == 0)
- VEC_safe_push (alias_set_entry, gc, alias_sets, (alias_set_entry) 0);
- VEC_safe_push (alias_set_entry, gc, alias_sets, (alias_set_entry) 0);
+ VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
+ VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
return VEC_length (alias_set_entry, alias_sets) - 1;
}
else
@@ -1227,7 +1227,7 @@ find_base_value (rtx src)
/* While scanning insns to find base values, reg_seen[N] is nonzero if
register N has been set in this function. */
-static char *reg_seen;
+static sbitmap reg_seen;
static void
record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
@@ -1253,7 +1253,7 @@ record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
{
while (--n >= 0)
{
- reg_seen[regno + n] = 1;
+ SET_BIT (reg_seen, regno + n);
new_reg_base_value[regno + n] = 0;
}
return;
@@ -1274,12 +1274,12 @@ record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
else
{
/* There's a REG_NOALIAS note against DEST. */
- if (reg_seen[regno])
+ if (TEST_BIT (reg_seen, regno))
{
new_reg_base_value[regno] = 0;
return;
}
- reg_seen[regno] = 1;
+ SET_BIT (reg_seen, regno);
new_reg_base_value[regno] = unique_base_value (unique_id++);
return;
}
@@ -1335,10 +1335,10 @@ record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
}
/* If this is the first set of a register, record the value. */
else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
- && ! reg_seen[regno] && new_reg_base_value[regno] == 0)
+ && ! TEST_BIT (reg_seen, regno) && new_reg_base_value[regno] == 0)
new_reg_base_value[regno] = find_base_value (src);
- reg_seen[regno] = 1;
+ SET_BIT (reg_seen, regno);
}
/* Return REG_BASE_VALUE for REGNO. Selective scheduler uses this to avoid
@@ -2796,7 +2796,7 @@ init_alias_analysis (void)
VEC_safe_grow_cleared (rtx, gc, reg_base_value, maxreg);
new_reg_base_value = XNEWVEC (rtx, maxreg);
- reg_seen = XNEWVEC (char, maxreg);
+ reg_seen = sbitmap_alloc (maxreg);
/* The basic idea is that each pass through this loop will use the
"constant" information from the previous pass to propagate alias
@@ -2841,7 +2841,7 @@ init_alias_analysis (void)
memset (new_reg_base_value, 0, maxreg * sizeof (rtx));
/* Wipe the reg_seen array clean. */
- memset (reg_seen, 0, maxreg);
+ sbitmap_zero (reg_seen);
/* Mark all hard registers which may contain an address.
The stack, frame and argument pointers may contain an address.
@@ -2964,7 +2964,7 @@ init_alias_analysis (void)
/* Clean up. */
free (new_reg_base_value);
new_reg_base_value = 0;
- free (reg_seen);
+ sbitmap_free (reg_seen);
reg_seen = 0;
timevar_pop (TV_ALIAS_ANALYSIS);
}
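
The alias.c hunks above trade a byte per register (a char array) for a bit per register. A minimal sketch of the same sbitmap idiom, using the sbitmap API as it exists at this revision; the wrapper functions and their names are illustrative, not part of the patch:

    /* Sketch only: allocate, clear, test-and-set, and free an sbitmap
       the way the converted reg_seen code does.  */
    static sbitmap reg_seen;

    static void
    seen_init (unsigned int max_regno)
    {
      reg_seen = sbitmap_alloc (max_regno);  /* was: XNEWVEC (char, maxreg) */
      sbitmap_zero (reg_seen);               /* was: memset (reg_seen, 0, maxreg) */
    }

    static bool
    seen_test_and_set (unsigned int regno)
    {
      if (TEST_BIT (reg_seen, regno))        /* was: if (reg_seen[regno]) */
        return true;
      SET_BIT (reg_seen, regno);             /* was: reg_seen[regno] = 1 */
      return false;
    }

Besides shrinking the array eightfold, the sbitmap form makes the "wipe clean between passes" step (sbitmap_zero) independent of the element representation.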
diff --git a/gcc/bitmap.c b/gcc/bitmap.c
index 1a28788bc3e..63f0e099a05 100644
--- a/gcc/bitmap.c
+++ b/gcc/bitmap.c
@@ -837,33 +837,24 @@ bitmap_last_set_bit (const_bitmap a)
gcc_unreachable ();
found_bit:
bit_no += ix * BITMAP_WORD_BITS;
-
- /* Binary search for the last set bit. */
#if GCC_VERSION >= 3004
gcc_assert (sizeof(long) == sizeof (word));
- bit_no += sizeof (long) * 8 - __builtin_ctzl (word);
+ bit_no += BITMAP_WORD_BITS - __builtin_clzl (word) - 1;
#else
-#if BITMAP_WORD_BITS > 64
-#error "Fill out the table."
-#endif
+ /* Hopefully this is a twos-complement host... */
+ BITMAP_WORD x = word;
+ x |= (x >> 1);
+ x |= (x >> 2);
+ x |= (x >> 4);
+ x |= (x >> 8);
+ x |= (x >> 16);
#if BITMAP_WORD_BITS > 32
- if ((word & 0xffffffff00000000))
- word >>= 32, bit_no += 32;
+ x |= (x >> 32);
#endif
- if (word & 0xffff0000)
- word >>= 16, bit_no += 16;
- if (!(word & 0xff00))
- word >>= 8, bit_no += 8;
- if (!(word & 0xf0))
- word >>= 4, bit_no += 4;
- if (!(word & 12))
- word >>= 2, bit_no += 2;
- if (!(word & 2))
- word >>= 1, bit_no += 1;
+ bit_no += bitmap_popcount (x) - 1;
#endif
- gcc_checking_assert (word & 1);
- return bit_no;
+ return bit_no;
}
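
Both arms of the rewritten bitmap_last_set_bit compute the index of the highest set bit in a nonzero word: directly via __builtin_clzl on GCC hosts, otherwise by smearing the top set bit into every lower position and counting the resulting ones. A self-contained sketch of the fallback; the 64-bit word type and helper names are assumptions of this example, not the GCC source:

    #include <stdint.h>

    static int
    popcount64 (uint64_t x)
    {
      int n = 0;
      for (; x != 0; x &= x - 1)   /* clear the lowest set bit each round */
        n++;
      return n;
    }

    /* Index of the highest set bit; WORD must be nonzero.  With GCC
       builtins this is simply 63 - __builtin_clzll (word).  */
    static int
    last_set_bit (uint64_t word)
    {
      uint64_t x = word;
      x |= x >> 1;    /* smear the top set bit rightward...   */
      x |= x >> 2;
      x |= x >> 4;
      x |= x >> 8;
      x |= x >> 16;
      x |= x >> 32;   /* ...now bits 0..last_set are all ones  */
      return popcount64 (x) - 1;   /* e.g. 0x90 -> 0xff -> 8 - 1 = 7 */
    }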
diff --git a/gcc/builtins.c b/gcc/builtins.c
index 1a7c30d400d..169b78078b7 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -4991,7 +4991,7 @@ expand_builtin_signbit (tree exp, rtx target)
if (bitpos < GET_MODE_BITSIZE (rmode))
{
- double_int mask = double_int_setbit (double_int_zero, bitpos);
+ double_int mask = double_int_zero.set_bit (bitpos);
if (GET_MODE_SIZE (imode) > GET_MODE_SIZE (rmode))
temp = gen_lowpart (rmode, temp);
@@ -8956,14 +8956,14 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src,
if (! operand_equal_p (TREE_OPERAND (src_base, 0),
TREE_OPERAND (dest_base, 0), 0))
return NULL_TREE;
- off = double_int_add (mem_ref_offset (src_base),
- shwi_to_double_int (src_offset));
- if (!double_int_fits_in_shwi_p (off))
+ off = mem_ref_offset (src_base) +
+ double_int::from_shwi (src_offset);
+ if (!off.fits_shwi ())
return NULL_TREE;
src_offset = off.low;
- off = double_int_add (mem_ref_offset (dest_base),
- shwi_to_double_int (dest_offset));
- if (!double_int_fits_in_shwi_p (off))
+ off = mem_ref_offset (dest_base) +
+ double_int::from_shwi (dest_offset);
+ if (!off.fits_shwi ())
return NULL_TREE;
dest_offset = off.low;
if (ranges_overlap_p (src_offset, maxsize,
@@ -12877,7 +12877,7 @@ fold_builtin_object_size (tree ptr, tree ost)
{
bytes = compute_builtin_object_size (ptr, object_size_type);
if (double_int_fits_to_tree_p (size_type_node,
- uhwi_to_double_int (bytes)))
+ double_int::from_uhwi (bytes)))
return build_int_cstu (size_type_node, bytes);
}
else if (TREE_CODE (ptr) == SSA_NAME)
@@ -12888,7 +12888,7 @@ fold_builtin_object_size (tree ptr, tree ost)
bytes = compute_builtin_object_size (ptr, object_size_type);
if (bytes != (unsigned HOST_WIDE_INT) (object_size_type < 2 ? -1 : 0)
&& double_int_fits_to_tree_p (size_type_node,
- uhwi_to_double_int (bytes)))
+ double_int::from_uhwi (bytes)))
return build_int_cstu (size_type_node, bytes);
}
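
The builtins.c hunks are typical of the double_int conversion from the 2012-09-06 entry: free functions such as double_int_add and double_int_fits_in_shwi_p become operators and member functions. A minimal sketch of the new style, assuming the members named in that entry; the helper and its fallback value are inventions of this example:

    /* Sketch only: add two offsets with double_int arithmetic and fall
       back to 0 (an arbitrary choice here) when the sum does not fit a
       signed HOST_WIDE_INT.  */
    static HOST_WIDE_INT
    combined_offset (HOST_WIDE_INT a, unsigned HOST_WIDE_INT b)
    {
      /* was: off = double_int_add (shwi_to_double_int (a),
                                    uhwi_to_double_int (b));  */
      double_int off = double_int::from_shwi (a) + double_int::from_uhwi (b);

      /* was: if (!double_int_fits_in_shwi_p (off)) ...  */
      return off.fits_shwi () ? off.low : 0;
    }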
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index ab90c780ed4..93f7d4339f3 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -8551,7 +8551,7 @@ parse_optimize_options (tree args, bool attr_p)
/* Build up argv vector. Just in case the string is stored away, use garbage
collected strings. */
VEC_truncate (const_char_p, optimize_args, 0);
- VEC_safe_push (const_char_p, gc, optimize_args, (const_char_p)NULL);
+ VEC_safe_push (const_char_p, gc, optimize_args, NULL);
for (ap = args; ap != NULL_TREE; ap = TREE_CHAIN (ap))
{
diff --git a/gcc/c-family/c-pragma.c b/gcc/c-family/c-pragma.c
index bd8e6fe1eb1..4bd5117c1a0 100644
--- a/gcc/c-family/c-pragma.c
+++ b/gcc/c-family/c-pragma.c
@@ -372,10 +372,8 @@ handle_pragma_weak (cpp_reader * ARG_UNUSED (dummy))
}
else
{
- pending_weak *pe;
- pe = VEC_safe_push (pending_weak, gc, pending_weaks, NULL);
- pe->name = name;
- pe->value = value;
+ pending_weak pe = {name, value};
+ VEC_safe_push (pending_weak, gc, pending_weaks, pe);
}
}
@@ -499,9 +497,8 @@ add_to_renaming_pragma_list (tree oldname, tree newname)
return;
}
- p = VEC_safe_push (pending_redefinition, gc, pending_redefine_extname, NULL);
- p->oldname = oldname;
- p->newname = newname;
+ pending_redefinition e = {oldname, newname};
+ VEC_safe_push (pending_redefinition, gc, pending_redefine_extname, e);
}
/* The current prefix set by #pragma extern_prefix. */
@@ -1236,14 +1233,14 @@ c_register_pragma_1 (const char *space, const char *name,
ns_name.space = space;
ns_name.name = name;
- VEC_safe_push (pragma_ns_name, heap, registered_pp_pragmas, &ns_name);
+ VEC_safe_push (pragma_ns_name, heap, registered_pp_pragmas, ns_name);
id = VEC_length (pragma_ns_name, registered_pp_pragmas);
id += PRAGMA_FIRST_EXTERNAL - 1;
}
else
{
VEC_safe_push (internal_pragma_handler, heap, registered_pragmas,
- &ihandler);
+ ihandler);
id = VEC_length (internal_pragma_handler, registered_pragmas);
id += PRAGMA_FIRST_EXTERNAL - 1;
diff --git a/gcc/c/c-decl.c b/gcc/c/c-decl.c
index 8d3a948b6bb..dceb02f3bba 100644
--- a/gcc/c/c-decl.c
+++ b/gcc/c/c-decl.c
@@ -6444,7 +6444,7 @@ get_parm_info (bool ellipsis, tree expr)
{
tree decl = b->decl;
tree type = TREE_TYPE (decl);
- c_arg_tag *tag;
+ c_arg_tag tag;
const char *keyword;
switch (TREE_CODE (decl))
@@ -6518,9 +6518,9 @@ get_parm_info (bool ellipsis, tree expr)
}
}
- tag = VEC_safe_push (c_arg_tag, gc, tags, NULL);
- tag->id = b->id;
- tag->type = decl;
+ tag.id = b->id;
+ tag.type = decl;
+ VEC_safe_push (c_arg_tag, gc, tags, tag);
break;
case CONST_DECL:
diff --git a/gcc/c/c-tree.h b/gcc/c/c-tree.h
index 61604ec4640..c7b2b773ee2 100644
--- a/gcc/c/c-tree.h
+++ b/gcc/c/c-tree.h
@@ -142,8 +142,8 @@ DEF_VEC_ALLOC_O (c_expr_t, heap);
/* Append a new c_expr_t element to V. */
#define C_EXPR_APPEND(V, ELEM) \
do { \
- c_expr_t *__elem_p = VEC_safe_push (c_expr_t, gc, V, NULL); \
- *__elem_p = (ELEM); \
+ c_expr_t __elem = (ELEM); \
+ VEC_safe_push (c_expr_t, gc, V, __elem); \
} while (0)
/* A kind of type specifier. Note that this information is currently
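
The C_EXPR_APPEND rewrite above follows the 2012-09-10 vec.h change: the push overloads taking 'T *' (which returned a pointer to the uninitialized new slot) are gone, so callers build the element first and push it by value. A sketch of the before/after idiom with a made-up element type:

    typedef struct int_pair { int first, second; } int_pair;
    DEF_VEC_O (int_pair);
    DEF_VEC_ALLOC_O (int_pair, heap);

    static void
    append_pair (VEC (int_pair, heap) **v, int a, int b)
    {
      /* Old style: push an empty slot, then fill it in.
         int_pair *p = VEC_safe_push (int_pair, heap, *v, NULL);
         p->first = a;
         p->second = b;  */

      /* New style: build the element, push it by value.  */
      int_pair p = {a, b};
      VEC_safe_push (int_pair, heap, *v, p);
    }

The by-value form avoids ever exposing a half-initialized slot, which is why so many files in this merge switch to brace-initialized locals before the push.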
diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c
index 85c5ec7d62c..cd7fe49b542 100644
--- a/gcc/c/c-typeck.c
+++ b/gcc/c/c-typeck.c
@@ -7731,7 +7731,6 @@ output_init_element (tree value, tree origtype, bool strict_string, tree type,
struct obstack * braced_init_obstack)
{
tree semantic_type = NULL_TREE;
- constructor_elt *celt;
bool maybe_const = true;
bool npc;
@@ -7898,9 +7897,8 @@ output_init_element (tree value, tree origtype, bool strict_string, tree type,
/* Otherwise, output this element either to
constructor_elements or to the assembler file. */
- celt = VEC_safe_push (constructor_elt, gc, constructor_elements, NULL);
- celt->index = field;
- celt->value = value;
+ constructor_elt celt = {field, value};
+ VEC_safe_push (constructor_elt, gc, constructor_elements, celt);
/* Advance the variable that indicates sequential elements output. */
if (TREE_CODE (constructor_type) == ARRAY_TYPE)
diff --git a/gcc/cgraph.c b/gcc/cgraph.c
index c09d319e06f..3d4703b4b63 100644
--- a/gcc/cgraph.c
+++ b/gcc/cgraph.c
@@ -484,9 +484,8 @@ cgraph_add_thunk (struct cgraph_node *decl_node ATTRIBUTE_UNUSED,
node = cgraph_create_node (alias);
gcc_checking_assert (!virtual_offset
- || double_int_equal_p
- (tree_to_double_int (virtual_offset),
- shwi_to_double_int (virtual_value)));
+ || tree_to_double_int (virtual_offset) ==
+ double_int::from_shwi (virtual_value));
node->thunk.fixed_offset = fixed_offset;
node->thunk.this_adjusting = this_adjusting;
node->thunk.virtual_value = virtual_value;
diff --git a/gcc/combine.c b/gcc/combine.c
index 507b11e61ed..3284cee1a2a 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -2673,11 +2673,11 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
o = rtx_to_double_int (outer);
i = rtx_to_double_int (inner);
- m = double_int_mask (width);
- i = double_int_and (i, m);
- m = double_int_lshift (m, offset, HOST_BITS_PER_DOUBLE_INT, false);
- i = double_int_lshift (i, offset, HOST_BITS_PER_DOUBLE_INT, false);
- o = double_int_ior (double_int_and_not (o, m), i);
+ m = double_int::mask (width);
+ i &= m;
+ m = m.llshift (offset, HOST_BITS_PER_DOUBLE_INT);
+ i = i.llshift (offset, HOST_BITS_PER_DOUBLE_INT);
+ o = o.and_not (m) | i;
combine_merges++;
subst_insn = i3;
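
The combine.c hunk above uses the new double_int operators to splice INNER into OUTER at a given offset and width. The same mask-and-merge technique on a plain 64-bit word, as a sketch with invented names:

    #include <stdint.h>

    /* Replace WIDTH bits of OUTER starting at OFFSET with the low WIDTH
       bits of INNER (mirrors the mask/llshift/and_not/ior sequence).  */
    static uint64_t
    merge_bitfield (uint64_t outer, uint64_t inner, int offset, int width)
    {
      uint64_t m = (width >= 64 ? ~(uint64_t) 0
                                : ((uint64_t) 1 << width) - 1);
      m <<= offset;                       /* mask over the target field  */
      inner = (inner << offset) & m;      /* position and trim INNER     */
      return (outer & ~m) | inner;        /* clear the field, then fill  */
    }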
diff --git a/gcc/common.opt b/gcc/common.opt
index 87e28b5efe9..19ea29fed48 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -1289,7 +1289,7 @@ Perform indirect inlining
; General flag to enable inlining. Specifying -fno-inline will disable
; all inlining apart from always-inline functions.
finline
-Common Report Var(flag_no_inline,0) Init(0)
+Common Report Var(flag_no_inline,0) Init(0) Optimization
Enable inlining of function declared \"inline\", disabling disables all inlining
finline-small-functions
@@ -1470,6 +1470,10 @@ fmem-report
Common Report Var(mem_report)
Report on permanent memory allocation
+fmem-report-wpa
+Common Report Var(mem_report_wpa)
+Report on permanent memory allocation in WPA only
+
; This will attempt to merge constant section constants, if 1 only
; string constants and constants from constant pool, if 2 also constant
; variables.
diff --git a/gcc/config.gcc b/gcc/config.gcc
index 6d9c68d81ff..c84348b8392 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -245,7 +245,8 @@ md_file=
# Obsolete configurations.
case ${target} in
- score-* \
+ picochip-* \
+ | score-* \
)
if test "x$enable_obsolete" != xyes; then
echo "*** Configuration ${target} is obsolete." >&2
@@ -1696,6 +1697,14 @@ mips*-*-netbsd*) # NetBSD/mips, either endian.
tm_file="elfos.h ${tm_file} mips/elf.h netbsd.h netbsd-elf.h mips/netbsd.h"
extra_options="${extra_options} netbsd.opt netbsd-elf.opt"
;;
+mips*-mti-linux*)
+ tm_file="dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/gnu-user64.h mips/linux64.h mips/linux-common.h mips/mti-linux.h"
+ tmake_file="${tmake_file} mips/t-mti-linux"
+ tm_defines="${tm_defines} MIPS_ISA_DEFAULT=33 MIPS_ABI_DEFAULT=ABI_32"
+ gnu_ld=yes
+ gas=yes
+ test x$with_llsc != x || with_llsc=yes
+ ;;
mips64*-*-linux* | mipsisa64*-*-linux*)
tm_file="dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/gnu-user64.h mips/linux64.h mips/linux-common.h"
tmake_file="${tmake_file} mips/t-linux64"
@@ -3273,10 +3282,11 @@ case "${target}" in
yes)
with_synci=synci
;;
- "" | no)
- # No is the default.
+ no)
with_synci=no-synci
;;
+ "")
+ ;;
*)
echo "Unknown synci type used in --with-synci" 1>&2
exit 1
diff --git a/gcc/config/alpha/predicates.md b/gcc/config/alpha/predicates.md
index 598742f81e7..0a1885bd5f9 100644
--- a/gcc/config/alpha/predicates.md
+++ b/gcc/config/alpha/predicates.md
@@ -328,26 +328,50 @@
(define_predicate "small_symbolic_operand"
(match_code "const,symbol_ref")
{
+ HOST_WIDE_INT ofs = 0, max_ofs = 0;
+
if (! TARGET_SMALL_DATA)
- return 0;
+ return false;
if (GET_CODE (op) == CONST
&& GET_CODE (XEXP (op, 0)) == PLUS
&& CONST_INT_P (XEXP (XEXP (op, 0), 1)))
- op = XEXP (XEXP (op, 0), 0);
+ {
+ ofs = INTVAL (XEXP (XEXP (op, 0), 1));
+ op = XEXP (XEXP (op, 0), 0);
+ }
if (GET_CODE (op) != SYMBOL_REF)
- return 0;
+ return false;
/* ??? There's no encode_section_info equivalent for the rtl
constant pool, so SYMBOL_FLAG_SMALL never gets set. */
if (CONSTANT_POOL_ADDRESS_P (op))
- return GET_MODE_SIZE (get_pool_mode (op)) <= g_switch_value;
+ {
+ max_ofs = GET_MODE_SIZE (get_pool_mode (op));
+ if (max_ofs > g_switch_value)
+ return false;
+ }
+ else if (SYMBOL_REF_LOCAL_P (op)
+ && SYMBOL_REF_SMALL_P (op)
+ && !SYMBOL_REF_WEAK (op)
+ && !SYMBOL_REF_TLS_MODEL (op))
+ {
+ if (SYMBOL_REF_DECL (op))
+ max_ofs = tree_low_cst (DECL_SIZE_UNIT (SYMBOL_REF_DECL (op)), 1);
+ }
+ else
+ return false;
- return (SYMBOL_REF_LOCAL_P (op)
- && SYMBOL_REF_SMALL_P (op)
- && !SYMBOL_REF_WEAK (op)
- && !SYMBOL_REF_TLS_MODEL (op));
+ /* Given that we know that the GP is always 8 byte aligned, we can
+ always adjust by 7 without overflowing. */
+ if (max_ofs < 8)
+ max_ofs = 8;
+
+ /* Since we know this is an object in a small data section, we know the
+ entire section is addressable via GP. We don't know where the section
+ boundaries are, but we know the entire object is within. */
+ return IN_RANGE (ofs, 0, max_ofs - 1);
})
;; Return true if OP is a SYMBOL_REF or CONST referencing a variable
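
The rewritten predicate accepts SYM+OFS only while the offset stays inside the referenced small-data object, padded up to the 8-byte GP alignment. Restated as standalone code with invented names; a sketch of the acceptance logic only, not the predicate itself:

    /* OFS is the constant offset folded into the symbol reference;
       OBJECT_SIZE is the known size of the small-data object (0 if
       unknown, e.g. no DECL is attached).  */
    static bool
    small_data_offset_ok (long ofs, long object_size)
    {
      long max_ofs = object_size;
      /* The GP is 8-byte aligned, so offsets up to 7 can always be
         absorbed without overflowing GP-relative range.  */
      if (max_ofs < 8)
        max_ofs = 8;
      return ofs >= 0 && ofs < max_ofs;
    }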
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 2e9003189b1..dd073daf235 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -2337,7 +2337,7 @@ use_return_insn (int iscond, rtx sibling)
/* ... or for a tail-call argument ... */
if (sibling)
{
- gcc_assert (GET_CODE (sibling) == CALL_INSN);
+ gcc_assert (CALL_P (sibling));
if (find_regno_fusage (sibling, USE, 3))
return 0;
@@ -2558,7 +2558,7 @@ arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
cond = NULL_RTX;
if (subtargets || code == SET
- || (GET_CODE (target) == REG && GET_CODE (source) == REG
+ || (REG_P (target) && REG_P (source)
&& REGNO (target) != REGNO (source)))
{
/* After arm_reorg has been called, we can't fix up expensive
@@ -2990,7 +2990,7 @@ arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
if (generate)
{
if (mode == SImode && i == 16)
- /* Use UXTH in preference to UBFX, since on Thumb2 it's a
+ /* Use UXTH in preference to UBFX, since on Thumb2 it's a
smaller insn. */
emit_constant_insn (cond,
gen_zero_extendhisi2
@@ -3530,7 +3530,7 @@ arm_canonicalize_comparison (enum rtx_code code, rtx *op0, rtx *op1)
{
/* Missing comparison. First try to use an available
comparison. */
- if (GET_CODE (*op1) == CONST_INT)
+ if (CONST_INT_P (*op1))
{
i = INTVAL (*op1);
switch (code)
@@ -3583,7 +3583,7 @@ arm_canonicalize_comparison (enum rtx_code code, rtx *op0, rtx *op1)
/* Comparisons smaller than DImode. Only adjust comparisons against
an out-of-range constant. */
- if (GET_CODE (*op1) != CONST_INT
+ if (!CONST_INT_P (*op1)
|| const_ok_for_arm (INTVAL (*op1))
|| const_ok_for_arm (- INTVAL (*op1)))
return code;
@@ -4160,7 +4160,7 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
tree index = TYPE_DOMAIN (type);
/* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P(type))
+ if (!COMPLETE_TYPE_P (type))
return -1;
count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep);
@@ -4192,7 +4192,7 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
tree field;
/* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P(type))
+ if (!COMPLETE_TYPE_P (type))
return -1;
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
@@ -4224,7 +4224,7 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
tree field;
/* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P(type))
+ if (!COMPLETE_TYPE_P (type))
return -1;
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
@@ -5442,7 +5442,7 @@ legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
&& GET_CODE (XEXP (XEXP (orig, 0), 0)) == UNSPEC
&& XINT (XEXP (XEXP (orig, 0), 0), 1) == UNSPEC_TLS)
{
- gcc_assert (GET_CODE (XEXP (XEXP (orig, 0), 1)) == CONST_INT);
+ gcc_assert (CONST_INT_P (XEXP (XEXP (orig, 0), 1)));
return orig;
}
@@ -5458,7 +5458,7 @@ legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
base == reg ? 0 : reg);
- if (GET_CODE (offset) == CONST_INT)
+ if (CONST_INT_P (offset))
{
/* The base register doesn't really matter, we only want to
test the index for the appropriate mode. */
@@ -5468,7 +5468,7 @@ legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
offset = force_reg (Pmode, offset);
}
- if (GET_CODE (offset) == CONST_INT)
+ if (CONST_INT_P (offset))
return plus_constant (Pmode, base, INTVAL (offset));
}
@@ -5649,7 +5649,7 @@ arm_address_register_rtx_p (rtx x, int strict_p)
{
int regno;
- if (GET_CODE (x) != REG)
+ if (!REG_P (x))
return 0;
regno = REGNO (x);
@@ -5716,7 +5716,7 @@ arm_legitimate_address_outer_p (enum machine_mode mode, rtx x, RTX_CODE outer,
to fixup invalid register choices. */
if (use_ldrd
&& GET_CODE (x) == POST_MODIFY
- && GET_CODE (addend) == REG)
+ && REG_P (addend))
return 0;
return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
@@ -5730,7 +5730,7 @@ arm_legitimate_address_outer_p (enum machine_mode mode, rtx x, RTX_CODE outer,
|| (code == CONST
&& GET_CODE (XEXP (x, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1)))))
return 1;
else if (mode == TImode || (TARGET_NEON && VALID_NEON_STRUCT_MODE (mode)))
@@ -5742,7 +5742,7 @@ arm_legitimate_address_outer_p (enum machine_mode mode, rtx x, RTX_CODE outer,
rtx xop1 = XEXP (x, 1);
return ((arm_address_register_rtx_p (xop0, strict_p)
- && ((GET_CODE(xop1) == CONST_INT
+ && ((CONST_INT_P (xop1)
&& arm_legitimate_index_p (mode, xop1, outer, strict_p))
|| (!strict_p && will_be_in_index_register (xop1))))
|| (arm_address_register_rtx_p (xop1, strict_p)
@@ -5800,7 +5800,7 @@ thumb2_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
rtx addend = XEXP (XEXP (x, 1), 1);
HOST_WIDE_INT offset;
- if (GET_CODE (addend) != CONST_INT)
+ if (!CONST_INT_P (addend))
return 0;
offset = INTVAL(addend);
@@ -5818,7 +5818,7 @@ thumb2_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
|| (code == CONST
&& GET_CODE (XEXP (x, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1)))))
return 1;
else if (mode == TImode || (TARGET_NEON && VALID_NEON_STRUCT_MODE (mode)))
@@ -5930,7 +5930,7 @@ arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
rtx op = XEXP (index, 1);
return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
- && GET_CODE (op) == CONST_INT
+ && CONST_INT_P (op)
&& INTVAL (op) > 0
&& INTVAL (op) <= 31);
}
@@ -5962,7 +5962,7 @@ thumb2_index_mul_operand (rtx op)
{
HOST_WIDE_INT val;
- if (GET_CODE(op) != CONST_INT)
+ if (!CONST_INT_P (op))
return false;
val = INTVAL(op);
@@ -6052,7 +6052,7 @@ thumb2_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
rtx op = XEXP (index, 1);
return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
- && GET_CODE (op) == CONST_INT
+ && CONST_INT_P (op)
&& INTVAL (op) > 0
&& INTVAL (op) <= 3);
}
@@ -6068,7 +6068,7 @@ thumb1_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
{
int regno;
- if (GET_CODE (x) != REG)
+ if (!REG_P (x))
return 0;
regno = REGNO (x);
@@ -6142,7 +6142,7 @@ thumb1_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
|| (GET_CODE (x) == CONST
&& GET_CODE (XEXP (x, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1)))))
return 1;
/* Post-inc indexing only supported for SImode and larger. */
@@ -6167,7 +6167,7 @@ thumb1_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
/* REG+const has 5-7 bit offset for non-SP registers. */
else if ((thumb1_index_register_rtx_p (XEXP (x, 0), strict_p)
|| XEXP (x, 0) == arg_pointer_rtx)
- && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (x, 1))
&& thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
return 1;
@@ -6175,23 +6175,23 @@ thumb1_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
larger is supported. */
/* ??? Should probably check for DI/DFmode overflow here
just like GO_IF_LEGITIMATE_OFFSET does. */
- else if (GET_CODE (XEXP (x, 0)) == REG
+ else if (REG_P (XEXP (x, 0))
&& REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
&& GET_MODE_SIZE (mode) >= 4
- && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= 0
&& INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
&& (INTVAL (XEXP (x, 1)) & 3) == 0)
return 1;
- else if (GET_CODE (XEXP (x, 0)) == REG
+ else if (REG_P (XEXP (x, 0))
&& (REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
|| REGNO (XEXP (x, 0)) == ARG_POINTER_REGNUM
|| (REGNO (XEXP (x, 0)) >= FIRST_VIRTUAL_REGISTER
&& REGNO (XEXP (x, 0))
<= LAST_VIRTUAL_POINTER_REGISTER))
&& GET_MODE_SIZE (mode) >= 4
- && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (x, 1))
&& (INTVAL (XEXP (x, 1)) & 3) == 0)
return 1;
}
@@ -6495,7 +6495,7 @@ arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
xop1 = force_reg (SImode, xop1);
if (ARM_BASE_REGISTER_RTX_P (xop0)
- && GET_CODE (xop1) == CONST_INT)
+ && CONST_INT_P (xop1))
{
HOST_WIDE_INT n, low_n;
rtx base_reg, val;
@@ -6551,7 +6551,7 @@ arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
with absolute addresses which often allows for the base register to
be factorized for multiple adjacent memory references, and it might
even allows for the mini pool to be avoided entirely. */
- else if (GET_CODE (x) == CONST_INT && optimize > 0)
+ else if (CONST_INT_P (x) && optimize > 0)
{
unsigned int bits;
HOST_WIDE_INT mask, base, index;
@@ -6598,7 +6598,7 @@ thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
return legitimize_tls_address (x, NULL_RTX);
if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (x, 1))
&& (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
|| INTVAL (XEXP (x, 1)) < 0))
{
@@ -6668,9 +6668,9 @@ arm_legitimize_reload_address (rtx *p,
/* We must recognize output that we have already generated ourselves. */
if (GET_CODE (*p) == PLUS
&& GET_CODE (XEXP (*p, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (*p, 0), 0)) == REG
- && GET_CODE (XEXP (XEXP (*p, 0), 1)) == CONST_INT
- && GET_CODE (XEXP (*p, 1)) == CONST_INT)
+ && REG_P (XEXP (XEXP (*p, 0), 0))
+ && CONST_INT_P (XEXP (XEXP (*p, 0), 1))
+ && CONST_INT_P (XEXP (*p, 1)))
{
push_reload (XEXP (*p, 0), NULL_RTX, &XEXP (*p, 0), NULL,
MODE_BASE_REG_CLASS (mode), GET_MODE (*p),
@@ -6679,13 +6679,13 @@ arm_legitimize_reload_address (rtx *p,
}
if (GET_CODE (*p) == PLUS
- && GET_CODE (XEXP (*p, 0)) == REG
+ && REG_P (XEXP (*p, 0))
&& ARM_REGNO_OK_FOR_BASE_P (REGNO (XEXP (*p, 0)))
/* If the base register is equivalent to a constant, let the generic
code handle it. Otherwise we will run into problems if a future
reload pass decides to rematerialize the constant. */
&& !reg_equiv_constant (ORIGINAL_REGNO (XEXP (*p, 0)))
- && GET_CODE (XEXP (*p, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (*p, 1)))
{
HOST_WIDE_INT val = INTVAL (XEXP (*p, 1));
HOST_WIDE_INT low, high;
@@ -6854,7 +6854,7 @@ thumb_legitimize_reload_address (rtx *x_p,
&& GET_MODE_SIZE (mode) < 4
&& REG_P (XEXP (x, 0))
&& XEXP (x, 0) == stack_pointer_rtx
- && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (x, 1))
&& !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
{
rtx orig_x = x;
@@ -6952,8 +6952,8 @@ arm_legitimate_constant_p_1 (enum machine_mode mode, rtx x)
static bool
thumb_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
- return (GET_CODE (x) == CONST_INT
- || GET_CODE (x) == CONST_DOUBLE
+ return (CONST_INT_P (x)
+ || CONST_DOUBLE_P (x)
|| CONSTANT_ADDRESS_P (x)
|| flag_pic);
}
@@ -6985,11 +6985,11 @@ arm_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
}
#define REG_OR_SUBREG_REG(X) \
- (GET_CODE (X) == REG \
- || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
+ (REG_P (X) \
+ || (GET_CODE (X) == SUBREG && REG_P (SUBREG_REG (X))))
#define REG_OR_SUBREG_RTX(X) \
- (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
+ (REG_P (X) ? (X) : SUBREG_REG (X))
static inline int
thumb1_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
@@ -7011,7 +7011,7 @@ thumb1_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
return COSTS_N_INSNS (1);
case MULT:
- if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ if (CONST_INT_P (XEXP (x, 1)))
{
int cycles = 0;
unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
@@ -7027,8 +7027,8 @@ thumb1_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
case SET:
return (COSTS_N_INSNS (1)
- + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
- + GET_CODE (SET_DEST (x)) == MEM));
+ + 4 * ((MEM_P (SET_SRC (x)))
+ + MEM_P (SET_DEST (x))));
case CONST_INT:
if (outer == SET)
@@ -7144,9 +7144,9 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
return false;
case ROTATE:
- if (GET_CODE (XEXP (x, 1)) == REG)
+ if (REG_P (XEXP (x, 1)))
*total = COSTS_N_INSNS (1); /* Need to subtract from 32 */
- else if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ else if (!CONST_INT_P (XEXP (x, 1)))
*total = rtx_cost (XEXP (x, 1), code, 1, speed);
/* Fall through */
@@ -7170,7 +7170,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
/* Increase the cost of complex shifts because they aren't any faster,
and reduce dual issue opportunities. */
if (arm_tune_cortex_a9
- && outer != SET && GET_CODE (XEXP (x, 1)) != CONST_INT)
+ && outer != SET && !CONST_INT_P (XEXP (x, 1)))
++*total;
return true;
@@ -7179,14 +7179,14 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
if (mode == DImode)
{
*total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
- if (GET_CODE (XEXP (x, 0)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 0))
&& const_ok_for_arm (INTVAL (XEXP (x, 0))))
{
*total += rtx_cost (XEXP (x, 1), code, 1, speed);
return true;
}
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 1))
&& const_ok_for_arm (INTVAL (XEXP (x, 1))))
{
*total += rtx_cost (XEXP (x, 0), code, 0, speed);
@@ -7203,14 +7203,14 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
|| (mode == DFmode && !TARGET_VFP_SINGLE)))
{
*total = COSTS_N_INSNS (1);
- if (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
+ if (CONST_DOUBLE_P (XEXP (x, 0))
&& arm_const_double_rtx (XEXP (x, 0)))
{
*total += rtx_cost (XEXP (x, 1), code, 1, speed);
return true;
}
- if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
+ if (CONST_DOUBLE_P (XEXP (x, 1))
&& arm_const_double_rtx (XEXP (x, 1)))
{
*total += rtx_cost (XEXP (x, 0), code, 0, speed);
@@ -7224,7 +7224,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
}
*total = COSTS_N_INSNS (1);
- if (GET_CODE (XEXP (x, 0)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 0))
&& const_ok_for_arm (INTVAL (XEXP (x, 0))))
{
*total += rtx_cost (XEXP (x, 1), code, 1, speed);
@@ -7262,7 +7262,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
|| GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == RTX_COMM_COMPARE)
{
*total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code, 0, speed);
- if (GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
+ if (REG_P (XEXP (XEXP (x, 1), 0))
&& REGNO (XEXP (XEXP (x, 1), 0)) != CC_REGNUM)
*total += COSTS_N_INSNS (1);
@@ -7300,7 +7300,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
|| (mode == DFmode && !TARGET_VFP_SINGLE)))
{
*total = COSTS_N_INSNS (1);
- if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
+ if (CONST_DOUBLE_P (XEXP (x, 1))
&& arm_const_double_rtx (XEXP (x, 1)))
{
*total += rtx_cost (XEXP (x, 0), code, 0, speed);
@@ -7318,7 +7318,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
|| GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == RTX_COMM_COMPARE)
{
*total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 1), code, 1, speed);
- if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+ if (REG_P (XEXP (XEXP (x, 0), 0))
&& REGNO (XEXP (XEXP (x, 0), 0)) != CC_REGNUM)
*total += COSTS_N_INSNS (1);
return true;
@@ -7335,13 +7335,13 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
operand. */
if (REG_OR_SUBREG_REG (XEXP (x, 0))
&& ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
- && GET_CODE (XEXP (x, 1)) != CONST_INT)
+ && !CONST_INT_P (XEXP (x, 1)))
*total = COSTS_N_INSNS (1);
if (mode == DImode)
{
*total += COSTS_N_INSNS (2);
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 1))
&& const_ok_for_op (INTVAL (XEXP (x, 1)), code))
{
*total += rtx_cost (XEXP (x, 0), code, 0, speed);
@@ -7352,7 +7352,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
}
*total += COSTS_N_INSNS (1);
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 1))
&& const_ok_for_op (INTVAL (XEXP (x, 1)), code))
{
*total += rtx_cost (XEXP (x, 0), code, 0, speed);
@@ -7432,7 +7432,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
{
*total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, 0, speed);
/* Register shifts cost an extra cycle. */
- if (GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
+ if (!CONST_INT_P (XEXP (XEXP (x, 0), 1)))
*total += COSTS_N_INSNS (1) + rtx_cost (XEXP (XEXP (x, 0), 1),
subcode, 1, speed);
return true;
@@ -7452,7 +7452,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
if (!((GET_RTX_CLASS (GET_CODE (operand)) == RTX_COMPARE
|| GET_RTX_CLASS (GET_CODE (operand)) == RTX_COMM_COMPARE)
- && GET_CODE (XEXP (operand, 0)) == REG
+ && REG_P (XEXP (operand, 0))
&& REGNO (XEXP (operand, 0)) == CC_REGNUM))
*total += COSTS_N_INSNS (1);
*total += (rtx_cost (XEXP (x, 1), code, 1, speed)
@@ -7468,7 +7468,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
goto scc_insn;
case GE:
- if ((GET_CODE (XEXP (x, 0)) != REG || REGNO (XEXP (x, 0)) != CC_REGNUM)
+ if ((!REG_P (XEXP (x, 0)) || REGNO (XEXP (x, 0)) != CC_REGNUM)
&& mode == SImode && XEXP (x, 1) == const0_rtx)
{
*total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code, 0, speed);
@@ -7477,7 +7477,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
goto scc_insn;
case LT:
- if ((GET_CODE (XEXP (x, 0)) != REG || REGNO (XEXP (x, 0)) != CC_REGNUM)
+ if ((!REG_P (XEXP (x, 0)) || REGNO (XEXP (x, 0)) != CC_REGNUM)
&& mode == SImode && XEXP (x, 1) == const0_rtx)
{
*total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code, 0, speed);
@@ -7504,21 +7504,21 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
performed, then they cost 2 instructions. Otherwise they need
an additional comparison before them. */
*total = COSTS_N_INSNS (2);
- if (GET_CODE (XEXP (x, 0)) == REG && REGNO (XEXP (x, 0)) == CC_REGNUM)
+ if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) == CC_REGNUM)
{
return true;
}
/* Fall through */
case COMPARE:
- if (GET_CODE (XEXP (x, 0)) == REG && REGNO (XEXP (x, 0)) == CC_REGNUM)
+ if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) == CC_REGNUM)
{
*total = 0;
return true;
}
*total += COSTS_N_INSNS (1);
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 1))
&& const_ok_for_op (INTVAL (XEXP (x, 1)), code))
{
*total += rtx_cost (XEXP (x, 0), code, 0, speed);
@@ -7550,7 +7550,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
case SMIN:
case SMAX:
*total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code, 0, speed);
- if (GET_CODE (XEXP (x, 1)) != CONST_INT
+ if (!CONST_INT_P (XEXP (x, 1))
|| !const_ok_for_arm (INTVAL (XEXP (x, 1))))
*total += rtx_cost (XEXP (x, 1), code, 1, speed);
return true;
@@ -7667,7 +7667,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
case SET:
return false;
-
+
case UNSPEC:
/* We cost this as high as our memory costs to allow this to
be hoisted from loops. */
@@ -7716,7 +7716,7 @@ thumb1_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
return COSTS_N_INSNS (1);
case MULT:
- if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ if (CONST_INT_P (XEXP (x, 1)))
{
/* Thumb1 mul instruction can't operate on const. We must Load it
into a register first. */
@@ -7727,8 +7727,8 @@ thumb1_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
case SET:
return (COSTS_N_INSNS (1)
- + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
- + GET_CODE (SET_DEST (x)) == MEM));
+ + 4 * ((MEM_P (SET_SRC (x)))
+ + MEM_P (SET_DEST (x))));
case CONST_INT:
if (outer == SET)
@@ -7804,14 +7804,14 @@ thumb1_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
{
case QImode:
return (1 + (mode == DImode ? 4 : 0)
- + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+ + (MEM_P (XEXP (x, 0)) ? 10 : 0));
case HImode:
return (4 + (mode == DImode ? 4 : 0)
- + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+ + (MEM_P (XEXP (x, 0)) ? 10 : 0));
case SImode:
- return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+ return (1 + (MEM_P (XEXP (x, 0)) ? 10 : 0));
default:
return 99;
@@ -7861,7 +7861,7 @@ arm_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
return false;
case ROTATE:
- if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
+ if (mode == SImode && REG_P (XEXP (x, 1)))
{
*total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code, 0, false);
return true;
@@ -7871,7 +7871,7 @@ arm_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
case ASHIFT:
case LSHIFTRT:
case ASHIFTRT:
- if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ if (mode == DImode && CONST_INT_P (XEXP (x, 1)))
{
*total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code, 0, false);
return true;
@@ -7880,7 +7880,7 @@ arm_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
{
*total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code, 0, false);
/* Slightly disparage register shifts, but not by much. */
- if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ if (!CONST_INT_P (XEXP (x, 1)))
*total += 1 + rtx_cost (XEXP (x, 1), code, 1, false);
return true;
}
@@ -8097,7 +8097,7 @@ arm_slowmul_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
return false;
}
- if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ if (CONST_INT_P (XEXP (x, 1)))
{
unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
& (unsigned HOST_WIDE_INT) 0xffffffff);
@@ -8163,7 +8163,7 @@ arm_fastmul_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
return false;
}
- if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ if (CONST_INT_P (XEXP (x, 1)))
{
unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
& (unsigned HOST_WIDE_INT) 0xffffffff);
@@ -8255,7 +8255,7 @@ arm_xscale_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
return false;
}
- if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ if (CONST_INT_P (XEXP (x, 1)))
{
/* If operand 1 is a constant we can more accurately
calculate the cost of the multiply. The multiplier can
@@ -8383,7 +8383,7 @@ arm_arm_address_cost (rtx x)
if (c == PLUS)
{
- if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ if (CONST_INT_P (XEXP (x, 1)))
return 2;
if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
@@ -8403,8 +8403,8 @@ arm_thumb_address_cost (rtx x)
if (c == REG)
return 1;
if (c == PLUS
- && GET_CODE (XEXP (x, 0)) == REG
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ && REG_P (XEXP (x, 0))
+ && CONST_INT_P (XEXP (x, 1)))
return 1;
return 2;
@@ -8655,13 +8655,13 @@ arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
/* Call insns don't incur a stall, even if they follow a load. */
if (REG_NOTE_KIND (link) == 0
- && GET_CODE (insn) == CALL_INSN)
+ && CALL_P (insn))
return 1;
if ((i_pat = single_set (insn)) != NULL
- && GET_CODE (SET_SRC (i_pat)) == MEM
+ && MEM_P (SET_SRC (i_pat))
&& (d_pat = single_set (dep)) != NULL
- && GET_CODE (SET_DEST (d_pat)) == MEM)
+ && MEM_P (SET_DEST (d_pat)))
{
rtx src_mem = XEXP (SET_SRC (i_pat), 0);
/* This is a load after a store, there is no conflict if the load reads
@@ -8756,7 +8756,7 @@ vfp3_const_double_index (rtx x)
HOST_WIDE_INT m1, m2;
int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1;
- if (!TARGET_VFP3 || GET_CODE (x) != CONST_DOUBLE)
+ if (!TARGET_VFP3 || !CONST_DOUBLE_P (x))
return -1;
REAL_VALUE_FROM_CONST_DOUBLE (r, x);
@@ -8959,12 +8959,12 @@ neon_valid_immediate (rtx op, enum machine_mode mode, int inverse,
unsigned HOST_WIDE_INT elpart;
unsigned int part, parts;
- if (GET_CODE (el) == CONST_INT)
+ if (CONST_INT_P (el))
{
elpart = INTVAL (el);
parts = 1;
}
- else if (GET_CODE (el) == CONST_DOUBLE)
+ else if (CONST_DOUBLE_P (el))
{
elpart = CONST_DOUBLE_LOW (el);
parts = 2;
@@ -8980,7 +8980,7 @@ neon_valid_immediate (rtx op, enum machine_mode mode, int inverse,
bytes[idx++] = (elpart & 0xff) ^ invmask;
elpart >>= BITS_PER_UNIT;
}
- if (GET_CODE (el) == CONST_DOUBLE)
+ if (CONST_DOUBLE_P (el))
elpart = CONST_DOUBLE_HIGH (el);
}
}
@@ -9154,9 +9154,9 @@ neon_immediate_valid_for_shift (rtx op, enum machine_mode mode,
rtx el = CONST_VECTOR_ELT (op, i);
unsigned HOST_WIDE_INT elpart;
- if (GET_CODE (el) == CONST_INT)
+ if (CONST_INT_P (el))
elpart = INTVAL (el);
- else if (GET_CODE (el) == CONST_DOUBLE)
+ else if (CONST_DOUBLE_P (el))
return 0;
else
gcc_unreachable ();
@@ -9326,7 +9326,7 @@ neon_make_constant (rtx vals)
for (i = 0; i < n_elts; ++i)
{
rtx x = XVECEXP (vals, 0, i);
- if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
+ if (CONST_INT_P (x) || CONST_DOUBLE_P (x))
n_const++;
}
if (n_const == n_elts)
@@ -9467,7 +9467,7 @@ bounds_check (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high,
{
HOST_WIDE_INT lane;
- gcc_assert (GET_CODE (operand) == CONST_INT);
+ gcc_assert (CONST_INT_P (operand));
lane = INTVAL (operand);
@@ -9524,7 +9524,7 @@ arm_coproc_mem_operand (rtx op, bool wb)
return FALSE;
/* Constants are converted into offsets from labels. */
- if (GET_CODE (op) != MEM)
+ if (!MEM_P (op))
return FALSE;
ind = XEXP (op, 0);
@@ -9534,11 +9534,11 @@ arm_coproc_mem_operand (rtx op, bool wb)
|| (GET_CODE (ind) == CONST
&& GET_CODE (XEXP (ind, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
- && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
+ && CONST_INT_P (XEXP (XEXP (ind, 0), 1)))))
return TRUE;
/* Match: (mem (reg)). */
- if (GET_CODE (ind) == REG)
+ if (REG_P (ind))
return arm_address_register_rtx_p (ind, 0);
/* Autoincremment addressing modes. POST_INC and PRE_DEC are
@@ -9563,9 +9563,9 @@ arm_coproc_mem_operand (rtx op, bool wb)
(plus (reg)
(const)). */
if (GET_CODE (ind) == PLUS
- && GET_CODE (XEXP (ind, 0)) == REG
+ && REG_P (XEXP (ind, 0))
&& REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
- && GET_CODE (XEXP (ind, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (ind, 1))
&& INTVAL (XEXP (ind, 1)) > -1024
&& INTVAL (XEXP (ind, 1)) < 1024
&& (INTVAL (XEXP (ind, 1)) & 3) == 0)
@@ -9596,7 +9596,7 @@ neon_vector_mem_operand (rtx op, int type)
return FALSE;
/* Constants are converted into offsets from labels. */
- if (GET_CODE (op) != MEM)
+ if (!MEM_P (op))
return FALSE;
ind = XEXP (op, 0);
@@ -9606,11 +9606,11 @@ neon_vector_mem_operand (rtx op, int type)
|| (GET_CODE (ind) == CONST
&& GET_CODE (XEXP (ind, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
- && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
+ && CONST_INT_P (XEXP (XEXP (ind, 0), 1)))))
return TRUE;
/* Match: (mem (reg)). */
- if (GET_CODE (ind) == REG)
+ if (REG_P (ind))
return arm_address_register_rtx_p (ind, 0);
/* Allow post-increment with Neon registers. */
@@ -9625,9 +9625,9 @@ neon_vector_mem_operand (rtx op, int type)
(const)). */
if (type == 0
&& GET_CODE (ind) == PLUS
- && GET_CODE (XEXP (ind, 0)) == REG
+ && REG_P (XEXP (ind, 0))
&& REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
- && GET_CODE (XEXP (ind, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (ind, 1))
&& INTVAL (XEXP (ind, 1)) > -1024
&& INTVAL (XEXP (ind, 1)) < 1016
&& (INTVAL (XEXP (ind, 1)) & 3) == 0)
@@ -9654,7 +9654,7 @@ neon_struct_mem_operand (rtx op)
return FALSE;
/* Constants are converted into offsets from labels. */
- if (GET_CODE (op) != MEM)
+ if (!MEM_P (op))
return FALSE;
ind = XEXP (op, 0);
@@ -9664,11 +9664,11 @@ neon_struct_mem_operand (rtx op)
|| (GET_CODE (ind) == CONST
&& GET_CODE (XEXP (ind, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
- && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
+ && CONST_INT_P (XEXP (XEXP (ind, 0), 1)))))
return TRUE;
/* Match: (mem (reg)). */
- if (GET_CODE (ind) == REG)
+ if (REG_P (ind))
return arm_address_register_rtx_p (ind, 0);
/* vldm/vstm allows POST_INC (ia) and PRE_DEC (db). */
@@ -9903,12 +9903,12 @@ adjacent_mem_locations (rtx a, rtx b)
if (volatile_refs_p (a) || volatile_refs_p (b))
return 0;
- if ((GET_CODE (XEXP (a, 0)) == REG
+ if ((REG_P (XEXP (a, 0))
|| (GET_CODE (XEXP (a, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
- && (GET_CODE (XEXP (b, 0)) == REG
+ && CONST_INT_P (XEXP (XEXP (a, 0), 1))))
+ && (REG_P (XEXP (b, 0))
|| (GET_CODE (XEXP (b, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
+ && CONST_INT_P (XEXP (XEXP (b, 0), 1)))))
{
HOST_WIDE_INT val0 = 0, val1 = 0;
rtx reg0, reg1;
@@ -10292,7 +10292,7 @@ load_multiple_sequence (rtx *operands, int nops, int *regs, int *saved_order,
if (GET_CODE (operands[nops + i]) == SUBREG)
operands[nops + i] = alter_subreg (operands + (nops + i));
- gcc_assert (GET_CODE (operands[nops + i]) == MEM);
+ gcc_assert (MEM_P (operands[nops + i]));
/* Don't reorder volatile memory references; it doesn't seem worth
looking for the case where the order is ok anyway. */
@@ -10301,16 +10301,15 @@ load_multiple_sequence (rtx *operands, int nops, int *regs, int *saved_order,
offset = const0_rtx;
- if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
+ if ((REG_P (reg = XEXP (operands[nops + i], 0))
|| (GET_CODE (reg) == SUBREG
- && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ && REG_P (reg = SUBREG_REG (reg))))
|| (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
- && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
- == REG)
+ && ((REG_P (reg = XEXP (XEXP (operands[nops + i], 0), 0)))
|| (GET_CODE (reg) == SUBREG
- && GET_CODE (reg = SUBREG_REG (reg)) == REG))
- && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
- == CONST_INT)))
+ && REG_P (reg = SUBREG_REG (reg))))
+ && (CONST_INT_P (offset
+ = XEXP (XEXP (operands[nops + i], 0), 1)))))
{
if (i == 0)
{
@@ -10323,7 +10322,7 @@ load_multiple_sequence (rtx *operands, int nops, int *regs, int *saved_order,
/* Not addressed from the same base register. */
return 0;
- unsorted_regs[i] = (GET_CODE (operands[i]) == REG
+ unsorted_regs[i] = (REG_P (operands[i])
? REGNO (operands[i])
: REGNO (SUBREG_REG (operands[i])));
@@ -10445,7 +10444,7 @@ store_multiple_sequence (rtx *operands, int nops, int nops_total,
if (GET_CODE (operands[nops + i]) == SUBREG)
operands[nops + i] = alter_subreg (operands + (nops + i));
- gcc_assert (GET_CODE (operands[nops + i]) == MEM);
+ gcc_assert (MEM_P (operands[nops + i]));
/* Don't reorder volatile memory references; it doesn't seem worth
looking for the case where the order is ok anyway. */
@@ -10454,18 +10453,17 @@ store_multiple_sequence (rtx *operands, int nops, int nops_total,
offset = const0_rtx;
- if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
+ if ((REG_P (reg = XEXP (operands[nops + i], 0))
|| (GET_CODE (reg) == SUBREG
- && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ && REG_P (reg = SUBREG_REG (reg))))
|| (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
- && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
- == REG)
+ && ((REG_P (reg = XEXP (XEXP (operands[nops + i], 0), 0)))
|| (GET_CODE (reg) == SUBREG
- && GET_CODE (reg = SUBREG_REG (reg)) == REG))
- && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
- == CONST_INT)))
+ && REG_P (reg = SUBREG_REG (reg))))
+ && (CONST_INT_P (offset
+ = XEXP (XEXP (operands[nops + i], 0), 1)))))
{
- unsorted_reg_rtxs[i] = (GET_CODE (operands[i]) == REG
+ unsorted_reg_rtxs[i] = (REG_P (operands[i])
? operands[i] : SUBREG_REG (operands[i]));
unsorted_regs[i] = REGNO (unsorted_reg_rtxs[i]);
@@ -10951,7 +10949,7 @@ gen_const_stm_seq (rtx *operands, int nops)
unaligned copies on processors which support unaligned semantics for those
instructions. INTERLEAVE_FACTOR can be used to attempt to hide load latency
(using more registers) by doing e.g. load/load/store/store for a factor of 2.
- An interleave factor of 1 (the minimum) will perform no interleaving.
+ An interleave factor of 1 (the minimum) will perform no interleaving.
Load/store multiple are used for aligned addresses where possible. */
static void
@@ -11286,8 +11284,8 @@ arm_gen_movmemqi (rtx *operands)
rtx part_bytes_reg = NULL;
rtx mem;
- if (GET_CODE (operands[2]) != CONST_INT
- || GET_CODE (operands[3]) != CONST_INT
+ if (!CONST_INT_P (operands[2])
+ || !CONST_INT_P (operands[3])
|| INTVAL (operands[2]) > 64)
return 0;
@@ -11632,13 +11630,13 @@ arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
equalities and unsigned inequalities). */
if (GET_MODE (x) == SImode
&& GET_CODE (x) == ASHIFT
- && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
+ && CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) == 24
&& GET_CODE (XEXP (x, 0)) == SUBREG
- && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
+ && MEM_P (SUBREG_REG (XEXP (x, 0)))
&& GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
&& (op == EQ || op == NE
|| op == GEU || op == GTU || op == LTU || op == LEU)
- && GET_CODE (y) == CONST_INT)
+ && CONST_INT_P (y))
return CC_Zmode;
/* A construct for a conditional compare, if the false arm contains
@@ -11828,7 +11826,7 @@ arm_reload_in_hi (rtx *operands)
ref = SUBREG_REG (ref);
}
- if (GET_CODE (ref) == REG)
+ if (REG_P (ref))
{
/* We have a pseudo which has been spilt onto the stack; there
are two cases here: the first where there is a simple
@@ -11848,7 +11846,7 @@ arm_reload_in_hi (rtx *operands)
/* Handle the case where the address is too complex to be offset by 1. */
if (GET_CODE (base) == MINUS
- || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
+ || (GET_CODE (base) == PLUS && !CONST_INT_P (XEXP (base, 1))))
{
rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
@@ -11945,7 +11943,7 @@ arm_reload_out_hi (rtx *operands)
ref = SUBREG_REG (ref);
}
- if (GET_CODE (ref) == REG)
+ if (REG_P (ref))
{
/* We have a pseudo which has been spilt onto the stack; there
are two cases here: the first where there is a simple
@@ -11967,7 +11965,7 @@ arm_reload_out_hi (rtx *operands)
/* Handle the case where the address is too complex to be offset by 1. */
if (GET_CODE (base) == MINUS
- || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
+ || (GET_CODE (base) == PLUS && !CONST_INT_P (XEXP (base, 1))))
{
rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
@@ -12352,7 +12350,7 @@ is_jump_table (rtx insn)
&& ((table = next_real_insn (JUMP_LABEL (insn)))
== next_real_insn (insn))
&& table != NULL
- && GET_CODE (table) == JUMP_INSN
+ && JUMP_P (table)
&& (GET_CODE (PATTERN (table)) == ADDR_VEC
|| GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
return table;
@@ -12498,7 +12496,7 @@ add_minipool_forward_ref (Mfix *fix)
{
if (GET_CODE (fix->value) == GET_CODE (mp->value)
&& fix->mode == mp->mode
- && (GET_CODE (fix->value) != CODE_LABEL
+ && (!LABEL_P (fix->value)
|| (CODE_LABEL_NUMBER (fix->value)
== CODE_LABEL_NUMBER (mp->value)))
&& rtx_equal_p (fix->value, mp->value))
@@ -12675,7 +12673,7 @@ add_minipool_backward_ref (Mfix *fix)
{
if (GET_CODE (fix->value) == GET_CODE (mp->value)
&& fix->mode == mp->mode
- && (GET_CODE (fix->value) != CODE_LABEL
+ && (!LABEL_P (fix->value)
|| (CODE_LABEL_NUMBER (fix->value)
== CODE_LABEL_NUMBER (mp->value)))
&& rtx_equal_p (fix->value, mp->value)
@@ -12907,7 +12905,7 @@ arm_barrier_cost (rtx insn)
int base_cost = 50;
rtx next = next_nonnote_insn (insn);
- if (next != NULL && GET_CODE (next) == CODE_LABEL)
+ if (next != NULL && LABEL_P (next))
base_cost -= 20;
switch (GET_CODE (insn))
@@ -12958,7 +12956,7 @@ create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
/* This code shouldn't have been called if there was a natural barrier
within range. */
- gcc_assert (GET_CODE (from) != BARRIER);
+ gcc_assert (!BARRIER_P (from));
/* Count the length of this insn. This must stay in sync with the
code that pushes minipool fixes. */
@@ -13125,8 +13123,8 @@ arm_const_double_inline_cost (rtx val)
lowpart = gen_lowpart (SImode, val);
highpart = gen_highpart_mode (SImode, mode, val);
- gcc_assert (GET_CODE (lowpart) == CONST_INT);
- gcc_assert (GET_CODE (highpart) == CONST_INT);
+ gcc_assert (CONST_INT_P (lowpart));
+ gcc_assert (CONST_INT_P (highpart));
return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
NULL_RTX, NULL_RTX, 0, 0)
@@ -13152,7 +13150,7 @@ arm_const_double_by_parts (rtx val)
part = gen_highpart_mode (SImode, mode, val);
- gcc_assert (GET_CODE (part) == CONST_INT);
+ gcc_assert (CONST_INT_P (part));
if (const_ok_for_arm (INTVAL (part))
|| const_ok_for_arm (~INTVAL (part)))
@@ -13160,7 +13158,7 @@ arm_const_double_by_parts (rtx val)
part = gen_lowpart (SImode, val);
- gcc_assert (GET_CODE (part) == CONST_INT);
+ gcc_assert (CONST_INT_P (part));
if (const_ok_for_arm (INTVAL (part))
|| const_ok_for_arm (~INTVAL (part)))
@@ -13182,14 +13180,14 @@ arm_const_double_by_immediates (rtx val)
part = gen_highpart_mode (SImode, mode, val);
- gcc_assert (GET_CODE (part) == CONST_INT);
+ gcc_assert (CONST_INT_P (part));
if (!const_ok_for_arm (INTVAL (part)))
return false;
part = gen_lowpart (SImode, val);
- gcc_assert (GET_CODE (part) == CONST_INT);
+ gcc_assert (CONST_INT_P (part));
if (!const_ok_for_arm (INTVAL (part)))
return false;
@@ -13237,7 +13235,7 @@ note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
push_minipool_fix (insn, address, recog_data.operand_loc[opno],
recog_data.operand_mode[opno], op);
}
- else if (GET_CODE (op) == MEM
+ else if (MEM_P (op)
&& GET_CODE (XEXP (op, 0)) == SYMBOL_REF
&& CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
{
@@ -13337,7 +13335,7 @@ thumb2_reorg (void)
break;
case MINUS:
- /* RSBS <Rd>,<Rn>,#0
+ /* RSBS <Rd>,<Rn>,#0
Not handled here: see NEG below. */
/* SUBS <Rd>,<Rn>,#<imm3>
SUBS <Rdn>,#<imm8>
@@ -13476,13 +13474,13 @@ arm_reorg (void)
/* The first insn must always be a note, or the code below won't
scan it properly. */
insn = get_insns ();
- gcc_assert (GET_CODE (insn) == NOTE);
+ gcc_assert (NOTE_P (insn));
minipool_pad = 0;
/* Scan all the insns and record the operands that will need fixing. */
for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
{
- if (GET_CODE (insn) == BARRIER)
+ if (BARRIER_P (insn))
push_minipool_barrier (insn, address);
else if (INSN_P (insn))
{
@@ -13518,7 +13516,7 @@ arm_reorg (void)
Mfix * this_fix;
/* Skip any further barriers before the next fix. */
- while (fix && GET_CODE (fix->insn) == BARRIER)
+ while (fix && BARRIER_P (fix->insn))
fix = fix->next;
/* No more fixes. */
@@ -13529,7 +13527,7 @@ arm_reorg (void)
for (ftmp = fix; ftmp; ftmp = ftmp->next)
{
- if (GET_CODE (ftmp->insn) == BARRIER)
+ if (BARRIER_P (ftmp->insn))
{
if (ftmp->address >= minipool_vector_head->max_address)
break;
@@ -13589,7 +13587,7 @@ arm_reorg (void)
while (ftmp)
{
- if (GET_CODE (ftmp->insn) != BARRIER
+ if (!BARRIER_P (ftmp->insn)
&& ((ftmp->minipool = add_minipool_backward_ref (ftmp))
== NULL))
break;
@@ -13601,7 +13599,7 @@ arm_reorg (void)
up and adding the constants to the pool itself. */
for (this_fix = fix; this_fix && ftmp != this_fix;
this_fix = this_fix->next)
- if (GET_CODE (this_fix->insn) != BARRIER)
+ if (!BARRIER_P (this_fix->insn))
{
rtx addr
= plus_constant (Pmode,
@@ -13744,7 +13742,7 @@ vfp_output_fstmd (rtx * operands)
strcpy (pattern, "fstmfdd%?\t%m0!, {%P1");
p = strlen (pattern);
- gcc_assert (GET_CODE (operands[1]) == REG);
+ gcc_assert (REG_P (operands[1]));
base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
for (i = 1; i < XVECLEN (operands[2], 0); i++)
@@ -14085,7 +14083,7 @@ output_move_double (rtx *operands, bool emit, int *count)
FIXME: IWMMXT allows offsets larger than ldrd can
handle, fix these up with a pair of ldr. */
if (TARGET_THUMB2
- || GET_CODE (otherops[2]) != CONST_INT
+ || !CONST_INT_P (otherops[2])
|| (INTVAL (otherops[2]) > -256
&& INTVAL (otherops[2]) < 256))
{
@@ -14111,7 +14109,7 @@ output_move_double (rtx *operands, bool emit, int *count)
FIXME: IWMMXT allows offsets larger than ldrd can handle,
fix these up with a pair of ldr. */
if (TARGET_THUMB2
- || GET_CODE (otherops[2]) != CONST_INT
+ || !CONST_INT_P (otherops[2])
|| (INTVAL (otherops[2]) > -256
&& INTVAL (otherops[2]) < 256))
{
@@ -14165,7 +14163,7 @@ output_move_double (rtx *operands, bool emit, int *count)
if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
{
- if (GET_CODE (otherops[2]) == CONST_INT && !TARGET_LDRD)
+ if (CONST_INT_P (otherops[2]) && !TARGET_LDRD)
{
switch ((int) INTVAL (otherops[2]))
{
@@ -14190,9 +14188,9 @@ output_move_double (rtx *operands, bool emit, int *count)
otherops[0] = gen_rtx_REG(SImode, REGNO(operands[0]) + 1);
operands[1] = otherops[0];
if (TARGET_LDRD
- && (GET_CODE (otherops[2]) == REG
+ && (REG_P (otherops[2])
|| TARGET_THUMB2
- || (GET_CODE (otherops[2]) == CONST_INT
+ || (CONST_INT_P (otherops[2])
&& INTVAL (otherops[2]) > -256
&& INTVAL (otherops[2]) < 256)))
{
@@ -14228,7 +14226,7 @@ output_move_double (rtx *operands, bool emit, int *count)
return "";
}
- if (GET_CODE (otherops[2]) == CONST_INT)
+ if (CONST_INT_P (otherops[2]))
{
if (emit)
{
@@ -14345,7 +14343,7 @@ output_move_double (rtx *operands, bool emit, int *count)
/* IWMMXT allows offsets larger than ldrd can handle,
fix these up with a pair of ldr. */
if (!TARGET_THUMB2
- && GET_CODE (otherops[2]) == CONST_INT
+ && CONST_INT_P (otherops[2])
&& (INTVAL(otherops[2]) <= -256
|| INTVAL(otherops[2]) >= 256))
{
@@ -14384,7 +14382,7 @@ output_move_double (rtx *operands, bool emit, int *count)
case PLUS:
otherops[2] = XEXP (XEXP (operands[0], 0), 1);
- if (GET_CODE (otherops[2]) == CONST_INT && !TARGET_LDRD)
+ if (CONST_INT_P (otherops[2]) && !TARGET_LDRD)
{
switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
{
@@ -14409,9 +14407,9 @@ output_move_double (rtx *operands, bool emit, int *count)
}
}
if (TARGET_LDRD
- && (GET_CODE (otherops[2]) == REG
+ && (REG_P (otherops[2])
|| TARGET_THUMB2
- || (GET_CODE (otherops[2]) == CONST_INT
+ || (CONST_INT_P (otherops[2])
&& INTVAL (otherops[2]) > -256
&& INTVAL (otherops[2]) < 256)))
{
@@ -14761,10 +14759,10 @@ arm_address_offset_is_imm (rtx insn)
addr = XEXP (mem, 0);
- if (GET_CODE (addr) == REG
+ if (REG_P (addr)
|| (GET_CODE (addr) == PLUS
- && GET_CODE (XEXP (addr, 0)) == REG
- && GET_CODE (XEXP (addr, 1)) == CONST_INT))
+ && REG_P (XEXP (addr, 0))
+ && CONST_INT_P (XEXP (addr, 1))))
return 1;
else
return 0;
@@ -16910,7 +16908,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
return;
case 'B':
- if (GET_CODE (x) == CONST_INT)
+ if (CONST_INT_P (x))
{
HOST_WIDE_INT val;
val = ARM_SIGN_EXTEND (~INTVAL (x));
@@ -16991,14 +16989,14 @@ arm_print_operand (FILE *stream, rtx x, int code)
The 'Q' and 'R' constraints are also available for 64-bit
constants. */
case 'Q':
- if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
+ if (CONST_INT_P (x) || CONST_DOUBLE_P (x))
{
rtx part = gen_lowpart (SImode, x);
fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, INTVAL (part));
return;
}
- if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
+ if (!REG_P (x) || REGNO (x) > LAST_ARM_REGNUM)
{
output_operand_lossage ("invalid operand for code '%c'", code);
return;
@@ -17008,7 +17006,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
return;
case 'R':
- if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
+ if (CONST_INT_P (x) || CONST_DOUBLE_P (x))
{
enum machine_mode mode = GET_MODE (x);
rtx part;
@@ -17020,7 +17018,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
return;
}
- if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
+ if (!REG_P (x) || REGNO (x) > LAST_ARM_REGNUM)
{
output_operand_lossage ("invalid operand for code '%c'", code);
return;
@@ -17030,7 +17028,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
return;
case 'H':
- if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
+ if (!REG_P (x) || REGNO (x) > LAST_ARM_REGNUM)
{
output_operand_lossage ("invalid operand for code '%c'", code);
return;
@@ -17040,7 +17038,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
return;
case 'J':
- if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
+ if (!REG_P (x) || REGNO (x) > LAST_ARM_REGNUM)
{
output_operand_lossage ("invalid operand for code '%c'", code);
return;
@@ -17050,7 +17048,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
return;
case 'K':
- if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
+ if (!REG_P (x) || REGNO (x) > LAST_ARM_REGNUM)
{
output_operand_lossage ("invalid operand for code '%c'", code);
return;
@@ -17061,7 +17059,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
case 'm':
asm_fprintf (stream, "%r",
- GET_CODE (XEXP (x, 0)) == REG
+ REG_P (XEXP (x, 0))
? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
return;
@@ -17129,7 +17127,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
return;
case 'U':
- if (GET_CODE (x) != REG
+ if (!REG_P (x)
|| REGNO (x) < FIRST_IWMMXT_GR_REGNUM
|| REGNO (x) > LAST_IWMMXT_GR_REGNUM)
/* Bad value for wCG register number. */
@@ -17144,7 +17142,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
/* Print an iWMMXt control register name. */
case 'w':
- if (GET_CODE (x) != CONST_INT
+ if (!CONST_INT_P (x)
|| INTVAL (x) < 0
|| INTVAL (x) >= 16)
/* Bad value for wC register number. */
@@ -17174,7 +17172,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
int mode = GET_MODE (x);
int regno;
- if (GET_MODE_SIZE (mode) != 8 || GET_CODE (x) != REG)
+ if (GET_MODE_SIZE (mode) != 8 || !REG_P (x))
{
output_operand_lossage ("invalid operand for code '%c'", code);
return;
@@ -17205,7 +17203,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
return;
}
- if (GET_CODE (x) != REG
+ if (!REG_P (x)
|| !IS_VFP_REGNUM (REGNO (x)))
{
output_operand_lossage ("invalid operand for code '%c'", code);
@@ -17235,7 +17233,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
int regno;
if ((GET_MODE_SIZE (mode) != 16
- && GET_MODE_SIZE (mode) != 32) || GET_CODE (x) != REG)
+ && GET_MODE_SIZE (mode) != 32) || !REG_P (x))
{
output_operand_lossage ("invalid operand for code '%c'", code);
return;
@@ -17315,7 +17313,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
bool postinc = FALSE;
unsigned align, memsize, align_bits;
- gcc_assert (GET_CODE (x) == MEM);
+ gcc_assert (MEM_P (x));
addr = XEXP (x, 0);
if (GET_CODE (addr) == POST_INC)
{
@@ -17354,9 +17352,9 @@ arm_print_operand (FILE *stream, rtx x, int code)
{
rtx addr;
- gcc_assert (GET_CODE (x) == MEM);
+ gcc_assert (MEM_P (x));
addr = XEXP (x, 0);
- gcc_assert (GET_CODE (addr) == REG);
+ gcc_assert (REG_P (addr));
asm_fprintf (stream, "[%r]", REGNO (addr));
}
return;
@@ -17367,7 +17365,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
int mode = GET_MODE (x);
int regno;
- if (GET_MODE_SIZE (mode) != 4 || GET_CODE (x) != REG)
+ if (GET_MODE_SIZE (mode) != 4 || !REG_P (x))
{
output_operand_lossage ("invalid operand for code '%c'", code);
return;
@@ -17386,7 +17384,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
return;
case 'v':
- gcc_assert (GET_CODE (x) == CONST_DOUBLE);
+ gcc_assert (CONST_DOUBLE_P (x));
fprintf (stream, "#%d", vfp3_const_double_for_fract_bits (x));
return;
@@ -17397,7 +17395,7 @@ arm_print_operand (FILE *stream, rtx x, int code)
int mode = GET_MODE (x);
int regno;
- if (GET_MODE_SIZE (mode) != 2 || GET_CODE (x) != REG)
+ if (GET_MODE_SIZE (mode) != 2 || !REG_P (x))
{
output_operand_lossage ("invalid operand for code '%c'", code);
return;
@@ -17468,15 +17466,15 @@ arm_print_operand_address (FILE *stream, rtx x)
{
int is_minus = GET_CODE (x) == MINUS;
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
asm_fprintf (stream, "[%r]", REGNO (x));
else if (GET_CODE (x) == PLUS || is_minus)
{
rtx base = XEXP (x, 0);
rtx index = XEXP (x, 1);
HOST_WIDE_INT offset = 0;
- if (GET_CODE (base) != REG
- || (GET_CODE (index) == REG && REGNO (index) == SP_REGNUM))
+ if (!REG_P (base)
+ || (REG_P (index) && REGNO (index) == SP_REGNUM))
{
/* Ensure that BASE is a register. */
/* (one of them must be). */
@@ -17524,7 +17522,7 @@ arm_print_operand_address (FILE *stream, rtx x)
{
extern enum machine_mode output_memory_reference_mode;
- gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
+ gcc_assert (REG_P (XEXP (x, 0)));
if (GET_CODE (x) == PRE_DEC || GET_CODE (x) == PRE_INC)
asm_fprintf (stream, "[%r, #%s%d]!",
@@ -17540,7 +17538,7 @@ arm_print_operand_address (FILE *stream, rtx x)
else if (GET_CODE (x) == PRE_MODIFY)
{
asm_fprintf (stream, "[%r, ", REGNO (XEXP (x, 0)));
- if (GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
+ if (CONST_INT_P (XEXP (XEXP (x, 1), 1)))
asm_fprintf (stream, "#%wd]!",
INTVAL (XEXP (XEXP (x, 1), 1)));
else
@@ -17550,7 +17548,7 @@ arm_print_operand_address (FILE *stream, rtx x)
else if (GET_CODE (x) == POST_MODIFY)
{
asm_fprintf (stream, "[%r], ", REGNO (XEXP (x, 0)));
- if (GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
+ if (CONST_INT_P (XEXP (XEXP (x, 1), 1)))
asm_fprintf (stream, "#%wd",
INTVAL (XEXP (XEXP (x, 1), 1)));
else
@@ -17561,14 +17559,14 @@ arm_print_operand_address (FILE *stream, rtx x)
}
else
{
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
asm_fprintf (stream, "[%r]", REGNO (x));
else if (GET_CODE (x) == POST_INC)
asm_fprintf (stream, "%r!", REGNO (XEXP (x, 0)));
else if (GET_CODE (x) == PLUS)
{
- gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
- if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ gcc_assert (REG_P (XEXP (x, 0)));
+ if (CONST_INT_P (XEXP (x, 1)))
asm_fprintf (stream, "[%r, #%wd]",
REGNO (XEXP (x, 0)),
INTVAL (XEXP (x, 1)));
@@ -17931,7 +17929,7 @@ thumb2_final_prescan_insn (rtx insn)
return;
/* Conditional jumps are implemented directly. */
- if (GET_CODE (insn) == JUMP_INSN)
+ if (JUMP_P (insn))
return;
predicate = COND_EXEC_TEST (body);
@@ -17948,7 +17946,7 @@ thumb2_final_prescan_insn (rtx insn)
/* Jumping into the middle of an IT block is illegal, so a label or
barrier terminates the block. */
- if (GET_CODE (insn) != INSN && GET_CODE(insn) != JUMP_INSN)
+ if (!NONJUMP_INSN_P (insn) && !JUMP_P (insn))
break;
body = PATTERN (insn);
@@ -17977,7 +17975,7 @@ thumb2_final_prescan_insn (rtx insn)
arm_condexec_masklen += n;
/* A jump must be the last instruction in a conditional block. */
- if (GET_CODE(insn) == JUMP_INSN)
+ if (JUMP_P (insn))
break;
}
/* Restore recog_data (getting the attributes of other insns can
@@ -18025,12 +18023,12 @@ arm_final_prescan_insn (rtx insn)
if (simplejump_p (insn))
{
start_insn = next_nonnote_insn (start_insn);
- if (GET_CODE (start_insn) == BARRIER)
+ if (BARRIER_P (start_insn))
{
/* XXX Isn't this always a barrier? */
start_insn = next_nonnote_insn (start_insn);
}
- if (GET_CODE (start_insn) == CODE_LABEL
+ if (LABEL_P (start_insn)
&& CODE_LABEL_NUMBER (start_insn) == arm_target_label
&& LABEL_NUSES (start_insn) == 1)
reverse = TRUE;
@@ -18040,9 +18038,9 @@ arm_final_prescan_insn (rtx insn)
else if (ANY_RETURN_P (body))
{
start_insn = next_nonnote_insn (start_insn);
- if (GET_CODE (start_insn) == BARRIER)
+ if (BARRIER_P (start_insn))
start_insn = next_nonnote_insn (start_insn);
- if (GET_CODE (start_insn) == CODE_LABEL
+ if (LABEL_P (start_insn)
&& CODE_LABEL_NUMBER (start_insn) == arm_target_label
&& LABEL_NUSES (start_insn) == 1)
{
@@ -18058,7 +18056,7 @@ arm_final_prescan_insn (rtx insn)
}
gcc_assert (!arm_ccfsm_state || reverse);
- if (GET_CODE (insn) != JUMP_INSN)
+ if (!JUMP_P (insn))
return;
/* This jump might be paralleled with a clobber of the condition codes
@@ -18159,7 +18157,7 @@ arm_final_prescan_insn (rtx insn)
if the following two insns are a barrier and the
target label. */
this_insn = next_nonnote_insn (this_insn);
- if (this_insn && GET_CODE (this_insn) == BARRIER)
+ if (this_insn && BARRIER_P (this_insn))
this_insn = next_nonnote_insn (this_insn);
if (this_insn && this_insn == label
@@ -18245,8 +18243,8 @@ arm_final_prescan_insn (rtx insn)
{
this_insn = next_nonnote_insn (this_insn);
gcc_assert (!this_insn
- || (GET_CODE (this_insn) != BARRIER
- && GET_CODE (this_insn) != CODE_LABEL));
+ || (!BARRIER_P (this_insn)
+ && !LABEL_P (this_insn)));
}
if (!this_insn)
{
@@ -18445,7 +18443,7 @@ arm_debugger_arg_offset (int value, rtx addr)
return 0;
/* We can only cope with the case where the address is held in a register. */
- if (GET_CODE (addr) != REG)
+ if (!REG_P (addr))
return 0;
/* If we are using the frame pointer to point at the argument, then
@@ -18492,13 +18490,13 @@ arm_debugger_arg_offset (int value, rtx addr)
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
{
- if ( GET_CODE (insn) == INSN
+ if ( NONJUMP_INSN_P (insn)
&& GET_CODE (PATTERN (insn)) == SET
&& REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
&& GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
- && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
+ && REG_P (XEXP (XEXP (PATTERN (insn), 1), 0))
&& REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
- && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
+ && CONST_INT_P (XEXP (XEXP (PATTERN (insn), 1), 1))
)
{
value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
@@ -20873,7 +20871,7 @@ arm_expand_builtin (tree exp,
op0 = expand_normal (arg0);
op1 = expand_normal (arg1);
op2 = expand_normal (arg2);
- if (GET_CODE (op2) == CONST_INT)
+ if (CONST_INT_P (op2))
{
icode = CODE_FOR_iwmmxt_waligni;
tmode = insn_data[icode].operand[0].mode;
@@ -21815,7 +21813,7 @@ thumb_far_jump_used_p (void)
insn with the far jump attribute set. */
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
{
- if (GET_CODE (insn) == JUMP_INSN
+ if (JUMP_P (insn)
/* Ignore tablejump patterns. */
&& GET_CODE (PATTERN (insn)) != ADDR_VEC
&& GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
@@ -22983,7 +22981,7 @@ thumb1_output_interwork (void)
const char * name;
FILE *f = asm_out_file;
- gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
+ gcc_assert (MEM_P (DECL_RTL (current_function_decl)));
gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
== SYMBOL_REF);
name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
@@ -23027,8 +23025,8 @@ thumb_load_double_from_address (rtx *operands)
rtx arg1;
rtx arg2;
- gcc_assert (GET_CODE (operands[0]) == REG);
- gcc_assert (GET_CODE (operands[1]) == MEM);
+ gcc_assert (REG_P (operands[0]));
+ gcc_assert (MEM_P (operands[1]));
/* Get the memory address. */
addr = XEXP (operands[1], 0);
@@ -23068,10 +23066,10 @@ thumb_load_double_from_address (rtx *operands)
else
base = arg1, offset = arg2;
- gcc_assert (GET_CODE (base) == REG);
+ gcc_assert (REG_P (base));
/* Catch the case of <address> = <reg> + <reg> */
- if (GET_CODE (offset) == REG)
+ if (REG_P (offset))
{
int reg_offset = REGNO (offset);
int reg_base = REGNO (base);
@@ -23671,10 +23669,10 @@ arm_output_load_gr (rtx *operands)
rtx wcgr;
rtx sum;
- if (GET_CODE (operands [1]) != MEM
+ if (!MEM_P (operands [1])
|| GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
- || GET_CODE (reg = XEXP (sum, 0)) != REG
- || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
+ || !REG_P (reg = XEXP (sum, 0))
+ || !CONST_INT_P (offset = XEXP (sum, 1))
|| ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
return "wldrw%?\t%0, %1";
@@ -23809,7 +23807,7 @@ arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
/* This is either an actual independent shift, or a shift applied to
the first operand of another operation. We want the whole shift
operation. */
- if (GET_CODE (early_op) == REG)
+ if (REG_P (early_op))
early_op = op;
return !reg_overlap_mentioned_p (value, early_op);
@@ -23842,7 +23840,7 @@ arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
/* This is either an actual independent shift, or a shift applied to
the first operand of another operation. We want the value being
shifted, in either case. */
- if (GET_CODE (early_op) != REG)
+ if (!REG_P (early_op))
early_op = XEXP (early_op, 0);
return !reg_overlap_mentioned_p (value, early_op);
@@ -24342,7 +24340,7 @@ arm_unwind_emit_sequence (FILE * asm_out_file, rtx p)
/* First insn will adjust the stack pointer. */
if (GET_CODE (e) != SET
- || GET_CODE (XEXP (e, 0)) != REG
+ || !REG_P (XEXP (e, 0))
|| REGNO (XEXP (e, 0)) != SP_REGNUM
|| GET_CODE (XEXP (e, 1)) != PLUS)
abort ();
@@ -24386,8 +24384,8 @@ arm_unwind_emit_sequence (FILE * asm_out_file, rtx p)
Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
e = XVECEXP (p, 0, i);
if (GET_CODE (e) != SET
- || GET_CODE (XEXP (e, 0)) != MEM
- || GET_CODE (XEXP (e, 1)) != REG)
+ || !MEM_P (XEXP (e, 0))
+ || !REG_P (XEXP (e, 1)))
abort ();
reg = REGNO (XEXP (e, 1));
@@ -24409,14 +24407,14 @@ arm_unwind_emit_sequence (FILE * asm_out_file, rtx p)
if (GET_CODE (e) == PLUS)
{
offset += reg_size;
- if (GET_CODE (XEXP (e, 0)) != REG
+ if (!REG_P (XEXP (e, 0))
|| REGNO (XEXP (e, 0)) != SP_REGNUM
- || GET_CODE (XEXP (e, 1)) != CONST_INT
+ || !CONST_INT_P (XEXP (e, 1))
|| offset != INTVAL (XEXP (e, 1)))
abort ();
}
else if (i != 1
- || GET_CODE (e) != REG
+ || !REG_P (e)
|| REGNO (e) != SP_REGNUM)
abort ();
#endif
@@ -24440,7 +24438,7 @@ arm_unwind_emit_set (FILE * asm_out_file, rtx p)
case MEM:
/* Pushing a single register. */
if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
- || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
+ || !REG_P (XEXP (XEXP (e0, 0), 0))
|| REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
abort ();
@@ -24457,9 +24455,9 @@ arm_unwind_emit_set (FILE * asm_out_file, rtx p)
{
/* A stack increment. */
if (GET_CODE (e1) != PLUS
- || GET_CODE (XEXP (e1, 0)) != REG
+ || !REG_P (XEXP (e1, 0))
|| REGNO (XEXP (e1, 0)) != SP_REGNUM
- || GET_CODE (XEXP (e1, 1)) != CONST_INT)
+ || !CONST_INT_P (XEXP (e1, 1)))
abort ();
asm_fprintf (asm_out_file, "\t.pad #%wd\n",
@@ -24471,8 +24469,8 @@ arm_unwind_emit_set (FILE * asm_out_file, rtx p)
if (GET_CODE (e1) == PLUS)
{
- if (GET_CODE (XEXP (e1, 0)) != REG
- || GET_CODE (XEXP (e1, 1)) != CONST_INT)
+ if (!REG_P (XEXP (e1, 0))
+ || !CONST_INT_P (XEXP (e1, 1)))
abort ();
reg = REGNO (XEXP (e1, 0));
offset = INTVAL (XEXP (e1, 1));
@@ -24480,7 +24478,7 @@ arm_unwind_emit_set (FILE * asm_out_file, rtx p)
HARD_FRAME_POINTER_REGNUM, reg,
offset);
}
- else if (GET_CODE (e1) == REG)
+ else if (REG_P (e1))
{
reg = REGNO (e1);
asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
@@ -24489,15 +24487,15 @@ arm_unwind_emit_set (FILE * asm_out_file, rtx p)
else
abort ();
}
- else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
+ else if (REG_P (e1) && REGNO (e1) == SP_REGNUM)
{
/* Move from sp to reg. */
asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
}
else if (GET_CODE (e1) == PLUS
- && GET_CODE (XEXP (e1, 0)) == REG
+ && REG_P (XEXP (e1, 0))
&& REGNO (XEXP (e1, 0)) == SP_REGNUM
- && GET_CODE (XEXP (e1, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (e1, 1)))
{
/* Set reg to offset from sp. */
asm_fprintf (asm_out_file, "\t.movsp %r, #%d\n",
@@ -24607,7 +24605,7 @@ arm_output_ttype (rtx x)
fputs ("\t.word\t", asm_out_file);
output_addr_const (asm_out_file, x);
/* Use special relocations for symbol references. */
- if (GET_CODE (x) != CONST_INT)
+ if (!CONST_INT_P (x))
fputs ("(TARGET2)", asm_out_file);
fputc ('\n', asm_out_file);
@@ -25294,7 +25292,7 @@ vfp3_const_double_for_fract_bits (rtx operand)
{
REAL_VALUE_TYPE r0;
- if (GET_CODE (operand) != CONST_DOUBLE)
+ if (!CONST_DOUBLE_P (operand))
return 0;
REAL_VALUE_FROM_CONST_DOUBLE (r0, operand);
@@ -26182,7 +26180,7 @@ arm_vectorize_vec_perm_const_ok (enum machine_mode vmode,
bool
arm_autoinc_modes_ok_p (enum machine_mode mode, enum arm_auto_incmodes code)
{
- /* If we are soft float and we do not have ldrd
+ /* If we are soft float and we do not have ldrd
then all auto increment forms are ok. */
if (TARGET_SOFT_FLOAT && (TARGET_LDRD || GET_MODE_SIZE (mode) <= 4))
return true;
@@ -26197,7 +26195,7 @@ arm_autoinc_modes_ok_p (enum machine_mode mode, enum arm_auto_incmodes code)
{
if (code != ARM_PRE_DEC)
return true;
- else
+ else
return false;
}
@@ -26205,7 +26203,7 @@ arm_autoinc_modes_ok_p (enum machine_mode mode, enum arm_auto_incmodes code)
case ARM_POST_DEC:
case ARM_PRE_INC:
- /* Without LDRD and mode size greater than
+ /* Without LDRD and mode size greater than
word size, there is no point in auto-incrementing
because ldm and stm will not have these forms. */
if (!TARGET_LDRD && GET_MODE_SIZE (mode) > 4)
@@ -26478,7 +26476,7 @@ arm_emit_coreregs_64bit_shift (enum rtx_code code, rtx out, rtx in,
}
-/* Returns true if a valid comparison operation and makes
+/* Returns true if a valid comparison operation and makes
the operands in a form that is valid. */
bool
arm_validize_comparison (rtx *comparison, rtx * op1, rtx * op2)
@@ -26511,7 +26509,7 @@ arm_validize_comparison (rtx *comparison, rtx * op1, rtx * op2)
if (!cmpdi_operand (*op2, mode))
*op2 = force_reg (mode, *op2);
return true;
-
+
case SFmode:
case DFmode:
if (!arm_float_compare_operand (*op1, mode))
@@ -26522,7 +26520,7 @@ arm_validize_comparison (rtx *comparison, rtx * op1, rtx * op2)
default:
break;
}
-
+
return false;
}
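For reference, the substitution applied throughout arm.c above is purely mechanical: each open-coded GET_CODE comparison is replaced by the corresponding rtx predicate macro from rtl.h. A sketch of those definitions follows (an approximation of the 4.8-era header, shown for orientation only; the real rtl.h also provides checking variants under ENABLE_RTL_CHECKING):

/* Sketch of the rtx code predicates used by this patch.
   Each expands to the GET_CODE test it replaces.  */
#define REG_P(X)            (GET_CODE (X) == REG)
#define MEM_P(X)            (GET_CODE (X) == MEM)
#define CONST_INT_P(X)      (GET_CODE (X) == CONST_INT)
#define CONST_DOUBLE_P(X)   (GET_CODE (X) == CONST_DOUBLE)
#define LABEL_P(X)          (GET_CODE (X) == CODE_LABEL)
#define JUMP_P(X)           (GET_CODE (X) == JUMP_INSN)
#define CALL_P(X)           (GET_CODE (X) == CALL_INSN)
#define NONJUMP_INSN_P(X)   (GET_CODE (X) == INSN)
#define NOTE_P(X)           (GET_CODE (X) == NOTE)
#define BARRIER_P(X)        (GET_CODE (X) == BARRIER)

Since the macros expand to the same comparisons, the rewrite is behavior-preserving; it only states the intent of each test directly.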
diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index 8acde0e6267..5f34f2a8100 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -1262,8 +1262,8 @@ enum reg_class
&& CONSTANT_P (X)) \
? GENERAL_REGS : \
(((MODE) == HImode && ! arm_arch4 \
- && (GET_CODE (X) == MEM \
- || ((GET_CODE (X) == REG || GET_CODE (X) == SUBREG) \
+ && (MEM_P (X) \
+ || ((REG_P (X) || GET_CODE (X) == SUBREG) \
&& true_regnum (X) == -1))) \
? GENERAL_REGS : NO_REGS) \
: THUMB_SECONDARY_INPUT_RELOAD_CLASS (CLASS, MODE, X)))
@@ -1909,10 +1909,10 @@ enum arm_auto_incmodes
REG_OK_FOR_INDEX_P (X)
#define ARM_BASE_REGISTER_RTX_P(X) \
- (GET_CODE (X) == REG && ARM_REG_OK_FOR_BASE_P (X))
+ (REG_P (X) && ARM_REG_OK_FOR_BASE_P (X))
#define ARM_INDEX_REGISTER_RTX_P(X) \
- (GET_CODE (X) == REG && ARM_REG_OK_FOR_INDEX_P (X))
+ (REG_P (X) && ARM_REG_OK_FOR_INDEX_P (X))
/* Specify the machine mode that this machine uses
for the index in the tablejump instruction. */
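As a usage illustration (a hypothetical helper, not part of this patch), the predicate style carried through arm.h above makes an address-shape test such as the one in arm_address_offset_is_imm read as:

/* Hypothetical sketch: accept (reg) or (plus (reg) (const_int)),
   mirroring the checks rewritten in this patch.  */
static int
reg_or_reg_plus_const_p (rtx addr)
{
  return (REG_P (addr)
          || (GET_CODE (addr) == PLUS
              && REG_P (XEXP (addr, 0))
              && CONST_INT_P (XEXP (addr, 1))));
}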
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index 6a642bfc01e..a60e659bf68 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -607,9 +607,9 @@
"
if (TARGET_THUMB1)
{
- if (GET_CODE (operands[1]) != REG)
+ if (!REG_P (operands[1]))
operands[1] = force_reg (DImode, operands[1]);
- if (GET_CODE (operands[2]) != REG)
+ if (!REG_P (operands[2]))
operands[2] = force_reg (DImode, operands[2]);
}
"
@@ -716,7 +716,7 @@
(match_operand:SI 2 "reg_or_int_operand" "")))]
"TARGET_EITHER"
"
- if (TARGET_32BIT && GET_CODE (operands[2]) == CONST_INT)
+ if (TARGET_32BIT && CONST_INT_P (operands[2]))
{
arm_split_constant (PLUS, SImode, NULL_RTX,
INTVAL (operands[2]), operands[0], operands[1],
@@ -764,7 +764,7 @@
subw%?\\t%0, %1, #%n2
#"
"TARGET_32BIT
- && GET_CODE (operands[2]) == CONST_INT
+ && CONST_INT_P (operands[2])
&& !const_ok_for_op (INTVAL (operands[2]), PLUS)
&& (reload_completed || !arm_eliminable_register (operands[1]))"
[(clobber (const_int 0))]
@@ -799,7 +799,7 @@
\"#\"
};
if ((which_alternative == 2 || which_alternative == 6)
- && GET_CODE (operands[2]) == CONST_INT
+ && CONST_INT_P (operands[2])
&& INTVAL (operands[2]) < 0)
return \"sub\\t%0, %1, #%n2\";
return asms[which_alternative];
@@ -1111,9 +1111,9 @@
"
if (TARGET_THUMB1)
{
- if (GET_CODE (operands[1]) != REG)
+ if (!REG_P (operands[1]))
operands[1] = force_reg (DImode, operands[1]);
- if (GET_CODE (operands[2]) != REG)
+ if (!REG_P (operands[2]))
operands[2] = force_reg (DImode, operands[2]);
}
"
@@ -1207,7 +1207,7 @@
(match_operand:SI 2 "s_register_operand" "")))]
"TARGET_EITHER"
"
- if (GET_CODE (operands[1]) == CONST_INT)
+ if (CONST_INT_P (operands[1]))
{
if (TARGET_32BIT)
{
@@ -1242,7 +1242,7 @@
sub%?\\t%0, %1, %2
sub%?\\t%0, %1, %2
#"
- "&& (GET_CODE (operands[1]) == CONST_INT
+ "&& (CONST_INT_P (operands[1])
&& !const_ok_for_arm (INTVAL (operands[1])))"
[(clobber (const_int 0))]
"
@@ -2097,7 +2097,7 @@
"
if (TARGET_32BIT)
{
- if (GET_CODE (operands[2]) == CONST_INT)
+ if (CONST_INT_P (operands[2]))
{
if (INTVAL (operands[2]) == 255 && arm_arch6)
{
@@ -2116,7 +2116,7 @@
}
else /* TARGET_THUMB1 */
{
- if (GET_CODE (operands[2]) != CONST_INT)
+ if (!CONST_INT_P (operands[2]))
{
rtx tmp = force_reg (SImode, operands[2]);
if (rtx_equal_p (operands[0], operands[1]))
@@ -2179,7 +2179,7 @@
bic%?\\t%0, %1, #%B2
#"
"TARGET_32BIT
- && GET_CODE (operands[2]) == CONST_INT
+ && CONST_INT_P (operands[2])
&& !(const_ok_for_arm (INTVAL (operands[2]))
|| const_ok_for_arm (~INTVAL (operands[2])))"
[(clobber (const_int 0))]
@@ -2512,7 +2512,7 @@
{
bool use_bfi = TRUE;
- if (GET_CODE (operands[3]) == CONST_INT)
+ if (CONST_INT_P (operands[3]))
{
HOST_WIDE_INT val = INTVAL (operands[3]) & mask;
@@ -2530,7 +2530,7 @@
if (use_bfi)
{
- if (GET_CODE (operands[3]) != REG)
+ if (!REG_P (operands[3]))
operands[3] = force_reg (SImode, operands[3]);
emit_insn (gen_insv_t2 (operands[0], operands[1], operands[2],
@@ -2558,7 +2558,7 @@
else
subtarget = target;
- if (GET_CODE (operands[3]) == CONST_INT)
+ if (CONST_INT_P (operands[3]))
{
/* Since we are inserting a known constant, we may be able to
reduce the number of bits that we have to clear so that
@@ -2625,7 +2625,7 @@
/* Mask out any bits in operand[3] that are not needed. */
emit_insn (gen_andsi3 (op1, operands[3], op0));
- if (GET_CODE (op0) == CONST_INT
+ if (CONST_INT_P (op0)
&& (const_ok_for_arm (mask << start_bit)
|| const_ok_for_arm (~(mask << start_bit))))
{
@@ -2634,7 +2634,7 @@
}
else
{
- if (GET_CODE (op0) == CONST_INT)
+ if (CONST_INT_P (op0))
{
rtx tmp = gen_reg_rtx (SImode);
@@ -2871,7 +2871,7 @@
(match_operand:SI 2 "reg_or_int_operand" "")))]
"TARGET_EITHER"
"
- if (GET_CODE (operands[2]) == CONST_INT)
+ if (CONST_INT_P (operands[2]))
{
if (TARGET_32BIT)
{
@@ -2905,7 +2905,7 @@
orn%?\\t%0, %1, #%B2
#"
"TARGET_32BIT
- && GET_CODE (operands[2]) == CONST_INT
+ && CONST_INT_P (operands[2])
&& !(const_ok_for_arm (INTVAL (operands[2]))
|| (TARGET_THUMB2 && const_ok_for_arm (~INTVAL (operands[2]))))"
[(clobber (const_int 0))]
@@ -3010,7 +3010,7 @@
(xor:SI (match_operand:SI 1 "s_register_operand" "")
(match_operand:SI 2 "reg_or_int_operand" "")))]
"TARGET_EITHER"
- "if (GET_CODE (operands[2]) == CONST_INT)
+ "if (CONST_INT_P (operands[2]))
{
if (TARGET_32BIT)
{
@@ -3042,7 +3042,7 @@
eor%?\\t%0, %1, %2
#"
"TARGET_32BIT
- && GET_CODE (operands[2]) == CONST_INT
+ && CONST_INT_P (operands[2])
&& !const_ok_for_arm (INTVAL (operands[2]))"
[(clobber (const_int 0))]
{
@@ -3542,7 +3542,7 @@
(match_operand:SI 2 "arm_rhs_operand" "")))]
"TARGET_EITHER"
"
- if (GET_CODE (operands[2]) == CONST_INT
+ if (CONST_INT_P (operands[2])
&& ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
{
emit_insn (gen_movsi (operands[0], const0_rtx));
@@ -3617,7 +3617,7 @@
(match_operand:SI 2 "arm_rhs_operand" "")))]
"TARGET_EITHER"
"
- if (GET_CODE (operands[2]) == CONST_INT
+ if (CONST_INT_P (operands[2])
&& ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
operands[2] = GEN_INT (31);
"
@@ -3689,7 +3689,7 @@
(match_operand:SI 2 "arm_rhs_operand" "")))]
"TARGET_EITHER"
"
- if (GET_CODE (operands[2]) == CONST_INT
+ if (CONST_INT_P (operands[2])
&& ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
{
emit_insn (gen_movsi (operands[0], const0_rtx));
@@ -3713,7 +3713,7 @@
(match_operand:SI 2 "reg_or_int_operand" "")))]
"TARGET_32BIT"
"
- if (GET_CODE (operands[2]) == CONST_INT)
+ if (CONST_INT_P (operands[2]))
operands[2] = GEN_INT ((32 - INTVAL (operands[2])) % 32);
else
{
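
Worked example for the (32 - n) % 32 canonicalization above (this appears to be the rotate-left expander): ARM has no rotate-left instruction, only ROR, so a constant left-rotate by n is rewritten as a right-rotate by (32 - n) % 32. For instance n = 8 becomes a rotate right by 24, and n = 0 maps to 0 rather than 32 thanks to the % 32.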
@@ -3732,13 +3732,13 @@
"
if (TARGET_32BIT)
{
- if (GET_CODE (operands[2]) == CONST_INT
+ if (CONST_INT_P (operands[2])
&& ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
operands[2] = GEN_INT (INTVAL (operands[2]) % 32);
}
else /* TARGET_THUMB1 */
{
- if (GET_CODE (operands [2]) == CONST_INT)
+ if (CONST_INT_P (operands [2]))
operands [2] = force_reg (SImode, operands[2]);
}
"
@@ -4595,7 +4595,7 @@
rtx a = XEXP (mem, 0);
/* This can happen due to bugs in reload. */
- if (GET_CODE (a) == REG && REGNO (a) == SP_REGNUM)
+ if (REG_P (a) && REGNO (a) == SP_REGNUM)
{
rtx ops[2];
ops[0] = operands[0];
@@ -4653,7 +4653,7 @@
(zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
"TARGET_EITHER"
{
- if (TARGET_ARM && !arm_arch6 && GET_CODE (operands[1]) != MEM)
+ if (TARGET_ARM && !arm_arch6 && !MEM_P (operands[1]))
{
emit_insn (gen_andsi3 (operands[0],
gen_lowpart (SImode, operands[1]),
@@ -4746,7 +4746,7 @@
[(set (match_operand:SI 0 "s_register_operand" "")
(zero_extend:SI (subreg:QI (match_operand:SI 1 "" "") 0)))
(clobber (match_operand:SI 2 "s_register_operand" ""))]
- "TARGET_32BIT && (GET_CODE (operands[1]) != MEM) && ! BYTES_BIG_ENDIAN"
+ "TARGET_32BIT && (!MEM_P (operands[1])) && ! BYTES_BIG_ENDIAN"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0) (and:SI (match_dup 2) (const_int 255)))]
""
@@ -4756,7 +4756,7 @@
[(set (match_operand:SI 0 "s_register_operand" "")
(zero_extend:SI (subreg:QI (match_operand:SI 1 "" "") 3)))
(clobber (match_operand:SI 2 "s_register_operand" ""))]
- "TARGET_32BIT && (GET_CODE (operands[1]) != MEM) && BYTES_BIG_ENDIAN"
+ "TARGET_32BIT && (!MEM_P (operands[1])) && BYTES_BIG_ENDIAN"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0) (and:SI (match_dup 2) (const_int 255)))]
""
@@ -4872,10 +4872,10 @@
rtx b = XEXP (mem, 1);
if (GET_CODE (a) == LABEL_REF
- && GET_CODE (b) == CONST_INT)
+ && CONST_INT_P (b))
return \"ldr\\t%0, %1\";
- if (GET_CODE (b) == REG)
+ if (REG_P (b))
return \"ldrsh\\t%0, %1\";
ops[1] = a;
@@ -4887,7 +4887,7 @@
ops[2] = const0_rtx;
}
- gcc_assert (GET_CODE (ops[1]) == REG);
+ gcc_assert (REG_P (ops[1]));
ops[0] = operands[0];
if (reg_mentioned_p (operands[2], ops[1]))
@@ -4998,7 +4998,7 @@
"TARGET_ARM"
"
{
- if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ if (arm_arch4 && MEM_P (operands[1]))
{
emit_insn (gen_rtx_SET (VOIDmode,
operands[0],
@@ -5235,11 +5235,11 @@
;;{
;; rtx insn;
;;
-;; if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+;; if (MEM_P (operands[0]) && MEM_P (operands[1]))
;; operands[1] = copy_to_reg (operands[1]);
-;; if (GET_CODE (operands[0]) == MEM)
+;; if (MEM_P (operands[0]))
;; insn = gen_storeti (XEXP (operands[0], 0), operands[1]);
-;; else if (GET_CODE (operands[1]) == MEM)
+;; else if (MEM_P (operands[1]))
;; insn = gen_loadti (operands[0], XEXP (operands[1], 0));
;; else
;; FAIL;
@@ -5280,7 +5280,7 @@
"
if (can_create_pseudo_p ())
{
- if (GET_CODE (operands[0]) != REG)
+ if (!REG_P (operands[0]))
operands[1] = force_reg (DImode, operands[1]);
}
"
@@ -5462,10 +5462,10 @@
if (TARGET_32BIT)
{
/* Everything except mem = const or mem = mem can be done easily. */
- if (GET_CODE (operands[0]) == MEM)
+ if (MEM_P (operands[0]))
operands[1] = force_reg (SImode, operands[1]);
if (arm_general_register_operand (operands[0], SImode)
- && GET_CODE (operands[1]) == CONST_INT
+ && CONST_INT_P (operands[1])
&& !(const_ok_for_arm (INTVAL (operands[1]))
|| const_ok_for_arm (~INTVAL (operands[1]))))
{
@@ -5479,7 +5479,7 @@
{
if (can_create_pseudo_p ())
{
- if (GET_CODE (operands[0]) != REG)
+ if (!REG_P (operands[0]))
operands[1] = force_reg (SImode, operands[1]);
}
}
@@ -5886,7 +5886,7 @@
rtx addr = XEXP (op1, 0);
enum rtx_code code = GET_CODE (addr);
- if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ if ((code == PLUS && !CONST_INT_P (XEXP (addr, 1)))
|| code == MINUS)
op1 = replace_equiv_address (operands[1], force_reg (SImode, addr));
@@ -5911,7 +5911,7 @@
rtx addr = XEXP (op1, 0);
enum rtx_code code = GET_CODE (addr);
- if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ if ((code == PLUS && !CONST_INT_P (XEXP (addr, 1)))
|| code == MINUS)
op1 = replace_equiv_address (op1, force_reg (SImode, addr));
@@ -5937,7 +5937,7 @@
rtx op0 = operands[0];
enum rtx_code code = GET_CODE (addr);
- if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ if ((code == PLUS && !CONST_INT_P (XEXP (addr, 1)))
|| code == MINUS)
op0 = replace_equiv_address (op0, force_reg (SImode, addr));
@@ -5991,18 +5991,18 @@
{
if (can_create_pseudo_p ())
{
- if (GET_CODE (operands[0]) == MEM)
+ if (MEM_P (operands[0]))
{
if (arm_arch4)
{
emit_insn (gen_storehi_single_op (operands[0], operands[1]));
DONE;
}
- if (GET_CODE (operands[1]) == CONST_INT)
+ if (CONST_INT_P (operands[1]))
emit_insn (gen_storeinthi (operands[0], operands[1]));
else
{
- if (GET_CODE (operands[1]) == MEM)
+ if (MEM_P (operands[1]))
operands[1] = force_reg (HImode, operands[1]);
if (BYTES_BIG_ENDIAN)
emit_insn (gen_storehi_bigend (operands[1], operands[0]));
@@ -6012,7 +6012,7 @@
DONE;
}
/* Sign extend a constant, and keep it in an SImode reg. */
- else if (GET_CODE (operands[1]) == CONST_INT)
+ else if (CONST_INT_P (operands[1]))
{
rtx reg = gen_reg_rtx (SImode);
HOST_WIDE_INT val = INTVAL (operands[1]) & 0xffff;
@@ -6034,7 +6034,7 @@
operands[1] = gen_lowpart (HImode, reg);
}
else if (arm_arch4 && optimize && can_create_pseudo_p ()
- && GET_CODE (operands[1]) == MEM)
+ && MEM_P (operands[1]))
{
rtx reg = gen_reg_rtx (SImode);
@@ -6043,18 +6043,17 @@
}
else if (!arm_arch4)
{
- if (GET_CODE (operands[1]) == MEM)
+ if (MEM_P (operands[1]))
{
rtx base;
rtx offset = const0_rtx;
rtx reg = gen_reg_rtx (SImode);
- if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
+ if ((REG_P (base = XEXP (operands[1], 0))
|| (GET_CODE (base) == PLUS
- && (GET_CODE (offset = XEXP (base, 1))
- == CONST_INT)
+ && (CONST_INT_P (offset = XEXP (base, 1)))
&& ((INTVAL(offset) & 1) != 1)
- && GET_CODE (base = XEXP (base, 0)) == REG))
+ && REG_P (base = XEXP (base, 0))))
&& REGNO_POINTER_ALIGN (REGNO (base)) >= 32)
{
rtx new_rtx;
@@ -6080,13 +6079,13 @@
}
}
/* Handle loading a large integer during reload. */
- else if (GET_CODE (operands[1]) == CONST_INT
+ else if (CONST_INT_P (operands[1])
&& !const_ok_for_arm (INTVAL (operands[1]))
&& !const_ok_for_arm (~INTVAL (operands[1])))
{
/* Writing a constant to memory needs a scratch, which should
be handled with SECONDARY_RELOADs. */
- gcc_assert (GET_CODE (operands[0]) == REG);
+ gcc_assert (REG_P (operands[0]));
operands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
emit_insn (gen_movsi (operands[0], operands[1]));
@@ -6098,10 +6097,10 @@
/* Thumb-2 can do everything except mem=mem and mem=const easily. */
if (can_create_pseudo_p ())
{
- if (GET_CODE (operands[0]) != REG)
+ if (!REG_P (operands[0]))
operands[1] = force_reg (HImode, operands[1]);
/* Zero extend a constant, and keep it in an SImode reg. */
- else if (GET_CODE (operands[1]) == CONST_INT)
+ else if (CONST_INT_P (operands[1]))
{
rtx reg = gen_reg_rtx (SImode);
HOST_WIDE_INT val = INTVAL (operands[1]) & 0xffff;
@@ -6115,7 +6114,7 @@
{
if (can_create_pseudo_p ())
{
- if (GET_CODE (operands[1]) == CONST_INT)
+ if (CONST_INT_P (operands[1]))
{
rtx reg = gen_reg_rtx (SImode);
@@ -6131,21 +6130,21 @@
fixup_stack_1, by checking for other kinds of invalid addresses,
e.g. a bare reference to a virtual register. This may confuse the
alpha though, which must handle this case differently. */
- if (GET_CODE (operands[0]) == MEM
+ if (MEM_P (operands[0])
&& !memory_address_p (GET_MODE (operands[0]),
XEXP (operands[0], 0)))
operands[0]
= replace_equiv_address (operands[0],
copy_to_reg (XEXP (operands[0], 0)));
- if (GET_CODE (operands[1]) == MEM
+ if (MEM_P (operands[1])
&& !memory_address_p (GET_MODE (operands[1]),
XEXP (operands[1], 0)))
operands[1]
= replace_equiv_address (operands[1],
copy_to_reg (XEXP (operands[1], 0)));
- if (GET_CODE (operands[1]) == MEM && optimize > 0)
+ if (MEM_P (operands[1]) && optimize > 0)
{
rtx reg = gen_reg_rtx (SImode);
@@ -6153,17 +6152,17 @@
operands[1] = gen_lowpart (HImode, reg);
}
- if (GET_CODE (operands[0]) == MEM)
+ if (MEM_P (operands[0]))
operands[1] = force_reg (HImode, operands[1]);
}
- else if (GET_CODE (operands[1]) == CONST_INT
+ else if (CONST_INT_P (operands[1])
&& !satisfies_constraint_I (operands[1]))
{
/* Handle loading a large integer during reload. */
/* Writing a constant to memory needs a scratch, which should
be handled with SECONDARY_RELOADs. */
- gcc_assert (GET_CODE (operands[0]) == REG);
+ gcc_assert (REG_P (operands[0]));
operands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
emit_insn (gen_movsi (operands[0], operands[1]));
@@ -6192,7 +6191,7 @@
/* The stack pointer can end up being taken as an index register.
Catch this case here and deal with it. */
if (GET_CODE (XEXP (operands[1], 0)) == PLUS
- && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == REG
+ && REG_P (XEXP (XEXP (operands[1], 0), 0))
&& REGNO (XEXP (XEXP (operands[1], 0), 0)) == SP_REGNUM)
{
rtx ops[2];
@@ -6347,7 +6346,7 @@
if (can_create_pseudo_p ())
{
- if (GET_CODE (operands[1]) == CONST_INT)
+ if (CONST_INT_P (operands[1]))
{
rtx reg = gen_reg_rtx (SImode);
@@ -6370,13 +6369,13 @@
fixup_stack_1, by checking for other kinds of invalid addresses,
e.g. a bare reference to a virtual register. This may confuse the
alpha though, which must handle this case differently. */
- if (GET_CODE (operands[0]) == MEM
+ if (MEM_P (operands[0])
&& !memory_address_p (GET_MODE (operands[0]),
XEXP (operands[0], 0)))
operands[0]
= replace_equiv_address (operands[0],
copy_to_reg (XEXP (operands[0], 0)));
- if (GET_CODE (operands[1]) == MEM
+ if (MEM_P (operands[1])
&& !memory_address_p (GET_MODE (operands[1]),
XEXP (operands[1], 0)))
operands[1]
@@ -6384,7 +6383,7 @@
copy_to_reg (XEXP (operands[1], 0)));
}
- if (GET_CODE (operands[1]) == MEM && optimize > 0)
+ if (MEM_P (operands[1]) && optimize > 0)
{
rtx reg = gen_reg_rtx (SImode);
@@ -6392,18 +6391,18 @@
operands[1] = gen_lowpart (QImode, reg);
}
- if (GET_CODE (operands[0]) == MEM)
+ if (MEM_P (operands[0]))
operands[1] = force_reg (QImode, operands[1]);
}
else if (TARGET_THUMB
- && GET_CODE (operands[1]) == CONST_INT
+ && CONST_INT_P (operands[1])
&& !satisfies_constraint_I (operands[1]))
{
/* Handle loading a large integer during reload. */
/* Writing a constant to memory needs a scratch, which should
be handled with SECONDARY_RELOADs. */
- gcc_assert (GET_CODE (operands[0]) == REG);
+ gcc_assert (REG_P (operands[0]));
operands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
emit_insn (gen_movsi (operands[0], operands[1]));
@@ -6460,14 +6459,14 @@
"
if (TARGET_32BIT)
{
- if (GET_CODE (operands[0]) == MEM)
+ if (MEM_P (operands[0]))
operands[1] = force_reg (HFmode, operands[1]);
}
else /* TARGET_THUMB1 */
{
if (can_create_pseudo_p ())
{
- if (GET_CODE (operands[0]) != REG)
+ if (!REG_P (operands[0]))
operands[1] = force_reg (HFmode, operands[1]);
}
}
@@ -6531,13 +6530,13 @@
case 1:
{
rtx addr;
- gcc_assert (GET_CODE(operands[1]) == MEM);
+ gcc_assert (MEM_P (operands[1]));
addr = XEXP (operands[1], 0);
if (GET_CODE (addr) == LABEL_REF
|| (GET_CODE (addr) == CONST
&& GET_CODE (XEXP (addr, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (addr, 0), 0)) == LABEL_REF
- && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT))
+ && CONST_INT_P (XEXP (XEXP (addr, 0), 1))))
{
/* Constant pool entry. */
return \"ldr\\t%0, %1\";
@@ -6561,14 +6560,14 @@
"
if (TARGET_32BIT)
{
- if (GET_CODE (operands[0]) == MEM)
+ if (MEM_P (operands[0]))
operands[1] = force_reg (SFmode, operands[1]);
}
else /* TARGET_THUMB1 */
{
if (can_create_pseudo_p ())
{
- if (GET_CODE (operands[0]) != REG)
+ if (!REG_P (operands[0]))
operands[1] = force_reg (SFmode, operands[1]);
}
}
@@ -6582,7 +6581,7 @@
(match_operand:SF 1 "immediate_operand" ""))]
"TARGET_EITHER
&& reload_completed
- && GET_CODE (operands[1]) == CONST_DOUBLE"
+ && CONST_DOUBLE_P (operands[1])"
[(set (match_dup 2) (match_dup 3))]
"
operands[2] = gen_lowpart (SImode, operands[0]);
@@ -6597,7 +6596,7 @@
(match_operand:SF 1 "general_operand" "r,mE,r"))]
"TARGET_32BIT
&& TARGET_SOFT_FLOAT
- && (GET_CODE (operands[0]) != MEM
+ && (!MEM_P (operands[0])
|| register_operand (operands[1], SFmode))"
"@
mov%?\\t%0, %1
@@ -6640,14 +6639,14 @@
"
if (TARGET_32BIT)
{
- if (GET_CODE (operands[0]) == MEM)
+ if (MEM_P (operands[0]))
operands[1] = force_reg (DFmode, operands[1]);
}
else /* TARGET_THUMB */
{
if (can_create_pseudo_p ())
{
- if (GET_CODE (operands[0]) != REG)
+ if (!REG_P (operands[0]))
operands[1] = force_reg (DFmode, operands[1]);
}
}
@@ -6778,11 +6777,11 @@
HOST_WIDE_INT offset = 0;
/* Support only fixed point registers. */
- if (GET_CODE (operands[2]) != CONST_INT
+ if (!CONST_INT_P (operands[2])
|| INTVAL (operands[2]) > 14
|| INTVAL (operands[2]) < 2
- || GET_CODE (operands[1]) != MEM
- || GET_CODE (operands[0]) != REG
+ || !MEM_P (operands[1])
+ || !REG_P (operands[0])
|| REGNO (operands[0]) > (LAST_ARM_REGNUM - 1)
|| REGNO (operands[0]) + INTVAL (operands[2]) > LAST_ARM_REGNUM)
FAIL;
@@ -6803,11 +6802,11 @@
HOST_WIDE_INT offset = 0;
/* Support only fixed point registers. */
- if (GET_CODE (operands[2]) != CONST_INT
+ if (!CONST_INT_P (operands[2])
|| INTVAL (operands[2]) > 14
|| INTVAL (operands[2]) < 2
- || GET_CODE (operands[1]) != REG
- || GET_CODE (operands[0]) != MEM
+ || !REG_P (operands[1])
+ || !MEM_P (operands[0])
|| REGNO (operands[1]) > (LAST_ARM_REGNUM - 1)
|| REGNO (operands[1]) + INTVAL (operands[2]) > LAST_ARM_REGNUM)
FAIL;
@@ -6999,7 +6998,7 @@
gcc_assert (GET_MODE (operands[1]) == DImode
|| GET_MODE (operands[2]) == DImode);
- if (!arm_validize_comparison (&operands[0], &operands[1], &operands[2]))
+ if (!arm_validize_comparison (&operands[0], &operands[1], &operands[2]))
FAIL;
emit_jump_insn (gen_cbranch_cc (operands[0], operands[1], operands[2],
operands[3]));
@@ -7430,7 +7429,7 @@
cond[1] = operands[2];
cond[2] = operands[3];
- if (GET_CODE (cond[2]) == CONST_INT && INTVAL (cond[2]) < 0)
+ if (CONST_INT_P (cond[2]) && INTVAL (cond[2]) < 0)
output_asm_insn (\"sub\\t%0, %1, #%n2\", cond);
else
output_asm_insn (\"add\\t%0, %1, %2\", cond);
@@ -9849,13 +9848,13 @@
if (GET_CODE (operands[5]) == LT
&& (operands[4] == const0_rtx))
{
- if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ if (which_alternative != 1 && REG_P (operands[1]))
{
if (operands[2] == const0_rtx)
return \"and\\t%0, %1, %3, asr #31\";
return \"ands\\t%0, %1, %3, asr #32\;movcc\\t%0, %2\";
}
- else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ else if (which_alternative != 0 && REG_P (operands[2]))
{
if (operands[1] == const0_rtx)
return \"bic\\t%0, %2, %3, asr #31\";
@@ -9868,13 +9867,13 @@
if (GET_CODE (operands[5]) == GE
&& (operands[4] == const0_rtx))
{
- if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ if (which_alternative != 1 && REG_P (operands[1]))
{
if (operands[2] == const0_rtx)
return \"bic\\t%0, %1, %3, asr #31\";
return \"bics\\t%0, %1, %3, asr #32\;movcs\\t%0, %2\";
}
- else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ else if (which_alternative != 0 && REG_P (operands[2]))
{
if (operands[1] == const0_rtx)
return \"and\\t%0, %2, %3, asr #31\";
@@ -9883,7 +9882,7 @@
/* The only case that falls through to here is when both ops 1 & 2
are constants. */
}
- if (GET_CODE (operands[4]) == CONST_INT
+ if (CONST_INT_P (operands[4])
&& !const_ok_for_arm (INTVAL (operands[4])))
output_asm_insn (\"cmn\\t%3, #%n4\", operands);
else
@@ -10023,8 +10022,8 @@
everything is in registers then we can do this in two instructions. */
if (operands[3] == const0_rtx
&& GET_CODE (operands[7]) != AND
- && GET_CODE (operands[5]) == REG
- && GET_CODE (operands[1]) == REG
+ && REG_P (operands[5])
+ && REG_P (operands[1])
&& REGNO (operands[1]) == REGNO (operands[4])
&& REGNO (operands[4]) != REGNO (operands[0]))
{
@@ -10033,7 +10032,7 @@
else if (GET_CODE (operands[6]) == GE)
return \"bic\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\";
}
- if (GET_CODE (operands[3]) == CONST_INT
+ if (CONST_INT_P (operands[3])
&& !const_ok_for_arm (INTVAL (operands[3])))
output_asm_insn (\"cmn\\t%2, #%n3\", operands);
else
@@ -10081,8 +10080,8 @@
everything is in registers then we can do this in two instructions */
if (operands[5] == const0_rtx
&& GET_CODE (operands[7]) != AND
- && GET_CODE (operands[3]) == REG
- && GET_CODE (operands[1]) == REG
+ && REG_P (operands[3])
+ && REG_P (operands[1])
&& REGNO (operands[1]) == REGNO (operands[2])
&& REGNO (operands[2]) != REGNO (operands[0]))
{
@@ -10092,7 +10091,7 @@
return \"bic\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\";
}
- if (GET_CODE (operands[5]) == CONST_INT
+ if (CONST_INT_P (operands[5])
&& !const_ok_for_arm (INTVAL (operands[5])))
output_asm_insn (\"cmn\\t%4, #%n5\", operands);
else
@@ -10677,7 +10676,7 @@
"
{
cfun->machine->eh_epilogue_sp_ofs = operands[1];
- if (GET_CODE (operands[2]) != REG || REGNO (operands[2]) != 2)
+ if (!REG_P (operands[2]) || REGNO (operands[2]) != 2)
{
rtx ra = gen_rtx_REG (Pmode, 2);
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index a9295465faf..fe0618c2258 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -255,7 +255,7 @@
{
if (can_create_pseudo_p ())
{
- if (GET_CODE (operands[0]) != REG)
+ if (!REG_P (operands[0]))
operands[1] = force_reg (TImode, operands[1]);
}
})
@@ -267,7 +267,7 @@
{
if (can_create_pseudo_p ())
{
- if (GET_CODE (operands[0]) != REG)
+ if (!REG_P (operands[0]))
operands[1] = force_reg (<MODE>mode, operands[1]);
}
})
@@ -707,6 +707,33 @@
(const_string "neon_mla_qqq_32_qqd_32_scalar")))))]
)
+;; Fused multiply-accumulate
+(define_insn "fma<VCVTF:mode>4"
+ [(set (match_operand:VCVTF 0 "register_operand" "=w")
+ (fma:VCVTF (match_operand:VCVTF 1 "register_operand" "w")
+ (match_operand:VCVTF 2 "register_operand" "w")
+ (match_operand:VCVTF 3 "register_operand" "0")))]
+ "TARGET_NEON && TARGET_FMA && flag_unsafe_math_optimizations"
+ "vfma%?.<V_if_elem>\\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (match_test "<Is_d_reg>")
+ (const_string "neon_fp_vmla_ddd")
+ (const_string "neon_fp_vmla_qqq")))]
+)
+
+(define_insn "*fmsub<VCVTF:mode>4"
+ [(set (match_operand:VCVTF 0 "register_operand" "=w")
+ (fma:VCVTF (neg:VCVTF (match_operand:VCVTF 1 "register_operand" "w"))
+ (match_operand:VCVTF 2 "register_operand" "w")
+ (match_operand:VCVTF 3 "register_operand" "0")))]
+ "TARGET_NEON && TARGET_FMA && flag_unsafe_math_optimizations"
+ "vfms%?.<V_if_elem>\\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set (attr "neon_type")
+ (if_then_else (match_test "<Is_d_reg>")
+ (const_string "neon_fp_vmla_ddd")
+ (const_string "neon_fp_vmla_qqq")))]
+)
+
(define_insn "ior<mode>3"
[(set (match_operand:VDQ 0 "s_register_operand" "=w,w")
(ior:VDQ (match_operand:VDQ 1 "s_register_operand" "w,0")
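
For context, a sketch of source code that could exercise the new fma<mode>4 pattern (illustrative, not from this patch; it assumes a VFPv4/NEON target such as -mfpu=neon-vfpv4, and note the pattern is gated on flag_unsafe_math_optimizations, so something like -ffast-math is needed):

    /* With e.g. -O2 -ffast-math -mfpu=neon-vfpv4 -mfloat-abi=hard, the
       vectorizer can contract a * x[i] + y[i] into a fused multiply-add,
       matching fma<VCVTF:mode>4 and emitting vfma.f32 on vector lanes.  */
    void
    saxpy (int n, float a, const float *x, float *y)
    {
      int i;
      for (i = 0; i < n; i++)
        y[i] = a * x[i] + y[i];
    }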
diff --git a/gcc/config/arm/predicates.md b/gcc/config/arm/predicates.md
index 8ae26cae7a7..e7eb7b3cd44 100644
--- a/gcc/config/arm/predicates.md
+++ b/gcc/config/arm/predicates.md
@@ -26,7 +26,7 @@
/* We don't consider registers whose class is NO_REGS
to be a register operand. */
/* XXX might have to check for lo regs only for thumb ??? */
- return (GET_CODE (op) == REG
+ return (REG_P (op)
&& (REGNO (op) >= FIRST_PSEUDO_REGISTER
|| REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
})
@@ -55,7 +55,7 @@
if (GET_CODE (op) == SUBREG)
op = SUBREG_REG (op);
- return (GET_CODE (op) == REG
+ return (REG_P (op)
&& (REGNO (op) <= LAST_ARM_REGNUM
|| REGNO (op) >= FIRST_PSEUDO_REGISTER));
})
@@ -68,7 +68,7 @@
/* We don't consider registers whose class is NO_REGS
to be a register operand. */
- return (GET_CODE (op) == REG
+ return (REG_P (op)
&& (REGNO (op) >= FIRST_PSEUDO_REGISTER
|| REGNO_REG_CLASS (REGNO (op)) == VFP_D0_D7_REGS
|| REGNO_REG_CLASS (REGNO (op)) == VFP_LO_REGS
@@ -178,7 +178,7 @@
(and (match_code "mem,reg,subreg")
(match_test "(!CONSTANT_P (op)
&& (true_regnum(op) == -1
- || (GET_CODE (op) == REG
+ || (REG_P (op)
&& REGNO (op) >= FIRST_PSEUDO_REGISTER)))")))
(define_predicate "vfp_compare_operand"
@@ -195,7 +195,7 @@
(define_predicate "index_operand"
(ior (match_operand 0 "s_register_operand")
(and (match_operand 0 "immediate_operand")
- (match_test "(GET_CODE (op) != CONST_INT
+ (match_test "(!CONST_INT_P (op)
|| (INTVAL (op) < 4096 && INTVAL (op) > -4096))"))))
;; True for operators that can be combined with a shift in ARM state.
@@ -223,10 +223,10 @@
(and (ior (ior (and (match_code "mult")
(match_test "power_of_two_operand (XEXP (op, 1), mode)"))
(and (match_code "rotate")
- (match_test "GET_CODE (XEXP (op, 1)) == CONST_INT
+ (match_test "CONST_INT_P (XEXP (op, 1))
&& ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op, 1))) < 32")))
(and (match_code "ashift,ashiftrt,lshiftrt,rotatert")
- (match_test "GET_CODE (XEXP (op, 1)) != CONST_INT
+ (match_test "!CONST_INT_P (XEXP (op, 1))
|| ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op, 1))) < 32")))
(match_test "mode == GET_MODE (op)")))
@@ -235,7 +235,7 @@
(and (ior (and (match_code "mult")
(match_test "power_of_two_operand (XEXP (op, 1), mode)"))
(and (match_code "ashift,ashiftrt")
- (match_test "GET_CODE (XEXP (op, 1)) == CONST_INT
+ (match_test "CONST_INT_P (XEXP (op, 1))
&& ((unsigned HOST_WIDE_INT) INTVAL (XEXP (op, 1)) < 32)")))
(match_test "mode == GET_MODE (op)")))
@@ -332,7 +332,7 @@
if (GET_CODE (op) == SUBREG)
op = SUBREG_REG (op);
- return GET_CODE (op) == MEM && memory_address_p (DImode, XEXP (op, 0));
+ return MEM_P (op) && memory_address_p (DImode, XEXP (op, 0));
})
(define_predicate "di_operand"
@@ -349,7 +349,7 @@
if (GET_CODE (op) == SUBREG)
op = SUBREG_REG (op);
- return GET_CODE (op) == MEM && memory_address_p (DFmode, XEXP (op, 0));
+ return MEM_P (op) && memory_address_p (DFmode, XEXP (op, 0));
})
(define_predicate "soft_df_operand"
@@ -559,7 +559,7 @@
rtx elt = XVECEXP (op, 0, i);
int val;
- if (GET_CODE (elt) != CONST_INT)
+ if (!CONST_INT_P (elt))
return false;
val = INTVAL (elt);
@@ -588,7 +588,7 @@
rtx elt = XVECEXP (op, 0, i);
int val;
- if (GET_CODE (elt) != CONST_INT)
+ if (!CONST_INT_P (elt))
return false;
val = INTVAL (elt);
diff --git a/gcc/config/arm/thumb2.md b/gcc/config/arm/thumb2.md
index fd0bea63f41..57d1539ee3d 100644
--- a/gcc/config/arm/thumb2.md
+++ b/gcc/config/arm/thumb2.md
@@ -494,13 +494,13 @@
if (GET_CODE (operands[5]) == LT
&& (operands[4] == const0_rtx))
{
- if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ if (which_alternative != 1 && REG_P (operands[1]))
{
if (operands[2] == const0_rtx)
return \"and\\t%0, %1, %3, asr #31\";
return \"ands\\t%0, %1, %3, asr #32\;it\\tcc\;movcc\\t%0, %2\";
}
- else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ else if (which_alternative != 0 && REG_P (operands[2]))
{
if (operands[1] == const0_rtx)
return \"bic\\t%0, %2, %3, asr #31\";
@@ -513,13 +513,13 @@
if (GET_CODE (operands[5]) == GE
&& (operands[4] == const0_rtx))
{
- if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ if (which_alternative != 1 && REG_P (operands[1]))
{
if (operands[2] == const0_rtx)
return \"bic\\t%0, %1, %3, asr #31\";
return \"bics\\t%0, %1, %3, asr #32\;it\\tcs\;movcs\\t%0, %2\";
}
- else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ else if (which_alternative != 0 && REG_P (operands[2]))
{
if (operands[1] == const0_rtx)
return \"and\\t%0, %2, %3, asr #31\";
@@ -528,7 +528,7 @@
/* The only case that falls through to here is when both ops 1 & 2
are constants. */
}
- if (GET_CODE (operands[4]) == CONST_INT
+ if (CONST_INT_P (operands[4])
&& !const_ok_for_arm (INTVAL (operands[4])))
output_asm_insn (\"cmn\\t%3, #%n4\", operands);
else
@@ -680,7 +680,7 @@
(clobber (reg:CC CC_REGNUM))]
"TARGET_THUMB2 && reload_completed
&& ((GET_CODE(operands[3]) != ROTATE && GET_CODE(operands[3]) != ROTATERT)
- || REG_P(operands[2]))"
+ || REG_P (operands[2]))"
"* return arm_output_shift(operands, 2);"
[(set_attr "predicable" "yes")
(set_attr "shift" "1")
@@ -709,7 +709,7 @@
"*
HOST_WIDE_INT val;
- if (GET_CODE (operands[2]) == CONST_INT)
+ if (CONST_INT_P (operands[2]))
val = INTVAL(operands[2]);
else
val = 0;
@@ -773,7 +773,7 @@
"*
HOST_WIDE_INT val;
- if (GET_CODE (operands[2]) == CONST_INT)
+ if (CONST_INT_P (operands[2]))
val = INTVAL (operands[2]);
else
val = 0;
@@ -797,7 +797,7 @@
"*
HOST_WIDE_INT val;
- if (GET_CODE (operands[1]) == CONST_INT)
+ if (CONST_INT_P (operands[1]))
val = INTVAL (operands[1]);
else
val = 0;
diff --git a/gcc/config/arm/vec-common.md b/gcc/config/arm/vec-common.md
index 4d903bd80fe..b43e9523a6b 100644
--- a/gcc/config/arm/vec-common.md
+++ b/gcc/config/arm/vec-common.md
@@ -28,7 +28,7 @@
{
if (can_create_pseudo_p ())
{
- if (GET_CODE (operands[0]) != REG)
+ if (!REG_P (operands[0]))
operands[1] = force_reg (<MODE>mode, operands[1]);
else if (TARGET_NEON && CONSTANT_P (operands[1]))
{
diff --git a/gcc/config/avr/avr-mcus.def b/gcc/config/avr/avr-mcus.def
index 462d6a26f7c..7d2882903f2 100644
--- a/gcc/config/avr/avr-mcus.def
+++ b/gcc/config/avr/avr-mcus.def
@@ -196,7 +196,7 @@ AVR_MCU ("atmega1284p", ARCH_AVR51, "__AVR_ATmega1284P__", 0, 0, 0
AVR_MCU ("atmega128rfa1", ARCH_AVR51, "__AVR_ATmega128RFA1__", 0, 0, 0x0200, 2, "m128rfa1")
AVR_MCU ("at90can128", ARCH_AVR51, "__AVR_AT90CAN128__", 0, 0, 0x0100, 2, "can128")
AVR_MCU ("at90usb1286", ARCH_AVR51, "__AVR_AT90USB1286__", 0, 0, 0x0100, 2, "usb1286")
-AVR_MCU ("at90usb1287", ARCH_AVR51, "__AVR_AT90USB1287__", 0, 0, 0x0100, 2, "usb1286")
+AVR_MCU ("at90usb1287", ARCH_AVR51, "__AVR_AT90USB1287__", 0, 0, 0x0100, 2, "usb1287")
/* 3-Byte PC. */
AVR_MCU ("avr6", ARCH_AVR6, NULL, 0, 0, 0x0200, 4, "m2561")
AVR_MCU ("atmega2560", ARCH_AVR6, "__AVR_ATmega2560__", 0, 0, 0x0200, 4, "m2560")
diff --git a/gcc/config/darwin.c b/gcc/config/darwin.c
index 33a831f89dc..54c92d16b75 100644
--- a/gcc/config/darwin.c
+++ b/gcc/config/darwin.c
@@ -1878,7 +1878,7 @@ darwin_asm_named_section (const char *name,
the assumption of how this is done. */
if (lto_section_names == NULL)
lto_section_names = VEC_alloc (darwin_lto_section_e, gc, 16);
- VEC_safe_push (darwin_lto_section_e, gc, lto_section_names, &e);
+ VEC_safe_push (darwin_lto_section_e, gc, lto_section_names, e);
}
else if (strncmp (name, "__DWARF,", 8) == 0)
darwin_asm_dwarf_section (name, flags, decl);
@@ -2698,7 +2698,7 @@ darwin_asm_dwarf_section (const char *name, unsigned int flags,
fprintf (asm_out_file, "Lsection%.*s:\n", namelen, sname);
e.count = 1;
e.name = xstrdup (sname);
- VEC_safe_push (dwarf_sect_used_entry, gc, dwarf_sect_names_table, &e);
+ VEC_safe_push (dwarf_sect_used_entry, gc, dwarf_sect_names_table, e);
}
}
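
The two darwin.c hunks above (and the similar ones in mips.c and pa.c further down) track a VEC API change picked up from trunk: VEC_safe_push now takes the new element by value instead of by pointer. The pa.c hunk shows the resulting idiom:

    /* Build the element on the stack, then push a copy of it by value.  */
    extern_symbol p = {decl, name};
    VEC_safe_push (extern_symbol, gc, extern_symbols, p);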
diff --git a/gcc/config/i386/cpuid.h b/gcc/config/i386/cpuid.h
index a26dc9b4156..5cd1eb1c736 100644
--- a/gcc/config/i386/cpuid.h
+++ b/gcc/config/i386/cpuid.h
@@ -75,6 +75,60 @@
#define bit_RDSEED (1 << 18)
#define bit_ADX (1 << 19)
+/* Signatures of different CPU implementations as returned by cpuid
+   with level 0.  */
+#define signature_AMD_ebx 0x68747541
+#define signature_AMD_ecx 0x444d4163
+#define signature_AMD_edx 0x69746e65
+
+#define signature_CENTAUR_ebx 0x746e6543
+#define signature_CENTAUR_ecx 0x736c7561
+#define signature_CENTAUR_edx 0x48727561
+
+#define signature_CYRIX_ebx 0x69727943
+#define signature_CYRIX_ecx 0x64616574
+#define signature_CYRIX_edx 0x736e4978
+
+#define signature_INTEL_ebx 0x756e6547
+#define signature_INTEL_ecx 0x6c65746e
+#define signature_INTEL_edx 0x49656e69
+
+#define signature_TM1_ebx 0x6e617254
+#define signature_TM1_ecx 0x55504361
+#define signature_TM1_edx 0x74656d73
+
+#define signature_TM2_ebx 0x756e6547
+#define signature_TM2_ecx 0x3638784d
+#define signature_TM2_edx 0x54656e69
+
+#define signature_NSC_ebx 0x646f6547
+#define signature_NSC_ecx 0x43534e20
+#define signature_NSC_edx 0x79622065
+
+#define signature_NEXGEN_ebx 0x4778654e
+#define signature_NEXGEN_ecx 0x6e657669
+#define signature_NEXGEN_edx 0x72446e65
+
+#define signature_RISE_ebx 0x65736952
+#define signature_RISE_ecx 0x65736952
+#define signature_RISE_edx 0x65736952
+
+#define signature_SIS_ebx 0x20536953
+#define signature_SIS_ecx 0x20536953
+#define signature_SIS_edx 0x20536953
+
+#define signature_UMC_ebx 0x20434d55
+#define signature_UMC_ecx 0x20434d55
+#define signature_UMC_edx 0x20434d55
+
+#define signature_VIA_ebx 0x20414956
+#define signature_VIA_ecx 0x20414956
+#define signature_VIA_edx 0x20414956
+
+#define signature_VORTEX_ebx 0x74726f56
+#define signature_VORTEX_ecx 0x436f5320
+#define signature_VORTEX_edx 0x36387865
+
#if defined(__i386__) && defined(__PIC__)
/* %ebx may be the PIC register. */
#if __GNUC__ >= 3
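
The signature_* values are the vendor-ID strings in little-endian byte order; for example 0x756e6547 is "Genu", the EBX third of "GenuineIntel". A minimal usage sketch (hypothetical example, not part of this patch):

    #include <cpuid.h>
    #include <stdio.h>

    int
    main (void)
    {
      unsigned int eax, ebx, ecx, edx;
      /* Level 0: EAX gets the maximum level, EBX/EDX/ECX the vendor
         string.  __get_cpuid returns 0 if cpuid is unsupported.  */
      if (!__get_cpuid (0, &eax, &ebx, &ecx, &edx))
        return 1;
      if (ebx == signature_INTEL_ebx)     /* "Genu" of "GenuineIntel" */
        puts ("Intel");
      else if (ebx == signature_AMD_ebx)  /* "Auth" of "AuthenticAMD" */
        puts ("AMD");
      return 0;
    }

The driver-i386.c hunks below switch from the local SIG_* enums to these shared macros; note that the old SIG_GEODE ("Geod") corresponds to signature_NSC_ebx.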
diff --git a/gcc/config/i386/driver-i386.c b/gcc/config/i386/driver-i386.c
index b5ae18f18b3..bda4e022277 100644
--- a/gcc/config/i386/driver-i386.c
+++ b/gcc/config/i386/driver-i386.c
@@ -348,17 +348,6 @@ detect_caches_intel (bool xeon_mp, unsigned max_level,
return describe_cache (level1, level2);
}
-enum vendor_signatures
-{
- SIG_INTEL = 0x756e6547 /* Genu */,
- SIG_AMD = 0x68747541 /* Auth */
-};
-
-enum processor_signatures
-{
- SIG_GEODE = 0x646f6547 /* Geod */
-};
-
/* This will be called by the spec parser in gcc.c when it sees
a %:local_cpu_detect(args) construct. Currently it will be called
with either "arch" or "tune" as argument depending on if -march=native
@@ -422,7 +411,7 @@ const char *host_detect_local_cpu (int argc, const char **argv)
model = (eax >> 4) & 0x0f;
family = (eax >> 8) & 0x0f;
- if (vendor == SIG_INTEL)
+ if (vendor == signature_INTEL_ebx)
{
unsigned int extended_model, extended_family;
@@ -483,8 +472,6 @@ const char *host_detect_local_cpu (int argc, const char **argv)
has_abm = ecx & bit_ABM;
has_lwp = ecx & bit_LWP;
has_fma4 = ecx & bit_FMA4;
- if (vendor == SIG_AMD && has_fma4 && has_fma)
- has_fma4 = 0;
has_xop = ecx & bit_XOP;
has_tbm = ecx & bit_TBM;
has_lzcnt = ecx & bit_LZCNT;
@@ -497,9 +484,9 @@ const char *host_detect_local_cpu (int argc, const char **argv)
if (!arch)
{
- if (vendor == SIG_AMD)
+ if (vendor == signature_AMD_ebx)
cache = detect_caches_amd (ext_level);
- else if (vendor == SIG_INTEL)
+ else if (vendor == signature_INTEL_ebx)
{
bool xeon_mp = (family == 15 && model == 6);
cache = detect_caches_intel (xeon_mp, max_level,
@@ -507,7 +494,7 @@ const char *host_detect_local_cpu (int argc, const char **argv)
}
}
- if (vendor == SIG_AMD)
+ if (vendor == signature_AMD_ebx)
{
unsigned int name;
@@ -517,7 +504,7 @@ const char *host_detect_local_cpu (int argc, const char **argv)
else
name = 0;
- if (name == SIG_GEODE)
+ if (name == signature_NSC_ebx)
processor = PROCESSOR_GEODE;
else if (has_movbe)
processor = PROCESSOR_BTVER2;
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 0c1a7b80103..ccd7d882eb5 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -3164,7 +3164,7 @@ ix86_option_override_internal (bool main_args_p)
{"bdver2", PROCESSOR_BDVER2, CPU_BDVER2,
PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
| PTA_SSE4A | PTA_CX16 | PTA_ABM | PTA_SSSE3 | PTA_SSE4_1
- | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX
+ | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX | PTA_FMA4
| PTA_XOP | PTA_LWP | PTA_BMI | PTA_TBM | PTA_F16C
| PTA_FMA},
{"btver1", PROCESSOR_BTVER1, CPU_GENERIC64,
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index 898e0156248..05d22ddb3dc 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -659,9 +659,11 @@
(eq_attr "isa" "noavx2") (symbol_ref "!TARGET_AVX2")
(eq_attr "isa" "bmi2") (symbol_ref "TARGET_BMI2")
(eq_attr "isa" "fma") (symbol_ref "TARGET_FMA")
- ;; Disable generation of FMA4 instructions for generic code
- ;; since FMA3 is preferred for targets that implement both
- ;; instruction sets.
+ ;; FMA instruction selection has to be based on register
+ ;; pressure.  Generating FMA4 would require a register-pressure
+ ;; based cost model; until one exists, FMA4 instructions are
+ ;; disabled for targets that implement both the FMA and FMA4
+ ;; instruction sets.
(eq_attr "isa" "fma4")
(symbol_ref "TARGET_FMA4 && !TARGET_FMA")
]
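
Concretely: the i386.c hunk above now marks bdver2 with PTA_FMA4 in addition to PTA_FMA, so on that processor the fma4 alternatives are rejected by the TARGET_FMA4 && !TARGET_FMA test and the FMA3 forms are used instead.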
diff --git a/gcc/config/m68k/m68k.c b/gcc/config/m68k/m68k.c
index 0e55e1c2309..b2e2e6c2564 100644
--- a/gcc/config/m68k/m68k.c
+++ b/gcc/config/m68k/m68k.c
@@ -6269,7 +6269,8 @@ m68k_sched_dfa_post_advance_cycle (void)
while (--i >= 0)
{
if (state_transition (curr_state, sched_ib.insn) >= 0)
- gcc_unreachable ();
+ /* Pick up scheduler state. */
+ ++sched_ib.filled;
}
}
diff --git a/gcc/config/mips/gnu-user64.h b/gcc/config/mips/gnu-user64.h
index ad59ba4c752..7c2187f4150 100644
--- a/gcc/config/mips/gnu-user64.h
+++ b/gcc/config/mips/gnu-user64.h
@@ -20,13 +20,17 @@ along with GCC; see the file COPYING3. If not see
/* Force the default endianness and ABI flags onto the command line
in order to make the other specs easier to write. */
-#undef DRIVER_SELF_SPECS
-#define DRIVER_SELF_SPECS \
- BASE_DRIVER_SELF_SPECS, \
+
+#define LINUX64_DRIVER_SELF_SPECS \
LINUX_DRIVER_SELF_SPECS \
" %{!EB:%{!EL:%(endian_spec)}}" \
" %{!mabi=*: -" MULTILIB_ABI_DEFAULT "}"
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS \
+ BASE_DRIVER_SELF_SPECS, \
+ LINUX64_DRIVER_SELF_SPECS
+
#undef GNU_USER_TARGET_LINK_SPEC
#define GNU_USER_TARGET_LINK_SPEC "\
%{G*} %{EB} %{EL} %{mips1} %{mips2} %{mips3} %{mips4} \
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index 09322f15926..7f9df4c161b 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -3989,8 +3989,8 @@ mips_multi_start (void)
static struct mips_multi_member *
mips_multi_add (void)
{
- return VEC_safe_push (mips_multi_member, heap, mips_multi_members,
- (struct mips_multi_member *) 0);
+ mips_multi_member empty;
+ return VEC_safe_push (mips_multi_member, heap, mips_multi_members, empty);
}
/* Add a normal insn with the given asm format to the current multi-insn
diff --git a/gcc/config/mips/mips.h b/gcc/config/mips/mips.h
index 9ce466dc44b..e04f0995273 100644
--- a/gcc/config/mips/mips.h
+++ b/gcc/config/mips/mips.h
@@ -728,6 +728,11 @@ struct mips_cpu_info {
#define MIPS_32BIT_OPTION_SPEC \
"mips1|mips2|mips32*|mgp32"
+/* Infer a -msynci setting from a -mips argument, on the assumption that
+ -msynci is desired where possible. */
+#define MIPS_ISA_SYNCI_SPEC \
+ "%{msynci|mno-synci:;:%{mips32r2|mips64r2:-msynci;:-mno-synci}}"
+
#if MIPS_ABI_DEFAULT == ABI_O64 \
|| MIPS_ABI_DEFAULT == ABI_N32 \
|| MIPS_ABI_DEFAULT == ABI_64
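
As a reading aid (GCC spec syntax: %{a|b:X;:Y} substitutes X if -a or -b was given, otherwise Y), the new MIPS_ISA_SYNCI_SPEC behaves roughly like this sketch:

    gcc -mips32r2 foo.c              ->  -msynci added
    gcc -mips32 foo.c                ->  -mno-synci added
    gcc -mips32r2 -mno-synci foo.c   ->  left alone; the explicit option wins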
@@ -762,7 +767,6 @@ struct mips_cpu_info {
{"mips-plt", "%{!mplt:%{!mno-plt:-m%(VALUE)}}" }, \
{"synci", "%{!msynci:%{!mno-synci:-m%(VALUE)}}" }
-
/* A spec that infers the -mdsp setting from an -march argument. */
#define BASE_DRIVER_SELF_SPECS \
"%{!mno-dsp: \
diff --git a/gcc/config/mips/mti-linux.h b/gcc/config/mips/mti-linux.h
new file mode 100644
index 00000000000..36c003c60cb
--- /dev/null
+++ b/gcc/config/mips/mti-linux.h
@@ -0,0 +1,43 @@
+/* Target macros for mips*-mti-linux* targets.
+ Copyright (C) 2012
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This target is a multilib target; specify the sysroot paths.  */
+#undef SYSROOT_SUFFIX_SPEC
+#define SYSROOT_SUFFIX_SPEC \
+ "%{mips32:/mips32}%{mips64:/mips64}%{mips64r2:/mips64r2}%{msoft-float:/sof}%{mel|EL:/el}%{mabi=64:/64}%{mabi=n32:/n32}"
+
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS \
+ /* Make sure a -mips option is present. This helps us to pick \
+ the right multilib, and also makes the later specs easier \
+ to write. */ \
+ MIPS_ISA_LEVEL_SPEC, \
+ \
+ /* Infer the default float setting from -march. */ \
+ MIPS_ARCH_FLOAT_SPEC, \
+ \
+ /* Infer the -msynci setting from -march if not explicitly set. */ \
+ MIPS_ISA_SYNCI_SPEC, \
+ \
+ /* Base SPECs. */ \
+ BASE_DRIVER_SELF_SPECS \
+ \
+ /* Use the standard linux specs for everything else. */ \
+ LINUX64_DRIVER_SELF_SPECS
diff --git a/gcc/config/mips/t-mti-linux b/gcc/config/mips/t-mti-linux
new file mode 100644
index 00000000000..ba11706ffee
--- /dev/null
+++ b/gcc/config/mips/t-mti-linux
@@ -0,0 +1,24 @@
+# Copyright (C) 2012 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# The default build is mips32r2, hard-float big-endian.  Add mips32,
+# mips64, mips64r2, soft-float, and little-endian variations.
+
+MULTILIB_OPTIONS = mips32/mips64/mips64r2 msoft-float EL
+MULTILIB_DIRNAMES = mips32 mips64 mips64r2 sof el
+MULTILIB_MATCHES = EL=mel EB=meb
diff --git a/gcc/config/mmix/constraints.md b/gcc/config/mmix/constraints.md
new file mode 100644
index 00000000000..954cddaa5af
--- /dev/null
+++ b/gcc/config/mmix/constraints.md
@@ -0,0 +1,112 @@
+;; MMIX constraints
+;; Copyright (C) 2012 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>. */
+
+(define_register_constraint "x" "SYSTEM_REGS"
+ "@internal")
+
+(define_register_constraint "y" "REMAINDER_REG"
+ "@internal")
+
+(define_register_constraint "z" "HIMULT_REG"
+ "@internal")
+
+(define_constraint "I"
+ "A 8-bit unsigned integer"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 0, 255)")))
+
+(define_constraint "J"
+ "A 16-bit unsigned integer."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 0, 65535)")))
+
+(define_constraint "K"
+ "An integer between -255 and 0."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, -255, 0)")))
+
+(define_constraint "L"
+ "@internal"
+ (and (match_code "const_int")
+ (match_test "mmix_shiftable_wyde_value (ival)")))
+
+(define_constraint "M"
+ "The value 0."
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "N"
+ "@internal"
+ (and (match_code "const_int")
+ (match_test "mmix_shiftable_wyde_value (~ival)")))
+
+(define_constraint "O"
+ "The value 3, 5, 9, or 17."
+ (and (match_code "const_int")
+ (ior (match_test "ival == 3")
+ (match_test "ival == 5")
+ (match_test "ival == 9")
+ (match_test "ival == 17"))))
+
+;; FIXME: M (or G) is redundant.
+
+(define_constraint "G"
+ "Floating-point zero."
+ (and (match_code "const_double")
+ (match_test "op == CONST0_RTX (mode)")))
+
+;; R asks whether x is to be loaded with GETA or something else. Right
+;; now, only a SYMBOL_REF and LABEL_REF can fit for
+;; TARGET_BASE_ADDRESSES.
+;;
+;; Only constant symbolic addresses apply. With TARGET_BASE_ADDRESSES,
+;; we just allow straight LABEL_REF or SYMBOL_REFs with SYMBOL_REF_FLAG
+;; set right now; only function addresses and code labels. If we change
+;; to let SYMBOL_REF_FLAG be set on other symbols, we have to check
+;; inside CONST expressions. When TARGET_BASE_ADDRESSES is not in
+;; effect, a "raw" constant check together with mmix_constant_address_p
+;; is all that's needed; we want all constant addresses to be loaded
+;; with GETA then.
+
+(define_constraint "R"
+ "@internal"
+ (and (not (match_code "const_int,const_double"))
+ (match_test "mmix_constant_address_p (op)")
+ (ior (match_test "!TARGET_BASE_ADDRESSES")
+ (match_code "LABEL_REF")
+ (and (match_code "SYMBOL_REF")
+ (match_test "SYMBOL_REF_FLAG (op)")))))
+
+;; FIXME: L (or S) is redundant.
+
+(define_constraint "S"
+ "@internal"
+ (and (match_code "const_int,const_double")
+ (match_test "mmix_shiftable_wyde_value (mmix_intval (op))")))
+
+;; FIXME: N (or T) is redundant.
+
+(define_constraint "T"
+ "@internal"
+ (and (match_code "const_int,const_double")
+ (match_test "mmix_shiftable_wyde_value (~mmix_intval (op))")))
+
+(define_address_constraint "U"
+ "@internal"
+ (match_operand 0 "mmix_address_operand"))
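
Moving the constraints into constraints.md is what lets the C code below drop the CONST_OK_FOR_LETTER_P machinery: with tm-constrs.h included, mmix.c can call the generated helpers directly, as in these lines from the hunks further down:

    if (satisfies_constraint_I (x2))                              /* 0..255 */
    if (insn_const_int_ok_for_constraint (offset, CONSTRAINT_L))  /* shiftable wyde */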
diff --git a/gcc/config/mmix/mmix-protos.h b/gcc/config/mmix/mmix-protos.h
index 4e8c338b80c..62cdbae442a 100644
--- a/gcc/config/mmix/mmix-protos.h
+++ b/gcc/config/mmix/mmix-protos.h
@@ -40,6 +40,7 @@ extern void mmix_asm_output_reg_push (FILE *, int);
extern void mmix_asm_output_reg_pop (FILE *, int);
extern void mmix_asm_output_skip (FILE *, int);
extern void mmix_asm_output_align (FILE *, int);
+extern HOST_WIDEST_INT mmix_intval (const_rtx);
extern int mmix_shiftable_wyde_value (unsigned HOST_WIDEST_INT);
extern void mmix_output_register_setting (FILE *, int, HOST_WIDEST_INT, int);
extern int mmix_opposite_regno (int, int);
@@ -59,9 +60,6 @@ extern void mmix_asm_output_addr_diff_elt (FILE *, rtx, int, int);
extern void mmix_asm_output_addr_vec_elt (FILE *, int);
extern enum reg_class mmix_secondary_reload_class
(enum reg_class, enum machine_mode, rtx, int);
-extern int mmix_const_ok_for_letter_p (HOST_WIDE_INT, int);
-extern int mmix_const_double_ok_for_letter_p (rtx, int);
-extern int mmix_extra_constraint (rtx, int, int);
extern rtx mmix_dynamic_chain_address (rtx);
extern rtx mmix_return_addr_rtx (int, rtx);
extern rtx mmix_eh_return_stackadj_rtx (void);
diff --git a/gcc/config/mmix/mmix.c b/gcc/config/mmix/mmix.c
index eaf5f2ed437..d5d72dfdcad 100644
--- a/gcc/config/mmix/mmix.c
+++ b/gcc/config/mmix/mmix.c
@@ -44,6 +44,7 @@ along with GCC; see the file COPYING3. If not see
#include "target.h"
#include "target-def.h"
#include "df.h"
+#include "tm-constrs.h"
/* First some local helper definitions. */
#define MMIX_FIRST_GLOBAL_REGNUM 32
@@ -118,7 +119,6 @@ static void mmix_output_shiftvalue_op_from_str
(FILE *, const char *, HOST_WIDEST_INT);
static void mmix_output_shifted_value (FILE *, HOST_WIDEST_INT);
static void mmix_output_condition (FILE *, const_rtx, int);
-static HOST_WIDEST_INT mmix_intval (const_rtx);
static void mmix_output_octa (FILE *, HOST_WIDEST_INT, int);
static bool mmix_assemble_integer (rtx, unsigned int, int);
static struct machine_function *mmix_init_machine_status (void);
@@ -459,87 +459,6 @@ mmix_secondary_reload_class (enum reg_class rclass,
return NO_REGS;
}
-/* CONST_OK_FOR_LETTER_P. */
-
-int
-mmix_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
-{
- return
- (c == 'I' ? value >= 0 && value <= 255
- : c == 'J' ? value >= 0 && value <= 65535
- : c == 'K' ? value <= 0 && value >= -255
- : c == 'L' ? mmix_shiftable_wyde_value (value)
- : c == 'M' ? value == 0
- : c == 'N' ? mmix_shiftable_wyde_value (~value)
- : c == 'O' ? (value == 3 || value == 5 || value == 9
- || value == 17)
- : 0);
-}
-
-/* CONST_DOUBLE_OK_FOR_LETTER_P. */
-
-int
-mmix_const_double_ok_for_letter_p (rtx value, int c)
-{
- return
- (c == 'G' ? value == CONST0_RTX (GET_MODE (value))
- : 0);
-}
-
-/* EXTRA_CONSTRAINT.
- We need this since our constants are not always expressible as
- CONST_INT:s, but rather often as CONST_DOUBLE:s. */
-
-int
-mmix_extra_constraint (rtx x, int c, int strict)
-{
- HOST_WIDEST_INT value;
-
- /* When checking for an address, we need to handle strict vs. non-strict
- register checks. Don't use address_operand, but instead its
- equivalent (its callee, which it is just a wrapper for),
- memory_operand_p and the strict-equivalent strict_memory_address_p. */
- if (c == 'U')
- return
- strict
- ? strict_memory_address_p (Pmode, x)
- : memory_address_p (Pmode, x);
-
- /* R asks whether x is to be loaded with GETA or something else. Right
- now, only a SYMBOL_REF and LABEL_REF can fit for
- TARGET_BASE_ADDRESSES.
-
- Only constant symbolic addresses apply. With TARGET_BASE_ADDRESSES,
- we just allow straight LABEL_REF or SYMBOL_REFs with SYMBOL_REF_FLAG
- set right now; only function addresses and code labels. If we change
- to let SYMBOL_REF_FLAG be set on other symbols, we have to check
- inside CONST expressions. When TARGET_BASE_ADDRESSES is not in
- effect, a "raw" constant check together with mmix_constant_address_p
- is all that's needed; we want all constant addresses to be loaded
- with GETA then. */
- if (c == 'R')
- return
- GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE
- && mmix_constant_address_p (x)
- && (! TARGET_BASE_ADDRESSES
- || (GET_CODE (x) == LABEL_REF
- || (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_FLAG (x))));
-
- if (GET_CODE (x) != CONST_DOUBLE || GET_MODE (x) != VOIDmode)
- return 0;
-
- value = mmix_intval (x);
-
- /* We used to map Q->J, R->K, S->L, T->N, U->O, but we don't have to any
- more ('U' taken for address_operand, 'R' similarly). Some letters map
- outside of CONST_INT, though; we still use 'S' and 'T'. */
- if (c == 'S')
- return mmix_shiftable_wyde_value (value);
- else if (c == 'T')
- return mmix_shiftable_wyde_value (~value);
- return 0;
-}
-
/* DYNAMIC_CHAIN_ADDRESS. */
rtx
@@ -1161,8 +1080,7 @@ mmix_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
return 1;
/* (mem (plus (reg) (0..255?))) */
- if (GET_CODE (x2) == CONST_INT
- && CONST_OK_FOR_LETTER_P (INTVAL (x2), 'I'))
+ if (satisfies_constraint_I (x2))
return 1;
return 0;
@@ -1843,8 +1761,7 @@ mmix_print_operand_address (FILE *stream, rtx x)
reg_names[MMIX_OUTPUT_REGNO (REGNO (x2))]);
return;
}
- else if (GET_CODE (x2) == CONST_INT
- && CONST_OK_FOR_LETTER_P (INTVAL (x2), 'I'))
+ else if (satisfies_constraint_I (x2))
{
output_addr_const (stream, x2);
return;
@@ -2529,7 +2446,7 @@ mmix_emit_sp_add (HOST_WIDE_INT offset)
{
/* Positive adjustments are in the epilogue only. Don't mark them
as "frame-related" for unwind info. */
- if (CONST_OK_FOR_LETTER_P (offset, 'L'))
+ if (insn_const_int_ok_for_constraint (offset, CONSTRAINT_L))
emit_insn (gen_adddi3 (stack_pointer_rtx,
stack_pointer_rtx,
GEN_INT (offset)));
@@ -2754,7 +2671,7 @@ mmix_output_condition (FILE *stream, const_rtx x, int reversed)
/* Return the bit-value for a const_int or const_double. */
-static HOST_WIDEST_INT
+HOST_WIDEST_INT
mmix_intval (const_rtx x)
{
unsigned HOST_WIDEST_INT retval;
diff --git a/gcc/config/mmix/mmix.h b/gcc/config/mmix/mmix.h
index 787f64f4bf5..07b7368e9a5 100644
--- a/gcc/config/mmix/mmix.h
+++ b/gcc/config/mmix/mmix.h
@@ -72,12 +72,6 @@ along with GCC; see the file COPYING3. If not see
untouched by the epilogue". */
#define MMIX_EH_RETURN_STACKADJ_REGNUM MMIX_STATIC_CHAIN_REGNUM
-#ifdef REG_OK_STRICT
-# define MMIX_REG_OK_STRICT 1
-#else
-# define MMIX_REG_OK_STRICT 0
-#endif
-
#define MMIX_FUNCTION_ARG_SIZE(MODE, TYPE) \
((MODE) != BLKmode ? GET_MODE_SIZE (MODE) : int_size_in_bytes (TYPE))
@@ -439,11 +433,6 @@ enum reg_class
#define INDEX_REG_CLASS GENERAL_REGS
-#define REG_CLASS_FROM_LETTER(CHAR) \
- ((CHAR) == 'x' ? SYSTEM_REGS \
- : (CHAR) == 'y' ? REMAINDER_REG \
- : (CHAR) == 'z' ? HIMULT_REG : NO_REGS)
-
#define REGNO_OK_FOR_BASE_P(REGNO) \
((REGNO) <= MMIX_LAST_GENERAL_REGISTER \
|| (REGNO) == MMIX_ARG_POINTER_REGNUM \
@@ -460,16 +449,6 @@ enum reg_class
#define CLASS_MAX_NREGS(CLASS, MODE) HARD_REGNO_NREGS (CLASS, MODE)
-#define CONST_OK_FOR_LETTER_P(VALUE, C) \
- mmix_const_ok_for_letter_p (VALUE, C)
-
-#define EXTRA_CONSTRAINT(VALUE, C) \
- mmix_extra_constraint (VALUE, C, MMIX_REG_OK_STRICT)
-
-/* Do we need anything serious here? Yes, any FLOT constant. */
-#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
- mmix_const_double_ok_for_letter_p (VALUE, C)
-
/* Node: Frame Layout */
diff --git a/gcc/config/mmix/mmix.md b/gcc/config/mmix/mmix.md
index 1cd397a8a14..24d6292f787 100644
--- a/gcc/config/mmix/mmix.md
+++ b/gcc/config/mmix/mmix.md
@@ -43,6 +43,7 @@
;; Operand and operator predicates.
(include "predicates.md")
+(include "constraints.md")
;; FIXME: Can we remove the reg-to-reg for smaller modes? Shouldn't they
;; be synthesized ok?
@@ -274,7 +275,7 @@
(define_insn "iordi3"
[(set (match_operand:DI 0 "register_operand" "=r,r")
(ior:DI (match_operand:DI 1 "register_operand" "%r,0")
- (match_operand:DI 2 "mmix_reg_or_constant_operand" "rH,LS")))]
+ (match_operand:DI 2 "mmix_reg_or_constant_operand" "rI,LS")))]
""
"@
OR %0,%1,%2
@@ -1037,6 +1038,7 @@ DIVU %1,%1,%2\;GET %0,:rR\;NEGU %2,0,%0\;CSNN %0,$255,%2")
;; first ("p") alternative by adding ? in the first operand
;; might do the trick. We define 'U' as a synonym to 'p', but without the
;; caveats (and very small advantages) of 'p'.
+;; As of r190682 this is still the case: newlib/libc/stdlib/dtoa.c ICEs if "p" is used.
(define_insn "*call_real"
[(call (mem:QI
(match_operand:DI 0 "mmix_symbolic_or_address_operand" "s,rU"))
diff --git a/gcc/config/mmix/predicates.md b/gcc/config/mmix/predicates.md
index b5773b87aee..f9ba32c8832 100644
--- a/gcc/config/mmix/predicates.md
+++ b/gcc/config/mmix/predicates.md
@@ -118,7 +118,7 @@
return 1;
/* Fall through. */
default:
- return address_operand (op, mode);
+ return mmix_address_operand (op, mode);
}
})
@@ -152,4 +152,12 @@
(ior
(match_operand 0 "register_operand")
(and (match_code "const_int")
- (match_test "CONST_OK_FOR_LETTER_P (INTVAL (op), 'I')"))))
+ (match_test "satisfies_constraint_I (op)"))))
+
+;; True if this is a memory address, possibly strictly.
+;; See also comment above the "*call_real" pattern.
+
+(define_predicate "mmix_address_operand"
+ (if_then_else (match_test "reload_in_progress || reload_completed")
+ (match_test "strict_memory_address_p (Pmode, op)")
+ (match_test "memory_address_p (Pmode, op)")))
diff --git a/gcc/config/moxie/moxie.c b/gcc/config/moxie/moxie.c
index 8d40412d095..97f8af0eb04 100644
--- a/gcc/config/moxie/moxie.c
+++ b/gcc/config/moxie/moxie.c
@@ -1,5 +1,5 @@
/* Target Code for moxie
- Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation
+ Copyright (C) 2008, 2009, 2010, 2011, 2012 Free Software Foundation
Contributed by Anthony Green.
This file is part of GCC.
@@ -293,8 +293,8 @@ moxie_expand_prologue (void)
if (cfun->machine->size_for_adjusting_sp > 0)
{
- int i = cfun->machine->size_for_adjusting_sp;
- while (i > 255)
+ int i = cfun->machine->size_for_adjusting_sp;
+ while ((i >= 255) && (i <= 510))
{
insn = emit_insn (gen_subsi3 (stack_pointer_rtx,
stack_pointer_rtx,
@@ -302,13 +302,23 @@ moxie_expand_prologue (void)
RTX_FRAME_RELATED_P (insn) = 1;
i -= 255;
}
- if (i > 0)
+ if (i <= 255)
{
insn = emit_insn (gen_subsi3 (stack_pointer_rtx,
stack_pointer_rtx,
GEN_INT (i)));
RTX_FRAME_RELATED_P (insn) = 1;
}
+ else
+ {
+ rtx reg = gen_rtx_REG (SImode, MOXIE_R12);
+ insn = emit_move_insn (reg, GEN_INT (i));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ insn = emit_insn (gen_subsi3 (stack_pointer_rtx,
+ stack_pointer_rtx,
+ reg));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
}
}
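
A sketch of the resulting stack-adjustment strategies (illustrative pseudo-code, not actual moxie assembly):

    i = 100   ->  one immediate subtract of 100
    i = 400   ->  subtract 255, then subtract 145
    i = 1000  ->  move 1000 into MOXIE_R12, then subtract that register

i.e. adjustments larger than 510 bytes now go through a scratch register instead of a long chain of 255-byte immediate subtracts.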
@@ -320,7 +330,7 @@ moxie_expand_epilogue (void)
if (cfun->machine->callee_saved_reg_size != 0)
{
- reg = gen_rtx_REG (Pmode, MOXIE_R5);
+ reg = gen_rtx_REG (Pmode, MOXIE_R12);
if (cfun->machine->callee_saved_reg_size <= 255)
{
emit_move_insn (reg, hard_frame_pointer_rtx);
diff --git a/gcc/config/openbsd-stdint.h b/gcc/config/openbsd-stdint.h
index ab1f9cfffc6..a6da1da191f 100644
--- a/gcc/config/openbsd-stdint.h
+++ b/gcc/config/openbsd-stdint.h
@@ -26,6 +26,9 @@
#define UINT_FAST16_TYPE "unsigned int"
#define UINT_FAST32_TYPE "unsigned int"
#define UINT_FAST64_TYPE "long long unsigned int"
+
+#define INTMAX_TYPE "long long int"
+#define UINTMAX_TYPE "long long unsigned int"
#define INTPTR_TYPE "long int"
#define UINTPTR_TYPE "long unsigned int"
diff --git a/gcc/config/pa/pa.c b/gcc/config/pa/pa.c
index bb54c6e1bfe..6c8f8278d58 100644
--- a/gcc/config/pa/pa.c
+++ b/gcc/config/pa/pa.c
@@ -1422,6 +1422,8 @@ static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
int *total, bool speed ATTRIBUTE_UNUSED)
{
+ int factor;
+
switch (code)
{
case CONST_INT:
@@ -1453,11 +1455,20 @@ hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
case MULT:
if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
- *total = COSTS_N_INSNS (3);
- else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
- *total = COSTS_N_INSNS (8);
+ {
+ *total = COSTS_N_INSNS (3);
+ return true;
+ }
+
+ /* A mode size N times larger than SImode needs O(N*N) more insns. */
+ factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
+ if (factor == 0)
+ factor = 1;
+
+ if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
+ *total = factor * factor * COSTS_N_INSNS (8);
else
- *total = COSTS_N_INSNS (20);
+ *total = factor * factor * COSTS_N_INSNS (20);
return true;
case DIV:
@@ -1471,15 +1482,28 @@ hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
case UDIV:
case MOD:
case UMOD:
- *total = COSTS_N_INSNS (60);
+ /* A mode size N times larger than SImode needs O(N*N) more insns. */
+ factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
+ if (factor == 0)
+ factor = 1;
+
+ *total = factor * factor * COSTS_N_INSNS (60);
return true;
case PLUS: /* this includes shNadd insns */
case MINUS:
if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
- *total = COSTS_N_INSNS (3);
- else
- *total = COSTS_N_INSNS (1);
+ {
+ *total = COSTS_N_INSNS (3);
+ return true;
+ }
+
+ /* A size N times larger than UNITS_PER_WORD needs N times as
+ many insns, taking N times as long. */
+ factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
+ if (factor == 0)
+ factor = 1;
+ *total = factor * COSTS_N_INSNS (1);
return true;
case ASHIFT:
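[Note: the scaling above charges quadratically in the mode-size factor, on the
theory that an N-word multiply or divide needs on the order of N*N word
operations.  A small standalone check of the resulting numbers, reusing the
COSTS_N_INSNS definition from gcc/rtl.h:

    #include <stdio.h>

    #define COSTS_N_INSNS(N) ((N) * 4)   /* as defined in gcc/rtl.h */

    static int
    mult_cost_sketch (int mode_size, int base_insns)
    {
      int factor = mode_size / 4;   /* mode size relative to SImode */
      if (factor == 0)
        factor = 1;                 /* sub-word modes are clamped */
      return factor * factor * COSTS_N_INSNS (base_insns);
    }

    int
    main (void)
    {
      printf ("QImode: %d\n", mult_cost_sketch (1, 8));   /* 32, clamped */
      printf ("SImode: %d\n", mult_cost_sketch (4, 8));   /* 32 */
      printf ("DImode: %d\n", mult_cost_sketch (8, 8));   /* 128 */
      return 0;
    }
]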
@@ -9924,11 +9948,9 @@ static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
- extern_symbol * p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);
-
gcc_assert (file == asm_out_file);
- p->decl = decl;
- p->name = name;
+ extern_symbol p = {decl, name};
+ VEC_safe_push (extern_symbol, gc, extern_symbols, p);
}
/* Output text required at the end of an assembler file.
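[Note: this hunk is one instance of a VEC API change that recurs throughout
this merge (rs6000-c.c, rs6000.c, and most of the cp/ hunks below follow the
same shape): the push macros no longer take a NULL placeholder and hand back a
pointer to the new slot; callers now build the element first and push it by
value.  Side by side, using the extern_symbol case from this hunk:

    Old idiom (push a placeholder, fill in the returned slot):

        extern_symbol *p
          = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);
        p->decl = decl;
        p->name = name;

    New idiom (build the element, then push by value):

        extern_symbol p = {decl, name};
        VEC_safe_push (extern_symbol, gc, extern_symbols, p);
]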
diff --git a/gcc/config/rs6000/aix43.h b/gcc/config/rs6000/aix43.h
index c1a69dc3a5a..8ff211107ec 100644
--- a/gcc/config/rs6000/aix43.h
+++ b/gcc/config/rs6000/aix43.h
@@ -142,12 +142,6 @@ do { \
#undef PTRDIFF_TYPE
#define PTRDIFF_TYPE "long int"
-/* AIX 4 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
- and "cror 31,31,31" for POWER architecture. */
-
-#undef RS6000_CALL_GLUE
-#define RS6000_CALL_GLUE "nop"
-
/* AIX 4.2 and above provides initialization and finalization function
support from linker command line. */
#undef HAS_INIT_SECTION
diff --git a/gcc/config/rs6000/aix51.h b/gcc/config/rs6000/aix51.h
index 6ea30c1fba1..d62d3fb5d90 100644
--- a/gcc/config/rs6000/aix51.h
+++ b/gcc/config/rs6000/aix51.h
@@ -146,12 +146,6 @@ do { \
#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32)
-/* AIX V5 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
- and "cror 31,31,31" for POWER architecture. */
-
-#undef RS6000_CALL_GLUE
-#define RS6000_CALL_GLUE "nop"
-
/* AIX 4.2 and above provides initialization and finalization function
support from linker command line. */
#undef HAS_INIT_SECTION
diff --git a/gcc/config/rs6000/aix52.h b/gcc/config/rs6000/aix52.h
index 1c9e77f9626..02b966d1f5b 100644
--- a/gcc/config/rs6000/aix52.h
+++ b/gcc/config/rs6000/aix52.h
@@ -158,12 +158,6 @@ do { \
#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32)
-/* AIX V5 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
- and "cror 31,31,31" for POWER architecture. */
-
-#undef RS6000_CALL_GLUE
-#define RS6000_CALL_GLUE "nop"
-
/* AIX 4.2 and above provides initialization and finalization function
support from linker command line. */
#undef HAS_INIT_SECTION
diff --git a/gcc/config/rs6000/aix53.h b/gcc/config/rs6000/aix53.h
index d1a99e9c035..870eb0618d4 100644
--- a/gcc/config/rs6000/aix53.h
+++ b/gcc/config/rs6000/aix53.h
@@ -156,12 +156,6 @@ do { \
#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32)
-/* AIX V5 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
- and "cror 31,31,31" for POWER architecture. */
-
-#undef RS6000_CALL_GLUE
-#define RS6000_CALL_GLUE "nop"
-
/* AIX 4.2 and above provides initialization and finalization function
support from linker command line. */
#undef HAS_INIT_SECTION
diff --git a/gcc/config/rs6000/aix61.h b/gcc/config/rs6000/aix61.h
index 663777c47cf..92168990a01 100644
--- a/gcc/config/rs6000/aix61.h
+++ b/gcc/config/rs6000/aix61.h
@@ -157,12 +157,6 @@ do { \
#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32)
-/* AIX V5 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
- and "cror 31,31,31" for POWER architecture. */
-
-#undef RS6000_CALL_GLUE
-#define RS6000_CALL_GLUE "nop"
-
/* AIX 4.2 and above provides initialization and finalization function
support from linker command line. */
#undef HAS_INIT_SECTION
diff --git a/gcc/config/rs6000/freebsd64.h b/gcc/config/rs6000/freebsd64.h
index 909f703a07e..d434ace28f3 100644
--- a/gcc/config/rs6000/freebsd64.h
+++ b/gcc/config/rs6000/freebsd64.h
@@ -316,10 +316,6 @@ extern int dot_symbols;
#undef ASM_APP_OFF
#define ASM_APP_OFF "#NO_APP\n"
-/* PowerPC no-op instruction. */
-#undef RS6000_CALL_GLUE
-#define RS6000_CALL_GLUE (TARGET_64BIT ? "nop" : "cror 31,31,31")
-
/* Function profiling bits */
#undef RS6000_MCOUNT
#define RS6000_MCOUNT "_mcount"
diff --git a/gcc/config/rs6000/linux64.h b/gcc/config/rs6000/linux64.h
index 7c516eb56a8..8c32301a71d 100644
--- a/gcc/config/rs6000/linux64.h
+++ b/gcc/config/rs6000/linux64.h
@@ -417,10 +417,6 @@ extern int dot_symbols;
#undef ASM_APP_OFF
#define ASM_APP_OFF "#NO_APP\n"
-/* PowerPC no-op instruction. */
-#undef RS6000_CALL_GLUE
-#define RS6000_CALL_GLUE (TARGET_64BIT ? "nop" : "cror 31,31,31")
-
#undef RS6000_MCOUNT
#define RS6000_MCOUNT "_mcount"
diff --git a/gcc/config/rs6000/rs6000-c.c b/gcc/config/rs6000/rs6000-c.c
index a2ef08e17ca..58101ab4bff 100644
--- a/gcc/config/rs6000/rs6000-c.c
+++ b/gcc/config/rs6000/rs6000-c.c
@@ -3582,11 +3582,8 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
vec = VEC_alloc (constructor_elt, gc, size);
for(i = 0; i < size; i++)
{
- constructor_elt *elt;
-
- elt = VEC_quick_push (constructor_elt, vec, NULL);
- elt->index = NULL_TREE;
- elt->value = arg;
+ constructor_elt elt = {NULL_TREE, arg};
+ VEC_quick_push (constructor_elt, vec, elt);
}
return build_constructor (type, vec);
}
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 430125f719f..a5a3848e585 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -14628,12 +14628,6 @@ print_operand (FILE *file, rtx x, int code)
switch (code)
{
- case '.':
- /* Write out an instruction after the call which may be replaced
- with glue code by the loader. This depends on the AIX version. */
- asm_fprintf (file, RS6000_CALL_GLUE);
- return;
-
/* %a is output_address. */
case 'A':
@@ -24907,11 +24901,8 @@ static void
add_compiler_branch_island (tree label_name, tree function_name,
int line_number)
{
- branch_island *bi = VEC_safe_push (branch_island, gc, branch_islands, NULL);
-
- bi->function_name = function_name;
- bi->label_name = label_name;
- bi->line_number = line_number;
+ branch_island bi = {function_name, label_name, line_number};
+ VEC_safe_push (branch_island, gc, branch_islands, bi);
}
/* Generate far-jump branch islands for everything recorded in
@@ -25547,10 +25538,12 @@ rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
tree decl ATTRIBUTE_UNUSED)
{
int smclass;
- static const char * const suffix[3] = { "PR", "RO", "RW" };
+ static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };
if (flags & SECTION_CODE)
smclass = 0;
+ else if (flags & SECTION_TLS)
+ smclass = 3;
else if (flags & SECTION_WRITE)
smclass = 2;
else
@@ -26071,10 +26064,10 @@ rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
/* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
static int
-rs6000_debug_address_cost (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
- addr_space_t as ATTRIBUTE_UNUSED, bool speed)
+rs6000_debug_address_cost (rtx x, enum machine_mode mode,
+ addr_space_t as, bool speed)
{
- int ret = TARGET_ADDRESS_COST (x, speed);
+ int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
ret, speed ? "true" : "false");
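[Note: the underlying change here is that the TARGET_ADDRESS_COST hook now
receives the address mode and the address space along with the rtx and the
speed flag, as the updated call shows.  A minimal hook implementation under
the new signature would look roughly like this (hypothetical target, not
rs6000 code):

    static int
    example_address_cost (rtx x ATTRIBUTE_UNUSED,
                          enum machine_mode mode ATTRIBUTE_UNUSED,
                          addr_space_t as ATTRIBUTE_UNUSED,
                          bool speed ATTRIBUTE_UNUSED)
    {
      return 0;   /* all addresses equally cheap on this imaginary target */
    }

    #undef TARGET_ADDRESS_COST
    #define TARGET_ADDRESS_COST example_address_cost
]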
diff --git a/gcc/config/rs6000/rs6000.h b/gcc/config/rs6000/rs6000.h
index 4e33611baf4..b968802d9fb 100644
--- a/gcc/config/rs6000/rs6000.h
+++ b/gcc/config/rs6000/rs6000.h
@@ -2186,10 +2186,6 @@ extern char rs6000_reg_names[][8]; /* register names (0 vs. %r0). */
{"vs56", 101},{"vs57", 102},{"vs58", 103},{"vs59", 104}, \
{"vs60", 105},{"vs61", 106},{"vs62", 107},{"vs63", 108} }
-/* Text to write out after a CALL that may be replaced by glue code by
- the loader. This depends on the AIX version. */
-#define RS6000_CALL_GLUE "cror 31,31,31"
-
/* This is how to output an element of a case-vector that is relative. */
#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index d5ffd81b068..f2bc15f14b8 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -957,6 +957,20 @@
#"
[(set_attr "type" "compare")
(set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (sign_extend:SI (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:SI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
;; IBM 405, 440, 464 and 476 half-word multiplication operations.
@@ -1489,20 +1503,6 @@
DONE;
})
-(define_split
- [(set (match_operand:CC 2 "cc_reg_not_micro_cr0_operand" "")
- (compare:CC (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" ""))
- (const_int 0)))
- (set (match_operand:SI 0 "gpc_reg_operand" "")
- (sign_extend:SI (match_dup 1)))]
- "reload_completed"
- [(set (match_dup 0)
- (sign_extend:SI (match_dup 1)))
- (set (match_dup 2)
- (compare:CC (match_dup 0)
- (const_int 0)))]
- "")
-
;; Fixed-point arithmetic insns.
(define_expand "add<mode>3"
@@ -9814,9 +9814,10 @@
"HAVE_AS_TLS && DEFAULT_ABI == ABI_AIX"
{
if (TARGET_CMODEL != CMODEL_SMALL)
- return "addis %0,%1,%2@got@tlsgd@ha\;addi %0,%0,%2@got@tlsgd@l\;bl %z3\;%.";
+ return "addis %0,%1,%2@got@tlsgd@ha\;addi %0,%0,%2@got@tlsgd@l\;"
+ "bl %z3\;nop";
else
- return "addi %0,%1,%2@got@tlsgd\;bl %z3\;%.";
+ return "addi %0,%1,%2@got@tlsgd\;bl %z3\;nop";
}
"&& TARGET_TLS_MARKERS"
[(set (match_dup 0)
@@ -9919,7 +9920,7 @@
UNSPEC_TLSGD)
(clobber (reg:SI LR_REGNO))]
"HAVE_AS_TLS && DEFAULT_ABI == ABI_AIX && TARGET_TLS_MARKERS"
- "bl %z1(%3@tlsgd)\;%."
+ "bl %z1(%3@tlsgd)\;nop"
[(set_attr "type" "branch")
(set_attr "length" "8")])
@@ -9953,9 +9954,10 @@
"HAVE_AS_TLS && DEFAULT_ABI == ABI_AIX"
{
if (TARGET_CMODEL != CMODEL_SMALL)
- return "addis %0,%1,%&@got@tlsld@ha\;addi %0,%0,%&@got@tlsld@l\;bl %z2\;%.";
+ return "addis %0,%1,%&@got@tlsld@ha\;addi %0,%0,%&@got@tlsld@l\;"
+ "bl %z2\;nop";
else
- return "addi %0,%1,%&@got@tlsld\;bl %z2\;%.";
+ return "addi %0,%1,%&@got@tlsld\;bl %z2\;nop";
}
"&& TARGET_TLS_MARKERS"
[(set (match_dup 0)
@@ -10051,7 +10053,7 @@
(unspec:TLSmode [(const_int 0)] UNSPEC_TLSLD)
(clobber (reg:SI LR_REGNO))]
"HAVE_AS_TLS && DEFAULT_ABI == ABI_AIX && TARGET_TLS_MARKERS"
- "bl %z1(%&@tlsld)\;%."
+ "bl %z1(%&@tlsld)\;nop"
[(set_attr "type" "branch")
(set_attr "length" "8")])
@@ -10873,7 +10875,7 @@
"TARGET_32BIT
&& DEFAULT_ABI == ABI_AIX
&& (INTVAL (operands[2]) & CALL_LONG) == 0"
- "bl %z0\;%."
+ "bl %z0\;nop"
[(set_attr "type" "branch")
(set_attr "length" "8")])
@@ -10885,7 +10887,7 @@
"TARGET_64BIT
&& DEFAULT_ABI == ABI_AIX
&& (INTVAL (operands[2]) & CALL_LONG) == 0"
- "bl %z0\;%."
+ "bl %z0\;nop"
[(set_attr "type" "branch")
(set_attr "length" "8")])
@@ -10898,7 +10900,7 @@
"TARGET_32BIT
&& DEFAULT_ABI == ABI_AIX
&& (INTVAL (operands[3]) & CALL_LONG) == 0"
- "bl %z1\;%."
+ "bl %z1\;nop"
[(set_attr "type" "branch")
(set_attr "length" "8")])
@@ -10911,7 +10913,7 @@
"TARGET_64BIT
&& DEFAULT_ABI == ABI_AIX
&& (INTVAL (operands[3]) & CALL_LONG) == 0"
- "bl %z1\;%."
+ "bl %z1\;nop"
[(set_attr "type" "branch")
(set_attr "length" "8")])
diff --git a/gcc/config/score/score.c b/gcc/config/score/score.c
index 2eeeab9f872..3b550861515 100644
--- a/gcc/config/score/score.c
+++ b/gcc/config/score/score.c
@@ -120,9 +120,6 @@ struct extern_list *extern_head = 0;
#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE score_option_override
-#undef TARGET_LEGITIMIZE_ADDRESS
-#define TARGET_LEGITIMIZE_ADDRESS score_legitimize_address
-
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE score_issue_rate
@@ -541,30 +538,6 @@ score_split_symbol (rtx temp, rtx addr)
return gen_rtx_LO_SUM (Pmode, high, addr);
}
-/* This function is used to implement LEGITIMIZE_ADDRESS. If X can
- be legitimized in a way that the generic machinery might not expect,
- return the new address. */
-static rtx
-score_legitimize_address (rtx x)
-{
- enum score_symbol_type symbol_type;
-
- if (score_symbolic_constant_p (x, &symbol_type)
- && symbol_type == SYMBOL_GENERAL)
- return score_split_symbol (0, x);
-
- if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
- {
- rtx reg = XEXP (x, 0);
- if (!score_valid_base_register_p (reg, 0))
- reg = copy_to_mode_reg (Pmode, reg);
- return score_add_offset (reg, INTVAL (XEXP (x, 1)));
- }
-
- return x;
-}
-
/* Fill INFO with information about a single argument. CUM is the
cumulative state for earlier arguments. MODE is the mode of this
argument and TYPE is its type (if known). NAMED is true if this
diff --git a/gcc/config/sh/newlib.h b/gcc/config/sh/newlib.h
index 13099c1f8d9..3b3ec1651f1 100644
--- a/gcc/config/sh/newlib.h
+++ b/gcc/config/sh/newlib.h
@@ -23,3 +23,7 @@ along with GCC; see the file COPYING3. If not see
#undef LIB_SPEC
#define LIB_SPEC "-lc -lgloss"
+
+#undef NO_IMPLICIT_EXTERN_C
+#define NO_IMPLICIT_EXTERN_C 1
+
diff --git a/gcc/config/sh/predicates.md b/gcc/config/sh/predicates.md
index 92a7b689c84..3936ab2953f 100644
--- a/gcc/config/sh/predicates.md
+++ b/gcc/config/sh/predicates.md
@@ -791,9 +791,8 @@
/* Allow T_REG as shift count for dynamic shifts, although it is not
really possible. It will then be copied to a general purpose reg. */
if (! TARGET_SHMEDIA)
- return const_int_operand (op, mode)
- || (TARGET_DYNSHIFT && (arith_reg_operand (op, mode)
- || t_reg_operand (op, mode)));
+ return const_int_operand (op, mode) || arith_reg_operand (op, mode)
+ || (TARGET_DYNSHIFT && t_reg_operand (op, mode));
return (CONSTANT_P (op)
? (CONST_INT_P (op)
diff --git a/gcc/config/sh/sh.c b/gcc/config/sh/sh.c
index 10dad62b8f7..0abf28facc2 100644
--- a/gcc/config/sh/sh.c
+++ b/gcc/config/sh/sh.c
@@ -2871,35 +2871,35 @@ struct ashl_lshr_sequence
static const struct ashl_lshr_sequence ashl_lshr_seq[32] =
{
- { 0, { 0 }, 0 },
+ { 0, { 0 }, 0 }, // 0
{ 1, { 1 }, LSHR_CLOBBERS_T },
{ 1, { 2 }, 0 },
{ 2, { 2, 1 }, LSHR_CLOBBERS_T },
- { 2, { 2, 2 }, 0 },
+ { 2, { 2, 2 }, 0 }, // 4
{ 3, { 2, 1, 2 }, LSHR_CLOBBERS_T },
{ 3, { 2, 2, 2 }, 0 },
{ 4, { 2, 2, 1, 2 }, LSHR_CLOBBERS_T },
- { 1, { 8 }, 0 },
+ { 1, { 8 }, 0 }, // 8
{ 2, { 8, 1 }, LSHR_CLOBBERS_T },
{ 2, { 8, 2 }, 0 },
{ 3, { 8, 1, 2 }, LSHR_CLOBBERS_T },
- { 3, { 8, 2, 2 }, 0 },
+ { 3, { 8, 2, 2 }, 0 }, // 12
{ 4, { 8, 2, 1, 2 }, LSHR_CLOBBERS_T },
{ 3, { 8, -2, 8 }, 0 },
{ 3, { 8, -1, 8 }, ASHL_CLOBBERS_T },
- { 1, { 16 }, 0 },
+ { 1, { 16 }, 0 }, // 16
{ 2, { 16, 1 }, LSHR_CLOBBERS_T },
{ 2, { 16, 2 }, 0 },
{ 3, { 16, 1, 2 }, LSHR_CLOBBERS_T },
- { 3, { 16, 2, 2 }, 0 },
+ { 3, { 16, 2, 2 }, 0 }, // 20
{ 4, { 16, 2, 1, 2 }, LSHR_CLOBBERS_T },
{ 3, { 16, -2, 8 }, 0 },
{ 3, { 16, -1, 8 }, ASHL_CLOBBERS_T },
- { 2, { 16, 8 }, 0 },
+ { 2, { 16, 8 }, 0 }, // 24
{ 3, { 16, 1, 8 }, LSHR_CLOBBERS_T },
{ 3, { 16, 8, 2 }, 0 },
{ 4, { 16, 8, 1, 2 }, LSHR_CLOBBERS_T },
- { 4, { 16, 8, 2, 2 }, 0 },
+ { 4, { 16, 8, 2, 2 }, 0 }, // 28
{ 4, { 16, -1, -2, 16 }, ASHL_CLOBBERS_T },
{ 3, { 16, -2, 16 }, 0 },
@@ -2915,35 +2915,35 @@ static const struct ashl_lshr_sequence ashl_lshr_seq[32] =
kind of sign or zero extension. */
static const struct ashl_lshr_sequence ext_ashl_lshr_seq[32] =
{
- { 0, { 0 }, 0 },
+ { 0, { 0 }, 0 }, // 0
{ 1, { 1 }, LSHR_CLOBBERS_T },
{ 1, { 2 }, 0 },
{ 2, { 2, 1 }, LSHR_CLOBBERS_T },
- { 2, { 2, 2 }, 0 },
+ { 2, { 2, 2 }, 0 }, // 4
{ 3, { 2, 1, 2 }, LSHR_CLOBBERS_T },
{ 2, { 8, -2 }, 0 },
{ 2, { 8, -1 }, ASHL_CLOBBERS_T },
- { 1, { 8 }, 0 },
+ { 1, { 8 }, 0 }, // 8
{ 2, { 8, 1 }, LSHR_CLOBBERS_T },
{ 2, { 8, 2 }, 0 },
{ 3, { 8, 1, 2 }, LSHR_CLOBBERS_T },
- { 3, { 8, 2, 2 }, 0 },
+ { 3, { 8, 2, 2 }, 0 }, // 12
{ 3, { 16, -2, -1 }, ASHL_CLOBBERS_T },
{ 2, { 16, -2 }, 0 },
{ 2, { 16, -1 }, ASHL_CLOBBERS_T },
- { 1, { 16 }, 0 },
+ { 1, { 16 }, 0 }, // 16
{ 2, { 16, 1 }, LSHR_CLOBBERS_T },
{ 2, { 16, 2 }, 0 },
{ 3, { 16, 1, 2 }, LSHR_CLOBBERS_T },
- { 3, { 16, 2, 2 }, 0 },
+ { 3, { 16, 2, 2 }, 0 }, // 20
{ 4, { 16, 2, 1, 2 }, LSHR_CLOBBERS_T },
{ 3, { 16, -2, 8 }, 0 },
{ 3, { 16, -1, 8 }, ASHL_CLOBBERS_T },
- { 2, { 16, 8 }, 0 },
+ { 2, { 16, 8 }, 0 }, // 24
{ 3, { 16, 1, 8 }, LSHR_CLOBBERS_T },
{ 3, { 16, 8, 2 }, 0 },
{ 4, { 16, 8, 1, 2 }, LSHR_CLOBBERS_T },
- { 4, { 16, 8, 2, 2 }, 0 },
+ { 4, { 16, 8, 2, 2 }, 0 }, // 28
{ 4, { 16, -1, -2, 16 }, ASHL_CLOBBERS_T },
{ 3, { 16, -2, 16 }, 0 },
{ 3, { 16, -1, 16 }, ASHL_CLOBBERS_T }
diff --git a/gcc/config/sh/sh.h b/gcc/config/sh/sh.h
index b36287276aa..d72379022bb 100644
--- a/gcc/config/sh/sh.h
+++ b/gcc/config/sh/sh.h
@@ -1932,19 +1932,27 @@ struct sh_args {
like shad and shld. */
#define TARGET_DYNSHIFT (TARGET_SH3 || TARGET_SH2A)
-#define SH_DYNAMIC_SHIFT_COST \
- (TARGET_HARD_SH4 ? 1 : TARGET_DYNSHIFT ? (optimize_size ? 1 : 2) : 20)
-
-/* Immediate shift counts are truncated by the output routines (or was it
- the assembler?). Shift counts in a register are truncated by SH. Note
- that the native compiler puts too large (> 32) immediate shift counts
- into a register and shifts by the register, letting the SH decide what
- to do instead of doing that itself. */
-/* ??? The library routines in lib1funcs.S truncate the shift count.
- However, the SH3 has hardware shifts that do not truncate exactly as gcc
- expects - the sign bit is significant - so it appears that we need to
- leave this zero for correct SH3 code. */
-#define SHIFT_COUNT_TRUNCATED (! TARGET_SH3 && ! TARGET_SH2A)
+/* The cost of using the dynamic shift insns (shad, shld) is the same
+   if they are available.  If they are not available, a library function
+   will be emitted instead, which is more expensive.  */
+#define SH_DYNAMIC_SHIFT_COST (TARGET_DYNSHIFT ? 1 : 20)
+
+/* Defining SHIFT_COUNT_TRUNCATED tells the combine pass that code like
+ (X << (Y % 32)) for register X, Y is equivalent to (X << Y).
+ This is not generally true when hardware dynamic shifts (shad, shld) are
+ used, because they check the sign bit _before_ the modulo op. The sign
+ bit determines whether it is a left shift or a right shift:
+     if (Y >= 0)
+       return X << (Y & 31);
+     else
+       return X >> (-Y & 31);
+
+   The dynamic shift library routines in lib1funcs.S do not use the sign
+   bit the way the hardware dynamic shifts do; they simply truncate the
+   shift count to the range 0..31.
+ We define SHIFT_COUNT_TRUNCATED to 0 and express the implied shift count
+ truncation in the library function call patterns, as this gives slightly
+ more compact code. */
+#define SHIFT_COUNT_TRUNCATED (0)
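[Note: a quick standalone demonstration of why SHIFT_COUNT_TRUNCATED must now
be 0, using the pseudocode above as an (assumed) model of the hardware shift:

    #include <stdio.h>

    /* C model of the dynamic shift semantics sketched in the comment above
       (illustrative assumption, not the exact shad definition): the sign of
       the count selects the direction before any masking happens.  */
    static unsigned int
    shad_model (unsigned int x, int y)
    {
      if (y >= 0)
        return x << (y & 31);
      return x >> (-y & 31);
    }

    int
    main (void)
    {
      /* SHIFT_COUNT_TRUNCATED would predict 1 << (-1 & 31) == 1u << 31,
         but the hardware performs a right shift instead.  */
      printf ("%u\n", shad_model (1, -1));   /* prints 0 */
      printf ("%u\n", 1u << (-1 & 31));      /* prints 2147483648 */
      return 0;
    }
]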
/* CANONICALIZE_COMPARISON macro for the combine pass. */
#define CANONICALIZE_COMPARISON(CODE, OP0, OP1) \
diff --git a/gcc/config/sh/sh.md b/gcc/config/sh/sh.md
index 8b44fbda496..c06a51011b7 100644
--- a/gcc/config/sh/sh.md
+++ b/gcc/config/sh/sh.md
@@ -4023,6 +4023,17 @@ label:
operands[2]));
DONE;
}
+
+ /* Expand a library call for the dynamic shift. */
+ if (!CONST_INT_P (operands[2]) && !TARGET_DYNSHIFT)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, R4_REG), operands[1]);
+ rtx funcaddr = gen_reg_rtx (Pmode);
+ function_symbol (funcaddr, "__ashlsi3_r0", SFUNC_STATIC);
+ emit_insn (gen_ashlsi3_d_call (operands[0], operands[2], funcaddr));
+
+ DONE;
+ }
})
(define_insn "ashlsi3_k"
@@ -4067,6 +4078,23 @@ label:
}
[(set_attr "type" "dyn_shift")])
+;; If dynamic shifts are not available, use a library function.
+;; By specifying the pattern we reduce the number of call-clobbered regs.
+;; In order to make combine understand the truncation of the shift amount
+;; operand we have to allow it to use pseudo regs for the shift operands.
+(define_insn "ashlsi3_d_call"
+ [(set (match_operand:SI 0 "arith_reg_dest" "=z")
+ (ashift:SI (reg:SI R4_REG)
+ (and:SI (match_operand:SI 1 "arith_reg_operand" "z")
+ (const_int 31))))
+ (use (match_operand:SI 2 "arith_reg_operand" "r"))
+ (clobber (reg:SI T_REG))
+ (clobber (reg:SI PR_REG))]
+ "TARGET_SH1 && !TARGET_DYNSHIFT"
+ "jsr @%2%#"
+ [(set_attr "type" "sfunc")
+ (set_attr "needs_delay_slot" "yes")])
+
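[Note: the RTL above encodes the contract of the static helper: the value
arrives in r4, the shift count in r0 (the "z" constraint), the result comes
back in r0, and the count is masked to 5 bits.  As a C model (an assumption
read off the pattern, not the lib1funcs.S source); the lshrsi3_d_call pattern
added later in this file is the same shape with a logical right shift:

    static unsigned int
    ashlsi3_r0_model (unsigned int r4 /* value */, unsigned int r0 /* count */)
    {
      return r4 << (r0 & 31);   /* count truncated, no sign-bit behaviour */
    }
]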
(define_insn_and_split "ashlsi3_n"
[(set (match_operand:SI 0 "arith_reg_dest" "=r")
(ashift:SI (match_operand:SI 1 "arith_reg_operand" "0")
@@ -4512,6 +4540,16 @@ label:
operands[2]));
DONE;
}
+
+ /* Expand a library call for the dynamic shift. */
+ if (!CONST_INT_P (operands[2]) && !TARGET_DYNSHIFT)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, R4_REG), operands[1]);
+ rtx funcaddr = gen_reg_rtx (Pmode);
+ function_symbol (funcaddr, "__lshrsi3_r0", SFUNC_STATIC);
+ emit_insn (gen_lshrsi3_d_call (operands[0], operands[2], funcaddr));
+ DONE;
+ }
})
(define_insn "lshrsi3_k"
@@ -4556,6 +4594,23 @@ label:
}
[(set_attr "type" "dyn_shift")])
+;; If dynamic shifts are not available, use a library function.
+;; By specifying the pattern we reduce the number of call-clobbered regs.
+;; In order to make combine understand the truncation of the shift amount
+;; operand we have to allow it to use pseudo regs for the shift operands.
+(define_insn "lshrsi3_d_call"
+ [(set (match_operand:SI 0 "arith_reg_dest" "=z")
+ (lshiftrt:SI (reg:SI R4_REG)
+ (and:SI (match_operand:SI 1 "arith_reg_operand" "z")
+ (const_int 31))))
+ (use (match_operand:SI 2 "arith_reg_operand" "r"))
+ (clobber (reg:SI T_REG))
+ (clobber (reg:SI PR_REG))]
+ "TARGET_SH1 && !TARGET_DYNSHIFT"
+ "jsr @%2%#"
+ [(set_attr "type" "sfunc")
+ (set_attr "needs_delay_slot" "yes")])
+
(define_insn_and_split "lshrsi3_n"
[(set (match_operand:SI 0 "arith_reg_dest" "=r")
(lshiftrt:SI (match_operand:SI 1 "arith_reg_operand" "0")
diff --git a/gcc/config/v850/predicates.md b/gcc/config/v850/predicates.md
index 129f00d595f..404b89e25da 100644
--- a/gcc/config/v850/predicates.md
+++ b/gcc/config/v850/predicates.md
@@ -497,5 +497,3 @@
return op == CONST0_RTX(mode);
})
-
-
diff --git a/gcc/config/v850/v850.c b/gcc/config/v850/v850.c
index 95cdcb47702..fc06675c6f5 100644
--- a/gcc/config/v850/v850.c
+++ b/gcc/config/v850/v850.c
@@ -1,6 +1,6 @@
/* Subroutines for insn-output.c for NEC V850 series
Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
- 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
+ 2006, 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
Contributed by Jeff Law (law@cygnus.com).
This file is part of GCC.
@@ -375,13 +375,13 @@ v850_print_operand (FILE * file, rtx x, int code)
switch (code)
{
case 'c':
- /* We use 'c' operands with symbols for .vtinherit */
+ /* We use 'c' operands with symbols for .vtinherit. */
if (GET_CODE (x) == SYMBOL_REF)
{
output_addr_const(file, x);
break;
}
- /* fall through */
+ /* Fall through. */
case 'b':
case 'B':
case 'C':
@@ -428,7 +428,7 @@ v850_print_operand (FILE * file, rtx x, int code)
gcc_unreachable ();
}
break;
- case 'F': /* high word of CONST_DOUBLE */
+ case 'F': /* High word of CONST_DOUBLE. */
switch (GET_CODE (x))
{
case CONST_INT:
@@ -444,7 +444,7 @@ v850_print_operand (FILE * file, rtx x, int code)
gcc_unreachable ();
}
break;
- case 'G': /* low word of CONST_DOUBLE */
+ case 'G': /* Low word of CONST_DOUBLE. */
switch (GET_CODE (x))
{
case CONST_INT:
@@ -537,7 +537,7 @@ v850_print_operand (FILE * file, rtx x, int code)
break;
}
- case 'W': /* print the instruction suffix */
+ case 'W': /* Print the instruction suffix. */
switch (GET_MODE (x))
{
default:
@@ -549,11 +549,11 @@ v850_print_operand (FILE * file, rtx x, int code)
case SFmode: fputs (".w", file); break;
}
break;
- case '.': /* register r0 */
+ case '.': /* Register r0. */
fputs (reg_names[0], file);
break;
- case 'z': /* reg or zero */
- if (GET_CODE (x) == REG)
+ case 'z': /* Reg or zero. */
+ if (REG_P (x))
fputs (reg_names[REGNO (x)], file);
else if ((GET_MODE(x) == SImode
|| GET_MODE(x) == DFmode
@@ -1448,13 +1448,13 @@ compute_register_save_size (long * p_reg_saved)
int call_p = df_regs_ever_live_p (LINK_POINTER_REGNUM);
long reg_saved = 0;
- /* Count the return pointer if we need to save it. */
- if (crtl->profile && !call_p)
+ /* Always save the link pointer - we cannot rely upon df_regs_ever_live_p. */
+ if (!call_p)
{
df_set_regs_ever_live (LINK_POINTER_REGNUM, true);
call_p = 1;
}
-
+
/* Count space for the register saves. */
if (interrupt_handler)
{
@@ -1589,6 +1589,27 @@ use_prolog_function (int num_save, int frame_size)
return ((save_func_len + restore_func_len) < (save_normal_len + restore_normal_len));
}
+static void
+increment_stack (unsigned int amount)
+{
+ rtx inc;
+
+ if (amount == 0)
+ return;
+
+ inc = GEN_INT (amount);
+
+ if (! CONST_OK_FOR_K (amount))
+ {
+ rtx reg = gen_rtx_REG (Pmode, 12);
+
+ emit_move_insn (reg, inc);
+ inc = reg;
+ }
+
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, inc));
+}
+
void
expand_prologue (void)
{
@@ -1605,6 +1626,9 @@ expand_prologue (void)
actual_fsize = compute_frame_size (size, &reg_saved);
+ if (flag_stack_usage_info)
+ current_function_static_stack_size = actual_fsize;
+
/* Save/setup global registers for interrupt functions right now. */
if (interrupt_handler)
{
@@ -1710,9 +1734,7 @@ expand_prologue (void)
offset = init_stack_alloc - 4;
if (init_stack_alloc)
- emit_insn (gen_addsi3 (stack_pointer_rtx,
- stack_pointer_rtx,
- GEN_INT (- (signed) init_stack_alloc)));
+ increment_stack (- (signed) init_stack_alloc);
/* Save the return pointer first. */
if (num_save > 0 && REGNO (save_regs[num_save-1]) == LINK_POINTER_REGNUM)
@@ -1743,16 +1765,8 @@ expand_prologue (void)
if (actual_fsize > init_stack_alloc)
{
int diff = actual_fsize - init_stack_alloc;
- if (CONST_OK_FOR_K (-diff))
- emit_insn (gen_addsi3 (stack_pointer_rtx,
- stack_pointer_rtx,
- GEN_INT (-diff)));
- else
- {
- rtx reg = gen_rtx_REG (Pmode, 12);
- emit_move_insn (reg, GEN_INT (-diff));
- emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
- }
+
+ increment_stack (- diff);
}
/* If we need a frame pointer, set it up now. */
@@ -1837,25 +1851,10 @@ expand_epilogue (void)
rtx insn;
actual_fsize -= alloc_stack;
- if (actual_fsize)
- {
- if (CONST_OK_FOR_K (actual_fsize))
- emit_insn (gen_addsi3 (stack_pointer_rtx,
- stack_pointer_rtx,
- GEN_INT (actual_fsize)));
- else
- {
- rtx reg = gen_rtx_REG (Pmode, 12);
- emit_move_insn (reg, GEN_INT (actual_fsize));
- emit_insn (gen_addsi3 (stack_pointer_rtx,
- stack_pointer_rtx,
- reg));
- }
- }
+ increment_stack (actual_fsize);
insn = emit_jump_insn (restore_all);
INSN_CODE (insn) = code;
-
}
else
restore_all = NULL_RTX;
@@ -1878,24 +1877,7 @@ expand_epilogue (void)
/* Deallocate the rest of the stack if it is > 32K. */
if ((unsigned int) actual_fsize > init_stack_free)
- {
- int diff;
-
- diff = actual_fsize - init_stack_free;
-
- if (CONST_OK_FOR_K (diff))
- emit_insn (gen_addsi3 (stack_pointer_rtx,
- stack_pointer_rtx,
- GEN_INT (diff)));
- else
- {
- rtx reg = gen_rtx_REG (Pmode, 12);
- emit_move_insn (reg, GEN_INT (diff));
- emit_insn (gen_addsi3 (stack_pointer_rtx,
- stack_pointer_rtx,
- reg));
- }
- }
+ increment_stack (actual_fsize - init_stack_free);
/* Special case interrupt functions that save all registers
for a call. */
@@ -1936,10 +1918,7 @@ expand_epilogue (void)
}
/* Cut back the remainder of the stack. */
- if (init_stack_free)
- emit_insn (gen_addsi3 (stack_pointer_rtx,
- stack_pointer_rtx,
- GEN_INT (init_stack_free)));
+ increment_stack (init_stack_free);
}
/* And return or use reti for interrupt handlers. */
@@ -3088,6 +3067,15 @@ static const struct attribute_spec v850_attribute_table[] =
{ NULL, 0, 0, false, false, false, NULL, false }
};
+static enum unwind_info_type
+v850_debug_unwind_info (void)
+{
+ return UI_NONE;
+}
+
+#undef TARGET_DEBUG_UNWIND_INFO
+#define TARGET_DEBUG_UNWIND_INFO v850_debug_unwind_info
+
/* Initialize the GCC target structure. */
#undef TARGET_MEMORY_MOVE_COST
diff --git a/gcc/config/v850/v850.h b/gcc/config/v850/v850.h
index 10ddd7b13a6..298c03acba2 100644
--- a/gcc/config/v850/v850.h
+++ b/gcc/config/v850/v850.h
@@ -786,8 +786,14 @@ typedef enum
#define DEFAULT_GDB_EXTENSIONS 1
/* Use stabs debugging info by default. */
-#undef PREFERRED_DEBUGGING_TYPE
-#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+#define DBX_DEBUGGING_INFO 1
+
+#ifndef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(STRING, PREFIX, NUM) \
+ sprintf (STRING, "*.%s%u", PREFIX, (unsigned int)(NUM))
+#endif
/* Specify the machine mode that this machine uses
for the index in the tablejump instruction. */
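[Note: for reference, the fallback ASM_GENERATE_INTERNAL_LABEL added here
produces names like "*.L42"; the leading '*' tells GCC's assembler-output
machinery that the name is already in final form.  A tiny standalone check:

    #include <stdio.h>

    int
    main (void)
    {
      char label[32];
      sprintf (label, "*.%s%u", "L", 42u);   /* as in the macro above */
      printf ("%s\n", label);                /* prints "*.L42" */
      return 0;
    }
]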
diff --git a/gcc/config/v850/v850.md b/gcc/config/v850/v850.md
index f479ff6322e..ab6b8667501 100644
--- a/gcc/config/v850/v850.md
+++ b/gcc/config/v850/v850.md
@@ -2689,5 +2689,3 @@
"jarl __restore_all_interrupt,r10"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
-
-
diff --git a/gcc/configure b/gcc/configure
index 47106e4f8e9..5bb409a3b4d 100755
--- a/gcc/configure
+++ b/gcc/configure
@@ -24417,9 +24417,7 @@ if test "${gcc_cv_as_ix86_hle+set}" = set; then :
else
gcc_cv_as_ix86_hle=no
if test x$gcc_cv_as != x; then
- $as_echo '.code64
- lock xacquire cmpxchg %esi, (%rcx)
- ' > conftest.s
+ $as_echo 'lock xacquire cmpxchg %esi, (%ecx)' > conftest.s
if { ac_try='$gcc_cv_as $gcc_cv_as_flags -o conftest.o conftest.s >&5'
{ { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
(eval $ac_try) 2>&5
diff --git a/gcc/configure.ac b/gcc/configure.ac
index 7042c9116ec..699c7904f7c 100644
--- a/gcc/configure.ac
+++ b/gcc/configure.ac
@@ -3581,9 +3581,7 @@ foo: nop
gcc_GAS_CHECK_FEATURE([hle prefixes],
gcc_cv_as_ix86_hle,,,
- [.code64
- lock xacquire cmpxchg %esi, (%rcx)
- ],,
+ [lock xacquire cmpxchg %esi, (%ecx)],,
[AC_DEFINE(HAVE_AS_IX86_HLE, 1,
[Define if your assembler supports HLE prefixes.])])
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index 41c01601b56..bd620469df1 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,59 @@
+2012-09-10 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/54541
+ PR c++/54542
+ * call.c (build_cxx_call): Add tsubst_flags_t parameter, use
+ require_complete_type_sfinae.
+ (build_op_delete_call, build_over_call): Adjust.
+ * typeck.c (build_x_compound_expr_from_vec): Add tsubst_flags_t
+ parameter.
+ (cp_build_function_call_vec): Adjust.
+ * init.c (build_new_1): Likewise.
+ * rtti.c (throw_bad_cast, throw_bad_typeid, build_dynamic_cast_1):
+ Likewise.
+ * optimize.c (build_delete_destructor_body): Likewise.
+ * cp-tree.h: Adjust declarations.
+
+ * call.c (convert_arg_to_ellipsis): Use require_complete_type_sfinae.
+
+2012-09-10 Jason Merrill <jason@redhat.com>
+
+ PR c++/54538
+ PR c++/53783
+ * pt.c (tsubst_copy_and_build) [LAMBDA_EXPR]: Go back to using RECUR
+ for LAMBDA_EXPR_EXTRA_SCOPE except for function scope.
+
+ PR c++/54506
+ * decl.c (move_signature_fn_p): Split out from move_fn_p.
+ * method.c (process_subob_fn): Use it.
+ * cp-tree.h: Declare it.
+
+2012-09-07 Jason Merrill <jason@redhat.com>
+
+ * semantics.c (sort_constexpr_mem_initializers): Tweak.
+
+2012-09-09 Mark Kettenis <kettenis@openbsd.org>
+
+ * decl.c (reshape_init_class): Avoid dereferencing a
+ past-the-end pointer.
+
+2012-09-07 Paolo Carlini <paolo.carlini@oracle.com>
+
+ * pt.c (num_template_headers_for_class): Rework per the code
+ inline in cp_parser_check_declarator_template_parameters.
+ * parser.c (cp_parser_check_declarator_template_parameters):
+ Use num_template_headers_for_class.
+
+2012-09-06 Jason Merrill <jason@redhat.com>
+
+ PR c++/54341
+ PR c++/54253
+ * semantics.c (sort_constexpr_mem_initializers): New.
+ (build_constexpr_constructor_member_initializers): Use it.
+ (cx_check_missing_mem_inits): Skip artificial fields.
+ * init.c (expand_aggr_init_1): Don't zero out a class
+ with no data.
+
2012-09-05 Paolo Carlini <paolo.carlini@oracle.com>
PR c++/54191
diff --git a/gcc/cp/call.c b/gcc/cp/call.c
index e3524a784f4..3b9b0a8dea7 100644
--- a/gcc/cp/call.c
+++ b/gcc/cp/call.c
@@ -5538,7 +5538,8 @@ build_op_delete_call (enum tree_code code, tree addr, tree size,
for (i = 1; i < nargs; i++)
argarray[i] = CALL_EXPR_ARG (placement, i);
mark_used (fn);
- return build_cxx_call (fn, CILK_CALL_NORMAL, nargs, argarray);
+ return build_cxx_call (fn, CILK_CALL_NORMAL, nargs, argarray,
+ complain);
}
else
{
@@ -6146,12 +6147,12 @@ convert_arg_to_ellipsis (tree arg, tsubst_flags_t complain)
arg = cp_perform_integral_promotions (arg, complain);
}
- arg = require_complete_type (arg);
+ arg = require_complete_type_sfinae (arg, complain);
arg_type = TREE_TYPE (arg);
if (arg != error_mark_node
/* In a template (or ill-formed code), we can have an incomplete type
- even after require_complete_type, in which case we don't know
+ even after require_complete_type_sfinae, in which case we don't know
whether it has trivial copy or not. */
&& COMPLETE_TYPE_P (arg_type))
{
@@ -6902,7 +6903,7 @@ build_over_call (struct z_candidate *cand, int flags,
return error_mark_node;
}
- return build_cxx_call (fn, spawning, nargs, argarray);
+ return build_cxx_call (fn, spawning, nargs, argarray, complain);
}
/* Build and return a call to FN, using NARGS arguments in ARGARRAY.
@@ -6910,7 +6911,8 @@ build_over_call (struct z_candidate *cand, int flags,
high-level operations. */
tree
-build_cxx_call (tree fn, enum call_context spawning, int nargs, tree *argarray)
+build_cxx_call (tree fn, enum call_context spawning, int nargs, tree *argarray,
+ tsubst_flags_t complain)
{
tree fndecl;
int optimize_sav;
@@ -6943,12 +6945,12 @@ build_cxx_call (tree fn, enum call_context spawning, int nargs, tree *argarray)
if (VOID_TYPE_P (TREE_TYPE (fn)))
return fn;
- fn = require_complete_type (fn);
+ fn = require_complete_type_sfinae (fn, complain);
if (fn == error_mark_node)
return error_mark_node;
if (MAYBE_CLASS_TYPE_P (TREE_TYPE (fn)))
- fn = build_cplus_new (TREE_TYPE (fn), fn, tf_warning_or_error);
+ fn = build_cplus_new (TREE_TYPE (fn), fn, complain);
return convert_from_reference (fn);
}
diff --git a/gcc/cp/class.c b/gcc/cp/class.c
index 13d9c768509..7abcc7e0da8 100644
--- a/gcc/cp/class.c
+++ b/gcc/cp/class.c
@@ -8835,11 +8835,9 @@ add_vcall_offset (tree orig_fn, tree binfo, vtbl_init_data *vid)
offset. */
if (vid->binfo == TYPE_BINFO (vid->derived))
{
- tree_pair_p elt = VEC_safe_push (tree_pair_s, gc,
- CLASSTYPE_VCALL_INDICES (vid->derived),
- NULL);
- elt->purpose = orig_fn;
- elt->value = vid->index;
+ tree_pair_s elt = {orig_fn, vid->index};
+ VEC_safe_push (tree_pair_s, gc, CLASSTYPE_VCALL_INDICES (vid->derived),
+ elt);
}
/* The next vcall offset will be found at a more negative
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index 952529fa087..b73bfd477d6 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -4965,7 +4965,7 @@ extern tree perform_direct_initialization_if_possible (tree, tree, bool,
tsubst_flags_t);
extern tree in_charge_arg_for_name (tree);
extern tree build_cxx_call (tree, enum call_context,
- int, tree *);
+ int, tree *, tsubst_flags_t);
extern bool is_std_init_list (tree);
extern bool is_list_ctor (tree);
#ifdef ENABLE_CHECKING
@@ -5113,6 +5113,7 @@ extern tree build_ptrmem_type (tree, tree);
extern tree build_this_parm (tree, cp_cv_quals);
extern int copy_fn_p (const_tree);
extern bool move_fn_p (const_tree);
+extern bool move_signature_fn_p (const_tree);
extern tree get_scope_of_declarator (const cp_declarator *);
extern void grok_special_member_properties (tree);
extern int grok_ctor_properties (const_tree, const_tree);
@@ -5907,7 +5908,8 @@ extern tree build_x_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_x_compound_expr_from_list (tree, expr_list_kind,
tsubst_flags_t);
-extern tree build_x_compound_expr_from_vec (VEC(tree,gc) *, const char *);
+extern tree build_x_compound_expr_from_vec (VEC(tree,gc) *, const char *,
+ tsubst_flags_t);
extern tree build_x_compound_expr (location_t, tree, tree,
tsubst_flags_t);
extern tree build_compound_expr (location_t, tree, tree);
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 2a5e7add1ad..156fc516929 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -2652,16 +2652,16 @@ tree
declare_local_label (tree id)
{
tree decl;
- cp_label_binding *bind;
+ cp_label_binding bind;
/* Add a new entry to the SHADOWED_LABELS list so that when we leave
this scope we can restore the old value of IDENTIFIER_TYPE_VALUE. */
- bind = VEC_safe_push (cp_label_binding, gc,
- current_binding_level->shadowed_labels, NULL);
- bind->prev_value = IDENTIFIER_LABEL_VALUE (id);
+ bind.prev_value = IDENTIFIER_LABEL_VALUE (id);
decl = make_label_decl (id, /*local_p=*/1);
- bind->label = decl;
+ bind.label = decl;
+ VEC_safe_push (cp_label_binding, gc, current_binding_level->shadowed_labels,
+ bind);
return decl;
}
@@ -5145,7 +5145,7 @@ reshape_init_class (tree type, reshape_iter *d, bool first_initializer_p,
if (field_init == error_mark_node)
return error_mark_node;
- if (d->cur->index && d->cur == old_cur)
+ if (d->cur == old_cur && d->cur->index)
{
/* This can happen with an invalid initializer for a flexible
array member (c++/54441). */
@@ -10875,10 +10875,6 @@ copy_fn_p (const_tree d)
bool
move_fn_p (const_tree d)
{
- tree args;
- tree arg_type;
- bool result = false;
-
gcc_assert (DECL_FUNCTION_MEMBER_P (d));
if (cxx_dialect == cxx98)
@@ -10888,12 +10884,29 @@ move_fn_p (const_tree d)
if (TREE_CODE (d) == TEMPLATE_DECL
|| (DECL_TEMPLATE_INFO (d)
&& DECL_MEMBER_TEMPLATE_P (DECL_TI_TEMPLATE (d))))
- /* Instantiations of template member functions are never copy
+ /* Instantiations of template member functions are never move
functions. Note that member functions of templated classes are
represented as template functions internally, and we must
- accept those as copy functions. */
+ accept those as move functions. */
return 0;
+ return move_signature_fn_p (d);
+}
+
+/* D is a constructor or overloaded `operator='.
+
+   This function returns true when D has the same signature as a move
+ constructor or move assignment operator (because either it is such a
+ ctor/op= or it is a template specialization with the same signature),
+ false otherwise. */
+
+bool
+move_signature_fn_p (const_tree d)
+{
+ tree args;
+ tree arg_type;
+ bool result = false;
+
args = FUNCTION_FIRST_USER_PARMTYPE (d);
if (!args)
return 0;
@@ -13797,10 +13810,8 @@ maybe_register_incomplete_var (tree var)
|| (TYPE_LANG_SPECIFIC (inner_type)
&& TYPE_BEING_DEFINED (inner_type)))
{
- incomplete_var *iv
- = VEC_safe_push (incomplete_var, gc, incomplete_vars, NULL);
- iv->decl = var;
- iv->incomplete_type = inner_type;
+ incomplete_var iv = {var, inner_type};
+ VEC_safe_push (incomplete_var, gc, incomplete_vars, iv);
}
}
}
diff --git a/gcc/cp/except.c b/gcc/cp/except.c
index 8134cbb5e78..af94aa15cdc 100644
--- a/gcc/cp/except.c
+++ b/gcc/cp/except.c
@@ -1252,11 +1252,8 @@ expr_noexcept_p (tree expr, tsubst_flags_t complain)
if (!DECL_INITIAL (fn))
{
/* Not defined yet; check again at EOF. */
- pending_noexcept *p
- = VEC_safe_push (pending_noexcept, gc,
- pending_noexcept_checks, NULL);
- p->fn = fn;
- p->loc = input_location;
+ pending_noexcept p = {fn, input_location};
+ VEC_safe_push (pending_noexcept, gc, pending_noexcept_checks, p);
}
else
maybe_noexcept_warning (fn);
diff --git a/gcc/cp/init.c b/gcc/cp/init.c
index 40902973427..e687ddb753f 100644
--- a/gcc/cp/init.c
+++ b/gcc/cp/init.c
@@ -253,21 +253,21 @@ build_zero_init_1 (tree type, tree nelts, bool static_storage_p,
have an upper bound of -1. */
if (!tree_int_cst_equal (max_index, integer_minus_one_node))
{
- constructor_elt *ce;
+ constructor_elt ce;
v = VEC_alloc (constructor_elt, gc, 1);
- ce = VEC_quick_push (constructor_elt, v, NULL);
/* If this is a one element array, we just use a regular init. */
if (tree_int_cst_equal (size_zero_node, max_index))
- ce->index = size_zero_node;
+ ce.index = size_zero_node;
else
- ce->index = build2 (RANGE_EXPR, sizetype, size_zero_node,
+ ce.index = build2 (RANGE_EXPR, sizetype, size_zero_node,
max_index);
- ce->value = build_zero_init_1 (TREE_TYPE (type),
+ ce.value = build_zero_init_1 (TREE_TYPE (type),
/*nelts=*/NULL_TREE,
static_storage_p, NULL_TREE);
+ VEC_quick_push (constructor_elt, v, ce);
}
/* Build a constructor to contain the initializations. */
@@ -448,28 +448,27 @@ build_value_init_noctor (tree type, tsubst_flags_t complain)
have an upper bound of -1. */
if (!tree_int_cst_equal (max_index, integer_minus_one_node))
{
- constructor_elt *ce;
+ constructor_elt ce;
v = VEC_alloc (constructor_elt, gc, 1);
- ce = VEC_quick_push (constructor_elt, v, NULL);
/* If this is a one element array, we just use a regular init. */
if (tree_int_cst_equal (size_zero_node, max_index))
- ce->index = size_zero_node;
+ ce.index = size_zero_node;
else
- ce->index = build2 (RANGE_EXPR, sizetype, size_zero_node,
- max_index);
+ ce.index = build2 (RANGE_EXPR, sizetype, size_zero_node, max_index);
- ce->value = build_value_init (TREE_TYPE (type), complain);
+ ce.value = build_value_init (TREE_TYPE (type), complain);
+ VEC_quick_push (constructor_elt, v, ce);
- if (ce->value == error_mark_node)
+ if (ce.value == error_mark_node)
return error_mark_node;
/* We shouldn't have gotten here for anything that would need
non-trivial initialization, and gimplify_init_ctor_preeval
would need to be fixed to allow it. */
- gcc_assert (TREE_CODE (ce->value) != TARGET_EXPR
- && TREE_CODE (ce->value) != AGGR_INIT_EXPR);
+ gcc_assert (TREE_CODE (ce.value) != TARGET_EXPR
+ && TREE_CODE (ce.value) != AGGR_INIT_EXPR);
}
/* Build a constructor to contain the initializations. */
@@ -1744,8 +1743,10 @@ expand_aggr_init_1 (tree binfo, tree true_exp, tree exp, tree init, int flags,
that's value-initialization. */
if (init == void_type_node)
{
- /* If no user-provided ctor, we need to zero out the object. */
- if (!type_has_user_provided_constructor (type))
+ /* If the type has data but no user-provided ctor, we need to zero
+ out the object. */
+ if (!type_has_user_provided_constructor (type)
+ && !is_really_empty_class (type))
{
tree field_size = NULL_TREE;
if (exp != true_exp && CLASSTYPE_AS_BASE (type) != type)
@@ -2740,7 +2741,8 @@ build_new_1 (VEC(tree,gc) **placement, tree type, tree nelts,
/* We are processing something like `new int (10)', which
means allocate an int, and initialize it with 10. */
- ie = build_x_compound_expr_from_vec (*init, "new initializer");
+ ie = build_x_compound_expr_from_vec (*init, "new initializer",
+ complain);
init_expr = cp_build_modify_expr (init_expr, INIT_EXPR, ie,
complain);
}
diff --git a/gcc/cp/method.c b/gcc/cp/method.c
index e89134d4df0..d914f86eddf 100644
--- a/gcc/cp/method.c
+++ b/gcc/cp/method.c
@@ -948,9 +948,10 @@ process_subob_fn (tree fn, bool move_p, tree *spec_p, bool *trivial_p,
}
}
- /* Core 1402: A non-trivial copy op suppresses the implicit
+ /* Core 1402: A non-trivial non-move ctor suppresses the implicit
declaration of the move ctor/op=. */
- if (no_implicit_p && move_p && !move_fn_p (fn) && !trivial_fn_p (fn))
+ if (no_implicit_p && move_p && !move_signature_fn_p (fn)
+ && !trivial_fn_p (fn))
*no_implicit_p = true;
if (constexpr_p && !DECL_DECLARED_CONSTEXPR_P (fn))
diff --git a/gcc/cp/name-lookup.c b/gcc/cp/name-lookup.c
index bfc6c544533..098aee9ccb9 100644
--- a/gcc/cp/name-lookup.c
+++ b/gcc/cp/name-lookup.c
@@ -318,13 +318,9 @@ cxx_binding_free (cxx_binding *binding)
static cxx_binding *
new_class_binding (tree name, tree value, tree type, cp_binding_level *scope)
{
- cp_class_binding *cb;
- cxx_binding *binding;
-
- cb = VEC_safe_push (cp_class_binding, gc, scope->class_shadowed, NULL);
-
- cb->identifier = name;
- cb->base = binding = cxx_binding_make (value, type);
+ cp_class_binding cb = {cxx_binding_make (value, type), name};
+ cxx_binding *binding = cb.base;
+ VEC_safe_push (cp_class_binding, gc, scope->class_shadowed, cb);
binding->scope = scope;
return binding;
}
@@ -5889,16 +5885,16 @@ store_binding_p (tree id)
static void
store_binding (tree id, VEC(cxx_saved_binding,gc) **old_bindings)
{
- cxx_saved_binding *saved;
+ cxx_saved_binding saved;
gcc_checking_assert (store_binding_p (id));
IDENTIFIER_MARKED (id) = 1;
- saved = VEC_quick_push (cxx_saved_binding, *old_bindings, NULL);
- saved->identifier = id;
- saved->binding = IDENTIFIER_BINDING (id);
- saved->real_type_value = REAL_IDENTIFIER_TYPE_VALUE (id);
+ saved.identifier = id;
+ saved.binding = IDENTIFIER_BINDING (id);
+ saved.real_type_value = REAL_IDENTIFIER_TYPE_VALUE (id);
+ VEC_quick_push (cxx_saved_binding, *old_bindings, saved);
IDENTIFIER_BINDING (id) = NULL;
}
diff --git a/gcc/cp/optimize.c b/gcc/cp/optimize.c
index 142cbe4f418..ec937524a53 100644
--- a/gcc/cp/optimize.c
+++ b/gcc/cp/optimize.c
@@ -128,7 +128,8 @@ build_delete_destructor_body (tree delete_dtor, tree complete_dtor)
/* Call the corresponding complete destructor. */
gcc_assert (complete_dtor);
- call_dtor = build_cxx_call (complete_dtor, CILK_CALL_NORMAL, 1, &parm);
+ call_dtor = build_cxx_call (complete_dtor, CILK_CALL_NORMAL, 1, &parm,
+ tf_warning_or_error);
add_stmt (call_dtor);
add_stmt (build_stmt (0, LABEL_EXPR, cdtor_label));
diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c
index 2f8fbc97c12..9d64801db64 100644
--- a/gcc/cp/parser.c
+++ b/gcc/cp/parser.c
@@ -602,13 +602,13 @@ cp_lexer_new_main (void)
lexer = cp_lexer_alloc ();
/* Put the first token in the buffer. */
- VEC_quick_push (cp_token, lexer->buffer, &token);
+ VEC_quick_push (cp_token, lexer->buffer, token);
/* Get the remaining tokens from the preprocessor. */
while (token.type != CPP_EOF)
{
cp_lexer_get_preprocessor_token (lexer, &token);
- VEC_safe_push (cp_token, gc, lexer->buffer, &token);
+ VEC_safe_push (cp_token, gc, lexer->buffer, token);
}
lexer->last_token = VEC_address (cp_token, lexer->buffer)
@@ -1770,11 +1770,8 @@ cp_parser_context_new (cp_parser_context* next)
static void
push_unparsed_function_queues (cp_parser *parser)
{
- VEC_safe_push (cp_unparsed_functions_entry, gc,
- parser->unparsed_queues, NULL);
- unparsed_funs_with_default_args = NULL;
- unparsed_funs_with_definitions = make_tree_vector ();
- unparsed_nsdmis = NULL;
+ cp_unparsed_functions_entry e = {NULL, make_tree_vector (), NULL};
+ VEC_safe_push (cp_unparsed_functions_entry, gc, parser->unparsed_queues, e);
}
static void
@@ -8210,7 +8207,7 @@ start_lambda_scope (tree decl)
decl = current_function_decl;
ti.t = lambda_scope;
ti.i = lambda_count;
- VEC_safe_push (tree_int, gc, lambda_scope_stack, &ti);
+ VEC_safe_push (tree_int, gc, lambda_scope_stack, ti);
if (lambda_scope != decl)
{
/* Don't reset the count if we're still in the same function. */
@@ -21311,54 +21308,24 @@ cp_parser_check_declarator_template_parameters (cp_parser* parser,
cp_declarator *declarator,
location_t declarator_location)
{
- unsigned num_templates;
-
- /* We haven't seen any classes that involve template parameters yet. */
- num_templates = 0;
-
switch (declarator->kind)
{
case cdk_id:
- if (declarator->u.id.qualifying_scope)
- {
- tree scope;
-
- scope = declarator->u.id.qualifying_scope;
-
- while (scope && CLASS_TYPE_P (scope))
- {
- /* You're supposed to have one `template <...>'
- for every template class, but you don't need one
- for a full specialization. For example:
-
- template <class T> struct S{};
- template <> struct S<int> { void f(); };
- void S<int>::f () {}
-
- is correct; there shouldn't be a `template <>' for
- the definition of `S<int>::f'. */
- if (!CLASSTYPE_TEMPLATE_INFO (scope))
- /* If SCOPE does not have template information of any
- kind, then it is not a template, nor is it nested
- within a template. */
- break;
- if (explicit_class_specialization_p (scope))
- break;
- if (PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (scope)))
- ++num_templates;
-
- scope = TYPE_CONTEXT (scope);
- }
- }
- else if (TREE_CODE (declarator->u.id.unqualified_name)
- == TEMPLATE_ID_EXPR)
- /* If the DECLARATOR has the form `X<y>' then it uses one
- additional level of template parameters. */
- ++num_templates;
+ {
+ unsigned num_templates = 0;
+ tree scope = declarator->u.id.qualifying_scope;
- return cp_parser_check_template_parameters
- (parser, num_templates, declarator_location, declarator);
+ if (scope)
+ num_templates = num_template_headers_for_class (scope);
+ else if (TREE_CODE (declarator->u.id.unqualified_name)
+ == TEMPLATE_ID_EXPR)
+ /* If the DECLARATOR has the form `X<y>' then it uses one
+ additional level of template parameters. */
+ ++num_templates;
+ return cp_parser_check_template_parameters
+ (parser, num_templates, declarator_location, declarator);
+ }
case cdk_function:
case cdk_array:
@@ -22429,11 +22396,9 @@ cp_parser_save_default_args (cp_parser* parser, tree decl)
probe = TREE_CHAIN (probe))
if (TREE_PURPOSE (probe))
{
- cp_default_arg_entry *entry
- = VEC_safe_push (cp_default_arg_entry, gc,
- unparsed_funs_with_default_args, NULL);
- entry->class_type = current_class_type;
- entry->decl = decl;
+ cp_default_arg_entry entry = {current_class_type, decl};
+ VEC_safe_push (cp_default_arg_entry, gc,
+ unparsed_funs_with_default_args, entry);
break;
}
}
diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c
index 7aad78c4745..ea6b2c1d904 100644
--- a/gcc/cp/pt.c
+++ b/gcc/cp/pt.c
@@ -2215,9 +2215,9 @@ copy_default_args_to_explicit_spec (tree decl)
int
num_template_headers_for_class (tree ctype)
{
- int template_count = 0;
- tree t = ctype;
- while (t != NULL_TREE && CLASS_TYPE_P (t))
+ int num_templates = 0;
+
+ while (ctype && CLASS_TYPE_P (ctype))
{
/* You're supposed to have one `template <...>' for every
template class, but you don't need one for a full
@@ -2229,21 +2229,20 @@ num_template_headers_for_class (tree ctype)
is correct; there shouldn't be a `template <>' for the
definition of `S<int>::f'. */
- if (CLASSTYPE_TEMPLATE_SPECIALIZATION (t)
- && !any_dependent_template_arguments_p (CLASSTYPE_TI_ARGS (t)))
- /* T is an explicit (not partial) specialization. All
- containing classes must therefore also be explicitly
- specialized. */
+ if (!CLASSTYPE_TEMPLATE_INFO (ctype))
+ /* If CTYPE does not have template information of any
+ kind, then it is not a template, nor is it nested
+ within a template. */
+ break;
+ if (explicit_class_specialization_p (ctype))
break;
- if ((CLASSTYPE_USE_TEMPLATE (t) || CLASSTYPE_IS_TEMPLATE (t))
- && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (t)))
- template_count += 1;
+ if (PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (ctype)))
+ ++num_templates;
- t = TYPE_MAIN_DECL (t);
- t = DECL_CONTEXT (t);
+ ctype = TYPE_CONTEXT (ctype);
}
- return template_count;
+ return num_templates;
}
/* Do a simple sanity check on the template headers that precede the
@@ -14301,8 +14300,18 @@ tsubst_copy_and_build (tree t,
LAMBDA_EXPR_MUTABLE_P (r) = LAMBDA_EXPR_MUTABLE_P (t);
LAMBDA_EXPR_DISCRIMINATOR (r)
= (LAMBDA_EXPR_DISCRIMINATOR (t));
- LAMBDA_EXPR_EXTRA_SCOPE (r)
- = tsubst (LAMBDA_EXPR_EXTRA_SCOPE (t), args, complain, in_decl);
+ /* For a function scope, we want to use tsubst so that we don't
+ complain about referring to an auto function before its return
+ type has been deduced. Otherwise, we want to use tsubst_copy so
+ that we look up the existing field/parameter/variable rather
+ than build a new one. */
+ tree scope = LAMBDA_EXPR_EXTRA_SCOPE (t);
+ if (scope && TREE_CODE (scope) == FUNCTION_DECL)
+ scope = tsubst (LAMBDA_EXPR_EXTRA_SCOPE (t), args,
+ complain, in_decl);
+ else
+ scope = RECUR (scope);
+ LAMBDA_EXPR_EXTRA_SCOPE (r) = scope;
LAMBDA_EXPR_RETURN_TYPE (r)
= tsubst (LAMBDA_EXPR_RETURN_TYPE (t), args, complain, in_decl);
@@ -20485,7 +20494,7 @@ append_type_to_template_for_access_check_1 (tree t,
VEC_safe_push (qualified_typedef_usage_t, gc,
TI_TYPEDEFS_NEEDING_ACCESS_CHECKING (ti),
- &typedef_usage);
+ typedef_usage);
}
/* Append TYPE_DECL to the template TEMPL.
diff --git a/gcc/cp/rtti.c b/gcc/cp/rtti.c
index cb2c9c8dbbf..9c0f94acdf8 100644
--- a/gcc/cp/rtti.c
+++ b/gcc/cp/rtti.c
@@ -206,7 +206,7 @@ throw_bad_cast (void)
fn = push_throw_library_fn (fn, build_function_type_list (ptr_type_node,
NULL_TREE));
- return build_cxx_call (fn, CILK_CALL_NORMAL, 0, NULL);
+ return build_cxx_call (fn, CILK_CALL_NORMAL, 0, NULL, tf_warning_or_error);
}
/* Return an expression for "__cxa_bad_typeid()". The expression
@@ -225,7 +225,7 @@ throw_bad_typeid (void)
fn = push_throw_library_fn (fn, t);
}
- return build_cxx_call (fn, CILK_CALL_NORMAL, 0, NULL);
+ return build_cxx_call (fn, CILK_CALL_NORMAL, 0, NULL, tf_warning_or_error);
}
/* Return an lvalue expression whose type is "const std::type_info"
@@ -743,8 +743,8 @@ build_dynamic_cast_1 (tree type, tree expr, tsubst_flags_t complain)
pop_abi_namespace ();
dynamic_cast_node = dcast_fn;
}
- result = build_cxx_call (dcast_fn, CILK_CALL_NORMAL, 4, elems);
-
+ result = build_cxx_call (dcast_fn, CILK_CALL_NORMAL, 4, elems,
+ complain);
if (tc == REFERENCE_TYPE)
{
tree bad = throw_bad_cast ();
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index de1d08a72fb..4919f50d198 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -147,11 +147,8 @@ push_deferring_access_checks (deferring_kind deferring)
deferred_access_no_check++;
else
{
- deferred_access *ptr;
-
- ptr = VEC_safe_push (deferred_access, gc, deferred_access_stack, NULL);
- ptr->deferred_access_checks = NULL;
- ptr->deferring_access_checks_kind = deferring;
+ deferred_access e = {NULL, deferring};
+ VEC_safe_push (deferred_access, gc, deferred_access_stack, e);
}
}
@@ -245,7 +242,7 @@ pop_to_parent_deferring_access_checks (void)
}
/* Insert into parent's checks. */
VEC_safe_push (deferred_access_check, gc,
- ptr->deferred_access_checks, chk);
+ ptr->deferred_access_checks, *chk);
found:;
}
}
@@ -313,7 +310,6 @@ perform_or_defer_access_check (tree binfo, tree decl, tree diag_decl,
int i;
deferred_access *ptr;
deferred_access_check *chk;
- deferred_access_check *new_access;
/* Exit if we are in a context that no access checking is performed.
@@ -343,13 +339,9 @@ perform_or_defer_access_check (tree binfo, tree decl, tree diag_decl,
}
}
/* If not, record the check. */
- new_access =
- VEC_safe_push (deferred_access_check, gc,
- ptr->deferred_access_checks, 0);
- new_access->binfo = binfo;
- new_access->decl = decl;
- new_access->diag_decl = diag_decl;
- new_access->loc = input_location;
+ deferred_access_check new_access = {binfo, decl, diag_decl, input_location};
+ VEC_safe_push (deferred_access_check, gc, ptr->deferred_access_checks,
+ new_access);
return true;
}
@@ -5914,6 +5906,37 @@ check_constexpr_ctor_body (tree last, tree list)
return ok;
}
+/* VEC is a vector of constructor elements built up for the base and member
+ initializers of a constructor for TYPE. They need to be in increasing
+ offset order, which they might not be yet if TYPE has a primary base
+ which is not first in the base-clause. */
+
+static VEC(constructor_elt,gc) *
+sort_constexpr_mem_initializers (tree type, VEC(constructor_elt,gc) *vec)
+{
+ tree pri = CLASSTYPE_PRIMARY_BINFO (type);
+ constructor_elt elt;
+ int i;
+
+ if (pri == NULL_TREE
+ || pri == BINFO_BASE_BINFO (TYPE_BINFO (type), 0))
+ return vec;
+
+ /* Find the element for the primary base and move it to the beginning of
+ the vec. */
+ VEC(constructor_elt,gc) &v = *vec;
+ pri = BINFO_TYPE (pri);
+ for (i = 1; ; ++i)
+ if (TREE_TYPE (v[i].index) == pri)
+ break;
+
+ elt = v[i];
+ for (; i > 0; --i)
+ v[i] = v[i-1];
+ v[0] = elt;
+ return vec;
+}
+
/* Build compile-time evaluable representations of the member-initializer list
for a constexpr constructor. */
@@ -5983,6 +6006,7 @@ build_constexpr_constructor_member_initializers (tree type, tree body)
return body;
}
}
+ vec = sort_constexpr_mem_initializers (type, vec);
return build_constructor (type, vec);
}
else
@@ -6101,14 +6125,16 @@ cx_check_missing_mem_inits (tree fun, tree body, bool complain)
{
index = CONSTRUCTOR_ELT (body, i)->index;
/* Skip base and vtable inits. */
- if (TREE_CODE (index) != FIELD_DECL)
+ if (TREE_CODE (index) != FIELD_DECL
+ || DECL_ARTIFICIAL (index))
continue;
}
for (; field != index; field = DECL_CHAIN (field))
{
tree ftype;
if (TREE_CODE (field) != FIELD_DECL
- || (DECL_C_BIT_FIELD (field) && !DECL_NAME (field)))
+ || (DECL_C_BIT_FIELD (field) && !DECL_NAME (field))
+ || DECL_ARTIFICIAL (field))
continue;
ftype = strip_array_types (TREE_TYPE (field));
if (type_has_constexpr_default_constructor (ftype))
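
The new sort_constexpr_mem_initializers above matters when a class's primary base is not the first base in its base-clause: the front end produces the constructor elements in base-clause order, while constexpr evaluation wants them in increasing offset order. A hypothetical C++11 test case that would exercise this (not taken from this patch):

    /* B carries the vptr, so it is D's primary base and sits at offset 0,
       yet A precedes it in the base-clause; the initializer elements must
       therefore be reordered by offset.  */
    struct A { int i; constexpr A () : i (1) {} };
    struct B { constexpr B () {} virtual void f () {} };
    struct D : A, B { constexpr D () {} };
    constexpr D d;   /* needs the elements in increasing-offset order */
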
diff --git a/gcc/cp/typeck.c b/gcc/cp/typeck.c
index fce91140bdd..fbf2c56cec5 100644
--- a/gcc/cp/typeck.c
+++ b/gcc/cp/typeck.c
@@ -3378,7 +3378,7 @@ cp_build_function_call_vec (tree function, VEC(tree,gc) **params,
null parameters. */
check_function_arguments (fntype, nargs, argarray);
- ret = build_cxx_call (function, spawning, nargs, argarray);
+ ret = build_cxx_call (function, spawning, nargs, argarray, complain);
if (allocated != NULL)
release_tree_vector (allocated);
@@ -5739,7 +5739,8 @@ build_x_compound_expr_from_list (tree list, expr_list_kind exp,
/* Like build_x_compound_expr_from_list, but using a VEC. */
tree
-build_x_compound_expr_from_vec (VEC(tree,gc) *vec, const char *msg)
+build_x_compound_expr_from_vec (VEC(tree,gc) *vec, const char *msg,
+ tsubst_flags_t complain)
{
if (VEC_empty (tree, vec))
return NULL_TREE;
@@ -5752,14 +5753,19 @@ build_x_compound_expr_from_vec (VEC(tree,gc) *vec, const char *msg)
tree t;
if (msg != NULL)
- permerror (input_location,
- "%s expression list treated as compound expression",
- msg);
+ {
+ if (complain & tf_error)
+ permerror (input_location,
+ "%s expression list treated as compound expression",
+ msg);
+ else
+ return error_mark_node;
+ }
expr = VEC_index (tree, vec, 0);
for (ix = 1; VEC_iterate (tree, vec, ix, t); ++ix)
expr = build_x_compound_expr (EXPR_LOCATION (t), expr,
- t, tf_warning_or_error);
+ t, complain);
return expr;
}
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 6794313ab77..1a854b06a67 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -328,7 +328,7 @@ Objective-C and Objective-C++ Dialects}.
-feliminate-unused-debug-symbols -femit-class-debug-always @gol
-fenable-@var{kind}-@var{pass} @gol
-fenable-@var{kind}-@var{pass}=@var{range-list} @gol
--fdebug-types-section @gol
+-fdebug-types-section -fmem-report-wpa @gol
-fmem-report -fpre-ipa-mem-report -fpost-ipa-mem-report -fprofile-arcs @gol
-frandom-seed=@var{string} -fsched-verbose=@var{n} @gol
-fsel-sched-verbose -fsel-sched-dump-cfg -fsel-sched-pipelining-verbose @gol
@@ -5132,6 +5132,11 @@ pass when it finishes.
Makes the compiler print some statistics about permanent memory
allocation when it finishes.
+@item -fmem-report-wpa
+@opindex fmem-report-wpa
+Makes the compiler print some statistics about permanent memory
+allocation for the WPA phase only.
+
@item -fpre-ipa-mem-report
@opindex fpre-ipa-mem-report
@item -fpost-ipa-mem-report
@@ -8138,7 +8143,9 @@ requires the complete toolchain to be aware of LTO. It requires a linker with
linker plugin support for basic functionality. Additionally,
@command{nm}, @command{ar} and @command{ranlib}
need to support linker plugins to allow a full-featured build environment
-(capable of building static libraries etc).
+(capable of building static libraries etc.).  GCC provides the @command{gcc-ar},
+@command{gcc-nm} and @command{gcc-ranlib} wrappers to pass the right options
+to these tools.  With non-fat LTO, makefiles need to be modified to use them.
The default is @option{-ffat-lto-objects} but this default is intended to
change in future releases when linker plugin enabled environments become more
@@ -9400,6 +9407,11 @@ having a regular register file and accurate register pressure classes.
See @file{haifa-sched.c} in the GCC sources for more details.
The default choice depends on the target.
+
+@item max-slsr-cand-scan
+Set the maximum number of existing candidates that will be considered when
+seeking a basis for a new straight-line strength reduction candidate.
+
@end table
@end table
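
The wrapper note added above is easiest to see in a concrete build. A minimal sketch with hypothetical file names, assuming a linker-plugin-capable toolchain:

    gcc -O2 -flto -fno-fat-lto-objects -c a.c b.c
    gcc-ar rcs libab.a a.o b.o     # plain ar lacks the plugin to index slim LTO objects
    gcc -O2 -flto main.c libab.a
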
diff --git a/gcc/doc/sourcebuild.texi b/gcc/doc/sourcebuild.texi
index 19440a8c566..705ee902a87 100644
--- a/gcc/doc/sourcebuild.texi
+++ b/gcc/doc/sourcebuild.texi
@@ -1532,11 +1532,19 @@ ARM target supports generating NEON instructions.
@item arm_neon_hw
Test system supports executing NEON instructions.
+@item arm_neonv2_hw
+Test system supports executing NEON v2 instructions.
+
@item arm_neon_ok
@anchor{arm_neon_ok}
ARM Target supports @code{-mfpu=neon -mfloat-abi=softfp} or compatible
options. Some multilibs may be incompatible with these options.
+@item arm_neonv2_ok
+@anchor{arm_neonv2_ok}
+ARM Target supports @code{-mfpu=neon-vfpv4 -mfloat-abi=softfp} or compatible
+options. Some multilibs may be incompatible with these options.
+
@item arm_neon_fp16_ok
@anchor{arm_neon_fp16_ok}
ARM Target supports @code{-mfpu=neon-fp16 -mfloat-abi=softfp} or compatible
diff --git a/gcc/dojump.c b/gcc/dojump.c
index ea6620d64b7..66d3b04bd03 100644
--- a/gcc/dojump.c
+++ b/gcc/dojump.c
@@ -165,8 +165,7 @@ prefer_and_bit_test (enum machine_mode mode, int bitnum)
/* Fill in the integers. */
XEXP (and_test, 1)
- = immed_double_int_const (double_int_setbit (double_int_zero, bitnum),
- mode);
+ = immed_double_int_const (double_int_zero.set_bit (bitnum), mode);
XEXP (XEXP (shift_test, 0), 1) = GEN_INT (bitnum);
speed_p = optimize_insn_for_speed_p ();
diff --git a/gcc/double-int.c b/gcc/double-int.c
index 3a22d15f08c..f3d5e8b3dde 100644
--- a/gcc/double-int.c
+++ b/gcc/double-int.c
@@ -606,7 +606,6 @@ div_and_round_double (unsigned code, int uns,
return overflow;
}
-
/* Returns mask for PREC bits. */
double_int
@@ -754,7 +753,7 @@ double_int::operator * (double_int b) const
*OVERFLOW is set to nonzero. */
double_int
-double_int::mul_with_sign (double_int b, bool unsigned_p, int *overflow) const
+double_int::mul_with_sign (double_int b, bool unsigned_p, bool *overflow) const
{
const double_int &a = *this;
double_int ret;
@@ -774,6 +773,19 @@ double_int::operator + (double_int b) const
return ret;
}
+/* Returns A + B. If the operation overflows according to UNSIGNED_P,
+ *OVERFLOW is set to nonzero. */
+
+double_int
+double_int::add_with_sign (double_int b, bool unsigned_p, bool *overflow) const
+{
+ const double_int &a = *this;
+ double_int ret;
+ *overflow = add_double_with_sign (a.low, a.high, b.low, b.high,
+ &ret.low, &ret.high, unsigned_p);
+ return ret;
+}
+
/* Returns A - B. */
double_int
@@ -1104,6 +1116,20 @@ double_int::ult (double_int b) const
return false;
}
+/* Compares two unsigned values A and B for less-than or equal-to. */
+
+bool
+double_int::ule (double_int b) const
+{
+ if ((unsigned HOST_WIDE_INT) high < (unsigned HOST_WIDE_INT) b.high)
+ return true;
+ if ((unsigned HOST_WIDE_INT) high > (unsigned HOST_WIDE_INT) b.high)
+ return false;
+ if (low <= b.low)
+ return true;
+ return false;
+}
+
/* Compares two unsigned values A and B for greater-than. */
bool
@@ -1132,6 +1158,20 @@ double_int::slt (double_int b) const
return false;
}
+/* Compares two signed values A and B for less-than or equal-to. */
+
+bool
+double_int::sle (double_int b) const
+{
+ if (high < b.high)
+ return true;
+ if (high > b.high)
+ return false;
+ if (low <= b.low)
+ return true;
+ return false;
+}
+
/* Compares two signed values A and B for greater-than. */
bool
diff --git a/gcc/double-int.h b/gcc/double-int.h
index 3d9aa2caa9d..bc7aca1896a 100644
--- a/gcc/double-int.h
+++ b/gcc/double-int.h
@@ -50,9 +50,8 @@ along with GCC; see the file COPYING3. If not see
numbers with precision higher than HOST_WIDE_INT). It might be less
confusing to have them both signed or both unsigned. */
-typedef struct double_int
+struct double_int
{
-public:
/* Normally, we would define constructors to create instances.
Two things prevent us from doing so.
First, defining a constructor makes the class non-POD in C++03,
@@ -78,6 +77,9 @@ public:
double_int &operator *= (double_int);
double_int &operator += (double_int);
double_int &operator -= (double_int);
+ double_int &operator &= (double_int);
+ double_int &operator ^= (double_int);
+ double_int &operator |= (double_int);
/* The following functions are non-mutating operations. */
@@ -104,17 +106,18 @@ public:
/* Arithmetic operation functions. */
double_int set_bit (unsigned) const;
- double_int mul_with_sign (double_int, bool, int *) const;
+ double_int mul_with_sign (double_int, bool unsigned_p, bool *overflow) const;
+ double_int add_with_sign (double_int, bool unsigned_p, bool *overflow) const;
- double_int operator * (double_int b) const;
- double_int operator + (double_int b) const;
- double_int operator - (double_int b) const;
+ double_int operator * (double_int) const;
+ double_int operator + (double_int) const;
+ double_int operator - (double_int) const;
double_int operator - () const;
double_int operator ~ () const;
- double_int operator & (double_int b) const;
- double_int operator | (double_int b) const;
- double_int operator ^ (double_int b) const;
- double_int and_not (double_int b) const;
+ double_int operator & (double_int) const;
+ double_int operator | (double_int) const;
+ double_int operator ^ (double_int) const;
+ double_int and_not (double_int) const;
double_int lshift (HOST_WIDE_INT count, unsigned int prec, bool arith) const;
double_int rshift (HOST_WIDE_INT count, unsigned int prec, bool arith) const;
@@ -156,8 +159,10 @@ public:
int scmp (double_int b) const;
bool ult (double_int b) const;
+ bool ule (double_int b) const;
bool ugt (double_int b) const;
bool slt (double_int b) const;
+ bool sle (double_int b) const;
bool sgt (double_int b) const;
double_int max (double_int b, bool uns);
@@ -176,7 +181,7 @@ public:
unsigned HOST_WIDE_INT low;
HOST_WIDE_INT high;
-} double_int;
+};
#define HOST_BITS_PER_DOUBLE_INT (2 * HOST_BITS_PER_WIDE_INT)
@@ -185,8 +190,8 @@ public:
/* Constructs double_int from integer CST. The bits over the precision of
HOST_WIDE_INT are filled with the sign bit. */
-inline
-double_int double_int::from_shwi (HOST_WIDE_INT cst)
+inline double_int
+double_int::from_shwi (HOST_WIDE_INT cst)
{
double_int r;
r.low = (unsigned HOST_WIDE_INT) cst;
@@ -215,8 +220,8 @@ shwi_to_double_int (HOST_WIDE_INT cst)
/* Constructs double_int from unsigned integer CST. The bits over the
precision of HOST_WIDE_INT are filled with zeros. */
-inline
-double_int double_int::from_uhwi (unsigned HOST_WIDE_INT cst)
+inline double_int
+double_int::from_uhwi (unsigned HOST_WIDE_INT cst)
{
double_int r;
r.low = cst;
@@ -266,6 +271,27 @@ double_int::operator -= (double_int b)
return *this;
}
+inline double_int &
+double_int::operator &= (double_int b)
+{
+ *this = *this & b;
+ return *this;
+}
+
+inline double_int &
+double_int::operator ^= (double_int b)
+{
+ *this = *this ^ b;
+ return *this;
+}
+
+inline double_int &
+double_int::operator |= (double_int b)
+{
+ *this = *this | b;
+ return *this;
+}
+
/* Returns value of CST as a signed number. CST must satisfy
double_int::fits_signed. */
@@ -346,7 +372,10 @@ inline double_int
double_int_mul_with_sign (double_int a, double_int b,
bool unsigned_p, int *overflow)
{
- return a.mul_with_sign (b, unsigned_p, overflow);
+ bool ovf;
+ double_int ret = a.mul_with_sign (b, unsigned_p, &ovf);
+ *overflow = ovf;
+ return ret;
}
/* FIXME(crowl): Remove after converting callers. */
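
The double-int changes in this merge all follow one pattern: the old free functions (double_int_add, double_int_mul_with_sign, and friends) become member functions and overloaded operators on double_int, and the overflow out-parameter switches from int * to bool *. A rough self-contained sketch of the new shape, using a hypothetical two-word stand-in built on __int128 rather than GCC's real low/high representation (requires a compiler with __int128; not GCC's actual implementation):

    #include <cassert>

    struct dint
    {
      unsigned __int128 v;   /* the real type keeps a low/high pair */

      dint operator + (dint b) const { dint r; r.v = v + b.v; return r; }
      dint operator * (dint b) const { dint r; r.v = v * b.v; return r; }

      dint add_with_sign (dint b, bool unsigned_p, bool *overflow) const
      {
        dint r = *this + b;
        if (unsigned_p)
          *overflow = r.v < v;          /* wrapped past the top */
        else
          {
            bool sa = (__int128) v < 0;
            bool sb = (__int128) b.v < 0;
            /* Signed overflow: like signs in, different sign out.  */
            *overflow = sa == sb && ((__int128) r.v < 0) != sa;
          }
        return r;
      }
    };

    int main ()
    {
      dint a, one;
      a.v = ~(unsigned __int128) 0 >> 1;   /* maximum signed value */
      one.v = 1;

      bool ovf;
      a.add_with_sign (one, false, &ovf);
      assert (ovf);                        /* signed: overflows */
      a.add_with_sign (one, true, &ovf);
      assert (!ovf);                       /* unsigned: still fits */
      return 0;
    }

Compatibility shims such as double_int_mul_with_sign above keep the old signatures working until the remaining callers are converted.
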
diff --git a/gcc/dwarf2cfi.c b/gcc/dwarf2cfi.c
index 17027857bab..355c74698aa 100644
--- a/gcc/dwarf2cfi.c
+++ b/gcc/dwarf2cfi.c
@@ -941,10 +941,8 @@ record_reg_saved_in_reg (rtx dest, rtx src)
if (dest == NULL)
return;
- elt = VEC_safe_push (reg_saved_in_data, heap,
- cur_trace->regs_saved_in_regs, NULL);
- elt->orig_reg = src;
- elt->saved_in_reg = dest;
+ reg_saved_in_data e = {src, dest};
+ VEC_safe_push (reg_saved_in_data, heap, cur_trace->regs_saved_in_regs, e);
}
/* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
@@ -954,20 +952,19 @@ static void
queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
{
queued_reg_save *q;
+ queued_reg_save e = {reg, sreg, offset};
size_t i;
/* Duplicates waste space, but it's also necessary to remove them
for correctness, since the queue gets output in reverse order. */
FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, i, q)
if (compare_reg_or_pc (q->reg, reg))
- goto found;
-
- q = VEC_safe_push (queued_reg_save, heap, queued_reg_saves, NULL);
+ {
+ *q = e;
+ return;
+ }
- found:
- q->reg = reg;
- q->saved_reg = sreg;
- q->cfa_offset = offset;
+ VEC_safe_push (queued_reg_save, heap, queued_reg_saves, e);
}
/* Output all the entries in QUEUED_REG_SAVES. */
@@ -2713,23 +2710,23 @@ static void
create_pseudo_cfg (void)
{
bool saw_barrier, switch_sections;
- dw_trace_info *ti;
+ dw_trace_info ti;
rtx insn;
unsigned i;
/* The first trace begins at the start of the function,
and begins with the CIE row state. */
trace_info = VEC_alloc (dw_trace_info, heap, 16);
- ti = VEC_quick_push (dw_trace_info, trace_info, NULL);
+ memset (&ti, 0, sizeof (ti));
+ ti.head = get_insns ();
+ ti.beg_row = cie_cfi_row;
+ ti.cfa_store = cie_cfi_row->cfa;
+ ti.cfa_temp.reg = INVALID_REGNUM;
+ VEC_quick_push (dw_trace_info, trace_info, ti);
- memset (ti, 0, sizeof (*ti));
- ti->head = get_insns ();
- ti->beg_row = cie_cfi_row;
- ti->cfa_store = cie_cfi_row->cfa;
- ti->cfa_temp.reg = INVALID_REGNUM;
if (cie_return_save)
VEC_safe_push (reg_saved_in_data, heap,
- ti->regs_saved_in_regs, cie_return_save);
+ ti.regs_saved_in_regs, *cie_return_save);
/* Walk all the insns, collecting start of trace locations. */
saw_barrier = false;
@@ -2751,11 +2748,11 @@ create_pseudo_cfg (void)
else if (save_point_p (insn)
&& (LABEL_P (insn) || !saw_barrier))
{
- ti = VEC_safe_push (dw_trace_info, heap, trace_info, NULL);
- memset (ti, 0, sizeof (*ti));
- ti->head = insn;
- ti->switch_sections = switch_sections;
- ti->id = VEC_length (dw_trace_info, trace_info) - 1;
+ memset (&ti, 0, sizeof (ti));
+ ti.head = insn;
+ ti.switch_sections = switch_sections;
+ ti.id = VEC_length (dw_trace_info, trace_info) - 1;
+ VEC_safe_push (dw_trace_info, heap, trace_info, ti);
saw_barrier = false;
switch_sections = false;
@@ -2766,19 +2763,20 @@ create_pseudo_cfg (void)
avoiding stale pointer problems due to reallocation. */
trace_index = htab_create (VEC_length (dw_trace_info, trace_info),
dw_trace_info_hash, dw_trace_info_eq, NULL);
- FOR_EACH_VEC_ELT (dw_trace_info, trace_info, i, ti)
+ dw_trace_info *tp;
+ FOR_EACH_VEC_ELT (dw_trace_info, trace_info, i, tp)
{
void **slot;
if (dump_file)
fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", i,
- rtx_name[(int) GET_CODE (ti->head)], INSN_UID (ti->head),
- ti->switch_sections ? " (section switch)" : "");
+ rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
+ tp->switch_sections ? " (section switch)" : "");
- slot = htab_find_slot_with_hash (trace_index, ti,
- INSN_UID (ti->head), INSERT);
+ slot = htab_find_slot_with_hash (trace_index, tp,
+ INSN_UID (tp->head), INSERT);
gcc_assert (*slot == NULL);
- *slot = (void *) ti;
+ *slot = (void *) tp;
}
}
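
The create_pseudo_cfg rewrite above is the motivating case for the by-value push: the old code kept the pointer returned by VEC_safe_push and continued to use it while further pushes could reallocate the vector (the comment about stale pointer problems describes exactly this). A tiny stand-alone illustration of the hazard, using std::vector as a stand-in:

    #include <vector>

    int main ()
    {
      std::vector<int> v;
      v.push_back (1);
      int *p = &v.back ();       /* pointer into the vector's storage */
      for (int i = 0; i < 1000; ++i)
        v.push_back (i);         /* growth may reallocate: p now dangles */
      /* Dereferencing p here would be undefined behavior; take addresses
         only after all pushes are done, as the rewritten code does.  */
      return 0;
    }
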
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index 38dca874439..27a11fbc0a5 100644
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -3482,7 +3482,7 @@ add_dwarf_attr (dw_die_ref die, dw_attr_ref attr)
if (die->die_attr == NULL)
die->die_attr = VEC_alloc (dw_attr_node, gc, 1);
- VEC_safe_push (dw_attr_node, gc, die->die_attr, attr);
+ VEC_safe_push (dw_attr_node, gc, die->die_attr, *attr);
}
static inline enum dw_val_class
@@ -8216,7 +8216,7 @@ add_pubname_string (const char *str, dw_die_ref die)
e.die = die;
e.name = xstrdup (str);
- VEC_safe_push (pubname_entry, gc, pubname_table, &e);
+ VEC_safe_push (pubname_entry, gc, pubname_table, e);
}
static void
@@ -8250,7 +8250,7 @@ add_enumerator_pubname (const char *scope_name, dw_die_ref die)
gcc_assert (scope_name);
e.name = concat (scope_name, get_AT_string (die, DW_AT_name), NULL);
e.die = die;
- VEC_safe_push (pubname_entry, gc, pubname_table, &e);
+ VEC_safe_push (pubname_entry, gc, pubname_table, e);
}
/* Add a new entry to .debug_pubtypes if appropriate. */
@@ -8293,7 +8293,7 @@ add_pubtype (tree decl, dw_die_ref die)
{
e.die = die;
e.name = concat (scope_name, name, NULL);
- VEC_safe_push (pubname_entry, gc, pubtype_table, &e);
+ VEC_safe_push (pubname_entry, gc, pubtype_table, e);
}
/* Although it might be more consistent to add the pubinfo for the
@@ -9330,13 +9330,13 @@ static inline double_int
double_int_type_size_in_bits (const_tree type)
{
if (TREE_CODE (type) == ERROR_MARK)
- return uhwi_to_double_int (BITS_PER_WORD);
+ return double_int::from_uhwi (BITS_PER_WORD);
else if (TYPE_SIZE (type) == NULL_TREE)
return double_int_zero;
else if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
return tree_to_double_int (TYPE_SIZE (type));
else
- return uhwi_to_double_int (TYPE_ALIGN (type));
+ return double_int::from_uhwi (TYPE_ALIGN (type));
}
/* Given a pointer to a tree node for a subrange type, return a pointer
@@ -11756,7 +11756,7 @@ mem_loc_descriptor (rtx rtl, enum machine_mode mode,
mem_loc_result->dw_loc_oprnd2.val_class
= dw_val_class_const_double;
mem_loc_result->dw_loc_oprnd2.v.val_double
- = shwi_to_double_int (INTVAL (rtl));
+ = double_int::from_shwi (INTVAL (rtl));
}
}
break;
@@ -12315,7 +12315,7 @@ loc_descriptor (rtx rtl, enum machine_mode mode,
double_int val = rtx_to_double_int (elt);
if (elt_size <= sizeof (HOST_WIDE_INT))
- insert_int (double_int_to_shwi (val), elt_size, p);
+ insert_int (val.to_shwi (), elt_size, p);
else
{
gcc_assert (elt_size == 2 * sizeof (HOST_WIDE_INT));
@@ -13644,11 +13644,11 @@ simple_decl_align_in_bits (const_tree decl)
static inline double_int
round_up_to_align (double_int t, unsigned int align)
{
- double_int alignd = uhwi_to_double_int (align);
- t = double_int_add (t, alignd);
- t = double_int_add (t, double_int_minus_one);
- t = double_int_div (t, alignd, true, TRUNC_DIV_EXPR);
- t = double_int_mul (t, alignd);
+ double_int alignd = double_int::from_uhwi (align);
+ t += alignd;
+ t += double_int_minus_one;
+ t = t.div (alignd, true, TRUNC_DIV_EXPR);
+ t *= alignd;
return t;
}
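
The rewritten round_up_to_align is the usual round-up-to-a-multiple identity, ((t + align - 1) / align) * align, just spelled with the new double_int operators. The same computation on plain integers, as a quick check:

    #include <cassert>

    static unsigned round_up_to_align (unsigned t, unsigned align)
    {
      return (t + align - 1) / align * align;
    }

    int main ()
    {
      assert (round_up_to_align (13, 8) == 16);   /* rounds up */
      assert (round_up_to_align (16, 8) == 16);   /* already aligned */
      return 0;
    }
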
@@ -13755,23 +13755,21 @@ field_byte_offset (const_tree decl)
/* Figure out the bit-distance from the start of the structure to
the "deepest" bit of the bit-field. */
- deepest_bitpos = double_int_add (bitpos_int, field_size_in_bits);
+ deepest_bitpos = bitpos_int + field_size_in_bits;
/* This is the tricky part. Use some fancy footwork to deduce
where the lowest addressed bit of the containing object must
be. */
- object_offset_in_bits
- = double_int_sub (deepest_bitpos, type_size_in_bits);
+ object_offset_in_bits = deepest_bitpos - type_size_in_bits;
/* Round up to type_align by default. This works best for
bitfields. */
object_offset_in_bits
= round_up_to_align (object_offset_in_bits, type_align_in_bits);
- if (double_int_ucmp (object_offset_in_bits, bitpos_int) > 0)
+ if (object_offset_in_bits.ugt (bitpos_int))
{
- object_offset_in_bits
- = double_int_sub (deepest_bitpos, type_size_in_bits);
+ object_offset_in_bits = deepest_bitpos - type_size_in_bits;
/* Round up to decl_align instead. */
object_offset_in_bits
@@ -13783,10 +13781,9 @@ field_byte_offset (const_tree decl)
object_offset_in_bits = bitpos_int;
object_offset_in_bytes
- = double_int_div (object_offset_in_bits,
- uhwi_to_double_int (BITS_PER_UNIT), true,
- TRUNC_DIV_EXPR);
- return double_int_to_shwi (object_offset_in_bytes);
+ = object_offset_in_bits.div (double_int::from_uhwi (BITS_PER_UNIT),
+ true, TRUNC_DIV_EXPR);
+ return object_offset_in_bytes.to_shwi ();
}
/* The following routines define various Dwarf attributes and any data
@@ -14062,7 +14059,7 @@ add_const_value_attribute (dw_die_ref die, rtx rtl)
double_int val = rtx_to_double_int (elt);
if (elt_size <= sizeof (HOST_WIDE_INT))
- insert_int (double_int_to_shwi (val), elt_size, p);
+ insert_int (val.to_shwi (), elt_size, p);
else
{
gcc_assert (elt_size == 2 * sizeof (HOST_WIDE_INT));
@@ -14672,7 +14669,7 @@ defer_location (tree variable, dw_die_ref die)
deferred_locations entry;
entry.variable = variable;
entry.die = die;
- VEC_safe_push (deferred_locations, gc, deferred_locations_list, &entry);
+ VEC_safe_push (deferred_locations, gc, deferred_locations_list, entry);
}
/* Helper function for tree_add_const_value_attribute. Natively encode
@@ -16774,7 +16771,6 @@ dwarf2out_abstract_function (tree decl)
/* Pretend we've just finished compiling this function. */
save_fn = current_function_decl;
current_function_decl = decl;
- push_cfun (DECL_STRUCT_FUNCTION (decl));
was_abstract = DECL_ABSTRACT (decl);
set_decl_abstract_flags (decl, 1);
@@ -16788,7 +16784,6 @@ dwarf2out_abstract_function (tree decl)
call_arg_locations = old_call_arg_locations;
call_site_count = old_call_site_count;
tail_call_site_count = old_tail_call_site_count;
- pop_cfun ();
}
/* Helper function of premark_used_types() which gets called through
@@ -16847,10 +16842,10 @@ premark_types_used_by_global_vars_helper (void **slot,
/* Mark all members of used_types_hash as perennial. */
static void
-premark_used_types (void)
+premark_used_types (struct function *fun)
{
- if (cfun && cfun->used_types_hash)
- htab_traverse (cfun->used_types_hash, premark_used_types_helper, NULL);
+ if (fun && fun->used_types_hash)
+ htab_traverse (fun->used_types_hash, premark_used_types_helper, NULL);
}
/* Mark all members of types_used_by_vars_entry as perennial. */
@@ -16913,7 +16908,7 @@ gen_subprogram_die (tree decl, dw_die_ref context_die)
int declaration = (current_function_decl != decl
|| class_or_namespace_scope_p (context_die));
- premark_used_types ();
+ premark_used_types (DECL_STRUCT_FUNCTION (decl));
/* It is possible to have both DECL_ABSTRACT and DECLARATION be true if we
started to generate the abstract instance of an inline, decided to output
@@ -17076,13 +17071,15 @@ gen_subprogram_die (tree decl, dw_die_ref context_die)
else if (!DECL_EXTERNAL (decl))
{
HOST_WIDE_INT cfa_fb_offset;
+ struct function *fun = DECL_STRUCT_FUNCTION (decl);
if (!old_die || !get_AT (old_die, DW_AT_inline))
equate_decl_number_to_die (decl, subr_die);
+ gcc_checking_assert (fun);
if (!flag_reorder_blocks_and_partition)
{
- dw_fde_ref fde = cfun->fde;
+ dw_fde_ref fde = fun->fde;
if (fde->dw_fde_begin)
{
/* We have already generated the labels. */
@@ -17128,7 +17125,7 @@ gen_subprogram_die (tree decl, dw_die_ref context_die)
else
{
/* Generate pubnames entries for the split function code ranges. */
- dw_fde_ref fde = cfun->fde;
+ dw_fde_ref fde = fun->fde;
if (fde->dw_fde_second_begin)
{
@@ -17228,9 +17225,9 @@ gen_subprogram_die (tree decl, dw_die_ref context_die)
by this displacement. */
compute_frame_pointer_to_fb_displacement (cfa_fb_offset);
- if (cfun->static_chain_decl)
+ if (fun->static_chain_decl)
add_AT_location_description (subr_die, DW_AT_static_link,
- loc_list_from_tree (cfun->static_chain_decl, 2));
+ loc_list_from_tree (fun->static_chain_decl, 2));
}
/* Generate child dies for template parameters. */
@@ -19890,7 +19887,7 @@ append_entry_to_tmpl_value_parm_die_table (dw_die_ref die, tree arg)
entry.arg = arg;
VEC_safe_push (die_arg_entry, gc,
tmpl_value_parm_die_table,
- &entry);
+ entry);
}
/* Return TRUE if T is an instance of generic type, FALSE
@@ -20282,7 +20279,7 @@ push_dw_line_info_entry (dw_line_info_table *table,
dw_line_info_entry e;
e.opcode = opcode;
e.val = val;
- VEC_safe_push (dw_line_info_entry, gc, table->entries, &e);
+ VEC_safe_push (dw_line_info_entry, gc, table->entries, e);
}
/* Output a label to mark the beginning of a source code line entry
@@ -20402,7 +20399,7 @@ dwarf2out_start_source_file (unsigned int lineno, const char *filename)
e.code = DW_MACINFO_start_file;
e.lineno = lineno;
e.info = ggc_strdup (filename);
- VEC_safe_push (macinfo_entry, gc, macinfo_table, &e);
+ VEC_safe_push (macinfo_entry, gc, macinfo_table, e);
}
}
@@ -20421,7 +20418,7 @@ dwarf2out_end_source_file (unsigned int lineno ATTRIBUTE_UNUSED)
e.code = DW_MACINFO_end_file;
e.lineno = lineno;
e.info = NULL;
- VEC_safe_push (macinfo_entry, gc, macinfo_table, &e);
+ VEC_safe_push (macinfo_entry, gc, macinfo_table, e);
}
}
@@ -20443,12 +20440,12 @@ dwarf2out_define (unsigned int lineno ATTRIBUTE_UNUSED,
e.code = 0;
e.lineno = 0;
e.info = NULL;
- VEC_safe_push (macinfo_entry, gc, macinfo_table, &e);
+ VEC_safe_push (macinfo_entry, gc, macinfo_table, e);
}
e.code = DW_MACINFO_define;
e.lineno = lineno;
e.info = ggc_strdup (buffer);
- VEC_safe_push (macinfo_entry, gc, macinfo_table, &e);
+ VEC_safe_push (macinfo_entry, gc, macinfo_table, e);
}
}
@@ -20470,12 +20467,12 @@ dwarf2out_undef (unsigned int lineno ATTRIBUTE_UNUSED,
e.code = 0;
e.lineno = 0;
e.info = NULL;
- VEC_safe_push (macinfo_entry, gc, macinfo_table, &e);
+ VEC_safe_push (macinfo_entry, gc, macinfo_table, e);
}
e.code = DW_MACINFO_undef;
e.lineno = lineno;
e.info = ggc_strdup (buffer);
- VEC_safe_push (macinfo_entry, gc, macinfo_table, &e);
+ VEC_safe_push (macinfo_entry, gc, macinfo_table, e);
}
}
@@ -20751,7 +20748,7 @@ output_macinfo (void)
switch (ref->code)
{
case DW_MACINFO_start_file:
- VEC_safe_push (macinfo_entry, gc, files, ref);
+ VEC_safe_push (macinfo_entry, gc, files, *ref);
break;
case DW_MACINFO_end_file:
if (!VEC_empty (macinfo_entry, files))
@@ -21390,7 +21387,7 @@ move_linkage_attr (dw_die_ref die)
if (ix != VEC_length (dw_attr_node, die->die_attr) - 1)
{
VEC_pop (dw_attr_node, die->die_attr);
- VEC_quick_insert (dw_attr_node, die->die_attr, ix, &linkage);
+ VEC_quick_insert (dw_attr_node, die->die_attr, ix, linkage);
}
}
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index 0edd871b685..074e89ea534 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -490,7 +490,7 @@ rtx_to_double_int (const_rtx cst)
double_int r;
if (CONST_INT_P (cst))
- r = shwi_to_double_int (INTVAL (cst));
+ r = double_int::from_shwi (INTVAL (cst));
else if (CONST_DOUBLE_AS_INT_P (cst))
{
r.low = CONST_DOUBLE_LOW (cst);
@@ -6004,7 +6004,7 @@ curr_insn_locator (void)
{
curr_rtl_loc++;
VEC_safe_push (int, heap, locations_locators_locs, curr_rtl_loc);
- VEC_safe_push (location_t, heap, locations_locators_vals, &curr_location);
+ VEC_safe_push (location_t, heap, locations_locators_vals, curr_location);
last_location = curr_location;
}
return curr_rtl_loc;
diff --git a/gcc/except.c b/gcc/except.c
index ae5a11fdaa0..801718de195 100644
--- a/gcc/except.c
+++ b/gcc/except.c
@@ -304,8 +304,8 @@ init_eh_for_function (void)
cfun->eh = ggc_alloc_cleared_eh_status ();
/* Make sure zero'th entries are used. */
- VEC_safe_push (eh_region, gc, cfun->eh->region_array, (eh_region) NULL);
- VEC_safe_push (eh_landing_pad, gc, cfun->eh->lp_array, (eh_landing_pad) NULL);
+ VEC_safe_push (eh_region, gc, cfun->eh->region_array, NULL);
+ VEC_safe_push (eh_landing_pad, gc, cfun->eh->lp_array, NULL);
}
/* Routines to generate the exception tree somewhat directly.
@@ -806,7 +806,7 @@ add_ehspec_entry (htab_t ehspec_hash, htab_t ttypes_hash, tree list)
if (targetm.arm_eabi_unwinder)
VEC_safe_push (tree, gc, cfun->eh->ehspec_data.arm_eabi, NULL_TREE);
else
- VEC_safe_push (uchar, gc, cfun->eh->ehspec_data.other, (uchar) 0);
+ VEC_safe_push (uchar, gc, cfun->eh->ehspec_data.other, 0);
}
return n->filter;
@@ -1361,17 +1361,9 @@ sjlj_emit_dispatch_table (rtx dispatch_label, int num_dispatch)
if (num_dispatch > 1)
{
- gimple switch_stmt;
- tree default_label = create_artificial_label (UNKNOWN_LOCATION);
rtx disp = adjust_address (fc, TYPE_MODE (integer_type_node),
sjlj_fc_call_site_ofs);
- switch_stmt = gimple_build_switch (make_tree (integer_type_node, disp),
- build_case_label (NULL, NULL,
- default_label),
- dispatch_labels);
- expand_case (switch_stmt);
- emit_label (label_rtx (default_label));
- expand_builtin_trap ();
+ expand_sjlj_dispatch_table (disp, dispatch_labels);
}
seq = get_insns ();
diff --git a/gcc/expmed.c b/gcc/expmed.c
index 6c94a90fb84..24a15770a47 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -1985,11 +1985,11 @@ mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
{
double_int mask;
- mask = double_int_mask (bitsize);
- mask = double_int_lshift (mask, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
+ mask = double_int::mask (bitsize);
+ mask = mask.llshift (bitpos, HOST_BITS_PER_DOUBLE_INT);
if (complement)
- mask = double_int_not (mask);
+ mask = ~mask;
return immed_double_int_const (mask, mode);
}
@@ -2002,8 +2002,8 @@ lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
{
double_int val;
- val = double_int_zext (uhwi_to_double_int (INTVAL (value)), bitsize);
- val = double_int_lshift (val, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
+ val = double_int::from_uhwi (INTVAL (value)).zext (bitsize);
+ val = val.llshift (bitpos, HOST_BITS_PER_DOUBLE_INT);
return immed_double_int_const (val, mode);
}
diff --git a/gcc/expr.c b/gcc/expr.c
index b9dd2b6c7b1..de03d8e0a65 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -727,11 +727,11 @@ convert_modes (enum machine_mode mode, enum machine_mode oldmode, rtx x, int uns
&& GET_MODE_BITSIZE (mode) == HOST_BITS_PER_DOUBLE_INT
&& CONST_INT_P (x) && INTVAL (x) < 0)
{
- double_int val = uhwi_to_double_int (INTVAL (x));
+ double_int val = double_int::from_uhwi (INTVAL (x));
/* We need to zero extend VAL. */
if (oldmode != VOIDmode)
- val = double_int_zext (val, GET_MODE_BITSIZE (oldmode));
+ val = val.zext (GET_MODE_BITSIZE (oldmode));
return immed_double_int_const (val, mode);
}
@@ -6557,9 +6557,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
switch (TREE_CODE (exp))
{
case BIT_FIELD_REF:
- bit_offset
- = double_int_add (bit_offset,
- tree_to_double_int (TREE_OPERAND (exp, 2)));
+ bit_offset += tree_to_double_int (TREE_OPERAND (exp, 2));
break;
case COMPONENT_REF:
@@ -6574,9 +6572,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
break;
offset = size_binop (PLUS_EXPR, offset, this_offset);
- bit_offset = double_int_add (bit_offset,
- tree_to_double_int
- (DECL_FIELD_BIT_OFFSET (field)));
+ bit_offset += tree_to_double_int (DECL_FIELD_BIT_OFFSET (field));
/* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
}
@@ -6608,8 +6604,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
break;
case IMAGPART_EXPR:
- bit_offset = double_int_add (bit_offset,
- uhwi_to_double_int (*pbitsize));
+ bit_offset += double_int::from_uhwi (*pbitsize);
break;
case VIEW_CONVERT_EXPR:
@@ -6631,11 +6626,10 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
if (!integer_zerop (off))
{
double_int boff, coff = mem_ref_offset (exp);
- boff = double_int_lshift (coff,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
- bit_offset = double_int_add (bit_offset, boff);
+ boff = coff.alshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT);
+ bit_offset += boff;
}
exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
}
@@ -6659,15 +6653,13 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
if (TREE_CODE (offset) == INTEGER_CST)
{
double_int tem = tree_to_double_int (offset);
- tem = double_int_sext (tem, TYPE_PRECISION (sizetype));
- tem = double_int_lshift (tem,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
- tem = double_int_add (tem, bit_offset);
- if (double_int_fits_in_shwi_p (tem))
- {
- *pbitpos = double_int_to_shwi (tem);
+ tem = tem.sext (TYPE_PRECISION (sizetype));
+ tem = tem.alshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT);
+ tem += bit_offset;
+ if (tem.fits_shwi ())
+ {
+ *pbitpos = tem.to_shwi ();
*poffset = offset = NULL_TREE;
}
}
@@ -6676,24 +6668,23 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
if (offset)
{
/* Avoid returning a negative bitpos as this may wreak havoc later. */
- if (double_int_negative_p (bit_offset))
+ if (bit_offset.is_negative ())
{
double_int mask
- = double_int_mask (BITS_PER_UNIT == 8
+ = double_int::mask (BITS_PER_UNIT == 8
? 3 : exact_log2 (BITS_PER_UNIT));
- double_int tem = double_int_and_not (bit_offset, mask);
+ double_int tem = bit_offset.and_not (mask);
/* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
Subtract it from BIT_OFFSET and add it (scaled) to OFFSET. */
- bit_offset = double_int_sub (bit_offset, tem);
- tem = double_int_rshift (tem,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
+ bit_offset -= tem;
+ tem = tem.arshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT);
offset = size_binop (PLUS_EXPR, offset,
double_int_to_tree (sizetype, tem));
}
- *pbitpos = double_int_to_shwi (bit_offset);
+ *pbitpos = bit_offset.to_shwi ();
*poffset = offset;
}
@@ -8730,7 +8721,7 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
if (reduce_bit_field && TYPE_UNSIGNED (type))
temp = expand_binop (mode, xor_optab, op0,
immed_double_int_const
- (double_int_mask (TYPE_PRECISION (type)), mode),
+ (double_int::mask (TYPE_PRECISION (type)), mode),
target, 1, OPTAB_LIB_WIDEN);
else
temp = expand_unop (mode, one_cmpl_optab, op0, target, 1);
@@ -10365,13 +10356,13 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
value ? label : 0,
value ? 0 : label, -1);
expand_assignment (lhs, build_int_cst (TREE_TYPE (rhs), value),
- MOVE_NONTEMPORAL (exp));
+ false);
do_pending_stack_adjust ();
emit_label (label);
return const0_rtx;
}
- expand_assignment (lhs, rhs, MOVE_NONTEMPORAL (exp));
+ expand_assignment (lhs, rhs, false);
return const0_rtx;
}
@@ -10453,7 +10444,7 @@ reduce_to_bit_field_precision (rtx exp, rtx target, tree type)
}
else if (TYPE_UNSIGNED (type))
{
- rtx mask = immed_double_int_const (double_int_mask (prec),
+ rtx mask = immed_double_int_const (double_int::mask (prec),
GET_MODE (exp));
return expand_and (GET_MODE (exp), exp, mask, target);
}
diff --git a/gcc/expr.h b/gcc/expr.h
index 68cdb8d9109..f63b8f3052d 100644
--- a/gcc/expr.h
+++ b/gcc/expr.h
@@ -721,4 +721,13 @@ extern tree build_libfunc_function (const char *);
/* Get the personality libfunc for a function decl. */
rtx get_personality_function (tree);
+
+/* In stmt.c */
+
+/* Expand a GIMPLE_SWITCH statement. */
+extern void expand_case (gimple);
+
+/* Like expand_case but special-case for SJLJ exception dispatching. */
+extern void expand_sjlj_dispatch_table (rtx, VEC(tree,heap) *);
+
#endif /* GCC_EXPR_H */
diff --git a/gcc/fixed-value.c b/gcc/fixed-value.c
index 9a34bc53de1..b74a60e29f2 100644
--- a/gcc/fixed-value.c
+++ b/gcc/fixed-value.c
@@ -376,9 +376,8 @@ do_fixed_multiply (FIXED_VALUE_TYPE *f, const FIXED_VALUE_TYPE *a,
if (GET_MODE_PRECISION (f->mode) <= HOST_BITS_PER_WIDE_INT)
{
f->data = a->data * b->data;
- f->data = f->data.lshift ((-GET_MODE_FBIT (f->mode)),
- HOST_BITS_PER_DOUBLE_INT,
- !unsigned_p);
+ f->data = f->data.lshift (-GET_MODE_FBIT (f->mode),
+ HOST_BITS_PER_DOUBLE_INT, !unsigned_p);
overflow_p = fixed_saturate1 (f->mode, f->data, &f->data, sat_p);
}
else
@@ -466,9 +465,8 @@ do_fixed_multiply (FIXED_VALUE_TYPE *f, const FIXED_VALUE_TYPE *a,
f->data.high = f->data.high | s.high;
s.low = f->data.low;
s.high = f->data.high;
- r = r.lshift ((-GET_MODE_FBIT (f->mode)),
- HOST_BITS_PER_DOUBLE_INT,
- !unsigned_p);
+ r = r.lshift (-GET_MODE_FBIT (f->mode),
+ HOST_BITS_PER_DOUBLE_INT, !unsigned_p);
}
overflow_p = fixed_saturate2 (f->mode, r, s, &f->data, sat_p);
@@ -493,8 +491,7 @@ do_fixed_divide (FIXED_VALUE_TYPE *f, const FIXED_VALUE_TYPE *a,
if (GET_MODE_PRECISION (f->mode) <= HOST_BITS_PER_WIDE_INT)
{
f->data = a->data.lshift (GET_MODE_FBIT (f->mode),
- HOST_BITS_PER_DOUBLE_INT,
- !unsigned_p);
+ HOST_BITS_PER_DOUBLE_INT, !unsigned_p);
f->data = f->data.div (b->data, unsigned_p, TRUNC_DIV_EXPR);
overflow_p = fixed_saturate1 (f->mode, f->data, &f->data, sat_p);
}
@@ -612,9 +609,8 @@ do_fixed_shift (FIXED_VALUE_TYPE *f, const FIXED_VALUE_TYPE *a,
if (GET_MODE_PRECISION (f->mode) <= HOST_BITS_PER_WIDE_INT || (!left_p))
{
- f->data = a->data.lshift (left_p ? b->data.low : (-b->data.low),
- HOST_BITS_PER_DOUBLE_INT,
- !unsigned_p);
+ f->data = a->data.lshift (left_p ? b->data.low : -b->data.low,
+ HOST_BITS_PER_DOUBLE_INT, !unsigned_p);
if (left_p) /* Only left shift saturates. */
overflow_p = fixed_saturate1 (f->mode, f->data, &f->data, sat_p);
}
@@ -630,8 +626,7 @@ do_fixed_shift (FIXED_VALUE_TYPE *f, const FIXED_VALUE_TYPE *a,
else
{
temp_low = a->data.lshift (b->data.low,
- HOST_BITS_PER_DOUBLE_INT,
- !unsigned_p);
+ HOST_BITS_PER_DOUBLE_INT, !unsigned_p);
/* Logical shift right to temp_high. */
temp_high = a->data.llshift (b->data.low - HOST_BITS_PER_DOUBLE_INT,
HOST_BITS_PER_DOUBLE_INT);
@@ -801,8 +796,8 @@ fixed_convert (FIXED_VALUE_TYPE *f, enum machine_mode mode,
double_int temp_high, temp_low;
int amount = GET_MODE_FBIT (mode) - GET_MODE_FBIT (a->mode);
temp_low = a->data.lshift (amount,
- HOST_BITS_PER_DOUBLE_INT,
- SIGNED_FIXED_POINT_MODE_P (a->mode));
+ HOST_BITS_PER_DOUBLE_INT,
+ SIGNED_FIXED_POINT_MODE_P (a->mode));
/* Logical shift right to temp_high. */
temp_high = a->data.llshift (amount - HOST_BITS_PER_DOUBLE_INT,
HOST_BITS_PER_DOUBLE_INT);
@@ -864,8 +859,8 @@ fixed_convert (FIXED_VALUE_TYPE *f, enum machine_mode mode,
/* Right shift a to temp based on a->mode. */
double_int temp;
temp = a->data.lshift (GET_MODE_FBIT (mode) - GET_MODE_FBIT (a->mode),
- HOST_BITS_PER_DOUBLE_INT,
- SIGNED_FIXED_POINT_MODE_P (a->mode));
+ HOST_BITS_PER_DOUBLE_INT,
+ SIGNED_FIXED_POINT_MODE_P (a->mode));
f->mode = mode;
f->data = temp;
if (SIGNED_FIXED_POINT_MODE_P (a->mode) ==
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 2bf51797847..24e21ebbd2e 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -192,11 +192,10 @@ div_if_zero_remainder (enum tree_code code, const_tree arg1, const_tree arg2)
a signed division. */
uns = TYPE_UNSIGNED (TREE_TYPE (arg2));
- quo = double_int_divmod (tree_to_double_int (arg1),
- tree_to_double_int (arg2),
- uns, code, &rem);
+ quo = tree_to_double_int (arg1).divmod (tree_to_double_int (arg2),
+ uns, code, &rem);
- if (double_int_zero_p (rem))
+ if (rem.is_zero ())
return build_int_cst_wide (TREE_TYPE (arg1), quo.low, quo.high);
return NULL_TREE;
@@ -948,55 +947,52 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree arg2,
switch (code)
{
case BIT_IOR_EXPR:
- res = double_int_ior (op1, op2);
+ res = op1 | op2;
break;
case BIT_XOR_EXPR:
- res = double_int_xor (op1, op2);
+ res = op1 ^ op2;
break;
case BIT_AND_EXPR:
- res = double_int_and (op1, op2);
+ res = op1 & op2;
break;
case RSHIFT_EXPR:
- res = double_int_rshift (op1, double_int_to_shwi (op2),
- TYPE_PRECISION (type), !uns);
+ res = op1.rshift (op2.to_shwi (), TYPE_PRECISION (type), !uns);
break;
case LSHIFT_EXPR:
/* It's unclear from the C standard whether shifts can overflow.
The following code ignores overflow; perhaps a C standard
interpretation ruling is needed. */
- res = double_int_lshift (op1, double_int_to_shwi (op2),
- TYPE_PRECISION (type), !uns);
+ res = op1.lshift (op2.to_shwi (), TYPE_PRECISION (type), !uns);
break;
case RROTATE_EXPR:
- res = double_int_rrotate (op1, double_int_to_shwi (op2),
- TYPE_PRECISION (type));
+ res = op1.rrotate (op2.to_shwi (), TYPE_PRECISION (type));
break;
case LROTATE_EXPR:
- res = double_int_lrotate (op1, double_int_to_shwi (op2),
- TYPE_PRECISION (type));
+ res = op1.lrotate (op2.to_shwi (), TYPE_PRECISION (type));
break;
case PLUS_EXPR:
- overflow = add_double (op1.low, op1.high, op2.low, op2.high,
- &res.low, &res.high);
+ res = op1.add_with_sign (op2, false, &overflow);
break;
case MINUS_EXPR:
+/* FIXME(crowl): Remove this code if the replacement works.
neg_double (op2.low, op2.high, &res.low, &res.high);
add_double (op1.low, op1.high, res.low, res.high,
&res.low, &res.high);
overflow = OVERFLOW_SUM_SIGN (res.high, op2.high, op1.high);
+*/
+ res = op1.add_with_sign (-op2, false, &overflow);
break;
case MULT_EXPR:
- overflow = mul_double (op1.low, op1.high, op2.low, op2.high,
- &res.low, &res.high);
+ res = op1.mul_with_sign (op2, false, &overflow);
break;
case MULT_HIGHPART_EXPR:
@@ -1004,9 +1000,8 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree arg2,
to the multiply primitive, to handle very large highparts. */
if (TYPE_PRECISION (type) > HOST_BITS_PER_WIDE_INT)
return NULL_TREE;
- tmp = double_int_mul (op1, op2);
- res = double_int_rshift (tmp, TYPE_PRECISION (type),
- TYPE_PRECISION (type), !uns);
+ tmp = op1 * op2;
+ res = tmp.rshift (TYPE_PRECISION (type), TYPE_PRECISION (type), !uns);
break;
case TRUNC_DIV_EXPR:
@@ -1028,15 +1023,14 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree arg2,
/* ... fall through ... */
case ROUND_DIV_EXPR:
- if (double_int_zero_p (op2))
+ if (op2.is_zero ())
return NULL_TREE;
- if (double_int_one_p (op2))
+ if (op2.is_one ())
{
res = op1;
break;
}
- if (double_int_equal_p (op1, op2)
- && ! double_int_zero_p (op1))
+ if (op1 == op2 && !op1.is_zero ())
{
res = double_int_one;
break;
@@ -1064,7 +1058,7 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree arg2,
/* ... fall through ... */
case ROUND_MOD_EXPR:
- if (double_int_zero_p (op2))
+ if (op2.is_zero ())
return NULL_TREE;
overflow = div_and_round_double (code, uns,
op1.low, op1.high, op2.low, op2.high,
@@ -1073,11 +1067,11 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree arg2,
break;
case MIN_EXPR:
- res = double_int_min (op1, op2, uns);
+ res = op1.min (op2, uns);
break;
case MAX_EXPR:
- res = double_int_max (op1, op2, uns);
+ res = op1.max (op2, uns);
break;
default:
@@ -1602,14 +1596,14 @@ fold_convert_const_int_from_fixed (tree type, const_tree arg1)
mode = TREE_FIXED_CST (arg1).mode;
if (GET_MODE_FBIT (mode) < HOST_BITS_PER_DOUBLE_INT)
{
- temp = double_int_rshift (temp, GET_MODE_FBIT (mode),
- HOST_BITS_PER_DOUBLE_INT,
- SIGNED_FIXED_POINT_MODE_P (mode));
+ temp = temp.rshift (GET_MODE_FBIT (mode),
+ HOST_BITS_PER_DOUBLE_INT,
+ SIGNED_FIXED_POINT_MODE_P (mode));
/* Left shift temp to temp_trunc by fbit. */
- temp_trunc = double_int_lshift (temp, GET_MODE_FBIT (mode),
- HOST_BITS_PER_DOUBLE_INT,
- SIGNED_FIXED_POINT_MODE_P (mode));
+ temp_trunc = temp.lshift (GET_MODE_FBIT (mode),
+ HOST_BITS_PER_DOUBLE_INT,
+ SIGNED_FIXED_POINT_MODE_P (mode));
}
else
{
@@ -1620,14 +1614,14 @@ fold_convert_const_int_from_fixed (tree type, const_tree arg1)
/* If FIXED_CST is negative, we need to round the value toward 0.
We do this by adding 1 to temp if the fractional bits are not zero. */
if (SIGNED_FIXED_POINT_MODE_P (mode)
- && double_int_negative_p (temp_trunc)
- && !double_int_equal_p (TREE_FIXED_CST (arg1).data, temp_trunc))
- temp = double_int_add (temp, double_int_one);
+ && temp_trunc.is_negative ()
+ && TREE_FIXED_CST (arg1).data != temp_trunc)
+ temp += double_int_one;
/* Given a fixed-point constant, make new constant with new type,
appropriately sign-extended or truncated. */
t = force_fit_type_double (type, temp, -1,
- (double_int_negative_p (temp)
+ (temp.is_negative ()
&& (TYPE_UNSIGNED (type)
< TYPE_UNSIGNED (TREE_TYPE (arg1))))
| TREE_OVERFLOW (arg1));
@@ -5890,20 +5884,16 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
if (tcode == code)
{
double_int mul;
- int overflow_p;
- mul = double_int_mul_with_sign
- (double_int_ext
- (tree_to_double_int (op1),
- TYPE_PRECISION (ctype), TYPE_UNSIGNED (ctype)),
- double_int_ext
- (tree_to_double_int (c),
- TYPE_PRECISION (ctype), TYPE_UNSIGNED (ctype)),
- false, &overflow_p);
- overflow_p = ((!TYPE_UNSIGNED (ctype) && overflow_p)
+ bool overflow_p;
+ unsigned prec = TYPE_PRECISION (ctype);
+ bool uns = TYPE_UNSIGNED (ctype);
+ double_int diop1 = tree_to_double_int (op1).ext (prec, uns);
+ double_int dic = tree_to_double_int (c).ext (prec, uns);
+ mul = diop1.mul_with_sign (dic, false, &overflow_p);
+ overflow_p = ((!uns && overflow_p)
| TREE_OVERFLOW (c) | TREE_OVERFLOW (op1));
if (!double_int_fits_to_tree_p (ctype, mul)
- && ((TYPE_UNSIGNED (ctype) && tcode != MULT_EXPR)
- || !TYPE_UNSIGNED (ctype)))
+ && ((uns && tcode != MULT_EXPR) || !uns))
overflow_p = 1;
if (!overflow_p)
return fold_build2 (tcode, ctype, fold_convert (ctype, op0),
@@ -11044,24 +11034,23 @@ fold_binary_loc (location_t loc,
c2 = tree_to_double_int (arg1);
/* If (C1&C2) == C1, then (X&C1)|C2 becomes (X,C2). */
- if (double_int_equal_p (double_int_and (c1, c2), c1))
+ if ((c1 & c2) == c1)
return omit_one_operand_loc (loc, type, arg1,
TREE_OPERAND (arg0, 0));
- msk = double_int_mask (width);
+ msk = double_int::mask (width);
/* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
- if (double_int_zero_p (double_int_and_not (msk,
- double_int_ior (c1, c2))))
+ if (msk.and_not (c1 | c2).is_zero ())
return fold_build2_loc (loc, BIT_IOR_EXPR, type,
TREE_OPERAND (arg0, 0), arg1);
/* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2,
unless (C1 & ~C2) | (C2 & C3) for some C3 is a mask of some
mode which allows further optimizations. */
- c1 = double_int_and (c1, msk);
- c2 = double_int_and (c2, msk);
- c3 = double_int_and_not (c1, c2);
+ c1 &= msk;
+ c2 &= msk;
+ c3 = c1.and_not (c2);
for (w = BITS_PER_UNIT;
w <= width && w <= HOST_BITS_PER_WIDE_INT;
w <<= 1)
@@ -11071,11 +11060,11 @@ fold_binary_loc (location_t loc,
if (((c1.low | c2.low) & mask) == mask
&& (c1.low & ~mask) == 0 && c1.high == 0)
{
- c3 = uhwi_to_double_int (mask);
+ c3 = double_int::from_uhwi (mask);
break;
}
}
- if (!double_int_equal_p (c3, c1))
+ if (c3 != c1)
return fold_build2_loc (loc, BIT_IOR_EXPR, type,
fold_build2_loc (loc, BIT_AND_EXPR, type,
TREE_OPERAND (arg0, 0),
@@ -11451,10 +11440,9 @@ fold_binary_loc (location_t loc,
if (TREE_CODE (arg1) == INTEGER_CST)
{
double_int cst1 = tree_to_double_int (arg1);
- double_int ncst1 = double_int_ext (double_int_neg (cst1),
- TYPE_PRECISION (TREE_TYPE (arg1)),
- TYPE_UNSIGNED (TREE_TYPE (arg1)));
- if (double_int_equal_p (double_int_and (cst1, ncst1), ncst1)
+ double_int ncst1 = (-cst1).ext (TYPE_PRECISION (TREE_TYPE (arg1)),
+ TYPE_UNSIGNED (TREE_TYPE (arg1)));
+ if ((cst1 & ncst1) == ncst1
&& multiple_of_p (type, arg0,
double_int_to_tree (TREE_TYPE (arg1), ncst1)))
return fold_convert_loc (loc, type, arg0);
@@ -11467,18 +11455,18 @@ fold_binary_loc (location_t loc,
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
int arg1tz
- = double_int_ctz (tree_to_double_int (TREE_OPERAND (arg0, 1)));
+ = tree_to_double_int (TREE_OPERAND (arg0, 1)).trailing_zeros ();
if (arg1tz > 0)
{
double_int arg1mask, masked;
- arg1mask = double_int_not (double_int_mask (arg1tz));
- arg1mask = double_int_ext (arg1mask, TYPE_PRECISION (type),
+ arg1mask = ~double_int::mask (arg1tz);
+ arg1mask = arg1mask.ext (TYPE_PRECISION (type),
TYPE_UNSIGNED (type));
- masked = double_int_and (arg1mask, tree_to_double_int (arg1));
- if (double_int_zero_p (masked))
+ masked = arg1mask & tree_to_double_int (arg1);
+ if (masked.is_zero ())
return omit_two_operands_loc (loc, type, build_zero_cst (type),
arg0, arg1);
- else if (!double_int_equal_p (masked, tree_to_double_int (arg1)))
+ else if (masked != tree_to_double_int (arg1))
return fold_build2_loc (loc, code, type, op0,
double_int_to_tree (type, masked));
}
@@ -16002,7 +15990,7 @@ fold_abs_const (tree arg0, tree type)
/* If the value is unsigned or non-negative, then the absolute value
is the same as the ordinary value. */
if (TYPE_UNSIGNED (type)
- || !double_int_negative_p (val))
+ || !val.is_negative ())
t = arg0;
/* If the value is negative, then the absolute value is
@@ -16042,7 +16030,7 @@ fold_not_const (const_tree arg0, tree type)
gcc_assert (TREE_CODE (arg0) == INTEGER_CST);
- val = double_int_not (tree_to_double_int (arg0));
+ val = ~tree_to_double_int (arg0);
return force_fit_type_double (type, val, 0, TREE_OVERFLOW (arg0));
}
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index 69c0fe8e5e6..45f1c9fd48d 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,22 @@
+2012-09-12 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/54225
+ PR fortran/53306
+ * array.c (match_subscript, gfc_match_array_ref): Fix
+ diagnostic of coarray's '*'.
+
+2012-09-07 Mikael Morin <mikael@gcc.gnu.org>
+
+ PR fortran/54208
+ * simplify.c (simplify_bound_dim): Resolve array spec before
+ proceeding with simplification.
+
+2012-09-06 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/54463
+ * trans-intrinsic.c (gfc_conv_intrinsic_funcall): Fix matmul
+ call to BLAS if the default-kind has been promoted.
+
2012-09-05 Tobias Burnus <burnus@net-b.de>
PR fortran/54462
diff --git a/gcc/fortran/array.c b/gcc/fortran/array.c
index 07fecd8aaf3..44ec72eb87c 100644
--- a/gcc/fortran/array.c
+++ b/gcc/fortran/array.c
@@ -91,9 +91,7 @@ match_subscript (gfc_array_ref *ar, int init, bool match_star)
else if (!star)
m = gfc_match_expr (&ar->start[i]);
- if (m == MATCH_NO && gfc_match_char ('*') == MATCH_YES)
- return MATCH_NO;
- else if (m == MATCH_NO)
+ if (m == MATCH_NO)
gfc_error ("Expected array subscript at %C");
if (m != MATCH_YES)
return MATCH_ERROR;
@@ -224,7 +222,7 @@ coarray:
for (ar->codimen = 0; ar->codimen + ar->dimen < GFC_MAX_DIMENSIONS; ar->codimen++)
{
- m = match_subscript (ar, init, ar->codimen == (corank - 1));
+ m = match_subscript (ar, init, true);
if (m == MATCH_ERROR)
return MATCH_ERROR;
@@ -255,6 +253,13 @@ coarray:
gfc_error ("Invalid form of coarray reference at %C");
return MATCH_ERROR;
}
+ else if (ar->dimen_type[ar->codimen + ar->dimen] == DIMEN_STAR)
+ {
+ gfc_error ("Unexpected '*' for codimension %d of %d at %C",
+ ar->codimen + 1, corank);
+ return MATCH_ERROR;
+ }
+
if (ar->codimen >= corank)
{
gfc_error ("Invalid codimension %d at %C, only %d codimensions exist",
diff --git a/gcc/fortran/simplify.c b/gcc/fortran/simplify.c
index e4ccddf967c..07aeee88e15 100644
--- a/gcc/fortran/simplify.c
+++ b/gcc/fortran/simplify.c
@@ -3255,6 +3255,9 @@ simplify_bound_dim (gfc_expr *array, gfc_expr *kind, int d, int upper,
gcc_assert (array->expr_type == EXPR_VARIABLE);
gcc_assert (as);
+ if (gfc_resolve_array_spec (as, 0) == FAILURE)
+ return NULL;
+
/* The last dimension of an assumed-size array is special. */
if ((!coarray && d == as->rank && as->type == AS_ASSUMED_SIZE && !upper)
|| (coarray && d == as->rank + as->corank
diff --git a/gcc/fortran/trans-intrinsic.c b/gcc/fortran/trans-intrinsic.c
index add4baaa311..4b268b34ba7 100644
--- a/gcc/fortran/trans-intrinsic.c
+++ b/gcc/fortran/trans-intrinsic.c
@@ -2362,21 +2362,20 @@ gfc_conv_intrinsic_funcall (gfc_se * se, gfc_expr * expr)
if (gfc_option.flag_external_blas
&& (sym->ts.type == BT_REAL || sym->ts.type == BT_COMPLEX)
- && (sym->ts.kind == gfc_default_real_kind
- || sym->ts.kind == gfc_default_double_kind))
+ && (sym->ts.kind == 4 || sym->ts.kind == 8))
{
tree gemm_fndecl;
if (sym->ts.type == BT_REAL)
{
- if (sym->ts.kind == gfc_default_real_kind)
+ if (sym->ts.kind == 4)
gemm_fndecl = gfor_fndecl_sgemm;
else
gemm_fndecl = gfor_fndecl_dgemm;
}
else
{
- if (sym->ts.kind == gfc_default_real_kind)
+ if (sym->ts.kind == 4)
gemm_fndecl = gfor_fndecl_cgemm;
else
gemm_fndecl = gfor_fndecl_zgemm;
diff --git a/gcc/fortran/trans-openmp.c b/gcc/fortran/trans-openmp.c
index 8d7aa5fe3c3..e843692e020 100644
--- a/gcc/fortran/trans-openmp.c
+++ b/gcc/fortran/trans-openmp.c
@@ -1434,9 +1434,8 @@ gfc_trans_omp_do (gfc_code *code, stmtblock_t *pblock,
/* Initialize DOVAR. */
tmp = fold_build2_loc (input_location, MULT_EXPR, type, count, step);
tmp = fold_build2_loc (input_location, PLUS_EXPR, type, from, tmp);
- di = VEC_safe_push (dovar_init, heap, inits, NULL);
- di->var = dovar;
- di->init = tmp;
+ dovar_init e = {dovar, tmp};
+ VEC_safe_push (dovar_init, heap, inits, e);
}
if (!dovar_found)
diff --git a/gcc/fwprop.c b/gcc/fwprop.c
index e64e76da221..cb571cd6d97 100644
--- a/gcc/fwprop.c
+++ b/gcc/fwprop.c
@@ -223,7 +223,7 @@ single_def_use_enter_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
bitmap_copy (local_lr, &lr_bb_info->in);
/* Push a marker for the leave_block callback. */
- VEC_safe_push (df_ref, heap, reg_defs_stack, (df_ref) NULL);
+ VEC_safe_push (df_ref, heap, reg_defs_stack, NULL);
process_uses (df_get_artificial_uses (bb_index), DF_REF_AT_TOP);
process_defs (df_get_artificial_defs (bb_index), DF_REF_AT_TOP);
diff --git a/gcc/gcc.c b/gcc/gcc.c
index 5f68d5978e3..af3c34acfeb 100644
--- a/gcc/gcc.c
+++ b/gcc/gcc.c
@@ -2520,7 +2520,7 @@ execute (void)
and record info about each one.
Also search for the programs that are to be run. */
- VEC_safe_push (const_char_p, heap, argbuf, (const_char_p)0);
+ VEC_safe_push (const_char_p, heap, argbuf, 0);
commands[0].prog = VEC_index (const_char_p, argbuf, 0); /* first command. */
commands[0].argv = VEC_address (const_char_p, argbuf);
diff --git a/gcc/gcse.c b/gcc/gcse.c
index 767cc66edb5..a066b36c642 100644
--- a/gcc/gcse.c
+++ b/gcc/gcse.c
@@ -1417,7 +1417,7 @@ canon_list_insert (rtx dest ATTRIBUTE_UNUSED, const_rtx x ATTRIBUTE_UNUSED,
{
rtx dest_addr, insn;
int bb;
- modify_pair *pair;
+ modify_pair pair;
while (GET_CODE (dest) == SUBREG
|| GET_CODE (dest) == ZERO_EXTRACT
@@ -1436,9 +1436,9 @@ canon_list_insert (rtx dest ATTRIBUTE_UNUSED, const_rtx x ATTRIBUTE_UNUSED,
insn = (rtx) v_insn;
bb = BLOCK_FOR_INSN (insn)->index;
- pair = VEC_safe_push (modify_pair, heap, canon_modify_mem_list[bb], NULL);
- pair->dest = dest;
- pair->dest_addr = dest_addr;
+ pair.dest = dest;
+ pair.dest_addr = dest_addr;
+ VEC_safe_push (modify_pair, heap, canon_modify_mem_list[bb], pair);
}
/* Record memory modification information for INSN. We do not actually care
diff --git a/gcc/genautomata.c b/gcc/genautomata.c
index 122a4a4cfbe..46a398d1a6e 100644
--- a/gcc/genautomata.c
+++ b/gcc/genautomata.c
@@ -7752,8 +7752,7 @@ output_min_issue_delay_table (automaton_t automaton)
= VEC_alloc (vect_el_t, heap, compressed_min_issue_delay_len);
for (i = 0; i < compressed_min_issue_delay_len; i++)
- VEC_quick_push (vect_el_t, compressed_min_issue_delay_vect,
- (vect_el_t) 0);
+ VEC_quick_push (vect_el_t, compressed_min_issue_delay_vect, 0);
for (i = 0; i < min_issue_delay_len; i++)
{
@@ -7845,7 +7844,7 @@ output_reserved_units_table (automaton_t automaton)
reserved_units_table = VEC_alloc (vect_el_t, heap, reserved_units_size);
for (i = 0; i < reserved_units_size; i++)
- VEC_quick_push (vect_el_t, reserved_units_table, (vect_el_t) 0);
+ VEC_quick_push (vect_el_t, reserved_units_table, 0);
for (n = 0; n < VEC_length (state_t, output_states_vect); n++)
{
state_t s = VEC_index (state_t, output_states_vect, n);
diff --git a/gcc/genextract.c b/gcc/genextract.c
index 175febeb58f..fb1428687ca 100644
--- a/gcc/genextract.c
+++ b/gcc/genextract.c
@@ -201,7 +201,7 @@ VEC_safe_set_locstr (VEC(locstr,heap) **vp, unsigned int ix, char *str)
else
{
while (ix > VEC_length (locstr, *vp))
- VEC_safe_push (locstr, heap, *vp, (locstr) NULL);
+ VEC_safe_push (locstr, heap, *vp, NULL);
VEC_safe_push (locstr, heap, *vp, str);
}
}
diff --git a/gcc/genopinit.c b/gcc/genopinit.c
index 52612794802..e0ffc8f286c 100644
--- a/gcc/genopinit.c
+++ b/gcc/genopinit.c
@@ -265,7 +265,7 @@ gen_insn (rtx insn)
{
p.op = optabs[pindex].op;
p.sort_num = (p.op << 16) | (p.m2 << 8) | p.m1;
- VEC_safe_push (pattern, heap, patterns, &p);
+ VEC_safe_push (pattern, heap, patterns, p);
return;
}
}
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index 19a259e774e..bb13e1fd732 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -154,13 +154,15 @@ canonicalize_constructor_val (tree cval, tree from_decl)
}
if (TREE_CODE (cval) == ADDR_EXPR)
{
- tree base = get_base_address (TREE_OPERAND (cval, 0));
- if (!base && TREE_CODE (TREE_OPERAND (cval, 0)) == COMPOUND_LITERAL_EXPR)
+ tree base = NULL_TREE;
+ if (TREE_CODE (TREE_OPERAND (cval, 0)) == COMPOUND_LITERAL_EXPR)
{
base = COMPOUND_LITERAL_EXPR_DECL (TREE_OPERAND (cval, 0));
if (base)
TREE_OPERAND (cval, 0) = base;
}
+ else
+ base = get_base_address (TREE_OPERAND (cval, 0));
if (!base)
return NULL_TREE;
@@ -2807,32 +2809,28 @@ fold_array_ctor_reference (tree type, tree ctor,
be larger than size of array element. */
if (!TYPE_SIZE_UNIT (type)
|| TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST
- || double_int_cmp (elt_size,
- tree_to_double_int (TYPE_SIZE_UNIT (type)), 0) < 0)
+ || elt_size.slt (tree_to_double_int (TYPE_SIZE_UNIT (type))))
return NULL_TREE;
/* Compute the array index we look for. */
- access_index = double_int_udiv (uhwi_to_double_int (offset / BITS_PER_UNIT),
- elt_size, TRUNC_DIV_EXPR);
- access_index = double_int_add (access_index, low_bound);
+ access_index = double_int::from_uhwi (offset / BITS_PER_UNIT)
+ .udiv (elt_size, TRUNC_DIV_EXPR);
+ access_index += low_bound;
if (index_type)
- access_index = double_int_ext (access_index,
- TYPE_PRECISION (index_type),
- TYPE_UNSIGNED (index_type));
+ access_index = access_index.ext (TYPE_PRECISION (index_type),
+ TYPE_UNSIGNED (index_type));
/* And offset within the access. */
- inner_offset = offset % (double_int_to_uhwi (elt_size) * BITS_PER_UNIT);
+ inner_offset = offset % (elt_size.to_uhwi () * BITS_PER_UNIT);
/* See if the array field is large enough to span the whole access. We do not
care to fold accesses spanning multiple array indexes. */
- if (inner_offset + size > double_int_to_uhwi (elt_size) * BITS_PER_UNIT)
+ if (inner_offset + size > elt_size.to_uhwi () * BITS_PER_UNIT)
return NULL_TREE;
- index = double_int_sub (low_bound, double_int_one);
+ index = low_bound - double_int_one;
if (index_type)
- index = double_int_ext (index,
- TYPE_PRECISION (index_type),
- TYPE_UNSIGNED (index_type));
+ index = index.ext (TYPE_PRECISION (index_type), TYPE_UNSIGNED (index_type));
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), cnt, cfield, cval)
{
@@ -2852,17 +2850,16 @@ fold_array_ctor_reference (tree type, tree ctor,
}
else
{
- index = double_int_add (index, double_int_one);
+ index += double_int_one;
if (index_type)
- index = double_int_ext (index,
- TYPE_PRECISION (index_type),
- TYPE_UNSIGNED (index_type));
+ index = index.ext (TYPE_PRECISION (index_type),
+ TYPE_UNSIGNED (index_type));
max_index = index;
}
/* Do we have a match? */
- if (double_int_cmp (access_index, index, 1) >= 0
- && double_int_cmp (access_index, max_index, 1) <= 0)
+ if (access_index.cmp (index, 1) >= 0
+ && access_index.cmp (max_index, 1) <= 0)
return fold_ctor_reference (type, cval, inner_offset, size,
from_decl);
}
@@ -2891,7 +2888,7 @@ fold_nonarray_ctor_reference (tree type, tree ctor,
tree field_size = DECL_SIZE (cfield);
double_int bitoffset;
double_int byte_offset_cst = tree_to_double_int (byte_offset);
- double_int bits_per_unit_cst = uhwi_to_double_int (BITS_PER_UNIT);
+ double_int bits_per_unit_cst = double_int::from_uhwi (BITS_PER_UNIT);
double_int bitoffset_end, access_end;
/* Variable-sized objects in static constructors make no sense,
@@ -2903,37 +2900,33 @@ fold_nonarray_ctor_reference (tree type, tree ctor,
: TREE_CODE (TREE_TYPE (cfield)) == ARRAY_TYPE));
/* Compute bit offset of the field. */
- bitoffset = double_int_add (tree_to_double_int (field_offset),
- double_int_mul (byte_offset_cst,
- bits_per_unit_cst));
+ bitoffset = tree_to_double_int (field_offset)
+ + byte_offset_cst * bits_per_unit_cst;
/* Compute bit offset where the field ends. */
if (field_size != NULL_TREE)
- bitoffset_end = double_int_add (bitoffset,
- tree_to_double_int (field_size));
+ bitoffset_end = bitoffset + tree_to_double_int (field_size);
else
bitoffset_end = double_int_zero;
- access_end = double_int_add (uhwi_to_double_int (offset),
- uhwi_to_double_int (size));
+ access_end = double_int::from_uhwi (offset)
+ + double_int::from_uhwi (size);
/* Is there any overlap between [OFFSET, OFFSET+SIZE) and
[BITOFFSET, BITOFFSET_END)? */
- if (double_int_cmp (access_end, bitoffset, 0) > 0
+ if (access_end.cmp (bitoffset, 0) > 0
&& (field_size == NULL_TREE
- || double_int_cmp (uhwi_to_double_int (offset),
- bitoffset_end, 0) < 0))
+ || double_int::from_uhwi (offset).slt (bitoffset_end)))
{
- double_int inner_offset = double_int_sub (uhwi_to_double_int (offset),
- bitoffset);
+ double_int inner_offset = double_int::from_uhwi (offset) - bitoffset;
/* We do have overlap. Now see if field is large enough to
cover the access. Give up for accesses spanning multiple
fields. */
- if (double_int_cmp (access_end, bitoffset_end, 0) > 0)
+ if (access_end.cmp (bitoffset_end, 0) > 0)
return NULL_TREE;
- if (double_int_cmp (uhwi_to_double_int (offset), bitoffset, 0) < 0)
+ if (double_int::from_uhwi (offset).slt (bitoffset))
return NULL_TREE;
return fold_ctor_reference (type, cval,
- double_int_to_uhwi (inner_offset), size,
+ inner_offset.to_uhwi (), size,
from_decl);
}
}
@@ -3028,13 +3021,11 @@ fold_const_aggregate_ref_1 (tree t, tree (*valueize) (tree))
TREE_CODE (low_bound) == INTEGER_CST)
&& (unit_size = array_ref_element_size (t),
host_integerp (unit_size, 1))
- && (doffset = double_int_sext
- (double_int_sub (TREE_INT_CST (idx),
- TREE_INT_CST (low_bound)),
- TYPE_PRECISION (TREE_TYPE (idx))),
- double_int_fits_in_shwi_p (doffset)))
+ && (doffset = (TREE_INT_CST (idx) - TREE_INT_CST (low_bound))
+ .sext (TYPE_PRECISION (TREE_TYPE (idx))),
+ doffset.fits_shwi ()))
{
- offset = double_int_to_shwi (doffset);
+ offset = doffset.to_shwi ();
offset *= TREE_INT_CST_LOW (unit_size);
offset *= BITS_PER_UNIT;
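gimple-fold.c is one of several files in this merge converted from the free-function double_int API to member functions and overloaded operators: double_int_add (a, b) becomes a + b, uhwi_to_double_int (x) becomes double_int::from_uhwi (x), double_int_to_uhwi (x) becomes x.to_uhwi (), and so on. The toy sketch below shows only the shape of the new interface; dint is a hypothetical single-word stand-in, not GCC's real two-word double_int, and its udiv omits the rounding-mode argument the real one takes.

    #include <cstdio>

    /* Hypothetical single-word stand-in for double_int, only to show
       the member/operator calling style.  */
    struct dint
    {
      unsigned long long v;

      static dint from_uhwi (unsigned long long x) { dint r; r.v = x; return r; }
      unsigned long long to_uhwi () const { return v; }
      bool is_zero () const { return v == 0; }
      dint udiv (dint d) const { dint r; r.v = v / d.v; return r; }
      dint operator+ (dint o) const { dint r; r.v = v + o.v; return r; }
      dint operator* (dint o) const { dint r; r.v = v * o.v; return r; }
    };

    int
    main ()
    {
      dint elt_size = dint::from_uhwi (8);
      dint offset = dint::from_uhwi (64);
      /* Old: double_int_udiv (offset, elt_size, TRUNC_DIV_EXPR)
         New: offset.udiv (elt_size, TRUNC_DIV_EXPR)  */
      dint access_index = offset.udiv (elt_size);
      printf ("access_index = %llu\n", access_index.to_uhwi ());
      return 0;
    }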
diff --git a/gcc/gimple-low.c b/gcc/gimple-low.c
index 7a51e8c271c..c5a16ac52d3 100644
--- a/gcc/gimple-low.c
+++ b/gcc/gimple-low.c
@@ -851,7 +851,7 @@ lower_gimple_return (gimple_stmt_iterator *gsi, struct lower_data *data)
/* Not found. Create a new label and record the return statement. */
tmp_rs.label = create_artificial_label (cfun->function_end_locus);
tmp_rs.stmt = stmt;
- VEC_safe_push (return_statements_t, heap, data->return_statements, &tmp_rs);
+ VEC_safe_push (return_statements_t, heap, data->return_statements, tmp_rs);
/* Generate a goto statement and remove the return statement. */
found:
diff --git a/gcc/gimple-ssa-strength-reduction.c b/gcc/gimple-ssa-strength-reduction.c
index 8806f48a5a0..87684b30543 100644
--- a/gcc/gimple-ssa-strength-reduction.c
+++ b/gcc/gimple-ssa-strength-reduction.c
@@ -54,6 +54,7 @@ along with GCC; see the file COPYING3. If not see
#include "domwalk.h"
#include "pointer-set.h"
#include "expmed.h"
+#include "params.h"
/* Information about a strength reduction candidate. Each statement
in the candidate table represents an expression of one of the
@@ -353,10 +354,14 @@ find_basis_for_candidate (slsr_cand_t c)
cand_chain_t chain;
slsr_cand_t basis = NULL;
+ // Limit the potential for N^2 behavior on long candidate chains.
+ int iters = 0;
+ int max_iters = PARAM_VALUE (PARAM_MAX_SLSR_CANDIDATE_SCAN);
+
mapping_key.base_expr = c->base_expr;
chain = (cand_chain_t) htab_find (base_cand_map, &mapping_key);
- for (; chain; chain = chain->next)
+ for (; chain && iters < max_iters; chain = chain->next, ++iters)
{
slsr_cand_t one_basis = chain->cand;
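The find_basis_for_candidate hunk above caps a linear walk over the candidate chain with the new PARAM_MAX_SLSR_CANDIDATE_SCAN parameter (hence the added params.h include), so pathological inputs with very long chains cannot drive the basis search into quadratic time; past the cap the search settles for the best basis found so far. A standalone sketch of that bounded-scan shape, with a hypothetical node type and selection rule:

    #include <cstddef>

    struct chain_node
    {
      int value;
      chain_node *next;
    };

    /* Bounded linear scan mirroring the iters/max_iters guard added to
       find_basis_for_candidate: examine at most max_iters nodes and
       accept a possibly suboptimal answer if the chain is longer.  */
    static chain_node *
    find_best_bounded (chain_node *chain, int max_iters)
    {
      chain_node *best = NULL;
      int iters = 0;
      for (; chain && iters < max_iters; chain = chain->next, ++iters)
        if (!best || chain->value > best->value)
          best = chain;
      return best;
    }

    int
    main ()
    {
      chain_node c = { 5, NULL };
      chain_node b = { 9, &c };
      chain_node a = { 2, &b };
      return find_best_bounded (&a, 2) == &b ? 0 : 1;
    }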
@@ -539,7 +544,7 @@ restructure_reference (tree *pbase, tree *poffset, double_int *pindex,
{
tree base = *pbase, offset = *poffset;
double_int index = *pindex;
- double_int bpu = uhwi_to_double_int (BITS_PER_UNIT);
+ double_int bpu = double_int::from_uhwi (BITS_PER_UNIT);
tree mult_op0, mult_op1, t1, t2, type;
double_int c1, c2, c3, c4;
@@ -548,7 +553,7 @@ restructure_reference (tree *pbase, tree *poffset, double_int *pindex,
|| TREE_CODE (base) != MEM_REF
|| TREE_CODE (offset) != MULT_EXPR
|| TREE_CODE (TREE_OPERAND (offset, 1)) != INTEGER_CST
- || !double_int_zero_p (double_int_umod (index, bpu, FLOOR_MOD_EXPR)))
+ || !index.umod (bpu, FLOOR_MOD_EXPR).is_zero ())
return false;
t1 = TREE_OPERAND (base, 0);
@@ -575,7 +580,7 @@ restructure_reference (tree *pbase, tree *poffset, double_int *pindex,
if (TREE_CODE (TREE_OPERAND (mult_op0, 1)) == INTEGER_CST)
{
t2 = TREE_OPERAND (mult_op0, 0);
- c2 = double_int_neg (tree_to_double_int (TREE_OPERAND (mult_op0, 1)));
+ c2 = -tree_to_double_int (TREE_OPERAND (mult_op0, 1));
}
else
return false;
@@ -586,12 +591,12 @@ restructure_reference (tree *pbase, tree *poffset, double_int *pindex,
c2 = double_int_zero;
}
- c4 = double_int_udiv (index, bpu, FLOOR_DIV_EXPR);
+ c4 = index.udiv (bpu, FLOOR_DIV_EXPR);
*pbase = t1;
*poffset = fold_build2 (MULT_EXPR, sizetype, t2,
double_int_to_tree (sizetype, c3));
- *pindex = double_int_add (double_int_add (c1, double_int_mul (c2, c3)), c4);
+ *pindex = c1 + c2 * c3 + c4;
*ptype = type;
return true;
@@ -623,7 +628,7 @@ slsr_process_ref (gimple gs)
base = get_inner_reference (ref_expr, &bitsize, &bitpos, &offset, &mode,
&unsignedp, &volatilep, false);
- index = uhwi_to_double_int (bitpos);
+ index = double_int::from_uhwi (bitpos);
if (!restructure_reference (&base, &offset, &index, &type))
return;
@@ -677,8 +682,7 @@ create_mul_ssa_cand (gimple gs, tree base_in, tree stride_in, bool speed)
============================
X = B + ((i' * S) * Z) */
base = base_cand->base_expr;
- index = double_int_mul (base_cand->index,
- tree_to_double_int (base_cand->stride));
+ index = base_cand->index * tree_to_double_int (base_cand->stride);
stride = stride_in;
ctype = base_cand->cand_type;
if (has_single_use (base_in))
@@ -734,8 +738,8 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed)
X = (B + i') * (S * c) */
base = base_cand->base_expr;
index = base_cand->index;
- temp = double_int_mul (tree_to_double_int (base_cand->stride),
- tree_to_double_int (stride_in));
+ temp = tree_to_double_int (base_cand->stride)
+ * tree_to_double_int (stride_in);
stride = double_int_to_tree (TREE_TYPE (stride_in), temp);
ctype = base_cand->cand_type;
if (has_single_use (base_in))
@@ -758,7 +762,7 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed)
+ stmt_cost (base_cand->cand_stmt, speed));
}
else if (base_cand->kind == CAND_ADD
- && double_int_one_p (base_cand->index)
+ && base_cand->index.is_one ()
&& TREE_CODE (base_cand->stride) == INTEGER_CST)
{
/* Y = B + (1 * S), S constant
@@ -859,7 +863,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
while (addend_cand && !base)
{
if (addend_cand->kind == CAND_MULT
- && double_int_zero_p (addend_cand->index)
+ && addend_cand->index.is_zero ()
&& TREE_CODE (addend_cand->stride) == INTEGER_CST)
{
/* Z = (B + 0) * S, S constant
@@ -869,7 +873,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
base = base_in;
index = tree_to_double_int (addend_cand->stride);
if (subtract_p)
- index = double_int_neg (index);
+ index = -index;
stride = addend_cand->base_expr;
ctype = TREE_TYPE (base_in);
if (has_single_use (addend_in))
@@ -886,7 +890,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
while (base_cand && !base)
{
if (base_cand->kind == CAND_ADD
- && (double_int_zero_p (base_cand->index)
+ && (base_cand->index.is_zero ()
|| operand_equal_p (base_cand->stride,
integer_zero_node, 0)))
{
@@ -909,7 +913,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
while (subtrahend_cand && !base)
{
if (subtrahend_cand->kind == CAND_MULT
- && double_int_zero_p (subtrahend_cand->index)
+ && subtrahend_cand->index.is_zero ()
&& TREE_CODE (subtrahend_cand->stride) == INTEGER_CST)
{
/* Z = (B + 0) * S, S constant
@@ -918,7 +922,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
Value: X = Y + ((-1 * S) * B) */
base = base_in;
index = tree_to_double_int (subtrahend_cand->stride);
- index = double_int_neg (index);
+ index = -index;
stride = subtrahend_cand->base_expr;
ctype = TREE_TYPE (base_in);
if (has_single_use (addend_in))
@@ -973,10 +977,8 @@ create_add_imm_cand (gimple gs, tree base_in, double_int index_in, bool speed)
bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (base_cand->stride));
if (TREE_CODE (base_cand->stride) == INTEGER_CST
- && double_int_multiple_of (index_in,
- tree_to_double_int (base_cand->stride),
- unsigned_p,
- &multiple))
+ && index_in.multiple_of (tree_to_double_int (base_cand->stride),
+ unsigned_p, &multiple))
{
/* Y = (B + i') * S, S constant, c = kS for some integer k
X = Y + c
@@ -989,7 +991,7 @@ create_add_imm_cand (gimple gs, tree base_in, double_int index_in, bool speed)
X = (B + (i'+ k)) * S */
kind = base_cand->kind;
base = base_cand->base_expr;
- index = double_int_add (base_cand->index, multiple);
+ index = base_cand->index + multiple;
stride = base_cand->stride;
ctype = base_cand->cand_type;
if (has_single_use (base_in))
@@ -1066,7 +1068,7 @@ slsr_process_add (gimple gs, tree rhs1, tree rhs2, bool speed)
/* Record an interpretation for the add-immediate. */
index = tree_to_double_int (rhs2);
if (subtract_p)
- index = double_int_neg (index);
+ index = -index;
c = create_add_imm_cand (gs, rhs1, index, speed);
@@ -1581,7 +1583,7 @@ cand_increment (slsr_cand_t c)
basis = lookup_cand (c->basis);
gcc_assert (operand_equal_p (c->base_expr, basis->base_expr, 0));
- return double_int_sub (c->index, basis->index);
+ return c->index - basis->index;
}
/* Calculate the increment required for candidate C relative to
@@ -1594,8 +1596,8 @@ cand_abs_increment (slsr_cand_t c)
{
double_int increment = cand_increment (c);
- if (!address_arithmetic_p && double_int_negative_p (increment))
- increment = double_int_neg (increment);
+ if (!address_arithmetic_p && increment.is_negative ())
+ increment = -increment;
return increment;
}
@@ -1626,7 +1628,7 @@ static void
replace_dependent (slsr_cand_t c, enum tree_code cand_code)
{
double_int stride = tree_to_double_int (c->stride);
- double_int bump = double_int_mul (cand_increment (c), stride);
+ double_int bump = cand_increment (c) * stride;
gimple stmt_to_print = NULL;
slsr_cand_t basis;
tree basis_name, incr_type, bump_tree;
@@ -1637,7 +1639,7 @@ replace_dependent (slsr_cand_t c, enum tree_code cand_code)
in this case. Restriction to signed HWI is conservative
for unsigned types but allows for safe negation without
twisted logic. */
- if (!double_int_fits_in_shwi_p (bump))
+ if (!bump.fits_shwi ())
return;
basis = lookup_cand (c->basis);
@@ -1645,10 +1647,10 @@ replace_dependent (slsr_cand_t c, enum tree_code cand_code)
incr_type = TREE_TYPE (gimple_assign_rhs1 (c->cand_stmt));
code = PLUS_EXPR;
- if (double_int_negative_p (bump))
+ if (bump.is_negative ())
{
code = MINUS_EXPR;
- bump = double_int_neg (bump);
+ bump = -bump;
}
bump_tree = double_int_to_tree (incr_type, bump);
@@ -1659,7 +1661,7 @@ replace_dependent (slsr_cand_t c, enum tree_code cand_code)
print_gimple_stmt (dump_file, c->cand_stmt, 0, 0);
}
- if (double_int_zero_p (bump))
+ if (bump.is_zero ())
{
tree lhs = gimple_assign_lhs (c->cand_stmt);
gimple copy_stmt = gimple_build_assign (lhs, basis_name);
@@ -1739,9 +1741,7 @@ incr_vec_index (double_int increment)
{
unsigned i;
- for (i = 0;
- i < incr_vec_len && !double_int_equal_p (increment, incr_vec[i].incr);
- i++)
+ for (i = 0; i < incr_vec_len && increment != incr_vec[i].incr; i++)
;
gcc_assert (i < incr_vec_len);
@@ -1778,12 +1778,12 @@ record_increment (slsr_cand_t c, double_int increment)
/* Treat increments that differ only in sign as identical so as to
share initializers, unless we are generating pointer arithmetic. */
- if (!address_arithmetic_p && double_int_negative_p (increment))
- increment = double_int_neg (increment);
+ if (!address_arithmetic_p && increment.is_negative ())
+ increment = -increment;
for (i = 0; i < incr_vec_len; i++)
{
- if (double_int_equal_p (incr_vec[i].incr, increment))
+ if (incr_vec[i].incr == increment)
{
incr_vec[i].count++;
found = true;
@@ -1819,9 +1819,9 @@ record_increment (slsr_cand_t c, double_int increment)
opinion later if it doesn't dominate all other occurrences.
Exception: increments of -1, 0, 1 never need initializers. */
if (c->kind == CAND_ADD
- && double_int_equal_p (c->index, increment)
- && (double_int_scmp (increment, double_int_one) > 0
- || double_int_scmp (increment, double_int_minus_one) < 0))
+ && c->index == increment
+ && (increment.sgt (double_int_one)
+ || increment.slt (double_int_minus_one)))
{
tree t0;
tree rhs1 = gimple_assign_rhs1 (c->cand_stmt);
@@ -1923,7 +1923,7 @@ lowest_cost_path (int cost_in, int repl_savings, slsr_cand_t c, double_int incr)
if (cand_already_replaced (c))
local_cost = cost_in;
- else if (double_int_equal_p (incr, cand_incr))
+ else if (incr == cand_incr)
local_cost = cost_in - repl_savings - c->dead_savings;
else
local_cost = cost_in - c->dead_savings;
@@ -1954,8 +1954,7 @@ total_savings (int repl_savings, slsr_cand_t c, double_int incr)
int savings = 0;
double_int cand_incr = cand_abs_increment (c);
- if (double_int_equal_p (incr, cand_incr)
- && !cand_already_replaced (c))
+ if (incr == cand_incr && !cand_already_replaced (c))
savings += repl_savings + c->dead_savings;
if (c->dependent)
@@ -1984,13 +1983,12 @@ analyze_increments (slsr_cand_t first_dep, enum machine_mode mode, bool speed)
for (i = 0; i < incr_vec_len; i++)
{
- HOST_WIDE_INT incr = double_int_to_shwi (incr_vec[i].incr);
+ HOST_WIDE_INT incr = incr_vec[i].incr.to_shwi ();
/* If somehow this increment is bigger than a HWI, we won't
be optimizing candidates that use it. And if the increment
has a count of zero, nothing will be done with it. */
- if (!double_int_fits_in_shwi_p (incr_vec[i].incr)
- || !incr_vec[i].count)
+ if (!incr_vec[i].incr.fits_shwi () || !incr_vec[i].count)
incr_vec[i].cost = COST_INFINITE;
/* Increments of 0, 1, and -1 are always profitable to replace,
@@ -2168,7 +2166,7 @@ nearest_common_dominator_for_cands (slsr_cand_t c, double_int incr,
in, then the result depends only on siblings and dependents. */
cand_incr = cand_abs_increment (c);
- if (!double_int_equal_p (cand_incr, incr) || cand_already_replaced (c))
+ if (cand_incr != incr || cand_already_replaced (c))
{
*where = new_where;
return ncd;
@@ -2213,10 +2211,10 @@ insert_initializers (slsr_cand_t c)
double_int incr = incr_vec[i].incr;
if (!profitable_increment_p (i)
- || double_int_one_p (incr)
- || (double_int_minus_one_p (incr)
+ || incr.is_one ()
+ || (incr.is_minus_one ()
&& gimple_assign_rhs_code (c->cand_stmt) != POINTER_PLUS_EXPR)
- || double_int_zero_p (incr))
+ || incr.is_zero ())
continue;
/* We may have already identified an existing initializer that
@@ -2384,7 +2382,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree *new_var,
incr_vec[i].initializer,
new_var);
- if (!double_int_equal_p (incr_vec[i].incr, cand_incr))
+ if (incr_vec[i].incr != cand_incr)
{
gcc_assert (repl_code == PLUS_EXPR);
repl_code = MINUS_EXPR;
@@ -2400,7 +2398,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree *new_var,
from the basis name, or an add of the stride to the basis
name, respectively. It may be necessary to introduce a
cast (or reuse an existing cast). */
- else if (double_int_one_p (cand_incr))
+ else if (cand_incr.is_one ())
{
tree stride_type = TREE_TYPE (c->stride);
tree orig_type = TREE_TYPE (orig_rhs2);
@@ -2415,7 +2413,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree *new_var,
c);
}
- else if (double_int_minus_one_p (cand_incr))
+ else if (cand_incr.is_minus_one ())
{
tree stride_type = TREE_TYPE (c->stride);
tree orig_type = TREE_TYPE (orig_rhs2);
@@ -2441,7 +2439,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree *new_var,
fputs (" (duplicate, not actually replacing)\n", dump_file);
}
- else if (double_int_zero_p (cand_incr))
+ else if (cand_incr.is_zero ())
{
tree lhs = gimple_assign_lhs (c->cand_stmt);
tree lhs_type = TREE_TYPE (lhs);
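Taken together, the gimple-ssa-strength-reduction.c hunks above show the predicate and comparison side of the double_int conversion. For reference, the renamings applied in this file, all read directly from the hunks:

    double_int_zero_p (x)           ->  x.is_zero ()
    double_int_one_p (x)            ->  x.is_one ()
    double_int_minus_one_p (x)      ->  x.is_minus_one ()
    double_int_negative_p (x)       ->  x.is_negative ()
    double_int_neg (x)              ->  -x
    double_int_equal_p (a, b)       ->  a == b  (negated form: a != b)
    double_int_scmp (a, b) > 0      ->  a.sgt (b)
    double_int_scmp (a, b) < 0      ->  a.slt (b)
    double_int_fits_in_shwi_p (x)   ->  x.fits_shwi ()
    double_int_to_shwi (x)          ->  x.to_shwi ()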
diff --git a/gcc/gimple.c b/gcc/gimple.c
index 3cf064a2ef5..25932c9735f 100644
--- a/gcc/gimple.c
+++ b/gcc/gimple.c
@@ -37,17 +37,10 @@ along with GCC; see the file COPYING3. If not see
#include "demangle.h"
#include "langhooks.h"
-/* Global type table. FIXME lto, it should be possible to re-use some
- of the type hashing routines in tree.c (type_hash_canon, type_hash_lookup,
- etc), but those assume that types were built with the various
- build_*_type routines which is not the case with the streamer. */
-static GTY((if_marked ("ggc_marked_p"), param_is (union tree_node)))
- htab_t gimple_types;
+/* Global canonical type table. */
static GTY((if_marked ("ggc_marked_p"), param_is (union tree_node)))
htab_t gimple_canonical_types;
static GTY((if_marked ("tree_int_map_marked_p"), param_is (struct tree_int_map)))
- htab_t type_hash_cache;
-static GTY((if_marked ("tree_int_map_marked_p"), param_is (struct tree_int_map)))
htab_t canonical_type_hash_cache;
/* All the tuples have their operand vector (if present) at the very bottom
@@ -2886,16 +2879,12 @@ get_base_address (tree t)
&& TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR)
t = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
- if (TREE_CODE (t) == SSA_NAME
- || DECL_P (t)
- || TREE_CODE (t) == STRING_CST
- || TREE_CODE (t) == CONSTRUCTOR
- || INDIRECT_REF_P (t)
- || TREE_CODE (t) == MEM_REF
- || TREE_CODE (t) == TARGET_MEM_REF)
- return t;
- else
+ /* ??? Either the alias oracle or all callers need to properly deal
+ with WITH_SIZE_EXPRs before we can look through those. */
+ if (TREE_CODE (t) == WITH_SIZE_EXPR)
return NULL_TREE;
+
+ return t;
}
void
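The get_base_address rewrite above inverts the old logic: instead of whitelisting the node kinds that may serve as a base (SSA_NAME, decls, STRING_CST, MEM_REF and friends), it keeps whatever survives the stripping loop and rejects only WITH_SIZE_EXPR, which per the ??? comment neither the alias oracle nor the callers can handle yet. This is presumably also why the earlier gimple-fold.c hunk now tests for COMPOUND_LITERAL_EXPR before calling get_base_address rather than after it fails. A simplified sketch of the whitelist-to-blacklist inversion, with hypothetical node codes rather than GCC's tree representation:

    enum node_code { NC_DECL, NC_MEM_REF, NC_WITH_SIZE_EXPR, NC_OTHER };

    /* Hypothetical node: a code plus an operand to strip through.  */
    struct node
    {
      node_code code;
      node *inner;
    };

    /* Blacklist form of the walk: strip wrappers, then reject only the
       one kind the callers cannot handle.  The loop stands in for the
       handled_component_p / MEM_REF-of-ADDR_EXPR stripping.  */
    static node *
    base_address_sketch (node *t)
    {
      while (t->code == NC_MEM_REF && t->inner)
        t = t->inner;
      if (t->code == NC_WITH_SIZE_EXPR)
        return 0;                 /* ??? callers can't look through these yet */
      return t;                   /* everything else is now a valid base */
    }

    int
    main ()
    {
      node base = { NC_DECL, 0 };
      node wrap = { NC_MEM_REF, &base };
      return base_address_sketch (&wrap) == &base ? 0 : 1;
    }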
@@ -3026,159 +3015,6 @@ gimple_call_copy_skip_args (gimple stmt, bitmap args_to_skip)
}
-enum gtc_mode { GTC_MERGE = 0, GTC_DIAG = 1 };
-
-static hashval_t gimple_type_hash (const void *);
-
-/* Structure used to maintain a cache of some type pairs compared by
- gimple_types_compatible_p when comparing aggregate types. There are
- three possible values for SAME_P:
-
- -2: The pair (T1, T2) has just been inserted in the table.
- 0: T1 and T2 are different types.
- 1: T1 and T2 are the same type.
-
- The two elements in the SAME_P array are indexed by the comparison
- mode gtc_mode. */
-
-struct type_pair_d
-{
- unsigned int uid1;
- unsigned int uid2;
- signed char same_p[2];
-};
-typedef struct type_pair_d *type_pair_t;
-DEF_VEC_P(type_pair_t);
-DEF_VEC_ALLOC_P(type_pair_t,heap);
-
-#define GIMPLE_TYPE_PAIR_SIZE 16381
-struct type_pair_d *type_pair_cache;
-
-
-/* Lookup the pair of types T1 and T2 in *VISITED_P. Insert a new
- entry if none existed. */
-
-static inline type_pair_t
-lookup_type_pair (tree t1, tree t2)
-{
- unsigned int index;
- unsigned int uid1, uid2;
-
- if (type_pair_cache == NULL)
- type_pair_cache = XCNEWVEC (struct type_pair_d, GIMPLE_TYPE_PAIR_SIZE);
-
- if (TYPE_UID (t1) < TYPE_UID (t2))
- {
- uid1 = TYPE_UID (t1);
- uid2 = TYPE_UID (t2);
- }
- else
- {
- uid1 = TYPE_UID (t2);
- uid2 = TYPE_UID (t1);
- }
- gcc_checking_assert (uid1 != uid2);
-
- /* iterative_hash_hashval_t implies function calls.
- We know that UIDs are in a limited range. */
- index = ((((unsigned HOST_WIDE_INT)uid1 << HOST_BITS_PER_WIDE_INT / 2) + uid2)
- % GIMPLE_TYPE_PAIR_SIZE);
- if (type_pair_cache [index].uid1 == uid1
- && type_pair_cache [index].uid2 == uid2)
- return &type_pair_cache[index];
-
- type_pair_cache [index].uid1 = uid1;
- type_pair_cache [index].uid2 = uid2;
- type_pair_cache [index].same_p[0] = -2;
- type_pair_cache [index].same_p[1] = -2;
-
- return &type_pair_cache[index];
-}
-
-/* Per pointer state for the SCC finding. The on_sccstack flag
- is not strictly required, it is true when there is no hash value
- recorded for the type and false otherwise. But querying that
- is slower. */
-
-struct sccs
-{
- unsigned int dfsnum;
- unsigned int low;
- bool on_sccstack;
- union {
- hashval_t hash;
- signed char same_p;
- } u;
-};
-
-static unsigned int next_dfs_num;
-static unsigned int gtc_next_dfs_num;
-
-
-/* GIMPLE type merging cache. A direct-mapped cache based on TYPE_UID. */
-
-typedef struct GTY(()) gimple_type_leader_entry_s {
- tree type;
- tree leader;
-} gimple_type_leader_entry;
-
-#define GIMPLE_TYPE_LEADER_SIZE 16381
-static GTY((deletable, length("GIMPLE_TYPE_LEADER_SIZE")))
- gimple_type_leader_entry *gimple_type_leader;
-
-/* Lookup an existing leader for T and return it or NULL_TREE, if
- there is none in the cache. */
-
-static inline tree
-gimple_lookup_type_leader (tree t)
-{
- gimple_type_leader_entry *leader;
-
- if (!gimple_type_leader)
- return NULL_TREE;
-
- leader = &gimple_type_leader[TYPE_UID (t) % GIMPLE_TYPE_LEADER_SIZE];
- if (leader->type != t)
- return NULL_TREE;
-
- return leader->leader;
-}
-
-/* Return true if T1 and T2 have the same name. If FOR_COMPLETION_P is
- true then if any type has no name return false, otherwise return
- true if both types have no names. */
-
-static bool
-compare_type_names_p (tree t1, tree t2)
-{
- tree name1 = TYPE_NAME (t1);
- tree name2 = TYPE_NAME (t2);
-
- if ((name1 != NULL_TREE) != (name2 != NULL_TREE))
- return false;
-
- if (name1 == NULL_TREE)
- return true;
-
- /* Either both should be a TYPE_DECL or both an IDENTIFIER_NODE. */
- if (TREE_CODE (name1) != TREE_CODE (name2))
- return false;
-
- if (TREE_CODE (name1) == TYPE_DECL)
- name1 = DECL_NAME (name1);
- gcc_checking_assert (!name1 || TREE_CODE (name1) == IDENTIFIER_NODE);
-
- if (TREE_CODE (name2) == TYPE_DECL)
- name2 = DECL_NAME (name2);
- gcc_checking_assert (!name2 || TREE_CODE (name2) == IDENTIFIER_NODE);
-
- /* Identifiers can be compared with pointer equality rather
- than a string comparison. */
- if (name1 == name2)
- return true;
-
- return false;
-}
/* Return true if the field decls F1 and F2 are at the same offset.
@@ -3231,892 +3067,6 @@ gimple_compare_field_offset (tree f1, tree f2)
return false;
}
-static bool
-gimple_types_compatible_p_1 (tree, tree, type_pair_t,
- VEC(type_pair_t, heap) **,
- struct pointer_map_t *, struct obstack *);
-
-/* DFS visit the edge from the callers type pair with state *STATE to
- the pair T1, T2 while operating in FOR_MERGING_P mode.
- Update the merging status if it is not part of the SCC containing the
- callers pair and return it.
- SCCSTACK, SCCSTATE and SCCSTATE_OBSTACK are state for the DFS walk done. */
-
-static bool
-gtc_visit (tree t1, tree t2,
- struct sccs *state,
- VEC(type_pair_t, heap) **sccstack,
- struct pointer_map_t *sccstate,
- struct obstack *sccstate_obstack)
-{
- struct sccs *cstate = NULL;
- type_pair_t p;
- void **slot;
- tree leader1, leader2;
-
- /* Check first for the obvious case of pointer identity. */
- if (t1 == t2)
- return true;
-
- /* Check that we have two types to compare. */
- if (t1 == NULL_TREE || t2 == NULL_TREE)
- return false;
-
- /* Can't be the same type if the types don't have the same code. */
- if (TREE_CODE (t1) != TREE_CODE (t2))
- return false;
-
- /* Can't be the same type if they have different CV qualifiers. */
- if (TYPE_QUALS (t1) != TYPE_QUALS (t2))
- return false;
-
- if (TREE_ADDRESSABLE (t1) != TREE_ADDRESSABLE (t2))
- return false;
-
- /* Void types and nullptr types are always the same. */
- if (TREE_CODE (t1) == VOID_TYPE
- || TREE_CODE (t1) == NULLPTR_TYPE)
- return true;
-
- /* Can't be the same type if they have different alignment or mode. */
- if (TYPE_ALIGN (t1) != TYPE_ALIGN (t2)
- || TYPE_MODE (t1) != TYPE_MODE (t2))
- return false;
-
- /* Do some simple checks before doing three hashtable queries. */
- if (INTEGRAL_TYPE_P (t1)
- || SCALAR_FLOAT_TYPE_P (t1)
- || FIXED_POINT_TYPE_P (t1)
- || TREE_CODE (t1) == VECTOR_TYPE
- || TREE_CODE (t1) == COMPLEX_TYPE
- || TREE_CODE (t1) == OFFSET_TYPE
- || POINTER_TYPE_P (t1))
- {
- /* Can't be the same type if they have different sign or precision. */
- if (TYPE_PRECISION (t1) != TYPE_PRECISION (t2)
- || TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
- return false;
-
- if (TREE_CODE (t1) == INTEGER_TYPE
- && TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2))
- return false;
-
- /* That's all we need to check for float and fixed-point types. */
- if (SCALAR_FLOAT_TYPE_P (t1)
- || FIXED_POINT_TYPE_P (t1))
- return true;
-
- /* For other types fall through to more complex checks. */
- }
-
- /* If the types have been previously registered and found equal
- they still are. */
- leader1 = gimple_lookup_type_leader (t1);
- leader2 = gimple_lookup_type_leader (t2);
- if (leader1 == t2
- || t1 == leader2
- || (leader1 && leader1 == leader2))
- return true;
-
- /* If the hash values of t1 and t2 are different the types can't
- possibly be the same. This helps keeping the type-pair hashtable
- small, only tracking comparisons for hash collisions. */
- if (gimple_type_hash (t1) != gimple_type_hash (t2))
- return false;
-
- /* Allocate a new cache entry for this comparison. */
- p = lookup_type_pair (t1, t2);
- if (p->same_p[GTC_MERGE] == 0 || p->same_p[GTC_MERGE] == 1)
- {
- /* We have already decided whether T1 and T2 are the
- same, return the cached result. */
- return p->same_p[GTC_MERGE] == 1;
- }
-
- if ((slot = pointer_map_contains (sccstate, p)) != NULL)
- cstate = (struct sccs *)*slot;
- /* Not yet visited. DFS recurse. */
- if (!cstate)
- {
- gimple_types_compatible_p_1 (t1, t2, p,
- sccstack, sccstate, sccstate_obstack);
- cstate = (struct sccs *)* pointer_map_contains (sccstate, p);
- state->low = MIN (state->low, cstate->low);
- }
- /* If the type is still on the SCC stack adjust the parents low. */
- if (cstate->dfsnum < state->dfsnum
- && cstate->on_sccstack)
- state->low = MIN (cstate->dfsnum, state->low);
-
- /* Return the current lattice value. We start with an equality
- assumption so types part of a SCC will be optimistically
- treated equal unless proven otherwise. */
- return cstate->u.same_p;
-}
-
-/* Worker for gimple_types_compatible_p.
- SCCSTACK, SCCSTATE and SCCSTATE_OBSTACK are state for the DFS walk done. */
-
-static bool
-gimple_types_compatible_p_1 (tree t1, tree t2, type_pair_t p,
- VEC(type_pair_t, heap) **sccstack,
- struct pointer_map_t *sccstate,
- struct obstack *sccstate_obstack)
-{
- struct sccs *state;
-
- gcc_assert (p->same_p[GTC_MERGE] == -2);
-
- state = XOBNEW (sccstate_obstack, struct sccs);
- *pointer_map_insert (sccstate, p) = state;
-
- VEC_safe_push (type_pair_t, heap, *sccstack, p);
- state->dfsnum = gtc_next_dfs_num++;
- state->low = state->dfsnum;
- state->on_sccstack = true;
- /* Start with an equality assumption. As we DFS recurse into child
- SCCs this assumption may get revisited. */
- state->u.same_p = 1;
-
- /* The struct tags shall compare equal. */
- if (!compare_type_names_p (t1, t2))
- goto different_types;
-
- /* We may not merge typedef types to the same type in different
- contexts. */
- if (TYPE_NAME (t1)
- && TREE_CODE (TYPE_NAME (t1)) == TYPE_DECL
- && DECL_CONTEXT (TYPE_NAME (t1))
- && TYPE_P (DECL_CONTEXT (TYPE_NAME (t1))))
- {
- if (!gtc_visit (DECL_CONTEXT (TYPE_NAME (t1)),
- DECL_CONTEXT (TYPE_NAME (t2)),
- state, sccstack, sccstate, sccstate_obstack))
- goto different_types;
- }
-
- /* If their attributes are not the same they can't be the same type. */
- if (!attribute_list_equal (TYPE_ATTRIBUTES (t1), TYPE_ATTRIBUTES (t2)))
- goto different_types;
-
- /* Do type-specific comparisons. */
- switch (TREE_CODE (t1))
- {
- case VECTOR_TYPE:
- case COMPLEX_TYPE:
- if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2),
- state, sccstack, sccstate, sccstate_obstack))
- goto different_types;
- goto same_types;
-
- case ARRAY_TYPE:
- /* Array types are the same if the element types are the same and
- the number of elements are the same. */
- if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2),
- state, sccstack, sccstate, sccstate_obstack)
- || TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2)
- || TYPE_NONALIASED_COMPONENT (t1) != TYPE_NONALIASED_COMPONENT (t2))
- goto different_types;
- else
- {
- tree i1 = TYPE_DOMAIN (t1);
- tree i2 = TYPE_DOMAIN (t2);
-
- /* For an incomplete external array, the type domain can be
- NULL_TREE. Check this condition also. */
- if (i1 == NULL_TREE && i2 == NULL_TREE)
- goto same_types;
- else if (i1 == NULL_TREE || i2 == NULL_TREE)
- goto different_types;
- else
- {
- tree min1 = TYPE_MIN_VALUE (i1);
- tree min2 = TYPE_MIN_VALUE (i2);
- tree max1 = TYPE_MAX_VALUE (i1);
- tree max2 = TYPE_MAX_VALUE (i2);
-
- /* The minimum/maximum values have to be the same. */
- if ((min1 == min2
- || (min1 && min2
- && ((TREE_CODE (min1) == PLACEHOLDER_EXPR
- && TREE_CODE (min2) == PLACEHOLDER_EXPR)
- || operand_equal_p (min1, min2, 0))))
- && (max1 == max2
- || (max1 && max2
- && ((TREE_CODE (max1) == PLACEHOLDER_EXPR
- && TREE_CODE (max2) == PLACEHOLDER_EXPR)
- || operand_equal_p (max1, max2, 0)))))
- goto same_types;
- else
- goto different_types;
- }
- }
-
- case METHOD_TYPE:
- /* Method types should belong to the same class. */
- if (!gtc_visit (TYPE_METHOD_BASETYPE (t1), TYPE_METHOD_BASETYPE (t2),
- state, sccstack, sccstate, sccstate_obstack))
- goto different_types;
-
- /* Fallthru */
-
- case FUNCTION_TYPE:
- /* Function types are the same if the return type and arguments types
- are the same. */
- if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2),
- state, sccstack, sccstate, sccstate_obstack))
- goto different_types;
-
- if (!comp_type_attributes (t1, t2))
- goto different_types;
-
- if (TYPE_ARG_TYPES (t1) == TYPE_ARG_TYPES (t2))
- goto same_types;
- else
- {
- tree parms1, parms2;
-
- for (parms1 = TYPE_ARG_TYPES (t1), parms2 = TYPE_ARG_TYPES (t2);
- parms1 && parms2;
- parms1 = TREE_CHAIN (parms1), parms2 = TREE_CHAIN (parms2))
- {
- if (!gtc_visit (TREE_VALUE (parms1), TREE_VALUE (parms2),
- state, sccstack, sccstate, sccstate_obstack))
- goto different_types;
- }
-
- if (parms1 || parms2)
- goto different_types;
-
- goto same_types;
- }
-
- case OFFSET_TYPE:
- {
- if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2),
- state, sccstack, sccstate, sccstate_obstack)
- || !gtc_visit (TYPE_OFFSET_BASETYPE (t1),
- TYPE_OFFSET_BASETYPE (t2),
- state, sccstack, sccstate, sccstate_obstack))
- goto different_types;
-
- goto same_types;
- }
-
- case POINTER_TYPE:
- case REFERENCE_TYPE:
- {
- /* If the two pointers have different ref-all attributes,
- they can't be the same type. */
- if (TYPE_REF_CAN_ALIAS_ALL (t1) != TYPE_REF_CAN_ALIAS_ALL (t2))
- goto different_types;
-
- /* Otherwise, pointer and reference types are the same if the
- pointed-to types are the same. */
- if (gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2),
- state, sccstack, sccstate, sccstate_obstack))
- goto same_types;
-
- goto different_types;
- }
-
- case INTEGER_TYPE:
- case BOOLEAN_TYPE:
- {
- tree min1 = TYPE_MIN_VALUE (t1);
- tree max1 = TYPE_MAX_VALUE (t1);
- tree min2 = TYPE_MIN_VALUE (t2);
- tree max2 = TYPE_MAX_VALUE (t2);
- bool min_equal_p = false;
- bool max_equal_p = false;
-
- /* If either type has a minimum value, the other type must
- have the same. */
- if (min1 == NULL_TREE && min2 == NULL_TREE)
- min_equal_p = true;
- else if (min1 && min2 && operand_equal_p (min1, min2, 0))
- min_equal_p = true;
-
- /* Likewise, if either type has a maximum value, the other
- type must have the same. */
- if (max1 == NULL_TREE && max2 == NULL_TREE)
- max_equal_p = true;
- else if (max1 && max2 && operand_equal_p (max1, max2, 0))
- max_equal_p = true;
-
- if (!min_equal_p || !max_equal_p)
- goto different_types;
-
- goto same_types;
- }
-
- case ENUMERAL_TYPE:
- {
- /* FIXME lto, we cannot check bounds on enumeral types because
- different front ends will produce different values.
- In C, enumeral types are integers, while in C++ each element
- will have its own symbolic value. We should decide how enums
- are to be represented in GIMPLE and have each front end lower
- to that. */
- tree v1, v2;
-
- /* For enumeral types, all the values must be the same. */
- if (TYPE_VALUES (t1) == TYPE_VALUES (t2))
- goto same_types;
-
- for (v1 = TYPE_VALUES (t1), v2 = TYPE_VALUES (t2);
- v1 && v2;
- v1 = TREE_CHAIN (v1), v2 = TREE_CHAIN (v2))
- {
- tree c1 = TREE_VALUE (v1);
- tree c2 = TREE_VALUE (v2);
-
- if (TREE_CODE (c1) == CONST_DECL)
- c1 = DECL_INITIAL (c1);
-
- if (TREE_CODE (c2) == CONST_DECL)
- c2 = DECL_INITIAL (c2);
-
- if (tree_int_cst_equal (c1, c2) != 1)
- goto different_types;
-
- if (TREE_PURPOSE (v1) != TREE_PURPOSE (v2))
- goto different_types;
- }
-
- /* If one enumeration has more values than the other, they
- are not the same. */
- if (v1 || v2)
- goto different_types;
-
- goto same_types;
- }
-
- case RECORD_TYPE:
- case UNION_TYPE:
- case QUAL_UNION_TYPE:
- {
- tree f1, f2;
-
- /* For aggregate types, all the fields must be the same. */
- for (f1 = TYPE_FIELDS (t1), f2 = TYPE_FIELDS (t2);
- f1 && f2;
- f1 = TREE_CHAIN (f1), f2 = TREE_CHAIN (f2))
- {
- /* Different field kinds are not compatible. */
- if (TREE_CODE (f1) != TREE_CODE (f2))
- goto different_types;
- /* Field decls must have the same name and offset. */
- if (TREE_CODE (f1) == FIELD_DECL
- && (DECL_NONADDRESSABLE_P (f1) != DECL_NONADDRESSABLE_P (f2)
- || !gimple_compare_field_offset (f1, f2)))
- goto different_types;
- /* All entities should have the same name and type. */
- if (DECL_NAME (f1) != DECL_NAME (f2)
- || !gtc_visit (TREE_TYPE (f1), TREE_TYPE (f2),
- state, sccstack, sccstate, sccstate_obstack))
- goto different_types;
- }
-
- /* If one aggregate has more fields than the other, they
- are not the same. */
- if (f1 || f2)
- goto different_types;
-
- goto same_types;
- }
-
- default:
- gcc_unreachable ();
- }
-
- /* Common exit path for types that are not compatible. */
-different_types:
- state->u.same_p = 0;
- goto pop;
-
- /* Common exit path for types that are compatible. */
-same_types:
- gcc_assert (state->u.same_p == 1);
-
-pop:
- if (state->low == state->dfsnum)
- {
- type_pair_t x;
-
- /* Pop off the SCC and set its cache values to the final
- comparison result. */
- do
- {
- struct sccs *cstate;
- x = VEC_pop (type_pair_t, *sccstack);
- cstate = (struct sccs *)*pointer_map_contains (sccstate, x);
- cstate->on_sccstack = false;
- x->same_p[GTC_MERGE] = state->u.same_p;
- }
- while (x != p);
- }
-
- return state->u.same_p;
-}
-
-/* Return true iff T1 and T2 are structurally identical. When
- FOR_MERGING_P is true, an incomplete type and a complete type
- are considered different; otherwise they are considered compatible. */
-
-static bool
-gimple_types_compatible_p (tree t1, tree t2)
-{
- VEC(type_pair_t, heap) *sccstack = NULL;
- struct pointer_map_t *sccstate;
- struct obstack sccstate_obstack;
- type_pair_t p = NULL;
- bool res;
- tree leader1, leader2;
-
- /* Before starting to set up the SCC machinery handle simple cases. */
-
- /* Check first for the obvious case of pointer identity. */
- if (t1 == t2)
- return true;
-
- /* Check that we have two types to compare. */
- if (t1 == NULL_TREE || t2 == NULL_TREE)
- return false;
-
- /* Can't be the same type if the types don't have the same code. */
- if (TREE_CODE (t1) != TREE_CODE (t2))
- return false;
-
- /* Can't be the same type if they have different CV qualifiers. */
- if (TYPE_QUALS (t1) != TYPE_QUALS (t2))
- return false;
-
- if (TREE_ADDRESSABLE (t1) != TREE_ADDRESSABLE (t2))
- return false;
-
- /* Void types and nullptr types are always the same. */
- if (TREE_CODE (t1) == VOID_TYPE
- || TREE_CODE (t1) == NULLPTR_TYPE)
- return true;
-
- /* Can't be the same type if they have different alignment or mode. */
- if (TYPE_ALIGN (t1) != TYPE_ALIGN (t2)
- || TYPE_MODE (t1) != TYPE_MODE (t2))
- return false;
-
- /* Do some simple checks before doing three hashtable queries. */
- if (INTEGRAL_TYPE_P (t1)
- || SCALAR_FLOAT_TYPE_P (t1)
- || FIXED_POINT_TYPE_P (t1)
- || TREE_CODE (t1) == VECTOR_TYPE
- || TREE_CODE (t1) == COMPLEX_TYPE
- || TREE_CODE (t1) == OFFSET_TYPE
- || POINTER_TYPE_P (t1))
- {
- /* Can't be the same type if they have different sign or precision. */
- if (TYPE_PRECISION (t1) != TYPE_PRECISION (t2)
- || TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
- return false;
-
- if (TREE_CODE (t1) == INTEGER_TYPE
- && TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2))
- return false;
-
- /* That's all we need to check for float and fixed-point types. */
- if (SCALAR_FLOAT_TYPE_P (t1)
- || FIXED_POINT_TYPE_P (t1))
- return true;
-
- /* For other types fall through to more complex checks. */
- }
-
- /* If the types have been previously registered and found equal
- they still are. */
- leader1 = gimple_lookup_type_leader (t1);
- leader2 = gimple_lookup_type_leader (t2);
- if (leader1 == t2
- || t1 == leader2
- || (leader1 && leader1 == leader2))
- return true;
-
- /* If the hash values of t1 and t2 are different the types can't
- possibly be the same. This helps keeping the type-pair hashtable
- small, only tracking comparisons for hash collisions. */
- if (gimple_type_hash (t1) != gimple_type_hash (t2))
- return false;
-
- /* If we've visited this type pair before (in the case of aggregates
- with self-referential types), and we made a decision, return it. */
- p = lookup_type_pair (t1, t2);
- if (p->same_p[GTC_MERGE] == 0 || p->same_p[GTC_MERGE] == 1)
- {
- /* We have already decided whether T1 and T2 are the
- same, return the cached result. */
- return p->same_p[GTC_MERGE] == 1;
- }
-
- /* Now set up the SCC machinery for the comparison. */
- gtc_next_dfs_num = 1;
- sccstate = pointer_map_create ();
- gcc_obstack_init (&sccstate_obstack);
- res = gimple_types_compatible_p_1 (t1, t2, p,
- &sccstack, sccstate, &sccstate_obstack);
- VEC_free (type_pair_t, heap, sccstack);
- pointer_map_destroy (sccstate);
- obstack_free (&sccstate_obstack, NULL);
-
- return res;
-}
-
-
-static hashval_t
-iterative_hash_gimple_type (tree, hashval_t, VEC(tree, heap) **,
- struct pointer_map_t *, struct obstack *);
-
-/* DFS visit the edge from the callers type with state *STATE to T.
- Update the callers type hash V with the hash for T if it is not part
- of the SCC containing the callers type and return it.
- SCCSTACK, SCCSTATE and SCCSTATE_OBSTACK are state for the DFS walk done. */
-
-static hashval_t
-visit (tree t, struct sccs *state, hashval_t v,
- VEC (tree, heap) **sccstack,
- struct pointer_map_t *sccstate,
- struct obstack *sccstate_obstack)
-{
- struct sccs *cstate = NULL;
- struct tree_int_map m;
- void **slot;
-
- /* If there is a hash value recorded for this type then it can't
- possibly be part of our parent SCC. Simply mix in its hash. */
- m.base.from = t;
- if ((slot = htab_find_slot (type_hash_cache, &m, NO_INSERT))
- && *slot)
- return iterative_hash_hashval_t (((struct tree_int_map *) *slot)->to, v);
-
- if ((slot = pointer_map_contains (sccstate, t)) != NULL)
- cstate = (struct sccs *)*slot;
- if (!cstate)
- {
- hashval_t tem;
- /* Not yet visited. DFS recurse. */
- tem = iterative_hash_gimple_type (t, v,
- sccstack, sccstate, sccstate_obstack);
- if (!cstate)
- cstate = (struct sccs *)* pointer_map_contains (sccstate, t);
- state->low = MIN (state->low, cstate->low);
- /* If the type is no longer on the SCC stack and thus is not part
- of the parent's SCC, mix in its hash value. Otherwise we will
- ignore the type for hashing purposes and return the unaltered
- hash value. */
- if (!cstate->on_sccstack)
- return tem;
- }
- if (cstate->dfsnum < state->dfsnum
- && cstate->on_sccstack)
- state->low = MIN (cstate->dfsnum, state->low);
-
- /* We are part of our parent's SCC, skip this type during hashing
- and return the unaltered hash value. */
- return v;
-}
-
-/* Hash NAME with the previous hash value V and return it. */
-
-static hashval_t
-iterative_hash_name (tree name, hashval_t v)
-{
- if (!name)
- return v;
- v = iterative_hash_hashval_t (TREE_CODE (name), v);
- if (TREE_CODE (name) == TYPE_DECL)
- name = DECL_NAME (name);
- if (!name)
- return v;
- gcc_assert (TREE_CODE (name) == IDENTIFIER_NODE);
- return iterative_hash_object (IDENTIFIER_HASH_VALUE (name), v);
-}
-
-/* A type, hashvalue pair for sorting SCC members. */
-
-struct type_hash_pair {
- tree type;
- hashval_t hash;
-};
-
-/* Compare two type, hashvalue pairs. */
-
-static int
-type_hash_pair_compare (const void *p1_, const void *p2_)
-{
- const struct type_hash_pair *p1 = (const struct type_hash_pair *) p1_;
- const struct type_hash_pair *p2 = (const struct type_hash_pair *) p2_;
- if (p1->hash < p2->hash)
- return -1;
- else if (p1->hash > p2->hash)
- return 1;
- return 0;
-}
-
-/* Return a hash value for gimple type TYPE combined with VAL.
- SCCSTACK, SCCSTATE and SCCSTATE_OBSTACK are state for the DFS walk done.
-
- To hash a type we end up hashing in types that are reachable.
- Through pointers we can end up with cycles which messes up the
- required property that we need to compute the same hash value
- for structurally equivalent types. To avoid this we have to
- hash all types in a cycle (the SCC) in a commutative way. The
- easiest way is to not mix in the hashes of the SCC members at
- all. To make this work we have to delay setting the hash
- values of the SCC until it is complete. */
-
-static hashval_t
-iterative_hash_gimple_type (tree type, hashval_t val,
- VEC(tree, heap) **sccstack,
- struct pointer_map_t *sccstate,
- struct obstack *sccstate_obstack)
-{
- hashval_t v;
- void **slot;
- struct sccs *state;
-
- /* Not visited during this DFS walk. */
- gcc_checking_assert (!pointer_map_contains (sccstate, type));
- state = XOBNEW (sccstate_obstack, struct sccs);
- *pointer_map_insert (sccstate, type) = state;
-
- VEC_safe_push (tree, heap, *sccstack, type);
- state->dfsnum = next_dfs_num++;
- state->low = state->dfsnum;
- state->on_sccstack = true;
-
- /* Combine a few common features of types so that types are grouped into
- smaller sets; when searching for existing matching types to merge,
- only existing types having the same features as the new type will be
- checked. */
- v = iterative_hash_name (TYPE_NAME (type), 0);
- if (TYPE_NAME (type)
- && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
- && DECL_CONTEXT (TYPE_NAME (type))
- && TYPE_P (DECL_CONTEXT (TYPE_NAME (type))))
- v = visit (DECL_CONTEXT (TYPE_NAME (type)), state, v,
- sccstack, sccstate, sccstate_obstack);
- v = iterative_hash_hashval_t (TREE_CODE (type), v);
- v = iterative_hash_hashval_t (TYPE_QUALS (type), v);
- v = iterative_hash_hashval_t (TREE_ADDRESSABLE (type), v);
-
- /* Do not hash the type's size, as this will cause differences in
- hash values for the complete vs. the incomplete type variant. */
-
- /* Incorporate common features of numerical types. */
- if (INTEGRAL_TYPE_P (type)
- || SCALAR_FLOAT_TYPE_P (type)
- || FIXED_POINT_TYPE_P (type))
- {
- v = iterative_hash_hashval_t (TYPE_PRECISION (type), v);
- v = iterative_hash_hashval_t (TYPE_MODE (type), v);
- v = iterative_hash_hashval_t (TYPE_UNSIGNED (type), v);
- }
-
- /* For pointer and reference types, fold in information about the type
- pointed to. */
- if (POINTER_TYPE_P (type))
- v = visit (TREE_TYPE (type), state, v,
- sccstack, sccstate, sccstate_obstack);
-
- /* For integer types hash the types min/max values and the string flag. */
- if (TREE_CODE (type) == INTEGER_TYPE)
- {
- /* OMP lowering can introduce error_mark_node in place of
- random local decls in types. */
- if (TYPE_MIN_VALUE (type) != error_mark_node)
- v = iterative_hash_expr (TYPE_MIN_VALUE (type), v);
- if (TYPE_MAX_VALUE (type) != error_mark_node)
- v = iterative_hash_expr (TYPE_MAX_VALUE (type), v);
- v = iterative_hash_hashval_t (TYPE_STRING_FLAG (type), v);
- }
-
- /* For array types hash the domain and the string flag. */
- if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type))
- {
- v = iterative_hash_hashval_t (TYPE_STRING_FLAG (type), v);
- v = visit (TYPE_DOMAIN (type), state, v,
- sccstack, sccstate, sccstate_obstack);
- }
-
- /* Recurse for aggregates with a single element type. */
- if (TREE_CODE (type) == ARRAY_TYPE
- || TREE_CODE (type) == COMPLEX_TYPE
- || TREE_CODE (type) == VECTOR_TYPE)
- v = visit (TREE_TYPE (type), state, v,
- sccstack, sccstate, sccstate_obstack);
-
- /* Incorporate function return and argument types. */
- if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
- {
- unsigned na;
- tree p;
-
- /* For method types also incorporate their parent class. */
- if (TREE_CODE (type) == METHOD_TYPE)
- v = visit (TYPE_METHOD_BASETYPE (type), state, v,
- sccstack, sccstate, sccstate_obstack);
-
- /* Check result and argument types. */
- v = visit (TREE_TYPE (type), state, v,
- sccstack, sccstate, sccstate_obstack);
- for (p = TYPE_ARG_TYPES (type), na = 0; p; p = TREE_CHAIN (p))
- {
- v = visit (TREE_VALUE (p), state, v,
- sccstack, sccstate, sccstate_obstack);
- na++;
- }
-
- v = iterative_hash_hashval_t (na, v);
- }
-
- if (RECORD_OR_UNION_TYPE_P (type))
- {
- unsigned nf;
- tree f;
-
- for (f = TYPE_FIELDS (type), nf = 0; f; f = TREE_CHAIN (f))
- {
- v = iterative_hash_name (DECL_NAME (f), v);
- v = visit (TREE_TYPE (f), state, v,
- sccstack, sccstate, sccstate_obstack);
- nf++;
- }
-
- v = iterative_hash_hashval_t (nf, v);
- }
-
- /* Record hash for us. */
- state->u.hash = v;
-
- /* See if we found an SCC. */
- if (state->low == state->dfsnum)
- {
- tree x;
- struct tree_int_map *m;
-
- /* Pop off the SCC and set its hash values. */
- x = VEC_pop (tree, *sccstack);
- /* Optimize SCC size one. */
- if (x == type)
- {
- state->on_sccstack = false;
- m = ggc_alloc_cleared_tree_int_map ();
- m->base.from = x;
- m->to = v;
- slot = htab_find_slot (type_hash_cache, m, INSERT);
- gcc_assert (!*slot);
- *slot = (void *) m;
- }
- else
- {
- struct sccs *cstate;
- unsigned first, i, size, j;
- struct type_hash_pair *pairs;
- /* Pop off the SCC and build an array of type, hash pairs. */
- first = VEC_length (tree, *sccstack) - 1;
- while (VEC_index (tree, *sccstack, first) != type)
- --first;
- size = VEC_length (tree, *sccstack) - first + 1;
- pairs = XALLOCAVEC (struct type_hash_pair, size);
- i = 0;
- cstate = (struct sccs *)*pointer_map_contains (sccstate, x);
- cstate->on_sccstack = false;
- pairs[i].type = x;
- pairs[i].hash = cstate->u.hash;
- do
- {
- x = VEC_pop (tree, *sccstack);
- cstate = (struct sccs *)*pointer_map_contains (sccstate, x);
- cstate->on_sccstack = false;
- ++i;
- pairs[i].type = x;
- pairs[i].hash = cstate->u.hash;
- }
- while (x != type);
- gcc_assert (i + 1 == size);
- /* Sort the arrays of type, hash pairs so that when we mix in
- all members of the SCC the hash value becomes independent of
- the order we visited the SCC. Disregard hashes equal to
- the hash of the type we mix into because we cannot guarantee
- a stable sort for those across different TUs. */
- qsort (pairs, size, sizeof (struct type_hash_pair),
- type_hash_pair_compare);
- for (i = 0; i < size; ++i)
- {
- hashval_t hash;
- m = ggc_alloc_cleared_tree_int_map ();
- m->base.from = pairs[i].type;
- hash = pairs[i].hash;
- /* Skip same hashes. */
- for (j = i + 1; j < size && pairs[j].hash == pairs[i].hash; ++j)
- ;
- for (; j < size; ++j)
- hash = iterative_hash_hashval_t (pairs[j].hash, hash);
- for (j = 0; pairs[j].hash != pairs[i].hash; ++j)
- hash = iterative_hash_hashval_t (pairs[j].hash, hash);
- m->to = hash;
- if (pairs[i].type == type)
- v = hash;
- slot = htab_find_slot (type_hash_cache, m, INSERT);
- gcc_assert (!*slot);
- *slot = (void *) m;
- }
- }
- }
-
- return iterative_hash_hashval_t (v, val);
-}
-
-
-/* Returns a hash value for P (assumed to be a type). The hash value
- is computed using some distinguishing features of the type. Note
- that we cannot use pointer hashing here as we may be dealing with
- two distinct instances of the same type.
-
- This function should produce the same hash value for two compatible
- types according to gimple_types_compatible_p. */
-
-static hashval_t
-gimple_type_hash (const void *p)
-{
- const_tree t = (const_tree) p;
- VEC(tree, heap) *sccstack = NULL;
- struct pointer_map_t *sccstate;
- struct obstack sccstate_obstack;
- hashval_t val;
- void **slot;
- struct tree_int_map m;
-
- if (type_hash_cache == NULL)
- type_hash_cache = htab_create_ggc (512, tree_int_map_hash,
- tree_int_map_eq, NULL);
-
- m.base.from = CONST_CAST_TREE (t);
- if ((slot = htab_find_slot (type_hash_cache, &m, NO_INSERT))
- && *slot)
- return iterative_hash_hashval_t (((struct tree_int_map *) *slot)->to, 0);
-
- /* Perform a DFS walk and pre-hash all reachable types. */
- next_dfs_num = 1;
- sccstate = pointer_map_create ();
- gcc_obstack_init (&sccstate_obstack);
- val = iterative_hash_gimple_type (CONST_CAST_TREE (t), 0,
- &sccstack, sccstate, &sccstate_obstack);
- VEC_free (tree, heap, sccstack);
- pointer_map_destroy (sccstate);
- obstack_free (&sccstate_obstack, NULL);
-
- return val;
-}
-
/* Return a hash value for gimple type TYPE combined with VAL.
The hash value returned is equal for types considered compatible
@@ -4244,85 +3194,8 @@ gimple_canonical_type_hash (const void *p)
}
-/* Returns nonzero if P1 and P2 are equal. */
-
-static int
-gimple_type_eq (const void *p1, const void *p2)
-{
- const_tree t1 = (const_tree) p1;
- const_tree t2 = (const_tree) p2;
- return gimple_types_compatible_p (CONST_CAST_TREE (t1),
- CONST_CAST_TREE (t2));
-}
-/* Worker for gimple_register_type.
- Register type T in the global type table gimple_types.
- When REGISTERING_MV is false first recurse for the main variant of T. */
-
-static tree
-gimple_register_type_1 (tree t, bool registering_mv)
-{
- void **slot;
- gimple_type_leader_entry *leader;
-
- /* If we registered this type before return the cached result. */
- leader = &gimple_type_leader[TYPE_UID (t) % GIMPLE_TYPE_LEADER_SIZE];
- if (leader->type == t)
- return leader->leader;
-
- /* Always register the main variant first. This is important so we
- pick up the non-typedef variants as canonical, otherwise we'll end
- up taking typedef ids for structure tags during comparison.
- It also makes sure that main variants will be merged to main variants.
- As we are operating on a possibly partially fixed up type graph
- do not bother to recurse more than once, otherwise we may end up
- walking in circles.
- If we are registering a main variant it will either remain its
- own main variant or it will be merged to something else in which
- case we do not care for the main variant leader. */
- if (!registering_mv
- && TYPE_MAIN_VARIANT (t) != t)
- gimple_register_type_1 (TYPE_MAIN_VARIANT (t), true);
-
- /* See if we already have an equivalent type registered. */
- slot = htab_find_slot (gimple_types, t, INSERT);
- if (*slot
- && *(tree *)slot != t)
- {
- tree new_type = (tree) *((tree *) slot);
- leader->type = t;
- leader->leader = new_type;
- return new_type;
- }
-
- /* If not, insert it to the cache and the hash. */
- leader->type = t;
- leader->leader = t;
- *slot = (void *) t;
- return t;
-}
-
-/* Register type T in the global type table gimple_types.
- If another type T', compatible with T, already existed in
- gimple_types then return T', otherwise return T. This is used by
- LTO to merge identical types read from different TUs. */
-
-tree
-gimple_register_type (tree t)
-{
- gcc_assert (TYPE_P (t));
-
- if (!gimple_type_leader)
- gimple_type_leader = ggc_alloc_cleared_vec_gimple_type_leader_entry_s
- (GIMPLE_TYPE_LEADER_SIZE);
-
- if (gimple_types == NULL)
- gimple_types = htab_create_ggc (16381, gimple_type_hash, gimple_type_eq, 0);
-
- return gimple_register_type_1 (t, false);
-}
-
/* The TYPE_CANONICAL merging machinery. It should closely resemble
the middle-end types_compatible_p function. It needs to avoid
claiming types are different for types that should be treated
@@ -4595,48 +3468,28 @@ gimple_register_canonical_type (tree t)
/* Show statistics on references to the global type table gimple_types. */
void
-print_gimple_types_stats (void)
-{
- if (gimple_types)
- fprintf (stderr, "GIMPLE type table: size %ld, %ld elements, "
- "%ld searches, %ld collisions (ratio: %f)\n",
- (long) htab_size (gimple_types),
- (long) htab_elements (gimple_types),
- (long) gimple_types->searches,
- (long) gimple_types->collisions,
- htab_collisions (gimple_types));
- else
- fprintf (stderr, "GIMPLE type table is empty\n");
- if (type_hash_cache)
- fprintf (stderr, "GIMPLE type hash table: size %ld, %ld elements, "
- "%ld searches, %ld collisions (ratio: %f)\n",
- (long) htab_size (type_hash_cache),
- (long) htab_elements (type_hash_cache),
- (long) type_hash_cache->searches,
- (long) type_hash_cache->collisions,
- htab_collisions (type_hash_cache));
- else
- fprintf (stderr, "GIMPLE type hash table is empty\n");
+print_gimple_types_stats (const char *pfx)
+{
if (gimple_canonical_types)
- fprintf (stderr, "GIMPLE canonical type table: size %ld, %ld elements, "
- "%ld searches, %ld collisions (ratio: %f)\n",
+ fprintf (stderr, "[%s] GIMPLE canonical type table: size %ld, "
+ "%ld elements, %ld searches, %ld collisions (ratio: %f)\n", pfx,
(long) htab_size (gimple_canonical_types),
(long) htab_elements (gimple_canonical_types),
(long) gimple_canonical_types->searches,
(long) gimple_canonical_types->collisions,
htab_collisions (gimple_canonical_types));
else
- fprintf (stderr, "GIMPLE canonical type table is empty\n");
+ fprintf (stderr, "[%s] GIMPLE canonical type table is empty\n", pfx);
if (canonical_type_hash_cache)
- fprintf (stderr, "GIMPLE canonical type hash table: size %ld, %ld elements, "
- "%ld searches, %ld collisions (ratio: %f)\n",
+ fprintf (stderr, "[%s] GIMPLE canonical type hash table: size %ld, "
+ "%ld elements, %ld searches, %ld collisions (ratio: %f)\n", pfx,
(long) htab_size (canonical_type_hash_cache),
(long) htab_elements (canonical_type_hash_cache),
(long) canonical_type_hash_cache->searches,
(long) canonical_type_hash_cache->collisions,
htab_collisions (canonical_type_hash_cache));
else
- fprintf (stderr, "GIMPLE canonical type hash table is empty\n");
+ fprintf (stderr, "[%s] GIMPLE canonical type hash table is empty\n", pfx);
}
/* Free the gimple type hashtables used for LTO type merging. */
@@ -4644,36 +3497,16 @@ print_gimple_types_stats (void)
void
free_gimple_type_tables (void)
{
- /* Last chance to print stats for the tables. */
- if (flag_lto_report)
- print_gimple_types_stats ();
-
- if (gimple_types)
- {
- htab_delete (gimple_types);
- gimple_types = NULL;
- }
if (gimple_canonical_types)
{
htab_delete (gimple_canonical_types);
gimple_canonical_types = NULL;
}
- if (type_hash_cache)
- {
- htab_delete (type_hash_cache);
- type_hash_cache = NULL;
- }
if (canonical_type_hash_cache)
{
htab_delete (canonical_type_hash_cache);
canonical_type_hash_cache = NULL;
}
- if (type_pair_cache)
- {
- free (type_pair_cache);
- type_pair_cache = NULL;
- }
- gimple_type_leader = NULL;
}
diff --git a/gcc/gimple.h b/gcc/gimple.h
index 50fc143278b..c95c8f44cf8 100644
--- a/gcc/gimple.h
+++ b/gcc/gimple.h
@@ -887,9 +887,8 @@ extern bool is_gimple_call_addr (tree);
extern void recalculate_side_effects (tree);
extern bool gimple_compare_field_offset (tree, tree);
-extern tree gimple_register_type (tree);
extern tree gimple_register_canonical_type (tree);
-extern void print_gimple_types_stats (void);
+extern void print_gimple_types_stats (const char *);
extern void free_gimple_type_tables (void);
extern tree gimple_unsigned_type (tree);
extern tree gimple_signed_type (tree);
diff --git a/gcc/go/gofrontend/expressions.cc b/gcc/go/gofrontend/expressions.cc
index b66a193afee..892c561d6a3 100644
--- a/gcc/go/gofrontend/expressions.cc
+++ b/gcc/go/gofrontend/expressions.cc
@@ -182,21 +182,22 @@ Expression::convert_for_assignment(Translate_context* context, Type* lhs_type,
VEC(constructor_elt,gc)* init = VEC_alloc(constructor_elt, gc, 3);
- constructor_elt* elt = VEC_quick_push(constructor_elt, init, NULL);
+ constructor_elt empty = {NULL, NULL};
+ constructor_elt* elt = VEC_quick_push(constructor_elt, init, empty);
tree field = TYPE_FIELDS(lhs_type_tree);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)),
"__values") == 0);
elt->index = field;
elt->value = fold_convert(TREE_TYPE(field), null_pointer_node);
- elt = VEC_quick_push(constructor_elt, init, NULL);
+ elt = VEC_quick_push(constructor_elt, init, empty);
field = DECL_CHAIN(field);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)),
"__count") == 0);
elt->index = field;
elt->value = fold_convert(TREE_TYPE(field), integer_zero_node);
- elt = VEC_quick_push(constructor_elt, init, NULL);
+ elt = VEC_quick_push(constructor_elt, init, empty);
field = DECL_CHAIN(field);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)),
"__capacity") == 0);
@@ -315,7 +316,8 @@ Expression::convert_type_to_interface(Translate_context* context,
VEC(constructor_elt,gc)* init = VEC_alloc(constructor_elt, gc, 2);
- constructor_elt* elt = VEC_quick_push(constructor_elt, init, NULL);
+ constructor_elt empty = {NULL, NULL};
+ constructor_elt* elt = VEC_quick_push(constructor_elt, init, empty);
tree field = TYPE_FIELDS(lhs_type_tree);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)),
(lhs_is_empty ? "__type_descriptor" : "__methods")) == 0);
@@ -323,7 +325,7 @@ Expression::convert_type_to_interface(Translate_context* context,
elt->value = fold_convert_loc(location.gcc_location(), TREE_TYPE(field),
first_field_value);
- elt = VEC_quick_push(constructor_elt, init, NULL);
+ elt = VEC_quick_push(constructor_elt, init, empty);
field = DECL_CHAIN(field);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)), "__object") == 0);
elt->index = field;
@@ -439,7 +441,8 @@ Expression::convert_interface_to_interface(Translate_context* context,
VEC(constructor_elt,gc)* init = VEC_alloc(constructor_elt, gc, 2);
- constructor_elt* elt = VEC_quick_push(constructor_elt, init, NULL);
+ constructor_elt empty = {NULL, NULL};
+ constructor_elt* elt = VEC_quick_push(constructor_elt, init, empty);
tree field = TYPE_FIELDS(lhs_type_tree);
elt->index = field;
@@ -502,7 +505,7 @@ Expression::convert_interface_to_interface(Translate_context* context,
// The second field is simply the object pointer.
- elt = VEC_quick_push(constructor_elt, init, NULL);
+ elt = VEC_quick_push(constructor_elt, init, empty);
field = DECL_CHAIN(field);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)), "__object") == 0);
elt->index = field;
@@ -9959,20 +9962,21 @@ Array_index_expression::do_get_tree(Translate_context* context)
VEC(constructor_elt,gc)* init = VEC_alloc(constructor_elt, gc, 3);
- constructor_elt* elt = VEC_quick_push(constructor_elt, init, NULL);
+ constructor_elt empty = {NULL, NULL};
+ constructor_elt* elt = VEC_quick_push(constructor_elt, init, empty);
tree field = TYPE_FIELDS(struct_tree);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)), "__values") == 0);
elt->index = field;
elt->value = value_pointer;
- elt = VEC_quick_push(constructor_elt, init, NULL);
+ elt = VEC_quick_push(constructor_elt, init, empty);
field = DECL_CHAIN(field);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)), "__count") == 0);
elt->index = field;
elt->value = fold_convert_loc(loc.gcc_location(), TREE_TYPE(field),
result_length_tree);
- elt = VEC_quick_push(constructor_elt, init, NULL);
+ elt = VEC_quick_push(constructor_elt, init, empty);
field = DECL_CHAIN(field);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)), "__capacity") == 0);
elt->index = field;
@@ -11355,7 +11359,8 @@ Struct_construction_expression::do_get_tree(Translate_context* context)
if (val == error_mark_node || TREE_TYPE(val) == error_mark_node)
return error_mark_node;
- constructor_elt* elt = VEC_quick_push(constructor_elt, elts, NULL);
+ constructor_elt empty = {NULL, NULL};
+ constructor_elt* elt = VEC_quick_push(constructor_elt, elts, empty);
elt->index = field;
elt->value = val;
if (!TREE_CONSTANT(val))
@@ -11583,7 +11588,8 @@ Array_construction_expression::get_constructor_tree(Translate_context* context,
{
if (this->indexes_ != NULL)
go_assert(pi != this->indexes_->end());
- constructor_elt* elt = VEC_quick_push(constructor_elt, values, NULL);
+ constructor_elt empty = {NULL, NULL};
+ constructor_elt* elt = VEC_quick_push(constructor_elt, values, empty);
if (this->indexes_ == NULL)
elt->index = size_int(i);
@@ -11793,7 +11799,8 @@ Open_array_construction_expression::do_get_tree(Translate_context* context)
if (constructor_type == error_mark_node)
return error_mark_node;
VEC(constructor_elt,gc)* vec = VEC_alloc(constructor_elt, gc, 1);
- constructor_elt* elt = VEC_quick_push(constructor_elt, vec, NULL);
+ constructor_elt empty = {NULL, NULL};
+ constructor_elt* elt = VEC_quick_push(constructor_elt, vec, empty);
elt->index = size_int(0);
Gogo* gogo = context->gogo();
Btype* btype = element_type->get_backend(gogo);
@@ -11886,19 +11893,20 @@ Open_array_construction_expression::do_get_tree(Translate_context* context)
VEC(constructor_elt,gc)* init = VEC_alloc(constructor_elt, gc, 3);
- constructor_elt* elt = VEC_quick_push(constructor_elt, init, NULL);
+ constructor_elt empty = {NULL, NULL};
+ constructor_elt* elt = VEC_quick_push(constructor_elt, init, empty);
tree field = TYPE_FIELDS(type_tree);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)), "__values") == 0);
elt->index = field;
elt->value = fold_convert(TREE_TYPE(field), space);
- elt = VEC_quick_push(constructor_elt, init, NULL);
+ elt = VEC_quick_push(constructor_elt, init, empty);
field = DECL_CHAIN(field);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)), "__count") == 0);
elt->index = field;
elt->value = fold_convert(TREE_TYPE(field), length_tree);
- elt = VEC_quick_push(constructor_elt, init, NULL);
+ elt = VEC_quick_push(constructor_elt, init, empty);
field = DECL_CHAIN(field);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)),"__capacity") == 0);
elt->index = field;
@@ -12102,7 +12110,8 @@ Map_construction_expression::do_get_tree(Translate_context* context)
VEC(constructor_elt,gc)* one = VEC_alloc(constructor_elt, gc, 2);
- constructor_elt* elt = VEC_quick_push(constructor_elt, one, NULL);
+ constructor_elt empty = {NULL, NULL};
+ constructor_elt* elt = VEC_quick_push(constructor_elt, one, empty);
elt->index = key_field;
tree val_tree = (*pv)->get_tree(context);
elt->value = Expression::convert_for_assignment(context, key_type,
@@ -12115,7 +12124,7 @@ Map_construction_expression::do_get_tree(Translate_context* context)
++pv;
- elt = VEC_quick_push(constructor_elt, one, NULL);
+ elt = VEC_quick_push(constructor_elt, one, empty);
elt->index = val_field;
val_tree = (*pv)->get_tree(context);
elt->value = Expression::convert_for_assignment(context, val_type,
@@ -12126,7 +12135,7 @@ Map_construction_expression::do_get_tree(Translate_context* context)
if (!TREE_CONSTANT(elt->value))
one_is_constant = false;
- elt = VEC_quick_push(constructor_elt, values, NULL);
+ elt = VEC_quick_push(constructor_elt, values, empty);
elt->index = size_int(i);
elt->value = build_constructor(struct_type, one);
if (one_is_constant)
diff --git a/gcc/go/gofrontend/gogo-tree.cc b/gcc/go/gofrontend/gogo-tree.cc
index c933d937596..9a181a344ad 100644
--- a/gcc/go/gofrontend/gogo-tree.cc
+++ b/gcc/go/gofrontend/gogo-tree.cc
@@ -354,7 +354,8 @@ Gogo::register_gc_vars(const std::vector<Named_object*>& var_gc,
{
VEC(constructor_elt,gc)* init = VEC_alloc(constructor_elt, gc, 2);
- constructor_elt* elt = VEC_quick_push(constructor_elt, init, NULL);
+ constructor_elt empty = {NULL, NULL};
+ constructor_elt* elt = VEC_quick_push(constructor_elt, init, empty);
tree field = TYPE_FIELDS(root_type);
elt->index = field;
Bvariable* bvar = (*p)->get_backend_variable(this, NULL);
@@ -362,12 +363,12 @@ Gogo::register_gc_vars(const std::vector<Named_object*>& var_gc,
go_assert(TREE_CODE(decl) == VAR_DECL);
elt->value = build_fold_addr_expr(decl);
- elt = VEC_quick_push(constructor_elt, init, NULL);
+ elt = VEC_quick_push(constructor_elt, init, empty);
field = DECL_CHAIN(field);
elt->index = field;
elt->value = DECL_SIZE_UNIT(decl);
- elt = VEC_quick_push(constructor_elt, roots_init, NULL);
+ elt = VEC_quick_push(constructor_elt, roots_init, empty);
elt->index = size_int(i);
elt->value = build_constructor(root_type, init);
}
@@ -376,17 +377,18 @@ Gogo::register_gc_vars(const std::vector<Named_object*>& var_gc,
VEC(constructor_elt,gc)* init = VEC_alloc(constructor_elt, gc, 2);
- constructor_elt* elt = VEC_quick_push(constructor_elt, init, NULL);
+ constructor_elt empty = {NULL, NULL};
+ constructor_elt* elt = VEC_quick_push(constructor_elt, init, empty);
tree field = TYPE_FIELDS(root_type);
elt->index = field;
elt->value = fold_convert(TREE_TYPE(field), null_pointer_node);
- elt = VEC_quick_push(constructor_elt, init, NULL);
+ elt = VEC_quick_push(constructor_elt, init, empty);
field = DECL_CHAIN(field);
elt->index = field;
elt->value = size_zero_node;
- elt = VEC_quick_push(constructor_elt, roots_init, NULL);
+ elt = VEC_quick_push(constructor_elt, roots_init, empty);
elt->index = size_int(i);
elt->value = build_constructor(root_type, init);
@@ -394,12 +396,12 @@ Gogo::register_gc_vars(const std::vector<Named_object*>& var_gc,
VEC(constructor_elt,gc)* root_list_init = VEC_alloc(constructor_elt, gc, 2);
- elt = VEC_quick_push(constructor_elt, root_list_init, NULL);
+ elt = VEC_quick_push(constructor_elt, root_list_init, empty);
field = TYPE_FIELDS(root_list_type);
elt->index = field;
elt->value = fold_convert(TREE_TYPE(field), null_pointer_node);
- elt = VEC_quick_push(constructor_elt, root_list_init, NULL);
+ elt = VEC_quick_push(constructor_elt, root_list_init, empty);
field = DECL_CHAIN(field);
elt->index = field;
elt->value = build_constructor(array_type, roots_init);
@@ -2029,7 +2031,8 @@ Gogo::go_string_constant_tree(const std::string& val)
VEC(constructor_elt, gc)* init = VEC_alloc(constructor_elt, gc, 2);
- constructor_elt* elt = VEC_quick_push(constructor_elt, init, NULL);
+ constructor_elt empty = {NULL, NULL};
+ constructor_elt* elt = VEC_quick_push(constructor_elt, init, empty);
tree field = TYPE_FIELDS(string_type);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)), "__data") == 0);
elt->index = field;
@@ -2037,7 +2040,7 @@ Gogo::go_string_constant_tree(const std::string& val)
elt->value = fold_convert(TREE_TYPE(field),
build_fold_addr_expr(str));
- elt = VEC_quick_push(constructor_elt, init, NULL);
+ elt = VEC_quick_push(constructor_elt, init, empty);
field = DECL_CHAIN(field);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)), "__length") == 0);
elt->index = field;
@@ -2089,7 +2092,8 @@ Gogo::slice_constructor(tree slice_type_tree, tree values, tree count,
tree field = TYPE_FIELDS(slice_type_tree);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)), "__values") == 0);
- constructor_elt* elt = VEC_quick_push(constructor_elt, init, NULL);
+ constructor_elt empty = {NULL, NULL};
+ constructor_elt* elt = VEC_quick_push(constructor_elt, init, empty);
elt->index = field;
go_assert(TYPE_MAIN_VARIANT(TREE_TYPE(field))
== TYPE_MAIN_VARIANT(TREE_TYPE(values)));
@@ -2104,13 +2108,13 @@ Gogo::slice_constructor(tree slice_type_tree, tree values, tree count,
field = DECL_CHAIN(field);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)), "__count") == 0);
- elt = VEC_quick_push(constructor_elt, init, NULL);
+ elt = VEC_quick_push(constructor_elt, init, empty);
elt->index = field;
elt->value = fold_convert(TREE_TYPE(field), count);
field = DECL_CHAIN(field);
go_assert(strcmp(IDENTIFIER_POINTER(DECL_NAME(field)), "__capacity") == 0);
- elt = VEC_quick_push(constructor_elt, init, NULL);
+ elt = VEC_quick_push(constructor_elt, init, empty);
elt->index = field;
elt->value = fold_convert(TREE_TYPE(field), capacity);
@@ -2170,7 +2174,8 @@ Gogo::interface_method_table_for_type(const Interface_type* interface,
count + 1);
// The first element is the type descriptor.
- constructor_elt* elt = VEC_quick_push(constructor_elt, pointers, NULL);
+ constructor_elt empty = {NULL, NULL};
+ constructor_elt* elt = VEC_quick_push(constructor_elt, pointers, empty);
elt->index = size_zero_node;
Type* td_type;
if (!is_pointer)
@@ -2204,7 +2209,7 @@ Gogo::interface_method_table_for_type(const Interface_type* interface,
go_unreachable();
fndecl = build_fold_addr_expr(fndecl);
- elt = VEC_quick_push(constructor_elt, pointers, NULL);
+ elt = VEC_quick_push(constructor_elt, pointers, empty);
elt->index = size_int(i);
elt->value = fold_convert(const_ptr_type_node, fndecl);
}
diff --git a/gcc/graphite-scop-detection.c b/gcc/graphite-scop-detection.c
index 408e6b2fb52..0ea9e6a473d 100644
--- a/gcc/graphite-scop-detection.c
+++ b/gcc/graphite-scop-detection.c
@@ -67,7 +67,7 @@ static gbb_type
get_bb_type (basic_block bb, struct loop *last_loop)
{
VEC (basic_block, heap) *dom;
- int nb_dom, nb_suc;
+ int nb_dom;
struct loop *loop = bb->loop_father;
/* Check, if we entry into a new loop. */
@@ -88,9 +88,7 @@ get_bb_type (basic_block bb, struct loop *last_loop)
if (nb_dom == 0)
return GBB_LAST;
- nb_suc = VEC_length (edge, bb->succs);
-
- if (nb_dom == 1 && nb_suc == 1)
+ if (nb_dom == 1 && single_succ_p (bb))
return GBB_SIMPLE;
return GBB_COND_HEADER;
@@ -145,7 +143,7 @@ move_sd_regions (VEC (sd_region, heap) **source,
int i;
FOR_EACH_VEC_ELT (sd_region, *source, i, s)
- VEC_safe_push (sd_region, heap, *target, s);
+ VEC_safe_push (sd_region, heap, *target, *s);
VEC_free (sd_region, heap, *source);
}
@@ -502,7 +500,7 @@ scopdet_basic_block_info (basic_block bb, loop_p outermost_loop,
sd_region open_scop;
open_scop.entry = bb;
open_scop.exit = exit_e->dest;
- VEC_safe_push (sd_region, heap, *scops, &open_scop);
+ VEC_safe_push (sd_region, heap, *scops, open_scop);
VEC_free (sd_region, heap, regions);
}
}
@@ -758,7 +756,7 @@ build_scops_1 (basic_block current, loop_p outermost_loop,
else if (in_scop && (sinfo.exits || sinfo.difficult))
{
open_scop.exit = current;
- VEC_safe_push (sd_region, heap, *scops, &open_scop);
+ VEC_safe_push (sd_region, heap, *scops, open_scop);
in_scop = false;
}
@@ -773,7 +771,7 @@ build_scops_1 (basic_block current, loop_p outermost_loop,
{
open_scop.exit = sinfo.exit;
gcc_assert (open_scop.exit);
- VEC_safe_push (sd_region, heap, *scops, &open_scop);
+ VEC_safe_push (sd_region, heap, *scops, open_scop);
}
result.exit = sinfo.exit;
@@ -1114,7 +1112,7 @@ print_graphite_scop_statistics (FILE* file, scop_p scop)
n_bbs++;
n_p_bbs += bb->count;
- if (VEC_length (edge, bb->succs) > 1)
+ if (EDGE_COUNT (bb->succs) > 1)
{
n_conditions++;
n_p_conditions += bb->count;
@@ -1207,7 +1205,7 @@ limit_scops (VEC (scop_p, heap) **scops)
&& contains_only_close_phi_nodes (open_scop.exit))
open_scop.exit = single_succ_edge (open_scop.exit)->dest;
- VEC_safe_push (sd_region, heap, regions, &open_scop);
+ VEC_safe_push (sd_region, heap, regions, open_scop);
}
}
@@ -1299,7 +1297,7 @@ canonicalize_loop_closed_ssa (loop_p loop)
bb = e->dest;
- if (VEC_length (edge, bb->preds) == 1)
+ if (single_pred_p (bb))
{
e = split_block_after_labels (bb);
make_close_phi_nodes_unique (e->src);
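
These graphite hunks replace raw edge-vector length checks with GCC's intent-revealing CFG helpers: EDGE_COUNT (bb->succs) for the count, and single_succ_p/single_pred_p for the common one-edge tests. A toy sketch of the same idea, with a stand-in block type instead of GCC's basic_block:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-in for a basic block's pred/succ edge vectors.  */
    typedef struct { int n_succs; int n_preds; } toy_bb;

    static int  edge_count_succs (const toy_bb *bb) { return bb->n_succs; }
    static bool single_succ_p (const toy_bb *bb) { return bb->n_succs == 1; }
    static bool single_pred_p (const toy_bb *bb) { return bb->n_preds == 1; }

    int
    main (void)
    {
      toy_bb bb = { 1, 2 };
      /* Prefer the intent-revealing predicate over comparing a raw
         vector length against 1, as the hunks above do.  */
      if (single_succ_p (&bb))
        printf ("simple block\n");
      if (edge_count_succs (&bb) > 1)
        printf ("condition header\n");
      printf ("single pred: %d\n", (int) single_pred_p (&bb));
      return 0;
    }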
diff --git a/gcc/graphite-sese-to-poly.c b/gcc/graphite-sese-to-poly.c
index ded38f5af1a..3a7b9101db9 100644
--- a/gcc/graphite-sese-to-poly.c
+++ b/gcc/graphite-sese-to-poly.c
@@ -1249,7 +1249,7 @@ build_sese_conditions_before (struct dom_walk_data *dw_data,
if (e->flags & EDGE_TRUE_VALUE)
VEC_safe_push (gimple, heap, *cases, stmt);
else
- VEC_safe_push (gimple, heap, *cases, (gimple) NULL);
+ VEC_safe_push (gimple, heap, *cases, NULL);
}
gbb = gbb_from_bb (bb);
diff --git a/gcc/graphite.c b/gcc/graphite.c
index 04e1da29118..0eb1ca191d5 100644
--- a/gcc/graphite.c
+++ b/gcc/graphite.c
@@ -97,7 +97,7 @@ print_global_statistics (FILE* file)
n_p_loops += bb->count;
}
- if (VEC_length (edge, bb->succs) > 1)
+ if (EDGE_COUNT (bb->succs) > 1)
{
n_conditions++;
n_p_conditions += bb->count;
@@ -149,7 +149,7 @@ print_graphite_scop_statistics (FILE* file, scop_p scop)
n_bbs++;
n_p_bbs += bb->count;
- if (VEC_length (edge, bb->succs) > 1)
+ if (EDGE_COUNT (bb->succs) > 1)
{
n_conditions++;
n_p_conditions += bb->count;
diff --git a/gcc/ipa-inline-analysis.c b/gcc/ipa-inline-analysis.c
index 6e00f9373b4..8e8348e31af 100644
--- a/gcc/ipa-inline-analysis.c
+++ b/gcc/ipa-inline-analysis.c
@@ -266,7 +266,7 @@ add_condition (struct inline_summary *summary, int operand_num,
new_cond.agg_contents = agg_contents;
new_cond.by_ref = by_ref;
new_cond.offset = offset;
- VEC_safe_push (condition, gc, summary->conds, &new_cond);
+ VEC_safe_push (condition, gc, summary->conds, new_cond);
return single_cond_predicate (i + predicate_first_dynamic_condition);
}
@@ -688,7 +688,7 @@ account_size_time (struct inline_summary *summary, int size, int time,
new_entry.size = size;
new_entry.time = time;
new_entry.predicate = *pred;
- VEC_safe_push (size_time_entry, gc, summary->entry, &new_entry);
+ VEC_safe_push (size_time_entry, gc, summary->entry, new_entry);
}
else
{
@@ -3582,7 +3582,7 @@ inline_read_section (struct lto_file_decl_data *file_data, const char *data,
c.by_ref = bp_unpack_value (&bp, 1);
if (c.agg_contents)
c.offset = streamer_read_uhwi (&ib);
- VEC_safe_push (condition, gc, info->conds, &c);
+ VEC_safe_push (condition, gc, info->conds, c);
}
count2 = streamer_read_uhwi (&ib);
gcc_assert (!info->entry);
@@ -3594,7 +3594,7 @@ inline_read_section (struct lto_file_decl_data *file_data, const char *data,
e.time = streamer_read_uhwi (&ib);
e.predicate = read_predicate (&ib);
- VEC_safe_push (size_time_entry, gc, info->entry, &e);
+ VEC_safe_push (size_time_entry, gc, info->entry, e);
}
p = read_predicate (&ib);
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index 966dbfabcdb..9729145b7a1 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -37,6 +37,7 @@ along with GCC; see the file COPYING3. If not see
#include "lto-streamer.h"
#include "data-streamer.h"
#include "tree-streamer.h"
+#include "params.h"
/* Intermediate information about a parameter that is only useful during the
@@ -1145,9 +1146,6 @@ get_ssa_def_if_simple_copy (tree rhs)
return rhs;
}
-/* TODO: Turn this into a PARAM. */
-#define IPA_MAX_AFF_JF_ITEMS 16
-
/* Simple linked list, describing known contents of an aggregate before a
   call. */
@@ -1327,8 +1325,8 @@ determine_known_aggregate_parts (gimple call, tree arg,
*p = n;
item_count++;
- if (const_count == IPA_MAX_AFF_JF_ITEMS
- || item_count == 2 * IPA_MAX_AFF_JF_ITEMS)
+ if (const_count == PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS)
+ || item_count == 2 * PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS))
break;
}
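
This hunk resolves the removed "TODO: Turn this into a PARAM" by swapping the hard-coded IPA_MAX_AFF_JF_ITEMS macro for PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS), so the aggregate-scan cap becomes user-tunable (via --param ipa-max-agg-items). A stand-alone sketch of the capping structure, with a toy getter in place of GCC's params machinery:

    #include <stdio.h>

    /* Toy stand-in for PARAM_VALUE (PARAM_IPA_MAX_AGG_ITEMS); in GCC the
       value comes from the params machinery and is user-tunable.  */
    static int
    param_ipa_max_agg_items (void)
    {
      return 16;
    }

    int
    main (void)
    {
      int const_count = 0, item_count = 0;
      for (int i = 0; i < 100; i++)
        {
          if (i % 2 == 0)        /* pretend every other item is constant */
            const_count++;
          item_count++;
          /* Stop once enough constant items were found, or once the walk
             has gone on twice that long without enough payoff.  */
          if (const_count == param_ipa_max_agg_items ()
              || item_count == 2 * param_ipa_max_agg_items ())
            break;
        }
      printf ("scanned %d items, %d constant\n", item_count, const_count);
      return 0;
    }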
@@ -1344,11 +1342,10 @@ determine_known_aggregate_parts (gimple call, tree arg,
{
if (list->constant)
{
- struct ipa_agg_jf_item *item;
- item = VEC_quick_push (ipa_agg_jf_item_t,
- jfunc->agg.items, NULL);
- item->offset = list->offset - arg_offset;
- item->value = list->constant;
+ struct ipa_agg_jf_item item;
+ item.offset = list->offset - arg_offset;
+ item.value = list->constant;
+ VEC_quick_push (ipa_agg_jf_item_t, jfunc->agg.items, item);
}
list = list->next;
}
@@ -2887,8 +2884,8 @@ ipa_modify_call_arguments (struct cgraph_edge *cs, gimple stmt,
unsigned HOST_WIDE_INT misalign;
get_pointer_alignment_1 (base, &align, &misalign);
- misalign += (double_int_sext (tree_to_double_int (off),
- TYPE_PRECISION (TREE_TYPE (off))).low
+ misalign += (tree_to_double_int (off)
+ .sext (TYPE_PRECISION (TREE_TYPE (off))).low
* BITS_PER_UNIT);
misalign = misalign & (align - 1);
if (misalign != 0)
@@ -3025,45 +3022,44 @@ ipa_combine_adjustments (ipa_parm_adjustment_vec inner,
if (n->remove_param)
removals++;
else
- VEC_quick_push (ipa_parm_adjustment_t, tmp, n);
+ VEC_quick_push (ipa_parm_adjustment_t, tmp, *n);
}
adjustments = VEC_alloc (ipa_parm_adjustment_t, heap, outlen + removals);
for (i = 0; i < outlen; i++)
{
- struct ipa_parm_adjustment *r;
+ struct ipa_parm_adjustment r;
struct ipa_parm_adjustment *out = &VEC_index (ipa_parm_adjustment_t,
outer, i);
struct ipa_parm_adjustment *in = &VEC_index (ipa_parm_adjustment_t, tmp,
out->base_index);
+ memset (&r, 0, sizeof (r));
gcc_assert (!in->remove_param);
if (out->remove_param)
{
if (!index_in_adjustments_multiple_times_p (in->base_index, tmp))
{
- r = VEC_quick_push (ipa_parm_adjustment_t, adjustments, NULL);
- memset (r, 0, sizeof (*r));
- r->remove_param = true;
+ r.remove_param = true;
+ VEC_quick_push (ipa_parm_adjustment_t, adjustments, r);
}
continue;
}
- r = VEC_quick_push (ipa_parm_adjustment_t, adjustments, NULL);
- memset (r, 0, sizeof (*r));
- r->base_index = in->base_index;
- r->type = out->type;
+ r.base_index = in->base_index;
+ r.type = out->type;
/* FIXME: Create nonlocal value too. */
if (in->copy_param && out->copy_param)
- r->copy_param = true;
+ r.copy_param = true;
else if (in->copy_param)
- r->offset = out->offset;
+ r.offset = out->offset;
else if (out->copy_param)
- r->offset = in->offset;
+ r.offset = in->offset;
else
- r->offset = in->offset + out->offset;
+ r.offset = in->offset + out->offset;
+ VEC_quick_push (ipa_parm_adjustment_t, adjustments, r);
}
for (i = 0; i < inlen; i++)
@@ -3072,7 +3068,7 @@ ipa_combine_adjustments (ipa_parm_adjustment_vec inner,
inner, i);
if (n->remove_param)
- VEC_quick_push (ipa_parm_adjustment_t, adjustments, n);
+ VEC_quick_push (ipa_parm_adjustment_t, adjustments, *n);
}
VEC_free (ipa_parm_adjustment_t, heap, tmp);
@@ -3240,11 +3236,10 @@ ipa_read_jump_function (struct lto_input_block *ib,
}
for (i = 0; i < count; i++)
{
- struct ipa_agg_jf_item *item = VEC_quick_push (ipa_agg_jf_item_t,
- jump_func->agg.items, NULL);
-
- item->offset = streamer_read_uhwi (ib);
- item->value = stream_read_tree (ib, data_in);
+ struct ipa_agg_jf_item item;
+ item.offset = streamer_read_uhwi (ib);
+ item.value = stream_read_tree (ib, data_in);
+ VEC_quick_push (ipa_agg_jf_item_t, jump_func->agg.items, item);
}
}
diff --git a/gcc/ipa-split.c b/gcc/ipa-split.c
index be1d2cce230..e1d1c4928e6 100644
--- a/gcc/ipa-split.c
+++ b/gcc/ipa-split.c
@@ -912,7 +912,7 @@ find_split_points (int overall_time, int overall_size)
first.set_ssa_names = 0;
first.used_ssa_names = 0;
first.bbs_visited = 0;
- VEC_safe_push (stack_entry, heap, stack, &first);
+ VEC_safe_push (stack_entry, heap, stack, first);
ENTRY_BLOCK_PTR->aux = (void *)(intptr_t)-1;
while (!VEC_empty (stack_entry, stack))
@@ -994,7 +994,7 @@ find_split_points (int overall_time, int overall_size)
new_entry.non_ssa_vars = BITMAP_ALLOC (NULL);
new_entry.can_split = true;
bitmap_set_bit (new_entry.bbs_visited, dest->index);
- VEC_safe_push (stack_entry, heap, stack, &new_entry);
+ VEC_safe_push (stack_entry, heap, stack, new_entry);
dest->aux = (void *)(intptr_t)VEC_length (stack_entry, stack);
}
/* Back edge found, record the earliest point. */
diff --git a/gcc/ira.c b/gcc/ira.c
index 691614e8d5a..ad0ae0a8e6e 100644
--- a/gcc/ira.c
+++ b/gcc/ira.c
@@ -789,7 +789,7 @@ setup_pressure_classes (void)
hard registers and movement between them is costly
(e.g. SPARC FPCC registers). We still should consider it
as a candidate for a pressure class. */
- && alloc_reg_class_subclasses[cl][0] != LIM_REG_CLASSES)
+ && alloc_reg_class_subclasses[cl][0] < cl)
{
/* Check that the moves between any hard registers of the
current class are not more expensive for a legal mode
diff --git a/gcc/java/class.c b/gcc/java/class.c
index f806cea1414..a89b83183b6 100644
--- a/gcc/java/class.c
+++ b/gcc/java/class.c
@@ -2198,9 +2198,10 @@ make_class_data (tree type)
for (i = 0; i < count; i++)
{
- constructor_elt *elt = VEC_quick_push (constructor_elt, v, NULL);
- elt->index = build_int_cst (sizetype, i);
- elt->value = build_int_cstu (byte_type_node, data[i]);
+ constructor_elt elt;
+ elt.index = build_int_cst (sizetype, i);
+ elt.value = build_int_cstu (byte_type_node, data[i]);
+ VEC_quick_push (constructor_elt, v, elt);
}
DECL_INITIAL (array) = build_constructor (type, v);
diff --git a/gcc/java/expr.c b/gcc/java/expr.c
index 0429c02ca34..8041cdd99c4 100644
--- a/gcc/java/expr.c
+++ b/gcc/java/expr.c
@@ -2296,14 +2296,13 @@ get_symbol_table_index (tree t, tree special,
{
method_entry *e;
unsigned i;
+ method_entry elem = {t, special};
FOR_EACH_VEC_ELT (method_entry, *symbol_table, i, e)
if (t == e->method && special == e->special)
goto done;
- e = VEC_safe_push (method_entry, gc, *symbol_table, NULL);
- e->method = t;
- e->special = special;
+ VEC_safe_push (method_entry, gc, *symbol_table, elem);
done:
return i + 1;
diff --git a/gcc/loop-iv.c b/gcc/loop-iv.c
index d31f0fa242d..145056e6d41 100644
--- a/gcc/loop-iv.c
+++ b/gcc/loop-iv.c
@@ -2295,7 +2295,7 @@ iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition,
desc->niter_expr = NULL_RTX;
desc->niter_max = 0;
if (loop->any_upper_bound
- && double_int_fits_in_uhwi_p (loop->nb_iterations_upper_bound))
+ && loop->nb_iterations_upper_bound.fits_uhwi ())
desc->niter_max = loop->nb_iterations_upper_bound.low;
cond = GET_CODE (condition);
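
Both double_int hunks reflect the type's conversion from free functions to member-style calls: double_int_sext (x, prec) becomes x.sext (prec), and double_int_fits_in_uhwi_p (x) becomes x.fits_uhwi (). A self-contained sketch of what those two operations compute, on a plain 64-bit value with hypothetical helpers rather than GCC's double_int:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Sign-extend the low PREC bits of X, the effect of .sext (prec).  */
    static int64_t
    sext (uint64_t x, unsigned prec)
    {
      uint64_t sign_bit = (uint64_t) 1 << (prec - 1);
      uint64_t mask = prec == 64 ? ~(uint64_t) 0 : (sign_bit << 1) - 1;
      x &= mask;
      return (int64_t) ((x ^ sign_bit) - sign_bit);
    }

    /* Toy analogue of .fits_uhwi (): the value fits if all bits above
       the (here 32-bit) unsigned host wide int are zero.  */
    static bool
    fits_u32 (uint64_t x)
    {
      return (x >> 32) == 0;
    }

    int
    main (void)
    {
      printf ("%lld\n", (long long) sext (0xFF, 8));    /* -1 */
      printf ("%lld\n", (long long) sext (0x7F, 8));    /* 127 */
      printf ("%d\n", (int) fits_u32 (0x1FFFFFFFFULL)); /* 0 */
      return 0;
    }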
diff --git a/gcc/lto-cgraph.c b/gcc/lto-cgraph.c
index c08d0998779..24222883da4 100644
--- a/gcc/lto-cgraph.c
+++ b/gcc/lto-cgraph.c
@@ -80,9 +80,6 @@ lto_symtab_encoder_new (void)
lto_symtab_encoder_t encoder = XCNEW (struct lto_symtab_encoder_d);
encoder->map = pointer_map_create ();
encoder->nodes = NULL;
- encoder->body = pointer_set_create ();
- encoder->initializer = pointer_set_create ();
- encoder->in_partition = pointer_set_create ();
return encoder;
}
@@ -92,11 +89,8 @@ lto_symtab_encoder_new (void)
void
lto_symtab_encoder_delete (lto_symtab_encoder_t encoder)
{
- VEC_free (symtab_node, heap, encoder->nodes);
+ VEC_free (lto_encoder_entry, heap, encoder->nodes);
pointer_map_destroy (encoder->map);
- pointer_set_destroy (encoder->body);
- pointer_set_destroy (encoder->initializer);
- pointer_set_destroy (encoder->in_partition);
free (encoder);
}
@@ -113,29 +107,57 @@ lto_symtab_encoder_encode (lto_symtab_encoder_t encoder,
void **slot;
slot = pointer_map_contains (encoder->map, node);
- if (!slot)
+ if (!slot || !*slot)
{
- ref = VEC_length (symtab_node, encoder->nodes);
- slot = pointer_map_insert (encoder->map, node);
- *slot = (void *) (intptr_t) ref;
- VEC_safe_push (symtab_node, heap, encoder->nodes, node);
+ lto_encoder_entry entry = {node, false, false, false};
+ ref = VEC_length (lto_encoder_entry, encoder->nodes);
+ if (!slot)
+ slot = pointer_map_insert (encoder->map, node);
+ *slot = (void *) (intptr_t) (ref + 1);
+ VEC_safe_push (lto_encoder_entry, heap, encoder->nodes, entry);
}
else
- ref = (int) (intptr_t) *slot;
+ ref = (size_t) *slot - 1;
return ref;
}
+/* Remove NODE from encoder. */
-/* Look up NODE in encoder. Return NODE's reference if it has been encoded
- or LCC_NOT_FOUND if it is not there. */
-
-int
-lto_symtab_encoder_lookup (lto_symtab_encoder_t encoder,
- symtab_node node)
+bool
+lto_symtab_encoder_delete_node (lto_symtab_encoder_t encoder,
+ symtab_node node)
{
- void **slot = pointer_map_contains (encoder->map, node);
- return (slot ? (int) (intptr_t) *slot : LCC_NOT_FOUND);
+ void **slot, **last_slot;
+ int index;
+ lto_encoder_entry last_node;
+
+ slot = pointer_map_contains (encoder->map, node);
+ if (slot == NULL || !*slot)
+ return false;
+
+ index = (size_t) *slot - 1;
+ gcc_checking_assert (VEC_index (lto_encoder_entry,
+ encoder->nodes, index).node
+ == node);
+
+  /* Remove from the vector.  We do this by swapping NODE's entry with the
+     last element of the vector. */
+ last_node = VEC_pop (lto_encoder_entry, encoder->nodes);
+ if (last_node.node != node)
+ {
+ last_slot = pointer_map_contains (encoder->map, last_node.node);
+ gcc_checking_assert (last_slot && *last_slot);
+ *last_slot = (void *)(size_t) (index + 1);
+
+ /* Move the last element to the original spot of NODE. */
+ VEC_replace (lto_encoder_entry, encoder->nodes, index,
+ last_node);
+ }
+
+ /* Remove element from hash table. */
+ *slot = NULL;
+ return true;
}
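
lto_symtab_encoder_delete_node removes an entry in O(1): it pops the vector's last element into the deleted entry's slot and patches the moved node's map entry. Note the map stores index + 1, so a NULL slot can mean "absent" (matching the reworked lto_symtab_encoder_encode and lookup). A minimal sketch of the same swap-with-last scheme under that invariant, with toy arrays standing in for the encoder's vector and pointer map:

    #include <assert.h>
    #include <stdio.h>

    #define N 4

    /* Toy encoder: names[] is the vector; slot[] plays the pointer map,
       storing index + 1 so that 0 means "not present".  */
    static const char *names[N];
    static int len;
    static int slot[N];          /* keyed by a small node id */

    static void
    encode (int id, const char *name)
    {
      if (slot[id])
        return;                  /* already encoded */
      names[len] = name;
      slot[id] = ++len;          /* store index + 1 */
    }

    static void
    delete_node (int id)
    {
      int index = slot[id] - 1;
      assert (slot[id]);
      /* Pop the last element into the vacated slot, then fix the moved
         element's map entry.  */
      names[index] = names[--len];
      for (int j = 0; j < N; j++) /* toy reverse lookup of the moved id */
        if (slot[j] == len + 1)
          slot[j] = index + 1;
      slot[id] = 0;
    }

    int
    main (void)
    {
      encode (0, "a"); encode (1, "b"); encode (2, "c");
      delete_node (0);           /* "c" moves into index 0 */
      for (int i = 0; i < len; i++)
        printf ("%s\n", names[i]);
      return 0;
    }

The real encoder fixes up the moved node with a direct pointer_map lookup of last_node.node; the toy scan here just keeps the sketch self-contained.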
@@ -145,7 +167,8 @@ bool
lto_symtab_encoder_encode_body_p (lto_symtab_encoder_t encoder,
struct cgraph_node *node)
{
- return pointer_set_contains (encoder->body, node);
+ int index = lto_symtab_encoder_lookup (encoder, (symtab_node)node);
+ return VEC_index (lto_encoder_entry, encoder->nodes, index).body;
}
/* Return TRUE if we should encode body of NODE (if any). */
@@ -154,7 +177,10 @@ static void
lto_set_symtab_encoder_encode_body (lto_symtab_encoder_t encoder,
struct cgraph_node *node)
{
- pointer_set_insert (encoder->body, node);
+ int index = lto_symtab_encoder_encode (encoder, (symtab_node)node);
+ gcc_checking_assert (VEC_index (lto_encoder_entry, encoder->nodes,
+ index).node == (symtab_node)node);
+ VEC_index (lto_encoder_entry, encoder->nodes, index).body = true;
}
/* Return TRUE if we should encode initializer of NODE (if any). */
@@ -163,7 +189,10 @@ bool
lto_symtab_encoder_encode_initializer_p (lto_symtab_encoder_t encoder,
struct varpool_node *node)
{
- return pointer_set_contains (encoder->initializer, node);
+ int index = lto_symtab_encoder_lookup (encoder, (symtab_node)node);
+ if (index == LCC_NOT_FOUND)
+ return false;
+ return VEC_index (lto_encoder_entry, encoder->nodes, index).initializer;
}
/* Return TRUE if we should encode initializer of NODE (if any). */
@@ -172,7 +201,8 @@ static void
lto_set_symtab_encoder_encode_initializer (lto_symtab_encoder_t encoder,
struct varpool_node *node)
{
- pointer_set_insert (encoder->initializer, node);
+ int index = lto_symtab_encoder_lookup (encoder, (symtab_node)node);
+ VEC_index (lto_encoder_entry, encoder->nodes, index).initializer = true;
}
/* Return TRUE if we should encode initializer of NODE (if any). */
@@ -181,7 +211,10 @@ bool
lto_symtab_encoder_in_partition_p (lto_symtab_encoder_t encoder,
symtab_node node)
{
- return pointer_set_contains (encoder->in_partition, node);
+ int index = lto_symtab_encoder_lookup (encoder, (symtab_node)node);
+ if (index == LCC_NOT_FOUND)
+ return false;
+ return VEC_index (lto_encoder_entry, encoder->nodes, index).in_partition;
}
/* Return TRUE if we should encode body of NODE (if any). */
@@ -190,8 +223,8 @@ void
lto_set_symtab_encoder_in_partition (lto_symtab_encoder_t encoder,
symtab_node node)
{
- lto_symtab_encoder_encode (encoder, (symtab_node)node);
- pointer_set_insert (encoder->in_partition, node);
+ int index = lto_symtab_encoder_encode (encoder, (symtab_node)node);
+ VEC_index (lto_encoder_entry, encoder->nodes, index).in_partition = true;
}
/* Output the cgraph EDGE to OB using ENCODER. */
@@ -555,38 +588,6 @@ output_profile_summary (struct lto_simple_output_block *ob)
streamer_write_uhwi_stream (ob->main_stream, 0);
}
-/* Add NODE into encoder as well as nodes it is cloned from.
- Do it in a way so clones appear first. */
-
-static void
-add_node_to (lto_symtab_encoder_t encoder, struct cgraph_node *node,
- bool include_body)
-{
- if (node->clone_of)
- add_node_to (encoder, node->clone_of, include_body);
- else if (include_body)
- lto_set_symtab_encoder_encode_body (encoder, node);
- lto_symtab_encoder_encode (encoder, (symtab_node)node);
-}
-
-/* Add all references in LIST to encoders. */
-
-static void
-add_references (lto_symtab_encoder_t encoder,
- struct ipa_ref_list *list)
-{
- int i;
- struct ipa_ref *ref;
- for (i = 0; ipa_ref_list_reference_iterate (list, i, ref); i++)
- if (symtab_function_p (ref->referred))
- add_node_to (encoder, ipa_ref_node (ref), false);
- else
- {
- struct varpool_node *vnode = ipa_ref_varpool_node (ref);
- lto_symtab_encoder_encode (encoder, (symtab_node)vnode);
- }
-}
-
/* Output all callees or indirect outgoing edges. EDGE must be the first such
edge. */
@@ -641,32 +642,72 @@ output_refs (lto_symtab_encoder_t encoder)
lto_destroy_simple_output_block (ob);
}
-/* Find out all cgraph and varpool nodes we want to encode in current unit
- and insert them to encoders. */
-void
-compute_ltrans_boundary (struct lto_out_decl_state *state,
- cgraph_node_set set, varpool_node_set vset)
+/* Add NODE into encoder as well as nodes it is cloned from.
+   Do it in a way so masters appear before their clones. */
+
+static void
+add_node_to (lto_symtab_encoder_t encoder, struct cgraph_node *node,
+ bool include_body)
+{
+ if (node->clone_of)
+ add_node_to (encoder, node->clone_of, include_body);
+ else if (include_body)
+ lto_set_symtab_encoder_encode_body (encoder, node);
+ lto_symtab_encoder_encode (encoder, (symtab_node)node);
+}
+
+/* Add all references in LIST to encoders. */
+
+static void
+add_references (lto_symtab_encoder_t encoder,
+ struct ipa_ref_list *list)
+{
+ int i;
+ struct ipa_ref *ref;
+ for (i = 0; ipa_ref_list_reference_iterate (list, i, ref); i++)
+ if (symtab_function_p (ref->referred))
+ add_node_to (encoder, ipa_ref_node (ref), false);
+ else
+ {
+ struct varpool_node *vnode = ipa_ref_varpool_node (ref);
+ lto_symtab_encoder_encode (encoder, (symtab_node)vnode);
+ }
+}
+
+/* Find all symbols we want to stream into a given partition and insert
+   them into the encoder.
+
+   The function actually replaces IN_ENCODER by a new one.  The reason is
+   that the streaming code needs a clone's origin to be streamed before
+   the clone itself.  This means that the nodes must be inserted in a
+   specific order, an order the earlier partitioning logic ignores. */
+
+lto_symtab_encoder_t
+compute_ltrans_boundary (lto_symtab_encoder_t in_encoder)
{
struct cgraph_node *node;
- cgraph_node_set_iterator csi;
- varpool_node_set_iterator vsi;
struct cgraph_edge *edge;
int i;
lto_symtab_encoder_t encoder;
+ lto_symtab_encoder_iterator lsei;
- encoder = state->symtab_node_encoder = lto_symtab_encoder_new ();
+ encoder = lto_symtab_encoder_new ();
- /* Go over all the nodes in SET and assign references. */
- for (csi = csi_start (set); !csi_end_p (csi); csi_next (&csi))
+ /* Go over all entries in the IN_ENCODER and duplicate them to
+ ENCODER. At the same time insert masters of clones so
+ every master appears before clone. */
+ for (lsei = lsei_start_function_in_partition (in_encoder);
+ !lsei_end_p (lsei); lsei_next_function_in_partition (&lsei))
{
- node = csi_node (csi);
+ node = lsei_cgraph_node (lsei);
add_node_to (encoder, node, true);
lto_set_symtab_encoder_in_partition (encoder, (symtab_node)node);
add_references (encoder, &node->symbol.ref_list);
}
- for (vsi = vsi_start (vset); !vsi_end_p (vsi); vsi_next (&vsi))
+ for (lsei = lsei_start_variable_in_partition (in_encoder);
+ !lsei_end_p (lsei); lsei_next_variable_in_partition (&lsei))
{
- struct varpool_node *vnode = vsi_node (vsi);
+ struct varpool_node *vnode = lsei_varpool_node (lsei);
gcc_assert (!vnode->alias || vnode->alias_of);
lto_set_symtab_encoder_in_partition (encoder, (symtab_node)vnode);
lto_set_symtab_encoder_encode_initializer (encoder, vnode);
@@ -689,20 +730,19 @@ compute_ltrans_boundary (struct lto_out_decl_state *state,
lto_set_symtab_encoder_encode_initializer (encoder, vnode);
add_references (encoder, &vnode->symbol.ref_list);
}
- else if (vnode->alias || vnode->alias_of)
- add_references (encoder, &vnode->symbol.ref_list);
}
}
/* Go over all the nodes again to include callees that are not in
SET. */
- for (csi = csi_start (set); !csi_end_p (csi); csi_next (&csi))
+ for (lsei = lsei_start_function_in_partition (encoder);
+ !lsei_end_p (lsei); lsei_next_function_in_partition (&lsei))
{
- node = csi_node (csi);
+ node = lsei_cgraph_node (lsei);
for (edge = node->callees; edge; edge = edge->next_callee)
{
struct cgraph_node *callee = edge->callee;
- if (!cgraph_node_in_set_p (callee, set))
+ if (!lto_symtab_encoder_in_partition_p (encoder, (symtab_node)callee))
{
/* We should have moved all the inlines. */
gcc_assert (!callee->global.inlined_to);
@@ -710,6 +750,8 @@ compute_ltrans_boundary (struct lto_out_decl_state *state,
}
}
}
+ lto_symtab_encoder_delete (in_encoder);
+ return encoder;
}
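
The rebuilt compute_ltrans_boundary exists because the streamer needs a clone's origin encoded before the clone itself; add_node_to guarantees that by recursing along the clone_of chain before encoding the node at hand. A minimal sketch of that ancestor-first insertion, with a toy node type in place of cgraph_node:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy clone chain: a node may record the node it was cloned from.  */
    struct node { const char *name; struct node *clone_of; bool encoded; };

    /* Mirror of add_node_to's trick: recurse to the chain's origin first,
       so every master is encoded before any of its clones.  */
    static void
    add_node_to (struct node *node)
    {
      if (node->clone_of)
        add_node_to (node->clone_of);
      if (!node->encoded)
        {
          node->encoded = true;
          printf ("encode %s\n", node->name);
        }
    }

    int
    main (void)
    {
      struct node master = { "master", NULL, false };
      struct node clone = { "clone", &master, false };
      add_node_to (&clone);      /* prints master before clone */
      return 0;
    }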
/* Output the part of the symtab in SET and VSET. */
diff --git a/gcc/lto-streamer.c b/gcc/lto-streamer.c
index 7649a78052a..51c6658ec92 100644
--- a/gcc/lto-streamer.c
+++ b/gcc/lto-streamer.c
@@ -180,12 +180,10 @@ lto_get_section_name (int section_type, const char *name, struct lto_file_decl_d
/* Show various memory usage statistics related to LTO. */
void
-print_lto_report (void)
+print_lto_report (const char *s)
{
- const char *s = (flag_lto) ? "LTO" : (flag_wpa) ? "WPA" : "LTRANS";
unsigned i;
- fprintf (stderr, "%s statistics\n", s);
fprintf (stderr, "[%s] # of input files: "
HOST_WIDE_INT_PRINT_UNSIGNED "\n", s, lto_stats.num_input_files);
@@ -197,9 +195,6 @@ print_lto_report (void)
HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
lto_stats.num_function_bodies);
- fprintf (stderr, "[%s] ", s);
- print_gimple_types_stats ();
-
for (i = 0; i < NUM_TREE_CODES; i++)
if (lto_stats.num_trees[i])
fprintf (stderr, "[%s] # of '%s' objects read: "
@@ -228,9 +223,9 @@ print_lto_report (void)
HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
lto_stats.num_output_files);
- fprintf (stderr, "[%s] # of output cgraph nodes: "
+ fprintf (stderr, "[%s] # of output symtab nodes: "
HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
- lto_stats.num_output_cgraph_nodes);
+ lto_stats.num_output_symtab_nodes);
fprintf (stderr, "[%s] # callgraph partitions: "
HOST_WIDE_INT_PRINT_UNSIGNED "\n", s,
diff --git a/gcc/lto-streamer.h b/gcc/lto-streamer.h
index bed408aef0a..abeaa4b2417 100644
--- a/gcc/lto-streamer.h
+++ b/gcc/lto-streamer.h
@@ -407,7 +407,7 @@ struct lto_asm_header
struct lto_stats_d
{
unsigned HOST_WIDE_INT num_input_cgraph_nodes;
- unsigned HOST_WIDE_INT num_output_cgraph_nodes;
+ unsigned HOST_WIDE_INT num_output_symtab_nodes;
unsigned HOST_WIDE_INT num_input_files;
unsigned HOST_WIDE_INT num_output_files;
unsigned HOST_WIDE_INT num_cgraph_partitions;
@@ -420,21 +420,29 @@ struct lto_stats_d
unsigned HOST_WIDE_INT num_uncompressed_il_bytes;
};
+/* Entry of LTO symtab encoder. */
+typedef struct
+{
+ symtab_node node;
+  /* Is the node in this partition (i.e. will the ltrans invocation for
+     this partition be responsible for outputting it)? */
+ unsigned int in_partition:1;
+ /* Do we encode body in this partition? */
+ unsigned int body:1;
+  /* Do we encode the initializer in this partition?
+     For example, read-only variable initializers are encoded to aid
+     constant folding even if they are not in the partition. */
+ unsigned int initializer:1;
+} lto_encoder_entry;
+
+DEF_VEC_O(lto_encoder_entry);
+DEF_VEC_ALLOC_O(lto_encoder_entry, heap);
+
/* Encoder data structure used to stream callgraph nodes. */
struct lto_symtab_encoder_d
{
- /* Map nodes to reference number. */
- struct pointer_map_t *map;
-
- /* Map reference number to node. */
- VEC(symtab_node,heap) *nodes;
-
- /* Map of nodes where we want to output body. */
- struct pointer_set_t *body;
- /* Map of nodes where we want to output initializer. */
- struct pointer_set_t *initializer;
- /* Map of nodes in this partition. */
- struct pointer_set_t *in_partition;
+ VEC(lto_encoder_entry,heap) *nodes;
+ pointer_map_t *map;
};
typedef struct lto_symtab_encoder_d *lto_symtab_encoder_t;
@@ -513,6 +521,18 @@ typedef struct lto_out_decl_state *lto_out_decl_state_ptr;
DEF_VEC_P(lto_out_decl_state_ptr);
DEF_VEC_ALLOC_P(lto_out_decl_state_ptr, heap);
+/* Compact representation of an index <-> resolution pair.  Unpacked into
+   a vector later. */
+struct res_pair
+{
+ ld_plugin_symbol_resolution_t res;
+ unsigned index;
+};
+typedef struct res_pair res_pair;
+
+DEF_VEC_O(res_pair);
+DEF_VEC_ALLOC_O(res_pair, heap);
+
/* One of these is allocated for each object file that being compiled
by lto. This structure contains the tables that are needed by the
serialized functions and ipa passes to connect themselves to the
@@ -548,7 +568,8 @@ struct GTY(()) lto_file_decl_data
unsigned HOST_WIDE_INT id;
/* Symbol resolutions for this file */
- VEC(ld_plugin_symbol_resolution_t,heap) * GTY((skip)) resolutions;
+ VEC(res_pair, heap) * GTY((skip)) respairs;
+ unsigned max_index;
struct gcov_ctr_summary GTY((skip)) profile_info;
};
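
res_pair keeps (index, resolution) pairs compact while they are read; max_index records how large the dense per-symbol vector must be when the pairs are unpacked later. A hedged sketch of that pack-then-unpack step, with a hypothetical resolution enum in place of ld_plugin_symbol_resolution_t:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical analogue of ld_plugin_symbol_resolution_t.  */
    enum res { RES_UNKNOWN = 0, RES_PREVAILING = 1, RES_PREEMPTED = 2 };

    struct res_pair { enum res res; unsigned index; };

    int
    main (void)
    {
      /* Sparse pairs as they might be read from a resolution file...  */
      struct res_pair pairs[] = { { RES_PREVAILING, 7 }, { RES_PREEMPTED, 2 } };
      unsigned n_pairs = 2, max_index = 7;

      /* ...unpacked later into a dense, index-addressed vector.  */
      enum res *by_index = calloc (max_index + 1, sizeof *by_index);
      if (!by_index)
        return 1;
      for (unsigned i = 0; i < n_pairs; i++)
        by_index[pairs[i].index] = pairs[i].res;

      printf ("symbol 7 -> %d\n", (int) by_index[7]);
      free (by_index);
      return 0;
    }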
@@ -764,7 +785,7 @@ extern const char *lto_tag_name (enum LTO_tags);
extern bitmap lto_bitmap_alloc (void);
extern void lto_bitmap_free (bitmap);
extern char *lto_get_section_name (int, const char *, struct lto_file_decl_data *);
-extern void print_lto_report (void);
+extern void print_lto_report (const char *);
extern void lto_streamer_init (void);
extern bool gate_lto_out (void);
#ifdef LTO_STREAMER_DEBUG
@@ -812,10 +833,10 @@ void lto_output_location (struct output_block *, location_t);
/* In lto-cgraph.c */
-int lto_symtab_encoder_lookup (lto_symtab_encoder_t, symtab_node);
lto_symtab_encoder_t lto_symtab_encoder_new (void);
int lto_symtab_encoder_encode (lto_symtab_encoder_t, symtab_node);
void lto_symtab_encoder_delete (lto_symtab_encoder_t);
+bool lto_symtab_encoder_delete_node (lto_symtab_encoder_t, symtab_node);
bool lto_symtab_encoder_encode_body_p (lto_symtab_encoder_t,
struct cgraph_node *);
bool lto_symtab_encoder_in_partition_p (lto_symtab_encoder_t,
@@ -835,8 +856,7 @@ bool referenced_from_this_partition_p (struct ipa_ref_list *,
lto_symtab_encoder_t);
bool reachable_from_this_partition_p (struct cgraph_node *,
lto_symtab_encoder_t);
-void compute_ltrans_boundary (struct lto_out_decl_state *state,
- cgraph_node_set, varpool_node_set);
+lto_symtab_encoder_t compute_ltrans_boundary (lto_symtab_encoder_t encoder);
/* In lto-symtab.c. */
@@ -994,7 +1014,21 @@ emit_label_in_global_context_p (tree label)
static inline int
lto_symtab_encoder_size (lto_symtab_encoder_t encoder)
{
- return VEC_length (symtab_node, encoder->nodes);
+ return VEC_length (lto_encoder_entry, encoder->nodes);
+}
+
+/* Value used to represent failure of lto_symtab_encoder_lookup. */
+#define LCC_NOT_FOUND (-1)
+
+/* Look up NODE in encoder. Return NODE's reference if it has been encoded
+ or LCC_NOT_FOUND if it is not there. */
+
+static inline int
+lto_symtab_encoder_lookup (lto_symtab_encoder_t encoder,
+ symtab_node node)
+{
+ void **slot = pointer_map_contains (encoder->map, node);
+ return (slot && *slot ? (size_t) *(slot) - 1 : LCC_NOT_FOUND);
}
/* Return true if iterator LSE points to nothing. */
@@ -1015,26 +1049,26 @@ lsei_next (lto_symtab_encoder_iterator *lsei)
static inline symtab_node
lsei_node (lto_symtab_encoder_iterator lsei)
{
- return VEC_index (symtab_node, lsei.encoder->nodes, lsei.index);
+ return VEC_index (lto_encoder_entry,
+ lsei.encoder->nodes, lsei.index).node;
}
/* Return the node pointed to by LSI. */
static inline struct cgraph_node *
lsei_cgraph_node (lto_symtab_encoder_iterator lsei)
{
- return cgraph (VEC_index (symtab_node, lsei.encoder->nodes, lsei.index));
+ return cgraph (VEC_index (lto_encoder_entry,
+ lsei.encoder->nodes, lsei.index).node);
}
/* Return the node pointed to by LSI. */
static inline struct varpool_node *
lsei_varpool_node (lto_symtab_encoder_iterator lsei)
{
- return varpool (VEC_index (symtab_node, lsei.encoder->nodes, lsei.index));
+ return varpool (VEC_index (lto_encoder_entry,
+ lsei.encoder->nodes, lsei.index).node);
}
-/* Value used to represent failure of lto_symtab_encoder_lookup. */
-#define LCC_NOT_FOUND (-1)
-
/* Return the cgraph node corresponding to REF using ENCODER. */
static inline symtab_node
@@ -1043,7 +1077,7 @@ lto_symtab_encoder_deref (lto_symtab_encoder_t encoder, int ref)
if (ref == LCC_NOT_FOUND)
return NULL;
- return VEC_index (symtab_node, encoder->nodes, ref);
+ return VEC_index (lto_encoder_entry, encoder->nodes, ref).node;
}
/* Return an iterator to the first node in LSI. */
diff --git a/gcc/lto/ChangeLog b/gcc/lto/ChangeLog
index 4143c6af25d..85d59b23faf 100644
--- a/gcc/lto/ChangeLog
+++ b/gcc/lto/ChangeLog
@@ -1,3 +1,81 @@
+2012-09-11 Jan Hubicka <jh@suse.cz>
+
+ PR lto/54312
+ * lto.c (uniquify_nodes): Remove quadratic loop checking if the
+ type is variant leader.
+
+2012-09-11 Richard Guenther <rguenther@suse.de>
+
+ * lto.c (enum gtc_mode): Remove.
+ (struct type_pair_d): Adjust.
+ (lookup_type_pair): Likewise.
+ (gimple_type_leader): Do not mark as deletable.
+ (gimple_lookup_type_leader): Adjust.
+ (gtc_visit): Likewise.
+ (gimple_types_compatible_p_1): Likewise.
+ (gimple_types_compatible_p): Likewise.
+ (gimple_type_hash): Likewise.
+ (gimple_register_type): Likewise.
+ (read_cgraph_and_symbols): Manage lifetime of tables
+ here.
+
+2012-09-11 Richard Guenther <rguenther@suse.de>
+
+ * lto.c (gimple_types, type_hash_cache, enum gtc_mode,
+ struct type_pair_d, lookup_type_pair, struct sccs,
+ next_dfs_num, gtc_next_dfs_num, struct gimple_type_leader_entry_s,
+ gimple_type_leader, gimple_lookup_type_leader, compare_type_names_p,
+ gtc_visit, gimple_types_compatible_p_1, gimple_types_compatible_p,
+ visit, iterative_hash_name, struct type_hash_pair,
+ type_hash_pair_compare, iterative_hash_gimple_type, gimple_type_hash,
+ gimple_type_eq, gimple_register_type_1, gimple_register_type):
+ Move here from gimple.c
+ (read_cgraph_and_symbols): Free hash tables here.
+ (print_lto_report_1): New function wrapping print_lto_report.
+ (do_whole_program_analysis): Call it.
+ (lto_main): Likewise.
+
+2012-09-10 Jan Hubicka <jh@suse.cz>
+
+ * lto-partition.c (partition_symbol_p): Forward declare.
+ (add_references_to_partition): Reimplement using partition_symbol_p.
+ (add_aliases_to_partition): Break out from add_references_to_partition;
+ reimplement using partition_symbol_p.
+ (add_cgraph_node_to_partition_1): Handle callees using partition_symbol_p;
+ add sanity checks.
+ (add_varpool_node_to_partition): Use add_aliases_to_partition.
+ (partition_varpool_node_p): Do not special case aliases.
+
+2012-08-12 Jan Hubicka <jh@suse.cz>
+
+ * lto.c (lto_wpa_write_files): Do not delete partition encoder;
+ it is deleted after streaming.
+ * lto-partition.c (partition_symbol_p): New function.
+ (promote_var, promote_fn): Remove.
+ (promote_symbol): New function.
+ (lto_promote_cross_file_statics): First compute boundaries; rewrite
+ to lookup the actual boundaries instead of computing them ad-hoc.
+
+2012-08-12 Jan Hubicka <jh@suse.cz>
+
+ Replace cgraph_node_set and varpool_node_set by symtab_node_encoder
+ in partitioning.
+ * lto-partition.h (ltrans_partition_def): Replace cgraph_set and varpool_set
+ by encoder.
+ * lto-partition.c (new_partition): Update.
+ * lto.c (cmp_partitions_order): Update.
+ (lto_wpa_write_files): Update.
+ (free_ltrans_partitions): Update.
+ (add_references_to_partition): Update.
+ (add_cgraph_node_to_partition_1): Update.
+ (add_cgraph_node_to_partition): Update.
+ (add_varpool_node_to_partition): Update.
+ (undo_partition): Update.
+ (lto_balanced_map): Update.
+ (set_referenced_from_other_partition_p, set_reachable_from_other_partition_p,
+ set_referenced_from_this_partition_p): Update.
+ (lto_promote_cross_file_statics): Update.
+
2012-08-12 Jan Hubicka <jh@suse.cz>
* lto-partition.c (set_referenced_from_other_partition_p,
diff --git a/gcc/lto/lto-partition.c b/gcc/lto/lto-partition.c
index 37289b694ef..4775ee60340 100644
--- a/gcc/lto/lto-partition.c
+++ b/gcc/lto/lto-partition.c
@@ -35,14 +35,14 @@ VEC(ltrans_partition, heap) *ltrans_partitions;
static void add_cgraph_node_to_partition (ltrans_partition part, struct cgraph_node *node);
static void add_varpool_node_to_partition (ltrans_partition part, struct varpool_node *vnode);
+static bool partition_symbol_p (symtab_node node);
/* Create new partition with name NAME. */
static ltrans_partition
new_partition (const char *name)
{
ltrans_partition part = XCNEW (struct ltrans_partition_def);
- part->cgraph_set = cgraph_node_set_new ();
- part->varpool_set = varpool_node_set_new ();
+ part->encoder = lto_symtab_encoder_new ();
part->name = name;
part->insns = 0;
VEC_safe_push (ltrans_partition, heap, ltrans_partitions, part);
@@ -57,14 +57,14 @@ free_ltrans_partitions (void)
ltrans_partition part;
for (idx = 0; VEC_iterate (ltrans_partition, ltrans_partitions, idx, part); idx++)
{
- free_cgraph_node_set (part->cgraph_set);
+ /* Symtab encoder is freed after streaming. */
free (part);
}
VEC_free (ltrans_partition, heap, ltrans_partitions);
}
-/* See all references that go to comdat objects and bring them into partition too.
- Also see all aliases of the newly added entry and bring them, too. */
+/* Add all symbols referenced by REFS that are not external and not
+   partitioned into PART. */
static void
add_references_to_partition (ltrans_partition part, struct ipa_ref_list *refs)
{
@@ -72,47 +72,38 @@ add_references_to_partition (ltrans_partition part, struct ipa_ref_list *refs)
struct ipa_ref *ref;
for (i = 0; ipa_ref_list_reference_iterate (refs, i, ref); i++)
{
- if (symtab_function_p (ref->referred)
- && (DECL_COMDAT (cgraph_function_node (ipa_ref_node (ref),
- NULL)->symbol.decl)
- || (ref->use == IPA_REF_ALIAS
- && lookup_attribute
- ("weakref", DECL_ATTRIBUTES (ipa_ref_node (ref)->symbol.decl))))
- && !cgraph_node_in_set_p (ipa_ref_node (ref), part->cgraph_set))
+ if (DECL_EXTERNAL (ref->referred->symbol.decl)
+ || partition_symbol_p (ref->referred)
+ || lto_symtab_encoder_in_partition_p (part->encoder, ref->referred))
+ continue;
+ if (symtab_function_p (ref->referred))
add_cgraph_node_to_partition (part, ipa_ref_node (ref));
else
- if (symtab_variable_p (ref->referred)
- && (DECL_COMDAT (ipa_ref_varpool_node (ref)->symbol.decl)
- || DECL_EXTERNAL (ipa_ref_varpool_node (ref)->symbol.decl)
- || (ref->use == IPA_REF_ALIAS
- && lookup_attribute
- ("weakref",
- DECL_ATTRIBUTES (ipa_ref_varpool_node (ref)->symbol.decl))))
- && !varpool_node_in_set_p (ipa_ref_varpool_node (ref),
- part->varpool_set))
- add_varpool_node_to_partition (part, ipa_ref_varpool_node (ref));
+ add_varpool_node_to_partition (part, ipa_ref_varpool_node (ref));
}
+}
+
+/* Look for all (nonweakref) aliases in REFS and add them into PART. */
+static void
+add_aliases_to_partition (ltrans_partition part, struct ipa_ref_list *refs)
+{
+ int i;
+ struct ipa_ref *ref;
+
for (i = 0; ipa_ref_list_referring_iterate (refs, i, ref); i++)
- {
- if (symtab_function_p (ref->referring)
- && ref->use == IPA_REF_ALIAS
- && !cgraph_node_in_set_p (ipa_ref_referring_node (ref),
- part->cgraph_set)
- && !lookup_attribute ("weakref",
- DECL_ATTRIBUTES
- (ipa_ref_referring_node (ref)->symbol.decl)))
- add_cgraph_node_to_partition (part, ipa_ref_referring_node (ref));
- else
- if (symtab_variable_p (ref->referring)
- && ref->use == IPA_REF_ALIAS
- && !varpool_node_in_set_p (ipa_ref_referring_varpool_node (ref),
- part->varpool_set)
- && !lookup_attribute ("weakref",
- DECL_ATTRIBUTES
- (ipa_ref_referring_varpool_node (ref)->symbol.decl)))
+ if (ref->use == IPA_REF_ALIAS
+ && !lto_symtab_encoder_in_partition_p (part->encoder,
+ ref->referring)
+ && !lookup_attribute ("weakref",
+ DECL_ATTRIBUTES
+ (ref->referring->symbol.decl)))
+ {
+ if (symtab_function_p (ref->referring))
+ add_cgraph_node_to_partition (part, ipa_ref_referring_node (ref));
+ else
add_varpool_node_to_partition (part,
ipa_ref_referring_varpool_node (ref));
- }
+ }
}
/* Worker for add_cgraph_node_to_partition. */
@@ -122,6 +113,9 @@ add_cgraph_node_to_partition_1 (struct cgraph_node *node, void *data)
{
ltrans_partition part = (ltrans_partition) data;
+ if (lto_symtab_encoder_in_partition_p (part->encoder, (symtab_node) node))
+ return false;
+
/* non-COMDAT aliases of COMDAT functions need to be output just once. */
if (!DECL_COMDAT (node->symbol.decl)
&& !node->global.inlined_to
@@ -139,7 +133,7 @@ add_cgraph_node_to_partition_1 (struct cgraph_node *node, void *data)
cgraph_node_name (node), node->uid);
}
node->symbol.aux = (void *)((size_t)node->symbol.aux + 1);
- cgraph_node_set_add (part->cgraph_set, node);
+ lto_set_symtab_encoder_in_partition (part->encoder, (symtab_node)node);
return false;
}
@@ -149,25 +143,20 @@ static void
add_cgraph_node_to_partition (ltrans_partition part, struct cgraph_node *node)
{
struct cgraph_edge *e;
- cgraph_node_set_iterator csi;
struct cgraph_node *n;
/* If NODE is already there, we have nothing to do. */
- csi = cgraph_node_set_find (part->cgraph_set, node);
- if (!csi_end_p (csi))
+ if (lto_symtab_encoder_in_partition_p (part->encoder, (symtab_node) node))
return;
cgraph_for_node_thunks_and_aliases (node, add_cgraph_node_to_partition_1, part, true);
part->insns += inline_summary (node)->self_size;
-
- cgraph_node_set_add (part->cgraph_set, node);
-
for (e = node->callees; e; e = e->next_callee)
if ((!e->inline_failed
- || DECL_COMDAT (cgraph_function_node (e->callee, NULL)->symbol.decl))
- && !cgraph_node_in_set_p (e->callee, part->cgraph_set))
+ || (!DECL_EXTERNAL (e->callee->symbol.decl)
+ && !partition_symbol_p ((symtab_node) e->callee))))
add_cgraph_node_to_partition (part, e->callee);
/* The only way to assemble non-weakref alias is to add the aliased object into
@@ -190,15 +179,13 @@ add_cgraph_node_to_partition (ltrans_partition part, struct cgraph_node *node)
static void
add_varpool_node_to_partition (ltrans_partition part, struct varpool_node *vnode)
{
- varpool_node_set_iterator vsi;
struct varpool_node *v;
/* If NODE is already there, we have nothing to do. */
- vsi = varpool_node_set_find (part->varpool_set, vnode);
- if (!vsi_end_p (vsi))
+ if (lto_symtab_encoder_in_partition_p (part->encoder, (symtab_node) vnode))
return;
- varpool_node_set_add (part->varpool_set, vnode);
+ lto_set_symtab_encoder_in_partition (part->encoder, (symtab_node) vnode);
if (vnode->symbol.aux)
{
@@ -218,10 +205,11 @@ add_varpool_node_to_partition (ltrans_partition part, struct varpool_node *vnode
add_varpool_node_to_partition (part, v);
add_references_to_partition (part, &vnode->symbol.ref_list);
+ add_aliases_to_partition (part, &vnode->symbol.ref_list);
if (vnode->symbol.same_comdat_group
- && !varpool_node_in_set_p (varpool (vnode->symbol.same_comdat_group),
- part->varpool_set))
+ && !lto_symtab_encoder_in_partition_p (part->encoder,
+ vnode->symbol.same_comdat_group))
add_varpool_node_to_partition (part, varpool (vnode->symbol.same_comdat_group));
}
@@ -229,26 +217,15 @@ add_varpool_node_to_partition (ltrans_partition part, struct varpool_node *vnode
and number of varpool nodes is N_VARPOOL_NODES. */
static void
-undo_partition (ltrans_partition partition, unsigned int n_cgraph_nodes,
- unsigned int n_varpool_nodes)
+undo_partition (ltrans_partition partition, unsigned int n_nodes)
{
- while (VEC_length (cgraph_node_ptr, partition->cgraph_set->nodes) >
- n_cgraph_nodes)
+ while (lto_symtab_encoder_size (partition->encoder) > (int)n_nodes)
{
- struct cgraph_node *node = VEC_index (cgraph_node_ptr,
- partition->cgraph_set->nodes,
- n_cgraph_nodes);
- partition->insns -= inline_summary (node)->self_size;
- cgraph_node_set_remove (partition->cgraph_set, node);
- node->symbol.aux = (void *)((size_t)node->symbol.aux - 1);
- }
- while (VEC_length (varpool_node_ptr, partition->varpool_set->nodes) >
- n_varpool_nodes)
- {
- struct varpool_node *node = VEC_index (varpool_node_ptr,
- partition->varpool_set->nodes,
- n_varpool_nodes);
- varpool_node_set_remove (partition->varpool_set, node);
+ symtab_node node = lto_symtab_encoder_deref (partition->encoder,
+ n_nodes);
+ if (symtab_function_p (node))
+ partition->insns -= inline_summary (cgraph (node))->self_size;
+ lto_symtab_encoder_delete_node (partition->encoder, node);
node->symbol.aux = (void *)((size_t)node->symbol.aux - 1);
}
}
@@ -284,7 +261,7 @@ partition_cgraph_node_p (struct cgraph_node *node)
static bool
partition_varpool_node_p (struct varpool_node *vnode)
{
- if (vnode->alias || !vnode->analyzed)
+ if (!vnode->analyzed)
return false;
/* Constant pool and comdat are always only in partitions they are needed. */
if (DECL_IN_CONSTANT_POOL (vnode->symbol.decl)
@@ -298,6 +275,18 @@ partition_varpool_node_p (struct varpool_node *vnode)
return true;
}
+/* Return true if NODE should be partitioned.
+   This means that the partitioning algorithm should put NODE into one of
+   the partitions. */
+
+static bool
+partition_symbol_p (symtab_node node)
+{
+ if (symtab_function_p (node))
+ return partition_cgraph_node_p (cgraph (node));
+ else
+ return partition_varpool_node_p (varpool (node));
+}
+
/* Group cgraph nodes by input files. This is used mainly for testing
right now. */
@@ -458,10 +447,10 @@ lto_balanced_map (void)
int total_size = 0, best_total_size = 0;
int partition_size;
ltrans_partition partition;
- unsigned int last_visited_cgraph_node = 0, last_visited_varpool_node = 0;
+ int last_visited_node = 0;
struct varpool_node *vnode;
int cost = 0, internal = 0;
- int best_n_nodes = 0, best_n_varpool_nodes = 0, best_i = 0, best_cost =
+ int best_n_nodes = 0, best_i = 0, best_cost =
INT_MAX, best_internal = 0;
int npartitions;
int current_order = -1;
@@ -545,28 +534,22 @@ lto_balanced_map (void)
callgraph or IPA reference edge leaving the partition contributes into
COST. Every edge inside partition was earlier computed as one leaving
it and thus we need to subtract it from COST. */
- while (last_visited_cgraph_node <
- VEC_length (cgraph_node_ptr, partition->cgraph_set->nodes)
- || last_visited_varpool_node < VEC_length (varpool_node_ptr,
- partition->varpool_set->
- nodes))
+ while (last_visited_node < lto_symtab_encoder_size (partition->encoder))
{
struct ipa_ref_list *refs;
int j;
struct ipa_ref *ref;
- bool cgraph_p = false;
+ symtab_node snode = lto_symtab_encoder_deref (partition->encoder,
+ last_visited_node);
- if (last_visited_cgraph_node <
- VEC_length (cgraph_node_ptr, partition->cgraph_set->nodes))
+ if (symtab_function_p (snode))
{
struct cgraph_edge *edge;
- cgraph_p = true;
- node = VEC_index (cgraph_node_ptr, partition->cgraph_set->nodes,
- last_visited_cgraph_node);
+ node = cgraph (snode);
refs = &node->symbol.ref_list;
- last_visited_cgraph_node++;
+ last_visited_node++;
gcc_assert (node->analyzed);
@@ -575,30 +558,32 @@ lto_balanced_map (void)
if (edge->callee->analyzed)
{
int edge_cost = edge->frequency;
- cgraph_node_set_iterator csi;
+ int index;
if (!edge_cost)
edge_cost = 1;
gcc_assert (edge_cost > 0);
- csi = cgraph_node_set_find (partition->cgraph_set, edge->callee);
- if (!csi_end_p (csi)
- && csi.index < last_visited_cgraph_node - 1)
- cost -= edge_cost, internal+= edge_cost;
+ index = lto_symtab_encoder_lookup (partition->encoder,
+ (symtab_node)edge->callee);
+ if (index != LCC_NOT_FOUND
+ && index < last_visited_node - 1)
+ cost -= edge_cost, internal += edge_cost;
else
cost += edge_cost;
}
for (edge = node->callers; edge; edge = edge->next_caller)
{
int edge_cost = edge->frequency;
- cgraph_node_set_iterator csi;
+ int index;
gcc_assert (edge->caller->analyzed);
if (!edge_cost)
edge_cost = 1;
gcc_assert (edge_cost > 0);
- csi = cgraph_node_set_find (partition->cgraph_set, edge->caller);
- if (!csi_end_p (csi)
- && csi.index < last_visited_cgraph_node)
+ index = lto_symtab_encoder_lookup (partition->encoder,
+ (symtab_node)edge->caller);
+ if (index != LCC_NOT_FOUND
+ && index < last_visited_node - 1)
cost -= edge_cost;
else
cost += edge_cost;
@@ -606,10 +591,8 @@ lto_balanced_map (void)
}
else
{
- refs =
- &VEC_index (varpool_node_ptr, partition->varpool_set->nodes,
- last_visited_varpool_node)->symbol.ref_list;
- last_visited_varpool_node++;
+ refs = &snode->symbol.ref_list;
+ last_visited_node++;
}
/* Compute boundary cost of IPA REF edges and at the same time look into
@@ -617,7 +600,7 @@ lto_balanced_map (void)
for (j = 0; ipa_ref_list_reference_iterate (refs, j, ref); j++)
if (symtab_variable_p (ref->referred))
{
- varpool_node_set_iterator vsi;
+ int index;
vnode = ipa_ref_varpool_node (ref);
if (!vnode->finalized)
@@ -625,23 +608,25 @@ lto_balanced_map (void)
if (!vnode->symbol.aux && flag_toplevel_reorder
&& partition_varpool_node_p (vnode))
add_varpool_node_to_partition (partition, vnode);
- vsi = varpool_node_set_find (partition->varpool_set, vnode);
- if (!vsi_end_p (vsi)
- && vsi.index < last_visited_varpool_node - !cgraph_p)
+ index = lto_symtab_encoder_lookup (partition->encoder,
+ (symtab_node)vnode);
+ if (index != LCC_NOT_FOUND
+ && index < last_visited_node - 1)
cost--, internal++;
else
cost++;
}
else
{
- cgraph_node_set_iterator csi;
+ int index;
node = ipa_ref_node (ref);
if (!node->analyzed)
continue;
- csi = cgraph_node_set_find (partition->cgraph_set, node);
- if (!csi_end_p (csi)
- && csi.index < last_visited_cgraph_node - cgraph_p)
+ index = lto_symtab_encoder_lookup (partition->encoder,
+ (symtab_node)node);
+ if (index != LCC_NOT_FOUND
+ && index < last_visited_node - 1)
cost--, internal++;
else
cost++;
@@ -649,29 +634,31 @@ lto_balanced_map (void)
for (j = 0; ipa_ref_list_referring_iterate (refs, j, ref); j++)
if (symtab_variable_p (ref->referring))
{
- varpool_node_set_iterator vsi;
+ int index;
vnode = ipa_ref_referring_varpool_node (ref);
gcc_assert (vnode->finalized);
if (!vnode->symbol.aux && flag_toplevel_reorder
&& partition_varpool_node_p (vnode))
add_varpool_node_to_partition (partition, vnode);
- vsi = varpool_node_set_find (partition->varpool_set, vnode);
- if (!vsi_end_p (vsi)
- && vsi.index < last_visited_varpool_node)
+ index = lto_symtab_encoder_lookup (partition->encoder,
+ (symtab_node)vnode);
+ if (index != LCC_NOT_FOUND
+ && index < last_visited_node - 1)
cost--;
else
cost++;
}
else
{
- cgraph_node_set_iterator csi;
+ int index;
node = ipa_ref_referring_node (ref);
gcc_assert (node->analyzed);
- csi = cgraph_node_set_find (partition->cgraph_set, node);
- if (!csi_end_p (csi)
- && csi.index < last_visited_cgraph_node)
+ index = lto_symtab_encoder_lookup (partition->encoder,
+ (symtab_node)node);
+ if (index != LCC_NOT_FOUND
+ && index < last_visited_node - 1)
cost--;
else
cost++;
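
All four edge walks above share one accounting rule: an edge whose other endpoint is already inside the partition (its encoder index lies below the visit frontier) is internal, so its previously counted boundary cost is subtracted back out, while any other edge crosses the boundary and adds to COST. A tiny standalone illustration of that boundary metric (nodes, edges and names are invented for the sketch):

#include <stdio.h>

int
main (void)
{
  /* Nodes 0..2 are inside the partition, 3..5 outside.  */
  int in_partition[6] = { 1, 1, 1, 0, 0, 0 };
  int edges[4][2] = { { 0, 1 }, { 1, 2 }, { 2, 3 }, { 0, 4 } };
  int cost = 0, internal = 0;
  int i;

  for (i = 0; i < 4; i++)
    {
      /* An edge with both endpoints inside is internal; an edge that
	 crosses the boundary contributes to the partition's cost.  */
      if (in_partition[edges[i][0]] && in_partition[edges[i][1]])
	internal++;
      else
	cost++;
    }
  printf ("cost=%d internal=%d\n", cost, internal);   /* cost=2 internal=2 */
  return 0;
}
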
@@ -689,10 +676,7 @@ lto_balanced_map (void)
best_cost = cost;
best_internal = internal;
best_i = i;
- best_n_nodes = VEC_length (cgraph_node_ptr,
- partition->cgraph_set->nodes);
- best_n_varpool_nodes = VEC_length (varpool_node_ptr,
- partition->varpool_set->nodes);
+ best_n_nodes = lto_symtab_encoder_size (partition->encoder);
best_total_size = total_size;
}
if (cgraph_dump_file)
@@ -708,7 +692,7 @@ lto_balanced_map (void)
if (cgraph_dump_file)
fprintf (cgraph_dump_file, "Unwinding %i insertions to step %i\n",
i - best_i, best_i);
- undo_partition (partition, best_n_nodes, best_n_varpool_nodes);
+ undo_partition (partition, best_n_nodes);
}
i = best_i;
 /* When we are finished, avoid creating an empty partition.  */
@@ -717,15 +701,13 @@ lto_balanced_map (void)
if (i == n_nodes - 1)
break;
partition = new_partition ("");
- last_visited_cgraph_node = 0;
- last_visited_varpool_node = 0;
+ last_visited_node = 0;
total_size = best_total_size;
cost = 0;
if (cgraph_dump_file)
fprintf (cgraph_dump_file, "New partition\n");
best_n_nodes = 0;
- best_n_varpool_nodes = 0;
best_cost = INT_MAX;
/* Since the size of partitions is just approximate, update the size after
@@ -764,112 +746,26 @@ lto_balanced_map (void)
/* Promote variable VNODE to be static. */
-static bool
-promote_var (struct varpool_node *vnode)
+static void
+promote_symbol (symtab_node node)
{
- if (TREE_PUBLIC (vnode->symbol.decl) || DECL_EXTERNAL (vnode->symbol.decl))
- return false;
- gcc_assert (flag_wpa);
- TREE_PUBLIC (vnode->symbol.decl) = 1;
- DECL_VISIBILITY (vnode->symbol.decl) = VISIBILITY_HIDDEN;
- DECL_VISIBILITY_SPECIFIED (vnode->symbol.decl) = true;
- if (cgraph_dump_file)
- fprintf (cgraph_dump_file,
- "Promoting var as hidden: %s\n", varpool_node_name (vnode));
- return true;
-}
-
-/* Promote function NODE to be static. */
+ /* We already promoted ... */
+ if (DECL_VISIBILITY (node->symbol.decl) == VISIBILITY_HIDDEN
+ && DECL_VISIBILITY_SPECIFIED (node->symbol.decl)
+ && TREE_PUBLIC (node->symbol.decl))
+ return;
-static bool
-promote_fn (struct cgraph_node *node)
-{
- gcc_assert (flag_wpa);
- if (TREE_PUBLIC (node->symbol.decl) || DECL_EXTERNAL (node->symbol.decl))
- return false;
+ gcc_checking_assert (!TREE_PUBLIC (node->symbol.decl)
+ && !DECL_EXTERNAL (node->symbol.decl));
TREE_PUBLIC (node->symbol.decl) = 1;
DECL_VISIBILITY (node->symbol.decl) = VISIBILITY_HIDDEN;
DECL_VISIBILITY_SPECIFIED (node->symbol.decl) = true;
if (cgraph_dump_file)
fprintf (cgraph_dump_file,
- "Promoting function as hidden: %s/%i\n",
- cgraph_node_name (node), node->uid);
- return true;
-}
-
-/* Return if LIST contain references from other partitions.
- TODO: remove this once lto partitioning is using encoders. */
-
-static bool
-set_referenced_from_other_partition_p (struct ipa_ref_list *list, cgraph_node_set set,
- varpool_node_set vset)
-{
- int i;
- struct ipa_ref *ref;
- for (i = 0; ipa_ref_list_referring_iterate (list, i, ref); i++)
- {
- if (symtab_function_p (ref->referring))
- {
- if (ipa_ref_referring_node (ref)->symbol.in_other_partition
- || !cgraph_node_in_set_p (ipa_ref_referring_node (ref), set))
- return true;
- }
- else
- {
- if (ipa_ref_referring_varpool_node (ref)->symbol.in_other_partition
- || !varpool_node_in_set_p (ipa_ref_referring_varpool_node (ref),
- vset))
- return true;
- }
- }
- return false;
-}
-
-/* Return true when node is reachable from other partition.
- TODO: remove this once lto partitioning is using encoders. */
-
-static bool
-set_reachable_from_other_partition_p (struct cgraph_node *node, cgraph_node_set set)
-{
- struct cgraph_edge *e;
- if (!node->analyzed)
- return false;
- if (node->global.inlined_to)
- return false;
- for (e = node->callers; e; e = e->next_caller)
- if (e->caller->symbol.in_other_partition
- || !cgraph_node_in_set_p (e->caller, set))
- return true;
- return false;
+ "Promoting as hidden: %s\n", symtab_node_name (node));
}
-/* Return if LIST contain references from other partitions.
- TODO: remove this once lto partitioning is using encoders. */
-
-static bool
-set_referenced_from_this_partition_p (struct ipa_ref_list *list, cgraph_node_set set,
- varpool_node_set vset)
-{
- int i;
- struct ipa_ref *ref;
- for (i = 0; ipa_ref_list_referring_iterate (list, i, ref); i++)
- {
- if (symtab_function_p (ref->referring))
- {
- if (cgraph_node_in_set_p (ipa_ref_referring_node (ref), set))
- return true;
- }
- else
- {
- if (varpool_node_in_set_p (ipa_ref_referring_varpool_node (ref),
- vset))
- return true;
- }
- }
- return false;
-}
-
/* Find out all static decls that need to be promoted to global because
of cross file sharing. This function must be run in the WPA mode after
all inlinees are added. */
@@ -877,120 +773,43 @@ set_referenced_from_this_partition_p (struct ipa_ref_list *list, cgraph_node_set
void
lto_promote_cross_file_statics (void)
{
- struct varpool_node *vnode;
unsigned i, n_sets;
- cgraph_node_set set;
- varpool_node_set vset;
- cgraph_node_set_iterator csi;
- varpool_node_set_iterator vsi;
- VEC(varpool_node_ptr, heap) *promoted_initializers = NULL;
- struct pointer_set_t *inserted = pointer_set_create ();
gcc_assert (flag_wpa);
+ /* First compute boundaries. */
n_sets = VEC_length (ltrans_partition, ltrans_partitions);
for (i = 0; i < n_sets; i++)
{
ltrans_partition part
= VEC_index (ltrans_partition, ltrans_partitions, i);
- set = part->cgraph_set;
- vset = part->varpool_set;
-
- /* If node called or referred to from other partition, it needs to be
- globalized. */
- for (csi = csi_start (set); !csi_end_p (csi); csi_next (&csi))
- {
- struct cgraph_node *node = csi_node (csi);
- if (node->symbol.externally_visible)
- continue;
- if (node->global.inlined_to)
- continue;
- if ((!DECL_EXTERNAL (node->symbol.decl)
- && !DECL_COMDAT (node->symbol.decl))
- && (set_referenced_from_other_partition_p (&node->symbol.ref_list, set, vset)
- || set_reachable_from_other_partition_p (node, set)))
- promote_fn (node);
- }
- for (vsi = vsi_start (vset); !vsi_end_p (vsi); vsi_next (&vsi))
- {
- vnode = vsi_node (vsi);
- /* Constant pool references use internal labels and thus can not
- be made global. It is sensible to keep those ltrans local to
- allow better optimization. */
- if (!DECL_IN_CONSTANT_POOL (vnode->symbol.decl)
- && !DECL_EXTERNAL (vnode->symbol.decl)
- && !DECL_COMDAT (vnode->symbol.decl)
- && !vnode->symbol.externally_visible && vnode->analyzed
- && set_referenced_from_other_partition_p (&vnode->symbol.ref_list,
- set, vset))
- promote_var (vnode);
- }
+ part->encoder = compute_ltrans_boundary (part->encoder);
+ }
- /* We export the initializer of a read-only var into each partition
- referencing the var. Folding might take declarations from the
- initializer and use them, so everything referenced from the
- initializer can be accessed from this partition after folding.
+ /* Look at boundaries and promote symbols as needed. */
+ for (i = 0; i < n_sets; i++)
+ {
+ lto_symtab_encoder_iterator lsei;
+ lto_symtab_encoder_t encoder;
+ ltrans_partition part
+ = VEC_index (ltrans_partition, ltrans_partitions, i);
- This means that we need to promote all variables and functions
- referenced from all initializers of read-only vars referenced
- from this partition that are not in this partition. This needs
- to be done recursively. */
- FOR_EACH_VARIABLE (vnode)
- if (const_value_known_p (vnode->symbol.decl)
- && DECL_INITIAL (vnode->symbol.decl)
- && !varpool_node_in_set_p (vnode, vset)
- && set_referenced_from_this_partition_p (&vnode->symbol.ref_list, set, vset)
- && !pointer_set_insert (inserted, vnode))
- VEC_safe_push (varpool_node_ptr, heap, promoted_initializers, vnode);
-
- while (!VEC_empty (varpool_node_ptr, promoted_initializers))
- {
- int i;
- struct ipa_ref *ref;
+ encoder = part->encoder;
+ for (lsei = lsei_start (encoder); !lsei_end_p (lsei);
+ lsei_next (&lsei))
+ {
+ symtab_node node = lsei_node (lsei);
+
+	  /* No need to promote if the symbol is already externally visible ... */
+ if (node->symbol.externally_visible
+	      /* ... or if it is part of the current partition ... */
+ || lto_symtab_encoder_in_partition_p (encoder, node)
+	      /* ... or if we do not partition it.  This means that it will
+		 appear in every partition referencing it.  */
+ || !partition_symbol_p (node))
+ continue;
- vnode = VEC_pop (varpool_node_ptr, promoted_initializers);
- for (i = 0;
- ipa_ref_list_reference_iterate (&vnode->symbol.ref_list, i, ref);
- i++)
- {
- if (symtab_function_p (ref->referred))
- {
- struct cgraph_node *n = ipa_ref_node (ref);
- gcc_assert (!n->global.inlined_to);
- if (!n->symbol.externally_visible
- && !cgraph_node_in_set_p (n, set))
- promote_fn (n);
- }
- else
- {
- struct varpool_node *v = ipa_ref_varpool_node (ref);
- if (varpool_node_in_set_p (v, vset))
- continue;
-
- /* Constant pool references use internal labels and thus
- cannot be made global. It is sensible to keep those
- ltrans local to allow better optimization.
- Similarly we ship external vars initializers into
- every ltrans unit possibly referring to it. */
- if (DECL_IN_CONSTANT_POOL (v->symbol.decl)
- || DECL_EXTERNAL (v->symbol.decl))
- {
- if (!pointer_set_insert (inserted, vnode))
- VEC_safe_push (varpool_node_ptr, heap,
- promoted_initializers, v);
- }
- else if (!v->symbol.externally_visible && v->analyzed)
- {
- if (promote_var (v)
- && DECL_INITIAL (v->symbol.decl)
- && const_value_known_p (v->symbol.decl)
- && !pointer_set_insert (inserted, vnode))
- VEC_safe_push (varpool_node_ptr, heap,
- promoted_initializers, v);
- }
- }
- }
- }
+ promote_symbol (node);
+ }
}
- pointer_set_destroy (inserted);
}
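
What promotion amounts to at the source level: a TU-local static becomes a global with hidden visibility, so sibling LTRANS units can reference it without it leaking into the final object's exported symbols. Roughly, and only as an illustration of setting TREE_PUBLIC plus VISIBILITY_HIDDEN (not compiler output):

/* Before promotion: visible only inside its own translation unit.  */
static int local_counter;

/* After promotion: other LTRANS units of the same link can reference
   it, but it stays out of the final object's dynamic symbol table.  */
__attribute__ ((visibility ("hidden"))) int promoted_counter;
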
diff --git a/gcc/lto/lto-partition.h b/gcc/lto/lto-partition.h
index 2160274dda6..e044934e5e7 100644
--- a/gcc/lto/lto-partition.h
+++ b/gcc/lto/lto-partition.h
@@ -22,8 +22,7 @@ along with GCC; see the file COPYING3. If not see
struct ltrans_partition_def
{
- cgraph_node_set cgraph_set;
- varpool_node_set varpool_set;
+ lto_symtab_encoder_t encoder;
const char * name;
int insns;
};
diff --git a/gcc/lto/lto.c b/gcc/lto/lto.c
index bd91c391fd1..c87ad6cfb06 100644
--- a/gcc/lto/lto.c
+++ b/gcc/lto/lto.c
@@ -276,6 +276,1115 @@ lto_read_in_decl_state (struct data_in *data_in, const uint32_t *data,
return data;
}
+
+
+/* Global type table. FIXME, it should be possible to re-use some
+ of the type hashing routines in tree.c (type_hash_canon, type_hash_lookup,
+ etc), but those assume that types were built with the various
+ build_*_type routines which is not the case with the streamer. */
+static GTY((if_marked ("ggc_marked_p"), param_is (union tree_node)))
+ htab_t gimple_types;
+static GTY((if_marked ("tree_int_map_marked_p"), param_is (struct tree_int_map)))
+ htab_t type_hash_cache;
+
+static hashval_t gimple_type_hash (const void *);
+
+/* Structure used to maintain a cache of some type pairs compared by
+ gimple_types_compatible_p when comparing aggregate types. There are
+ three possible values for SAME_P:
+
+ -2: The pair (T1, T2) has just been inserted in the table.
+ 0: T1 and T2 are different types.
+ 1: T1 and T2 are the same type. */
+
+struct type_pair_d
+{
+ unsigned int uid1;
+ unsigned int uid2;
+ signed char same_p;
+};
+typedef struct type_pair_d *type_pair_t;
+DEF_VEC_P(type_pair_t);
+DEF_VEC_ALLOC_P(type_pair_t,heap);
+
+#define GIMPLE_TYPE_PAIR_SIZE 16381
+struct type_pair_d *type_pair_cache;
+
+
+/* Look up the pair of types T1 and T2 in the type-pair cache.  Insert a new
+ entry if none existed. */
+
+static inline type_pair_t
+lookup_type_pair (tree t1, tree t2)
+{
+ unsigned int index;
+ unsigned int uid1, uid2;
+
+ if (TYPE_UID (t1) < TYPE_UID (t2))
+ {
+ uid1 = TYPE_UID (t1);
+ uid2 = TYPE_UID (t2);
+ }
+ else
+ {
+ uid1 = TYPE_UID (t2);
+ uid2 = TYPE_UID (t1);
+ }
+ gcc_checking_assert (uid1 != uid2);
+
+  /* iterative_hash_hashval_t implies a function call.
+     We know that UIDs are in a limited range.  */
+ index = ((((unsigned HOST_WIDE_INT)uid1 << HOST_BITS_PER_WIDE_INT / 2) + uid2)
+ % GIMPLE_TYPE_PAIR_SIZE);
+ if (type_pair_cache [index].uid1 == uid1
+ && type_pair_cache [index].uid2 == uid2)
+ return &type_pair_cache[index];
+
+ type_pair_cache [index].uid1 = uid1;
+ type_pair_cache [index].uid2 = uid2;
+ type_pair_cache [index].same_p = -2;
+
+ return &type_pair_cache[index];
+}
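
lookup_type_pair is a direct-mapped cache: each UID pair maps to exactly one slot, and a colliding pair simply evicts the previous occupant, so the table never needs deletion or resizing. A standalone sketch of the same technique (slot count, mixing function and field names are invented here; the real code packs the two UIDs into one word instead):

#include <stdio.h>

#define SLOTS 16381

struct pair_entry { unsigned lo, hi; int same_p; };
static struct pair_entry cache[SLOTS];

static struct pair_entry *
lookup_pair (unsigned a, unsigned b)
{
  unsigned lo = a < b ? a : b, hi = a < b ? b : a;
  unsigned idx = (lo * 31u + hi) % SLOTS;   /* any cheap mix will do */
  if (cache[idx].lo == lo && cache[idx].hi == hi)
    return &cache[idx];                     /* hit: reuse cached verdict */
  cache[idx].lo = lo;                       /* miss: evict the occupant */
  cache[idx].hi = hi;
  cache[idx].same_p = -2;                   /* "just inserted" marker */
  return &cache[idx];
}

int
main (void)
{
  printf ("%d\n", lookup_pair (7, 3)->same_p);   /* -2: fresh entry */
  lookup_pair (7, 3)->same_p = 1;
  printf ("%d\n", lookup_pair (3, 7)->same_p);   /* 1: same slot, either order */
  return 0;
}
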
+
+/* Per-pointer state for the SCC finding.  The on_sccstack flag
+ is not strictly required, it is true when there is no hash value
+ recorded for the type and false otherwise. But querying that
+ is slower. */
+
+struct sccs
+{
+ unsigned int dfsnum;
+ unsigned int low;
+ bool on_sccstack;
+ union {
+ hashval_t hash;
+ signed char same_p;
+ } u;
+};
+
+static unsigned int next_dfs_num;
+static unsigned int gtc_next_dfs_num;
+
+/* GIMPLE type merging cache. A direct-mapped cache based on TYPE_UID. */
+
+typedef struct GTY(()) gimple_type_leader_entry_s {
+ tree type;
+ tree leader;
+} gimple_type_leader_entry;
+
+#define GIMPLE_TYPE_LEADER_SIZE 16381
+static GTY((length("GIMPLE_TYPE_LEADER_SIZE")))
+ gimple_type_leader_entry *gimple_type_leader;
+
+/* Lookup an existing leader for T and return it or NULL_TREE, if
+ there is none in the cache. */
+
+static inline tree
+gimple_lookup_type_leader (tree t)
+{
+ gimple_type_leader_entry *leader;
+
+ leader = &gimple_type_leader[TYPE_UID (t) % GIMPLE_TYPE_LEADER_SIZE];
+ if (leader->type != t)
+ return NULL_TREE;
+
+ return leader->leader;
+}
+
+
+/* Return true if T1 and T2 have the same name.  Unnamed types compare
+   equal only to other unnamed types; a named type never matches an
+   unnamed one.  */
+
+static bool
+compare_type_names_p (tree t1, tree t2)
+{
+ tree name1 = TYPE_NAME (t1);
+ tree name2 = TYPE_NAME (t2);
+
+ if ((name1 != NULL_TREE) != (name2 != NULL_TREE))
+ return false;
+
+ if (name1 == NULL_TREE)
+ return true;
+
+ /* Either both should be a TYPE_DECL or both an IDENTIFIER_NODE. */
+ if (TREE_CODE (name1) != TREE_CODE (name2))
+ return false;
+
+ if (TREE_CODE (name1) == TYPE_DECL)
+ name1 = DECL_NAME (name1);
+ gcc_checking_assert (!name1 || TREE_CODE (name1) == IDENTIFIER_NODE);
+
+ if (TREE_CODE (name2) == TYPE_DECL)
+ name2 = DECL_NAME (name2);
+ gcc_checking_assert (!name2 || TREE_CODE (name2) == IDENTIFIER_NODE);
+
+ /* Identifiers can be compared with pointer equality rather
+ than a string comparison. */
+ if (name1 == name2)
+ return true;
+
+ return false;
+}
+
+static bool
+gimple_types_compatible_p_1 (tree, tree, type_pair_t,
+ VEC(type_pair_t, heap) **,
+ struct pointer_map_t *, struct obstack *);
+
+/* DFS visit the edge from the caller's type pair with state *STATE to
+   the pair T1, T2.
+   Update the merging status if it is not part of the SCC containing the
+   caller's pair and return it.
+ SCCSTACK, SCCSTATE and SCCSTATE_OBSTACK are state for the DFS walk done. */
+
+static bool
+gtc_visit (tree t1, tree t2,
+ struct sccs *state,
+ VEC(type_pair_t, heap) **sccstack,
+ struct pointer_map_t *sccstate,
+ struct obstack *sccstate_obstack)
+{
+ struct sccs *cstate = NULL;
+ type_pair_t p;
+ void **slot;
+ tree leader1, leader2;
+
+ /* Check first for the obvious case of pointer identity. */
+ if (t1 == t2)
+ return true;
+
+ /* Check that we have two types to compare. */
+ if (t1 == NULL_TREE || t2 == NULL_TREE)
+ return false;
+
+ /* Can't be the same type if the types don't have the same code. */
+ if (TREE_CODE (t1) != TREE_CODE (t2))
+ return false;
+
+ /* Can't be the same type if they have different CV qualifiers. */
+ if (TYPE_QUALS (t1) != TYPE_QUALS (t2))
+ return false;
+
+ if (TREE_ADDRESSABLE (t1) != TREE_ADDRESSABLE (t2))
+ return false;
+
+ /* Void types and nullptr types are always the same. */
+ if (TREE_CODE (t1) == VOID_TYPE
+ || TREE_CODE (t1) == NULLPTR_TYPE)
+ return true;
+
+ /* Can't be the same type if they have different alignment or mode. */
+ if (TYPE_ALIGN (t1) != TYPE_ALIGN (t2)
+ || TYPE_MODE (t1) != TYPE_MODE (t2))
+ return false;
+
+ /* Do some simple checks before doing three hashtable queries. */
+ if (INTEGRAL_TYPE_P (t1)
+ || SCALAR_FLOAT_TYPE_P (t1)
+ || FIXED_POINT_TYPE_P (t1)
+ || TREE_CODE (t1) == VECTOR_TYPE
+ || TREE_CODE (t1) == COMPLEX_TYPE
+ || TREE_CODE (t1) == OFFSET_TYPE
+ || POINTER_TYPE_P (t1))
+ {
+ /* Can't be the same type if they have different sign or precision. */
+ if (TYPE_PRECISION (t1) != TYPE_PRECISION (t2)
+ || TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
+ return false;
+
+ if (TREE_CODE (t1) == INTEGER_TYPE
+ && TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2))
+ return false;
+
+ /* That's all we need to check for float and fixed-point types. */
+ if (SCALAR_FLOAT_TYPE_P (t1)
+ || FIXED_POINT_TYPE_P (t1))
+ return true;
+
+ /* For other types fall through to more complex checks. */
+ }
+
+ /* If the types have been previously registered and found equal
+ they still are. */
+ leader1 = gimple_lookup_type_leader (t1);
+ leader2 = gimple_lookup_type_leader (t2);
+ if (leader1 == t2
+ || t1 == leader2
+ || (leader1 && leader1 == leader2))
+ return true;
+
+ /* If the hash values of t1 and t2 are different the types can't
+ possibly be the same. This helps keeping the type-pair hashtable
+ small, only tracking comparisons for hash collisions. */
+ if (gimple_type_hash (t1) != gimple_type_hash (t2))
+ return false;
+
+ /* Allocate a new cache entry for this comparison. */
+ p = lookup_type_pair (t1, t2);
+ if (p->same_p == 0 || p->same_p == 1)
+ {
+ /* We have already decided whether T1 and T2 are the
+ same, return the cached result. */
+ return p->same_p == 1;
+ }
+
+ if ((slot = pointer_map_contains (sccstate, p)) != NULL)
+ cstate = (struct sccs *)*slot;
+ /* Not yet visited. DFS recurse. */
+ if (!cstate)
+ {
+ gimple_types_compatible_p_1 (t1, t2, p,
+ sccstack, sccstate, sccstate_obstack);
+ cstate = (struct sccs *)* pointer_map_contains (sccstate, p);
+ state->low = MIN (state->low, cstate->low);
+ }
+  /* If the type is still on the SCC stack adjust the parent's low.  */
+ if (cstate->dfsnum < state->dfsnum
+ && cstate->on_sccstack)
+ state->low = MIN (cstate->dfsnum, state->low);
+
+ /* Return the current lattice value. We start with an equality
+     assumption so types that are part of an SCC will be optimistically
+ treated equal unless proven otherwise. */
+ return cstate->u.same_p;
+}
+
+/* Worker for gimple_types_compatible_p.
+ SCCSTACK, SCCSTATE and SCCSTATE_OBSTACK are state for the DFS walk done. */
+
+static bool
+gimple_types_compatible_p_1 (tree t1, tree t2, type_pair_t p,
+ VEC(type_pair_t, heap) **sccstack,
+ struct pointer_map_t *sccstate,
+ struct obstack *sccstate_obstack)
+{
+ struct sccs *state;
+
+ gcc_assert (p->same_p == -2);
+
+ state = XOBNEW (sccstate_obstack, struct sccs);
+ *pointer_map_insert (sccstate, p) = state;
+
+ VEC_safe_push (type_pair_t, heap, *sccstack, p);
+ state->dfsnum = gtc_next_dfs_num++;
+ state->low = state->dfsnum;
+ state->on_sccstack = true;
+ /* Start with an equality assumption. As we DFS recurse into child
+ SCCs this assumption may get revisited. */
+ state->u.same_p = 1;
+
+ /* The struct tags shall compare equal. */
+ if (!compare_type_names_p (t1, t2))
+ goto different_types;
+
+ /* We may not merge typedef types to the same type in different
+ contexts. */
+ if (TYPE_NAME (t1)
+ && TREE_CODE (TYPE_NAME (t1)) == TYPE_DECL
+ && DECL_CONTEXT (TYPE_NAME (t1))
+ && TYPE_P (DECL_CONTEXT (TYPE_NAME (t1))))
+ {
+ if (!gtc_visit (DECL_CONTEXT (TYPE_NAME (t1)),
+ DECL_CONTEXT (TYPE_NAME (t2)),
+ state, sccstack, sccstate, sccstate_obstack))
+ goto different_types;
+ }
+
+ /* If their attributes are not the same they can't be the same type. */
+ if (!attribute_list_equal (TYPE_ATTRIBUTES (t1), TYPE_ATTRIBUTES (t2)))
+ goto different_types;
+
+ /* Do type-specific comparisons. */
+ switch (TREE_CODE (t1))
+ {
+ case VECTOR_TYPE:
+ case COMPLEX_TYPE:
+ if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2),
+ state, sccstack, sccstate, sccstate_obstack))
+ goto different_types;
+ goto same_types;
+
+ case ARRAY_TYPE:
+ /* Array types are the same if the element types are the same and
+ the number of elements are the same. */
+ if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2),
+ state, sccstack, sccstate, sccstate_obstack)
+ || TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2)
+ || TYPE_NONALIASED_COMPONENT (t1) != TYPE_NONALIASED_COMPONENT (t2))
+ goto different_types;
+ else
+ {
+ tree i1 = TYPE_DOMAIN (t1);
+ tree i2 = TYPE_DOMAIN (t2);
+
+ /* For an incomplete external array, the type domain can be
+ NULL_TREE. Check this condition also. */
+ if (i1 == NULL_TREE && i2 == NULL_TREE)
+ goto same_types;
+ else if (i1 == NULL_TREE || i2 == NULL_TREE)
+ goto different_types;
+ else
+ {
+ tree min1 = TYPE_MIN_VALUE (i1);
+ tree min2 = TYPE_MIN_VALUE (i2);
+ tree max1 = TYPE_MAX_VALUE (i1);
+ tree max2 = TYPE_MAX_VALUE (i2);
+
+ /* The minimum/maximum values have to be the same. */
+ if ((min1 == min2
+ || (min1 && min2
+ && ((TREE_CODE (min1) == PLACEHOLDER_EXPR
+ && TREE_CODE (min2) == PLACEHOLDER_EXPR)
+ || operand_equal_p (min1, min2, 0))))
+ && (max1 == max2
+ || (max1 && max2
+ && ((TREE_CODE (max1) == PLACEHOLDER_EXPR
+ && TREE_CODE (max2) == PLACEHOLDER_EXPR)
+ || operand_equal_p (max1, max2, 0)))))
+ goto same_types;
+ else
+ goto different_types;
+ }
+ }
+
+ case METHOD_TYPE:
+ /* Method types should belong to the same class. */
+ if (!gtc_visit (TYPE_METHOD_BASETYPE (t1), TYPE_METHOD_BASETYPE (t2),
+ state, sccstack, sccstate, sccstate_obstack))
+ goto different_types;
+
+ /* Fallthru */
+
+ case FUNCTION_TYPE:
+ /* Function types are the same if the return type and arguments types
+ are the same. */
+ if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2),
+ state, sccstack, sccstate, sccstate_obstack))
+ goto different_types;
+
+ if (!comp_type_attributes (t1, t2))
+ goto different_types;
+
+ if (TYPE_ARG_TYPES (t1) == TYPE_ARG_TYPES (t2))
+ goto same_types;
+ else
+ {
+ tree parms1, parms2;
+
+ for (parms1 = TYPE_ARG_TYPES (t1), parms2 = TYPE_ARG_TYPES (t2);
+ parms1 && parms2;
+ parms1 = TREE_CHAIN (parms1), parms2 = TREE_CHAIN (parms2))
+ {
+ if (!gtc_visit (TREE_VALUE (parms1), TREE_VALUE (parms2),
+ state, sccstack, sccstate, sccstate_obstack))
+ goto different_types;
+ }
+
+ if (parms1 || parms2)
+ goto different_types;
+
+ goto same_types;
+ }
+
+ case OFFSET_TYPE:
+ {
+ if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2),
+ state, sccstack, sccstate, sccstate_obstack)
+ || !gtc_visit (TYPE_OFFSET_BASETYPE (t1),
+ TYPE_OFFSET_BASETYPE (t2),
+ state, sccstack, sccstate, sccstate_obstack))
+ goto different_types;
+
+ goto same_types;
+ }
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ {
+ /* If the two pointers have different ref-all attributes,
+ they can't be the same type. */
+ if (TYPE_REF_CAN_ALIAS_ALL (t1) != TYPE_REF_CAN_ALIAS_ALL (t2))
+ goto different_types;
+
+ /* Otherwise, pointer and reference types are the same if the
+ pointed-to types are the same. */
+ if (gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2),
+ state, sccstack, sccstate, sccstate_obstack))
+ goto same_types;
+
+ goto different_types;
+ }
+
+ case INTEGER_TYPE:
+ case BOOLEAN_TYPE:
+ {
+ tree min1 = TYPE_MIN_VALUE (t1);
+ tree max1 = TYPE_MAX_VALUE (t1);
+ tree min2 = TYPE_MIN_VALUE (t2);
+ tree max2 = TYPE_MAX_VALUE (t2);
+ bool min_equal_p = false;
+ bool max_equal_p = false;
+
+ /* If either type has a minimum value, the other type must
+ have the same. */
+ if (min1 == NULL_TREE && min2 == NULL_TREE)
+ min_equal_p = true;
+ else if (min1 && min2 && operand_equal_p (min1, min2, 0))
+ min_equal_p = true;
+
+ /* Likewise, if either type has a maximum value, the other
+ type must have the same. */
+ if (max1 == NULL_TREE && max2 == NULL_TREE)
+ max_equal_p = true;
+ else if (max1 && max2 && operand_equal_p (max1, max2, 0))
+ max_equal_p = true;
+
+ if (!min_equal_p || !max_equal_p)
+ goto different_types;
+
+ goto same_types;
+ }
+
+ case ENUMERAL_TYPE:
+ {
+ /* FIXME lto, we cannot check bounds on enumeral types because
+ different front ends will produce different values.
+ In C, enumeral types are integers, while in C++ each element
+ will have its own symbolic value. We should decide how enums
+ are to be represented in GIMPLE and have each front end lower
+ to that. */
+ tree v1, v2;
+
+ /* For enumeral types, all the values must be the same. */
+ if (TYPE_VALUES (t1) == TYPE_VALUES (t2))
+ goto same_types;
+
+ for (v1 = TYPE_VALUES (t1), v2 = TYPE_VALUES (t2);
+ v1 && v2;
+ v1 = TREE_CHAIN (v1), v2 = TREE_CHAIN (v2))
+ {
+ tree c1 = TREE_VALUE (v1);
+ tree c2 = TREE_VALUE (v2);
+
+ if (TREE_CODE (c1) == CONST_DECL)
+ c1 = DECL_INITIAL (c1);
+
+ if (TREE_CODE (c2) == CONST_DECL)
+ c2 = DECL_INITIAL (c2);
+
+ if (tree_int_cst_equal (c1, c2) != 1)
+ goto different_types;
+
+ if (TREE_PURPOSE (v1) != TREE_PURPOSE (v2))
+ goto different_types;
+ }
+
+ /* If one enumeration has more values than the other, they
+ are not the same. */
+ if (v1 || v2)
+ goto different_types;
+
+ goto same_types;
+ }
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ {
+ tree f1, f2;
+
+ /* For aggregate types, all the fields must be the same. */
+ for (f1 = TYPE_FIELDS (t1), f2 = TYPE_FIELDS (t2);
+ f1 && f2;
+ f1 = TREE_CHAIN (f1), f2 = TREE_CHAIN (f2))
+ {
+ /* Different field kinds are not compatible. */
+ if (TREE_CODE (f1) != TREE_CODE (f2))
+ goto different_types;
+ /* Field decls must have the same name and offset. */
+ if (TREE_CODE (f1) == FIELD_DECL
+ && (DECL_NONADDRESSABLE_P (f1) != DECL_NONADDRESSABLE_P (f2)
+ || !gimple_compare_field_offset (f1, f2)))
+ goto different_types;
+ /* All entities should have the same name and type. */
+ if (DECL_NAME (f1) != DECL_NAME (f2)
+ || !gtc_visit (TREE_TYPE (f1), TREE_TYPE (f2),
+ state, sccstack, sccstate, sccstate_obstack))
+ goto different_types;
+ }
+
+ /* If one aggregate has more fields than the other, they
+ are not the same. */
+ if (f1 || f2)
+ goto different_types;
+
+ goto same_types;
+ }
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Common exit path for types that are not compatible. */
+different_types:
+ state->u.same_p = 0;
+ goto pop;
+
+ /* Common exit path for types that are compatible. */
+same_types:
+ gcc_assert (state->u.same_p == 1);
+
+pop:
+ if (state->low == state->dfsnum)
+ {
+ type_pair_t x;
+
+ /* Pop off the SCC and set its cache values to the final
+ comparison result. */
+ do
+ {
+ struct sccs *cstate;
+ x = VEC_pop (type_pair_t, *sccstack);
+ cstate = (struct sccs *)*pointer_map_contains (sccstate, x);
+ cstate->on_sccstack = false;
+ x->same_p = state->u.same_p;
+ }
+ while (x != p);
+ }
+
+ return state->u.same_p;
+}
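
The -2/0/1 lattice above implements optimistic cycle handling: a pair under comparison is provisionally treated as equal so that self-referential types terminate, and the whole SCC is later committed to the final verdict. A self-contained sketch of that idea on a toy recursive type (everything below is invented for illustration and omits the SCC commit step):

#include <stdio.h>
#include <string.h>

struct ty { const char *name; struct ty *link; };

static struct { struct ty *a, *b; int same; } cache[64];
static int ncached;

static int
compat (struct ty *a, struct ty *b)
{
  int i;
  if (a == b) return 1;
  if (!a || !b) return 0;
  for (i = 0; i < ncached; i++)
    if (cache[i].a == a && cache[i].b == b)
      return cache[i].same;       /* may be the optimistic 1 */
  i = ncached++;
  cache[i].a = a;
  cache[i].b = b;
  cache[i].same = 1;              /* assume equal, like same_p's start */
  cache[i].same = strcmp (a->name, b->name) == 0
		  && compat (a->link, b->link);
  return cache[i].same;
}

int
main (void)
{
  struct ty a = { "node", &a };   /* type that refers to itself */
  struct ty b = { "node", &b };
  printf ("%d\n", compat (&a, &b));   /* 1: the cycle resolves optimistically */
  return 0;
}
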
+
+/* Return true iff T1 and T2 are structurally identical.  An incomplete
+   type and a complete type are considered different.  */
+
+static bool
+gimple_types_compatible_p (tree t1, tree t2)
+{
+ VEC(type_pair_t, heap) *sccstack = NULL;
+ struct pointer_map_t *sccstate;
+ struct obstack sccstate_obstack;
+ type_pair_t p = NULL;
+ bool res;
+ tree leader1, leader2;
+
+ /* Before starting to set up the SCC machinery handle simple cases. */
+
+ /* Check first for the obvious case of pointer identity. */
+ if (t1 == t2)
+ return true;
+
+ /* Check that we have two types to compare. */
+ if (t1 == NULL_TREE || t2 == NULL_TREE)
+ return false;
+
+ /* Can't be the same type if the types don't have the same code. */
+ if (TREE_CODE (t1) != TREE_CODE (t2))
+ return false;
+
+ /* Can't be the same type if they have different CV qualifiers. */
+ if (TYPE_QUALS (t1) != TYPE_QUALS (t2))
+ return false;
+
+ if (TREE_ADDRESSABLE (t1) != TREE_ADDRESSABLE (t2))
+ return false;
+
+ /* Void types and nullptr types are always the same. */
+ if (TREE_CODE (t1) == VOID_TYPE
+ || TREE_CODE (t1) == NULLPTR_TYPE)
+ return true;
+
+ /* Can't be the same type if they have different alignment or mode. */
+ if (TYPE_ALIGN (t1) != TYPE_ALIGN (t2)
+ || TYPE_MODE (t1) != TYPE_MODE (t2))
+ return false;
+
+ /* Do some simple checks before doing three hashtable queries. */
+ if (INTEGRAL_TYPE_P (t1)
+ || SCALAR_FLOAT_TYPE_P (t1)
+ || FIXED_POINT_TYPE_P (t1)
+ || TREE_CODE (t1) == VECTOR_TYPE
+ || TREE_CODE (t1) == COMPLEX_TYPE
+ || TREE_CODE (t1) == OFFSET_TYPE
+ || POINTER_TYPE_P (t1))
+ {
+ /* Can't be the same type if they have different sign or precision. */
+ if (TYPE_PRECISION (t1) != TYPE_PRECISION (t2)
+ || TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
+ return false;
+
+ if (TREE_CODE (t1) == INTEGER_TYPE
+ && TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2))
+ return false;
+
+ /* That's all we need to check for float and fixed-point types. */
+ if (SCALAR_FLOAT_TYPE_P (t1)
+ || FIXED_POINT_TYPE_P (t1))
+ return true;
+
+ /* For other types fall through to more complex checks. */
+ }
+
+ /* If the types have been previously registered and found equal
+ they still are. */
+ leader1 = gimple_lookup_type_leader (t1);
+ leader2 = gimple_lookup_type_leader (t2);
+ if (leader1 == t2
+ || t1 == leader2
+ || (leader1 && leader1 == leader2))
+ return true;
+
+ /* If the hash values of t1 and t2 are different the types can't
+ possibly be the same. This helps keeping the type-pair hashtable
+ small, only tracking comparisons for hash collisions. */
+ if (gimple_type_hash (t1) != gimple_type_hash (t2))
+ return false;
+
+ /* If we've visited this type pair before (in the case of aggregates
+ with self-referential types), and we made a decision, return it. */
+ p = lookup_type_pair (t1, t2);
+ if (p->same_p == 0 || p->same_p == 1)
+ {
+ /* We have already decided whether T1 and T2 are the
+ same, return the cached result. */
+ return p->same_p == 1;
+ }
+
+ /* Now set up the SCC machinery for the comparison. */
+ gtc_next_dfs_num = 1;
+ sccstate = pointer_map_create ();
+ gcc_obstack_init (&sccstate_obstack);
+ res = gimple_types_compatible_p_1 (t1, t2, p,
+ &sccstack, sccstate, &sccstate_obstack);
+ VEC_free (type_pair_t, heap, sccstack);
+ pointer_map_destroy (sccstate);
+ obstack_free (&sccstate_obstack, NULL);
+
+ return res;
+}
+
+static hashval_t
+iterative_hash_gimple_type (tree, hashval_t, VEC(tree, heap) **,
+ struct pointer_map_t *, struct obstack *);
+
+/* DFS visit the edge from the callers type with state *STATE to T.
+ Update the callers type hash V with the hash for T if it is not part
+ of the SCC containing the callers type and return it.
+ SCCSTACK, SCCSTATE and SCCSTATE_OBSTACK are state for the DFS walk done. */
+
+static hashval_t
+visit (tree t, struct sccs *state, hashval_t v,
+ VEC (tree, heap) **sccstack,
+ struct pointer_map_t *sccstate,
+ struct obstack *sccstate_obstack)
+{
+ struct sccs *cstate = NULL;
+ struct tree_int_map m;
+ void **slot;
+
+ /* If there is a hash value recorded for this type then it can't
+ possibly be part of our parent SCC. Simply mix in its hash. */
+ m.base.from = t;
+ if ((slot = htab_find_slot (type_hash_cache, &m, NO_INSERT))
+ && *slot)
+ return iterative_hash_hashval_t (((struct tree_int_map *) *slot)->to, v);
+
+ if ((slot = pointer_map_contains (sccstate, t)) != NULL)
+ cstate = (struct sccs *)*slot;
+ if (!cstate)
+ {
+ hashval_t tem;
+ /* Not yet visited. DFS recurse. */
+ tem = iterative_hash_gimple_type (t, v,
+ sccstack, sccstate, sccstate_obstack);
+ if (!cstate)
+ cstate = (struct sccs *)* pointer_map_contains (sccstate, t);
+ state->low = MIN (state->low, cstate->low);
+ /* If the type is no longer on the SCC stack and thus is not part
+	 of the parent's SCC, mix in its hash value.  Otherwise we will
+ ignore the type for hashing purposes and return the unaltered
+ hash value. */
+ if (!cstate->on_sccstack)
+ return tem;
+ }
+ if (cstate->dfsnum < state->dfsnum
+ && cstate->on_sccstack)
+ state->low = MIN (cstate->dfsnum, state->low);
+
+  /* We are part of our parent's SCC, skip this type during hashing
+ and return the unaltered hash value. */
+ return v;
+}
+
+/* Hash NAME with the previous hash value V and return it. */
+
+static hashval_t
+iterative_hash_name (tree name, hashval_t v)
+{
+ if (!name)
+ return v;
+ v = iterative_hash_hashval_t (TREE_CODE (name), v);
+ if (TREE_CODE (name) == TYPE_DECL)
+ name = DECL_NAME (name);
+ if (!name)
+ return v;
+ gcc_assert (TREE_CODE (name) == IDENTIFIER_NODE);
+ return iterative_hash_object (IDENTIFIER_HASH_VALUE (name), v);
+}
+
+/* A type, hashvalue pair for sorting SCC members. */
+
+struct type_hash_pair {
+ tree type;
+ hashval_t hash;
+};
+
+/* Compare two type, hashvalue pairs. */
+
+static int
+type_hash_pair_compare (const void *p1_, const void *p2_)
+{
+ const struct type_hash_pair *p1 = (const struct type_hash_pair *) p1_;
+ const struct type_hash_pair *p2 = (const struct type_hash_pair *) p2_;
+ if (p1->hash < p2->hash)
+ return -1;
+ else if (p1->hash > p2->hash)
+ return 1;
+ return 0;
+}
+
+/* Return a hash value for the GIMPLE type TYPE, combined with VAL.
+ SCCSTACK, SCCSTATE and SCCSTATE_OBSTACK are state for the DFS walk done.
+
+ To hash a type we end up hashing in types that are reachable.
+   Through pointers we can end up with cycles, which would break the
+   required property that structurally equivalent types get the same
+   hash value.  To avoid this we have to
+ hash all types in a cycle (the SCC) in a commutative way. The
+ easiest way is to not mix in the hashes of the SCC members at
+ all. To make this work we have to delay setting the hash
+ values of the SCC until it is complete. */
+
+static hashval_t
+iterative_hash_gimple_type (tree type, hashval_t val,
+ VEC(tree, heap) **sccstack,
+ struct pointer_map_t *sccstate,
+ struct obstack *sccstate_obstack)
+{
+ hashval_t v;
+ void **slot;
+ struct sccs *state;
+
+ /* Not visited during this DFS walk. */
+ gcc_checking_assert (!pointer_map_contains (sccstate, type));
+ state = XOBNEW (sccstate_obstack, struct sccs);
+ *pointer_map_insert (sccstate, type) = state;
+
+ VEC_safe_push (tree, heap, *sccstack, type);
+ state->dfsnum = next_dfs_num++;
+ state->low = state->dfsnum;
+ state->on_sccstack = true;
+
+ /* Combine a few common features of types so that types are grouped into
+ smaller sets; when searching for existing matching types to merge,
+ only existing types having the same features as the new type will be
+ checked. */
+ v = iterative_hash_name (TYPE_NAME (type), 0);
+ if (TYPE_NAME (type)
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_CONTEXT (TYPE_NAME (type))
+ && TYPE_P (DECL_CONTEXT (TYPE_NAME (type))))
+ v = visit (DECL_CONTEXT (TYPE_NAME (type)), state, v,
+ sccstack, sccstate, sccstate_obstack);
+ v = iterative_hash_hashval_t (TREE_CODE (type), v);
+ v = iterative_hash_hashval_t (TYPE_QUALS (type), v);
+ v = iterative_hash_hashval_t (TREE_ADDRESSABLE (type), v);
+
+  /* Do not hash the type's size as this will cause differences in
+ hash values for the complete vs. the incomplete type variant. */
+
+ /* Incorporate common features of numerical types. */
+ if (INTEGRAL_TYPE_P (type)
+ || SCALAR_FLOAT_TYPE_P (type)
+ || FIXED_POINT_TYPE_P (type))
+ {
+ v = iterative_hash_hashval_t (TYPE_PRECISION (type), v);
+ v = iterative_hash_hashval_t (TYPE_MODE (type), v);
+ v = iterative_hash_hashval_t (TYPE_UNSIGNED (type), v);
+ }
+
+ /* For pointer and reference types, fold in information about the type
+ pointed to. */
+ if (POINTER_TYPE_P (type))
+ v = visit (TREE_TYPE (type), state, v,
+ sccstack, sccstate, sccstate_obstack);
+
+  /* For integer types hash the type's min/max values and the string flag.  */
+ if (TREE_CODE (type) == INTEGER_TYPE)
+ {
+ /* OMP lowering can introduce error_mark_node in place of
+ random local decls in types. */
+ if (TYPE_MIN_VALUE (type) != error_mark_node)
+ v = iterative_hash_expr (TYPE_MIN_VALUE (type), v);
+ if (TYPE_MAX_VALUE (type) != error_mark_node)
+ v = iterative_hash_expr (TYPE_MAX_VALUE (type), v);
+ v = iterative_hash_hashval_t (TYPE_STRING_FLAG (type), v);
+ }
+
+ /* For array types hash the domain and the string flag. */
+ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type))
+ {
+ v = iterative_hash_hashval_t (TYPE_STRING_FLAG (type), v);
+ v = visit (TYPE_DOMAIN (type), state, v,
+ sccstack, sccstate, sccstate_obstack);
+ }
+
+ /* Recurse for aggregates with a single element type. */
+ if (TREE_CODE (type) == ARRAY_TYPE
+ || TREE_CODE (type) == COMPLEX_TYPE
+ || TREE_CODE (type) == VECTOR_TYPE)
+ v = visit (TREE_TYPE (type), state, v,
+ sccstack, sccstate, sccstate_obstack);
+
+ /* Incorporate function return and argument types. */
+ if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
+ {
+ unsigned na;
+ tree p;
+
+ /* For method types also incorporate their parent class. */
+ if (TREE_CODE (type) == METHOD_TYPE)
+ v = visit (TYPE_METHOD_BASETYPE (type), state, v,
+ sccstack, sccstate, sccstate_obstack);
+
+ /* Check result and argument types. */
+ v = visit (TREE_TYPE (type), state, v,
+ sccstack, sccstate, sccstate_obstack);
+ for (p = TYPE_ARG_TYPES (type), na = 0; p; p = TREE_CHAIN (p))
+ {
+ v = visit (TREE_VALUE (p), state, v,
+ sccstack, sccstate, sccstate_obstack);
+ na++;
+ }
+
+ v = iterative_hash_hashval_t (na, v);
+ }
+
+ if (RECORD_OR_UNION_TYPE_P (type))
+ {
+ unsigned nf;
+ tree f;
+
+ for (f = TYPE_FIELDS (type), nf = 0; f; f = TREE_CHAIN (f))
+ {
+ v = iterative_hash_name (DECL_NAME (f), v);
+ v = visit (TREE_TYPE (f), state, v,
+ sccstack, sccstate, sccstate_obstack);
+ nf++;
+ }
+
+ v = iterative_hash_hashval_t (nf, v);
+ }
+
+ /* Record hash for us. */
+ state->u.hash = v;
+
+ /* See if we found an SCC. */
+ if (state->low == state->dfsnum)
+ {
+ tree x;
+ struct tree_int_map *m;
+
+ /* Pop off the SCC and set its hash values. */
+ x = VEC_pop (tree, *sccstack);
+ /* Optimize SCC size one. */
+ if (x == type)
+ {
+ state->on_sccstack = false;
+ m = ggc_alloc_cleared_tree_int_map ();
+ m->base.from = x;
+ m->to = v;
+ slot = htab_find_slot (type_hash_cache, m, INSERT);
+ gcc_assert (!*slot);
+ *slot = (void *) m;
+ }
+ else
+ {
+ struct sccs *cstate;
+ unsigned first, i, size, j;
+ struct type_hash_pair *pairs;
+ /* Pop off the SCC and build an array of type, hash pairs. */
+ first = VEC_length (tree, *sccstack) - 1;
+ while (VEC_index (tree, *sccstack, first) != type)
+ --first;
+ size = VEC_length (tree, *sccstack) - first + 1;
+ pairs = XALLOCAVEC (struct type_hash_pair, size);
+ i = 0;
+ cstate = (struct sccs *)*pointer_map_contains (sccstate, x);
+ cstate->on_sccstack = false;
+ pairs[i].type = x;
+ pairs[i].hash = cstate->u.hash;
+ do
+ {
+ x = VEC_pop (tree, *sccstack);
+ cstate = (struct sccs *)*pointer_map_contains (sccstate, x);
+ cstate->on_sccstack = false;
+ ++i;
+ pairs[i].type = x;
+ pairs[i].hash = cstate->u.hash;
+ }
+ while (x != type);
+ gcc_assert (i + 1 == size);
+ /* Sort the arrays of type, hash pairs so that when we mix in
+	     all members of the SCC the hash value becomes independent of
+	     the order in which we visited the SCC.  Disregard hashes equal to
+ the hash of the type we mix into because we cannot guarantee
+ a stable sort for those across different TUs. */
+ qsort (pairs, size, sizeof (struct type_hash_pair),
+ type_hash_pair_compare);
+ for (i = 0; i < size; ++i)
+ {
+ hashval_t hash;
+ m = ggc_alloc_cleared_tree_int_map ();
+ m->base.from = pairs[i].type;
+ hash = pairs[i].hash;
+ /* Skip same hashes. */
+ for (j = i + 1; j < size && pairs[j].hash == pairs[i].hash; ++j)
+ ;
+ for (; j < size; ++j)
+ hash = iterative_hash_hashval_t (pairs[j].hash, hash);
+ for (j = 0; pairs[j].hash != pairs[i].hash; ++j)
+ hash = iterative_hash_hashval_t (pairs[j].hash, hash);
+ m->to = hash;
+ if (pairs[i].type == type)
+ v = hash;
+ slot = htab_find_slot (type_hash_cache, m, INSERT);
+ gcc_assert (!*slot);
+ *slot = (void *) m;
+ }
+ }
+ }
+
+ return iterative_hash_hashval_t (v, val);
+}
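
The heart of the SCC branch above is making the combined hash independent of visit order: the member hashes are sorted into a canonical order before being mixed together. The sketch below shows only that order-independence trick and is much simpler than the real code, which additionally computes a distinct final hash per member and skips hashes equal to the member's own:

#include <stdio.h>
#include <stdlib.h>

/* Toy mixer standing in for iterative_hash_hashval_t.  */
static unsigned
mix (unsigned h, unsigned v)
{
  return h * 1000003u + v;
}

static int
cmp (const void *p1, const void *p2)
{
  unsigned a = *(const unsigned *) p1, b = *(const unsigned *) p2;
  return a < b ? -1 : a > b;
}

/* Combine member hashes of an SCC so the result does not depend on
   the order in which the members were visited.  */
static unsigned
scc_hash (unsigned *h, int n)
{
  unsigned v = 0;
  int i;
  qsort (h, n, sizeof *h, cmp);   /* canonical order first */
  for (i = 0; i < n; i++)
    v = mix (v, h[i]);
  return v;
}

int
main (void)
{
  unsigned a[3] = { 3, 1, 2 }, b[3] = { 2, 3, 1 };
  /* Same members visited in different orders, same combined hash.  */
  printf ("%u %u\n", scc_hash (a, 3), scc_hash (b, 3));
  return 0;
}
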
+
+/* Returns a hash value for P (assumed to be a type). The hash value
+ is computed using some distinguishing features of the type. Note
+ that we cannot use pointer hashing here as we may be dealing with
+ two distinct instances of the same type.
+
+ This function should produce the same hash value for two compatible
+ types according to gimple_types_compatible_p. */
+
+static hashval_t
+gimple_type_hash (const void *p)
+{
+ const_tree t = (const_tree) p;
+ VEC(tree, heap) *sccstack = NULL;
+ struct pointer_map_t *sccstate;
+ struct obstack sccstate_obstack;
+ hashval_t val;
+ void **slot;
+ struct tree_int_map m;
+
+ m.base.from = CONST_CAST_TREE (t);
+ if ((slot = htab_find_slot (type_hash_cache, &m, NO_INSERT))
+ && *slot)
+ return iterative_hash_hashval_t (((struct tree_int_map *) *slot)->to, 0);
+
+ /* Perform a DFS walk and pre-hash all reachable types. */
+ next_dfs_num = 1;
+ sccstate = pointer_map_create ();
+ gcc_obstack_init (&sccstate_obstack);
+ val = iterative_hash_gimple_type (CONST_CAST_TREE (t), 0,
+ &sccstack, sccstate, &sccstate_obstack);
+ VEC_free (tree, heap, sccstack);
+ pointer_map_destroy (sccstate);
+ obstack_free (&sccstate_obstack, NULL);
+
+ return val;
+}
+
+/* Returns nonzero if P1 and P2 are equal. */
+
+static int
+gimple_type_eq (const void *p1, const void *p2)
+{
+ const_tree t1 = (const_tree) p1;
+ const_tree t2 = (const_tree) p2;
+ return gimple_types_compatible_p (CONST_CAST_TREE (t1),
+ CONST_CAST_TREE (t2));
+}
+
+
+/* Worker for gimple_register_type.
+ Register type T in the global type table gimple_types.
+   When REGISTERING_MV is false, first recurse for the main variant of T.  */
+
+static tree
+gimple_register_type_1 (tree t, bool registering_mv)
+{
+ void **slot;
+ gimple_type_leader_entry *leader;
+
+ /* If we registered this type before return the cached result. */
+ leader = &gimple_type_leader[TYPE_UID (t) % GIMPLE_TYPE_LEADER_SIZE];
+ if (leader->type == t)
+ return leader->leader;
+
+ /* Always register the main variant first. This is important so we
+ pick up the non-typedef variants as canonical, otherwise we'll end
+ up taking typedef ids for structure tags during comparison.
+ It also makes sure that main variants will be merged to main variants.
+ As we are operating on a possibly partially fixed up type graph
+ do not bother to recurse more than once, otherwise we may end up
+ walking in circles.
+ If we are registering a main variant it will either remain its
+ own main variant or it will be merged to something else in which
+ case we do not care for the main variant leader. */
+ if (!registering_mv
+ && TYPE_MAIN_VARIANT (t) != t)
+ gimple_register_type_1 (TYPE_MAIN_VARIANT (t), true);
+
+ /* See if we already have an equivalent type registered. */
+ slot = htab_find_slot (gimple_types, t, INSERT);
+ if (*slot
+ && *(tree *)slot != t)
+ {
+ tree new_type = (tree) *((tree *) slot);
+ leader->type = t;
+ leader->leader = new_type;
+ return new_type;
+ }
+
+ /* If not, insert it to the cache and the hash. */
+ leader->type = t;
+ leader->leader = t;
+ *slot = (void *) t;
+ return t;
+}
+
+/* Register type T in the global type table gimple_types.
+ If another type T', compatible with T, already existed in
+ gimple_types then return T', otherwise return T. This is used by
+ LTO to merge identical types read from different TUs. */
+
+static tree
+gimple_register_type (tree t)
+{
+ gcc_assert (TYPE_P (t));
+ return gimple_register_type_1 (t, false);
+}
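
gimple_register_type_1 is hash-consing: the first type of each equivalence class becomes its leader, later equivalent types are redirected to it, and the direct-mapped leader cache short-circuits repeat queries. A toy standalone version of the leader pattern, interning strings instead of types (all names invented, linear search instead of a hash table):

#include <stdio.h>
#include <string.h>

/* Toy interning table: equal contents share one leader, the way
   equivalent types are merged to a single representative.  */
static const char *table[8];
static int n_entries;

static const char *
register_leader (const char *t)
{
  int i;
  for (i = 0; i < n_entries; i++)
    if (strcmp (table[i], t) == 0)
      return table[i];            /* an equivalent entry already leads */
  table[n_entries++] = t;         /* first of its class: new leader */
  return t;
}

int
main (void)
{
  const char *a = "struct S";
  char b[] = "struct S";          /* equal contents, distinct object */
  printf ("%d\n", register_leader (a) == register_leader (b));   /* 1 */
  return 0;
}
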
+
+#define GIMPLE_REGISTER_TYPE(tt) \
+ (TREE_VISITED (tt) ? gimple_register_type (tt) : tt)
+
+
+
/* A hashtable of trees that potentially refer to variables or functions
that must be replaced with their prevailing variant. */
static GTY((if_marked ("ggc_marked_p"), param_is (union tree_node))) htab_t
@@ -289,9 +1398,6 @@ remember_with_vars (tree t)
*(tree *) htab_find_slot (tree_with_vars, t, INSERT) = t;
}
-#define GIMPLE_REGISTER_TYPE(tt) \
- (TREE_VISITED (tt) ? gimple_register_type (tt) : tt)
-
#define LTO_FIXUP_TREE(tt) \
do \
{ \
@@ -744,6 +1850,7 @@ uniquify_nodes (struct data_in *data_in, unsigned from)
variant list state before fixup is broken. */
tree tem, mv;
+#ifdef ENABLE_CHECKING
/* Remove us from our main variant list if we are not the
variant leader. */
if (TYPE_MAIN_VARIANT (t) != t)
@@ -751,10 +1858,9 @@ uniquify_nodes (struct data_in *data_in, unsigned from)
tem = TYPE_MAIN_VARIANT (t);
while (tem && TYPE_NEXT_VARIANT (tem) != t)
tem = TYPE_NEXT_VARIANT (tem);
- if (tem)
- TYPE_NEXT_VARIANT (tem) = TYPE_NEXT_VARIANT (t);
- TYPE_NEXT_VARIANT (t) = NULL_TREE;
+ gcc_assert (!tem && !TYPE_NEXT_VARIANT (t));
}
+#endif
/* Query our new main variant. */
mv = GIMPLE_REGISTER_TYPE (TYPE_MAIN_VARIANT (t));
@@ -1012,7 +2118,6 @@ lto_resolution_read (splay_tree file_ids, FILE *resolution, lto_file *file)
unsigned int num_symbols;
unsigned int i;
struct lto_file_decl_data *file_data;
- unsigned max_index = 0;
splay_tree_node nd = NULL;
if (!resolution)
@@ -1054,13 +2159,12 @@ lto_resolution_read (splay_tree file_ids, FILE *resolution, lto_file *file)
unsigned int j;
unsigned int lto_resolution_str_len =
sizeof (lto_resolution_str) / sizeof (char *);
+ res_pair rp;
t = fscanf (resolution, "%u " HOST_WIDE_INT_PRINT_HEX_PURE " %26s %*[^\n]\n",
&index, &id, r_str);
if (t != 3)
internal_error ("invalid line in the resolution file");
- if (index > max_index)
- max_index = index;
for (j = 0; j < lto_resolution_str_len; j++)
{
@@ -1082,11 +2186,13 @@ lto_resolution_read (splay_tree file_ids, FILE *resolution, lto_file *file)
}
file_data = (struct lto_file_decl_data *)nd->value;
- VEC_safe_grow_cleared (ld_plugin_symbol_resolution_t, heap,
- file_data->resolutions,
- max_index + 1);
- VEC_replace (ld_plugin_symbol_resolution_t,
- file_data->resolutions, index, r);
+      /* The indexes are very sparse.  To save memory, store them in a compact
+ format that is only unpacked later when the subfile is processed. */
+ rp.res = r;
+ rp.index = index;
+ VEC_safe_push (res_pair, heap, file_data->respairs, rp);
+ if (file_data->max_index < index)
+ file_data->max_index = index;
}
}
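
Instead of growing a dense vector up to the largest index seen, the resolution reader now records sparse (index, resolution) pairs and defers building the dense vector until the subfile is finalized. A standalone sketch of the pack-then-lazily-unpack pattern (types and values invented for the sketch):

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-in for the res_pair of the patch.  */
struct pair_sketch { int res; unsigned index; };

int
main (void)
{
  /* Packed form: only the indexes that actually occur.  */
  struct pair_sketch pairs[2] = { { 7, 2 }, { 9, 100000 } };
  unsigned max_index = 100000;
  int *dense;
  unsigned i;

  /* Lazy unpack into a dense vector, once, when the subfile is used.  */
  dense = (int *) calloc (max_index + 1, sizeof *dense);
  for (i = 0; i < 2; i++)
    dense[pairs[i].index] = pairs[i].res;

  printf ("%d %d\n", dense[2], dense[100000]);   /* 7 9 */
  free (dense);
  return 0;
}
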
@@ -1166,6 +2272,18 @@ lto_file_finalize (struct lto_file_decl_data *file_data, lto_file *file)
{
const char *data;
size_t len;
+ VEC(ld_plugin_symbol_resolution_t,heap) *resolutions = NULL;
+ int i;
+ res_pair *rp;
+
+  /* Create a vector for fast access to the resolutions.  We do this lazily
+ to save memory. */
+ VEC_safe_grow_cleared (ld_plugin_symbol_resolution_t, heap,
+ resolutions,
+ file_data->max_index + 1);
+ for (i = 0; VEC_iterate (res_pair, file_data->respairs, i, rp); i++)
+ VEC_replace (ld_plugin_symbol_resolution_t, resolutions, rp->index, rp->res);
+ VEC_free (res_pair, heap, file_data->respairs);
file_data->renaming_hash_table = lto_create_renaming_table ();
file_data->file_name = file->filename;
@@ -1175,7 +2293,8 @@ lto_file_finalize (struct lto_file_decl_data *file_data, lto_file *file)
internal_error ("cannot read LTO decls from %s", file_data->file_name);
return;
}
- lto_read_decls (file_data, data, file_data->resolutions);
+  /* This call frees RESOLUTIONS.  */
+ lto_read_decls (file_data, data, resolutions);
lto_free_section_data (file_data, LTO_section_decls, NULL, data, len);
}
@@ -1408,14 +2527,10 @@ cmp_partitions_order (const void *a, const void *b)
= *(struct ltrans_partition_def *const *)b;
int ordera = -1, orderb = -1;
- if (VEC_length (cgraph_node_ptr, pa->cgraph_set->nodes))
- ordera = VEC_index (cgraph_node_ptr, pa->cgraph_set->nodes, 0)->symbol.order;
- else if (VEC_length (varpool_node_ptr, pa->varpool_set->nodes))
- ordera = VEC_index (varpool_node_ptr, pa->varpool_set->nodes, 0)->symbol.order;
- if (VEC_length (cgraph_node_ptr, pb->cgraph_set->nodes))
- orderb = VEC_index (cgraph_node_ptr, pb->cgraph_set->nodes, 0)->symbol.order;
- else if (VEC_length (varpool_node_ptr, pb->varpool_set->nodes))
- orderb = VEC_index (varpool_node_ptr, pb->varpool_set->nodes, 0)->symbol.order;
+ if (lto_symtab_encoder_size (pa->encoder))
+ ordera = lto_symtab_encoder_deref (pa->encoder, 0)->symbol.order;
+ if (lto_symtab_encoder_size (pb->encoder))
+ orderb = lto_symtab_encoder_deref (pb->encoder, 0)->symbol.order;
return orderb - ordera;
}
@@ -1427,8 +2542,6 @@ lto_wpa_write_files (void)
{
unsigned i, n_sets;
lto_file *file;
- cgraph_node_set set;
- varpool_node_set vset;
ltrans_partition part;
FILE *ltrans_output_list_stream;
char *temp_filename;
@@ -1444,8 +2557,7 @@ lto_wpa_write_files (void)
timevar_push (TV_WHOPR_WPA);
FOR_EACH_VEC_ELT (ltrans_partition, ltrans_partitions, i, part)
- lto_stats.num_output_cgraph_nodes += VEC_length (cgraph_node_ptr,
- part->cgraph_set->nodes);
+ lto_stats.num_output_symtab_nodes += lto_symtab_encoder_size (part->encoder);
/* Find out statics that need to be promoted
to globals with hidden visibility because they are accessed from multiple
@@ -1478,9 +2590,6 @@ lto_wpa_write_files (void)
size_t len;
ltrans_partition part = VEC_index (ltrans_partition, ltrans_partitions, i);
- set = part->cgraph_set;
- vset = part->varpool_set;
-
/* Write all the nodes in SET. */
sprintf (temp_filename + blen, "%u.o", i);
file = lto_obj_file_open (temp_filename, true);
@@ -1491,22 +2600,27 @@ lto_wpa_write_files (void)
fprintf (stderr, " %s (%s %i insns)", temp_filename, part->name, part->insns);
if (cgraph_dump_file)
{
+ lto_symtab_encoder_iterator lsei;
+
fprintf (cgraph_dump_file, "Writing partition %s to file %s, %i insns\n",
part->name, temp_filename, part->insns);
- fprintf (cgraph_dump_file, "cgraph nodes:");
- dump_cgraph_node_set (cgraph_dump_file, set);
- fprintf (cgraph_dump_file, "varpool nodes:");
- dump_varpool_node_set (cgraph_dump_file, vset);
+ for (lsei = lsei_start_in_partition (part->encoder); !lsei_end_p (lsei);
+ lsei_next_in_partition (&lsei))
+ {
+ symtab_node node = lsei_node (lsei);
+ fprintf (cgraph_dump_file, "%s ", symtab_node_name (node));
+ }
+ fprintf (cgraph_dump_file, "\n");
}
- gcc_checking_assert (cgraph_node_set_nonempty_p (set)
- || varpool_node_set_nonempty_p (vset) || !i);
+ gcc_checking_assert (lto_symtab_encoder_size (part->encoder) || !i);
lto_set_current_out_file (file);
- ipa_write_optimization_summaries (set, vset);
+ ipa_write_optimization_summaries (part->encoder);
lto_set_current_out_file (NULL);
lto_obj_file_close (file);
+ part->encoder = NULL;
len = strlen (temp_filename);
if (fwrite (temp_filename, 1, len, ltrans_output_list_stream) < len
@@ -1757,6 +2871,12 @@ read_cgraph_and_symbols (unsigned nfiles, const char **fnames)
tree_with_vars = htab_create_ggc (101, htab_hash_pointer, htab_eq_pointer,
NULL);
+ type_hash_cache = htab_create_ggc (512, tree_int_map_hash,
+ tree_int_map_eq, NULL);
+ type_pair_cache = XCNEWVEC (struct type_pair_d, GIMPLE_TYPE_PAIR_SIZE);
+ gimple_type_leader = ggc_alloc_cleared_vec_gimple_type_leader_entry_s
+ (GIMPLE_TYPE_LEADER_SIZE);
+ gimple_types = htab_create_ggc (16381, gimple_type_hash, gimple_type_eq, 0);
if (!quiet_flag)
fprintf (stderr, "Reading object files:");
@@ -1827,6 +2947,13 @@ read_cgraph_and_symbols (unsigned nfiles, const char **fnames)
lto_fixup_decls (all_file_decl_data);
htab_delete (tree_with_vars);
tree_with_vars = NULL;
+ htab_delete (gimple_types);
+ gimple_types = NULL;
+ htab_delete (type_hash_cache);
+ type_hash_cache = NULL;
+ free (type_pair_cache);
+ type_pair_cache = NULL;
+ gimple_type_leader = NULL;
free_gimple_type_tables ();
ggc_collect ();
@@ -1928,6 +3055,38 @@ materialize_cgraph (void)
}
+/* Show various memory usage statistics related to LTO. */
+static void
+print_lto_report_1 (void)
+{
+ const char *pfx = (flag_lto) ? "LTO" : (flag_wpa) ? "WPA" : "LTRANS";
+ fprintf (stderr, "%s statistics\n", pfx);
+
+ if (gimple_types)
+ fprintf (stderr, "[%s] GIMPLE type table: size %ld, %ld elements, "
+ "%ld searches, %ld collisions (ratio: %f)\n", pfx,
+ (long) htab_size (gimple_types),
+ (long) htab_elements (gimple_types),
+ (long) gimple_types->searches,
+ (long) gimple_types->collisions,
+ htab_collisions (gimple_types));
+ else
+ fprintf (stderr, "[%s] GIMPLE type table is empty\n", pfx);
+ if (type_hash_cache)
+ fprintf (stderr, "[%s] GIMPLE type hash table: size %ld, %ld elements, "
+ "%ld searches, %ld collisions (ratio: %f)\n", pfx,
+ (long) htab_size (type_hash_cache),
+ (long) htab_elements (type_hash_cache),
+ (long) type_hash_cache->searches,
+ (long) type_hash_cache->collisions,
+ htab_collisions (type_hash_cache));
+ else
+ fprintf (stderr, "[%s] GIMPLE type hash table is empty\n", pfx);
+
+ print_gimple_types_stats (pfx);
+ print_lto_report (pfx);
+}
+
/* Perform whole program analysis (WPA) on the callgraph and write out the
optimization plan. */
@@ -2002,7 +3161,9 @@ do_whole_program_analysis (void)
/* Show the LTO report before launching LTRANS. */
if (flag_lto_report)
- print_lto_report ();
+ print_lto_report_1 ();
+ if (mem_report_wpa)
+ dump_memory_report (true);
}
@@ -2126,7 +3287,7 @@ lto_main (void)
launched directly by the driver we would not need to do
this. */
if (flag_lto_report)
- print_lto_report ();
+ print_lto_report_1 ();
}
}
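
The lto.c hunks above replace the per-partition cgraph/varpool node-set pair with a single lto_symtab_encoder_t. A minimal sketch of the resulting iteration idiom, assembled only from the encoder calls visible in this patch (the helper function itself is hypothetical):

    /* Dump every symbol recorded in a partition's encoder, mirroring
       the cgraph_dump_file loop added to lto_wpa_write_files.  */
    static void
    dump_partition_symbols (FILE *f, ltrans_partition part)
    {
      lto_symtab_encoder_iterator lsei;
      for (lsei = lsei_start_in_partition (part->encoder);
           !lsei_end_p (lsei);
           lsei_next_in_partition (&lsei))
        {
          symtab_node node = lsei_node (lsei);
          fprintf (f, "%s ", symtab_node_name (node));
        }
      fprintf (f, "\n");
    }
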
diff --git a/gcc/objc/objc-next-runtime-abi-02.c b/gcc/objc/objc-next-runtime-abi-02.c
index 4f47a579087..cf899d379bd 100644
--- a/gcc/objc/objc-next-runtime-abi-02.c
+++ b/gcc/objc/objc-next-runtime-abi-02.c
@@ -1068,7 +1068,7 @@ objc_v2_get_class_reference (tree ident)
decl = build_v2_class_reference_decl (ident);
e.ident = ident;
e.data = decl;
- VEC_safe_push (ident_data_tuple, gc, classrefs, &e);
+ VEC_safe_push (ident_data_tuple, gc, classrefs, e);
return decl;
}
@@ -1233,7 +1233,7 @@ build_v2_selector_messenger_reference (tree sel_name, tree message_func_decl)
e.func = message_func_decl;
e.selname = sel_name;
e.refdecl = decl;
- VEC_safe_push (msgref_entry, gc, msgrefs, &e);
+ VEC_safe_push (msgref_entry, gc, msgrefs, e);
return decl;
}
@@ -1290,7 +1290,7 @@ objc_v2_get_protocol_reference (tree ident)
decl = build_v2_protocollist_ref_decl (ident);
e.id = ident;
e.refdecl = decl;
- VEC_safe_push (prot_list_entry, gc, protrefs, &e);
+ VEC_safe_push (prot_list_entry, gc, protrefs, e);
return decl;
}
@@ -1476,7 +1476,7 @@ next_runtime_abi_02_get_class_super_ref (location_t loc ATTRIBUTE_UNUSED,
decl = build_v2_superclass_ref_decl (id, inst_meth);
e.ident = id;
e.data = decl;
- VEC_safe_push (ident_data_tuple, gc, list, &e);
+ VEC_safe_push (ident_data_tuple, gc, list, e);
return decl;
}
@@ -2126,7 +2126,7 @@ objc_add_to_protocol_list (tree protocol_interface_decl, tree protocol_decl)
protlist = VEC_alloc (prot_list_entry, gc, 32);
e.id = protocol_interface_decl;
e.refdecl = protocol_decl;
- VEC_safe_push (prot_list_entry, gc, protlist, &e);
+ VEC_safe_push (prot_list_entry, gc, protlist, e);
}
/* Build the __protocol_list section table containing address of all
@@ -2806,7 +2806,7 @@ ivar_offset_ref (tree class_name, tree field_decl)
e.decl = decl;
e.offset = byte_position (field_decl);
- VEC_safe_push (ivarref_entry, gc, ivar_offset_refs, &e);
+ VEC_safe_push (ivarref_entry, gc, ivar_offset_refs, e);
return decl;
}
@@ -3082,7 +3082,7 @@ objc_v2_add_to_ehtype_list (tree name)
/* Not found, or new list. */
e.ident = name;
e.data = NULL_TREE;
- VEC_safe_push (ident_data_tuple, gc, ehtype_list, &e);
+ VEC_safe_push (ident_data_tuple, gc, ehtype_list, e);
}
static void
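
The objc hunks above track an API change in the VEC macros: VEC_safe_push now takes its element by value rather than by address, so a locally built aggregate is pushed directly and the vector stores a copy. A condensed sketch of the new idiom, reusing the ident_data_tuple fields from this file (the surrounding declarations are assumed):

    ident_data_tuple e;
    e.ident = ident;
    e.data = decl;
    /* Pre-change form: VEC_safe_push (ident_data_tuple, gc, classrefs, &e);  */
    VEC_safe_push (ident_data_tuple, gc, classrefs, e);

Where the element can be built in one shot, a brace initializer does the same job, as in the opts-common.c, read-rtl.c and ree.c hunks further down.
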
diff --git a/gcc/optabs.c b/gcc/optabs.c
index 02c2fd5dd0f..7cae98cf718 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -2908,9 +2908,9 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
}
- mask = double_int_setbit (double_int_zero, bitpos);
+ mask = double_int_zero.set_bit (bitpos);
if (code == ABS)
- mask = double_int_not (mask);
+ mask = ~mask;
if (target == 0
|| target == op0
@@ -3569,7 +3569,7 @@ expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
op1 = operand_subword_force (op1, word, mode);
}
- mask = double_int_setbit (double_int_zero, bitpos);
+ mask = double_int_zero.set_bit (bitpos);
sign = expand_binop (imode, and_optab, op1,
immed_double_int_const (mask, imode),
@@ -3640,7 +3640,7 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
}
- mask = double_int_setbit (double_int_zero, bitpos);
+ mask = double_int_zero.set_bit (bitpos);
if (target == 0
|| target == op0
@@ -3662,8 +3662,7 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
if (!op0_is_abs)
op0_piece
= expand_binop (imode, and_optab, op0_piece,
- immed_double_int_const (double_int_not (mask),
- imode),
+ immed_double_int_const (~mask, imode),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
op1 = expand_binop (imode, and_optab,
@@ -3694,8 +3693,7 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
op0 = gen_lowpart (imode, op0);
if (!op0_is_abs)
op0 = expand_binop (imode, and_optab, op0,
- immed_double_int_const (double_int_not (mask),
- imode),
+ immed_double_int_const (~mask, imode),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
temp = expand_binop (imode, ior_optab, op0, op1,
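
The optabs.c hunks above are part of the double_int conversion from free functions to member operations: double_int_setbit becomes the set_bit method and double_int_not becomes the overloaded ~ operator. A two-line sketch of building a single-bit mask and its complement under the new interface (bitpos assumed in range):

    double_int mask = double_int_zero.set_bit (bitpos);  /* was double_int_setbit (double_int_zero, bitpos)  */
    double_int inv = ~mask;                              /* was double_int_not (mask)  */
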
diff --git a/gcc/opts-common.c b/gcc/opts-common.c
index 354bce07dc2..e024537fa0f 100644
--- a/gcc/opts-common.c
+++ b/gcc/opts-common.c
@@ -1144,12 +1144,8 @@ set_option (struct gcc_options *opts, struct gcc_options *opts_set,
{
VEC(cl_deferred_option,heap) *vec
= (VEC(cl_deferred_option,heap) *) *(void **) flag_var;
- cl_deferred_option *p;
-
- p = VEC_safe_push (cl_deferred_option, heap, vec, NULL);
- p->opt_index = opt_index;
- p->arg = arg;
- p->value = value;
+ cl_deferred_option p = {opt_index, arg, value};
+ VEC_safe_push (cl_deferred_option, heap, vec, p);
*(void **) flag_var = vec;
if (set_flag_var)
*(void **) set_flag_var = vec;
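
The opts-common.c hunk above shows the other half of the VEC_safe_push change: the old pattern of pushing a NULL slot and filling in the returned pointer gives way to a brace-initialized local pushed by value. The same rewrite repeats in read-rtl.c and ree.c below. A condensed sketch (field order follows the cl_deferred_option uses in this hunk):

    /* Pre-change form:
         p = VEC_safe_push (cl_deferred_option, heap, vec, NULL);
         p->opt_index = opt_index; p->arg = arg; p->value = value;  */
    cl_deferred_option opt = {opt_index, arg, value};
    VEC_safe_push (cl_deferred_option, heap, vec, opt);
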
diff --git a/gcc/params.def b/gcc/params.def
index 17351bf0052..a4c930b9e3c 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -885,6 +885,12 @@ DEFPARAM (PARAM_IPA_CP_EVAL_THRESHOLD,
"beneficial to clone.",
500, 0, 0)
+DEFPARAM (PARAM_IPA_MAX_AGG_ITEMS,
+ "ipa-max-agg-items",
+ "Maximum number of aggregate content items for a parameter in "
+ "jump functions and lattices",
+ 16, 0, 0)
+
/* WHOPR partitioning configuration. */
DEFPARAM (PARAM_LTO_PARTITIONS,
@@ -973,6 +979,13 @@ DEFPARAM (PARAM_SCHED_PRESSURE_ALGORITHM,
"Which -fsched-pressure algorithm to apply",
1, 1, 2)
+/* Maximum length of candidate scans in straight-line strength reduction. */
+DEFPARAM (PARAM_MAX_SLSR_CANDIDATE_SCAN,
+ "max-slsr-cand-scan",
+ "Maximum length of candidate scans for straight-line "
+ "strength reduction",
+ 50, 1, 999999)
+
/*
Local variables:
mode:c
diff --git a/gcc/passes.c b/gcc/passes.c
index c5501eb2ef0..01c90095c01 100644
--- a/gcc/passes.c
+++ b/gcc/passes.c
@@ -1776,7 +1776,8 @@ execute_function_todo (void *data)
if (flags & TODO_rebuild_alias)
{
execute_update_addresses_taken ();
- compute_may_aliases ();
+ if (flag_tree_pta)
+ compute_may_aliases ();
}
else if (optimize && (flags & TODO_update_address_taken))
execute_update_addresses_taken ();
@@ -2260,10 +2261,10 @@ ipa_write_summaries_2 (struct opt_pass *pass, struct lto_out_decl_state *state)
summaries. SET is the set of nodes to be written. */
static void
-ipa_write_summaries_1 (cgraph_node_set set, varpool_node_set vset)
+ipa_write_summaries_1 (lto_symtab_encoder_t encoder)
{
struct lto_out_decl_state *state = lto_new_out_decl_state ();
- compute_ltrans_boundary (state, set, vset);
+ state->symtab_node_encoder = encoder;
lto_push_out_decl_state (state);
@@ -2281,16 +2282,15 @@ ipa_write_summaries_1 (cgraph_node_set set, varpool_node_set vset)
void
ipa_write_summaries (void)
{
- cgraph_node_set set;
- varpool_node_set vset;
- struct cgraph_node **order;
- struct varpool_node *vnode;
+ lto_symtab_encoder_t encoder;
int i, order_pos;
+ struct varpool_node *vnode;
+ struct cgraph_node **order;
if (!flag_generate_lto || seen_error ())
return;
- set = cgraph_node_set_new ();
+ encoder = lto_symtab_encoder_new ();
/* Create the callgraph set in the same order used in
cgraph_expand_all_functions. This mostly facilitates debugging,
@@ -2317,19 +2317,16 @@ ipa_write_summaries (void)
pop_cfun ();
}
if (node->analyzed)
- cgraph_node_set_add (set, node);
+ lto_set_symtab_encoder_in_partition (encoder, (symtab_node)node);
}
- vset = varpool_node_set_new ();
FOR_EACH_DEFINED_VARIABLE (vnode)
if ((!vnode->alias || vnode->alias_of))
- varpool_node_set_add (vset, vnode);
+ lto_set_symtab_encoder_in_partition (encoder, (symtab_node)vnode);
- ipa_write_summaries_1 (set, vset);
+ ipa_write_summaries_1 (compute_ltrans_boundary (encoder));
free (order);
- free_cgraph_node_set (set);
- free_varpool_node_set (vset);
}
/* Same as execute_pass_list but assume that subpasses of IPA passes
@@ -2375,16 +2372,17 @@ ipa_write_optimization_summaries_1 (struct opt_pass *pass, struct lto_out_decl_s
NULL, write out all summaries of all nodes. */
void
-ipa_write_optimization_summaries (cgraph_node_set set, varpool_node_set vset)
+ipa_write_optimization_summaries (lto_symtab_encoder_t encoder)
{
struct lto_out_decl_state *state = lto_new_out_decl_state ();
- cgraph_node_set_iterator csi;
- compute_ltrans_boundary (state, set, vset);
+ lto_symtab_encoder_iterator lsei;
+ state->symtab_node_encoder = encoder;
lto_push_out_decl_state (state);
- for (csi = csi_start (set); !csi_end_p (csi); csi_next (&csi))
+ for (lsei = lsei_start_function_in_partition (encoder);
+ !lsei_end_p (lsei); lsei_next_function_in_partition (&lsei))
{
- struct cgraph_node *node = csi_node (csi);
+ struct cgraph_node *node = lsei_cgraph_node (lsei);
/* When streaming out references to statements as part of some IPA
pass summary, the statements need to have uids assigned.
diff --git a/gcc/read-rtl.c b/gcc/read-rtl.c
index 71ecf537642..30c2fb69484 100644
--- a/gcc/read-rtl.c
+++ b/gcc/read-rtl.c
@@ -684,11 +684,8 @@ validate_const_int (const char *string)
static void
record_iterator_use (struct mapping *iterator, void *ptr)
{
- struct iterator_use *iuse;
-
- iuse = VEC_safe_push (iterator_use, heap, iterator_uses, NULL);
- iuse->iterator = iterator;
- iuse->ptr = ptr;
+ struct iterator_use iuse = {iterator, ptr};
+ VEC_safe_push (iterator_use, heap, iterator_uses, iuse);
}
/* Record that PTR uses attribute VALUE, which must match a built-in
@@ -698,12 +695,8 @@ static void
record_attribute_use (struct iterator_group *group, void *ptr,
const char *value)
{
- struct attribute_use *ause;
-
- ause = VEC_safe_push (attribute_use, heap, attribute_uses, NULL);
- ause->group = group;
- ause->value = value;
- ause->ptr = ptr;
+ struct attribute_use ause = {group, value, ptr};
+ VEC_safe_push (attribute_use, heap, attribute_uses, ause);
}
/* Interpret NAME as either a built-in value, iterator or attribute
diff --git a/gcc/ree.c b/gcc/ree.c
index 1d0f1949b80..99ecd578097 100644
--- a/gcc/ree.c
+++ b/gcc/ree.c
@@ -816,11 +816,8 @@ add_removable_extension (const_rtx expr, rtx insn,
/* Then add the candidate to the list and insert the reaching definitions
into the definition map. */
- cand = VEC_safe_push (ext_cand, heap, *insn_list, NULL);
- cand->expr = expr;
- cand->code = code;
- cand->mode = mode;
- cand->insn = insn;
+ ext_cand e = {expr, code, mode, insn};
+ VEC_safe_push (ext_cand, heap, *insn_list, e);
idx = VEC_length (ext_cand, *insn_list);
for (def = defs; def; def = def->next)
diff --git a/gcc/reload1.c b/gcc/reload1.c
index 77c7ba0911a..1bcdfad9377 100644
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -659,15 +659,12 @@ grow_reg_equivs (void)
int old_size = VEC_length (reg_equivs_t, reg_equivs);
int max_regno = max_reg_num ();
int i;
+ reg_equivs_t ze;
+ memset (&ze, 0, sizeof (reg_equivs_t));
VEC_reserve (reg_equivs_t, gc, reg_equivs, max_regno);
for (i = old_size; i < max_regno; i++)
- {
- VEC_quick_insert (reg_equivs_t, reg_equivs, i, 0);
- memset (&VEC_index (reg_equivs_t, reg_equivs, i), 0,
- sizeof (reg_equivs_t));
- }
-
+ VEC_quick_insert (reg_equivs_t, reg_equivs, i, ze);
}
diff --git a/gcc/rtl.h b/gcc/rtl.h
index 42378b9f69f..a29f2611fb1 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -33,11 +33,6 @@ along with GCC; see the file COPYING3. If not see
#include "hashtab.h"
#include "flags.h"
-#undef FFS /* Some systems predefine this symbol; don't let it interfere. */
-#undef FLOAT /* Likewise. */
-#undef ABS /* Likewise. */
-#undef PC /* Likewise. */
-
/* Value used by some passes to "recognize" noop moves as valid
instructions. */
#define NOOP_MOVE_INSN_CODE INT_MAX
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index 449efc97cbc..2a7a17066f6 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -1,5 +1,5 @@
/* Instruction scheduling pass. Selective scheduler and pipeliner.
- Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
+ Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012
Free Software Foundation, Inc.
This file is part of GCC.
@@ -1542,7 +1542,7 @@ insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
vinsn_attach (old_expr_vinsn);
vinsn_attach (new_expr_vinsn);
- VEC_safe_insert (expr_history_def, heap, vect, ind, &temp);
+ VEC_safe_insert (expr_history_def, heap, vect, ind, temp);
*pvect = vect;
}
@@ -3686,6 +3686,22 @@ maybe_tidy_empty_bb (basic_block bb)
FOR_EACH_EDGE (e, ei, bb->preds)
if (e->flags & EDGE_COMPLEX)
return false;
+ else if (e->flags & EDGE_FALLTHRU)
+ {
+ rtx note;
+ /* If prev bb ends with asm goto, see if any of the
+ ASM_OPERANDS_LABELs don't point to the fallthru
+ label. Do not attempt to redirect it in that case. */
+ if (JUMP_P (BB_END (e->src))
+ && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
+ {
+ int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
+
+ for (i = 0; i < n; ++i)
+ if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (bb))
+ return false;
+ }
+ }
free_data_sets (bb);
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index f59150ee2c1..9ed98e671a0 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -1986,7 +1986,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
else if (GET_CODE (lhs) == MULT
&& CONST_INT_P (XEXP (lhs, 1)))
{
- coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
+ coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == ASHIFT
@@ -1994,8 +1994,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
&& INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff0 = double_int_setbit (double_int_zero,
- INTVAL (XEXP (lhs, 1)));
+ coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
@@ -2007,7 +2006,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
else if (GET_CODE (rhs) == MULT
&& CONST_INT_P (XEXP (rhs, 1)))
{
- coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
+ coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == ASHIFT
@@ -2015,8 +2014,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
&& INTVAL (XEXP (rhs, 1)) >= 0
&& INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff1 = double_int_setbit (double_int_zero,
- INTVAL (XEXP (rhs, 1)));
+ coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
rhs = XEXP (rhs, 0);
}
@@ -2027,7 +2025,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
double_int val;
bool speed = optimize_function_for_speed_p (cfun);
- val = double_int_add (coeff0, coeff1);
+ val = coeff0 + coeff1;
coeff = immed_double_int_const (val, mode);
tem = simplify_gen_binary (MULT, mode, lhs, coeff);
@@ -2165,7 +2163,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
else if (GET_CODE (lhs) == MULT
&& CONST_INT_P (XEXP (lhs, 1)))
{
- coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
+ coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == ASHIFT
@@ -2173,8 +2171,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
&& INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- coeff0 = double_int_setbit (double_int_zero,
- INTVAL (XEXP (lhs, 1)));
+ coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
lhs = XEXP (lhs, 0);
}
@@ -2186,7 +2183,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
else if (GET_CODE (rhs) == MULT
&& CONST_INT_P (XEXP (rhs, 1)))
{
- negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
+ negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == ASHIFT
@@ -2194,9 +2191,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
&& INTVAL (XEXP (rhs, 1)) >= 0
&& INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
{
- negcoeff1 = double_int_setbit (double_int_zero,
- INTVAL (XEXP (rhs, 1)));
- negcoeff1 = double_int_neg (negcoeff1);
+ negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
+ negcoeff1 = -negcoeff1;
rhs = XEXP (rhs, 0);
}
@@ -2207,7 +2203,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
double_int val;
bool speed = optimize_function_for_speed_p (cfun);
- val = double_int_add (coeff0, negcoeff1);
+ val = coeff0 + negcoeff1;
coeff = immed_double_int_const (val, mode);
tem = simplify_gen_binary (MULT, mode, lhs, coeff);
@@ -3590,16 +3586,16 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
{
case MINUS:
/* A - B == A + (-B). */
- o1 = double_int_neg (o1);
+ o1 = -o1;
/* Fall through.... */
case PLUS:
- res = double_int_add (o0, o1);
+ res = o0 + o1;
break;
case MULT:
- res = double_int_mul (o0, o1);
+ res = o0 * o1;
break;
case DIV:
@@ -3635,31 +3631,31 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
break;
case AND:
- res = double_int_and (o0, o1);
+ res = o0 & o1;
break;
case IOR:
- res = double_int_ior (o0, o1);
+ res = o0 | o1;
break;
case XOR:
- res = double_int_xor (o0, o1);
+ res = o0 ^ o1;
break;
case SMIN:
- res = double_int_smin (o0, o1);
+ res = o0.smin (o1);
break;
case SMAX:
- res = double_int_smax (o0, o1);
+ res = o0.smax (o1);
break;
case UMIN:
- res = double_int_umin (o0, o1);
+ res = o0.umin (o1);
break;
case UMAX:
- res = double_int_umax (o0, o1);
+ res = o0.umax (o1);
break;
case LSHIFTRT: case ASHIFTRT:
@@ -3674,22 +3670,21 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
o1.low &= GET_MODE_PRECISION (mode) - 1;
}
- if (!double_int_fits_in_uhwi_p (o1)
- || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
+ if (!o1.fits_uhwi ()
+ || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
return 0;
- cnt = double_int_to_uhwi (o1);
+ cnt = o1.to_uhwi ();
+ unsigned short prec = GET_MODE_PRECISION (mode);
if (code == LSHIFTRT || code == ASHIFTRT)
- res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
- code == ASHIFTRT);
+ res = o0.rshift (cnt, prec, code == ASHIFTRT);
else if (code == ASHIFT)
- res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
- true);
+ res = o0.alshift (cnt, prec);
else if (code == ROTATE)
- res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
+ res = o0.lrotate (cnt, prec);
else /* code == ROTATERT */
- res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
+ res = o0.rrotate (cnt, prec);
}
break;
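
The simplify-rtx.c hunks above complete the same double_int migration for arithmetic: binary helpers become overloaded operators and the remaining free functions become methods. A cheat sheet of the substitutions, using hypothetical double_int values a and b and a HOST_WIDE_INT x (each mapping is taken from a hunk above):

    double_int a = double_int::from_shwi (x);  /* was shwi_to_double_int (x)  */
    double_int s = a + b;                      /* was double_int_add (a, b)   */
    double_int p = a * b;                      /* was double_int_mul (a, b)   */
    double_int n = -a;                         /* was double_int_neg (a)      */
    double_int m = a.smin (b);                 /* was double_int_smin (a, b)  */
    unsigned HOST_WIDE_INT cnt;
    if (a.fits_uhwi ())                        /* was double_int_fits_in_uhwi_p (a)  */
      cnt = a.to_uhwi ();                      /* was double_int_to_uhwi (a)  */
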
diff --git a/gcc/stmt.c b/gcc/stmt.c
index 8d76b3eea08..b64b0807433 100644
--- a/gcc/stmt.c
+++ b/gcc/stmt.c
@@ -52,7 +52,7 @@ along with GCC; see the file COPYING3. If not see
#include "regs.h"
#include "alloc-pool.h"
#include "pretty-print.h"
-#include "bitmap.h"
+#include "pointer-set.h"
#include "params.h"
#include "dumpfile.h"
@@ -113,9 +113,6 @@ static int node_has_low_bound (case_node_ptr, tree);
static int node_has_high_bound (case_node_ptr, tree);
static int node_is_bounded (case_node_ptr, tree);
static void emit_case_nodes (rtx, case_node_ptr, rtx, tree);
-static struct case_node *add_case_node (struct case_node *, tree,
- tree, tree, tree, alloc_pool);
-
/* Return the rtx-label that corresponds to a LABEL_DECL,
creating it if necessary. */
@@ -1650,31 +1647,34 @@ expand_stack_restore (tree var)
emit_stack_restore (SAVE_BLOCK, sa);
fixup_args_size_notes (prev, get_last_insn (), 0);
}
+
+/* Generate code to jump to LABEL if OP0 and OP1 are equal in mode MODE. */
+static void
+do_jump_if_equal (enum machine_mode mode, rtx op0, rtx op1, rtx label,
+ int unsignedp)
+{
+ do_compare_rtx_and_jump (op0, op1, EQ, unsignedp, mode,
+ NULL_RTX, NULL_RTX, label, -1);
+}
/* Do the insertion of a case label into case_list. The labels are
fed to us in descending order from the sorted vector of case labels used
in the tree part of the middle end. So the list we construct is
- sorted in ascending order. The bounds on the case range, LOW and HIGH,
- are converted to case's index type TYPE. Note that the original type
- of the case index in the source code is usually "lost" during
- gimplification due to type promotion, but the case labels retain the
- original type. */
+ sorted in ascending order. */
static struct case_node *
-add_case_node (struct case_node *head, tree type, tree low, tree high,
+add_case_node (struct case_node *head, tree low, tree high,
tree label, alloc_pool case_node_pool)
{
struct case_node *r;
gcc_checking_assert (low);
- gcc_checking_assert (! high || (TREE_TYPE (low) == TREE_TYPE (high)));
+ gcc_checking_assert (high && (TREE_TYPE (low) == TREE_TYPE (high)));
- /* Add this label to the chain. Make sure to drop overflow flags. */
+ /* Add this label to the chain. */
r = (struct case_node *) pool_alloc (case_node_pool);
- r->low = build_int_cst_wide (type, TREE_INT_CST_LOW (low),
- TREE_INT_CST_HIGH (low));
- r->high = build_int_cst_wide (type, TREE_INT_CST_LOW (high),
- TREE_INT_CST_HIGH (high));
+ r->low = low;
+ r->high = high;
r->code_label = label;
r->parent = r->left = NULL;
r->right = head;
@@ -1952,17 +1952,10 @@ expand_case (gimple stmt)
rtx default_label = NULL_RTX;
unsigned int count, uniq;
int i;
- rtx before_case, end;
int ncases = gimple_switch_num_labels (stmt);
tree index_expr = gimple_switch_index (stmt);
tree index_type = TREE_TYPE (index_expr);
-
tree elt;
- bitmap label_bitmap;
-
- /* The insn after which the case dispatch should finally
- be emitted. Zero for a dummy. */
- rtx start;
/* A list of case labels; it is first built as a list and it may then
be rearranged into a nearly balanced binary tree. */
@@ -2005,17 +1998,15 @@ expand_case (gimple stmt)
how to expand this switch(). */
uniq = 0;
count = 0;
- label_bitmap = BITMAP_ALLOC (NULL);
+ struct pointer_set_t *seen_labels = pointer_set_create ();
for (i = gimple_switch_num_labels (stmt) - 1; i >= 1; --i)
{
- tree low, high;
- rtx lab;
-
elt = gimple_switch_label (stmt, i);
- low = CASE_LOW (elt);
+ tree low = CASE_LOW (elt);
gcc_assert (low);
- high = CASE_HIGH (elt);
+ tree high = CASE_HIGH (elt);
gcc_assert (! high || tree_int_cst_lt (low, high));
+ tree lab = CASE_LABEL (elt);
/* Count the elements.
A range counts double, since it requires two compares. */
@@ -2025,20 +2016,35 @@ expand_case (gimple stmt)
/* If we have not seen this label yet, then increase the
number of unique case node targets seen. */
- lab = label_rtx (CASE_LABEL (elt));
- if (bitmap_set_bit (label_bitmap, CODE_LABEL_NUMBER (lab)))
+ if (!pointer_set_insert (seen_labels, lab))
uniq++;
+ /* The bounds on the case range, LOW and HIGH, have to be converted
+ to case's index type TYPE. Note that the original type of the
+ case index in the source code is usually "lost" during
+ gimplification due to type promotion, but the case labels retain the
+ original type. Make sure to drop overflow flags. */
+ low = fold_convert (index_type, low);
+ if (TREE_OVERFLOW (low))
+ low = build_int_cst_wide (index_type,
+ TREE_INT_CST_LOW (low),
+ TREE_INT_CST_HIGH (low));
+
/* The canonical from of a case label in GIMPLE is that a simple case
has an empty CASE_HIGH. For the casesi and tablejump expanders,
the back ends want simple cases to have high == low. */
if (! high)
high = low;
-
- case_list = add_case_node (case_list, index_type, low, high,
- CASE_LABEL (elt), case_node_pool);
+ high = fold_convert (index_type, high);
+ if (TREE_OVERFLOW (high))
+ high = build_int_cst_wide (index_type,
+ TREE_INT_CST_LOW (high),
+ TREE_INT_CST_HIGH (high));
+
+ case_list = add_case_node (case_list, low, high, lab,
+ case_node_pool);
}
- BITMAP_FREE (label_bitmap);
+ pointer_set_destroy (seen_labels);
/* cleanup_tree_cfg removes all SWITCH_EXPR with a single
destination, such as one with a default case only.
@@ -2046,7 +2052,7 @@ expand_case (gimple stmt)
type, so we should never get a zero here. */
gcc_assert (count > 0);
- before_case = start = get_last_insn ();
+ rtx before_case = get_last_insn ();
/* Decide how to expand this switch.
The two options at this point are a dispatch table (casesi or
@@ -2060,23 +2066,108 @@ expand_case (gimple stmt)
case_list, default_label,
minval, maxval, range);
- before_case = NEXT_INSN (before_case);
- end = get_last_insn ();
- reorder_insns (before_case, end, start);
+ reorder_insns (NEXT_INSN (before_case), get_last_insn (), before_case);
free_temp_slots ();
free_alloc_pool (case_node_pool);
}
-/* Generate code to jump to LABEL if OP0 and OP1 are equal in mode MODE. */
+/* Expand the dispatch to a short decrement chain if there are few cases
+ to dispatch to. Likewise if neither casesi nor tablejump is available,
+ or if flag_jump_tables is not set. Otherwise, expand as a casesi or a
+ tablejump. The index mode is always the mode of integer_type_node.
+ Trap if no case matches the index.
-static void
-do_jump_if_equal (enum machine_mode mode, rtx op0, rtx op1, rtx label,
- int unsignedp)
+ DISPATCH_INDEX is the index expression to switch on. It should be a
+ memory or register operand.
+
+ DISPATCH_TABLE is a set of case labels. The set should be sorted in
+ ascending order, be contiguous, starting with value 0, and contain only
+ single-valued case labels. */
+
+void
+expand_sjlj_dispatch_table (rtx dispatch_index,
+ VEC(tree,heap) *dispatch_table)
{
- do_compare_rtx_and_jump (op0, op1, EQ, unsignedp, mode,
- NULL_RTX, NULL_RTX, label, -1);
+ tree index_type = integer_type_node;
+ enum machine_mode index_mode = TYPE_MODE (index_type);
+
+ int ncases = VEC_length (tree, dispatch_table);
+
+ do_pending_stack_adjust ();
+ rtx before_case = get_last_insn ();
+
+ /* Expand as a decrement-chain if there are 5 or fewer dispatch
+ labels. This covers more than 98% of the cases in libjava,
+ and seems to be a reasonable compromise between the "old way"
+ of expanding as a decision tree or dispatch table vs. the "new
+ way" with decrement chain or dispatch table. */
+ if (ncases <= 5
+ || (!HAVE_casesi && !HAVE_tablejump)
+ || !flag_jump_tables)
+ {
+ /* Expand the dispatch as a decrement chain:
+
+ "switch(index) {case 0: do_0; case 1: do_1; ...; case N: do_N;}"
+
+ ==>
+
+ if (index == 0) do_0; else index--;
+ if (index == 0) do_1; else index--;
+ ...
+ if (index == 0) do_N; else index--;
+
+ This is more efficient than a dispatch table on most machines.
+ The last "index--" is redundant but the code is trivially dead
+ and will be cleaned up by later passes. */
+ rtx index = copy_to_mode_reg (index_mode, dispatch_index);
+ rtx zero = CONST0_RTX (index_mode);
+ for (int i = 0; i < ncases; i++)
+ {
+ tree elt = VEC_index (tree, dispatch_table, i);
+ rtx lab = label_rtx (CASE_LABEL (elt));
+ do_jump_if_equal (index_mode, index, zero, lab, 0);
+ force_expand_binop (index_mode, sub_optab,
+ index, CONST1_RTX (index_mode),
+ index, 0, OPTAB_DIRECT);
+ }
+ }
+ else
+ {
+ /* Similar to expand_case, but much simpler. */
+ struct case_node *case_list = 0;
+ alloc_pool case_node_pool = create_alloc_pool ("struct sjlj_case pool",
+ sizeof (struct case_node),
+ ncases);
+ tree index_expr = make_tree (index_type, dispatch_index);
+ tree minval = build_int_cst (index_type, 0);
+ tree maxval = CASE_LOW (VEC_last (tree, dispatch_table));
+ tree range = maxval;
+ rtx default_label = gen_label_rtx ();
+
+ for (int i = ncases - 1; i > 0; --i)
+ {
+ tree elt = VEC_index (tree, dispatch_table, i);
+ tree low = CASE_LOW (elt);
+ tree lab = CASE_LABEL (elt);
+ case_list = add_case_node (case_list, low, low, lab, case_node_pool);
+ }
+
+ emit_case_dispatch_table (index_expr, index_type,
+ case_list, default_label,
+ minval, maxval, range);
+ emit_label (default_label);
+ free_alloc_pool (case_node_pool);
+ }
+
+ /* Dispatching something not handled? Trap! */
+ expand_builtin_trap ();
+
+ reorder_insns (NEXT_INSN (before_case), get_last_insn (), before_case);
+
+ free_temp_slots ();
}
+
/* Take an ordered list of case nodes
and transform them into a near optimal binary tree,
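
The new expand_sjlj_dispatch_table above expands a dense 0..N dispatch as a decrement chain when the table is small or jump tables are unavailable. In source terms, the transformation its comment describes corresponds to this plain-C picture (do_0 through do_2 are hypothetical handlers; the trailing trap matches the expand_builtin_trap call):

    extern void do_0 (void), do_1 (void), do_2 (void);

    void
    dispatch (int index)
    {
      if (index == 0) { do_0 (); return; } index--;
      if (index == 0) { do_1 (); return; } index--;
      if (index == 0) { do_2 (); return; } index--;
      __builtin_trap ();  /* no case matched */
    }
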
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index 0f555946083..674f88801bc 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -2218,14 +2218,13 @@ layout_type (tree type)
&& TYPE_UNSIGNED (TREE_TYPE (lb))
&& tree_int_cst_lt (ub, lb))
{
+ unsigned prec = TYPE_PRECISION (TREE_TYPE (lb));
lb = double_int_to_tree
(ssizetype,
- double_int_sext (tree_to_double_int (lb),
- TYPE_PRECISION (TREE_TYPE (lb))));
+ tree_to_double_int (lb).sext (prec));
ub = double_int_to_tree
(ssizetype,
- double_int_sext (tree_to_double_int (ub),
- TYPE_PRECISION (TREE_TYPE (ub))));
+ tree_to_double_int (ub).sext (prec));
}
length
= fold_convert (sizetype,
diff --git a/gcc/symtab.c b/gcc/symtab.c
index 665ceae41e0..1dceb799dad 100644
--- a/gcc/symtab.c
+++ b/gcc/symtab.c
@@ -734,6 +734,8 @@ symtab_make_decl_local (tree decl)
DECL_WEAK (decl) = 0;
DECL_EXTERNAL (decl) = 0;
TREE_PUBLIC (decl) = 0;
+ DECL_VISIBILITY_SPECIFIED (decl) = 0;
+ DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
if (!DECL_RTL_SET_P (decl))
return;
diff --git a/gcc/system.h b/gcc/system.h
index 48c9c894e34..ed36bae3e3b 100644
--- a/gcc/system.h
+++ b/gcc/system.h
@@ -638,6 +638,11 @@ extern int vsnprintf(char *, size_t, const char *, va_list);
/* Get libiberty declarations. */
#include "libiberty.h"
+#undef FFS /* Some systems predefine this symbol; don't let it interfere. */
+#undef FLOAT /* Likewise. */
+#undef ABS /* Likewise. */
+#undef PC /* Likewise. */
+
/* Provide a default for the HOST_BIT_BUCKET.
This suffices for POSIX-like hosts. */
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index c33fddce097..a72e8efcdc6 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,139 @@
+2012-09-12 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/54225
+ PR fortran/53306
+ * gfortran.dg/coarray_10.f90: Update dg-error.
+ * gfortran.dg/coarray_28.f90: New.
+ * gfortran.dg/array_section_3.f90: New.
+
+2012-09-11 Christophe Lyon <christophe.lyon@linaro.org>
+
+ * gcc.target/arm/neon-vset_lanes8.c, gcc.target/arm/pr51835.c,
+ gcc.target/arm/pr48252.c: Fix for big-endian support.
+
+2012-09-11 Marc Glisse <marc.glisse@inria.fr>
+
+ * gcc.dg/tree-ssa/forwprop-22.c: New testcase.
+
+2012-09-11 Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
+ Matthew Gretton-Dann <matthew.gretton-dann@arm.com>
+
+ * gcc.target/arm/neon-vfma-1.c: New testcase.
+ * gcc.target/arm/neon-vfms-1.c: Likewise.
+ * gcc.target/arm/neon-vmla-1.c: Update test to use int instead
+ of float.
+ * gcc.target/arm/neon-vmls-1.c: Likewise.
+ * lib/target-supports.exp (add_options_for_arm_neonv2): New
+ function.
+ (check_effective_target_arm_neonv2_ok_nocache): Likewise.
+ (check_effective_target_arm_neonv2_ok): Likewise.
+ (check_effective_target_arm_neonv2_hw): Likewise.
+ (check_effective_target_arm_neonv2): Likewise.
+
+2012-09-11 Richard Guenther <rguenther@suse.de>
+
+ PR middle-end/54515
+ * g++.dg/tree-ssa/pr54515.C: New testcase.
+
+2012-09-10 Andrew Pinski <apinski@cavium.com>
+
+ PR tree-opt/54362
+ * gcc.dg/tm/memopt-16.c: New testcase.
+
+2012-09-10 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/54541
+ PR c++/54542
+ * g++.dg/cpp0x/sfinae40.C: New.
+ * g++.dg/cpp0x/sfinae41.C: Likewise.
+
+2012-09-10 Jason Merrill <jason@redhat.com>
+
+ PR c++/54538
+ * g++.dg/cpp0x/lambda/lambda-mangle4.C: New.
+
+2012-09-10 Oleg Endo <olegendo@gcc.gnu.org>
+
+ PR target/54089
+ * gcc.target/sh/pr54089-3.c: New.
+
+2012-09-10 Marc Glisse <marc.glisse@inria.fr>
+
+ * gcc.dg/tree-ssa/forwprop-21.c: New testcase.
+
+2012-09-10 Aldy Hernandez <aldyh@redhat.com>
+
+ * gcc.dg/tm/reg-promotion.c: Modify dump message check.
+
+2012-09-10 Aldy Hernandez <aldyh@redhat.com>
+
+ * gcc.dg/pr52558-2.c: Delete.
+ * gcc.dg/simulate-thread/speculative-store-3.c: New.
+
+2012-09-10 Marc Glisse <marc.glisse@inria.fr>
+
+ * gcc.dg/tree-ssa/forwprop-20.c: New testcase.
+
+2012-09-10 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/54520
+ * gcc.dg/torture/pr54520.c: New testcase.
+
+2012-09-10 Jason Merrill <jason@redhat.com>
+
+ PR c++/54506
+ * g++.dg/cpp0x/implicit14.C: New.
+
+2012-09-07 Mikael Morin <mikael@gcc.gnu.org>
+
+ PR fortran/54208
+ * gfortran.dg/bound_simplification_3.f90: New test.
+
+2012-09-07 Aldy Hernandez <aldyh@redhat.com>
+
+ PR testsuite/54184
+ * gcc.dg/pr52558-1.c: Delete.
+ * gcc.dg/simulate-thread/speculative-store-2.c: New.
+
+2012-09-07 Richard Earnshaw <rearnsha@arm.com>
+
+ * gcc.target/arm/pr50318-1.c: Scan for smlal.
+ * gcc.target/arm/smlaltb-1.c: XFAIL test.
+ * gcc.target/arm/smlaltt-1.c: Likewise.
+
+2012-09-07 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc.dg/pr44194-1.c: Skip on Alpha and adjust regexp for SPARC64.
+
+2012-09-07 Tom de Vries <tom@codesourcery.com>
+
+ PR tree-optimization/53986
+ * gcc.dg/tree-ssa/vrp80.c: New test.
+ * gcc.dg/tree-ssa/vrp80-2.c: Same.
+
+2012-09-06 Jason Merrill <jason@redhat.com>
+
+ PR c++/54341
+ PR c++/54253
+ * g++.dg/cpp0x/constexpr-virtual2.C: New.
+ * g++.dg/cpp0x/constexpr-virtual3.C: New.
+
+2012-09-06 Andrew Pinski <apinski@cavium.com>
+
+ PR tree-opt/54494
+ * gcc.dg/tree-ssa/strlen-1.c: New testcase.
+
+2012-09-06 Jakub Jelinek <jakub@redhat.com>
+
+ PR rtl-optimization/54455
+ * gcc.dg/54455.c: New test.
+
+2012-09-06 Tobias Burnus <burnus@net-b.de>
+
+ PR fortran/54463
+ * gfortran.dg/promotion_2.f90: New.
+
2012-09-05 Jakub Jelinek <jakub@redhat.com>
PR middle-end/54486
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-virtual2.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-virtual2.C
new file mode 100644
index 00000000000..86040a31b1a
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-virtual2.C
@@ -0,0 +1,24 @@
+// PR c++/54341
+// { dg-do compile { target c++11 } }
+
+template<typename T>
+struct enable_shared_from_this
+{
+ constexpr enable_shared_from_this(); // { dg-warning "used but never defined" }
+
+private:
+ int mem;
+};
+
+class VTableClass {
+public:
+ virtual void someVirtualMethod() { }
+};
+
+class SomeClass : public enable_shared_from_this< SomeClass >, public
+VTableClass { };
+
+SomeClass* createInstance()
+{
+ return new SomeClass;
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-virtual3.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-virtual3.C
new file mode 100644
index 00000000000..de446bcfd3e
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-virtual3.C
@@ -0,0 +1,42 @@
+// PR c++/54253
+// { dg-do compile { target c++11 } }
+
+namespace A {
+ class Base {
+ int x;
+ public:
+ constexpr Base(int x) : x(x) {}
+ };
+
+ class Base2 {
+ public:
+ virtual void fun() {}
+ };
+
+ class Derived : public Base2, public Base {
+ public:
+ constexpr Derived() : Base2(), Base(5) {}
+ };
+
+ constexpr Derived der;
+}
+
+namespace B {
+ class Base {
+ int x;
+ public:
+ constexpr Base() : x(5) {}
+ };
+
+ class Base2 {
+ public:
+ virtual void fun() {}
+ };
+
+ class Derived : public Base, public Base2 {
+ public:
+ constexpr Derived() {}
+ };
+
+ constexpr Derived der;
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/implicit14.C b/gcc/testsuite/g++.dg/cpp0x/implicit14.C
new file mode 100644
index 00000000000..8a56244631b
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/implicit14.C
@@ -0,0 +1,26 @@
+// PR c++/54506
+// { dg-do compile { target c++11 } }
+
+template <class T>
+struct A
+{
+ A() {}
+
+ A(A const volatile &&) = delete;
+ A &operator =(A const volatile &&) = delete;
+
+ template <class U> A(A<U> &&) {}
+ template <class U> A &operator =(A<U> &&) { return *this; }
+};
+
+struct B
+{
+ A<int> a;
+ B() = default;
+};
+
+int main()
+{
+ B b = B();
+ b = B();
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-mangle4.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-mangle4.C
new file mode 100644
index 00000000000..0d37637fe9c
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-mangle4.C
@@ -0,0 +1,13 @@
+// PR c++/54538
+// { dg-do compile { target c++11 } }
+
+template <class T>
+struct A
+{
+ // { dg-final { scan-assembler "_ZNK1AIcE1pMUlvE_cvPFvvEEv" } }
+ // { dg-final { scan-assembler "_ZNK1AIiE1pMUlvE_cvPFvvEEv" } }
+ void (*p)() = []{};
+};
+
+A<int> a1;
+A<char> a2;
diff --git a/gcc/testsuite/g++.dg/cpp0x/sfinae40.C b/gcc/testsuite/g++.dg/cpp0x/sfinae40.C
new file mode 100644
index 00000000000..18e5fecb268
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/sfinae40.C
@@ -0,0 +1,21 @@
+// PR c++/54541
+// { dg-do compile { target c++11 } }
+
+template <typename T> T&& declval();
+
+struct X;
+
+X f(int);
+
+template <class T>
+void g(decltype((void)f(declval<T>())) *)
+{}
+
+template <class T>
+void g(...)
+{}
+
+int main()
+{
+ g<int>(0);
+}
diff --git a/gcc/testsuite/g++.dg/cpp0x/sfinae41.C b/gcc/testsuite/g++.dg/cpp0x/sfinae41.C
new file mode 100644
index 00000000000..bd6f624f0ad
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/sfinae41.C
@@ -0,0 +1,17 @@
+// PR c++/54542
+// { dg-do compile { target c++11 } }
+
+template <class T>
+void f(decltype(new T(1, 2)) *)
+{
+ T(1, 2);
+}
+
+template <class T>
+void f(...)
+{}
+
+int main()
+{
+ f<int>(0);
+}
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr54515.C b/gcc/testsuite/g++.dg/tree-ssa/pr54515.C
new file mode 100644
index 00000000000..11ed46893bf
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr54515.C
@@ -0,0 +1,19 @@
+// { dg-do compile }
+// { dg-options "-O2" }
+
+template < typename T > T h2le (T)
+{
+ T a;
+ unsigned short &b = a;
+ short c = 0;
+ unsigned char (&d)[2] = reinterpret_cast < unsigned char (&)[2] > (c);
+ unsigned char (&e)[2] = reinterpret_cast < unsigned char (&)[2] > (b);
+ e[0] = d[0];
+ return a;
+}
+
+void
+bar ()
+{
+ h2le ((unsigned short) 0);
+}
diff --git a/gcc/testsuite/gcc.dg/54455.c b/gcc/testsuite/gcc.dg/54455.c
new file mode 100644
index 00000000000..de68a53e233
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/54455.c
@@ -0,0 +1,25 @@
+/* PR rtl-optimization/54455 */
+/* { dg-do compile } */
+/* { dg-options "-O1 -fschedule-insns -fselective-scheduling --param max-sched-extend-regions-iters=2" } */
+
+extern void fn1 (void), fn2 (void);
+
+static inline __attribute__((always_inline)) int
+foo (int *x, long y)
+{
+ asm goto ("" : : "r" (x), "r" (y) : "memory" : lab);
+ return 0;
+lab:
+ return 1;
+}
+
+void
+bar (int *x)
+{
+ if (foo (x, 23))
+ fn1 ();
+ else
+ fn2 ();
+
+ foo (x, 2);
+}
diff --git a/gcc/testsuite/gcc.dg/pr44194-1.c b/gcc/testsuite/gcc.dg/pr44194-1.c
index d251bf1b941..d993a42a02b 100644
--- a/gcc/testsuite/gcc.dg/pr44194-1.c
+++ b/gcc/testsuite/gcc.dg/pr44194-1.c
@@ -1,4 +1,4 @@
-/* { dg-do compile { target { { { { i?86-*-* x86_64-*-* } && x32 } || lp64 } && { ! s390*-*-* } } } } */
+/* { dg-do compile { target { { { { { i?86-*-* x86_64-*-* } && x32 } || lp64 } && { ! s390*-*-* } } && { ! alpha*-*-* } } } } */
/* { dg-options "-O2 -fdump-rtl-dse1" } */
/* Restricting to 64-bit targets since 32-bit targets return
structures in memory. */
@@ -10,5 +10,5 @@ void func() {
struct ints s = foo();
bar(s.a, s.b);
}
-/* { dg-final { scan-rtl-dump "global deletions = 2" "dse1" } } */
+/* { dg-final { scan-rtl-dump "global deletions = (2|3)" "dse1" } } */
/* { dg-final { cleanup-rtl-dump "dse1" } } */
diff --git a/gcc/testsuite/gcc.dg/pr52558-1.c b/gcc/testsuite/gcc.dg/pr52558-1.c
deleted file mode 100644
index c34ad0655d3..00000000000
--- a/gcc/testsuite/gcc.dg/pr52558-1.c
+++ /dev/null
@@ -1,22 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "--param allow-store-data-races=0 -O2 -fdump-tree-lim1" } */
-
-/* Test that `count' is not written to unless p->data > 0. */
-
-int count;
-
-struct obj {
- int data;
- struct obj *next;
-} *q;
-
-void func()
-{
- struct obj *p;
- for (p = q; p; p = p->next)
- if (p->data > 0)
- count++;
-}
-
-/* { dg-final { scan-tree-dump-times "MEM count_lsm.. count_lsm_flag" 1 "lim1" } } */
-/* { dg-final { cleanup-tree-dump "lim1" } } */
diff --git a/gcc/testsuite/gcc.dg/pr52558-2.c b/gcc/testsuite/gcc.dg/pr52558-2.c
deleted file mode 100644
index 6d5f51c4f0c..00000000000
--- a/gcc/testsuite/gcc.dg/pr52558-2.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "--param allow-store-data-races=0 -O2 -fdump-tree-lim1" } */
-
-/* Test that g_2 is not written to unless !g_1. */
-
-int g_1 = 1;
-int g_2 = 0;
-
-int func_1(void)
-{
- int l;
- for (l = 0; l < 1234; l++)
- {
- if (g_1)
- return l;
- else
- g_2 = 0;
- }
- return 999;
-}
-
-/* { dg-final { scan-tree-dump-times "MEM.*g_2_lsm_flag" 1 "lim1" } } */
-/* { dg-final { cleanup-tree-dump "lim1" } } */
diff --git a/gcc/testsuite/gcc.dg/simulate-thread/speculative-store-2.c b/gcc/testsuite/gcc.dg/simulate-thread/speculative-store-2.c
new file mode 100644
index 00000000000..d4d28f5ed60
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/simulate-thread/speculative-store-2.c
@@ -0,0 +1,74 @@
+/* { dg-do link } */
+/* { dg-options "--param allow-store-data-races=0 -O2" } */
+/* { dg-final { simulate-thread } } */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "simulate-thread.h"
+
+/* Test that speculative stores do not happen for --param
+ allow-store-data-races=0. */
+
+int count, insns;
+
+struct obj {
+ int data;
+ struct obj *next;
+} *q;
+
+void simulate_thread_other_threads ()
+{
+ ++insns;
+ ++count;
+}
+
+int simulate_thread_step_verify ()
+{
+ return 0;
+}
+
+int simulate_thread_final_verify ()
+{
+ /* If count != insns, someone must have cached `count' and stored a
+ racy value into it. */
+ if (count != insns)
+ {
+ printf("FAIL: count was incorrectly cached\n");
+ return 1;
+ }
+ return 0;
+}
+
+/* Test that `count' is not written to unless p->data > 0. */
+
+__attribute__((noinline))
+void simulate_thread_main()
+{
+ struct obj *p;
+ for (p = q; p; p = p->next)
+ if (p->data > 0)
+ count++;
+}
+
+struct obj *
+insert(struct obj *head, int data)
+{
+ struct obj *t = (struct obj *) malloc (sizeof (struct obj));
+ t->next = head;
+ t->data = data;
+ return t;
+}
+
+int main()
+{
+ q = insert (0, 0);
+ q = insert (q, 0);
+ q = insert (q, 0);
+ q = insert (q, 0);
+ q = insert (q, 0);
+
+ simulate_thread_main ();
+ simulate_thread_done ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/simulate-thread/speculative-store-3.c b/gcc/testsuite/gcc.dg/simulate-thread/speculative-store-3.c
new file mode 100644
index 00000000000..203c026048d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/simulate-thread/speculative-store-3.c
@@ -0,0 +1,71 @@
+/* { dg-do link } */
+/* { dg-options "--param allow-store-data-races=0 -O2" } */
+/* { dg-final { simulate-thread } } */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "simulate-thread.h"
+
+/* Test distilled from PR52558. */
+
+int g_1 = 1;
+int g_2 = 0, insns = 0;
+int f;
+
+/* Test that g_2 is not written to unless !g_1. */
+
+__attribute__((noinline))
+int funky()
+{
+ int l;
+ for (l = 0; l != 4; l++)
+ {
+ if (g_1)
+ {
+ /* g_1 is globally true so we should always execute here,
+ thus never writing to g_2 under any circumstance in this
+ code path. */
+ return l;
+ }
+ for (g_2 = 0; g_2 >= 26; ++g_2)
+ ;
+ }
+ return 999;
+}
+
+int simulate_thread_final_verify ()
+{
+ /* If g_2 != insns, someone must have cached `g_2' and stored a
+ racy value into it. */
+ if (g_2 != insns)
+ {
+ printf("FAIL: g_2 was incorrectly cached\n");
+ return 1;
+ }
+ return 0;
+}
+
+void simulate_thread_other_threads ()
+{
+ ++insns;
+ ++g_2;
+}
+
+int simulate_thread_step_verify ()
+{
+ return 0;
+}
+
+__attribute__((noinline))
+void simulate_thread_main()
+{
+ f = funky();
+}
+
+int main()
+{
+ simulate_thread_main ();
+ simulate_thread_done ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/simulate-thread/speculative-store-4.c b/gcc/testsuite/gcc.dg/simulate-thread/speculative-store-4.c
new file mode 100644
index 00000000000..59f81b756be
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/simulate-thread/speculative-store-4.c
@@ -0,0 +1,54 @@
+/* { dg-do link } */
+/* { dg-options "--param allow-store-data-races=0" } */
+/* { dg-final { simulate-thread } } */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "simulate-thread.h"
+
+/* PR 54139 */
+/* Test that speculative stores do not happen for --param
+ allow-store-data-races=0. */
+
+int g_13=1, insns=1;
+
+__attribute__((noinline))
+void simulate_thread_main()
+{
+ int l_245;
+
+ /* Since g_13 is unilaterally set positive above, there should be
+ no store to g_13 below. */
+ for (l_245 = 0; l_245 <= 1; l_245 += 1)
+ for (; g_13 <= 0; g_13 = 1)
+ ;
+}
+
+int main()
+{
+ simulate_thread_main ();
+ simulate_thread_done ();
+ return 0;
+}
+
+void simulate_thread_other_threads ()
+{
+ ++g_13;
+ ++insns;
+}
+
+int simulate_thread_step_verify ()
+{
+ return 0;
+}
+
+int simulate_thread_final_verify ()
+{
+ if (g_13 != insns)
+ {
+ printf("FAIL: g_13 was incorrectly cached\n");
+ return 1;
+ }
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/tm/memopt-16.c b/gcc/testsuite/gcc.dg/tm/memopt-16.c
new file mode 100644
index 00000000000..c230240de57
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memopt-16.c
@@ -0,0 +1,43 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O3 -fdump-tree-tmmark" } */
+/* Like memopt-12.c but the phi is inside a loop, which causes
+ it to be converted into a COND_EXPR. */
+
+extern int test(void) __attribute__((transaction_safe));
+extern void *malloc (__SIZE_TYPE__) __attribute__((malloc,transaction_safe));
+
+struct large { int foo[500]; };
+
+int f(int j)
+{
+ int *p1, *p2, *p3;
+
+ p1 = malloc (sizeof (*p1)*5000);
+ __transaction_atomic {
+ _Bool t;
+ int i = 1;
+ *p1 = 0;
+
+ p2 = malloc (sizeof (*p2)*6000);
+ *p2 = 1;
+ t = test();
+
+ for (i = 0;i < j;i++)
+ {
+
+ /* p3 = PHI (p1, p2) */
+ if (t)
+ p3 = p1;
+ else
+ p3 = p2;
+
+ /* Since both p1 and p2 are thread-private, we can inherit the
+ logging already done. No ITM_W* instrumentation necessary. */
+ *p3 = 555;
+ }
+ }
+ return p3[something()];
+}
+
+/* { dg-final { scan-tree-dump-times "ITM_WU" 0 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/reg-promotion.c b/gcc/testsuite/gcc.dg/tm/reg-promotion.c
index 337c29f6c6e..e48bfb2795d 100644
--- a/gcc/testsuite/gcc.dg/tm/reg-promotion.c
+++ b/gcc/testsuite/gcc.dg/tm/reg-promotion.c
@@ -20,5 +20,5 @@ void func()
}
}
-/* { dg-final { scan-tree-dump-times "MEM count_lsm.. count_lsm_flag" 1 "lim1" } } */
+/* { dg-final { scan-tree-dump-times "Cannot hoist conditional load of count because it is in a transaction" 1 "lim1" } } */
/* { dg-final { cleanup-tree-dump "lim1" } } */
diff --git a/gcc/testsuite/gcc.dg/torture/pr54520.c b/gcc/testsuite/gcc.dg/torture/pr54520.c
new file mode 100644
index 00000000000..5884b2f353a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr54520.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+
+char *a;
+void
+fn1 ()
+{
+ char *p = a;
+ while (p && *p != '\0')
+ {
+ while (*p == '\t')
+ *p++ = '\0';
+ if (*p != '\0')
+ p = 0;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/forwprop-20.c b/gcc/testsuite/gcc.dg/tree-ssa/forwprop-20.c
new file mode 100644
index 00000000000..5b0e8ee9806
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/forwprop-20.c
@@ -0,0 +1,70 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target double64 } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+
+#include <stdint.h>
+
+/* All of these optimizations happen for unsupported vector modes as a
+ consequence of the lowering pass. We need to test with a vector mode
+ that is supported by default on at least some architectures, or make
+ the test target specific so we can pass a flag like -mavx. */
+
+typedef double vecf __attribute__ ((vector_size (2 * sizeof (double))));
+typedef int64_t veci __attribute__ ((vector_size (2 * sizeof (int64_t))));
+
+void f (double d, vecf* r)
+{
+ vecf x = { -d, 5 };
+ vecf y = { 1, 4 };
+ veci m = { 2, 0 };
+ *r = __builtin_shuffle (x, y, m); // { 1, -d }
+}
+
+void g (float d, vecf* r)
+{
+ vecf x = { d, 5 };
+ vecf y = { 1, 4 };
+ veci m = { 2, 1 };
+ *r = __builtin_shuffle (x, y, m); // { 1, 5 }
+}
+
+void h (double d, vecf* r)
+{
+ vecf x = { d + 1, 5 };
+ vecf y = { 1 , 4 };
+ veci m = { 2 , 0 };
+ *r = __builtin_shuffle (y, x, m); // { d + 1, 1 }
+}
+
+void i (float d, vecf* r)
+{
+ vecf x = { d, 5 };
+ veci m = { 1, 0 };
+ *r = __builtin_shuffle (x, m); // { 5, d }
+}
+
+void j (vecf* r)
+{
+ vecf y = { 1, 2 };
+ veci m = { 0, 0 };
+ *r = __builtin_shuffle (y, m); // { 1, 1 }
+}
+
+void k (vecf* r)
+{
+ vecf x = { 3, 4 };
+ vecf y = { 1, 2 };
+ veci m = { 3, 0 };
+ *r = __builtin_shuffle (x, y, m); // { 2, 3 }
+}
+
+void l (double d, vecf* r)
+{
+ vecf x = { -d, 5 };
+ vecf y = { d, 4 };
+ veci m = { 2, 0 };
+ *r = __builtin_shuffle (x, y, m); // { d, -d }
+}
+
+/* { dg-final { scan-tree-dump-not "VEC_PERM_EXPR" "optimized" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/forwprop-21.c b/gcc/testsuite/gcc.dg/tree-ssa/forwprop-21.c
new file mode 100644
index 00000000000..4859fa8c305
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/forwprop-21.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O -fdump-tree-optimized" } */
+typedef int v4si __attribute__ ((vector_size (4 * sizeof(int))));
+
+int
+test (v4si *x, v4si *y)
+{
+ v4si m = { 2, 3, 6, 5 };
+ v4si z = __builtin_shuffle (*x, *y, m);
+ return z[2];
+}
+/* { dg-final { scan-tree-dump-not "VEC_PERM_EXPR" "optimized" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/forwprop-22.c b/gcc/testsuite/gcc.dg/tree-ssa/forwprop-22.c
new file mode 100644
index 00000000000..9c66c997205
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/forwprop-22.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target vect_double } */
+/* { dg-require-effective-target vect_perm } */
+/* { dg-options "-O -fdump-tree-optimized" } */
+
+typedef double vec __attribute__((vector_size (2 * sizeof (double))));
+void f (vec *px, vec *y, vec *z)
+{
+ vec x = *px;
+ vec t1 = { x[1], x[0] };
+ vec t2 = { x[0], x[1] };
+ *y = t1;
+ *z = t2;
+}
+
+/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 1 "optimized" } } */
+/* { dg-final { scan-tree-dump-not "BIT_FIELD_REF" "optimized" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/strlen-1.c b/gcc/testsuite/gcc.dg/tree-ssa/strlen-1.c
new file mode 100644
index 00000000000..f6ff3e1570f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/strlen-1.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+extern const unsigned long base;
+static inline void wreg(unsigned char val, unsigned long addr) __attribute__((always_inline));
+static inline void wreg(unsigned char val, unsigned long addr)
+{
+ *((volatile unsigned char *) (__SIZE_TYPE__) (base + addr)) = val;
+}
+void wreg_twice(void)
+{
+ wreg(0, 42);
+ wreg(0, 42);
+}
+
+/* We should not remove the second null character store to the (base+42) address. */
+/* { dg-final { scan-tree-dump-times " ={v} 0;" 2 "optimized" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp80-2.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp80-2.c
new file mode 100644
index 00000000000..bb38f27c0d7
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp80-2.c
@@ -0,0 +1,38 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fno-tree-switch-conversion -fdump-tree-vrp1" } */
+
+extern void vrp_keep (void);
+
+/* Test <<. */
+
+void
+f3 (int s, int b)
+{
+ if (s >> 3 == -2)
+ /* s in range [-16, -9]. */
+ {
+ s += 17;
+ /* s in range [1, 8]. */
+ b = (b & 1) + 1;
+ /* b in range [1, 2]. */
+ b = b << s;
+ /* b in range [bmin << smin, bmax << smax],
+ == [1 << 1, 2 << 8]
+ == [2, 512]. */
+ if (b == 2)
+ vrp_keep ();
+ if (b == 512)
+ vrp_keep ();
+ }
+}
+
+int
+main ()
+{
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump-times "vrp_keep \\(" 2 "vrp1"} } */
+/* { dg-final { cleanup-tree-dump "vrp1" } } */
+
+
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp80.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp80.c
new file mode 100644
index 00000000000..2d0004010cb
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp80.c
@@ -0,0 +1,33 @@
+/* { dg-do link } */
+/* { dg-options "-O2 -fno-tree-switch-conversion" } */
+
+extern void link_error (void);
+
+/* Test <<. */
+
+void
+f3 (int s, int b)
+{
+ if (s >> 3 == -2)
+ /* s in range [-16, -9]. */
+ {
+ s += 17;
+ /* s in range [1, 8]. */
+ b = (b & 1) + 1;
+ /* b in range [1, 2]. */
+ b = b << s;
+ /* b in range [bmin << smin, bmax << smax],
+ == [1 << 1, 2 << 8]
+ == [2, 512]. */
+ if (b == 1 || b == 513)
+ link_error ();
+ }
+}
+
+int
+main ()
+{
+ return 0;
+}
+
+
diff --git a/gcc/testsuite/gcc.target/arm/neon-vfma-1.c b/gcc/testsuite/gcc.target/arm/neon-vfma-1.c
new file mode 100644
index 00000000000..a003a8274f6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/neon-vfma-1.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_neonv2_ok } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math" } */
+/* { dg-add-options arm_neonv2 } */
+/* { dg-final { scan-assembler "vfma\\.f32\[ \]+\[dDqQ]" } } */
+
+/* Verify that VFMA is used. */
+void f1(int n, float a, float x[], float y[]) {
+ int i;
+ for (i = 0; i < n; ++i)
+ y[i] = a * x[i] + y[i];
+}
diff --git a/gcc/testsuite/gcc.target/arm/neon-vfms-1.c b/gcc/testsuite/gcc.target/arm/neon-vfms-1.c
new file mode 100644
index 00000000000..8cefd8a851c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/neon-vfms-1.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_neonv2_ok } */
+/* { dg-options "-O2 -ftree-vectorize -ffast-math" } */
+/* { dg-add-options arm_neonv2 } */
+/* { dg-final { scan-assembler "vfms\\.f32\[ \]+\[dDqQ]" } } */
+
+/* Verify that VFMS is used. */
+void f1(int n, float a, float x[], float y[]) {
+ int i;
+ for (i = 0; i < n; ++i)
+ y[i] = a * -x[i] + y[i];
+}
diff --git a/gcc/testsuite/gcc.target/arm/neon-vmla-1.c b/gcc/testsuite/gcc.target/arm/neon-vmla-1.c
index 9d239ed47d0..c60c014e0c2 100644
--- a/gcc/testsuite/gcc.target/arm/neon-vmla-1.c
+++ b/gcc/testsuite/gcc.target/arm/neon-vmla-1.c
@@ -1,10 +1,10 @@
/* { dg-require-effective-target arm_neon_hw } */
/* { dg-options "-O2 -ftree-vectorize -ffast-math" } */
/* { dg-add-options arm_neon } */
-/* { dg-final { scan-assembler "vmla\\.f32" } } */
+/* { dg-final { scan-assembler "vmla\\.i32" } } */
/* Verify that VMLA is used. */
-void f1(int n, float a, float x[], float y[]) {
+void f1(int n, int a, int x[], int y[]) {
int i;
for (i = 0; i < n; ++i)
y[i] = a * x[i] + y[i];
diff --git a/gcc/testsuite/gcc.target/arm/neon-vmls-1.c b/gcc/testsuite/gcc.target/arm/neon-vmls-1.c
index 2beaebe17cf..89ee82b0fe8 100644
--- a/gcc/testsuite/gcc.target/arm/neon-vmls-1.c
+++ b/gcc/testsuite/gcc.target/arm/neon-vmls-1.c
@@ -1,10 +1,10 @@
/* { dg-require-effective-target arm_neon_hw } */
/* { dg-options "-O2 -ftree-vectorize -ffast-math" } */
/* { dg-add-options arm_neon } */
-/* { dg-final { scan-assembler "vmls\\.f32" } } */
+/* { dg-final { scan-assembler "vmls\\.i32" } } */
/* Verify that VMLS is used. */
-void f1(int n, float a, float x[], float y[]) {
+void f1(int n, int a, int x[], int y[]) {
int i;
for (i = 0; i < n; ++i)
y[i] = y[i] - a * x[i];
diff --git a/gcc/testsuite/gcc.target/arm/neon-vset_lanes8.c b/gcc/testsuite/gcc.target/arm/neon-vset_lanes8.c
index e87102edbe2..51d38fd1dfc 100644
--- a/gcc/testsuite/gcc.target/arm/neon-vset_lanes8.c
+++ b/gcc/testsuite/gcc.target/arm/neon-vset_lanes8.c
@@ -9,11 +9,14 @@
#include <stdlib.h>
#include <string.h>
-int8x8_t x = { 1, 2, 3, 4, 5, 6, 7, 8 };
-int8x8_t y = { 1, 2, 3, 16, 5, 6, 7, 8 };
+int8_t x_init[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
+int8_t y_init[8] = { 1, 2, 3, 16, 5, 6, 7, 8 };
int main (void)
{
+ int8x8_t x = vld1_s8 (x_init);
+ int8x8_t y = vld1_s8 (y_init);
+
x = vset_lane_s8 (16, x, 3);
if (memcmp (&x, &y, sizeof (x)) != 0)
abort();
diff --git a/gcc/testsuite/gcc.target/arm/pr48252.c b/gcc/testsuite/gcc.target/arm/pr48252.c
index 1a06c71e1be..17f729bb341 100644
--- a/gcc/testsuite/gcc.target/arm/pr48252.c
+++ b/gcc/testsuite/gcc.target/arm/pr48252.c
@@ -8,11 +8,14 @@
int main(void)
{
- uint8x8_t v1 = {1, 1, 1, 1, 1, 1, 1, 1};
- uint8x8_t v2 = {2, 2, 2, 2, 2, 2, 2, 2};
+ uint8_t v1_init[8] = {1, 1, 1, 1, 1, 1, 1, 1};
+ uint8_t v2_init[8] = {2, 2, 2, 2, 2, 2, 2, 2};
+ uint8x8_t v1 = vld1_u8 (v1_init);
+ uint8x8_t v2 = vld1_u8 (v2_init);
uint8x8x2_t vd1, vd2;
union {uint8x8_t v; uint8_t buf[8];} d1, d2, d3, d4;
int i;
+ uint8_t odd, even;
vd1 = vzip_u8(v1, vdup_n_u8(0));
vd2 = vzip_u8(v2, vdup_n_u8(0));
@@ -22,9 +25,17 @@ int main(void)
vst1_u8(d3.buf, vd2.val[0]);
vst1_u8(d4.buf, vd2.val[1]);
+#ifdef __ARMEL__
+ odd = 1;
+ even = 0;
+#else
+ odd = 0;
+ even = 1;
+#endif
+
for (i = 0; i < 8; i++)
- if ((i % 2 == 0 && d4.buf[i] != 2)
- || (i % 2 == 1 && d4.buf[i] != 0))
+ if ((i % 2 == even && d4.buf[i] != 2)
+ || (i % 2 == odd && d4.buf[i] != 0))
abort ();
return 0;
diff --git a/gcc/testsuite/gcc.target/arm/pr50318-1.c b/gcc/testsuite/gcc.target/arm/pr50318-1.c
index 05885e1b5ac..be270eefaef 100644
--- a/gcc/testsuite/gcc.target/arm/pr50318-1.c
+++ b/gcc/testsuite/gcc.target/arm/pr50318-1.c
@@ -8,4 +8,4 @@ long long test (unsigned int sec, unsigned long long nsecs)
long)nsecs;
}
-/* { dg-final { scan-assembler "umlal" } } */
+/* { dg-final { scan-assembler "smlal" } } */
diff --git a/gcc/testsuite/gcc.target/arm/pr51835.c b/gcc/testsuite/gcc.target/arm/pr51835.c
index 858b72f8ad8..500eb6e9f7b 100644
--- a/gcc/testsuite/gcc.target/arm/pr51835.c
+++ b/gcc/testsuite/gcc.target/arm/pr51835.c
@@ -11,4 +11,5 @@ unsigned int func2 (double d)
return (unsigned int)d;
}
-/* { dg-final { scan-assembler-times "fmrrd\[\\t \]+r0,\[\\t \]*r1,\[\\t \]*d0" 2 } } */
+/* { dg-final { scan-assembler-times "fmrrd\[\\t \]+r0,\[\\t \]*r1,\[\\t \]*d0" 2 { target { arm_little_endian } } } } */
+/* { dg-final { scan-assembler-times "fmrrd\[\\t \]+r1,\[\\t \]*r0,\[\\t \]*d0" 2 { target { ! arm_little_endian } } } } */
diff --git a/gcc/testsuite/gcc.target/arm/smlaltb-1.c b/gcc/testsuite/gcc.target/arm/smlaltb-1.c
index 1472c9b3fa1..a27009d2513 100644
--- a/gcc/testsuite/gcc.target/arm/smlaltb-1.c
+++ b/gcc/testsuite/gcc.target/arm/smlaltb-1.c
@@ -11,4 +11,4 @@ foo (long long x, int in)
return x + b * a;
}
-/* { dg-final { scan-assembler "smlaltb\\t" } } */
+/* { dg-final { scan-assembler "smlaltb\\t" { xfail *-*-* } } } */
diff --git a/gcc/testsuite/gcc.target/arm/smlaltt-1.c b/gcc/testsuite/gcc.target/arm/smlaltt-1.c
index 6bcbce0b958..380e3d01be6 100644
--- a/gcc/testsuite/gcc.target/arm/smlaltt-1.c
+++ b/gcc/testsuite/gcc.target/arm/smlaltt-1.c
@@ -11,4 +11,4 @@ foo (long long x, int in1, int in2)
return x + b * a;
}
-/* { dg-final { scan-assembler "smlaltt\\t" } } */
+/* { dg-final { scan-assembler "smlaltt\\t" { xfail *-*-* } } } */
diff --git a/gcc/testsuite/gcc.target/sh/pr54089-3.c b/gcc/testsuite/gcc.target/sh/pr54089-3.c
new file mode 100644
index 00000000000..ffb976ba11b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/sh/pr54089-3.c
@@ -0,0 +1,40 @@
+/* The dynamic shift library functions truncate the shift count to 5 bits.
+ Verify that this is taken into account and no extra shift count
+ truncations are generated before the library call. */
+/* { dg-do compile { target "sh*-*-*" } } */
+/* { dg-options "-O1" } */
+/* { dg-skip-if "" { "sh*-*-*" } { "*" } { "-m1*" "-m2" "-m2e*" } } */
+/* { dg-final { scan-assembler-not "and" } } */
+/* { dg-final { scan-assembler-not "31" } } */
+
+int
+test00 (unsigned int a, int* b, int c, int* d, unsigned int e)
+{
+ int s = 0;
+ int i;
+ for (i = 0; i < c; ++i)
+ s += d[i] + b[i] + (e << (i & 31));
+ return s;
+}
+
+int
+test01 (unsigned int a, int* b, int c, int* d, unsigned int e)
+{
+ int s = 0;
+ int i;
+ for (i = 0; i < c; ++i)
+ s += d[i] + b[i] + (e >> (i & 31));
+ return s;
+}
+
+int
+test03 (unsigned int a, unsigned int b)
+{
+ return b << (a & 31);
+}
+
+unsigned int
+test04 (unsigned int a, int b)
+{
+ return a >> (b & 31);
+}
diff --git a/gcc/testsuite/gfortran.dg/array_section_3.f90 b/gcc/testsuite/gfortran.dg/array_section_3.f90
new file mode 100644
index 00000000000..d3093d14d50
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/array_section_3.f90
@@ -0,0 +1,17 @@
+! { dg-do compile }
+!
+! PR fortran/54225
+!
+! Contributed by robb wu
+!
+program test
+ implicit none
+ real :: A(2,3)
+
+ print *, A(1, *) ! { dg-error "Expected array subscript" }
+end program
+
+subroutine test2
+integer, dimension(2) :: a
+a(*) = 1 ! { dg-error "Expected array subscript" }
+end
diff --git a/gcc/testsuite/gfortran.dg/bound_simplification_3.f90 b/gcc/testsuite/gfortran.dg/bound_simplification_3.f90
new file mode 100644
index 00000000000..de3a3dc8a94
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/bound_simplification_3.f90
@@ -0,0 +1,23 @@
+! { dg-do compile }
+! { dg-options "-fdump-tree-original" }
+!
+! PR fortran/54208
+! The I and J definitions used to raise an error because ARR's array spec
+! was resolved too late for the LBOUND and UBOUND calls to be simplified to
+! a constant.
+!
+! Contributed by Carlos A. Cruz <carlos.a.cruz@nasa.gov>
+
+program testit
+ integer, parameter :: n=2
+ integer, dimension(1-min(n,2)/2:n) :: arr
+ integer, parameter :: i=lbound(arr,1)
+ integer, parameter :: j=ubound(arr,1)
+ ! write(6,*) i, j
+ if (i /= 0) call abort
+ if (j /= 2) call abort
+end program testit
+
+! { dg-final { scan-tree-dump-times "bound" 0 "original" } }
+! { dg-final { scan-tree-dump-times "abort" 0 "original" } }
+! { dg-final { cleanup-tree-dump "original" } }
diff --git a/gcc/testsuite/gfortran.dg/coarray_10.f90 b/gcc/testsuite/gfortran.dg/coarray_10.f90
index 99f5782e35b..78abb5ad191 100644
--- a/gcc/testsuite/gfortran.dg/coarray_10.f90
+++ b/gcc/testsuite/gfortran.dg/coarray_10.f90
@@ -30,12 +30,12 @@ end subroutine this_image_check
subroutine rank_mismatch()
implicit none
integer,allocatable :: A(:)[:,:,:,:]
- allocate(A(1)[1,1,1:*]) ! { dg-error "Unexpected ... for codimension" }
+ allocate(A(1)[1,1,1:*]) ! { dg-error "Too few codimensions" }
allocate(A(1)[1,1,1,1,1,*]) ! { dg-error "Invalid codimension 5" }
allocate(A(1)[1,1,1,*])
allocate(A(1)[1,1]) ! { dg-error "Too few codimensions" }
allocate(A(1)[1,*]) ! { dg-error "Too few codimensions" }
- allocate(A(1)[1,1:*]) ! { dg-error "Unexpected ... for codimension" }
+ allocate(A(1)[1,1:*]) ! { dg-error "Too few codimensions" }
A(1)[1,1,1] = 1 ! { dg-error "Too few codimensions" }
A(1)[1,1,1,1,1,1] = 1 ! { dg-error "Invalid codimension 5" }
@@ -48,5 +48,5 @@ end subroutine rank_mismatch
subroutine rank_mismatch2()
implicit none
integer, allocatable:: A(:)[:,:,:]
- allocate(A(1)[7:8,4:*]) ! { dg-error "Unexpected .*. for codimension 2 of 3" }
+ allocate(A(1)[7:8,4:*]) ! { dg-error "Too few codimensions" }
end subroutine rank_mismatch2
diff --git a/gcc/testsuite/gfortran.dg/coarray_28.f90 b/gcc/testsuite/gfortran.dg/coarray_28.f90
new file mode 100644
index 00000000000..ca6f863568a
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/coarray_28.f90
@@ -0,0 +1,10 @@
+! { dg-do compile }
+! { dg-options "-fcoarray=single" }
+!
+! PR fortran/54225
+!
+
+integer, allocatable :: a[:,:]
+
+allocate (a[*,4]) ! { dg-error "Unexpected '.' for codimension 1 of 2" }
+end
diff --git a/gcc/testsuite/gfortran.dg/promotion_2.f90 b/gcc/testsuite/gfortran.dg/promotion_2.f90
new file mode 100644
index 00000000000..3acf249705c
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/promotion_2.f90
@@ -0,0 +1,16 @@
+! { dg-do compile }
+! { dg-options "-fdefault-real-8 -fexternal-blas -fdump-tree-original" }
+!
+! PR fortran/54463
+!
+! Contributed by Simon Reinhardt
+!
+program test
+ implicit none
+ real, dimension(3,3) :: A
+ A = matmul(A,A)
+end program test
+
+! { dg-final { scan-tree-dump-times "sgemm_" 0 "original" } }
+! { dg-final { scan-tree-dump-times "dgemm_" 1 "original" } }
+! { dg-final { cleanup-tree-dump "original" } }
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index f597316ac4d..8f793b7e509 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -2099,6 +2099,19 @@ proc add_options_for_arm_neon { flags } {
return "$flags $et_arm_neon_flags"
}
+# Add the options needed for NEONv2.  We need either -mfloat-abi=softfp
+# or -mfloat-abi=hard, but if one is already specified by the
+# multilib, use it.  Similarly, if a -mfpu option already enables
+# NEONv2, do not add -mfpu=neon-vfpv4.
+
+proc add_options_for_arm_neonv2 { flags } {
+ if { ! [check_effective_target_arm_neonv2_ok] } {
+ return "$flags"
+ }
+ global et_arm_neonv2_flags
+ return "$flags $et_arm_neonv2_flags"
+}
+
# Return 1 if this is an ARM target supporting -mfpu=neon
# -mfloat-abi=softfp or equivalent options. Some multilibs may be
# incompatible with these options. Also set et_arm_neon_flags to the
@@ -2127,6 +2140,38 @@ proc check_effective_target_arm_neon_ok { } {
check_effective_target_arm_neon_ok_nocache]
}
+# Return 1 if this is an ARM target supporting -mfpu=neon-vfpv4
+# -mfloat-abi=softfp or equivalent options. Some multilibs may be
+# incompatible with these options. Also set et_arm_neonv2_flags to the
+# best options to add.
+
+proc check_effective_target_arm_neonv2_ok_nocache { } {
+ global et_arm_neonv2_flags
+ set et_arm_neonv2_flags ""
+ if { [check_effective_target_arm32] } {
+ foreach flags {"" "-mfloat-abi=softfp" "-mfpu=neon-vfpv4" "-mfpu=neon-vfpv4 -mfloat-abi=softfp"} {
+ if { [check_no_compiler_messages_nocache arm_neonv2_ok object {
+ #include "arm_neon.h"
+ float32x2_t
+ foo (float32x2_t a, float32x2_t b, float32x2_t c)
+ {
+ return vfma_f32 (a, b, c);
+ }
+ } "$flags"] } {
+ set et_arm_neonv2_flags $flags
+ return 1
+ }
+ }
+ }
+
+ return 0
+}
+
+proc check_effective_target_arm_neonv2_ok { } {
+ return [check_cached_effective_target arm_neonv2_ok \
+ check_effective_target_arm_neonv2_ok_nocache]
+}
+
# Add the options needed for NEON. We need either -mfloat-abi=softfp
# or -mfloat-abi=hard, but if one is already specified by the
# multilib, use it.
@@ -2318,6 +2363,21 @@ proc check_effective_target_arm_neon_hw { } {
} [add_options_for_arm_neon ""]]
}
+proc check_effective_target_arm_neonv2_hw { } {
+ return [check_runtime arm_neon_hwv2_available {
+ #include "arm_neon.h"
+ int
+ main (void)
+ {
+ float32x2_t a, b, c;
+ asm ("vfma.f32 %P0, %P1, %P2"
+ : "=w" (a)
+ : "w" (b), "w" (c));
+ return 0;
+ }
+ } [add_options_for_arm_neonv2 ""]]
+}
+
# Return 1 if this is a ARM target with NEON enabled.
proc check_effective_target_arm_neon { } {
@@ -2334,6 +2394,24 @@ proc check_effective_target_arm_neon { } {
}
}
+proc check_effective_target_arm_neonv2 { } {
+ if { [check_effective_target_arm32] } {
+ return [check_no_compiler_messages arm_neon object {
+ #ifndef __ARM_NEON__
+ #error not NEON
+ #else
+ #ifndef __ARM_FEATURE_FMA
+ #error not NEONv2
+ #else
+ int dummy;
+ #endif
+ #endif
+ }]
+ } else {
+ return 0
+ }
+}
+
# Return 1 if this a Loongson-2E or -2F target using an ABI that supports
# the Loongson vector modes.
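Note: the new procs above follow the standard DejaGnu effective-target
pattern -- a _nocache prober that tries flag combinations, a cached _ok
wrapper, and an add_options_for_ helper that injects the discovered
flags.  A minimal sketch of a test that would consume them (hypothetical
file, not part of this diff):

    /* { dg-do compile } */
    /* { dg-require-effective-target arm_neonv2_ok } */
    /* { dg-add-options arm_neonv2 } */
    #include <arm_neon.h>
    /* vfma_f32 needs -mfpu=neon-vfpv4, which arm_neonv2 supplies.  */
    float32x2_t fused (float32x2_t a, float32x2_t b, float32x2_t c)
    {
      return vfma_f32 (a, b, c);
    }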
diff --git a/gcc/trans-mem.c b/gcc/trans-mem.c
index edb678e8c1e..e71efff7065 100644
--- a/gcc/trans-mem.c
+++ b/gcc/trans-mem.c
@@ -1379,6 +1379,19 @@ thread_private_new_memory (basic_block entry_block, tree x)
/* x = (cast*) foo ==> foo */
else if (code == VIEW_CONVERT_EXPR || code == NOP_EXPR)
x = gimple_assign_rhs1 (stmt);
+ /* x = c ? op1 : op2 ==> op1 or op2, just like a PHI.  */
+ else if (code == COND_EXPR)
+ {
+ tree op1 = gimple_assign_rhs2 (stmt);
+ tree op2 = gimple_assign_rhs3 (stmt);
+ enum thread_memory_type mem;
+ retval = thread_private_new_memory (entry_block, op1);
+ if (retval == mem_non_local)
+ goto new_memory_ret;
+ mem = thread_private_new_memory (entry_block, op2);
+ retval = MIN (retval, mem);
+ goto new_memory_ret;
+ }
else
{
retval = mem_non_local;
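Note: the new COND_EXPR arm above treats x = c ? op1 : op2 exactly like
a two-argument PHI: it classifies op1, gives up early on mem_non_local,
and otherwise keeps the weaker (MIN) of the two classifications.  A
hedged source-level sketch of what the walker now sees through (example
code, not from this patch):

    void
    consumer (int c, char *p)
    {
      char local;
      /* Gimplifies to roughly x_1 = c_2 ? &local : p_3; the COND_EXPR
         case recurses into both arms instead of punting.  */
      char *x = c ? &local : p;
      *x = 0;
    }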
diff --git a/gcc/tree-affine.c b/gcc/tree-affine.c
index 495ffe45603..456670d01e9 100644
--- a/gcc/tree-affine.c
+++ b/gcc/tree-affine.c
@@ -33,7 +33,7 @@ along with GCC; see the file COPYING3. If not see
double_int
double_int_ext_for_comb (double_int cst, aff_tree *comb)
{
- return double_int_sext (cst, TYPE_PRECISION (comb->type));
+ return cst.sext (TYPE_PRECISION (comb->type));
}
/* Initializes affine combination COMB so that its value is zero in TYPE. */
@@ -76,27 +76,26 @@ aff_combination_scale (aff_tree *comb, double_int scale)
unsigned i, j;
scale = double_int_ext_for_comb (scale, comb);
- if (double_int_one_p (scale))
+ if (scale.is_one ())
return;
- if (double_int_zero_p (scale))
+ if (scale.is_zero ())
{
aff_combination_zero (comb, comb->type);
return;
}
comb->offset
- = double_int_ext_for_comb (double_int_mul (scale, comb->offset), comb);
+ = double_int_ext_for_comb (scale * comb->offset, comb);
for (i = 0, j = 0; i < comb->n; i++)
{
double_int new_coef;
new_coef
- = double_int_ext_for_comb (double_int_mul (scale, comb->elts[i].coef),
- comb);
+ = double_int_ext_for_comb (scale * comb->elts[i].coef, comb);
/* A coefficient may become zero due to overflow. Remove the zero
elements. */
- if (double_int_zero_p (new_coef))
+ if (new_coef.is_zero ())
continue;
comb->elts[j].coef = new_coef;
comb->elts[j].val = comb->elts[i].val;
@@ -131,7 +130,7 @@ aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
tree type;
scale = double_int_ext_for_comb (scale, comb);
- if (double_int_zero_p (scale))
+ if (scale.is_zero ())
return;
for (i = 0; i < comb->n; i++)
@@ -139,9 +138,9 @@ aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
{
double_int new_coef;
- new_coef = double_int_add (comb->elts[i].coef, scale);
+ new_coef = comb->elts[i].coef + scale;
new_coef = double_int_ext_for_comb (new_coef, comb);
- if (!double_int_zero_p (new_coef))
+ if (!new_coef.is_zero ())
{
comb->elts[i].coef = new_coef;
return;
@@ -172,7 +171,7 @@ aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
if (POINTER_TYPE_P (type))
type = sizetype;
- if (double_int_one_p (scale))
+ if (scale.is_one ())
elt = fold_convert (type, elt);
else
elt = fold_build2 (MULT_EXPR, type,
@@ -191,7 +190,7 @@ aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
static void
aff_combination_add_cst (aff_tree *c, double_int cst)
{
- c->offset = double_int_ext_for_comb (double_int_add (c->offset, cst), c);
+ c->offset = double_int_ext_for_comb (c->offset + cst, c);
}
/* Adds COMB2 to COMB1. */
@@ -234,7 +233,7 @@ aff_combination_convert (aff_tree *comb, tree type)
for (i = j = 0; i < comb->n; i++)
{
double_int new_coef = double_int_ext_for_comb (comb->elts[i].coef, comb);
- if (double_int_zero_p (new_coef))
+ if (new_coef.is_zero ())
continue;
comb->elts[j].coef = new_coef;
comb->elts[j].val = fold_convert (type, comb->elts[i].val);
@@ -323,7 +322,7 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
if (bitpos % BITS_PER_UNIT != 0)
break;
aff_combination_const (comb, type,
- uhwi_to_double_int (bitpos / BITS_PER_UNIT));
+ double_int::from_uhwi (bitpos / BITS_PER_UNIT));
core = build_fold_addr_expr (core);
if (TREE_CODE (core) == ADDR_EXPR)
aff_combination_add_elt (comb, core, double_int_one);
@@ -380,7 +379,7 @@ add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
scale = double_int_ext_for_comb (scale, comb);
elt = fold_convert (type1, elt);
- if (double_int_one_p (scale))
+ if (scale.is_one ())
{
if (!expr)
return fold_convert (type, elt);
@@ -390,7 +389,7 @@ add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
return fold_build2 (PLUS_EXPR, type, expr, elt);
}
- if (double_int_minus_one_p (scale))
+ if (scale.is_minus_one ())
{
if (!expr)
return fold_convert (type, fold_build1 (NEGATE_EXPR, type1, elt));
@@ -408,10 +407,10 @@ add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
fold_build2 (MULT_EXPR, type1, elt,
double_int_to_tree (type1, scale)));
- if (double_int_negative_p (scale))
+ if (scale.is_negative ())
{
code = MINUS_EXPR;
- scale = double_int_neg (scale);
+ scale = -scale;
}
else
code = PLUS_EXPR;
@@ -451,9 +450,9 @@ aff_combination_to_tree (aff_tree *comb)
/* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
unsigned. */
- if (double_int_negative_p (comb->offset))
+ if (comb->offset.is_negative ())
{
- off = double_int_neg (comb->offset);
+ off = -comb->offset;
sgn = double_int_minus_one;
}
else
@@ -516,8 +515,7 @@ aff_combination_add_product (aff_tree *c, double_int coef, tree val,
fold_convert (type, val));
}
- aff_combination_add_elt (r, aval,
- double_int_mul (coef, c->elts[i].coef));
+ aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
}
if (c->rest)
@@ -534,10 +532,9 @@ aff_combination_add_product (aff_tree *c, double_int coef, tree val,
}
if (val)
- aff_combination_add_elt (r, val,
- double_int_mul (coef, c->offset));
+ aff_combination_add_elt (r, val, coef * c->offset);
else
- aff_combination_add_cst (r, double_int_mul (coef, c->offset));
+ aff_combination_add_cst (r, coef * c->offset);
}
/* Multiplies C1 by C2, storing the result to R */
@@ -685,7 +682,7 @@ aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
it from COMB. */
scale = comb->elts[i].coef;
aff_combination_zero (&curre, comb->type);
- aff_combination_add_elt (&curre, e, double_int_neg (scale));
+ aff_combination_add_elt (&curre, e, -scale);
aff_combination_scale (&current, scale);
aff_combination_add (&to_add, &current);
aff_combination_add (&to_add, &curre);
@@ -751,17 +748,17 @@ double_int_constant_multiple_p (double_int val, double_int div,
{
double_int rem, cst;
- if (double_int_zero_p (val))
+ if (val.is_zero ())
return true;
- if (double_int_zero_p (div))
+ if (div.is_zero ())
return false;
- cst = double_int_sdivmod (val, div, FLOOR_DIV_EXPR, &rem);
- if (!double_int_zero_p (rem))
+ cst = val.sdivmod (div, FLOOR_DIV_EXPR, &rem);
+ if (!rem.is_zero ())
return false;
- if (*mult_set && !double_int_equal_p (*mult, cst))
+ if (*mult_set && *mult != cst)
return false;
*mult_set = true;
@@ -779,7 +776,7 @@ aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
bool mult_set = false;
unsigned i;
- if (val->n == 0 && double_int_zero_p (val->offset))
+ if (val->n == 0 && val->offset.is_zero ())
{
*mult = double_int_zero;
return true;
@@ -880,10 +877,10 @@ get_inner_reference_aff (tree ref, aff_tree *addr, double_int *size)
}
aff_combination_const (&tmp, sizetype,
- shwi_to_double_int (bitpos / BITS_PER_UNIT));
+ double_int::from_shwi (bitpos / BITS_PER_UNIT));
aff_combination_add (addr, &tmp);
- *size = shwi_to_double_int ((bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT);
+ *size = double_int::from_shwi ((bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT);
}
/* Returns true if a region of size SIZE1 at position 0 and a region of
@@ -899,17 +896,17 @@ aff_comb_cannot_overlap_p (aff_tree *diff, double_int size1, double_int size2)
return false;
d = diff->offset;
- if (double_int_negative_p (d))
+ if (d.is_negative ())
{
/* The second object is before the first one, we succeed if the last
element of the second object is before the start of the first one. */
- bound = double_int_add (d, double_int_add (size2, double_int_minus_one));
- return double_int_negative_p (bound);
+ bound = d + size2 + double_int_minus_one;
+ return bound.is_negative ();
}
else
{
/* We succeed if the second object starts after the first one ends. */
- return double_int_scmp (size1, d) <= 0;
+ return size1.sle (d);
}
}
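Note: the tree-affine.c hunks above are part of a mechanical conversion
of the double_int API from free functions to member functions and
overloaded C++ operators.  The shape of the change, shown on an
illustrative helper (the function name is an example only):

    /* Before:  */
    double_int
    scale_and_extend (double_int a, double_int b, unsigned prec)
    {
      return double_int_sext (double_int_mul (a, b), prec);
    }

    /* After:  */
    double_int
    scale_and_extend (double_int a, double_int b, unsigned prec)
    {
      return (a * b).sext (prec);
    }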
diff --git a/gcc/tree-call-cdce.c b/gcc/tree-call-cdce.c
index be020dab809..c879548483b 100644
--- a/gcc/tree-call-cdce.c
+++ b/gcc/tree-call-cdce.c
@@ -374,7 +374,7 @@ gen_conditions_for_domain (tree arg, inp_domain domain,
{
/* Now push a separator. */
if (domain.has_lb)
- VEC_quick_push (gimple, conds, (gimple)NULL);
+ VEC_quick_push (gimple, conds, NULL);
gen_one_condition (arg, domain.ub,
(domain.is_ub_inclusive
@@ -496,7 +496,7 @@ gen_conditions_for_pow_int_base (tree base, tree expn,
type is integer. */
/* Push a separator. */
- VEC_quick_push (gimple, conds, (gimple)NULL);
+ VEC_quick_push (gimple, conds, NULL);
temp = create_tmp_var (int_type, "DCE_COND1");
cst0 = build_int_cst (int_type, 0);
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index 9ac36519c8f..adb9ffbc8e0 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -1373,14 +1373,12 @@ group_case_labels_stmt (gimple stmt)
{
tree merge_case = gimple_switch_label (stmt, i);
basic_block merge_bb = label_to_block (CASE_LABEL (merge_case));
- double_int bhp1 = double_int_add (tree_to_double_int (base_high),
- double_int_one);
+ double_int bhp1 = tree_to_double_int (base_high) + double_int_one;
/* Merge the cases if they jump to the same place,
and their ranges are consecutive. */
if (merge_bb == base_bb
- && double_int_equal_p (tree_to_double_int (CASE_LOW (merge_case)),
- bhp1))
+ && tree_to_double_int (CASE_LOW (merge_case)) == bhp1)
{
base_high = CASE_HIGH (merge_case) ?
CASE_HIGH (merge_case) : CASE_LOW (merge_case);
diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c
index 9164adbc7e6..9e94586fa45 100644
--- a/gcc/tree-data-ref.c
+++ b/gcc/tree-data-ref.c
@@ -4301,7 +4301,7 @@ static bool
get_references_in_stmt (gimple stmt, VEC (data_ref_loc, heap) **references)
{
bool clobbers_memory = false;
- data_ref_loc *ref;
+ data_ref_loc ref;
tree *op0, *op1;
enum gimple_code stmt_code = gimple_code (stmt);
@@ -4330,9 +4330,9 @@ get_references_in_stmt (gimple stmt, VEC (data_ref_loc, heap) **references)
&& (base = get_base_address (*op1))
&& TREE_CODE (base) != SSA_NAME))
{
- ref = VEC_safe_push (data_ref_loc, heap, *references, NULL);
- ref->pos = op1;
- ref->is_read = true;
+ ref.pos = op1;
+ ref.is_read = true;
+ VEC_safe_push (data_ref_loc, heap, *references, ref);
}
}
else if (stmt_code == GIMPLE_CALL)
@@ -4348,9 +4348,9 @@ get_references_in_stmt (gimple stmt, VEC (data_ref_loc, heap) **references)
if (DECL_P (*op1)
|| (REFERENCE_CLASS_P (*op1) && get_base_address (*op1)))
{
- ref = VEC_safe_push (data_ref_loc, heap, *references, NULL);
- ref->pos = op1;
- ref->is_read = true;
+ ref.pos = op1;
+ ref.is_read = true;
+ VEC_safe_push (data_ref_loc, heap, *references, ref);
}
}
}
@@ -4361,9 +4361,9 @@ get_references_in_stmt (gimple stmt, VEC (data_ref_loc, heap) **references)
&& (DECL_P (*op0)
|| (REFERENCE_CLASS_P (*op0) && get_base_address (*op0))))
{
- ref = VEC_safe_push (data_ref_loc, heap, *references, NULL);
- ref->pos = op0;
- ref->is_read = false;
+ ref.pos = op0;
+ ref.is_read = false;
+ VEC_safe_push (data_ref_loc, heap, *references, ref);
}
return clobbers_memory;
}
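Note: this hunk tracks a VEC API change visible throughout the patch:
instead of pushing a NULL placeholder and filling the slot through the
returned pointer, callers now build the element locally and push it by
value.  Schematically (T stands for any vector element type):

    /* Old idiom:  */
    T *p = VEC_safe_push (T, heap, v, NULL);
    p->field = ...;

    /* New idiom:  */
    T elt;
    elt.field = ...;
    VEC_safe_push (T, heap, v, elt);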
diff --git a/gcc/tree-dfa.c b/gcc/tree-dfa.c
index 5342f1973a4..423923fb66a 100644
--- a/gcc/tree-dfa.c
+++ b/gcc/tree-dfa.c
@@ -423,9 +423,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
switch (TREE_CODE (exp))
{
case BIT_FIELD_REF:
- bit_offset
- = double_int_add (bit_offset,
- tree_to_double_int (TREE_OPERAND (exp, 2)));
+ bit_offset += tree_to_double_int (TREE_OPERAND (exp, 2));
break;
case COMPONENT_REF:
@@ -436,14 +434,11 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
if (this_offset && TREE_CODE (this_offset) == INTEGER_CST)
{
double_int doffset = tree_to_double_int (this_offset);
- doffset = double_int_lshift (doffset,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
- doffset = double_int_add (doffset,
- tree_to_double_int
- (DECL_FIELD_BIT_OFFSET (field)));
- bit_offset = double_int_add (bit_offset, doffset);
+ doffset = doffset.alshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT);
+ doffset += tree_to_double_int (DECL_FIELD_BIT_OFFSET (field));
+ bit_offset = bit_offset + doffset;
/* If we had seen a variable array ref already and we just
referenced the last field of a struct or a union member
@@ -462,11 +457,11 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
tree ssize = TYPE_SIZE_UNIT (stype);
if (host_integerp (fsize, 0)
&& host_integerp (ssize, 0)
- && double_int_fits_in_shwi_p (doffset))
+ && doffset.fits_shwi ())
maxsize += ((TREE_INT_CST_LOW (ssize)
- TREE_INT_CST_LOW (fsize))
* BITS_PER_UNIT
- - double_int_to_shwi (doffset));
+ - doffset.to_shwi ());
else
maxsize = -1;
}
@@ -481,9 +476,9 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
if (maxsize != -1
&& csize
&& host_integerp (csize, 1)
- && double_int_fits_in_shwi_p (bit_offset))
+ && bit_offset.fits_shwi ())
maxsize = TREE_INT_CST_LOW (csize)
- - double_int_to_shwi (bit_offset);
+ - bit_offset.to_shwi ();
else
maxsize = -1;
}
@@ -504,17 +499,13 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
TREE_CODE (unit_size) == INTEGER_CST))
{
double_int doffset
- = double_int_sext
- (double_int_sub (TREE_INT_CST (index),
- TREE_INT_CST (low_bound)),
- TYPE_PRECISION (TREE_TYPE (index)));
- doffset = double_int_mul (doffset,
- tree_to_double_int (unit_size));
- doffset = double_int_lshift (doffset,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
- bit_offset = double_int_add (bit_offset, doffset);
+ = (TREE_INT_CST (index) - TREE_INT_CST (low_bound))
+ .sext (TYPE_PRECISION (TREE_TYPE (index)));
+ doffset *= tree_to_double_int (unit_size);
+ doffset = doffset.alshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT);
+ bit_offset = bit_offset + doffset;
/* An array ref with a constant index up in the structure
hierarchy will constrain the size of any variable array ref
@@ -530,9 +521,9 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
if (maxsize != -1
&& asize
&& host_integerp (asize, 1)
- && double_int_fits_in_shwi_p (bit_offset))
+ && bit_offset.fits_shwi ())
maxsize = TREE_INT_CST_LOW (asize)
- - double_int_to_shwi (bit_offset);
+ - bit_offset.to_shwi ();
else
maxsize = -1;
@@ -547,8 +538,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
break;
case IMAGPART_EXPR:
- bit_offset
- = double_int_add (bit_offset, uhwi_to_double_int (bitsize));
+ bit_offset += double_int::from_uhwi (bitsize);
break;
case VIEW_CONVERT_EXPR:
@@ -563,12 +553,11 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
else
{
double_int off = mem_ref_offset (exp);
- off = double_int_lshift (off,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
- off = double_int_add (off, bit_offset);
- if (double_int_fits_in_shwi_p (off))
+ off = off.alshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT);
+ off = off + bit_offset;
+ if (off.fits_shwi ())
{
bit_offset = off;
exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
@@ -595,12 +584,11 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
else
{
double_int off = mem_ref_offset (exp);
- off = double_int_lshift (off,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
- off = double_int_add (off, bit_offset);
- if (double_int_fits_in_shwi_p (off))
+ off = off.alshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT);
+ off += bit_offset;
+ if (off.fits_shwi ())
{
bit_offset = off;
exp = TREE_OPERAND (TMR_BASE (exp), 0);
@@ -617,7 +605,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
}
done:
- if (!double_int_fits_in_shwi_p (bit_offset))
+ if (!bit_offset.fits_shwi ())
{
*poffset = 0;
*psize = bitsize;
@@ -626,7 +614,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
return exp;
}
- hbit_offset = double_int_to_shwi (bit_offset);
+ hbit_offset = bit_offset.to_shwi ();
/* We need to deal with variable arrays ending structures such as
struct { int length; int a[1]; } x; x.a[d]
@@ -741,7 +729,7 @@ dump_enumerated_decls_push (tree *tp, int *walk_subtrees, void *data)
return NULL_TREE;
nt.t = *tp;
nt.num = VEC_length (numbered_tree, *list);
- VEC_safe_push (numbered_tree, heap, *list, &nt);
+ VEC_safe_push (numbered_tree, heap, *list, nt);
*walk_subtrees = 0;
return NULL_TREE;
}
diff --git a/gcc/tree-diagnostic.c b/gcc/tree-diagnostic.c
index 1276cbea6af..2756ed21374 100644
--- a/gcc/tree-diagnostic.c
+++ b/gcc/tree-diagnostic.c
@@ -127,7 +127,7 @@ maybe_unwind_expanded_macro_loc (diagnostic_context *context,
loc.where = where;
loc.map = map;
- VEC_safe_push (loc_map_pair, heap, loc_vec, &loc);
+ VEC_safe_push (loc_map_pair, heap, loc_vec, loc);
/* WHERE is the location of a token inside the expansion of a
macro. MAP is the map holding the locations of that macro
diff --git a/gcc/tree-emutls.c b/gcc/tree-emutls.c
index b30469c8e59..88e77dace60 100644
--- a/gcc/tree-emutls.c
+++ b/gcc/tree-emutls.c
@@ -149,29 +149,29 @@ tree
default_emutls_var_init (tree to, tree decl, tree proxy)
{
VEC(constructor_elt,gc) *v = VEC_alloc (constructor_elt, gc, 4);
- constructor_elt *elt;
+ constructor_elt elt;
tree type = TREE_TYPE (to);
tree field = TYPE_FIELDS (type);
- elt = VEC_quick_push (constructor_elt, v, NULL);
- elt->index = field;
- elt->value = fold_convert (TREE_TYPE (field), DECL_SIZE_UNIT (decl));
+ elt.index = field;
+ elt.value = fold_convert (TREE_TYPE (field), DECL_SIZE_UNIT (decl));
+ VEC_quick_push (constructor_elt, v, elt);
- elt = VEC_quick_push (constructor_elt, v, NULL);
field = DECL_CHAIN (field);
- elt->index = field;
- elt->value = build_int_cst (TREE_TYPE (field),
- DECL_ALIGN_UNIT (decl));
+ elt.index = field;
+ elt.value = build_int_cst (TREE_TYPE (field),
+ DECL_ALIGN_UNIT (decl));
+ VEC_quick_push (constructor_elt, v, elt);
- elt = VEC_quick_push (constructor_elt, v, NULL);
field = DECL_CHAIN (field);
- elt->index = field;
- elt->value = null_pointer_node;
+ elt.index = field;
+ elt.value = null_pointer_node;
+ VEC_quick_push (constructor_elt, v, elt);
- elt = VEC_quick_push (constructor_elt, v, NULL);
field = DECL_CHAIN (field);
- elt->index = field;
- elt->value = proxy;
+ elt.index = field;
+ elt.value = proxy;
+ VEC_quick_push (constructor_elt, v, elt);
return build_constructor (type, v);
}
diff --git a/gcc/tree-flow-inline.h b/gcc/tree-flow-inline.h
index acb2cc40dcb..6c55da6fae2 100644
--- a/gcc/tree-flow-inline.h
+++ b/gcc/tree-flow-inline.h
@@ -1271,7 +1271,7 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset,
{
double_int off = mem_ref_offset (exp);
gcc_assert (off.high == -1 || off.high == 0);
- byte_offset += double_int_to_shwi (off);
+ byte_offset += off.to_shwi ();
}
exp = TREE_OPERAND (base, 0);
}
@@ -1294,7 +1294,7 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset,
{
double_int off = mem_ref_offset (exp);
gcc_assert (off.high == -1 || off.high == 0);
- byte_offset += double_int_to_shwi (off);
+ byte_offset += off.to_shwi ();
}
exp = TREE_OPERAND (base, 0);
}
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index 3be73ae3ce4..aff83c0965f 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -854,6 +854,7 @@ remap_gimple_op_r (tree *tp, int *walk_subtrees, void *data)
ptr, TREE_OPERAND (*tp, 1));
TREE_THIS_NOTRAP (*tp) = TREE_THIS_NOTRAP (old);
TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
+ TREE_SIDE_EFFECTS (*tp) = TREE_SIDE_EFFECTS (old);
TREE_NO_WARNING (*tp) = TREE_NO_WARNING (old);
*walk_subtrees = 0;
return NULL;
diff --git a/gcc/tree-object-size.c b/gcc/tree-object-size.c
index fcf9316a395..9a537f1c5fc 100644
--- a/gcc/tree-object-size.c
+++ b/gcc/tree-object-size.c
@@ -192,12 +192,11 @@ addr_object_size (struct object_size_info *osi, const_tree ptr,
}
if (sz != unknown[object_size_type])
{
- double_int dsz = double_int_sub (uhwi_to_double_int (sz),
- mem_ref_offset (pt_var));
- if (double_int_negative_p (dsz))
+ double_int dsz = double_int::from_uhwi (sz) - mem_ref_offset (pt_var);
+ if (dsz.is_negative ())
sz = 0;
- else if (double_int_fits_in_uhwi_p (dsz))
- sz = double_int_to_uhwi (dsz);
+ else if (dsz.fits_uhwi ())
+ sz = dsz.to_uhwi ();
else
sz = unknown[object_size_type];
}
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index 4eef2194bee..0661406e44f 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -92,8 +92,7 @@ struct rtl_opt_pass
struct varpool_node;
struct cgraph_node;
-struct cgraph_node_set_def;
-struct varpool_node_set_def;
+struct lto_symtab_encoder_d;
/* Description of IPA pass with generate summary, write, execute, read and
transform stages. */
@@ -528,8 +527,7 @@ extern const char *get_current_pass_name (void);
extern void print_current_pass (FILE *);
extern void debug_pass (void);
extern void ipa_write_summaries (void);
-extern void ipa_write_optimization_summaries (struct cgraph_node_set_def *,
- struct varpool_node_set_def *);
+extern void ipa_write_optimization_summaries (struct lto_symtab_encoder_d *);
extern void ipa_read_summaries (void);
extern void ipa_read_optimization_summaries (void);
extern void register_one_dump_file (struct opt_pass *);
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index 684a57a23b3..ba61c5b04e0 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -901,7 +901,7 @@ order_drefs (const void *a, const void *b)
{
const dref *const da = (const dref *) a;
const dref *const db = (const dref *) b;
- int offcmp = double_int_scmp ((*da)->offset, (*db)->offset);
+ int offcmp = (*da)->offset.scmp ((*db)->offset);
if (offcmp != 0)
return offcmp;
@@ -925,18 +925,18 @@ add_ref_to_chain (chain_p chain, dref ref)
dref root = get_chain_root (chain);
double_int dist;
- gcc_assert (double_int_scmp (root->offset, ref->offset) <= 0);
- dist = double_int_sub (ref->offset, root->offset);
- if (double_int_ucmp (uhwi_to_double_int (MAX_DISTANCE), dist) <= 0)
+ gcc_assert (root->offset.sle (ref->offset));
+ dist = ref->offset - root->offset;
+ if (double_int::from_uhwi (MAX_DISTANCE).ule (dist))
{
free (ref);
return;
}
- gcc_assert (double_int_fits_in_uhwi_p (dist));
+ gcc_assert (dist.fits_uhwi ());
VEC_safe_push (dref, heap, chain->refs, ref);
- ref->distance = double_int_to_uhwi (dist);
+ ref->distance = dist.to_uhwi ();
if (ref->distance >= chain->length)
{
@@ -1055,7 +1055,7 @@ valid_initializer_p (struct data_reference *ref,
if (!aff_combination_constant_multiple_p (&diff, &step, &off))
return false;
- if (!double_int_equal_p (off, uhwi_to_double_int (distance)))
+ if (off != double_int::from_uhwi (distance))
return false;
return true;
@@ -1198,8 +1198,7 @@ determine_roots_comp (struct loop *loop,
FOR_EACH_VEC_ELT (dref, comp->refs, i, a)
{
if (!chain || DR_IS_WRITE (a->ref)
- || double_int_ucmp (uhwi_to_double_int (MAX_DISTANCE),
- double_int_sub (a->offset, last_ofs)) <= 0)
+ || double_int::from_uhwi (MAX_DISTANCE).ule (a->offset - last_ofs))
{
if (nontrivial_chain_p (chain))
{
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index 1fb20cdf450..b0c283d9851 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -1331,8 +1331,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
}
else if (is_array_init
&& (TREE_CODE (field) != INTEGER_CST
- || !double_int_equal_p (tree_to_double_int (field),
- curidx)))
+ || tree_to_double_int (field) != curidx))
{
pp_character (buffer, '[');
if (TREE_CODE (field) == RANGE_EXPR)
@@ -1353,7 +1352,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
}
}
if (is_array_init)
- curidx = double_int_add (curidx, double_int_one);
+ curidx += double_int_one;
if (val && TREE_CODE (val) == ADDR_EXPR)
if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL)
val = TREE_OPERAND (val, 0);
@@ -1437,9 +1436,6 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
false);
pp_space (buffer);
pp_character (buffer, '=');
- if (TREE_CODE (node) == MODIFY_EXPR
- && MOVE_NONTEMPORAL (node))
- pp_string (buffer, "{nt}");
pp_space (buffer);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags,
false);
diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index cc7becdc319..ef3f5f99ed6 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -1488,8 +1488,8 @@ build_ref_for_offset (location_t loc, tree base, HOST_WIDE_INT offset,
|| TREE_CODE (prev_base) == TARGET_MEM_REF)
align = TYPE_ALIGN (TREE_TYPE (prev_base));
}
- misalign += (double_int_sext (tree_to_double_int (off),
- TYPE_PRECISION (TREE_TYPE (off))).low
+ misalign += (tree_to_double_int (off)
+ .sext (TYPE_PRECISION (TREE_TYPE (off))).low
* BITS_PER_UNIT);
misalign = misalign & (align - 1);
if (misalign != 0)
@@ -3999,7 +3999,7 @@ splice_all_param_accesses (VEC (access_p, heap) **representatives)
result = UNUSED_PARAMS;
}
else
- VEC_quick_push (access_p, *representatives, (access_p) NULL);
+ VEC_quick_push (access_p, *representatives, NULL);
}
if (result == NO_GOOD_ACCESS)
@@ -4050,36 +4050,35 @@ turn_representatives_into_adjustments (VEC (access_p, heap) *representatives,
if (!repr || no_accesses_p (repr))
{
- struct ipa_parm_adjustment *adj;
+ struct ipa_parm_adjustment adj;
- adj = VEC_quick_push (ipa_parm_adjustment_t, adjustments, NULL);
- memset (adj, 0, sizeof (*adj));
- adj->base_index = get_param_index (parm, parms);
- adj->base = parm;
+ memset (&adj, 0, sizeof (adj));
+ adj.base_index = get_param_index (parm, parms);
+ adj.base = parm;
if (!repr)
- adj->copy_param = 1;
+ adj.copy_param = 1;
else
- adj->remove_param = 1;
+ adj.remove_param = 1;
+ VEC_quick_push (ipa_parm_adjustment_t, adjustments, adj);
}
else
{
- struct ipa_parm_adjustment *adj;
+ struct ipa_parm_adjustment adj;
int index = get_param_index (parm, parms);
for (; repr; repr = repr->next_grp)
{
- adj = VEC_quick_push (ipa_parm_adjustment_t, adjustments, NULL);
- memset (adj, 0, sizeof (*adj));
+ memset (&adj, 0, sizeof (adj));
gcc_assert (repr->base == parm);
- adj->base_index = index;
- adj->base = repr->base;
- adj->type = repr->type;
- adj->alias_ptr_type = reference_alias_ptr_type (repr->expr);
- adj->offset = repr->offset;
- adj->by_ref = (POINTER_TYPE_P (TREE_TYPE (repr->base))
- && (repr->grp_maybe_modified
- || repr->grp_not_necessarilly_dereferenced));
-
+ adj.base_index = index;
+ adj.base = repr->base;
+ adj.type = repr->type;
+ adj.alias_ptr_type = reference_alias_ptr_type (repr->expr);
+ adj.offset = repr->offset;
+ adj.by_ref = (POINTER_TYPE_P (TREE_TYPE (repr->base))
+ && (repr->grp_maybe_modified
+ || repr->grp_not_necessarilly_dereferenced));
+ VEC_quick_push (ipa_parm_adjustment_t, adjustments, adj);
}
}
}
diff --git a/gcc/tree-ssa-address.c b/gcc/tree-ssa-address.c
index 57a590d4a63..caa51be6a5d 100644
--- a/gcc/tree-ssa-address.c
+++ b/gcc/tree-ssa-address.c
@@ -198,8 +198,8 @@ addr_for_mem_ref (struct mem_address *addr, addr_space_t as,
if (addr->offset && !integer_zerop (addr->offset))
off = immed_double_int_const
- (double_int_sext (tree_to_double_int (addr->offset),
- TYPE_PRECISION (TREE_TYPE (addr->offset))),
+ (tree_to_double_int (addr->offset)
+ .sext (TYPE_PRECISION (TREE_TYPE (addr->offset))),
pointer_mode);
else
off = NULL_RTX;
@@ -400,7 +400,7 @@ move_fixed_address_to_symbol (struct mem_address *parts, aff_tree *addr)
for (i = 0; i < addr->n; i++)
{
- if (!double_int_one_p (addr->elts[i].coef))
+ if (!addr->elts[i].coef.is_one ())
continue;
val = addr->elts[i].val;
@@ -428,7 +428,7 @@ move_hint_to_base (tree type, struct mem_address *parts, tree base_hint,
for (i = 0; i < addr->n; i++)
{
- if (!double_int_one_p (addr->elts[i].coef))
+ if (!addr->elts[i].coef.is_one ())
continue;
val = addr->elts[i].val;
@@ -460,7 +460,7 @@ move_pointer_to_base (struct mem_address *parts, aff_tree *addr)
for (i = 0; i < addr->n; i++)
{
- if (!double_int_one_p (addr->elts[i].coef))
+ if (!addr->elts[i].coef.is_one ())
continue;
val = addr->elts[i].val;
@@ -548,10 +548,10 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
best_mult = double_int_zero;
for (i = 0; i < addr->n; i++)
{
- if (!double_int_fits_in_shwi_p (addr->elts[i].coef))
+ if (!addr->elts[i].coef.fits_shwi ())
continue;
- coef = double_int_to_shwi (addr->elts[i].coef);
+ coef = addr->elts[i].coef.to_shwi ();
if (coef == 1
|| !multiplier_allowed_in_address_p (coef, TYPE_MODE (type), as))
continue;
@@ -572,11 +572,11 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
for (i = j = 0; i < addr->n; i++)
{
amult = addr->elts[i].coef;
- amult_neg = double_int_ext_for_comb (double_int_neg (amult), addr);
+ amult_neg = double_int_ext_for_comb (-amult, addr);
- if (double_int_equal_p (amult, best_mult))
+ if (amult == best_mult)
op_code = PLUS_EXPR;
- else if (double_int_equal_p (amult_neg, best_mult))
+ else if (amult_neg == best_mult)
op_code = MINUS_EXPR;
else
{
@@ -624,7 +624,7 @@ addr_to_parts (tree type, aff_tree *addr, tree iv_cand,
parts->index = NULL_TREE;
parts->step = NULL_TREE;
- if (!double_int_zero_p (addr->offset))
+ if (!addr->offset.is_zero ())
parts->offset = double_int_to_tree (sizetype, addr->offset);
else
parts->offset = NULL_TREE;
@@ -656,7 +656,7 @@ addr_to_parts (tree type, aff_tree *addr, tree iv_cand,
for (i = 0; i < addr->n; i++)
{
part = fold_convert (sizetype, addr->elts[i].val);
- if (!double_int_one_p (addr->elts[i].coef))
+ if (!addr->elts[i].coef.is_one ())
part = fold_build2 (MULT_EXPR, sizetype, part,
double_int_to_tree (sizetype, addr->elts[i].coef));
add_to_parts (parts, part);
@@ -876,8 +876,8 @@ copy_ref_info (tree new_ref, tree old_ref)
&& (TREE_INT_CST_LOW (TMR_STEP (new_ref))
< align)))))
{
- unsigned int inc = double_int_sub (mem_ref_offset (old_ref),
- mem_ref_offset (new_ref)).low;
+ unsigned int inc = (mem_ref_offset (old_ref)
+ - mem_ref_offset (new_ref)).low;
adjust_ptr_info_misalignment (new_pi, inc);
}
else
diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index 574f418447d..b045da27eec 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -756,12 +756,11 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
/* The offset embedded in MEM_REFs can be negative. Bias them
so that the resulting offset adjustment is positive. */
moff = mem_ref_offset (base1);
- moff = double_int_lshift (moff,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
- if (double_int_negative_p (moff))
- offset2p += double_int_neg (moff).low;
+ moff = moff.alshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT);
+ if (moff.is_negative ())
+ offset2p += (-moff).low;
else
offset1p += moff.low;
@@ -835,12 +834,11 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
|| TREE_CODE (dbase2) == TARGET_MEM_REF)
{
double_int moff = mem_ref_offset (dbase2);
- moff = double_int_lshift (moff,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
- if (double_int_negative_p (moff))
- doffset1 -= double_int_neg (moff).low;
+ moff = moff.alshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT);
+ if (moff.is_negative ())
+ doffset1 -= (-moff).low;
else
doffset2 -= moff.low;
}
@@ -932,21 +930,19 @@ indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
/* The offset embedded in MEM_REFs can be negative. Bias them
so that the resulting offset adjustment is positive. */
moff = mem_ref_offset (base1);
- moff = double_int_lshift (moff,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
- if (double_int_negative_p (moff))
- offset2 += double_int_neg (moff).low;
+ moff = moff.alshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT);
+ if (moff.is_negative ())
+ offset2 += (-moff).low;
else
offset1 += moff.low;
moff = mem_ref_offset (base2);
- moff = double_int_lshift (moff,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
- if (double_int_negative_p (moff))
- offset1 += double_int_neg (moff).low;
+ moff = moff.alshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT);
+ if (moff.is_negative ())
+ offset1 += (-moff).low;
else
offset2 += moff.low;
return ranges_overlap_p (offset1, max_size1, offset2, max_size2);
@@ -1929,7 +1925,8 @@ stmt_kills_ref_p (gimple stmt, tree ref)
static bool
maybe_skip_until (gimple phi, tree target, ao_ref *ref,
- tree vuse, unsigned int *cnt, bitmap *visited)
+ tree vuse, unsigned int *cnt, bitmap *visited,
+ bool abort_on_visited)
{
basic_block bb = gimple_bb (phi);
@@ -1947,8 +1944,9 @@ maybe_skip_until (gimple phi, tree target, ao_ref *ref,
{
/* An already visited PHI node ends the walk successfully. */
if (bitmap_bit_p (*visited, SSA_NAME_VERSION (PHI_RESULT (def_stmt))))
- return true;
- vuse = get_continuation_for_phi (def_stmt, ref, cnt, visited);
+ return !abort_on_visited;
+ vuse = get_continuation_for_phi (def_stmt, ref, cnt,
+ visited, abort_on_visited);
if (!vuse)
return false;
continue;
@@ -1967,7 +1965,7 @@ maybe_skip_until (gimple phi, tree target, ao_ref *ref,
if (gimple_bb (def_stmt) != bb)
{
if (!bitmap_set_bit (*visited, SSA_NAME_VERSION (vuse)))
- return true;
+ return !abort_on_visited;
bb = gimple_bb (def_stmt);
}
vuse = gimple_vuse (def_stmt);
@@ -1981,7 +1979,8 @@ maybe_skip_until (gimple phi, tree target, ao_ref *ref,
static tree
get_continuation_for_phi_1 (gimple phi, tree arg0, tree arg1,
- ao_ref *ref, unsigned int *cnt, bitmap *visited)
+ ao_ref *ref, unsigned int *cnt,
+ bitmap *visited, bool abort_on_visited)
{
gimple def0 = SSA_NAME_DEF_STMT (arg0);
gimple def1 = SSA_NAME_DEF_STMT (arg1);
@@ -1994,14 +1993,16 @@ get_continuation_for_phi_1 (gimple phi, tree arg0, tree arg1,
&& dominated_by_p (CDI_DOMINATORS,
gimple_bb (def1), gimple_bb (def0))))
{
- if (maybe_skip_until (phi, arg0, ref, arg1, cnt, visited))
+ if (maybe_skip_until (phi, arg0, ref, arg1, cnt,
+ visited, abort_on_visited))
return arg0;
}
else if (gimple_nop_p (def1)
|| dominated_by_p (CDI_DOMINATORS,
gimple_bb (def0), gimple_bb (def1)))
{
- if (maybe_skip_until (phi, arg1, ref, arg0, cnt, visited))
+ if (maybe_skip_until (phi, arg1, ref, arg0, cnt,
+ visited, abort_on_visited))
return arg1;
}
/* Special case of a diamond:
@@ -2038,7 +2039,8 @@ get_continuation_for_phi_1 (gimple phi, tree arg0, tree arg1,
tree
get_continuation_for_phi (gimple phi, ao_ref *ref,
- unsigned int *cnt, bitmap *visited)
+ unsigned int *cnt, bitmap *visited,
+ bool abort_on_visited)
{
unsigned nargs = gimple_phi_num_args (phi);
@@ -2076,7 +2078,7 @@ get_continuation_for_phi (gimple phi, ao_ref *ref,
{
arg1 = PHI_ARG_DEF (phi, i);
arg0 = get_continuation_for_phi_1 (phi, arg0, arg1, ref,
- cnt, visited);
+ cnt, visited, abort_on_visited);
if (!arg0)
return NULL_TREE;
}
@@ -2113,6 +2115,7 @@ walk_non_aliased_vuses (ao_ref *ref, tree vuse,
bitmap visited = NULL;
void *res;
unsigned int cnt = 0;
+ bool translated = false;
timevar_push (TV_ALIAS_STMT_WALK);
@@ -2136,7 +2139,8 @@ walk_non_aliased_vuses (ao_ref *ref, tree vuse,
if (gimple_nop_p (def_stmt))
break;
else if (gimple_code (def_stmt) == GIMPLE_PHI)
- vuse = get_continuation_for_phi (def_stmt, ref, &cnt, &visited);
+ vuse = get_continuation_for_phi (def_stmt, ref, &cnt,
+ &visited, translated);
else
{
cnt++;
@@ -2155,6 +2159,7 @@ walk_non_aliased_vuses (ao_ref *ref, tree vuse,
else if (res != NULL)
break;
/* Translation succeeded, continue walking. */
+ translated = true;
}
vuse = gimple_vuse (def_stmt);
}
diff --git a/gcc/tree-ssa-alias.h b/gcc/tree-ssa-alias.h
index cdff3812181..6f38f20bce5 100644
--- a/gcc/tree-ssa-alias.h
+++ b/gcc/tree-ssa-alias.h
@@ -108,7 +108,7 @@ extern bool stmt_may_clobber_ref_p_1 (gimple, ao_ref *);
extern bool call_may_clobber_ref_p (gimple, tree);
extern bool stmt_kills_ref_p (gimple, tree);
extern tree get_continuation_for_phi (gimple, ao_ref *,
- unsigned int *, bitmap *);
+ unsigned int *, bitmap *, bool);
extern void *walk_non_aliased_vuses (ao_ref *, tree,
void *(*)(ao_ref *, tree,
unsigned int, void *),
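Note: the interface change above threads a new abort_on_visited flag
through the PHI-walking code: once walk_non_aliased_vuses has performed
a reference translation, revisiting an already-seen PHI node makes the
walk fail instead of succeed.  Sketch of the updated call site,
mirroring the tree-ssa-alias.c hunk:

    /* TRANSLATED is set after the first successful translation.  */
    vuse = get_continuation_for_phi (def_stmt, ref, &cnt,
                                     &visited, translated);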
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index ac6ad5da74f..830f6f33460 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -186,12 +186,11 @@ dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
case CONSTANT:
fprintf (outf, "%sCONSTANT ", prefix);
if (TREE_CODE (val.value) != INTEGER_CST
- || double_int_zero_p (val.mask))
+ || val.mask.is_zero ())
print_generic_expr (outf, val.value, dump_flags);
else
{
- double_int cval = double_int_and_not (tree_to_double_int (val.value),
- val.mask);
+ double_int cval = tree_to_double_int (val.value).and_not (val.mask);
fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
prefix, cval.high, cval.low);
fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
@@ -323,7 +322,7 @@ get_constant_value (tree var)
if (val
&& val->lattice_val == CONSTANT
&& (TREE_CODE (val->value) != INTEGER_CST
- || double_int_zero_p (val->mask)))
+ || val->mask.is_zero ()))
return val->value;
return NULL_TREE;
}
@@ -414,11 +413,8 @@ valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
/* Bit-lattices have to agree in the still valid bits. */
if (TREE_CODE (old_val.value) == INTEGER_CST
&& TREE_CODE (new_val.value) == INTEGER_CST)
- return double_int_equal_p
- (double_int_and_not (tree_to_double_int (old_val.value),
- new_val.mask),
- double_int_and_not (tree_to_double_int (new_val.value),
- new_val.mask));
+ return tree_to_double_int (old_val.value).and_not (new_val.mask)
+ == tree_to_double_int (new_val.value).and_not (new_val.mask);
/* Otherwise constant values have to agree. */
return operand_equal_p (old_val.value, new_val.value, 0);
@@ -444,10 +440,9 @@ set_lattice_value (tree var, prop_value_t new_val)
&& TREE_CODE (old_val->value) == INTEGER_CST)
{
double_int diff;
- diff = double_int_xor (tree_to_double_int (new_val.value),
- tree_to_double_int (old_val->value));
- new_val.mask = double_int_ior (new_val.mask,
- double_int_ior (old_val->mask, diff));
+ diff = tree_to_double_int (new_val.value)
+ ^ tree_to_double_int (old_val->value);
+ new_val.mask = new_val.mask | old_val->mask | diff;
}
gcc_assert (valid_lattice_transition (*old_val, new_val));
@@ -458,7 +453,7 @@ set_lattice_value (tree var, prop_value_t new_val)
|| (new_val.lattice_val == CONSTANT
&& TREE_CODE (new_val.value) == INTEGER_CST
&& (TREE_CODE (old_val->value) != INTEGER_CST
- || !double_int_equal_p (new_val.mask, old_val->mask))))
+ || new_val.mask != old_val->mask)))
{
/* ??? We would like to delay creation of INTEGER_CSTs from
partially constants here. */
@@ -511,15 +506,15 @@ get_value_from_alignment (tree expr)
gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
get_pointer_alignment_1 (expr, &align, &bitpos);
- val.mask
- = double_int_and_not (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
- ? double_int_mask (TYPE_PRECISION (type))
- : double_int_minus_one,
- uhwi_to_double_int (align / BITS_PER_UNIT - 1));
- val.lattice_val = double_int_minus_one_p (val.mask) ? VARYING : CONSTANT;
+ val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
+ ? double_int::mask (TYPE_PRECISION (type))
+ : double_int_minus_one)
+ .and_not (double_int::from_uhwi (align / BITS_PER_UNIT - 1));
+ val.lattice_val = val.mask.is_minus_one () ? VARYING : CONSTANT;
if (val.lattice_val == CONSTANT)
val.value
- = double_int_to_tree (type, uhwi_to_double_int (bitpos / BITS_PER_UNIT));
+ = double_int_to_tree (type,
+ double_int::from_uhwi (bitpos / BITS_PER_UNIT));
else
val.value = NULL_TREE;
@@ -880,12 +875,10 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
For INTEGER_CSTs mask unequal bits. If no equal bits remain,
drop to varying. */
- val1->mask
- = double_int_ior (double_int_ior (val1->mask,
- val2->mask),
- double_int_xor (tree_to_double_int (val1->value),
- tree_to_double_int (val2->value)));
- if (double_int_minus_one_p (val1->mask))
+ val1->mask = val1->mask | val2->mask
+ | (tree_to_double_int (val1->value)
+ ^ tree_to_double_int (val2->value));
+ if (val1->mask.is_minus_one ())
{
val1->lattice_val = VARYING;
val1->value = NULL_TREE;
@@ -1080,7 +1073,7 @@ bit_value_unop_1 (enum tree_code code, tree type,
{
case BIT_NOT_EXPR:
*mask = rmask;
- *val = double_int_not (rval);
+ *val = ~rval;
break;
case NEGATE_EXPR:
@@ -1100,13 +1093,13 @@ bit_value_unop_1 (enum tree_code code, tree type,
/* First extend mask and value according to the original type. */
uns = TYPE_UNSIGNED (rtype);
- *mask = double_int_ext (rmask, TYPE_PRECISION (rtype), uns);
- *val = double_int_ext (rval, TYPE_PRECISION (rtype), uns);
+ *mask = rmask.ext (TYPE_PRECISION (rtype), uns);
+ *val = rval.ext (TYPE_PRECISION (rtype), uns);
/* Then extend mask and value according to the target type. */
uns = TYPE_UNSIGNED (type);
- *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
- *val = double_int_ext (*val, TYPE_PRECISION (type), uns);
+ *mask = (*mask).ext (TYPE_PRECISION (type), uns);
+ *val = (*val).ext (TYPE_PRECISION (type), uns);
break;
}
@@ -1135,37 +1128,33 @@ bit_value_binop_1 (enum tree_code code, tree type,
case BIT_AND_EXPR:
/* The mask is constant where there is a known not
set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
- *mask = double_int_and (double_int_ior (r1mask, r2mask),
- double_int_and (double_int_ior (r1val, r1mask),
- double_int_ior (r2val, r2mask)));
- *val = double_int_and (r1val, r2val);
+ *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
+ *val = r1val & r2val;
break;
case BIT_IOR_EXPR:
/* The mask is constant where there is a known
set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
- *mask = double_int_and_not
- (double_int_ior (r1mask, r2mask),
- double_int_ior (double_int_and_not (r1val, r1mask),
- double_int_and_not (r2val, r2mask)));
- *val = double_int_ior (r1val, r2val);
+ *mask = (r1mask | r2mask)
+ .and_not (r1val.and_not (r1mask) | r2val.and_not (r2mask));
+ *val = r1val | r2val;
break;
case BIT_XOR_EXPR:
/* m1 | m2 */
- *mask = double_int_ior (r1mask, r2mask);
- *val = double_int_xor (r1val, r2val);
+ *mask = r1mask | r2mask;
+ *val = r1val ^ r2val;
break;
case LROTATE_EXPR:
case RROTATE_EXPR:
- if (double_int_zero_p (r2mask))
+ if (r2mask.is_zero ())
{
HOST_WIDE_INT shift = r2val.low;
if (code == RROTATE_EXPR)
shift = -shift;
- *mask = double_int_lrotate (r1mask, shift, TYPE_PRECISION (type));
- *val = double_int_lrotate (r1val, shift, TYPE_PRECISION (type));
+ *mask = r1mask.lrotate (shift, TYPE_PRECISION (type));
+ *val = r1val.lrotate (shift, TYPE_PRECISION (type));
}
break;
@@ -1174,7 +1163,7 @@ bit_value_binop_1 (enum tree_code code, tree type,
/* ??? We can handle partially known shift counts if we know
its sign. That way we can tell that (x << (y | 8)) & 255
is zero. */
- if (double_int_zero_p (r2mask))
+ if (r2mask.is_zero ())
{
HOST_WIDE_INT shift = r2val.low;
if (code == RSHIFT_EXPR)
@@ -1186,18 +1175,14 @@ bit_value_binop_1 (enum tree_code code, tree type,
the sign bit was varying. */
if (shift > 0)
{
- *mask = double_int_lshift (r1mask, shift,
- TYPE_PRECISION (type), false);
- *val = double_int_lshift (r1val, shift,
- TYPE_PRECISION (type), false);
+ *mask = r1mask.llshift (shift, TYPE_PRECISION (type));
+ *val = r1val.llshift (shift, TYPE_PRECISION (type));
}
else if (shift < 0)
{
shift = -shift;
- *mask = double_int_rshift (r1mask, shift,
- TYPE_PRECISION (type), !uns);
- *val = double_int_rshift (r1val, shift,
- TYPE_PRECISION (type), !uns);
+ *mask = r1mask.rshift (shift, TYPE_PRECISION (type), !uns);
+ *val = r1val.rshift (shift, TYPE_PRECISION (type), !uns);
}
else
{
@@ -1213,21 +1198,18 @@ bit_value_binop_1 (enum tree_code code, tree type,
double_int lo, hi;
/* Do the addition with unknown bits set to zero, to give carry-ins of
zero wherever possible. */
- lo = double_int_add (double_int_and_not (r1val, r1mask),
- double_int_and_not (r2val, r2mask));
- lo = double_int_ext (lo, TYPE_PRECISION (type), uns);
+ lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
+ lo = lo.ext (TYPE_PRECISION (type), uns);
/* Do the addition with unknown bits set to one, to give carry-ins of
one wherever possible. */
- hi = double_int_add (double_int_ior (r1val, r1mask),
- double_int_ior (r2val, r2mask));
- hi = double_int_ext (hi, TYPE_PRECISION (type), uns);
+ hi = (r1val | r1mask) + (r2val | r2mask);
+ hi = hi.ext (TYPE_PRECISION (type), uns);
/* Each bit in the result is known if (a) the corresponding bits in
both inputs are known, and (b) the carry-in to that bit position
is known. We can check condition (b) by seeing if we got the same
result with minimised carries as with maximised carries. */
- *mask = double_int_ior (double_int_ior (r1mask, r2mask),
- double_int_xor (lo, hi));
- *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
+ *mask = r1mask | r2mask | (lo ^ hi);
+ *mask = (*mask).ext (TYPE_PRECISION (type), uns);
/* It shouldn't matter whether we choose lo or hi here. */
*val = lo;
break;
@@ -1248,8 +1230,8 @@ bit_value_binop_1 (enum tree_code code, tree type,
{
/* Just track trailing zeros in both operands and transfer
them to the other. */
- int r1tz = double_int_ctz (double_int_ior (r1val, r1mask));
- int r2tz = double_int_ctz (double_int_ior (r2val, r2mask));
+ int r1tz = (r1val | r1mask).trailing_zeros ();
+ int r2tz = (r2val | r2mask).trailing_zeros ();
if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT)
{
*mask = double_int_zero;
@@ -1257,8 +1239,8 @@ bit_value_binop_1 (enum tree_code code, tree type,
}
else if (r1tz + r2tz > 0)
{
- *mask = double_int_not (double_int_mask (r1tz + r2tz));
- *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
+ *mask = ~double_int::mask (r1tz + r2tz);
+ *mask = (*mask).ext (TYPE_PRECISION (type), uns);
*val = double_int_zero;
}
break;
@@ -1267,9 +1249,8 @@ bit_value_binop_1 (enum tree_code code, tree type,
case EQ_EXPR:
case NE_EXPR:
{
- double_int m = double_int_ior (r1mask, r2mask);
- if (!double_int_equal_p (double_int_and_not (r1val, m),
- double_int_and_not (r2val, m)))
+ double_int m = r1mask | r2mask;
+ if (r1val.and_not (m) != r2val.and_not (m))
{
*mask = double_int_zero;
*val = ((code == EQ_EXPR) ? double_int_zero : double_int_one);
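
The equality case reduces to masking away the union of the unknown bits and comparing what remains; a tiny standalone sketch with illustrative values:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint64_t v1 = 0x05, m1 = 0xf0;  /* x: high nibble unknown, low = 5 */
  uint64_t v2 = 0x06, m2 = 0xf0;  /* y: high nibble unknown, low = 6 */
  uint64_t m = m1 | m2;
  /* x and y differ in a bit that is known on both sides, so EQ_EXPR
     folds to false and NE_EXPR to true, as in the hunk above. */
  if ((v1 & ~m) != (v2 & ~m))
    printf ("x == y is statically false\n");
  else
    printf ("the comparison result is unknown\n");
  return 0;
}
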
@@ -1300,7 +1281,7 @@ bit_value_binop_1 (enum tree_code code, tree type,
{
int minmax, maxmin;
/* If the most significant bits are not known we know nothing. */
- if (double_int_negative_p (r1mask) || double_int_negative_p (r2mask))
+ if (r1mask.is_negative () || r2mask.is_negative ())
break;
/* For comparisons the signedness is in the comparison operands. */
@@ -1309,10 +1290,8 @@ bit_value_binop_1 (enum tree_code code, tree type,
 /* If we know the most significant bits we know the value
 ranges by means of treating varying bits as zero
 or one. Do a cross comparison of the max/min pairs. */
- maxmin = double_int_cmp (double_int_ior (r1val, r1mask),
- double_int_and_not (r2val, r2mask), uns);
- minmax = double_int_cmp (double_int_and_not (r1val, r1mask),
- double_int_ior (r2val, r2mask), uns);
+ maxmin = (r1val | r1mask).cmp (r2val.and_not (r2mask), uns);
+ minmax = r1val.and_not (r1mask).cmp (r2val | r2mask, uns);
if (maxmin < 0) /* r1 is less than r2. */
{
*mask = double_int_zero;
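
The cross comparison above amounts to interval reasoning with the unknown bits forced to all-zero (minimum) or all-one (maximum); if the maximum of one operand is below the minimum of the other, the ordering is decided. Sketched standalone for the unsigned case only:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint64_t v1 = 0x00, m1 = 0x0f;  /* r1 is somewhere in [0x00, 0x0f] */
  uint64_t v2 = 0x20, m2 = 0x03;  /* r2 is somewhere in [0x20, 0x23] */
  uint64_t max1 = v1 | m1;        /* unknown bits forced to one */
  uint64_t min2 = v2 & ~m2;       /* unknown bits forced to zero */
  if (max1 < min2)
    printf ("r1 < r2 holds for every choice of the unknown bits\n");
  return 0;
}
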
@@ -1358,10 +1337,10 @@ bit_value_unop (enum tree_code code, tree type, tree rhs)
gcc_assert ((rval.lattice_val == CONSTANT
&& TREE_CODE (rval.value) == INTEGER_CST)
- || double_int_minus_one_p (rval.mask));
+ || rval.mask.is_minus_one ());
bit_value_unop_1 (code, type, &value, &mask,
TREE_TYPE (rhs), value_to_double_int (rval), rval.mask);
- if (!double_int_minus_one_p (mask))
+ if (!mask.is_minus_one ())
{
val.lattice_val = CONSTANT;
val.mask = mask;
@@ -1399,14 +1378,14 @@ bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
gcc_assert ((r1val.lattice_val == CONSTANT
&& TREE_CODE (r1val.value) == INTEGER_CST)
- || double_int_minus_one_p (r1val.mask));
+ || r1val.mask.is_minus_one ());
gcc_assert ((r2val.lattice_val == CONSTANT
&& TREE_CODE (r2val.value) == INTEGER_CST)
- || double_int_minus_one_p (r2val.mask));
+ || r2val.mask.is_minus_one ());
bit_value_binop_1 (code, type, &value, &mask,
TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask,
TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask);
- if (!double_int_minus_one_p (mask))
+ if (!mask.is_minus_one ())
{
val.lattice_val = CONSTANT;
val.mask = mask;
@@ -1439,7 +1418,7 @@ bit_value_assume_aligned (gimple stmt)
return ptrval;
gcc_assert ((ptrval.lattice_val == CONSTANT
&& TREE_CODE (ptrval.value) == INTEGER_CST)
- || double_int_minus_one_p (ptrval.mask));
+ || ptrval.mask.is_minus_one ());
align = gimple_call_arg (stmt, 1);
if (!host_integerp (align, 1))
return ptrval;
@@ -1461,7 +1440,7 @@ bit_value_assume_aligned (gimple stmt)
bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
type, value_to_double_int (ptrval), ptrval.mask,
type, value_to_double_int (alignval), alignval.mask);
- if (!double_int_minus_one_p (mask))
+ if (!mask.is_minus_one ())
{
val.lattice_val = CONSTANT;
val.mask = mask;
@@ -1625,7 +1604,7 @@ evaluate_stmt (gimple stmt)
case BUILT_IN_STRNDUP:
val.lattice_val = CONSTANT;
val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
- val.mask = shwi_to_double_int
+ val.mask = double_int::from_shwi
(~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT)
/ BITS_PER_UNIT - 1));
break;
@@ -1637,9 +1616,8 @@ evaluate_stmt (gimple stmt)
: BIGGEST_ALIGNMENT);
val.lattice_val = CONSTANT;
val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
- val.mask = shwi_to_double_int
- (~(((HOST_WIDE_INT) align)
- / BITS_PER_UNIT - 1));
+ val.mask = double_int::from_shwi (~(((HOST_WIDE_INT) align)
+ / BITS_PER_UNIT - 1));
break;
/* These builtins return their first argument, unmodified. */
@@ -1857,7 +1835,7 @@ ccp_fold_stmt (gimple_stmt_iterator *gsi)
fold more conditionals here. */
val = evaluate_stmt (stmt);
if (val.lattice_val != CONSTANT
- || !double_int_zero_p (val.mask))
+ || !val.mask.is_zero ())
return false;
if (dump_file)
@@ -2037,7 +2015,7 @@ visit_cond_stmt (gimple stmt, edge *taken_edge_p)
block = gimple_bb (stmt);
val = evaluate_stmt (stmt);
if (val.lattice_val != CONSTANT
- || !double_int_zero_p (val.mask))
+ || !val.mask.is_zero ())
return SSA_PROP_VARYING;
/* Find which edge out of the conditional block will be taken and add it
diff --git a/gcc/tree-ssa-coalesce.c b/gcc/tree-ssa-coalesce.c
index 5d2ce38c5a6..6217825d1a6 100644
--- a/gcc/tree-ssa-coalesce.c
+++ b/gcc/tree-ssa-coalesce.c
@@ -626,10 +626,10 @@ ssa_conflicts_dump (FILE *file, ssa_conflicts_p ptr)
fprintf (file, "\nConflict graph:\n");
- FOR_EACH_VEC_ELT (bitmap, ptr->conflicts, x, b);
+ FOR_EACH_VEC_ELT (bitmap, ptr->conflicts, x, b)
if (b)
{
- fprintf (dump_file, "%d: ", x);
+ fprintf (file, "%d: ", x);
dump_bitmap (file, b);
}
}
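
The hunk above fixes two independent slips: a stray semicolon after FOR_EACH_VEC_ELT, which made the braced block run once after the loop instead of once per element, and output going to dump_file instead of the FILE argument. The semicolon hazard in isolation:

#include <stdio.h>

int
main (void)
{
  int i;
  /* The ';' ends the loop statement, so the braced block is not the
     loop body: it runs exactly once, with i already equal to 3. */
  for (i = 0; i < 3; i++);
    {
      printf ("i = %d\n", i);
    }
  return 0;
}
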
diff --git a/gcc/tree-ssa-dom.c b/gcc/tree-ssa-dom.c
index 4a89df25fbb..9065006c55e 100644
--- a/gcc/tree-ssa-dom.c
+++ b/gcc/tree-ssa-dom.c
@@ -1231,7 +1231,7 @@ build_and_record_new_cond (enum tree_code code,
cond->ops.binary.opnd1 = op1;
c.value = boolean_true_node;
- VEC_safe_push (cond_equivalence, heap, *p, &c);
+ VEC_safe_push (cond_equivalence, heap, *p, c);
}
/* Record that COND is true and INVERTED is false into the edge information
@@ -1338,7 +1338,7 @@ record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
two slots. */
initialize_expr_from_cond (cond, &c.cond);
c.value = boolean_true_node;
- VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, &c);
+ VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, c);
/* It is possible for INVERTED to be the negation of a comparison,
and not a valid RHS or GIMPLE_COND condition. This happens because
@@ -1347,7 +1347,7 @@ record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
obey the trichotomy law. */
initialize_expr_from_cond (inverted, &c.cond);
c.value = boolean_false_node;
- VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, &c);
+ VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, c);
}
/* A helper function for record_const_or_copy and record_equality.
diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c
index 532b9c5c688..ad407269dd7 100644
--- a/gcc/tree-ssa-forwprop.c
+++ b/gcc/tree-ssa-forwprop.c
@@ -33,6 +33,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple.h"
#include "expr.h"
#include "cfgloop.h"
+#include "tree-vectorizer.h"
/* This pass propagates the RHS of assignment statements into use
sites of the LHS of the assignment. It's basically a specialized
@@ -813,11 +814,10 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs,
{
double_int off = mem_ref_offset (lhs);
tree new_ptr;
- off = double_int_add (off,
- shwi_to_double_int (def_rhs_offset));
+ off += double_int::from_shwi (def_rhs_offset);
if (TREE_CODE (def_rhs_base) == MEM_REF)
{
- off = double_int_add (off, mem_ref_offset (def_rhs_base));
+ off += mem_ref_offset (def_rhs_base);
new_ptr = TREE_OPERAND (def_rhs_base, 0);
}
else
@@ -898,11 +898,10 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs,
{
double_int off = mem_ref_offset (rhs);
tree new_ptr;
- off = double_int_add (off,
- shwi_to_double_int (def_rhs_offset));
+ off += double_int::from_shwi (def_rhs_offset);
if (TREE_CODE (def_rhs_base) == MEM_REF)
{
- off = double_int_add (off, mem_ref_offset (def_rhs_base));
+ off += mem_ref_offset (def_rhs_base);
new_ptr = TREE_OPERAND (def_rhs_base, 0);
}
else
@@ -2373,8 +2372,7 @@ associate_pointerplus (gimple_stmt_iterator *gsi)
if (gimple_assign_rhs1 (def_stmt) != ptr)
return false;
- algn = double_int_to_tree (TREE_TYPE (ptr),
- double_int_not (tree_to_double_int (algn)));
+ algn = double_int_to_tree (TREE_TYPE (ptr), ~tree_to_double_int (algn));
gimple_assign_set_rhs_with_ops (gsi, BIT_AND_EXPR, ptr, algn);
fold_stmt_inplace (gsi);
update_stmt (stmt);
@@ -2537,7 +2535,7 @@ combine_conversions (gimple_stmt_iterator *gsi)
tem = fold_build2 (BIT_AND_EXPR, inside_type,
defop0,
double_int_to_tree
- (inside_type, double_int_mask (inter_prec)));
+ (inside_type, double_int::mask (inter_prec)));
if (!useless_type_conversion_p (type, inside_type))
{
tem = force_gimple_operand_gsi (gsi, tem, true, NULL_TREE, true,
@@ -2577,6 +2575,78 @@ combine_conversions (gimple_stmt_iterator *gsi)
return 0;
}
+/* Combine an element access with a shuffle. Returns true if any
+ changes were made, false otherwise. */
+
+static bool
+simplify_bitfield_ref (gimple_stmt_iterator *gsi)
+{
+ gimple stmt = gsi_stmt (*gsi);
+ gimple def_stmt;
+ tree op, op0, op1, op2;
+ tree elem_type;
+ unsigned idx, n, size;
+ enum tree_code code;
+
+ op = gimple_assign_rhs1 (stmt);
+ gcc_checking_assert (TREE_CODE (op) == BIT_FIELD_REF);
+
+ op0 = TREE_OPERAND (op, 0);
+ if (TREE_CODE (op0) != SSA_NAME
+ || TREE_CODE (TREE_TYPE (op0)) != VECTOR_TYPE)
+ return false;
+
+ elem_type = TREE_TYPE (TREE_TYPE (op0));
+ if (TREE_TYPE (op) != elem_type)
+ return false;
+
+ size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type));
+ op1 = TREE_OPERAND (op, 1);
+ n = TREE_INT_CST_LOW (op1) / size;
+ if (n != 1)
+ return false;
+
+ def_stmt = SSA_NAME_DEF_STMT (op0);
+ if (!def_stmt || !is_gimple_assign (def_stmt)
+ || !can_propagate_from (def_stmt))
+ return false;
+
+ op2 = TREE_OPERAND (op, 2);
+ idx = TREE_INT_CST_LOW (op2) / size;
+
+ code = gimple_assign_rhs_code (def_stmt);
+
+ if (code == VEC_PERM_EXPR)
+ {
+ tree p, m, index, tem;
+ unsigned nelts;
+ m = gimple_assign_rhs3 (def_stmt);
+ if (TREE_CODE (m) != VECTOR_CST)
+ return false;
+ nelts = VECTOR_CST_NELTS (m);
+ idx = TREE_INT_CST_LOW (VECTOR_CST_ELT (m, idx));
+ idx %= 2 * nelts;
+ if (idx < nelts)
+ {
+ p = gimple_assign_rhs1 (def_stmt);
+ }
+ else
+ {
+ p = gimple_assign_rhs2 (def_stmt);
+ idx -= nelts;
+ }
+ index = build_int_cst (TREE_TYPE (TREE_TYPE (m)), idx * size);
+ tem = build3 (BIT_FIELD_REF, TREE_TYPE (op),
+ unshare_expr (p), op1, index);
+ gimple_assign_set_rhs1 (stmt, tem);
+ fold_stmt (gsi);
+ update_stmt (gsi_stmt (*gsi));
+ return true;
+ }
+
+ return false;
+}
+
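At the source level, the new simplify_bitfield_ref lets a single-element read of a shuffle be folded to a read of the source lane. An illustrative sketch using GCC's generic vector extensions; the types and lane choices are made up for the example:

typedef int v4si __attribute__ ((vector_size (16)));

int
extract_after_shuffle (v4si a, v4si b)
{
  v4si mask = { 1, 3, 7, 5 };           /* lane 2 selects element 7 */
  v4si s = __builtin_shuffle (a, b, mask);
  return s[2];   /* 7 >= nelts, so this folds to a read of b[3] */
}
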
/* Determine whether applying the 2 permutations (mask1 then mask2)
gives back one of the input. */
@@ -2609,45 +2679,52 @@ is_combined_permutation_identity (tree mask1, tree mask2)
return maybe_identity1 ? 1 : maybe_identity2 ? 2 : 0;
}
-/* Combine two shuffles in a row. Returns 1 if there were any changes
- made, 2 if cfg-cleanup needs to run. Else it returns 0. */
+/* Combine a shuffle with its arguments. Returns 1 if any changes were
+ made, 2 if cfg-cleanup needs to run, and 0 otherwise. */
static int
simplify_permutation (gimple_stmt_iterator *gsi)
{
gimple stmt = gsi_stmt (*gsi);
gimple def_stmt;
- tree op0, op1, op2, op3;
- enum tree_code code = gimple_assign_rhs_code (stmt);
- enum tree_code code2;
+ tree op0, op1, op2, op3, arg0, arg1;
+ enum tree_code code;
- gcc_checking_assert (code == VEC_PERM_EXPR);
+ gcc_checking_assert (gimple_assign_rhs_code (stmt) == VEC_PERM_EXPR);
op0 = gimple_assign_rhs1 (stmt);
op1 = gimple_assign_rhs2 (stmt);
op2 = gimple_assign_rhs3 (stmt);
- if (TREE_CODE (op0) != SSA_NAME)
- return 0;
-
if (TREE_CODE (op2) != VECTOR_CST)
return 0;
- if (op0 != op1)
- return 0;
+ if (TREE_CODE (op0) == VECTOR_CST)
+ {
+ code = VECTOR_CST;
+ arg0 = op0;
+ }
+ else if (TREE_CODE (op0) == SSA_NAME)
+ {
+ def_stmt = SSA_NAME_DEF_STMT (op0);
+ if (!def_stmt || !is_gimple_assign (def_stmt)
+ || !can_propagate_from (def_stmt))
+ return 0;
- def_stmt = SSA_NAME_DEF_STMT (op0);
- if (!def_stmt || !is_gimple_assign (def_stmt)
- || !can_propagate_from (def_stmt))
+ code = gimple_assign_rhs_code (def_stmt);
+ arg0 = gimple_assign_rhs1 (def_stmt);
+ }
+ else
return 0;
- code2 = gimple_assign_rhs_code (def_stmt);
-
/* Two consecutive shuffles. */
- if (code2 == VEC_PERM_EXPR)
+ if (code == VEC_PERM_EXPR)
{
tree orig;
int ident;
+
+ if (op0 != op1)
+ return 0;
op3 = gimple_assign_rhs3 (def_stmt);
if (TREE_CODE (op3) != VECTOR_CST)
return 0;
@@ -2663,7 +2740,137 @@ simplify_permutation (gimple_stmt_iterator *gsi)
return remove_prop_source_from_use (op0) ? 2 : 1;
}
- return false;
+ /* Shuffle of a constructor. */
+ else if (code == CONSTRUCTOR || code == VECTOR_CST)
+ {
+ tree opt;
+ bool ret = false;
+ if (op0 != op1)
+ {
+ if (TREE_CODE (op0) == SSA_NAME && !has_single_use (op0))
+ return 0;
+
+ if (TREE_CODE (op1) == VECTOR_CST)
+ arg1 = op1;
+ else if (TREE_CODE (op1) == SSA_NAME)
+ {
+ enum tree_code code2;
+
+ if (!has_single_use (op1))
+ return 0;
+
+ gimple def_stmt2 = SSA_NAME_DEF_STMT (op1);
+ if (!def_stmt2 || !is_gimple_assign (def_stmt2)
+ || !can_propagate_from (def_stmt2))
+ return 0;
+
+ code2 = gimple_assign_rhs_code (def_stmt2);
+ if (code2 != CONSTRUCTOR && code2 != VECTOR_CST)
+ return 0;
+ arg1 = gimple_assign_rhs1 (def_stmt2);
+ }
+ else
+ return 0;
+ }
+ else
+ {
+ /* Already used twice in this statement. */
+ if (TREE_CODE (op0) == SSA_NAME && num_imm_uses (op0) > 2)
+ return 0;
+ arg1 = arg0;
+ }
+ opt = fold_ternary (VEC_PERM_EXPR, TREE_TYPE (op0), arg0, arg1, op2);
+ if (!opt
+ || (TREE_CODE (opt) != CONSTRUCTOR && TREE_CODE (opt) != VECTOR_CST))
+ return 0;
+ gimple_assign_set_rhs_from_tree (gsi, opt);
+ update_stmt (gsi_stmt (*gsi));
+ if (TREE_CODE (op0) == SSA_NAME)
+ ret = remove_prop_source_from_use (op0);
+ if (op0 != op1 && TREE_CODE (op1) == SSA_NAME)
+ ret |= remove_prop_source_from_use (op1);
+ return ret ? 2 : 1;
+ }
+
+ return 0;
+}
+
+/* Recognize a VEC_PERM_EXPR. Returns true if there were any changes. */
+
+static bool
+simplify_vector_constructor (gimple_stmt_iterator *gsi)
+{
+ gimple stmt = gsi_stmt (*gsi);
+ gimple def_stmt;
+ tree op, op2, orig, type, elem_type;
+ unsigned elem_size, nelts, i;
+ enum tree_code code;
+ constructor_elt *elt;
+ unsigned char *sel;
+ bool maybe_ident;
+
+ gcc_checking_assert (gimple_assign_rhs_code (stmt) == CONSTRUCTOR);
+
+ op = gimple_assign_rhs1 (stmt);
+ type = TREE_TYPE (op);
+ gcc_checking_assert (TREE_CODE (type) == VECTOR_TYPE);
+
+ nelts = TYPE_VECTOR_SUBPARTS (type);
+ elem_type = TREE_TYPE (type);
+ elem_size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type));
+
+ sel = XALLOCAVEC (unsigned char, nelts);
+ orig = NULL;
+ maybe_ident = true;
+ FOR_EACH_VEC_ELT (constructor_elt, CONSTRUCTOR_ELTS (op), i, elt)
+ {
+ tree ref, op1;
+
+ if (i >= nelts)
+ return false;
+
+ if (TREE_CODE (elt->value) != SSA_NAME)
+ return false;
+ def_stmt = SSA_NAME_DEF_STMT (elt->value);
+ if (!def_stmt || !is_gimple_assign (def_stmt))
+ return false;
+ code = gimple_assign_rhs_code (def_stmt);
+ if (code != BIT_FIELD_REF)
+ return false;
+ op1 = gimple_assign_rhs1 (def_stmt);
+ ref = TREE_OPERAND (op1, 0);
+ if (orig)
+ {
+ if (ref != orig)
+ return false;
+ }
+ else
+ {
+ if (TREE_CODE (ref) != SSA_NAME)
+ return false;
+ orig = ref;
+ }
+ if (TREE_INT_CST_LOW (TREE_OPERAND (op1, 1)) != elem_size)
+ return false;
+ sel[i] = TREE_INT_CST_LOW (TREE_OPERAND (op1, 2)) / elem_size;
+ if (sel[i] != i)
+   maybe_ident = false;
+ }
+ if (i < nelts)
+ return false;
+
+ if (maybe_ident)
+ {
+ gimple_assign_set_rhs_from_tree (gsi, orig);
+ }
+ else
+ {
+ op2 = vect_gen_perm_mask (type, sel);
+ if (!op2)
+ return false;
+ gimple_assign_set_rhs_with_ops_1 (gsi, VEC_PERM_EXPR, orig, orig, op2);
+ }
+ update_stmt (gsi_stmt (*gsi));
+ return true;
}
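
The effect of simplify_vector_constructor, sketched with GCC's generic vector extensions: a CONSTRUCTOR whose elements are single-lane reads of one source vector becomes a VEC_PERM_EXPR of that vector, or the vector itself when every lane i reads lane i. Types here are illustrative:

typedef int v4si __attribute__ ((vector_size (16)));

v4si
reverse (v4si a)
{
  v4si r = { a[3], a[2], a[1], a[0] };  /* recognized as a VEC_PERM_EXPR */
  return r;
}

v4si
copy (v4si a)
{
  v4si r = { a[0], a[1], a[2], a[3] };  /* identity: becomes plain a */
  return r;
}
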
/* Main entry point for the forward propagation and statement combine
@@ -2835,6 +3042,11 @@ ssa_forward_propagate_and_combine (void)
cfg_changed = true;
changed = did_something != 0;
}
+ else if (code == BIT_FIELD_REF)
+ changed = simplify_bitfield_ref (&gsi);
+ else if (code == CONSTRUCTOR
+ && TREE_CODE (TREE_TYPE (rhs1)) == VECTOR_TYPE)
+ changed = simplify_vector_constructor (&gsi);
break;
}
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index 0f61631cc79..67cab3a31ae 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -2113,9 +2113,14 @@ execute_sm_if_changed_flag_set (struct loop *loop, mem_ref_p ref)
gimple_stmt_iterator gsi;
gimple stmt;
- gsi = gsi_for_stmt (loc->stmt);
- stmt = gimple_build_assign (flag, boolean_true_node);
- gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
+ /* Only set the flag for writes. */
+ if (is_gimple_assign (loc->stmt)
+ && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
+ {
+ gsi = gsi_for_stmt (loc->stmt);
+ stmt = gimple_build_assign (flag, boolean_true_node);
+ gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
+ }
}
VEC_free (mem_ref_loc_p, heap, locs);
return flag;
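
The guard above matters because this style of store motion keeps the value in a register across the loop and flushes it afterwards only if a write actually happened, so the flag must be set at store locations and nowhere else. A standalone sketch of the intended shape; the names are illustrative:

void
sink_store (int *mem, int n)
{
  int tmp = *mem;  /* the memory reference lives in a register in the loop */
  _Bool flag = 0;
  int i;
  for (i = 0; i < n; i++)
    if (tmp & 1)   /* reads of *mem must NOT touch the flag */
      {
        tmp += i;  /* a write of *mem: this is where flag is set */
        flag = 1;
      }
  if (flag)
    *mem = tmp;    /* flush only if some iteration actually stored */
}
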
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index c0a825229d7..74097f8cccf 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -1571,8 +1571,7 @@ constant_multiple_of (tree top, tree bot, double_int *mul)
if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &res))
return false;
- *mul = double_int_sext (double_int_mul (res, tree_to_double_int (mby)),
- precision);
+ *mul = (res * tree_to_double_int (mby)).sext (precision);
return true;
case PLUS_EXPR:
@@ -1582,21 +1581,20 @@ constant_multiple_of (tree top, tree bot, double_int *mul)
return false;
if (code == MINUS_EXPR)
- p1 = double_int_neg (p1);
- *mul = double_int_sext (double_int_add (p0, p1), precision);
+ p1 = -p1;
+ *mul = (p0 + p1).sext (precision);
return true;
case INTEGER_CST:
if (TREE_CODE (bot) != INTEGER_CST)
return false;
- p0 = double_int_sext (tree_to_double_int (top), precision);
- p1 = double_int_sext (tree_to_double_int (bot), precision);
- if (double_int_zero_p (p1))
+ p0 = tree_to_double_int (top).sext (precision);
+ p1 = tree_to_double_int (bot).sext (precision);
+ if (p1.is_zero ())
return false;
- *mul = double_int_sext (double_int_sdivmod (p0, p1, FLOOR_DIV_EXPR, &res),
- precision);
- return double_int_zero_p (res);
+ *mul = p0.sdivmod (p1, FLOOR_DIV_EXPR, &res).sext (precision);
+ return res.is_zero ();
default:
return false;
@@ -3000,7 +2998,7 @@ get_computation_aff (struct loop *loop,
aff_combination_add (&cbase_aff, &cstep_aff);
}
- aff_combination_scale (&cbase_aff, double_int_neg (rat));
+ aff_combination_scale (&cbase_aff, -rat);
aff_combination_add (aff, &cbase_aff);
if (common_type != uutype)
aff_combination_convert (aff, uutype);
@@ -3777,7 +3775,7 @@ compare_aff_trees (aff_tree *aff1, aff_tree *aff2)
for (i = 0; i < aff1->n; i++)
{
- if (double_int_cmp (aff1->elts[i].coef, aff2->elts[i].coef, 0) != 0)
+ if (aff1->elts[i].coef != aff2->elts[i].coef)
return false;
if (!operand_equal_p (aff1->elts[i].val, aff2->elts[i].val, 0))
@@ -3904,7 +3902,7 @@ get_loop_invariant_expr_id (struct ivopts_data *data, tree ubase,
tree_to_aff_combination (ub, TREE_TYPE (ub), &ubase_aff);
tree_to_aff_combination (cb, TREE_TYPE (cb), &cbase_aff);
- aff_combination_scale (&cbase_aff, shwi_to_double_int (-1 * ratio));
+ aff_combination_scale (&cbase_aff, double_int::from_shwi (-1 * ratio));
aff_combination_add (&ubase_aff, &cbase_aff);
expr = aff_combination_to_tree (&ubase_aff);
return get_expr_id (data, expr);
@@ -3990,8 +3988,8 @@ get_computation_cost_at (struct ivopts_data *data,
if (!constant_multiple_of (ustep, cstep, &rat))
return infinite_cost;
- if (double_int_fits_in_shwi_p (rat))
- ratio = double_int_to_shwi (rat);
+ if (rat.fits_shwi ())
+ ratio = rat.to_shwi ();
else
return infinite_cost;
@@ -4504,7 +4502,7 @@ iv_elimination_compare_lt (struct ivopts_data *data,
aff_combination_scale (&tmpa, double_int_minus_one);
aff_combination_add (&tmpb, &tmpa);
aff_combination_add (&tmpb, &nit);
- if (tmpb.n != 0 || !double_int_equal_p (tmpb.offset, double_int_one))
+ if (tmpb.n != 0 || tmpb.offset != double_int_one)
return false;
/* Finally, check that CAND->IV->BASE - CAND->IV->STEP * A does not
@@ -4594,9 +4592,9 @@ may_eliminate_iv (struct ivopts_data *data,
max_niter = desc->max;
if (stmt_after_increment (loop, cand, use->stmt))
- max_niter = double_int_add (max_niter, double_int_one);
+ max_niter += double_int_one;
period_value = tree_to_double_int (period);
- if (double_int_ucmp (max_niter, period_value) > 0)
+ if (max_niter.ugt (period_value))
{
/* See if we can take advantage of inferred loop bound information. */
if (data->loop_single_exit_p)
@@ -4604,7 +4602,7 @@ may_eliminate_iv (struct ivopts_data *data,
if (!max_loop_iterations (loop, &max_niter))
return false;
/* The loop bound is already adjusted by adding 1. */
- if (double_int_ucmp (max_niter, period_value) > 0)
+ if (max_niter.ugt (period_value))
return false;
}
else
diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c
index 8f1e2b4a964..84ae6104490 100644
--- a/gcc/tree-ssa-loop-niter.c
+++ b/gcc/tree-ssa-loop-niter.c
@@ -91,7 +91,7 @@ split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
*var = op0;
/* Always sign extend the offset. */
off = tree_to_double_int (op1);
- off = double_int_sext (off, TYPE_PRECISION (type));
+ off = off.sext (TYPE_PRECISION (type));
mpz_set_double_int (offset, off, false);
if (negate)
mpz_neg (offset, offset);
@@ -170,7 +170,7 @@ bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
}
mpz_init (m);
- mpz_set_double_int (m, double_int_mask (TYPE_PRECISION (type)), true);
+ mpz_set_double_int (m, double_int::mask (TYPE_PRECISION (type)), true);
mpz_add_ui (m, m, 1);
mpz_sub (bnds->up, x, y);
mpz_set (bnds->below, bnds->up);
@@ -457,7 +457,7 @@ bounds_add (bounds *bnds, double_int delta, tree type)
mpz_set_double_int (mdelta, delta, false);
mpz_init (max);
- mpz_set_double_int (max, double_int_mask (TYPE_PRECISION (type)), true);
+ mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true);
mpz_add (bnds->up, bnds->up, mdelta);
mpz_add (bnds->below, bnds->below, mdelta);
@@ -573,7 +573,7 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
the whole # of iterations analysis will fail). */
if (!no_overflow)
{
- max = double_int_mask (TYPE_PRECISION (TREE_TYPE (c))
+ max = double_int::mask (TYPE_PRECISION (TREE_TYPE (c))
- tree_low_cst (num_ending_zeros (s), 1));
mpz_set_double_int (bnd, max, true);
return;
@@ -581,7 +581,7 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
/* Now we know that the induction variable does not overflow, so the loop
iterates at most (range of type / S) times. */
- mpz_set_double_int (bnd, double_int_mask (TYPE_PRECISION (TREE_TYPE (c))),
+ mpz_set_double_int (bnd, double_int::mask (TYPE_PRECISION (TREE_TYPE (c))),
true);
/* If the induction variable is guaranteed to reach the value of C before
@@ -922,9 +922,8 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
dstep = tree_to_double_int (iv0->step);
else
{
- dstep = double_int_sext (tree_to_double_int (iv1->step),
- TYPE_PRECISION (type));
- dstep = double_int_neg (dstep);
+ dstep = tree_to_double_int (iv1->step).sext (TYPE_PRECISION (type));
+ dstep = -dstep;
}
mpz_init (mstep);
@@ -935,7 +934,7 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
mpz_init (max);
- mpz_set_double_int (max, double_int_mask (TYPE_PRECISION (type)), true);
+ mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true);
mpz_add (max, max, mstep);
no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
/* For pointers, only values lying inside a single object
@@ -2394,7 +2393,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
/* If the bound does not fit in TYPE, max. value of TYPE could be
attained. */
- if (double_int_ucmp (max, bnd) < 0)
+ if (max.ult (bnd))
return max;
return bnd;
@@ -2410,27 +2409,27 @@ derive_constant_upper_bound_ops (tree type, tree op0,
 choose the most logical way to treat this constant regardless
 of the signedness of the type. */
cst = tree_to_double_int (op1);
- cst = double_int_sext (cst, TYPE_PRECISION (type));
+ cst = cst.sext (TYPE_PRECISION (type));
if (code != MINUS_EXPR)
- cst = double_int_neg (cst);
+ cst = -cst;
bnd = derive_constant_upper_bound (op0);
- if (double_int_negative_p (cst))
+ if (cst.is_negative ())
{
- cst = double_int_neg (cst);
+ cst = -cst;
/* Avoid CST == 0x80000... */
- if (double_int_negative_p (cst))
+ if (cst.is_negative ())
 return max;
/* OP0 + CST. We need to check that
BND <= MAX (type) - CST. */
- mmax = double_int_sub (max, cst);
- if (double_int_ucmp (bnd, mmax) > 0)
+ mmax -= cst;
+ if (bnd.ugt (mmax))
return max;
- return double_int_add (bnd, cst);
+ return bnd + cst;
}
else
{
@@ -2447,7 +2446,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
/* This should only happen if the type is unsigned; however, for
buggy programs that use overflowing signed arithmetics even with
-fno-wrapv, this condition may also be true for signed values. */
- if (double_int_ucmp (bnd, cst) < 0)
+ if (bnd.ult (cst))
return max;
if (TYPE_UNSIGNED (type))
@@ -2458,7 +2457,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
return max;
}
- bnd = double_int_sub (bnd, cst);
+ bnd -= cst;
}
return bnd;
@@ -2470,7 +2469,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
return max;
bnd = derive_constant_upper_bound (op0);
- return double_int_udiv (bnd, tree_to_double_int (op1), FLOOR_DIV_EXPR);
+ return bnd.udiv (tree_to_double_int (op1), FLOOR_DIV_EXPR);
case BIT_AND_EXPR:
if (TREE_CODE (op1) != INTEGER_CST
@@ -2503,14 +2502,14 @@ record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
current estimation is smaller. */
if (upper
&& (!loop->any_upper_bound
- || double_int_ucmp (i_bound, loop->nb_iterations_upper_bound) < 0))
+ || i_bound.ult (loop->nb_iterations_upper_bound)))
{
loop->any_upper_bound = true;
loop->nb_iterations_upper_bound = i_bound;
}
if (realistic
&& (!loop->any_estimate
- || double_int_ucmp (i_bound, loop->nb_iterations_estimate) < 0))
+ || i_bound.ult (loop->nb_iterations_estimate)))
{
loop->any_estimate = true;
loop->nb_iterations_estimate = i_bound;
@@ -2520,8 +2519,7 @@ record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
number of iterations, use the upper bound instead. */
if (loop->any_upper_bound
&& loop->any_estimate
- && double_int_ucmp (loop->nb_iterations_upper_bound,
- loop->nb_iterations_estimate) < 0)
+ && loop->nb_iterations_upper_bound.ult (loop->nb_iterations_estimate))
loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
}
@@ -2583,10 +2581,10 @@ record_estimate (struct loop *loop, tree bound, double_int i_bound,
delta = double_int_zero;
else
delta = double_int_one;
- i_bound = double_int_add (i_bound, delta);
+ i_bound += delta;
/* If an overflow occurred, ignore the result. */
- if (double_int_ucmp (i_bound, delta) < 0)
+ if (i_bound.ult (delta))
return;
record_niter_bound (loop, i_bound, realistic, upper);
@@ -3050,9 +3048,9 @@ estimated_loop_iterations_int (struct loop *loop)
if (!estimated_loop_iterations (loop, &nit))
return -1;
- if (!double_int_fits_in_shwi_p (nit))
+ if (!nit.fits_shwi ())
return -1;
- hwi_nit = double_int_to_shwi (nit);
+ hwi_nit = nit.to_shwi ();
return hwi_nit < 0 ? -1 : hwi_nit;
}
@@ -3070,9 +3068,9 @@ max_loop_iterations_int (struct loop *loop)
if (!max_loop_iterations (loop, &nit))
return -1;
- if (!double_int_fits_in_shwi_p (nit))
+ if (!nit.fits_shwi ())
return -1;
- hwi_nit = double_int_to_shwi (nit);
+ hwi_nit = nit.to_shwi ();
return hwi_nit < 0 ? -1 : hwi_nit;
}
@@ -3129,9 +3127,9 @@ max_stmt_executions (struct loop *loop, double_int *nit)
nit_minus_one = *nit;
- *nit = double_int_add (*nit, double_int_one);
+ *nit += double_int_one;
- return double_int_ucmp (*nit, nit_minus_one) > 0;
+ return (*nit).ugt (nit_minus_one);
}
/* Sets NIT to the estimated number of executions of the latch of the
@@ -3148,9 +3146,9 @@ estimated_stmt_executions (struct loop *loop, double_int *nit)
nit_minus_one = *nit;
- *nit = double_int_add (*nit, double_int_one);
+ *nit += double_int_one;
- return double_int_ucmp (*nit, nit_minus_one) > 0;
+ return (*nit).ugt (nit_minus_one);
}
/* Records estimates on numbers of iterations of loops. */
@@ -3255,8 +3253,8 @@ n_of_executions_at_most (gimple stmt,
|| (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
&& !stmt_dominates_stmt_p (niter_bound->stmt, stmt)))
{
- bound = double_int_add (bound, double_int_one);
- if (double_int_zero_p (bound)
+ bound += double_int_one;
+ if (bound.is_zero ()
|| !double_int_fits_to_tree_p (nit_type, bound))
return false;
}
diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c
index c3392fb14c8..94d19afc2c0 100644
--- a/gcc/tree-ssa-math-opts.c
+++ b/gcc/tree-ssa-math-opts.c
@@ -1985,7 +1985,11 @@ widening_mult_conversion_strippable_p (tree result_type, gimple stmt)
the operation and doesn't narrow the range. */
inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
- if (TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type)
+ /* If the inner-most type is unsigned, then we can strip any
+ intermediate widening operation. If it's signed, then the
+ intermediate widening operation must also be signed. */
+ if ((TYPE_UNSIGNED (inner_op_type)
+ || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
&& TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
return true;
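
A standalone illustration of why the relaxed test is safe: zero-extensions compose, so an intermediate widening cast from an unsigned inner type never changes the value, while an unsigned intermediate cast of a signed value can. The types here are illustrative:

#include <stdint.h>

uint64_t
widen_unsigned_inner (uint16_t x)
{
  return (uint64_t) (uint32_t) x;   /* identical to (uint64_t) x */
}

uint64_t
widen_signed_inner (int16_t x)
{
  /* Not equal to (uint64_t) x when x < 0: the unsigned intermediate
     zero-fills the top 32 bits where direct conversion sign-extends. */
  return (uint64_t) (uint32_t) x;
}
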
diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c
index 4c729158deb..c8ec5026659 100644
--- a/gcc/tree-ssa-phiopt.c
+++ b/gcc/tree-ssa-phiopt.c
@@ -720,9 +720,7 @@ jump_function_from_stmt (tree *arg, gimple stmt)
&offset);
if (tem
&& TREE_CODE (tem) == MEM_REF
- && double_int_zero_p
- (double_int_add (mem_ref_offset (tem),
- shwi_to_double_int (offset))))
+ && (mem_ref_offset (tem) + double_int::from_shwi (offset)).is_zero ())
{
*arg = TREE_OPERAND (tem, 0);
return true;
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index bb1a03dfe99..64dedb16842 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -43,6 +43,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-scalar-evolution.h"
#include "params.h"
#include "dbgcnt.h"
+#include "domwalk.h"
/* TODO:
@@ -351,8 +352,6 @@ get_or_alloc_expr_for_name (tree name)
return result;
}
-static bool in_fre = false;
-
/* An unordered bitmap set. One bitmap tracks values, the other,
expressions. */
typedef struct bitmap_set
@@ -637,6 +636,25 @@ get_expr_value_id (pre_expr expr)
}
}
+/* Return a SCCVN valnum (SSA name or constant) for the PRE value-id VAL. */
+
+static tree
+sccvn_valnum_from_value_id (unsigned int val)
+{
+ bitmap_iterator bi;
+ unsigned int i;
+ bitmap exprset = VEC_index (bitmap, value_expressions, val);
+ EXECUTE_IF_SET_IN_BITMAP (exprset, 0, i, bi)
+ {
+ pre_expr vexpr = expression_for_id (i);
+ if (vexpr->kind == NAME)
+ return VN_INFO (PRE_EXPR_NAME (vexpr))->valnum;
+ else if (vexpr->kind == CONSTANT)
+ return PRE_EXPR_CONSTANT (vexpr);
+ }
+ return NULL_TREE;
+}
+
/* Remove an expression EXPR from a bitmapped set. */
static void
@@ -1022,16 +1040,13 @@ DEBUG_FUNCTION void
debug_bitmap_sets_for (basic_block bb)
{
print_bitmap_set (stderr, AVAIL_OUT (bb), "avail_out", bb->index);
- if (!in_fre)
- {
- print_bitmap_set (stderr, EXP_GEN (bb), "exp_gen", bb->index);
- print_bitmap_set (stderr, PHI_GEN (bb), "phi_gen", bb->index);
- print_bitmap_set (stderr, TMP_GEN (bb), "tmp_gen", bb->index);
- print_bitmap_set (stderr, ANTIC_IN (bb), "antic_in", bb->index);
- if (do_partial_partial)
- print_bitmap_set (stderr, PA_IN (bb), "pa_in", bb->index);
- print_bitmap_set (stderr, NEW_SETS (bb), "new_sets", bb->index);
- }
+ print_bitmap_set (stderr, EXP_GEN (bb), "exp_gen", bb->index);
+ print_bitmap_set (stderr, PHI_GEN (bb), "phi_gen", bb->index);
+ print_bitmap_set (stderr, TMP_GEN (bb), "tmp_gen", bb->index);
+ print_bitmap_set (stderr, ANTIC_IN (bb), "antic_in", bb->index);
+ if (do_partial_partial)
+ print_bitmap_set (stderr, PA_IN (bb), "pa_in", bb->index);
+ print_bitmap_set (stderr, NEW_SETS (bb), "new_sets", bb->index);
}
/* Print out the expressions that have VAL to OUTFILE. */
@@ -1291,7 +1306,7 @@ translate_vuse_through_block (VEC (vn_reference_op_s, heap) *operands,
unsigned int cnt;
/* Try to find a vuse that dominates this phi node by skipping
non-clobbering statements. */
- vuse = get_continuation_for_phi (phi, &ref, &cnt, &visited);
+ vuse = get_continuation_for_phi (phi, &ref, &cnt, &visited, false);
if (visited)
BITMAP_FREE (visited);
}
@@ -1402,11 +1417,9 @@ get_representative_for (const pre_expr e)
that we will return. */
name = make_temp_ssa_name (get_expr_type (e), gimple_build_nop (), "pretmp");
VN_INFO_GET (name)->value_id = value_id;
- if (e->kind == CONSTANT)
- VN_INFO (name)->valnum = PRE_EXPR_CONSTANT (e);
- else
+ VN_INFO (name)->valnum = sccvn_valnum_from_value_id (value_id);
+ if (VN_INFO (name)->valnum == NULL_TREE)
VN_INFO (name)->valnum = name;
-
add_to_value (value_id, get_or_alloc_expr_for_name (name));
if (dump_file)
{
@@ -1600,11 +1613,9 @@ phi_translate_1 (pre_expr expr, bitmap_set_t set1, bitmap_set_t set2,
&& TREE_CODE (op[2]) == INTEGER_CST)
{
double_int off = tree_to_double_int (op[0]);
- off = double_int_add (off,
- double_int_neg
- (tree_to_double_int (op[1])));
- off = double_int_mul (off, tree_to_double_int (op[2]));
- if (double_int_fits_in_shwi_p (off))
+ off += -tree_to_double_int (op[1]);
+ off *= tree_to_double_int (op[2]);
+ if (off.fits_shwi ())
newop.off = off.low;
}
VEC_replace (vn_reference_op_s, newoperands, j, newop);
@@ -2565,23 +2576,6 @@ compute_antic (void)
sbitmap_free (changed_blocks);
}
-/* Return true if OP is a tree which we can perform PRE on.
- This may not match the operations we can value number, but in
- a perfect world would. */
-
-static bool
-can_PRE_operation (tree op)
-{
- return UNARY_CLASS_P (op)
- || BINARY_CLASS_P (op)
- || COMPARISON_CLASS_P (op)
- || TREE_CODE (op) == MEM_REF
- || TREE_CODE (op) == COMPONENT_REF
- || TREE_CODE (op) == VIEW_CONVERT_EXPR
- || TREE_CODE (op) == CALL_EXPR
- || TREE_CODE (op) == ARRAY_REF;
-}
-
/* Inserted expressions are placed onto this worklist, which is used
for performing quick dead code elimination of insertions we made
@@ -3074,8 +3068,7 @@ create_expression_by_pieces (basic_block block, pre_expr expr,
VN_INFO (forcedname)->value_id = get_next_value_id ();
nameexpr = get_or_alloc_expr_for_name (forcedname);
add_to_value (VN_INFO (forcedname)->value_id, nameexpr);
- if (!in_fre)
- bitmap_value_replace_in_set (NEW_SETS (block), nameexpr);
+ bitmap_value_replace_in_set (NEW_SETS (block), nameexpr);
bitmap_value_replace_in_set (AVAIL_OUT (block), nameexpr);
}
}
@@ -3099,9 +3092,12 @@ create_expression_by_pieces (basic_block block, pre_expr expr,
we are creating the expression by pieces, and this particular piece of
the expression may have been represented. There is no harm in replacing
here. */
- VN_INFO_GET (name)->valnum = name;
value_id = get_expr_value_id (expr);
- VN_INFO (name)->value_id = value_id;
+ VN_INFO_GET (name)->value_id = value_id;
+ VN_INFO (name)->valnum = sccvn_valnum_from_value_id (value_id);
+ if (VN_INFO (name)->valnum == NULL_TREE)
+ VN_INFO (name)->valnum = name;
+ gcc_assert (VN_INFO (name)->valnum != NULL_TREE);
nameexpr = get_or_alloc_expr_for_name (name);
add_to_value (value_id, nameexpr);
if (NEW_SETS (block))
@@ -3341,9 +3337,11 @@ insert_into_preds_of_block (basic_block block, unsigned int exprnum,
phi = create_phi_node (temp, block);
gimple_set_plf (phi, NECESSARY, false);
- VN_INFO_GET (gimple_phi_result (phi))->valnum = gimple_phi_result (phi);
- VN_INFO (gimple_phi_result (phi))->value_id = val;
- bitmap_set_bit (inserted_exprs, SSA_NAME_VERSION (gimple_phi_result (phi)));
+ VN_INFO_GET (temp)->value_id = val;
+ VN_INFO (temp)->valnum = sccvn_valnum_from_value_id (val);
+ if (VN_INFO (temp)->valnum == NULL_TREE)
+ VN_INFO (temp)->valnum = temp;
+ bitmap_set_bit (inserted_exprs, SSA_NAME_VERSION (temp));
FOR_EACH_EDGE (pred, ei, block->preds)
{
pre_expr ae = VEC_index (pre_expr, avail, pred->dest_idx);
@@ -3355,7 +3353,7 @@ insert_into_preds_of_block (basic_block block, unsigned int exprnum,
add_phi_arg (phi, PRE_EXPR_NAME (ae), pred, UNKNOWN_LOCATION);
}
- newphi = get_or_alloc_expr_for_name (gimple_phi_result (phi));
+ newphi = get_or_alloc_expr_for_name (temp);
add_to_value (val, newphi);
/* The value should *not* exist in PHI_GEN, or else we wouldn't be doing
@@ -3784,8 +3782,6 @@ add_to_exp_gen (basic_block block, tree op)
{
pre_expr result;
- gcc_checking_assert (!in_fre);
-
if (TREE_CODE (op) == SSA_NAME && ssa_undefined_value_p (op))
return;
@@ -3799,6 +3795,7 @@ static void
make_values_for_phi (gimple phi, basic_block block)
{
tree result = gimple_phi_result (phi);
+ unsigned i;
/* We have no need for virtual phis, as they don't represent
actual computations. */
@@ -3808,18 +3805,14 @@ make_values_for_phi (gimple phi, basic_block block)
pre_expr e = get_or_alloc_expr_for_name (result);
add_to_value (get_expr_value_id (e), e);
bitmap_value_insert_into_set (AVAIL_OUT (block), e);
- if (!in_fre)
+ bitmap_insert_into_set (PHI_GEN (block), e);
+ for (i = 0; i < gimple_phi_num_args (phi); ++i)
{
- unsigned i;
- bitmap_insert_into_set (PHI_GEN (block), e);
- for (i = 0; i < gimple_phi_num_args (phi); ++i)
+ tree arg = gimple_phi_arg_def (phi, i);
+ if (TREE_CODE (arg) == SSA_NAME)
{
- tree arg = gimple_phi_arg_def (phi, i);
- if (TREE_CODE (arg) == SSA_NAME)
- {
- e = get_or_alloc_expr_for_name (arg);
- add_to_value (get_expr_value_id (e), e);
- }
+ e = get_or_alloc_expr_for_name (arg);
+ add_to_value (get_expr_value_id (e), e);
}
}
}
@@ -3857,8 +3850,7 @@ compute_avail (void)
e = get_or_alloc_expr_for_name (name);
add_to_value (get_expr_value_id (e), e);
- if (!in_fre)
- bitmap_insert_into_set (TMP_GEN (ENTRY_BLOCK_PTR), e);
+ bitmap_insert_into_set (TMP_GEN (ENTRY_BLOCK_PTR), e);
bitmap_value_insert_into_set (AVAIL_OUT (ENTRY_BLOCK_PTR), e);
}
@@ -3926,15 +3918,10 @@ compute_avail (void)
pre_expr e = get_or_alloc_expr_for_name (op);
add_to_value (get_expr_value_id (e), e);
- if (!in_fre)
- bitmap_insert_into_set (TMP_GEN (block), e);
+ bitmap_insert_into_set (TMP_GEN (block), e);
bitmap_value_insert_into_set (AVAIL_OUT (block), e);
}
- /* That's all we need to do when doing FRE. */
- if (in_fre)
- continue;
-
if (gimple_has_side_effects (stmt) || stmt_could_throw_p (stmt))
continue;
@@ -4125,407 +4112,471 @@ compute_avail (void)
free (worklist);
}
-/* Insert the expression for SSA_VN that SCCVN thought would be simpler
- than the available expressions for it. The insertion point is
- right before the first use in STMT. Returns the SSA_NAME that should
- be used for replacement. */
+
+/* Local state for the eliminate domwalk. */
+static VEC (gimple, heap) *el_to_remove;
+static VEC (gimple, heap) *el_to_update;
+static unsigned int el_todo;
+static VEC (tree, heap) *el_avail;
+static VEC (tree, heap) *el_avail_stack;
+
+/* Return a leader for OP that is available at the current point of the
+ eliminate domwalk. */
static tree
-do_SCCVN_insertion (gimple stmt, tree ssa_vn)
+eliminate_avail (tree op)
{
- basic_block bb = gimple_bb (stmt);
- gimple_stmt_iterator gsi;
- gimple_seq stmts = NULL;
- tree expr;
- pre_expr e;
+ tree valnum = VN_INFO (op)->valnum;
+ if (TREE_CODE (valnum) == SSA_NAME)
+ {
+ if (SSA_NAME_IS_DEFAULT_DEF (valnum))
+ return valnum;
+ if (VEC_length (tree, el_avail) > SSA_NAME_VERSION (valnum))
+ return VEC_index (tree, el_avail, SSA_NAME_VERSION (valnum));
+ }
+ else if (is_gimple_min_invariant (valnum))
+ return valnum;
+ return NULL_TREE;
+}
+
+/* At the current point of the eliminate domwalk, make OP available. */
- /* First create a value expression from the expression we want
- to insert and associate it with the value handle for SSA_VN. */
- e = get_or_alloc_expr_for (vn_get_expr_for (ssa_vn));
- if (e == NULL)
+static void
+eliminate_push_avail (tree op)
+{
+ tree valnum = VN_INFO (op)->valnum;
+ if (TREE_CODE (valnum) == SSA_NAME)
+ {
+ if (VEC_length (tree, el_avail) <= SSA_NAME_VERSION (valnum))
+ VEC_safe_grow_cleared (tree, heap,
+ el_avail, SSA_NAME_VERSION (valnum) + 1);
+ VEC_replace (tree, el_avail, SSA_NAME_VERSION (valnum), op);
+ VEC_safe_push (tree, heap, el_avail_stack, op);
+ }
+}
+
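Together with eliminate_leave_block further below, these helpers implement a scoped availability map over the dominator walk: entering a block pushes a sentinel, leaders made available inside the block are stacked on top of it, and leaving the block pops back to the sentinel. A minimal standalone sketch of that discipline; the array sizes and the assumption that version numbers start at 1 are inventions of the example:

#include <stdio.h>

#define NVERS 64

static int avail[NVERS];  /* SSA version -> leader version (0 = none) */
static int stack[NVERS];  /* pushed versions; 0 is the per-block sentinel */
static int sp;

static void
enter_block (void)
{
  stack[sp++] = 0;        /* mark entry to a new dominator-tree block */
}

static void
push_avail (int version, int leader)
{
  avail[version] = leader;
  stack[sp++] = version;
}

static void
leave_block (void)
{
  int v;
  while ((v = stack[--sp]) != 0)
    avail[v] = 0;         /* this leader no longer dominates us */
}

int
main (void)
{
  enter_block ();         /* outer block */
  push_avail (3, 3);
  enter_block ();         /* dominated block */
  push_avail (5, 3);
  leave_block ();         /* version 5's leader goes out of scope */
  printf ("avail[5]=%d avail[3]=%d\n", avail[5], avail[3]);  /* 0 and 3 */
  return 0;
}
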
+/* Insert the expression recorded by SCCVN for VAL at *GSI. Returns
+ the leader for the expression if insertion was successful. */
+
+static tree
+eliminate_insert (gimple_stmt_iterator *gsi, tree val)
+{
+ tree expr = vn_get_expr_for (val);
+ if (!CONVERT_EXPR_P (expr)
+ && TREE_CODE (expr) != VIEW_CONVERT_EXPR)
return NULL_TREE;
- /* Then use create_expression_by_pieces to generate a valid
- expression to insert at this point of the IL stream. */
- expr = create_expression_by_pieces (bb, e, &stmts, stmt, NULL);
- if (expr == NULL_TREE)
+ tree op = TREE_OPERAND (expr, 0);
+ tree leader = TREE_CODE (op) == SSA_NAME ? eliminate_avail (op) : op;
+ if (!leader)
return NULL_TREE;
- gsi = gsi_for_stmt (stmt);
- gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
- return expr;
+ tree res = make_temp_ssa_name (TREE_TYPE (val), NULL, "pretmp");
+ gimple tem = gimple_build_assign (res,
+ build1 (TREE_CODE (expr),
+ TREE_TYPE (expr), leader));
+ gsi_insert_before (gsi, tem, GSI_SAME_STMT);
+ VN_INFO_GET (res)->valnum = val;
+
+ if (TREE_CODE (leader) == SSA_NAME)
+ gimple_set_plf (SSA_NAME_DEF_STMT (leader), NECESSARY, true);
+
+ pre_stats.insertions++;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Inserted ");
+ print_gimple_stmt (dump_file, tem, 0, 0);
+ }
+
+ return res;
}
-/* Eliminate fully redundant computations. */
+/* Perform elimination for the basic-block B during the domwalk. */
-static unsigned int
-eliminate (void)
+static void
+eliminate_bb (dom_walk_data *, basic_block b)
{
- VEC (gimple, heap) *to_remove = NULL;
- VEC (gimple, heap) *to_update = NULL;
- basic_block b;
- unsigned int todo = 0;
gimple_stmt_iterator gsi;
gimple stmt;
- unsigned i;
- FOR_EACH_BB (b)
+ /* Mark new bb. */
+ VEC_safe_push (tree, heap, el_avail_stack, NULL_TREE);
+
+ for (gsi = gsi_start_phis (b); !gsi_end_p (gsi);)
{
- for (gsi = gsi_start_bb (b); !gsi_end_p (gsi); gsi_next (&gsi))
+ gimple stmt, phi = gsi_stmt (gsi);
+ tree sprime = NULL_TREE, res = PHI_RESULT (phi);
+ gimple_stmt_iterator gsi2;
+
+ /* We want to perform redundant PHI elimination. Do so by
+ replacing the PHI with a single copy if possible.
+ Do not touch inserted, single-argument or virtual PHIs. */
+ if (gimple_phi_num_args (phi) == 1
+ || virtual_operand_p (res))
{
- tree lhs = NULL_TREE;
- tree rhs = NULL_TREE;
-
- stmt = gsi_stmt (gsi);
-
- if (gimple_has_lhs (stmt))
- lhs = gimple_get_lhs (stmt);
-
- if (gimple_assign_single_p (stmt))
- rhs = gimple_assign_rhs1 (stmt);
-
- /* Lookup the RHS of the expression, see if we have an
- available computation for it. If so, replace the RHS with
- the available computation.
-
- See PR43491.
- We don't replace global register variable when it is a the RHS of
- a single assign. We do replace local register variable since gcc
- does not guarantee local variable will be allocated in register. */
- if (gimple_has_lhs (stmt)
- && TREE_CODE (lhs) == SSA_NAME
- && !gimple_assign_ssa_name_copy_p (stmt)
- && (!gimple_assign_single_p (stmt)
- || (!is_gimple_min_invariant (rhs)
- && (gimple_assign_rhs_code (stmt) != VAR_DECL
- || !is_global_var (rhs)
- || !DECL_HARD_REGISTER (rhs))))
- && !gimple_has_volatile_ops (stmt)
- && !has_zero_uses (lhs))
- {
- tree sprime = NULL;
- pre_expr lhsexpr = get_or_alloc_expr_for_name (lhs);
- pre_expr sprimeexpr;
- gimple orig_stmt = stmt;
+ gsi_next (&gsi);
+ continue;
+ }
- sprimeexpr = bitmap_find_leader (AVAIL_OUT (b),
- get_expr_value_id (lhsexpr),
- NULL);
+ sprime = eliminate_avail (res);
+ if (!sprime
+ || sprime == res)
+ {
+ eliminate_push_avail (res);
+ gsi_next (&gsi);
+ continue;
+ }
+ else if (is_gimple_min_invariant (sprime))
+ {
+ if (!useless_type_conversion_p (TREE_TYPE (res),
+ TREE_TYPE (sprime)))
+ sprime = fold_convert (TREE_TYPE (res), sprime);
+ }
- if (sprimeexpr)
- {
- if (sprimeexpr->kind == CONSTANT)
- sprime = PRE_EXPR_CONSTANT (sprimeexpr);
- else if (sprimeexpr->kind == NAME)
- sprime = PRE_EXPR_NAME (sprimeexpr);
- else
- gcc_unreachable ();
- }
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Replaced redundant PHI node defining ");
+ print_generic_expr (dump_file, res, 0);
+ fprintf (dump_file, " with ");
+ print_generic_expr (dump_file, sprime, 0);
+ fprintf (dump_file, "\n");
+ }
- /* If there is no existing leader but SCCVN knows this
- value is constant, use that constant. */
- if (!sprime && is_gimple_min_invariant (VN_INFO (lhs)->valnum))
- {
- sprime = VN_INFO (lhs)->valnum;
- if (!useless_type_conversion_p (TREE_TYPE (lhs),
- TREE_TYPE (sprime)))
- sprime = fold_convert (TREE_TYPE (lhs), sprime);
+ remove_phi_node (&gsi, false);
+
+ if (inserted_exprs
+ && !bitmap_bit_p (inserted_exprs, SSA_NAME_VERSION (res))
+ && TREE_CODE (sprime) == SSA_NAME)
+ gimple_set_plf (SSA_NAME_DEF_STMT (sprime), NECESSARY, true);
+
+ if (!useless_type_conversion_p (TREE_TYPE (res), TREE_TYPE (sprime)))
+ sprime = fold_convert (TREE_TYPE (res), sprime);
+ stmt = gimple_build_assign (res, sprime);
+ SSA_NAME_DEF_STMT (res) = stmt;
+ gimple_set_plf (stmt, NECESSARY, gimple_plf (phi, NECESSARY));
+
+ gsi2 = gsi_after_labels (b);
+ gsi_insert_before (&gsi2, stmt, GSI_NEW_STMT);
+ /* Queue the copy for eventual removal. */
+ VEC_safe_push (gimple, heap, el_to_remove, stmt);
+ /* If we inserted this PHI node ourself, it's not an elimination. */
+ if (inserted_exprs
+ && bitmap_bit_p (inserted_exprs, SSA_NAME_VERSION (res)))
+ pre_stats.phis--;
+ else
+ pre_stats.eliminations++;
+ }
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Replaced ");
- print_gimple_expr (dump_file, stmt, 0, 0);
- fprintf (dump_file, " with ");
- print_generic_expr (dump_file, sprime, 0);
- fprintf (dump_file, " in ");
- print_gimple_stmt (dump_file, stmt, 0, 0);
- }
- pre_stats.eliminations++;
- propagate_tree_value_into_stmt (&gsi, sprime);
- stmt = gsi_stmt (gsi);
- update_stmt (stmt);
-
- /* If we removed EH side-effects from the statement, clean
- its EH information. */
- if (maybe_clean_or_replace_eh_stmt (orig_stmt, stmt))
- {
- bitmap_set_bit (need_eh_cleanup,
- gimple_bb (stmt)->index);
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " Removed EH side-effects.\n");
- }
- continue;
- }
+ for (gsi = gsi_start_bb (b); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ tree lhs = NULL_TREE;
+ tree rhs = NULL_TREE;
+
+ stmt = gsi_stmt (gsi);
+
+ if (gimple_has_lhs (stmt))
+ lhs = gimple_get_lhs (stmt);
+
+ if (gimple_assign_single_p (stmt))
+ rhs = gimple_assign_rhs1 (stmt);
+
+ /* Lookup the RHS of the expression, see if we have an
+ available computation for it. If so, replace the RHS with
+ the available computation.
+
+ See PR43491.
+ We don't replace a global register variable when it is the RHS of
+ a single assign. We do replace a local register variable since gcc
+ does not guarantee a local variable will be allocated in a register. */
+ if (gimple_has_lhs (stmt)
+ && TREE_CODE (lhs) == SSA_NAME
+ && !gimple_assign_ssa_name_copy_p (stmt)
+ && (!gimple_assign_single_p (stmt)
+ || (!is_gimple_min_invariant (rhs)
+ && (gimple_assign_rhs_code (stmt) != VAR_DECL
+ || !is_global_var (rhs)
+ || !DECL_HARD_REGISTER (rhs))))
+ && !gimple_has_volatile_ops (stmt))
+ {
+ tree sprime;
+ gimple orig_stmt = stmt;
+ sprime = eliminate_avail (lhs);
+ if (!sprime)
+ {
/* If there is no existing usable leader but SCCVN thinks
it has an expression it wants to use as replacement,
insert that. */
- if (!sprime || sprime == lhs)
+ tree val = VN_INFO (lhs)->valnum;
+ if (val != VN_TOP
+ && TREE_CODE (val) == SSA_NAME
+ && VN_INFO (val)->needs_insertion
+ && (sprime = eliminate_insert (&gsi, val)) != NULL_TREE)
+ eliminate_push_avail (sprime);
+ }
+ else if (is_gimple_min_invariant (sprime))
+ {
+ /* If there is no existing leader but SCCVN knows this
+ value is constant, use that constant. */
+ if (!useless_type_conversion_p (TREE_TYPE (lhs),
+ TREE_TYPE (sprime)))
+ sprime = fold_convert (TREE_TYPE (lhs), sprime);
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
{
- tree val = VN_INFO (lhs)->valnum;
- if (val != VN_TOP
- && TREE_CODE (val) == SSA_NAME
- && VN_INFO (val)->needs_insertion
- && can_PRE_operation (vn_get_expr_for (val)))
- sprime = do_SCCVN_insertion (stmt, val);
+ fprintf (dump_file, "Replaced ");
+ print_gimple_expr (dump_file, stmt, 0, 0);
+ fprintf (dump_file, " with ");
+ print_generic_expr (dump_file, sprime, 0);
+ fprintf (dump_file, " in ");
+ print_gimple_stmt (dump_file, stmt, 0, 0);
}
- if (sprime
- && sprime != lhs
- && (rhs == NULL_TREE
- || TREE_CODE (rhs) != SSA_NAME
- || may_propagate_copy (rhs, sprime)))
+ pre_stats.eliminations++;
+ propagate_tree_value_into_stmt (&gsi, sprime);
+ stmt = gsi_stmt (gsi);
+ update_stmt (stmt);
+
+ /* If we removed EH side-effects from the statement, clean
+ its EH information. */
+ if (maybe_clean_or_replace_eh_stmt (orig_stmt, stmt))
{
- bool can_make_abnormal_goto
- = is_gimple_call (stmt)
- && stmt_can_make_abnormal_goto (stmt);
+ bitmap_set_bit (need_eh_cleanup,
+ gimple_bb (stmt)->index);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " Removed EH side-effects.\n");
+ }
+ continue;
+ }
- gcc_assert (sprime != rhs);
+ /* If there is no usable leader, mark lhs as leader for its value. */
+ if (!sprime)
+ eliminate_push_avail (lhs);
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Replaced ");
- print_gimple_expr (dump_file, stmt, 0, 0);
- fprintf (dump_file, " with ");
- print_generic_expr (dump_file, sprime, 0);
- fprintf (dump_file, " in ");
- print_gimple_stmt (dump_file, stmt, 0, 0);
- }
+ if (sprime
+ && sprime != lhs
+ && (rhs == NULL_TREE
+ || TREE_CODE (rhs) != SSA_NAME
+ || may_propagate_copy (rhs, sprime)))
+ {
+ bool can_make_abnormal_goto
+ = is_gimple_call (stmt)
+ && stmt_can_make_abnormal_goto (stmt);
- if (TREE_CODE (sprime) == SSA_NAME)
- gimple_set_plf (SSA_NAME_DEF_STMT (sprime),
- NECESSARY, true);
- /* We need to make sure the new and old types actually match,
- which may require adding a simple cast, which fold_convert
- will do for us. */
- if ((!rhs || TREE_CODE (rhs) != SSA_NAME)
- && !useless_type_conversion_p (gimple_expr_type (stmt),
- TREE_TYPE (sprime)))
- sprime = fold_convert (gimple_expr_type (stmt), sprime);
-
- pre_stats.eliminations++;
- propagate_tree_value_into_stmt (&gsi, sprime);
- stmt = gsi_stmt (gsi);
- update_stmt (stmt);
-
- /* If we removed EH side-effects from the statement, clean
- its EH information. */
- if (maybe_clean_or_replace_eh_stmt (orig_stmt, stmt))
- {
- bitmap_set_bit (need_eh_cleanup,
- gimple_bb (stmt)->index);
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " Removed EH side-effects.\n");
- }
+ gcc_assert (sprime != rhs);
- /* Likewise for AB side-effects. */
- if (can_make_abnormal_goto
- && !stmt_can_make_abnormal_goto (stmt))
- {
- bitmap_set_bit (need_ab_cleanup,
- gimple_bb (stmt)->index);
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " Removed AB side-effects.\n");
- }
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Replaced ");
+ print_gimple_expr (dump_file, stmt, 0, 0);
+ fprintf (dump_file, " with ");
+ print_generic_expr (dump_file, sprime, 0);
+ fprintf (dump_file, " in ");
+ print_gimple_stmt (dump_file, stmt, 0, 0);
}
- }
- /* If the statement is a scalar store, see if the expression
- has the same value number as its rhs. If so, the store is
- dead. */
- else if (gimple_assign_single_p (stmt)
- && !gimple_has_volatile_ops (stmt)
- && !is_gimple_reg (gimple_assign_lhs (stmt))
- && (TREE_CODE (rhs) == SSA_NAME
- || is_gimple_min_invariant (rhs)))
- {
- tree val;
- val = vn_reference_lookup (gimple_assign_lhs (stmt),
- gimple_vuse (stmt), VN_WALK, NULL);
- if (TREE_CODE (rhs) == SSA_NAME)
- rhs = VN_INFO (rhs)->valnum;
- if (val
- && operand_equal_p (val, rhs, 0))
+
+ if (TREE_CODE (sprime) == SSA_NAME)
+ gimple_set_plf (SSA_NAME_DEF_STMT (sprime),
+ NECESSARY, true);
+ /* We need to make sure the new and old types actually match,
+ which may require adding a simple cast, which fold_convert
+ will do for us. */
+ if ((!rhs || TREE_CODE (rhs) != SSA_NAME)
+ && !useless_type_conversion_p (gimple_expr_type (stmt),
+ TREE_TYPE (sprime)))
+ sprime = fold_convert (gimple_expr_type (stmt), sprime);
+
+ pre_stats.eliminations++;
+ propagate_tree_value_into_stmt (&gsi, sprime);
+ stmt = gsi_stmt (gsi);
+ update_stmt (stmt);
+
+ /* If we removed EH side-effects from the statement, clean
+ its EH information. */
+ if (maybe_clean_or_replace_eh_stmt (orig_stmt, stmt))
{
+ bitmap_set_bit (need_eh_cleanup,
+ gimple_bb (stmt)->index);
if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Deleted redundant store ");
- print_gimple_stmt (dump_file, stmt, 0, 0);
- }
+ fprintf (dump_file, " Removed EH side-effects.\n");
+ }
- /* Queue stmt for removal. */
- VEC_safe_push (gimple, heap, to_remove, stmt);
+ /* Likewise for AB side-effects. */
+ if (can_make_abnormal_goto
+ && !stmt_can_make_abnormal_goto (stmt))
+ {
+ bitmap_set_bit (need_ab_cleanup,
+ gimple_bb (stmt)->index);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " Removed AB side-effects.\n");
}
}
- /* Visit COND_EXPRs and fold the comparison with the
- available value-numbers. */
- else if (gimple_code (stmt) == GIMPLE_COND)
+ }
+ /* If the statement is a scalar store, see if the expression
+ has the same value number as its rhs. If so, the store is
+ dead. */
+ else if (gimple_assign_single_p (stmt)
+ && !gimple_has_volatile_ops (stmt)
+ && !is_gimple_reg (gimple_assign_lhs (stmt))
+ && (TREE_CODE (rhs) == SSA_NAME
+ || is_gimple_min_invariant (rhs)))
+ {
+ tree val;
+ val = vn_reference_lookup (gimple_assign_lhs (stmt),
+ gimple_vuse (stmt), VN_WALK, NULL);
+ if (TREE_CODE (rhs) == SSA_NAME)
+ rhs = VN_INFO (rhs)->valnum;
+ if (val
+ && operand_equal_p (val, rhs, 0))
{
- tree op0 = gimple_cond_lhs (stmt);
- tree op1 = gimple_cond_rhs (stmt);
- tree result;
-
- if (TREE_CODE (op0) == SSA_NAME)
- op0 = VN_INFO (op0)->valnum;
- if (TREE_CODE (op1) == SSA_NAME)
- op1 = VN_INFO (op1)->valnum;
- result = fold_binary (gimple_cond_code (stmt), boolean_type_node,
- op0, op1);
- if (result && TREE_CODE (result) == INTEGER_CST)
+ if (dump_file && (dump_flags & TDF_DETAILS))
{
- if (integer_zerop (result))
- gimple_cond_make_false (stmt);
- else
- gimple_cond_make_true (stmt);
- update_stmt (stmt);
- todo = TODO_cleanup_cfg;
+ fprintf (dump_file, "Deleted redundant store ");
+ print_gimple_stmt (dump_file, stmt, 0, 0);
}
+
+ /* Queue stmt for removal. */
+ VEC_safe_push (gimple, heap, el_to_remove, stmt);
}
- /* Visit indirect calls and turn them into direct calls if
- possible. */
- if (is_gimple_call (stmt))
+ }
+ /* Visit COND_EXPRs and fold the comparison with the
+ available value-numbers. */
+ else if (gimple_code (stmt) == GIMPLE_COND)
+ {
+ tree op0 = gimple_cond_lhs (stmt);
+ tree op1 = gimple_cond_rhs (stmt);
+ tree result;
+
+ if (TREE_CODE (op0) == SSA_NAME)
+ op0 = VN_INFO (op0)->valnum;
+ if (TREE_CODE (op1) == SSA_NAME)
+ op1 = VN_INFO (op1)->valnum;
+ result = fold_binary (gimple_cond_code (stmt), boolean_type_node,
+ op0, op1);
+ if (result && TREE_CODE (result) == INTEGER_CST)
{
- tree orig_fn = gimple_call_fn (stmt);
- tree fn;
- if (!orig_fn)
- continue;
- if (TREE_CODE (orig_fn) == SSA_NAME)
- fn = VN_INFO (orig_fn)->valnum;
- else if (TREE_CODE (orig_fn) == OBJ_TYPE_REF
- && TREE_CODE (OBJ_TYPE_REF_EXPR (orig_fn)) == SSA_NAME)
- fn = VN_INFO (OBJ_TYPE_REF_EXPR (orig_fn))->valnum;
+ if (integer_zerop (result))
+ gimple_cond_make_false (stmt);
else
- continue;
- if (gimple_call_addr_fndecl (fn) != NULL_TREE
- && useless_type_conversion_p (TREE_TYPE (orig_fn),
- TREE_TYPE (fn)))
- {
- bool can_make_abnormal_goto
- = stmt_can_make_abnormal_goto (stmt);
- bool was_noreturn = gimple_call_noreturn_p (stmt);
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Replacing call target with ");
- print_generic_expr (dump_file, fn, 0);
- fprintf (dump_file, " in ");
- print_gimple_stmt (dump_file, stmt, 0, 0);
- }
+ gimple_cond_make_true (stmt);
+ update_stmt (stmt);
+ el_todo = TODO_cleanup_cfg;
+ }
+ }
+ /* Visit indirect calls and turn them into direct calls if
+ possible. */
+ if (is_gimple_call (stmt))
+ {
+ tree orig_fn = gimple_call_fn (stmt);
+ tree fn;
+ if (!orig_fn)
+ continue;
+ if (TREE_CODE (orig_fn) == SSA_NAME)
+ fn = VN_INFO (orig_fn)->valnum;
+ else if (TREE_CODE (orig_fn) == OBJ_TYPE_REF
+ && TREE_CODE (OBJ_TYPE_REF_EXPR (orig_fn)) == SSA_NAME)
+ fn = VN_INFO (OBJ_TYPE_REF_EXPR (orig_fn))->valnum;
+ else
+ continue;
+ if (gimple_call_addr_fndecl (fn) != NULL_TREE
+ && useless_type_conversion_p (TREE_TYPE (orig_fn),
+ TREE_TYPE (fn)))
+ {
+ bool can_make_abnormal_goto
+ = stmt_can_make_abnormal_goto (stmt);
+ bool was_noreturn = gimple_call_noreturn_p (stmt);
- gimple_call_set_fn (stmt, fn);
- VEC_safe_push (gimple, heap, to_update, stmt);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Replacing call target with ");
+ print_generic_expr (dump_file, fn, 0);
+ fprintf (dump_file, " in ");
+ print_gimple_stmt (dump_file, stmt, 0, 0);
+ }
- /* When changing a call into a noreturn call, cfg cleanup
- is needed to fix up the noreturn call. */
- if (!was_noreturn && gimple_call_noreturn_p (stmt))
- todo |= TODO_cleanup_cfg;
+ gimple_call_set_fn (stmt, fn);
+ VEC_safe_push (gimple, heap, el_to_update, stmt);
- /* If we removed EH side-effects from the statement, clean
- its EH information. */
- if (maybe_clean_or_replace_eh_stmt (stmt, stmt))
- {
- bitmap_set_bit (need_eh_cleanup,
- gimple_bb (stmt)->index);
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " Removed EH side-effects.\n");
- }
+ /* When changing a call into a noreturn call, cfg cleanup
+ is needed to fix up the noreturn call. */
+ if (!was_noreturn && gimple_call_noreturn_p (stmt))
+ el_todo |= TODO_cleanup_cfg;
- /* Likewise for AB side-effects. */
- if (can_make_abnormal_goto
- && !stmt_can_make_abnormal_goto (stmt))
- {
- bitmap_set_bit (need_ab_cleanup,
- gimple_bb (stmt)->index);
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " Removed AB side-effects.\n");
- }
+ /* If we removed EH side-effects from the statement, clean
+ its EH information. */
+ if (maybe_clean_or_replace_eh_stmt (stmt, stmt))
+ {
+ bitmap_set_bit (need_eh_cleanup,
+ gimple_bb (stmt)->index);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " Removed EH side-effects.\n");
+ }
- /* Changing an indirect call to a direct call may
- have exposed different semantics. This may
- require an SSA update. */
- todo |= TODO_update_ssa_only_virtuals;
+ /* Likewise for AB side-effects. */
+ if (can_make_abnormal_goto
+ && !stmt_can_make_abnormal_goto (stmt))
+ {
+ bitmap_set_bit (need_ab_cleanup,
+ gimple_bb (stmt)->index);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " Removed AB side-effects.\n");
}
+
+ /* Changing an indirect call to a direct call may
+ have exposed different semantics. This may
+ require an SSA update. */
+ el_todo |= TODO_update_ssa_only_virtuals;
}
}
+ }
+}
- for (gsi = gsi_start_phis (b); !gsi_end_p (gsi);)
- {
- gimple stmt, phi = gsi_stmt (gsi);
- tree sprime = NULL_TREE, res = PHI_RESULT (phi);
- pre_expr sprimeexpr, resexpr;
- gimple_stmt_iterator gsi2;
-
- /* We want to perform redundant PHI elimination. Do so by
- replacing the PHI with a single copy if possible.
- Do not touch inserted, single-argument or virtual PHIs. */
- if (gimple_phi_num_args (phi) == 1
- || virtual_operand_p (res))
- {
- gsi_next (&gsi);
- continue;
- }
+/* Make leaders that have gone out of scope no longer available. */
- resexpr = get_or_alloc_expr_for_name (res);
- sprimeexpr = bitmap_find_leader (AVAIL_OUT (b),
- get_expr_value_id (resexpr), NULL);
- if (sprimeexpr)
- {
- if (sprimeexpr->kind == CONSTANT)
- sprime = PRE_EXPR_CONSTANT (sprimeexpr);
- else if (sprimeexpr->kind == NAME)
- sprime = PRE_EXPR_NAME (sprimeexpr);
- else
- gcc_unreachable ();
- }
- if (!sprime && is_gimple_min_invariant (VN_INFO (res)->valnum))
- {
- sprime = VN_INFO (res)->valnum;
- if (!useless_type_conversion_p (TREE_TYPE (res),
- TREE_TYPE (sprime)))
- sprime = fold_convert (TREE_TYPE (res), sprime);
- }
- if (!sprime
- || sprime == res)
- {
- gsi_next (&gsi);
- continue;
- }
+static void
+eliminate_leave_block (dom_walk_data *, basic_block)
+{
+ tree entry;
+ while ((entry = VEC_pop (tree, el_avail_stack)) != NULL_TREE)
+ VEC_replace (tree, el_avail,
+ SSA_NAME_VERSION (VN_INFO (entry)->valnum), NULL_TREE);
+}
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Replaced redundant PHI node defining ");
- print_generic_expr (dump_file, res, 0);
- fprintf (dump_file, " with ");
- print_generic_expr (dump_file, sprime, 0);
- fprintf (dump_file, "\n");
- }
+/* Eliminate fully redundant computations. */
- remove_phi_node (&gsi, false);
+static unsigned int
+eliminate (void)
+{
+ struct dom_walk_data walk_data;
+ gimple_stmt_iterator gsi;
+ gimple stmt;
+ unsigned i;
- if (!bitmap_bit_p (inserted_exprs, SSA_NAME_VERSION (res))
- && TREE_CODE (sprime) == SSA_NAME)
- gimple_set_plf (SSA_NAME_DEF_STMT (sprime), NECESSARY, true);
+ need_eh_cleanup = BITMAP_ALLOC (NULL);
+ need_ab_cleanup = BITMAP_ALLOC (NULL);
- if (!useless_type_conversion_p (TREE_TYPE (res), TREE_TYPE (sprime)))
- sprime = fold_convert (TREE_TYPE (res), sprime);
- stmt = gimple_build_assign (res, sprime);
- SSA_NAME_DEF_STMT (res) = stmt;
- gimple_set_plf (stmt, NECESSARY, gimple_plf (phi, NECESSARY));
-
- gsi2 = gsi_after_labels (b);
- gsi_insert_before (&gsi2, stmt, GSI_NEW_STMT);
- /* Queue the copy for eventual removal. */
- VEC_safe_push (gimple, heap, to_remove, stmt);
- /* If we inserted this PHI node ourself, it's not an elimination. */
- if (bitmap_bit_p (inserted_exprs, SSA_NAME_VERSION (res)))
- pre_stats.phis--;
- else
- pre_stats.eliminations++;
- }
- }
+ el_to_remove = NULL;
+ el_to_update = NULL;
+ el_todo = 0;
+ el_avail = NULL;
+ el_avail_stack = NULL;
+
+ walk_data.dom_direction = CDI_DOMINATORS;
+ walk_data.initialize_block_local_data = NULL;
+ walk_data.before_dom_children = eliminate_bb;
+ walk_data.after_dom_children = eliminate_leave_block;
+ walk_data.global_data = NULL;
+ walk_data.block_local_data_size = 0;
+ init_walk_dominator_tree (&walk_data);
+ walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
+ fini_walk_dominator_tree (&walk_data);
+
+ VEC_free (tree, heap, el_avail);
+ VEC_free (tree, heap, el_avail_stack);
/* We cannot remove stmts during BB walk, especially not release SSA
names there as this confuses the VN machinery. The stmts ending
- up in to_remove are either stores or simple copies. */
- FOR_EACH_VEC_ELT (gimple, to_remove, i, stmt)
+ up in el_to_remove are either stores or simple copies. */
+ FOR_EACH_VEC_ELT (gimple, el_to_remove, i, stmt)
{
tree lhs = gimple_assign_lhs (stmt);
tree rhs = gimple_assign_rhs1 (stmt);
@@ -4541,7 +4592,8 @@ eliminate (void)
{
SET_USE (use_p, rhs);
update_stmt (use_stmt);
- if (bitmap_bit_p (inserted_exprs, SSA_NAME_VERSION (lhs))
+ if (inserted_exprs
+ && bitmap_bit_p (inserted_exprs, SSA_NAME_VERSION (lhs))
&& TREE_CODE (rhs) == SSA_NAME)
gimple_set_plf (SSA_NAME_DEF_STMT (rhs), NECESSARY, true);
}
@@ -4555,21 +4607,43 @@ eliminate (void)
unlink_stmt_vdef (stmt);
if (gsi_remove (&gsi, true))
bitmap_set_bit (need_eh_cleanup, bb->index);
- if (TREE_CODE (lhs) == SSA_NAME)
+ if (inserted_exprs
+ && TREE_CODE (lhs) == SSA_NAME)
bitmap_clear_bit (inserted_exprs, SSA_NAME_VERSION (lhs));
release_defs (stmt);
}
}
- VEC_free (gimple, heap, to_remove);
+ VEC_free (gimple, heap, el_to_remove);
/* We cannot update call statements with virtual operands during
SSA walk. This might remove them which in turn makes our
VN lattice invalid. */
- FOR_EACH_VEC_ELT (gimple, to_update, i, stmt)
+ FOR_EACH_VEC_ELT (gimple, el_to_update, i, stmt)
update_stmt (stmt);
- VEC_free (gimple, heap, to_update);
+ VEC_free (gimple, heap, el_to_update);
- return todo;
+ return el_todo;
+}
+
+/* Perform CFG cleanups made necessary by elimination. */
+
+static void
+fini_eliminate (void)
+{
+ bool do_eh_cleanup = !bitmap_empty_p (need_eh_cleanup);
+ bool do_ab_cleanup = !bitmap_empty_p (need_ab_cleanup);
+
+ if (do_eh_cleanup)
+ gimple_purge_all_dead_eh_edges (need_eh_cleanup);
+
+ if (do_ab_cleanup)
+ gimple_purge_all_dead_abnormal_call_edges (need_ab_cleanup);
+
+ BITMAP_FREE (need_eh_cleanup);
+ BITMAP_FREE (need_ab_cleanup);
+
+ if (do_eh_cleanup || do_ab_cleanup)
+ cleanup_tree_cfg ();
}
/* Borrow a bit of tree-ssa-dce.c for the moment.
@@ -4769,20 +4843,18 @@ my_rev_post_order_compute (int *post_order, bool include_entry_exit)
/* Initialize data structures used by PRE. */
static void
-init_pre (bool do_fre)
+init_pre (void)
{
basic_block bb;
next_expression_id = 1;
expressions = NULL;
- VEC_safe_push (pre_expr, heap, expressions, (pre_expr)NULL);
+ VEC_safe_push (pre_expr, heap, expressions, NULL);
value_expressions = VEC_alloc (bitmap, heap, get_max_value_id () + 1);
VEC_safe_grow_cleared (bitmap, heap, value_expressions,
get_max_value_id() + 1);
name_to_id = NULL;
- in_fre = do_fre;
-
inserted_exprs = BITMAP_ALLOC (NULL);
connect_infinite_loops_to_exit ();
@@ -4806,28 +4878,19 @@ init_pre (bool do_fre)
sizeof (struct pre_expr_d), 30);
FOR_ALL_BB (bb)
{
- if (!do_fre)
- {
- EXP_GEN (bb) = bitmap_set_new ();
- PHI_GEN (bb) = bitmap_set_new ();
- TMP_GEN (bb) = bitmap_set_new ();
- }
+ EXP_GEN (bb) = bitmap_set_new ();
+ PHI_GEN (bb) = bitmap_set_new ();
+ TMP_GEN (bb) = bitmap_set_new ();
AVAIL_OUT (bb) = bitmap_set_new ();
}
-
- need_eh_cleanup = BITMAP_ALLOC (NULL);
- need_ab_cleanup = BITMAP_ALLOC (NULL);
}
/* Deallocate data structures used by PRE. */
static void
-fini_pre (bool do_fre)
+fini_pre ()
{
- bool do_eh_cleanup = !bitmap_empty_p (need_eh_cleanup);
- bool do_ab_cleanup = !bitmap_empty_p (need_ab_cleanup);
-
free (postorder);
VEC_free (bitmap, heap, value_expressions);
BITMAP_FREE (inserted_exprs);
@@ -4841,28 +4904,12 @@ fini_pre (bool do_fre)
free_aux_for_blocks ();
free_dominance_info (CDI_POST_DOMINATORS);
-
- if (do_eh_cleanup)
- gimple_purge_all_dead_eh_edges (need_eh_cleanup);
-
- if (do_ab_cleanup)
- gimple_purge_all_dead_abnormal_call_edges (need_ab_cleanup);
-
- BITMAP_FREE (need_eh_cleanup);
- BITMAP_FREE (need_ab_cleanup);
-
- if (do_eh_cleanup || do_ab_cleanup)
- cleanup_tree_cfg ();
-
- if (!do_fre)
- loop_optimizer_finalize ();
}
-/* Main entry point to the SSA-PRE pass. DO_FRE is true if the caller
- only wants to do full redundancy elimination. */
+/* Gate and execute functions for PRE. */
static unsigned int
-execute_pre (bool do_fre)
+do_pre (void)
{
unsigned int todo = 0;
@@ -4871,18 +4918,15 @@ execute_pre (bool do_fre)
/* This has to happen before SCCVN runs because
loop_optimizer_init may create new phis, etc. */
- if (!do_fre)
- loop_optimizer_init (LOOPS_NORMAL);
+ loop_optimizer_init (LOOPS_NORMAL);
- if (!run_scc_vn (do_fre ? VN_WALKREWRITE : VN_WALK))
+ if (!run_scc_vn (VN_WALK))
{
- if (!do_fre)
- loop_optimizer_finalize ();
-
+ loop_optimizer_finalize ();
return 0;
}
- init_pre (do_fre);
+ init_pre ();
scev_initialize ();
/* Collect and value number expressions computed in each basic block. */
@@ -4891,13 +4935,16 @@ execute_pre (bool do_fre)
if (dump_file && (dump_flags & TDF_DETAILS))
{
basic_block bb;
-
FOR_ALL_BB (bb)
{
- print_bitmap_set (dump_file, EXP_GEN (bb), "exp_gen", bb->index);
- print_bitmap_set (dump_file, PHI_GEN (bb), "phi_gen", bb->index);
- print_bitmap_set (dump_file, TMP_GEN (bb), "tmp_gen", bb->index);
- print_bitmap_set (dump_file, AVAIL_OUT (bb), "avail_out", bb->index);
+ print_bitmap_set (dump_file, EXP_GEN (bb),
+ "exp_gen", bb->index);
+ print_bitmap_set (dump_file, PHI_GEN (bb),
+ "phi_gen", bb->index);
+ print_bitmap_set (dump_file, TMP_GEN (bb),
+ "tmp_gen", bb->index);
+ print_bitmap_set (dump_file, AVAIL_OUT (bb),
+ "avail_out", bb->index);
}
}
@@ -4906,7 +4953,7 @@ execute_pre (bool do_fre)
fixed, don't run it when we have an incredibly large number of
bb's. If we aren't going to run insert, there is no point in
computing ANTIC, either, even though it's plenty fast. */
- if (!do_fre && n_basic_blocks < 4000)
+ if (n_basic_blocks < 4000)
{
compute_antic ();
insert ();
@@ -4928,37 +4975,28 @@ execute_pre (bool do_fre)
statistics_counter_event (cfun, "Constified", pre_stats.constified);
clear_expression_ids ();
- if (!do_fre)
- {
- remove_dead_inserted_code ();
- todo |= TODO_verify_flow;
- }
+ remove_dead_inserted_code ();
+ todo |= TODO_verify_flow;
scev_finalize ();
- fini_pre (do_fre);
-
- if (!do_fre)
- /* TODO: tail_merge_optimize may merge all predecessors of a block, in which
- case we can merge the block with the remaining predecessor of the block.
- It should either:
- - call merge_blocks after each tail merge iteration
- - call merge_blocks after all tail merge iterations
- - mark TODO_cleanup_cfg when necessary
- - share the cfg cleanup with fini_pre. */
- todo |= tail_merge_optimize (todo);
+ fini_pre ();
+ fini_eliminate ();
+ loop_optimizer_finalize ();
+
+ /* TODO: tail_merge_optimize may merge all predecessors of a block, in which
+ case we can merge the block with the remaining predecessor of the block.
+ It should either:
+ - call merge_blocks after each tail merge iteration
+ - call merge_blocks after all tail merge iterations
+ - mark TODO_cleanup_cfg when necessary
+ - share the cfg cleanup with fini_pre. */
+ todo |= tail_merge_optimize (todo);
+
free_scc_vn ();
return todo;
}
-/* Gate and execute functions for PRE. */
-
-static unsigned int
-do_pre (void)
-{
- return execute_pre (false);
-}
-
static bool
gate_pre (void)
{
@@ -4992,7 +5030,25 @@ struct gimple_opt_pass pass_pre =
static unsigned int
execute_fre (void)
{
- return execute_pre (true);
+ unsigned int todo = 0;
+
+ if (!run_scc_vn (VN_WALKREWRITE))
+ return 0;
+
+ memset (&pre_stats, 0, sizeof (pre_stats));
+
+ /* Remove all the redundant expressions. */
+ todo |= eliminate ();
+
+ fini_eliminate ();
+
+ free_scc_vn ();
+
+ statistics_counter_event (cfun, "Insertions", pre_stats.insertions);
+ statistics_counter_event (cfun, "Eliminated", pre_stats.eliminations);
+ statistics_counter_event (cfun, "Constified", pre_stats.constified);
+
+ return todo;
}
static bool
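Note on the tree-ssa-pre.c changes above: the shared execute_pre (bool do_fre) driver is gone. PRE (do_pre) still builds ANTIC and inserts, while FRE (execute_fre) now only runs value numbering plus eliminate (); elimination itself has become a dominator walk that makes leaders available on entry to a block (eliminate_bb) and unwinds them on exit (eliminate_leave_block). A minimal sketch of that walker wiring, mirroring the calls in the hunks above (my_enter/my_leave are placeholder names, not from the patch):

  static void
  my_enter (struct dom_walk_data *w, basic_block bb)
  {
    /* Record leaders that become available in BB, e.g. push them on a stack.  */
  }

  static void
  my_leave (struct dom_walk_data *w, basic_block bb)
  {
    /* Pop what BB made available so sibling subtrees do not see it.  */
  }

  static void
  walk_once (void)
  {
    struct dom_walk_data walk_data;
    memset (&walk_data, 0, sizeof (walk_data));
    walk_data.dom_direction = CDI_DOMINATORS;  /* top-down over the dominator tree */
    walk_data.before_dom_children = my_enter;
    walk_data.after_dom_children = my_leave;
    init_walk_dominator_tree (&walk_data);
    walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
    fini_walk_dominator_tree (&walk_data);
  }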
diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c
index 2b1298d1402..960e2c3c389 100644
--- a/gcc/tree-ssa-reassoc.c
+++ b/gcc/tree-ssa-reassoc.c
@@ -1344,7 +1344,7 @@ undistribute_ops_list (enum tree_code opcode,
c.cnt = 1;
c.id = next_oecount_id++;
c.op = oe1->op;
- VEC_safe_push (oecount, heap, cvec, &c);
+ VEC_safe_push (oecount, heap, cvec, c);
idx = VEC_length (oecount, cvec) + 41;
slot = htab_find_slot (ctable, (void *)idx, INSERT);
if (!*slot)
@@ -3118,7 +3118,7 @@ attempt_builtin_powi (gimple stmt, VEC(operand_entry_t, heap) **ops)
rfnew.rank = oe->rank;
rfnew.count = oe->count;
rfnew.repr = NULL_TREE;
- VEC_safe_push (repeat_factor, heap, repeat_factor_vec, &rfnew);
+ VEC_safe_push (repeat_factor, heap, repeat_factor_vec, rfnew);
}
}
}
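Note: both hunks are fallout of the VEC interface change running through this merge: VEC_safe_push now takes the new element by value and copies it into the vector, instead of taking a pointer to it. The convention, shown with the oecount type from this file:

  oecount c;
  c.cnt = 1;
  c.id = next_oecount_id++;
  c.op = oe1->op;
  /* Before: VEC_safe_push (oecount, heap, cvec, &c);  -- address of the element.  */
  VEC_safe_push (oecount, heap, cvec, c);  /* Now: the element itself.  */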
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index fed7c55e5f0..9a370e8ab3c 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -591,21 +591,21 @@ copy_reference_ops_from_ref (tree ref, VEC(vn_reference_op_s, heap) **result)
temp.op1 = TMR_STEP (ref);
temp.op2 = TMR_OFFSET (ref);
temp.off = -1;
- VEC_safe_push (vn_reference_op_s, heap, *result, &temp);
+ VEC_safe_push (vn_reference_op_s, heap, *result, temp);
memset (&temp, 0, sizeof (temp));
temp.type = NULL_TREE;
temp.opcode = ERROR_MARK;
temp.op0 = TMR_INDEX2 (ref);
temp.off = -1;
- VEC_safe_push (vn_reference_op_s, heap, *result, &temp);
+ VEC_safe_push (vn_reference_op_s, heap, *result, temp);
memset (&temp, 0, sizeof (temp));
temp.type = NULL_TREE;
temp.opcode = TREE_CODE (TMR_BASE (ref));
temp.op0 = TMR_BASE (ref);
temp.off = -1;
- VEC_safe_push (vn_reference_op_s, heap, *result, &temp);
+ VEC_safe_push (vn_reference_op_s, heap, *result, temp);
return;
}
@@ -656,13 +656,12 @@ copy_reference_ops_from_ref (tree ref, VEC(vn_reference_op_s, heap) **result)
if (TREE_INT_CST_LOW (bit_offset) % BITS_PER_UNIT == 0)
{
double_int off
- = double_int_add (tree_to_double_int (this_offset),
- double_int_rshift
- (tree_to_double_int (bit_offset),
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true));
- if (double_int_fits_in_shwi_p (off))
+ = tree_to_double_int (this_offset)
+ + tree_to_double_int (bit_offset)
+ .arshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT);
+ if (off.fits_shwi ())
temp.off = off.low;
}
}
@@ -680,11 +679,9 @@ copy_reference_ops_from_ref (tree ref, VEC(vn_reference_op_s, heap) **result)
&& TREE_CODE (temp.op2) == INTEGER_CST)
{
double_int off = tree_to_double_int (temp.op0);
- off = double_int_add (off,
- double_int_neg
- (tree_to_double_int (temp.op1)));
- off = double_int_mul (off, tree_to_double_int (temp.op2));
- if (double_int_fits_in_shwi_p (off))
+ off += -tree_to_double_int (temp.op1);
+ off *= tree_to_double_int (temp.op2);
+ if (off.fits_shwi ())
temp.off = off.low;
}
break;
@@ -703,7 +700,7 @@ copy_reference_ops_from_ref (tree ref, VEC(vn_reference_op_s, heap) **result)
temp.opcode = MEM_REF;
temp.op0 = build_int_cst (build_pointer_type (TREE_TYPE (ref)), 0);
temp.off = 0;
- VEC_safe_push (vn_reference_op_s, heap, *result, &temp);
+ VEC_safe_push (vn_reference_op_s, heap, *result, temp);
temp.opcode = ADDR_EXPR;
temp.op0 = build_fold_addr_expr (ref);
temp.type = TREE_TYPE (temp.op0);
@@ -742,7 +739,7 @@ copy_reference_ops_from_ref (tree ref, VEC(vn_reference_op_s, heap) **result)
default:
gcc_unreachable ();
}
- VEC_safe_push (vn_reference_op_s, heap, *result, &temp);
+ VEC_safe_push (vn_reference_op_s, heap, *result, temp);
if (REFERENCE_CLASS_P (ref)
|| TREE_CODE (ref) == MODIFY_EXPR
@@ -952,7 +949,7 @@ copy_reference_ops_from_call (gimple call,
temp.type = TREE_TYPE (lhs);
temp.op0 = lhs;
temp.off = -1;
- VEC_safe_push (vn_reference_op_s, heap, *result, &temp);
+ VEC_safe_push (vn_reference_op_s, heap, *result, temp);
}
/* Copy the type, opcode, function being called and static chain. */
@@ -962,7 +959,7 @@ copy_reference_ops_from_call (gimple call,
temp.op0 = gimple_call_fn (call);
temp.op1 = gimple_call_chain (call);
temp.off = -1;
- VEC_safe_push (vn_reference_op_s, heap, *result, &temp);
+ VEC_safe_push (vn_reference_op_s, heap, *result, temp);
/* Copy the call arguments. As they can be references as well,
just chain them together. */
@@ -1018,8 +1015,8 @@ vn_reference_fold_indirect (VEC (vn_reference_op_s, heap) **ops,
if (addr_base != op->op0)
{
double_int off = tree_to_double_int (mem_op->op0);
- off = double_int_sext (off, TYPE_PRECISION (TREE_TYPE (mem_op->op0)));
- off = double_int_add (off, shwi_to_double_int (addr_offset));
+ off = off.sext (TYPE_PRECISION (TREE_TYPE (mem_op->op0)));
+ off += double_int::from_shwi (addr_offset);
mem_op->op0 = double_int_to_tree (TREE_TYPE (mem_op->op0), off);
op->op0 = build_fold_addr_expr (addr_base);
if (host_integerp (mem_op->op0, 0))
@@ -1052,7 +1049,7 @@ vn_reference_maybe_forwprop_address (VEC (vn_reference_op_s, heap) **ops,
return;
off = tree_to_double_int (mem_op->op0);
- off = double_int_sext (off, TYPE_PRECISION (TREE_TYPE (mem_op->op0)));
+ off = off.sext (TYPE_PRECISION (TREE_TYPE (mem_op->op0)));
/* The only thing we have to do is from &OBJ.foo.bar add the offset
from .foo.bar to the preceding MEM_REF offset and replace the
@@ -1069,8 +1066,8 @@ vn_reference_maybe_forwprop_address (VEC (vn_reference_op_s, heap) **ops,
|| TREE_CODE (addr_base) != MEM_REF)
return;
- off = double_int_add (off, shwi_to_double_int (addr_offset));
- off = double_int_add (off, mem_ref_offset (addr_base));
+ off += double_int::from_shwi (addr_offset);
+ off += mem_ref_offset (addr_base);
op->op0 = TREE_OPERAND (addr_base, 0);
}
else
@@ -1082,7 +1079,7 @@ vn_reference_maybe_forwprop_address (VEC (vn_reference_op_s, heap) **ops,
|| TREE_CODE (ptroff) != INTEGER_CST)
return;
- off = double_int_add (off, tree_to_double_int (ptroff));
+ off += tree_to_double_int (ptroff);
op->op0 = ptr;
}
@@ -1242,11 +1239,9 @@ valueize_refs_1 (VEC (vn_reference_op_s, heap) *orig, bool *valueized_anything)
&& TREE_CODE (vro->op2) == INTEGER_CST)
{
double_int off = tree_to_double_int (vro->op0);
- off = double_int_add (off,
- double_int_neg
- (tree_to_double_int (vro->op1)));
- off = double_int_mul (off, tree_to_double_int (vro->op2));
- if (double_int_fits_in_shwi_p (off))
+ off += -tree_to_double_int (vro->op1);
+ off *= tree_to_double_int (vro->op2);
+ if (off.fits_shwi ())
vro->off = off.low;
}
}
@@ -3771,7 +3766,7 @@ start_over:
{
/* Recurse by pushing the current use walking state on
the stack and starting over. */
- VEC_safe_push(ssa_op_iter, heap, itervec, &iter);
+ VEC_safe_push(ssa_op_iter, heap, itervec, iter);
VEC_safe_push(tree, heap, namevec, name);
name = use;
goto start_over;
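Note: the hunks in this file track double_int's conversion from a plain struct manipulated through free functions into a C++ value class with overloaded operators and member functions. A condensed before/after sketch (t, d and prec are placeholders, not names from the patch):

  double_int off = tree_to_double_int (t);
  off += d;               /* was: off = double_int_add (off, d);  */
  off = off.sext (prec);  /* was: off = double_int_sext (off, prec);  */
  if (off.fits_shwi ())   /* was: if (double_int_fits_in_shwi_p (off))  */
    do_something (off.low);  /* do_something is a hypothetical consumer */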
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index 6e71d5504dc..688b0688b82 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -826,7 +826,7 @@ constraint_expr_less (struct constraint_expr a, struct constraint_expr b)
arbitrary, but consistent, in order to give them an ordering. */
static bool
-constraint_less (const constraint_t a, const constraint_t b)
+constraint_less (const constraint_t &a, const constraint_t &b)
{
if (constraint_expr_less (a->lhs, b->lhs))
return true;
@@ -2793,12 +2793,12 @@ get_constraint_for_ssa_var (tree t, VEC(ce_s, heap) **results, bool address_p)
for (; vi; vi = vi->next)
{
cexpr.var = vi->id;
- VEC_safe_push (ce_s, heap, *results, &cexpr);
+ VEC_safe_push (ce_s, heap, *results, cexpr);
}
return;
}
- VEC_safe_push (ce_s, heap, *results, &cexpr);
+ VEC_safe_push (ce_s, heap, *results, cexpr);
}
/* Process constraint T, performing various simplifications and then
@@ -2902,10 +2902,9 @@ get_constraint_for_ptr_offset (tree ptr, tree offset,
else
{
/* Sign-extend the offset. */
- double_int soffset
- = double_int_sext (tree_to_double_int (offset),
- TYPE_PRECISION (TREE_TYPE (offset)));
- if (!double_int_fits_in_shwi_p (soffset))
+ double_int soffset = tree_to_double_int (offset)
+ .sext (TYPE_PRECISION (TREE_TYPE (offset)));
+ if (!soffset.fits_shwi ())
rhsoffset = UNKNOWN_OFFSET;
else
{
@@ -2946,7 +2945,7 @@ get_constraint_for_ptr_offset (tree ptr, tree offset,
c2.type = ADDRESSOF;
c2.offset = 0;
if (c2.var != c.var)
- VEC_safe_push (ce_s, heap, *results, &c2);
+ VEC_safe_push (ce_s, heap, *results, c2);
temp = temp->next;
}
while (temp);
@@ -2981,7 +2980,7 @@ get_constraint_for_ptr_offset (tree ptr, tree offset,
c2.var = temp->next->id;
c2.type = ADDRESSOF;
c2.offset = 0;
- VEC_safe_push (ce_s, heap, *results, &c2);
+ VEC_safe_push (ce_s, heap, *results, c2);
}
c.var = temp->id;
c.offset = 0;
@@ -3025,7 +3024,7 @@ get_constraint_for_component_ref (tree t, VEC(ce_s, heap) **results,
temp.offset = 0;
temp.var = integer_id;
temp.type = SCALAR;
- VEC_safe_push (ce_s, heap, *results, &temp);
+ VEC_safe_push (ce_s, heap, *results, temp);
return;
}
@@ -3047,7 +3046,7 @@ get_constraint_for_component_ref (tree t, VEC(ce_s, heap) **results,
temp.offset = 0;
temp.var = anything_id;
temp.type = ADDRESSOF;
- VEC_safe_push (ce_s, heap, *results, &temp);
+ VEC_safe_push (ce_s, heap, *results, temp);
return;
}
}
@@ -3088,7 +3087,7 @@ get_constraint_for_component_ref (tree t, VEC(ce_s, heap) **results,
bitpos, bitmaxsize))
{
cexpr.var = curr->id;
- VEC_safe_push (ce_s, heap, *results, &cexpr);
+ VEC_safe_push (ce_s, heap, *results, cexpr);
if (address_p)
break;
}
@@ -3103,7 +3102,7 @@ get_constraint_for_component_ref (tree t, VEC(ce_s, heap) **results,
while (curr->next != NULL)
curr = curr->next;
cexpr.var = curr->id;
- VEC_safe_push (ce_s, heap, *results, &cexpr);
+ VEC_safe_push (ce_s, heap, *results, cexpr);
}
else if (VEC_length (ce_s, *results) == 0)
/* Assert that we found *some* field there. The user couldn't be
@@ -3116,7 +3115,7 @@ get_constraint_for_component_ref (tree t, VEC(ce_s, heap) **results,
cexpr.type = SCALAR;
cexpr.var = anything_id;
cexpr.offset = 0;
- VEC_safe_push (ce_s, heap, *results, &cexpr);
+ VEC_safe_push (ce_s, heap, *results, cexpr);
}
}
else if (bitmaxsize == 0)
@@ -3240,7 +3239,7 @@ get_constraint_for_1 (tree t, VEC (ce_s, heap) **results, bool address_p,
temp.var = nonlocal_id;
temp.type = ADDRESSOF;
temp.offset = 0;
- VEC_safe_push (ce_s, heap, *results, &temp);
+ VEC_safe_push (ce_s, heap, *results, temp);
return;
}
@@ -3250,7 +3249,7 @@ get_constraint_for_1 (tree t, VEC (ce_s, heap) **results, bool address_p,
temp.var = readonly_id;
temp.type = SCALAR;
temp.offset = 0;
- VEC_safe_push (ce_s, heap, *results, &temp);
+ VEC_safe_push (ce_s, heap, *results, temp);
return;
}
@@ -3311,7 +3310,7 @@ get_constraint_for_1 (tree t, VEC (ce_s, heap) **results, bool address_p,
if (curr->offset - vi->offset < size)
{
cs.var = curr->id;
- VEC_safe_push (ce_s, heap, *results, &cs);
+ VEC_safe_push (ce_s, heap, *results, cs);
}
else
break;
@@ -3353,7 +3352,7 @@ get_constraint_for_1 (tree t, VEC (ce_s, heap) **results, bool address_p,
unsigned j;
get_constraint_for_1 (val, &tmp, address_p, lhs_p);
FOR_EACH_VEC_ELT (ce_s, tmp, j, rhsp)
- VEC_safe_push (ce_s, heap, *results, rhsp);
+ VEC_safe_push (ce_s, heap, *results, *rhsp);
VEC_truncate (ce_s, tmp, 0);
}
VEC_free (ce_s, heap, tmp);
@@ -3377,7 +3376,7 @@ get_constraint_for_1 (tree t, VEC (ce_s, heap) **results, bool address_p,
temp.type = ADDRESSOF;
temp.var = nonlocal_id;
temp.offset = 0;
- VEC_safe_push (ce_s, heap, *results, &temp);
+ VEC_safe_push (ce_s, heap, *results, temp);
return;
}
default:;
@@ -3387,7 +3386,7 @@ get_constraint_for_1 (tree t, VEC (ce_s, heap) **results, bool address_p,
temp.type = ADDRESSOF;
temp.var = anything_id;
temp.offset = 0;
- VEC_safe_push (ce_s, heap, *results, &temp);
+ VEC_safe_push (ce_s, heap, *results, temp);
}
/* Given a gimple tree T, return the constraint expression vector for it. */
@@ -3744,29 +3743,43 @@ handle_rhs_call (gimple stmt, VEC(ce_s, heap) **results)
/* As we compute ESCAPED context-insensitive we do not gain
any precision with just EAF_NOCLOBBER but not EAF_NOESCAPE
set. The argument would still get clobbered through the
- escape solution.
- ??? We might get away with less (and more precise) constraints
- if using a temporary for transitively closing things. */
+ escape solution. */
if ((flags & EAF_NOCLOBBER)
&& (flags & EAF_NOESCAPE))
{
varinfo_t uses = get_call_use_vi (stmt);
if (!(flags & EAF_DIRECT))
- make_transitive_closure_constraints (uses);
- make_constraint_to (uses->id, arg);
+ {
+ varinfo_t tem = new_var_info (NULL_TREE, "callarg");
+ make_constraint_to (tem->id, arg);
+ make_transitive_closure_constraints (tem);
+ make_copy_constraint (uses, tem->id);
+ }
+ else
+ make_constraint_to (uses->id, arg);
returns_uses = true;
}
else if (flags & EAF_NOESCAPE)
{
+ struct constraint_expr lhs, rhs;
varinfo_t uses = get_call_use_vi (stmt);
varinfo_t clobbers = get_call_clobber_vi (stmt);
+ varinfo_t tem = new_var_info (NULL_TREE, "callarg");
+ make_constraint_to (tem->id, arg);
if (!(flags & EAF_DIRECT))
- {
- make_transitive_closure_constraints (uses);
- make_transitive_closure_constraints (clobbers);
- }
- make_constraint_to (uses->id, arg);
- make_constraint_to (clobbers->id, arg);
+ make_transitive_closure_constraints (tem);
+ make_copy_constraint (uses, tem->id);
+ make_copy_constraint (clobbers, tem->id);
+ /* Add *tem = nonlocal, do not add *tem = callused as
+ EAF_NOESCAPE parameters do not escape to other parameters
+ and all other uses appear in NONLOCAL as well. */
+ lhs.type = DEREF;
+ lhs.var = tem->id;
+ lhs.offset = 0;
+ rhs.type = SCALAR;
+ rhs.var = nonlocal_id;
+ rhs.offset = 0;
+ process_constraint (new_constraint (lhs, rhs));
returns_uses = true;
}
else
@@ -3780,7 +3793,7 @@ handle_rhs_call (gimple stmt, VEC(ce_s, heap) **results)
rhsc.var = get_call_use_vi (stmt)->id;
rhsc.offset = 0;
rhsc.type = SCALAR;
- VEC_safe_push (ce_s, heap, *results, &rhsc);
+ VEC_safe_push (ce_s, heap, *results, rhsc);
}
/* The static chain escapes as well. */
@@ -3807,7 +3820,7 @@ handle_rhs_call (gimple stmt, VEC(ce_s, heap) **results)
rhsc.var = nonlocal_id;
rhsc.offset = 0;
rhsc.type = SCALAR;
- VEC_safe_push (ce_s, heap, *results, &rhsc);
+ VEC_safe_push (ce_s, heap, *results, rhsc);
}
/* For non-IPA mode, generate constraints necessary for a call
@@ -3832,7 +3845,7 @@ handle_lhs_call (gimple stmt, tree lhs, int flags, VEC(ce_s, heap) *rhsc,
tmpc.var = escaped_id;
tmpc.offset = 0;
tmpc.type = SCALAR;
- VEC_safe_push (ce_s, heap, lhsc, &tmpc);
+ VEC_safe_push (ce_s, heap, lhsc, tmpc);
}
/* If the call returns an argument unmodified override the rhs
@@ -3867,7 +3880,7 @@ handle_lhs_call (gimple stmt, tree lhs, int flags, VEC(ce_s, heap) *rhsc,
tmpc.var = vi->id;
tmpc.offset = 0;
tmpc.type = ADDRESSOF;
- VEC_safe_push (ce_s, heap, rhsc, &tmpc);
+ VEC_safe_push (ce_s, heap, rhsc, tmpc);
process_all_all_constraints (lhsc, rhsc);
VEC_free (ce_s, heap, rhsc);
}
@@ -3896,7 +3909,7 @@ handle_const_call (gimple stmt, VEC(ce_s, heap) **results)
rhsc.var = uses->id;
rhsc.offset = 0;
rhsc.type = SCALAR;
- VEC_safe_push (ce_s, heap, *results, &rhsc);
+ VEC_safe_push (ce_s, heap, *results, rhsc);
}
/* May return arguments. */
@@ -3908,7 +3921,7 @@ handle_const_call (gimple stmt, VEC(ce_s, heap) **results)
struct constraint_expr *argp;
get_constraint_for_rhs (arg, &argc);
FOR_EACH_VEC_ELT (ce_s, argc, i, argp)
- VEC_safe_push (ce_s, heap, *results, argp);
+ VEC_safe_push (ce_s, heap, *results, *argp);
VEC_free(ce_s, heap, argc);
}
@@ -3916,7 +3929,7 @@ handle_const_call (gimple stmt, VEC(ce_s, heap) **results)
rhsc.var = nonlocal_id;
rhsc.offset = 0;
rhsc.type = ADDRESSOF;
- VEC_safe_push (ce_s, heap, *results, &rhsc);
+ VEC_safe_push (ce_s, heap, *results, rhsc);
}
/* For non-IPA mode, generate constraints necessary for a call to a
@@ -3958,12 +3971,12 @@ handle_pure_call (gimple stmt, VEC(ce_s, heap) **results)
rhsc.var = uses->id;
rhsc.offset = 0;
rhsc.type = SCALAR;
- VEC_safe_push (ce_s, heap, *results, &rhsc);
+ VEC_safe_push (ce_s, heap, *results, rhsc);
}
rhsc.var = nonlocal_id;
rhsc.offset = 0;
rhsc.type = SCALAR;
- VEC_safe_push (ce_s, heap, *results, &rhsc);
+ VEC_safe_push (ce_s, heap, *results, rhsc);
}
@@ -4399,7 +4412,7 @@ find_func_aliases_for_call (gimple t)
&& DECL_BY_REFERENCE (DECL_RESULT (fndecl)))
{
VEC(ce_s, heap) *tem = NULL;
- VEC_safe_push (ce_s, heap, tem, &rhs);
+ VEC_safe_push (ce_s, heap, tem, rhs);
do_deref (&tem);
rhs = VEC_index (ce_s, tem, 0);
VEC_free(ce_s, heap, tem);
@@ -4536,7 +4549,7 @@ find_func_aliases (gimple origt)
get_constraint_for_rhs (gimple_assign_rhs2 (t), &rhsc);
get_constraint_for_rhs (gimple_assign_rhs3 (t), &tmp);
FOR_EACH_VEC_ELT (ce_s, tmp, i, rhsp)
- VEC_safe_push (ce_s, heap, rhsc, rhsp);
+ VEC_safe_push (ce_s, heap, rhsc, *rhsp);
VEC_free (ce_s, heap, tmp);
}
else if (truth_value_p (code))
@@ -4554,7 +4567,7 @@ find_func_aliases (gimple origt)
{
get_constraint_for_rhs (gimple_op (t, i), &tmp);
FOR_EACH_VEC_ELT (ce_s, tmp, j, rhsp)
- VEC_safe_push (ce_s, heap, rhsc, rhsp);
+ VEC_safe_push (ce_s, heap, rhsc, *rhsp);
VEC_truncate (ce_s, tmp, 0);
}
VEC_free (ce_s, heap, tmp);
@@ -5178,13 +5191,8 @@ push_fields_onto_fieldstack (tree type, VEC(fieldoff_s,heap) **fieldstack,
if (!pair
&& offset + foff != 0)
{
- pair = VEC_safe_push (fieldoff_s, heap, *fieldstack, NULL);
- pair->offset = 0;
- pair->size = offset + foff;
- pair->has_unknown_size = false;
- pair->must_have_pointers = false;
- pair->may_have_pointers = false;
- pair->only_restrict_pointers = false;
+ fieldoff_s e = {0, offset + foff, false, false, false, false};
+ pair = VEC_safe_push (fieldoff_s, heap, *fieldstack, e);
}
if (!DECL_SIZE (field)
@@ -5204,19 +5212,20 @@ push_fields_onto_fieldstack (tree type, VEC(fieldoff_s,heap) **fieldstack,
}
else
{
- pair = VEC_safe_push (fieldoff_s, heap, *fieldstack, NULL);
- pair->offset = offset + foff;
- pair->has_unknown_size = has_unknown_size;
+ fieldoff_s e;
+ e.offset = offset + foff;
+ e.has_unknown_size = has_unknown_size;
if (!has_unknown_size)
- pair->size = TREE_INT_CST_LOW (DECL_SIZE (field));
+ e.size = TREE_INT_CST_LOW (DECL_SIZE (field));
else
- pair->size = -1;
- pair->must_have_pointers = must_have_pointers_p;
- pair->may_have_pointers = true;
- pair->only_restrict_pointers
+ e.size = -1;
+ e.must_have_pointers = must_have_pointers_p;
+ e.may_have_pointers = true;
+ e.only_restrict_pointers
= (!has_unknown_size
&& POINTER_TYPE_P (TREE_TYPE (field))
&& TYPE_RESTRICT (TREE_TYPE (field)));
+ VEC_safe_push (fieldoff_s, heap, *fieldstack, e);
}
}
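Note: the handle_rhs_call rework resolves the "??? We might get away with less (and more precise) constraints" remark deleted above. Rather than taking the transitive closure directly on the call's use/clobber variables, each EAF_NOESCAPE argument is funneled through a temporary; the closure applies to the temporary and only the closed set is copied over. The constraint shape, condensed from the hunk:

  varinfo_t tem = new_var_info (NULL_TREE, "callarg");
  make_constraint_to (tem->id, arg);            /* callarg = what ARG reaches */
  make_transitive_closure_constraints (tem);    /* close callarg transitively */
  make_copy_constraint (uses, tem->id);         /* uses gets the closed set only */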
diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c
index 30336a18e81..ba6d69af942 100644
--- a/gcc/tree-ssa-threadupdate.c
+++ b/gcc/tree-ssa-threadupdate.c
@@ -846,8 +846,9 @@ static bool
def_split_header_continue_p (const_basic_block bb, const void *data)
{
const_basic_block new_header = (const_basic_block) data;
- return (bb->loop_father == new_header->loop_father
- && bb != new_header);
+ return (bb != new_header
+ && (loop_depth (bb->loop_father)
+ >= loop_depth (new_header->loop_father)));
}
/* Thread jumps through the header of LOOP. Returns true if cfg changes.
@@ -1031,10 +1032,11 @@ thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
bblocks, loop->num_nodes, tgt_bb);
for (i = 0; i < nblocks; i++)
- {
- remove_bb_from_loops (bblocks[i]);
- add_bb_to_loop (bblocks[i], loop_outer (loop));
- }
+ if (bblocks[i]->loop_father == loop)
+ {
+ remove_bb_from_loops (bblocks[i]);
+ add_bb_to_loop (bblocks[i], loop_outer (loop));
+ }
free (bblocks);
/* If the new header has multiple latches mark it so. */
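Note: with the new predicate, dfs_enumerate_from no longer stops at blocks of loops nested inside LOOP (their loop_father differs from the new header's, but their loop_depth is greater or equal), so the enumeration covers them too; the loop over bblocks therefore re-parents only the blocks that belong to LOOP itself:

  for (i = 0; i < nblocks; i++)
    if (bblocks[i]->loop_father == loop)  /* skip blocks of inner loops */
      {
        remove_bb_from_loops (bblocks[i]);
        add_bb_to_loop (bblocks[i], loop_outer (loop));
      }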
diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c
index 1746c8f7c91..7ba11e193d6 100644
--- a/gcc/tree-ssa.c
+++ b/gcc/tree-ssa.c
@@ -68,7 +68,7 @@ redirect_edge_var_map_add (edge e, tree result, tree def, source_location locus)
new_node.result = result;
new_node.locus = locus;
- VEC_safe_push (edge_var_map, heap, head, &new_node);
+ VEC_safe_push (edge_var_map, heap, head, new_node);
if (old_head != head)
{
/* The push did some reallocation. Update the pointer map. */
@@ -1833,10 +1833,9 @@ non_rewritable_mem_ref_base (tree ref)
|| TREE_CODE (TREE_TYPE (decl)) == COMPLEX_TYPE)
&& useless_type_conversion_p (TREE_TYPE (base),
TREE_TYPE (TREE_TYPE (decl)))
- && double_int_fits_in_uhwi_p (mem_ref_offset (base))
- && double_int_ucmp
- (tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (decl))),
- mem_ref_offset (base)) == 1
+ && mem_ref_offset (base).fits_uhwi ()
+ && tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
+ .ugt (mem_ref_offset (base))
&& multiple_of_p (sizetype, TREE_OPERAND (base, 1),
TYPE_SIZE_UNIT (TREE_TYPE (base))))
return NULL_TREE;
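Note: the rewritten guard in non_rewritable_mem_ref_base is the same test in the new double_int spelling: the MEM_REF offset must fit an unsigned HWI and lie strictly below the decl's size. Spelled out (base and decl as in the hunk):

  double_int off  = mem_ref_offset (base);
  double_int size = tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (decl)));
  if (off.fits_uhwi ()    /* was: double_int_fits_in_uhwi_p (off)  */
      && size.ugt (off))  /* was: double_int_ucmp (size, off) == 1  */
    ;  /* the access stays inside DECL */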
diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c
index 87baefc07cf..bbbd3caca02 100644
--- a/gcc/tree-switch-conversion.c
+++ b/gcc/tree-switch-conversion.c
@@ -868,13 +868,11 @@ build_constructors (gimple swtch, struct switch_conv_info *info)
int k;
for (k = 0; k < info->phi_count; k++)
{
- constructor_elt *elt;
+ constructor_elt elt;
- elt = VEC_quick_push (constructor_elt,
- info->constructors[k], NULL);
- elt->index = int_const_binop (MINUS_EXPR, pos,
- info->range_min);
- elt->value = info->default_values[k];
+ elt.index = int_const_binop (MINUS_EXPR, pos, info->range_min);
+ elt.value = info->default_values[k];
+ VEC_quick_push (constructor_elt, info->constructors[k], elt);
}
pos = int_const_binop (PLUS_EXPR, pos, integer_one_node);
@@ -896,12 +894,11 @@ build_constructors (gimple swtch, struct switch_conv_info *info)
do
{
- constructor_elt *elt;
+ constructor_elt elt;
- elt = VEC_quick_push (constructor_elt,
- info->constructors[j], NULL);
- elt->index = int_const_binop (MINUS_EXPR, pos, info->range_min);
- elt->value = val;
+ elt.index = int_const_binop (MINUS_EXPR, pos, info->range_min);
+ elt.value = val;
+ VEC_quick_push (constructor_elt, info->constructors[j], elt);
pos = int_const_binop (PLUS_EXPR, pos, integer_one_node);
} while (!tree_int_cst_lt (high, pos)
@@ -970,17 +967,14 @@ array_value_type (gimple swtch, tree type, int num,
if (prec > HOST_BITS_PER_WIDE_INT)
return type;
- if (sign >= 0
- && double_int_equal_p (cst, double_int_zext (cst, prec)))
+ if (sign >= 0 && cst == cst.zext (prec))
{
- if (sign == 0
- && double_int_equal_p (cst, double_int_sext (cst, prec)))
+ if (sign == 0 && cst == cst.sext (prec))
break;
sign = 1;
break;
}
- if (sign <= 0
- && double_int_equal_p (cst, double_int_sext (cst, prec)))
+ if (sign <= 0 && cst == cst.sext (prec))
{
sign = -1;
break;
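Note: with double_int's operator==, the fit tests in array_value_type read naturally: a constant fits in PREC bits as unsigned iff zero-extension from PREC bits is a no-op, and as signed iff sign-extension is. A sketch (elt standing in for a case value):

  double_int cst = tree_to_double_int (elt);
  bool fits_unsigned = (cst == cst.zext (prec));
  bool fits_signed   = (cst == cst.sext (prec));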
diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c
index 5b583124cab..b217b1123e9 100644
--- a/gcc/tree-vect-generic.c
+++ b/gcc/tree-vect-generic.c
@@ -252,9 +252,8 @@ expand_vector_piecewise (gimple_stmt_iterator *gsi, elem_op_func f,
i += delta, index = int_const_binop (PLUS_EXPR, index, part_width))
{
tree result = f (gsi, inner_type, a, b, index, part_width, code);
- constructor_elt *ce = VEC_quick_push (constructor_elt, v, NULL);
- ce->index = NULL_TREE;
- ce->value = result;
+ constructor_elt ce = {NULL_TREE, result};
+ VEC_quick_push (constructor_elt, v, ce);
}
return build_constructor (type, v);
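Note: since VEC_quick_push now copies its argument, the constructor_elt can be built with an aggregate initializer instead of being filled in through the pointer the old API returned; the initializer order matches the field order of constructor_elt:

  constructor_elt ce = {NULL_TREE, result};  /* {index, value} */
  VEC_quick_push (constructor_elt, v, ce);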
diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c
index 4df4800ab58..508dff0f714 100644
--- a/gcc/tree-vect-loop-manip.c
+++ b/gcc/tree-vect-loop-manip.c
@@ -214,7 +214,7 @@ adjust_debug_stmts (tree from, tree to, basic_block bb)
ai.bb = bb;
if (adjust_vec)
- VEC_safe_push (adjust_info, stack, adjust_vec, &ai);
+ VEC_safe_push (adjust_info, stack, adjust_vec, ai);
else
adjust_debug_stmts_now (&ai);
}
@@ -1908,7 +1908,7 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1;
if (check_profitability)
max_iter = MAX (max_iter, (int) th);
- record_niter_bound (new_loop, shwi_to_double_int (max_iter), false, true);
+ record_niter_bound (new_loop, double_int::from_shwi (max_iter), false, true);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Setting upper bound of nb iterations for epilogue "
"loop to %d\n", max_iter);
@@ -2130,7 +2130,7 @@ vect_do_peeling_for_alignment (loop_vec_info loop_vinfo,
max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1;
if (check_profitability)
max_iter = MAX (max_iter, (int) th);
- record_niter_bound (new_loop, shwi_to_double_int (max_iter), false, true);
+ record_niter_bound (new_loop, double_int::from_shwi (max_iter), false, true);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Setting upper bound of nb iterations for prologue "
"loop to %d\n", max_iter);
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index b2d0a6b4042..11dbdfb5a02 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -1099,7 +1099,7 @@ vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
tmp_stmts = VEC_alloc (gimple, heap, group_size);
for (i = 0; i < group_size; i++)
- VEC_safe_push (gimple, heap, tmp_stmts, (gimple)NULL);
+ VEC_safe_push (gimple, heap, tmp_stmts, NULL);
FOR_EACH_VEC_ELT (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt)
{
@@ -2663,7 +2663,7 @@ vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
stmts later. */
for (i = VEC_length (gimple, SLP_TREE_VEC_STMTS (node));
i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (node), (gimple)NULL);
+ VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (node), NULL);
perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
for (i = 0; i < ncopies; i++)
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index dc000d6bc13..d66a81c408f 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -94,7 +94,7 @@ add_stmt_info_to_vec (stmt_vector_for_cost *stmt_cost_vec, int count,
si.kind = kind;
si.stmt = stmt;
si.misalign = misalign;
- VEC_safe_push (stmt_info_for_cost, heap, *stmt_cost_vec, &si);
+ VEC_safe_push (stmt_info_for_cost, heap, *stmt_cost_vec, si);
}
/************************************************************************
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 067b60f168f..c0a4050a812 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -1961,9 +1961,9 @@ zero_nonzero_bits_from_vr (value_range_t *vr,
{
double_int dmin = tree_to_double_int (vr->min);
double_int dmax = tree_to_double_int (vr->max);
- double_int xor_mask = double_int_xor (dmin, dmax);
- *may_be_nonzero = double_int_ior (dmin, dmax);
- *must_be_nonzero = double_int_and (dmin, dmax);
+ double_int xor_mask = dmin ^ dmax;
+ *may_be_nonzero = dmin | dmax;
+ *must_be_nonzero = dmin & dmax;
if (xor_mask.high != 0)
{
unsigned HOST_WIDE_INT mask
@@ -2014,16 +2014,14 @@ ranges_from_anti_range (value_range_t *ar,
vr0->min = vrp_val_min (type);
vr0->max
= double_int_to_tree (type,
- double_int_sub (tree_to_double_int (ar->min),
- double_int_one));
+ tree_to_double_int (ar->min) - double_int_one);
}
if (!vrp_val_is_max (ar->max))
{
vr1->type = VR_RANGE;
vr1->min
= double_int_to_tree (type,
- double_int_add (tree_to_double_int (ar->max),
- double_int_one));
+ tree_to_double_int (ar->max) + double_int_one);
vr1->max = vrp_val_max (type);
}
if (vr0->type == VR_UNDEFINED)
@@ -2068,7 +2066,8 @@ extract_range_from_multiplicative_op_1 (value_range_t *vr,
|| code == CEIL_DIV_EXPR
|| code == EXACT_DIV_EXPR
|| code == ROUND_DIV_EXPR
- || code == RSHIFT_EXPR);
+ || code == RSHIFT_EXPR
+ || code == LSHIFT_EXPR);
gcc_assert ((vr0->type == VR_RANGE
|| (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
&& vr0->type == vr1->type);
@@ -2193,9 +2192,9 @@ static int
quad_int_cmp (double_int l0, double_int h0,
double_int l1, double_int h1, bool uns)
{
- int c = double_int_cmp (h0, h1, uns);
+ int c = h0.cmp (h1, uns);
if (c != 0) return c;
- return double_int_ucmp (l0, l1);
+ return l0.ucmp (l1);
}
static void
@@ -2389,37 +2388,33 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
double_int max1 = tree_to_double_int (vr1.max);
bool uns = TYPE_UNSIGNED (expr_type);
double_int type_min
- = double_int_min_value (TYPE_PRECISION (expr_type), uns);
+ = double_int::min_value (TYPE_PRECISION (expr_type), uns);
double_int type_max
- = double_int_max_value (TYPE_PRECISION (expr_type), uns);
+ = double_int::max_value (TYPE_PRECISION (expr_type), uns);
double_int dmin, dmax;
int min_ovf = 0;
int max_ovf = 0;
if (code == PLUS_EXPR)
{
- dmin = double_int_add (min0, min1);
- dmax = double_int_add (max0, max1);
+ dmin = min0 + min1;
+ dmax = max0 + max1;
/* Check for overflow in double_int. */
- if (double_int_cmp (min1, double_int_zero, uns)
- != double_int_cmp (dmin, min0, uns))
- min_ovf = double_int_cmp (min0, dmin, uns);
- if (double_int_cmp (max1, double_int_zero, uns)
- != double_int_cmp (dmax, max0, uns))
- max_ovf = double_int_cmp (max0, dmax, uns);
+ if (min1.cmp (double_int_zero, uns) != dmin.cmp (min0, uns))
+ min_ovf = min0.cmp (dmin, uns);
+ if (max1.cmp (double_int_zero, uns) != dmax.cmp (max0, uns))
+ max_ovf = max0.cmp (dmax, uns);
}
else /* if (code == MINUS_EXPR) */
{
- dmin = double_int_sub (min0, max1);
- dmax = double_int_sub (max0, min1);
-
- if (double_int_cmp (double_int_zero, max1, uns)
- != double_int_cmp (dmin, min0, uns))
- min_ovf = double_int_cmp (min0, max1, uns);
- if (double_int_cmp (double_int_zero, min1, uns)
- != double_int_cmp (dmax, max0, uns))
- max_ovf = double_int_cmp (max0, min1, uns);
+ dmin = min0 - max1;
+ dmax = max0 - min1;
+
+ if (double_int_zero.cmp (max1, uns) != dmin.cmp (min0, uns))
+ min_ovf = min0.cmp (max1, uns);
+ if (double_int_zero.cmp (min1, uns) != dmax.cmp (max0, uns))
+ max_ovf = max0.cmp (min1, uns);
}
/* For non-wrapping arithmetic look at possibly smaller
@@ -2435,16 +2430,16 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
/* Check for type overflow. */
if (min_ovf == 0)
{
- if (double_int_cmp (dmin, type_min, uns) == -1)
+ if (dmin.cmp (type_min, uns) == -1)
min_ovf = -1;
- else if (double_int_cmp (dmin, type_max, uns) == 1)
+ else if (dmin.cmp (type_max, uns) == 1)
min_ovf = 1;
}
if (max_ovf == 0)
{
- if (double_int_cmp (dmax, type_min, uns) == -1)
+ if (dmax.cmp (type_min, uns) == -1)
max_ovf = -1;
- else if (double_int_cmp (dmax, type_max, uns) == 1)
+ else if (dmax.cmp (type_max, uns) == 1)
max_ovf = 1;
}
@@ -2453,9 +2448,9 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
/* If overflow wraps, truncate the values and adjust the
range kind and bounds appropriately. */
double_int tmin
- = double_int_ext (dmin, TYPE_PRECISION (expr_type), uns);
+ = dmin.ext (TYPE_PRECISION (expr_type), uns);
double_int tmax
- = double_int_ext (dmax, TYPE_PRECISION (expr_type), uns);
+ = dmax.ext (TYPE_PRECISION (expr_type), uns);
if (min_ovf == max_ovf)
{
/* No overflow or both overflow or underflow. The
@@ -2479,16 +2474,16 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
gcc_assert ((min_ovf == -1 && max_ovf == 0)
|| (max_ovf == 1 && min_ovf == 0));
type = VR_ANTI_RANGE;
- tmin = double_int_add (tmax, double_int_one);
- if (double_int_cmp (tmin, tmax, uns) < 0)
+ tmin = tmax + double_int_one;
+ if (tmin.cmp (tmax, uns) < 0)
covers = true;
- tmax = double_int_add (tem, double_int_minus_one);
+ tmax = tem + double_int_minus_one;
- if (double_int_cmp (tmax, tem, uns) > 0)
+ if (tmax.cmp (tem, uns) > 0)
covers = true;
/* If the anti-range would cover nothing, drop to varying.
Likewise if the anti-range bounds are outside of the
types values. */
- if (covers || double_int_cmp (tmin, tmax, uns) > 0)
+ if (covers || tmin.cmp (tmax, uns) > 0)
{
set_value_range_to_varying (vr);
return;
@@ -2605,8 +2600,8 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
prod2l, prod2h, prod3l, prod3h;
bool uns0, uns1, uns;
- sizem1 = double_int_max_value (TYPE_PRECISION (expr_type), true);
- size = double_int_add (sizem1, double_int_one);
+ sizem1 = double_int::max_value (TYPE_PRECISION (expr_type), true);
+ size = sizem1 + double_int_one;
min0 = tree_to_double_int (vr0.min);
max0 = tree_to_double_int (vr0.max);
@@ -2619,19 +2614,19 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
/* Canonicalize the intervals. */
if (TYPE_UNSIGNED (expr_type))
{
- double_int min2 = double_int_sub (size, min0);
- if (double_int_cmp (min2, max0, true) < 0)
+ double_int min2 = size - min0;
+ if (min2.cmp (max0, true) < 0)
{
- min0 = double_int_neg (min2);
- max0 = double_int_sub (max0, size);
+ min0 = -min2;
+ max0 -= size;
uns0 = false;
}
- min2 = double_int_sub (size, min1);
- if (double_int_cmp (min2, max1, true) < 0)
+ min2 = size - min1;
+ if (min2.cmp (max1, true) < 0)
{
- min1 = double_int_neg (min2);
- max1 = double_int_sub (max1, size);
+ min1 = -min2;
+ max1 -= size;
uns1 = false;
}
}
@@ -2641,37 +2636,37 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
min1.low, min1.high,
&prod0l.low, &prod0l.high,
&prod0h.low, &prod0h.high, true);
- if (!uns0 && double_int_negative_p (min0))
- prod0h = double_int_sub (prod0h, min1);
- if (!uns1 && double_int_negative_p (min1))
- prod0h = double_int_sub (prod0h, min0);
+ if (!uns0 && min0.is_negative ())
+ prod0h -= min1;
+ if (!uns1 && min1.is_negative ())
+ prod0h -= min0;
mul_double_wide_with_sign (min0.low, min0.high,
max1.low, max1.high,
&prod1l.low, &prod1l.high,
&prod1h.low, &prod1h.high, true);
- if (!uns0 && double_int_negative_p (min0))
- prod1h = double_int_sub (prod1h, max1);
- if (!uns1 && double_int_negative_p (max1))
- prod1h = double_int_sub (prod1h, min0);
+ if (!uns0 && min0.is_negative ())
+ prod1h -= max1;
+ if (!uns1 && max1.is_negative ())
+ prod1h -= min0;
mul_double_wide_with_sign (max0.low, max0.high,
min1.low, min1.high,
&prod2l.low, &prod2l.high,
&prod2h.low, &prod2h.high, true);
- if (!uns0 && double_int_negative_p (max0))
- prod2h = double_int_sub (prod2h, min1);
- if (!uns1 && double_int_negative_p (min1))
- prod2h = double_int_sub (prod2h, max0);
+ if (!uns0 && max0.is_negative ())
+ prod2h -= min1;
+ if (!uns1 && min1.is_negative ())
+ prod2h -= max0;
mul_double_wide_with_sign (max0.low, max0.high,
max1.low, max1.high,
&prod3l.low, &prod3l.high,
&prod3h.low, &prod3h.high, true);
- if (!uns0 && double_int_negative_p (max0))
- prod3h = double_int_sub (prod3h, max1);
- if (!uns1 && double_int_negative_p (max1))
- prod3h = double_int_sub (prod3h, max0);
+ if (!uns0 && max0.is_negative ())
+ prod3h -= max1;
+ if (!uns1 && max1.is_negative ())
+ prod3h -= max0;
/* Sort the 4 products. */
quad_int_pair_sort (&prod0l, &prod0h, &prod3l, &prod3h, uns);
@@ -2680,23 +2675,23 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
quad_int_pair_sort (&prod2l, &prod2h, &prod3l, &prod3h, uns);
/* Max - min. */
- if (double_int_zero_p (prod0l))
+ if (prod0l.is_zero ())
{
prod1l = double_int_zero;
- prod1h = double_int_neg (prod0h);
+ prod1h = -prod0h;
}
else
{
- prod1l = double_int_neg (prod0l);
- prod1h = double_int_not (prod0h);
+ prod1l = -prod0l;
+ prod1h = ~prod0h;
}
- prod2l = double_int_add (prod3l, prod1l);
- prod2h = double_int_add (prod3h, prod1h);
- if (double_int_ucmp (prod2l, prod3l) < 0)
- prod2h = double_int_add (prod2h, double_int_one); /* carry */
+ prod2l = prod3l + prod1l;
+ prod2h = prod3h + prod1h;
+ if (prod2l.ult (prod3l))
+ prod2h += double_int_one; /* carry */
- if (!double_int_zero_p (prod2h)
- || double_int_cmp (prod2l, sizem1, true) >= 0)
+ if (!prod2h.is_zero ()
+ || prod2l.cmp (sizem1, true) >= 0)
{
/* the range covers all values. */
set_value_range_to_varying (vr);
@@ -2755,11 +2750,9 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
vr1p.type = VR_RANGE;
vr1p.min
= double_int_to_tree (expr_type,
- double_int_lshift
- (double_int_one,
- TREE_INT_CST_LOW (vr1.min),
- TYPE_PRECISION (expr_type),
- false));
+ double_int_one
+ .llshift (TREE_INT_CST_LOW (vr1.min),
+ TYPE_PRECISION (expr_type)));
vr1p.max = vr1p.min;
/* We have to use a wrapping multiply though as signed overflow
on lshifts is implementation defined in C89. */
@@ -2770,6 +2763,27 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
flag_wrapv = saved_flag_wrapv;
return;
}
+ else if (code == LSHIFT_EXPR
+ && range_int_cst_p (&vr0))
+ {
+ int overflow_pos = TYPE_PRECISION (expr_type);
+ int bound_shift;
+ double_int bound;
+
+ if (!TYPE_UNSIGNED (expr_type))
+ overflow_pos -= 1;
+
+ bound_shift = overflow_pos - TREE_INT_CST_LOW (vr1.max);
+ bound = double_int_one.llshift (bound_shift,
+ TYPE_PRECISION (expr_type));
+ if (tree_to_double_int (vr0.max).ult (bound))
+ {
+ /* In the absence of overflow, (a << b) is equivalent
+ to (a * 2^b). */
+ extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
+ return;
+ }
+ }
}
set_value_range_to_varying (vr);
return;
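Note: the new LSHIFT_EXPR case reuses the multiplicative range logic, which is valid whenever the shift cannot wrap: if every a in VR0 lies below 2^(overflow_pos - b), then a << b equals a * 2^b exactly. A worked instance, assuming an 8-bit unsigned type with vr1.max == 3:

  /* overflow_pos = 8, bound_shift = 8 - 3 = 5, bound = 1 << 5 = 32.
     Every a in [0, 31] gives a << 3 <= 248 < 256, so no wrap occurs
     and the multiplicative range code applies.  */
  if (tree_to_double_int (vr0.max).ult (bound))
    extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);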
@@ -2903,9 +2917,8 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
{
double_int dmax;
min = double_int_to_tree (expr_type,
- double_int_and (must_be_nonzero0,
- must_be_nonzero1));
- dmax = double_int_and (may_be_nonzero0, may_be_nonzero1);
+ must_be_nonzero0 & must_be_nonzero1);
+ dmax = may_be_nonzero0 & may_be_nonzero1;
/* If both input ranges contain only negative values we can
truncate the result range maximum to the minimum of the
input range maxima. */
@@ -2913,19 +2926,19 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
&& tree_int_cst_sgn (vr0.max) < 0
&& tree_int_cst_sgn (vr1.max) < 0)
{
- dmax = double_int_min (dmax, tree_to_double_int (vr0.max),
+ dmax = dmax.min (tree_to_double_int (vr0.max),
TYPE_UNSIGNED (expr_type));
- dmax = double_int_min (dmax, tree_to_double_int (vr1.max),
+ dmax = dmax.min (tree_to_double_int (vr1.max),
TYPE_UNSIGNED (expr_type));
}
/* If either input range contains only non-negative values
we can truncate the result range maximum to the respective
maximum of the input range. */
if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
- dmax = double_int_min (dmax, tree_to_double_int (vr0.max),
+ dmax = dmax.min (tree_to_double_int (vr0.max),
TYPE_UNSIGNED (expr_type));
if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
- dmax = double_int_min (dmax, tree_to_double_int (vr1.max),
+ dmax = dmax.min (tree_to_double_int (vr1.max),
TYPE_UNSIGNED (expr_type));
max = double_int_to_tree (expr_type, dmax);
}
@@ -2933,9 +2946,8 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
{
double_int dmin;
max = double_int_to_tree (expr_type,
- double_int_ior (may_be_nonzero0,
- may_be_nonzero1));
- dmin = double_int_ior (must_be_nonzero0, must_be_nonzero1);
+ may_be_nonzero0 | may_be_nonzero1);
+ dmin = must_be_nonzero0 | must_be_nonzero1;
/* If the input ranges contain only positive values we can
truncate the minimum of the result range to the maximum
of the input range minima. */
@@ -2943,40 +2955,30 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
&& tree_int_cst_sgn (vr0.min) >= 0
&& tree_int_cst_sgn (vr1.min) >= 0)
{
- dmin = double_int_max (dmin, tree_to_double_int (vr0.min),
- TYPE_UNSIGNED (expr_type));
- dmin = double_int_max (dmin, tree_to_double_int (vr1.min),
- TYPE_UNSIGNED (expr_type));
+ dmin = dmin.max (tree_to_double_int (vr0.min),
+ TYPE_UNSIGNED (expr_type));
+ dmin = dmin.max (tree_to_double_int (vr1.min),
+ TYPE_UNSIGNED (expr_type));
}
/* If either input range contains only negative values
we can truncate the minimum of the result range to the
respective minimum range. */
if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
- dmin = double_int_max (dmin, tree_to_double_int (vr0.min),
- TYPE_UNSIGNED (expr_type));
+ dmin = dmin.max (tree_to_double_int (vr0.min),
+ TYPE_UNSIGNED (expr_type));
if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
- dmin = double_int_max (dmin, tree_to_double_int (vr1.min),
- TYPE_UNSIGNED (expr_type));
+ dmin = dmin.max (tree_to_double_int (vr1.min),
+ TYPE_UNSIGNED (expr_type));
min = double_int_to_tree (expr_type, dmin);
}
else if (code == BIT_XOR_EXPR)
{
double_int result_zero_bits, result_one_bits;
- result_zero_bits
- = double_int_ior (double_int_and (must_be_nonzero0,
- must_be_nonzero1),
- double_int_not
- (double_int_ior (may_be_nonzero0,
- may_be_nonzero1)));
- result_one_bits
- = double_int_ior (double_int_and
- (must_be_nonzero0,
- double_int_not (may_be_nonzero1)),
- double_int_and
- (must_be_nonzero1,
- double_int_not (may_be_nonzero0)));
- max = double_int_to_tree (expr_type,
- double_int_not (result_zero_bits));
+ result_zero_bits = (must_be_nonzero0 & must_be_nonzero1)
+ | ~(may_be_nonzero0 | may_be_nonzero1);
+ result_one_bits = must_be_nonzero0.and_not (may_be_nonzero1)
+ | must_be_nonzero1.and_not (may_be_nonzero0);
+ max = double_int_to_tree (expr_type, ~result_zero_bits);
min = double_int_to_tree (expr_type, result_one_bits);
/* If the range has all positive or all negative values the
result is better than VARYING. */
@@ -3606,10 +3608,10 @@ adjust_range_with_scev (value_range_t *vr, struct loop *loop,
value_range_t maxvr = VR_INITIALIZER;
double_int dtmp;
bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
- int overflow = 0;
+ bool overflow = false;
- dtmp = double_int_mul_with_sign (tree_to_double_int (step), nit,
- unsigned_p, &overflow);
+ dtmp = tree_to_double_int (step)
+ .mul_with_sign (nit, unsigned_p, &overflow);
/* If the multiplication overflowed we can't do a meaningful
adjustment. Likewise if the result doesn't fit in the type
of the induction variable. For a signed type we have to
@@ -4519,19 +4521,19 @@ masked_increment (double_int val, double_int mask, double_int sgnbit,
double_int bit = double_int_one, res;
unsigned int i;
- val = double_int_xor (val, sgnbit);
- for (i = 0; i < prec; i++, bit = double_int_add (bit, bit))
+ val ^= sgnbit;
+ for (i = 0; i < prec; i++, bit += bit)
{
res = mask;
- if (double_int_zero_p (double_int_and (res, bit)))
+ if ((res & bit).is_zero ())
continue;
- res = double_int_sub (bit, double_int_one);
- res = double_int_and_not (double_int_add (val, bit), res);
- res = double_int_and (res, mask);
- if (double_int_ucmp (res, val) > 0)
- return double_int_xor (res, sgnbit);
+ res = bit - double_int_one;
+ res = (val + bit).and_not (res);
+ res &= mask;
+ if (res.ugt (val))
+ return res ^ sgnbit;
}
- return double_int_xor (val, sgnbit);
+ return val ^ sgnbit;
}
/* Try to register an edge assertion for SSA name NAME on edge E for
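Note: masked_increment, rewritten above in operator style, computes the smallest value greater than VAL whose set bits all lie inside MASK (SGNBIT biases the search for signed comparisons); register_edge_assert_for_2 uses it to tighten ranges derived from (X & CST2) comparisons. A worked trace, assuming prec = 8, val = 0x08, mask = 0xcc, sgnbit = 0:

  /* bit = 1, 2: not set in mask (0xcc has bits 2, 3, 6 and 7 set), skip.
     bit = 4: res = (0x08 + 0x04) with bits below bit 2 cleared = 0x0c;
              0x0c & 0xcc = 0x0c > 0x08, so return 0x0c.
     0x0c is the smallest value above 0x08 that uses only mask bits.  */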
@@ -4735,7 +4737,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
&& live_on_edge (e, name2)
&& !has_single_use (name2))
{
- mask = double_int_mask (tree_low_cst (cst2, 1));
+ mask = double_int::mask (tree_low_cst (cst2, 1));
val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
}
}
@@ -4766,9 +4768,9 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
else
{
double_int maxval
- = double_int_max_value (prec, TYPE_UNSIGNED (TREE_TYPE (val)));
- mask = double_int_ior (tree_to_double_int (val2), mask);
- if (double_int_equal_p (mask, maxval))
+ = double_int::max_value (prec, TYPE_UNSIGNED (TREE_TYPE (val)));
+ mask |= tree_to_double_int (val2);
+ if (mask == maxval)
new_val = NULL_TREE;
else
new_val = double_int_to_tree (TREE_TYPE (val2), mask);
@@ -4835,12 +4837,12 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
bool valid_p = false, valn = false, cst2n = false;
enum tree_code ccode = comp_code;
- valv = double_int_zext (tree_to_double_int (val), prec);
- cst2v = double_int_zext (tree_to_double_int (cst2), prec);
+ valv = tree_to_double_int (val).zext (prec);
+ cst2v = tree_to_double_int (cst2).zext (prec);
if (!TYPE_UNSIGNED (TREE_TYPE (val)))
{
- valn = double_int_negative_p (double_int_sext (valv, prec));
- cst2n = double_int_negative_p (double_int_sext (cst2v, prec));
+ valn = valv.sext (prec).is_negative ();
+ cst2n = cst2v.sext (prec).is_negative ();
}
/* If CST2 doesn't have most significant bit set,
but VAL is negative, we have comparison like
@@ -4848,12 +4850,10 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
if (!cst2n && valn)
ccode = ERROR_MARK;
if (cst2n)
- sgnbit = double_int_zext (double_int_lshift (double_int_one,
- prec - 1, prec,
- false), prec);
+ sgnbit = double_int_one.llshift (prec - 1, prec).zext (prec);
else
sgnbit = double_int_zero;
- minv = double_int_and (valv, cst2v);
+ minv = valv & cst2v;
switch (ccode)
{
case EQ_EXPR:
@@ -4861,15 +4861,15 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
(should be equal to VAL, otherwise we probably should
have folded the comparison into false) and
maximum unsigned value is VAL | ~CST2. */
- maxv = double_int_ior (valv, double_int_not (cst2v));
- maxv = double_int_zext (maxv, prec);
+ maxv = valv | ~cst2v;
+ maxv = maxv.zext (prec);
valid_p = true;
break;
case NE_EXPR:
- tem = double_int_ior (valv, double_int_not (cst2v));
- tem = double_int_zext (tem, prec);
+ tem = valv | ~cst2v;
+ tem = tem.zext (prec);
/* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
- if (double_int_zero_p (valv))
+ if (valv.is_zero ())
{
cst2n = false;
sgnbit = double_int_zero;
@@ -4877,7 +4877,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
}
/* If (VAL | ~CST2) is all ones, handle it as
(X & CST2) < VAL. */
- if (double_int_equal_p (tem, double_int_mask (prec)))
+ if (tem == double_int::mask (prec))
{
cst2n = false;
valn = false;
@@ -4885,19 +4885,17 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
goto lt_expr;
}
if (!cst2n
- && double_int_negative_p (double_int_sext (cst2v, prec)))
- sgnbit = double_int_zext (double_int_lshift (double_int_one,
- prec - 1, prec,
- false), prec);
- if (!double_int_zero_p (sgnbit))
+ && cst2v.sext (prec).is_negative ())
+ sgnbit = double_int_one.llshift (prec - 1, prec).zext (prec);
+ if (!sgnbit.is_zero ())
{
- if (double_int_equal_p (valv, sgnbit))
+ if (valv == sgnbit)
{
cst2n = true;
valn = true;
goto gt_expr;
}
- if (double_int_equal_p (tem, double_int_mask (prec - 1)))
+ if (tem == double_int::mask (prec - 1))
{
cst2n = true;
goto lt_expr;
@@ -4912,15 +4910,15 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
comparison, if CST2 doesn't have most significant bit
set, handle it similarly. If CST2 has MSB set,
the minimum is the same, and maximum is ~0U/2. */
- if (!double_int_equal_p (minv, valv))
+ if (minv != valv)
{
/* If (VAL & CST2) != VAL, X & CST2 can't be equal to
VAL. */
minv = masked_increment (valv, cst2v, sgnbit, prec);
- if (double_int_equal_p (minv, valv))
+ if (minv == valv)
break;
}
- maxv = double_int_mask (prec - (cst2n ? 1 : 0));
+ maxv = double_int::mask (prec - (cst2n ? 1 : 0));
valid_p = true;
break;
case GT_EXPR:
@@ -4929,9 +4927,9 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
&& (MINV & CST2) == MINV, if any. If VAL is signed and
CST2 has MSB set, compute it biased by 1 << (prec - 1). */
minv = masked_increment (valv, cst2v, sgnbit, prec);
- if (double_int_equal_p (minv, valv))
+ if (minv == valv)
break;
- maxv = double_int_mask (prec - (cst2n ? 1 : 0));
+ maxv = double_int::mask (prec - (cst2n ? 1 : 0));
valid_p = true;
break;
case LE_EXPR:
@@ -4943,17 +4941,17 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
For signed comparison, if CST2 doesn't have most
significant bit set, handle it similarly. If CST2 has
MSB set, the maximum is the same and minimum is INT_MIN. */
- if (double_int_equal_p (minv, valv))
+ if (minv == valv)
maxv = valv;
else
{
maxv = masked_increment (valv, cst2v, sgnbit, prec);
- if (double_int_equal_p (maxv, valv))
+ if (maxv == valv)
break;
- maxv = double_int_sub (maxv, double_int_one);
+ maxv -= double_int_one;
}
- maxv = double_int_ior (maxv, double_int_not (cst2v));
- maxv = double_int_zext (maxv, prec);
+ maxv |= ~cst2v;
+ maxv = maxv.zext (prec);
minv = sgnbit;
valid_p = true;
break;
@@ -4967,21 +4965,21 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
For signed comparison, if CST2 doesn't have most
significant bit set, handle it similarly. If CST2 has
MSB set, the maximum is the same and minimum is INT_MIN. */
- if (double_int_equal_p (minv, valv))
+ if (minv == valv)
{
- if (double_int_equal_p (valv, sgnbit))
+ if (valv == sgnbit)
break;
maxv = valv;
}
else
{
maxv = masked_increment (valv, cst2v, sgnbit, prec);
- if (double_int_equal_p (maxv, valv))
+ if (maxv == valv)
break;
}
- maxv = double_int_sub (maxv, double_int_one);
- maxv = double_int_ior (maxv, double_int_not (cst2v));
- maxv = double_int_zext (maxv, prec);
+ maxv -= double_int_one;
+ maxv |= ~cst2v;
+ maxv = maxv.zext (prec);
minv = sgnbit;
valid_p = true;
break;
@@ -4989,10 +4987,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
break;
}
if (valid_p
- && !double_int_equal_p (double_int_zext (double_int_sub (maxv,
- minv),
- prec),
- double_int_mask (prec)))
+ && (maxv - minv).zext (prec) != double_int::mask (prec))
{
tree tmp, new_val, type;
int i;
@@ -5008,12 +5003,11 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
type = build_nonstandard_integer_type (prec, 1);
tmp = build1 (NOP_EXPR, type, names[i]);
}
- if (!double_int_zero_p (minv))
+ if (!minv.is_zero ())
{
tmp = build2 (PLUS_EXPR, type, tmp,
- double_int_to_tree (type,
- double_int_neg (minv)));
- maxv2 = double_int_sub (maxv, minv);
+ double_int_to_tree (type, -minv));
+ maxv2 = maxv - minv;
}
new_val = double_int_to_tree (type, maxv2);
@@ -5947,20 +5941,16 @@ search_for_addr_array (tree t, location_t location)
return;
idx = mem_ref_offset (t);
- idx = double_int_sdiv (idx, tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
- if (double_int_scmp (idx, double_int_zero) < 0)
+ idx = idx.sdiv (tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
+ if (idx.slt (double_int_zero))
{
warning_at (location, OPT_Warray_bounds,
"array subscript is below array bounds");
TREE_NO_WARNING (t) = 1;
}
- else if (double_int_scmp (idx,
- double_int_add
- (double_int_add
- (tree_to_double_int (up_bound),
- double_int_neg
- (tree_to_double_int (low_bound))),
- double_int_one)) > 0)
+ else if (idx.sgt (tree_to_double_int (up_bound)
+ - tree_to_double_int (low_bound)
+ + double_int_one))
{
warning_at (location, OPT_Warray_bounds,
"array subscript is above array bounds");
@@ -8221,28 +8211,28 @@ simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
switch (gimple_assign_rhs_code (stmt))
{
case BIT_AND_EXPR:
- mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
- if (double_int_zero_p (mask))
+ mask = may_be_nonzero0.and_not (must_be_nonzero1);
+ if (mask.is_zero ())
{
op = op0;
break;
}
- mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
- if (double_int_zero_p (mask))
+ mask = may_be_nonzero1.and_not (must_be_nonzero0);
+ if (mask.is_zero ())
{
op = op1;
break;
}
break;
case BIT_IOR_EXPR:
- mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
- if (double_int_zero_p (mask))
+ mask = may_be_nonzero0.and_not (must_be_nonzero1);
+ if (mask.is_zero ())
{
op = op1;
break;
}
- mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
- if (double_int_zero_p (mask))
+ mask = may_be_nonzero1.and_not (must_be_nonzero0);
+ if (mask.is_zero ())
{
op = op0;
break;
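The simplification reads directly off the bit ranges: if every bit that can be set in OP0 is known to be set in OP1, then OP0 & OP1 is just OP0 and OP0 | OP1 is just OP1, and symmetrically with the operands swapped. As plain 64-bit bit twiddling, a sketch:

    #include <stdint.h>

    /* X & Y folds to X, and X | Y folds to Y, whenever every bit that
       may be nonzero in X is known to be nonzero in Y.  Names are
       illustrative.  */
    static int
    bits_subsumed_p (uint64_t may_be_nonzero_x, uint64_t must_be_nonzero_y)
    {
      return (may_be_nonzero_x & ~must_be_nonzero_y) == 0;
    }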
@@ -8503,7 +8493,7 @@ simplify_switch_using_ranges (gimple stmt)
/* And queue an update for the stmt. */
su.stmt = stmt;
su.vec = vec2;
- VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
+ VEC_safe_push (switch_update, heap, to_update_switch_stmts, su);
return false;
}
@@ -8549,42 +8539,34 @@ simplify_conversion_using_ranges (gimple stmt)
/* If the first conversion is not injective, the second must not
be widening. */
- if (double_int_cmp (double_int_sub (innermax, innermin),
- double_int_mask (middle_prec), true) > 0
+ if ((innermax - innermin).ugt (double_int::mask (middle_prec))
&& middle_prec < final_prec)
return false;
/* We also want a medium value so that we can track the effect that
narrowing conversions with sign change have. */
inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop));
if (inner_unsigned_p)
- innermed = double_int_rshift (double_int_mask (inner_prec),
- 1, inner_prec, false);
+ innermed = double_int::mask (inner_prec).lrshift (1, inner_prec);
else
innermed = double_int_zero;
- if (double_int_cmp (innermin, innermed, inner_unsigned_p) >= 0
- || double_int_cmp (innermed, innermax, inner_unsigned_p) >= 0)
+ if (innermin.cmp (innermed, inner_unsigned_p) >= 0
+ || innermed.cmp (innermax, inner_unsigned_p) >= 0)
innermed = innermin;
middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop));
- middlemin = double_int_ext (innermin, middle_prec, middle_unsigned_p);
- middlemed = double_int_ext (innermed, middle_prec, middle_unsigned_p);
- middlemax = double_int_ext (innermax, middle_prec, middle_unsigned_p);
+ middlemin = innermin.ext (middle_prec, middle_unsigned_p);
+ middlemed = innermed.ext (middle_prec, middle_unsigned_p);
+ middlemax = innermax.ext (middle_prec, middle_unsigned_p);
/* Require that the final conversion applied to both the original
and the intermediate range produces the same result. */
final_unsigned_p = TYPE_UNSIGNED (finaltype);
- if (!double_int_equal_p (double_int_ext (middlemin,
- final_prec, final_unsigned_p),
- double_int_ext (innermin,
- final_prec, final_unsigned_p))
- || !double_int_equal_p (double_int_ext (middlemed,
- final_prec, final_unsigned_p),
- double_int_ext (innermed,
- final_prec, final_unsigned_p))
- || !double_int_equal_p (double_int_ext (middlemax,
- final_prec, final_unsigned_p),
- double_int_ext (innermax,
- final_prec, final_unsigned_p)))
+ if (middlemin.ext (final_prec, final_unsigned_p)
+ != innermin.ext (final_prec, final_unsigned_p)
+ || middlemed.ext (final_prec, final_unsigned_p)
+ != innermed.ext (final_prec, final_unsigned_p)
+ || middlemax.ext (final_prec, final_unsigned_p)
+ != innermax.ext (final_prec, final_unsigned_p))
return false;
gimple_assign_set_rhs1 (stmt, innerop);
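A concrete instance of the test above with fixed C types: a chain like (int16_t)(int8_t) x may be collapsed to (int16_t) x only if both routes agree on the endpoints and a middle value of x's known range. A sketch assuming two's-complement truncation on out-of-range conversions:

    #include <stdint.h>

    static int
    conversion_composable_p (int32_t lo, int32_t mid, int32_t hi)
    {
      return (int16_t) (int8_t) lo == (int16_t) lo
             && (int16_t) (int8_t) mid == (int16_t) mid
             && (int16_t) (int8_t) hi == (int16_t) hi;
    }

For a range of [0, 100] this holds and the intermediate narrowing can be dropped; for [0, 200] it fails at hi == 200, since (int8_t) 200 is -56.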
@@ -8629,11 +8611,11 @@ range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
/* Then we can perform the conversion on both ends and compare
the result for equality. */
- tem = double_int_ext (tree_to_double_int (vr->min), precision, unsigned_p);
- if (!double_int_equal_p (tree_to_double_int (vr->min), tem))
+ tem = tree_to_double_int (vr->min).ext (precision, unsigned_p);
+ if (tree_to_double_int (vr->min) != tem)
return false;
- tem = double_int_ext (tree_to_double_int (vr->max), precision, unsigned_p);
- if (!double_int_equal_p (tree_to_double_int (vr->max), tem))
+ tem = tree_to_double_int (vr->max).ext (precision, unsigned_p);
+ if (tree_to_double_int (vr->max) != tem)
return false;
return true;
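The fits-in-precision test is a round trip: truncate to PRECISION bits, re-extend (zero- or sign-extension per UNSIGNED_P), and require the original value back. The same check on a plain 64-bit value, as a sketch assuming two's complement:

    #include <stdint.h>

    static int
    value_fits_precision_p (int64_t val, unsigned precision, int unsigned_p)
    {
      if (precision >= 64)
        return 1;
      uint64_t low = (uint64_t) val & ((UINT64_C (1) << precision) - 1);
      int64_t ext;
      if (unsigned_p)
        ext = (int64_t) low;                       /* zero-extend */
      else
        {
          uint64_t sign = UINT64_C (1) << (precision - 1);
          ext = (int64_t) ((low ^ sign) - sign);   /* sign-extend */
        }
      return ext == val;
    }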
diff --git a/gcc/tree.c b/gcc/tree.c
index 676e3bf3deb..162adda0d2c 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -1041,7 +1041,7 @@ build_int_cst (tree type, HOST_WIDE_INT low)
if (!type)
type = integer_type_node;
- return double_int_to_tree (type, shwi_to_double_int (low));
+ return double_int_to_tree (type, double_int::from_shwi (low));
}
/* Create an INT_CST node with a LOW value sign extended to TYPE. */
@@ -1051,7 +1051,7 @@ build_int_cst_type (tree type, HOST_WIDE_INT low)
{
gcc_assert (type);
- return double_int_to_tree (type, shwi_to_double_int (low));
+ return double_int_to_tree (type, double_int::from_shwi (low));
}
/* Constructs tree in type TYPE with value given by CST. Signedness
@@ -1062,7 +1062,7 @@ double_int_to_tree (tree type, double_int cst)
{
bool sign_extended_type = !TYPE_UNSIGNED (type);
- cst = double_int_ext (cst, TYPE_PRECISION (type), !sign_extended_type);
+ cst = cst.ext (TYPE_PRECISION (type), !sign_extended_type);
return build_int_cst_wide (type, cst.low, cst.high);
}
@@ -1077,9 +1077,9 @@ double_int_fits_to_tree_p (const_tree type, double_int cst)
bool sign_extended_type = !TYPE_UNSIGNED (type);
double_int ext
- = double_int_ext (cst, TYPE_PRECISION (type), !sign_extended_type);
+ = cst.ext (TYPE_PRECISION (type), !sign_extended_type);
- return double_int_equal_p (cst, ext);
+ return cst == ext;
}
/* We force the double_int CST to the range of the type TYPE by sign or
@@ -1114,7 +1114,7 @@ force_fit_type_double (tree type, double_int cst, int overflowable,
|| (overflowable > 0 && sign_extended_type))
{
tree t = make_node (INTEGER_CST);
- TREE_INT_CST (t) = double_int_ext (cst, TYPE_PRECISION (type),
+ TREE_INT_CST (t) = cst.ext (TYPE_PRECISION (type),
!sign_extended_type);
TREE_TYPE (t) = type;
TREE_OVERFLOW (t) = 1;
@@ -1285,7 +1285,7 @@ build_low_bits_mask (tree type, unsigned bits)
/* Sign extended all-ones mask. */
mask = double_int_minus_one;
else
- mask = double_int_mask (bits);
+ mask = double_int::mask (bits);
return build_int_cst_wide (type, mask.low, mask.high);
}
@@ -1443,12 +1443,10 @@ tree
build_constructor_single (tree type, tree index, tree value)
{
VEC(constructor_elt,gc) *v;
- constructor_elt *elt;
+ constructor_elt elt = {index, value};
v = VEC_alloc (constructor_elt, gc, 1);
- elt = VEC_quick_push (constructor_elt, v, NULL);
- elt->index = index;
- elt->value = value;
+ VEC_quick_push (constructor_elt, v, elt);
return build_constructor (type, v);
}
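The new call sites follow one pattern throughout this patch: build the element first, with aggregate initialization where the type allows it, then push the finished object, instead of pushing an empty slot and filling it through the returned pointer. The same idiom with a plain struct and std::vector, as an analogy rather than GCC code:

    #include <vector>

    struct elt { int index; int value; };

    static std::vector<elt>
    make_single (int index, int value)
    {
      std::vector<elt> v;
      v.reserve (1);
      elt e = {index, value};   /* like constructor_elt elt above */
      v.push_back (e);          /* like VEC_quick_push */
      return v;
    }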
@@ -1910,7 +1908,7 @@ int
fixed_zerop (const_tree expr)
{
return (TREE_CODE (expr) == FIXED_CST
- && double_int_zero_p (TREE_FIXED_CST (expr).data));
+ && TREE_FIXED_CST (expr).data.is_zero ());
}
/* Return the power of two represented by a tree node known to be a
@@ -4001,8 +3999,7 @@ double_int
mem_ref_offset (const_tree t)
{
tree toff = TREE_OPERAND (t, 1);
- return double_int_sext (tree_to_double_int (toff),
- TYPE_PRECISION (TREE_TYPE (toff)));
+ return tree_to_double_int (toff).sext (TYPE_PRECISION (TREE_TYPE (toff)));
}
/* Return the pointer-type relevant for TBAA purposes from the
@@ -6560,7 +6557,7 @@ HOST_WIDE_INT
size_low_cst (const_tree t)
{
double_int d = tree_to_double_int (t);
- return double_int_sext (d, TYPE_PRECISION (TREE_TYPE (t))).low;
+ return d.sext (TYPE_PRECISION (TREE_TYPE (t))).low;
}
/* Return the most significant (sign) bit of T. */
@@ -8298,15 +8295,15 @@ retry:
dd = tree_to_double_int (type_low_bound);
if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_low_bound)))
{
- int c_neg = (!unsc && double_int_negative_p (dc));
- int t_neg = (unsc && double_int_negative_p (dd));
+ int c_neg = (!unsc && dc.is_negative ());
+ int t_neg = (unsc && dd.is_negative ());
if (c_neg && !t_neg)
return false;
- if ((c_neg || !t_neg) && double_int_ucmp (dc, dd) < 0)
+ if ((c_neg || !t_neg) && dc.ult (dd))
return false;
}
- else if (double_int_cmp (dc, dd, unsc) < 0)
+ else if (dc.cmp (dd, unsc) < 0)
return false;
ok_for_low_bound = true;
}
@@ -8319,15 +8316,15 @@ retry:
dd = tree_to_double_int (type_high_bound);
if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_high_bound)))
{
- int c_neg = (!unsc && double_int_negative_p (dc));
- int t_neg = (unsc && double_int_negative_p (dd));
+ int c_neg = (!unsc && dc.is_negative ());
+ int t_neg = (unsc && dd.is_negative ());
if (t_neg && !c_neg)
return false;
- if ((t_neg || !c_neg) && double_int_ucmp (dc, dd) > 0)
+ if ((t_neg || !c_neg) && dc.ugt (dd))
return false;
}
- else if (double_int_cmp (dc, dd, unsc) > 0)
+ else if (dc.cmp (dd, unsc) > 0)
return false;
ok_for_high_bound = true;
}
@@ -8341,7 +8338,7 @@ retry:
/* Perform some generic filtering which may allow making a decision
even if the bounds are not constant. First, negative integers
never fit in unsigned types. */
- if (TYPE_UNSIGNED (type) && !unsc && double_int_negative_p (dc))
+ if (TYPE_UNSIGNED (type) && !unsc && dc.is_negative ())
return false;
/* Second, narrower types always fit in wider ones. */
@@ -8396,9 +8393,8 @@ get_type_static_bounds (const_tree type, mpz_t min, mpz_t max)
else
{
double_int mn;
- mn = double_int_mask (TYPE_PRECISION (type) - 1);
- mn = double_int_sext (double_int_add (mn, double_int_one),
- TYPE_PRECISION (type));
+ mn = double_int::mask (TYPE_PRECISION (type) - 1);
+ mn = (mn + double_int_one).sext (TYPE_PRECISION (type));
mpz_set_double_int (min, mn, false);
}
}
@@ -8410,10 +8406,10 @@ get_type_static_bounds (const_tree type, mpz_t min, mpz_t max)
else
{
if (TYPE_UNSIGNED (type))
- mpz_set_double_int (max, double_int_mask (TYPE_PRECISION (type)),
+ mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)),
true);
else
- mpz_set_double_int (max, double_int_mask (TYPE_PRECISION (type) - 1),
+ mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type) - 1),
true);
}
}
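The min computation above uses the identity that for an N-bit signed type, max = mask (N - 1) = 2^(N-1) - 1 and min is the sign extension of max + 1, i.e. -2^(N-1). With ordinary 64-bit arithmetic, as a sketch valid for 1 <= prec <= 64:

    #include <stdint.h>

    static void
    signed_type_bounds (unsigned prec, int64_t *min, int64_t *max)
    {
      uint64_t m = (UINT64_C (1) << (prec - 1)) - 1;  /* mask (prec - 1) */
      *max = (int64_t) m;
      *min = -(int64_t) m - 1;    /* (m + 1) sign-extended */
    }

signed_type_bounds (8, ...) gives -128 and 127, matching int8_t.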
diff --git a/gcc/tree.h b/gcc/tree.h
index 3a8b058568d..a5d4d771b1d 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -602,9 +602,6 @@ struct GTY(()) tree_base {
TYPE_REF_CAN_ALIAS_ALL in
POINTER_TYPE, REFERENCE_TYPE
- MOVE_NONTEMPORAL in
- MODIFY_EXPR
-
CASE_HIGH_SEEN in
CASE_LABEL_EXPR
@@ -1239,10 +1236,6 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int,
#define TYPE_REF_CAN_ALIAS_ALL(NODE) \
(PTR_OR_REF_CHECK (NODE)->base.static_flag)
-/* In a MODIFY_EXPR, means that the store in the expression is nontemporal. */
-#define MOVE_NONTEMPORAL(NODE) \
- (EXPR_CHECK (NODE)->base.static_flag)
-
/* In an INTEGER_CST, REAL_CST, COMPLEX_CST, or VECTOR_CST, this means
there was an overflow in folding. */
@@ -1630,9 +1623,8 @@ struct GTY(()) tree_vec {
/* Append a new constructor element to V, with the specified INDEX and VALUE. */
#define CONSTRUCTOR_APPEND_ELT(V, INDEX, VALUE) \
do { \
- constructor_elt *_ce___ = VEC_safe_push (constructor_elt, gc, V, NULL); \
- _ce___->index = INDEX; \
- _ce___->value = VALUE; \
+ constructor_elt _ce___ = {INDEX, VALUE}; \
+ VEC_safe_push (constructor_elt, gc, V, _ce___); \
} while (0)
/* True if NODE, a FIELD_DECL, is to be processed as a bitfield for
@@ -4871,7 +4863,7 @@ extern tree force_fit_type_double (tree, double_int, int, bool);
static inline tree
build_int_cstu (tree type, unsigned HOST_WIDE_INT cst)
{
- return double_int_to_tree (type, uhwi_to_double_int (cst));
+ return double_int_to_tree (type, double_int::from_uhwi (cst));
}
extern tree build_int_cst (tree, HOST_WIDE_INT);
@@ -6277,7 +6269,6 @@ extern bool parse_input_constraint (const char **, int, int, int, int,
const char * const *, bool *, bool *);
extern void expand_asm_stmt (gimple);
extern tree resolve_asm_operand_names (tree, tree, tree, tree);
-extern void expand_case (gimple);
#ifdef HARD_CONST
/* Silly ifdef to avoid having all includers depend on hard-reg-set.h. */
extern tree tree_overlaps_hard_reg_set (tree, HARD_REG_SET *);
diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c
index 818fb2456b5..9f5bc126114 100644
--- a/gcc/var-tracking.c
+++ b/gcc/var-tracking.c
@@ -5510,7 +5510,7 @@ add_uses (rtx *ploc, void *data)
if (dump_file && (dump_flags & TDF_DETAILS))
log_op_type (mo.u.loc, cui->bb, cui->insn, mo.type, dump_file);
- VEC_safe_push (micro_operation, heap, VTI (bb)->mos, &mo);
+ VEC_safe_push (micro_operation, heap, VTI (bb)->mos, mo);
}
return 0;
@@ -5794,7 +5794,7 @@ add_stores (rtx loc, const_rtx expr, void *cuip)
if (dump_file && (dump_flags & TDF_DETAILS))
log_op_type (moa.u.loc, cui->bb, cui->insn,
moa.type, dump_file);
- VEC_safe_push (micro_operation, heap, VTI (bb)->mos, &moa);
+ VEC_safe_push (micro_operation, heap, VTI (bb)->mos, moa);
}
resolve = false;
@@ -5881,7 +5881,7 @@ add_stores (rtx loc, const_rtx expr, void *cuip)
log_and_return:
if (dump_file && (dump_flags & TDF_DETAILS))
log_op_type (mo.u.loc, cui->bb, cui->insn, mo.type, dump_file);
- VEC_safe_push (micro_operation, heap, VTI (bb)->mos, &mo);
+ VEC_safe_push (micro_operation, heap, VTI (bb)->mos, mo);
}
/* Arguments to the call. */
@@ -6300,7 +6300,7 @@ add_with_sets (rtx insn, struct cselib_set *sets, int n_sets)
if (dump_file && (dump_flags & TDF_DETAILS))
log_op_type (PATTERN (insn), bb, insn, mo.type, dump_file);
- VEC_safe_push (micro_operation, heap, VTI (bb)->mos, &mo);
+ VEC_safe_push (micro_operation, heap, VTI (bb)->mos, mo);
}
n1 = VEC_length (micro_operation, VTI (bb)->mos);
@@ -7864,7 +7864,9 @@ loc_exp_insert_dep (variable var, rtx x, htab_t vars)
led = (loc_exp_dep *) pool_alloc (loc_exp_dep_pool);
else
{
- VEC_quick_push (loc_exp_dep, VAR_LOC_DEP_VEC (var), NULL);
+ loc_exp_dep empty;
+ memset (&empty, 0, sizeof (empty));
+ VEC_quick_push (loc_exp_dep, VAR_LOC_DEP_VEC (var), empty);
led = &VEC_last (loc_exp_dep, VAR_LOC_DEP_VEC (var));
}
led->dv = var->dv;
@@ -9354,13 +9356,13 @@ vt_add_function_parameter (tree parm)
&& HARD_REGISTER_P (incoming)
&& OUTGOING_REGNO (REGNO (incoming)) != REGNO (incoming))
{
- parm_reg_t *p
- = VEC_safe_push (parm_reg_t, gc, windowed_parm_regs, NULL);
- p->incoming = incoming;
+ parm_reg_t p;
+ p.incoming = incoming;
incoming
= gen_rtx_REG_offset (incoming, GET_MODE (incoming),
OUTGOING_REGNO (REGNO (incoming)), 0);
- p->outgoing = incoming;
+ p.outgoing = incoming;
+ VEC_safe_push (parm_reg_t, gc, windowed_parm_regs, p);
}
else if (MEM_P (incoming)
&& REG_P (XEXP (incoming, 0))
@@ -9369,11 +9371,11 @@ vt_add_function_parameter (tree parm)
rtx reg = XEXP (incoming, 0);
if (OUTGOING_REGNO (REGNO (reg)) != REGNO (reg))
{
- parm_reg_t *p
- = VEC_safe_push (parm_reg_t, gc, windowed_parm_regs, NULL);
- p->incoming = reg;
+ parm_reg_t p;
+ p.incoming = reg;
reg = gen_raw_REG (GET_MODE (reg), OUTGOING_REGNO (REGNO (reg)));
- p->outgoing = reg;
+ p.outgoing = reg;
+ VEC_safe_push (parm_reg_t, gc, windowed_parm_regs, p);
incoming = replace_equiv_address_nv (incoming, reg);
}
}
@@ -9815,7 +9817,7 @@ vt_initialize (void)
log_op_type (PATTERN (insn), bb, insn,
MO_ADJUST, dump_file);
VEC_safe_push (micro_operation, heap, VTI (bb)->mos,
- &mo);
+ mo);
VTI (bb)->out.stack_adjust += pre;
}
}
@@ -9847,7 +9849,7 @@ vt_initialize (void)
log_op_type (PATTERN (insn), bb, insn,
MO_ADJUST, dump_file);
VEC_safe_push (micro_operation, heap, VTI (bb)->mos,
- &mo);
+ mo);
VTI (bb)->out.stack_adjust += post;
}
diff --git a/gcc/varasm.c b/gcc/varasm.c
index c40502df74e..ecafb3b411f 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -3002,9 +3002,8 @@ copy_constant (tree exp)
CONSTRUCTOR_ELTS (exp)));
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, purpose, value)
{
- constructor_elt *ce = VEC_quick_push (constructor_elt, v, NULL);
- ce->index = purpose;
- ce->value = copy_constant (value);
+ constructor_elt ce = {purpose, copy_constant (value)};
+ VEC_quick_push (constructor_elt, v, ce);
}
CONSTRUCTOR_ELTS (copy) = v;
return copy;
@@ -4652,14 +4651,13 @@ array_size_for_constructor (tree val)
/* Compute the total number of array elements. */
tmp = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (val)));
- i = double_int_sub (tree_to_double_int (max_index), tree_to_double_int (tmp));
- i = double_int_add (i, double_int_one);
+ i = tree_to_double_int (max_index) - tree_to_double_int (tmp);
+ i += double_int_one;
/* Multiply by the array element unit size to find number of bytes. */
- i = double_int_mul (i, tree_to_double_int
- (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (val)))));
+ i *= tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (val))));
- gcc_assert (double_int_fits_in_uhwi_p (i));
+ gcc_assert (i.fits_uhwi ());
return i.low;
}
@@ -4743,9 +4741,9 @@ output_constructor_regular_field (oc_local_state *local)
sign-extend the result because Ada has negative DECL_FIELD_OFFSETs
but we are using an unsigned sizetype. */
unsigned prec = TYPE_PRECISION (sizetype);
- double_int idx = double_int_sub (tree_to_double_int (local->index),
- tree_to_double_int (local->min_index));
- idx = double_int_sext (idx, prec);
+ double_int idx = tree_to_double_int (local->index)
+ - tree_to_double_int (local->min_index);
+ idx = idx.sext (prec);
fieldpos = (tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (local->val)), 1)
* idx.low);
}
@@ -5567,9 +5565,8 @@ assemble_alias (tree decl, tree target)
do_assemble_alias (decl, target);
else
{
- alias_pair *p = VEC_safe_push (alias_pair, gc, alias_pairs, NULL);
- p->decl = decl;
- p->target = target;
+ alias_pair p = {decl, target};
+ VEC_safe_push (alias_pair, gc, alias_pairs, p);
}
}
@@ -5632,14 +5629,9 @@ static int
dump_tm_clone_to_vec (void **slot, void *info)
{
struct tree_map *map = (struct tree_map *) *slot;
- VEC(tm_alias_pair,heap) **tm_alias_pairs
- = (VEC(tm_alias_pair, heap) **) info;
- tm_alias_pair *p;
-
- p = VEC_safe_push (tm_alias_pair, heap, *tm_alias_pairs, NULL);
- p->from = map->base.from;
- p->to = map->to;
- p->uid = DECL_UID (p->from);
+ VEC(tm_alias_pair,heap) **tm_alias_pairs = (VEC(tm_alias_pair, heap) **) info;
+ tm_alias_pair p = {DECL_UID (map->base.from), map->base.from, map->to};
+ VEC_safe_push (tm_alias_pair, heap, *tm_alias_pairs, p);
return 1;
}
diff --git a/gcc/vec.h b/gcc/vec.h
index fbf95d22682..8858f6afea1 100644
--- a/gcc/vec.h
+++ b/gcc/vec.h
@@ -31,23 +31,6 @@ along with GCC; see the file COPYING3. If not see
sometimes backed by out-of-line generic functions. The vectors are
designed to interoperate with the GTY machinery.
- FIXME - Remove the following compatibility notes after a handler
- class for vec_t is implemented.
-
- To preserve compatibility with the existing API, some functions
- that manipulate vector elements implement two overloads: one taking
- a pointer to the element and others that take a pointer to a
- pointer to the element.
-
- This used to be implemented with three different families of macros
- and structures: structure objects, scalar objects and of pointers.
- Both the structure object and pointer variants passed pointers to
- objects around -- in the former case the pointers were stored into
- the vector and in the latter case the pointers were dereferenced and
- the objects copied into the vector. The scalar object variant was
- suitable for int-like objects, and the vector elements were returned
- by value.
-
There are both 'index' and 'iterate' accessors. The index accessor
is implemented by operator[]. The iterator returns a boolean
iteration condition and updates the iteration variable passed by
@@ -124,7 +107,6 @@ along with GCC; see the file COPYING3. If not see
VEC_safe_push(tree,gc,s->v,decl); // append some decl onto the end
for (ix = 0; VEC_iterate(tree,s->v,ix,elt); ix++)
{ do something with elt }
-
*/
#if ENABLE_CHECKING
@@ -178,19 +160,15 @@ struct GTY(()) vec_t
bool space (int VEC_CHECK_DECL);
void splice (vec_t<T> * VEC_CHECK_DECL);
- T &quick_push (T VEC_CHECK_DECL);
- T *quick_push (const T * VEC_CHECK_DECL);
+ T *quick_push (const T & VEC_CHECK_DECL);
T &pop (ALONE_VEC_CHECK_DECL);
void truncate (unsigned VEC_CHECK_DECL);
- void replace (unsigned, T VEC_CHECK_DECL);
- void quick_insert (unsigned, T VEC_CHECK_DECL);
- void quick_insert (unsigned, const T * VEC_CHECK_DECL);
+ void replace (unsigned, const T & VEC_CHECK_DECL);
+ void quick_insert (unsigned, const T & VEC_CHECK_DECL);
void ordered_remove (unsigned VEC_CHECK_DECL);
void unordered_remove (unsigned VEC_CHECK_DECL);
void block_remove (unsigned, unsigned VEC_CHECK_DECL);
-
- unsigned lower_bound (T, bool (*)(T, T)) const;
- unsigned lower_bound (const T *, bool (*)(const T *, const T *)) const;
+ unsigned lower_bound (T, bool (*)(const T &, const T &)) const;
/* Class-static member functions. Some of these will become member
functions of a future handler class wrapping vec_t. */
@@ -221,10 +199,7 @@ struct GTY(()) vec_t
MEM_STAT_DECL);
template<enum vec_allocation_t A>
- static T &safe_push (vec_t<T> **, T VEC_CHECK_DECL MEM_STAT_DECL);
-
- template<enum vec_allocation_t A>
- static T *safe_push (vec_t<T> **, const T * VEC_CHECK_DECL MEM_STAT_DECL);
+ static T *safe_push (vec_t<T> **, const T & VEC_CHECK_DECL MEM_STAT_DECL);
template<enum vec_allocation_t A>
static void safe_grow (vec_t<T> **, int VEC_CHECK_DECL MEM_STAT_DECL);
@@ -233,11 +208,7 @@ struct GTY(()) vec_t
static void safe_grow_cleared (vec_t<T> **, int VEC_CHECK_DECL MEM_STAT_DECL);
template<enum vec_allocation_t A>
- static void safe_insert (vec_t<T> **, unsigned, T * VEC_CHECK_DECL
- MEM_STAT_DECL);
-
- template<enum vec_allocation_t A>
- static void safe_insert (vec_t<T> **, unsigned, T obj VEC_CHECK_DECL
+ static void safe_insert (vec_t<T> **, unsigned, const T & VEC_CHECK_DECL
MEM_STAT_DECL);
static bool iterate (const vec_t<T> *, unsigned, T *);
@@ -802,63 +773,32 @@ vec_t<T>::safe_splice (vec_t<T> **dst, vec_t<T> *src VEC_CHECK_DECL
}
-/* Push OBJ (a new element) onto the end, returns a reference to the slot
- filled in. There must be sufficient space in the vector. */
+/* Push OBJ (a new element) onto the end of the vector. There must be
+ sufficient space in the vector. Return a pointer to the slot
+ where OBJ was inserted. */
-template<typename T>
-T &
-vec_t<T>::quick_push (T obj VEC_CHECK_DECL)
-{
- VEC_ASSERT (prefix_.num_ < prefix_.alloc_, "push", T, base);
- vec_[prefix_.num_] = obj;
- T &val = vec_[prefix_.num_];
- prefix_.num_++;
- return val;
-}
-
-
-/* Push PTR (a new pointer to an element) onto the end, returns a
- pointer to the slot filled in. The new value can be NULL, in which
- case NO initialization is performed. There must be sufficient
- space in the vector. */
template<typename T>
T *
-vec_t<T>::quick_push (const T *ptr VEC_CHECK_DECL)
+vec_t<T>::quick_push (const T &obj VEC_CHECK_DECL)
{
VEC_ASSERT (prefix_.num_ < prefix_.alloc_, "push", T, base);
T *slot = &vec_[prefix_.num_++];
- if (ptr)
- *slot = *ptr;
+ *slot = obj;
return slot;
}
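Call sites elsewhere in this patch show the unified overload in use: the element is built up front and handed over by const reference, with the returned slot pointer available when later mutation is needed. From the varasm.c hunk above:

    constructor_elt ce = {purpose, copy_constant (value)};
    VEC_quick_push (constructor_elt, v, ce);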
-/* Push a new element OBJ onto the end of VEC. Returns a reference to
- the slot filled in. Reallocates V, if needed. */
-
-template<typename T>
-template<enum vec_allocation_t A>
-T &
-vec_t<T>::safe_push (vec_t<T> **vec, T obj VEC_CHECK_DECL MEM_STAT_DECL)
-{
- reserve<A> (vec, 1 VEC_CHECK_PASS PASS_MEM_STAT);
- return (*vec)->quick_push (obj VEC_CHECK_PASS);
-}
-
-
-/* Push a pointer PTR to a new element onto the end of VEC. Returns a
- pointer to the slot filled in. For object vectors, the new value
- can be NULL, in which case NO initialization is performed.
- Reallocates VEC, if needed. */
+/* Push a new element OBJ onto the end of VEC. Reallocates VEC, if
+ needed. Return a pointer to the slot where OBJ was inserted. */
template<typename T>
template<enum vec_allocation_t A>
T *
-vec_t<T>::safe_push (vec_t<T> **vec, const T *ptr VEC_CHECK_DECL MEM_STAT_DECL)
+vec_t<T>::safe_push (vec_t<T> **vec, const T &obj VEC_CHECK_DECL MEM_STAT_DECL)
{
reserve<A> (vec, 1 VEC_CHECK_PASS PASS_MEM_STAT);
- return (*vec)->quick_push (ptr VEC_CHECK_PASS);
+ return (*vec)->quick_push (obj VEC_CHECK_PASS);
}
@@ -923,7 +863,7 @@ vec_t<T>::safe_grow_cleared (vec_t<T> **vec, int size VEC_CHECK_DECL
template<typename T>
void
-vec_t<T>::replace (unsigned ix, T obj VEC_CHECK_DECL)
+vec_t<T>::replace (unsigned ix, const T &obj VEC_CHECK_DECL)
{
VEC_ASSERT (ix < prefix_.num_, "replace", T, base);
vec_[ix] = obj;
@@ -935,7 +875,7 @@ vec_t<T>::replace (unsigned ix, T obj VEC_CHECK_DECL)
template<typename T>
void
-vec_t<T>::quick_insert (unsigned ix, T obj VEC_CHECK_DECL)
+vec_t<T>::quick_insert (unsigned ix, const T &obj VEC_CHECK_DECL)
{
VEC_ASSERT (prefix_.num_ < prefix_.alloc_, "insert", T, base);
VEC_ASSERT (ix <= prefix_.num_, "insert", T, base);
@@ -945,30 +885,13 @@ vec_t<T>::quick_insert (unsigned ix, T obj VEC_CHECK_DECL)
}
-/* Insert an element, *PTR, at the IXth position of V. The new value
- can be NULL, in which case no initialization of the inserted slot
- takes place. There must be sufficient space. */
-
-template<typename T>
-void
-vec_t<T>::quick_insert (unsigned ix, const T *ptr VEC_CHECK_DECL)
-{
- VEC_ASSERT (prefix_.num_ < prefix_.alloc_, "insert", T, base);
- VEC_ASSERT (ix <= prefix_.num_, "insert", T, base);
- T *slot = &vec_[ix];
- memmove (slot + 1, slot, (prefix_.num_++ - ix) * sizeof (T));
- if (ptr)
- *slot = *ptr;
-}
-
-
-/* Insert an element, VAL, at the IXth position of VEC. Reallocate
+/* Insert an element, OBJ, at the IXth position of VEC. Reallocate
VEC, if necessary. */
template<typename T>
template<enum vec_allocation_t A>
void
-vec_t<T>::safe_insert (vec_t<T> **vec, unsigned ix, T obj VEC_CHECK_DECL
+vec_t<T>::safe_insert (vec_t<T> **vec, unsigned ix, const T &obj VEC_CHECK_DECL
MEM_STAT_DECL)
{
reserve<A> (vec, 1 VEC_CHECK_PASS PASS_MEM_STAT);
@@ -976,22 +899,6 @@ vec_t<T>::safe_insert (vec_t<T> **vec, unsigned ix, T obj VEC_CHECK_DECL
}
-/* Insert an element, *PTR, at the IXth position of VEC. Return a pointer
- to the slot created. For vectors of object, the new value can be
- NULL, in which case no initialization of the inserted slot takes
- place. Reallocate V, if necessary. */
-
-template<typename T>
-template<enum vec_allocation_t A>
-void
-vec_t<T>::safe_insert (vec_t<T> **vec, unsigned ix, T *ptr VEC_CHECK_DECL
- MEM_STAT_DECL)
-{
- reserve<A> (vec, 1 VEC_CHECK_PASS PASS_MEM_STAT);
- (*vec)->quick_insert (ix, ptr VEC_CHECK_PASS);
-}
-
-
/* Remove an element from the IXth position of this vector. Ordering of
remaining elements is preserved. This is an O(N) operation due to
a memmove. */
@@ -1043,14 +950,14 @@ vec_t<T>::block_remove (unsigned ix, unsigned len VEC_CHECK_DECL)
template<typename T>
unsigned
-vec_t<T>::lower_bound (T obj, bool (*lessthan)(T, T)) const
+vec_t<T>::lower_bound (T obj, bool (*lessthan)(const T &, const T &)) const
{
unsigned int len = VEC_length (T, this);
unsigned int half, middle;
unsigned int first = 0;
while (len > 0)
{
- half = len >> 1;
+ half = len / 2;
middle = first;
middle += half;
T middle_elem = (*this)[middle];
@@ -1067,38 +974,6 @@ vec_t<T>::lower_bound (T obj, bool (*lessthan)(T, T)) const
}
-/* Find and return the first position in which *PTR could be inserted
- without changing the ordering of this vector. LESSTHAN is a
- function that returns true if the first argument is strictly less
- than the second. */
-
-template<typename T>
-unsigned
-vec_t<T>::lower_bound (const T *ptr,
- bool (*lessthan)(const T *, const T *)) const
-{
- unsigned int len = VEC_length (T, this);
- unsigned int half, middle;
- unsigned int first = 0;
- while (len > 0)
- {
- half = len >> 1;
- middle = first;
- middle += half;
- const T *middle_elem = &(*this)[middle];
- if (lessthan (middle_elem, ptr))
- {
- first = middle;
- ++first;
- len = len - half - 1;
- }
- else
- len = half;
- }
- return first;
-}
-
-
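With a single const-reference comparator signature, one lower_bound now serves element types of any size. A hypothetical comparator and call, assuming a vector of int; the call form is illustrative:

    static bool
    int_lt (const int &a, const int &b)
    {
      return a < b;
    }

    /* unsigned pos = VEC_lower_bound (int, v, 42, int_lt); */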
void *vec_heap_o_reserve_1 (void *, int, size_t, size_t, bool MEM_STAT_DECL);
void *vec_gc_o_reserve_1 (void *, int, size_t, size_t, bool MEM_STAT_DECL);