author     bstarynk <bstarynk@138bc75d-0d04-0410-961f-82ee72b054a4>  2008-06-06 20:39:46 +0000
committer  bstarynk <bstarynk@138bc75d-0d04-0410-961f-82ee72b054a4>  2008-06-06 20:39:46 +0000
commit     e97da9acca05b56be99bfd9abd8b83e5db5268fb (patch)
tree       fcab715a6c79d738a81d753254714e02889ad105 /gcc
parent     76d6e5e429f5ca1ba89341e4fd3cbd5f4a6f055f (diff)
download   gcc-e97da9acca05b56be99bfd9abd8b83e5db5268fb.tar.gz
2008-06-06 Basile Starynkevitch <basile@starynkevitch.net>
MELT branch merged with trunk r136492
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/melt-branch@136504 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog | 550
-rw-r--r--  gcc/ChangeLog.melt | 3
-rw-r--r--  gcc/DATESTAMP | 2
-rw-r--r--  gcc/Makefile.in | 5
-rw-r--r--  gcc/auto-inc-dec.c | 4
-rw-r--r--  gcc/bb-reorder.c | 4
-rw-r--r--  gcc/bitmap.c | 16
-rw-r--r--  gcc/builtin-types.def | 18
-rw-r--r--  gcc/builtins.c | 10
-rw-r--r--  gcc/c-common.c | 6
-rw-r--r--  gcc/c-common.h | 1
-rw-r--r--  gcc/c-cppbuiltin.c | 6
-rw-r--r--  gcc/c-omp.c | 315
-rw-r--r--  gcc/c-parser.c | 395
-rw-r--r--  gcc/c-pragma.c | 2
-rw-r--r--  gcc/c-pragma.h | 8
-rw-r--r--  gcc/c-pretty-print.c | 4
-rw-r--r--  gcc/c-tree.h | 2
-rw-r--r--  gcc/c-typeck.c | 34
-rw-r--r--  gcc/cfgcleanup.c | 6
-rw-r--r--  gcc/cfgexpand.c | 4
-rw-r--r--  gcc/cfghooks.c | 5
-rw-r--r--  gcc/cfglayout.c | 2
-rw-r--r--  gcc/cfgloopmanip.c | 9
-rw-r--r--  gcc/cgraph.c | 6
-rw-r--r--  gcc/cgraphunit.c | 8
-rw-r--r--  gcc/common.opt | 4
-rw-r--r--  gcc/config.gcc | 2
-rw-r--r--  gcc/config/avr/avr.h | 4
-rw-r--r--  gcc/config/i386/i386.c | 2
-rw-r--r--  gcc/config/i386/i386.md | 39
-rw-r--r--  gcc/config/mips/mips.c | 16
-rw-r--r--  gcc/config/mips/mips.h | 1
-rw-r--r--  gcc/config/mips/mips.md | 3
-rw-r--r--  gcc/config/mips/xlr.md | 89
-rw-r--r--  gcc/config/rs6000/dfp.md | 10
-rw-r--r--  gcc/config/rs6000/linux64.h | 3
-rw-r--r--  gcc/config/rs6000/rs6000.c | 68
-rw-r--r--  gcc/config/rs6000/rs6000.h | 8
-rw-r--r--  gcc/config/rs6000/xcoff.h | 3
-rw-r--r--  gcc/config/xtensa/lib2funcs.S | 4
-rw-r--r--  gcc/coverage.c | 5
-rw-r--r--  gcc/cp/ChangeLog | 67
-rw-r--r--  gcc/cp/call.c | 2
-rw-r--r--  gcc/cp/cp-gimplify.c | 111
-rw-r--r--  gcc/cp/cp-objcp-common.h | 2
-rw-r--r--  gcc/cp/cp-tree.h | 11
-rw-r--r--  gcc/cp/decl.c | 2
-rw-r--r--  gcc/cp/parser.c | 676
-rw-r--r--  gcc/cp/pt.c | 255
-rw-r--r--  gcc/cp/semantics.c | 691
-rw-r--r--  gcc/cse.c | 5
-rw-r--r--  gcc/dbxout.c | 4
-rw-r--r--  gcc/df-byte-scan.c | 4
-rw-r--r--  gcc/df-core.c | 11
-rw-r--r--  gcc/df-problems.c | 7
-rw-r--r--  gcc/df-scan.c | 4
-rw-r--r--  gcc/dfp.c | 4
-rw-r--r--  gcc/doc/invoke.texi | 16
-rw-r--r--  gcc/dominance.c | 7
-rw-r--r--  gcc/domwalk.c | 5
-rw-r--r--  gcc/dse.c | 4
-rw-r--r--  gcc/dwarf2out.c | 2
-rw-r--r--  gcc/emit-rtl.c | 8
-rw-r--r--  gcc/et-forest.c | 5
-rw-r--r--  gcc/except.c | 1
-rw-r--r--  gcc/fold-const.c | 4
-rw-r--r--  gcc/fortran/ChangeLog | 95
-rw-r--r--  gcc/fortran/dump-parse-tree.c | 13
-rw-r--r--  gcc/fortran/f95-lang.c | 8
-rw-r--r--  gcc/fortran/gfortran.h | 15
-rw-r--r--  gcc/fortran/intrinsic.texi | 2
-rw-r--r--  gcc/fortran/match.h | 2
-rw-r--r--  gcc/fortran/openmp.c | 178
-rw-r--r--  gcc/fortran/parse.c | 23
-rw-r--r--  gcc/fortran/resolve.c | 30
-rw-r--r--  gcc/fortran/scanner.c | 18
-rw-r--r--  gcc/fortran/st.c | 2
-rw-r--r--  gcc/fortran/trans-openmp.c | 535
-rw-r--r--  gcc/fortran/trans.c | 2
-rw-r--r--  gcc/fortran/trans.h | 6
-rw-r--r--  gcc/fortran/types.def | 21
-rw-r--r--  gcc/function.c | 14
-rw-r--r--  gcc/function.h | 4
-rw-r--r--  gcc/gcc.c | 6
-rw-r--r--  gcc/gcov-io.c | 6
-rw-r--r--  gcc/gcov.c | 2
-rw-r--r--  gcc/gcse.c | 4
-rw-r--r--  gcc/genattrtab.c | 4
-rw-r--r--  gcc/genautomata.c | 24
-rw-r--r--  gcc/ggc-page.c | 2
-rw-r--r--  gcc/ggc-zone.c | 2
-rw-r--r--  gcc/gimple-low.c | 1
-rw-r--r--  gcc/gimplify.c | 362
-rw-r--r--  gcc/gthr-lynx.h | 4
-rw-r--r--  gcc/haifa-sched.c | 6
-rw-r--r--  gcc/hooks.c | 8
-rw-r--r--  gcc/hooks.h | 1
-rw-r--r--  gcc/hwint.h | 4
-rw-r--r--  gcc/ipa-cp.c | 4
-rw-r--r--  gcc/ipa-inline.c | 4
-rw-r--r--  gcc/ipa-prop.h | 8
-rw-r--r--  gcc/ipa-pure-const.c | 14
-rw-r--r--  gcc/ipa-struct-reorg.c | 29
-rw-r--r--  gcc/ipa-struct-reorg.h | 4
-rw-r--r--  gcc/ipa-type-escape.c | 7
-rw-r--r--  gcc/ipa.c | 5
-rw-r--r--  gcc/langhooks-def.h | 8
-rw-r--r--  gcc/langhooks.h | 14
-rw-r--r--  gcc/loop-doloop.c | 5
-rw-r--r--  gcc/matrix-reorg.c | 8
-rw-r--r--  gcc/mips-tfile.c | 2
-rw-r--r--  gcc/mkmap-flat.awk | 4
-rw-r--r--  gcc/mkmap-symver.awk | 4
-rw-r--r--  gcc/modulo-sched.c | 10
-rw-r--r--  gcc/omega.c | 5
-rw-r--r--  gcc/omp-builtins.def | 59
-rw-r--r--  gcc/omp-low.c | 1941
-rw-r--r--  gcc/optabs.c | 2
-rw-r--r--  gcc/optabs.h | 6
-rw-r--r--  gcc/opts.c | 10
-rw-r--r--  gcc/passes.c | 14
-rw-r--r--  gcc/postreload-gcse.c | 4
-rw-r--r--  gcc/postreload.c | 4
-rw-r--r--  gcc/predict.c | 4
-rw-r--r--  gcc/pretty-print.h | 4
-rw-r--r--  gcc/profile.c | 2
-rw-r--r--  gcc/protoize.c | 5
-rw-r--r--  gcc/ra-conflict.c | 4
-rw-r--r--  gcc/real.c | 2
-rw-r--r--  gcc/recog.c | 2
-rw-r--r--  gcc/regclass.c | 2
-rw-r--r--  gcc/regs.h | 5
-rw-r--r--  gcc/reload.c | 4
-rw-r--r--  gcc/rtl-error.c | 2
-rw-r--r--  gcc/rtlanal.c | 4
-rw-r--r--  gcc/scan.h | 5
-rw-r--r--  gcc/sched-rgn.c | 8
-rw-r--r--  gcc/see.c | 6
-rw-r--r--  gcc/stmt.c | 4
-rw-r--r--  gcc/target.h | 4
-rw-r--r--  gcc/testsuite/ChangeLog | 53
-rw-r--r--  gcc/testsuite/g++.dg/cdce3.C | 200
-rw-r--r--  gcc/testsuite/g++.dg/gomp/for-16.C | 5
-rw-r--r--  gcc/testsuite/g++.dg/gomp/pr27499.C | 2
-rw-r--r--  gcc/testsuite/g++.dg/gomp/pr34607.C | 4
-rw-r--r--  gcc/testsuite/g++.dg/gomp/predetermined-1.C | 33
-rw-r--r--  gcc/testsuite/g++.dg/gomp/task-1.C | 17
-rw-r--r--  gcc/testsuite/g++.dg/gomp/tls-3.C | 4
-rw-r--r--  gcc/testsuite/g++.dg/gomp/tls-4.C | 16
-rw-r--r--  gcc/testsuite/gcc.c-torture/execute/20080529-1.c | 17
-rw-r--r--  gcc/testsuite/gcc.dg/cdce1.c | 80
-rw-r--r--  gcc/testsuite/gcc.dg/cdce2.c | 55
-rw-r--r--  gcc/testsuite/gcc.dg/gomp/appendix-a/a.35.4.c | 2
-rw-r--r--  gcc/testsuite/gcc.dg/gomp/appendix-a/a.35.6.c | 2
-rw-r--r--  gcc/testsuite/gcc.dg/gomp/collapse-1.c | 92
-rw-r--r--  gcc/testsuite/gcc.dg/gomp/nesting-1.c | 198
-rw-r--r--  gcc/testsuite/gcc.dg/gomp/pr27499.c | 2
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/alias-18.c | 4
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr36438.c | 19
-rw-r--r--  gcc/testsuite/gcc.target/powerpc/dfp-dd.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/powerpc/dfp-td.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/powerpc/ppc32-abi-dfp-1.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/powerpc/ppu-intrinsics.c | 2
-rw-r--r--  gcc/testsuite/gfortran.dg/gomp/appendix-a/a.33.4.f90 | 2
-rw-r--r--  gcc/testsuite/gfortran.dg/gomp/appendix-a/a.35.4.f90 | 2
-rw-r--r--  gcc/testsuite/gfortran.dg/gomp/appendix-a/a.35.6.f90 | 2
-rw-r--r--  gcc/testsuite/gfortran.dg/gomp/collapse1.f90 | 57
-rw-r--r--  gcc/testsuite/gfortran.dg/gomp/omp_parse1.f90 | 4
-rw-r--r--  gcc/testsuite/gfortran.dg/gomp/reduction1.f90 | 2
-rw-r--r--  gcc/testsuite/gfortran.dg/gomp/sharing-3.f90 | 37
-rw-r--r--  gcc/testsuite/gfortran.dg/proc_decl_2.f90 | 20
-rw-r--r--  gcc/testsuite/lib/target-supports.exp | 6
-rw-r--r--  gcc/timevar.def | 1
-rw-r--r--  gcc/tree-call-cdce.c | 944
-rw-r--r--  gcc/tree-cfg.c | 6
-rw-r--r--  gcc/tree-dfa.c | 11
-rw-r--r--  gcc/tree-eh.c | 5
-rw-r--r--  gcc/tree-flow-inline.h | 5
-rw-r--r--  gcc/tree-gimple.c | 1
-rw-r--r--  gcc/tree-inline.c | 5
-rw-r--r--  gcc/tree-into-ssa.c | 6
-rw-r--r--  gcc/tree-loop-distribution.c | 4
-rw-r--r--  gcc/tree-nested.c | 195
-rw-r--r--  gcc/tree-parloops.c | 29
-rw-r--r--  gcc/tree-pass.h | 6
-rw-r--r--  gcc/tree-pretty-print.c | 100
-rw-r--r--  gcc/tree-profile.c | 2
-rw-r--r--  gcc/tree-scalar-evolution.c | 7
-rw-r--r--  gcc/tree-sra.c | 6
-rw-r--r--  gcc/tree-ssa-alias-warnings.c | 4
-rw-r--r--  gcc/tree-ssa-ccp.c | 4
-rw-r--r--  gcc/tree-ssa-coalesce.c | 5
-rw-r--r--  gcc/tree-ssa-dom.c | 8
-rw-r--r--  gcc/tree-ssa-dse.c | 7
-rw-r--r--  gcc/tree-ssa-forwprop.c | 2
-rw-r--r--  gcc/tree-ssa-live.c | 5
-rw-r--r--  gcc/tree-ssa-live.h | 4
-rw-r--r--  gcc/tree-ssa-loop-im.c | 7
-rw-r--r--  gcc/tree-ssa-loop-ivopts.c | 7
-rw-r--r--  gcc/tree-ssa-loop-niter.c | 7
-rw-r--r--  gcc/tree-ssa-loop-prefetch.c | 4
-rw-r--r--  gcc/tree-ssa-operands.c | 26
-rw-r--r--  gcc/tree-ssa-phiopt.c | 5
-rw-r--r--  gcc/tree-ssa-phiprop.c | 4
-rw-r--r--  gcc/tree-ssa-sccvn.c | 6
-rw-r--r--  gcc/tree-ssa-structalias.c | 43
-rw-r--r--  gcc/tree-ssa-ter.c | 19
-rw-r--r--  gcc/tree-ssa-threadupdate.c | 7
-rw-r--r--  gcc/tree-ssa.c | 5
-rw-r--r--  gcc/tree-vect-analyze.c | 11
-rw-r--r--  gcc/tree-vect-transform.c | 12
-rw-r--r--  gcc/tree-vectorizer.c | 29
-rw-r--r--  gcc/tree-vn.c | 5
-rw-r--r--  gcc/tree-vrp.c | 6
-rw-r--r--  gcc/tree.c | 27
-rw-r--r--  gcc/tree.def | 27
-rw-r--r--  gcc/tree.h | 51
-rw-r--r--  gcc/unwind-dw2-fde.c | 4
-rw-r--r--  gcc/unwind.inc | 4
-rw-r--r--  gcc/value-prof.c | 5
-rw-r--r--  gcc/vmsdbgout.c | 4
222 files changed, 8215 insertions, 1667 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index ed7c58c5c82..076cfe347c2 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,7 +1,539 @@
+2008-06-06 Uros Bizjak <ubizjak@gmail.com>
+
+ PR rtl-optimization/36438
+ * cse.c (fold_rtx) [ASHIFT, LSHIFTRT, ASHIFTRT]: Break out early
+ for vector shifts with constant scalar shift operands.
+
+2008-06-06 Sandip Matte <sandip@rmicorp.com>
+
+ * doc/invoke.texi: Document -march=xlr.
+ * config/mips/xlr.md: New file.
+ * config/mips/mips.md: Include it.
+ (cpu): Add "xlr".
+ * config/mips/mips.h (PROCESSOR_XLR): New processor_type.
+ * config/mips/mips.c (mips_cpu_info_table): Add an XLR entry.
+ (mips_rtx_cost_data): Likewise.
+
+2008-06-06 Nathan Froyd <froydnj@codesourcery.com>
+
+ * config/rs6000/rs6000.c (rs6000_mode_dependent_address): Remove
+ PRE_INC and PRE_DEC cases.
+
+2008-06-06 Jakub Jelinek <jakub@redhat.com>
+
+ PR rtl-optimization/36419
+ * except.c (expand_resx_expr): Call do_pending_stack_adjust () before
+ the emitting jump insn.
+
+ PR target/36362
+ * gimplify.c (gimplify_expr) <case TRUTH_NOT_EXPR>: If *expr_p type
+ is not bool, boolify the whole *expr_p and convert to the desired type.
+
+2008-06-06 Jakub Jelinek <jakub@redhat.com>
+
+ * c-cppbuiltin.c (c_cpp_builtins): Change _OPENMP value to 200805.
+ * langhooks.h (struct lang_hooks_for_decls): Add omp_finish_clause.
+ Add omp_private_outer_ref hook, add another argument to
+ omp_clause_default_ctor hook.
+ * langhooks-def.h (LANG_HOOKS_OMP_FINISH_CLAUSE): Define.
+ (LANG_HOOKS_OMP_PRIVATE_OUTER_REF): Define.
+ (LANG_HOOKS_OMP_CLAUSE_DEFAULT_CTOR): Change to
+ hook_tree_tree_tree_tree_null.
+ (LANG_HOOKS_DECLS): Add LANG_HOOKS_OMP_FINISH_CLAUSE and
+ LANG_HOOKS_OMP_PRIVATE_OUTER_REF.
+ * hooks.c (hook_tree_tree_tree_tree_null): New function.
+ * hooks.h (hook_tree_tree_tree_tree_null): New prototype.
+ * tree.def (OMP_TASK): New tree code.
+ * tree.h (OMP_TASK_COPYFN, OMP_TASK_ARG_SIZE, OMP_TASK_ARG_ALIGN,
+ OMP_CLAUSE_PRIVATE_OUTER_REF, OMP_CLAUSE_LASTPRIVATE_STMT,
+ OMP_CLAUSE_COLLAPSE_ITERVAR, OMP_CLAUSE_COLLAPSE_COUNT,
+ OMP_TASKREG_CHECK, OMP_TASKREG_BODY, OMP_TASKREG_CLAUSES,
+ OMP_TASKREG_FN, OMP_TASKREG_DATA_ARG, OMP_TASK_BODY,
+ OMP_TASK_CLAUSES, OMP_TASK_FN, OMP_TASK_DATA_ARG,
+ OMP_CLAUSE_COLLAPSE_EXPR): Define.
+ (enum omp_clause_default_kind): Add OMP_CLAUSE_DEFAULT_FIRSTPRIVATE.
+ (OMP_DIRECTIVE_P): Add OMP_TASK.
+ (OMP_CLAUSE_COLLAPSE, OMP_CLAUSE_UNTIED): New clause codes.
+ (OMP_CLAUSE_SCHEDULE_AUTO): New schedule kind.
+ * tree.c (omp_clause_code_name): Add OMP_CLAUSE_COLLAPSE
+ and OMP_CLAUSE_UNTIED entries.
+ (omp_clause_num_ops): Likewise. Increase OMP_CLAUSE_LASTPRIVATE
+ num_ops to 2.
+ (walk_tree_1): Handle OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED.
+ Walk OMP_CLAUSE_LASTPRIVATE_STMT.
+ * tree-pretty-print.c (dump_omp_clause): Handle
+ OMP_CLAUSE_SCHEDULE_AUTO, OMP_CLAUSE_UNTIED, OMP_CLAUSE_COLLAPSE,
+ OMP_CLAUSE_DEFAULT_FIRSTPRIVATE.
+ (dump_generic_node): Handle OMP_TASK and collapsed OMP_FOR loops.
+ * c-omp.c (c_finish_omp_for): Allow pointer iterators. Remove
+ warning about unsigned iterators. Change decl/init/cond/incr
+ arguments to TREE_VECs, check arguments for all collapsed loops.
+ (c_finish_omp_taskwait): New function.
+ (c_split_parallel_clauses): Put OMP_CLAUSE_COLLAPSE clause to
+ ws_clauses.
+ * c-parser.c (c_parser_omp_for_loop): Parse collapsed loops. Call
+ default_function_array_conversion on init. Add par_clauses argument.
+ If decl is present in parallel's lastprivate clause, change it to
+ shared and add lastprivate clause for decl to OMP_FOR_CLAUSES.
+ Add clauses argument, on success set OMP_FOR_CLAUSES to it. Look up
+ collapse count in clauses.
+ (c_parser_omp_for, c_parser_omp_parallel): Adjust
+ c_parser_omp_for_loop callers.
+ (OMP_FOR_CLAUSE_MASK): Add 1 << PRAGMA_OMP_CLAUSE_COLLAPSE.
+ (c_parser_pragma): Handle PRAGMA_OMP_TASKWAIT.
+ (c_parser_omp_clause_name): Handle collapse and untied clauses.
+ (c_parser_omp_clause_collapse, c_parser_omp_clause_untied): New
+ functions.
+ (c_parser_omp_clause_schedule): Handle schedule(auto).
+ Include correct location in the error message.
+ (c_parser_omp_all_clauses): Handle PRAGMA_OMP_CLAUSE_COLLAPSE
+ and PRAGMA_OMP_CLAUSE_UNTIED.
+ (OMP_TASK_CLAUSE_MASK): Define.
+ (c_parser_omp_task, c_parser_omp_taskwait): New functions.
+ (c_parser_omp_construct): Handle PRAGMA_OMP_TASK.
+ * tree-nested.c (convert_nonlocal_omp_clauses,
+ convert_local_omp_clauses): Handle OMP_CLAUSE_LASTPRIVATE_STMT,
+ OMP_CLAUSE_REDUCTION_INIT, OMP_CLAUSE_REDUCTION_MERGE,
+ OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED.
+ Don't handle TREE_STATIC or DECL_EXTERNAL VAR_DECLs in
+ OMP_CLAUSE_DECL.
+ (conver_nonlocal_reference, convert_local_reference,
+ convert_call_expr): Handle OMP_TASK the same as OMP_PARALLEL. Use
+ OMP_TASKREG_* macros rather than OMP_PARALLEL_*.
+ (walk_omp_for): Adjust for OMP_FOR_{INIT,COND,INCR} changes.
+ * tree-gimple.c (is_gimple_stmt): Handle OMP_TASK.
+ * c-tree.h (c_begin_omp_task, c_finish_omp_task): New prototypes.
+ * c-pragma.h (PRAGMA_OMP_TASK, PRAGMA_OMP_TASKWAIT): New.
+ (PRAGMA_OMP_CLAUSE_COLLAPSE, PRAGMA_OMP_CLAUSE_UNTIED): New.
+ * c-typeck.c (c_begin_omp_task, c_finish_omp_task): New functions.
+ (c_finish_omp_clauses): Handle OMP_CLAUSE_COLLAPSE and
+ OMP_CLAUSE_UNTIED.
+ * c-pragma.c (init_pragma): Init omp task and omp taskwait pragmas.
+ * c-common.h (c_finish_omp_taskwait): New prototype.
+ * gimple-low.c (lower_stmt): Handle OMP_TASK.
+ * tree-parloops.c (create_parallel_loop): Create 1 entry
+ vectors for OMP_FOR_{INIT,COND,INCR}.
+ * tree-cfg.c (remove_useless_stmts_1): Handle OMP_* containers.
+ (make_edges): Handle OMP_TASK.
+ * tree-ssa-operands.c (get_expr_operands): Handle collapsed OMP_FOR
+ loops, adjust for OMP_FOR_{INIT,COND,INCR} changes.
+ * tree-inline.c (estimate_num_insns_1): Handle OMP_TASK.
+ * builtin-types.def (BT_PTR_ULONGLONG, BT_PTR_FN_VOID_PTR_PTR,
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR,
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): New.
+ * omp-builtins.def (BUILT_IN_GOMP_TASK, BUILT_IN_GOMP_TASKWAIT,
+ BUILT_IN_GOMP_LOOP_ULL_STATIC_START,
+ BUILT_IN_GOMP_LOOP_ULL_DYNAMIC_START,
+ BUILT_IN_GOMP_LOOP_ULL_GUIDED_START,
+ BUILT_IN_GOMP_LOOP_ULL_RUNTIME_START,
+ BUILT_IN_GOMP_LOOP_ULL_ORDERED_STATIC_START,
+ BUILT_IN_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START,
+ BUILT_IN_GOMP_LOOP_ULL_ORDERED_GUIDED_START,
+ BUILT_IN_GOMP_LOOP_ULL_ORDERED_RUNTIME_START,
+ BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT,
+ BUILT_IN_GOMP_LOOP_ULL_DYNAMIC_NEXT,
+ BUILT_IN_GOMP_LOOP_ULL_GUIDED_NEXT,
+ BUILT_IN_GOMP_LOOP_ULL_RUNTIME_NEXT,
+ BUILT_IN_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT,
+ BUILT_IN_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT,
+ BUILT_IN_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT,
+ BUILT_IN_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT): New builtins.
+ * gimplify.c (gimplify_omp_for): Allow pointer type for decl,
+ handle POINTER_PLUS_EXPR. If loop counter has been replaced and
+ original iterator is present in lastprivate clause or if
+ collapse > 1, set OMP_CLAUSE_LASTPRIVATE_STMT. Handle collapsed
+ OMP_FOR loops, adjust for OMP_FOR_{INIT,COND,INCR} changes.
+ (gimplify_expr): Handle OMP_SECTIONS_SWITCH and OMP_TASK.
+ (enum gimplify_omp_var_data): Add GOVD_PRIVATE_OUTER_REF.
+ (omp_notice_variable): Set GOVD_PRIVATE_OUTER_REF if needed,
+ if it is set, lookup var in outer contexts too. Handle
+ OMP_CLAUSE_DEFAULT_FIRSTPRIVATE. Handle vars that are supposed
+ to be implicitly determined firstprivate for task regions.
+ (gimplify_scan_omp_clauses): Set GOVD_PRIVATE_OUTER_REF if needed,
+ if it is set, lookup var in outer contexts too. Set
+ OMP_CLAUSE_PRIVATE_OUTER_REF if GOVD_PRIVATE_OUTER_REF is set.
+ Handle OMP_CLAUSE_LASTPRIVATE_STMT, OMP_CLAUSE_COLLAPSE and
+ OMP_CLAUSE_UNTIED. Take region_type as last argument
+ instead of in_parallel and in_combined_parallel.
+ (gimplify_omp_parallel, gimplify_omp_for, gimplify_omp_workshare):
+ Adjust callers.
+ (gimplify_adjust_omp_clauses_1): Set OMP_CLAUSE_PRIVATE_OUTER_REF if
+ GOVD_PRIVATE_OUTER_REF is set. Call omp_finish_clause langhook.
+ (new_omp_context): Set default_kind to
+ OMP_CLAUSE_DEFAULT_UNSPECIFIED for OMP_TASK regions.
+ (omp_region_type): New enum.
+ (struct gimplify_omp_ctx): Remove is_parallel and is_combined_parallel
+ fields, add region_type.
+ (new_omp_context): Take region_type as argument instead of is_parallel
+ and is_combined_parallel.
+ (gimple_add_tmp_var, omp_firstprivatize_variable, omp_notice_variable,
+ omp_is_private, omp_check_private): Adjust ctx->is_parallel and
+ ctx->is_combined_parallel checks.
+ (gimplify_omp_task): New function.
+ (gimplify_adjust_omp_clauses): Handle OMP_CLAUSE_COLLAPSE and
+ OMP_CLAUSE_UNTIED.
+ * omp-low.c (extract_omp_for_data): Use schedule(static)
+ for schedule(auto). Handle pointer and unsigned iterators.
+ Compute fd->iter_type. Handle POINTER_PLUS_EXPR increments.
+ Add loops argument. Extract data for collapsed OMP_FOR loops.
+ (expand_parallel_call): Assert sched_kind isn't auto,
+ map runtime schedule to index 3.
+ (struct omp_for_data_loop): New type.
+ (struct omp_for_data): Remove v, n1, n2, step, cond_code fields.
+ Add loop, loops, collapse and iter_type fields.
+ (workshare_safe_to_combine_p): Disallow combined for if
+ iter_type is unsigned long long. Don't combine collapse > 1 loops
+ unless all bounds and steps are constant. Adjust extract_omp_for_data
+ caller.
+ (expand_omp_for_generic): Handle pointer, unsigned and long long
+ iterators. Handle collapsed OMP_FOR loops. Adjust
+ for struct omp_for_data changes. If libgomp function doesn't return
+ boolean_type_node, add comparison of the return value with 0.
+ (expand_omp_for_static_nochunk, expand_omp_for_static_chunk): Handle
+ pointer, unsigned and long long iterators. Adjust for struct
+ omp_for_data changes.
+ (expand_omp_for): Assert sched_kind isn't auto, map runtime schedule
+ to index 3. Use GOMP_loop_ull*{start,next} if iter_type is
+ unsigned long long. Allocate loops array, pass it to
+ extract_omp_for_data. For collapse > 1 loops use always
+ expand_omp_for_generic.
+ (omp_context): Add sfield_map and srecord_type fields.
+ (is_task_ctx, lookup_sfield): New functions.
+ (use_pointer_for_field): Use is_task_ctx helper. Change first
+ argument's type from const_tree to tree. Clarify comment.
+ In OMP_TASK disallow copy-in/out sharing.
+ (build_sender_ref): Call lookup_sfield instead of lookup_field.
+ (install_var_field): Add mask argument. Populate both record_type
+ and srecord_type if needed.
+ (delete_omp_context): Destroy sfield_map, clear DECL_ABSTRACT_ORIGIN
+ in srecord_type.
+ (fixup_child_record_type): Also remap FIELD_DECL's DECL_SIZE{,_UNIT}
+ and DECL_FIELD_OFFSET.
+ (scan_sharing_clauses): Adjust install_var_field callers. For
+ firstprivate clauses on explicit tasks allocate the var by value in
+ record_type unconditionally, rather than by reference.
+ Handle OMP_CLAUSE_PRIVATE_OUTER_REF. Scan OMP_CLAUSE_LASTPRIVATE_STMT.
+ Use is_taskreg_ctx instead of is_parallel_ctx.
+ Handle OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED.
+ (create_omp_child_function_name): Add task_copy argument, use
+ *_omp_cpyfn* names if it is true.
+ (create_omp_child_function): Add task_copy argument, if true create
+ *_omp_cpyfn* helper function.
+ (scan_omp_parallel): Adjust create_omp_child_function callers.
+ Rename parallel_nesting_level to taskreg_nesting_level.
+ (scan_omp_task): New function.
+ (lower_rec_input_clauses): Don't run constructors for firstprivate
+ explicit task vars which are initialized by *_omp_cpyfn*.
+ Pass outer var ref to omp_clause_default_ctor hook if
+ OMP_CLAUSE_PRIVATE_OUTER_REF or OMP_CLAUSE_LASTPRIVATE.
+ Replace OMP_CLAUSE_REDUCTION_PLACEHOLDER decls in
+ OMP_CLAUSE_REDUCTION_INIT.
+ (lower_send_clauses): Clear DECL_ABSTRACT_ORIGIN if in task to
+ avoid duplicate setting of fields. Handle
+ OMP_CLAUSE_PRIVATE_OUTER_REF.
+ (lower_send_shared_vars): Use srecord_type if non-NULL. Don't
+ copy-out if TREE_READONLY, only copy-in.
+ (expand_task_copyfn): New function.
+ (expand_task_call): New function.
+ (struct omp_taskcopy_context): New type.
+ (task_copyfn_copy_decl, task_copyfn_remap_type, create_task_copyfn):
+ New functions.
+ (lower_omp_parallel): Rename to...
+ (lower_omp_taskreg): ... this. Use OMP_TASKREG_* macros where needed.
+ Call create_task_copyfn if srecord_type is needed. Adjust
+ sender_decl type.
+ (task_shared_vars): New variable.
+ (check_omp_nesting_restrictions): Warn if work-sharing,
+ barrier, master or ordered region is closely nested inside OMP_TASK.
+ Add warnings for barrier if closely nested inside of work-sharing,
+ ordered, or master region.
+ (scan_omp_1): Call check_omp_nesting_restrictions even for
+ GOMP_barrier calls. Rename parallel_nesting_level to
+ taskreg_nesting_level. Handle OMP_TASK.
+ (lower_lastprivate_clauses): Even if some lastprivate is found on a
+ work-sharing construct, continue looking for them on parent parallel
+ construct.
+ (lower_omp_for_lastprivate): Add lastprivate clauses
+ to the beginning of dlist rather than end. Adjust for struct
+ omp_for_data changes.
+ (lower_omp_for): Add rec input clauses before OMP_FOR_PRE_BODY,
+ not after it. Handle collapsed OMP_FOR loops, adjust for
+ OMP_FOR_{INIT,COND,INCR} changes, adjust extract_omp_for_data caller.
+ (get_ws_args_for): Adjust extract_omp_for_data caller.
+ (scan_omp_for): Handle collapsed OMP_FOR
+ loops, adjust for OMP_FOR_{INIT,COND,INCR} changes.
+ (lower_omp_single_simple): If libgomp function doesn't return
+ boolean_type_node, add comparison of the return value with 0.
+ (diagnose_sb_1, diagnose_sb_2): Handle collapsed OMP_FOR
+ loops, adjust for OMP_FOR_{INIT,COND,INCR} changes. Handle OMP_TASK.
+ (parallel_nesting_level): Rename to...
+ (taskreg_nesting_level): ... this.
+ (is_taskreg_ctx): New function.
+ (build_outer_var_ref, omp_copy_decl): Use is_taskreg_ctx instead
+ of is_parallel_ctx.
+ (execute_lower_omp): Rename parallel_nesting_level to
+ taskreg_nesting_level.
+ (expand_omp_parallel): Rename to...
+ (expand_omp_taskreg): ... this. Use OMP_TASKREG_* macros where needed.
+ Call omp_task_call for OMP_TASK regions.
+ (expand_omp): Adjust caller, handle OMP_TASK.
+ (lower_omp_1): Adjust lower_omp_taskreg caller, handle OMP_TASK.
+
+ * bitmap.c (bitmap_default_obstack_depth): New variable.
+ (bitmap_obstack_initialize, bitmap_obstack_release): Do nothing
+ if argument is NULL and bitmap_default_obstack is already initialized.
+ * ipa-struct-reorg.c (do_reorg_1): Call bitmap_obstack_release
+ at the end.
+ * matrix-reorg.c (matrix_reorg): Likewise.
+
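(Editorial aside, not part of the patch: the large entry above is the front-end and middle-end work for the OpenMP 3.0 task model. As a rough illustration of what it enables, the classic Fibonacci tasking example below exercises the new task, untied and taskwait constructs; it is generic OpenMP 3.0 code, not taken from this commit, and would be built with gcc -fopenmp.)

/* Illustrative only: OpenMP 3.0 explicit tasks.  */
#include <stdio.h>

static int
fib (int n)
{
  int a, b;
  if (n < 2)
    return n;
#pragma omp task untied shared(a) firstprivate(n)
  a = fib (n - 1);
#pragma omp task untied shared(b) firstprivate(n)
  b = fib (n - 2);
#pragma omp taskwait          /* wait for both child tasks before using a, b */
  return a + b;
}

int
main (void)
{
#pragma omp parallel
#pragma omp single
  printf ("fib(10) = %d\n", fib (10));
  return 0;
}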
+2008-06-06 Uros Bizjak <ubizjak@gmail.com>
+
+ * config/i386/i386.md (*indirect_jump): Macroize using P
+ mode iterator. Remove !TARGET_64BIT from insn constraints.
+ (*tablejump_1): Ditto.
+ (*indirect_jump_rex64): Remove insn pattern.
+ (*tablejump_1_rex64): Ditto.
+ (eh_return_<mode>): Macroize using P mode iterator from eh_return_di
+ and eh_return_si insn patterns.
+
+2008-06-06 Richard Guenther <rguenther@suse.de>
+
+ * tree-ssa-structalias.c (merge_smts_into): Remove.
+ (find_what_p_points_to): Do not bother to compute the
+ points-to set for pt_anything pointers.
+ * tree-ssa-operands.c (get_addr_dereference_operands): No NMT
+ for pt_anything pointers is ok.
+
+2008-06-06 Jan Hubicka <jh@suse.cz>
+
+ * passes.c (execute_ipa_pass_list): Do not regenerate summaries.
+
+2008-06-06 Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
+
+ * cgraph.c: Fix typos in comments.
+ (cgraph_availability_names): Fix string typo.
+ * fold-const.c: Fix typos in comments.
+ (fold_binary): Fix typo in warning.
+ * genautomata.c: Fix typos in comments.
+ (check_presence_pattern_sets): Fix typo in local variable.
+ (output_description): Fix typo in output.
+ * ggc-zone.c (ggc_pch_finish): Fix typo in error message.
+ * hwint.h: Likewise.
+ * matrix-reorg.c (check_allocation_function): Likewise.
+ * omega.c (smooth_weird_equations): Likewise.
+ * auto-inc-dec.c: Fix typos in comments.
+ * bb-reorder.c: Likewise.
+ * builtins.c: Likewise.
+ * c-common.c: Likewise.
+ * c-cppbuiltin.c: Likewise.
+ * c-parser.c: Likewise.
+ * c-pretty-print.c: Likewise.
+ * cfgcleanup.c: Likewise.
+ * cfgexpand.c: Likewise.
+ * cfghooks.c: Likewise.
+ * cfglayout.c: Likewise.
+ * cfgloopmanip.c: Likewise.
+ * cgraphunit.c: Likewise.
+ * coverage.c: Likewise.
+ * dbxout.c: Likewise.
+ * df-byte-scan.c: Likewise.
+ * df-core.c: Likewise.
+ * df-problems.c: Likewise.
+ * df-scan.c: Likewise.
+ * dfp.c: Likewise.
+ * dominance.c: Likewise.
+ * domwalk.c: Likewise.
+ * dse.c: Likewise.
+ * dwarf2out.c: Likewise.
+ * emit-rtl.c: Likewise.
+ * et-forest.c: Likewise.
+ * function.c: Likewise.
+ * function.h: Likewise.
+ * gcc.c: Likewise.
+ * gcov-io.c: Likewise.
+ * gcov.c: Likewise.
+ * gcse.c: Likewise.
+ * genattrtab.c: Likewise.
+ * ggc-page.c: Likewise.
+ * gimplify.c: Likewise.
+ * gthr-lynx.h: Likewise.
+ * haifa-sched.c: Likewise.
+ * ipa-cp.c: Likewise.
+ * ipa-inline.c: Likewise.
+ * ipa-prop.h: Likewise.
+ * ipa-pure-const.c: Likewise.
+ * ipa-struct-reorg.c: Likewise.
+ * ipa-struct-reorg.h: Likewise.
+ * ipa-type-escape.c: Likewise.
+ * ipa.c: Likewise.
+ * loop-doloop.c: Likewise.
+ * mips-tfile.c: Likewise.
+ * mkmap-flat.awk: Likewise.
+ * mkmap-symver.awk: Likewise.
+ * modulo-sched.c: Likewise.
+ * omp-low.c: Likewise.
+ * optabs.c: Likewise.
+ * optabs.h: Likewise.
+ * opts.c: Likewise.
+ * passes.c: Likewise.
+ * postreload-gcse.c: Likewise.
+ * postreload.c: Likewise.
+ * predict.c: Likewise.
+ * pretty-print.h: Likewise.
+ * profile.c: Likewise.
+ * protoize.c: Likewise.
+ * ra-conflict.c: Likewise.
+ * real.c: Likewise.
+ * recog.c: Likewise.
+ * regclass.c: Likewise.
+ * regs.h: Likewise.
+ * reload.c: Likewise.
+ * rtl-error.c: Likewise.
+ * rtlanal.c: Likewise.
+ * scan.h: Likewise.
+ * sched-rgn.c: Likewise.
+ * see.c: Likewise.
+ * stmt.c: Likewise.
+ * target.h: Likewise.
+ * tree-dfa.c: Likewise.
+ * tree-eh.c: Likewise.
+ * tree-flow-inline.h: Likewise.
+ * tree-inline.c: Likewise.
+ * tree-into-ssa.c: Likewise.
+ * tree-loop-distribution.c: Likewise.
+ * tree-nested.c: Likewise.
+ * tree-parloops.c: Likewise.
+ * tree-pass.h: Likewise.
+ * tree-pretty-print.c: Likewise.
+ * tree-profile.c: Likewise.
+ * tree-scalar-evolution.c: Likewise.
+ * tree-sra.c: Likewise.
+ * tree-ssa-alias-warnings.c: Likewise.
+ * tree-ssa-ccp.c: Likewise.
+ * tree-ssa-coalesce.c: Likewise.
+ * tree-ssa-dom.c: Likewise.
+ * tree-ssa-dse.c: Likewise.
+ * tree-ssa-forwprop.c: Likewise.
+ * tree-ssa-live.c: Likewise.
+ * tree-ssa-live.h: Likewise.
+ * tree-ssa-loop-im.c: Likewise.
+ * tree-ssa-loop-ivopts.c: Likewise.
+ * tree-ssa-loop-niter.c: Likewise.
+ * tree-ssa-loop-prefetch.c: Likewise.
+ * tree-ssa-phiopt.c: Likewise.
+ * tree-ssa-phiprop.c: Likewise.
+ * tree-ssa-sccvn.c: Likewise.
+ * tree-ssa-ter.c: Likewise.
+ * tree-ssa-threadupdate.c: Likewise.
+ * tree-ssa.c: Likewise.
+ * tree-vect-analyze.c: Likewise.
+ * tree-vect-transform.c: Likewise.
+ * tree-vectorizer.c: Likewise.
+ * tree-vn.c: Likewise.
+ * tree-vrp.c: Likewise.
+ * tree.c: Likewise.
+ * tree.def: Likewise.
+ * tree.h: Likewise.
+ * unwind-dw2-fde.c: Likewise.
+ * unwind.inc: Likewise.
+ * value-prof.c: Likewise.
+ * vmsdbgout.c: Likewise.
+
+2008-06-05 David Edelsohn <edelsohn@gnu.org>
+
+ * config/rs6000/xcoff.h (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P): Do not
+ always place FP constants in the TOC for TARGET_POWERPC64.
+ * config/rs6000/linux64.h (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P): Same.
+
+2008-06-05 Joseph Myers <joseph@codesourcery.com>
+
+ * config.gcc (powerpc-*-linux*spe*): Use t-dfprules.
+ * config/rs6000/dfp.md (negdd2, absdd2, negtd2, abstd2): Do not
+ enable for TARGET_E500_DOUBLE.
+ (*movdd_softfloat32): Also enable for !TARGET_FPRS.
+ * config/rs6000/rs6000.c (invalid_e500_subreg): Treat decimal
+ floating-point modes like integer modes for E500 double.
+ (rs6000_legitimate_offset_address_p): Likewise.
+ (rs6000_legitimize_address): Likewise. Do not allow REG+REG
+ addressing for DDmode for E500 double.
+ (rs6000_hard_regno_nregs): Do not treat decimal floating-point
+ modes as using 64-bits of registers for E500 double.
+ (spe_build_register_parallel): Do not handle DDmode or TDmode.
+ (rs6000_spe_function_arg): Do not handle DDmode or TDmode
+ specially for E500 double.
+ (function_arg): Do not call rs6000_spe_function_arg for DDmode or
+ TDmode for E500 double.
+ (rs6000_gimplify_va_arg): Only handle SDmode in registers
+ specially if TARGET_HARD_FLOAT && TARGET_FPRS.
+ (rs6000_split_multireg_move): Do not handle TDmode specially for
+ E500 double.
+ (spe_func_has_64bit_regs_p): Do not treat DDmode or TDmode as
+ using 64-bit registers for E500 double.
+ (emit_frame_save): Do not handle DDmode specially for E500 double.
+ (gen_frame_mem_offset): Likewise.
+ (rs6000_function_value): Do not call spe_build_register_parallel
+ for DDmode or TDmode.
+ (rs6000_libcall_value): Likewise.
+ * config/rs6000/rs6000.h (LOCAL_ALIGNMENT, MEMBER_TYPE_FORCES_BLK,
+ DATA_ALIGNMENT, CLASS_MAX_NREGS): Do not handle DDmode specially
+ for E500 double.
+
+2008-06-04 H.J. Lu <hongjiu.lu@intel.com>
+
+ * config/i386/i386.c (setup_incoming_varargs_64): Fix a typo
+ in comments.
+
+2008-06-04 Junjie Gu <jgu@tensilica.com>
+
+ * config/xtensa/lib2funcs.S (__xtensa_nonlocal_goto): Use unsigned
+ comparison for frame pointers.
+
+2008-06-04 Andy Hutchinson <hutchinsonandy@aim.com>
+
+ PR target/27386
+ * config/avr/avr.h (PUSH_ROUNDING): Remove.
+
+2008-06-04 Andy Hutchinson <hutchinsonandy@aim.com>
+
+ PR target/30243
+ * builtins.c (expand_builtin_signbit): Don't take lowpart when
+ register is already smaller or equal to required mode.
+
+2008-06-04 Xinliang David Li <davidxl@google.com>
+
+ * tree-call-cdce.c: New file.
+ (cond_dead_built_in_calls): New static variable.
+ (input_domain): New struct.
+ (check_pow): New function.
+ (check_builtin_call): Ditto.
+ (check_target_format): Ditto.
+ (is_call_dce_candidate): Ditto.
+ (gen_one_condition): Ditto.
+ (gen_conditions_for_domain): Ditto.
+ (get_domain): Ditto.
+ (gen_conditions_for_pow_cst_base): Ditto.
+ (gen_conditions_for_pow_int_base): Ditto.
+ (gen_conditions_for_pow): Ditto.
+ (get_no_error_domain): Ditto.
+ (gen_shrink_wrap_conditions): Ditto.
+ (shrink_wrap_one_built_in_call): Ditto.
+ (shink_wrap_conditional_dead_built_in_calls): Ditto.
+ (tree_call_cdce): Ditto.
+ (gate_call_cdce): Ditto.
+ (pass_call_cdce): New gimple pass.
+ * passes.c: (init_optimization_passes): New pass.
+ * tree-pass.h: New pass declaration.
+ * opts.c (decode_options): New flag setting.
+ * common.opt: Add -ftree-builtin-call-dce flag.
+ * Makefile.in: Add new source file.
+ * tempvar.def: New tv_id.
+ * doc/invoke.texi (-ftree-builtin-call-dce): New flag.
+
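(Editorial aside, not from the patch: the new -ftree-builtin-call-dce pass targets calls to math built-ins whose results are unused and that survive ordinary DCE only because they may set errno or raise exceptions. A minimal sketch of a candidate call follows; the function name and constants are made up, and such code would be compiled with something like gcc -O2 -ftree-builtin-call-dce -fmath-errno.)

/* The result of pow is discarded, so the call is "conditionally dead":
   it only matters when the arguments could overflow and set errno.
   The pass is meant to guard the call with a range check on X and
   remove it from the common path.  */
#include <math.h>

void
errno_side_effect_only (double x)
{
  pow (10.0, x);   /* value unused; kept only for its possible errno side effect */
}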
2008-06-04 Richard Guenther <rguenther@suse.de>
- * tree-flow-inline.h (is_global_var): Do not check TREE_STATIC
- on MTAGs.
+ * tree-flow-inline.h (is_global_var): Do not check TREE_STATIC on MTAGs.
(is_call_clobbered): Always check var_ann->call_clobbered.
(mark_call_clobbered): Always set var_ann->call_clobbered.
(clear_call_clobbered): Always clear var_ann->call_clobbered.
@@ -13,7 +545,7 @@
(may_be_aliased): Do not check TREE_PUBLIC on MTAGs.
2008-06-04 Joseph Myers <joseph@codesourcery.com>
- Maxim Kuvyrkov <maxim@codesourcery.com>
+ Maxim Kuvyrkov <maxim@codesourcery.com>
* config/m68k/m68k.opt (mxgot): New option.
* config/m68k/m68k.c (legitimize_pic_address): Handle -mxgot.
@@ -30,7 +562,7 @@
2008-06-03 H.J. Lu <hongjiu.lu@intel.com>
- * config/i386/i386.c (ix86_gen_leave): New.
+ * config/i386/i386.c (ix86_gen_leave): New.
(ix86_gen_pop1): Likewise.
(ix86_gen_add3): Likewise.
(ix86_gen_sub3): Likewise.
@@ -67,8 +599,7 @@
2008-06-03 Kai Tietz <kai.tietz@onevision.com>
* config/i386/i386-protos.h (ix86_reg_parm_stack_space): New.
- * config/i386/i386.h (ix86_reg_parm_stack_space): Removed
- prototype.
+ * config/i386/i386.h (ix86_reg_parm_stack_space): Removed prototype.
* config/i386/i386.c (ix86_reg_parm_stack_space): Changed
return type to int.
(ix86_call_abi_override): Remove check for call_used_regs.
@@ -82,8 +613,7 @@
* doc/tm.texi (OVERRIDE_ABI_FORMAT): New.
* doc/extend.texi (ms_abi,sysv_abi): New attribute description.
- * function.c (allocate_struct_function): Use of
- OVERRIDE_ABI_FORMAT.
+ * function.c (allocate_struct_function): Use of OVERRIDE_ABI_FORMAT.
* config/i386/cygming.h (TARGET_64BIT_MS_ABI): Make use
of cfun and DEFAULT_ABI to deceide abi mode.
(DEFAULT_ABI): New.
@@ -145,8 +675,8 @@
(ix86_reg_parm_stack_space): New prototype.
(CUMULATIVE_ARGS): Add call_abi member.
(machine_function): Add call_abi member.
- * config/i386/mingw32.h (EXTRA_OS_CPP_BUILTINS): Replace TARGET_64BIT_MS_ABI
- by DEFAULT_ABI compare to MS_ABI.
+ * config/i386/mingw32.h (EXTRA_OS_CPP_BUILTINS): Replace
+ TARGET_64BIT_MS_ABI by DEFAULT_ABI compare to MS_ABI.
2008-06-02 Andy Hutchinson <hutchinsonandy@aim.com>
diff --git a/gcc/ChangeLog.melt b/gcc/ChangeLog.melt
index 78b4491fd95..3361bd7efc8 100644
--- a/gcc/ChangeLog.melt
+++ b/gcc/ChangeLog.melt
@@ -1,4 +1,7 @@
2008-06-06 Basile Starynkevitch <basile@starynkevitch.net>
+ MERGED WITH TRUNK rev.136492
+
+2008-06-06 Basile Starynkevitch <basile@starynkevitch.net>
* melt/warm-basilys.bysl: added compile_warning macro.
still unimplemented normexp_export_value.
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 71b6316e95e..cb3399f407f 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20080604
+20080606
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 1d00e75ce4a..4143826644b 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1164,6 +1164,7 @@ OBJS-common = \
toplev.o \
tracer.o \
tree-affine.o \
+ tree-call-cdce.o \
tree-cfg.o \
tree-cfgcleanup.o \
tree-chrec.o \
@@ -2662,6 +2663,10 @@ tree-ssa-dce.o : tree-ssa-dce.c $(CONFIG_H) $(SYSTEM_H) $(TREE_H) \
coretypes.h $(TREE_DUMP_H) tree-pass.h $(FLAGS_H) $(BASIC_BLOCK_H) \
$(GGC_H) hard-reg-set.h $(OBSTACK_H) $(TREE_GIMPLE_H) $(CFGLOOP_H) \
$(SCEV_H)
+tree-call-cdce.o : tree-call-cdce.c $(CONFIG_H) $(SYSTEM_H) $(TREE_H) \
+ $(RTL_H) $(TM_P_H) $(TREE_FLOW_H) $(DIAGNOSTIC_H) $(TIMEVAR_H) $(TM_H) \
+ coretypes.h $(TREE_DUMP_H) tree-pass.h $(FLAGS_H) $(BASIC_BLOCK_H) \
+ $(GGC_H) hard-reg-set.h $(OBSTACK_H) $(TREE_GIMPLE_H)
tree-ssa-ccp.o : tree-ssa-ccp.c $(TREE_FLOW_H) $(CONFIG_H) \
$(SYSTEM_H) $(RTL_H) $(TREE_H) $(TM_P_H) $(EXPR_H) $(GGC_H) output.h \
$(DIAGNOSTIC_H) $(FUNCTION_H) $(TIMEVAR_H) $(TM_H) coretypes.h \
diff --git a/gcc/auto-inc-dec.c b/gcc/auto-inc-dec.c
index 8dbcecf93b5..18c1b7107da 100644
--- a/gcc/auto-inc-dec.c
+++ b/gcc/auto-inc-dec.c
@@ -1,5 +1,5 @@
/* Discovery of auto-inc and auto-dec instructions.
- Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
This file is part of GCC.
@@ -636,7 +636,7 @@ attempt_change (rtx new_addr, rtx inc_reg)
/* Try to combine the instruction in INC_INSN with the instruction in
MEM_INSN. First the form is determined using the DECISION_TABLE
- and and the results of parsing the INC_INSN and the MEM_INSN.
+ and the results of parsing the INC_INSN and the MEM_INSN.
Assuming the form is ok, a prototype new address is built which is
passed to ATTEMPT_CHANGE for final processing. */
diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c
index 3b59fcd8377..a3fd3feb6c7 100644
--- a/gcc/bb-reorder.c
+++ b/gcc/bb-reorder.c
@@ -1,5 +1,5 @@
/* Basic block reordering routines for the GNU compiler.
- Copyright (C) 2000, 2002, 2003, 2004, 2005, 2006, 2007
+ Copyright (C) 2000, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
@@ -1490,7 +1490,7 @@ fix_up_fall_thru_edges (void)
}
}
-/* This function checks the destination blockof a "crossing jump" to
+/* This function checks the destination block of a "crossing jump" to
see if it has any crossing predecessors that begin with a code label
and end with an unconditional jump. If so, it returns that predecessor
block. (This is to avoid creating lots of new basic blocks that all
diff --git a/gcc/bitmap.c b/gcc/bitmap.c
index c2a66f96a73..97e60de6b3c 100644
--- a/gcc/bitmap.c
+++ b/gcc/bitmap.c
@@ -119,6 +119,7 @@ register_overhead (bitmap b, int amount)
/* Global data */
bitmap_element bitmap_zero_bits; /* An element of all zero bits. */
bitmap_obstack bitmap_default_obstack; /* The default bitmap obstack. */
+static int bitmap_default_obstack_depth;
static GTY((deletable)) bitmap_element *bitmap_ggc_free; /* Freelist of
GC'd elements. */
@@ -302,7 +303,11 @@ void
bitmap_obstack_initialize (bitmap_obstack *bit_obstack)
{
if (!bit_obstack)
- bit_obstack = &bitmap_default_obstack;
+ {
+ if (bitmap_default_obstack_depth++)
+ return;
+ bit_obstack = &bitmap_default_obstack;
+ }
#if !defined(__GNUC__) || (__GNUC__ < 2)
#define __alignof__(type) 0
@@ -323,7 +328,14 @@ void
bitmap_obstack_release (bitmap_obstack *bit_obstack)
{
if (!bit_obstack)
- bit_obstack = &bitmap_default_obstack;
+ {
+ if (--bitmap_default_obstack_depth)
+ {
+ gcc_assert (bitmap_default_obstack_depth > 0);
+ return;
+ }
+ bit_obstack = &bitmap_default_obstack;
+ }
bit_obstack->elements = NULL;
bit_obstack->heads = NULL;
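(Editorial sketch, not from the patch: the point of the new depth counter is that paired calls on the default obstack may now nest. Only the outermost bitmap_obstack_initialize (NULL) really sets the obstack up, and only the matching outermost bitmap_obstack_release (NULL) really frees it. The GCC-internal calling pattern below, with hypothetical pass names, shows what this protects.)

/* Sketch only; assumes GCC's bitmap.h is in scope.  */
static void
inner_analysis (void)
{
  bitmap_obstack_initialize (NULL);   /* depth 1 -> 2: no-op */
  /* ... allocate temporary bitmaps on the default obstack ... */
  bitmap_obstack_release (NULL);      /* depth 2 -> 1: no-op, outer bitmaps survive */
}

static void
outer_pass (void)
{
  bitmap_obstack_initialize (NULL);   /* depth 0 -> 1: really initializes */
  inner_analysis ();
  bitmap_obstack_release (NULL);      /* depth 1 -> 0: really releases */
}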
diff --git a/gcc/builtin-types.def b/gcc/builtin-types.def
index 25b5a0964f5..7d25e5aad6d 100644
--- a/gcc/builtin-types.def
+++ b/gcc/builtin-types.def
@@ -1,4 +1,4 @@
-/* Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007
+/* Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
@@ -121,6 +121,7 @@ DEF_PRIMITIVE_TYPE (BT_I16, builtin_type_for_size (BITS_PER_UNIT*16, 1))
DEF_POINTER_TYPE (BT_PTR_CONST_STRING, BT_CONST_STRING)
DEF_POINTER_TYPE (BT_PTR_LONG, BT_LONG)
+DEF_POINTER_TYPE (BT_PTR_ULONGLONG, BT_ULONGLONG)
DEF_POINTER_TYPE (BT_PTR_PTR, BT_PTR)
DEF_FUNCTION_TYPE_0 (BT_FN_VOID, BT_VOID)
@@ -308,6 +309,10 @@ DEF_FUNCTION_TYPE_2 (BT_FN_I8_VPTR_I8, BT_I8, BT_VOLATILE_PTR, BT_I8)
DEF_FUNCTION_TYPE_2 (BT_FN_I16_VPTR_I16, BT_I16, BT_VOLATILE_PTR, BT_I16)
DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_LONGPTR_LONGPTR,
BT_BOOL, BT_PTR_LONG, BT_PTR_LONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR,
+ BT_BOOL, BT_PTR_ULONGLONG, BT_PTR_ULONGLONG)
+
+DEF_POINTER_TYPE (BT_PTR_FN_VOID_PTR_PTR, BT_FN_VOID_PTR_PTR)
DEF_FUNCTION_TYPE_3 (BT_FN_STRING_STRING_CONST_STRING_SIZE,
BT_STRING, BT_STRING, BT_CONST_STRING, BT_SIZE)
@@ -410,10 +415,21 @@ DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR,
DEF_FUNCTION_TYPE_6 (BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG,
BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR, BT_UINT,
BT_LONG, BT_LONG, BT_LONG)
+DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ BT_BOOL, BT_BOOL, BT_ULONGLONG, BT_ULONGLONG,
+ BT_ULONGLONG, BT_PTR_ULONGLONG, BT_PTR_ULONGLONG)
DEF_FUNCTION_TYPE_7 (BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG,
BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR, BT_UINT,
BT_LONG, BT_LONG, BT_LONG, BT_LONG)
+DEF_FUNCTION_TYPE_7 (BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT,
+ BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR,
+ BT_PTR_FN_VOID_PTR_PTR, BT_LONG, BT_LONG,
+ BT_BOOL, BT_UINT)
+DEF_FUNCTION_TYPE_7 (BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ BT_BOOL, BT_BOOL, BT_ULONGLONG, BT_ULONGLONG,
+ BT_ULONGLONG, BT_ULONGLONG,
+ BT_PTR_ULONGLONG, BT_PTR_ULONGLONG)
DEF_FUNCTION_TYPE_VAR_0 (BT_FN_VOID_VAR, BT_VOID)
DEF_FUNCTION_TYPE_VAR_0 (BT_FN_INT_VAR, BT_INT)
diff --git a/gcc/builtins.c b/gcc/builtins.c
index 05eb6bbdd98..8abcc114647 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -1,6 +1,6 @@
/* Expand builtin functions.
Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
@@ -4783,7 +4783,7 @@ std_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
/* If the actual alignment is less than the alignment of the type,
adjust the type accordingly so that we don't assume strict alignment
- when deferencing the pointer. */
+ when dereferencing the pointer. */
boundary *= BITS_PER_UNIT;
if (boundary < TYPE_ALIGN (type))
{
@@ -5786,7 +5786,7 @@ expand_builtin_signbit (tree exp, rtx target)
lo = 0;
}
- if (imode != rmode)
+ if (GET_MODE_SIZE (imode) > GET_MODE_SIZE (rmode))
temp = gen_lowpart (rmode, temp);
temp = expand_binop (rmode, and_optab, temp,
immed_double_const (lo, hi, rmode),
@@ -8768,9 +8768,9 @@ fold_builtin_memory_op (tree dest, tree src, tree len, tree type, bool ignore, i
if (!tree_int_cst_equal (lang_hooks.expr_size (srcvar), len))
return NULL_TREE;
/* With memcpy, it is possible to bypass aliasing rules, so without
- this check i. e. execute/20060930-2.c would be misoptimized, because
+ this check i.e. execute/20060930-2.c would be misoptimized, because
it use conflicting alias set to hold argument for the memcpy call.
- This check is probably unnecesary with -fno-strict-aliasing.
+ This check is probably unnecessary with -fno-strict-aliasing.
Similarly for destvar. See also PR29286. */
if (!var_decl_component_p (srcvar)
/* Accept: memcpy (*char_var, "test", 1); that simplify
diff --git a/gcc/c-common.c b/gcc/c-common.c
index c02b442c7aa..377db5708d0 100644
--- a/gcc/c-common.c
+++ b/gcc/c-common.c
@@ -221,7 +221,7 @@ tree c_global_trees[CTI_MAX];
/* Switches common to the C front ends. */
-/* Nonzero if prepreprocessing only. */
+/* Nonzero if preprocessing only. */
int flag_preprocess_only;
@@ -360,7 +360,7 @@ int flag_gen_declaration;
int print_struct_values;
-/* Tells the compiler what is the constant string class for Objc. */
+/* Tells the compiler what is the constant string class for ObjC. */
const char *constant_string_class_name;
@@ -4496,7 +4496,7 @@ c_do_switch_warnings (splay_tree cases, location_t switch_location,
}
/* Even though there wasn't an exact match, there might be a
- case range which includes the enumator's value. */
+ case range which includes the enumerator's value. */
node = splay_tree_predecessor (cases, (splay_tree_key) value);
if (node && CASE_HIGH ((tree) node->value))
{
diff --git a/gcc/c-common.h b/gcc/c-common.h
index 7ad0be5a425..82c018b559f 100644
--- a/gcc/c-common.h
+++ b/gcc/c-common.h
@@ -995,6 +995,7 @@ extern tree c_finish_omp_ordered (tree);
extern void c_finish_omp_barrier (void);
extern tree c_finish_omp_atomic (enum tree_code, tree, tree);
extern void c_finish_omp_flush (void);
+extern void c_finish_omp_taskwait (void);
extern tree c_finish_omp_for (location_t, tree, tree, tree, tree, tree, tree);
extern void c_split_parallel_clauses (tree, tree *, tree *);
extern enum omp_clause_default_kind c_omp_predetermined_sharing (tree);
diff --git a/gcc/c-cppbuiltin.c b/gcc/c-cppbuiltin.c
index 63e5ad4e41b..82bd5c27313 100644
--- a/gcc/c-cppbuiltin.c
+++ b/gcc/c-cppbuiltin.c
@@ -1,5 +1,5 @@
/* Define builtin-in macros for the C family front ends.
- Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
@@ -301,7 +301,7 @@ builtin_define_decimal_float_constants (const char *name_prefix,
sprintf (buf, "1E-%d%s", fmt->p - 1, suffix);
builtin_define_with_value (name, buf, 0);
- /* Minimum denormalized postive decimal value. */
+ /* Minimum denormalized positive decimal value. */
sprintf (name, "__%s_DEN__", name_prefix);
p = buf;
for (digits = fmt->p; digits > 1; digits--)
@@ -659,7 +659,7 @@ c_cpp_builtins (cpp_reader *pfile)
cpp_define (pfile, "__SSP__=1");
if (flag_openmp)
- cpp_define (pfile, "_OPENMP=200505");
+ cpp_define (pfile, "_OPENMP=200805");
builtin_define_type_sizeof ("__SIZEOF_INT__", integer_type_node);
builtin_define_type_sizeof ("__SIZEOF_LONG__", long_integer_type_node);
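(Editorial aside: with -fopenmp the predefined _OPENMP macro now expands to 200805, the OpenMP 3.0 date, instead of 200505. An illustrative way for user code to key off it, not taken from this commit:)

#include <stdio.h>

int
main (void)
{
#if defined(_OPENMP) && _OPENMP >= 200805
  printf ("OpenMP 3.0 or newer (_OPENMP = %d)\n", _OPENMP);
#elif defined(_OPENMP)
  printf ("pre-3.0 OpenMP (_OPENMP = %d)\n", _OPENMP);
#else
  printf ("built without -fopenmp\n");
#endif
  return 0;
}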
diff --git a/gcc/c-omp.c b/gcc/c-omp.c
index cdca2bcd4a9..1da71d27b9c 100644
--- a/gcc/c-omp.c
+++ b/gcc/c-omp.c
@@ -1,7 +1,7 @@
/* This file contains routines to construct GNU OpenMP constructs,
called from parsing in the C and C++ front ends.
- Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>,
Diego Novillo <dnovillo@redhat.com>.
@@ -80,6 +80,19 @@ c_finish_omp_barrier (void)
}
+/* Complete a #pragma omp taskwait construct. */
+
+void
+c_finish_omp_taskwait (void)
+{
+ tree x;
+
+ x = built_in_decls[BUILT_IN_GOMP_TASKWAIT];
+ x = build_call_expr (x, 0);
+ add_stmt (x);
+}
+
+
/* Complete a #pragma omp atomic construct. The expression to be
implemented atomically is LHS code= RHS. The value returned is
either error_mark_node (if the construct was erroneous) or an
@@ -197,170 +210,205 @@ check_omp_for_incr_expr (tree exp, tree decl)
}
/* Validate and emit code for the OpenMP directive #pragma omp for.
- INIT, COND, INCR, BODY and PRE_BODY are the five basic elements
- of the loop (initialization expression, controlling predicate, increment
- expression, body of the loop and statements to go before the loop).
- DECL is the iteration variable. */
+ DECLV is a vector of iteration variables, for each collapsed loop.
+ INITV, CONDV and INCRV are vectors containing initialization
+ expressions, controlling predicates and increment expressions.
+ BODY is the body of the loop and PRE_BODY statements that go before
+ the loop. */
tree
-c_finish_omp_for (location_t locus, tree decl, tree init, tree cond,
- tree incr, tree body, tree pre_body)
+c_finish_omp_for (location_t locus, tree declv, tree initv, tree condv,
+ tree incrv, tree body, tree pre_body)
{
- location_t elocus = locus;
+ location_t elocus;
bool fail = false;
+ int i;
- if (EXPR_HAS_LOCATION (init))
- elocus = EXPR_LOCATION (init);
-
- /* Validate the iteration variable. */
- if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
+ gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
+ gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
+ gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
+ for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
{
- error ("%Hinvalid type for iteration variable %qE", &elocus, decl);
- fail = true;
- }
- if (TYPE_UNSIGNED (TREE_TYPE (decl)))
- warning (0, "%Hiteration variable %qE is unsigned", &elocus, decl);
+ tree decl = TREE_VEC_ELT (declv, i);
+ tree init = TREE_VEC_ELT (initv, i);
+ tree cond = TREE_VEC_ELT (condv, i);
+ tree incr = TREE_VEC_ELT (incrv, i);
+
+ elocus = locus;
+ if (EXPR_HAS_LOCATION (init))
+ elocus = EXPR_LOCATION (init);
+
+ /* Validate the iteration variable. */
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
+ && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
+ {
+ error ("%Hinvalid type for iteration variable %qE", &elocus, decl);
+ fail = true;
+ }
- /* In the case of "for (int i = 0...)", init will be a decl. It should
- have a DECL_INITIAL that we can turn into an assignment. */
- if (init == decl)
- {
- elocus = DECL_SOURCE_LOCATION (decl);
+ /* In the case of "for (int i = 0...)", init will be a decl. It should
+ have a DECL_INITIAL that we can turn into an assignment. */
+ if (init == decl)
+ {
+ elocus = DECL_SOURCE_LOCATION (decl);
+
+ init = DECL_INITIAL (decl);
+ if (init == NULL)
+ {
+ error ("%H%qE is not initialized", &elocus, decl);
+ init = integer_zero_node;
+ fail = true;
+ }
- init = DECL_INITIAL (decl);
- if (init == NULL)
+ init = build_modify_expr (decl, NOP_EXPR, init);
+ SET_EXPR_LOCATION (init, elocus);
+ }
+ gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
+ gcc_assert (TREE_OPERAND (init, 0) == decl);
+
+ if (cond == NULL_TREE)
{
- error ("%H%qE is not initialized", &elocus, decl);
- init = integer_zero_node;
+ error ("%Hmissing controlling predicate", &elocus);
fail = true;
}
+ else
+ {
+ bool cond_ok = false;
- init = build_modify_expr (decl, NOP_EXPR, init);
- SET_EXPR_LOCATION (init, elocus);
- }
- gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
- gcc_assert (TREE_OPERAND (init, 0) == decl);
-
- if (cond == NULL_TREE)
- {
- error ("%Hmissing controlling predicate", &elocus);
- fail = true;
- }
- else
- {
- bool cond_ok = false;
+ if (EXPR_HAS_LOCATION (cond))
+ elocus = EXPR_LOCATION (cond);
- if (EXPR_HAS_LOCATION (cond))
- elocus = EXPR_LOCATION (cond);
+ if (TREE_CODE (cond) == LT_EXPR
+ || TREE_CODE (cond) == LE_EXPR
+ || TREE_CODE (cond) == GT_EXPR
+ || TREE_CODE (cond) == GE_EXPR)
+ {
+ tree op0 = TREE_OPERAND (cond, 0);
+ tree op1 = TREE_OPERAND (cond, 1);
- if (TREE_CODE (cond) == LT_EXPR
- || TREE_CODE (cond) == LE_EXPR
- || TREE_CODE (cond) == GT_EXPR
- || TREE_CODE (cond) == GE_EXPR)
- {
- tree op0 = TREE_OPERAND (cond, 0);
- tree op1 = TREE_OPERAND (cond, 1);
+ /* 2.5.1. The comparison in the condition is computed in
+ the type of DECL, otherwise the behavior is undefined.
- /* 2.5.1. The comparison in the condition is computed in the type
- of DECL, otherwise the behavior is undefined.
+ For example:
+ long n; int i;
+ i < n;
- For example:
- long n; int i;
- i < n;
+ according to ISO will be evaluated as:
+ (long)i < n;
- according to ISO will be evaluated as:
- (long)i < n;
+ We want to force:
+ i < (int)n; */
+ if (TREE_CODE (op0) == NOP_EXPR
+ && decl == TREE_OPERAND (op0, 0))
+ {
+ TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
+ TREE_OPERAND (cond, 1)
+ = fold_build1 (NOP_EXPR, TREE_TYPE (decl),
+ TREE_OPERAND (cond, 1));
+ }
+ else if (TREE_CODE (op1) == NOP_EXPR
+ && decl == TREE_OPERAND (op1, 0))
+ {
+ TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
+ TREE_OPERAND (cond, 0)
+ = fold_build1 (NOP_EXPR, TREE_TYPE (decl),
+ TREE_OPERAND (cond, 0));
+ }
- We want to force:
- i < (int)n; */
- if (TREE_CODE (op0) == NOP_EXPR
- && decl == TREE_OPERAND (op0, 0))
- {
- TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
- TREE_OPERAND (cond, 1) = fold_build1 (NOP_EXPR, TREE_TYPE (decl),
- TREE_OPERAND (cond, 1));
- }
- else if (TREE_CODE (op1) == NOP_EXPR
- && decl == TREE_OPERAND (op1, 0))
- {
- TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
- TREE_OPERAND (cond, 0) = fold_build1 (NOP_EXPR, TREE_TYPE (decl),
- TREE_OPERAND (cond, 0));
+ if (decl == TREE_OPERAND (cond, 0))
+ cond_ok = true;
+ else if (decl == TREE_OPERAND (cond, 1))
+ {
+ TREE_SET_CODE (cond,
+ swap_tree_comparison (TREE_CODE (cond)));
+ TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
+ TREE_OPERAND (cond, 0) = decl;
+ cond_ok = true;
+ }
}
- if (decl == TREE_OPERAND (cond, 0))
- cond_ok = true;
- else if (decl == TREE_OPERAND (cond, 1))
+ if (!cond_ok)
{
- TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
- TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
- TREE_OPERAND (cond, 0) = decl;
- cond_ok = true;
+ error ("%Hinvalid controlling predicate", &elocus);
+ fail = true;
}
}
- if (!cond_ok)
+ if (incr == NULL_TREE)
{
- error ("%Hinvalid controlling predicate", &elocus);
+ error ("%Hmissing increment expression", &elocus);
fail = true;
}
- }
-
- if (incr == NULL_TREE)
- {
- error ("%Hmissing increment expression", &elocus);
- fail = true;
- }
- else
- {
- bool incr_ok = false;
-
- if (EXPR_HAS_LOCATION (incr))
- elocus = EXPR_LOCATION (incr);
-
- /* Check all the valid increment expressions: v++, v--, ++v, --v,
- v = v + incr, v = incr + v and v = v - incr. */
- switch (TREE_CODE (incr))
+ else
{
- case POSTINCREMENT_EXPR:
- case PREINCREMENT_EXPR:
- case POSTDECREMENT_EXPR:
- case PREDECREMENT_EXPR:
- incr_ok = (TREE_OPERAND (incr, 0) == decl);
- break;
+ bool incr_ok = false;
- case MODIFY_EXPR:
- if (TREE_OPERAND (incr, 0) != decl)
- break;
- if (TREE_OPERAND (incr, 1) == decl)
- break;
- if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
- && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
- || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
- incr_ok = true;
- else if (TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
- && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
- incr_ok = true;
- else
+ if (EXPR_HAS_LOCATION (incr))
+ elocus = EXPR_LOCATION (incr);
+
+ /* Check all the valid increment expressions: v++, v--, ++v, --v,
+ v = v + incr, v = incr + v and v = v - incr. */
+ switch (TREE_CODE (incr))
{
- tree t = check_omp_for_incr_expr (TREE_OPERAND (incr, 1), decl);
- if (t != error_mark_node)
+ case POSTINCREMENT_EXPR:
+ case PREINCREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ if (TREE_OPERAND (incr, 0) != decl)
+ break;
+
+ incr_ok = true;
+ if (POINTER_TYPE_P (TREE_TYPE (decl)))
{
- incr_ok = true;
- t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
+ tree t = fold_convert (sizetype, TREE_OPERAND (incr, 1));
+
+ if (TREE_CODE (incr) == POSTDECREMENT_EXPR
+ || TREE_CODE (incr) == PREDECREMENT_EXPR)
+ t = fold_build1 (NEGATE_EXPR, sizetype, t);
+ t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (decl), decl, t);
incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
}
- }
- break;
+ break;
+
+ case MODIFY_EXPR:
+ if (TREE_OPERAND (incr, 0) != decl)
+ break;
+ if (TREE_OPERAND (incr, 1) == decl)
+ break;
+ if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
+ && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
+ || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
+ incr_ok = true;
+ else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
+ || (TREE_CODE (TREE_OPERAND (incr, 1))
+ == POINTER_PLUS_EXPR))
+ && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
+ incr_ok = true;
+ else
+ {
+ tree t = check_omp_for_incr_expr (TREE_OPERAND (incr, 1),
+ decl);
+ if (t != error_mark_node)
+ {
+ incr_ok = true;
+ t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
+ incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
+ }
+ }
+ break;
- default:
- break;
- }
- if (!incr_ok)
- {
- error ("%Hinvalid increment expression", &elocus);
- fail = true;
+ default:
+ break;
+ }
+ if (!incr_ok)
+ {
+ error ("%Hinvalid increment expression", &elocus);
+ fail = true;
+ }
}
+
+ TREE_VEC_ELT (initv, i) = init;
+ TREE_VEC_ELT (incrv, i) = incr;
}
if (fail)
@@ -370,9 +418,9 @@ c_finish_omp_for (location_t locus, tree decl, tree init, tree cond,
tree t = make_node (OMP_FOR);
TREE_TYPE (t) = void_type_node;
- OMP_FOR_INIT (t) = init;
- OMP_FOR_COND (t) = cond;
- OMP_FOR_INCR (t) = incr;
+ OMP_FOR_INIT (t) = initv;
+ OMP_FOR_COND (t) = condv;
+ OMP_FOR_INCR (t) = incrv;
OMP_FOR_BODY (t) = body;
OMP_FOR_PRE_BODY (t) = pre_body;
@@ -416,6 +464,7 @@ c_split_parallel_clauses (tree clauses, tree *par_clauses, tree *ws_clauses)
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_ORDERED:
+ case OMP_CLAUSE_COLLAPSE:
OMP_CLAUSE_CHAIN (clauses) = *ws_clauses;
*ws_clauses = clauses;
break;
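(Editorial aside, not from the patch: the reworked c_finish_omp_for above validates a whole vector of collapsed loops and accepts pointer-typed iteration variables, and the hunk just before this routes the collapse clause to the work-sharing half of a combined parallel for. Source-level loops of the kind it now accepts look roughly like this; names and sizes are made up.)

#define N 64

void
scale (double a[N][N], double *p, double *end, double s)
{
  int i, j;
  double *q;

#pragma omp parallel for collapse(2) schedule(auto)
  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++)
      a[i][j] *= s;

#pragma omp parallel for
  for (q = p; q < end; q++)   /* pointer iteration variable, new in OpenMP 3.0 */
    *q *= s;
}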
diff --git a/gcc/c-parser.c b/gcc/c-parser.c
index d98640d0b9e..7607a8dfdcf 100644
--- a/gcc/c-parser.c
+++ b/gcc/c-parser.c
@@ -1018,6 +1018,7 @@ static void c_parser_omp_construct (c_parser *);
static void c_parser_omp_threadprivate (c_parser *);
static void c_parser_omp_barrier (c_parser *);
static void c_parser_omp_flush (c_parser *);
+static void c_parser_omp_taskwait (c_parser *);
enum pragma_context { pragma_external, pragma_stmt, pragma_compound };
static bool c_parser_pragma (c_parser *, enum pragma_context);
@@ -6674,6 +6675,17 @@ c_parser_pragma (c_parser *parser, enum pragma_context context)
c_parser_omp_flush (parser);
return false;
+ case PRAGMA_OMP_TASKWAIT:
+ if (context != pragma_compound)
+ {
+ if (context == pragma_stmt)
+ c_parser_error (parser, "%<#pragma omp taskwait%> may only be "
+ "used in compound statements");
+ goto bad_stmt;
+ }
+ c_parser_omp_taskwait (parser);
+ return false;
+
case PRAGMA_OMP_THREADPRIVATE:
c_parser_omp_threadprivate (parser);
return false;
@@ -6781,7 +6793,9 @@ c_parser_omp_clause_name (c_parser *parser)
switch (p[0])
{
case 'c':
- if (!strcmp ("copyin", p))
+ if (!strcmp ("collapse", p))
+ result = PRAGMA_OMP_CLAUSE_COLLAPSE;
+ else if (!strcmp ("copyin", p))
result = PRAGMA_OMP_CLAUSE_COPYIN;
else if (!strcmp ("copyprivate", p))
result = PRAGMA_OMP_CLAUSE_COPYPRIVATE;
@@ -6818,6 +6832,10 @@ c_parser_omp_clause_name (c_parser *parser)
else if (!strcmp ("shared", p))
result = PRAGMA_OMP_CLAUSE_SHARED;
break;
+ case 'u':
+ if (!strcmp ("untied", p))
+ result = PRAGMA_OMP_CLAUSE_UNTIED;
+ break;
}
}
@@ -6906,6 +6924,41 @@ c_parser_omp_var_list_parens (c_parser *parser, enum tree_code kind, tree list)
return list;
}
+/* OpenMP 3.0:
+ collapse ( constant-expression ) */
+
+static tree
+c_parser_omp_clause_collapse (c_parser *parser, tree list)
+{
+ tree c, num = error_mark_node;
+ HOST_WIDE_INT n;
+ location_t loc;
+
+ check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse");
+
+ loc = c_parser_peek_token (parser)->location;
+ if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
+ {
+ num = c_parser_expr_no_commas (parser, NULL).value;
+ c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
+ }
+ if (num == error_mark_node)
+ return list;
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (num))
+ || !host_integerp (num, 0)
+ || (n = tree_low_cst (num, 0)) <= 0
+ || (int) n != n)
+ {
+ error ("%Hcollapse argument needs positive constant integer expression",
+ &loc);
+ return list;
+ }
+ c = build_omp_clause (OMP_CLAUSE_COLLAPSE);
+ OMP_CLAUSE_COLLAPSE_EXPR (c) = num;
+ OMP_CLAUSE_CHAIN (c) = list;
+ return c;
+}
+
/* OpenMP 2.5:
copyin ( variable-list ) */
@@ -7164,7 +7217,7 @@ c_parser_omp_clause_reduction (c_parser *parser, tree list)
schedule ( schedule-kind , expression )
schedule-kind:
- static | dynamic | guided | runtime
+ static | dynamic | guided | runtime | auto
*/
static tree
@@ -7208,6 +7261,8 @@ c_parser_omp_clause_schedule (c_parser *parser, tree list)
}
else if (c_parser_next_token_is_keyword (parser, RID_STATIC))
OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
+ else if (c_parser_next_token_is_keyword (parser, RID_AUTO))
+ OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO;
else
goto invalid_kind;
@@ -7223,6 +7278,9 @@ c_parser_omp_clause_schedule (c_parser *parser, tree list)
if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME)
error ("%Hschedule %<runtime%> does not take "
"a %<chunk_size%> parameter", &here);
+ else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO)
+ error ("%Hschedule %<auto%> does not take "
+ "a %<chunk_size%> parameter", &here);
else if (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE)
OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t;
else
@@ -7253,6 +7311,22 @@ c_parser_omp_clause_shared (c_parser *parser, tree list)
return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_SHARED, list);
}
+/* OpenMP 3.0:
+ untied */
+
+static tree
+c_parser_omp_clause_untied (c_parser *parser ATTRIBUTE_UNUSED, tree list)
+{
+ tree c;
+
+ /* FIXME: Should we allow duplicates? */
+ check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied");
+
+ c = build_omp_clause (OMP_CLAUSE_UNTIED);
+ OMP_CLAUSE_CHAIN (c) = list;
+ return c;
+}
+
/* Parse all OpenMP clauses. The set clauses allowed by the directive
is a bitmask in MASK. Return the list of clauses found; the result
of clause default goes in *pdefault. */
@@ -7280,6 +7354,10 @@ c_parser_omp_all_clauses (c_parser *parser, unsigned int mask,
switch (c_kind)
{
+ case PRAGMA_OMP_CLAUSE_COLLAPSE:
+ clauses = c_parser_omp_clause_collapse (parser, clauses);
+ c_name = "collapse";
+ break;
case PRAGMA_OMP_CLAUSE_COPYIN:
clauses = c_parser_omp_clause_copyin (parser, clauses);
c_name = "copyin";
@@ -7332,6 +7410,10 @@ c_parser_omp_all_clauses (c_parser *parser, unsigned int mask,
clauses = c_parser_omp_clause_shared (parser, clauses);
c_name = "shared";
break;
+ case PRAGMA_OMP_CLAUSE_UNTIED:
+ clauses = c_parser_omp_clause_untied (parser, clauses);
+ c_name = "untied";
+ break;
default:
c_parser_error (parser, "expected %<#pragma omp%> clause");
goto saw_error;
@@ -7522,15 +7604,29 @@ c_parser_omp_flush (c_parser *parser)
c_finish_omp_flush ();
}
-/* Parse the restricted form of the for statment allowed by OpenMP.
+/* Parse the restricted form of the for statement allowed by OpenMP.
The real trick here is to determine the loop control variable early
so that we can push a new decl if necessary to make it private. */
static tree
-c_parser_omp_for_loop (c_parser *parser)
+c_parser_omp_for_loop (c_parser *parser, tree clauses, tree *par_clauses)
{
- tree decl, cond, incr, save_break, save_cont, body, init;
+ tree decl, cond, incr, save_break, save_cont, body, init, stmt, cl;
+ tree declv, condv, incrv, initv, for_block = NULL, ret = NULL;
location_t loc;
+ bool fail = false, open_brace_parsed = false;
+ int i, collapse = 1, nbraces = 0;
+
+ for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl))
+ if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE)
+ collapse = tree_low_cst (OMP_CLAUSE_COLLAPSE_EXPR (cl), 0);
+
+ gcc_assert (collapse >= 1);
+
+ declv = make_tree_vec (collapse);
+ initv = make_tree_vec (collapse);
+ condv = make_tree_vec (collapse);
+ incrv = make_tree_vec (collapse);
if (!c_parser_next_token_is_keyword (parser, RID_FOR))
{
@@ -7540,61 +7636,136 @@ c_parser_omp_for_loop (c_parser *parser)
loc = c_parser_peek_token (parser)->location;
c_parser_consume_token (parser);
- if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
- return NULL;
-
- /* Parse the initialization declaration or expression. */
- if (c_parser_next_token_starts_declspecs (parser))
+ for (i = 0; i < collapse; i++)
{
- c_parser_declaration_or_fndef (parser, true, true, true, true);
- decl = check_for_loop_decls ();
- if (decl == NULL)
- goto error_init;
- if (DECL_INITIAL (decl) == error_mark_node)
- decl = error_mark_node;
- init = decl;
- }
- else if (c_parser_next_token_is (parser, CPP_NAME)
- && c_parser_peek_2nd_token (parser)->type == CPP_EQ)
- {
- decl = c_parser_postfix_expression (parser).value;
+ int bracecount = 0;
- c_parser_require (parser, CPP_EQ, "expected %<=%>");
+ if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
+ goto pop_scopes;
- init = c_parser_expr_no_commas (parser, NULL).value;
- init = build_modify_expr (decl, NOP_EXPR, init);
- init = c_process_expr_stmt (init);
+ /* Parse the initialization declaration or expression. */
+ if (c_parser_next_token_starts_declspecs (parser))
+ {
+ if (i > 0)
+ for_block
+ = tree_cons (NULL, c_begin_compound_stmt (true), for_block);
+ c_parser_declaration_or_fndef (parser, true, true, true, true);
+ decl = check_for_loop_decls ();
+ if (decl == NULL)
+ goto error_init;
+ if (DECL_INITIAL (decl) == error_mark_node)
+ decl = error_mark_node;
+ init = decl;
+ }
+ else if (c_parser_next_token_is (parser, CPP_NAME)
+ && c_parser_peek_2nd_token (parser)->type == CPP_EQ)
+ {
+ struct c_expr init_exp;
+
+ decl = c_parser_postfix_expression (parser).value;
+
+ c_parser_require (parser, CPP_EQ, "expected %<=%>");
+
+ init_exp = c_parser_expr_no_commas (parser, NULL);
+ init_exp = default_function_array_conversion (init_exp);
+ init = build_modify_expr (decl, NOP_EXPR, init_exp.value);
+ init = c_process_expr_stmt (init);
+ c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
+ }
+ else
+ {
+ error_init:
+ c_parser_error (parser,
+ "expected iteration declaration or initialization");
+ c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
+ "expected %<)%>");
+ fail = true;
+ goto parse_next;
+ }
+
+ /* Parse the loop condition. */
+ cond = NULL_TREE;
+ if (c_parser_next_token_is_not (parser, CPP_SEMICOLON))
+ {
+ cond = c_parser_expression_conv (parser).value;
+ cond = c_objc_common_truthvalue_conversion (cond);
+ if (CAN_HAVE_LOCATION_P (cond))
+ SET_EXPR_LOCATION (cond, input_location);
+ }
c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
- }
- else
- goto error_init;
- /* Parse the loop condition. */
- cond = NULL_TREE;
- if (c_parser_next_token_is_not (parser, CPP_SEMICOLON))
- {
- cond = c_parser_expression_conv (parser).value;
- cond = c_objc_common_truthvalue_conversion (cond);
- if (CAN_HAVE_LOCATION_P (cond))
- SET_EXPR_LOCATION (cond, input_location);
- }
- c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
+ /* Parse the increment expression. */
+ incr = NULL_TREE;
+ if (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN))
+ incr = c_process_expr_stmt (c_parser_expression (parser).value);
+ c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
- /* Parse the increment expression. */
- incr = NULL_TREE;
- if (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN))
- incr = c_process_expr_stmt (c_parser_expression (parser).value);
- c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
+ if (decl == NULL || decl == error_mark_node || init == error_mark_node)
+ fail = true;
+ else
+ {
+ TREE_VEC_ELT (declv, i) = decl;
+ TREE_VEC_ELT (initv, i) = init;
+ TREE_VEC_ELT (condv, i) = cond;
+ TREE_VEC_ELT (incrv, i) = incr;
+ }
+
+ parse_next:
+ if (i == collapse - 1)
+ break;
+
+ /* FIXME: OpenMP 3.0 draft isn't very clear on what exactly is allowed
+ in between the collapsed for loops to be still considered perfectly
+ nested. Hopefully the final version clarifies this.
+ For now handle (multiple) {'s and empty statements. */
+ do
+ {
+ if (c_parser_next_token_is_keyword (parser, RID_FOR))
+ {
+ c_parser_consume_token (parser);
+ break;
+ }
+ else if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
+ {
+ c_parser_consume_token (parser);
+ bracecount++;
+ }
+ else if (bracecount
+ && c_parser_next_token_is (parser, CPP_SEMICOLON))
+ c_parser_consume_token (parser);
+ else
+ {
+ c_parser_error (parser, "not enough perfectly nested loops");
+ if (bracecount)
+ {
+ open_brace_parsed = true;
+ bracecount--;
+ }
+ fail = true;
+ collapse = 0;
+ break;
+ }
+ }
+ while (1);
+
+ nbraces += bracecount;
+ }
- parse_body:
save_break = c_break_label;
c_break_label = size_one_node;
save_cont = c_cont_label;
c_cont_label = NULL_TREE;
body = push_stmt_list ();
- add_stmt (c_parser_c99_block_statement (parser));
+ if (open_brace_parsed)
+ {
+ stmt = c_begin_compound_stmt (true);
+ c_parser_compound_statement_nostart (parser);
+ add_stmt (c_end_compound_stmt (stmt, true));
+ }
+ else
+ add_stmt (c_parser_c99_block_statement (parser));
if (c_cont_label)
add_stmt (build1 (LABEL_EXPR, void_type_node, c_cont_label));
@@ -7602,17 +7773,82 @@ c_parser_omp_for_loop (c_parser *parser)
c_break_label = save_break;
c_cont_label = save_cont;
+ while (nbraces)
+ {
+ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
+ {
+ c_parser_consume_token (parser);
+ nbraces--;
+ }
+ else if (c_parser_next_token_is (parser, CPP_SEMICOLON))
+ c_parser_consume_token (parser);
+ else
+ {
+ c_parser_error (parser, "collapsed loops not perfectly nested");
+ while (nbraces)
+ {
+ stmt = c_begin_compound_stmt (true);
+ add_stmt (body);
+ c_parser_compound_statement_nostart (parser);
+ body = c_end_compound_stmt (stmt, true);
+ nbraces--;
+ }
+ goto pop_scopes;
+ }
+ }
+
/* Only bother calling c_finish_omp_for if we haven't already generated
an error from the initialization parsing. */
- if (decl != NULL && decl != error_mark_node && init != error_mark_node)
- return c_finish_omp_for (loc, decl, init, cond, incr, body, NULL);
- return NULL;
-
- error_init:
- c_parser_error (parser, "expected iteration declaration or initialization");
- c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
- decl = init = cond = incr = NULL_TREE;
- goto parse_body;
+ if (!fail)
+ {
+ stmt = c_finish_omp_for (loc, declv, initv, condv, incrv, body, NULL);
+ if (stmt)
+ {
+ if (par_clauses != NULL)
+ {
+ tree *c;
+ for (c = par_clauses; *c ; )
+ if (OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_FIRSTPRIVATE
+ && OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_LASTPRIVATE)
+ c = &OMP_CLAUSE_CHAIN (*c);
+ else
+ {
+ for (i = 0; i < collapse; i++)
+ if (TREE_VEC_ELT (declv, i) == OMP_CLAUSE_DECL (*c))
+ break;
+ if (i == collapse)
+ c = &OMP_CLAUSE_CHAIN (*c);
+ else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE)
+ {
+ error ("%Hiteration variable %qD should not be firstprivate",
+ &loc, OMP_CLAUSE_DECL (*c));
+ *c = OMP_CLAUSE_CHAIN (*c);
+ }
+ else
+ {
+ /* Copy lastprivate (decl) clause to OMP_FOR_CLAUSES,
+ change it to shared (decl) in
+ OMP_PARALLEL_CLAUSES. */
+ tree l = build_omp_clause (OMP_CLAUSE_LASTPRIVATE);
+ OMP_CLAUSE_DECL (l) = OMP_CLAUSE_DECL (*c);
+ OMP_CLAUSE_CHAIN (l) = clauses;
+ clauses = l;
+ OMP_CLAUSE_SET_CODE (*c, OMP_CLAUSE_SHARED);
+ }
+ }
+ }
+ OMP_FOR_CLAUSES (stmt) = clauses;
+ }
+ ret = stmt;
+ }
+pop_scopes:
+ while (for_block)
+ {
+ stmt = c_end_compound_stmt (TREE_VALUE (for_block), true);
+ add_stmt (stmt);
+ for_block = TREE_CHAIN (for_block);
+ }
+ return ret;
}
/* OpenMP 2.5:
@@ -7627,6 +7863,7 @@ c_parser_omp_for_loop (c_parser *parser)
| (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (1u << PRAGMA_OMP_CLAUSE_ORDERED) \
| (1u << PRAGMA_OMP_CLAUSE_SCHEDULE) \
+ | (1u << PRAGMA_OMP_CLAUSE_COLLAPSE) \
| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))
static tree
@@ -7638,9 +7875,7 @@ c_parser_omp_for (c_parser *parser)
"#pragma omp for");
block = c_begin_compound_stmt (true);
- ret = c_parser_omp_for_loop (parser);
- if (ret)
- OMP_FOR_CLAUSES (ret) = clauses;
+ ret = c_parser_omp_for_loop (parser, clauses, NULL);
block = c_end_compound_stmt (block, true);
add_stmt (block);
@@ -7845,9 +8080,7 @@ c_parser_omp_parallel (c_parser *parser)
case PRAGMA_OMP_PARALLEL_FOR:
block = c_begin_omp_parallel ();
c_split_parallel_clauses (clauses, &par_clause, &ws_clause);
- stmt = c_parser_omp_for_loop (parser);
- if (stmt)
- OMP_FOR_CLAUSES (stmt) = ws_clause;
+ c_parser_omp_for_loop (parser, ws_clause, &par_clause);
stmt = c_finish_omp_parallel (par_clause, block);
OMP_PARALLEL_COMBINED (stmt) = 1;
break;
@@ -7894,6 +8127,43 @@ c_parser_omp_single (c_parser *parser)
return add_stmt (stmt);
}
+/* OpenMP 3.0:
+ # pragma omp task task-clause[optseq] new-line
+*/
+
+#define OMP_TASK_CLAUSE_MASK \
+ ( (1u << PRAGMA_OMP_CLAUSE_IF) \
+ | (1u << PRAGMA_OMP_CLAUSE_UNTIED) \
+ | (1u << PRAGMA_OMP_CLAUSE_DEFAULT) \
+ | (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
+ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
+ | (1u << PRAGMA_OMP_CLAUSE_SHARED))
+
+static tree
+c_parser_omp_task (c_parser *parser)
+{
+ tree clauses, block;
+
+ clauses = c_parser_omp_all_clauses (parser, OMP_TASK_CLAUSE_MASK,
+ "#pragma omp task");
+
+ block = c_begin_omp_task ();
+ c_parser_statement (parser);
+ return c_finish_omp_task (clauses, block);
+}
+
+/* OpenMP 3.0:
+ # pragma omp taskwait new-line
+*/
+
+static void
+c_parser_omp_taskwait (c_parser *parser)
+{
+ c_parser_consume_pragma (parser);
+ c_parser_skip_to_pragma_eol (parser);
+
+ c_finish_omp_taskwait ();
+}
/* Main entry point to parsing most OpenMP pragmas. */
@@ -7940,6 +8210,9 @@ c_parser_omp_construct (c_parser *parser)
case PRAGMA_OMP_SINGLE:
stmt = c_parser_omp_single (parser);
break;
+ case PRAGMA_OMP_TASK:
+ stmt = c_parser_omp_task (parser);
+ break;
default:
gcc_unreachable ();
}
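
Taken together, the c-parser.c changes accept the new OpenMP 3.0 surface syntax: the task and taskwait directives, the untied and collapse clauses, and schedule(auto). A short C sketch of directives the extended parser now handles (illustrative only; process() and traverse() are placeholder names):

    extern void process (int);

    void
    traverse (int n)
    {
      int i;

    #pragma omp parallel
    #pragma omp single
      {
        for (i = 0; i < n; i++)
    #pragma omp task untied firstprivate(i)
          process (i);
        /* taskwait must appear inside a compound statement, as enforced
           by the new PRAGMA_OMP_TASKWAIT case in c_parser_pragma.  */
    #pragma omp taskwait
      }

    #pragma omp parallel for schedule(auto)
      for (i = 0; i < n; i++)
        process (i);
    }
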
diff --git a/gcc/c-pragma.c b/gcc/c-pragma.c
index 44e95b81c0a..81b9910b41a 100644
--- a/gcc/c-pragma.c
+++ b/gcc/c-pragma.c
@@ -896,6 +896,8 @@ static const struct omp_pragma_def omp_pragmas[] = {
{ "section", PRAGMA_OMP_SECTION },
{ "sections", PRAGMA_OMP_SECTIONS },
{ "single", PRAGMA_OMP_SINGLE },
+ { "task", PRAGMA_OMP_TASK },
+ { "taskwait", PRAGMA_OMP_TASKWAIT },
{ "threadprivate", PRAGMA_OMP_THREADPRIVATE }
};
diff --git a/gcc/c-pragma.h b/gcc/c-pragma.h
index 747a053b2e8..188afb8dbaa 100644
--- a/gcc/c-pragma.h
+++ b/gcc/c-pragma.h
@@ -41,6 +41,8 @@ typedef enum pragma_kind {
PRAGMA_OMP_SECTION,
PRAGMA_OMP_SECTIONS,
PRAGMA_OMP_SINGLE,
+ PRAGMA_OMP_TASK,
+ PRAGMA_OMP_TASKWAIT,
PRAGMA_OMP_THREADPRIVATE,
PRAGMA_GCC_PCH_PREPROCESS,
@@ -49,11 +51,12 @@ typedef enum pragma_kind {
} pragma_kind;
-/* All clauses defined by OpenMP 2.5.
+/* All clauses defined by OpenMP 2.5 and 3.0.
Used internally by both C and C++ parsers. */
typedef enum pragma_omp_clause {
PRAGMA_OMP_CLAUSE_NONE = 0,
+ PRAGMA_OMP_CLAUSE_COLLAPSE,
PRAGMA_OMP_CLAUSE_COPYIN,
PRAGMA_OMP_CLAUSE_COPYPRIVATE,
PRAGMA_OMP_CLAUSE_DEFAULT,
@@ -66,7 +69,8 @@ typedef enum pragma_omp_clause {
PRAGMA_OMP_CLAUSE_PRIVATE,
PRAGMA_OMP_CLAUSE_REDUCTION,
PRAGMA_OMP_CLAUSE_SCHEDULE,
- PRAGMA_OMP_CLAUSE_SHARED
+ PRAGMA_OMP_CLAUSE_SHARED,
+ PRAGMA_OMP_CLAUSE_UNTIED
} pragma_omp_clause;
extern struct cpp_reader* parse_in;
diff --git a/gcc/c-pretty-print.c b/gcc/c-pretty-print.c
index 29e987d3660..4386c392c9f 100644
--- a/gcc/c-pretty-print.c
+++ b/gcc/c-pretty-print.c
@@ -1019,7 +1019,7 @@ pp_c_constant (c_pretty_printer *pp, tree e)
case COMPLEX_CST:
/* Sometimes, we are confused and we think a complex literal
is a constant. Such thing is a compound literal which
- grammatically belongs to postifx-expr production. */
+ grammatically belongs to postfix-expr production. */
pp_c_compound_literal (pp, e);
break;
@@ -1096,7 +1096,7 @@ pp_c_primary_expression (c_pretty_printer *pp, tree e)
break;
default:
- /* FIXME: Make sure we won't get into an infinie loop. */
+ /* FIXME: Make sure we won't get into an infinite loop. */
pp_c_left_paren (pp);
pp_expression (pp, e);
pp_c_right_paren (pp);
diff --git a/gcc/c-tree.h b/gcc/c-tree.h
index 02dfc6122d1..14df0444377 100644
--- a/gcc/c-tree.h
+++ b/gcc/c-tree.h
@@ -596,6 +596,8 @@ extern void c_end_vm_scope (unsigned int);
extern tree c_expr_to_decl (tree, bool *, bool *);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (tree, tree);
+extern tree c_begin_omp_task (void);
+extern tree c_finish_omp_task (tree, tree);
extern tree c_finish_omp_clauses (tree);
/* Set to 0 at beginning of a function definition, set to 1 if
diff --git a/gcc/c-typeck.c b/gcc/c-typeck.c
index b52a349ac6f..23880d16a43 100644
--- a/gcc/c-typeck.c
+++ b/gcc/c-typeck.c
@@ -8681,6 +8681,8 @@ c_begin_omp_parallel (void)
return block;
}
+/* Generate OMP_PARALLEL, with CLAUSES and BLOCK as its compound statement. */
+
tree
c_finish_omp_parallel (tree clauses, tree block)
{
@@ -8696,6 +8698,36 @@ c_finish_omp_parallel (tree clauses, tree block)
return add_stmt (stmt);
}
+/* Like c_begin_compound_stmt, except force the retention of the BLOCK. */
+
+tree
+c_begin_omp_task (void)
+{
+ tree block;
+
+ keep_next_level ();
+ block = c_begin_compound_stmt (true);
+
+ return block;
+}
+
+/* Generate OMP_TASK, with CLAUSES and BLOCK as its compound statement. */
+
+tree
+c_finish_omp_task (tree clauses, tree block)
+{
+ tree stmt;
+
+ block = c_end_compound_stmt (block, true);
+
+ stmt = make_node (OMP_TASK);
+ TREE_TYPE (stmt) = void_type_node;
+ OMP_TASK_CLAUSES (stmt) = clauses;
+ OMP_TASK_BODY (stmt) = block;
+
+ return add_stmt (stmt);
+}
+
/* For all elements of CLAUSES, validate them vs OpenMP constraints.
Remove any elements from the list that are invalid. */
@@ -8856,6 +8888,8 @@ c_finish_omp_clauses (tree clauses)
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_DEFAULT:
+ case OMP_CLAUSE_UNTIED:
+ case OMP_CLAUSE_COLLAPSE:
pc = &OMP_CLAUSE_CHAIN (c);
continue;
diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c
index aae9ca0009d..c5c7950f835 100644
--- a/gcc/cfgcleanup.c
+++ b/gcc/cfgcleanup.c
@@ -1,6 +1,6 @@
/* Control flow optimization code for GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
@@ -418,8 +418,8 @@ try_forward_edges (int mode, basic_block b)
and cold sections.
Basic block partitioning may result in some jumps that appear to
- be optimizable (or blocks that appear to be mergeable), but which really m
- ust be left untouched (they are required to make it safely across
+ be optimizable (or blocks that appear to be mergeable), but which really
+ must be left untouched (they are required to make it safely across
partition boundaries). See the comments at the top of
bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index ffe2366a108..afa3e123106 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -125,7 +125,7 @@ static struct stack_var *stack_vars;
static size_t stack_vars_alloc;
static size_t stack_vars_num;
-/* An array of indicies such that stack_vars[stack_vars_sorted[i]].size
+/* An array of indices such that stack_vars[stack_vars_sorted[i]].size
is non-decreasing. */
static size_t *stack_vars_sorted;
@@ -341,7 +341,7 @@ add_alias_set_conflicts (void)
}
/* A subroutine of partition_stack_vars. A comparison function for qsort,
- sorting an array of indicies by the size of the object. */
+ sorting an array of indices by the size of the object. */
static int
stack_var_size_cmp (const void *a, const void *b)
diff --git a/gcc/cfghooks.c b/gcc/cfghooks.c
index f5fb18f0875..5815a7edd37 100644
--- a/gcc/cfghooks.c
+++ b/gcc/cfghooks.c
@@ -1,5 +1,6 @@
/* Hooks for cfg representation specific functions.
- Copyright (C) 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2007, 2008 Free Software Foundation,
+ Inc.
Contributed by Sebastian Pop <s.pop@laposte.net>
This file is part of GCC.
@@ -1029,7 +1030,7 @@ cfg_hook_duplicate_loop_to_header_edge (struct loop *loop, edge e,
/* Conditional jumps are represented differently in trees and RTL,
this hook takes a basic block that is known to have a cond jump
- at its end and extracts the taken and not taken eges out of it
+ at its end and extracts the taken and not taken edges out of it
and store it in E1 and E2 respectively. */
void
extract_cond_bb_edges (basic_block b, edge *e1, edge *e2)
diff --git a/gcc/cfglayout.c b/gcc/cfglayout.c
index 0885af79b3f..623e5bdd911 100644
--- a/gcc/cfglayout.c
+++ b/gcc/cfglayout.c
@@ -378,7 +378,7 @@ struct rtl_opt_pass pass_outof_cfg_layout_mode =
}
};
-/* Return sope resulting from combination of S1 and S2. */
+/* Return scope resulting from combination of S1 and S2. */
static tree
choose_inner_scope (tree s1, tree s2)
{
diff --git a/gcc/cfgloopmanip.c b/gcc/cfgloopmanip.c
index dc088440f44..d5bd216e08c 100644
--- a/gcc/cfgloopmanip.c
+++ b/gcc/cfgloopmanip.c
@@ -1,5 +1,6 @@
/* Loop manipulation code for GNU compiler.
- Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2003, 2004, 2005, 2007, 2008 Free Software
+ Foundation, Inc.
This file is part of GCC.
@@ -1361,8 +1362,8 @@ loop_version (struct loop *loop,
free (bbs);
}
- /* At this point condition_bb is loop predheader with two successors,
- first_head and second_head. Make sure that loop predheader has only
+ /* At this point condition_bb is loop preheader with two successors,
+ first_head and second_head. Make sure that loop preheader has only
one successor. */
split_edge (loop_preheader_edge (loop));
split_edge (loop_preheader_edge (nloop));
@@ -1375,7 +1376,7 @@ loop_version (struct loop *loop,
removed (thus the loop nesting may be wrong), and some blocks and edges
were changed (so the information about bb --> loop mapping does not have
to be correct). But still for the remaining loops the header dominates
- the latch, and loops did not get new subloobs (new loops might possibly
+ the latch, and loops did not get new subloops (new loops might possibly
get created, but we are not interested in them). Fix up the mess.
If CHANGED_BBS is not NULL, basic blocks whose loop has changed are
diff --git a/gcc/cgraph.c b/gcc/cgraph.c
index d3f8fa6671f..66189d8ffde 100644
--- a/gcc/cgraph.c
+++ b/gcc/cgraph.c
@@ -296,7 +296,7 @@ cgraph_edge (struct cgraph_node *node, tree call_stmt)
return e;
}
-/* Change call_smtt of edge E to NEW_STMT. */
+/* Change call_stmt of edge E to NEW_STMT. */
void
cgraph_set_call_stmt (struct cgraph_edge *e, tree new_stmt)
@@ -701,7 +701,7 @@ cgraph_node_name (struct cgraph_node *node)
/* Names used to print out the availability enum. */
const char * const cgraph_availability_names[] =
- {"unset", "not_available", "overwrittable", "available", "local"};
+ {"unset", "not_available", "overwritable", "available", "local"};
/* Dump call graph node NODE to file F. */
@@ -1038,7 +1038,7 @@ cgraph_add_new_function (tree fndecl, bool lowered)
switch (cgraph_state)
{
case CGRAPH_STATE_CONSTRUCTION:
- /* Just enqueue function to be processed at nearest occurence. */
+ /* Just enqueue function to be processed at nearest occurrence. */
node = cgraph_node (fndecl);
node->next_needed = cgraph_new_nodes;
if (lowered)
diff --git a/gcc/cgraphunit.c b/gcc/cgraphunit.c
index 9f6ea72be80..7c0db40ce7c 100644
--- a/gcc/cgraphunit.c
+++ b/gcc/cgraphunit.c
@@ -119,9 +119,9 @@ along with GCC; see the file COPYING3. If not see
Functions are output early using call of
cgraph_assemble_pending_function from cgraph_finalize_function. The
decision on whether function is needed is made more conservative so
- uninlininable static functions are needed too. During the call-graph
+ uninlinable static functions are needed too. During the call-graph
construction the edge destinations are not marked as reachable and it
- is completely relied upn assemble_variable to mark them. */
+ is completely relied upon assemble_variable to mark them. */
#include "config.h"
@@ -326,7 +326,7 @@ cgraph_build_cdtor_fns (void)
/* Determine if function DECL is needed. That is, visible to something
either outside this translation unit, something magic in the system
- configury, or (if not doing unit-at-a-time) to something we havn't
+ configury, or (if not doing unit-at-a-time) to something we haven't
seen yet. */
static bool
@@ -1455,7 +1455,7 @@ cgraph_optimize (void)
/* Generate and emit a static constructor or destructor. WHICH must
be one of 'I' (for a constructor) or 'D' (for a destructor). BODY
is a STATEMENT_LIST containing GENERIC statements. PRIORITY is the
- initialization priority fot this constructor or destructor. */
+ initialization priority for this constructor or destructor. */
void
cgraph_build_static_cdtor (char which, tree body, int priority)
diff --git a/gcc/common.opt b/gcc/common.opt
index 5e56257bcbb..1454878b30c 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -1317,6 +1317,10 @@ fweb
Common Report Var(flag_web) Init(2) Optimization
Construct webs and split unrelated uses of single variable
+ftree-builtin-call-dce
+Common Report Var(flag_tree_builtin_call_dce) Init(0) Optimization
+Enable conditional dead code elimination for builtin calls
+
fwhole-program
Common Report Var(flag_whole_program) Init(0) Optimization
Perform whole program optimizations
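
The common.opt hunk adds the -ftree-builtin-call-dce flag, described as enabling conditional dead code elimination for builtin calls. Roughly, the pass targets calls like the one below, whose result is unused but which plain DCE must keep because the builtin may set errno; this is only a hedged sketch of the intended use, not code from the patch:

    /* Compile with e.g. -O2 -ftree-builtin-call-dce.  The value of sqrt
       is discarded, yet the call may set errno, so ordinary DCE keeps
       it; the new pass is meant to eliminate or guard such calls
       conditionally.  */
    #include <math.h>

    void
    discard_sqrt (double x)
    {
      sqrt (x);
    }
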
diff --git a/gcc/config.gcc b/gcc/config.gcc
index 806728794f1..399d80c633b 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -1989,7 +1989,7 @@ powerpc-*-linux*altivec*)
powerpc-*-linux*spe*)
tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/linux.h rs6000/linuxspe.h rs6000/e500.h"
extra_options="${extra_options} rs6000/sysv4.opt"
- tmake_file="rs6000/t-fprules rs6000/t-fprules-softfp soft-fp/t-softfp rs6000/t-ppcos ${tmake_file} rs6000/t-ppccomm"
+ tmake_file="t-dfprules rs6000/t-fprules rs6000/t-fprules-softfp soft-fp/t-softfp rs6000/t-ppcos ${tmake_file} rs6000/t-ppccomm"
;;
powerpc-*-linux*paired*)
tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/linux.h rs6000/750cl.h"
diff --git a/gcc/config/avr/avr.h b/gcc/config/avr/avr.h
index 4cd0f24517e..6ae1c63d01e 100644
--- a/gcc/config/avr/avr.h
+++ b/gcc/config/avr/avr.h
@@ -360,7 +360,9 @@ enum reg_class {
#define RETURN_ADDR_RTX(count, x) \
gen_rtx_MEM (Pmode, memory_address (Pmode, plus_constant (tem, 1)))
-#define PUSH_ROUNDING(NPUSHED) (NPUSHED)
+/* Don't use Push rounding. expr.c: emit_single_push_insn is broken
+ for POST_DEC targets (PR27386). */
+/*#define PUSH_ROUNDING(NPUSHED) (NPUSHED)*/
#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, STACK_SIZE) 0
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 81dcacd41e0..e3feb30d01b 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -5268,7 +5268,7 @@ setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
label_ref = gen_rtx_LABEL_REF (Pmode, label);
/* Compute address to jump to :
- label - 5*eax + nnamed_sse_arguments*5 */
+ label - eax*4 + nnamed_sse_arguments*4 */
tmp_reg = gen_reg_rtx (Pmode);
nsse_reg = gen_reg_rtx (Pmode);
emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index 00a16d988e1..6debb18d2a0 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -14363,15 +14363,8 @@
"")
(define_insn "*indirect_jump"
- [(set (pc) (match_operand:SI 0 "nonimmediate_operand" "rm"))]
- "!TARGET_64BIT"
- "jmp\t%A0"
- [(set_attr "type" "ibr")
- (set_attr "length_immediate" "0")])
-
-(define_insn "*indirect_jump_rtx64"
- [(set (pc) (match_operand:DI 0 "nonimmediate_operand" "rm"))]
- "TARGET_64BIT"
+ [(set (pc) (match_operand:P 0 "nonimmediate_operand" "rm"))]
+ ""
"jmp\t%A0"
[(set_attr "type" "ibr")
(set_attr "length_immediate" "0")])
@@ -14415,17 +14408,9 @@
})
(define_insn "*tablejump_1"
- [(set (pc) (match_operand:SI 0 "nonimmediate_operand" "rm"))
- (use (label_ref (match_operand 1 "" "")))]
- "!TARGET_64BIT"
- "jmp\t%A0"
- [(set_attr "type" "ibr")
- (set_attr "length_immediate" "0")])
-
-(define_insn "*tablejump_1_rtx64"
- [(set (pc) (match_operand:DI 0 "nonimmediate_operand" "rm"))
+ [(set (pc) (match_operand:P 0 "nonimmediate_operand" "rm"))
(use (label_ref (match_operand 1 "" "")))]
- "TARGET_64BIT"
+ ""
"jmp\t%A0"
[(set_attr "type" "ibr")
(set_attr "length_immediate" "0")])
@@ -14876,21 +14861,11 @@
DONE;
})
-(define_insn_and_split "eh_return_si"
- [(set (pc)
- (unspec [(match_operand:SI 0 "register_operand" "c")]
- UNSPEC_EH_RETURN))]
- "!TARGET_64BIT"
- "#"
- "reload_completed"
- [(const_int 0)]
- "ix86_expand_epilogue (2); DONE;")
-
-(define_insn_and_split "eh_return_di"
+(define_insn_and_split "eh_return_<mode>"
[(set (pc)
- (unspec [(match_operand:DI 0 "register_operand" "c")]
+ (unspec [(match_operand:P 0 "register_operand" "c")]
UNSPEC_EH_RETURN))]
- "TARGET_64BIT"
+ ""
"#"
"reload_completed"
[(const_int 0)]
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index 41fc6d03009..82009308349 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -645,6 +645,7 @@ static const struct mips_cpu_info mips_cpu_info_table[] = {
{ "sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY },
{ "sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY },
{ "sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY },
+ { "xlr", PROCESSOR_XLR, 64, 0 }
};
/* Default costs. If these are used for a processor we should look
@@ -1015,6 +1016,21 @@ static const struct mips_rtx_cost_data mips_rtx_cost_data[PROCESSOR_MAX] = {
{ /* SR71000 */
DEFAULT_COSTS
},
+ { /* XLR */
+ /* Need to replace first five with the costs of calling the appropriate
+ libgcc routine. */
+ COSTS_N_INSNS (256), /* fp_add */
+ COSTS_N_INSNS (256), /* fp_mult_sf */
+ COSTS_N_INSNS (256), /* fp_mult_df */
+ COSTS_N_INSNS (256), /* fp_div_sf */
+ COSTS_N_INSNS (256), /* fp_div_df */
+ COSTS_N_INSNS (8), /* int_mult_si */
+ COSTS_N_INSNS (8), /* int_mult_di */
+ COSTS_N_INSNS (72), /* int_div_si */
+ COSTS_N_INSNS (72), /* int_div_di */
+ 1, /* branch_cost */
+ 4 /* memory_latency */
+ }
};
/* This hash table keeps track of implicit "mips16" and "nomips16" attributes
diff --git a/gcc/config/mips/mips.h b/gcc/config/mips/mips.h
index 402cd579c5c..e2129a0fcd5 100644
--- a/gcc/config/mips/mips.h
+++ b/gcc/config/mips/mips.h
@@ -69,6 +69,7 @@ enum processor_type {
PROCESSOR_SB1,
PROCESSOR_SB1A,
PROCESSOR_SR71000,
+ PROCESSOR_XLR,
PROCESSOR_MAX
};
diff --git a/gcc/config/mips/mips.md b/gcc/config/mips/mips.md
index 29b8e703890..da7cb10f8b5 100644
--- a/gcc/config/mips/mips.md
+++ b/gcc/config/mips/mips.md
@@ -415,7 +415,7 @@
;; Attribute describing the processor. This attribute must match exactly
;; with the processor_type enumeration in mips.h.
(define_attr "cpu"
- "r3000,4kc,4kp,5kc,5kf,20kc,24kc,24kf2_1,24kf1_1,74kc,74kf2_1,74kf1_1,74kf3_2,loongson2e,loongson2f,m4k,r3900,r6000,r4000,r4100,r4111,r4120,r4130,r4300,r4600,r4650,r5000,r5400,r5500,r7000,r8000,r9000,sb1,sb1a,sr71000"
+ "r3000,4kc,4kp,5kc,5kf,20kc,24kc,24kf2_1,24kf1_1,74kc,74kf2_1,74kf1_1,74kf3_2,loongson2e,loongson2f,m4k,r3900,r6000,r4000,r4100,r4111,r4120,r4130,r4300,r4600,r4650,r5000,r5400,r5500,r7000,r8000,r9000,sb1,sb1a,sr71000,xlr"
(const (symbol_ref "mips_tune")))
;; The type of hardware hazard associated with this instruction.
@@ -748,6 +748,7 @@
(include "9000.md")
(include "sb1.md")
(include "sr71k.md")
+(include "xlr.md")
(include "generic.md")
;;
diff --git a/gcc/config/mips/xlr.md b/gcc/config/mips/xlr.md
new file mode 100644
index 00000000000..21550ba12d9
--- /dev/null
+++ b/gcc/config/mips/xlr.md
@@ -0,0 +1,89 @@
+;; DFA-based pipeline description for the XLR.
+;; Copyright (C) 2008 Free Software Foundation, Inc.
+;;
+;; xlr.md Machine Description for the RMI XLR Microprocessor
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "xlr_main,xlr_muldiv")
+
+;; Definitions for xlr_main automaton.
+(define_cpu_unit "xlr_main_pipe" "xlr_main")
+
+(define_insn_reservation "ir_xlr_alu_slt" 2
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "slt"))
+ "xlr_main_pipe")
+
+;; Integer arithmetic instructions.
+(define_insn_reservation "ir_xlr_alu" 1
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "arith,shift,clz,const,unknown,multi,nop,trap"))
+ "xlr_main_pipe")
+
+;; Integer arithmetic instructions.
+(define_insn_reservation "ir_xlr_condmove" 2
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "condmove"))
+ "xlr_main_pipe")
+
+;; Load/store instructions.
+(define_insn_reservation "ir_xlr_load" 4
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "load"))
+ "xlr_main_pipe")
+
+(define_insn_reservation "ir_xlr_store" 1
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "store"))
+ "xlr_main_pipe")
+
+(define_insn_reservation "ir_xlr_prefetch_x" 1
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "prefetch,prefetchx"))
+ "xlr_main_pipe")
+
+;; Branch instructions - use branch misprediction latency.
+(define_insn_reservation "ir_xlr_branch" 1
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "branch,jump,call"))
+ "xlr_main_pipe")
+
+;; Coprocessor move instructions.
+(define_insn_reservation "ir_xlr_xfer" 2
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "mtc,mfc"))
+ "xlr_main_pipe")
+
+(define_bypass 5 "ir_xlr_xfer" "ir_xlr_xfer")
+
+;; Definitions for the xlr_muldiv automaton.
+(define_cpu_unit "xlr_imuldiv_nopipe" "xlr_muldiv")
+
+(define_insn_reservation "ir_xlr_imul" 8
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "imul,imul3,imadd"))
+ "xlr_main_pipe,xlr_imuldiv_nopipe*6")
+
+(define_insn_reservation "ir_xlr_div" 68
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "idiv"))
+ "xlr_main_pipe,xlr_imuldiv_nopipe*67")
+
+(define_insn_reservation "xlr_hilo" 2
+ (and (eq_attr "cpu" "xlr")
+ (eq_attr "type" "mfhilo,mthilo"))
+ "xlr_imuldiv_nopipe")
diff --git a/gcc/config/rs6000/dfp.md b/gcc/config/rs6000/dfp.md
index 2d111b8df64..90eaa2a0d55 100644
--- a/gcc/config/rs6000/dfp.md
+++ b/gcc/config/rs6000/dfp.md
@@ -155,7 +155,7 @@
(define_expand "negdd2"
[(set (match_operand:DD 0 "gpc_reg_operand" "")
(neg:DD (match_operand:DD 1 "gpc_reg_operand" "")))]
- "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
"")
(define_insn "*negdd2_fpr"
@@ -168,7 +168,7 @@
(define_expand "absdd2"
[(set (match_operand:DD 0 "gpc_reg_operand" "")
(abs:DD (match_operand:DD 1 "gpc_reg_operand" "")))]
- "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
"")
(define_insn "*absdd2_fpr"
@@ -376,7 +376,7 @@
(define_insn "*movdd_softfloat32"
[(set (match_operand:DD 0 "nonimmediate_operand" "=r,r,m,r,r,r")
(match_operand:DD 1 "input_operand" "r,m,r,G,H,F"))]
- "! TARGET_POWERPC64 && TARGET_SOFT_FLOAT
+ "! TARGET_POWERPC64 && (TARGET_SOFT_FLOAT || !TARGET_FPRS)
&& (gpc_reg_operand (operands[0], DDmode)
|| gpc_reg_operand (operands[1], DDmode))"
"*
@@ -486,7 +486,7 @@
(define_expand "negtd2"
[(set (match_operand:TD 0 "gpc_reg_operand" "")
(neg:TD (match_operand:TD 1 "gpc_reg_operand" "")))]
- "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
"")
(define_insn "*negtd2_fpr"
@@ -499,7 +499,7 @@
(define_expand "abstd2"
[(set (match_operand:TD 0 "gpc_reg_operand" "")
(abs:TD (match_operand:TD 1 "gpc_reg_operand" "")))]
- "TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)"
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
"")
(define_insn "*abstd2_fpr"
diff --git a/gcc/config/rs6000/linux64.h b/gcc/config/rs6000/linux64.h
index e83e0e9697a..f9221f3ad32 100644
--- a/gcc/config/rs6000/linux64.h
+++ b/gcc/config/rs6000/linux64.h
@@ -477,8 +477,7 @@ extern int dot_symbols;
&& GET_MODE_BITSIZE (MODE) <= GET_MODE_BITSIZE (Pmode)) \
|| (GET_CODE (X) == CONST_DOUBLE \
&& ((TARGET_64BIT \
- && (TARGET_POWERPC64 \
- || TARGET_MINIMAL_TOC \
+ && (TARGET_MINIMAL_TOC \
|| (SCALAR_FLOAT_MODE_P (GET_MODE (X)) \
&& ! TARGET_NO_FP_IN_TOC))) \
|| (!TARGET_64BIT \
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 19734767797..91e1732a8f4 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -3199,24 +3199,26 @@ invalid_e500_subreg (rtx op, enum machine_mode mode)
if (TARGET_E500_DOUBLE)
{
/* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
- subreg:TI and reg:TF. */
+ subreg:TI and reg:TF. Decimal float modes are like integer
+ modes (only low part of each register used) for this
+ purpose. */
if (GET_CODE (op) == SUBREG
- && (mode == SImode || mode == DImode || mode == TImode)
+ && (mode == SImode || mode == DImode || mode == TImode
+ || mode == DDmode || mode == TDmode)
&& REG_P (SUBREG_REG (op))
&& (GET_MODE (SUBREG_REG (op)) == DFmode
- || GET_MODE (SUBREG_REG (op)) == TFmode
- || GET_MODE (SUBREG_REG (op)) == DDmode
- || GET_MODE (SUBREG_REG (op)) == TDmode))
+ || GET_MODE (SUBREG_REG (op)) == TFmode))
return true;
/* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
reg:TI. */
if (GET_CODE (op) == SUBREG
- && (mode == DFmode || mode == TFmode
- || mode == DDmode || mode == TDmode)
+ && (mode == DFmode || mode == TFmode)
&& REG_P (SUBREG_REG (op))
&& (GET_MODE (SUBREG_REG (op)) == DImode
- || GET_MODE (SUBREG_REG (op)) == TImode))
+ || GET_MODE (SUBREG_REG (op)) == TImode
+ || GET_MODE (SUBREG_REG (op)) == DDmode
+ || GET_MODE (SUBREG_REG (op)) == TDmode))
return true;
}
@@ -3467,10 +3469,10 @@ rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x, int strict)
return SPE_CONST_OFFSET_OK (offset);
case DFmode:
- case DDmode:
if (TARGET_E500_DOUBLE)
return SPE_CONST_OFFSET_OK (offset);
+ case DDmode:
case DImode:
/* On e500v2, we may have:
@@ -3487,11 +3489,11 @@ rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x, int strict)
break;
case TFmode:
- case TDmode:
if (TARGET_E500_DOUBLE)
return (SPE_CONST_OFFSET_OK (offset)
&& SPE_CONST_OFFSET_OK (offset + 8));
+ case TDmode:
case TImode:
if (mode == TFmode || mode == TDmode || !TARGET_POWERPC64)
extra = 12;
@@ -3638,7 +3640,8 @@ rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
&& !(SPE_VECTOR_MODE (mode)
|| ALTIVEC_VECTOR_MODE (mode)
|| (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
- || mode == DImode))))
+ || mode == DImode || mode == DDmode
+ || mode == TDmode))))
{
HOST_WIDE_INT high_int, low_int;
rtx sum;
@@ -3655,7 +3658,7 @@ rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
&& ((TARGET_HARD_FLOAT && TARGET_FPRS)
|| TARGET_POWERPC64
|| ((mode != DImode && mode != DFmode && mode != DDmode)
- || TARGET_E500_DOUBLE))
+ || (TARGET_E500_DOUBLE && mode != DDmode)))
&& (TARGET_POWERPC64 || mode != DImode)
&& mode != TImode
&& mode != TFmode
@@ -3704,7 +3707,7 @@ rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
reg + offset] is not a legitimate addressing mode. */
y = gen_rtx_PLUS (Pmode, op1, op2);
- if (GET_MODE_SIZE (mode) > 8 && REG_P (op2))
+ if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
return force_reg (Pmode, y);
else
return y;
@@ -4265,7 +4268,8 @@ rs6000_legitimate_address (enum machine_mode mode, rtx x, int reg_ok_strict)
&& mode != TDmode
&& ((TARGET_HARD_FLOAT && TARGET_FPRS)
|| TARGET_POWERPC64
- || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
+ || (mode != DFmode && mode != DDmode)
+ || (TARGET_E500_DOUBLE && mode != DDmode))
&& (TARGET_POWERPC64 || mode != DImode)
&& legitimate_indexed_address_p (x, reg_ok_strict))
return 1;
@@ -4320,8 +4324,7 @@ rs6000_mode_dependent_address (rtx addr)
case LO_SUM:
return true;
- case PRE_INC:
- case PRE_DEC:
+ /* Auto-increment cases are now treated generically in recog.c. */
case PRE_MODIFY:
return TARGET_UPDATE;
@@ -4389,7 +4392,8 @@ rs6000_hard_regno_nregs (int regno, enum machine_mode mode)
would require function_arg and rs6000_spe_function_arg to handle
SCmode so as to pass the value correctly in a pair of
registers. */
- if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode)
+ if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
+ && !DECIMAL_FLOAT_MODE_P (mode))
return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
@@ -5664,14 +5668,12 @@ spe_build_register_parallel (enum machine_mode mode, int gregno)
switch (mode)
{
case DFmode:
- case DDmode:
r1 = gen_rtx_REG (DImode, gregno);
r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
case DCmode:
case TFmode:
- case TDmode:
r1 = gen_rtx_REG (DImode, gregno);
r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
r3 = gen_rtx_REG (DImode, gregno + 2);
@@ -5704,13 +5706,12 @@ rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
/* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
are passed and returned in a pair of GPRs for ABI compatibility. */
if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
- || mode == DDmode || mode == TDmode
|| mode == DCmode || mode == TCmode))
{
int n_words = rs6000_arg_size (mode, type);
/* Doubles go in an odd/even register pair (r5/r6, etc). */
- if (mode == DFmode || mode == DDmode)
+ if (mode == DFmode)
gregno += (1 - gregno) & 1;
/* Multi-reg args are not split between registers and stack. */
@@ -6123,10 +6124,8 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
else if (TARGET_SPE_ABI && TARGET_SPE
&& (SPE_VECTOR_MODE (mode)
|| (TARGET_E500_DOUBLE && (mode == DFmode
- || mode == DDmode
|| mode == DCmode
|| mode == TFmode
- || mode == TDmode
|| mode == TCmode))))
return rs6000_spe_function_arg (cum, mode, type);
@@ -6885,7 +6884,9 @@ rs6000_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
/* _Decimal32 varargs are located in the second word of the 64-bit
FP register for 32-bit binaries. */
- if (!TARGET_POWERPC64 && TYPE_MODE (type) == SDmode)
+ if (!TARGET_POWERPC64
+ && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TYPE_MODE (type) == SDmode)
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (size));
t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
@@ -14015,8 +14016,8 @@ rs6000_split_multireg_move (rtx dst, rtx src)
reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
else if (ALTIVEC_REGNO_P (reg))
reg_mode = V16QImode;
- else if (TARGET_E500_DOUBLE && (mode == TFmode || mode == TDmode))
- reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
+ else if (TARGET_E500_DOUBLE && mode == TFmode)
+ reg_mode = DFmode;
else
reg_mode = word_mode;
reg_mode_size = GET_MODE_SIZE (reg_mode);
@@ -14757,8 +14758,7 @@ spe_func_has_64bit_regs_p (void)
if (SPE_VECTOR_MODE (mode))
return true;
- if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
- || mode == DDmode || mode == TDmode))
+ if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
return true;
}
}
@@ -15509,7 +15509,7 @@ emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
/* Some cases that need register indexed addressing. */
if ((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
- || (TARGET_E500_DOUBLE && (mode == DFmode || mode == DDmode))
+ || (TARGET_E500_DOUBLE && mode == DFmode)
|| (TARGET_SPE_ABI
&& SPE_VECTOR_MODE (mode)
&& !SPE_CONST_OFFSET_OK (offset)))
@@ -15549,7 +15549,7 @@ gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
int_rtx = GEN_INT (offset);
if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode))
- || (TARGET_E500_DOUBLE && (mode == DFmode || mode == DDmode)))
+ || (TARGET_E500_DOUBLE && mode == DFmode))
{
offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
emit_move_insn (offset_rtx, int_rtx);
@@ -21864,8 +21864,8 @@ rs6000_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
&& ALTIVEC_VECTOR_MODE (mode))
regno = ALTIVEC_ARG_RETURN;
else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
- && (mode == DFmode || mode == DDmode || mode == DCmode
- || mode == TFmode || mode == TDmode || mode == TCmode))
+ && (mode == DFmode || mode == DCmode
+ || mode == TFmode || mode == TCmode))
return spe_build_register_parallel (mode, GP_ARG_RETURN);
else
regno = GP_ARG_RETURN;
@@ -21906,8 +21906,8 @@ rs6000_libcall_value (enum machine_mode mode)
else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
return rs6000_complex_function_value (mode);
else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
- && (mode == DFmode || mode == DDmode || mode == DCmode
- || mode == TFmode || mode == TDmode || mode == TCmode))
+ && (mode == DFmode || mode == DCmode
+ || mode == TFmode || mode == TCmode))
return spe_build_register_parallel (mode, GP_ARG_RETURN);
else
regno = GP_ARG_RETURN;
diff --git a/gcc/config/rs6000/rs6000.h b/gcc/config/rs6000/rs6000.h
index 442f72708b0..4c225a3f0e7 100644
--- a/gcc/config/rs6000/rs6000.h
+++ b/gcc/config/rs6000/rs6000.h
@@ -583,7 +583,7 @@ extern enum rs6000_nop_insertion rs6000_sched_insert_nops;
#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
((TARGET_ALTIVEC && TREE_CODE (TYPE) == VECTOR_TYPE) ? 128 : \
(TARGET_E500_DOUBLE \
- && (TYPE_MODE (TYPE) == DFmode || TYPE_MODE (TYPE) == DDmode)) ? 64 : \
+ && TYPE_MODE (TYPE) == DFmode) ? 64 : \
((TARGET_SPE && TREE_CODE (TYPE) == VECTOR_TYPE \
&& SPE_VECTOR_MODE (TYPE_MODE (TYPE))) || (TARGET_PAIRED_FLOAT \
&& TREE_CODE (TYPE) == VECTOR_TYPE \
@@ -609,7 +609,7 @@ extern enum rs6000_nop_insertion rs6000_sched_insert_nops;
fit into 1, whereas DI still needs two. */
#define MEMBER_TYPE_FORCES_BLK(FIELD, MODE) \
((TARGET_SPE && TREE_CODE (TREE_TYPE (FIELD)) == VECTOR_TYPE) \
- || (TARGET_E500_DOUBLE && ((MODE) == DFmode || (MODE) == DDmode)))
+ || (TARGET_E500_DOUBLE && (MODE) == DFmode))
/* A bit-field declared as `int' forces `int' alignment for the struct. */
#define PCC_BITFIELD_TYPE_MATTERS 1
@@ -630,7 +630,7 @@ extern enum rs6000_nop_insertion rs6000_sched_insert_nops;
(TREE_CODE (TYPE) == VECTOR_TYPE ? ((TARGET_SPE_ABI \
|| TARGET_PAIRED_FLOAT) ? 64 : 128) \
: (TARGET_E500_DOUBLE \
- && (TYPE_MODE (TYPE) == DFmode || TYPE_MODE (TYPE) == DDmode)) ? 64 \
+ && TYPE_MODE (TYPE) == DFmode) ? 64 \
: TREE_CODE (TYPE) == ARRAY_TYPE \
&& TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
&& (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
@@ -1212,7 +1212,7 @@ enum reg_class
(((CLASS) == FLOAT_REGS) \
? ((GET_MODE_SIZE (MODE) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD) \
: (TARGET_E500_DOUBLE && (CLASS) == GENERAL_REGS \
- && ((MODE) == DFmode || (MODE) == DDmode)) \
+ && (MODE) == DFmode) \
? 1 \
: ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
diff --git a/gcc/config/rs6000/xcoff.h b/gcc/config/rs6000/xcoff.h
index c4bceca833c..3cf6e4b13ab 100644
--- a/gcc/config/rs6000/xcoff.h
+++ b/gcc/config/rs6000/xcoff.h
@@ -83,8 +83,7 @@
|| (GET_CODE (X) == CONST_INT \
&& GET_MODE_BITSIZE (MODE) <= GET_MODE_BITSIZE (Pmode)) \
|| (GET_CODE (X) == CONST_DOUBLE \
- && (TARGET_POWERPC64 \
- || TARGET_MINIMAL_TOC \
+ && (TARGET_MINIMAL_TOC \
|| (SCALAR_FLOAT_MODE_P (GET_MODE (X)) \
&& ! TARGET_NO_FP_IN_TOC)))))
diff --git a/gcc/config/xtensa/lib2funcs.S b/gcc/config/xtensa/lib2funcs.S
index 16d6734c277..f8bea21c6a3 100644
--- a/gcc/config/xtensa/lib2funcs.S
+++ b/gcc/config/xtensa/lib2funcs.S
@@ -1,5 +1,5 @@
/* Assembly functions for libgcc2.
- Copyright (C) 2001, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2006, 2007, 2008 Free Software Foundation, Inc.
Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
This file is part of GCC.
@@ -105,7 +105,7 @@ __xtensa_nonlocal_goto:
addi a6, a7, -16 /* advance cur */
.Lfirstframe:
l32i a7, a6, 4 /* a7 = next */
- bge a2, a7, .Lnextframe
+ bgeu a2, a7, .Lnextframe
/* At this point, prev (a5) points to the save area with the saved
values of a0-a3. Copy those values into the save area at the
diff --git a/gcc/coverage.c b/gcc/coverage.c
index ec8d589539f..f2d26a29f4e 100644
--- a/gcc/coverage.c
+++ b/gcc/coverage.c
@@ -1,6 +1,7 @@
/* Read and write coverage files, and associated functionality.
Copyright (C) 1990, 1991, 1992, 1993, 1994, 1996, 1997, 1998, 1999,
- 2000, 2001, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+ 2000, 2001, 2003, 2004, 2005, 2007, 2008 Free Software Foundation,
+ Inc.
Contributed by James E. Wilson, UC Berkeley/Cygnus Support;
based on some ideas from Dain Samples of UC Berkeley.
Further mangling by Bob Manson, Cygnus Support.
@@ -489,7 +490,7 @@ coverage_checksum_string (unsigned chksum, const char *string)
_GLOBAL__N_<filename>_<wrongmagicnumber>_<magicnumber>functionname
since filename might contain extra underscores there seems
to be no better chance then walk all possible offsets looking
- for magicnuber. */
+ for magicnumber. */
if (offset)
{
for (i = i + offset; string[i]; i++)
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index 54afddc9851..936db240b42 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,70 @@
+2008-06-06 Jakub Jelinek <jakub@redhat.com>
+
+ * cp-tree.h (cxx_omp_finish_clause, cxx_omp_create_clause_info,
+ dependent_omp_for_p, begin_omp_task, finish_omp_task,
+ finish_omp_taskwait): New prototypes.
+ (cxx_omp_clause_default_ctor): Add outer argument.
+ (finish_omp_for): Add new clauses argument.
+ * cp-gimplify.c (cxx_omp_finish_clause): New function.
+ (cxx_omp_predetermined_sharing): Moved from semantics.c, rewritten.
+ (cxx_omp_clause_default_ctor): Add outer argument.
+ (cp_genericize_r): Walk OMP_CLAUSE_LASTPRIVATE_STMT.
+ * cp-objcp-common.h (LANG_HOOKS_OMP_FINISH_CLAUSE): Define.
+ * parser.c (cp_parser_omp_for_loop): Parse collapsed for loops.
+ Add par_clauses argument. If decl is present in parallel's
+ lastprivate clause, change that clause to shared and add
+ a lastprivate clause for decl to OMP_FOR_CLAUSES.
+ Fix wording of error messages. Adjust finish_omp_for caller.
+ Add clauses argument. Parse loops with random access iterators.
+ (cp_parser_omp_clause_collapse, cp_parser_omp_clause_untied): New
+ functions.
+ (cp_parser_omp_for, cp_parser_omp_parallel): Adjust
+ cp_parser_omp_for_loop callers.
+ (cp_parser_omp_for_cond, cp_parser_omp_for_incr): New helper
+ functions.
+ (cp_parser_omp_clause_name): Handle collapse and untied
+ clauses.
+ (cp_parser_omp_clause_schedule): Handle auto schedule.
+ (cp_parser_omp_all_clauses): Handle PRAGMA_OMP_CLAUSE_COLLAPSE
+ and PRAGMA_OMP_CLAUSE_UNTIED.
+ (OMP_FOR_CLAUSE_MASK): Add PRAGMA_OMP_CLAUSE_COLLAPSE.
+ (OMP_TASK_CLAUSE_MASK): Define.
+ (cp_parser_omp_task, cp_parser_omp_taskwait): New functions.
+ (cp_parser_omp_construct): Handle PRAGMA_OMP_TASK.
+ (cp_parser_pragma): Handle PRAGMA_OMP_TASK and
+ PRAGMA_OMP_TASKWAIT.
+ * pt.c (tsubst_omp_clauses): Handle OMP_CLAUSE_COLLAPSE and
+ OMP_CLAUSE_UNTIED. Handle OMP_CLAUSE_LASTPRIVATE_STMT.
+ (tsubst_omp_for_iterator): New function.
+ (dependent_omp_for_p): New function.
+ (tsubst_expr) <case OMP_FOR>: Use it. Handle collapsed OMP_FOR
+ loops. Adjust finish_omp_for caller. Handle loops with random
+ access iterators. Adjust for OMP_FOR_{INIT,COND,INCR} changes.
+ (tsubst_expr): Handle OMP_TASK.
+ * semantics.c (cxx_omp_create_clause_info): New function.
+ (finish_omp_clauses): Call it. Handle OMP_CLAUSE_UNTIED and
+ OMP_CLAUSE_COLLAPSE.
+ (cxx_omp_predetermined_sharing): Removed.
+ * semantics.c (finish_omp_for): Allow pointer iterators. Use
+ handle_omp_for_class_iterator and dependent_omp_for_p. Handle
+ collapsed for loops. Adjust c_finish_omp_for caller. Add new
+ clauses argument. Fix check for type dependent cond or incr.
+ Set OMP_FOR_CLAUSES to clauses. Use cp_convert instead of
+ fold_convert to convert incr amount to difference_type. Only
+ fold if not in template. If decl is mentioned in lastprivate
+ clause, set OMP_CLAUSE_LASTPRIVATE_STMT. Handle loops with random
+ access iterators. Adjust for OMP_FOR_{INIT,COND,INCR}
+ changes.
+ (finish_omp_threadprivate): Allow static class members of the
+ current class.
+ (handle_omp_for_class_iterator, begin_omp_task, finish_omp_task,
+ finish_omp_taskwait): New functions.
+
+ * parser.c (cp_parser_binary_expression): Add prec argument.
+ (cp_parser_assignment_expression): Adjust caller.
+ * cp-tree.h (outer_curly_brace_block): New prototype.
+ * decl.c (outer_curly_brace_block): No longer static.
+
2008-06-02 Paolo Carlini <paolo.carlini@oracle.com>
PR c++/36404
diff --git a/gcc/cp/call.c b/gcc/cp/call.c
index 0948c790c41..1d54e7cb43e 100644
--- a/gcc/cp/call.c
+++ b/gcc/cp/call.c
@@ -333,7 +333,7 @@ build_call_a (tree function, int n, tree *argarray)
nothrow = ((decl && TREE_NOTHROW (decl))
|| TYPE_NOTHROW_P (TREE_TYPE (TREE_TYPE (function))));
- if (decl && TREE_THIS_VOLATILE (decl) && cfun)
+ if (decl && TREE_THIS_VOLATILE (decl) && cfun && cp_function_chain)
current_function_returns_abnormally = 1;
if (decl && TREE_DEPRECATED (decl))
diff --git a/gcc/cp/cp-gimplify.c b/gcc/cp/cp-gimplify.c
index cc3e8479921..c6d64dfbb75 100644
--- a/gcc/cp/cp-gimplify.c
+++ b/gcc/cp/cp-gimplify.c
@@ -694,10 +694,19 @@ cp_genericize_r (tree *stmt_p, int *walk_subtrees, void *data)
else if (TREE_CODE (stmt) == OMP_CLAUSE)
switch (OMP_CLAUSE_CODE (stmt))
{
+ case OMP_CLAUSE_LASTPRIVATE:
+ /* Don't dereference an invisiref in OpenMP clauses. */
+ if (is_invisiref_parm (OMP_CLAUSE_DECL (stmt)))
+ {
+ *walk_subtrees = 0;
+ if (OMP_CLAUSE_LASTPRIVATE_STMT (stmt))
+ cp_walk_tree (&OMP_CLAUSE_LASTPRIVATE_STMT (stmt),
+ cp_genericize_r, p_set, NULL);
+ }
+ break;
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_SHARED:
case OMP_CLAUSE_FIRSTPRIVATE:
- case OMP_CLAUSE_LASTPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_COPYPRIVATE:
/* Don't dereference an invisiref in OpenMP clauses. */
@@ -893,7 +902,8 @@ cxx_omp_clause_apply_fn (tree fn, tree arg1, tree arg2)
NULL if there's nothing to do. */
tree
-cxx_omp_clause_default_ctor (tree clause, tree decl)
+cxx_omp_clause_default_ctor (tree clause, tree decl,
+ tree outer ATTRIBUTE_UNUSED)
{
tree info = CP_OMP_CLAUSE_INFO (clause);
tree ret = NULL;
@@ -958,3 +968,100 @@ cxx_omp_privatize_by_reference (const_tree decl)
{
return is_invisiref_parm (decl);
}
+
+/* True if OpenMP sharing attribute of DECL is predetermined. */
+
+enum omp_clause_default_kind
+cxx_omp_predetermined_sharing (tree decl)
+{
+ tree type;
+
+ /* Static data members are predetermined as shared. */
+ if (TREE_STATIC (decl))
+ {
+ tree ctx = CP_DECL_CONTEXT (decl);
+ if (TYPE_P (ctx) && MAYBE_CLASS_TYPE_P (ctx))
+ return OMP_CLAUSE_DEFAULT_SHARED;
+ }
+
+ type = TREE_TYPE (decl);
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ {
+ if (!is_invisiref_parm (decl))
+ return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
+ type = TREE_TYPE (type);
+
+ if (TREE_CODE (decl) == RESULT_DECL && DECL_NAME (decl))
+ {
+ /* NVR doesn't preserve const qualification of the
+ variable's type. */
+ tree outer = outer_curly_brace_block (current_function_decl);
+ tree var;
+
+ if (outer)
+ for (var = BLOCK_VARS (outer); var; var = TREE_CHAIN (var))
+ if (DECL_NAME (decl) == DECL_NAME (var)
+ && (TYPE_MAIN_VARIANT (type)
+ == TYPE_MAIN_VARIANT (TREE_TYPE (var))))
+ {
+ if (TYPE_READONLY (TREE_TYPE (var)))
+ type = TREE_TYPE (var);
+ break;
+ }
+ }
+ }
+
+ if (type == error_mark_node)
+ return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
+
+ /* Variables with const-qualified type having no mutable member
+ are predetermined shared. */
+ if (TYPE_READONLY (type) && !cp_has_mutable_p (type))
+ return OMP_CLAUSE_DEFAULT_SHARED;
+
+ return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
+}
+
+/* Finalize an implicitly determined clause. */
+
+void
+cxx_omp_finish_clause (tree c)
+{
+ tree decl, inner_type;
+ bool make_shared = false;
+
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_FIRSTPRIVATE)
+ return;
+
+ decl = OMP_CLAUSE_DECL (c);
+ decl = require_complete_type (decl);
+ inner_type = TREE_TYPE (decl);
+ if (decl == error_mark_node)
+ make_shared = true;
+ else if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE)
+ {
+ if (is_invisiref_parm (decl))
+ inner_type = TREE_TYPE (inner_type);
+ else
+ {
+ error ("%qE implicitly determined as %<firstprivate%> has reference type",
+ decl);
+ make_shared = true;
+ }
+ }
+
+ /* We're interested in the base element, not arrays. */
+ while (TREE_CODE (inner_type) == ARRAY_TYPE)
+ inner_type = TREE_TYPE (inner_type);
+
+ /* Check for special function availability by building a call to one.
+ Save the results, because later we won't be in the right context
+ for making these queries. */
+ if (!make_shared
+ && CLASS_TYPE_P (inner_type)
+ && cxx_omp_create_clause_info (c, inner_type, false, true, false))
+ make_shared = true;
+
+ if (make_shared)
+ OMP_CLAUSE_CODE (c) = OMP_CLAUSE_SHARED;
+}
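
The new cxx_omp_finish_clause hook post-processes implicitly determined firstprivate clauses: reference-typed entries either look through invisible reference parameters or are demoted to shared with an error, and class-typed entries have their special-member information recorded via cxx_omp_create_clause_info. Those cases are C++-specific; as a plain-C illustration of the kind of implicit clause being finalized (illustrative only; emit() is a made-up name):

    #include <stdio.h>

    /* 'x' is not shared in the enclosing context, so on the task
       construct it is implicitly determined firstprivate; the
       OMP_FINISH_CLAUSE hook added above is where the C++ front end
       fixes up such implicit clauses for reference and class types.  */
    void
    emit (int x)
    {
    #pragma omp task
      printf ("%d\n", x);
    }
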
diff --git a/gcc/cp/cp-objcp-common.h b/gcc/cp/cp-objcp-common.h
index b2b8405fccd..1121eb08479 100644
--- a/gcc/cp/cp-objcp-common.h
+++ b/gcc/cp/cp-objcp-common.h
@@ -141,6 +141,8 @@ extern tree objcp_tsubst_copy_and_build (tree, tree, tsubst_flags_t,
#define LANG_HOOKS_OMP_CLAUSE_ASSIGN_OP cxx_omp_clause_assign_op
#undef LANG_HOOKS_OMP_CLAUSE_DTOR
#define LANG_HOOKS_OMP_CLAUSE_DTOR cxx_omp_clause_dtor
+#undef LANG_HOOKS_OMP_FINISH_CLAUSE
+#define LANG_HOOKS_OMP_FINISH_CLAUSE cxx_omp_finish_clause
#undef LANG_HOOKS_OMP_PRIVATIZE_BY_REFERENCE
#define LANG_HOOKS_OMP_PRIVATIZE_BY_REFERENCE cxx_omp_privatize_by_reference
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index 0c3d0dd21aa..952020ad045 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -4235,6 +4235,7 @@ extern void start_preparsed_function (tree, tree, int);
extern int start_function (cp_decl_specifier_seq *, const cp_declarator *, tree);
extern tree begin_function_body (void);
extern void finish_function_body (tree);
+extern tree outer_curly_brace_block (tree);
extern tree finish_function (int);
extern tree start_method (cp_decl_specifier_seq *, const cp_declarator *, tree);
extern tree finish_method (tree);
@@ -4468,6 +4469,7 @@ extern bool type_dependent_expression_p (tree);
extern bool any_type_dependent_arguments_p (const_tree);
extern bool value_dependent_expression_p (tree);
extern bool any_value_dependent_elements_p (const_tree);
+extern bool dependent_omp_for_p (tree, tree, tree, tree);
extern tree resolve_typename_type (tree, bool);
extern tree template_for_substitution (tree);
extern tree build_non_dependent_expr (tree);
@@ -4666,17 +4668,22 @@ extern tree begin_omp_structured_block (void);
extern tree finish_omp_structured_block (tree);
extern tree begin_omp_parallel (void);
extern tree finish_omp_parallel (tree, tree);
+extern tree begin_omp_task (void);
+extern tree finish_omp_task (tree, tree);
extern tree finish_omp_for (location_t, tree, tree,
- tree, tree, tree, tree);
+ tree, tree, tree, tree, tree);
extern void finish_omp_atomic (enum tree_code, tree, tree);
extern void finish_omp_barrier (void);
extern void finish_omp_flush (void);
+extern void finish_omp_taskwait (void);
extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree);
-extern tree cxx_omp_clause_default_ctor (tree, tree);
+extern tree cxx_omp_clause_default_ctor (tree, tree, tree);
extern tree cxx_omp_clause_copy_ctor (tree, tree, tree);
extern tree cxx_omp_clause_assign_op (tree, tree, tree);
extern tree cxx_omp_clause_dtor (tree, tree);
+extern void cxx_omp_finish_clause (tree);
extern bool cxx_omp_privatize_by_reference (const_tree);
+extern bool cxx_omp_create_clause_info (tree, tree, bool, bool, bool);
extern tree baselink_for_fns (tree);
extern void finish_static_assert (tree, tree, location_t,
bool);
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 0898d5d7fd2..8056518e746 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -11759,7 +11759,7 @@ finish_function_body (tree compstmt)
of curly braces, skipping the artificial block created for constructor
initializers. */
-static tree
+tree
outer_curly_brace_block (tree fndecl)
{
tree block = BLOCK_SUBBLOCKS (DECL_INITIAL (fndecl));
diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c
index c3383e6441e..5ca1bd7e533 100644
--- a/gcc/cp/parser.c
+++ b/gcc/cp/parser.c
@@ -1611,7 +1611,7 @@ static tree cp_parser_delete_expression
static tree cp_parser_cast_expression
(cp_parser *, bool, bool);
static tree cp_parser_binary_expression
- (cp_parser *, bool);
+ (cp_parser *, bool, enum cp_parser_prec);
static tree cp_parser_question_colon_clause
(cp_parser *, tree);
static tree cp_parser_assignment_expression
@@ -6008,14 +6008,15 @@ cp_parser_cast_expression (cp_parser *parser, bool address_p, bool cast_p)
: binops_by_token[token->type].prec)
static tree
-cp_parser_binary_expression (cp_parser* parser, bool cast_p)
+cp_parser_binary_expression (cp_parser* parser, bool cast_p,
+ enum cp_parser_prec prec)
{
cp_parser_expression_stack stack;
cp_parser_expression_stack_entry *sp = &stack[0];
tree lhs, rhs;
cp_token *token;
enum tree_code tree_type, lhs_type, rhs_type;
- enum cp_parser_prec prec = PREC_NOT_OPERATOR, new_prec, lookahead_prec;
+ enum cp_parser_prec new_prec, lookahead_prec;
bool overloaded_p;
/* Parse the first expression. */
@@ -6192,7 +6193,7 @@ cp_parser_assignment_expression (cp_parser* parser, bool cast_p)
else
{
/* Parse the binary expressions (logical-or-expression). */
- expr = cp_parser_binary_expression (parser, cast_p);
+ expr = cp_parser_binary_expression (parser, cast_p, PREC_NOT_OPERATOR);
/* If the next token is a `?' then we're actually looking at a
conditional-expression. */
if (cp_lexer_next_token_is (parser->lexer, CPP_QUERY))
@@ -19493,7 +19494,9 @@ cp_parser_omp_clause_name (cp_parser *parser)
switch (p[0])
{
case 'c':
- if (!strcmp ("copyin", p))
+ if (!strcmp ("collapse", p))
+ result = PRAGMA_OMP_CLAUSE_COLLAPSE;
+ else if (!strcmp ("copyin", p))
result = PRAGMA_OMP_CLAUSE_COPYIN;
else if (!strcmp ("copyprivate", p))
result = PRAGMA_OMP_CLAUSE_COPYPRIVATE;
@@ -19526,6 +19529,10 @@ cp_parser_omp_clause_name (cp_parser *parser)
else if (!strcmp ("shared", p))
result = PRAGMA_OMP_CLAUSE_SHARED;
break;
+ case 'u':
+ if (!strcmp ("untied", p))
+ result = PRAGMA_OMP_CLAUSE_UNTIED;
+ break;
}
}
@@ -19628,6 +19635,47 @@ cp_parser_omp_var_list (cp_parser *parser, enum omp_clause_code kind, tree list)
return list;
}
+/* OpenMP 3.0:
+ collapse ( constant-expression ) */
+
+static tree
+cp_parser_omp_clause_collapse (cp_parser *parser, tree list)
+{
+ tree c, num;
+ location_t loc;
+ HOST_WIDE_INT n;
+
+ loc = cp_lexer_peek_token (parser->lexer)->location;
+ if (!cp_parser_require (parser, CPP_OPEN_PAREN, "%<(%>"))
+ return list;
+
+ num = cp_parser_constant_expression (parser, false, NULL);
+
+ if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "%<)%>"))
+ cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
+ /*or_comma=*/false,
+ /*consume_paren=*/true);
+
+ if (num == error_mark_node)
+ return list;
+ num = fold_non_dependent_expr (num);
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (num))
+ || !host_integerp (num, 0)
+ || (n = tree_low_cst (num, 0)) <= 0
+ || (int) n != n)
+ {
+ error ("%Hcollapse argument needs positive constant integer expression", &loc);
+ return list;
+ }
+
+ check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse");
+ c = build_omp_clause (OMP_CLAUSE_COLLAPSE);
+ OMP_CLAUSE_CHAIN (c) = list;
+ OMP_CLAUSE_COLLAPSE_EXPR (c) = num;
+
+ return c;
+}
+
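   A minimal usage sketch (editorial) of the collapse clause this routine
   parses; the argument must be a positive integer constant expression and the
   associated loops must be perfectly nested:

       void init (int n, int m, double **a)
       {
       #pragma omp parallel for collapse(2)
         for (int i = 0; i < n; i++)
           for (int j = 0; j < m; j++)
             a[i][j] = 0.0;
       }
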
/* OpenMP 2.5:
default ( shared | none ) */
@@ -19839,7 +19887,7 @@ cp_parser_omp_clause_reduction (cp_parser *parser, tree list)
schedule ( schedule-kind , expression )
schedule-kind:
- static | dynamic | guided | runtime */
+ static | dynamic | guided | runtime | auto */
static tree
cp_parser_omp_clause_schedule (cp_parser *parser, tree list)
@@ -19882,6 +19930,8 @@ cp_parser_omp_clause_schedule (cp_parser *parser, tree list)
}
else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC))
OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
+ else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AUTO))
+ OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO;
else
goto invalid_kind;
cp_lexer_consume_token (parser->lexer);
@@ -19897,6 +19947,9 @@ cp_parser_omp_clause_schedule (cp_parser *parser, tree list)
else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME)
error ("schedule %<runtime%> does not take "
"a %<chunk_size%> parameter");
+ else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO)
+ error ("schedule %<auto%> does not take "
+ "a %<chunk_size%> parameter");
else
OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t;
@@ -19919,6 +19972,21 @@ cp_parser_omp_clause_schedule (cp_parser *parser, tree list)
return list;
}
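   An editorial sketch of the new schedule kind; unlike static, dynamic and
   guided, schedule(auto) takes no chunk_size argument:

       void scale (int n, double *x)
       {
       #pragma omp parallel for schedule(auto)
         for (int i = 0; i < n; i++)
           x[i] *= 2.0;                /* schedule(auto, 4) would be rejected */
       }
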
+/* OpenMP 3.0:
+ untied */
+
+static tree
+cp_parser_omp_clause_untied (cp_parser *parser ATTRIBUTE_UNUSED, tree list)
+{
+ tree c;
+
+ check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied");
+
+ c = build_omp_clause (OMP_CLAUSE_UNTIED);
+ OMP_CLAUSE_CHAIN (c) = list;
+ return c;
+}
+
/* Parse all OpenMP clauses. The set clauses allowed by the directive
is a bitmask in MASK. Return the list of clauses found; the result
of clause default goes in *pdefault. */
@@ -19944,6 +20012,10 @@ cp_parser_omp_all_clauses (cp_parser *parser, unsigned int mask,
switch (c_kind)
{
+ case PRAGMA_OMP_CLAUSE_COLLAPSE:
+ clauses = cp_parser_omp_clause_collapse (parser, clauses);
+ c_name = "collapse";
+ break;
case PRAGMA_OMP_CLAUSE_COPYIN:
clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYIN, clauses);
c_name = "copyin";
@@ -20001,6 +20073,10 @@ cp_parser_omp_all_clauses (cp_parser *parser, unsigned int mask,
clauses);
c_name = "shared";
break;
+ case PRAGMA_OMP_CLAUSE_UNTIED:
+ clauses = cp_parser_omp_clause_untied (parser, clauses);
+ c_name = "nowait";
+ break;
default:
cp_parser_error (parser, "expected %<#pragma omp%> clause");
goto saw_error;
@@ -20210,94 +20286,454 @@ cp_parser_omp_flush (cp_parser *parser, cp_token *pragma_tok)
finish_omp_flush ();
}
-/* Parse the restricted form of the for statment allowed by OpenMP. */
+/* Helper function, to parse omp for condition expression. */
static tree
-cp_parser_omp_for_loop (cp_parser *parser)
+cp_parser_omp_for_cond (cp_parser *parser, tree decl)
{
- tree init, cond, incr, body, decl, pre_body;
- location_t loc;
+ tree lhs = cp_parser_cast_expression (parser, false, false), rhs;
+ enum tree_code op;
+ cp_token *token;
- if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
+ if (lhs != decl)
{
- cp_parser_error (parser, "for statement expected");
- return NULL;
+ cp_parser_skip_to_end_of_statement (parser);
+ return error_mark_node;
}
- loc = cp_lexer_consume_token (parser->lexer)->location;
- if (!cp_parser_require (parser, CPP_OPEN_PAREN, "%<(%>"))
- return NULL;
- init = decl = NULL;
- pre_body = push_stmt_list ();
- if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
+ token = cp_lexer_peek_token (parser->lexer);
+ op = binops_by_token [token->type].tree_type;
+ switch (op)
+ {
+ case LT_EXPR:
+ case LE_EXPR:
+ case GT_EXPR:
+ case GE_EXPR:
+ break;
+ default:
+ cp_parser_skip_to_end_of_statement (parser);
+ return error_mark_node;
+ }
+
+ cp_lexer_consume_token (parser->lexer);
+ rhs = cp_parser_binary_expression (parser, false,
+ PREC_RELATIONAL_EXPRESSION);
+ if (rhs == error_mark_node
+ || cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
{
- cp_decl_specifier_seq type_specifiers;
+ cp_parser_skip_to_end_of_statement (parser);
+ return error_mark_node;
+ }
- /* First, try to parse as an initialized declaration. See
- cp_parser_condition, from whence the bulk of this is copied. */
+ return build2 (op, boolean_type_node, lhs, rhs);
+}
- cp_parser_parse_tentatively (parser);
- cp_parser_type_specifier_seq (parser, /*is_condition=*/false,
- &type_specifiers);
- if (!cp_parser_error_occurred (parser))
+/* Helper function, to parse omp for increment expression. */
+
+static tree
+cp_parser_omp_for_incr (cp_parser *parser, tree decl)
+{
+ cp_token *token = cp_lexer_peek_token (parser->lexer);
+ enum tree_code op;
+ tree lhs, rhs;
+ cp_id_kind idk;
+ bool decl_first;
+
+ if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS)
+ {
+ op = (token->type == CPP_PLUS_PLUS
+ ? PREINCREMENT_EXPR : PREDECREMENT_EXPR);
+ cp_lexer_consume_token (parser->lexer);
+ lhs = cp_parser_cast_expression (parser, false, false);
+ if (lhs != decl)
+ return error_mark_node;
+ return build2 (op, TREE_TYPE (decl), decl, NULL_TREE);
+ }
+
+ lhs = cp_parser_primary_expression (parser, false, false, false, &idk);
+ if (lhs != decl)
+ return error_mark_node;
+
+ token = cp_lexer_peek_token (parser->lexer);
+ if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS)
+ {
+ op = (token->type == CPP_PLUS_PLUS
+ ? POSTINCREMENT_EXPR : POSTDECREMENT_EXPR);
+ cp_lexer_consume_token (parser->lexer);
+ return build2 (op, TREE_TYPE (decl), decl, NULL_TREE);
+ }
+
+ op = cp_parser_assignment_operator_opt (parser);
+ if (op == ERROR_MARK)
+ return error_mark_node;
+
+ if (op != NOP_EXPR)
+ {
+ rhs = cp_parser_assignment_expression (parser, false);
+ rhs = build2 (op, TREE_TYPE (decl), decl, rhs);
+ return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs);
+ }
+
+ lhs = cp_parser_binary_expression (parser, false,
+ PREC_ADDITIVE_EXPRESSION);
+ token = cp_lexer_peek_token (parser->lexer);
+ decl_first = lhs == decl;
+ if (decl_first)
+ lhs = NULL_TREE;
+ if (token->type != CPP_PLUS
+ && token->type != CPP_MINUS)
+ return error_mark_node;
+
+ do
+ {
+ op = token->type == CPP_PLUS ? PLUS_EXPR : MINUS_EXPR;
+ cp_lexer_consume_token (parser->lexer);
+ rhs = cp_parser_binary_expression (parser, false,
+ PREC_ADDITIVE_EXPRESSION);
+ token = cp_lexer_peek_token (parser->lexer);
+ if (token->type == CPP_PLUS || token->type == CPP_MINUS || decl_first)
{
- tree asm_specification, attributes;
- cp_declarator *declarator;
-
- declarator = cp_parser_declarator (parser,
- CP_PARSER_DECLARATOR_NAMED,
- /*ctor_dtor_or_conv_p=*/NULL,
- /*parenthesized_p=*/NULL,
- /*member_p=*/false);
- attributes = cp_parser_attributes_opt (parser);
- asm_specification = cp_parser_asm_specification_opt (parser);
+ if (lhs == NULL_TREE)
+ {
+ if (op == PLUS_EXPR)
+ lhs = rhs;
+ else
+ lhs = build_x_unary_op (NEGATE_EXPR, rhs, tf_warning_or_error);
+ }
+ else
+ lhs = build_x_binary_op (op, lhs, ERROR_MARK, rhs, ERROR_MARK,
+ NULL, tf_warning_or_error);
+ }
+ }
+ while (token->type == CPP_PLUS || token->type == CPP_MINUS);
- cp_parser_require (parser, CPP_EQ, "%<=%>");
- if (cp_parser_parse_definitely (parser))
+ if (!decl_first)
+ {
+ if (rhs != decl || op == MINUS_EXPR)
+ return error_mark_node;
+ rhs = build2 (op, TREE_TYPE (decl), lhs, decl);
+ }
+ else
+ rhs = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, lhs);
+
+ return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs);
+}
+
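   A hedged example (invented code) of the iterator-controlled loop these two
   helpers parse: the condition must compare the iteration variable with <, <=,
   > or >=, and the increment must be one of ++, --, +=, -= or
   var = var +/- expr, so for instance "it != v.end ()" is not accepted here:

       #include <vector>

       void zero (std::vector<double> &v)
       {
       #pragma omp parallel for
         for (std::vector<double>::iterator it = v.begin (); it < v.end (); ++it)
           *it = 0.0;
       }
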
+/* Parse the restricted form of the for statement allowed by OpenMP. */
+
+static tree
+cp_parser_omp_for_loop (cp_parser *parser, tree clauses, tree *par_clauses)
+{
+ tree init, cond, incr, body, decl, pre_body = NULL_TREE, ret;
+ tree for_block = NULL_TREE, real_decl, initv, condv, incrv, declv;
+ tree this_pre_body, cl;
+ location_t loc_first;
+ bool collapse_err = false;
+ int i, collapse = 1, nbraces = 0;
+
+ for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl))
+ if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE)
+ collapse = tree_low_cst (OMP_CLAUSE_COLLAPSE_EXPR (cl), 0);
+
+ gcc_assert (collapse >= 1);
+
+ declv = make_tree_vec (collapse);
+ initv = make_tree_vec (collapse);
+ condv = make_tree_vec (collapse);
+ incrv = make_tree_vec (collapse);
+
+ loc_first = cp_lexer_peek_token (parser->lexer)->location;
+
+ for (i = 0; i < collapse; i++)
+ {
+ int bracecount = 0;
+ bool add_private_clause = false;
+ location_t loc;
+
+ if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
+ {
+ cp_parser_error (parser, "for statement expected");
+ return NULL;
+ }
+ loc = cp_lexer_consume_token (parser->lexer)->location;
+
+ if (!cp_parser_require (parser, CPP_OPEN_PAREN, "%<(%>"))
+ return NULL;
+
+ init = decl = real_decl = NULL;
+ this_pre_body = push_stmt_list ();
+ if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
+ {
+ cp_decl_specifier_seq type_specifiers;
+
+ /* First, try to parse as an initialized declaration. See
+ cp_parser_condition, from whence the bulk of this is copied. */
+
+ cp_parser_parse_tentatively (parser);
+ cp_parser_type_specifier_seq (parser, /*is_condition=*/false,
+ &type_specifiers);
+ if (!cp_parser_error_occurred (parser))
{
- tree pushed_scope;
+ tree asm_specification, attributes;
+ cp_declarator *declarator;
+
+ declarator = cp_parser_declarator (parser,
+ CP_PARSER_DECLARATOR_NAMED,
+ /*ctor_dtor_or_conv_p=*/NULL,
+ /*parenthesized_p=*/NULL,
+ /*member_p=*/false);
+ attributes = cp_parser_attributes_opt (parser);
+ asm_specification = cp_parser_asm_specification_opt (parser);
+
+ if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ))
+ cp_parser_require (parser, CPP_EQ, "%<=%>");
+ if (cp_parser_parse_definitely (parser))
+ {
+ tree pushed_scope;
+
+ decl = start_decl (declarator, &type_specifiers,
+ /*initialized_p=*/false, attributes,
+ /*prefix_attributes=*/NULL_TREE,
+ &pushed_scope);
+
+ if (CLASS_TYPE_P (TREE_TYPE (decl))
+ || type_dependent_expression_p (decl))
+ {
+ bool is_parenthesized_init, is_non_constant_init;
+
+ init = cp_parser_initializer (parser,
+ &is_parenthesized_init,
+ &is_non_constant_init);
+
+ cp_finish_decl (decl, init, !is_non_constant_init,
+ asm_specification,
+ LOOKUP_ONLYCONVERTING);
+ if (CLASS_TYPE_P (TREE_TYPE (decl)))
+ {
+ for_block
+ = tree_cons (NULL, this_pre_body, for_block);
+ init = NULL_TREE;
+ }
+ else
+ init = pop_stmt_list (this_pre_body);
+ this_pre_body = NULL_TREE;
+ }
+ else
+ {
+ cp_parser_require (parser, CPP_EQ, "%<=%>");
+ init = cp_parser_assignment_expression (parser, false);
- decl = start_decl (declarator, &type_specifiers,
- /*initialized_p=*/false, attributes,
- /*prefix_attributes=*/NULL_TREE,
- &pushed_scope);
+ if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE)
+ init = error_mark_node;
+ else
+ cp_finish_decl (decl, NULL_TREE,
+ /*init_const_expr_p=*/false,
+ asm_specification,
+ LOOKUP_ONLYCONVERTING);
+ }
- init = cp_parser_assignment_expression (parser, false);
+ if (pushed_scope)
+ pop_scope (pushed_scope);
+ }
+ }
+ else
+ cp_parser_abort_tentative_parse (parser);
- if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE)
- init = error_mark_node;
+ /* If parsing as an initialized declaration failed, try again as
+ a simple expression. */
+ if (decl == NULL)
+ {
+ cp_id_kind idk;
+ cp_parser_parse_tentatively (parser);
+ decl = cp_parser_primary_expression (parser, false, false,
+ false, &idk);
+ if (!cp_parser_error_occurred (parser)
+ && decl
+ && DECL_P (decl)
+ && CLASS_TYPE_P (TREE_TYPE (decl)))
+ {
+ tree rhs;
+
+ cp_parser_parse_definitely (parser);
+ cp_parser_require (parser, CPP_EQ, "%<=%>");
+ rhs = cp_parser_assignment_expression (parser, false);
+ finish_expr_stmt (build_x_modify_expr (decl, NOP_EXPR,
+ rhs,
+ tf_warning_or_error));
+ add_private_clause = true;
+ }
else
- cp_finish_decl (decl, NULL_TREE, /*init_const_expr_p=*/false,
- asm_specification, LOOKUP_ONLYCONVERTING);
+ {
+ decl = NULL;
+ cp_parser_abort_tentative_parse (parser);
+ init = cp_parser_expression (parser, false);
+ if (init)
+ {
+ if (TREE_CODE (init) == MODIFY_EXPR
+ || TREE_CODE (init) == MODOP_EXPR)
+ real_decl = TREE_OPERAND (init, 0);
+ }
+ }
+ }
+ }
+ cp_parser_require (parser, CPP_SEMICOLON, "%<;%>");
+ if (this_pre_body)
+ {
+ this_pre_body = pop_stmt_list (this_pre_body);
+ if (pre_body)
+ {
+ tree t = pre_body;
+ pre_body = push_stmt_list ();
+ add_stmt (t);
+ add_stmt (this_pre_body);
+ pre_body = pop_stmt_list (pre_body);
+ }
+ else
+ pre_body = this_pre_body;
+ }
- if (pushed_scope)
- pop_scope (pushed_scope);
+ if (decl)
+ real_decl = decl;
+ if (par_clauses != NULL && real_decl != NULL_TREE)
+ {
+ tree *c;
+ for (c = par_clauses; *c ; )
+ if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE
+ && OMP_CLAUSE_DECL (*c) == real_decl)
+ {
+ error ("%Hiteration variable %qD should not be firstprivate",
+ &loc, real_decl);
+ *c = OMP_CLAUSE_CHAIN (*c);
+ }
+ else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_LASTPRIVATE
+ && OMP_CLAUSE_DECL (*c) == real_decl)
+ {
+ /* Add lastprivate (decl) clause to OMP_FOR_CLAUSES,
+ change it to shared (decl) in OMP_PARALLEL_CLAUSES. */
+ tree l = build_omp_clause (OMP_CLAUSE_LASTPRIVATE);
+ OMP_CLAUSE_DECL (l) = real_decl;
+ OMP_CLAUSE_CHAIN (l) = clauses;
+ CP_OMP_CLAUSE_INFO (l) = CP_OMP_CLAUSE_INFO (*c);
+ clauses = l;
+ OMP_CLAUSE_SET_CODE (*c, OMP_CLAUSE_SHARED);
+ CP_OMP_CLAUSE_INFO (*c) = NULL;
+ add_private_clause = false;
+ }
+ else
+ {
+ if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_PRIVATE
+ && OMP_CLAUSE_DECL (*c) == real_decl)
+ add_private_clause = false;
+ c = &OMP_CLAUSE_CHAIN (*c);
+ }
+ }
+
+ if (add_private_clause)
+ {
+ tree c;
+ for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
+ {
+ if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
+ && OMP_CLAUSE_DECL (c) == decl)
+ break;
+ else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
+ && OMP_CLAUSE_DECL (c) == decl)
+ error ("%Hiteration variable %qD should not be firstprivate",
+ &loc, decl);
+ else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
+ && OMP_CLAUSE_DECL (c) == decl)
+ error ("%Hiteration variable %qD should not be reduction",
+ &loc, decl);
+ }
+ if (c == NULL)
+ {
+ c = build_omp_clause (OMP_CLAUSE_PRIVATE);
+ OMP_CLAUSE_DECL (c) = decl;
+ c = finish_omp_clauses (c);
+ if (c)
+ {
+ OMP_CLAUSE_CHAIN (c) = clauses;
+ clauses = c;
+ }
}
}
- else
- cp_parser_abort_tentative_parse (parser);
- /* If parsing as an initialized declaration failed, try again as
- a simple expression. */
- if (decl == NULL)
- init = cp_parser_expression (parser, false);
- }
- cp_parser_require (parser, CPP_SEMICOLON, "%<;%>");
- pre_body = pop_stmt_list (pre_body);
+ cond = NULL;
+ if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
+ {
+ /* If decl is an iterator, preserve LHS and RHS of the relational
+ expr until finish_omp_for. */
+ if (decl
+ && (type_dependent_expression_p (decl)
+ || CLASS_TYPE_P (TREE_TYPE (decl))))
+ cond = cp_parser_omp_for_cond (parser, decl);
+ else
+ cond = cp_parser_condition (parser);
+ }
+ cp_parser_require (parser, CPP_SEMICOLON, "%<;%>");
- cond = NULL;
- if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
- cond = cp_parser_condition (parser);
- cp_parser_require (parser, CPP_SEMICOLON, "%<;%>");
+ incr = NULL;
+ if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
+ {
+ /* If decl is an iterator, preserve the operator on decl
+ until finish_omp_for. */
+ if (decl
+ && (type_dependent_expression_p (decl)
+ || CLASS_TYPE_P (TREE_TYPE (decl))))
+ incr = cp_parser_omp_for_incr (parser, decl);
+ else
+ incr = cp_parser_expression (parser, false);
+ }
- incr = NULL;
- if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
- incr = cp_parser_expression (parser, false);
+ if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "%<)%>"))
+ cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
+ /*or_comma=*/false,
+ /*consume_paren=*/true);
- if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "%<)%>"))
- cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
- /*or_comma=*/false,
- /*consume_paren=*/true);
+ TREE_VEC_ELT (declv, i) = decl;
+ TREE_VEC_ELT (initv, i) = init;
+ TREE_VEC_ELT (condv, i) = cond;
+ TREE_VEC_ELT (incrv, i) = incr;
+
+ if (i == collapse - 1)
+ break;
+
+ /* FIXME: The OpenMP 3.0 draft isn't very clear on what exactly is allowed
+ between the collapsed for loops for them to still be considered perfectly
+ nested. Hopefully the final version clarifies this.
+ For now handle (multiple) {'s and empty statements. */
+ cp_parser_parse_tentatively (parser);
+ do
+ {
+ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
+ break;
+ else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
+ {
+ cp_lexer_consume_token (parser->lexer);
+ bracecount++;
+ }
+ else if (bracecount
+ && cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
+ cp_lexer_consume_token (parser->lexer);
+ else
+ {
+ loc = cp_lexer_peek_token (parser->lexer)->location;
+ error ("%Hnot enough collapsed for loops", &loc);
+ collapse_err = true;
+ cp_parser_abort_tentative_parse (parser);
+ declv = NULL_TREE;
+ break;
+ }
+ }
+ while (1);
+
+ if (declv)
+ {
+ cp_parser_parse_definitely (parser);
+ nbraces += bracecount;
+ }
+ }
/* Note that we saved the original contents of this flag when we entered
the structured block, and so we don't need to re-save it here. */
@@ -20309,7 +20745,38 @@ cp_parser_omp_for_loop (cp_parser *parser)
cp_parser_statement (parser, NULL_TREE, false, NULL);
body = pop_stmt_list (body);
- return finish_omp_for (loc, decl, init, cond, incr, body, pre_body);
+ if (declv == NULL_TREE)
+ ret = NULL_TREE;
+ else
+ ret = finish_omp_for (loc_first, declv, initv, condv, incrv, body,
+ pre_body, clauses);
+
+ while (nbraces)
+ {
+ if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
+ {
+ cp_lexer_consume_token (parser->lexer);
+ nbraces--;
+ }
+ else if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
+ cp_lexer_consume_token (parser->lexer);
+ else
+ {
+ if (!collapse_err)
+ error ("collapsed loops not perfectly nested");
+ collapse_err = true;
+ cp_parser_statement_seq_opt (parser, NULL);
+ cp_parser_require (parser, CPP_CLOSE_BRACE, "%<}%>");
+ }
+ }
+
+ while (for_block)
+ {
+ add_stmt (pop_stmt_list (TREE_VALUE (for_block)));
+ for_block = TREE_CHAIN (for_block);
+ }
+
+ return ret;
}
/* OpenMP 2.5:
@@ -20323,7 +20790,8 @@ cp_parser_omp_for_loop (cp_parser *parser)
| (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (1u << PRAGMA_OMP_CLAUSE_ORDERED) \
| (1u << PRAGMA_OMP_CLAUSE_SCHEDULE) \
- | (1u << PRAGMA_OMP_CLAUSE_NOWAIT))
+ | (1u << PRAGMA_OMP_CLAUSE_NOWAIT) \
+ | (1u << PRAGMA_OMP_CLAUSE_COLLAPSE))
static tree
cp_parser_omp_for (cp_parser *parser, cp_token *pragma_tok)
@@ -20337,9 +20805,7 @@ cp_parser_omp_for (cp_parser *parser, cp_token *pragma_tok)
sb = begin_omp_structured_block ();
save = cp_parser_begin_omp_structured_block (parser);
- ret = cp_parser_omp_for_loop (parser);
- if (ret)
- OMP_FOR_CLAUSES (ret) = clauses;
+ ret = cp_parser_omp_for_loop (parser, clauses, NULL);
cp_parser_end_omp_structured_block (parser, save);
add_stmt (finish_omp_structured_block (sb));
@@ -20537,9 +21003,7 @@ cp_parser_omp_parallel (cp_parser *parser, cp_token *pragma_tok)
case PRAGMA_OMP_PARALLEL_FOR:
c_split_parallel_clauses (clauses, &par_clause, &ws_clause);
- stmt = cp_parser_omp_for_loop (parser);
- if (stmt)
- OMP_FOR_CLAUSES (stmt) = ws_clause;
+ cp_parser_omp_for_loop (parser, ws_clause, &par_clause);
break;
case PRAGMA_OMP_PARALLEL_SECTIONS:
@@ -20584,6 +21048,43 @@ cp_parser_omp_single (cp_parser *parser, cp_token *pragma_tok)
return add_stmt (stmt);
}
+/* OpenMP 3.0:
+ # pragma omp task task-clause[optseq] new-line
+ structured-block */
+
+#define OMP_TASK_CLAUSE_MASK \
+ ( (1u << PRAGMA_OMP_CLAUSE_IF) \
+ | (1u << PRAGMA_OMP_CLAUSE_UNTIED) \
+ | (1u << PRAGMA_OMP_CLAUSE_DEFAULT) \
+ | (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \
+ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
+ | (1u << PRAGMA_OMP_CLAUSE_SHARED))
+
+static tree
+cp_parser_omp_task (cp_parser *parser, cp_token *pragma_tok)
+{
+ tree clauses, block;
+ unsigned int save;
+
+ clauses = cp_parser_omp_all_clauses (parser, OMP_TASK_CLAUSE_MASK,
+ "#pragma omp task", pragma_tok);
+ block = begin_omp_task ();
+ save = cp_parser_begin_omp_structured_block (parser);
+ cp_parser_statement (parser, NULL_TREE, false, NULL);
+ cp_parser_end_omp_structured_block (parser, save);
+ return finish_omp_task (clauses, block);
+}
+
+/* OpenMP 3.0:
+ # pragma omp taskwait new-line */
+
+static void
+cp_parser_omp_taskwait (cp_parser *parser, cp_token *pragma_tok)
+{
+ cp_parser_require_pragma_eol (parser, pragma_tok);
+ finish_omp_taskwait ();
+}
+
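   An editorial sketch (not from the patch) combining the constructs added
   above: task with the new untied clause, and taskwait to join the child tasks
   before their results are used:

       int fib (int n)
       {
         int x, y;
         if (n < 2)
           return n;
       #pragma omp task shared(x) untied
         x = fib (n - 1);
       #pragma omp task shared(y) untied
         y = fib (n - 2);
       #pragma omp taskwait
         return x + y;
       }
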
/* OpenMP 2.5:
# pragma omp threadprivate (variable-list) */
@@ -20631,6 +21132,9 @@ cp_parser_omp_construct (cp_parser *parser, cp_token *pragma_tok)
case PRAGMA_OMP_SINGLE:
stmt = cp_parser_omp_single (parser, pragma_tok);
break;
+ case PRAGMA_OMP_TASK:
+ stmt = cp_parser_omp_task (parser, pragma_tok);
+ break;
default:
gcc_unreachable ();
}
@@ -20738,6 +21242,21 @@ cp_parser_pragma (cp_parser *parser, enum pragma_context context)
}
break;
+ case PRAGMA_OMP_TASKWAIT:
+ switch (context)
+ {
+ case pragma_compound:
+ cp_parser_omp_taskwait (parser, pragma_tok);
+ return false;
+ case pragma_stmt:
+ error ("%<#pragma omp taskwait%> may only be "
+ "used in compound statements");
+ break;
+ default:
+ goto bad_stmt;
+ }
+ break;
+
case PRAGMA_OMP_THREADPRIVATE:
cp_parser_omp_threadprivate (parser, pragma_tok);
return false;
@@ -20750,6 +21269,7 @@ cp_parser_pragma (cp_parser *parser, enum pragma_context context)
case PRAGMA_OMP_PARALLEL:
case PRAGMA_OMP_SECTIONS:
case PRAGMA_OMP_SINGLE:
+ case PRAGMA_OMP_TASK:
if (context == pragma_external)
goto bad_stmt;
cp_parser_omp_construct (parser, pragma_tok);
diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c
index 4bb43addb74..f141b74a6fd 100644
--- a/gcc/cp/pt.c
+++ b/gcc/cp/pt.c
@@ -10214,16 +10214,26 @@ tsubst_omp_clauses (tree clauses, tree args, tsubst_flags_t complain,
switch (OMP_CLAUSE_CODE (nc))
{
+ case OMP_CLAUSE_LASTPRIVATE:
+ if (OMP_CLAUSE_LASTPRIVATE_STMT (oc))
+ {
+ OMP_CLAUSE_LASTPRIVATE_STMT (nc) = push_stmt_list ();
+ tsubst_expr (OMP_CLAUSE_LASTPRIVATE_STMT (oc), args, complain,
+ in_decl, /*integral_constant_expression_p=*/false);
+ OMP_CLAUSE_LASTPRIVATE_STMT (nc)
+ = pop_stmt_list (OMP_CLAUSE_LASTPRIVATE_STMT (nc));
+ }
+ /* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_SHARED:
case OMP_CLAUSE_FIRSTPRIVATE:
- case OMP_CLAUSE_LASTPRIVATE:
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_SCHEDULE:
+ case OMP_CLAUSE_COLLAPSE:
OMP_CLAUSE_OPERAND (nc, 0)
= tsubst_expr (OMP_CLAUSE_OPERAND (oc, 0), args, complain,
in_decl, /*integral_constant_expression_p=*/false);
@@ -10231,6 +10241,7 @@ tsubst_omp_clauses (tree clauses, tree args, tsubst_flags_t complain,
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_DEFAULT:
+ case OMP_CLAUSE_UNTIED:
break;
default:
gcc_unreachable ();
@@ -10274,6 +10285,137 @@ tsubst_copy_asm_operands (tree t, tree args, tsubst_flags_t complain,
#undef RECUR
}
+/* Substitute one OMP_FOR iterator. */
+
+static void
+tsubst_omp_for_iterator (tree t, int i, tree declv, tree initv,
+ tree condv, tree incrv, tree *clauses,
+ tree args, tsubst_flags_t complain, tree in_decl,
+ bool integral_constant_expression_p)
+{
+#define RECUR(NODE) \
+ tsubst_expr ((NODE), args, complain, in_decl, \
+ integral_constant_expression_p)
+ tree decl, init, cond, incr;
+
+ init = TREE_VEC_ELT (OMP_FOR_INIT (t), i);
+ gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
+ decl = RECUR (TREE_OPERAND (init, 0));
+ init = TREE_OPERAND (init, 1);
+ gcc_assert (!type_dependent_expression_p (decl));
+
+ if (!CLASS_TYPE_P (TREE_TYPE (decl)))
+ {
+ cond = RECUR (TREE_VEC_ELT (OMP_FOR_COND (t), i));
+ incr = TREE_VEC_ELT (OMP_FOR_INCR (t), i);
+ if (TREE_CODE (incr) == MODIFY_EXPR)
+ incr = build_x_modify_expr (RECUR (TREE_OPERAND (incr, 0)), NOP_EXPR,
+ RECUR (TREE_OPERAND (incr, 1)),
+ complain);
+ else
+ incr = RECUR (incr);
+ TREE_VEC_ELT (declv, i) = decl;
+ TREE_VEC_ELT (initv, i) = init;
+ TREE_VEC_ELT (condv, i) = cond;
+ TREE_VEC_ELT (incrv, i) = incr;
+ return;
+ }
+
+ if (init && TREE_CODE (init) != DECL_EXPR)
+ {
+ tree c;
+ for (c = *clauses; c ; c = OMP_CLAUSE_CHAIN (c))
+ {
+ if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
+ && OMP_CLAUSE_DECL (c) == decl)
+ break;
+ else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
+ && OMP_CLAUSE_DECL (c) == decl)
+ error ("iteration variable %qD should not be firstprivate", decl);
+ else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
+ && OMP_CLAUSE_DECL (c) == decl)
+ error ("iteration variable %qD should not be reduction", decl);
+ }
+ if (c == NULL)
+ {
+ c = build_omp_clause (OMP_CLAUSE_PRIVATE);
+ OMP_CLAUSE_DECL (c) = decl;
+ c = finish_omp_clauses (c);
+ if (c)
+ {
+ OMP_CLAUSE_CHAIN (c) = *clauses;
+ *clauses = c;
+ }
+ }
+ }
+ cond = TREE_VEC_ELT (OMP_FOR_COND (t), i);
+ if (COMPARISON_CLASS_P (cond))
+ cond = build2 (TREE_CODE (cond), boolean_type_node,
+ RECUR (TREE_OPERAND (cond, 0)),
+ RECUR (TREE_OPERAND (cond, 1)));
+ else
+ cond = RECUR (cond);
+ incr = TREE_VEC_ELT (OMP_FOR_INCR (t), i);
+ switch (TREE_CODE (incr))
+ {
+ case PREINCREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ incr = build2 (TREE_CODE (incr), TREE_TYPE (decl),
+ RECUR (TREE_OPERAND (incr, 0)), NULL_TREE);
+ break;
+ case MODIFY_EXPR:
+ if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
+ || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR)
+ {
+ tree rhs = TREE_OPERAND (incr, 1);
+ incr = build2 (MODIFY_EXPR, TREE_TYPE (decl),
+ RECUR (TREE_OPERAND (incr, 0)),
+ build2 (TREE_CODE (rhs), TREE_TYPE (decl),
+ RECUR (TREE_OPERAND (rhs, 0)),
+ RECUR (TREE_OPERAND (rhs, 1))));
+ }
+ else
+ incr = RECUR (incr);
+ break;
+ case MODOP_EXPR:
+ if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
+ || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR)
+ {
+ tree lhs = RECUR (TREE_OPERAND (incr, 0));
+ incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs,
+ build2 (TREE_CODE (TREE_OPERAND (incr, 1)),
+ TREE_TYPE (decl), lhs,
+ RECUR (TREE_OPERAND (incr, 2))));
+ }
+ else if (TREE_CODE (TREE_OPERAND (incr, 1)) == NOP_EXPR
+ && (TREE_CODE (TREE_OPERAND (incr, 2)) == PLUS_EXPR
+ || (TREE_CODE (TREE_OPERAND (incr, 2)) == MINUS_EXPR)))
+ {
+ tree rhs = TREE_OPERAND (incr, 2);
+ incr = build2 (MODIFY_EXPR, TREE_TYPE (decl),
+ RECUR (TREE_OPERAND (incr, 0)),
+ build2 (TREE_CODE (rhs), TREE_TYPE (decl),
+ RECUR (TREE_OPERAND (rhs, 0)),
+ RECUR (TREE_OPERAND (rhs, 1))));
+ }
+ else
+ incr = RECUR (incr);
+ break;
+ default:
+ incr = RECUR (incr);
+ break;
+ }
+
+ TREE_VEC_ELT (declv, i) = decl;
+ TREE_VEC_ELT (initv, i) = init;
+ TREE_VEC_ELT (condv, i) = cond;
+ TREE_VEC_ELT (incrv, i) = incr;
+#undef RECUR
+}
+
/* Like tsubst_copy for expressions, etc. but also does semantic
processing. */
@@ -10597,21 +10739,55 @@ tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl,
= OMP_PARALLEL_COMBINED (t);
break;
+ case OMP_TASK:
+ tmp = tsubst_omp_clauses (OMP_TASK_CLAUSES (t),
+ args, complain, in_decl);
+ stmt = begin_omp_task ();
+ RECUR (OMP_TASK_BODY (t));
+ finish_omp_task (tmp, stmt);
+ break;
+
case OMP_FOR:
{
- tree clauses, decl, init, cond, incr, body, pre_body;
+ tree clauses, body, pre_body;
+ tree declv, initv, condv, incrv;
+ int i;
clauses = tsubst_omp_clauses (OMP_FOR_CLAUSES (t),
args, complain, in_decl);
- init = OMP_FOR_INIT (t);
- gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
- decl = RECUR (TREE_OPERAND (init, 0));
- init = RECUR (TREE_OPERAND (init, 1));
- cond = RECUR (OMP_FOR_COND (t));
- incr = RECUR (OMP_FOR_INCR (t));
+ declv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t)));
+ initv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t)));
+ condv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t)));
+ incrv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t)));
+
+ for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (t)); i++)
+ tsubst_omp_for_iterator (t, i, declv, initv, condv, incrv,
+ &clauses, args, complain, in_decl,
+ integral_constant_expression_p);
stmt = begin_omp_structured_block ();
+ for (i = 0; i < TREE_VEC_LENGTH (initv); i++)
+ if (TREE_VEC_ELT (initv, i) == NULL
+ || TREE_CODE (TREE_VEC_ELT (initv, i)) != DECL_EXPR)
+ TREE_VEC_ELT (initv, i) = RECUR (TREE_VEC_ELT (initv, i));
+ else if (CLASS_TYPE_P (TREE_TYPE (TREE_VEC_ELT (initv, i))))
+ {
+ tree init = RECUR (TREE_VEC_ELT (initv, i));
+ gcc_assert (init == TREE_VEC_ELT (declv, i));
+ TREE_VEC_ELT (initv, i) = NULL_TREE;
+ }
+ else
+ {
+ tree decl_expr = TREE_VEC_ELT (initv, i);
+ tree init = DECL_INITIAL (DECL_EXPR_DECL (decl_expr));
+ gcc_assert (init != NULL);
+ TREE_VEC_ELT (initv, i) = RECUR (init);
+ DECL_INITIAL (DECL_EXPR_DECL (decl_expr)) = NULL;
+ RECUR (decl_expr);
+ DECL_INITIAL (DECL_EXPR_DECL (decl_expr)) = init;
+ }
+
pre_body = push_stmt_list ();
RECUR (OMP_FOR_PRE_BODY (t));
pre_body = pop_stmt_list (pre_body);
@@ -10620,10 +10796,8 @@ tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl,
RECUR (OMP_FOR_BODY (t));
body = pop_stmt_list (body);
- t = finish_omp_for (EXPR_LOCATION (t), decl, init, cond, incr, body,
- pre_body);
- if (t)
- OMP_FOR_CLAUSES (t) = clauses;
+ t = finish_omp_for (EXPR_LOCATION (t), declv, initv, condv, incrv,
+ body, pre_body, clauses);
add_stmt (finish_omp_structured_block (stmt));
}
@@ -16195,6 +16369,63 @@ dependent_template_id_p (tree tmpl, tree args)
|| any_dependent_template_arguments_p (args));
}
+/* Returns TRUE if OMP_FOR with DECLV, INITV, CONDV and INCRV vectors
+ is dependent. */
+
+bool
+dependent_omp_for_p (tree declv, tree initv, tree condv, tree incrv)
+{
+ int i;
+
+ if (!processing_template_decl)
+ return false;
+
+ for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
+ {
+ tree decl = TREE_VEC_ELT (declv, i);
+ tree init = TREE_VEC_ELT (initv, i);
+ tree cond = TREE_VEC_ELT (condv, i);
+ tree incr = TREE_VEC_ELT (incrv, i);
+
+ if (type_dependent_expression_p (decl))
+ return true;
+
+ if (init && type_dependent_expression_p (init))
+ return true;
+
+ if (type_dependent_expression_p (cond))
+ return true;
+
+ if (COMPARISON_CLASS_P (cond)
+ && (type_dependent_expression_p (TREE_OPERAND (cond, 0))
+ || type_dependent_expression_p (TREE_OPERAND (cond, 1))))
+ return true;
+
+ if (TREE_CODE (incr) == MODOP_EXPR)
+ {
+ if (type_dependent_expression_p (TREE_OPERAND (incr, 0))
+ || type_dependent_expression_p (TREE_OPERAND (incr, 2)))
+ return true;
+ }
+ else if (type_dependent_expression_p (incr))
+ return true;
+ else if (TREE_CODE (incr) == MODIFY_EXPR)
+ {
+ if (type_dependent_expression_p (TREE_OPERAND (incr, 0)))
+ return true;
+ else if (BINARY_CLASS_P (TREE_OPERAND (incr, 1)))
+ {
+ tree t = TREE_OPERAND (incr, 1);
+ if (type_dependent_expression_p (TREE_OPERAND (t, 0))
+ || type_dependent_expression_p (TREE_OPERAND (t, 1)))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
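   A hypothetical template (editorial) of the situation dependent_omp_for_p
   detects: while T is dependent the OMP_FOR is kept as a placeholder and only
   fully checked when the template is instantiated:

       template <typename T>
       void fill (T *p, T n)
       {
       #pragma omp parallel for
         for (T i = 0; i < n; i++)
           p[i] = i;
       }
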
/* TYPE is a TYPENAME_TYPE. Returns the ordinary TYPE to which the
TYPENAME_TYPE corresponds. Returns the original TYPENAME_TYPE if
no such TYPE can be found. Note that this function peers inside
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index 96999bf7e89..83d23394cb5 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -3359,6 +3359,94 @@ omp_clause_info_fndecl (tree t, tree type)
return NULL_TREE;
}
+/* Create CP_OMP_CLAUSE_INFO for clause C. Returns true if it is invalid. */
+
+bool
+cxx_omp_create_clause_info (tree c, tree type, bool need_default_ctor,
+ bool need_copy_ctor, bool need_copy_assignment)
+{
+ int save_errorcount = errorcount;
+ tree info, t;
+
+ /* Always allocate 3 elements for simplicity. These are the
+ function decls for the ctor, dtor, and assignment op.
+ This layout is known to the three lang hooks,
+ cxx_omp_clause_default_ctor, cxx_omp_clause_copy_ctor,
+ and cxx_omp_clause_assign_op. */
+ info = make_tree_vec (3);
+ CP_OMP_CLAUSE_INFO (c) = info;
+
+ if (need_default_ctor
+ || (need_copy_ctor && !TYPE_HAS_TRIVIAL_INIT_REF (type)))
+ {
+ if (need_default_ctor)
+ t = NULL;
+ else
+ {
+ t = build_int_cst (build_pointer_type (type), 0);
+ t = build1 (INDIRECT_REF, type, t);
+ t = build_tree_list (NULL, t);
+ }
+ t = build_special_member_call (NULL_TREE, complete_ctor_identifier,
+ t, type, LOOKUP_NORMAL,
+ tf_warning_or_error);
+
+ if (targetm.cxx.cdtor_returns_this () || errorcount)
+ /* Because constructors and destructors return this,
+ the call will have been cast to "void". Remove the
+ cast here. We would like to use STRIP_NOPS, but it
+ wouldn't work here because TYPE_MODE (t) and
+ TYPE_MODE (TREE_OPERAND (t, 0)) are different.
+ They are VOIDmode and Pmode, respectively. */
+ if (TREE_CODE (t) == NOP_EXPR)
+ t = TREE_OPERAND (t, 0);
+
+ TREE_VEC_ELT (info, 0) = get_callee_fndecl (t);
+ }
+
+ if ((need_default_ctor || need_copy_ctor)
+ && TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type))
+ {
+ t = build_int_cst (build_pointer_type (type), 0);
+ t = build1 (INDIRECT_REF, type, t);
+ t = build_special_member_call (t, complete_dtor_identifier,
+ NULL, type, LOOKUP_NORMAL,
+ tf_warning_or_error);
+
+ if (targetm.cxx.cdtor_returns_this () || errorcount)
+ /* Because constructors and destructors return this,
+ the call will have been cast to "void". Remove the
+ cast here. We would like to use STRIP_NOPS, but it
+ wouldn't work here because TYPE_MODE (t) and
+ TYPE_MODE (TREE_OPERAND (t, 0)) are different.
+ They are VOIDmode and Pmode, respectively. */
+ if (TREE_CODE (t) == NOP_EXPR)
+ t = TREE_OPERAND (t, 0);
+
+ TREE_VEC_ELT (info, 1) = omp_clause_info_fndecl (t, type);
+ }
+
+ if (need_copy_assignment && !TYPE_HAS_TRIVIAL_ASSIGN_REF (type))
+ {
+ t = build_int_cst (build_pointer_type (type), 0);
+ t = build1 (INDIRECT_REF, type, t);
+ t = build_special_member_call (t, ansi_assopname (NOP_EXPR),
+ build_tree_list (NULL, t),
+ type, LOOKUP_NORMAL,
+ tf_warning_or_error);
+
+ /* We'll have called convert_from_reference on the call, which
+ may well have added an indirect_ref. It's unneeded here,
+ and in the way, so kill it. */
+ if (TREE_CODE (t) == INDIRECT_REF)
+ t = TREE_OPERAND (t, 0);
+
+ TREE_VEC_ELT (info, 2) = omp_clause_info_fndecl (t, type);
+ }
+
+ return errorcount != save_errorcount;
+}
+
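   An invented example (editorial) of the kind of clause cxx_omp_create_clause_info
   services: a class with user-defined copy constructor and destructor named in
   firstprivate, whose special member functions are looked up here and saved in
   the 3-element info vector:

       struct T { T (); T (const T &); ~T (); int i; };

       void h (T t)
       {
       #pragma omp parallel firstprivate (t)
         t.i++;
       }
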
/* For all elements of CLAUSES, validate them vs OpenMP constraints.
Remove any elements from the list that are invalid. */
@@ -3499,6 +3587,8 @@ finish_omp_clauses (tree clauses)
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_DEFAULT:
+ case OMP_CLAUSE_UNTIED:
+ case OMP_CLAUSE_COLLAPSE:
break;
default:
@@ -3662,93 +3752,10 @@ finish_omp_clauses (tree clauses)
for making these queries. */
if (CLASS_TYPE_P (inner_type)
&& (need_default_ctor || need_copy_ctor || need_copy_assignment)
- && !type_dependent_expression_p (t))
- {
- int save_errorcount = errorcount;
- tree info;
-
- /* Always allocate 3 elements for simplicity. These are the
- function decls for the ctor, dtor, and assignment op.
- This layout is known to the three lang hooks,
- cxx_omp_clause_default_init, cxx_omp_clause_copy_init,
- and cxx_omp_clause_assign_op. */
- info = make_tree_vec (3);
- CP_OMP_CLAUSE_INFO (c) = info;
-
- if (need_default_ctor
- || (need_copy_ctor
- && !TYPE_HAS_TRIVIAL_INIT_REF (inner_type)))
- {
- if (need_default_ctor)
- t = NULL;
- else
- {
- t = build_int_cst (build_pointer_type (inner_type), 0);
- t = build1 (INDIRECT_REF, inner_type, t);
- t = build_tree_list (NULL, t);
- }
- t = build_special_member_call (NULL_TREE,
- complete_ctor_identifier,
- t, inner_type, LOOKUP_NORMAL,
- tf_warning_or_error);
-
- if (targetm.cxx.cdtor_returns_this () || errorcount)
- /* Because constructors and destructors return this,
- the call will have been cast to "void". Remove the
- cast here. We would like to use STRIP_NOPS, but it
- wouldn't work here because TYPE_MODE (t) and
- TYPE_MODE (TREE_OPERAND (t, 0)) are different.
- They are VOIDmode and Pmode, respectively. */
- if (TREE_CODE (t) == NOP_EXPR)
- t = TREE_OPERAND (t, 0);
-
- TREE_VEC_ELT (info, 0) = get_callee_fndecl (t);
- }
-
- if ((need_default_ctor || need_copy_ctor)
- && TYPE_HAS_NONTRIVIAL_DESTRUCTOR (inner_type))
- {
- t = build_int_cst (build_pointer_type (inner_type), 0);
- t = build1 (INDIRECT_REF, inner_type, t);
- t = build_special_member_call (t, complete_dtor_identifier,
- NULL, inner_type, LOOKUP_NORMAL,
- tf_warning_or_error);
-
- if (targetm.cxx.cdtor_returns_this () || errorcount)
- /* Because constructors and destructors return this,
- the call will have been cast to "void". Remove the
- cast here. We would like to use STRIP_NOPS, but it
- wouldn't work here because TYPE_MODE (t) and
- TYPE_MODE (TREE_OPERAND (t, 0)) are different.
- They are VOIDmode and Pmode, respectively. */
- if (TREE_CODE (t) == NOP_EXPR)
- t = TREE_OPERAND (t, 0);
-
- TREE_VEC_ELT (info, 1) = omp_clause_info_fndecl (t, inner_type);
- }
-
- if (need_copy_assignment
- && !TYPE_HAS_TRIVIAL_ASSIGN_REF (inner_type))
- {
- t = build_int_cst (build_pointer_type (inner_type), 0);
- t = build1 (INDIRECT_REF, inner_type, t);
- t = build_special_member_call (t, ansi_assopname (NOP_EXPR),
- build_tree_list (NULL, t),
- inner_type, LOOKUP_NORMAL,
- tf_warning_or_error);
-
- /* We'll have called convert_from_reference on the call, which
- may well have added an indirect_ref. It's unneeded here,
- and in the way, so kill it. */
- if (TREE_CODE (t) == INDIRECT_REF)
- t = TREE_OPERAND (t, 0);
-
- TREE_VEC_ELT (info, 2) = omp_clause_info_fndecl (t, inner_type);
- }
-
- if (errorcount != save_errorcount)
- remove = true;
- }
+ && !type_dependent_expression_p (t)
+ && cxx_omp_create_clause_info (c, inner_type, need_default_ctor,
+ need_copy_ctor, need_copy_assignment))
+ remove = true;
if (remove)
*pc = OMP_CLAUSE_CHAIN (c);
@@ -3787,9 +3794,10 @@ finish_omp_threadprivate (tree vars)
error ("automatic variable %qE cannot be %<threadprivate%>", v);
else if (! COMPLETE_TYPE_P (TREE_TYPE (v)))
error ("%<threadprivate%> %qE has incomplete type", v);
- else if (TREE_STATIC (v) && TYPE_P (CP_DECL_CONTEXT (v)))
- error ("%<threadprivate%> %qE is not file, namespace "
- "or block scope variable", v);
+ else if (TREE_STATIC (v) && TYPE_P (CP_DECL_CONTEXT (v))
+ && CP_DECL_CONTEXT (v) != current_class_type)
+ error ("%<threadprivate%> %qE directive not "
+ "in %qT definition", v, CP_DECL_CONTEXT (v));
else
{
/* Allocate a LANG_SPECIFIC structure for V, if needed. */
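   A brief editorial sketch of the form the relaxed check above now accepts: a
   threadprivate directive for a static data member placed inside that class's
   own definition:

       struct S
       {
         static int counter;
       #pragma omp threadprivate (counter)
       };
       int S::counter;
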
@@ -3855,6 +3863,252 @@ finish_omp_parallel (tree clauses, tree body)
return add_stmt (stmt);
}
+tree
+begin_omp_task (void)
+{
+ keep_next_level (true);
+ return begin_omp_structured_block ();
+}
+
+tree
+finish_omp_task (tree clauses, tree body)
+{
+ tree stmt;
+
+ body = finish_omp_structured_block (body);
+
+ stmt = make_node (OMP_TASK);
+ TREE_TYPE (stmt) = void_type_node;
+ OMP_TASK_CLAUSES (stmt) = clauses;
+ OMP_TASK_BODY (stmt) = body;
+
+ return add_stmt (stmt);
+}
+
+/* Helper function for finish_omp_for. Convert the Ith random access iterator
+ into an integral iterator. Return TRUE on error, FALSE if successful. */
+
+static bool
+handle_omp_for_class_iterator (int i, location_t locus, tree declv, tree initv,
+ tree condv, tree incrv, tree *body,
+ tree *pre_body, tree clauses)
+{
+ tree diff, iter_init, iter_incr = NULL, last;
+ tree incr_var = NULL, orig_pre_body, orig_body, c;
+ tree decl = TREE_VEC_ELT (declv, i);
+ tree init = TREE_VEC_ELT (initv, i);
+ tree cond = TREE_VEC_ELT (condv, i);
+ tree incr = TREE_VEC_ELT (incrv, i);
+ tree iter = decl;
+ location_t elocus = locus;
+
+ if (init && EXPR_HAS_LOCATION (init))
+ elocus = EXPR_LOCATION (init);
+
+ switch (TREE_CODE (cond))
+ {
+ case GT_EXPR:
+ case GE_EXPR:
+ case LT_EXPR:
+ case LE_EXPR:
+ if (TREE_OPERAND (cond, 0) != iter)
+ cond = error_mark_node;
+ else
+ {
+ tree tem = build_x_binary_op (TREE_CODE (cond), iter, ERROR_MARK,
+ TREE_OPERAND (cond, 1), ERROR_MARK,
+ NULL, tf_warning_or_error);
+ if (error_operand_p (tem))
+ return true;
+ }
+ break;
+ default:
+ cond = error_mark_node;
+ break;
+ }
+ if (cond == error_mark_node)
+ {
+ error ("%Hinvalid controlling predicate", &elocus);
+ return true;
+ }
+ diff = build_x_binary_op (MINUS_EXPR, TREE_OPERAND (cond, 1),
+ ERROR_MARK, iter, ERROR_MARK, NULL,
+ tf_warning_or_error);
+ if (error_operand_p (diff))
+ return true;
+ if (TREE_CODE (TREE_TYPE (diff)) != INTEGER_TYPE)
+ {
+ error ("%Hdifference between %qE and %qD does not have integer type",
+ &elocus, TREE_OPERAND (cond, 1), iter);
+ return true;
+ }
+
+ switch (TREE_CODE (incr))
+ {
+ case PREINCREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ if (TREE_OPERAND (incr, 0) != iter)
+ {
+ incr = error_mark_node;
+ break;
+ }
+ iter_incr = build_x_unary_op (TREE_CODE (incr), iter,
+ tf_warning_or_error);
+ if (error_operand_p (iter_incr))
+ return true;
+ else if (TREE_CODE (incr) == PREINCREMENT_EXPR
+ || TREE_CODE (incr) == POSTINCREMENT_EXPR)
+ incr = integer_one_node;
+ else
+ incr = integer_minus_one_node;
+ break;
+ case MODIFY_EXPR:
+ if (TREE_OPERAND (incr, 0) != iter)
+ incr = error_mark_node;
+ else if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
+ || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR)
+ {
+ tree rhs = TREE_OPERAND (incr, 1);
+ if (TREE_OPERAND (rhs, 0) == iter)
+ {
+ if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 1)))
+ != INTEGER_TYPE)
+ incr = error_mark_node;
+ else
+ {
+ iter_incr = build_x_modify_expr (iter, TREE_CODE (rhs),
+ TREE_OPERAND (rhs, 1),
+ tf_warning_or_error);
+ if (error_operand_p (iter_incr))
+ return true;
+ incr = TREE_OPERAND (rhs, 1);
+ incr = cp_convert (TREE_TYPE (diff), incr);
+ if (TREE_CODE (rhs) == MINUS_EXPR)
+ {
+ incr = build1 (NEGATE_EXPR, TREE_TYPE (diff), incr);
+ incr = fold_if_not_in_template (incr);
+ }
+ if (TREE_CODE (incr) != INTEGER_CST
+ && (TREE_CODE (incr) != NOP_EXPR
+ || (TREE_CODE (TREE_OPERAND (incr, 0))
+ != INTEGER_CST)))
+ iter_incr = NULL;
+ }
+ }
+ else if (TREE_OPERAND (rhs, 1) == iter)
+ {
+ if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 0))) != INTEGER_TYPE
+ || TREE_CODE (rhs) != PLUS_EXPR)
+ incr = error_mark_node;
+ else
+ {
+ iter_incr = build_x_binary_op (PLUS_EXPR,
+ TREE_OPERAND (rhs, 0),
+ ERROR_MARK, iter,
+ ERROR_MARK, NULL,
+ tf_warning_or_error);
+ if (error_operand_p (iter_incr))
+ return true;
+ iter_incr = build_x_modify_expr (iter, NOP_EXPR,
+ iter_incr,
+ tf_warning_or_error);
+ if (error_operand_p (iter_incr))
+ return true;
+ incr = TREE_OPERAND (rhs, 0);
+ iter_incr = NULL;
+ }
+ }
+ else
+ incr = error_mark_node;
+ }
+ else
+ incr = error_mark_node;
+ break;
+ default:
+ incr = error_mark_node;
+ break;
+ }
+
+ if (incr == error_mark_node)
+ {
+ error ("%Hinvalid increment expression", &elocus);
+ return true;
+ }
+
+ incr = cp_convert (TREE_TYPE (diff), incr);
+ for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
+ && OMP_CLAUSE_DECL (c) == iter)
+ break;
+
+ decl = create_temporary_var (TREE_TYPE (diff));
+ pushdecl (decl);
+ add_decl_expr (decl);
+ last = create_temporary_var (TREE_TYPE (diff));
+ pushdecl (last);
+ add_decl_expr (last);
+ if (c && iter_incr == NULL)
+ {
+ incr_var = create_temporary_var (TREE_TYPE (diff));
+ pushdecl (incr_var);
+ add_decl_expr (incr_var);
+ }
+ gcc_assert (stmts_are_full_exprs_p ());
+
+ orig_pre_body = *pre_body;
+ *pre_body = push_stmt_list ();
+ if (orig_pre_body)
+ add_stmt (orig_pre_body);
+ if (init != NULL)
+ finish_expr_stmt (build_x_modify_expr (iter, NOP_EXPR, init,
+ tf_warning_or_error));
+ init = build_int_cst (TREE_TYPE (diff), 0);
+ if (c && iter_incr == NULL)
+ {
+ finish_expr_stmt (build_x_modify_expr (incr_var, NOP_EXPR,
+ incr, tf_warning_or_error));
+ incr = incr_var;
+ iter_incr = build_x_modify_expr (iter, PLUS_EXPR, incr,
+ tf_warning_or_error);
+ }
+ finish_expr_stmt (build_x_modify_expr (last, NOP_EXPR, init,
+ tf_warning_or_error));
+ *pre_body = pop_stmt_list (*pre_body);
+
+ cond = cp_build_binary_op (TREE_CODE (cond), decl, diff,
+ tf_warning_or_error);
+ incr = build_modify_expr (decl, PLUS_EXPR, incr);
+
+ orig_body = *body;
+ *body = push_stmt_list ();
+ iter_init = build2 (MINUS_EXPR, TREE_TYPE (diff), decl, last);
+ iter_init = build_x_modify_expr (iter, PLUS_EXPR, iter_init,
+ tf_warning_or_error);
+ iter_init = build1 (NOP_EXPR, void_type_node, iter_init);
+ finish_expr_stmt (iter_init);
+ finish_expr_stmt (build_x_modify_expr (last, NOP_EXPR, decl,
+ tf_warning_or_error));
+ add_stmt (orig_body);
+ *body = pop_stmt_list (*body);
+
+ if (c)
+ {
+ OMP_CLAUSE_LASTPRIVATE_STMT (c) = push_stmt_list ();
+ finish_expr_stmt (iter_incr);
+ OMP_CLAUSE_LASTPRIVATE_STMT (c)
+ = pop_stmt_list (OMP_CLAUSE_LASTPRIVATE_STMT (c));
+ }
+
+ TREE_VEC_ELT (declv, i) = decl;
+ TREE_VEC_ELT (initv, i) = init;
+ TREE_VEC_ELT (condv, i) = cond;
+ TREE_VEC_ELT (incrv, i) = incr;
+
+ return false;
+}
+
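   A rough editorial schematic (invented temporaries, not the exact trees built
   here) of what handle_omp_for_class_iterator does with a loop of the form
   "for (iter = first; iter < finish; iter += step) body;": it iterates an
   integral counter and keeps the iterator in sync inside the body:

       iter = first;                          /* emitted into *pre_body      */
       difference_type __n = finish - iter;   /* trip count                  */
       difference_type __i, __last = 0;
       for (__i = 0; __i < __n; __i += step)
         {
           iter += __i - __last;              /* re-synchronize the iterator */
           __last = __i;
           body;
         }
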
/* Build and validate an OMP_FOR statement. CLAUSES, BODY, COND, INCR
are directly for their associated operands in the statement. DECL
and INIT are a combo; if DECL is NULL then INIT ought to be a
@@ -3863,126 +4117,203 @@ finish_omp_parallel (tree clauses, tree body)
sk_omp scope. */
tree
-finish_omp_for (location_t locus, tree decl, tree init, tree cond,
- tree incr, tree body, tree pre_body)
+finish_omp_for (location_t locus, tree declv, tree initv, tree condv,
+ tree incrv, tree body, tree pre_body, tree clauses)
{
- tree omp_for = NULL;
+ tree omp_for = NULL, orig_incr = NULL;
+ tree decl, init, cond, incr;
+ location_t elocus;
+ int i;
- if (decl == NULL)
+ gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
+ gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
+ gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
+ for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
{
- if (init != NULL)
- switch (TREE_CODE (init))
- {
- case MODIFY_EXPR:
- decl = TREE_OPERAND (init, 0);
- init = TREE_OPERAND (init, 1);
- break;
- case MODOP_EXPR:
- if (TREE_CODE (TREE_OPERAND (init, 1)) == NOP_EXPR)
+ decl = TREE_VEC_ELT (declv, i);
+ init = TREE_VEC_ELT (initv, i);
+ cond = TREE_VEC_ELT (condv, i);
+ incr = TREE_VEC_ELT (incrv, i);
+ elocus = locus;
+
+ if (decl == NULL)
+ {
+ if (init != NULL)
+ switch (TREE_CODE (init))
{
+ case MODIFY_EXPR:
decl = TREE_OPERAND (init, 0);
- init = TREE_OPERAND (init, 2);
+ init = TREE_OPERAND (init, 1);
+ break;
+ case MODOP_EXPR:
+ if (TREE_CODE (TREE_OPERAND (init, 1)) == NOP_EXPR)
+ {
+ decl = TREE_OPERAND (init, 0);
+ init = TREE_OPERAND (init, 2);
+ }
+ break;
+ default:
+ break;
}
- break;
- default:
- break;
- }
- if (decl == NULL)
- {
- error ("expected iteration declaration or initialization");
- return NULL;
+ if (decl == NULL)
+ {
+ error ("%Hexpected iteration declaration or initialization",
+ &locus);
+ return NULL;
+ }
}
- }
- if (type_dependent_expression_p (decl)
- || type_dependent_expression_p (init)
- || (cond && type_dependent_expression_p (cond))
- || (incr && type_dependent_expression_p (incr)))
- {
- tree stmt;
+ if (init && EXPR_HAS_LOCATION (init))
+ elocus = EXPR_LOCATION (init);
if (cond == NULL)
{
- error ("%Hmissing controlling predicate", &locus);
+ error ("%Hmissing controlling predicate", &elocus);
return NULL;
}
if (incr == NULL)
{
- error ("%Hmissing increment expression", &locus);
+ error ("%Hmissing increment expression", &elocus);
return NULL;
}
+ TREE_VEC_ELT (declv, i) = decl;
+ TREE_VEC_ELT (initv, i) = init;
+ }
+
+ if (dependent_omp_for_p (declv, initv, condv, incrv))
+ {
+ tree stmt;
+
stmt = make_node (OMP_FOR);
- /* This is really just a place-holder. We'll be decomposing this
- again and going through the build_modify_expr path below when
- we instantiate the thing. */
- init = build2 (MODIFY_EXPR, void_type_node, decl, init);
+ for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
+ {
+ /* This is really just a place-holder. We'll be decomposing this
+ again and going through the cp_build_modify_expr path below when
+ we instantiate the thing. */
+ TREE_VEC_ELT (initv, i)
+ = build2 (MODIFY_EXPR, void_type_node, TREE_VEC_ELT (declv, i),
+ TREE_VEC_ELT (initv, i));
+ }
TREE_TYPE (stmt) = void_type_node;
- OMP_FOR_INIT (stmt) = init;
- OMP_FOR_COND (stmt) = cond;
- OMP_FOR_INCR (stmt) = incr;
+ OMP_FOR_INIT (stmt) = initv;
+ OMP_FOR_COND (stmt) = condv;
+ OMP_FOR_INCR (stmt) = incrv;
OMP_FOR_BODY (stmt) = body;
OMP_FOR_PRE_BODY (stmt) = pre_body;
+ OMP_FOR_CLAUSES (stmt) = clauses;
SET_EXPR_LOCATION (stmt, locus);
return add_stmt (stmt);
}
- if (!DECL_P (decl))
- {
- error ("expected iteration declaration or initialization");
- return NULL;
- }
+ if (processing_template_decl)
+ orig_incr = make_tree_vec (TREE_VEC_LENGTH (incrv));
- if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
+ for (i = 0; i < TREE_VEC_LENGTH (declv); )
{
- location_t elocus = locus;
-
- if (EXPR_HAS_LOCATION (init))
+ decl = TREE_VEC_ELT (declv, i);
+ init = TREE_VEC_ELT (initv, i);
+ cond = TREE_VEC_ELT (condv, i);
+ incr = TREE_VEC_ELT (incrv, i);
+ if (orig_incr)
+ TREE_VEC_ELT (orig_incr, i) = incr;
+ elocus = locus;
+
+ if (init && EXPR_HAS_LOCATION (init))
elocus = EXPR_LOCATION (init);
- error ("%Hinvalid type for iteration variable %qE", &elocus, decl);
- return NULL;
- }
- if (pre_body == NULL || IS_EMPTY_STMT (pre_body))
- pre_body = NULL;
- else if (! processing_template_decl)
- {
- add_stmt (pre_body);
- pre_body = NULL;
- }
+ if (!DECL_P (decl))
+ {
+ error ("%Hexpected iteration declaration or initialization",
+ &elocus);
+ return NULL;
+ }
- if (!processing_template_decl)
- init = fold_build_cleanup_point_expr (TREE_TYPE (init), init);
- init = cp_build_modify_expr (decl, NOP_EXPR, init, tf_warning_or_error);
- if (cond && TREE_SIDE_EFFECTS (cond) && COMPARISON_CLASS_P (cond))
- {
- int n = TREE_SIDE_EFFECTS (TREE_OPERAND (cond, 1)) != 0;
- tree t = TREE_OPERAND (cond, n);
+ if (incr && TREE_CODE (incr) == MODOP_EXPR)
+ {
+ if (orig_incr)
+ TREE_VEC_ELT (orig_incr, i) = incr;
+ incr = cp_build_modify_expr (TREE_OPERAND (incr, 0),
+ TREE_CODE (TREE_OPERAND (incr, 1)),
+ TREE_OPERAND (incr, 2),
+ tf_warning_or_error);
+ }
+
+ if (CLASS_TYPE_P (TREE_TYPE (decl)))
+ {
+ if (handle_omp_for_class_iterator (i, locus, declv, initv, condv,
+ incrv, &body, &pre_body, clauses))
+ return NULL;
+ continue;
+ }
+
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
+ && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
+ {
+ error ("%Hinvalid type for iteration variable %qE", &elocus, decl);
+ return NULL;
+ }
if (!processing_template_decl)
- TREE_OPERAND (cond, n)
- = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
+ init = fold_build_cleanup_point_expr (TREE_TYPE (init), init);
+ init = cp_build_modify_expr (decl, NOP_EXPR, init, tf_warning_or_error);
+ if (cond && TREE_SIDE_EFFECTS (cond) && COMPARISON_CLASS_P (cond))
+ {
+ int n = TREE_SIDE_EFFECTS (TREE_OPERAND (cond, 1)) != 0;
+ tree t = TREE_OPERAND (cond, n);
+
+ if (!processing_template_decl)
+ TREE_OPERAND (cond, n)
+ = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
+ }
+ if (decl == error_mark_node || init == error_mark_node)
+ return NULL;
+
+ TREE_VEC_ELT (declv, i) = decl;
+ TREE_VEC_ELT (initv, i) = init;
+ TREE_VEC_ELT (condv, i) = cond;
+ TREE_VEC_ELT (incrv, i) = incr;
+ i++;
}
- if (decl != error_mark_node && init != error_mark_node)
- omp_for = c_finish_omp_for (locus, decl, init, cond, incr, body, pre_body);
- if (omp_for != NULL
- && TREE_CODE (OMP_FOR_INCR (omp_for)) == MODIFY_EXPR
- && TREE_SIDE_EFFECTS (TREE_OPERAND (OMP_FOR_INCR (omp_for), 1))
- && BINARY_CLASS_P (TREE_OPERAND (OMP_FOR_INCR (omp_for), 1)))
+
+ if (IS_EMPTY_STMT (pre_body))
+ pre_body = NULL;
+
+ omp_for = c_finish_omp_for (locus, declv, initv, condv, incrv,
+ body, pre_body);
+
+ if (omp_for == NULL)
+ return NULL;
+
+ for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INCR (omp_for)); i++)
{
- tree t = TREE_OPERAND (OMP_FOR_INCR (omp_for), 1);
- int n = TREE_SIDE_EFFECTS (TREE_OPERAND (t, 1)) != 0;
+ tree incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i);
- if (!processing_template_decl)
- TREE_OPERAND (t, n)
- = fold_build_cleanup_point_expr (TREE_TYPE (TREE_OPERAND (t, n)),
- TREE_OPERAND (t, n));
+ if (TREE_CODE (incr) != MODIFY_EXPR)
+ continue;
+
+ if (TREE_SIDE_EFFECTS (TREE_OPERAND (incr, 1))
+ && BINARY_CLASS_P (TREE_OPERAND (incr, 1)))
+ {
+ tree t = TREE_OPERAND (incr, 1);
+ int n = TREE_SIDE_EFFECTS (TREE_OPERAND (t, 1)) != 0;
+
+ if (!processing_template_decl)
+ TREE_OPERAND (t, n)
+ = fold_build_cleanup_point_expr (TREE_TYPE (TREE_OPERAND (t, n)),
+ TREE_OPERAND (t, n));
+ }
+
+ if (orig_incr)
+ TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i) = TREE_VEC_ELT (orig_incr, i);
}
+ if (omp_for != NULL)
+ OMP_FOR_CLAUSES (omp_for) = clauses;
return omp_for;
}
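
The rewrite above stores OMP_FOR_INIT/COND/INCR as TREE_VECs with one slot per collapsed nesting level, and defers decomposition via dependent_omp_for_p when inside a template. A minimal sketch of the kind of nest that now flows through this path follows; it is illustrative only, not part of the patch, uses plain C for brevity, and all variable names are invented.

    #include <stdio.h>

    int main (void)
    {
      int i, j, sum = 0;

      /* Each loop level gets its own slot: initv[0]/condv[0]/incrv[0]
         describe the i loop, initv[1]/condv[1]/incrv[1] the j loop.  */
    #pragma omp parallel for collapse(2) reduction(+:sum)
      for (i = 0; i < 4; i++)
        for (j = 0; j < 8; j++)
          sum += i * 8 + j;

      printf ("%d\n", sum);
      return 0;
    }
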
@@ -4039,26 +4370,12 @@ finish_omp_flush (void)
finish_expr_stmt (stmt);
}
-/* True if OpenMP sharing attribute of DECL is predetermined. */
-
-enum omp_clause_default_kind
-cxx_omp_predetermined_sharing (tree decl)
+void
+finish_omp_taskwait (void)
{
- enum omp_clause_default_kind kind;
-
- kind = c_omp_predetermined_sharing (decl);
- if (kind != OMP_CLAUSE_DEFAULT_UNSPECIFIED)
- return kind;
-
- /* Static data members are predetermined as shared. */
- if (TREE_STATIC (decl))
- {
- tree ctx = CP_DECL_CONTEXT (decl);
- if (TYPE_P (ctx) && MAYBE_CLASS_TYPE_P (ctx))
- return OMP_CLAUSE_DEFAULT_SHARED;
- }
-
- return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
+ tree fn = built_in_decls[BUILT_IN_GOMP_TASKWAIT];
+ tree stmt = finish_call_expr (fn, NULL, false, false, tf_warning_or_error);
+ finish_expr_stmt (stmt);
}
void
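
finish_omp_taskwait above lowers the new directive to a call to the GOMP_TASKWAIT builtin. As a rough usage sketch, illustrative only and not part of the patch (the snippet and its variables are invented; only the GOMP_taskwait lowering comes from the code above), the construct pair looks like this in C:

    #include <stdio.h>

    int main (void)
    {
      int x = 0, y = 0;

    #pragma omp parallel
    #pragma omp single
      {
    #pragma omp task untied      /* new task directive; untied clause */
        x = 1;
    #pragma omp task
        y = 2;
    #pragma omp taskwait         /* lowered to a call to GOMP_taskwait */
        printf ("%d %d\n", x, y);
      }
      return 0;
    }
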
diff --git a/gcc/cse.c b/gcc/cse.c
index ffe7efb6006..73eb1c6805b 100644
--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -3489,6 +3489,11 @@ fold_rtx (rtx x, rtx insn)
&& exact_log2 (- INTVAL (const_arg1)) >= 0)))
break;
+ /* ??? Vector mode shifts by scalar
+ shift operand are not supported yet. */
+ if (is_shift && VECTOR_MODE_P (mode))
+ break;
+
if (is_shift
&& (INTVAL (inner_const) >= GET_MODE_BITSIZE (mode)
|| INTVAL (inner_const) < 0))
diff --git a/gcc/dbxout.c b/gcc/dbxout.c
index 0d30427eeae..28f7bfa14a5 100644
--- a/gcc/dbxout.c
+++ b/gcc/dbxout.c
@@ -2334,7 +2334,7 @@ dbxout_class_name_qualifiers (tree decl)
/* This is a specialized subset of expand_expr for use by dbxout_symbol in
evaluating DECL_VALUE_EXPR. In particular, we stop if we find decls that
- havn't been expanded, or if the expression is getting so complex we won't
+ haven't been expanded, or if the expression is getting so complex we won't
be able to represent it in stabs anyway. Returns NULL on failure. */
static rtx
@@ -2500,7 +2500,7 @@ dbxout_symbol (tree decl, int local ATTRIBUTE_UNUSED)
/* If we are to generate only the symbols actually used then such
symbol nodes are flagged with TREE_USED. Ignore any that
- aren't flaged as TREE_USED. */
+ aren't flagged as TREE_USED. */
if (flag_debug_only_used_symbols
&& (!TREE_USED (decl)
diff --git a/gcc/df-byte-scan.c b/gcc/df-byte-scan.c
index c1c36197e59..11252cd4474 100644
--- a/gcc/df-byte-scan.c
+++ b/gcc/df-byte-scan.c
@@ -161,7 +161,7 @@ df_compute_accessed_bytes_strict_low_part (struct df_ref *ref,
int m2_size;
int offset;
- /* In order to accomodate multiword subregs of a hardreg, df_scan
+ /* In order to accommodate multiword subregs of a hardreg, df_scan
eats the subreg and it can only be found from the loc. */
if (REG_P (reg))
reg = *(DF_REF_LOC (ref));
@@ -214,7 +214,7 @@ df_compute_accessed_bytes_subreg (struct df_ref *ref, unsigned int *start_byte,
enum machine_mode m2;
int m2_size;
- /* In order to accomodate multiword subregs of a hardreg, df_scan
+ /* In order to accommodate multiword subregs of a hardreg, df_scan
eats the subreg and it can only be found from the loc. */
if (REG_P (reg))
reg = *(DF_REF_LOC (ref));
diff --git a/gcc/df-core.c b/gcc/df-core.c
index 8efbd21a014..95932909ebb 100644
--- a/gcc/df-core.c
+++ b/gcc/df-core.c
@@ -1,6 +1,6 @@
/* Allocation for dataflow support routines.
- Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
- Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
+ 2008 Free Software Foundation, Inc.
Originally contributed by Michael P. Hayes
(m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
@@ -42,7 +42,7 @@ requirement is that there be a correct control flow graph.
There are three variations of the live variable problem that are
available whenever dataflow is available. The LR problem finds the
areas that can reach a use of a variable, the UR problems finds the
-areas tha can be reached from a definition of a variable. The LIVE
+areas that can be reached from a definition of a variable. The LIVE
problem finds the intersection of these two areas.
There are several optional problems. These can be enabled when they
@@ -57,7 +57,7 @@ making this happen and are described in the INCREMENTAL SCANNING
section.
In the middle layer, basic blocks are scanned to produce transfer
-functions which describe the effects of that block on the a global
+functions which describe the effects of that block on the global
dataflow solution.  The transfer functions are only rebuilt if
some instruction within the block has changed.
@@ -343,7 +343,6 @@ There are 4 ways to obtain access to refs:
chains.
4) An array of all of the uses (and an array of all of the defs) can
-
be built. These arrays are indexed by the value in the id
structure. These arrays are only lazily kept up to date, and that
process can be expensive. To have these arrays built, call
@@ -370,7 +369,7 @@ address in this second example.
A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
for which the number of word_mode units covered by the outer mode is
-smaller than that covered by the inner mode, invokes a read-modify-write.
+smaller than that covered by the inner mode, invokes a read-modify-write
operation. We generate both a use and a def and again mark them
read/write.
diff --git a/gcc/df-problems.c b/gcc/df-problems.c
index b1e60b3ab71..043cf38398f 100644
--- a/gcc/df-problems.c
+++ b/gcc/df-problems.c
@@ -1,7 +1,6 @@
/* Standard problems for dataflow support routines.
- Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
-
- Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
+ 2008 Free Software Foundation, Inc.
Originally contributed by Michael P. Hayes
(m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
@@ -2335,7 +2334,7 @@ df_chain_add_problem (enum df_chain_flags chain_flags)
reach in the backwards direction. In and out bitvectors are built
for each basic block. There are two mapping functions,
df_byte_lr_get_regno_start and df_byte_lr_get_regno_len that are
- used to map regnos into bit vector postions.
+ used to map regnos into bit vector positions.
This problem differs from the regular df_lr function in the way
that subregs, *_extracts and strict_low_parts are handled. In lr
diff --git a/gcc/df-scan.c b/gcc/df-scan.c
index 79714e92a08..962c17a816d 100644
--- a/gcc/df-scan.c
+++ b/gcc/df-scan.c
@@ -462,7 +462,7 @@ df_scan_add_problem (void)
/* First, grow the reg_info information. If the current size is less than
- the number of psuedos, grow to 25% more than the number of
+ the number of pseudos, grow to 25% more than the number of
pseudos.
Second, assure that all of the slots up to max_reg_num have been
@@ -3434,7 +3434,7 @@ df_bb_refs_collect (struct df_collection_rec *collection_rec, basic_block bb)
bottom of the sender block.
The bottom of the sender block is problematic because not all
- out-edges of the a block are eh-edges. However, it is true
+ out-edges of a block are eh-edges. However, it is true
that all edges into a block are either eh-edges or none of
them are eh-edges. Thus, we can model this at the top of the
eh-receiver for all of the edges at once. */
diff --git a/gcc/dfp.c b/gcc/dfp.c
index 2526ef60c0f..fde3b84bda5 100644
--- a/gcc/dfp.c
+++ b/gcc/dfp.c
@@ -1,5 +1,5 @@
/* Decimal floating point support.
- Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
This file is part of GCC.
@@ -528,7 +528,7 @@ decimal_real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh,
decNumberZero (&dn3);
decNumberRescale (&dn, &dn2, &dn3, &set);
- /* Conver to REAL_VALUE_TYPE and call appropriate conversion
+ /* Convert to REAL_VALUE_TYPE and call appropriate conversion
function. */
decNumberToString (&dn, string);
real_from_string (&to, string);
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index ece9d9d5fc0..6b41efed8de 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -352,8 +352,9 @@ Objective-C and Objective-C++ Dialects}.
-fschedule-insns -fschedule-insns2 -fsection-anchors -fsee @gol
-fsignaling-nans -fsingle-precision-constant -fsplit-ivs-in-unroller @gol
-fsplit-wide-types -fstack-protector -fstack-protector-all @gol
--fstrict-aliasing -fstrict-overflow -fthread-jumps -ftracer -ftree-ccp @gol
--ftree-ch -ftree-copy-prop -ftree-copyrename -ftree-dce @gol
+-fstrict-aliasing -fstrict-overflow -fthread-jumps -ftracer @gol
+-ftree-builtin-call-dce -ftree-ccp -ftree-ch -ftree-copy-prop @gol
+-ftree-copyrename -ftree-dce @gol
-ftree-dominator-opts -ftree-dse -ftree-fre -ftree-loop-im @gol
-ftree-loop-distribution @gol
-ftree-loop-ivcanon -ftree-loop-linear -ftree-loop-optimize @gol
@@ -5154,6 +5155,7 @@ compilation time.
-fipa-reference @gol
-fmerge-constants
-fsplit-wide-types @gol
+-ftree-builtin-call-dce @gol
-ftree-ccp @gol
-ftree-ch @gol
-ftree-copyrename @gol
@@ -5874,6 +5876,13 @@ enabled by default at @option{-O2} and higher.
Perform dead code elimination (DCE) on trees. This flag is enabled by
default at @option{-O} and higher.
+@item -ftree-builtin-call-dce
+@opindex ftree-builtin-call-dce
+Perform conditional dead code elimination (DCE) for calls to builtin functions
+that may set @code{errno} but are otherwise side-effect free. This flag is
+enabled by default at @option{-O2} and higher if @option{-Os} is not also
+specified.
+
@item -ftree-dominator-opts
@opindex ftree-dominator-opts
Perform a variety of simple scalar cleanups (constant/copy
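
To make the new -ftree-builtin-call-dce description concrete, here is an illustrative C fragment (not part of the patch; keep_errno_only is an invented name): the call's value is dead and only its possible errno side effect keeps it alive, which is exactly the case the pass can guard.

    #include <math.h>

    double keep_errno_only (double x)
    {
      /* The result is never used, so only the potential errno write
         (x < 0 for sqrt) has any effect; -ftree-builtin-call-dce may
         replace the unconditional call with one guarded by x < 0.  */
      double r = sqrt (x);
      (void) r;
      return x;
    }
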
@@ -11978,7 +11987,8 @@ The processor names are:
@samp{sb1},
@samp{sr71000},
@samp{vr4100}, @samp{vr4111}, @samp{vr4120}, @samp{vr4130}, @samp{vr4300},
-@samp{vr5000}, @samp{vr5400} and @samp{vr5500}.
+@samp{vr5000}, @samp{vr5400}, @samp{vr5500}
+and @samp{xlr}.
The special value @samp{from-abi} selects the
most compatible architecture for the selected ABI (that is,
@samp{mips1} for 32-bit ABIs and @samp{mips3} for 64-bit ABIs)@.
diff --git a/gcc/dominance.c b/gcc/dominance.c
index fdd94d2c14b..811d2b91300 100644
--- a/gcc/dominance.c
+++ b/gcc/dominance.c
@@ -1,5 +1,6 @@
/* Calculate (post)dominators in slightly super-linear time.
- Copyright (C) 2000, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2000, 2003, 2004, 2005, 2006, 2007, 2008 Free
+ Software Foundation, Inc.
Contributed by Michael Matz (matz@ifh.de).
This file is part of GCC.
@@ -82,7 +83,7 @@ struct dom_info
/* The following few fields implement the structures needed for disjoint
sets. */
- /* set_chain[x] is the next node on the path from x to the representant
+ /* set_chain[x] is the next node on the path from x to the representative
of the set containing x. If set_chain[x]==0 then x is a root. */
TBB *set_chain;
/* set_size[x] is the number of elements in the set named by x. */
@@ -421,7 +422,7 @@ compress (struct dom_info *di, TBB v)
static inline TBB
eval (struct dom_info *di, TBB v)
{
- /* The representant of the set V is in, also called root (as the set
+ /* The representative of the set V is in, also called root (as the set
representation is a tree). */
TBB rep = di->set_chain[v];
diff --git a/gcc/domwalk.c b/gcc/domwalk.c
index fb7f0729d3d..cec95a5f932 100644
--- a/gcc/domwalk.c
+++ b/gcc/domwalk.c
@@ -1,5 +1,6 @@
/* Generic dominator tree walker
- Copyright (C) 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2007, 2008 Free Software Foundation,
+ Inc.
Contributed by Diego Novillo <dnovillo@redhat.com>
This file is part of GCC.
@@ -213,7 +214,7 @@ walk_dominator_tree (struct dom_walk_data *walk_data, basic_block bb)
(*walk_data->before_dom_children_after_stmts) (walk_data, bb);
/* Mark the current BB to be popped out of the recursion stack
- once childs are processed. */
+ once children are processed. */
worklist[sp++] = bb;
worklist[sp++] = NULL;
diff --git a/gcc/dse.c b/gcc/dse.c
index c2289384905..841e4668b27 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -1,5 +1,5 @@
/* RTL dead store elimination.
- Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
Contributed by Richard Sandiford <rsandifor@codesourcery.com>
and Kenneth Zadeck <zadeck@naturalbridge.com>
@@ -345,7 +345,7 @@ struct insn_info
/* The linked list of insns that are in consideration for removal in
the forwards pass thru the basic block. This pointer may be
trash as it is not cleared when a wild read occurs. The only
- time it is guaranteed to be correct is when the traveral starts
+ time it is guaranteed to be correct is when the traversal starts
at active_local_stores. */
struct insn_info * next_local_store;
};
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index 2113410232c..ba17c3f5c2d 100644
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -12558,7 +12558,7 @@ gen_subprogram_die (tree decl, dw_die_ref context_die)
}
else
{ /* Do nothing for now; maybe need to duplicate die, one for
- hot section and ond for cold section, then use the hot/cold
+ hot section and one for cold section, then use the hot/cold
section begin/end labels to generate the aranges... */
/*
add_AT_lbl_id (subr_die, DW_AT_low_pc, hot_section_label);
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index 1194ad79b81..0bacebcf9f8 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -1141,7 +1141,7 @@ get_first_label_num (void)
/* If the rtx for label was created during the expansion of a nested
function, then first_label_num won't include this label number.
- Fix this now so that array indicies work later. */
+ Fix this now so that array indices work later. */
void
maybe_set_first_label_num (rtx x)
@@ -1623,7 +1623,7 @@ set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp,
}
/* If this is a field reference and not a bit-field, record it. */
- /* ??? There is some information that can be gleened from bit-fields,
+ /* ??? There is some information that can be gleaned from bit-fields,
such as the word offset in the structure that might be modified.
But skip it for now. */
else if (TREE_CODE (t) == COMPONENT_REF
@@ -1726,7 +1726,7 @@ set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp,
if (TREE_CODE (t) == ALIGN_INDIRECT_REF)
{
- /* Force EXPR and OFFSE to NULL, since we don't know exactly what
+ /* Force EXPR and OFFSET to NULL, since we don't know exactly what
we're overlapping. */
offset = NULL;
expr = NULL;
@@ -1960,7 +1960,7 @@ adjust_address_1 (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset,
/* Return a memory reference like MEMREF, but with its mode changed
to MODE and its address changed to ADDR, which is assumed to be
- MEMREF offseted by OFFSET bytes. If VALIDATE is
+ MEMREF offset by OFFSET bytes. If VALIDATE is
nonzero, the memory address is forced to be valid. */
rtx
diff --git a/gcc/et-forest.c b/gcc/et-forest.c
index 6c62fac6751..02cbcc16402 100644
--- a/gcc/et-forest.c
+++ b/gcc/et-forest.c
@@ -1,6 +1,7 @@
/* ET-trees data structure implementation.
Contributed by Pavel Nejedly
- Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2003, 2004, 2005, 2007, 2008 Free Software
+ Foundation, Inc.
This file is part of the libiberty library.
Libiberty is free software; you can redistribute it and/or
@@ -754,7 +755,7 @@ et_root (struct et_node *node)
{
struct et_occ *occ = node->rightmost_occ, *r;
- /* The root of the tree corresponds to the rightmost occurence in the
+ /* The root of the tree corresponds to the rightmost occurrence in the
represented path. */
et_splay (occ);
for (r = occ; r->next; r = r->next)
diff --git a/gcc/except.c b/gcc/except.c
index f8bacd5902b..8ebfd7fa1db 100644
--- a/gcc/except.c
+++ b/gcc/except.c
@@ -540,6 +540,7 @@ expand_resx_expr (tree exp)
cfun->eh->region_array, region_nr);
gcc_assert (!reg->resume);
+ do_pending_stack_adjust ();
reg->resume = emit_jump_insn (gen_rtx_RESX (VOIDmode, region_nr));
emit_barrier ();
}
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 609217b755f..aaae9d66c51 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -11389,7 +11389,7 @@ fold_binary (enum tree_code code, tree type, tree op0, tree op1)
{
if (strict_overflow_p)
fold_overflow_warning (("assuming signed overflow does not occur "
- "when simplifying modulos"),
+ "when simplifying modulus"),
WARN_STRICT_OVERFLOW_MISC);
return fold_convert (type, tem);
}
@@ -14758,7 +14758,7 @@ fold_read_from_constant_string (tree exp)
with constant folding. (E.g. suppose the lower bound is 1,
     and its mode is QI.  Without the conversion, (ARRAY
+(INDEX-(unsigned char)1)) becomes ((ARRAY+(-(unsigned char)1))
- +INDEX), which becomes (ARRAY+255+INDEX). Opps!) */
+ +INDEX), which becomes (ARRAY+255+INDEX). Oops!) */
if (! integer_zerop (low_bound))
index = size_diffop (index, fold_convert (sizetype, low_bound));
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index fd0817becbd..f34fddc2a10 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,98 @@
+2008-06-06 Tobias Burnus <burnus@net-b.de>
+
+ * intrinsic.texi (BESSEL_J1): Fix BES(S)EL_J1 typo.
+
+2008-06-06 Jakub Jelinek <jakub@redhat.com>
+
+ * scanner.c (skip_free_comments, skip_fixed_comments): Handle tabs.
+ * parse.c (next_free): Allow tab after !$omp.
+ (decode_omp_directive): Handle !$omp task, !$omp taskwait
+ and !$omp end task.
+ (case_executable): Add ST_OMP_TASKWAIT.
+ (case_exec_markers): Add ST_OMP_TASK.
+ (gfc_ascii_statement): Handle ST_OMP_TASK, ST_OMP_END_TASK and
+ ST_OMP_TASKWAIT.
+ (parse_omp_structured_block, parse_executable): Handle ST_OMP_TASK.
+ * gfortran.h (gfc_find_sym_in_expr): New prototype.
+ (gfc_statement): Add ST_OMP_TASK, ST_OMP_END_TASK and ST_OMP_TASKWAIT.
+ (gfc_omp_clauses): Add OMP_SCHED_AUTO to sched_kind,
+ OMP_DEFAULT_FIRSTPRIVATE to default_sharing. Add collapse and
+ untied fields.
+ (gfc_exec_op): Add EXEC_OMP_TASK and EXEC_OMP_TASKWAIT.
+ * f95-lang.c (LANG_HOOKS_OMP_CLAUSE_COPY_CTOR,
+ LANG_HOOKS_OMP_CLAUSE_ASSIGN_OP, LANG_HOOKS_OMP_CLAUSE_DTOR,
+ LANG_HOOKS_OMP_PRIVATE_OUTER_REF): Define.
+ * trans.h (gfc_omp_clause_default_ctor): Add another argument.
+ (gfc_omp_clause_copy_ctor, gfc_omp_clause_assign_op,
+ gfc_omp_clause_dtor, gfc_omp_private_outer_ref): New prototypes.
+ * types.def (BT_ULONGLONG, BT_PTR_ULONGLONG,
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR,
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ BT_FN_VOID_PTR_PTR, BT_PTR_FN_VOID_PTR_PTR,
+ BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): New.
+ (BT_BOOL): Use integer type with BOOL_TYPE_SIZE rather
+ than boolean_type_node.
+ * dump-parse-tree.c (gfc_show_omp_node): Handle EXEC_OMP_TASK,
+ EXEC_OMP_TASKWAIT, OMP_SCHED_AUTO, OMP_DEFAULT_FIRSTPRIVATE,
+ untied and collapse clauses.
+ (gfc_show_code_node): Handle EXEC_OMP_TASK and EXEC_OMP_TASKWAIT.
+ * trans.c (gfc_trans_code): Handle EXEC_OMP_TASK and
+ EXEC_OMP_TASKWAIT.
+ * st.c (gfc_free_statement): Likewise.
+ * resolve.c (gfc_resolve_blocks, resolve_code): Likewise.
+ (find_sym_in_expr): Rename to...
+ (gfc_find_sym_in_expr): ... this. No longer static.
+ (resolve_allocate_expr, resolve_ordinary_assign): Adjust caller.
+ * match.h (gfc_match_omp_task, gfc_match_omp_taskwait): New
+ prototypes.
+ * openmp.c (resolve_omp_clauses): Allow allocatable arrays in
+ firstprivate, lastprivate, reduction, copyprivate and copyin
+ clauses.
+ (omp_current_do_code): Made static.
+ (omp_current_do_collapse): New variable.
+ (gfc_resolve_omp_do_blocks): Compute omp_current_do_collapse,
+ clear omp_current_do_code and omp_current_do_collapse on return.
+ (gfc_resolve_do_iterator): Handle collapsed do loops.
+	(resolve_omp_do): Likewise, diagnose erroneous collapsed do loops.
+ (OMP_CLAUSE_COLLAPSE, OMP_CLAUSE_UNTIED): Define.
+ (gfc_match_omp_clauses): Handle default (firstprivate),
+ schedule (auto), untied and collapse (n) clauses.
+ (OMP_DO_CLAUSES): Add OMP_CLAUSE_COLLAPSE.
+ (OMP_TASK_CLAUSES): Define.
+ (gfc_match_omp_task, gfc_match_omp_taskwait): New functions.
+ * trans-openmp.c (gfc_omp_private_outer_ref): New function.
+ (gfc_omp_clause_default_ctor): Add outer argument. For allocatable
+ arrays allocate them with the bounds of the outer var if outer
+ var is allocated.
+ (gfc_omp_clause_copy_ctor, gfc_omp_clause_assign_op,
+ gfc_omp_clause_dtor): New functions.
+ (gfc_trans_omp_array_reduction): If decl is allocatable array,
+ allocate it with outer var's bounds in OMP_CLAUSE_REDUCTION_INIT
+ and deallocate it in OMP_CLAUSE_REDUCTION_MERGE.
+ (gfc_omp_predetermined_sharing): Return OMP_CLAUSE_DEFAULT_SHARED
+ for assumed-size arrays.
+ (gfc_trans_omp_do): Add par_clauses argument. If dovar is
+ present in lastprivate clause and do loop isn't simple,
+ set OMP_CLAUSE_LASTPRIVATE_STMT. If dovar is present in
+ parallel's lastprivate clause, change it to shared and add
+ lastprivate clause to OMP_FOR_CLAUSES. Handle collapsed do loops.
+ (gfc_trans_omp_directive): Adjust gfc_trans_omp_do callers.
+ (gfc_trans_omp_parallel_do): Likewise. Move collapse clause to
+ OMP_FOR from OMP_PARALLEL.
+ (gfc_trans_omp_clauses): Handle OMP_SCHED_AUTO,
+ OMP_DEFAULT_FIRSTPRIVATE, untied and collapse clauses.
+ (gfc_trans_omp_task, gfc_trans_omp_taskwait): New functions.
+ (gfc_trans_omp_directive): Handle EXEC_OMP_TASK and
+ EXEC_OMP_TASKWAIT.
+
+2008-06-04 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/36322
+ PR fortran/36275
+ * resolve.c (resolve_symbol): Correctly copy the interface for a
+ PROCEDURE declaration.
+
2008-06-02 Janus Weil <janus@gcc.gnu.org>
PR fortran/36361
diff --git a/gcc/fortran/dump-parse-tree.c b/gcc/fortran/dump-parse-tree.c
index 44a4941e7b4..80ff5bcecb7 100644
--- a/gcc/fortran/dump-parse-tree.c
+++ b/gcc/fortran/dump-parse-tree.c
@@ -848,6 +848,8 @@ show_omp_node (int level, gfc_code *c)
case EXEC_OMP_PARALLEL_WORKSHARE: name = "PARALLEL WORKSHARE"; break;
case EXEC_OMP_SECTIONS: name = "SECTIONS"; break;
case EXEC_OMP_SINGLE: name = "SINGLE"; break;
+ case EXEC_OMP_TASK: name = "TASK"; break;
+ case EXEC_OMP_TASKWAIT: name = "TASKWAIT"; break;
case EXEC_OMP_WORKSHARE: name = "WORKSHARE"; break;
default:
gcc_unreachable ();
@@ -863,6 +865,7 @@ show_omp_node (int level, gfc_code *c)
case EXEC_OMP_SINGLE:
case EXEC_OMP_WORKSHARE:
case EXEC_OMP_PARALLEL_WORKSHARE:
+ case EXEC_OMP_TASK:
omp_clauses = c->ext.omp_clauses;
break;
case EXEC_OMP_CRITICAL:
@@ -878,6 +881,7 @@ show_omp_node (int level, gfc_code *c)
}
return;
case EXEC_OMP_BARRIER:
+ case EXEC_OMP_TASKWAIT:
return;
default:
break;
@@ -907,6 +911,7 @@ show_omp_node (int level, gfc_code *c)
case OMP_SCHED_DYNAMIC: type = "DYNAMIC"; break;
case OMP_SCHED_GUIDED: type = "GUIDED"; break;
case OMP_SCHED_RUNTIME: type = "RUNTIME"; break;
+ case OMP_SCHED_AUTO: type = "AUTO"; break;
default:
gcc_unreachable ();
}
@@ -926,7 +931,7 @@ show_omp_node (int level, gfc_code *c)
case OMP_DEFAULT_NONE: type = "NONE"; break;
case OMP_DEFAULT_PRIVATE: type = "PRIVATE"; break;
case OMP_DEFAULT_SHARED: type = "SHARED"; break;
- case OMP_SCHED_RUNTIME: type = "RUNTIME"; break;
+ case OMP_DEFAULT_FIRSTPRIVATE: type = "FIRSTPRIVATE"; break;
default:
gcc_unreachable ();
}
@@ -934,6 +939,10 @@ show_omp_node (int level, gfc_code *c)
}
if (omp_clauses->ordered)
fputs (" ORDERED", dumpfile);
+ if (omp_clauses->untied)
+ fputs (" UNTIED", dumpfile);
+ if (omp_clauses->collapse)
+ fprintf (dumpfile, " COLLAPSE(%d)", omp_clauses->collapse);
for (list_type = 0; list_type < OMP_LIST_NUM; list_type++)
if (omp_clauses->lists[list_type] != NULL
&& list_type != OMP_LIST_COPYPRIVATE)
@@ -1806,6 +1815,8 @@ show_code_node (int level, gfc_code *c)
case EXEC_OMP_PARALLEL_WORKSHARE:
case EXEC_OMP_SECTIONS:
case EXEC_OMP_SINGLE:
+ case EXEC_OMP_TASK:
+ case EXEC_OMP_TASKWAIT:
case EXEC_OMP_WORKSHARE:
show_omp_node (level, c);
break;
diff --git a/gcc/fortran/f95-lang.c b/gcc/fortran/f95-lang.c
index 63c380b61ea..42ab57a9606 100644
--- a/gcc/fortran/f95-lang.c
+++ b/gcc/fortran/f95-lang.c
@@ -115,8 +115,12 @@ static alias_set_type gfc_get_alias_set (tree);
#undef LANG_HOOKS_OMP_PRIVATIZE_BY_REFERENCE
#undef LANG_HOOKS_OMP_PREDETERMINED_SHARING
#undef LANG_HOOKS_OMP_CLAUSE_DEFAULT_CTOR
+#undef LANG_HOOKS_OMP_CLAUSE_COPY_CTOR
+#undef LANG_HOOKS_OMP_CLAUSE_ASSIGN_OP
+#undef LANG_HOOKS_OMP_CLAUSE_DTOR
#undef LANG_HOOKS_OMP_DISREGARD_VALUE_EXPR
#undef LANG_HOOKS_OMP_PRIVATE_DEBUG_CLAUSE
+#undef LANG_HOOKS_OMP_PRIVATE_OUTER_REF
#undef LANG_HOOKS_OMP_FIRSTPRIVATIZE_TYPE_SIZES
#undef LANG_HOOKS_BUILTIN_FUNCTION
#undef LANG_HOOKS_GET_ARRAY_DESCR_INFO
@@ -137,8 +141,12 @@ static alias_set_type gfc_get_alias_set (tree);
#define LANG_HOOKS_OMP_PRIVATIZE_BY_REFERENCE gfc_omp_privatize_by_reference
#define LANG_HOOKS_OMP_PREDETERMINED_SHARING gfc_omp_predetermined_sharing
#define LANG_HOOKS_OMP_CLAUSE_DEFAULT_CTOR gfc_omp_clause_default_ctor
+#define LANG_HOOKS_OMP_CLAUSE_COPY_CTOR gfc_omp_clause_copy_ctor
+#define LANG_HOOKS_OMP_CLAUSE_ASSIGN_OP gfc_omp_clause_assign_op
+#define LANG_HOOKS_OMP_CLAUSE_DTOR gfc_omp_clause_dtor
#define LANG_HOOKS_OMP_DISREGARD_VALUE_EXPR gfc_omp_disregard_value_expr
#define LANG_HOOKS_OMP_PRIVATE_DEBUG_CLAUSE gfc_omp_private_debug_clause
+#define LANG_HOOKS_OMP_PRIVATE_OUTER_REF gfc_omp_private_outer_ref
#define LANG_HOOKS_OMP_FIRSTPRIVATIZE_TYPE_SIZES \
gfc_omp_firstprivatize_type_sizes
#define LANG_HOOKS_BUILTIN_FUNCTION gfc_builtin_function
diff --git a/gcc/fortran/gfortran.h b/gcc/fortran/gfortran.h
index d4f9771e610..8665a48c566 100644
--- a/gcc/fortran/gfortran.h
+++ b/gcc/fortran/gfortran.h
@@ -228,7 +228,8 @@ typedef enum
ST_OMP_END_WORKSHARE, ST_OMP_DO, ST_OMP_FLUSH, ST_OMP_MASTER, ST_OMP_ORDERED,
ST_OMP_PARALLEL, ST_OMP_PARALLEL_DO, ST_OMP_PARALLEL_SECTIONS,
ST_OMP_PARALLEL_WORKSHARE, ST_OMP_SECTIONS, ST_OMP_SECTION, ST_OMP_SINGLE,
- ST_OMP_THREADPRIVATE, ST_OMP_WORKSHARE, ST_PROCEDURE,
+ ST_OMP_THREADPRIVATE, ST_OMP_WORKSHARE, ST_OMP_TASK, ST_OMP_END_TASK,
+ ST_OMP_TASKWAIT, ST_PROCEDURE,
ST_GET_FCN_CHARACTERISTICS, ST_NONE
}
gfc_statement;
@@ -927,7 +928,8 @@ typedef struct gfc_omp_clauses
OMP_SCHED_STATIC,
OMP_SCHED_DYNAMIC,
OMP_SCHED_GUIDED,
- OMP_SCHED_RUNTIME
+ OMP_SCHED_RUNTIME,
+ OMP_SCHED_AUTO
} sched_kind;
struct gfc_expr *chunk_size;
enum
@@ -935,9 +937,11 @@ typedef struct gfc_omp_clauses
OMP_DEFAULT_UNKNOWN,
OMP_DEFAULT_NONE,
OMP_DEFAULT_PRIVATE,
- OMP_DEFAULT_SHARED
+ OMP_DEFAULT_SHARED,
+ OMP_DEFAULT_FIRSTPRIVATE
} default_sharing;
- bool nowait, ordered;
+ int collapse;
+ bool nowait, ordered, untied;
}
gfc_omp_clauses;
@@ -1760,7 +1764,7 @@ typedef enum
EXEC_OMP_PARALLEL_SECTIONS, EXEC_OMP_PARALLEL_WORKSHARE,
EXEC_OMP_SECTIONS, EXEC_OMP_SINGLE, EXEC_OMP_WORKSHARE,
EXEC_OMP_ATOMIC, EXEC_OMP_BARRIER, EXEC_OMP_END_NOWAIT,
- EXEC_OMP_END_SINGLE
+ EXEC_OMP_END_SINGLE, EXEC_OMP_TASK, EXEC_OMP_TASKWAIT
}
gfc_exec_op;
@@ -2040,6 +2044,7 @@ bool gfc_post_options (const char **);
/* iresolve.c */
const char * gfc_get_string (const char *, ...) ATTRIBUTE_PRINTF_1;
+bool gfc_find_sym_in_expr (gfc_symbol *, gfc_expr *);
/* error.c */
diff --git a/gcc/fortran/intrinsic.texi b/gcc/fortran/intrinsic.texi
index 6852d64387e..a6259cc044e 100644
--- a/gcc/fortran/intrinsic.texi
+++ b/gcc/fortran/intrinsic.texi
@@ -1541,7 +1541,7 @@ end program test_besj0
@node BESSEL_J1
-@section @code{BESEL_J1} --- Bessel function of the first kind of order 1
+@section @code{BESSEL_J1} --- Bessel function of the first kind of order 1
@fnindex BESSEL_J1
@fnindex BESJ1
@fnindex DBESJ1
diff --git a/gcc/fortran/match.h b/gcc/fortran/match.h
index 3f8d31074e8..5ee91fb62de 100644
--- a/gcc/fortran/match.h
+++ b/gcc/fortran/match.h
@@ -119,6 +119,8 @@ match gfc_match_omp_parallel_sections (void);
match gfc_match_omp_parallel_workshare (void);
match gfc_match_omp_sections (void);
match gfc_match_omp_single (void);
+match gfc_match_omp_task (void);
+match gfc_match_omp_taskwait (void);
match gfc_match_omp_threadprivate (void);
match gfc_match_omp_workshare (void);
match gfc_match_omp_end_nowait (void);
diff --git a/gcc/fortran/openmp.c b/gcc/fortran/openmp.c
index 9c0bae497bf..28f1cc24dfd 100644
--- a/gcc/fortran/openmp.c
+++ b/gcc/fortran/openmp.c
@@ -182,6 +182,8 @@ cleanup:
#define OMP_CLAUSE_SCHEDULE (1 << 9)
#define OMP_CLAUSE_DEFAULT (1 << 10)
#define OMP_CLAUSE_ORDERED (1 << 11)
+#define OMP_CLAUSE_COLLAPSE (1 << 12)
+#define OMP_CLAUSE_UNTIED (1 << 13)
/* Match OpenMP directive clauses. MASK is a bitmask of
clauses that are allowed for a particular directive. */
@@ -335,6 +337,8 @@ gfc_match_omp_clauses (gfc_omp_clauses **cp, int mask)
c->default_sharing = OMP_DEFAULT_PRIVATE;
else if (gfc_match ("default ( none )") == MATCH_YES)
c->default_sharing = OMP_DEFAULT_NONE;
+ else if (gfc_match ("default ( firstprivate )") == MATCH_YES)
+ c->default_sharing = OMP_DEFAULT_FIRSTPRIVATE;
if (c->default_sharing != OMP_DEFAULT_UNKNOWN)
continue;
}
@@ -351,10 +355,13 @@ gfc_match_omp_clauses (gfc_omp_clauses **cp, int mask)
c->sched_kind = OMP_SCHED_GUIDED;
else if (gfc_match ("runtime") == MATCH_YES)
c->sched_kind = OMP_SCHED_RUNTIME;
+ else if (gfc_match ("auto") == MATCH_YES)
+ c->sched_kind = OMP_SCHED_AUTO;
if (c->sched_kind != OMP_SCHED_NONE)
{
match m = MATCH_NO;
- if (c->sched_kind != OMP_SCHED_RUNTIME)
+ if (c->sched_kind != OMP_SCHED_RUNTIME
+ && c->sched_kind != OMP_SCHED_AUTO)
m = gfc_match (" , %e )", &c->chunk_size);
if (m != MATCH_YES)
m = gfc_match_char (')');
@@ -372,6 +379,36 @@ gfc_match_omp_clauses (gfc_omp_clauses **cp, int mask)
c->ordered = needs_space = true;
continue;
}
+ if ((mask & OMP_CLAUSE_UNTIED) && !c->untied
+ && gfc_match ("untied") == MATCH_YES)
+ {
+ c->untied = needs_space = true;
+ continue;
+ }
+ if ((mask & OMP_CLAUSE_COLLAPSE) && !c->collapse)
+ {
+ gfc_expr *cexpr = NULL;
+ match m = gfc_match ("collapse ( %e )", &cexpr);
+
+ if (m == MATCH_YES)
+ {
+ int collapse;
+ const char *p = gfc_extract_int (cexpr, &collapse);
+ if (p)
+ {
+ gfc_error (p);
+ collapse = 1;
+ }
+ else if (collapse <= 0)
+ {
+ gfc_error ("COLLAPSE clause argument not constant positive integer at %C");
+ collapse = 1;
+ }
+ c->collapse = collapse;
+ gfc_free_expr (cexpr);
+ continue;
+ }
+ }
break;
}
@@ -393,10 +430,13 @@ gfc_match_omp_clauses (gfc_omp_clauses **cp, int mask)
#define OMP_DO_CLAUSES \
(OMP_CLAUSE_PRIVATE | OMP_CLAUSE_FIRSTPRIVATE \
| OMP_CLAUSE_LASTPRIVATE | OMP_CLAUSE_REDUCTION \
- | OMP_CLAUSE_SCHEDULE | OMP_CLAUSE_ORDERED)
+ | OMP_CLAUSE_SCHEDULE | OMP_CLAUSE_ORDERED | OMP_CLAUSE_COLLAPSE)
#define OMP_SECTIONS_CLAUSES \
(OMP_CLAUSE_PRIVATE | OMP_CLAUSE_FIRSTPRIVATE \
| OMP_CLAUSE_LASTPRIVATE | OMP_CLAUSE_REDUCTION)
+#define OMP_TASK_CLAUSES \
+ (OMP_CLAUSE_PRIVATE | OMP_CLAUSE_FIRSTPRIVATE | OMP_CLAUSE_SHARED \
+ | OMP_CLAUSE_IF | OMP_CLAUSE_DEFAULT | OMP_CLAUSE_UNTIED)
match
gfc_match_omp_parallel (void)
@@ -411,6 +451,29 @@ gfc_match_omp_parallel (void)
match
+gfc_match_omp_task (void)
+{
+ gfc_omp_clauses *c;
+ if (gfc_match_omp_clauses (&c, OMP_TASK_CLAUSES) != MATCH_YES)
+ return MATCH_ERROR;
+ new_st.op = EXEC_OMP_TASK;
+ new_st.ext.omp_clauses = c;
+ return MATCH_YES;
+}
+
+
+match
+gfc_match_omp_taskwait (void)
+{
+ if (gfc_match_omp_eos () != MATCH_YES)
+ return MATCH_ERROR;
+ new_st.op = EXEC_OMP_TASKWAIT;
+ new_st.ext.omp_clauses = NULL;
+ return MATCH_YES;
+}
+
+
+match
gfc_match_omp_critical (void)
{
char n[GFC_MAX_SYMBOL_LEN+1];
@@ -809,9 +872,6 @@ resolve_omp_clauses (gfc_code *code)
if (!n->sym->attr.threadprivate)
gfc_error ("Non-THREADPRIVATE object '%s' in COPYIN clause"
" at %L", n->sym->name, &code->loc);
- if (n->sym->attr.allocatable)
- gfc_error ("COPYIN clause object '%s' is ALLOCATABLE at %L",
- n->sym->name, &code->loc);
if (n->sym->ts.type == BT_DERIVED && n->sym->ts.derived->attr.alloc_comp)
gfc_error ("COPYIN clause object '%s' at %L has ALLOCATABLE components",
n->sym->name, &code->loc);
@@ -823,9 +883,6 @@ resolve_omp_clauses (gfc_code *code)
if (n->sym->as && n->sym->as->type == AS_ASSUMED_SIZE)
gfc_error ("Assumed size array '%s' in COPYPRIVATE clause "
"at %L", n->sym->name, &code->loc);
- if (n->sym->attr.allocatable)
- gfc_error ("COPYPRIVATE clause object '%s' is ALLOCATABLE "
- "at %L", n->sym->name, &code->loc);
if (n->sym->ts.type == BT_DERIVED && n->sym->ts.derived->attr.alloc_comp)
gfc_error ("COPYPRIVATE clause object '%s' at %L has ALLOCATABLE components",
n->sym->name, &code->loc);
@@ -856,9 +913,6 @@ resolve_omp_clauses (gfc_code *code)
if (n->sym->attr.pointer)
gfc_error ("POINTER object '%s' in %s clause at %L",
n->sym->name, name, &code->loc);
- if (n->sym->attr.allocatable)
- gfc_error ("%s clause object '%s' is ALLOCATABLE at %L",
- name, n->sym->name, &code->loc);
/* Variables in REDUCTION-clauses must be of intrinsic type (flagged below). */
if ((list < OMP_LIST_REDUCTION_FIRST || list > OMP_LIST_REDUCTION_LAST) &&
n->sym->ts.type == BT_DERIVED && n->sym->ts.derived->attr.alloc_comp)
@@ -1246,15 +1300,34 @@ struct omp_context
struct pointer_set_t *private_iterators;
struct omp_context *previous;
} *omp_current_ctx;
-gfc_code *omp_current_do_code;
-
+static gfc_code *omp_current_do_code;
+static int omp_current_do_collapse;
void
gfc_resolve_omp_do_blocks (gfc_code *code, gfc_namespace *ns)
{
if (code->block->next && code->block->next->op == EXEC_DO)
- omp_current_do_code = code->block->next;
+ {
+ int i;
+ gfc_code *c;
+
+ omp_current_do_code = code->block->next;
+ omp_current_do_collapse = code->ext.omp_clauses->collapse;
+ for (i = 1, c = omp_current_do_code; i < omp_current_do_collapse; i++)
+ {
+ c = c->block;
+ if (c->op != EXEC_DO || c->next == NULL)
+ break;
+ c = c->next;
+ if (c->op != EXEC_DO)
+ break;
+ }
+ if (i < omp_current_do_collapse || omp_current_do_collapse <= 0)
+ omp_current_do_collapse = 1;
+ }
gfc_resolve_blocks (code->block, ns);
+ omp_current_do_collapse = 0;
+ omp_current_do_code = NULL;
}
@@ -1294,6 +1367,8 @@ void
gfc_resolve_do_iterator (gfc_code *code, gfc_symbol *sym)
{
struct omp_context *ctx;
+ int i = omp_current_do_collapse;
+ gfc_code *c = omp_current_do_code;
if (sym->attr.threadprivate)
return;
@@ -1301,8 +1376,14 @@ gfc_resolve_do_iterator (gfc_code *code, gfc_symbol *sym)
/* !$omp do and !$omp parallel do iteration variable is predetermined
private just in the !$omp do resp. !$omp parallel do construct,
with no implications for the outer parallel constructs. */
- if (code == omp_current_do_code)
- return;
+
+ while (i-- >= 1)
+ {
+ if (code == c)
+ return;
+
+ c = c->block->next;
+ }
for (ctx = omp_current_ctx; ctx; ctx = ctx->previous)
{
@@ -1326,8 +1407,8 @@ gfc_resolve_do_iterator (gfc_code *code, gfc_symbol *sym)
static void
resolve_omp_do (gfc_code *code)
{
- gfc_code *do_code;
- int list;
+ gfc_code *do_code, *c;
+ int list, i, collapse;
gfc_namelist *n;
gfc_symbol *dovar;
@@ -1335,11 +1416,17 @@ resolve_omp_do (gfc_code *code)
resolve_omp_clauses (code);
do_code = code->block->next;
- if (do_code->op == EXEC_DO_WHILE)
- gfc_error ("!$OMP DO cannot be a DO WHILE or DO without loop control "
- "at %L", &do_code->loc);
- else
+ collapse = code->ext.omp_clauses->collapse;
+ if (collapse <= 0)
+ collapse = 1;
+ for (i = 1; i <= collapse; i++)
{
+ if (do_code->op == EXEC_DO_WHILE)
+ {
+ gfc_error ("!$OMP DO cannot be a DO WHILE or DO without loop control "
+ "at %L", &do_code->loc);
+ break;
+ }
gcc_assert (do_code->op == EXEC_DO);
if (do_code->ext.iterator->var->ts.type != BT_INTEGER)
gfc_error ("!$OMP DO iteration variable must be of type integer at %L",
@@ -1359,6 +1446,53 @@ resolve_omp_do (gfc_code *code)
&do_code->loc);
break;
}
+ if (i > 1)
+ {
+ gfc_code *do_code2 = code->block->next;
+ int j;
+
+ for (j = 1; j < i; j++)
+ {
+ gfc_symbol *ivar = do_code2->ext.iterator->var->symtree->n.sym;
+ if (dovar == ivar
+ || gfc_find_sym_in_expr (ivar, do_code->ext.iterator->start)
+ || gfc_find_sym_in_expr (ivar, do_code->ext.iterator->end)
+ || gfc_find_sym_in_expr (ivar, do_code->ext.iterator->step))
+ {
+ gfc_error ("!$OMP DO collapsed loops don't form rectangular iteration space at %L",
+ &do_code->loc);
+ break;
+ }
+ if (j < i)
+ break;
+ do_code2 = do_code2->block->next;
+ }
+ }
+ if (i == collapse)
+ break;
+ for (c = do_code->next; c; c = c->next)
+ if (c->op != EXEC_NOP && c->op != EXEC_CONTINUE)
+ {
+ gfc_error ("collapsed !$OMP DO loops not perfectly nested at %L",
+ &c->loc);
+ break;
+ }
+ if (c)
+ break;
+ do_code = do_code->block;
+ if (do_code->op != EXEC_DO && do_code->op != EXEC_DO_WHILE)
+ {
+ gfc_error ("not enough DO loops for collapsed !$OMP DO at %L",
+ &code->loc);
+ break;
+ }
+ do_code = do_code->next;
+ if (do_code->op != EXEC_DO && do_code->op != EXEC_DO_WHILE)
+ {
+ gfc_error ("not enough DO loops for collapsed !$OMP DO at %L",
+ &code->loc);
+ break;
+ }
}
}
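
The new resolve_omp_do checks reject collapsed nests that are not perfectly nested or whose inner bounds depend on an outer iteration variable. A hedged C analogue follows (illustrative only; the diagnostics above apply to Fortran !$OMP DO, and the function name is invented):

    void rectangular (int n, int m, double *a)
    {
      int i, j;
      /* Accepted: the inner bound does not use i and nothing sits
         between the two loop headers, so the nest is perfectly nested
         and the iteration space is rectangular.  */
    #pragma omp for collapse(2)
      for (i = 0; i < n; i++)
        for (j = 0; j < m; j++)
          a[i * m + j] = 0.0;

      /* Rejected shapes (cf. the errors above): a statement between
         the two loop headers, or an inner bound such as j < i.  */
    }
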
diff --git a/gcc/fortran/parse.c b/gcc/fortran/parse.c
index 33f13c92200..c35db2d9cf6 100644
--- a/gcc/fortran/parse.c
+++ b/gcc/fortran/parse.c
@@ -515,6 +515,7 @@ decode_omp_directive (void)
match ("end parallel", gfc_match_omp_eos, ST_OMP_END_PARALLEL);
match ("end sections", gfc_match_omp_end_nowait, ST_OMP_END_SECTIONS);
match ("end single", gfc_match_omp_end_single, ST_OMP_END_SINGLE);
+ match ("end task", gfc_match_omp_eos, ST_OMP_END_TASK);
match ("end workshare", gfc_match_omp_end_nowait,
ST_OMP_END_WORKSHARE);
break;
@@ -541,6 +542,8 @@ decode_omp_directive (void)
match ("single", gfc_match_omp_single, ST_OMP_SINGLE);
break;
case 't':
+ match ("task", gfc_match_omp_task, ST_OMP_TASK);
+ match ("taskwait", gfc_match_omp_taskwait, ST_OMP_TASKWAIT);
match ("threadprivate", gfc_match_omp_threadprivate,
ST_OMP_THREADPRIVATE);
case 'w':
@@ -641,7 +644,7 @@ next_free (void)
for (i = 0; i < 5; i++, c = gfc_next_ascii_char ())
gcc_assert (c == "!$omp"[i]);
- gcc_assert (c == ' ');
+ gcc_assert (c == ' ' || c == '\t');
gfc_gobble_whitespace ();
return decode_omp_directive ();
}
@@ -870,7 +873,7 @@ next_statement (void)
case ST_POINTER_ASSIGNMENT: case ST_EXIT: case ST_CYCLE: \
case ST_ASSIGNMENT: case ST_ARITHMETIC_IF: case ST_WHERE: case ST_FORALL: \
case ST_LABEL_ASSIGNMENT: case ST_FLUSH: case ST_OMP_FLUSH: \
- case ST_OMP_BARRIER
+ case ST_OMP_BARRIER: case ST_OMP_TASKWAIT
/* Statements that mark other executable statements. */
@@ -879,7 +882,8 @@ next_statement (void)
case ST_OMP_PARALLEL_SECTIONS: case ST_OMP_SECTIONS: case ST_OMP_ORDERED: \
case ST_OMP_CRITICAL: case ST_OMP_MASTER: case ST_OMP_SINGLE: \
case ST_OMP_DO: case ST_OMP_PARALLEL_DO: case ST_OMP_ATOMIC: \
- case ST_OMP_WORKSHARE: case ST_OMP_PARALLEL_WORKSHARE
+ case ST_OMP_WORKSHARE: case ST_OMP_PARALLEL_WORKSHARE: \
+ case ST_OMP_TASK
/* Declaration statements */
@@ -1351,6 +1355,9 @@ gfc_ascii_statement (gfc_statement st)
case ST_OMP_END_SINGLE:
p = "!$OMP END SINGLE";
break;
+ case ST_OMP_END_TASK:
+ p = "!$OMP END TASK";
+ break;
case ST_OMP_END_WORKSHARE:
p = "!$OMP END WORKSHARE";
break;
@@ -1384,6 +1391,12 @@ gfc_ascii_statement (gfc_statement st)
case ST_OMP_SINGLE:
p = "!$OMP SINGLE";
break;
+ case ST_OMP_TASK:
+ p = "!$OMP TASK";
+ break;
+ case ST_OMP_TASKWAIT:
+ p = "!$OMP TASKWAIT";
+ break;
case ST_OMP_THREADPRIVATE:
p = "!$OMP THREADPRIVATE";
break;
@@ -2857,6 +2870,9 @@ parse_omp_structured_block (gfc_statement omp_st, bool workshare_stmts_only)
case ST_OMP_SINGLE:
omp_end_st = ST_OMP_END_SINGLE;
break;
+ case ST_OMP_TASK:
+ omp_end_st = ST_OMP_END_TASK;
+ break;
case ST_OMP_WORKSHARE:
omp_end_st = ST_OMP_END_WORKSHARE;
break;
@@ -3067,6 +3083,7 @@ parse_executable (gfc_statement st)
case ST_OMP_CRITICAL:
case ST_OMP_MASTER:
case ST_OMP_SINGLE:
+ case ST_OMP_TASK:
parse_omp_structured_block (st, false);
break;
diff --git a/gcc/fortran/resolve.c b/gcc/fortran/resolve.c
index c9809351c94..2787e293021 100644
--- a/gcc/fortran/resolve.c
+++ b/gcc/fortran/resolve.c
@@ -4670,8 +4670,8 @@ sym_in_expr (gfc_expr *e, gfc_symbol *sym, int *f ATTRIBUTE_UNUSED)
return false;
}
-static bool
-find_sym_in_expr (gfc_symbol *sym, gfc_expr *e)
+bool
+gfc_find_sym_in_expr (gfc_symbol *sym, gfc_expr *e)
{
return gfc_traverse_expr (e, sym, sym_in_expr, 0);
}
@@ -4868,8 +4868,10 @@ check_symbols:
if (sym->ts.type == BT_DERIVED)
continue;
- if ((ar->start[i] != NULL && find_sym_in_expr (sym, ar->start[i]))
- || (ar->end[i] != NULL && find_sym_in_expr (sym, ar->end[i])))
+ if ((ar->start[i] != NULL
+ && gfc_find_sym_in_expr (sym, ar->start[i]))
+ || (ar->end[i] != NULL
+ && gfc_find_sym_in_expr (sym, ar->end[i])))
{
gfc_error ("'%s' must not appear an the array specification at "
"%L in the same ALLOCATE statement where it is "
@@ -5982,6 +5984,8 @@ gfc_resolve_blocks (gfc_code *b, gfc_namespace *ns)
case EXEC_OMP_PARALLEL_WORKSHARE:
case EXEC_OMP_SECTIONS:
case EXEC_OMP_SINGLE:
+ case EXEC_OMP_TASK:
+ case EXEC_OMP_TASKWAIT:
case EXEC_OMP_WORKSHARE:
break;
@@ -6100,8 +6104,8 @@ resolve_ordinary_assign (gfc_code *code, gfc_namespace *ns)
{
for (n = 0; n < ref->u.ar.dimen; n++)
if (ref->u.ar.dimen_type[n] == DIMEN_VECTOR
- && find_sym_in_expr (lhs->symtree->n.sym,
- ref->u.ar.start[n]))
+ && gfc_find_sym_in_expr (lhs->symtree->n.sym,
+ ref->u.ar.start[n]))
ref->u.ar.start[n]
= gfc_get_parentheses (ref->u.ar.start[n]);
}
@@ -6176,6 +6180,7 @@ resolve_code (gfc_code *code, gfc_namespace *ns)
case EXEC_OMP_PARALLEL:
case EXEC_OMP_PARALLEL_DO:
case EXEC_OMP_PARALLEL_SECTIONS:
+ case EXEC_OMP_TASK:
omp_workshare_save = omp_workshare_flag;
omp_workshare_flag = 0;
gfc_resolve_omp_parallel_blocks (code, ns);
@@ -6418,6 +6423,7 @@ resolve_code (gfc_code *code, gfc_namespace *ns)
case EXEC_OMP_ORDERED:
case EXEC_OMP_SECTIONS:
case EXEC_OMP_SINGLE:
+ case EXEC_OMP_TASKWAIT:
case EXEC_OMP_WORKSHARE:
gfc_resolve_omp_directive (code, ns);
break;
@@ -6426,6 +6432,7 @@ resolve_code (gfc_code *code, gfc_namespace *ns)
case EXEC_OMP_PARALLEL_DO:
case EXEC_OMP_PARALLEL_SECTIONS:
case EXEC_OMP_PARALLEL_WORKSHARE:
+ case EXEC_OMP_TASK:
omp_workshare_save = omp_workshare_flag;
omp_workshare_flag = 0;
gfc_resolve_omp_directive (code, ns);
@@ -7893,11 +7900,12 @@ resolve_symbol (gfc_symbol *sym)
/* Get the attributes from the interface (now resolved). */
if (sym->ts.interface->attr.if_source || sym->ts.interface->attr.intrinsic)
{
- sym->ts.type = sym->ts.interface->ts.type;
- sym->ts.kind = sym->ts.interface->ts.kind;
- sym->attr.function = sym->ts.interface->attr.function;
- sym->attr.subroutine = sym->ts.interface->attr.subroutine;
- copy_formal_args (sym, sym->ts.interface);
+ gfc_symbol *ifc = sym->ts.interface;
+ sym->ts = ifc->ts;
+ sym->ts.interface = ifc;
+ sym->attr.function = ifc->attr.function;
+ sym->attr.subroutine = ifc->attr.subroutine;
+ copy_formal_args (sym, ifc);
}
else if (sym->ts.interface->name[0] != '\0')
{
diff --git a/gcc/fortran/scanner.c b/gcc/fortran/scanner.c
index 02d87b4f4ce..1b0eeca1e65 100644
--- a/gcc/fortran/scanner.c
+++ b/gcc/fortran/scanner.c
@@ -702,7 +702,8 @@ skip_free_comments (void)
if (((c = next_char ()) == 'm' || c == 'M')
&& ((c = next_char ()) == 'p' || c == 'P'))
{
- if ((c = next_char ()) == ' ' || continue_flag)
+ if ((c = next_char ()) == ' ' || c == '\t'
+ || continue_flag)
{
while (gfc_is_whitespace (c))
c = next_char ();
@@ -724,7 +725,7 @@ skip_free_comments (void)
next_char ();
c = next_char ();
}
- if (continue_flag || c == ' ')
+ if (continue_flag || c == ' ' || c == '\t')
{
gfc_current_locus = old_loc;
next_char ();
@@ -820,11 +821,11 @@ skip_fixed_comments (void)
c = next_char ();
if (c != '\n'
&& ((openmp_flag && continue_flag)
- || c == ' ' || c == '0'))
+ || c == ' ' || c == '\t' || c == '0'))
{
- c = next_char ();
- while (gfc_is_whitespace (c))
+ do
c = next_char ();
+ while (gfc_is_whitespace (c));
if (c != '\n' && c != '!')
{
/* Canonicalize to *$omp. */
@@ -843,6 +844,11 @@ skip_fixed_comments (void)
for (col = 3; col < 6; col++, c = next_char ())
if (c == ' ')
continue;
+ else if (c == '\t')
+ {
+ col = 6;
+ break;
+ }
else if (c < '0' || c > '9')
break;
else
@@ -850,7 +856,7 @@ skip_fixed_comments (void)
if (col == 6 && c != '\n'
&& ((continue_flag && !digit_seen)
- || c == ' ' || c == '0'))
+ || c == ' ' || c == '\t' || c == '0'))
{
gfc_current_locus = start;
start.nextc[0] = ' ';
diff --git a/gcc/fortran/st.c b/gcc/fortran/st.c
index 0f0e4813d28..abe7b94865c 100644
--- a/gcc/fortran/st.c
+++ b/gcc/fortran/st.c
@@ -171,6 +171,7 @@ gfc_free_statement (gfc_code *p)
case EXEC_OMP_PARALLEL_SECTIONS:
case EXEC_OMP_SECTIONS:
case EXEC_OMP_SINGLE:
+ case EXEC_OMP_TASK:
case EXEC_OMP_WORKSHARE:
case EXEC_OMP_PARALLEL_WORKSHARE:
gfc_free_omp_clauses (p->ext.omp_clauses);
@@ -189,6 +190,7 @@ gfc_free_statement (gfc_code *p)
case EXEC_OMP_MASTER:
case EXEC_OMP_ORDERED:
case EXEC_OMP_END_NOWAIT:
+ case EXEC_OMP_TASKWAIT:
break;
default:
diff --git a/gcc/fortran/trans-openmp.c b/gcc/fortran/trans-openmp.c
index c6c4baeca63..6f99800a014 100644
--- a/gcc/fortran/trans-openmp.c
+++ b/gcc/fortran/trans-openmp.c
@@ -84,6 +84,17 @@ gfc_omp_predetermined_sharing (tree decl)
if (GFC_DECL_CRAY_POINTEE (decl))
return OMP_CLAUSE_DEFAULT_PRIVATE;
+ /* Assumed-size arrays are predetermined to inherit sharing
+ attributes of the associated actual argument, which is shared
+ for all we care. */
+ if (TREE_CODE (decl) == PARM_DECL
+ && GFC_ARRAY_TYPE_P (TREE_TYPE (decl))
+ && GFC_TYPE_ARRAY_AKIND (TREE_TYPE (decl)) == GFC_ARRAY_UNKNOWN
+ && GFC_TYPE_ARRAY_UBOUND (TREE_TYPE (decl),
+ GFC_TYPE_ARRAY_RANK (TREE_TYPE (decl)) - 1)
+ == NULL)
+ return OMP_CLAUSE_DEFAULT_SHARED;
+
/* COMMON and EQUIVALENCE decls are shared. They
are only referenced through DECL_VALUE_EXPR of the variables
contained in them. If those are privatized, they will not be
@@ -98,27 +109,179 @@ gfc_omp_predetermined_sharing (tree decl)
}
+/* Return true if DECL in private clause needs
+ OMP_CLAUSE_PRIVATE_OUTER_REF on the private clause. */
+bool
+gfc_omp_private_outer_ref (tree decl)
+{
+ tree type = TREE_TYPE (decl);
+
+ if (GFC_DESCRIPTOR_TYPE_P (type)
+ && GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE)
+ return true;
+
+ return false;
+}
+
/* Return code to initialize DECL with its default constructor, or
NULL if there's nothing to do. */
tree
-gfc_omp_clause_default_ctor (tree clause ATTRIBUTE_UNUSED, tree decl)
+gfc_omp_clause_default_ctor (tree clause, tree decl, tree outer)
{
- tree type = TREE_TYPE (decl);
- stmtblock_t block;
+ tree type = TREE_TYPE (decl), rank, size, esize, ptr, cond, then_b, else_b;
+ stmtblock_t block, cond_block;
- if (! GFC_DESCRIPTOR_TYPE_P (type))
+ if (! GFC_DESCRIPTOR_TYPE_P (type)
+ || GFC_TYPE_ARRAY_AKIND (type) != GFC_ARRAY_ALLOCATABLE)
return NULL;
+ gcc_assert (outer != NULL);
+ gcc_assert (OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_PRIVATE
+ || OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_LASTPRIVATE);
+
/* Allocatable arrays in PRIVATE clauses need to be set to
- "not currently allocated" allocation status. */
- gfc_init_block (&block);
+ "not currently allocated" allocation status if outer
+ array is "not currently allocated", otherwise should be allocated. */
+ gfc_start_block (&block);
+
+ gfc_init_block (&cond_block);
+
+ gfc_add_modify_expr (&cond_block, decl, outer);
+ rank = gfc_rank_cst[GFC_TYPE_ARRAY_RANK (type) - 1];
+ size = gfc_conv_descriptor_ubound (decl, rank);
+ size = fold_build2 (MINUS_EXPR, gfc_array_index_type, size,
+ gfc_conv_descriptor_lbound (decl, rank));
+ size = fold_build2 (PLUS_EXPR, gfc_array_index_type, size,
+ gfc_index_one_node);
+ if (GFC_TYPE_ARRAY_RANK (type) > 1)
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size,
+ gfc_conv_descriptor_stride (decl, rank));
+ esize = fold_convert (gfc_array_index_type,
+ TYPE_SIZE_UNIT (gfc_get_element_type (type)));
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, esize);
+ size = gfc_evaluate_now (fold_convert (size_type_node, size), &cond_block);
+ ptr = gfc_allocate_array_with_status (&cond_block,
+ build_int_cst (pvoid_type_node, 0),
+ size, NULL);
+ gfc_conv_descriptor_data_set_tuples (&cond_block, decl, ptr);
+ then_b = gfc_finish_block (&cond_block);
+
+ gfc_init_block (&cond_block);
+ gfc_conv_descriptor_data_set_tuples (&cond_block, decl, null_pointer_node);
+ else_b = gfc_finish_block (&cond_block);
+
+ cond = fold_build2 (NE_EXPR, boolean_type_node,
+ fold_convert (pvoid_type_node,
+ gfc_conv_descriptor_data_get (outer)),
+ null_pointer_node);
+ gfc_add_expr_to_block (&block, build3 (COND_EXPR, void_type_node,
+ cond, then_b, else_b));
- gfc_conv_descriptor_data_set_tuples (&block, decl, null_pointer_node);
+ return gfc_finish_block (&block);
+}
+
+/* Build and return code for a copy constructor from SRC to DEST. */
+
+tree
+gfc_omp_clause_copy_ctor (tree clause, tree dest, tree src)
+{
+ tree type = TREE_TYPE (dest), ptr, size, esize, rank, call;
+ stmtblock_t block;
+
+ if (! GFC_DESCRIPTOR_TYPE_P (type)
+ || GFC_TYPE_ARRAY_AKIND (type) != GFC_ARRAY_ALLOCATABLE)
+ return build_gimple_modify_stmt (dest, src);
+
+ gcc_assert (OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_FIRSTPRIVATE);
+
+ /* Allocatable arrays in FIRSTPRIVATE clauses need to be allocated
+ and copied from SRC. */
+ gfc_start_block (&block);
+
+ gfc_add_modify_expr (&block, dest, src);
+ rank = gfc_rank_cst[GFC_TYPE_ARRAY_RANK (type) - 1];
+ size = gfc_conv_descriptor_ubound (dest, rank);
+ size = fold_build2 (MINUS_EXPR, gfc_array_index_type, size,
+ gfc_conv_descriptor_lbound (dest, rank));
+ size = fold_build2 (PLUS_EXPR, gfc_array_index_type, size,
+ gfc_index_one_node);
+ if (GFC_TYPE_ARRAY_RANK (type) > 1)
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size,
+ gfc_conv_descriptor_stride (dest, rank));
+ esize = fold_convert (gfc_array_index_type,
+ TYPE_SIZE_UNIT (gfc_get_element_type (type)));
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, esize);
+ size = gfc_evaluate_now (fold_convert (size_type_node, size), &block);
+ ptr = gfc_allocate_array_with_status (&block,
+ build_int_cst (pvoid_type_node, 0),
+ size, NULL);
+ gfc_conv_descriptor_data_set_tuples (&block, dest, ptr);
+ call = build_call_expr (built_in_decls[BUILT_IN_MEMCPY], 3, ptr,
+ fold_convert (pvoid_type_node,
+ gfc_conv_descriptor_data_get (src)),
+ size);
+ gfc_add_expr_to_block (&block, fold_convert (void_type_node, call));
return gfc_finish_block (&block);
}
+/* Similarly, except use an assignment operator instead. */
+
+tree
+gfc_omp_clause_assign_op (tree clause ATTRIBUTE_UNUSED, tree dest, tree src)
+{
+ tree type = TREE_TYPE (dest), rank, size, esize, call;
+ stmtblock_t block;
+
+ if (! GFC_DESCRIPTOR_TYPE_P (type)
+ || GFC_TYPE_ARRAY_AKIND (type) != GFC_ARRAY_ALLOCATABLE)
+ return build_gimple_modify_stmt (dest, src);
+
+ /* Handle copying allocatable arrays. */
+ gfc_start_block (&block);
+
+ rank = gfc_rank_cst[GFC_TYPE_ARRAY_RANK (type) - 1];
+ size = gfc_conv_descriptor_ubound (dest, rank);
+ size = fold_build2 (MINUS_EXPR, gfc_array_index_type, size,
+ gfc_conv_descriptor_lbound (dest, rank));
+ size = fold_build2 (PLUS_EXPR, gfc_array_index_type, size,
+ gfc_index_one_node);
+ if (GFC_TYPE_ARRAY_RANK (type) > 1)
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size,
+ gfc_conv_descriptor_stride (dest, rank));
+ esize = fold_convert (gfc_array_index_type,
+ TYPE_SIZE_UNIT (gfc_get_element_type (type)));
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, esize);
+ size = gfc_evaluate_now (fold_convert (size_type_node, size), &block);
+ call = build_call_expr (built_in_decls[BUILT_IN_MEMCPY], 3,
+ fold_convert (pvoid_type_node,
+ gfc_conv_descriptor_data_get (dest)),
+ fold_convert (pvoid_type_node,
+ gfc_conv_descriptor_data_get (src)),
+ size);
+ gfc_add_expr_to_block (&block, fold_convert (void_type_node, call));
+
+ return gfc_finish_block (&block);
+}
+
+/* Build and return code destructing DECL. Return NULL if nothing
+ to be done. */
+
+tree
+gfc_omp_clause_dtor (tree clause ATTRIBUTE_UNUSED, tree decl)
+{
+ tree type = TREE_TYPE (decl);
+
+ if (! GFC_DESCRIPTOR_TYPE_P (type)
+ || GFC_TYPE_ARRAY_AKIND (type) != GFC_ARRAY_ALLOCATABLE)
+ return NULL;
+
+ /* Allocatable arrays in FIRSTPRIVATE/LASTPRIVATE etc. clauses need
+ to be deallocated if they were allocated. */
+ return gfc_trans_dealloc_allocated (decl);
+}
+
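+
+   All three hooks above size the private copy from the outer
+   variable's descriptor in the same way.  A small arithmetic sketch,
+   illustrative only (descriptor_bytes and its parameters are invented
+   names, not gfortran API):
+
+     #include <stddef.h>
+
+     /* Bytes to allocate for a copy of an allocatable array, using the
+        bounds of the last dimension (index rank - 1) and mirroring the
+        ubound - lbound + 1, * stride, * element-size sequence above.  */
+     static size_t
+     descriptor_bytes (long lbound, long ubound, long stride,
+                       size_t elem_size, int rank)
+     {
+       long extent = ubound - lbound + 1;
+       long total = rank > 1 ? extent * stride : extent;
+       return (size_t) total * elem_size;
+     }
+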
/* Return true if DECL's DECL_VALUE_EXPR (if any) should be
disregarded in OpenMP construct, because it is going to be
@@ -429,7 +592,39 @@ gfc_trans_omp_array_reduction (tree c, gfc_symbol *sym, locus where)
/* Create the init statement list. */
pushlevel (0);
- stmt = gfc_trans_assignment (e1, e2, false);
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (decl))
+ && GFC_TYPE_ARRAY_AKIND (TREE_TYPE (decl)) == GFC_ARRAY_ALLOCATABLE)
+ {
+ /* If decl is an allocatable array, it needs to be allocated
+ with the same bounds as the outer var. */
+ tree type = TREE_TYPE (decl), rank, size, esize, ptr;
+ stmtblock_t block;
+
+ gfc_start_block (&block);
+
+ gfc_add_modify_expr (&block, decl, outer_sym.backend_decl);
+ rank = gfc_rank_cst[GFC_TYPE_ARRAY_RANK (type) - 1];
+ size = gfc_conv_descriptor_ubound (decl, rank);
+ size = fold_build2 (MINUS_EXPR, gfc_array_index_type, size,
+ gfc_conv_descriptor_lbound (decl, rank));
+ size = fold_build2 (PLUS_EXPR, gfc_array_index_type, size,
+ gfc_index_one_node);
+ if (GFC_TYPE_ARRAY_RANK (type) > 1)
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size,
+ gfc_conv_descriptor_stride (decl, rank));
+ esize = fold_convert (gfc_array_index_type,
+ TYPE_SIZE_UNIT (gfc_get_element_type (type)));
+ size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, esize);
+ size = gfc_evaluate_now (fold_convert (size_type_node, size), &block);
+ ptr = gfc_allocate_array_with_status (&block,
+ build_int_cst (pvoid_type_node, 0),
+ size, NULL);
+ gfc_conv_descriptor_data_set_tuples (&block, decl, ptr);
+ gfc_add_expr_to_block (&block, gfc_trans_assignment (e1, e2, false));
+ stmt = gfc_finish_block (&block);
+ }
+ else
+ stmt = gfc_trans_assignment (e1, e2, false);
if (TREE_CODE (stmt) != BIND_EXPR)
stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0, 0));
else
@@ -438,7 +633,20 @@ gfc_trans_omp_array_reduction (tree c, gfc_symbol *sym, locus where)
/* Create the merge statement list. */
pushlevel (0);
- stmt = gfc_trans_assignment (e3, e4, false);
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (decl))
+ && GFC_TYPE_ARRAY_AKIND (TREE_TYPE (decl)) == GFC_ARRAY_ALLOCATABLE)
+ {
+ /* If decl is an allocatable array, it needs to be deallocated
+ afterwards. */
+ stmtblock_t block;
+
+ gfc_start_block (&block);
+ gfc_add_expr_to_block (&block, gfc_trans_assignment (e3, e4, false));
+ gfc_add_expr_to_block (&block, gfc_trans_dealloc_allocated (decl));
+ stmt = gfc_finish_block (&block);
+ }
+ else
+ stmt = gfc_trans_assignment (e3, e4, false);
if (TREE_CODE (stmt) != BIND_EXPR)
stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0, 0));
else
@@ -639,6 +847,9 @@ gfc_trans_omp_clauses (stmtblock_t *block, gfc_omp_clauses *clauses,
case OMP_SCHED_RUNTIME:
OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
break;
+ case OMP_SCHED_AUTO:
+ OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO;
+ break;
default:
gcc_unreachable ();
}
@@ -659,6 +870,9 @@ gfc_trans_omp_clauses (stmtblock_t *block, gfc_omp_clauses *clauses,
case OMP_DEFAULT_PRIVATE:
OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_PRIVATE;
break;
+ case OMP_DEFAULT_FIRSTPRIVATE:
+ OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_FIRSTPRIVATE;
+ break;
default:
gcc_unreachable ();
}
@@ -677,6 +891,19 @@ gfc_trans_omp_clauses (stmtblock_t *block, gfc_omp_clauses *clauses,
omp_clauses = gfc_trans_add_clause (c, omp_clauses);
}
+ if (clauses->untied)
+ {
+ c = build_omp_clause (OMP_CLAUSE_UNTIED);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->collapse)
+ {
+ c = build_omp_clause (OMP_CLAUSE_COLLAPSE);
+ OMP_CLAUSE_COLLAPSE_EXPR (c) = build_int_cst (NULL, clauses->collapse);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
return omp_clauses;
}
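
Taken together these hunks teach the clause translator the OpenMP 3.0 additions: schedule(auto), default(firstprivate) (a Fortran-only spelling; C and C++ allow only shared and none there), untied on tasks, and collapse(n) on loops. A C-syntax illustration of directives that would exercise the new clause codes; this file handles the Fortran !$omp spellings, but the OMP_CLAUSE_* tree codes built above are shared across front ends:

    /* Illustration only, written in C for brevity.  */
    void
    example (int n, int m, double a[n][m])
    {
    #pragma omp parallel
      {
    #pragma omp for schedule(auto) collapse(2)  /* OMP_CLAUSE_SCHEDULE_AUTO, OMP_CLAUSE_COLLAPSE */
        for (int i = 0; i < n; i++)
          for (int j = 0; j < m; j++)
            a[i][j] = 0.0;

    #pragma omp single
    #pragma omp task untied                     /* OMP_CLAUSE_UNTIED */
        a[0][0] = 1.0;
      }
    }
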
@@ -893,20 +1120,28 @@ gfc_trans_omp_critical (gfc_code *code)
static tree
gfc_trans_omp_do (gfc_code *code, stmtblock_t *pblock,
- gfc_omp_clauses *do_clauses)
+ gfc_omp_clauses *do_clauses, tree par_clauses)
{
gfc_se se;
tree dovar, stmt, from, to, step, type, init, cond, incr;
tree count = NULL_TREE, cycle_label, tmp, omp_clauses;
stmtblock_t block;
stmtblock_t body;
- int simple = 0;
- bool dovar_found = false;
gfc_omp_clauses *clauses = code->ext.omp_clauses;
+ gfc_code *outermost;
+ int i, collapse = clauses->collapse;
+ tree dovar_init = NULL_TREE;
- code = code->block->next;
+ if (collapse <= 0)
+ collapse = 1;
+
+ outermost = code = code->block->next;
gcc_assert (code->op == EXEC_DO);
+ init = make_tree_vec (collapse);
+ cond = make_tree_vec (collapse);
+ incr = make_tree_vec (collapse);
+
if (pblock == NULL)
{
gfc_start_block (&block);
@@ -914,107 +1149,168 @@ gfc_trans_omp_do (gfc_code *code, stmtblock_t *pblock,
}
omp_clauses = gfc_trans_omp_clauses (pblock, do_clauses, code->loc);
- if (clauses)
- {
- gfc_namelist *n;
- for (n = clauses->lists[OMP_LIST_LASTPRIVATE]; n != NULL; n = n->next)
- if (code->ext.iterator->var->symtree->n.sym == n->sym)
- break;
- if (n == NULL)
- for (n = clauses->lists[OMP_LIST_PRIVATE]; n != NULL; n = n->next)
- if (code->ext.iterator->var->symtree->n.sym == n->sym)
- break;
- if (n != NULL)
- dovar_found = true;
- }
- /* Evaluate all the expressions in the iterator. */
- gfc_init_se (&se, NULL);
- gfc_conv_expr_lhs (&se, code->ext.iterator->var);
- gfc_add_block_to_block (pblock, &se.pre);
- dovar = se.expr;
- type = TREE_TYPE (dovar);
- gcc_assert (TREE_CODE (type) == INTEGER_TYPE);
-
- gfc_init_se (&se, NULL);
- gfc_conv_expr_val (&se, code->ext.iterator->start);
- gfc_add_block_to_block (pblock, &se.pre);
- from = gfc_evaluate_now (se.expr, pblock);
-
- gfc_init_se (&se, NULL);
- gfc_conv_expr_val (&se, code->ext.iterator->end);
- gfc_add_block_to_block (pblock, &se.pre);
- to = gfc_evaluate_now (se.expr, pblock);
-
- gfc_init_se (&se, NULL);
- gfc_conv_expr_val (&se, code->ext.iterator->step);
- gfc_add_block_to_block (pblock, &se.pre);
- step = gfc_evaluate_now (se.expr, pblock);
-
- /* Special case simple loops. */
- if (integer_onep (step))
- simple = 1;
- else if (tree_int_cst_equal (step, integer_minus_one_node))
- simple = -1;
-
- /* Loop body. */
- if (simple)
+ for (i = 0; i < collapse; i++)
{
- init = build2_v (GIMPLE_MODIFY_STMT, dovar, from);
- cond = fold_build2 (simple > 0 ? LE_EXPR : GE_EXPR, boolean_type_node,
- dovar, to);
- incr = fold_build2 (PLUS_EXPR, type, dovar, step);
- incr = fold_build2 (GIMPLE_MODIFY_STMT, type, dovar, incr);
- if (pblock != &block)
+ int simple = 0;
+ int dovar_found = 0;
+
+ if (clauses)
{
- pushlevel (0);
- gfc_start_block (&block);
+ gfc_namelist *n;
+ for (n = clauses->lists[OMP_LIST_LASTPRIVATE]; n != NULL;
+ n = n->next)
+ if (code->ext.iterator->var->symtree->n.sym == n->sym)
+ break;
+ if (n != NULL)
+ dovar_found = 1;
+ else if (n == NULL)
+ for (n = clauses->lists[OMP_LIST_PRIVATE]; n != NULL; n = n->next)
+ if (code->ext.iterator->var->symtree->n.sym == n->sym)
+ break;
+ if (n != NULL)
+ dovar_found++;
}
- gfc_start_block (&body);
- }
- else
- {
- /* STEP is not 1 or -1. Use:
- for (count = 0; count < (to + step - from) / step; count++)
- {
- dovar = from + count * step;
- body;
- cycle_label:;
- } */
- tmp = fold_build2 (MINUS_EXPR, type, step, from);
- tmp = fold_build2 (PLUS_EXPR, type, to, tmp);
- tmp = fold_build2 (TRUNC_DIV_EXPR, type, tmp, step);
- tmp = gfc_evaluate_now (tmp, pblock);
- count = gfc_create_var (type, "count");
- init = build2_v (GIMPLE_MODIFY_STMT, count, build_int_cst (type, 0));
- cond = fold_build2 (LT_EXPR, boolean_type_node, count, tmp);
- incr = fold_build2 (PLUS_EXPR, type, count, build_int_cst (type, 1));
- incr = fold_build2 (GIMPLE_MODIFY_STMT, type, count, incr);
-
- if (pblock != &block)
+
+ /* Evaluate all the expressions in the iterator. */
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_lhs (&se, code->ext.iterator->var);
+ gfc_add_block_to_block (pblock, &se.pre);
+ dovar = se.expr;
+ type = TREE_TYPE (dovar);
+ gcc_assert (TREE_CODE (type) == INTEGER_TYPE);
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_val (&se, code->ext.iterator->start);
+ gfc_add_block_to_block (pblock, &se.pre);
+ from = gfc_evaluate_now (se.expr, pblock);
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_val (&se, code->ext.iterator->end);
+ gfc_add_block_to_block (pblock, &se.pre);
+ to = gfc_evaluate_now (se.expr, pblock);
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_val (&se, code->ext.iterator->step);
+ gfc_add_block_to_block (pblock, &se.pre);
+ step = gfc_evaluate_now (se.expr, pblock);
+
+ /* Special case simple loops. */
+ if (integer_onep (step))
+ simple = 1;
+ else if (tree_int_cst_equal (step, integer_minus_one_node))
+ simple = -1;
+
+ /* Loop body. */
+ if (simple)
{
- pushlevel (0);
- gfc_start_block (&block);
+ TREE_VEC_ELT (init, i) = build2_v (GIMPLE_MODIFY_STMT, dovar, from);
+ TREE_VEC_ELT (cond, i) = fold_build2 (simple > 0 ? LE_EXPR : GE_EXPR,
+ boolean_type_node, dovar, to);
+ TREE_VEC_ELT (incr, i) = fold_build2 (PLUS_EXPR, type, dovar, step);
+ TREE_VEC_ELT (incr, i) = fold_build2 (GIMPLE_MODIFY_STMT, type, dovar,
+ TREE_VEC_ELT (incr, i));
+ }
+ else
+ {
+ /* STEP is not 1 or -1. Use:
+ for (count = 0; count < (to + step - from) / step; count++)
+ {
+ dovar = from + count * step;
+ body;
+ cycle_label:;
+ } */
+ tmp = fold_build2 (MINUS_EXPR, type, step, from);
+ tmp = fold_build2 (PLUS_EXPR, type, to, tmp);
+ tmp = fold_build2 (TRUNC_DIV_EXPR, type, tmp, step);
+ tmp = gfc_evaluate_now (tmp, pblock);
+ count = gfc_create_var (type, "count");
+ TREE_VEC_ELT (init, i) = build2_v (GIMPLE_MODIFY_STMT, count,
+ build_int_cst (type, 0));
+ TREE_VEC_ELT (cond, i) = fold_build2 (LT_EXPR, boolean_type_node,
+ count, tmp);
+ TREE_VEC_ELT (incr, i) = fold_build2 (PLUS_EXPR, type, count,
+ build_int_cst (type, 1));
+ TREE_VEC_ELT (incr, i) = fold_build2 (GIMPLE_MODIFY_STMT, type,
+ count, TREE_VEC_ELT (incr, i));
+
+ /* Initialize DOVAR. */
+ tmp = fold_build2 (MULT_EXPR, type, count, step);
+ tmp = fold_build2 (PLUS_EXPR, type, from, tmp);
+ dovar_init = tree_cons (dovar, tmp, dovar_init);
}
- gfc_start_block (&body);
- /* Initialize DOVAR. */
- tmp = fold_build2 (MULT_EXPR, type, count, step);
- tmp = fold_build2 (PLUS_EXPR, type, from, tmp);
- gfc_add_modify_stmt (&body, dovar, tmp);
+ if (!dovar_found)
+ {
+ tmp = build_omp_clause (OMP_CLAUSE_PRIVATE);
+ OMP_CLAUSE_DECL (tmp) = dovar;
+ omp_clauses = gfc_trans_add_clause (tmp, omp_clauses);
+ }
+ else if (dovar_found == 2)
+ {
+ tree c = NULL;
+
+ tmp = NULL;
+ if (!simple)
+ {
+ /* If dovar is lastprivate, but a different counter is used,
+ dovar += step needs to be added to
+ OMP_CLAUSE_LASTPRIVATE_STMT, otherwise the copied dovar
+ would hold the value on entry to the last iteration rather
+ than the value after the final iterator increment. */
+ tmp = gfc_evaluate_now (step, pblock);
+ tmp = fold_build2 (PLUS_EXPR, type, dovar, tmp);
+ tmp = fold_build2 (GIMPLE_MODIFY_STMT, type, dovar, tmp);
+ for (c = omp_clauses; c ; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
+ && OMP_CLAUSE_DECL (c) == dovar)
+ {
+ OMP_CLAUSE_LASTPRIVATE_STMT (c) = tmp;
+ break;
+ }
+ }
+ if (c == NULL && par_clauses != NULL)
+ {
+ for (c = par_clauses; c ; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
+ && OMP_CLAUSE_DECL (c) == dovar)
+ {
+ tree l = build_omp_clause (OMP_CLAUSE_LASTPRIVATE);
+ OMP_CLAUSE_DECL (l) = dovar;
+ OMP_CLAUSE_CHAIN (l) = omp_clauses;
+ OMP_CLAUSE_LASTPRIVATE_STMT (l) = tmp;
+ omp_clauses = l;
+ OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_SHARED);
+ break;
+ }
+ }
+ gcc_assert (simple || c != NULL);
+ }
+ if (!simple)
+ {
+ tmp = build_omp_clause (OMP_CLAUSE_PRIVATE);
+ OMP_CLAUSE_DECL (tmp) = count;
+ omp_clauses = gfc_trans_add_clause (tmp, omp_clauses);
+ }
+
+ if (i + 1 < collapse)
+ code = code->block->next;
}
- if (!dovar_found)
+ if (pblock != &block)
{
- tmp = build_omp_clause (OMP_CLAUSE_PRIVATE);
- OMP_CLAUSE_DECL (tmp) = dovar;
- omp_clauses = gfc_trans_add_clause (tmp, omp_clauses);
+ pushlevel (0);
+ gfc_start_block (&block);
}
- if (!simple)
+
+ gfc_start_block (&body);
+
+ dovar_init = nreverse (dovar_init);
+ while (dovar_init)
{
- tmp = build_omp_clause (OMP_CLAUSE_PRIVATE);
- OMP_CLAUSE_DECL (tmp) = count;
- omp_clauses = gfc_trans_add_clause (tmp, omp_clauses);
+ gfc_add_modify_stmt (&body, TREE_PURPOSE (dovar_init),
+ TREE_VALUE (dovar_init));
+ dovar_init = TREE_CHAIN (dovar_init);
}
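
The dovar_init list gathers the `dovar = from + count * step' assignments from every collapsed level and replays them at the top of the shared loop body. The OMP_CLAUSE_LASTPRIVATE_STMT fixup built earlier is needed because of Fortran DO semantics: after a completed loop the DO variable must hold the value produced by one final increment, which the count-driven form never assigns inside the body. A small worked example with hypothetical bounds, not taken from the patch:

    /* Stand-alone illustration: DO dovar = 1, 10, 3.  */
    int
    last_do_value (void)
    {
      int dovar = 0;
      int niters = (10 + 3 - 1) / 3;    /* 4 iterations: dovar = 1, 4, 7, 10 */
      for (int count = 0; count < niters; count++)
        dovar = 1 + count * 3;
      dovar += 3;                       /* the lastprivate epilogue */
      return dovar;                     /* 13, as after a Fortran DO I = 1, 10, 3 */
    }
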
/* Cycle statement is implemented with a goto. Exit statement must not be
@@ -1107,9 +1403,11 @@ gfc_trans_omp_parallel_do (gfc_code *code)
do_clauses.sched_kind = parallel_clauses.sched_kind;
do_clauses.chunk_size = parallel_clauses.chunk_size;
do_clauses.ordered = parallel_clauses.ordered;
+ do_clauses.collapse = parallel_clauses.collapse;
parallel_clauses.sched_kind = OMP_SCHED_NONE;
parallel_clauses.chunk_size = NULL;
parallel_clauses.ordered = false;
+ parallel_clauses.collapse = 0;
omp_clauses = gfc_trans_omp_clauses (&block, &parallel_clauses,
code->loc);
}
@@ -1118,7 +1416,7 @@ gfc_trans_omp_parallel_do (gfc_code *code)
pblock = &block;
else
pushlevel (0);
- stmt = gfc_trans_omp_do (code, pblock, &do_clauses);
+ stmt = gfc_trans_omp_do (code, pblock, &do_clauses, omp_clauses);
if (TREE_CODE (stmt) != BIND_EXPR)
stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0, 0));
else
@@ -1221,6 +1519,31 @@ gfc_trans_omp_single (gfc_code *code, gfc_omp_clauses *clauses)
}
static tree
+gfc_trans_omp_task (gfc_code *code)
+{
+ stmtblock_t block;
+ tree stmt, body_stmt, omp_clauses;
+
+ gfc_start_block (&block);
+ omp_clauses = gfc_trans_omp_clauses (&block, code->ext.omp_clauses,
+ code->loc);
+ body_stmt = gfc_trans_omp_code (code->block->next, true);
+ stmt = make_node (OMP_TASK);
+ TREE_TYPE (stmt) = void_type_node;
+ OMP_TASK_CLAUSES (stmt) = omp_clauses;
+ OMP_TASK_BODY (stmt) = body_stmt;
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_taskwait (void)
+{
+ tree decl = built_in_decls [BUILT_IN_GOMP_TASKWAIT];
+ return build_call_expr (decl, 0);
+}
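
The new EXEC_OMP_TASK translation builds an OMP_TASK node for later expansion, and EXEC_OMP_TASKWAIT becomes a direct call to the taskwait builtin. The libgomp entry points these are expected to reach, with prototypes reconstructed from the builtin types added below in types.def; treat them as a sketch rather than the authoritative libgomp API:

    /* Sketch of the assumed libgomp prototypes behind the builtins.  */
    extern void GOMP_task (void (*fn) (void *), void *data,
                           void (*cpyfn) (void *, void *),
                           long arg_size, long arg_align,
                           _Bool if_clause, unsigned flags);
    extern void GOMP_taskwait (void);
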
+
+static tree
gfc_trans_omp_workshare (gfc_code *code, gfc_omp_clauses *clauses)
{
/* XXX */
@@ -1239,7 +1562,7 @@ gfc_trans_omp_directive (gfc_code *code)
case EXEC_OMP_CRITICAL:
return gfc_trans_omp_critical (code);
case EXEC_OMP_DO:
- return gfc_trans_omp_do (code, NULL, code->ext.omp_clauses);
+ return gfc_trans_omp_do (code, NULL, code->ext.omp_clauses, NULL);
case EXEC_OMP_FLUSH:
return gfc_trans_omp_flush ();
case EXEC_OMP_MASTER:
@@ -1258,6 +1581,10 @@ gfc_trans_omp_directive (gfc_code *code)
return gfc_trans_omp_sections (code, code->ext.omp_clauses);
case EXEC_OMP_SINGLE:
return gfc_trans_omp_single (code, code->ext.omp_clauses);
+ case EXEC_OMP_TASK:
+ return gfc_trans_omp_task (code);
+ case EXEC_OMP_TASKWAIT:
+ return gfc_trans_omp_taskwait ();
case EXEC_OMP_WORKSHARE:
return gfc_trans_omp_workshare (code, code->ext.omp_clauses);
default:
diff --git a/gcc/fortran/trans.c b/gcc/fortran/trans.c
index f303128a28d..51e0cdd6aad 100644
--- a/gcc/fortran/trans.c
+++ b/gcc/fortran/trans.c
@@ -1135,6 +1135,8 @@ gfc_trans_code (gfc_code * code)
case EXEC_OMP_PARALLEL_WORKSHARE:
case EXEC_OMP_SECTIONS:
case EXEC_OMP_SINGLE:
+ case EXEC_OMP_TASK:
+ case EXEC_OMP_TASKWAIT:
case EXEC_OMP_WORKSHARE:
res = gfc_trans_omp_directive (code);
break;
diff --git a/gcc/fortran/trans.h b/gcc/fortran/trans.h
index d0ce2354120..3a07d712791 100644
--- a/gcc/fortran/trans.h
+++ b/gcc/fortran/trans.h
@@ -493,9 +493,13 @@ bool gfc_get_array_descr_info (const_tree, struct array_descr_info *);
/* In trans-openmp.c */
bool gfc_omp_privatize_by_reference (const_tree);
enum omp_clause_default_kind gfc_omp_predetermined_sharing (tree);
-tree gfc_omp_clause_default_ctor (tree, tree);
+tree gfc_omp_clause_default_ctor (tree, tree, tree);
+tree gfc_omp_clause_copy_ctor (tree, tree, tree);
+tree gfc_omp_clause_assign_op (tree, tree, tree);
+tree gfc_omp_clause_dtor (tree, tree);
bool gfc_omp_disregard_value_expr (tree, bool);
bool gfc_omp_private_debug_clause (tree, bool);
+bool gfc_omp_private_outer_ref (tree);
struct gimplify_omp_ctx;
void gfc_omp_firstprivatize_type_sizes (struct gimplify_omp_ctx *, tree);
diff --git a/gcc/fortran/types.def b/gcc/fortran/types.def
index 22dcafd2397..5bcdb5261d9 100644
--- a/gcc/fortran/types.def
+++ b/gcc/fortran/types.def
@@ -50,10 +50,12 @@ along with GCC; see the file COPYING3. If not see
the type pointed to. */
DEF_PRIMITIVE_TYPE (BT_VOID, void_type_node)
-DEF_PRIMITIVE_TYPE (BT_BOOL, boolean_type_node)
+DEF_PRIMITIVE_TYPE (BT_BOOL,
+ (*lang_hooks.types.type_for_size) (BOOL_TYPE_SIZE, 1))
DEF_PRIMITIVE_TYPE (BT_INT, integer_type_node)
DEF_PRIMITIVE_TYPE (BT_UINT, unsigned_type_node)
DEF_PRIMITIVE_TYPE (BT_LONG, long_integer_type_node)
+DEF_PRIMITIVE_TYPE (BT_ULONGLONG, long_long_unsigned_type_node)
DEF_PRIMITIVE_TYPE (BT_WORD, (*lang_hooks.types.type_for_mode) (word_mode, 1))
DEF_PRIMITIVE_TYPE (BT_I1, builtin_type_for_size (BITS_PER_UNIT*1, 1))
@@ -70,6 +72,7 @@ DEF_PRIMITIVE_TYPE (BT_VOLATILE_PTR,
TYPE_QUAL_VOLATILE)))
DEF_POINTER_TYPE (BT_PTR_LONG, BT_LONG)
+DEF_POINTER_TYPE (BT_PTR_ULONGLONG, BT_ULONGLONG)
DEF_POINTER_TYPE (BT_PTR_PTR, BT_PTR)
DEF_FUNCTION_TYPE_0 (BT_FN_BOOL, BT_BOOL)
DEF_FUNCTION_TYPE_0 (BT_FN_PTR, BT_PTR)
@@ -87,11 +90,16 @@ DEF_POINTER_TYPE (BT_PTR_FN_VOID_PTR, BT_FN_VOID_PTR)
DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_LONGPTR_LONGPTR,
BT_BOOL, BT_PTR_LONG, BT_PTR_LONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR,
+ BT_BOOL, BT_PTR_ULONGLONG, BT_PTR_ULONGLONG)
DEF_FUNCTION_TYPE_2 (BT_FN_I1_VPTR_I1, BT_I1, BT_VOLATILE_PTR, BT_I1)
DEF_FUNCTION_TYPE_2 (BT_FN_I2_VPTR_I2, BT_I2, BT_VOLATILE_PTR, BT_I2)
DEF_FUNCTION_TYPE_2 (BT_FN_I4_VPTR_I4, BT_I4, BT_VOLATILE_PTR, BT_I4)
DEF_FUNCTION_TYPE_2 (BT_FN_I8_VPTR_I8, BT_I8, BT_VOLATILE_PTR, BT_I8)
DEF_FUNCTION_TYPE_2 (BT_FN_I16_VPTR_I16, BT_I16, BT_VOLATILE_PTR, BT_I16)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_PTR, BT_VOID, BT_PTR, BT_PTR)
+
+DEF_POINTER_TYPE (BT_PTR_FN_VOID_PTR_PTR, BT_FN_VOID_PTR_PTR)
DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_VPTR_I1_I1, BT_BOOL, BT_VOLATILE_PTR,
BT_I1, BT_I1)
@@ -127,9 +135,20 @@ DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR,
DEF_FUNCTION_TYPE_6 (BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG,
BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR, BT_UINT,
BT_LONG, BT_LONG, BT_LONG)
+DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ BT_BOOL, BT_BOOL, BT_ULONGLONG, BT_ULONGLONG,
+ BT_ULONGLONG, BT_PTR_ULONGLONG, BT_PTR_ULONGLONG)
DEF_FUNCTION_TYPE_7 (BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG,
BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR, BT_UINT,
BT_LONG, BT_LONG, BT_LONG, BT_LONG)
+DEF_FUNCTION_TYPE_7 (BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT,
+ BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR,
+ BT_PTR_FN_VOID_PTR_PTR, BT_LONG, BT_LONG,
+ BT_BOOL, BT_UINT)
+DEF_FUNCTION_TYPE_7 (BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ BT_BOOL, BT_BOOL, BT_ULONGLONG, BT_ULONGLONG,
+ BT_ULONGLONG, BT_ULONGLONG,
+ BT_PTR_ULONGLONG, BT_PTR_ULONGLONG)
DEF_FUNCTION_TYPE_VAR_0 (BT_FN_VOID_VAR, BT_VOID)
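
These types.def additions back the new builtins: BT_BOOL is now derived from BOOL_TYPE_SIZE, presumably so the Fortran-side builtin declarations agree with the C _Bool used by libgomp; BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT matches the GOMP_task prototype sketched earlier; and the ULONGLONG variants presumably describe the unsigned-long-long loop-iteration entry points, along the lines of:

    /* Sketch of presumed libgomp loop routines matching the ULL types.  */
    extern _Bool GOMP_loop_ull_runtime_start (_Bool up,
                                              unsigned long long start,
                                              unsigned long long end,
                                              unsigned long long incr,
                                              unsigned long long *istart,
                                              unsigned long long *iend);
    extern _Bool GOMP_loop_ull_runtime_next (unsigned long long *istart,
                                             unsigned long long *iend);
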
diff --git a/gcc/function.c b/gcc/function.c
index 30dd9f302a0..928107e35c2 100644
--- a/gcc/function.c
+++ b/gcc/function.c
@@ -2009,15 +2009,15 @@ assign_parm_find_data_types (struct assign_parm_data_all *all, tree parm,
memset (data, 0, sizeof (*data));
- /* NAMED_ARG is a mis-nomer. We really mean 'non-varadic'. */
+ /* NAMED_ARG is a misnomer. We really mean 'non-variadic'. */
if (!cfun->stdarg)
- data->named_arg = 1; /* No varadic parms. */
+ data->named_arg = 1; /* No variadic parms. */
else if (TREE_CHAIN (parm))
- data->named_arg = 1; /* Not the last non-varadic parm. */
+ data->named_arg = 1; /* Not the last non-variadic parm. */
else if (targetm.calls.strict_argument_naming (&all->args_so_far))
- data->named_arg = 1; /* Only varadic ones are unnamed. */
+ data->named_arg = 1; /* Only variadic ones are unnamed. */
else
- data->named_arg = 0; /* Treat as varadic. */
+ data->named_arg = 0; /* Treat as variadic. */
nominal_type = TREE_TYPE (parm);
passed_type = DECL_ARG_TYPE (parm);
@@ -5304,7 +5304,7 @@ match_asm_constraints_1 (rtx insn, rtx *p_sets, int noutputs)
emit_insn_before (insns, insn);
/* Now replace all mentions of the input with output. We can't
- just replace the occurence in inputs[i], as the register might
+ just replace the occurrence in inputs[i], as the register might
also be used in some other input (or even in an address of an
output), which would mean possibly increasing the number of
inputs by one (namely 'output' in addition), which might pose
@@ -5314,7 +5314,7 @@ match_asm_constraints_1 (rtx insn, rtx *p_sets, int noutputs)
Here 'input' is used in two occurrences as input (once for the
input operand, once for the address in the second output operand).
- If we would replace only the occurence of the input operand (to
+ If we would replace only the occurrence of the input operand (to
make the matching) we would be left with this:
output = input
diff --git a/gcc/function.h b/gcc/function.h
index 2c469904712..ece44fdc7ac 100644
--- a/gcc/function.h
+++ b/gcc/function.h
@@ -414,9 +414,9 @@ struct rtl_data GTY(())
extern GTY(()) struct rtl_data x_rtl;
-/* Accestor to RTL datastructures. We keep them statically allocated now since
+/* Accessor to RTL datastructures. We keep them statically allocated now since
we never keep multiple functions. For threaded compiler we might however
- want to do differntly. */
+ want to do differently. */
#define crtl (&x_rtl)
/* This structure can save all the important global and static variables
diff --git a/gcc/gcc.c b/gcc/gcc.c
index b9ca2293149..37c3f5f2f02 100644
--- a/gcc/gcc.c
+++ b/gcc/gcc.c
@@ -1203,7 +1203,7 @@ static const struct {
/* Translate the options described by *ARGCP and *ARGVP.
Make a new vector and store it back in *ARGVP,
- and store its length in *ARGVC. */
+ and store its length in *ARGCP. */
static void
translate_options (int *argcp, const char *const **argvp)
@@ -7823,8 +7823,8 @@ getenv_spec_function (int argc, const char **argv)
fatal ("environment variable \"%s\" not defined", argv[0]);
/* We have to escape every character of the environment variable so
- they are not interpretted as active spec characters. A
- particulaly painful case is when we are reading a variable
+ they are not interpreted as active spec characters. A
+ particularly painful case is when we are reading a variable
holding a windows path complete with \ separators. */
len = strlen (value) * 2 + strlen (argv[1]) + 1;
result = xmalloc (len);
diff --git a/gcc/gcov-io.c b/gcc/gcov-io.c
index 4fcd600c624..665b98681c2 100644
--- a/gcc/gcov-io.c
+++ b/gcc/gcov-io.c
@@ -1,6 +1,6 @@
/* File format for coverage information
- Copyright (C) 1996, 1997, 1998, 2000, 2002, 2003, 2004, 2005, 2007
- Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 1998, 2000, 2002, 2003, 2004, 2005, 2007,
+ 2008 Free Software Foundation, Inc.
Contributed by Bob Manson <manson@cygnus.com>.
Completely remangled by Nathan Sidwell <nathan@codesourcery.com>.
@@ -507,7 +507,7 @@ gcov_sync (gcov_position_t base, gcov_unsigned_t length)
#endif
#if IN_LIBGCOV
-/* Move to the a set position in a gcov file. */
+/* Move to a given position in a gcov file. */
GCOV_LINKAGE void
gcov_seek (gcov_position_t base)
diff --git a/gcc/gcov.c b/gcc/gcov.c
index 4f259a9228f..a6cabe84d03 100644
--- a/gcc/gcov.c
+++ b/gcc/gcov.c
@@ -83,7 +83,7 @@ typedef struct arc_info
/* Arc is for a function that abnormally returns. */
unsigned int is_call_non_return : 1;
- /* Arc is for catch/setjump. */
+ /* Arc is for catch/setjmp. */
unsigned int is_nonlocal_return : 1;
/* Is an unconditional branch. */
diff --git a/gcc/gcse.c b/gcc/gcse.c
index f6837bf1f60..c86f2af1e79 100644
--- a/gcc/gcse.c
+++ b/gcc/gcse.c
@@ -1705,7 +1705,7 @@ hash_scan_set (rtx pat, rtx insn, struct hash_table *table)
same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
do more than one PRE GCSE pass.
- Note that this does not impede profitale constant propagations. We
+ Note that this does not impede profitable constant propagations. We
"look through" reg-reg sets in lookup_avail_set. */
note = find_reg_equal_equiv_note (insn);
if (note != 0
@@ -4680,7 +4680,7 @@ compute_transpout (void)
FOR_EACH_BB (bb)
{
- /* Note that flow inserted a nop a the end of basic blocks that
+ /* Note that flow inserted a nop at the end of basic blocks that
end in call instructions for reasons other than abnormal
control flow. */
if (! CALL_P (BB_END (bb)))
diff --git a/gcc/genattrtab.c b/gcc/genattrtab.c
index 385779afc2a..47b6ec13c9f 100644
--- a/gcc/genattrtab.c
+++ b/gcc/genattrtab.c
@@ -1,6 +1,6 @@
/* Generate code from machine description to compute values of attributes.
Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
- 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GCC.
@@ -135,7 +135,7 @@ struct insn_def
struct insn_def *next; /* Next insn in chain. */
rtx def; /* The DEFINE_... */
int insn_code; /* Instruction number. */
- int insn_index; /* Expression numer in file, for errors. */
+ int insn_index; /* Expression number in file, for errors. */
int lineno; /* Line number. */
int num_alternatives; /* Number of alternatives. */
int vec_idx; /* Index of attribute vector in `def'. */
diff --git a/gcc/genautomata.c b/gcc/genautomata.c
index 96737bcf654..59c1af06544 100644
--- a/gcc/genautomata.c
+++ b/gcc/genautomata.c
@@ -1,5 +1,5 @@
/* Pipeline hazard description translator.
- Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008
Free Software Foundation, Inc.
Written by Vladimir Makarov <vmakarov@redhat.com>
@@ -471,7 +471,7 @@ struct insn_reserv_decl
/* The following field is the insn regexp transformed that
the regexp has not optional regexp, repetition regexp, and an
reservation name (i.e. reservation identifiers are changed by the
- corresponding regexp) and all alternations are the topest level
+ corresponding regexp) and all alternations are the top level
of the regexp. The value can be NULL only if it is special
insn `cycle advancing'. */
regexp_t transformed_regexp;
@@ -4247,11 +4247,11 @@ initiate_presence_absence_pattern_sets (void)
}
/* The function checks that CHECKED_SET satisfies all presence pattern
- sets for units in ORIGIONAL_SET. The function returns TRUE if it
+ sets for units in ORIGINAL_SET. The function returns TRUE if it
is ok. */
static int
check_presence_pattern_sets (reserv_sets_t checked_set,
- reserv_sets_t origional_set,
+ reserv_sets_t original_set,
int final_p)
{
int char_num;
@@ -4264,9 +4264,9 @@ check_presence_pattern_sets (reserv_sets_t checked_set,
chars_num = els_in_cycle_reserv * sizeof (set_el_t);
for (char_num = 0; char_num < chars_num; char_num++)
- if (((unsigned char *) origional_set) [char_num])
+ if (((unsigned char *) original_set) [char_num])
for (i = CHAR_BIT - 1; i >= 0; i--)
- if ((((unsigned char *) origional_set) [char_num] >> i) & 1)
+ if ((((unsigned char *) original_set) [char_num] >> i) & 1)
{
start_unit_num = char_num * CHAR_BIT + i;
if (start_unit_num >= description->units_num)
@@ -4296,11 +4296,11 @@ check_presence_pattern_sets (reserv_sets_t checked_set,
}
/* The function checks that CHECKED_SET satisfies all absence pattern
- sets for units in ORIGIONAL_SET. The function returns TRUE if it
+ sets for units in ORIGINAL_SET. The function returns TRUE if it
is ok. */
static int
check_absence_pattern_sets (reserv_sets_t checked_set,
- reserv_sets_t origional_set,
+ reserv_sets_t original_set,
int final_p)
{
int char_num;
@@ -4312,9 +4312,9 @@ check_absence_pattern_sets (reserv_sets_t checked_set,
chars_num = els_in_cycle_reserv * sizeof (set_el_t);
for (char_num = 0; char_num < chars_num; char_num++)
- if (((unsigned char *) origional_set) [char_num])
+ if (((unsigned char *) original_set) [char_num])
for (i = CHAR_BIT - 1; i >= 0; i--)
- if ((((unsigned char *) origional_set) [char_num] >> i) & 1)
+ if ((((unsigned char *) original_set) [char_num] >> i) & 1)
{
start_unit_num = char_num * CHAR_BIT + i;
if (start_unit_num >= description->units_num)
@@ -5393,7 +5393,7 @@ make_automaton (automaton_t automaton)
VEC_free (state_t,heap, state_stack);
}
-/* Foms lists of all arcs of STATE marked by the same ainsn. */
+/* Form lists of all arcs of STATE marked by the same ainsn. */
static void
form_arcs_marked_by_insn (state_t state)
{
@@ -8488,7 +8488,7 @@ output_description (void)
{
if (DECL_UNIT (decl)->excl_list != NULL)
{
- fprintf (output_description_file, "unit %s exlusion_set: ",
+ fprintf (output_description_file, "unit %s exclusion_set: ",
DECL_UNIT (decl)->name);
output_unit_set_el_list (DECL_UNIT (decl)->excl_list);
fprintf (output_description_file, "\n");
diff --git a/gcc/ggc-page.c b/gcc/ggc-page.c
index f22ddf6e826..416f9114122 100644
--- a/gcc/ggc-page.c
+++ b/gcc/ggc-page.c
@@ -386,7 +386,7 @@ static struct globals
/* Maximum number of elements that can be used before resizing. */
unsigned int depth_max;
- /* Each element of this arry is an index in by_depth where the given
+ /* Each element of this array is an index in by_depth where the given
depth starts. This structure is indexed by that given depth we
are interested in. */
unsigned int *depth;
diff --git a/gcc/ggc-zone.c b/gcc/ggc-zone.c
index af211ad90dc..a80a6a0e979 100644
--- a/gcc/ggc-zone.c
+++ b/gcc/ggc-zone.c
@@ -2445,7 +2445,7 @@ ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
fatal_error ("can't seek PCH file: %m");
if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
- fatal_error ("can't write PCH fle: %m");
+ fatal_error ("can't write PCH file: %m");
/* Done with the PCH, so write out our footer. */
if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
diff --git a/gcc/gimple-low.c b/gcc/gimple-low.c
index fd1a19dfd32..99175d5b2f1 100644
--- a/gcc/gimple-low.c
+++ b/gcc/gimple-low.c
@@ -277,6 +277,7 @@ lower_stmt (tree_stmt_iterator *tsi, struct lower_data *data)
break;
case OMP_PARALLEL:
+ case OMP_TASK:
lower_omp_directive (tsi, data);
return;
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index 2d0ecbf20d9..47a2fe7b43d 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -62,10 +62,19 @@ enum gimplify_omp_var_data
GOVD_REDUCTION = 64,
GOVD_LOCAL = 128,
GOVD_DEBUG_PRIVATE = 256,
+ GOVD_PRIVATE_OUTER_REF = 512,
GOVD_DATA_SHARE_CLASS = (GOVD_SHARED | GOVD_PRIVATE | GOVD_FIRSTPRIVATE
| GOVD_LASTPRIVATE | GOVD_REDUCTION | GOVD_LOCAL)
};
+enum omp_region_type
+{
+ ORT_WORKSHARE = 0,
+ ORT_TASK = 1,
+ ORT_PARALLEL = 2,
+ ORT_COMBINED_PARALLEL = 3
+};
+
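
The two booleans are replaced by a small enumeration whose encoding is deliberate: ORT_COMBINED_PARALLEL sets the ORT_PARALLEL bit, so one mask test distinguishes parallel regions, combined or not, from workshare and task contexts, which the default handling further down relies on. A minimal sketch of the intended test:

    /* Sketch of how the region-type values are meant to be tested.  */
    static inline _Bool
    region_is_parallel_p (enum omp_region_type t)
    {
      /* True for ORT_PARALLEL (2) and ORT_COMBINED_PARALLEL (3),
         false for ORT_WORKSHARE (0) and ORT_TASK (1).  */
      return (t & ORT_PARALLEL) != 0;
    }
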
struct gimplify_omp_ctx
{
struct gimplify_omp_ctx *outer_context;
@@ -73,8 +82,7 @@ struct gimplify_omp_ctx
struct pointer_set_t *privatized_types;
location_t location;
enum omp_clause_default_kind default_kind;
- bool is_parallel;
- bool is_combined_parallel;
+ enum omp_region_type region_type;
};
struct gimplify_ctx
@@ -270,7 +278,7 @@ splay_tree_compare_decl_uid (splay_tree_key xa, splay_tree_key xb)
/* Create a new omp construct that deals with variable remapping. */
static struct gimplify_omp_ctx *
-new_omp_context (bool is_parallel, bool is_combined_parallel)
+new_omp_context (enum omp_region_type region_type)
{
struct gimplify_omp_ctx *c;
@@ -279,9 +287,11 @@ new_omp_context (bool is_parallel, bool is_combined_parallel)
c->variables = splay_tree_new (splay_tree_compare_decl_uid, 0, 0);
c->privatized_types = pointer_set_create ();
c->location = input_location;
- c->is_parallel = is_parallel;
- c->is_combined_parallel = is_combined_parallel;
- c->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
+ c->region_type = region_type;
+ if (region_type != ORT_TASK)
+ c->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
+ else
+ c->default_kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED;
return c;
}
@@ -756,7 +766,7 @@ gimple_add_tmp_var (tree tmp)
if (gimplify_omp_ctxp)
{
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
- while (ctx && !ctx->is_parallel)
+ while (ctx && ctx->region_type == ORT_WORKSHARE)
ctx = ctx->outer_context;
if (ctx)
omp_add_variable (ctx, tmp, GOVD_LOCAL | GOVD_SEEN);
@@ -4711,7 +4721,7 @@ omp_firstprivatize_variable (struct gimplify_omp_ctx *ctx, tree decl)
else
return;
}
- else if (ctx->is_parallel)
+ else if (ctx->region_type != ORT_WORKSHARE)
omp_add_variable (ctx, decl, GOVD_FIRSTPRIVATE);
ctx = ctx->outer_context;
@@ -4904,8 +4914,9 @@ omp_notice_variable (struct gimplify_omp_ctx *ctx, tree decl, bool in_code)
if (n == NULL)
{
enum omp_clause_default_kind default_kind, kind;
+ struct gimplify_omp_ctx *octx;
- if (!ctx->is_parallel)
+ if (ctx->region_type == ORT_WORKSHARE)
goto do_outer;
/* ??? Some compiler-generated variables (like SAVE_EXPRs) could be
@@ -4929,10 +4940,47 @@ omp_notice_variable (struct gimplify_omp_ctx *ctx, tree decl, bool in_code)
case OMP_CLAUSE_DEFAULT_PRIVATE:
flags |= GOVD_PRIVATE;
break;
+ case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE:
+ flags |= GOVD_FIRSTPRIVATE;
+ break;
+ case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
+ /* decl will be either GOVD_FIRSTPRIVATE or GOVD_SHARED. */
+ gcc_assert (ctx->region_type == ORT_TASK);
+ if (ctx->outer_context)
+ omp_notice_variable (ctx->outer_context, decl, in_code);
+ for (octx = ctx->outer_context; octx; octx = octx->outer_context)
+ {
+ splay_tree_node n2;
+
+ n2 = splay_tree_lookup (octx->variables, (splay_tree_key) decl);
+ if (n2 && (n2->value & GOVD_DATA_SHARE_CLASS) != GOVD_SHARED)
+ {
+ flags |= GOVD_FIRSTPRIVATE;
+ break;
+ }
+ if ((octx->region_type & ORT_PARALLEL) != 0)
+ break;
+ }
+ if (flags & GOVD_FIRSTPRIVATE)
+ break;
+ if (octx == NULL
+ && (TREE_CODE (decl) == PARM_DECL
+ || (!is_global_var (decl)
+ && DECL_CONTEXT (decl) == current_function_decl)))
+ {
+ flags |= GOVD_FIRSTPRIVATE;
+ break;
+ }
+ flags |= GOVD_SHARED;
+ break;
default:
gcc_unreachable ();
}
+ if ((flags & GOVD_PRIVATE)
+ && lang_hooks.decls.omp_private_outer_ref (decl))
+ flags |= GOVD_PRIVATE_OUTER_REF;
+
omp_add_variable (ctx, decl, flags);
shared = (flags & GOVD_SHARED) != 0;
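
OMP_CLAUSE_DEFAULT_UNSPECIFIED implements the OpenMP 3.0 implicit data-sharing rule for tasks: a variable that is private in any flavor somewhere between the task and the innermost enclosing parallel becomes firstprivate on the task, as do locals and parameters when no parallel encloses it at all; everything else defaults to shared. A C illustration of the rule this code reproduces, constructed for this note rather than taken from the patch:

    /* Illustration of the implicit task data-sharing handled above.  */
    void
    task_defaults (void)
    {
      int a = 1, b = 2;
    #pragma omp parallel firstprivate(a) shared(b)
      {
    #pragma omp single
    #pragma omp task            /* no data-sharing clauses on the task */
        {
          a += 1;   /* firstprivate in the parallel => implicitly firstprivate here */
          b += 1;   /* shared in the parallel => implicitly shared here */
        }
      }
    }
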
@@ -4952,7 +5000,7 @@ omp_notice_variable (struct gimplify_omp_ctx *ctx, tree decl, bool in_code)
do_outer:
/* If the variable is private in the current context, then we don't
need to propagate anything to an outer context. */
- if (flags & GOVD_PRIVATE)
+ if ((flags & GOVD_PRIVATE) && !(flags & GOVD_PRIVATE_OUTER_REF))
return ret;
if (ctx->outer_context
&& omp_notice_variable (ctx->outer_context, decl, in_code))
@@ -4985,7 +5033,7 @@ omp_is_private (struct gimplify_omp_ctx *ctx, tree decl)
}
else if ((n->value & GOVD_EXPLICIT) != 0
&& (ctx == gimplify_omp_ctxp
- || (ctx->is_combined_parallel
+ || (ctx->region_type == ORT_COMBINED_PARALLEL
&& gimplify_omp_ctxp->outer_context == ctx)))
{
if ((n->value & GOVD_FIRSTPRIVATE) != 0)
@@ -4998,7 +5046,7 @@ omp_is_private (struct gimplify_omp_ctx *ctx, tree decl)
return true;
}
- if (ctx->is_parallel)
+ if (ctx->region_type != ORT_WORKSHARE)
return false;
else if (ctx->outer_context)
return omp_is_private (ctx->outer_context, decl);
@@ -5027,7 +5075,7 @@ omp_check_private (struct gimplify_omp_ctx *ctx, tree decl)
if (n != NULL)
return (n->value & GOVD_SHARED) == 0;
}
- while (!ctx->is_parallel);
+ while (ctx->region_type == ORT_WORKSHARE);
return false;
}
@@ -5035,13 +5083,13 @@ omp_check_private (struct gimplify_omp_ctx *ctx, tree decl)
and previous omp contexts. */
static void
-gimplify_scan_omp_clauses (tree *list_p, tree *pre_p, bool in_parallel,
- bool in_combined_parallel)
+gimplify_scan_omp_clauses (tree *list_p, tree *pre_p,
+ enum omp_region_type region_type)
{
struct gimplify_omp_ctx *ctx, *outer_ctx;
tree c;
- ctx = new_omp_context (in_parallel, in_combined_parallel);
+ ctx = new_omp_context (region_type);
outer_ctx = ctx->outer_context;
while ((c = *list_p) != NULL)
@@ -5057,7 +5105,13 @@ gimplify_scan_omp_clauses (tree *list_p, tree *pre_p, bool in_parallel,
{
case OMP_CLAUSE_PRIVATE:
flags = GOVD_PRIVATE | GOVD_EXPLICIT;
- notice_outer = false;
+ if (lang_hooks.decls.omp_private_outer_ref (OMP_CLAUSE_DECL (c)))
+ {
+ flags |= GOVD_PRIVATE_OUTER_REF;
+ OMP_CLAUSE_PRIVATE_OUTER_REF (c) = 1;
+ }
+ else
+ notice_outer = false;
goto do_add;
case OMP_CLAUSE_SHARED:
flags = GOVD_SHARED | GOVD_EXPLICIT;
@@ -5097,6 +5151,23 @@ gimplify_scan_omp_clauses (tree *list_p, tree *pre_p, bool in_parallel,
pop_gimplify_context (OMP_CLAUSE_REDUCTION_MERGE (c));
gimplify_omp_ctxp = outer_ctx;
}
+ else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
+ && OMP_CLAUSE_LASTPRIVATE_STMT (c))
+ {
+ gimplify_omp_ctxp = ctx;
+ push_gimplify_context ();
+ if (TREE_CODE (OMP_CLAUSE_LASTPRIVATE_STMT (c)) != BIND_EXPR)
+ {
+ tree bind = build3 (BIND_EXPR, void_type_node, NULL,
+ NULL, NULL);
+ TREE_SIDE_EFFECTS (bind) = 1;
+ BIND_EXPR_BODY (bind) = OMP_CLAUSE_LASTPRIVATE_STMT (c);
+ OMP_CLAUSE_LASTPRIVATE_STMT (c) = bind;
+ }
+ gimplify_stmt (&OMP_CLAUSE_LASTPRIVATE_STMT (c));
+ pop_gimplify_context (OMP_CLAUSE_LASTPRIVATE_STMT (c));
+ gimplify_omp_ctxp = outer_ctx;
+ }
if (notice_outer)
goto do_notice;
break;
@@ -5113,7 +5184,7 @@ gimplify_scan_omp_clauses (tree *list_p, tree *pre_p, bool in_parallel,
if (outer_ctx)
omp_notice_variable (outer_ctx, decl, true);
if (check_non_private
- && !in_parallel
+ && region_type == ORT_WORKSHARE
&& omp_check_private (ctx, decl))
{
error ("%s variable %qs is private in outer context",
@@ -5137,6 +5208,8 @@ gimplify_scan_omp_clauses (tree *list_p, tree *pre_p, bool in_parallel,
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
+ case OMP_CLAUSE_UNTIED:
+ case OMP_CLAUSE_COLLAPSE:
break;
case OMP_CLAUSE_DEFAULT:
@@ -5215,7 +5288,10 @@ gimplify_adjust_omp_clauses_1 (splay_tree_node n, void *data)
OMP_CLAUSE_CHAIN (clause) = *list_p;
if (private_debug)
OMP_CLAUSE_PRIVATE_DEBUG (clause) = 1;
+ else if (code == OMP_CLAUSE_PRIVATE && (flags & GOVD_PRIVATE_OUTER_REF))
+ OMP_CLAUSE_PRIVATE_OUTER_REF (clause) = 1;
*list_p = clause;
+ lang_hooks.decls.omp_finish_clause (clause);
return 0;
}
@@ -5272,6 +5348,8 @@ gimplify_adjust_omp_clauses (tree *list_p)
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_DEFAULT:
+ case OMP_CLAUSE_UNTIED:
+ case OMP_CLAUSE_COLLAPSE:
break;
default:
@@ -5301,8 +5379,10 @@ gimplify_omp_parallel (tree *expr_p, tree *pre_p)
{
tree expr = *expr_p;
- gimplify_scan_omp_clauses (&OMP_PARALLEL_CLAUSES (expr), pre_p, true,
- OMP_PARALLEL_COMBINED (expr));
+ gimplify_scan_omp_clauses (&OMP_PARALLEL_CLAUSES (expr), pre_p,
+ OMP_PARALLEL_COMBINED (expr)
+ ? ORT_COMBINED_PARALLEL
+ : ORT_PARALLEL);
push_gimplify_context ();
@@ -5318,124 +5398,187 @@ gimplify_omp_parallel (tree *expr_p, tree *pre_p)
return GS_ALL_DONE;
}
-/* Gimplify the gross structure of an OMP_FOR statement. */
+/* Gimplify the contents of an OMP_TASK statement. This involves
+ gimplification of the body, as well as scanning the body for used
+ variables. We need to do this scan now, because variable-sized
+ decls will be decomposed during gimplification. */
static enum gimplify_status
-gimplify_omp_for (tree *expr_p, tree *pre_p)
+gimplify_omp_task (tree *expr_p, tree *pre_p)
{
- tree for_stmt, decl, var, t;
- enum gimplify_status ret = GS_OK;
- tree body, init_decl = NULL_TREE;
+ tree expr = *expr_p;
- for_stmt = *expr_p;
+ gimplify_scan_omp_clauses (&OMP_TASK_CLAUSES (expr), pre_p, ORT_TASK);
- gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p, false, false);
+ push_gimplify_context ();
- t = OMP_FOR_INIT (for_stmt);
- gcc_assert (TREE_CODE (t) == MODIFY_EXPR
- || TREE_CODE (t) == GIMPLE_MODIFY_STMT);
- decl = GENERIC_TREE_OPERAND (t, 0);
- gcc_assert (DECL_P (decl));
- gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (decl)));
+ gimplify_stmt (&OMP_TASK_BODY (expr));
- /* Make sure the iteration variable is private. */
- if (omp_is_private (gimplify_omp_ctxp, decl))
- omp_notice_variable (gimplify_omp_ctxp, decl, true);
+ if (TREE_CODE (OMP_TASK_BODY (expr)) == BIND_EXPR)
+ pop_gimplify_context (OMP_TASK_BODY (expr));
else
- omp_add_variable (gimplify_omp_ctxp, decl, GOVD_PRIVATE | GOVD_SEEN);
+ pop_gimplify_context (NULL_TREE);
- /* If DECL is not a gimple register, create a temporary variable to act as an
- iteration counter. This is valid, since DECL cannot be modified in the
- body of the loop. */
- if (!is_gimple_reg (decl))
- {
- var = create_tmp_var (TREE_TYPE (decl), get_name (decl));
- GENERIC_TREE_OPERAND (t, 0) = var;
+ gimplify_adjust_omp_clauses (&OMP_TASK_CLAUSES (expr));
- init_decl = build_gimple_modify_stmt (decl, var);
- omp_add_variable (gimplify_omp_ctxp, var, GOVD_PRIVATE | GOVD_SEEN);
- }
- else
- var = decl;
+ return GS_ALL_DONE;
+}
+
+/* Gimplify the gross structure of an OMP_FOR statement. */
+
+static enum gimplify_status
+gimplify_omp_for (tree *expr_p, tree *pre_p)
+{
+ tree for_stmt, decl, var, t, bodylist;
+ enum gimplify_status ret = GS_OK;
+ tree body, init_decl = NULL_TREE;
+ int i;
+
+ for_stmt = *expr_p;
+
+ gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p,
+ ORT_WORKSHARE);
/* If OMP_FOR is re-gimplified, ensure all variables in pre-body
are noticed. */
gimplify_stmt (&OMP_FOR_PRE_BODY (for_stmt));
- ret |= gimplify_expr (&GENERIC_TREE_OPERAND (t, 1),
- &OMP_FOR_PRE_BODY (for_stmt),
- NULL, is_gimple_val, fb_rvalue);
+ bodylist = alloc_stmt_list ();
+
+ gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
+ == TREE_VEC_LENGTH (OMP_FOR_COND (for_stmt)));
+ gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
+ == TREE_VEC_LENGTH (OMP_FOR_INCR (for_stmt)));
+ for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
+ {
+ t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
+ gcc_assert (TREE_CODE (t) == MODIFY_EXPR
+ || TREE_CODE (t) == GIMPLE_MODIFY_STMT);
+ decl = GENERIC_TREE_OPERAND (t, 0);
+ gcc_assert (DECL_P (decl));
+ gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (decl))
+ || POINTER_TYPE_P (TREE_TYPE (decl)));
+
+ /* Make sure the iteration variable is private. */
+ if (omp_is_private (gimplify_omp_ctxp, decl))
+ omp_notice_variable (gimplify_omp_ctxp, decl, true);
+ else
+ omp_add_variable (gimplify_omp_ctxp, decl, GOVD_PRIVATE | GOVD_SEEN);
- tree_to_gimple_tuple (&OMP_FOR_INIT (for_stmt));
+ /* If DECL is not a gimple register, create a temporary variable to act
+ as an iteration counter. This is valid, since DECL cannot be
+ modified in the body of the loop. */
+ if (!is_gimple_reg (decl))
+ {
+ var = create_tmp_var (TREE_TYPE (decl), get_name (decl));
+ GENERIC_TREE_OPERAND (t, 0) = var;
- t = OMP_FOR_COND (for_stmt);
- gcc_assert (COMPARISON_CLASS_P (t));
- gcc_assert (GENERIC_TREE_OPERAND (t, 0) == decl);
- TREE_OPERAND (t, 0) = var;
+ init_decl = build_gimple_modify_stmt (decl, var);
+ omp_add_variable (gimplify_omp_ctxp, var, GOVD_PRIVATE | GOVD_SEEN);
+ }
+ else
+ var = decl;
- ret |= gimplify_expr (&GENERIC_TREE_OPERAND (t, 1),
- &OMP_FOR_PRE_BODY (for_stmt),
- NULL, is_gimple_val, fb_rvalue);
+ ret |= gimplify_expr (&GENERIC_TREE_OPERAND (t, 1),
+ &OMP_FOR_PRE_BODY (for_stmt),
+ NULL, is_gimple_val, fb_rvalue);
- tree_to_gimple_tuple (&OMP_FOR_INCR (for_stmt));
- t = OMP_FOR_INCR (for_stmt);
- switch (TREE_CODE (t))
- {
- case PREINCREMENT_EXPR:
- case POSTINCREMENT_EXPR:
- t = build_int_cst (TREE_TYPE (decl), 1);
- t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
- t = build_gimple_modify_stmt (var, t);
- OMP_FOR_INCR (for_stmt) = t;
- break;
+ tree_to_gimple_tuple (&TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i));
- case PREDECREMENT_EXPR:
- case POSTDECREMENT_EXPR:
- t = build_int_cst (TREE_TYPE (decl), -1);
- t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
- t = build_gimple_modify_stmt (var, t);
- OMP_FOR_INCR (for_stmt) = t;
- break;
-
- case GIMPLE_MODIFY_STMT:
- gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == decl);
- GIMPLE_STMT_OPERAND (t, 0) = var;
+ t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
+ gcc_assert (COMPARISON_CLASS_P (t));
+ gcc_assert (GENERIC_TREE_OPERAND (t, 0) == decl);
+ TREE_OPERAND (t, 0) = var;
- t = GIMPLE_STMT_OPERAND (t, 1);
+ ret |= gimplify_expr (&GENERIC_TREE_OPERAND (t, 1),
+ &OMP_FOR_PRE_BODY (for_stmt),
+ NULL, is_gimple_val, fb_rvalue);
+
+ tree_to_gimple_tuple (&TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i));
+ t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
switch (TREE_CODE (t))
{
- case PLUS_EXPR:
- if (TREE_OPERAND (t, 1) == decl)
+ case PREINCREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ t = build_int_cst (TREE_TYPE (decl), 1);
+ t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
+ t = build_gimple_modify_stmt (var, t);
+ TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t;
+ break;
+
+ case PREDECREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ t = build_int_cst (TREE_TYPE (decl), -1);
+ t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
+ t = build_gimple_modify_stmt (var, t);
+ TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t;
+ break;
+
+ case GIMPLE_MODIFY_STMT:
+ gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == decl);
+ GIMPLE_STMT_OPERAND (t, 0) = var;
+
+ t = GIMPLE_STMT_OPERAND (t, 1);
+ switch (TREE_CODE (t))
{
- TREE_OPERAND (t, 1) = TREE_OPERAND (t, 0);
+ case PLUS_EXPR:
+ if (TREE_OPERAND (t, 1) == decl)
+ {
+ TREE_OPERAND (t, 1) = TREE_OPERAND (t, 0);
+ TREE_OPERAND (t, 0) = var;
+ break;
+ }
+
+ /* Fallthru. */
+ case MINUS_EXPR:
+ case POINTER_PLUS_EXPR:
+ gcc_assert (TREE_OPERAND (t, 0) == decl);
TREE_OPERAND (t, 0) = var;
break;
+ default:
+ gcc_unreachable ();
}
- /* Fallthru. */
- case MINUS_EXPR:
- gcc_assert (TREE_OPERAND (t, 0) == decl);
- TREE_OPERAND (t, 0) = var;
+ ret |= gimplify_expr (&TREE_OPERAND (t, 1),
+ &OMP_FOR_PRE_BODY (for_stmt),
+ NULL, is_gimple_val, fb_rvalue);
break;
+
default:
gcc_unreachable ();
}
- ret |= gimplify_expr (&TREE_OPERAND (t, 1), &OMP_FOR_PRE_BODY (for_stmt),
- NULL, is_gimple_val, fb_rvalue);
- break;
+ if (init_decl)
+ append_to_statement_list (init_decl, &bodylist);
- default:
- gcc_unreachable ();
+ if (var != decl || TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) > 1)
+ {
+ tree c;
+ for (c = OMP_FOR_CLAUSES (for_stmt); c ; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
+ && OMP_CLAUSE_DECL (c) == decl
+ && OMP_CLAUSE_LASTPRIVATE_STMT (c) == NULL)
+ {
+ t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
+ gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
+ gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == var);
+ t = GIMPLE_STMT_OPERAND (t, 1);
+ gcc_assert (TREE_CODE (t) == PLUS_EXPR
+ || TREE_CODE (t) == MINUS_EXPR
+ || TREE_CODE (t) == POINTER_PLUS_EXPR);
+ gcc_assert (TREE_OPERAND (t, 0) == var);
+ t = build2 (TREE_CODE (t), TREE_TYPE (decl), decl,
+ TREE_OPERAND (t, 1));
+ OMP_CLAUSE_LASTPRIVATE_STMT (c)
+ = build_gimple_modify_stmt (decl, t);
+ }
+ }
}
body = OMP_FOR_BODY (for_stmt);
gimplify_to_stmt_list (&body);
- t = alloc_stmt_list ();
- if (init_decl)
- append_to_statement_list (init_decl, &t);
- append_to_statement_list (body, &t);
- OMP_FOR_BODY (for_stmt) = t;
+ append_to_statement_list (body, &bodylist);
+ OMP_FOR_BODY (for_stmt) = bodylist;
gimplify_adjust_omp_clauses (&OMP_FOR_CLAUSES (for_stmt));
return ret == GS_ALL_DONE ? GS_ALL_DONE : GS_ERROR;
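
The reworked gimplify_omp_for iterates over per-loop vectors (OMP_FOR_INIT, OMP_FOR_COND and OMP_FOR_INCR now hold one TREE_VEC element per collapsed nesting level) and accepts pointer-typed iteration variables, handling their increments as POINTER_PLUS_EXPR. Loop forms that exercise this, in C syntax and purely for illustration:

    /* Hypothetical loops the reworked gimplifier is meant to handle.  */
    void
    walk (char *p, char *end, int n, int m, double a[n][m])
    {
    #pragma omp parallel for              /* pointer iterator => POINTER_PLUS_EXPR increment */
      for (char *q = p; q < end; q++)
        *q = 0;

    #pragma omp parallel for collapse(2)  /* init/cond/incr become 2-element TREE_VECs */
      for (int i = 0; i < n; i++)
        for (int j = 0; j < m; j++)
          a[i][j] = 0.0;
    }
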
@@ -5449,7 +5592,7 @@ gimplify_omp_workshare (tree *expr_p, tree *pre_p)
{
tree stmt = *expr_p;
- gimplify_scan_omp_clauses (&OMP_CLAUSES (stmt), pre_p, false, false);
+ gimplify_scan_omp_clauses (&OMP_CLAUSES (stmt), pre_p, ORT_WORKSHARE);
gimplify_to_stmt_list (&OMP_BODY (stmt));
gimplify_adjust_omp_clauses (&OMP_CLAUSES (stmt));
@@ -5747,8 +5890,14 @@ gimplify_expr (tree *expr_p, tree *pre_p, tree *post_p,
break;
case TRUTH_NOT_EXPR:
- TREE_OPERAND (*expr_p, 0)
- = gimple_boolify (TREE_OPERAND (*expr_p, 0));
+ if (TREE_CODE (TREE_TYPE (*expr_p)) != BOOLEAN_TYPE)
+ {
+ tree type = TREE_TYPE (*expr_p);
+ *expr_p = fold_convert (type, gimple_boolify (*expr_p));
+ ret = GS_OK;
+ break;
+ }
+
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
@@ -6025,6 +6174,10 @@ gimplify_expr (tree *expr_p, tree *pre_p, tree *post_p,
ret = gimplify_omp_parallel (expr_p, pre_p);
break;
+ case OMP_TASK:
+ ret = gimplify_omp_task (expr_p, pre_p);
+ break;
+
case OMP_FOR:
ret = gimplify_omp_for (expr_p, pre_p);
break;
@@ -6048,6 +6201,7 @@ gimplify_expr (tree *expr_p, tree *pre_p, tree *post_p,
case OMP_RETURN:
case OMP_CONTINUE:
case OMP_ATOMIC_STORE:
+ case OMP_SECTIONS_SWITCH:
ret = GS_ALL_DONE;
break;
@@ -6494,7 +6648,7 @@ gimplify_body (tree *body_p, tree fndecl, bool do_parms)
unshare_body (body_p, fndecl);
unvisit_body (body_p, fndecl);
- /* Make sure input_location isn't set to something wierd. */
+ /* Make sure input_location isn't set to something weird. */
input_location = DECL_SOURCE_LOCATION (fndecl);
/* Resolve callee-copies. This has to be done before processing
diff --git a/gcc/gthr-lynx.h b/gcc/gthr-lynx.h
index 8d2d72b9d97..5a8147df680 100644
--- a/gcc/gthr-lynx.h
+++ b/gcc/gthr-lynx.h
@@ -1,7 +1,7 @@
/* Threads compatibility routines for libgcc2 and libobjc for
LynxOS. */
/* Compile this one with gcc. */
-/* Copyright (C) 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2004, 2008 Free Software Foundation, Inc.
This file is part of GCC.
@@ -44,7 +44,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
/* When using static libc on LynxOS, we cannot define pthread_create
weak. If the multi-threaded application includes iostream.h,
gthr-posix.h is included and pthread_create will be defined weak.
- If pthead_create is weak its defining module in libc is not
+ If pthread_create is weak its defining module in libc is not
necessarily included in the link and the symbol is resolved to zero.
Therefore the first call to it will crash.
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index 0f12cd04dfa..bb799f96c3c 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -1110,7 +1110,7 @@ ready_sort (struct ready_list *ready)
/* PREV is an insn that is ready to execute. Adjust its priority if that
will help shorten or lengthen register lifetimes as appropriate. Also
- provide a hook for the target to tweek itself. */
+ provide a hook for the target to tweak itself. */
HAIFA_INLINE static void
adjust_priority (rtx prev)
@@ -2373,7 +2373,7 @@ schedule_block (basic_block *target_bb, int rgn_n_insns1)
asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
|| asm_noperands (PATTERN (insn)) >= 0);
if (!first_cycle_insn_p && asm_p)
- /* This is asm insn which is tryed to be issued on the
+ /* This is asm insn which is tried to be issued on the
cycle not first. Issue it on the next cycle. */
cost = 1;
else
@@ -2548,7 +2548,7 @@ schedule_block (basic_block *target_bb, int rgn_n_insns1)
{
targetm.sched.md_finish (sched_dump, sched_verbose);
- /* Target might have added some instructions to the scheduled block.
+ /* Target might have added some instructions to the scheduled block
in its md_finish () hook. These new insns don't have any data
initialized and to identify them we extend h_i_d so that they'll
get zero luids.*/
diff --git a/gcc/hooks.c b/gcc/hooks.c
index 289275e8441..50761b69ca5 100644
--- a/gcc/hooks.c
+++ b/gcc/hooks.c
@@ -291,6 +291,14 @@ hook_tree_tree_tree_null (tree t0 ATTRIBUTE_UNUSED, tree t1 ATTRIBUTE_UNUSED)
return NULL;
}
+tree
+hook_tree_tree_tree_tree_null (tree t0 ATTRIBUTE_UNUSED,
+ tree t1 ATTRIBUTE_UNUSED,
+ tree t2 ATTRIBUTE_UNUSED)
+{
+ return NULL;
+}
+
/* Generic hook that takes a rtx and returns a NULL string. */
const char *
hook_constcharptr_const_rtx_null (const_rtx r ATTRIBUTE_UNUSED)
diff --git a/gcc/hooks.h b/gcc/hooks.h
index 838a4223fe3..d6bbc4c2f07 100644
--- a/gcc/hooks.h
+++ b/gcc/hooks.h
@@ -63,6 +63,7 @@ extern int hook_int_size_t_constcharptr_int_0 (size_t, const char *, int);
extern int hook_int_void_no_regs (void);
extern tree hook_tree_tree_tree_null (tree, tree);
+extern tree hook_tree_tree_tree_tree_null (tree, tree, tree);
extern tree hook_tree_tree_tree_tree_3rd_identity (tree, tree, tree);
extern tree hook_tree_tree_tree_bool_null (tree, tree, bool);
diff --git a/gcc/hwint.h b/gcc/hwint.h
index 2f3fe4cb0b8..4e0679c77b6 100644
--- a/gcc/hwint.h
+++ b/gcc/hwint.h
@@ -1,5 +1,5 @@
/* HOST_WIDE_INT definitions for the GNU compiler.
- Copyright (C) 1998, 2002, 2004 Free Software Foundation, Inc.
+ Copyright (C) 1998, 2002, 2004, 2008 Free Software Foundation, Inc.
This file is part of GCC.
@@ -139,7 +139,7 @@ extern char sizeof_long_long_must_be_8[sizeof(long long) == 8 ? 1 : -1];
# define HOST_WIDEST_FAST_INT __int64
# define HOST_BITS_PER_WIDEST_FAST_INT HOST_BITS_PER___INT64
# else
-# error "Your host said it wantted to use long long or __int64 but neither"
+# error "Your host said it wanted to use long long or __int64 but neither"
# error "exist"
# endif
#else
diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index 80774327d31..9e2153141d3 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -99,7 +99,7 @@ along with GCC; see the file COPYING3. If not see
2. For read-only parameters that do not live in memory, we replace all their
uses with the constant.
- We also need to modify some callsites to call the cloned functiosns instead
+ We also need to modify some callsites to call the cloned functions instead
of the original ones. For a callsite passing an argument found to be a
constant by IPCP, there are two different cases to handle:
1. A constant is passed as an argument. In this case the callsite in the
@@ -109,7 +109,7 @@ along with GCC; see the file COPYING3. If not see
only the callsite in the cloned caller is redirected to call to the
cloned callee.
- This update is done in two steps: First all cloned functionss are created
+ This update is done in two steps: First all cloned functions are created
during a traversal of the call graph, during which all callsites are
redirected to call the cloned function. Then the callsites are traversed
and many calls redirected back to fit the description above.
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index 6ac851a5bc9..6d74c1f71b7 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -1,5 +1,5 @@
/* Inlining decision heuristics.
- Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2007, 2008 Free Software Foundation, Inc.
Contributed by Jan Hubicka
This file is part of GCC.
@@ -520,7 +520,7 @@ cgraph_edge_badness (struct cgraph_edge *edge)
within function, the function itself is infrequent.
Other objective to optimize for is number of different calls inlined.
- We add the estimated growth after inlining all functions to biass the
+ We add the estimated growth after inlining all functions to bias the
priorities slightly in this direction (so fewer times called functions
of the same size gets priority). */
else if (flag_guess_branch_prob)
diff --git a/gcc/ipa-prop.h b/gcc/ipa-prop.h
index 3be7502ae8c..a81418d7f47 100644
--- a/gcc/ipa-prop.h
+++ b/gcc/ipa-prop.h
@@ -1,5 +1,5 @@
/* Interprocedural analyses.
- Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
This file is part of GCC.
@@ -115,7 +115,7 @@ struct ipa_replace_map
struct ipa_node_params
{
/* Number of formal parameters of this function. When set to 0,
- this functions's parameters would not be analyzed by the different
+ this function's parameters would not be analyzed by the different
stages of IPA CP. */
int param_count;
/* Array of lattices. */
@@ -134,7 +134,7 @@ struct ipa_node_params
one. */
gcov_type count_scale;
- /* Whether this fynction is called with variable number of actual
+ /* Whether this function is called with variable number of actual
arguments. */
unsigned called_with_var_arguments : 1;
};
@@ -165,7 +165,7 @@ ipa_get_ith_param (struct ipa_node_params *info, int i)
return info->param_decls[i];
}
-/* Returns the modification flag corresponding o the ith paramterer. Note
+/* Returns the modification flag corresponding to the ith parameter. Note
there is no setter method as the goal is to set all flags when building the
array in ipa_detect_param_modifications. */
static inline bool
diff --git a/gcc/ipa-pure-const.c b/gcc/ipa-pure-const.c
index a2c920601ac..3b2cdaee2a1 100644
--- a/gcc/ipa-pure-const.c
+++ b/gcc/ipa-pure-const.c
@@ -1,5 +1,5 @@
/* Callgraph based analysis of static variables.
- Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc.
Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
This file is part of GCC.
@@ -18,9 +18,9 @@ You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-/* This file mark functions as being either const (TREE_READONLY) or
- pure (DECL_PURE_P). It can also set the a variant of these that
- are allowed to infinite loop (DECL_LOOPING_CONST_PURE_P).
+/* This file marks functions as being either const (TREE_READONLY) or
+ pure (DECL_PURE_P). It can also set a variant of these that
+ are allowed to loop indefinitely (DECL_LOOPING_CONST_PURE_P).
This must be run after inlining decisions have been made since
otherwise, the local sets will not contain information that is
@@ -85,7 +85,7 @@ get_function_state (struct cgraph_node *node)
return (funct_state) info->aux;
}
-/* Check to see if the use (or definition when CHECHING_WRITE is true)
+/* Check to see if the use (or definition when CHECKING_WRITE is true)
variable T is legal in a function that is either pure or const. */
static inline void
@@ -174,7 +174,7 @@ check_tree (funct_state local, tree t, bool checking_write)
|| TREE_CODE (t) == SSA_NAME)
return;
- /* Any tree which is volatile disqualifies thie function from being
+ /* Any tree which is volatile disqualifies this function from being
const or pure. */
if (TREE_THIS_VOLATILE (t))
{
@@ -641,7 +641,7 @@ static_execute (void)
is a master clone. However, we do NOT process any
AVAIL_OVERWRITABLE functions (these are never clones) we cannot
guarantee that what we learn about the one we see will be true
- for the one that overriders it.
+ for the one that overrides it.
*/
for (node = cgraph_nodes; node; node = node->next)
if (node->analyzed && cgraph_is_master_clone (node))
diff --git a/gcc/ipa-struct-reorg.c b/gcc/ipa-struct-reorg.c
index d0d1c935dc3..cce9b3f6fc7 100644
--- a/gcc/ipa-struct-reorg.c
+++ b/gcc/ipa-struct-reorg.c
@@ -1,5 +1,5 @@
/* Struct-reorg optimization.
- Copyright (C) 2007 Free Software Foundation, Inc.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
Contributed by Olga Golovanevsky <olga@il.ibm.com>
(Initial version of this code was developed
by Caroline Tice and Mostafa Hagog.)
@@ -309,7 +309,7 @@ is_result_of_mult (tree arg, tree *num, tree struct_size)
{
tree size_def_stmt = SSA_NAME_DEF_STMT (arg);
- /* If allocation statementt was of the form
+ /* If the allocation statement was of the form
D.2229_10 = <alloc_func> (D.2228_9);
then size_def_stmt can be D.2228_9 = num.3_8 * 8; */
@@ -406,7 +406,7 @@ decompose_indirect_ref_acc (tree str_decl, struct field_access_site *acc)
/* This function checks whether the access ACC of structure type STR
- is of the form suitable for tranformation. If yes, it returns true.
+ is of the form suitable for transformation. If yes, it returns true.
False otherwise. */
static bool
@@ -435,7 +435,7 @@ make_field_acc_node (void)
}
/* This function returns the structure field access, defined by STMT,
- if it is aready in hashtable of function accesses F_ACCS. */
+ if it is already in hashtable of function accesses F_ACCS. */
static struct field_access_site *
is_in_field_accs (tree stmt, htab_t f_accs)
@@ -538,7 +538,7 @@ finalize_new_vars_creation (void **slot, void *data ATTRIBUTE_UNUSED)
return 1;
}
-/* This funciton updates statements in STMT_LIST with BB info. */
+/* This function updates statements in STMT_LIST with BB info. */
static void
add_bb_info (basic_block bb, tree stmt_list)
@@ -587,7 +587,7 @@ is_in_new_vars_htab (tree decl, htab_t new_vars_htab)
htab_hash_pointer (decl));
}
-/* Given original varaiable ORIG_VAR, this function returns
+/* Given original variable ORIG_VAR, this function returns
new variable corresponding to it of NEW_TYPE type. */
static tree
@@ -1616,7 +1616,7 @@ is_equal_types (tree type1, tree type2)
case ENUMERAL_TYPE:
{
tree field1;
- /* Compare fields of struture. */
+ /* Compare fields of structure. */
for (field1 = TYPE_FIELDS (type1); field1;
field1 = TREE_CHAIN (field1))
{
@@ -1760,7 +1760,7 @@ create_new_malloc (tree malloc_stmt, tree new_type, tree *new_stmts, tree num)
/* This function returns a tree representing
the number of instances of structure STR_DECL allocated
- by allocation STMT. If new statments are generated,
+ by allocation STMT. If new statements are generated,
they are filled into NEW_STMTS_P. */
static tree
@@ -2074,7 +2074,7 @@ dump_acc (void **slot, void *data ATTRIBUTE_UNUSED)
return 1;
}
-/* This function frees memory allocated for strcuture clusters,
+/* This function frees memory allocated for structure clusters,
starting from CLUSTER. */
static void
@@ -2402,7 +2402,7 @@ remove_structure (unsigned i)
}
/* Currently we support only EQ_EXPR or NE_EXPR conditions.
- COND_STNT is a condition statement to check. */
+ COND_STMT is a condition statement to check. */
static bool
is_safe_cond_expr (tree cond_stmt)
@@ -2861,7 +2861,7 @@ check_bitfields (d_str str, VEC (tree, heap) **unsuitable_types)
}
/* This function adds to UNSUITABLE_TYPES those types that escape
- due to results of ipa-type-escpae analysis. See ipa-type-escpae.[c,h]. */
+ due to results of ipa-type-escape analysis. See ipa-type-escape.[c,h]. */
static void
exclude_escaping_types_1 (VEC (tree, heap) **unsuitable_types)
@@ -3158,7 +3158,7 @@ exclude_alloc_and_field_accs_1 (d_str str, struct cgraph_node *node)
htab_traverse (dt.str->accs, exclude_from_accs, &dt);
}
-/* Collect accesses to the structure types that apear in basic bloack BB. */
+/* Collect accesses to the structure types that appear in basic block BB. */
static void
collect_accesses_in_bb (basic_block bb)
@@ -3181,8 +3181,8 @@ collect_accesses_in_bb (basic_block bb)
}
}
-/* This function generates cluster substructure that cointains FIELDS.
- The cluster added to the set of clusters of the structure SRT. */
+/* This function generates cluster substructure that contains FIELDS.
+ The cluster added to the set of clusters of the structure STR. */
static void
gen_cluster (sbitmap fields, d_str str)
@@ -3727,6 +3727,7 @@ do_reorg_1 (void)
}
set_cfun (NULL);
+ bitmap_obstack_release (NULL);
}
/* This function creates new global struct variables.
diff --git a/gcc/ipa-struct-reorg.h b/gcc/ipa-struct-reorg.h
index 6f4c5b83d75..54cdbc9982f 100644
--- a/gcc/ipa-struct-reorg.h
+++ b/gcc/ipa-struct-reorg.h
@@ -1,5 +1,5 @@
/* Struct-reorg optimization.
- Copyright (C) 2002, 2003-2007 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2003-2007, 2008 Free Software Foundation, Inc.
Contributed by Olga Golovanevsky <olga@il.ibm.com>
This file is part of GCC.
@@ -104,7 +104,7 @@ struct data_structure
/* A data structure representing a reorganization decision. */
struct field_cluster *struct_clustering;
- /* New types to replace an the original structure type. */
+ /* New types to replace the original structure type. */
VEC(tree, heap) *new_types;
};
diff --git a/gcc/ipa-type-escape.c b/gcc/ipa-type-escape.c
index 2e6c5809df7..229d8b2e9a5 100644
--- a/gcc/ipa-type-escape.c
+++ b/gcc/ipa-type-escape.c
@@ -1,5 +1,6 @@
/* Type based alias analysis.
- Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation,
+ Inc.
Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
This file is part of GCC.
@@ -1781,7 +1782,7 @@ type_for_uid (int uid)
else return NULL;
}
-/* Return the a bitmap with the subtypes of the type for UID. If it
+/* Return a bitmap with the subtypes of the type for UID. If it
does not exist, return either NULL or a new bitmap depending on the
value of CREATE. */
@@ -2054,7 +2055,7 @@ type_escape_execute (void)
FOR_EACH_STATIC_VARIABLE (vnode)
analyze_variable (vnode);
- /* Process all of the functions. next
+ /* Process all of the functions next.
We do not want to process any of the clones so we check that this
is a master clone. However, we do need to process any
diff --git a/gcc/ipa.c b/gcc/ipa.c
index 06f838cb07d..0e2cb2db9eb 100644
--- a/gcc/ipa.c
+++ b/gcc/ipa.c
@@ -1,5 +1,6 @@
/* Basic IPA optimizations and utilities.
- Copyright (C) 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2007, 2008 Free Software Foundation,
+ Inc.
This file is part of GCC.
@@ -42,7 +43,7 @@ cgraph_postorder (struct cgraph_node **order)
/* We have to deal with cycles nicely, so use a depth first traversal
output algorithm. Ignore the fact that some functions won't need
to be output and put them into order as well, so we get dependencies
- right through intline functions. */
+ right through inline functions. */
for (node = cgraph_nodes; node; node = node->next)
node->aux = NULL;
for (node = cgraph_nodes; node; node = node->next)
diff --git a/gcc/langhooks-def.h b/gcc/langhooks-def.h
index aae46406515..dd4916caff6 100644
--- a/gcc/langhooks-def.h
+++ b/gcc/langhooks-def.h
@@ -199,10 +199,12 @@ extern tree lhd_make_node (enum tree_code);
#define LANG_HOOKS_OMP_PREDETERMINED_SHARING lhd_omp_predetermined_sharing
#define LANG_HOOKS_OMP_DISREGARD_VALUE_EXPR hook_bool_tree_bool_false
#define LANG_HOOKS_OMP_PRIVATE_DEBUG_CLAUSE hook_bool_tree_bool_false
-#define LANG_HOOKS_OMP_CLAUSE_DEFAULT_CTOR hook_tree_tree_tree_null
+#define LANG_HOOKS_OMP_PRIVATE_OUTER_REF hook_bool_tree_false
+#define LANG_HOOKS_OMP_CLAUSE_DEFAULT_CTOR hook_tree_tree_tree_tree_null
#define LANG_HOOKS_OMP_CLAUSE_COPY_CTOR lhd_omp_assignment
#define LANG_HOOKS_OMP_CLAUSE_ASSIGN_OP lhd_omp_assignment
#define LANG_HOOKS_OMP_CLAUSE_DTOR hook_tree_tree_tree_null
+#define LANG_HOOKS_OMP_FINISH_CLAUSE hook_void_tree
#define LANG_HOOKS_DECLS { \
LANG_HOOKS_GLOBAL_BINDINGS_P, \
@@ -216,10 +218,12 @@ extern tree lhd_make_node (enum tree_code);
LANG_HOOKS_OMP_PREDETERMINED_SHARING, \
LANG_HOOKS_OMP_DISREGARD_VALUE_EXPR, \
LANG_HOOKS_OMP_PRIVATE_DEBUG_CLAUSE, \
+ LANG_HOOKS_OMP_PRIVATE_OUTER_REF, \
LANG_HOOKS_OMP_CLAUSE_DEFAULT_CTOR, \
LANG_HOOKS_OMP_CLAUSE_COPY_CTOR, \
LANG_HOOKS_OMP_CLAUSE_ASSIGN_OP, \
- LANG_HOOKS_OMP_CLAUSE_DTOR \
+ LANG_HOOKS_OMP_CLAUSE_DTOR, \
+ LANG_HOOKS_OMP_FINISH_CLAUSE \
}
/* The whole thing. The structure is defined in langhooks.h. */
diff --git a/gcc/langhooks.h b/gcc/langhooks.h
index 6a54b01f060..1f64cf18d52 100644
--- a/gcc/langhooks.h
+++ b/gcc/langhooks.h
@@ -1,5 +1,5 @@
/* The lang_hooks data structure.
- Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
@@ -197,9 +197,14 @@ struct lang_hooks_for_decls
be put into OMP_CLAUSE_PRIVATE_DEBUG. */
bool (*omp_private_debug_clause) (tree, bool);
+ /* Return true if DECL in private clause needs
+ OMP_CLAUSE_PRIVATE_OUTER_REF on the private clause. */
+ bool (*omp_private_outer_ref) (tree);
+
/* Build and return code for a default constructor for DECL in
- response to CLAUSE. Return NULL if nothing to be done. */
- tree (*omp_clause_default_ctor) (tree clause, tree decl);
+ response to CLAUSE. OUTER is corresponding outer region's
+ variable if needed. Return NULL if nothing to be done. */
+ tree (*omp_clause_default_ctor) (tree clause, tree decl, tree outer);
/* Build and return code for a copy constructor from SRC to DST. */
tree (*omp_clause_copy_ctor) (tree clause, tree dst, tree src);
@@ -210,6 +215,9 @@ struct lang_hooks_for_decls
/* Build and return code destructing DECL. Return NULL if nothing
to be done. */
tree (*omp_clause_dtor) (tree clause, tree decl);
+
+ /* Do language specific checking on an implicitly determined clause. */
+ void (*omp_finish_clause) (tree clause);
};
/* Language-specific hooks. See langhooks-def.h for defaults. */
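
For orientation, the three hooks added above have the signatures shown in this sketch. It is illustrative only: the stand-in tree typedef and the example_* names are not from the patch, and a front end that needs nothing special can simply keep the langhooks-def.h defaults (hook_bool_tree_false, hook_tree_tree_tree_tree_null and hook_void_tree).

#include <stdbool.h>
#include <stddef.h>

typedef union tree_node *tree;   /* stand-in for GCC's real tree typedef */

/* Hypothetical front-end implementations matching the new hook signatures.  */

static bool
example_omp_private_outer_ref (tree decl)
{
  /* Return true only if the private clause for DECL needs a reference
     to the outer region's variable (OMP_CLAUSE_PRIVATE_OUTER_REF).  */
  (void) decl;
  return false;
}

static tree
example_omp_clause_default_ctor (tree clause, tree decl, tree outer)
{
  /* OUTER is the corresponding outer region's variable when one is
     needed; returning NULL means no construction code is required.  */
  (void) clause; (void) decl; (void) outer;
  return NULL;
}

static void
example_omp_finish_clause (tree clause)
{
  /* Language-specific checking of an implicitly determined clause.  */
  (void) clause;
}
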
diff --git a/gcc/loop-doloop.c b/gcc/loop-doloop.c
index 448bb723a55..c6e0540d15f 100644
--- a/gcc/loop-doloop.c
+++ b/gcc/loop-doloop.c
@@ -1,5 +1,6 @@
/* Perform doloop optimizations
- Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation,
+ Inc.
Based on code by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz)
This file is part of GCC.
@@ -362,7 +363,7 @@ doloop_modify (struct loop *loop, struct niter_desc *desc,
fputs (" iterations).\n", dump_file);
}
- /* Get the probabilty of the original branch. If it exists we would
+ /* Get the probability of the original branch. If it exists we would
need to update REG_BR_PROB of the new jump_insn. */
true_prob_val = find_reg_note (jump_insn, REG_BR_PROB, NULL_RTX);
diff --git a/gcc/matrix-reorg.c b/gcc/matrix-reorg.c
index e77023400e0..50ac7e87b0b 100644
--- a/gcc/matrix-reorg.c
+++ b/gcc/matrix-reorg.c
@@ -1,5 +1,5 @@
/* Matrix layout transformations.
- Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
Contributed by Razya Ladelsky <razya@il.ibm.com>
Originally written by Revital Eres and Mustafa Hagog.
@@ -1543,7 +1543,7 @@ check_allocation_function (void **slot, void *data ATTRIBUTE_UNUSED)
mark_min_matrix_escape_level (mi, level, call_stmt);
if (dump_file)
fprintf (dump_file,
- "Matrix %s: Cannot calculate the size of allocation. escaping at level %d\n",
+ "Matrix %s: Cannot calculate the size of allocation, escaping at level %d\n",
get_name (mi->decl), level);
break;
}
@@ -2235,6 +2235,7 @@ matrix_reorg (void)
free_dominance_info (CDI_POST_DOMINATORS);
pop_cfun ();
current_function_decl = temp_fn;
+ bitmap_obstack_release (NULL);
return 0;
}
@@ -2249,6 +2250,7 @@ matrix_reorg (void)
free_dominance_info (CDI_POST_DOMINATORS);
pop_cfun ();
current_function_decl = temp_fn;
+ bitmap_obstack_release (NULL);
return 0;
}
@@ -2279,6 +2281,7 @@ matrix_reorg (void)
free_dominance_info (CDI_POST_DOMINATORS);
pop_cfun ();
current_function_decl = temp_fn;
+ bitmap_obstack_release (NULL);
}
htab_traverse (matrices_to_reorg, transform_allocation_sites, NULL);
/* Now transform the accesses. */
@@ -2299,6 +2302,7 @@ matrix_reorg (void)
free_dominance_info (CDI_POST_DOMINATORS);
pop_cfun ();
current_function_decl = temp_fn;
+ bitmap_obstack_release (NULL);
}
htab_traverse (matrices_to_reorg, dump_matrix_reorg_analysis, NULL);
diff --git a/gcc/mips-tfile.c b/gcc/mips-tfile.c
index 7a122a1e88a..56a953747e4 100644
--- a/gcc/mips-tfile.c
+++ b/gcc/mips-tfile.c
@@ -56,7 +56,7 @@ along with GCC; see the file COPYING3. If not see
The auxiliary table is a series of 32 bit integers, that are
referenced as needed from the local symbol table. Unlike standard
- COFF, the aux. information does not follow the symbol that uses
+ COFF, the aux. information does not follow the symbol that uses
it, but rather is a separate table. In theory, this would allow
the MIPS compilers to collapse duplicate aux. entries, but I've not
noticed this happening with the 1.31 compiler suite. The different
diff --git a/gcc/mkmap-flat.awk b/gcc/mkmap-flat.awk
index ff16ac98889..377731a4e7d 100644
--- a/gcc/mkmap-flat.awk
+++ b/gcc/mkmap-flat.awk
@@ -1,5 +1,5 @@
# Generate a flat list of symbols to export.
-# Copyright (C) 2007 Free Software Foundation, Inc.
+# Copyright (C) 2007, 2008 Free Software Foundation, Inc.
# Contributed by Richard Henderson <rth@cygnus.com>
#
# This file is part of GCC.
@@ -56,7 +56,7 @@ state == "nm" {
# Now we process a simplified variant of the Solaris symbol version
# script. We have one symbol per line, no semicolons, simple markers
# for beginning and ending each section, and %inherit markers for
-# describing version inheritence. A symbol may appear in more than
+# describing version inheritance. A symbol may appear in more than
# one symbol version, and the last seen takes effect.
# The magic version name '%exclude' causes all the symbols given that
# version to be dropped from the output (unless a later version overrides).
diff --git a/gcc/mkmap-symver.awk b/gcc/mkmap-symver.awk
index 7d9ed438f4d..855043f7e6c 100644
--- a/gcc/mkmap-symver.awk
+++ b/gcc/mkmap-symver.awk
@@ -1,5 +1,5 @@
# Generate an ELF symbol version map a-la Solaris and GNU ld.
-# Copyright (C) 2007 Free Software Foundation, Inc.
+# Copyright (C) 2007, 2008 Free Software Foundation, Inc.
# Contributed by Richard Henderson <rth@cygnus.com>
#
# This file is part of GCC.
@@ -58,7 +58,7 @@ state == "nm" {
# Now we process a simplified variant of the Solaris symbol version
# script. We have one symbol per line, no semicolons, simple markers
# for beginning and ending each section, and %inherit markers for
-# describing version inheritence. A symbol may appear in more than
+# describing version inheritance. A symbol may appear in more than
# one symbol version, and the last seen takes effect.
# The magic version name '%exclude' causes all the symbols given that
# version to be dropped from the output (unless a later version overrides).
diff --git a/gcc/modulo-sched.c b/gcc/modulo-sched.c
index f2193d4e41a..f11bc1c35cf 100644
--- a/gcc/modulo-sched.c
+++ b/gcc/modulo-sched.c
@@ -1,5 +1,5 @@
/* Swing Modulo Scheduling implementation.
- Copyright (C) 2004, 2005, 2006, 2007
+ Copyright (C) 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
Contributed by Ayal Zaks and Mustafa Hagog <zaks,mustafa@il.ibm.com>
@@ -965,7 +965,7 @@ sms_schedule (void)
if (single_exit (loop)->count)
trip_count = latch_edge->count / single_exit (loop)->count;
- /* Perfrom SMS only on loops that their average count is above threshold. */
+ /* Perform SMS only on loops that their average count is above threshold. */
if ( latch_edge->count
&& (latch_edge->count < single_exit (loop)->count * SMS_LOOP_AVERAGE_COUNT_THRESHOLD))
@@ -1680,7 +1680,7 @@ calculate_must_precede_follow (ddg_node_ptr u_node, int start, int end,
parameters to decide if that's possible:
PS - The partial schedule.
U - The serial number of U_NODE.
- NUM_SPLITS - The number of row spilts made so far.
+ NUM_SPLITS - The number of row splits made so far.
MUST_PRECEDE - The nodes that must precede U_NODE. (only valid at
the first row of the scheduling window)
MUST_FOLLOW - The nodes that must follow U_NODE. (only valid at the
@@ -2125,7 +2125,7 @@ order_nodes_of_sccs (ddg_all_sccs_ptr all_sccs, int * node_order)
sbitmap_zero (prev_sccs);
sbitmap_ones (ones);
- /* Perfrom the node ordering starting from the SCC with the highest recMII.
+ /* Perform the node ordering starting from the SCC with the highest recMII.
For each SCC order the nodes according to their ASAP/ALAP/HEIGHT etc. */
for (i = 0; i < all_sccs->num_sccs; i++)
{
@@ -2740,7 +2740,7 @@ ps_has_conflicts (partial_schedule_ptr ps, int from, int to)
return true;
/* Update the DFA state and return with failure if the DFA found
- recource conflicts. */
+ resource conflicts. */
if (state_transition (curr_state, insn) >= 0)
return true;
diff --git a/gcc/omega.c b/gcc/omega.c
index 93b5515108e..8f0470f6dfd 100644
--- a/gcc/omega.c
+++ b/gcc/omega.c
@@ -5,7 +5,8 @@
This code has no license restrictions, and is considered public
domain.
- Changes copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+ Changes copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation,
+ Inc.
Contributed by Sebastian Pop <sebastian.pop@inria.fr>
This file is part of GCC.
@@ -2433,7 +2434,7 @@ smooth_weird_equations (omega_pb pb)
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file,
- "Smoothing wierd equations; adding:\n");
+ "Smoothing weird equations; adding:\n");
omega_print_geq (dump_file, pb, &pb->geqs[e3]);
fprintf (dump_file, "\nto:\n");
omega_print_problem (dump_file, pb);
diff --git a/gcc/omp-builtins.def b/gcc/omp-builtins.def
index cc450f6d4d6..5fd4f9aea75 100644
--- a/gcc/omp-builtins.def
+++ b/gcc/omp-builtins.def
@@ -1,6 +1,6 @@
/* This file contains the definitions and documentation for the
OpenMP builtins used in the GNU compiler.
- Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
This file is part of GCC.
@@ -35,6 +35,8 @@ DEF_GOMP_BUILTIN (BUILT_IN_GOMP_ATOMIC_END, "GOMP_atomic_end",
BT_FN_VOID, ATTR_NOTHROW_LIST)
DEF_GOMP_BUILTIN (BUILT_IN_GOMP_BARRIER, "GOMP_barrier",
BT_FN_VOID, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASKWAIT, "GOMP_taskwait",
+ BT_FN_VOID, ATTR_NOTHROW_LIST)
DEF_GOMP_BUILTIN (BUILT_IN_GOMP_CRITICAL_START, "GOMP_critical_start",
BT_FN_VOID, ATTR_NOTHROW_LIST)
DEF_GOMP_BUILTIN (BUILT_IN_GOMP_CRITICAL_END, "GOMP_critical_end",
@@ -100,6 +102,58 @@ DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ORDERED_GUIDED_NEXT,
DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ORDERED_RUNTIME_NEXT,
"GOMP_loop_ordered_runtime_next",
BT_FN_BOOL_LONGPTR_LONGPTR, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_STATIC_START,
+ "GOMP_loop_ull_static_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_DYNAMIC_START,
+ "GOMP_loop_ull_dynamic_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_GUIDED_START,
+ "GOMP_loop_ull_guided_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_RUNTIME_START,
+ "GOMP_loop_ull_runtime_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_STATIC_START,
+ "GOMP_loop_ull_ordered_static_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START,
+ "GOMP_loop_ull_ordered_dynamic_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_GUIDED_START,
+ "GOMP_loop_ull_ordered_guided_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_RUNTIME_START,
+ "GOMP_loop_ull_ordered_runtime_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT, "GOMP_loop_ull_static_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_DYNAMIC_NEXT, "GOMP_loop_ull_dynamic_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_GUIDED_NEXT, "GOMP_loop_ull_guided_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_RUNTIME_NEXT, "GOMP_loop_ull_runtime_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT,
+ "GOMP_loop_ull_ordered_static_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT,
+ "GOMP_loop_ull_ordered_dynamic_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT,
+ "GOMP_loop_ull_ordered_guided_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT,
+ "GOMP_loop_ull_ordered_runtime_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LIST)
/* NOTE: Do not change the order of BUILT_IN_GOMP_PARALLEL_LOOP_*_START.
They are used in index arithmetic with enum omp_clause_schedule_kind
in omp-low.c. */
@@ -131,6 +185,9 @@ DEF_GOMP_BUILTIN (BUILT_IN_GOMP_PARALLEL_START, "GOMP_parallel_start",
BT_FN_VOID_OMPFN_PTR_UINT, ATTR_NOTHROW_LIST)
DEF_GOMP_BUILTIN (BUILT_IN_GOMP_PARALLEL_END, "GOMP_parallel_end",
BT_FN_VOID, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASK, "GOMP_task",
+ BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT,
+ ATTR_NOTHROW_LIST)
DEF_GOMP_BUILTIN (BUILT_IN_GOMP_SECTIONS_START, "GOMP_sections_start",
BT_FN_UINT_UINT, ATTR_NOTHROW_LIST)
DEF_GOMP_BUILTIN (BUILT_IN_GOMP_SECTIONS_NEXT, "GOMP_sections_next",
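
The new BUILT_IN_GOMP_TASK entry above has type BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT, and expand_task_call in omp-low.c (later in this patch) builds a seven-argument call to it. Read together, they imply a runtime entry point of roughly the shape below; the actual libgomp declaration is outside this patch, so this is an inferred sketch, not the library's API.

#include <stdbool.h>

/* Inferred from the builtin type string and the argument order used by
   expand_task_call; parameter names are made up for readability.  */
void GOMP_task (void (*fn) (void *),            /* task body function           */
                void *data,                     /* shared/firstprivate block    */
                void (*cpyfn) (void *, void *), /* firstprivate copy fn or NULL */
                long arg_size,                  /* OMP_TASK_ARG_SIZE            */
                long arg_align,                 /* OMP_TASK_ARG_ALIGN           */
                bool if_clause,                 /* value of the if() clause     */
                unsigned flags);                /* bit 0 set for untied tasks   */
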
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index 1f3063ba53e..e9223b2afb2 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -77,6 +77,14 @@ typedef struct omp_context
tree sender_decl;
tree receiver_decl;
+ /* These are used just by task contexts, if task firstprivate fn is
+ needed. srecord_type is used to communicate from the thread
+ that encountered the task construct to task firstprivate fn,
+ record_type is allocated by GOMP_task, initialized by task firstprivate
+ fn and passed to the task body fn. */
+ splay_tree sfield_map;
+ tree srecord_type;
+
/* A chain of variables to add to the top-level block surrounding the
construct. In the case of a parallel, this is in the child function. */
tree block_vars;
@@ -95,21 +103,30 @@ typedef struct omp_context
} omp_context;
+struct omp_for_data_loop
+{
+ tree v, n1, n2, step;
+ enum tree_code cond_code;
+};
+
/* A structure describing the main elements of a parallel loop. */
struct omp_for_data
{
- tree v, n1, n2, step, chunk_size, for_stmt;
- enum tree_code cond_code;
- tree pre;
+ struct omp_for_data_loop loop;
+ tree chunk_size, for_stmt;
+ tree pre, iter_type;
+ int collapse;
bool have_nowait, have_ordered;
enum omp_clause_schedule_kind sched_kind;
+ struct omp_for_data_loop *loops;
};
static splay_tree all_contexts;
-static int parallel_nesting_level;
+static int taskreg_nesting_level;
struct omp_region *root_omp_region;
+static bitmap task_shared_vars;
static void scan_omp (tree *, omp_context *);
static void lower_omp (tree *, omp_context *);
@@ -137,6 +154,25 @@ is_parallel_ctx (omp_context *ctx)
}
+/* Return true if CTX is for an omp task. */
+
+static inline bool
+is_task_ctx (omp_context *ctx)
+{
+ return TREE_CODE (ctx->stmt) == OMP_TASK;
+}
+
+
+/* Return true if CTX is for an omp parallel or omp task. */
+
+static inline bool
+is_taskreg_ctx (omp_context *ctx)
+{
+ return TREE_CODE (ctx->stmt) == OMP_PARALLEL
+ || TREE_CODE (ctx->stmt) == OMP_TASK;
+}
+
+
/* Return true if REGION is a combined parallel+workshare region. */
static inline bool
@@ -150,65 +186,28 @@ is_combined_parallel (struct omp_region *region)
them into *FD. */
static void
-extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
+extract_omp_for_data (tree for_stmt, struct omp_for_data *fd,
+ struct omp_for_data_loop *loops)
{
- tree t, var;
+ tree t, var, *collapse_iter, *collapse_count;
+ tree count = NULL_TREE, iter_type = long_integer_type_node;
+ struct omp_for_data_loop *loop;
+ int i;
+ struct omp_for_data_loop dummy_loop;
fd->for_stmt = for_stmt;
fd->pre = NULL;
-
- t = OMP_FOR_INIT (for_stmt);
- gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
- fd->v = GIMPLE_STMT_OPERAND (t, 0);
- gcc_assert (SSA_VAR_P (fd->v));
- gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
- var = TREE_CODE (fd->v) == SSA_NAME ? SSA_NAME_VAR (fd->v) : fd->v;
- fd->n1 = GIMPLE_STMT_OPERAND (t, 1);
-
- t = OMP_FOR_COND (for_stmt);
- fd->cond_code = TREE_CODE (t);
- gcc_assert (TREE_OPERAND (t, 0) == var);
- fd->n2 = TREE_OPERAND (t, 1);
- switch (fd->cond_code)
- {
- case LT_EXPR:
- case GT_EXPR:
- break;
- case LE_EXPR:
- fd->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
- build_int_cst (TREE_TYPE (fd->n2), 1));
- fd->cond_code = LT_EXPR;
- break;
- case GE_EXPR:
- fd->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
- build_int_cst (TREE_TYPE (fd->n2), 1));
- fd->cond_code = GT_EXPR;
- break;
- default:
- gcc_unreachable ();
- }
-
- t = OMP_FOR_INCR (fd->for_stmt);
- gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
- gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == var);
- t = GIMPLE_STMT_OPERAND (t, 1);
- gcc_assert (TREE_OPERAND (t, 0) == var);
- switch (TREE_CODE (t))
- {
- case PLUS_EXPR:
- fd->step = TREE_OPERAND (t, 1);
- break;
- case MINUS_EXPR:
- fd->step = TREE_OPERAND (t, 1);
- fd->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (fd->step), fd->step);
- break;
- default:
- gcc_unreachable ();
- }
+ fd->collapse = TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt));
+ if (fd->collapse > 1)
+ fd->loops = loops;
+ else
+ fd->loops = &fd->loop;
fd->have_nowait = fd->have_ordered = false;
fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
fd->chunk_size = NULL_TREE;
+ collapse_iter = NULL;
+ collapse_count = NULL;
for (t = OMP_FOR_CLAUSES (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
switch (OMP_CLAUSE_CODE (t))
@@ -223,20 +222,223 @@ extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
break;
+ case OMP_CLAUSE_COLLAPSE:
+ if (fd->collapse > 1)
+ {
+ collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
+ collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
+ }
default:
break;
}
+ /* FIXME: for now map schedule(auto) to schedule(static).
+ There should be analysis to determine whether all iterations
+ are approximately the same amount of work (then schedule(static)
+ is best) or if it varries (then schedule(dynamic,N) is better). */
+ if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
+ {
+ fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
+ gcc_assert (fd->chunk_size == NULL);
+ }
+ gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
gcc_assert (fd->chunk_size == NULL);
else if (fd->chunk_size == NULL)
{
/* We only need to compute a default chunk size for ordered
static loops and dynamic loops. */
- if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered)
+ if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
+ || fd->have_ordered
+ || fd->collapse > 1)
fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
? integer_zero_node : integer_one_node;
}
+
+ for (i = 0; i < fd->collapse; i++)
+ {
+ if (fd->collapse == 1)
+ loop = &fd->loop;
+ else if (loops != NULL)
+ loop = loops + i;
+ else
+ loop = &dummy_loop;
+
+ t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
+ gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
+ loop->v = GIMPLE_STMT_OPERAND (t, 0);
+ gcc_assert (SSA_VAR_P (loop->v));
+ gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
+ || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
+ var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
+ loop->n1 = GIMPLE_STMT_OPERAND (t, 1);
+
+ t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
+ loop->cond_code = TREE_CODE (t);
+ gcc_assert (TREE_OPERAND (t, 0) == var);
+ loop->n2 = TREE_OPERAND (t, 1);
+ switch (loop->cond_code)
+ {
+ case LT_EXPR:
+ case GT_EXPR:
+ break;
+ case LE_EXPR:
+ if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
+ loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
+ loop->n2, size_one_node);
+ else
+ loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
+ build_int_cst (TREE_TYPE (loop->n2), 1));
+ loop->cond_code = LT_EXPR;
+ break;
+ case GE_EXPR:
+ if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
+ loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
+ loop->n2, size_int (-1));
+ else
+ loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
+ build_int_cst (TREE_TYPE (loop->n2), 1));
+ loop->cond_code = GT_EXPR;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
+ gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
+ gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == var);
+ t = GIMPLE_STMT_OPERAND (t, 1);
+ gcc_assert (TREE_OPERAND (t, 0) == var);
+ switch (TREE_CODE (t))
+ {
+ case PLUS_EXPR:
+ case POINTER_PLUS_EXPR:
+ loop->step = TREE_OPERAND (t, 1);
+ break;
+ case MINUS_EXPR:
+ loop->step = TREE_OPERAND (t, 1);
+ loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),
+ loop->step);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ if (iter_type != long_long_unsigned_type_node)
+ {
+ if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
+ iter_type = long_long_unsigned_type_node;
+ else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
+ && TYPE_PRECISION (TREE_TYPE (loop->v))
+ >= TYPE_PRECISION (iter_type))
+ {
+ tree n;
+
+ if (loop->cond_code == LT_EXPR)
+ n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
+ loop->n2, loop->step);
+ else
+ n = loop->n1;
+ if (TREE_CODE (n) != INTEGER_CST
+ || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
+ iter_type = long_long_unsigned_type_node;
+ }
+ else if (TYPE_PRECISION (TREE_TYPE (loop->v))
+ > TYPE_PRECISION (iter_type))
+ {
+ tree n1, n2;
+
+ if (loop->cond_code == LT_EXPR)
+ {
+ n1 = loop->n1;
+ n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
+ loop->n2, loop->step);
+ }
+ else
+ {
+ n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
+ loop->n2, loop->step);
+ n2 = loop->n1;
+ }
+ if (TREE_CODE (n1) != INTEGER_CST
+ || TREE_CODE (n2) != INTEGER_CST
+ || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
+ || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
+ iter_type = long_long_unsigned_type_node;
+ }
+ }
+
+ if (collapse_count && *collapse_count == NULL)
+ {
+ if ((i == 0 || count != NULL_TREE)
+ && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
+ && TREE_CONSTANT (loop->n1)
+ && TREE_CONSTANT (loop->n2)
+ && TREE_CODE (loop->step) == INTEGER_CST)
+ {
+ tree itype = TREE_TYPE (loop->v);
+
+ if (POINTER_TYPE_P (itype))
+ itype
+ = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
+ t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
+ t = fold_build2 (PLUS_EXPR, itype,
+ fold_convert (itype, loop->step), t);
+ t = fold_build2 (PLUS_EXPR, itype, t,
+ fold_convert (itype, loop->n2));
+ t = fold_build2 (MINUS_EXPR, itype, t,
+ fold_convert (itype, loop->n1));
+ if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
+ t = fold_build2 (TRUNC_DIV_EXPR, itype,
+ fold_build1 (NEGATE_EXPR, itype, t),
+ fold_build1 (NEGATE_EXPR, itype,
+ fold_convert (itype,
+ loop->step)));
+ else
+ t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
+ fold_convert (itype, loop->step));
+ t = fold_convert (long_long_unsigned_type_node, t);
+ if (count != NULL_TREE)
+ count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
+ count, t);
+ else
+ count = t;
+ if (TREE_CODE (count) != INTEGER_CST)
+ count = NULL_TREE;
+ }
+ else
+ count = NULL_TREE;
+ }
+ }
+
+ if (count)
+ {
+ if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
+ iter_type = long_long_unsigned_type_node;
+ else
+ iter_type = long_integer_type_node;
+ }
+ else if (collapse_iter && *collapse_iter != NULL)
+ iter_type = TREE_TYPE (*collapse_iter);
+ fd->iter_type = iter_type;
+ if (collapse_iter && *collapse_iter == NULL)
+ *collapse_iter = create_tmp_var (iter_type, ".iter");
+ if (collapse_count && *collapse_count == NULL)
+ {
+ if (count)
+ *collapse_count = fold_convert (iter_type, count);
+ else
+ *collapse_count = create_tmp_var (iter_type, ".count");
+ }
+
+ if (fd->collapse > 1)
+ {
+ fd->loop.v = *collapse_iter;
+ fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
+ fd->loop.n2 = *collapse_count;
+ fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
+ fd->loop.cond_code = LT_EXPR;
+ }
}
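
As a concrete check of the collapse-count arithmetic added above (the LT_EXPR branch computes (step + -1 + n2 - n1) / step with truncating division), here is a small self-contained example; the loop bounds are invented purely for illustration.

#include <assert.h>

int
main (void)
{
  /* Hypothetical collapsed dimension: for (i = 0; i < 10; i += 3).  */
  long n1 = 0, n2 = 10, step = 3;

  /* LT_EXPR case of the count computation above.  */
  long count = (step + -1 + n2 - n1) / step;

  assert (count == 4);   /* iterations i = 0, 3, 6, 9 */
  return 0;
}
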
@@ -296,16 +498,21 @@ workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
gcc_assert (TREE_CODE (ws_stmt) == OMP_FOR);
- extract_omp_for_data (ws_stmt, &fd);
+ extract_omp_for_data (ws_stmt, &fd, NULL);
+
+ if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
+ return false;
+ if (fd.iter_type != long_integer_type_node)
+ return false;
/* FIXME. We give up too easily here. If any of these arguments
are not constants, they will likely involve variables that have
been mapped into fields of .omp_data_s for sharing with the child
function. With appropriate data flow, it would be possible to
see through this. */
- if (!is_gimple_min_invariant (fd.n1)
- || !is_gimple_min_invariant (fd.n2)
- || !is_gimple_min_invariant (fd.step)
+ if (!is_gimple_min_invariant (fd.loop.n1)
+ || !is_gimple_min_invariant (fd.loop.n2)
+ || !is_gimple_min_invariant (fd.loop.step)
|| (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
return false;
@@ -327,7 +534,7 @@ get_ws_args_for (tree ws_stmt)
struct omp_for_data fd;
tree ws_args;
- extract_omp_for_data (ws_stmt, &fd);
+ extract_omp_for_data (ws_stmt, &fd, NULL);
ws_args = NULL_TREE;
if (fd.chunk_size)
@@ -336,13 +543,13 @@ get_ws_args_for (tree ws_stmt)
ws_args = tree_cons (NULL, t, ws_args);
}
- t = fold_convert (long_integer_type_node, fd.step);
+ t = fold_convert (long_integer_type_node, fd.loop.step);
ws_args = tree_cons (NULL, t, ws_args);
- t = fold_convert (long_integer_type_node, fd.n2);
+ t = fold_convert (long_integer_type_node, fd.loop.n2);
ws_args = tree_cons (NULL, t, ws_args);
- t = fold_convert (long_integer_type_node, fd.n1);
+ t = fold_convert (long_integer_type_node, fd.loop.n1);
ws_args = tree_cons (NULL, t, ws_args);
return ws_args;
@@ -472,6 +679,16 @@ lookup_field (tree var, omp_context *ctx)
}
static inline tree
+lookup_sfield (tree var, omp_context *ctx)
+{
+ splay_tree_node n;
+ n = splay_tree_lookup (ctx->sfield_map
+ ? ctx->sfield_map : ctx->field_map,
+ (splay_tree_key) var);
+ return (tree) n->value;
+}
+
+static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
splay_tree_node n;
@@ -483,7 +700,7 @@ maybe_lookup_field (tree var, omp_context *ctx)
the parallel context if DECL is to be shared. */
static bool
-use_pointer_for_field (const_tree decl, omp_context *shared_ctx)
+use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
return true;
@@ -524,11 +741,11 @@ use_pointer_for_field (const_tree decl, omp_context *shared_ctx)
if (maybe_lookup_decl (decl, up))
break;
- if (up && is_parallel_ctx (up))
+ if (up && is_taskreg_ctx (up))
{
tree c;
- for (c = OMP_PARALLEL_CLAUSES (up->stmt);
+ for (c = OMP_TASKREG_CLAUSES (up->stmt);
c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
&& OMP_CLAUSE_DECL (c) == decl)
@@ -538,6 +755,26 @@ use_pointer_for_field (const_tree decl, omp_context *shared_ctx)
return true;
}
}
+
+ /* For tasks avoid using copy-in/out, unless they are readonly
+ (in which case just copy-in is used). As tasks can be
+ deferred or executed in different thread, when GOMP_task
+ returns, the task hasn't necessarily terminated. */
+ if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
+ {
+ tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
+ if (is_gimple_reg (outer))
+ {
+ /* Taking address of OUTER in lower_send_shared_vars
+ might need regimplification of everything that uses the
+ variable. */
+ if (!task_shared_vars)
+ task_shared_vars = BITMAP_ALLOC (NULL);
+ bitmap_set_bit (task_shared_vars, DECL_UID (outer));
+ TREE_ADDRESSABLE (outer) = 1;
+ }
+ return true;
+ }
}
return false;
@@ -622,7 +859,7 @@ build_outer_var_ref (tree var, omp_context *ctx)
x = build_outer_var_ref (x, ctx);
x = build_fold_indirect_ref (x);
}
- else if (is_parallel_ctx (ctx))
+ else if (is_taskreg_ctx (ctx))
{
bool by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
@@ -647,7 +884,7 @@ build_outer_var_ref (tree var, omp_context *ctx)
static tree
build_sender_ref (tree var, omp_context *ctx)
{
- tree field = lookup_field (var, ctx);
+ tree field = lookup_sfield (var, ctx);
return build3 (COMPONENT_REF, TREE_TYPE (field),
ctx->sender_decl, field, NULL);
}
@@ -655,15 +892,20 @@ build_sender_ref (tree var, omp_context *ctx)
/* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
static void
-install_var_field (tree var, bool by_ref, omp_context *ctx)
+install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
- tree field, type;
+ tree field, type, sfield = NULL_TREE;
- gcc_assert (!splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
+ gcc_assert ((mask & 1) == 0
+ || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
+ gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
+ || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
type = TREE_TYPE (var);
if (by_ref)
type = build_pointer_type (type);
+ else if ((mask & 3) == 1 && is_reference (var))
+ type = TREE_TYPE (type);
field = build_decl (FIELD_DECL, DECL_NAME (var), type);
@@ -671,11 +913,57 @@ install_var_field (tree var, bool by_ref, omp_context *ctx)
side effect of making dwarf2out ignore this member, so for helpful
debugging we clear it later in delete_omp_context. */
DECL_ABSTRACT_ORIGIN (field) = var;
+ if (type == TREE_TYPE (var))
+ {
+ DECL_ALIGN (field) = DECL_ALIGN (var);
+ DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
+ TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
+ }
+ else
+ DECL_ALIGN (field) = TYPE_ALIGN (type);
- insert_field_into_struct (ctx->record_type, field);
+ if ((mask & 3) == 3)
+ {
+ insert_field_into_struct (ctx->record_type, field);
+ if (ctx->srecord_type)
+ {
+ sfield = build_decl (FIELD_DECL, DECL_NAME (var), type);
+ DECL_ABSTRACT_ORIGIN (sfield) = var;
+ DECL_ALIGN (sfield) = DECL_ALIGN (field);
+ DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
+ TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
+ insert_field_into_struct (ctx->srecord_type, sfield);
+ }
+ }
+ else
+ {
+ if (ctx->srecord_type == NULL_TREE)
+ {
+ tree t;
+
+ ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
+ ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
+ for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
+ {
+ sfield = build_decl (FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
+ DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
+ insert_field_into_struct (ctx->srecord_type, sfield);
+ splay_tree_insert (ctx->sfield_map,
+ (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
+ (splay_tree_value) sfield);
+ }
+ }
+ sfield = field;
+ insert_field_into_struct ((mask & 1) ? ctx->record_type
+ : ctx->srecord_type, field);
+ }
- splay_tree_insert (ctx->field_map, (splay_tree_key) var,
- (splay_tree_value) field);
+ if (mask & 1)
+ splay_tree_insert (ctx->field_map, (splay_tree_key) var,
+ (splay_tree_value) field);
+ if ((mask & 2) && ctx->sfield_map)
+ splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
+ (splay_tree_value) sfield);
}
static tree
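
The new MASK argument to install_var_field above selects which of the two task records a field is created in. An editorial summary of the encoding, matching the callers later in this patch (it is not part of the patch itself):

/* Hypothetical names; the patch simply passes the literals 1, 2 and 3.  */
enum install_var_field_mask
{
  FIELD_IN_RECORD  = 1,  /* field added to record_type (and field_map) only   */
  FIELD_IN_SRECORD = 2,  /* field added to srecord_type (and sfield_map) only */
  FIELD_IN_BOTH    = 3   /* matching fields added to both record types        */
};
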
@@ -740,7 +1028,7 @@ omp_copy_decl (tree var, copy_body_data *cb)
return new_var;
}
- while (!is_parallel_ctx (ctx))
+ while (!is_taskreg_ctx (ctx))
{
ctx = ctx->outer;
if (ctx == NULL)
@@ -912,6 +1200,8 @@ delete_omp_context (splay_tree_value value)
if (ctx->field_map)
splay_tree_delete (ctx->field_map);
+ if (ctx->sfield_map)
+ splay_tree_delete (ctx->sfield_map);
/* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
it produces corrupt debug information. */
@@ -921,6 +1211,12 @@ delete_omp_context (splay_tree_value value)
for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
DECL_ABSTRACT_ORIGIN (t) = NULL;
}
+ if (ctx->srecord_type)
+ {
+ tree t;
+ for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
+ DECL_ABSTRACT_ORIGIN (t) = NULL;
+ }
XDELETE (ctx);
}
@@ -955,6 +1251,9 @@ fixup_child_record_type (omp_context *ctx)
DECL_CONTEXT (new_f) = type;
TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
TREE_CHAIN (new_f) = new_fields;
+ walk_tree (&DECL_SIZE (new_f), copy_body_r, &ctx->cb, NULL);
+ walk_tree (&DECL_SIZE_UNIT (new_f), copy_body_r, &ctx->cb, NULL);
+ walk_tree (&DECL_FIELD_OFFSET (new_f), copy_body_r, &ctx->cb, NULL);
new_fields = new_f;
/* Arrange to be able to look up the receiver field
@@ -986,26 +1285,28 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
{
case OMP_CLAUSE_PRIVATE:
decl = OMP_CLAUSE_DECL (c);
- if (!is_variable_sized (decl))
+ if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
+ goto do_private;
+ else if (!is_variable_sized (decl))
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_SHARED:
- gcc_assert (is_parallel_ctx (ctx));
+ gcc_assert (is_taskreg_ctx (ctx));
decl = OMP_CLAUSE_DECL (c);
gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
|| !is_variable_sized (decl));
- by_ref = use_pointer_for_field (decl, ctx);
/* Global variables don't need to be copied,
the receiver side will use them directly. */
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
break;
+ by_ref = use_pointer_for_field (decl, ctx);
if (! TREE_READONLY (decl)
|| TREE_ADDRESSABLE (decl)
|| by_ref
|| is_reference (decl))
{
- install_var_field (decl, by_ref, ctx);
+ install_var_field (decl, by_ref, 3, ctx);
install_var_local (decl, ctx);
break;
}
@@ -1025,13 +1326,26 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
decl = OMP_CLAUSE_DECL (c);
do_private:
if (is_variable_sized (decl))
- break;
- else if (is_parallel_ctx (ctx)
- && ! is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
- ctx)))
{
+ if (is_task_ctx (ctx))
+ install_var_field (decl, false, 1, ctx);
+ break;
+ }
+ else if (is_taskreg_ctx (ctx))
+ {
+ bool global
+ = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
by_ref = use_pointer_for_field (decl, NULL);
- install_var_field (decl, by_ref, ctx);
+
+ if (is_task_ctx (ctx)
+ && (global || by_ref || is_reference (decl)))
+ {
+ install_var_field (decl, false, 1, ctx);
+ if (!global)
+ install_var_field (decl, by_ref, 2, ctx);
+ }
+ else if (!global)
+ install_var_field (decl, by_ref, 3, ctx);
}
install_var_local (decl, ctx);
break;
@@ -1044,7 +1358,7 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
case OMP_CLAUSE_COPYIN:
decl = OMP_CLAUSE_DECL (c);
by_ref = use_pointer_for_field (decl, NULL);
- install_var_field (decl, by_ref, ctx);
+ install_var_field (decl, by_ref, 3, ctx);
break;
case OMP_CLAUSE_DEFAULT:
@@ -1060,6 +1374,8 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
+ case OMP_CLAUSE_COLLAPSE:
+ case OMP_CLAUSE_UNTIED:
break;
default:
@@ -1074,6 +1390,8 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
case OMP_CLAUSE_LASTPRIVATE:
/* Let the corresponding firstprivate clause create
the variable. */
+ if (OMP_CLAUSE_LASTPRIVATE_STMT (c))
+ scan_array_reductions = true;
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
@@ -1106,6 +1424,8 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
+ case OMP_CLAUSE_COLLAPSE:
+ case OMP_CLAUSE_UNTIED:
break;
default:
@@ -1121,6 +1441,9 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
scan_omp (&OMP_CLAUSE_REDUCTION_INIT (c), ctx);
scan_omp (&OMP_CLAUSE_REDUCTION_MERGE (c), ctx);
}
+ else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
+ && OMP_CLAUSE_LASTPRIVATE_STMT (c))
+ scan_omp (&OMP_CLAUSE_LASTPRIVATE_STMT (c), ctx);
}
/* Create a new name for omp child function. Returns an identifier. */
@@ -1128,15 +1451,17 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
static GTY(()) unsigned int tmp_ompfn_id_num;
static tree
-create_omp_child_function_name (void)
+create_omp_child_function_name (bool task_copy)
{
tree name = DECL_ASSEMBLER_NAME (current_function_decl);
size_t len = IDENTIFIER_LENGTH (name);
char *tmp_name, *prefix;
+ const char *suffix;
- prefix = alloca (len + sizeof ("_omp_fn"));
+ suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
+ prefix = alloca (len + strlen (suffix) + 1);
memcpy (prefix, IDENTIFIER_POINTER (name), len);
- strcpy (prefix + len, "_omp_fn");
+ strcpy (prefix + len, suffix);
#ifndef NO_DOT_IN_LABEL
prefix[len] = '.';
#elif !defined NO_DOLLAR_IN_LABEL
@@ -1150,17 +1475,24 @@ create_omp_child_function_name (void)
yet, just the bare decl. */
static void
-create_omp_child_function (omp_context *ctx)
+create_omp_child_function (omp_context *ctx, bool task_copy)
{
tree decl, type, name, t;
- name = create_omp_child_function_name ();
- type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
+ name = create_omp_child_function_name (task_copy);
+ if (task_copy)
+ type = build_function_type_list (void_type_node, ptr_type_node,
+ ptr_type_node, NULL_TREE);
+ else
+ type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
decl = build_decl (FUNCTION_DECL, name, type);
decl = lang_hooks.decls.pushdecl (decl);
- ctx->cb.dst_fn = decl;
+ if (!task_copy)
+ ctx->cb.dst_fn = decl;
+ else
+ OMP_TASK_COPYFN (ctx->stmt) = decl;
TREE_STATIC (decl) = 1;
TREE_USED (decl) = 1;
@@ -1183,7 +1515,19 @@ create_omp_child_function (omp_context *ctx)
DECL_CONTEXT (t) = current_function_decl;
TREE_USED (t) = 1;
DECL_ARGUMENTS (decl) = t;
- ctx->receiver_decl = t;
+ if (!task_copy)
+ ctx->receiver_decl = t;
+ else
+ {
+ t = build_decl (PARM_DECL, get_identifier (".omp_data_o"),
+ ptr_type_node);
+ DECL_ARTIFICIAL (t) = 1;
+ DECL_ARG_TYPE (t) = ptr_type_node;
+ DECL_CONTEXT (t) = current_function_decl;
+ TREE_USED (t) = 1;
+ TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
+ DECL_ARGUMENTS (decl) = t;
+ }
/* Allocate memory for the function structure. The call to
allocate_struct_function clobbers CFUN, so we need to restore
@@ -1214,7 +1558,7 @@ scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx)
}
ctx = new_omp_context (*stmt_p, outer_ctx);
- if (parallel_nesting_level > 1)
+ if (taskreg_nesting_level > 1)
ctx->is_nested = true;
ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
@@ -1222,7 +1566,7 @@ scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx)
name = create_tmp_var_name (".omp_data_s");
name = build_decl (TYPE_DECL, name, ctx->record_type);
TYPE_NAME (ctx->record_type) = name;
- create_omp_child_function (ctx);
+ create_omp_child_function (ctx, false);
OMP_PARALLEL_FN (*stmt_p) = ctx->cb.dst_fn;
scan_sharing_clauses (OMP_PARALLEL_CLAUSES (*stmt_p), ctx);
@@ -1237,6 +1581,84 @@ scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx)
}
}
+/* Scan an OpenMP task directive. */
+
+static void
+scan_omp_task (tree *stmt_p, omp_context *outer_ctx)
+{
+ omp_context *ctx;
+ tree name;
+
+ /* Ignore task directives with empty bodies. */
+ if (optimize > 0
+ && empty_body_p (OMP_TASK_BODY (*stmt_p)))
+ {
+ *stmt_p = build_empty_stmt ();
+ return;
+ }
+
+ ctx = new_omp_context (*stmt_p, outer_ctx);
+ if (taskreg_nesting_level > 1)
+ ctx->is_nested = true;
+ ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
+ ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
+ ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
+ name = create_tmp_var_name (".omp_data_s");
+ name = build_decl (TYPE_DECL, name, ctx->record_type);
+ TYPE_NAME (ctx->record_type) = name;
+ create_omp_child_function (ctx, false);
+ OMP_TASK_FN (*stmt_p) = ctx->cb.dst_fn;
+
+ scan_sharing_clauses (OMP_TASK_CLAUSES (*stmt_p), ctx);
+
+ if (ctx->srecord_type)
+ {
+ name = create_tmp_var_name (".omp_data_a");
+ name = build_decl (TYPE_DECL, name, ctx->srecord_type);
+ TYPE_NAME (ctx->srecord_type) = name;
+ create_omp_child_function (ctx, true);
+ }
+
+ scan_omp (&OMP_TASK_BODY (*stmt_p), ctx);
+
+ if (TYPE_FIELDS (ctx->record_type) == NULL)
+ {
+ ctx->record_type = ctx->receiver_decl = NULL;
+ OMP_TASK_ARG_SIZE (*stmt_p)
+ = build_int_cst (long_integer_type_node, 0);
+ OMP_TASK_ARG_ALIGN (*stmt_p)
+ = build_int_cst (long_integer_type_node, 1);
+ }
+ else
+ {
+ tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
+ /* Move VLA fields to the end. */
+ p = &TYPE_FIELDS (ctx->record_type);
+ while (*p)
+ if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
+ || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
+ {
+ *q = *p;
+ *p = TREE_CHAIN (*p);
+ TREE_CHAIN (*q) = NULL_TREE;
+ q = &TREE_CHAIN (*q);
+ }
+ else
+ p = &TREE_CHAIN (*p);
+ *p = vla_fields;
+ layout_type (ctx->record_type);
+ fixup_child_record_type (ctx);
+ if (ctx->srecord_type)
+ layout_type (ctx->srecord_type);
+ OMP_TASK_ARG_SIZE (*stmt_p)
+ = fold_convert (long_integer_type_node,
+ TYPE_SIZE_UNIT (ctx->record_type));
+ OMP_TASK_ARG_ALIGN (*stmt_p)
+ = build_int_cst (long_integer_type_node,
+ TYPE_ALIGN_UNIT (ctx->record_type));
+ }
+}
+
/* Scan an OpenMP loop directive. */
@@ -1245,6 +1667,7 @@ scan_omp_for (tree *stmt_p, omp_context *outer_ctx)
{
omp_context *ctx;
tree stmt;
+ int i;
stmt = *stmt_p;
ctx = new_omp_context (stmt, outer_ctx);
@@ -1252,9 +1675,12 @@ scan_omp_for (tree *stmt_p, omp_context *outer_ctx)
scan_sharing_clauses (OMP_FOR_CLAUSES (stmt), ctx);
scan_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
- scan_omp (&OMP_FOR_INIT (stmt), ctx);
- scan_omp (&OMP_FOR_COND (stmt), ctx);
- scan_omp (&OMP_FOR_INCR (stmt), ctx);
+ for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
+ {
+ scan_omp (&TREE_VEC_ELT (OMP_FOR_INIT (stmt), i), ctx);
+ scan_omp (&TREE_VEC_ELT (OMP_FOR_COND (stmt), i), ctx);
+ scan_omp (&TREE_VEC_ELT (OMP_FOR_INCR (stmt), i), ctx);
+ }
scan_omp (&OMP_FOR_BODY (stmt), ctx);
}
@@ -1307,6 +1733,7 @@ check_omp_nesting_restrictions (tree t, omp_context *ctx)
case OMP_FOR:
case OMP_SECTIONS:
case OMP_SINGLE:
+ case CALL_EXPR:
for (; ctx != NULL; ctx = ctx->outer)
switch (TREE_CODE (ctx->stmt))
{
@@ -1315,8 +1742,17 @@ check_omp_nesting_restrictions (tree t, omp_context *ctx)
case OMP_SINGLE:
case OMP_ORDERED:
case OMP_MASTER:
+ case OMP_TASK:
+ if (TREE_CODE (t) == CALL_EXPR)
+ {
+ warning (0, "barrier region may not be closely nested inside "
+ "of work-sharing, critical, ordered, master or "
+ "explicit task region");
+ return;
+ }
warning (0, "work-sharing region may not be closely nested inside "
- "of work-sharing, critical, ordered or master region");
+ "of work-sharing, critical, ordered, master or explicit "
+ "task region");
return;
case OMP_PARALLEL:
return;
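
The new CALL_EXPR case above triggers on calls to the GOMP_barrier builtin, so code such as the following made-up example would now draw the added diagnostic (assuming the usual lowering of #pragma omp barrier to a GOMP_barrier call):

void
f (void)
{
#pragma omp task
  {
    /* warning: barrier region may not be closely nested inside of
       work-sharing, critical, ordered, master or explicit task region  */
#pragma omp barrier
  }
}
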
@@ -1331,8 +1767,9 @@ check_omp_nesting_restrictions (tree t, omp_context *ctx)
case OMP_FOR:
case OMP_SECTIONS:
case OMP_SINGLE:
+ case OMP_TASK:
warning (0, "master region may not be closely nested inside "
- "of work-sharing region");
+ "of work-sharing or explicit task region");
return;
case OMP_PARALLEL:
return;
@@ -1345,8 +1782,9 @@ check_omp_nesting_restrictions (tree t, omp_context *ctx)
switch (TREE_CODE (ctx->stmt))
{
case OMP_CRITICAL:
+ case OMP_TASK:
warning (0, "ordered region may not be closely nested inside "
- "of critical region");
+ "of critical or explicit task region");
return;
case OMP_FOR:
if (find_omp_clause (OMP_CLAUSES (ctx->stmt),
@@ -1389,16 +1827,32 @@ scan_omp_1 (tree *tp, int *walk_subtrees, void *data)
input_location = EXPR_LOCATION (t);
/* Check the OpenMP nesting restrictions. */
- if (OMP_DIRECTIVE_P (t) && ctx != NULL)
- check_omp_nesting_restrictions (t, ctx);
+ if (ctx != NULL)
+ {
+ if (OMP_DIRECTIVE_P (t))
+ check_omp_nesting_restrictions (t, ctx);
+ else if (TREE_CODE (t) == CALL_EXPR)
+ {
+ tree fndecl = get_callee_fndecl (t);
+ if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
+ && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
+ check_omp_nesting_restrictions (t, ctx);
+ }
+ }
*walk_subtrees = 0;
switch (TREE_CODE (t))
{
case OMP_PARALLEL:
- parallel_nesting_level++;
+ taskreg_nesting_level++;
scan_omp_parallel (tp, ctx);
- parallel_nesting_level--;
+ taskreg_nesting_level--;
+ break;
+
+ case OMP_TASK:
+ taskreg_nesting_level++;
+ scan_omp_task (tp, ctx);
+ taskreg_nesting_level--;
break;
case OMP_FOR:
@@ -1715,16 +2169,18 @@ lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
if (pass == 0)
continue;
- ptr = DECL_VALUE_EXPR (new_var);
- gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
- ptr = TREE_OPERAND (ptr, 0);
- gcc_assert (DECL_P (ptr));
-
- x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
- x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
- x = fold_convert (TREE_TYPE (ptr), x);
- x = build_gimple_modify_stmt (ptr, x);
- gimplify_and_add (x, ilist);
+ if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
+ {
+ ptr = DECL_VALUE_EXPR (new_var);
+ gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
+ ptr = TREE_OPERAND (ptr, 0);
+ gcc_assert (DECL_P (ptr));
+ x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
+ x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
+ x = fold_convert (TREE_TYPE (ptr), x);
+ x = build_gimple_modify_stmt (ptr, x);
+ gimplify_and_add (x, ilist);
+ }
}
else if (is_reference (var))
{
@@ -1740,7 +2196,12 @@ lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
continue;
x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
- if (TREE_CONSTANT (x))
+ if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
+ {
+ x = build_receiver_ref (var, false, ctx);
+ x = build_fold_addr_expr (x);
+ }
+ else if (TREE_CONSTANT (x))
{
const char *name = NULL;
if (DECL_NAME (var))
@@ -1800,7 +2261,18 @@ lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
/* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
- x = lang_hooks.decls.omp_clause_default_ctor (c, new_var);
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
+ x = build_outer_var_ref (var, ctx);
+ else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
+ {
+ if (is_task_ctx (ctx))
+ x = build_receiver_ref (var, false, ctx);
+ else
+ x = build_outer_var_ref (var, ctx);
+ }
+ else
+ x = NULL;
+ x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
if (x)
gimplify_and_add (x, ilist);
/* FALLTHRU */
@@ -1816,6 +2288,20 @@ lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
break;
case OMP_CLAUSE_FIRSTPRIVATE:
+ if (is_task_ctx (ctx))
+ {
+ if (is_reference (var) || is_variable_sized (var))
+ goto do_dtor;
+ else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
+ ctx))
+ || use_pointer_for_field (var, NULL))
+ {
+ x = build_receiver_ref (var, false, ctx);
+ SET_DECL_VALUE_EXPR (new_var, x);
+ DECL_HAS_VALUE_EXPR_P (new_var) = 1;
+ goto do_dtor;
+ }
+ }
x = build_outer_var_ref (var, ctx);
x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
gimplify_and_add (x, ilist);
@@ -1833,8 +2319,16 @@ lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
case OMP_CLAUSE_REDUCTION:
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
+ tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
+ x = build_outer_var_ref (var, ctx);
+
+ if (is_reference (var))
+ x = build_fold_addr_expr (x);
+ SET_DECL_VALUE_EXPR (placeholder, x);
+ DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), ilist);
OMP_CLAUSE_REDUCTION_INIT (c) = NULL;
+ DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
}
else
{
@@ -1879,9 +2373,10 @@ lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
static void
lower_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list,
- omp_context *ctx)
+ omp_context *ctx)
{
tree sub_list, x, c;
+ bool par_clauses = false;
/* Early exit if there are no lastprivate clauses. */
clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
@@ -1901,25 +2396,47 @@ lower_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list,
OMP_CLAUSE_LASTPRIVATE);
if (clauses == NULL)
return;
+ par_clauses = true;
}
sub_list = alloc_stmt_list ();
- for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
+ for (c = clauses; c ;)
{
tree var, new_var;
- if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LASTPRIVATE)
- continue;
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
+ {
+ var = OMP_CLAUSE_DECL (c);
+ new_var = lookup_decl (var, ctx);
- var = OMP_CLAUSE_DECL (c);
- new_var = lookup_decl (var, ctx);
+ if (OMP_CLAUSE_LASTPRIVATE_STMT (c))
+ gimplify_and_add (OMP_CLAUSE_LASTPRIVATE_STMT (c), &sub_list);
+ OMP_CLAUSE_LASTPRIVATE_STMT (c) = NULL;
- x = build_outer_var_ref (var, ctx);
- if (is_reference (var))
- new_var = build_fold_indirect_ref (new_var);
- x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
- append_to_statement_list (x, &sub_list);
+ x = build_outer_var_ref (var, ctx);
+ if (is_reference (var))
+ new_var = build_fold_indirect_ref (new_var);
+ x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
+ append_to_statement_list (x, &sub_list);
+ }
+ c = OMP_CLAUSE_CHAIN (c);
+ if (c == NULL && !par_clauses)
+ {
+ /* If this was a workshare clause, see if it had been combined
+ with its parallel. In that case, continue looking for the
+ clauses also on the parallel statement itself. */
+ if (is_parallel_ctx (ctx))
+ break;
+
+ ctx = ctx->outer;
+ if (ctx == NULL || !is_parallel_ctx (ctx))
+ break;
+
+ c = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt),
+ OMP_CLAUSE_LASTPRIVATE);
+ par_clauses = true;
+ }
}
if (predicate)
@@ -2071,6 +2588,10 @@ lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
switch (OMP_CLAUSE_CODE (c))
{
+ case OMP_CLAUSE_PRIVATE:
+ if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
+ break;
+ continue;
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_LASTPRIVATE:
@@ -2092,6 +2613,7 @@ lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
switch (OMP_CLAUSE_CODE (c))
{
+ case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
do_in = true;
@@ -2105,7 +2627,11 @@ lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
do_in = true;
}
else
- do_out = true;
+ {
+ do_out = true;
+ if (lang_hooks.decls.omp_private_outer_ref (val))
+ do_in = true;
+ }
break;
case OMP_CLAUSE_REDUCTION:
@@ -2123,6 +2649,8 @@ lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
x = by_ref ? build_fold_addr_expr (var) : var;
x = build_gimple_modify_stmt (ref, x);
gimplify_and_add (x, ilist);
+ if (is_task_ctx (ctx))
+ DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
}
if (do_out)
@@ -2141,12 +2669,13 @@ lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
static void
lower_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx)
{
- tree var, ovar, nvar, f, x;
+ tree var, ovar, nvar, f, x, record_type;
if (ctx->record_type == NULL)
return;
- for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
+ record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
+ for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
{
ovar = DECL_ABSTRACT_ORIGIN (f);
nvar = maybe_lookup_decl (ovar, ctx);
@@ -2171,9 +2700,12 @@ lower_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx)
x = build_gimple_modify_stmt (x, var);
gimplify_and_add (x, ilist);
- x = build_sender_ref (ovar, ctx);
- x = build_gimple_modify_stmt (var, x);
- gimplify_and_add (x, olist);
+ if (!TREE_READONLY (var))
+ {
+ x = build_sender_ref (ovar, ctx);
+ x = build_gimple_modify_stmt (var, x);
+ gimplify_and_add (x, olist);
+ }
}
}
}
@@ -2203,8 +2735,11 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
switch (region->inner->type)
{
case OMP_FOR:
+ gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
- + region->inner->sched_kind;
+ + (region->inner->sched_kind
+ == OMP_CLAUSE_SCHEDULE_RUNTIME
+ ? 3 : region->inner->sched_kind);
break;
case OMP_SECTIONS:
start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
@@ -2347,6 +2882,80 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
}
+static void maybe_catch_exception (tree *stmt_p);
+
+
+/* Finalize task copyfn. */
+
+static void
+expand_task_copyfn (tree task_stmt)
+{
+ struct function *child_cfun;
+ tree child_fn, old_fn;
+
+ child_fn = OMP_TASK_COPYFN (task_stmt);
+ child_cfun = DECL_STRUCT_FUNCTION (child_fn);
+
+ /* Inform the callgraph about the new function. */
+ DECL_STRUCT_FUNCTION (child_fn)->curr_properties
+ = cfun->curr_properties;
+
+ old_fn = current_function_decl;
+ push_cfun (child_cfun);
+ current_function_decl = child_fn;
+ gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
+ maybe_catch_exception (&BIND_EXPR_BODY (DECL_SAVED_TREE (child_fn)));
+ pop_cfun ();
+ current_function_decl = old_fn;
+
+ cgraph_add_new_function (child_fn, false);
+}
+
+/* Build the function call to GOMP_task to actually
+ generate the task operation. BB is the block where to insert the code. */
+
+static void
+expand_task_call (basic_block bb, tree entry_stmt)
+{
+ tree t, t1, t2, t3, flags, cond, c, clauses;
+ block_stmt_iterator si;
+
+ clauses = OMP_TASK_CLAUSES (entry_stmt);
+
+ if (OMP_TASK_COPYFN (entry_stmt))
+ expand_task_copyfn (entry_stmt);
+
+ c = find_omp_clause (clauses, OMP_CLAUSE_IF);
+ if (c)
+ cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
+ else
+ cond = boolean_true_node;
+
+ c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
+ flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
+
+ si = bsi_last (bb);
+ t = OMP_TASK_DATA_ARG (entry_stmt);
+ if (t == NULL)
+ t2 = null_pointer_node;
+ else
+ t2 = build_fold_addr_expr (t);
+ t1 = build_fold_addr_expr (OMP_TASK_FN (entry_stmt));
+ t = OMP_TASK_COPYFN (entry_stmt);
+ if (t == NULL)
+ t3 = null_pointer_node;
+ else
+ t3 = build_fold_addr_expr (t);
+
+ t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
+ OMP_TASK_ARG_SIZE (entry_stmt),
+ OMP_TASK_ARG_ALIGN (entry_stmt), cond, flags);
+
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
+}
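A hedged reading of the runtime call this function emits; the argument names below are illustrative and the authoritative prototype lives in libgomp:

/* Sketch only -- the call built above has the shape

     GOMP_task (child_fn, &omp_data_o, copy_fn, arg_size, arg_align,
                if_cond, flags);

   where &omp_data_o and copy_fn become NULL when no data block or copy
   function is needed, if_cond defaults to true in the absence of an IF
   clause, and bit 0 of flags encodes the UNTIED clause.  */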
+
+
/* If exceptions are enabled, wrap *STMT_P in a MUST_NOT_THROW catch
handler. This prevents programs from violating the structured
block semantics with throws. */
@@ -2460,10 +3069,12 @@ remove_exit_barriers (struct omp_region *region)
calls. These can't be declared as const functions, but
within one parallel body they are constant, so they can be
transformed there into __builtin_omp_get_{thread_num,num_threads} ()
- which are declared const. */
+ which are declared const. Similarly for the task body, except
+ that in an untied task omp_get_thread_num () can change at any task
+ scheduling point.
static void
-optimize_omp_library_calls (void)
+optimize_omp_library_calls (tree entry_stmt)
{
basic_block bb;
block_stmt_iterator bsi;
@@ -2471,6 +3082,9 @@ optimize_omp_library_calls (void)
= DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
tree num_thr_id
= DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
+ bool untied_task = (TREE_CODE (entry_stmt) == OMP_TASK
+ && find_omp_clause (OMP_TASK_CLAUSES (entry_stmt),
+ OMP_CLAUSE_UNTIED) != NULL);
FOR_EACH_BB (bb)
for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
@@ -2488,7 +3102,13 @@ optimize_omp_library_calls (void)
tree built_in;
if (DECL_NAME (decl) == thr_num_id)
- built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
+ {
+ /* In #pragma omp task untied omp_get_thread_num () can change
+ during the execution of the task region. */
+ if (untied_task)
+ continue;
+ built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
+ }
else if (DECL_NAME (decl) == num_thr_id)
built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
else
@@ -2511,10 +3131,10 @@ optimize_omp_library_calls (void)
}
}
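A hedged example of why the replacement is suppressed for untied tasks; omp.h and the pragmas are standard OpenMP, the function name is hypothetical:

#include <omp.h>

/* Illustrative only: in an untied task the executing thread can change at
   any task scheduling point, so the two calls below may legitimately
   return different values and must not be folded into one.  */
static void
untied_demo (int *before, int *after)
{
#pragma omp task untied
  {
    *before = omp_get_thread_num ();
#pragma omp taskwait                    /* task scheduling point */
    *after = omp_get_thread_num ();     /* may differ from *BEFORE */
  }
}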
-/* Expand the OpenMP parallel directive starting at REGION. */
+/* Expand the OpenMP parallel or task directive starting at REGION. */
static void
-expand_omp_parallel (struct omp_region *region)
+expand_omp_taskreg (struct omp_region *region)
{
basic_block entry_bb, exit_bb, new_bb;
struct function *child_cfun;
@@ -2524,7 +3144,7 @@ expand_omp_parallel (struct omp_region *region)
edge e;
entry_stmt = last_stmt (region->entry);
- child_fn = OMP_PARALLEL_FN (entry_stmt);
+ child_fn = OMP_TASKREG_FN (entry_stmt);
child_cfun = DECL_STRUCT_FUNCTION (child_fn);
/* If this function has been already instrumented, make sure
the child function isn't instrumented again. */
@@ -2549,7 +3169,8 @@ expand_omp_parallel (struct omp_region *region)
entry_succ_e = single_succ_edge (entry_bb);
si = bsi_last (entry_bb);
- gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_PARALLEL);
+ gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_PARALLEL
+ || TREE_CODE (bsi_stmt (si)) == OMP_TASK);
bsi_remove (&si, true);
new_bb = entry_bb;
@@ -2575,7 +3196,7 @@ expand_omp_parallel (struct omp_region *region)
a function call that has been inlined, the original PARM_DECL
.OMP_DATA_I may have been converted into a different local
variable. In which case, we need to keep the assignment. */
- if (OMP_PARALLEL_DATA_ARG (entry_stmt))
+ if (OMP_TASKREG_DATA_ARG (entry_stmt))
{
basic_block entry_succ_bb = single_succ (entry_bb);
block_stmt_iterator si;
@@ -2594,7 +3215,7 @@ expand_omp_parallel (struct omp_region *region)
STRIP_NOPS (arg);
if (TREE_CODE (arg) == ADDR_EXPR
&& TREE_OPERAND (arg, 0)
- == OMP_PARALLEL_DATA_ARG (entry_stmt))
+ == OMP_TASKREG_DATA_ARG (entry_stmt))
{
parcopy_stmt = stmt;
break;
@@ -2633,11 +3254,12 @@ expand_omp_parallel (struct omp_region *region)
for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
DECL_CONTEXT (t) = child_fn;
- /* Split ENTRY_BB at OMP_PARALLEL so that it can be moved to the
- child function. */
+ /* Split ENTRY_BB at OMP_PARALLEL or OMP_TASK, so that it can be
+ moved to the child function. */
si = bsi_last (entry_bb);
t = bsi_stmt (si);
- gcc_assert (t && TREE_CODE (t) == OMP_PARALLEL);
+ gcc_assert (t && (TREE_CODE (t) == OMP_PARALLEL
+ || TREE_CODE (t) == OMP_TASK));
bsi_remove (&si, true);
e = split_block (entry_bb, t);
entry_bb = e->dest;
@@ -2677,7 +3299,7 @@ expand_omp_parallel (struct omp_region *region)
fixed in a following pass. */
push_cfun (child_cfun);
if (optimize)
- optimize_omp_library_calls ();
+ optimize_omp_library_calls (entry_stmt);
rebuild_cgraph_edges ();
/* Some EH regions might become dead, see PR34608. If
@@ -2701,7 +3323,10 @@ expand_omp_parallel (struct omp_region *region)
}
/* Emit a library call to launch the children threads. */
- expand_parallel_call (region, new_bb, entry_stmt, ws_args);
+ if (TREE_CODE (entry_stmt) == OMP_PARALLEL)
+ expand_parallel_call (region, new_bb, entry_stmt, ws_args);
+ else
+ expand_task_call (new_bb, entry_stmt);
update_ssa (TODO_update_ssa_only_virtuals);
}
@@ -2727,7 +3352,64 @@ expand_omp_parallel (struct omp_region *region)
L3:
If this is a combined omp parallel loop, instead of the call to
- GOMP_loop_foo_start, we call GOMP_loop_foo_next. */
+ GOMP_loop_foo_start, we call GOMP_loop_foo_next.
+
+ For collapsed loops, given parameters:
+ collapse(3)
+ for (V1 = N11; V1 cond1 N12; V1 += STEP1)
+ for (V2 = N21; V2 cond2 N22; V2 += STEP2)
+ for (V3 = N31; V3 cond3 N32; V3 += STEP3)
+ BODY;
+
+ we generate pseudocode
+
+ if (cond3 is <)
+ adj = STEP3 - 1;
+ else
+ adj = STEP3 + 1;
+ count3 = (adj + N32 - N31) / STEP3;
+ if (cond2 is <)
+ adj = STEP2 - 1;
+ else
+ adj = STEP2 + 1;
+ count2 = (adj + N22 - N21) / STEP2;
+ if (cond1 is <)
+ adj = STEP1 - 1;
+ else
+ adj = STEP1 + 1;
+ count1 = (adj + N12 - N11) / STEP1;
+ count = count1 * count2 * count3;
+ more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
+ if (more) goto L0; else goto L3;
+ L0:
+ V = istart0;
+ T = V;
+ V3 = N31 + (T % count3) * STEP3;
+ T = T / count3;
+ V2 = N21 + (T % count2) * STEP2;
+ T = T / count2;
+ V1 = N11 + T * STEP1;
+ iend = iend0;
+ L1:
+ BODY;
+ V += 1;
+ if (V < iend) goto L10; else goto L2;
+ L10:
+ V3 += STEP3;
+ if (V3 cond3 N32) goto L1; else goto L11;
+ L11:
+ V3 = N31;
+ V2 += STEP2;
+ if (V2 cond2 N22) goto L1; else goto L12;
+ L12:
+ V2 = N21;
+ V1 += STEP1;
+ goto L1;
+ L2:
+ if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
+ L3:
+
+ */
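For reference, a hedged sketch of the kind of source loop nest this collapsed expansion targets (bounds, step values and the schedule are hypothetical):

/* Illustrative only: a collapse(3) nest whose three iteration spaces are
   flattened into the single logical index described in the pseudocode
   above.  */
static void
collapse3_demo (int *out, int n1, int n2, int n3)
{
#pragma omp for collapse (3) schedule (dynamic, 4)
  for (int v1 = 0; v1 < n1; v1++)
    for (int v2 = 0; v2 < n2; v2++)
      for (int v3 = 0; v3 < n3; v3++)
        out[(v1 * n2 + v2) * n3 + v3] = v1 + v2 + v3;
}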
static void
expand_omp_for_generic (struct omp_region *region,
@@ -2736,20 +3418,23 @@ expand_omp_for_generic (struct omp_region *region,
enum built_in_function next_fn)
{
tree type, istart0, iend0, iend, phi;
- tree t, vmain, vback;
- basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb;
+ tree t, vmain, vback, bias = NULL_TREE;
+ basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
basic_block l2_bb = NULL, l3_bb = NULL;
block_stmt_iterator si;
bool in_combined_parallel = is_combined_parallel (region);
bool broken_loop = region->cont == NULL;
edge e, ne;
+ tree *counts = NULL;
+ int i;
gcc_assert (!broken_loop || !in_combined_parallel);
+ gcc_assert (fd->iter_type == long_integer_type_node
+ || !in_combined_parallel);
- type = TREE_TYPE (fd->v);
-
- istart0 = create_tmp_var (long_integer_type_node, ".istart0");
- iend0 = create_tmp_var (long_integer_type_node, ".iend0");
+ type = TREE_TYPE (fd->loop.v);
+ istart0 = create_tmp_var (fd->iter_type, ".istart0");
+ iend0 = create_tmp_var (fd->iter_type, ".iend0");
TREE_ADDRESSABLE (istart0) = 1;
TREE_ADDRESSABLE (iend0) = 1;
if (gimple_in_ssa_p (cfun))
@@ -2758,8 +3443,32 @@ expand_omp_for_generic (struct omp_region *region,
add_referenced_var (iend0);
}
+ /* See if we need to bias by LLONG_MIN. */
+ if (fd->iter_type == long_long_unsigned_type_node
+ && TREE_CODE (type) == INTEGER_TYPE
+ && !TYPE_UNSIGNED (type))
+ {
+ tree n1, n2;
+
+ if (fd->loop.cond_code == LT_EXPR)
+ {
+ n1 = fd->loop.n1;
+ n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
+ }
+ else
+ {
+ n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
+ n2 = fd->loop.n1;
+ }
+ if (TREE_CODE (n1) != INTEGER_CST
+ || TREE_CODE (n2) != INTEGER_CST
+ || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
+ bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
+ }
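The bias can be read as a plain order-preserving shift into the unsigned domain; a hedged arithmetic sketch, assuming a 64-bit long long:

#include <limits.h>

/* Illustrative only: adding (unsigned long long) LLONG_MIN, i.e. 2^63,
   maps the signed range [LLONG_MIN, LLONG_MAX] monotonically onto
   [0, ULLONG_MAX], so the unsigned GOMP_loop_ull_* entry points can drive
   a signed long long iteration variable.  */
static unsigned long long
bias_demo (long long v)
{
  unsigned long long bias = (unsigned long long) LLONG_MIN;
  return (unsigned long long) v + bias;  /* LLONG_MIN -> 0, -1 -> 2^63 - 1 */
}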
+
entry_bb = region->entry;
cont_bb = region->cont;
+ collapse_bb = NULL;
gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
gcc_assert (broken_loop
|| BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
@@ -2777,7 +3486,60 @@ expand_omp_for_generic (struct omp_region *region,
exit_bb = region->exit;
si = bsi_last (entry_bb);
+
gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
+ if (fd->collapse > 1)
+ {
+ /* collapsed loops need work for expansion in SSA form. */
+ gcc_assert (!gimple_in_ssa_p (cfun));
+ counts = (tree *) alloca (fd->collapse * sizeof (tree));
+ for (i = 0; i < fd->collapse; i++)
+ {
+ tree itype = TREE_TYPE (fd->loops[i].v);
+
+ if (POINTER_TYPE_P (itype))
+ itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
+ t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
+ ? -1 : 1));
+ t = fold_build2 (PLUS_EXPR, itype,
+ fold_convert (itype, fd->loops[i].step), t);
+ t = fold_build2 (PLUS_EXPR, itype, t,
+ fold_convert (itype, fd->loops[i].n2));
+ t = fold_build2 (MINUS_EXPR, itype, t,
+ fold_convert (itype, fd->loops[i].n1));
+ if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
+ t = fold_build2 (TRUNC_DIV_EXPR, itype,
+ fold_build1 (NEGATE_EXPR, itype, t),
+ fold_build1 (NEGATE_EXPR, itype,
+ fold_convert (itype,
+ fd->loops[i].step)));
+ else
+ t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
+ fold_convert (itype, fd->loops[i].step));
+ t = fold_convert (type, t);
+ if (TREE_CODE (t) == INTEGER_CST)
+ counts[i] = t;
+ else
+ {
+ counts[i] = create_tmp_var (type, ".count");
+ t = build_gimple_modify_stmt (counts[i], t);
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ }
+ if (SSA_VAR_P (fd->loop.n2))
+ {
+ if (i == 0)
+ t = build_gimple_modify_stmt (fd->loop.n2, counts[0]);
+ else
+ {
+ t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
+ t = build_gimple_modify_stmt (fd->loop.n2, t);
+ }
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ true, BSI_SAME_STMT);
+ }
+ }
+ }
if (in_combined_parallel)
{
/* In a combined parallel loop, emit a call to
@@ -2793,19 +3555,52 @@ expand_omp_for_generic (struct omp_region *region,
GOMP_loop_foo_start in ENTRY_BB. */
t4 = build_fold_addr_expr (iend0);
t3 = build_fold_addr_expr (istart0);
- t2 = fold_convert (long_integer_type_node, fd->step);
- t1 = fold_convert (long_integer_type_node, fd->n2);
- t0 = fold_convert (long_integer_type_node, fd->n1);
- if (fd->chunk_size)
+ t2 = fold_convert (fd->iter_type, fd->loop.step);
+ t1 = fold_convert (fd->iter_type, fd->loop.n2);
+ t0 = fold_convert (fd->iter_type, fd->loop.n1);
+ if (bias)
{
- t = fold_convert (long_integer_type_node, fd->chunk_size);
- t = build_call_expr (built_in_decls[start_fn], 6,
- t0, t1, t2, t, t3, t4);
+ t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
+ t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
+ }
+ if (fd->iter_type == long_integer_type_node)
+ {
+ if (fd->chunk_size)
+ {
+ t = fold_convert (fd->iter_type, fd->chunk_size);
+ t = build_call_expr (built_in_decls[start_fn], 6,
+ t0, t1, t2, t, t3, t4);
+ }
+ else
+ t = build_call_expr (built_in_decls[start_fn], 5,
+ t0, t1, t2, t3, t4);
}
else
- t = build_call_expr (built_in_decls[start_fn], 5,
- t0, t1, t2, t3, t4);
+ {
+ tree t5;
+ tree c_bool_type;
+
+ /* The GOMP_loop_ull_*start functions have an additional boolean
+ argument, true for < loops and false for > loops.
+ In Fortran, the C bool type can be different from
+ boolean_type_node. */
+ c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
+ t5 = build_int_cst (c_bool_type,
+ fd->loop.cond_code == LT_EXPR ? 1 : 0);
+ if (fd->chunk_size)
+ {
+ t = fold_convert (fd->iter_type, fd->chunk_size);
+ t = build_call_expr (built_in_decls[start_fn], 7,
+ t5, t0, t1, t2, t, t3, t4);
+ }
+ else
+ t = build_call_expr (built_in_decls[start_fn], 6,
+ t5, t0, t1, t2, t3, t4);
+ }
}
+ if (TREE_TYPE (t) != boolean_type_node)
+ t = fold_build2 (NE_EXPR, boolean_type_node,
+ t, build_int_cst (TREE_TYPE (t), 0));
t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
true, BSI_SAME_STMT);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
@@ -2816,17 +3611,57 @@ expand_omp_for_generic (struct omp_region *region,
/* Iteration setup for sequential loop goes in L0_BB. */
si = bsi_start (l0_bb);
- t = fold_convert (type, istart0);
+ if (bias)
+ t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
+ istart0, bias));
+ else
+ t = fold_convert (type, istart0);
t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
false, BSI_CONTINUE_LINKING);
- t = build_gimple_modify_stmt (fd->v, t);
+ t = build_gimple_modify_stmt (fd->loop.v, t);
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
if (gimple_in_ssa_p (cfun))
- SSA_NAME_DEF_STMT (fd->v) = t;
+ SSA_NAME_DEF_STMT (fd->loop.v) = t;
- t = fold_convert (type, iend0);
+ if (bias)
+ t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
+ iend0, bias));
+ else
+ t = fold_convert (type, iend0);
iend = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
false, BSI_CONTINUE_LINKING);
+ if (fd->collapse > 1)
+ {
+ tree tem = create_tmp_var (type, ".tem");
+
+ t = build_gimple_modify_stmt (tem, fd->loop.v);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
+ for (i = fd->collapse - 1; i >= 0; i--)
+ {
+ tree vtype = TREE_TYPE (fd->loops[i].v), itype;
+ itype = vtype;
+ if (POINTER_TYPE_P (vtype))
+ itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
+ t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
+ t = fold_convert (itype, t);
+ t = fold_build2 (MULT_EXPR, itype, t, fd->loops[i].step);
+ if (POINTER_TYPE_P (vtype))
+ t = fold_build2 (POINTER_PLUS_EXPR, vtype,
+ fd->loops[i].n1, fold_convert (sizetype, t));
+ else
+ t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
+ t = build_gimple_modify_stmt (fd->loops[i].v, t);
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
+ if (i != 0)
+ {
+ t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
+ t = build_gimple_modify_stmt (tem, t);
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
+ }
+ }
+ }
if (!broken_loop)
{
@@ -2838,7 +3673,11 @@ expand_omp_for_generic (struct omp_region *region,
vmain = TREE_OPERAND (t, 1);
vback = TREE_OPERAND (t, 0);
- t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
+ if (POINTER_TYPE_P (type))
+ t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
+ fold_convert (sizetype, fd->loop.step));
+ else
+ t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
true, BSI_SAME_STMT);
t = build_gimple_modify_stmt (vback, t);
@@ -2846,19 +3685,78 @@ expand_omp_for_generic (struct omp_region *region,
if (gimple_in_ssa_p (cfun))
SSA_NAME_DEF_STMT (vback) = t;
- t = build2 (fd->cond_code, boolean_type_node, vback, iend);
+ t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
bsi_insert_before (&si, t, BSI_SAME_STMT);
/* Remove OMP_CONTINUE. */
bsi_remove (&si, true);
+ if (fd->collapse > 1)
+ {
+ basic_block last_bb, bb;
+
+ last_bb = cont_bb;
+ for (i = fd->collapse - 1; i >= 0; i--)
+ {
+ tree vtype = TREE_TYPE (fd->loops[i].v);
+
+ bb = create_empty_bb (last_bb);
+ si = bsi_start (bb);
+
+ if (i < fd->collapse - 1)
+ {
+ e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
+ e->probability = REG_BR_PROB_BASE / 8;
+
+ t = build_gimple_modify_stmt (fd->loops[i + 1].v,
+ fd->loops[i + 1].n1);
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
+ }
+ else
+ collapse_bb = bb;
+
+ set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
+
+ if (POINTER_TYPE_P (vtype))
+ t = fold_build2 (POINTER_PLUS_EXPR, vtype,
+ fd->loops[i].v,
+ fold_convert (sizetype, fd->loops[i].step));
+ else
+ t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
+ fd->loops[i].step);
+ t = build_gimple_modify_stmt (fd->loops[i].v, t);
+ force_gimple_operand_bsi (&si, t, true, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
+
+ if (i > 0)
+ {
+ t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
+ fd->loops[i].v, fd->loops[i].n2);
+ t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
+ false, BSI_CONTINUE_LINKING);
+ t = build3 (COND_EXPR, void_type_node, t,
+ NULL_TREE, NULL_TREE);
+ bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
+ e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
+ e->probability = REG_BR_PROB_BASE * 7 / 8;
+ }
+ else
+ make_edge (bb, l1_bb, EDGE_FALLTHRU);
+ last_bb = bb;
+ }
+ }
+
/* Emit code to get the next parallel iteration in L2_BB. */
si = bsi_start (l2_bb);
t = build_call_expr (built_in_decls[next_fn], 2,
build_fold_addr_expr (istart0),
build_fold_addr_expr (iend0));
+ if (TREE_TYPE (t) != boolean_type_node)
+ t = fold_build2 (NE_EXPR, boolean_type_node,
+ t, build_int_cst (TREE_TYPE (t), 0));
t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
false, BSI_CONTINUE_LINKING);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
@@ -2889,8 +3787,20 @@ expand_omp_for_generic (struct omp_region *region,
PHI_ARG_DEF_FROM_EDGE (phi, e));
remove_edge (e);
- find_edge (cont_bb, l1_bb)->flags = EDGE_TRUE_VALUE;
make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
+ if (fd->collapse > 1)
+ {
+ e = find_edge (cont_bb, l1_bb);
+ remove_edge (e);
+ e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
+ }
+ else
+ {
+ e = find_edge (cont_bb, l1_bb);
+ e->flags = EDGE_TRUE_VALUE;
+ }
+ e->probability = REG_BR_PROB_BASE * 7 / 8;
+ find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
set_immediate_dominator (CDI_DOMINATORS, l2_bb,
@@ -2917,7 +3827,10 @@ expand_omp_for_generic (struct omp_region *region,
adj = STEP - 1;
else
adj = STEP + 1;
- n = (adj + N2 - N1) / STEP;
+ if ((__typeof (V)) -1 > 0 && cond is >)
+ n = -(adj + N2 - N1) / -STEP;
+ else
+ n = (adj + N2 - N1) / STEP;
q = n / nthreads;
q += (q * nthreads != n);
s0 = q * threadid;
@@ -2938,12 +3851,14 @@ expand_omp_for_static_nochunk (struct omp_region *region,
struct omp_for_data *fd)
{
tree n, q, s0, e0, e, t, nthreads, threadid;
- tree type, vmain, vback;
+ tree type, itype, vmain, vback;
basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
basic_block fin_bb;
block_stmt_iterator si;
- type = TREE_TYPE (fd->v);
+ itype = type = TREE_TYPE (fd->loop.v);
+ if (POINTER_TYPE_P (type))
+ itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
entry_bb = region->entry;
cont_bb = region->cont;
@@ -2961,51 +3876,51 @@ expand_omp_for_static_nochunk (struct omp_region *region,
gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
- t = fold_convert (type, t);
+ t = fold_convert (itype, t);
nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
true, BSI_SAME_STMT);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
- t = fold_convert (type, t);
+ t = fold_convert (itype, t);
threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
true, BSI_SAME_STMT);
- fd->n1 = force_gimple_operand_bsi (&si,
- fold_convert (type, fd->n1),
- true, NULL_TREE,
- true, BSI_SAME_STMT);
-
- fd->n2 = force_gimple_operand_bsi (&si,
- fold_convert (type, fd->n2),
- true, NULL_TREE,
- true, BSI_SAME_STMT);
-
- fd->step = force_gimple_operand_bsi (&si,
- fold_convert (type, fd->step),
- true, NULL_TREE,
- true, BSI_SAME_STMT);
-
- t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
- t = fold_build2 (PLUS_EXPR, type, fd->step, t);
- t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
- t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
- t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
- t = fold_convert (type, t);
+ fd->loop.n1
+ = force_gimple_operand_bsi (&si, fold_convert (type, fd->loop.n1),
+ true, NULL_TREE, true, BSI_SAME_STMT);
+ fd->loop.n2
+ = force_gimple_operand_bsi (&si, fold_convert (itype, fd->loop.n2),
+ true, NULL_TREE, true, BSI_SAME_STMT);
+ fd->loop.step
+ = force_gimple_operand_bsi (&si, fold_convert (itype, fd->loop.step),
+ true, NULL_TREE, true, BSI_SAME_STMT);
+
+ t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
+ t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
+ t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
+ t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
+ if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
+ t = fold_build2 (TRUNC_DIV_EXPR, itype,
+ fold_build1 (NEGATE_EXPR, itype, t),
+ fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
+ else
+ t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
+ t = fold_convert (itype, t);
n = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
- t = fold_build2 (TRUNC_DIV_EXPR, type, n, nthreads);
+ t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
- t = fold_build2 (MULT_EXPR, type, q, nthreads);
- t = fold_build2 (NE_EXPR, type, t, n);
- t = fold_build2 (PLUS_EXPR, type, q, t);
+ t = fold_build2 (MULT_EXPR, itype, q, nthreads);
+ t = fold_build2 (NE_EXPR, itype, t, n);
+ t = fold_build2 (PLUS_EXPR, itype, q, t);
q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
- t = build2 (MULT_EXPR, type, q, threadid);
+ t = build2 (MULT_EXPR, itype, q, threadid);
s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
- t = fold_build2 (PLUS_EXPR, type, s0, q);
- t = fold_build2 (MIN_EXPR, type, t, n);
+ t = fold_build2 (PLUS_EXPR, itype, s0, q);
+ t = fold_build2 (MIN_EXPR, itype, t, n);
e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
t = build2 (GE_EXPR, boolean_type_node, s0, e0);
@@ -3018,19 +3933,27 @@ expand_omp_for_static_nochunk (struct omp_region *region,
/* Setup code for sequential iteration goes in SEQ_START_BB. */
si = bsi_start (seq_start_bb);
- t = fold_convert (type, s0);
- t = fold_build2 (MULT_EXPR, type, t, fd->step);
- t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ t = fold_convert (itype, s0);
+ t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
+ if (POINTER_TYPE_P (type))
+ t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
+ fold_convert (sizetype, t));
+ else
+ t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
false, BSI_CONTINUE_LINKING);
- t = build_gimple_modify_stmt (fd->v, t);
+ t = build_gimple_modify_stmt (fd->loop.v, t);
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
if (gimple_in_ssa_p (cfun))
- SSA_NAME_DEF_STMT (fd->v) = t;
+ SSA_NAME_DEF_STMT (fd->loop.v) = t;
- t = fold_convert (type, e0);
- t = fold_build2 (MULT_EXPR, type, t, fd->step);
- t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ t = fold_convert (itype, e0);
+ t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
+ if (POINTER_TYPE_P (type))
+ t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
+ fold_convert (sizetype, t));
+ else
+ t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
false, BSI_CONTINUE_LINKING);
@@ -3041,7 +3964,11 @@ expand_omp_for_static_nochunk (struct omp_region *region,
vmain = TREE_OPERAND (t, 1);
vback = TREE_OPERAND (t, 0);
- t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
+ if (POINTER_TYPE_P (type))
+ t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
+ fold_convert (sizetype, fd->loop.step));
+ else
+ t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
true, BSI_SAME_STMT);
t = build_gimple_modify_stmt (vback, t);
@@ -3049,7 +3976,7 @@ expand_omp_for_static_nochunk (struct omp_region *region,
if (gimple_in_ssa_p (cfun))
SSA_NAME_DEF_STMT (vback) = t;
- t = build2 (fd->cond_code, boolean_type_node, vback, e);
+ t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
bsi_insert_before (&si, t, BSI_SAME_STMT);
@@ -3090,7 +4017,10 @@ expand_omp_for_static_nochunk (struct omp_region *region,
adj = STEP - 1;
else
adj = STEP + 1;
- n = (adj + N2 - N1) / STEP;
+ if ((__typeof (V)) -1 > 0 && cond is >)
+ n = -(adj + N2 - N1) / -STEP;
+ else
+ n = (adj + N2 - N1) / STEP;
trip = 0;
V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
here so that V is defined
@@ -3113,17 +4043,20 @@ expand_omp_for_static_nochunk (struct omp_region *region,
*/
static void
-expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
+expand_omp_for_static_chunk (struct omp_region *region,
+ struct omp_for_data *fd)
{
tree n, s0, e0, e, t, phi, nphi, args;
tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
- tree type, cont, v_main, v_back, v_extra;
+ tree type, itype, cont, v_main, v_back, v_extra;
basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
basic_block trip_update_bb, cont_bb, fin_bb;
block_stmt_iterator si;
edge se, re, ene;
- type = TREE_TYPE (fd->v);
+ itype = type = TREE_TYPE (fd->loop.v);
+ if (POINTER_TYPE_P (type))
+ itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
entry_bb = region->entry;
se = split_block (entry_bb, last_stmt (entry_bb));
@@ -3146,40 +4079,43 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
- t = fold_convert (type, t);
+ t = fold_convert (itype, t);
nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
true, BSI_SAME_STMT);
t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
- t = fold_convert (type, t);
+ t = fold_convert (itype, t);
threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
true, BSI_SAME_STMT);
- fd->n1 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n1),
- true, NULL_TREE,
- true, BSI_SAME_STMT);
- fd->n2 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n2),
- true, NULL_TREE,
- true, BSI_SAME_STMT);
- fd->step = force_gimple_operand_bsi (&si, fold_convert (type, fd->step),
- true, NULL_TREE,
- true, BSI_SAME_STMT);
+ fd->loop.n1
+ = force_gimple_operand_bsi (&si, fold_convert (type, fd->loop.n1),
+ true, NULL_TREE, true, BSI_SAME_STMT);
+ fd->loop.n2
+ = force_gimple_operand_bsi (&si, fold_convert (itype, fd->loop.n2),
+ true, NULL_TREE, true, BSI_SAME_STMT);
+ fd->loop.step
+ = force_gimple_operand_bsi (&si, fold_convert (itype, fd->loop.step),
+ true, NULL_TREE, true, BSI_SAME_STMT);
fd->chunk_size
- = force_gimple_operand_bsi (&si, fold_convert (type,
- fd->chunk_size),
- true, NULL_TREE,
- true, BSI_SAME_STMT);
-
- t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
- t = fold_build2 (PLUS_EXPR, type, fd->step, t);
- t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
- t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
- t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
- t = fold_convert (type, t);
+ = force_gimple_operand_bsi (&si, fold_convert (itype, fd->chunk_size),
+ true, NULL_TREE, true, BSI_SAME_STMT);
+
+ t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
+ t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
+ t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
+ t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
+ if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
+ t = fold_build2 (TRUNC_DIV_EXPR, itype,
+ fold_build1 (NEGATE_EXPR, itype, t),
+ fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
+ else
+ t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
+ t = fold_convert (itype, t);
n = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
true, BSI_SAME_STMT);
- trip_var = create_tmp_var (type, ".trip");
+ trip_var = create_tmp_var (itype, ".trip");
if (gimple_in_ssa_p (cfun))
{
add_referenced_var (trip_var);
@@ -3194,14 +4130,18 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
trip_back = trip_var;
}
- t = build_gimple_modify_stmt (trip_init, build_int_cst (type, 0));
+ t = build_gimple_modify_stmt (trip_init, build_int_cst (itype, 0));
bsi_insert_before (&si, t, BSI_SAME_STMT);
if (gimple_in_ssa_p (cfun))
SSA_NAME_DEF_STMT (trip_init) = t;
- t = fold_build2 (MULT_EXPR, type, threadid, fd->chunk_size);
- t = fold_build2 (MULT_EXPR, type, t, fd->step);
- t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
+ t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
+ if (POINTER_TYPE_P (type))
+ t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
+ fold_convert (sizetype, t));
+ else
+ t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
v_extra = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
true, BSI_SAME_STMT);
@@ -3211,14 +4151,14 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
/* Iteration space partitioning goes in ITER_PART_BB. */
si = bsi_last (iter_part_bb);
- t = fold_build2 (MULT_EXPR, type, trip_main, nthreads);
- t = fold_build2 (PLUS_EXPR, type, t, threadid);
- t = fold_build2 (MULT_EXPR, type, t, fd->chunk_size);
+ t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
+ t = fold_build2 (PLUS_EXPR, itype, t, threadid);
+ t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
false, BSI_CONTINUE_LINKING);
- t = fold_build2 (PLUS_EXPR, type, s0, fd->chunk_size);
- t = fold_build2 (MIN_EXPR, type, t, n);
+ t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
+ t = fold_build2 (MIN_EXPR, itype, t, n);
e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
false, BSI_CONTINUE_LINKING);
@@ -3229,19 +4169,27 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
/* Setup code for sequential iteration goes in SEQ_START_BB. */
si = bsi_start (seq_start_bb);
- t = fold_convert (type, s0);
- t = fold_build2 (MULT_EXPR, type, t, fd->step);
- t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ t = fold_convert (itype, s0);
+ t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
+ if (POINTER_TYPE_P (type))
+ t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
+ fold_convert (sizetype, t));
+ else
+ t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
false, BSI_CONTINUE_LINKING);
- t = build_gimple_modify_stmt (fd->v, t);
+ t = build_gimple_modify_stmt (fd->loop.v, t);
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
if (gimple_in_ssa_p (cfun))
- SSA_NAME_DEF_STMT (fd->v) = t;
+ SSA_NAME_DEF_STMT (fd->loop.v) = t;
- t = fold_convert (type, e0);
- t = fold_build2 (MULT_EXPR, type, t, fd->step);
- t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
+ t = fold_convert (itype, e0);
+ t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
+ if (POINTER_TYPE_P (type))
+ t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
+ fold_convert (sizetype, t));
+ else
+ t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
false, BSI_CONTINUE_LINKING);
@@ -3253,13 +4201,17 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
v_main = TREE_OPERAND (cont, 1);
v_back = TREE_OPERAND (cont, 0);
- t = build2 (PLUS_EXPR, type, v_main, fd->step);
+ if (POINTER_TYPE_P (type))
+ t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
+ fold_convert (sizetype, fd->loop.step));
+ else
+ t = build2 (PLUS_EXPR, type, v_main, fd->loop.step);
t = build_gimple_modify_stmt (v_back, t);
bsi_insert_before (&si, t, BSI_SAME_STMT);
if (gimple_in_ssa_p (cfun))
SSA_NAME_DEF_STMT (v_back) = t;
- t = build2 (fd->cond_code, boolean_type_node, v_back, e);
+ t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
bsi_insert_before (&si, t, BSI_SAME_STMT);
@@ -3269,8 +4221,8 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
/* Trip update code goes into TRIP_UPDATE_BB. */
si = bsi_start (trip_update_bb);
- t = build_int_cst (type, 1);
- t = build2 (PLUS_EXPR, type, trip_main, t);
+ t = build_int_cst (itype, 1);
+ t = build2 (PLUS_EXPR, itype, trip_main, t);
t = build_gimple_modify_stmt (trip_back, t);
bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
if (gimple_in_ssa_p (cfun))
@@ -3313,9 +4265,9 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
SSA_NAME_DEF_STMT (t) = nphi;
t = PHI_ARG_DEF_FROM_EDGE (phi, se);
- /* A special case -- fd->v is not yet computed in iter_part_bb, we
- need to use v_extra instead. */
- if (t == fd->v)
+ /* A special case -- fd->loop.v is not yet computed in
+ iter_part_bb, we need to use v_extra instead. */
+ if (t == fd->loop.v)
t = v_extra;
add_phi_arg (nphi, t, ene);
add_phi_arg (nphi, TREE_VALUE (args), re);
@@ -3349,8 +4301,14 @@ static void
expand_omp_for (struct omp_region *region)
{
struct omp_for_data fd;
+ struct omp_for_data_loop *loops;
- extract_omp_for_data (last_stmt (region->entry), &fd);
+ loops
+ = (struct omp_for_data_loop *)
+ alloca (TREE_VEC_LENGTH (OMP_FOR_INIT (last_stmt (region->entry)))
+ * sizeof (struct omp_for_data_loop));
+
+ extract_omp_for_data (last_stmt (region->entry), &fd, loops);
region->sched_kind = fd.sched_kind;
gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
@@ -3365,6 +4323,7 @@ expand_omp_for (struct omp_region *region)
if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
&& !fd.have_ordered
+ && fd.collapse == 1
&& region->cont != NULL)
{
if (fd.chunk_size == NULL)
@@ -3374,9 +4333,21 @@ expand_omp_for (struct omp_region *region)
}
else
{
- int fn_index = fd.sched_kind + fd.have_ordered * 4;
- int start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
- int next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
+ int fn_index, start_ix, next_ix;
+
+ gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
+ fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
+ ? 3 : fd.sched_kind;
+ fn_index += fd.have_ordered * 4;
+ start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
+ next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
+ if (fd.iter_type == long_long_unsigned_type_node)
+ {
+ start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
+ - BUILT_IN_GOMP_LOOP_STATIC_START;
+ next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
+ - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
+ }
expand_omp_for_generic (region, &fd, start_ix, next_ix);
}
@@ -4037,7 +5008,11 @@ expand_omp (struct omp_region *region)
switch (region->type)
{
case OMP_PARALLEL:
- expand_omp_parallel (region);
+ expand_omp_taskreg (region);
+ break;
+
+ case OMP_TASK:
+ expand_omp_taskreg (region);
break;
case OMP_FOR:
@@ -4110,7 +5085,7 @@ build_omp_regions_1 (basic_block bb, struct omp_region *parent,
}
else if (code == OMP_ATOMIC_STORE)
{
- /* OMP_ATOMIC_STORE is analoguous to OMP_RETURN, but matches with
+ /* OMP_ATOMIC_STORE is analogous to OMP_RETURN, but matches with
OMP_ATOMIC_LOAD. */
gcc_assert (parent);
gcc_assert (parent->type == OMP_ATOMIC_LOAD);
@@ -4355,6 +5330,9 @@ lower_omp_single_simple (tree single_stmt, tree *pre_p)
tree t;
t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_START], 0);
+ if (TREE_TYPE (t) != boolean_type_node)
+ t = fold_build2 (NE_EXPR, boolean_type_node,
+ t, build_int_cst (TREE_TYPE (t), 0));
t = build3 (COND_EXPR, void_type_node, t,
OMP_SINGLE_BODY (single_stmt), NULL);
gimplify_and_add (t, pre_p);
@@ -4661,37 +5639,38 @@ lower_omp_for_lastprivate (struct omp_for_data *fd, tree *body_p,
tree clauses, cond, stmts, vinit, t;
enum tree_code cond_code;
- cond_code = fd->cond_code;
+ cond_code = fd->loop.cond_code;
cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
/* When possible, use a strict equality expression. This can let VRP
type optimizations deduce the value and remove a copy. */
- if (host_integerp (fd->step, 0))
+ if (host_integerp (fd->loop.step, 0))
{
- HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->step);
+ HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
if (step == 1 || step == -1)
cond_code = EQ_EXPR;
}
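A hedged sketch of the effect being aimed for; the scalar code below is illustrative, not what the pass literally emits:

/* Illustrative only: with a unit step the iteration variable ends exactly
   at the bound, so the lastprivate guard can use == instead of >=, and
   value-range propagation can then prove the exact value inside the
   guarded block.  */
static int
unit_step_demo (int n)
{
  int i, last = 0;
  for (i = 0; i < n; i++)
    last = i;
  if (i == n)          /* strict equality instead of i >= n */
    return last;       /* inside this block VRP knows i == n */
  return 0;
}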
- cond = build2 (cond_code, boolean_type_node, fd->v, fd->n2);
+ cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
clauses = OMP_FOR_CLAUSES (fd->for_stmt);
stmts = NULL;
lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
if (stmts != NULL)
{
- append_to_statement_list (stmts, dlist);
+ append_to_statement_list (*dlist, &stmts);
+ *dlist = stmts;
/* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
- vinit = fd->n1;
+ vinit = fd->loop.n1;
if (cond_code == EQ_EXPR
- && host_integerp (fd->n2, 0)
- && ! integer_zerop (fd->n2))
- vinit = build_int_cst (TREE_TYPE (fd->v), 0);
+ && host_integerp (fd->loop.n2, 0)
+ && ! integer_zerop (fd->loop.n2))
+ vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
/* Initialize the iterator variable, so that threads that don't execute
any iterations don't execute the lastprivate clauses by accident. */
- t = build_gimple_modify_stmt (fd->v, vinit);
+ t = build_gimple_modify_stmt (fd->loop.v, vinit);
gimplify_and_add (t, body_p);
}
}
@@ -4704,6 +5683,7 @@ lower_omp_for (tree *stmt_p, omp_context *ctx)
{
tree t, stmt, ilist, dlist, new_stmt, *body_p, *rhs_p;
struct omp_for_data fd;
+ int i;
stmt = *stmt_p;
@@ -4724,8 +5704,8 @@ lower_omp_for (tree *stmt_p, omp_context *ctx)
/* The pre-body and input clauses go before the lowered OMP_FOR. */
ilist = NULL;
dlist = NULL;
- append_to_statement_list (OMP_FOR_PRE_BODY (stmt), body_p);
lower_rec_input_clauses (OMP_FOR_CLAUSES (stmt), body_p, &dlist, ctx);
+ append_to_statement_list (OMP_FOR_PRE_BODY (stmt), body_p);
/* Lower the header expressions. At this point, we can assume that
the header is of the form:
@@ -4734,20 +5714,24 @@ lower_omp_for (tree *stmt_p, omp_context *ctx)
We just need to make sure that VAL1, VAL2 and VAL3 are lowered
using the .omp_data_s mapping, if needed. */
- rhs_p = &GIMPLE_STMT_OPERAND (OMP_FOR_INIT (stmt), 1);
- if (!is_gimple_min_invariant (*rhs_p))
- *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
-
- rhs_p = &TREE_OPERAND (OMP_FOR_COND (stmt), 1);
- if (!is_gimple_min_invariant (*rhs_p))
- *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
-
- rhs_p = &TREE_OPERAND (GIMPLE_STMT_OPERAND (OMP_FOR_INCR (stmt), 1), 1);
- if (!is_gimple_min_invariant (*rhs_p))
- *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
+ for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
+ {
+ rhs_p = &GIMPLE_STMT_OPERAND (TREE_VEC_ELT (OMP_FOR_INIT (stmt), i), 1);
+ if (!is_gimple_min_invariant (*rhs_p))
+ *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
+
+ rhs_p = &TREE_OPERAND (TREE_VEC_ELT (OMP_FOR_COND (stmt), i), 1);
+ if (!is_gimple_min_invariant (*rhs_p))
+ *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
+
+ rhs_p = &TREE_OPERAND (GIMPLE_STMT_OPERAND
+ (TREE_VEC_ELT (OMP_FOR_INCR (stmt), i), 1), 1);
+ if (!is_gimple_min_invariant (*rhs_p))
+ *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
+ }
/* Once lowered, extract the bounds and clauses. */
- extract_omp_for_data (stmt, &fd);
+ extract_omp_for_data (stmt, &fd, NULL);
lower_omp_for_lastprivate (&fd, body_p, &dlist, ctx);
@@ -4755,7 +5739,7 @@ lower_omp_for (tree *stmt_p, omp_context *ctx)
append_to_statement_list (OMP_FOR_BODY (stmt), body_p);
- t = build2 (OMP_CONTINUE, void_type_node, fd.v, fd.v);
+ t = build2 (OMP_CONTINUE, void_type_node, fd.loop.v, fd.loop.v);
append_to_statement_list (t, body_p);
/* After the loop, add exit clauses. */
@@ -4800,11 +5784,290 @@ check_combined_parallel (tree *tp, int *walk_subtrees, void *data)
return NULL;
}
-/* Lower the OpenMP parallel directive in *STMT_P. CTX holds context
+struct omp_taskcopy_context
+{
+ /* This field must be at the beginning, as we do "inheritance": Some
+ callback functions for tree-inline.c (e.g., omp_copy_decl)
+ receive a copy_body_data pointer that is up-casted to an
+ omp_context pointer. */
+ copy_body_data cb;
+ omp_context *ctx;
+};
+
+static tree
+task_copyfn_copy_decl (tree var, copy_body_data *cb)
+{
+ struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
+
+ if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
+ return create_tmp_var (TREE_TYPE (var), NULL);
+
+ return var;
+}
+
+static tree
+task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
+{
+ tree name, new_fields = NULL, type, f;
+
+ type = lang_hooks.types.make_type (RECORD_TYPE);
+ name = DECL_NAME (TYPE_NAME (orig_type));
+ name = build_decl (TYPE_DECL, name, type);
+ TYPE_NAME (type) = name;
+
+ for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
+ {
+ tree new_f = copy_node (f);
+ DECL_CONTEXT (new_f) = type;
+ TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
+ TREE_CHAIN (new_f) = new_fields;
+ walk_tree (&DECL_SIZE (new_f), copy_body_r, &tcctx->cb, NULL);
+ walk_tree (&DECL_SIZE_UNIT (new_f), copy_body_r, &tcctx->cb, NULL);
+ walk_tree (&DECL_FIELD_OFFSET (new_f), copy_body_r, &tcctx->cb, NULL);
+ new_fields = new_f;
+ *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
+ }
+ TYPE_FIELDS (type) = nreverse (new_fields);
+ layout_type (type);
+ return type;
+}
+
+/* Create task copyfn. */
+
+static void
+create_task_copyfn (tree task_stmt, omp_context *ctx)
+{
+ struct function *child_cfun;
+ tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
+ tree record_type, srecord_type, bind, list;
+ bool record_needs_remap = false, srecord_needs_remap = false;
+ splay_tree_node n;
+ struct omp_taskcopy_context tcctx;
+
+ child_fn = OMP_TASK_COPYFN (task_stmt);
+ child_cfun = DECL_STRUCT_FUNCTION (child_fn);
+ gcc_assert (child_cfun->cfg == NULL);
+ child_cfun->dont_save_pending_sizes_p = 1;
+ DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
+
+ /* Reset DECL_CONTEXT on function arguments. */
+ for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
+ DECL_CONTEXT (t) = child_fn;
+
+ /* Populate the function. */
+ push_gimplify_context ();
+ current_function_decl = child_fn;
+
+ bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
+ TREE_SIDE_EFFECTS (bind) = 1;
+ list = NULL;
+ DECL_SAVED_TREE (child_fn) = bind;
+ DECL_SOURCE_LOCATION (child_fn) = EXPR_LOCATION (task_stmt);
+
+ /* Remap src and dst argument types if needed. */
+ record_type = ctx->record_type;
+ srecord_type = ctx->srecord_type;
+ for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
+ if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
+ {
+ record_needs_remap = true;
+ break;
+ }
+ for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
+ if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
+ {
+ srecord_needs_remap = true;
+ break;
+ }
+
+ if (record_needs_remap || srecord_needs_remap)
+ {
+ memset (&tcctx, '\0', sizeof (tcctx));
+ tcctx.cb.src_fn = ctx->cb.src_fn;
+ tcctx.cb.dst_fn = child_fn;
+ tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
+ tcctx.cb.dst_node = tcctx.cb.src_node;
+ tcctx.cb.src_cfun = ctx->cb.src_cfun;
+ tcctx.cb.copy_decl = task_copyfn_copy_decl;
+ tcctx.cb.eh_region = -1;
+ tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
+ tcctx.cb.decl_map = pointer_map_create ();
+ tcctx.ctx = ctx;
+
+ if (record_needs_remap)
+ record_type = task_copyfn_remap_type (&tcctx, record_type);
+ if (srecord_needs_remap)
+ srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
+ }
+ else
+ tcctx.cb.decl_map = NULL;
+
+ push_cfun (child_cfun);
+
+ arg = DECL_ARGUMENTS (child_fn);
+ TREE_TYPE (arg) = build_pointer_type (record_type);
+ sarg = TREE_CHAIN (arg);
+ TREE_TYPE (sarg) = build_pointer_type (srecord_type);
+
+ /* First pass: initialize temporaries used in record_type and srecord_type
+ sizes and field offsets. */
+ if (tcctx.cb.decl_map)
+ for (c = OMP_TASK_CLAUSES (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
+ {
+ tree *p;
+
+ decl = OMP_CLAUSE_DECL (c);
+ p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
+ if (p == NULL)
+ continue;
+ n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
+ sf = (tree) n->value;
+ sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
+ src = build_fold_indirect_ref (sarg);
+ src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
+ t = build_gimple_modify_stmt (*p, src);
+ append_to_statement_list (t, &list);
+ }
+
+ /* Second pass: copy shared var pointers and copy construct non-VLA
+ firstprivate vars. */
+ for (c = OMP_TASK_CLAUSES (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
+ switch (OMP_CLAUSE_CODE (c))
+ {
+ case OMP_CLAUSE_SHARED:
+ decl = OMP_CLAUSE_DECL (c);
+ n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
+ if (n == NULL)
+ break;
+ f = (tree) n->value;
+ if (tcctx.cb.decl_map)
+ f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
+ n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
+ sf = (tree) n->value;
+ if (tcctx.cb.decl_map)
+ sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
+ src = build_fold_indirect_ref (sarg);
+ src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
+ dst = build_fold_indirect_ref (arg);
+ dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
+ t = build_gimple_modify_stmt (dst, src);
+ append_to_statement_list (t, &list);
+ break;
+ case OMP_CLAUSE_FIRSTPRIVATE:
+ decl = OMP_CLAUSE_DECL (c);
+ if (is_variable_sized (decl))
+ break;
+ n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
+ if (n == NULL)
+ break;
+ f = (tree) n->value;
+ if (tcctx.cb.decl_map)
+ f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
+ n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
+ if (n != NULL)
+ {
+ sf = (tree) n->value;
+ if (tcctx.cb.decl_map)
+ sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
+ src = build_fold_indirect_ref (sarg);
+ src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
+ if (use_pointer_for_field (decl, NULL) || is_reference (decl))
+ src = build_fold_indirect_ref (src);
+ }
+ else
+ src = decl;
+ dst = build_fold_indirect_ref (arg);
+ dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
+ t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
+ append_to_statement_list (t, &list);
+ break;
+ case OMP_CLAUSE_PRIVATE:
+ if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
+ break;
+ decl = OMP_CLAUSE_DECL (c);
+ n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
+ f = (tree) n->value;
+ if (tcctx.cb.decl_map)
+ f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
+ n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
+ if (n != NULL)
+ {
+ sf = (tree) n->value;
+ if (tcctx.cb.decl_map)
+ sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
+ src = build_fold_indirect_ref (sarg);
+ src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
+ if (use_pointer_for_field (decl, NULL))
+ src = build_fold_indirect_ref (src);
+ }
+ else
+ src = decl;
+ dst = build_fold_indirect_ref (arg);
+ dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
+ t = build_gimple_modify_stmt (dst, src);
+ append_to_statement_list (t, &list);
+ break;
+ default:
+ break;
+ }
+
+ /* Last pass: handle VLA firstprivates. */
+ if (tcctx.cb.decl_map)
+ for (c = OMP_TASK_CLAUSES (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
+ {
+ tree ind, ptr, df;
+
+ decl = OMP_CLAUSE_DECL (c);
+ if (!is_variable_sized (decl))
+ continue;
+ n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
+ if (n == NULL)
+ continue;
+ f = (tree) n->value;
+ f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
+ gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
+ ind = DECL_VALUE_EXPR (decl);
+ gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
+ gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
+ n = splay_tree_lookup (ctx->sfield_map,
+ (splay_tree_key) TREE_OPERAND (ind, 0));
+ sf = (tree) n->value;
+ sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
+ src = build_fold_indirect_ref (sarg);
+ src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
+ src = build_fold_indirect_ref (src);
+ dst = build_fold_indirect_ref (arg);
+ dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
+ t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
+ append_to_statement_list (t, &list);
+ n = splay_tree_lookup (ctx->field_map,
+ (splay_tree_key) TREE_OPERAND (ind, 0));
+ df = (tree) n->value;
+ df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
+ ptr = build_fold_indirect_ref (arg);
+ ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
+ t = build_gimple_modify_stmt (ptr, build_fold_addr_expr (dst));
+ append_to_statement_list (t, &list);
+ }
+
+ t = build1 (RETURN_EXPR, void_type_node, NULL);
+ append_to_statement_list (t, &list);
+
+ if (tcctx.cb.decl_map)
+ pointer_map_destroy (tcctx.cb.decl_map);
+ pop_gimplify_context (NULL);
+ BIND_EXPR_BODY (bind) = list;
+ pop_cfun ();
+ current_function_decl = ctx->cb.src_fn;
+}
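A hedged example of when a task copy function is needed at all; init_vec and use_vec are hypothetical helpers, not part of the patch:

extern void init_vec (double *, int);
extern void use_vec (double *, int);

/* Illustrative only: a firstprivate variable-length array cannot be copied
   by a flat copy of the task data block, so a copy function like the one
   constructed above runs when the task is deferred.  */
static void
vla_task_demo (int n)
{
  double v[n];
  init_vec (v, n);
#pragma omp task firstprivate (v)
  use_vec (v, n);
}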
+
+/* Lower the OpenMP parallel or task directive in *STMT_P. CTX holds context
information for the directive. */
static void
-lower_omp_parallel (tree *stmt_p, omp_context *ctx)
+lower_omp_taskreg (tree *stmt_p, omp_context *ctx)
{
tree clauses, par_bind, par_body, new_body, bind;
tree olist, ilist, par_olist, par_ilist;
@@ -4812,11 +6075,11 @@ lower_omp_parallel (tree *stmt_p, omp_context *ctx)
stmt = *stmt_p;
- clauses = OMP_PARALLEL_CLAUSES (stmt);
- par_bind = OMP_PARALLEL_BODY (stmt);
+ clauses = OMP_TASKREG_CLAUSES (stmt);
+ par_bind = OMP_TASKREG_BODY (stmt);
par_body = BIND_EXPR_BODY (par_bind);
child_fn = ctx->cb.dst_fn;
- if (!OMP_PARALLEL_COMBINED (stmt))
+ if (TREE_CODE (stmt) == OMP_PARALLEL && !OMP_PARALLEL_COMBINED (stmt))
{
struct walk_stmt_info wi;
int ws_num = 0;
@@ -4829,6 +6092,8 @@ lower_omp_parallel (tree *stmt_p, omp_context *ctx)
if (ws_num == 1)
OMP_PARALLEL_COMBINED (stmt) = 1;
}
+ if (ctx->srecord_type)
+ create_task_copyfn (stmt, ctx);
push_gimplify_context ();
@@ -4836,7 +6101,8 @@ lower_omp_parallel (tree *stmt_p, omp_context *ctx)
par_ilist = NULL_TREE;
lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
lower_omp (&par_body, ctx);
- lower_reduction_clauses (clauses, &par_olist, ctx);
+ if (TREE_CODE (stmt) == OMP_PARALLEL)
+ lower_reduction_clauses (clauses, &par_olist, ctx);
/* Declare all the variables created by mapping and the variables
declared in the scope of the parallel body. */
@@ -4845,8 +6111,10 @@ lower_omp_parallel (tree *stmt_p, omp_context *ctx)
if (ctx->record_type)
{
- ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_data_o");
- OMP_PARALLEL_DATA_ARG (stmt) = ctx->sender_decl;
+ ctx->sender_decl
+ = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
+ : ctx->record_type, ".omp_data_o");
+ OMP_TASKREG_DATA_ARG (stmt) = ctx->sender_decl;
}
olist = NULL_TREE;
@@ -4855,7 +6123,7 @@ lower_omp_parallel (tree *stmt_p, omp_context *ctx)
lower_send_shared_vars (&ilist, &olist, ctx);
/* Once all the expansions are done, sequence all the different
- fragments inside OMP_PARALLEL_BODY. */
+ fragments inside OMP_TASKREG_BODY. */
bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
append_to_statement_list (ilist, &BIND_EXPR_BODY (bind));
@@ -4876,7 +6144,7 @@ lower_omp_parallel (tree *stmt_p, omp_context *ctx)
maybe_catch_exception (&new_body);
t = make_node (OMP_RETURN);
append_to_statement_list (t, &new_body);
- OMP_PARALLEL_BODY (stmt) = new_body;
+ OMP_TASKREG_BODY (stmt) = new_body;
append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
append_to_statement_list (olist, &BIND_EXPR_BODY (bind));
@@ -4890,17 +6158,21 @@ lower_omp_parallel (tree *stmt_p, omp_context *ctx)
regimplified. */
static tree
-lower_omp_2 (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
+lower_omp_2 (tree *tp, int *walk_subtrees, void *data)
{
tree t = *tp;
+ omp_context *ctx = data;
/* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
- if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
+ if (TREE_CODE (t) == VAR_DECL
+ && ((ctx && DECL_HAS_VALUE_EXPR_P (t))
+ || (task_shared_vars
+ && bitmap_bit_p (task_shared_vars, DECL_UID (t)))))
return t;
/* If a global variable has been privatized, TREE_CONSTANT on
ADDR_EXPR might be wrong. */
- if (TREE_CODE (t) == ADDR_EXPR)
+ if (ctx && TREE_CODE (t) == ADDR_EXPR)
recompute_tree_invariant_for_addr_expr (t);
*walk_subtrees = !TYPE_P (t) && !DECL_P (t);
@@ -4940,7 +6212,7 @@ lower_omp_1 (tree *tp, omp_context *ctx, tree_stmt_iterator *tsi)
case COND_EXPR:
lower_omp_1 (&COND_EXPR_THEN (t), ctx, NULL);
lower_omp_1 (&COND_EXPR_ELSE (t), ctx, NULL);
- if (ctx
+ if ((ctx || task_shared_vars)
&& walk_tree (&COND_EXPR_COND (t), lower_omp_2, ctx, NULL))
{
tree pre = NULL;
@@ -4977,8 +6249,9 @@ lower_omp_1 (tree *tp, omp_context *ctx, tree_stmt_iterator *tsi)
break;
case OMP_PARALLEL:
+ case OMP_TASK:
ctx = maybe_lookup_ctx (t);
- lower_omp_parallel (tp, ctx);
+ lower_omp_taskreg (tp, ctx);
break;
case OMP_FOR:
ctx = maybe_lookup_ctx (t);
@@ -5012,7 +6285,8 @@ lower_omp_1 (tree *tp, omp_context *ctx, tree_stmt_iterator *tsi)
break;
default:
- if (ctx && walk_tree (tp, lower_omp_2, ctx, NULL))
+ if ((ctx || task_shared_vars)
+ && walk_tree (tp, lower_omp_2, ctx, NULL))
{
/* The gimplifier doesn't gimplify CALL_EXPR_STATIC_CHAIN.
Handle that here. */
@@ -5068,16 +6342,23 @@ execute_lower_omp (void)
delete_omp_context);
scan_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
- gcc_assert (parallel_nesting_level == 0);
+ gcc_assert (taskreg_nesting_level == 0);
if (all_contexts->root)
- lower_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
+ {
+ if (task_shared_vars)
+ push_gimplify_context ();
+ lower_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
+ if (task_shared_vars)
+ pop_gimplify_context (NULL);
+ }
if (all_contexts)
{
splay_tree_delete (all_contexts);
all_contexts = NULL;
}
+ BITMAP_FREE (task_shared_vars);
return 0;
}
@@ -5124,7 +6405,7 @@ diagnose_sb_0 (tree *stmt_p, tree branch_ctx, tree label_ctx)
return false;
/* Try to avoid confusing the user by producing and error message
- with correct "exit" or "enter" verbage. We prefer "exit"
+ with correct "exit" or "enter" verbiage. We prefer "exit"
unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
if (branch_ctx == NULL)
exit_p = false;
@@ -5160,11 +6441,13 @@ diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
tree context = (tree) wi->info;
tree inner_context;
tree t = *tp;
+ int i;
*walk_subtrees = 0;
switch (TREE_CODE (t))
{
case OMP_PARALLEL:
+ case OMP_TASK:
case OMP_SECTIONS:
case OMP_SINGLE:
walk_tree (&OMP_CLAUSES (t), diagnose_sb_1, wi, NULL);
@@ -5184,9 +6467,15 @@ diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_1, wi, NULL);
inner_context = tree_cons (NULL, t, context);
wi->info = inner_context;
- walk_tree (&OMP_FOR_INIT (t), diagnose_sb_1, wi, NULL);
- walk_tree (&OMP_FOR_COND (t), diagnose_sb_1, wi, NULL);
- walk_tree (&OMP_FOR_INCR (t), diagnose_sb_1, wi, NULL);
+ for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (t)); i++)
+ {
+ walk_tree (&TREE_VEC_ELT (OMP_FOR_INIT (t), i), diagnose_sb_1,
+ wi, NULL);
+ walk_tree (&TREE_VEC_ELT (OMP_FOR_COND (t), i), diagnose_sb_1,
+ wi, NULL);
+ walk_tree (&TREE_VEC_ELT (OMP_FOR_INCR (t), i), diagnose_sb_1,
+ wi, NULL);
+ }
walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
walk_stmts (wi, &OMP_FOR_BODY (t));
wi->info = context;
@@ -5214,11 +6503,13 @@ diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
tree context = (tree) wi->info;
splay_tree_node n;
tree t = *tp;
+ int i;
*walk_subtrees = 0;
switch (TREE_CODE (t))
{
case OMP_PARALLEL:
+ case OMP_TASK:
case OMP_SECTIONS:
case OMP_SINGLE:
walk_tree (&OMP_CLAUSES (t), diagnose_sb_2, wi, NULL);
@@ -5235,9 +6526,15 @@ diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
case OMP_FOR:
walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_2, wi, NULL);
wi->info = t;
- walk_tree (&OMP_FOR_INIT (t), diagnose_sb_2, wi, NULL);
- walk_tree (&OMP_FOR_COND (t), diagnose_sb_2, wi, NULL);
- walk_tree (&OMP_FOR_INCR (t), diagnose_sb_2, wi, NULL);
+ for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (t)); i++)
+ {
+ walk_tree (&TREE_VEC_ELT (OMP_FOR_INIT (t), i), diagnose_sb_2,
+ wi, NULL);
+ walk_tree (&TREE_VEC_ELT (OMP_FOR_COND (t), i), diagnose_sb_2,
+ wi, NULL);
+ walk_tree (&TREE_VEC_ELT (OMP_FOR_INCR (t), i), diagnose_sb_2,
+ wi, NULL);
+ }
walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
walk_stmts (wi, &OMP_FOR_BODY (t));
wi->info = context;
diff --git a/gcc/optabs.c b/gcc/optabs.c
index bd054edce18..558a708c5f9 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -5785,7 +5785,7 @@ gen_fp_to_int_conv_libfunc (convert_optab tab,
gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
}
-/* Initialize the libfunc fiels of an of an intra-mode-class conversion optab.
+/* Initialize the libfunc fields of an intra-mode-class conversion optab.
The string formation rules are
similar to the ones for init_libfunc, above. */
diff --git a/gcc/optabs.h b/gcc/optabs.h
index 426b0d845fd..30a28d4816d 100644
--- a/gcc/optabs.h
+++ b/gcc/optabs.h
@@ -1,5 +1,5 @@
/* Definitions for code generation pass of GNU compiler.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
@@ -115,7 +115,7 @@ enum optab_index
wider than the multiplicand and multiplier.
All involved operations are saturating. */
OTI_ssmadd_widen,
- /* Unigned multiply and add with the result and addend one machine mode
+ /* Unsigned multiply and add with the result and addend one machine mode
wider than the multiplicand and multiplier.
All involved operations are saturating. */
OTI_usmadd_widen,
@@ -129,7 +129,7 @@ enum optab_index
wider than the multiplicand and multiplier.
All involved operations are saturating. */
OTI_ssmsub_widen,
- /* Unigned multiply and subtract the result and minuend one machine mode
+ /* Unsigned multiply and subtract the result and minuend one machine mode
wider than the multiplicand and multiplier.
All involved operations are saturating. */
OTI_usmsub_widen,
diff --git a/gcc/opts.c b/gcc/opts.c
index 7add8d3d250..4a6d440bc1b 100644
--- a/gcc/opts.c
+++ b/gcc/opts.c
@@ -1,5 +1,5 @@
/* Command line option handling.
- Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
Contributed by Neil Booth.
@@ -889,6 +889,8 @@ decode_options (unsigned int argc, const char **argv)
if (!optimize_size)
{
+ /* Conditional DCE generates bigger code. */
+ flag_tree_builtin_call_dce = 1;
/* PRE tends to generate bigger code. */
flag_tree_pre = 1;
}
@@ -948,7 +950,7 @@ decode_options (unsigned int argc, const char **argv)
modify it. */
target_flags = targetm.default_target_flags;
- /* Some tagets have ABI-specified unwind tables. */
+ /* Some targets have ABI-specified unwind tables. */
flag_unwind_tables = targetm.unwind_tables_default;
#ifdef OPTIMIZATION_OPTIONS
@@ -1403,7 +1405,7 @@ common_handle_option (size_t scode, const char *arg, int value,
unsigned int include_flags = 0;
/* Note - by default we include undocumented options when listing
specific classes. If you only want to see documented options
- then add ",^undocumented" to the --help= option. e.g.:
+ then add ",^undocumented" to the --help= option. E.g.:
--help=target,^undocumented */
unsigned int exclude_flags = 0;
@@ -1463,7 +1465,7 @@ common_handle_option (size_t scode, const char *arg, int value,
/* Check to see if the string matches a language name.
Note - we rely upon the alpha-sorted nature of the entries in
the lang_names array, specifically that shorter names appear
- before their longer variants. (ie C before C++). That way
+ before their longer variants. (i.e. C before C++). That way
when we are attempting to match --help=c for example we will
match with C first and not C++. */
for (i = 0, lang_flag = 0; i < cl_lang_count; i++)
diff --git a/gcc/passes.c b/gcc/passes.c
index 0578d13e71e..e26bfb8dd4c 100644
--- a/gcc/passes.c
+++ b/gcc/passes.c
@@ -1,6 +1,6 @@
/* Top level of GCC compilers (cc1, cc1plus, etc.)
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
@@ -564,6 +564,14 @@ init_optimization_passes (void)
NEXT_PASS (pass_copy_prop);
NEXT_PASS (pass_merge_phi);
NEXT_PASS (pass_dce);
+ /* Ideally the function call conditional
+ dead code elimination phase could be delayed
+ until later, where potentially more opportunities
+ can be found. Due to the lack of a good way to
+ update the VDEFs associated with the shrink-wrapped
+ calls, it is better to do the transformation
+ here, where memory SSA is not built yet. */
+ NEXT_PASS (pass_call_cdce);
NEXT_PASS (pass_update_address_taken);
NEXT_PASS (pass_simple_dse);
NEXT_PASS (pass_tail_recursion);
@@ -1243,7 +1251,7 @@ execute_one_pass (struct opt_pass *pass)
#endif
/* IPA passes are executed on whole program, so cfun should be NULL.
- Ohter passes needs function context set. */
+ Other passes need function context set. */
if (pass->type == SIMPLE_IPA_PASS || pass->type == IPA_PASS)
gcc_assert (!cfun && !current_function_decl);
else
@@ -1390,8 +1398,6 @@ execute_ipa_pass_list (struct opt_pass *pass)
}
summaries_generated = true;
}
- else
- summaries_generated = false;
if (execute_one_pass (pass) && pass->sub)
{
if (pass->sub->type == GIMPLE_PASS)
diff --git a/gcc/postreload-gcse.c b/gcc/postreload-gcse.c
index 4dc312d91cf..27889d01f2e 100644
--- a/gcc/postreload-gcse.c
+++ b/gcc/postreload-gcse.c
@@ -1,5 +1,5 @@
/* Post reload partially redundant load elimination
- Copyright (C) 2004, 2005, 2006, 2007
+ Copyright (C) 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
@@ -1269,7 +1269,7 @@ gcse_after_reload_main (rtx f ATTRIBUTE_UNUSED)
memset (&stats, 0, sizeof (stats));
- /* Allocate ememory for this pass.
+ /* Allocate memory for this pass.
Also computes and initializes the insns' CUIDs. */
alloc_mem ();
diff --git a/gcc/postreload.c b/gcc/postreload.c
index 15a14f001c6..527bd8b0bef 100644
--- a/gcc/postreload.c
+++ b/gcc/postreload.c
@@ -1,6 +1,6 @@
/* Perform simple optimizations to clean up the result of reload.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997,
- 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
@@ -661,7 +661,7 @@ reload_cse_simplify_operands (rtx insn, rtx testreg)
replace them with reg+reg addressing. */
#define RELOAD_COMBINE_MAX_USES 6
-/* INSN is the insn where a register has ben used, and USEP points to the
+/* INSN is the insn where a register has been used, and USEP points to the
location of the register within the rtl. */
struct reg_use { rtx insn, *usep; };
diff --git a/gcc/predict.c b/gcc/predict.c
index 42852dcfcac..2d6faf35679 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -1,5 +1,5 @@
/* Branch prediction routines for the GNU compiler.
- Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
@@ -1493,7 +1493,7 @@ tree_estimate_probability (void)
return 0;
}
-/* Predict edges to succestors of CUR whose sources are not postdominated by
+/* Predict edges to successors of CUR whose sources are not postdominated by
BB by PRED and recurse to all postdominators. */
static void
diff --git a/gcc/pretty-print.h b/gcc/pretty-print.h
index 039058ee132..dd3f0c0ad39 100644
--- a/gcc/pretty-print.h
+++ b/gcc/pretty-print.h
@@ -1,5 +1,5 @@
/* Various declarations for language-independent pretty-print subroutines.
- Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2003, 2004, 2007, 2008 Free Software Foundation, Inc.
Contributed by Gabriel Dos Reis <gdr@integrable-solutions.net>
This file is part of GCC.
@@ -142,7 +142,7 @@ typedef bool (*printer_fn) (pretty_printer *, text_info *, const char *,
formatting. */
#define pp_needs_newline(PP) pp_base (PP)->need_newline
-/* True if PRETTY-PTINTER is in line-wrapping mode. */
+/* True if PRETTY-PRINTER is in line-wrapping mode. */
#define pp_is_wrapping_line(PP) (pp_line_cutoff (PP) > 0)
/* The amount of whitespace to be emitted when starting a new line. */
diff --git a/gcc/profile.c b/gcc/profile.c
index a4f46b3ad4d..78568228e1f 100644
--- a/gcc/profile.c
+++ b/gcc/profile.c
@@ -441,7 +441,7 @@ compute_branch_probabilities (void)
FOR_EACH_EDGE (e, ei, bb->succs)
total += e->count;
- /* Seedgeh for the invalid edge, and set its count. */
+ /* Search for the invalid edge, and set its count. */
FOR_EACH_EDGE (e, ei, bb->succs)
if (! EDGE_INFO (e)->count_valid && ! EDGE_INFO (e)->ignore)
break;
diff --git a/gcc/protoize.c b/gcc/protoize.c
index 0eb6957a969..aab1291dbb2 100644
--- a/gcc/protoize.c
+++ b/gcc/protoize.c
@@ -1,6 +1,7 @@
/* Protoize program - Original version by Ron Guilmette (rfg@segfault.us.com).
Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008 Free Software
+ Foundation, Inc.
This file is part of GCC.
@@ -1723,7 +1724,7 @@ save_def_or_dec (const char *l, int is_syscalls)
}
/* Since we are unprotoizing, if this item is already in old (K&R) style,
- we can just ignore it. If that is true, throw away the itme now. */
+ we can just ignore it. If that is true, throw away the item now. */
if (!def_dec_p->prototyped)
{
diff --git a/gcc/ra-conflict.c b/gcc/ra-conflict.c
index f97d9a89ee2..c693bc4d8b3 100644
--- a/gcc/ra-conflict.c
+++ b/gcc/ra-conflict.c
@@ -1,5 +1,5 @@
/* Allocate registers for pseudo-registers that span basic blocks.
- Copyright (C) 2007 Free Software Foundation, Inc.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
This file is part of GCC.
@@ -405,7 +405,7 @@ set_conflicts_for_earlyclobber (rtx insn)
/* Init LIVE_SUBREGS[ALLOCNUM] and LIVE_SUBREGS_USED[ALLOCNUM] using
- REG to the the number of nregs, and INIT_VALUE to get the
+ REG to the number of nregs, and INIT_VALUE to get the
initialization. ALLOCNUM need not be the regno of REG. */
void
diff --git a/gcc/real.c b/gcc/real.c
index c4695cced91..5da2cb3b2ce 100644
--- a/gcc/real.c
+++ b/gcc/real.c
@@ -3526,7 +3526,7 @@ encode_ibm_extended (const struct real_format *fmt, long *buf,
base_fmt = fmt->qnan_msb_set ? &ieee_double_format : &mips_double_format;
- /* Renormlize R before doing any arithmetic on it. */
+ /* Renormalize R before doing any arithmetic on it. */
normr = *r;
if (normr.cl == rvc_normal)
normalize (&normr);
diff --git a/gcc/recog.c b/gcc/recog.c
index ee5837dc1ec..bf4c2b4a64f 100644
--- a/gcc/recog.c
+++ b/gcc/recog.c
@@ -451,7 +451,7 @@ confirm_change_group (void)
if (changes[i].unshare)
*changes[i].loc = copy_rtx (*changes[i].loc);
- /* Avoid unnecesary rescanning when multiple changes to same instruction
+ /* Avoid unnecessary rescanning when multiple changes to same instruction
are made. */
if (object)
{
diff --git a/gcc/regclass.c b/gcc/regclass.c
index 8b9e86b1411..f72a80258b3 100644
--- a/gcc/regclass.c
+++ b/gcc/regclass.c
@@ -1961,7 +1961,7 @@ record_reg_classes (int n_alts, int n_ops, rtx *ops,
we may want to adjust the cost of that register class to -1.
Avoid the adjustment if the source does not die to avoid stressing of
- register allocator by preferrencing two colliding registers into single
+ register allocator by preferencing two colliding registers into single
class.
Also avoid the adjustment if a copy between registers of the class
diff --git a/gcc/regs.h b/gcc/regs.h
index f0679f753e9..20587ac27b2 100644
--- a/gcc/regs.h
+++ b/gcc/regs.h
@@ -1,6 +1,7 @@
/* Define per-register tables for data flow info and register allocation.
Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ 1999, 2000, 2003, 2004, 2005, 2006, 2007, 2008 Free Software
+ Foundation, Inc.
This file is part of GCC.
@@ -129,7 +130,7 @@ extern size_t reg_info_p_size;
#define REG_FREQ(N) (reg_info_p[N].freq)
-/* The weights for each insn varries from 0 to REG_FREQ_BASE.
+/* The weight of each insn varies from 0 to REG_FREQ_BASE.
This constant does not need to be high, as in infrequently executed
regions we want to count instructions equivalently to optimize for
size instead of speed. */
diff --git a/gcc/reload.c b/gcc/reload.c
index ad0a04f6633..9ab01375a91 100644
--- a/gcc/reload.c
+++ b/gcc/reload.c
@@ -1,6 +1,6 @@
/* Search an insn for pseudo regs that must be in hard regs and are not.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
@@ -988,7 +988,7 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc,
we can't handle it here because CONST_INT does not indicate a mode.
Similarly, we must reload the inside expression if we have a
- STRICT_LOW_PART (presumably, in == out in the cas).
+ STRICT_LOW_PART (presumably, in == out in this case).
Also reload the inner expression if it does not require a secondary
reload but the SUBREG does.
diff --git a/gcc/rtl-error.c b/gcc/rtl-error.c
index 6f38631b71e..f5558005c9a 100644
--- a/gcc/rtl-error.c
+++ b/gcc/rtl-error.c
@@ -64,7 +64,7 @@ location_for_asm (const_rtx insn)
return loc;
}
-/* Report a diagnostic MESSAGE (an errror or a WARNING) at the line number
+/* Report a diagnostic MESSAGE (an error or a WARNING) at the line number
of the insn INSN. This is used only when INSN is an `asm' with operands,
and each ASM_OPERANDS records its own source file and line. */
static void
diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c
index f0dc512fcdf..cf58c348489 100644
--- a/gcc/rtlanal.c
+++ b/gcc/rtlanal.c
@@ -841,7 +841,7 @@ reg_set_p (const_rtx reg, const_rtx insn)
/* Similar to reg_set_between_p, but check all registers in X. Return 0
only if none of them are modified between START and END. Return 1 if
- X contains a MEM; this routine does usememory aliasing. */
+ X contains a MEM; this routine does use memory aliasing. */
int
modified_between_p (const_rtx x, const_rtx start, const_rtx end)
@@ -1124,7 +1124,7 @@ noop_move_p (const_rtx insn)
return 0;
/* For now treat an insn with a REG_RETVAL note as a
- a special insn which should not be considered a no-op. */
+ special insn which should not be considered a no-op. */
if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
return 0;
diff --git a/gcc/scan.h b/gcc/scan.h
index ff4b300b4a9..0a1b1af9647 100644
--- a/gcc/scan.h
+++ b/gcc/scan.h
@@ -1,5 +1,6 @@
/* scan.h - Utility declarations for scan-decls and fix-header programs.
- Copyright (C) 1993, 1998, 1999, 2003, 2004, 2007 Free Software Foundation, Inc.
+ Copyright (C) 1993, 1998, 1999, 2003, 2004, 2007, 2008 Free Software
+ Foundation, Inc.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
@@ -65,7 +66,7 @@ extern int scan_decls (struct cpp_reader *, int, char **);
#define INT_TOKEN 303
extern int get_token (FILE *, sstring *);
-/* Current file and line numer, taking #-directives into account */
+/* Current file and line number, taking #-directives into account */
extern int source_lineno;
extern sstring source_filename;
/* Current physical line number */
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index e62046b8cd8..ca090dea4da 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -1,6 +1,6 @@
/* Instruction scheduling pass.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
- 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
@@ -1566,7 +1566,7 @@ check_live_1 (int src, rtx x)
int t = bitmap_bit_p (&not_in_df, b->index);
/* We can have split blocks, that were recently generated.
- such blocks are always outside current region. */
+ Such blocks are always outside current region. */
gcc_assert (!t || (CONTAINING_RGN (b->index)
!= CONTAINING_RGN (BB_TO_BLOCK (src))));
@@ -1753,11 +1753,11 @@ find_conditional_protection (rtx insn, int load_insn_bb)
/* Returns 1 if the same insn1 that participates in the computation
of load_insn's address is feeding a conditional branch that is
- guarding on load_insn. This is true if we find a the two DEF-USE
+ guarding on load_insn. This is true if we find two DEF-USE
chains:
insn1 -> ... -> conditional-branch
insn1 -> ... -> load_insn,
- and if a flow path exist:
+ and if a flow path exists:
insn1 -> ... -> conditional-branch -> ... -> load_insn,
and if insn1 is on the path
region-entry -> ... -> bb_trg -> ... load_insn.
diff --git a/gcc/see.c b/gcc/see.c
index 5084b976559..6ea9f3ed25a 100644
--- a/gcc/see.c
+++ b/gcc/see.c
@@ -1,5 +1,5 @@
/* Sign extension elimination optimization for GNU compiler.
- Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
Contributed by Leehod Baruch <leehod@il.ibm.com>
This file is part of GCC.
@@ -1253,7 +1253,7 @@ see_update_leader_extra_info (struct web_entry *first, struct web_entry *second)
}
break;
default:
- /* Unknown patern type. */
+ /* Unknown pattern type. */
gcc_unreachable ();
}
@@ -1945,7 +1945,7 @@ see_analyze_unmerged_def_local_prop (void **slot, void *b)
}
-/* Analyze the properties of a use extension for the LCM and record anic and
+/* Analyze the properties of a use extension for the LCM and record antic and
avail occurrences.
This is a subroutine of see_analyze_ref_local_prop called
diff --git a/gcc/stmt.c b/gcc/stmt.c
index 00dd59ce463..2908f082ea3 100644
--- a/gcc/stmt.c
+++ b/gcc/stmt.c
@@ -1,6 +1,6 @@
/* Expands front end tree to back end RTL for GCC
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997,
- 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This file is part of GCC.
@@ -622,7 +622,7 @@ tree_conflicts_with_clobbers_p (tree t, HARD_REG_SET *clobbered_regs)
STRING is the instruction template.
OUTPUTS is a list of output arguments (lvalues); INPUTS a list of inputs.
Each output or input has an expression in the TREE_VALUE and
- and a tree list in TREE_PURPOSE which in turn contains a constraint
+ a tree list in TREE_PURPOSE which in turn contains a constraint
name in TREE_VALUE (or NULL_TREE) and a constraint string
in TREE_PURPOSE.
CLOBBERS is a list of STRING_CST nodes each naming a hard register
diff --git a/gcc/target.h b/gcc/target.h
index fa85e7cacd6..9b87dc5c309 100644
--- a/gcc/target.h
+++ b/gcc/target.h
@@ -1,5 +1,5 @@
/* Data structure definitions for a generic GCC target.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify it
@@ -351,7 +351,7 @@ struct gcc_target
second argument is the cost of the dependence as estimated by
the scheduler. The last argument is the distance in cycles
between the already scheduled insn (first parameter) and the
- the second insn (second parameter). */
+ second insn (second parameter). */
bool (* is_costly_dependence) (struct _dep *_dep, int, int);
/* The following member value is a pointer to a function called
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index a2f746c8198..a1ce73c2761 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,56 @@
+2008-06-06 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/36362
+ * gcc.c-torture/execute/20080529-1.c: New test.
+
+2008-06-06 Jakub Jelinek <jakub@redhat.com>
+
+ * gcc.dg/gomp/collapse-1.c: New test.
+ * gcc.dg/gomp/nesting-1.c: New test.
+ * g++.dg/gomp/task-1.C: New test.
+ * g++.dg/gomp/predetermined-1.C: New test.
+ * g++.dg/gomp/tls-4.C: New test.
+ * gfortran.dg/gomp/collapse1.f90: New test.
+ * gfortran.dg/gomp/sharing-3.f90: New test.
+ * gcc.dg/gomp/pr27499.c (foo): Remove is unsigned dg-warning.
+ * g++.dg/gomp/pr27499.C (foo): Likewise.
+ * g++.dg/gomp/for-16.C (foo): Likewise.
+ * g++.dg/gomp/tls-3.C: Remove dg-error, add S::s definition.
+ * g++.dg/gomp/pr34607.C: Adjust dg-error location.
+ * g++.dg/gomp/for-16.C (foo): Add a new dg-error.
+ * gcc.dg/gomp/appendix-a/a.35.4.c: Add dg-warning.
+ * gcc.dg/gomp/appendix-a/a.35.6.c: Likewise.
+ * gfortran.dg/gomp/appendix-a/a.35.4.f90: Likewise.
+ * gfortran.dg/gomp/appendix-a/a.35.6.f90: Likewise.
+ * gfortran.dg/gomp/omp_parse1.f90: Remove !$omp tab test.
+ * gfortran.dg/gomp/appendix-a/a.33.4.f90: Remove dg-error
+ about allocatable array.
+ * gfortran.dg/gomp/reduction1.f90: Likewise.
+
+2008-06-06 Richard Guenther <rguenther@suse.de>
+
+ * gcc.dg/tree-ssa/alias-18.c: XFAIL some sub-tests.
+
+2008-06-04 Joseph Myers <joseph@codesourcery.com>
+
+ * lib/target-supports.exp (check_effective_target_powerpc_spu):
+ Call check_effective_target_powerpc_altivec_ok.
+ * gcc.target/powerpc/dfp-dd.c, gcc.target/powerpc/dfp-td.c,
+ gcc.target/powerpc/ppc32-abi-dfp-1.c,
+ gcc.target/powerpc/ppu-intrinsics.c: Require powerpc_fprs.
+
+2008-06-04 Xinliang David Li <davidxl@google.com>
+
+ * gcc.dg/cdce1.c: New test.
+ * gcc.dg/cdce2.c: Ditto.
+ * g++.dg/cdce3.C: Ditto.
+
+2008-06-04 Janus Weil <janus@gcc.gnu.org>
+
+ PR fortran/36322
+ PR fortran/36275
+ * gfortran.dg/proc_decl_2.f90: Extended.
+
2008-06-04 Joseph Myers <joseph@codesourcery.com>
Maxim Kuvyrkov <maxim@codesourcery.com>
diff --git a/gcc/testsuite/g++.dg/cdce3.C b/gcc/testsuite/g++.dg/cdce3.C
new file mode 100644
index 00000000000..e1e8509f2ac
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cdce3.C
@@ -0,0 +1,200 @@
+/* { dg-do run { target { ! "*-*-darwin" } } } */
+/* { dg-options "-O2 -fmath-errno -fdump-tree-cdce-details -lm" } */
+/* { dg-final { scan-tree-dump "cdce3.C:68: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { scan-tree-dump "cdce3.C:69: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { scan-tree-dump "cdce3.C:70: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { scan-tree-dump "cdce3.C:71: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { scan-tree-dump "cdce3.C:72: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { scan-tree-dump "cdce3.C:73: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { scan-tree-dump "cdce3.C:74: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { scan-tree-dump "cdce3.C:75: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { scan-tree-dump "cdce3.C:76: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { scan-tree-dump "cdce3.C:77: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { scan-tree-dump "cdce3.C:78: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { scan-tree-dump "cdce3.C:79: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { scan-tree-dump "cdce3.C:80: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { scan-tree-dump "cdce3.C:81: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { scan-tree-dump "cdce3.C:82: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { scan-tree-dump "cdce3.C:83: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { cleanup-tree-dump "cdce" } } */
+#include <stdlib.h>
+#include <math.h>
+#ifdef DEBUG
+#include <stdio.h>
+#endif
+#include <errno.h>
+typedef long double ldouble;
+typedef void (*FP) (int xp);
+#define NI __attribute__((noinline))
+ldouble result;
+
+#define DEF_MATH_FUNC(prefix, name) NI void prefix##name##f (int x) \
+{ \
+ float yy = name##f ((float) x); \
+ STORE_RESULT; \
+} \
+NI void prefix##name (int x) \
+{ \
+ double yy = name ((double)x); \
+ STORE_RESULT; \
+} \
+NI void prefix##name##l (int x) \
+{ \
+ ldouble yy = name##l ((ldouble)x); \
+ STORE_RESULT; \
+}
+
+#undef STORE_RESULT
+#define STORE_RESULT result = yy
+DEF_MATH_FUNC (m,pow10)
+DEF_MATH_FUNC (m,exp10)
+DEF_MATH_FUNC (m,exp2)
+DEF_MATH_FUNC (m,exp)
+DEF_MATH_FUNC (m,expm1)
+DEF_MATH_FUNC (m,cosh)
+DEF_MATH_FUNC (m,sinh)
+DEF_MATH_FUNC (m,acos)
+DEF_MATH_FUNC (m,asin)
+DEF_MATH_FUNC (m,acosh)
+DEF_MATH_FUNC (m,atanh)
+DEF_MATH_FUNC (m,log)
+DEF_MATH_FUNC (m,log2)
+DEF_MATH_FUNC (m,log10)
+DEF_MATH_FUNC (m,log1p)
+DEF_MATH_FUNC (m,sqrt)
+
+#undef STORE_RESULT
+#define STORE_RESULT
+DEF_MATH_FUNC (o,pow10)
+DEF_MATH_FUNC (o,exp10)
+DEF_MATH_FUNC (o,exp2)
+DEF_MATH_FUNC (o,exp)
+DEF_MATH_FUNC (o,expm1)
+DEF_MATH_FUNC (o,cosh)
+DEF_MATH_FUNC (o,sinh)
+DEF_MATH_FUNC (o,acos)
+DEF_MATH_FUNC (o,asin)
+DEF_MATH_FUNC (o,acosh)
+DEF_MATH_FUNC (o,atanh)
+DEF_MATH_FUNC (o,log)
+DEF_MATH_FUNC (o,log2)
+DEF_MATH_FUNC (o,log10)
+DEF_MATH_FUNC (o,log1p)
+DEF_MATH_FUNC (o,sqrt)
+
+#define INIT_MATH_FUNC(prefix, name, lb, ub) { prefix##name##f, #name "f", 0, 0, lb, ub }, \
+{ prefix##name, #name, 0, 0, lb, ub }, \
+{ prefix##name##l, #name "l" , 0, 0, lb, ub },
+
+struct MathFuncInfo
+{
+ FP math_func;
+ const char* name;
+ int lb;
+ int ub;
+ bool has_lb;
+ bool has_ub;
+} math_func_arr[] = {
+ INIT_MATH_FUNC (m,pow10, false, true)
+ INIT_MATH_FUNC (m,exp10, false, true)
+ INIT_MATH_FUNC (m,exp2, false, true)
+ INIT_MATH_FUNC (m,expm1, false, true)
+ INIT_MATH_FUNC (m,exp, false, true)
+ INIT_MATH_FUNC (m,cosh, true, true)
+ INIT_MATH_FUNC (m,sinh, true, true)
+ INIT_MATH_FUNC (m,acos, true, true)
+ INIT_MATH_FUNC (m,asin, true, true)
+ INIT_MATH_FUNC (m,acosh, true, false)
+ INIT_MATH_FUNC (m,atanh, true, true)
+ INIT_MATH_FUNC (m,log10, true, false)
+ INIT_MATH_FUNC (m,log, true, false)
+ INIT_MATH_FUNC (m,log2, true, false)
+ INIT_MATH_FUNC (m,log1p, true, false)
+ INIT_MATH_FUNC (m,sqrt, true, false)
+ { 0, 0, 0, 0, 0, 0} };
+
+MathFuncInfo opt_math_func_arr[] =
+{ INIT_MATH_FUNC (o,pow10, false, true)
+ INIT_MATH_FUNC (o,exp10, false, true)
+ INIT_MATH_FUNC (o,exp2, false, true)
+ INIT_MATH_FUNC (o,expm1, false, true)
+ INIT_MATH_FUNC (o,exp, false, true)
+ INIT_MATH_FUNC (o,cosh, true, true)
+ INIT_MATH_FUNC (o,sinh, true, true)
+ INIT_MATH_FUNC (o,acos, true, true)
+ INIT_MATH_FUNC (o,asin, true, true)
+ INIT_MATH_FUNC (o,acosh, true, false)
+ INIT_MATH_FUNC (o,atanh, true, true)
+ INIT_MATH_FUNC (o,log10, true, false)
+ INIT_MATH_FUNC (o,log, true, false)
+ INIT_MATH_FUNC (o,log2, true, false)
+ INIT_MATH_FUNC (o,log1p, true, false)
+ INIT_MATH_FUNC (o,sqrt, true, false)
+ { 0, 0, 0, 0, 0, 0} };
+
+int test (MathFuncInfo* math_func_infos)
+{
+ int i = 0;
+ int te = 0;
+
+ for (i = 0; math_func_infos[i].math_func; i++)
+ {
+ MathFuncInfo& info = math_func_infos[i];
+ int j;
+ if (info.has_lb)
+ {
+ for (j = 0; j > -500000; j--)
+ {
+
+ errno = 0;
+ info.math_func (j);
+ if (errno != 0)
+ {
+ te++;
+ info.lb = j ;
+ break;
+ }
+ }
+ }
+ if (info.has_ub)
+ {
+ for (j = 0; j < 500000; j++)
+ {
+ errno = 0;
+ info.math_func (j);
+ if (errno != 0)
+ {
+ te++;
+ info.ub = j ;
+ break;
+ }
+ }
+ }
+ }
+ return te;
+}
+
+int main()
+{
+ int te1, te2;
+
+ te1 = test (&math_func_arr[0]);
+ te2 = test (&opt_math_func_arr[0]);
+
+ // Now examine the result
+ int i = 0;
+ int errcnt = 0;
+ for (i = 0; math_func_arr[i].math_func; i++)
+ {
+ MathFuncInfo& info = math_func_arr[i];
+ MathFuncInfo& opt_info = opt_math_func_arr[i];
+#ifdef DEBUG
+ fprintf (stderr," %s: lb = %d, ub = %d: lb_opt = %d, ub_opt = %d\n",
+ info.name, info.lb, info.ub, opt_info.lb, opt_info.ub);
+#endif
+ if (info.lb != opt_info.lb) errcnt ++;
+ if (info.ub != opt_info.ub) errcnt ++;
+ }
+ if (errcnt) abort();
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/gomp/for-16.C b/gcc/testsuite/g++.dg/gomp/for-16.C
index 76231751f70..dbbed8fe505 100644
--- a/gcc/testsuite/g++.dg/gomp/for-16.C
+++ b/gcc/testsuite/g++.dg/gomp/for-16.C
@@ -4,7 +4,7 @@ template<typename T>
void foo ()
{
#pragma omp for
- for (unsigned int i = 0; i < 10; i++); // { dg-warning "is unsigned" }
+ for (unsigned int i = 0; i < 10; i++);
#pragma omp for
for (int j = 0; ; j++); // { dg-error "missing controlling predicate" }
#pragma omp for
@@ -12,8 +12,7 @@ void foo ()
#pragma omp for
for (int l = 0; l < 10; ); // { dg-error "missing increment expression" }
#pragma omp for
- for (int m = 0; m < 10; m *= 3); // Error here is emitted only during
- // instantiation
+ for (int m = 0; m < 10; m *= 3); // { dg-error "invalid increment expression" }
#pragma omp for
for (T n = 0; ; n++); // { dg-error "missing controlling predicate" }
#pragma omp for
diff --git a/gcc/testsuite/g++.dg/gomp/pr27499.C b/gcc/testsuite/g++.dg/gomp/pr27499.C
index 293ef8fb08e..4e0d5b1a5b8 100644
--- a/gcc/testsuite/g++.dg/gomp/pr27499.C
+++ b/gcc/testsuite/g++.dg/gomp/pr27499.C
@@ -8,6 +8,6 @@ foo (void)
{
unsigned int i;
#pragma omp for
- for (i = 0; i < 64; ++i) // { dg-warning "is unsigned" }
+ for (i = 0; i < 64; ++i)
bar (i);
}
diff --git a/gcc/testsuite/g++.dg/gomp/pr34607.C b/gcc/testsuite/g++.dg/gomp/pr34607.C
index 1dbba4a7414..f032aa45d50 100644
--- a/gcc/testsuite/g++.dg/gomp/pr34607.C
+++ b/gcc/testsuite/g++.dg/gomp/pr34607.C
@@ -13,6 +13,6 @@ foo ()
;
T j; // { dg-error "was not declared|expected" }
#pragma omp for
- for (j = 1; j < 3; j++) // { dg-error "was not declared" }
- ; // { dg-error "expected" }
+ for (j = 1; j < 3; j++) // { dg-error "was not declared|expected" }
+ ;
}
diff --git a/gcc/testsuite/g++.dg/gomp/predetermined-1.C b/gcc/testsuite/g++.dg/gomp/predetermined-1.C
new file mode 100644
index 00000000000..dd09855de97
--- /dev/null
+++ b/gcc/testsuite/g++.dg/gomp/predetermined-1.C
@@ -0,0 +1,33 @@
+// { dg-do compile }
+// { dg-options "-fopenmp" }
+
+struct A { int i; A (); ~A (); };
+struct B { int i; };
+struct C { int i; mutable int j; C (); ~C (); };
+
+template <typename T> void bar (const T *);
+
+const A a;
+const C c;
+
+const A foo (const A d, const C e)
+{
+ const A f;
+ const B b = { 4 };
+ A g;
+ #pragma omp parallel default (none)
+ bar (&a);
+ #pragma omp parallel default (none)
+ bar (&b);
+ #pragma omp parallel default (none) // { dg-error "enclosing parallel" }
+ bar (&c); // { dg-error "not specified" }
+ #pragma omp parallel default (none)
+ bar (&d);
+ #pragma omp parallel default (none) // { dg-error "enclosing parallel" }
+ bar (&e); // { dg-error "not specified" }
+ #pragma omp parallel default (none)
+ bar (&f);
+ #pragma omp parallel default (none) // { dg-error "enclosing parallel" }
+ bar (&g); // { dg-error "not specified" }
+ return f;
+}
diff --git a/gcc/testsuite/g++.dg/gomp/task-1.C b/gcc/testsuite/g++.dg/gomp/task-1.C
new file mode 100644
index 00000000000..0000e6f1fa9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/gomp/task-1.C
@@ -0,0 +1,17 @@
+// { dg-do compile }
+// { dg-options "-fopenmp" }
+
+struct A { A (); ~A (); int i; };
+
+template <typename T> void bar (T &);
+
+const A a;
+
+void foo (A &p)
+{
+ const A &q = a;
+#pragma omp task // { dg-error "has reference type" }
+ bar (p);
+#pragma omp task // { dg-error "has reference type" }
+ bar (q);
+}
diff --git a/gcc/testsuite/g++.dg/gomp/tls-3.C b/gcc/testsuite/g++.dg/gomp/tls-3.C
index 96baec9453e..04f6bbea408 100644
--- a/gcc/testsuite/g++.dg/gomp/tls-3.C
+++ b/gcc/testsuite/g++.dg/gomp/tls-3.C
@@ -13,9 +13,11 @@ namespace N
struct S
{
static int s;
-#pragma omp thr (s) // { dg-error "is not file, namespace or block scope" }
+#pragma omp thr (s)
};
+int S::s = 5;
+
int
foo ()
{
diff --git a/gcc/testsuite/g++.dg/gomp/tls-4.C b/gcc/testsuite/g++.dg/gomp/tls-4.C
new file mode 100644
index 00000000000..e4377c5e4c0
--- /dev/null
+++ b/gcc/testsuite/g++.dg/gomp/tls-4.C
@@ -0,0 +1,16 @@
+// { dg-do compile }
+// { dg-require-effective-target tls_native }
+
+#define thr threadprivate
+
+struct S
+{
+ static int s;
+};
+struct T : public S
+{
+ static int t;
+#pragma omp thr (s) // { dg-error "directive not in" }
+};
+
+#pragma omp thr (T::t) // { dg-error "directive not in" }
diff --git a/gcc/testsuite/gcc.c-torture/execute/20080529-1.c b/gcc/testsuite/gcc.c-torture/execute/20080529-1.c
new file mode 100644
index 00000000000..cd429808496
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/20080529-1.c
@@ -0,0 +1,17 @@
+/* PR target/36362 */
+
+extern void abort (void);
+
+int
+test (float c)
+{
+ return !!c * 7LL == 0;
+}
+
+int
+main (void)
+{
+ if (test (1.0f) != 0)
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/cdce1.c b/gcc/testsuite/gcc.dg/cdce1.c
new file mode 100644
index 00000000000..26d38ae40f5
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/cdce1.c
@@ -0,0 +1,80 @@
+/* { dg-do run { target { ! "*-*-darwin" } } } */
+/* { dg-options "-O2 -fmath-errno -fdump-tree-cdce-details -lm" } */
+/* { dg-final { scan-tree-dump "cdce1.c:16: note: function call is shrink-wrapped into error conditions\." "cdce" } } */
+/* { dg-final { cleanup-tree-dump "cdce" } } */
+
+
+#include <stdlib.h>
+#include <math.h>
+#include <errno.h>
+int total_err_count = 0;
+double foo_opt (int x, double y) __attribute__((noinline));
+double foo_opt (int x, double y)
+{
+ double yy = 0;
+ errno = 0;
+ yy = pow (x, y * y);
+ return 0;
+}
+
+double foo (int x, double y) __attribute__((noinline));
+double foo (int x, double y)
+{
+ double yy = 0;
+ errno = 0;
+ yy = pow (x, y * y);
+ return yy;
+}
+
+int test (double (*fp)(int x, double y))
+{
+ int i,x;
+
+ x = 127;
+ for (i = 30; i < 300; i++)
+ {
+ fp (x, i);
+ if (errno)
+ total_err_count ++;
+ }
+
+ x = -300;
+ for (i = 100; i < 300; i++)
+ {
+ fp (x, i);
+ if (errno)
+ total_err_count ++;
+ }
+
+ x = 65577;
+ for (i = 60; i < 200; i++)
+ {
+ fp (x, i);
+ if (errno)
+ total_err_count ++;
+ }
+
+ x = 65577 * 127;
+ for (i = 1; i < 100; i++)
+ {
+ fp (x, i);
+ if (errno)
+ total_err_count ++;
+ }
+
+ return total_err_count;
+}
+
+int main ()
+{
+ int en1, en2;
+ total_err_count = 0;
+ en1 = test (foo_opt);
+ total_err_count = 0;
+ en2 = test (foo);
+
+ if (en1 != en2)
+ abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/cdce2.c b/gcc/testsuite/gcc.dg/cdce2.c
new file mode 100644
index 00000000000..ba9e4962050
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/cdce2.c
@@ -0,0 +1,55 @@
+/* { dg-do run { target { ! "*-*-darwin" } } } */
+/* { dg-options "-O2 -fmath-errno -fdump-tree-cdce-details -lm" } */
+/* { dg-final { scan-tree-dump "cdce2.c:16: note: function call is shrink-wrapped into error conditions\." "cdce" } }*/
+/* { dg-final { cleanup-tree-dump "cdce" } } */
+
+
+#include <stdlib.h>
+#include <math.h>
+#include <errno.h>
+int total_err_count = 0;
+double foo_opt (double y) __attribute__((noinline));
+double foo_opt (double y)
+{
+ double yy = 0;
+ errno = 0;
+ yy = log (y);
+ return 0;
+}
+
+double foo (double y) __attribute__((noinline));
+double foo (double y)
+{
+ double yy = 0;
+ errno = 0;
+ yy = log (y);
+ return yy;
+}
+
+int test (double (*fp) (double y))
+{
+ int i,x;
+ for (i = -100; i < 100; i++)
+ {
+ fp (i);
+ if (errno)
+ total_err_count ++;
+ }
+
+ return total_err_count;
+}
+
+int main ()
+{
+ int en1, en2;
+ double yy;
+ total_err_count = 0;
+ en1 = test (foo_opt);
+ total_err_count = 0;
+ en2 = test (foo);
+
+ if (en1 != en2)
+ abort();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/gomp/appendix-a/a.35.4.c b/gcc/testsuite/gcc.dg/gomp/appendix-a/a.35.4.c
index 88824031cc2..d7579e6e735 100644
--- a/gcc/testsuite/gcc.dg/gomp/appendix-a/a.35.4.c
+++ b/gcc/testsuite/gcc.dg/gomp/appendix-a/a.35.4.c
@@ -11,7 +11,7 @@ wrong4 (int n)
{
work (i, 0);
/* incorrect nesting of barrier region in a loop region */
-#pragma omp barrier
+#pragma omp barrier /* { dg-warning "may not be closely nested" } */
work (i, 1);
}
}
diff --git a/gcc/testsuite/gcc.dg/gomp/appendix-a/a.35.6.c b/gcc/testsuite/gcc.dg/gomp/appendix-a/a.35.6.c
index 6385db30897..ac850e5410a 100644
--- a/gcc/testsuite/gcc.dg/gomp/appendix-a/a.35.6.c
+++ b/gcc/testsuite/gcc.dg/gomp/appendix-a/a.35.6.c
@@ -9,7 +9,7 @@ wrong6 (int n)
{
work (n, 0);
/* incorrect nesting of barrier region in a single region */
-#pragma omp barrier
+#pragma omp barrier /* { dg-warning "may not be closely nested" } */
work (n, 1);
}
}
diff --git a/gcc/testsuite/gcc.dg/gomp/collapse-1.c b/gcc/testsuite/gcc.dg/gomp/collapse-1.c
new file mode 100644
index 00000000000..89b76bb669c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/gomp/collapse-1.c
@@ -0,0 +1,92 @@
+/* { dg-do compile } */
+/* { dg-options "-fopenmp" } */
+
+int i, j, k;
+extern int foo (void);
+
+void
+f1 (void)
+{
+ #pragma omp for collapse (2)
+ for (i = 0; i < 5; i++)
+ ; /* { dg-error "not enough perfectly nested" } */
+ {
+ for (j = 0; j < 5; j++)
+ ;
+ }
+}
+
+void
+f2 (void)
+{
+ #pragma omp for collapse (2)
+ for (i = 0; i < 5; i++)
+ {
+ {
+ {
+ for (j = 0; j < 5; j++)
+ {
+ }
+ }
+ }
+ }
+}
+
+void
+f3 (void)
+{
+ #pragma omp for collapse (2)
+ for (i = 0; i < 5; i++)
+ {
+ int k = foo (); /* { dg-error "not enough perfectly nested" } */
+ {
+ {
+ for (j = 0; j < 5; j++)
+ {
+ }
+ }
+ }
+ }
+}
+
+void
+f4 (void)
+{
+ #pragma omp for collapse (2)
+ for (i = 0; i < 5; i++)
+ {
+ {
+ for (j = 0; j < 5; j++)
+ ;
+ foo (); /* { dg-error "collapsed loops not perfectly nested before" } */
+ }
+ }
+}
+
+void
+f5 (void)
+{
+ #pragma omp for collapse (2)
+ for (i = 0; i < 5; i++)
+ {
+ {
+ for (j = 0; j < 5; j++)
+ ;
+ }
+ foo (); /* { dg-error "collapsed loops not perfectly nested before" } */
+ }
+}
+
+void
+f6 (void)
+{
+ #pragma omp for collapse (2)
+ for (i = 0; i < 5; i++)
+ {
+ {
+ for (j = 0; j < 5; j++)
+ ;
+ }
+ }
+ foo ();
+}
diff --git a/gcc/testsuite/gcc.dg/gomp/nesting-1.c b/gcc/testsuite/gcc.dg/gomp/nesting-1.c
new file mode 100644
index 00000000000..6f27b907e6b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/gomp/nesting-1.c
@@ -0,0 +1,198 @@
+/* { dg-do compile } */
+/* { dg-options "-fopenmp" } */
+
+void
+f1 (void)
+{
+ int i, j;
+ #pragma omp for
+ for (i = 0; i < 3; i++)
+ {
+ #pragma omp for /* { dg-warning "may not be closely nested" } */
+ for (j = 0; j < 3; j++)
+ ;
+ #pragma omp sections /* { dg-warning "may not be closely nested" } */
+ {
+ ;
+ #pragma omp section
+ ;
+ }
+ #pragma omp single /* { dg-warning "may not be closely nested" } */
+ ;
+ #pragma omp master /* { dg-warning "may not be closely nested" } */
+ ;
+ #pragma omp barrier /* { dg-warning "may not be closely nested" } */
+ }
+ #pragma omp sections
+ {
+ #pragma omp for /* { dg-warning "may not be closely nested" } */
+ for (j = 0; j < 3; j++)
+ ;
+ #pragma omp sections /* { dg-warning "may not be closely nested" } */
+ {
+ ;
+ #pragma omp section
+ ;
+ }
+ #pragma omp single /* { dg-warning "may not be closely nested" } */
+ ;
+ #pragma omp master /* { dg-warning "may not be closely nested" } */
+ ;
+ #pragma omp section
+ ;
+ }
+ #pragma omp single
+ {
+ #pragma omp for /* { dg-warning "may not be closely nested" } */
+ for (j = 0; j < 3; j++)
+ ;
+ #pragma omp sections /* { dg-warning "may not be closely nested" } */
+ {
+ ;
+ #pragma omp section
+ ;
+ }
+ #pragma omp single /* { dg-warning "may not be closely nested" } */
+ ;
+ #pragma omp master /* { dg-warning "may not be closely nested" } */
+ ;
+ #pragma omp barrier /* { dg-warning "may not be closely nested" } */
+ }
+ #pragma omp master
+ {
+ #pragma omp for /* { dg-warning "may not be closely nested" } */
+ for (j = 0; j < 3; j++)
+ ;
+ #pragma omp sections /* { dg-warning "may not be closely nested" } */
+ {
+ ;
+ #pragma omp section
+ ;
+ }
+ #pragma omp single /* { dg-warning "may not be closely nested" } */
+ ;
+ #pragma omp master
+ ;
+ #pragma omp barrier /* { dg-warning "may not be closely nested" } */
+ }
+ #pragma omp task
+ {
+ #pragma omp for /* { dg-warning "may not be closely nested" } */
+ for (j = 0; j < 3; j++)
+ ;
+ #pragma omp sections /* { dg-warning "may not be closely nested" } */
+ {
+ ;
+ #pragma omp section
+ ;
+ }
+ #pragma omp single /* { dg-warning "may not be closely nested" } */
+ ;
+ #pragma omp master /* { dg-warning "may not be closely nested" } */
+ ;
+ #pragma omp barrier /* { dg-warning "may not be closely nested" } */
+ }
+ #pragma omp parallel
+ {
+ #pragma omp for
+ for (j = 0; j < 3; j++)
+ ;
+ #pragma omp sections
+ {
+ ;
+ #pragma omp section
+ ;
+ }
+ #pragma omp single
+ ;
+ #pragma omp master
+ ;
+ #pragma omp barrier
+ }
+}
+
+void
+f2 (void)
+{
+ int i, j;
+ #pragma omp ordered
+ {
+ #pragma omp for /* { dg-warning "may not be closely nested" } */
+ for (j = 0; j < 3; j++)
+ ;
+ #pragma omp sections /* { dg-warning "may not be closely nested" } */
+ {
+ ;
+ #pragma omp section
+ ;
+ }
+ #pragma omp single /* { dg-warning "may not be closely nested" } */
+ ;
+ #pragma omp master
+ ;
+ #pragma omp barrier /* { dg-warning "may not be closely nested" } */
+ }
+}
+
+void
+f3 (void)
+{
+ #pragma omp critical
+ {
+ #pragma omp ordered /* { dg-warning "may not be closely nested" } */
+ ;
+ }
+}
+
+void
+f4 (void)
+{
+ #pragma omp task
+ {
+ #pragma omp ordered /* { dg-warning "may not be closely nested" } */
+ ;
+ }
+}
+
+void
+f5 (void)
+{
+ int i;
+ #pragma omp for
+ for (i = 0; i < 10; i++)
+ {
+ #pragma omp ordered /* { dg-warning "must be closely nested" } */
+ ;
+ }
+ #pragma omp for ordered
+ for (i = 0; i < 10; i++)
+ {
+ #pragma omp ordered
+ ;
+ }
+}
+
+void
+f6 (void)
+{
+ #pragma omp critical (foo)
+ #pragma omp critical (bar)
+ ;
+ #pragma omp critical
+ #pragma omp critical (baz)
+ ;
+}
+
+void
+f7 (void)
+{
+ #pragma omp critical (foo2)
+ #pragma omp critical
+ ;
+ #pragma omp critical (bar)
+ #pragma omp critical (bar) /* { dg-warning "may not be nested" } */
+ ;
+ #pragma omp critical
+ #pragma omp critical /* { dg-warning "may not be nested" } */
+ ;
+}
diff --git a/gcc/testsuite/gcc.dg/gomp/pr27499.c b/gcc/testsuite/gcc.dg/gomp/pr27499.c
index e8c1db496d7..0de2e0686f1 100644
--- a/gcc/testsuite/gcc.dg/gomp/pr27499.c
+++ b/gcc/testsuite/gcc.dg/gomp/pr27499.c
@@ -8,6 +8,6 @@ foo (void)
{
unsigned int i;
#pragma omp parallel for
- for (i = 0; i < 64; ++i) /* { dg-warning "is unsigned" } */
+ for (i = 0; i < 64; ++i)
bar (i);
}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/alias-18.c b/gcc/testsuite/gcc.dg/tree-ssa/alias-18.c
index 9ef3f2aadb7..84d884effb5 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/alias-18.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/alias-18.c
@@ -78,10 +78,10 @@ int test8 (struct A *p, int *q)
/* { dg-final { scan-tree-dump "with 0" "fre" } } */
/* { dg-final { scan-tree-dump "with 1" "fre" { xfail *-*-* } } } */
-/* { dg-final { scan-tree-dump "with 3" "fre" } } */
+/* { dg-final { scan-tree-dump "with 3" "fre" { xfail *-*-* } } } */
/* { dg-final { scan-tree-dump "with 4" "fre" } } */
/* { dg-final { scan-tree-dump "with 5" "fre" } } */
-/* { dg-final { scan-tree-dump "with 8" "fre" } } */
+/* { dg-final { scan-tree-dump "with 8" "fre" { xfail *-*-* } } } */
/* { dg-final { scan-tree-dump-not "return 2;" "optimized" } } */
/* { dg-final { scan-tree-dump-not "return 6;" "optimized" } } */
/* { dg-final { scan-tree-dump-not "return 7;" "optimized" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr36438.c b/gcc/testsuite/gcc.target/i386/pr36438.c
new file mode 100644
index 00000000000..38376b8c903
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr36438.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mmmx" } */
+
+#include <mmintrin.h>
+
+extern __m64 SetS16 (unsigned short, unsigned short,
+ unsigned short, unsigned short);
+
+void foo(__m64* dest)
+{
+ __m64 mask = SetS16 (0x00FF, 0xFF00, 0x0000, 0x00FF);
+
+ mask = _mm_slli_si64(mask, 8);
+ mask = _mm_slli_si64(mask, 8);
+
+ *dest = mask;
+
+ _mm_empty ();
+}
diff --git a/gcc/testsuite/gcc.target/powerpc/dfp-dd.c b/gcc/testsuite/gcc.target/powerpc/dfp-dd.c
index b3293187544..85da9070531 100644
--- a/gcc/testsuite/gcc.target/powerpc/dfp-dd.c
+++ b/gcc/testsuite/gcc.target/powerpc/dfp-dd.c
@@ -1,6 +1,6 @@
/* Test generation of DFP instructions for POWER6. */
/* Origin: Janis Johnson <janis187@us.ibm.com> */
-/* { dg-do compile { target powerpc*-*-linux* } } */
+/* { dg-do compile { target { powerpc*-*-linux* && powerpc_fprs } } } */
/* { dg-options "-std=gnu99 -mcpu=power6" } */
/* { dg-final { scan-assembler "dadd" } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/dfp-td.c b/gcc/testsuite/gcc.target/powerpc/dfp-td.c
index f66bbd86c87..752ba88743f 100644
--- a/gcc/testsuite/gcc.target/powerpc/dfp-td.c
+++ b/gcc/testsuite/gcc.target/powerpc/dfp-td.c
@@ -1,6 +1,6 @@
/* Test generation of DFP instructions for POWER6. */
/* Origin: Janis Johnson <janis187@us.ibm.com> */
-/* { dg-do compile { target powerpc*-*-linux* } } */
+/* { dg-do compile { target { powerpc*-*-linux* && powerpc_fprs } } } */
/* { dg-options "-std=gnu99 -mcpu=power6" } */
/* { dg-final { scan-assembler "daddq" } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/ppc32-abi-dfp-1.c b/gcc/testsuite/gcc.target/powerpc/ppc32-abi-dfp-1.c
index 82dce4e3fd6..1b836d72795 100644
--- a/gcc/testsuite/gcc.target/powerpc/ppc32-abi-dfp-1.c
+++ b/gcc/testsuite/gcc.target/powerpc/ppc32-abi-dfp-1.c
@@ -1,4 +1,4 @@
-/* { dg-do run { target { powerpc*-*-* && { ilp32 && dfprt } } } } */
+/* { dg-do run { target { powerpc_fprs && { ilp32 && dfprt } } } } */
/* { dg-options "-std=gnu99 -O2 -fno-strict-aliasing" } */
/* Testcase to check for ABI compliance of parameter passing
diff --git a/gcc/testsuite/gcc.target/powerpc/ppu-intrinsics.c b/gcc/testsuite/gcc.target/powerpc/ppu-intrinsics.c
index 53b6c709ec2..bfdf7426600 100644
--- a/gcc/testsuite/gcc.target/powerpc/ppu-intrinsics.c
+++ b/gcc/testsuite/gcc.target/powerpc/ppu-intrinsics.c
@@ -1,4 +1,4 @@
-/* { dg-do link { target *-*-linux* } } */
+/* { dg-do link { target { *-*-linux* && powerpc_fprs } } } */
/* { dg-options "-W -Wall -mcpu=cell" } */
/* Test some PPU intrinsics from <ppu_intrinsics.h>. */
diff --git a/gcc/testsuite/gfortran.dg/gomp/appendix-a/a.33.4.f90 b/gcc/testsuite/gfortran.dg/gomp/appendix-a/a.33.4.f90
index 9685b5939c8..7a9e1840b24 100644
--- a/gcc/testsuite/gfortran.dg/gomp/appendix-a/a.33.4.f90
+++ b/gcc/testsuite/gfortran.dg/gomp/appendix-a/a.33.4.f90
@@ -4,7 +4,7 @@
REAL, DIMENSION(:), ALLOCATABLE :: A
REAL, DIMENSION(:), POINTER :: B
ALLOCATE (A(N))
-!$OMP SINGLE ! { dg-error "COPYPRIVATE clause object 'a'" }
+!$OMP SINGLE
ALLOCATE (B(N))
READ (11) A,B
!$OMP END SINGLE COPYPRIVATE(A,B)
diff --git a/gcc/testsuite/gfortran.dg/gomp/appendix-a/a.35.4.f90 b/gcc/testsuite/gfortran.dg/gomp/appendix-a/a.35.4.f90
index e44952263f1..f130dd5f480 100644
--- a/gcc/testsuite/gfortran.dg/gomp/appendix-a/a.35.4.f90
+++ b/gcc/testsuite/gfortran.dg/gomp/appendix-a/a.35.4.f90
@@ -8,7 +8,7 @@
DO I = 1, N
CALL WORK(I, 1)
! incorrect nesting of barrier region in a loop region
-!$OMP BARRIER
+!$OMP BARRIER ! { dg-warning "may not be closely nested" }
CALL WORK(I, 2)
END DO
!$OMP END PARALLEL
diff --git a/gcc/testsuite/gfortran.dg/gomp/appendix-a/a.35.6.f90 b/gcc/testsuite/gfortran.dg/gomp/appendix-a/a.35.6.f90
index 0488537dd10..62ba245236b 100644
--- a/gcc/testsuite/gfortran.dg/gomp/appendix-a/a.35.6.f90
+++ b/gcc/testsuite/gfortran.dg/gomp/appendix-a/a.35.6.f90
@@ -6,7 +6,7 @@
!$OMP SINGLE
CALL WORK(N,1)
! incorrect nesting of barrier region in a single region
-!$OMP BARRIER
+!$OMP BARRIER ! { dg-warning "may not be closely nested" }
CALL WORK(N,2)
!$OMP END SINGLE
!$OMP END PARALLEL
diff --git a/gcc/testsuite/gfortran.dg/gomp/collapse1.f90 b/gcc/testsuite/gfortran.dg/gomp/collapse1.f90
new file mode 100644
index 00000000000..f16a780ad99
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/collapse1.f90
@@ -0,0 +1,57 @@
+! { dg-do compile }
+! { dg-options "-fopenmp" }
+
+subroutine collapse1
+ integer :: i, j, k, a(1:3, 4:6, 5:7)
+ real :: r
+ logical :: l
+ integer, save :: thr
+ !$omp threadprivate (thr)
+ l = .false.
+ a(:, :, :) = 0
+ !$omp parallel do collapse(4) schedule(static, 4) ! { dg-error "not enough DO loops for collapsed" }
+ do i = 1, 3
+ do j = 4, 6
+ do k = 5, 7
+ a(i, j, k) = i + j + k
+ end do
+ end do
+ end do
+ !$omp parallel do collapse(2)
+ do i = 1, 5, 2
+ do j = i + 1, 7, i ! { dg-error "collapsed loops don.t form rectangular iteration space" }
+ end do
+ end do
+ !$omp parallel do collapse(2) shared(j)
+ do i = 1, 3
+ do j = 4, 6 ! { dg-error "iteration variable present on clause other than PRIVATE or LASTPRIVATE" }
+ end do
+ end do
+ !$omp parallel do collapse(2)
+ do i = 1, 3
+ do j = 4, 6
+ end do
+ k = 4
+ end do
+ !$omp parallel do collapse(2)
+ do i = 1, 3
+ do ! { dg-error "cannot be a DO WHILE or DO without loop control" }
+ end do
+ end do
+ !$omp parallel do collapse(2)
+ do i = 1, 3
+ do r = 4, 6 ! { dg-warning "must be integer" }
+ end do
+ end do
+end subroutine collapse1
+
+subroutine collapse1_2
+ integer :: i
+ !$omp parallel do collapse(2)
+ do i = -6, 6 ! { dg-error "cannot be redefined inside loop beginning" }
+ do i = 4, 6 ! { dg-error "collapsed loops don.t form rectangular iteration space|cannot be redefined" }
+ end do
+ end do
+end subroutine collapse1_2
+
+! { dg-error "iteration variable must be of type integer" "integer" { target *-*-* } 43 }
diff --git a/gcc/testsuite/gfortran.dg/gomp/omp_parse1.f90 b/gcc/testsuite/gfortran.dg/gomp/omp_parse1.f90
index d4137cd11ec..3ab43670762 100644
--- a/gcc/testsuite/gfortran.dg/gomp/omp_parse1.f90
+++ b/gcc/testsuite/gfortran.dg/gomp/omp_parse1.f90
@@ -14,10 +14,6 @@ call bar
!$omp rallel
call bar
!$omp end parallel
-! Non-continuation !$omp must be followed by space, and my reading
-! doesn't seem to allow tab there. So such lines should be completely
-! ignored.
-!$omp strange ! { dg-warning "starts a commented line" }
end
! { dg-final { scan-tree-dump-times "pragma omp parallel" 3 "omplower" } }
diff --git a/gcc/testsuite/gfortran.dg/gomp/reduction1.f90 b/gcc/testsuite/gfortran.dg/gomp/reduction1.f90
index 108e5dc4155..9c55d173c11 100644
--- a/gcc/testsuite/gfortran.dg/gomp/reduction1.f90
+++ b/gcc/testsuite/gfortran.dg/gomp/reduction1.f90
@@ -56,7 +56,7 @@ common /blk/ i1
!$omp end parallel
!$omp parallel reduction (*:p1) ! { dg-error "POINTER object" }
!$omp end parallel
-!$omp parallel reduction (-:aa1) ! { dg-error "is ALLOCATABLE" }
+!$omp parallel reduction (-:aa1)
!$omp end parallel
!$omp parallel reduction (*:ia1) ! { dg-error "Assumed size" }
!$omp end parallel
diff --git a/gcc/testsuite/gfortran.dg/gomp/sharing-3.f90 b/gcc/testsuite/gfortran.dg/gomp/sharing-3.f90
new file mode 100644
index 00000000000..5c1581454b2
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/sharing-3.f90
@@ -0,0 +1,37 @@
+! { dg-do compile }
+! { dg-options "-fopenmp" }
+
+subroutine foo (vara, varb, varc, vard, n)
+ integer :: n, vara(n), varb(*), varc(:), vard(6), vare(6)
+ vare(:) = 0
+ !$omp parallel default(none) shared(vara, varb, varc, vard, vare)
+ !$omp master
+ vara(1) = 1
+ varb(1) = 1
+ varc(1) = 1
+ vard(1) = 1
+ vare(1) = 1
+ !$omp end master
+ !$omp end parallel
+ !$omp parallel default(none) private(vara, varc, vard, vare)
+ vara(1) = 1
+ varc(1) = 1
+ vard(1) = 1
+ vare(1) = 1
+ !$omp end parallel
+ !$omp parallel default(none) firstprivate(vara, varc, vard, vare)
+ vara(1) = 1
+ varc(1) = 1
+ vard(1) = 1
+ vare(1) = 1
+ !$omp end parallel
+ !$omp parallel default(none) ! { dg-error "enclosing parallel" }
+ !$omp master
+ vara(1) = 1 ! { dg-error "not specified" }
+ varb(1) = 1 ! Assumed-size is predetermined
+ varc(1) = 1 ! { dg-error "not specified" "" { xfail *-*-* } }
+ vard(1) = 1 ! { dg-error "not specified" }
+ vare(1) = 1 ! { dg-error "not specified" }
+ !$omp end master
+ !$omp end parallel
+end subroutine foo
diff --git a/gcc/testsuite/gfortran.dg/proc_decl_2.f90 b/gcc/testsuite/gfortran.dg/proc_decl_2.f90
index 6edc6bd42b3..a16b4db5f01 100644
--- a/gcc/testsuite/gfortran.dg/proc_decl_2.f90
+++ b/gcc/testsuite/gfortran.dg/proc_decl_2.f90
@@ -4,16 +4,27 @@
module m
+ use ISO_C_BINDING
+
abstract interface
subroutine csub() bind(c)
end subroutine csub
end interface
+ integer, parameter :: ckind = C_FLOAT_COMPLEX
+ abstract interface
+ function stub() bind(C)
+ import ckind
+ complex(ckind) stub
+ end function
+ end interface
+
procedure():: mp1
procedure(real), private:: mp2
procedure(mfun), public:: mp3
procedure(csub), public, bind(c) :: c, d
procedure(csub), public, bind(c, name="myB") :: b
+ procedure(stub), bind(C) :: e
contains
@@ -32,6 +43,15 @@ contains
procedure(a), optional :: b
end subroutine bar
+ subroutine bar2(x)
+ abstract interface
+ character function abs_fun()
+ end function
+ end interface
+ procedure(abs_fun):: x
+ end subroutine
+
+
end module
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 3a8a08fe1e3..d158ef0cfc1 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -1292,7 +1292,11 @@ proc check_effective_target_powerpc_altivec_ok { } {
# Return 1 if this is a PowerPC target that supports SPU.
proc check_effective_target_powerpc_spu { } {
- return [istarget powerpc*-*-linux*]
+ if [istarget powerpc*-*-linux*] {
+ return [check_effective_target_powerpc_altivec_ok]
+ } else {
+ return 0
+ }
}
# Return 1 if this is a PowerPC target with SPE enabled.
diff --git a/gcc/timevar.def b/gcc/timevar.def
index f0bb384af24..77009560b01 100644
--- a/gcc/timevar.def
+++ b/gcc/timevar.def
@@ -113,6 +113,7 @@ DEFTIMEVAR (TV_TREE_FORWPROP , "tree forward propagate")
DEFTIMEVAR (TV_TREE_PHIPROP , "tree phiprop")
DEFTIMEVAR (TV_TREE_DCE , "tree conservative DCE")
DEFTIMEVAR (TV_TREE_CD_DCE , "tree aggressive DCE")
+DEFTIMEVAR (TV_TREE_CALL_CDCE , "tree builtin call DCE")
DEFTIMEVAR (TV_TREE_DSE , "tree DSE")
DEFTIMEVAR (TV_TREE_MERGE_PHI , "PHI merge")
DEFTIMEVAR (TV_TREE_LOOP , "tree loop optimization")
diff --git a/gcc/tree-call-cdce.c b/gcc/tree-call-cdce.c
new file mode 100644
index 00000000000..4be0cf9bd89
--- /dev/null
+++ b/gcc/tree-call-cdce.c
@@ -0,0 +1,944 @@
+/* Conditional Dead Call Elimination pass for the GNU compiler.
+ Copyright (C) 2008
+ Free Software Foundation, Inc.
+ Contributed by Xinliang David Li <davidxl@google.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "ggc.h"
+
+/* These RTL headers are needed for basic-block.h. */
+#include "rtl.h"
+#include "tm_p.h"
+#include "hard-reg-set.h"
+#include "obstack.h"
+#include "basic-block.h"
+
+#include "tree.h"
+#include "diagnostic.h"
+#include "tree-flow.h"
+#include "tree-gimple.h"
+#include "tree-dump.h"
+#include "tree-pass.h"
+#include "timevar.h"
+#include "flags.h"
+
+
+/* Conditional dead call elimination
+
+ Some builtin functions can set errno on error conditions, but they
+ are otherwise pure. If the result of a call to such a function is
+   not used, the compiler still cannot eliminate the call without
+   powerful interprocedural analysis to prove that errno is not
+ checked. However, if the conditions under which the error occurs
+ are known, the compiler can conditionally dead code eliminate the
+ calls by shrink-wrapping the semi-dead calls into the error condition:
+
+ built_in_call (args)
+ ==>
+ if (error_cond (args))
+ built_in_call (args)
+
+   A simple, concrete example is:
+ log (x); // Mostly dead call
+ ==>
+ if (x < 0)
+ log (x);
+   With this change, the call to log (x) is effectively eliminated, as
+   in the majority of cases log won't be called with x out of
+   range.  The branch is highly predictable, so the branch cost
+   is low.
+
+ Note that library functions are not supposed to clear errno to zero without
+ error. See IEEE Std 1003.1, section 2.3 Error Numbers, and section 7.5:3 of
+ ISO/IEC 9899 (C99).
+
+   The condition wrapping the builtin call is conservatively set to avoid too
+   aggressive (wrong) shrink wrapping.  The optimization is called conditional
+   dead call elimination because the call is eliminated under the condition
+   that the input arguments would not lead to a domain or range error (for
+   instance x <= 0 for a log (x) call); however, the chance that the error
+   condition is hit is very low (those builtin calls which are conditionally
+   dead are usually part of the C++ abstraction penalty exposed after
+   inlining).  */
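+
+/* A concrete sketch of why such a call cannot simply be deleted:
+
+     errno = 0;
+     log (x);                 <- result unused, but log may set errno
+     if (errno != 0)
+       handle_error ();
+
+   Removing the log call outright would change what the errno check
+   observes; guarding it with the error condition preserves that
+   behavior while skipping the call on the common path.  handle_error
+   above is only a placeholder for whatever the user code does.  */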
+
+
+/* A structure for representing the input domain of
+   a function argument in integer form.  If the lower
+   bound is -inf, has_lb is set to false; if the
+   upper bound is +inf, has_ub is false.
+   is_lb_inclusive and is_ub_inclusive are flags
+   indicating whether the lb and ub values are
+   inclusive, respectively.  */
+
+typedef struct input_domain
+{
+ int lb;
+ int ub;
+ bool has_lb;
+ bool has_ub;
+ bool is_lb_inclusive;
+ bool is_ub_inclusive;
+} inp_domain;
+
+static VEC (tree, heap) *cond_dead_built_in_calls;
+
+/* A helper function to construct and return an input
+ domain object. LB is the lower bound, HAS_LB is
+ a boolean flag indicating if the lower bound exists,
+ and LB_INCLUSIVE is a boolean flag indicating if the
+ lower bound is inclusive or not. UB, HAS_UB, and
+   UB_INCLUSIVE have the same meaning, but for the upper
+   bound of the domain.  */
+
+static inp_domain
+get_domain (int lb, bool has_lb, bool lb_inclusive,
+ int ub, bool has_ub, bool ub_inclusive)
+{
+ inp_domain domain;
+ domain.lb = lb;
+ domain.has_lb = has_lb;
+ domain.is_lb_inclusive = lb_inclusive;
+ domain.ub = ub;
+ domain.has_ub = has_ub;
+ domain.is_ub_inclusive = ub_inclusive;
+ return domain;
+}
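+
+/* For example, the no-error domain of acosh, [1, +inf), is described
+   as get_domain (1, true, true, 1, false, false): lower bound 1,
+   present and inclusive; no upper bound, so the remaining upper bound
+   fields are ignored.  See get_no_error_domain below.  */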
+
+/* A helper function to check the target format for the
+ argument type. In this implementation, only IEEE formats
+ are supported. ARG is the call argument to be checked.
+ Returns true if the format is supported. To support other
+ target formats, function get_no_error_domain needs to be
+ enhanced to have range bounds properly computed. Since
+ the check is cheap (very small number of candidates
+ to be checked), the result is not cached for each float type. */
+
+static bool
+check_target_format (tree arg)
+{
+ tree type;
+ enum machine_mode mode;
+ const struct real_format *rfmt;
+
+ type = TREE_TYPE (arg);
+ mode = TYPE_MODE (type);
+ rfmt = REAL_MODE_FORMAT (mode);
+ if ((mode == SFmode && rfmt == &ieee_single_format)
+ || (mode == DFmode && rfmt == &ieee_double_format)
+      /* For long double, we cannot really check XFmode,
+         which is only defined on Intel platforms.
+         Candidate pre-selection using the builtin function
+         code guarantees that we are checking formats
+         for long double modes: double, quad, and extended.  */
+ || (mode != SFmode && mode != DFmode
+ && (rfmt == &ieee_quad_format
+ || rfmt == &ieee_extended_intel_96_format
+ || rfmt == &ieee_extended_intel_128_format
+ || rfmt == &ieee_extended_intel_96_round_53_format)))
+ return true;
+
+ return false;
+}
+
+
+/* A helper function to help select calls to pow that are suitable for
+   conditional DCE transformation.  It looks for pow calls that can be
+   guarded with simple conditions.  Such calls either have constant base
+ values or base values converted from integers. Returns true if
+ the pow call POW_CALL is a candidate. */
+
+/* The maximum integer bit size for base argument of a pow call
+ that is suitable for shrink-wrapping transformation. */
+#define MAX_BASE_INT_BIT_SIZE 32
+
+static bool
+check_pow (tree pow_call)
+{
+ tree base, expn;
+ enum tree_code bc, ec;
+
+ if (call_expr_nargs (pow_call) != 2)
+ return false;
+
+ base = CALL_EXPR_ARG (pow_call, 0);
+ expn = CALL_EXPR_ARG (pow_call, 1);
+
+ if (!check_target_format (expn))
+ return false;
+
+ bc = TREE_CODE (base);
+ ec = TREE_CODE (expn);
+
+  /* Folding candidates are not interesting.
+     We could actually assert that the call has already been folded.  */
+ if (ec == REAL_CST && bc == REAL_CST)
+ return false;
+
+ if (bc == REAL_CST)
+ {
+      /* Only handle a fixed range of constants. */
+ REAL_VALUE_TYPE mv;
+ REAL_VALUE_TYPE bcv = TREE_REAL_CST (base);
+ if (REAL_VALUES_EQUAL (bcv, dconst1))
+ return false;
+ if (REAL_VALUES_LESS (bcv, dconst1))
+ return false;
+ real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, 0, 1);
+ if (REAL_VALUES_LESS (mv, bcv))
+ return false;
+ return true;
+ }
+ else if (bc == SSA_NAME)
+ {
+ tree base_def, base_val, base_val0, base_var, type;
+ int bit_sz;
+
+      /* Only handle cases where the base value is converted
+         from an integer value. */
+ base_def = SSA_NAME_DEF_STMT (base);
+ if (TREE_CODE (base_def) != GIMPLE_MODIFY_STMT)
+ return false;
+
+ base_val = GIMPLE_STMT_OPERAND (base_def, 1);
+
+ if (TREE_CODE (base_val) != FLOAT_EXPR)
+ return false;
+ base_val0 = TREE_OPERAND (base_val, 0);
+
+ base_var = SSA_NAME_VAR (base_val0);
+ if (!DECL_P (base_var))
+ return false;
+
+ type = TREE_TYPE (base_var);
+ if (TREE_CODE (type) != INTEGER_TYPE)
+ return false;
+ bit_sz = TYPE_PRECISION (type);
+ /* If the type of the base is too wide,
+ the resulting shrink wrapping condition
+ will be too conservative. */
+ if (bit_sz > MAX_BASE_INT_BIT_SIZE)
+ return false;
+
+ return true;
+ }
+ else
+ return false;
+}
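+
+/* As an illustration of the selection above: a call such as
+   pow (2.5, y) qualifies (constant base in (1, 256]), as does
+   pow ((double) i, y) where i is an integer variable of at most
+   MAX_BASE_INT_BIT_SIZE bits; a call with an arbitrary floating
+   point base does not.  */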
+
+/* A helper function to help select candidate function calls that are
+   suitable for conditional DCE.  Candidate functions must have a single
+   valid input domain in this implementation, except for pow (see check_pow).
+ Returns true if the function call is a candidate. */
+
+static bool
+check_builtin_call (tree bcall)
+{
+ tree arg;
+
+ arg = CALL_EXPR_ARG (bcall, 0);
+ return check_target_format (arg);
+}
+
+/* A helper function to determine if a builtin function call is a
+ candidate for conditional DCE. Returns true if the builtin call
+ is a candidate. */
+
+static bool
+is_call_dce_candidate (tree call)
+{
+ tree fn;
+ enum built_in_function fnc;
+
+ if (!flag_tree_builtin_call_dce)
+ return false;
+
+ gcc_assert (call && TREE_CODE (call) == CALL_EXPR);
+
+ fn = get_callee_fndecl (call);
+ if (!fn || !DECL_BUILT_IN (fn)
+ || (DECL_BUILT_IN_CLASS (fn) != BUILT_IN_NORMAL))
+ return false;
+
+ fnc = DECL_FUNCTION_CODE (fn);
+ switch (fnc)
+ {
+ /* Trig functions. */
+ CASE_FLT_FN (BUILT_IN_ACOS):
+ CASE_FLT_FN (BUILT_IN_ASIN):
+ /* Hyperbolic functions. */
+ CASE_FLT_FN (BUILT_IN_ACOSH):
+ CASE_FLT_FN (BUILT_IN_ATANH):
+ CASE_FLT_FN (BUILT_IN_COSH):
+ CASE_FLT_FN (BUILT_IN_SINH):
+ /* Log functions. */
+ CASE_FLT_FN (BUILT_IN_LOG):
+ CASE_FLT_FN (BUILT_IN_LOG2):
+ CASE_FLT_FN (BUILT_IN_LOG10):
+ CASE_FLT_FN (BUILT_IN_LOG1P):
+ /* Exp functions. */
+ CASE_FLT_FN (BUILT_IN_EXP):
+ CASE_FLT_FN (BUILT_IN_EXP2):
+ CASE_FLT_FN (BUILT_IN_EXP10):
+ CASE_FLT_FN (BUILT_IN_EXPM1):
+ CASE_FLT_FN (BUILT_IN_POW10):
+ /* Sqrt. */
+ CASE_FLT_FN (BUILT_IN_SQRT):
+ return check_builtin_call (call);
+ /* Special one: two argument pow. */
+ case BUILT_IN_POW:
+ return check_pow (call);
+ default:
+ break;
+ }
+
+ return false;
+}
+
+
+/* A helper function to generate gimple statements for
+ one bound comparison. ARG is the call argument to
+   be compared with the bound, LBUB is the bound value
+   as an integer, TCODE is the tree_code of the comparison,
+ TEMP_NAME1/TEMP_NAME2 are names of the temporaries,
+ CONDS is a vector holding the produced GIMPLE statements,
+ and NCONDS points to the variable holding the number
+ of logical comparisons. CONDS is either empty or
+ a list ended with a null tree. */
+
+static void
+gen_one_condition (tree arg, int lbub,
+ enum tree_code tcode,
+ const char *temp_name1,
+ const char *temp_name2,
+ VEC (tree, heap) *conds,
+ unsigned *nconds)
+{
+ tree lbub_real_cst, lbub_cst, float_type;
+ tree temp, tempn, tempc, tempcn;
+ tree stmt1, stmt2, stmt3;
+
+ float_type = TREE_TYPE (arg);
+ lbub_cst = build_int_cst (integer_type_node, lbub);
+ lbub_real_cst = build_real_from_int_cst (float_type, lbub_cst);
+
+ temp = create_tmp_var (float_type, temp_name1);
+ stmt1 = build_gimple_modify_stmt (temp, arg);
+ tempn = make_ssa_name (temp, stmt1);
+ GIMPLE_STMT_OPERAND (stmt1, 0) = tempn;
+
+ tempc = create_tmp_var (boolean_type_node, temp_name2);
+ stmt2 = build_gimple_modify_stmt (tempc,
+ fold_build2 (tcode,
+ boolean_type_node,
+ tempn, lbub_real_cst));
+ tempcn = make_ssa_name (tempc, stmt2);
+ GIMPLE_STMT_OPERAND (stmt2, 0) = tempcn;
+
+  /* fold_build3 is not used to build the GIMPLE statement here,
+     as it would hit an assertion. */
+ stmt3 = build3 (COND_EXPR, void_type_node,
+ tempcn, NULL_TREE, NULL_TREE);
+ VEC_quick_push (tree, conds, stmt1);
+ VEC_quick_push (tree, conds, stmt2);
+ VEC_quick_push (tree, conds, stmt3);
+ (*nconds)++;
+}
+
+/* A helper function to generate GIMPLE statements for
+   an out-of-input-domain check.  ARG is the call argument
+   to be runtime checked, DOMAIN holds the valid domain
+   for the given function, CONDS points to the vector
+   holding the resulting GIMPLE statements.  *NCONDS is
+   the number of logical comparisons.  This function
+   produces no more than two logical comparisons, one
+   for the lower bound check and one for the upper bound check.  */
+
+static void
+gen_conditions_for_domain (tree arg, inp_domain domain,
+ VEC (tree, heap) *conds,
+ unsigned *nconds)
+{
+ if (domain.has_lb)
+ gen_one_condition (arg, domain.lb,
+ (domain.is_lb_inclusive
+ ? LT_EXPR : LE_EXPR),
+ "DCE_COND_LB", "DCE_COND_LB_TEST",
+ conds, nconds);
+
+ if (domain.has_ub)
+ {
+ /* Now push a separator. */
+ if (domain.has_lb)
+ VEC_quick_push (tree, conds, NULL);
+
+ gen_one_condition (arg, domain.ub,
+ (domain.is_ub_inclusive
+ ? GT_EXPR : GE_EXPR),
+ "DCE_COND_UB", "DCE_COND_UB_TEST",
+ conds, nconds);
+ }
+}
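+
+/* For instance, for log (x) the no-error domain is (0, +inf) (see
+   get_no_error_domain), so only the lower bound guard is emitted,
+   roughly:
+
+     DCE_COND_LB = x;
+     DCE_COND_LB_TEST = DCE_COND_LB <= 0.0;
+     if (DCE_COND_LB_TEST)   <- true edge later wired to the log call
+
+   No upper bound check is generated for this domain.  */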
+
+
+/* A helper function to generate condition
+   code for the y argument in the call pow (some_const, y).
+   See candidate selection in check_pow.  Since the
+   candidates' base values have a limited range,
+   the guarded code generated for y is simple:
+ if (y > max_y)
+ pow (const, y);
+ Note max_y can be computed separately for each
+ const base, but in this implementation, we
+ choose to compute it using the max base
+ in the allowed range for the purpose of
+ simplicity. BASE is the constant base value,
+ EXPN is the expression for the exponent argument,
+ *CONDS is the vector to hold resulting statements,
+ and *NCONDS is the number of logical conditions. */
+
+static void
+gen_conditions_for_pow_cst_base (tree base, tree expn,
+ VEC (tree, heap) *conds,
+ unsigned *nconds)
+{
+ inp_domain exp_domain;
+ /* Validate the range of the base constant to make
+ sure it is consistent with check_pow. */
+ REAL_VALUE_TYPE mv;
+ REAL_VALUE_TYPE bcv = TREE_REAL_CST (base);
+ gcc_assert (!REAL_VALUES_EQUAL (bcv, dconst1)
+ && !REAL_VALUES_LESS (bcv, dconst1));
+ real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, 0, 1);
+ gcc_assert (!REAL_VALUES_LESS (mv, bcv));
+
+ exp_domain = get_domain (0, false, false,
+ 127, true, false);
+
+ gen_conditions_for_domain (expn, exp_domain,
+ conds, nconds);
+}
+
+/* Generate error condition code for pow calls with
+   non-constant base values.  The candidates selected
+   have their base argument value converted from an
+   integer value (1, 2, or 4 bytes; see check_pow), and
+   the max exp value is computed based on the size
+   of the integer type (i.e. the max possible base value).
+   The resulting input domain for the exp argument is thus
+   conservative (smaller than the max value allowed by
+   the runtime value of the base).  BASE is the integer
+   base value, EXPN is the expression for the exponent
+   argument, *CONDS is the vector to hold the resulting
+   statements, and *NCONDS is the number of logical
+   conditions.  */
+
+static void
+gen_conditions_for_pow_int_base (tree base, tree expn,
+ VEC (tree, heap) *conds,
+ unsigned *nconds)
+{
+ tree base_def, base_nm, base_val, base_val0;
+ tree base_var, int_type;
+ tree temp, tempn;
+ tree cst0, stmt1, stmt2;
+ int bit_sz, max_exp;
+ inp_domain exp_domain;
+
+ base_def = SSA_NAME_DEF_STMT (base);
+ base_nm = GIMPLE_STMT_OPERAND (base_def, 0);
+ base_val = GIMPLE_STMT_OPERAND (base_def, 1);
+ base_val0 = TREE_OPERAND (base_val, 0);
+ base_var = SSA_NAME_VAR (base_val0);
+ int_type = TREE_TYPE (base_var);
+ bit_sz = TYPE_PRECISION (int_type);
+ gcc_assert (bit_sz > 0
+ && bit_sz <= MAX_BASE_INT_BIT_SIZE);
+
+ /* Determine the max exp argument value according to
+ the size of the base integer. The max exp value
+ is conservatively estimated assuming IEEE754 double
+ precision format. */
+ if (bit_sz == 8)
+ max_exp = 128;
+ else if (bit_sz == 16)
+ max_exp = 64;
+ else
+ {
+ gcc_assert (bit_sz == MAX_BASE_INT_BIT_SIZE);
+ max_exp = 32;
+ }
+
+ /* For pow ((double)x, y), generate the following conditions:
+ cond 1:
+ temp1 = x;
+ if (temp1 <= 0)
+
+ cond 2:
+ temp2 = y;
+ if (temp2 > max_exp_real_cst) */
+
+ /* Generate condition in reverse order -- first
+ the condition for the exp argument. */
+
+ exp_domain = get_domain (0, false, false,
+ max_exp, true, true);
+
+ gen_conditions_for_domain (expn, exp_domain,
+ conds, nconds);
+
+ /* Now generate condition for the base argument.
+ Note it does not use the helper function
+ gen_conditions_for_domain because the base
+ type is integer. */
+
+ /* Push a separator. */
+ VEC_quick_push (tree, conds, NULL);
+
+ temp = create_tmp_var (int_type, "DCE_COND1");
+ cst0 = build_int_cst (int_type, 0);
+ stmt1 = build_gimple_modify_stmt (temp, base_val0);
+ tempn = make_ssa_name (temp, stmt1);
+ GIMPLE_STMT_OPERAND (stmt1, 0) = tempn;
+ stmt2 = build3 (COND_EXPR, void_type_node,
+ fold_build2 (LE_EXPR, boolean_type_node, tempn, cst0),
+ NULL_TREE, NULL_TREE);
+
+ VEC_quick_push (tree, conds, stmt1);
+ VEC_quick_push (tree, conds, stmt2);
+ (*nconds)++;
+}
+
+/* Method to generate conditional statements for guarding conditionally
+   dead calls to pow.  One or more statements can be generated for
+   each logical condition.  Statement groups of different conditions
+   are separated by a NULL tree and are stored in the VEC
+   conds.  The number of logical conditions is stored in *nconds.
+
+   See the C99 standard, 7.12.7.4:2, for a description of pow (x, y).
+   The precise conditions for domain errors are complex.  In this
+   implementation, a simplified (but conservative) valid domain
+   for x and y is used: x is positive to avoid domain errors, while
+   y is smaller than an upper bound (depending on x) to avoid range
+   errors.  Runtime code is generated to check x (if not constant)
+   and y against the valid domain.  If either is out of the domain,
+   the code jumps to the call; otherwise the call is bypassed.
+   POW_CALL is the call statement, *CONDS is a vector holding the
+   resulting condition statements, and *NCONDS is the number of
+   logical conditions.  */
+
+static void
+gen_conditions_for_pow (tree pow_call, VEC (tree, heap) *conds,
+ unsigned *nconds)
+{
+ tree base, expn;
+ enum tree_code bc, ec;
+
+#ifdef ENABLE_CHECKING
+ gcc_assert (check_pow (pow_call));
+#endif
+
+ *nconds = 0;
+
+ base = CALL_EXPR_ARG (pow_call, 0);
+ expn = CALL_EXPR_ARG (pow_call, 1);
+
+ bc = TREE_CODE (base);
+ ec = TREE_CODE (expn);
+
+ if (bc == REAL_CST)
+ gen_conditions_for_pow_cst_base (base, expn,
+ conds, nconds);
+ else if (bc == SSA_NAME)
+ gen_conditions_for_pow_int_base (base, expn,
+ conds, nconds);
+ else
+ gcc_unreachable ();
+}
+
+/* A helper routine to help compute the valid input domain
+   for a builtin function.  See C99 7.12.7 for details.  In this
+   implementation, we only handle a single-region domain.  The
+ resulting region can be conservative (smaller) than the actual
+ one and rounded to integers. Some of the bounds are documented
+ in the standard, while other limit constants are computed
+ assuming IEEE floating point format (for SF and DF modes).
+ Since IEEE only sets minimum requirements for long double format,
+   different long double formats exist under different implementations
+   (e.g., 64 bit double precision (DF), 80 bit double-extended
+   precision (XF), and 128 bit quad precision (QF)).  For simplicity,
+ in this implementation, the computed bounds for long double assume
+   64 bit format (DF), and are therefore conservative.  Another
+   assumption is that the single precision float type is always SF mode,
+   and the double type is DF mode.  This function is quite
+ implementation specific, so it may not be suitable to be part of
+ builtins.c. This needs to be revisited later to see if it can
+ be leveraged in x87 assembly expansion. */
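+
+/* As a sketch of where the integer limits below come from: exp
+   overflows IEEE double precision once its argument exceeds
+   ln(DBL_MAX) ~= 709.78, and single precision once it exceeds
+   ln(FLT_MAX) ~= 88.72, hence the conservative bounds 709 and 88;
+   the exp2, exp10, cosh and sinh bounds are obtained the same way
+   from the corresponding overflow thresholds.  */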
+
+static inp_domain
+get_no_error_domain (enum built_in_function fnc)
+{
+ switch (fnc)
+ {
+ /* Trig functions: return [-1, +1] */
+ CASE_FLT_FN (BUILT_IN_ACOS):
+ CASE_FLT_FN (BUILT_IN_ASIN):
+ return get_domain (-1, true, true,
+ 1, true, true);
+ /* Hyperbolic functions. */
+ CASE_FLT_FN (BUILT_IN_ACOSH):
+ /* acosh: [1, +inf) */
+ return get_domain (1, true, true,
+ 1, false, false);
+ CASE_FLT_FN (BUILT_IN_ATANH):
+ /* atanh: (-1, +1) */
+ return get_domain (-1, true, false,
+ 1, true, false);
+ case BUILT_IN_COSHF:
+ case BUILT_IN_SINHF:
+ /* coshf: (-89, +89) */
+ return get_domain (-89, true, false,
+ 89, true, false);
+ case BUILT_IN_COSH:
+ case BUILT_IN_SINH:
+ case BUILT_IN_COSHL:
+ case BUILT_IN_SINHL:
+ /* cosh: (-710, +710) */
+ return get_domain (-710, true, false,
+ 710, true, false);
+ /* Log functions: (0, +inf) */
+ CASE_FLT_FN (BUILT_IN_LOG):
+ CASE_FLT_FN (BUILT_IN_LOG2):
+ CASE_FLT_FN (BUILT_IN_LOG10):
+ return get_domain (0, true, false,
+ 0, false, false);
+ CASE_FLT_FN (BUILT_IN_LOG1P):
+ return get_domain (-1, true, false,
+ 0, false, false);
+ /* Exp functions. */
+ case BUILT_IN_EXPF:
+ case BUILT_IN_EXPM1F:
+ /* expf: (-inf, 88) */
+ return get_domain (-1, false, false,
+ 88, true, false);
+ case BUILT_IN_EXP:
+ case BUILT_IN_EXPM1:
+ case BUILT_IN_EXPL:
+ case BUILT_IN_EXPM1L:
+ /* exp: (-inf, 709) */
+ return get_domain (-1, false, false,
+ 709, true, false);
+ case BUILT_IN_EXP2F:
+ /* exp2f: (-inf, 128) */
+ return get_domain (-1, false, false,
+ 128, true, false);
+ case BUILT_IN_EXP2:
+ case BUILT_IN_EXP2L:
+ /* exp2: (-inf, 1024) */
+ return get_domain (-1, false, false,
+ 1024, true, false);
+ case BUILT_IN_EXP10F:
+ case BUILT_IN_POW10F:
+ /* exp10f: (-inf, 38) */
+ return get_domain (-1, false, false,
+ 38, true, false);
+ case BUILT_IN_EXP10:
+ case BUILT_IN_POW10:
+ case BUILT_IN_EXP10L:
+ case BUILT_IN_POW10L:
+ /* exp10: (-inf, 308) */
+ return get_domain (-1, false, false,
+ 308, true, false);
+ /* sqrt: [0, +inf) */
+ CASE_FLT_FN (BUILT_IN_SQRT):
+ return get_domain (0, true, true,
+ 0, false, false);
+ default:
+ gcc_unreachable ();
+ }
+
+ gcc_unreachable ();
+}
+
+/* The function to generate shrink wrap conditions for a partially
+   dead builtin call whose return value is not used anywhere,
+   but has to be kept live due to a potential error condition.
+   BI_CALL is the builtin call, CONDS is the vector of statements
+   for condition code, NCONDS is the pointer to the number of
+   logical conditions.  Statements belonging to different logical
+   conditions are separated by a NULL tree in the vector.  */
+
+static void
+gen_shrink_wrap_conditions (tree bi_call, VEC (tree, heap) *conds,
+ unsigned int *nconds)
+{
+ tree call, fn;
+ enum built_in_function fnc;
+
+ gcc_assert (nconds && conds);
+ gcc_assert (VEC_length (tree, conds) == 0);
+ gcc_assert (TREE_CODE (bi_call) == GIMPLE_MODIFY_STMT
+ || TREE_CODE (bi_call) == CALL_EXPR);
+
+ call = bi_call;
+ if (TREE_CODE (call) == GIMPLE_MODIFY_STMT)
+ call = get_call_expr_in (bi_call);
+
+ fn = get_callee_fndecl (call);
+ gcc_assert (fn && DECL_BUILT_IN (fn));
+ fnc = DECL_FUNCTION_CODE (fn);
+ *nconds = 0;
+
+ if (fnc == BUILT_IN_POW)
+ gen_conditions_for_pow (call, conds, nconds);
+ else
+ {
+ tree arg;
+ inp_domain domain = get_no_error_domain (fnc);
+ *nconds = 0;
+ arg = CALL_EXPR_ARG (bi_call, 0);
+ gen_conditions_for_domain (arg, domain, conds, nconds);
+ }
+
+ return;
+}
+
+
+/* Probability that the branch (to the call) is taken. */
+#define ERR_PROB 0.01
+
+/* The function to shrink wrap a partially dead builtin call
+ whose return value is not used anywhere, but has to be kept
+   live due to a potential error condition.  Returns true if the
+ transformation actually happens. */
+
+static bool
+shrink_wrap_one_built_in_call (tree bi_call)
+{
+ block_stmt_iterator bi_call_bsi;
+ basic_block bi_call_bb, join_tgt_bb, guard_bb, guard_bb0;
+ edge join_tgt_in_edge_from_call, join_tgt_in_edge_fall_thru;
+ edge bi_call_in_edge0, guard_bb_in_edge;
+ VEC (tree, heap) *conds;
+ unsigned tn_cond_stmts, nconds;
+ unsigned ci;
+ tree cond_expr = NULL;
+ tree cond_expr_start;
+ tree bi_call_label_decl;
+ tree bi_call_label;
+
+ conds = VEC_alloc (tree, heap, 12);
+ gen_shrink_wrap_conditions (bi_call, conds, &nconds);
+
+ /* This can happen if the condition generator decides
+ it is not beneficial to do the transformation. Just
+ return false and do not do any transformation for
+ the call. */
+ if (nconds == 0)
+ return false;
+
+ bi_call_bb = bb_for_stmt (bi_call);
+
+ /* Now find the join target bb -- split
+ bi_call_bb if needed. */
+ bi_call_bsi = bsi_for_stmt (bi_call);
+
+ join_tgt_in_edge_from_call = split_block (bi_call_bb, bi_call);
+ bi_call_bsi = bsi_for_stmt (bi_call);
+
+ join_tgt_bb = join_tgt_in_edge_from_call->dest;
+
+ /* Now it is time to insert the first conditional expression
+ into bi_call_bb and split this bb so that bi_call is
+ shrink-wrapped. */
+ tn_cond_stmts = VEC_length (tree, conds);
+ cond_expr = NULL;
+ cond_expr_start = VEC_index (tree, conds, 0);
+ for (ci = 0; ci < tn_cond_stmts; ci++)
+ {
+ tree c = VEC_index (tree, conds, ci);
+ gcc_assert (c || ci != 0);
+ if (!c)
+ break;
+ bsi_insert_before (&bi_call_bsi, c, BSI_SAME_STMT);
+ cond_expr = c;
+ }
+ nconds--;
+ ci++;
+ gcc_assert (cond_expr && TREE_CODE (cond_expr) == COND_EXPR);
+
+ /* Now the label. */
+ bi_call_label_decl = create_artificial_label ();
+ bi_call_label = build1 (LABEL_EXPR, void_type_node, bi_call_label_decl);
+ bsi_insert_before (&bi_call_bsi, bi_call_label, BSI_SAME_STMT);
+
+ bi_call_in_edge0 = split_block (bi_call_bb, cond_expr);
+ bi_call_in_edge0->flags &= ~EDGE_FALLTHRU;
+ bi_call_in_edge0->flags |= EDGE_TRUE_VALUE;
+ guard_bb0 = bi_call_bb;
+ bi_call_bb = bi_call_in_edge0->dest;
+ join_tgt_in_edge_fall_thru = make_edge (guard_bb0, join_tgt_bb,
+ EDGE_FALSE_VALUE);
+
+ bi_call_in_edge0->probability = REG_BR_PROB_BASE * ERR_PROB;
+ join_tgt_in_edge_fall_thru->probability =
+ REG_BR_PROB_BASE - bi_call_in_edge0->probability;
+
+  /* Code generation for the rest of the conditions. */
+ guard_bb = guard_bb0;
+ while (nconds > 0)
+ {
+ unsigned ci0;
+ edge bi_call_in_edge;
+ block_stmt_iterator guard_bsi = bsi_for_stmt (cond_expr_start);
+ ci0 = ci;
+ cond_expr_start = VEC_index (tree, conds, ci0);
+ for (; ci < tn_cond_stmts; ci++)
+ {
+ tree c = VEC_index (tree, conds, ci);
+ gcc_assert (c || ci != ci0);
+ if (!c)
+ break;
+ bsi_insert_before (&guard_bsi, c, BSI_SAME_STMT);
+ cond_expr = c;
+ }
+ nconds--;
+ ci++;
+ gcc_assert (cond_expr && TREE_CODE (cond_expr) == COND_EXPR);
+ guard_bb_in_edge = split_block (guard_bb, cond_expr);
+ guard_bb_in_edge->flags &= ~EDGE_FALLTHRU;
+ guard_bb_in_edge->flags |= EDGE_FALSE_VALUE;
+
+ bi_call_in_edge = make_edge (guard_bb, bi_call_bb, EDGE_TRUE_VALUE);
+
+ bi_call_in_edge->probability = REG_BR_PROB_BASE * ERR_PROB;
+ guard_bb_in_edge->probability =
+ REG_BR_PROB_BASE - bi_call_in_edge->probability;
+ }
+
+ VEC_free (tree, heap, conds);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ location_t loc;
+ loc = EXPR_LOCATION (bi_call);
+ fprintf (dump_file,
+ "%s:%d: note: function call is shrink-wrapped"
+ " into error conditions.\n",
+ LOCATION_FILE (loc), LOCATION_LINE (loc));
+ }
+
+ return true;
+}
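+
+/* After the transformation above, the CFG for a single-condition
+   candidate looks roughly like:
+
+     guard_bb:    if (error condition)
+                    -> bi_call_bb   (taken with probability ERR_PROB)
+                    -> join_tgt_bb  (otherwise)
+     bi_call_bb:  the original builtin call, falling through to
+     join_tgt_bb: the code that followed the call.  */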
+
+/* The top level function for the conditional dead call
+   shrink-wrapping transformation. */
+
+static bool
+shrink_wrap_conditional_dead_built_in_calls (void)
+{
+ bool changed = false;
+ unsigned i = 0;
+
+ unsigned n = VEC_length (tree, cond_dead_built_in_calls);
+ if (n == 0)
+ return false;
+
+ for (; i < n ; i++)
+ {
+ tree bi_call = VEC_index (tree, cond_dead_built_in_calls, i);
+ changed |= shrink_wrap_one_built_in_call (bi_call);
+ }
+
+ return changed;
+}
+
+/* Pass entry points. */
+
+static unsigned int
+tree_call_cdce (void)
+{
+ basic_block bb;
+ block_stmt_iterator i;
+ bool something_changed = false;
+ cond_dead_built_in_calls = VEC_alloc (tree, heap, 64);
+
+ FOR_EACH_BB (bb)
+ {
+ /* Collect dead call candidates. */
+ for (i = bsi_start (bb); ! bsi_end_p (i); bsi_next (&i))
+ {
+ tree stmt = bsi_stmt (i);
+ if (TREE_CODE (stmt) == CALL_EXPR
+ && is_call_dce_candidate (stmt))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Found conditional dead call: ");
+ print_generic_stmt (dump_file, stmt, TDF_SLIM);
+ fprintf (dump_file, "\n");
+ }
+ VEC_quick_push (tree, cond_dead_built_in_calls, stmt);
+ }
+ }
+ }
+
+ something_changed =
+ shrink_wrap_conditional_dead_built_in_calls ();
+
+ VEC_free (tree, heap, cond_dead_built_in_calls);
+
+ if (something_changed)
+ {
+ free_dominance_info (CDI_DOMINATORS);
+ free_dominance_info (CDI_POST_DOMINATORS);
+ return (TODO_update_ssa | TODO_cleanup_cfg | TODO_ggc_collect
+ | TODO_remove_unused_locals);
+ }
+ else
+ return 0;
+}
+
+static bool
+gate_call_cdce (void)
+{
+ /* The limit constants used in the implementation
+ assume IEEE floating point format. Other formats
+ can be supported in the future if needed. */
+ return flag_tree_builtin_call_dce != 0;
+}
+
+struct gimple_opt_pass pass_call_cdce =
+{
+ {
+ GIMPLE_PASS,
+ "cdce", /* name */
+ gate_call_cdce, /* gate */
+ tree_call_cdce, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_TREE_CALL_CDCE, /* tv_id */
+ PROP_cfg | PROP_ssa, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func | TODO_verify_ssa /* todo_flags_finish */
+ }
+};
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index c0cb7b87488..3034ba339e1 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -523,6 +523,7 @@ make_edges (void)
break;
case OMP_PARALLEL:
+ case OMP_TASK:
case OMP_FOR:
case OMP_SINGLE:
case OMP_MASTER:
@@ -1936,16 +1937,17 @@ remove_useless_stmts_1 (tree *tp, struct rus_data *data)
break;
case OMP_PARALLEL:
+ case OMP_TASK:
/* Make sure the outermost BIND_EXPR in OMP_BODY isn't removed
as useless. */
- remove_useless_stmts_1 (&BIND_EXPR_BODY (OMP_BODY (*tp)), data);
+ remove_useless_stmts_1 (&BIND_EXPR_BODY (OMP_TASKREG_BODY (*tp)), data);
data->last_goto = NULL;
break;
case OMP_SECTIONS:
case OMP_SINGLE:
case OMP_SECTION:
- case OMP_MASTER :
+ case OMP_MASTER:
case OMP_ORDERED:
case OMP_CRITICAL:
remove_useless_stmts_1 (&OMP_BODY (*tp), data);
diff --git a/gcc/tree-dfa.c b/gcc/tree-dfa.c
index af167492de6..1406339a0fb 100644
--- a/gcc/tree-dfa.c
+++ b/gcc/tree-dfa.c
@@ -1,5 +1,6 @@
/* Data flow functions for trees.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2007, 2008 Free Software
+ Foundation, Inc.
Contributed by Diego Novillo <dnovillo@redhat.com>
This file is part of GCC.
@@ -724,7 +725,7 @@ add_referenced_var (tree var)
/* Scan DECL_INITIAL for pointer variables as they may contain
address arithmetic referencing the address of other
variables.
- Even non-constant intializers need to be walked, because
+ Even non-constant initializers need to be walked, because
IPA passes might prove that their are invariant later on. */
if (DECL_INITIAL (var)
/* Initializers of external variables are not useful to the
@@ -911,7 +912,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
{
tree csize = TYPE_SIZE (TREE_TYPE (TREE_OPERAND (exp, 0)));
/* We need to adjust maxsize to the whole structure bitsize.
- But we can subtract any constant offset seen sofar,
+ But we can subtract any constant offset seen so far,
because that would get us out of the structure otherwise. */
if (maxsize != -1 && csize && host_integerp (csize, 1))
maxsize = TREE_INT_CST_LOW (csize) - bit_offset;
@@ -949,7 +950,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
{
tree asize = TYPE_SIZE (TREE_TYPE (TREE_OPERAND (exp, 0)));
/* We need to adjust maxsize to the whole array bitsize.
- But we can subtract any constant offset seen sofar,
+ But we can subtract any constant offset seen so far,
because that would get us outside of the array otherwise. */
if (maxsize != -1 && asize && host_integerp (asize, 1))
maxsize = TREE_INT_CST_LOW (asize) - bit_offset;
@@ -1060,7 +1061,7 @@ refs_may_alias_p (tree ref1, tree ref2)
/* If both references are based on different variables, they cannot alias.
If both references are based on the same variable, they cannot alias if
- if the accesses do not overlap. */
+ the accesses do not overlap. */
if (SSA_VAR_P (base1)
&& SSA_VAR_P (base2))
{
diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c
index 9428e38c54e..965acce7490 100644
--- a/gcc/tree-eh.c
+++ b/gcc/tree-eh.c
@@ -1,5 +1,6 @@
/* Exception handling semantics and decomposition for trees.
- Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software
+ Foundation, Inc.
This file is part of GCC.
@@ -314,7 +315,7 @@ struct leh_tf_state
size_t goto_queue_size;
size_t goto_queue_active;
- /* Pointer map to help in searching qoto_queue when it is large. */
+ /* Pointer map to help in searching goto_queue when it is large. */
struct pointer_map_t *goto_queue_map;
/* The set of unique labels seen as entries in the goto queue. */
diff --git a/gcc/tree-flow-inline.h b/gcc/tree-flow-inline.h
index c667867404e..90fcf12c079 100644
--- a/gcc/tree-flow-inline.h
+++ b/gcc/tree-flow-inline.h
@@ -1,5 +1,6 @@
/* Inline functions for tree-flow.h
- Copyright (C) 2001, 2003, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2003, 2005, 2006, 2007, 2008 Free Software
+ Foundation, Inc.
Contributed by Diego Novillo <dnovillo@redhat.com>
This file is part of GCC.
@@ -1463,7 +1464,7 @@ link_use_stmts_after (use_operand_p head, imm_use_iterator *imm)
if (USE_FROM_PTR (use_p) == use)
last_p = move_use_after_head (use_p, head, last_p);
}
- /* LInk iter node in after last_p. */
+ /* Link iter node in after last_p. */
if (imm->iter_node.prev != NULL)
delink_imm_use (&imm->iter_node);
link_imm_use_to_list (&(imm->iter_node), last_p);
diff --git a/gcc/tree-gimple.c b/gcc/tree-gimple.c
index 2334e126343..8b05f93d505 100644
--- a/gcc/tree-gimple.c
+++ b/gcc/tree-gimple.c
@@ -322,6 +322,7 @@ is_gimple_stmt (tree t)
case OMP_CRITICAL:
case OMP_RETURN:
case OMP_CONTINUE:
+ case OMP_TASK:
case OMP_ATOMIC_LOAD:
case OMP_ATOMIC_STORE:
/* These are always void. */
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index fb4f765a200..a9ca33b14d4 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -192,7 +192,7 @@ remap_ssa_name (tree name, copy_body_data *id)
/* By inlining function having uninitialized variable, we might
extend the lifetime (variable might get reused). This cause
ICE in the case we end up extending lifetime of SSA name across
- abnormal edge, but also increase register presure.
+ abnormal edge, but also increase register pressure.
We simply initialize all uninitialized vars by 0 except for case
we are inlining to very first BB. We can avoid this for all
@@ -1603,7 +1603,7 @@ setup_one_parameter (copy_body_data *id, tree p, tree value, tree fn,
}
/* If VAR represents a zero-sized variable, it's possible that the
- assignment statment may result in no gimple statements. */
+ assignment statement may result in no gimple statements. */
if (init_stmt)
bsi_insert_after (&bsi, init_stmt, BSI_NEW_STMT);
if (gimple_in_ssa_p (cfun))
@@ -2478,6 +2478,7 @@ estimate_num_insns_1 (tree *tp, int *walk_subtrees, void *data)
}
case OMP_PARALLEL:
+ case OMP_TASK:
case OMP_FOR:
case OMP_SECTIONS:
case OMP_SINGLE:
diff --git a/gcc/tree-into-ssa.c b/gcc/tree-into-ssa.c
index 5542ede5503..2d0ddb6a5bf 100644
--- a/gcc/tree-into-ssa.c
+++ b/gcc/tree-into-ssa.c
@@ -1,5 +1,5 @@
/* Rewrite a program in Normal form into SSA.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2007
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2007, 2008
Free Software Foundation, Inc.
Contributed by Diego Novillo <dnovillo@redhat.com>
@@ -207,7 +207,7 @@ struct ssa_name_info
ENUM_BITFIELD (need_phi_state) need_phi_state : 2;
/* Age of this record (so that info_for_ssa_name table can be cleared
- quicky); if AGE < CURRENT_INFO_FOR_SSA_NAME_AGE, then the fields
+ quickly); if AGE < CURRENT_INFO_FOR_SSA_NAME_AGE, then the fields
are assumed to be null. */
unsigned age;
};
@@ -362,7 +362,7 @@ set_current_def (tree var, tree def)
}
-/* Compute global livein information given the set of blockx where
+/* Compute global livein information given the set of blocks where
an object is locally live at the start of the block (LIVEIN)
and the set of blocks where the object is defined (DEF_BLOCKS).
diff --git a/gcc/tree-loop-distribution.c b/gcc/tree-loop-distribution.c
index c380854eab5..d15ab80377f 100644
--- a/gcc/tree-loop-distribution.c
+++ b/gcc/tree-loop-distribution.c
@@ -1,5 +1,5 @@
/* Loop distribution.
- Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
Contributed by Georges-Andre Silber <Georges-Andre.Silber@ensmp.fr>
and Sebastian Pop <sebastian.pop@amd.com>.
@@ -639,7 +639,7 @@ rdg_flag_vertex (struct graph *rdg, int v, bitmap partition, bitmap loops,
}
/* Flag in the bitmap PARTITION the vertex V and all its predecessors.
- Alse flag their loop number in LOOPS. */
+ Also flag their loop number in LOOPS. */
static void
rdg_flag_vertex_and_dependent (struct graph *rdg, int v, bitmap partition,
diff --git a/gcc/tree-nested.c b/gcc/tree-nested.c
index 9e3d8ceee1f..be5e87ca316 100644
--- a/gcc/tree-nested.c
+++ b/gcc/tree-nested.c
@@ -156,7 +156,7 @@ build_addr (tree exp, tree context)
/* Building the ADDR_EXPR will compute a set of properties for
that ADDR_EXPR. Those properties are unfortunately context
- specific. ie, they are dependent on CURRENT_FUNCTION_DECL.
+ specific, i.e., they are dependent on CURRENT_FUNCTION_DECL.
Temporarily set CURRENT_FUNCTION_DECL to the desired context,
build the ADDR_EXPR, then restore CURRENT_FUNCTION_DECL. That
@@ -677,6 +677,7 @@ walk_omp_for (walk_tree_fn callback, struct nesting_info *info, tree for_stmt)
{
struct walk_stmt_info wi;
tree t, list = NULL, empty;
+ int i;
walk_body (callback, info, &OMP_FOR_PRE_BODY (for_stmt));
@@ -687,36 +688,39 @@ walk_omp_for (walk_tree_fn callback, struct nesting_info *info, tree for_stmt)
wi.info = info;
wi.tsi = tsi_last (list);
- t = OMP_FOR_INIT (for_stmt);
- gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
- SET_EXPR_LOCUS (empty, EXPR_LOCUS (t));
- wi.val_only = false;
- walk_tree (&GIMPLE_STMT_OPERAND (t, 0), callback, &wi, NULL);
- wi.val_only = true;
- wi.is_lhs = false;
- walk_tree (&GIMPLE_STMT_OPERAND (t, 1), callback, &wi, NULL);
-
- t = OMP_FOR_COND (for_stmt);
- gcc_assert (COMPARISON_CLASS_P (t));
- SET_EXPR_LOCUS (empty, EXPR_LOCUS (t));
- wi.val_only = false;
- walk_tree (&TREE_OPERAND (t, 0), callback, &wi, NULL);
- wi.val_only = true;
- wi.is_lhs = false;
- walk_tree (&TREE_OPERAND (t, 1), callback, &wi, NULL);
-
- t = OMP_FOR_INCR (for_stmt);
- gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
- SET_EXPR_LOCUS (empty, EXPR_LOCUS (t));
- wi.val_only = false;
- walk_tree (&GIMPLE_STMT_OPERAND (t, 0), callback, &wi, NULL);
- t = GIMPLE_STMT_OPERAND (t, 1);
- gcc_assert (BINARY_CLASS_P (t));
- wi.val_only = false;
- walk_tree (&TREE_OPERAND (t, 0), callback, &wi, NULL);
- wi.val_only = true;
- wi.is_lhs = false;
- walk_tree (&TREE_OPERAND (t, 1), callback, &wi, NULL);
+ for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
+ {
+ t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
+ gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
+ SET_EXPR_LOCUS (empty, EXPR_LOCUS (t));
+ wi.val_only = false;
+ walk_tree (&GIMPLE_STMT_OPERAND (t, 0), callback, &wi, NULL);
+ wi.val_only = true;
+ wi.is_lhs = false;
+ walk_tree (&GIMPLE_STMT_OPERAND (t, 1), callback, &wi, NULL);
+
+ t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
+ gcc_assert (COMPARISON_CLASS_P (t));
+ SET_EXPR_LOCUS (empty, EXPR_LOCUS (t));
+ wi.val_only = false;
+ walk_tree (&TREE_OPERAND (t, 0), callback, &wi, NULL);
+ wi.val_only = true;
+ wi.is_lhs = false;
+ walk_tree (&TREE_OPERAND (t, 1), callback, &wi, NULL);
+
+ t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
+ gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
+ SET_EXPR_LOCUS (empty, EXPR_LOCUS (t));
+ wi.val_only = false;
+ walk_tree (&GIMPLE_STMT_OPERAND (t, 0), callback, &wi, NULL);
+ t = GIMPLE_STMT_OPERAND (t, 1);
+ gcc_assert (BINARY_CLASS_P (t));
+ wi.val_only = false;
+ walk_tree (&TREE_OPERAND (t, 0), callback, &wi, NULL);
+ wi.val_only = true;
+ wi.is_lhs = false;
+ walk_tree (&TREE_OPERAND (t, 1), callback, &wi, NULL);
+ }
/* Remove empty statement added above from the end of statement list. */
tsi_delink (&wi.tsi);
@@ -1100,24 +1104,25 @@ convert_nonlocal_reference (tree *tp, int *walk_subtrees, void *data)
break;
case OMP_PARALLEL:
+ case OMP_TASK:
save_suppress = info->suppress_expansion;
- if (convert_nonlocal_omp_clauses (&OMP_PARALLEL_CLAUSES (t), wi))
+ if (convert_nonlocal_omp_clauses (&OMP_TASKREG_CLAUSES (t), wi))
{
tree c, decl;
decl = get_chain_decl (info);
c = build_omp_clause (OMP_CLAUSE_FIRSTPRIVATE);
OMP_CLAUSE_DECL (c) = decl;
- OMP_CLAUSE_CHAIN (c) = OMP_PARALLEL_CLAUSES (t);
- OMP_PARALLEL_CLAUSES (t) = c;
+ OMP_CLAUSE_CHAIN (c) = OMP_TASKREG_CLAUSES (t);
+ OMP_TASKREG_CLAUSES (t) = c;
}
save_local_var_chain = info->new_local_var_chain;
info->new_local_var_chain = NULL;
- walk_body (convert_nonlocal_reference, info, &OMP_PARALLEL_BODY (t));
+ walk_body (convert_nonlocal_reference, info, &OMP_TASKREG_BODY (t));
if (info->new_local_var_chain)
- declare_vars (info->new_local_var_chain, OMP_PARALLEL_BODY (t), false);
+ declare_vars (info->new_local_var_chain, OMP_TASKREG_BODY (t), false);
info->new_local_var_chain = save_local_var_chain;
info->suppress_expansion = save_suppress;
break;
@@ -1161,7 +1166,7 @@ static bool
convert_nonlocal_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
{
struct nesting_info *info = wi->info;
- bool need_chain = false;
+ bool need_chain = false, need_stmts = false;
tree clause, decl;
int dummy;
bitmap new_suppress;
@@ -1173,13 +1178,25 @@ convert_nonlocal_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
{
switch (OMP_CLAUSE_CODE (clause))
{
+ case OMP_CLAUSE_REDUCTION:
+ if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (clause))
+ need_stmts = true;
+ goto do_decl_clause;
+
+ case OMP_CLAUSE_LASTPRIVATE:
+ if (OMP_CLAUSE_LASTPRIVATE_STMT (clause))
+ need_stmts = true;
+ goto do_decl_clause;
+
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
- case OMP_CLAUSE_LASTPRIVATE:
- case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_SHARED:
+ do_decl_clause:
decl = OMP_CLAUSE_DECL (clause);
+ if (TREE_CODE (decl) == VAR_DECL
+ && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
+ break;
if (decl_function_context (decl) != info->context)
{
bitmap_set_bit (new_suppress, DECL_UID (decl));
@@ -1204,6 +1221,8 @@ convert_nonlocal_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_COPYIN:
+ case OMP_CLAUSE_COLLAPSE:
+ case OMP_CLAUSE_UNTIED:
break;
default:
@@ -1213,6 +1232,35 @@ convert_nonlocal_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
info->suppress_expansion = new_suppress;
+ if (need_stmts)
+ for (clause = *pclauses; clause ; clause = OMP_CLAUSE_CHAIN (clause))
+ switch (OMP_CLAUSE_CODE (clause))
+ {
+ case OMP_CLAUSE_REDUCTION:
+ if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (clause))
+ {
+ tree old_context
+ = DECL_CONTEXT (OMP_CLAUSE_REDUCTION_PLACEHOLDER (clause));
+ DECL_CONTEXT (OMP_CLAUSE_REDUCTION_PLACEHOLDER (clause))
+ = info->context;
+ walk_body (convert_nonlocal_reference, info,
+ &OMP_CLAUSE_REDUCTION_INIT (clause));
+ walk_body (convert_nonlocal_reference, info,
+ &OMP_CLAUSE_REDUCTION_MERGE (clause));
+ DECL_CONTEXT (OMP_CLAUSE_REDUCTION_PLACEHOLDER (clause))
+ = old_context;
+ }
+ break;
+
+ case OMP_CLAUSE_LASTPRIVATE:
+ walk_body (convert_nonlocal_reference, info,
+ &OMP_CLAUSE_LASTPRIVATE_STMT (clause));
+ break;
+
+ default:
+ break;
+ }
+
return need_chain;
}
@@ -1392,24 +1440,25 @@ convert_local_reference (tree *tp, int *walk_subtrees, void *data)
break;
case OMP_PARALLEL:
+ case OMP_TASK:
save_suppress = info->suppress_expansion;
- if (convert_local_omp_clauses (&OMP_PARALLEL_CLAUSES (t), wi))
+ if (convert_local_omp_clauses (&OMP_TASKREG_CLAUSES (t), wi))
{
tree c;
(void) get_frame_type (info);
c = build_omp_clause (OMP_CLAUSE_SHARED);
OMP_CLAUSE_DECL (c) = info->frame_decl;
- OMP_CLAUSE_CHAIN (c) = OMP_PARALLEL_CLAUSES (t);
- OMP_PARALLEL_CLAUSES (t) = c;
+ OMP_CLAUSE_CHAIN (c) = OMP_TASKREG_CLAUSES (t);
+ OMP_TASKREG_CLAUSES (t) = c;
}
save_local_var_chain = info->new_local_var_chain;
info->new_local_var_chain = NULL;
- walk_body (convert_local_reference, info, &OMP_PARALLEL_BODY (t));
+ walk_body (convert_local_reference, info, &OMP_TASKREG_BODY (t));
if (info->new_local_var_chain)
- declare_vars (info->new_local_var_chain, OMP_PARALLEL_BODY (t), false);
+ declare_vars (info->new_local_var_chain, OMP_TASKREG_BODY (t), false);
info->new_local_var_chain = save_local_var_chain;
info->suppress_expansion = save_suppress;
break;
@@ -1453,7 +1502,7 @@ static bool
convert_local_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
{
struct nesting_info *info = wi->info;
- bool need_frame = false;
+ bool need_frame = false, need_stmts = false;
tree clause, decl;
int dummy;
bitmap new_suppress;
@@ -1465,13 +1514,25 @@ convert_local_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
{
switch (OMP_CLAUSE_CODE (clause))
{
+ case OMP_CLAUSE_REDUCTION:
+ if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (clause))
+ need_stmts = true;
+ goto do_decl_clause;
+
+ case OMP_CLAUSE_LASTPRIVATE:
+ if (OMP_CLAUSE_LASTPRIVATE_STMT (clause))
+ need_stmts = true;
+ goto do_decl_clause;
+
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
- case OMP_CLAUSE_LASTPRIVATE:
- case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_SHARED:
+ do_decl_clause:
decl = OMP_CLAUSE_DECL (clause);
+ if (TREE_CODE (decl) == VAR_DECL
+ && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
+ break;
if (decl_function_context (decl) == info->context
&& !use_pointer_in_frame (decl))
{
@@ -1501,6 +1562,8 @@ convert_local_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_COPYIN:
+ case OMP_CLAUSE_COLLAPSE:
+ case OMP_CLAUSE_UNTIED:
break;
default:
@@ -1510,6 +1573,35 @@ convert_local_omp_clauses (tree *pclauses, struct walk_stmt_info *wi)
info->suppress_expansion = new_suppress;
+ if (need_stmts)
+ for (clause = *pclauses; clause ; clause = OMP_CLAUSE_CHAIN (clause))
+ switch (OMP_CLAUSE_CODE (clause))
+ {
+ case OMP_CLAUSE_REDUCTION:
+ if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (clause))
+ {
+ tree old_context
+ = DECL_CONTEXT (OMP_CLAUSE_REDUCTION_PLACEHOLDER (clause));
+ DECL_CONTEXT (OMP_CLAUSE_REDUCTION_PLACEHOLDER (clause))
+ = info->context;
+ walk_body (convert_local_reference, info,
+ &OMP_CLAUSE_REDUCTION_INIT (clause));
+ walk_body (convert_local_reference, info,
+ &OMP_CLAUSE_REDUCTION_MERGE (clause));
+ DECL_CONTEXT (OMP_CLAUSE_REDUCTION_PLACEHOLDER (clause))
+ = old_context;
+ }
+ break;
+
+ case OMP_CLAUSE_LASTPRIVATE:
+ walk_body (convert_local_reference, info,
+ &OMP_CLAUSE_LASTPRIVATE_STMT (clause));
+ break;
+
+ default:
+ break;
+ }
+
return need_frame;
}
@@ -1731,9 +1823,10 @@ convert_call_expr (tree *tp, int *walk_subtrees, void *data)
break;
case OMP_PARALLEL:
+ case OMP_TASK:
save_static_chain_added = info->static_chain_added;
info->static_chain_added = 0;
- walk_body (convert_call_expr, info, &OMP_PARALLEL_BODY (t));
+ walk_body (convert_call_expr, info, &OMP_TASKREG_BODY (t));
for (i = 0; i < 2; i++)
{
tree c, decl;
@@ -1741,7 +1834,7 @@ convert_call_expr (tree *tp, int *walk_subtrees, void *data)
continue;
decl = i ? get_chain_decl (info) : info->frame_decl;
/* Don't add CHAIN.* or FRAME.* twice. */
- for (c = OMP_PARALLEL_CLAUSES (t); c; c = OMP_CLAUSE_CHAIN (c))
+ for (c = OMP_TASKREG_CLAUSES (t); c; c = OMP_CLAUSE_CHAIN (c))
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
&& OMP_CLAUSE_DECL (c) == decl)
@@ -1751,8 +1844,8 @@ convert_call_expr (tree *tp, int *walk_subtrees, void *data)
c = build_omp_clause (i ? OMP_CLAUSE_FIRSTPRIVATE
: OMP_CLAUSE_SHARED);
OMP_CLAUSE_DECL (c) = decl;
- OMP_CLAUSE_CHAIN (c) = OMP_PARALLEL_CLAUSES (t);
- OMP_PARALLEL_CLAUSES (t) = c;
+ OMP_CLAUSE_CHAIN (c) = OMP_TASKREG_CLAUSES (t);
+ OMP_TASKREG_CLAUSES (t) = c;
}
}
info->static_chain_added |= save_static_chain_added;
diff --git a/gcc/tree-parloops.c b/gcc/tree-parloops.c
index 109e3058921..8344093ba6b 100644
--- a/gcc/tree-parloops.c
+++ b/gcc/tree-parloops.c
@@ -106,7 +106,7 @@ parloop
....
- # Storing the the initial value given by the user. #
+ # Storing the initial value given by the user. #
.paral_data_store.32.sum.27 = 1;
@@ -885,7 +885,7 @@ add_field_for_name (void **slot, void *data)
/* Callback for htab_traverse. A local result is the intermediate result
computed by a single
- thread, or the intial value in case no iteration was executed.
+ thread, or the initial value in case no iteration was executed.
This function creates a phi node reflecting these values.
The phi's result will be stored in NEW_PHI field of the
reduction's data structure. */
@@ -1043,7 +1043,7 @@ create_loads_for_reductions (void **slot, void *data)
/* Load the reduction result that was stored in LD_ST_DATA.
REDUCTION_LIST describes the list of reductions that the
- loades should be generated for. */
+ loads should be generated for. */
static void
create_final_loads_for_reduction (htab_t reduction_list,
struct clsn_data *ld_st_data)
@@ -1416,7 +1416,7 @@ canonicalize_loop_ivs (struct loop *loop, htab_t reduction_list, tree nit)
exit of the loop. NIT is the number of iterations of the loop
(used to initialize the variables in the duplicated part).
- TODO: the common case is that latch of the loop is empty and immediatelly
+ TODO: the common case is that latch of the loop is empty and immediately
follows the loop exit. In this case, it would be better not to copy the
body of the loop, but only move the entry of the loop directly before the
exit check and increase the number of iterations of the loop by one.
@@ -1617,13 +1617,16 @@ create_parallel_loop (struct loop *loop, tree loop_fn, tree data,
for_stmt = make_node (OMP_FOR);
TREE_TYPE (for_stmt) = void_type_node;
OMP_FOR_CLAUSES (for_stmt) = t;
- OMP_FOR_INIT (for_stmt) = build_gimple_modify_stmt (initvar, cvar_init);
- OMP_FOR_COND (for_stmt) = cond;
- OMP_FOR_INCR (for_stmt) = build_gimple_modify_stmt (cvar_base,
- build2 (PLUS_EXPR, type,
- cvar_base,
- build_int_cst
- (type, 1)));
+ OMP_FOR_INIT (for_stmt) = make_tree_vec (1);
+ TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), 0)
+ = build_gimple_modify_stmt (initvar, cvar_init);
+ OMP_FOR_COND (for_stmt) = make_tree_vec (1);
+ TREE_VEC_ELT (OMP_FOR_COND (for_stmt), 0) = cond;
+ OMP_FOR_INCR (for_stmt) = make_tree_vec (2);
+ TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), 0)
+ = build_gimple_modify_stmt (cvar_base,
+ build2 (PLUS_EXPR, type, cvar_base,
+ build_int_cst (type, 1)));
OMP_FOR_BODY (for_stmt) = NULL_TREE;
OMP_FOR_PRE_BODY (for_stmt) = NULL_TREE;
@@ -1648,7 +1651,7 @@ create_parallel_loop (struct loop *loop, tree loop_fn, tree data,
/* Generates code to execute the iterations of LOOP in N_THREADS threads in
parallel. NITER describes number of iterations of LOOP.
- REDUCTION_LIST describes the reductions existant in the LOOP. */
+ REDUCTION_LIST describes the reductions existent in the LOOP. */
static void
gen_parallel_loop (struct loop *loop, htab_t reduction_list,
@@ -1757,7 +1760,7 @@ gen_parallel_loop (struct loop *loop, htab_t reduction_list,
/* Ensure that the exit condition is the first statement in the loop. */
transform_to_exit_first_loop (loop, reduction_list, nit);
- /* Generate intializations for reductions. */
+ /* Generate initializations for reductions. */
if (htab_elements (reduction_list) > 0)
htab_traverse (reduction_list, initialize_reductions, loop);
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index a389b1ee45b..39632c82977 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -1,5 +1,6 @@
/* Definitions for describing one tree-ssa optimization pass.
- Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation,
+ Inc.
Contributed by Richard Henderson <rth@redhat.com>
This file is part of GCC.
@@ -142,7 +143,7 @@ struct gimple_opt_pass
struct opt_pass pass;
};
-/* Decription of RTL pass. */
+/* Description of RTL pass. */
struct rtl_opt_pass
{
struct opt_pass pass;
@@ -337,6 +338,7 @@ extern struct gimple_opt_pass pass_dominator;
extern struct gimple_opt_pass pass_dce;
extern struct gimple_opt_pass pass_dce_loop;
extern struct gimple_opt_pass pass_cd_dce;
+extern struct gimple_opt_pass pass_call_cdce;
extern struct gimple_opt_pass pass_merge_phi;
extern struct gimple_opt_pass pass_split_crit_edges;
extern struct gimple_opt_pass pass_pre;
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index 811c195e126..2f7de8f1c7d 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -334,19 +334,22 @@ dump_omp_clause (pretty_printer *buffer, tree clause, int spc, int flags)
pp_string (buffer, "default(");
switch (OMP_CLAUSE_DEFAULT_KIND (clause))
{
- case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
- break;
- case OMP_CLAUSE_DEFAULT_SHARED:
- pp_string (buffer, "shared");
- break;
- case OMP_CLAUSE_DEFAULT_NONE:
- pp_string (buffer, "none");
- break;
- case OMP_CLAUSE_DEFAULT_PRIVATE:
- pp_string (buffer, "private");
- break;
- default:
- gcc_unreachable ();
+ case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
+ break;
+ case OMP_CLAUSE_DEFAULT_SHARED:
+ pp_string (buffer, "shared");
+ break;
+ case OMP_CLAUSE_DEFAULT_NONE:
+ pp_string (buffer, "none");
+ break;
+ case OMP_CLAUSE_DEFAULT_PRIVATE:
+ pp_string (buffer, "private");
+ break;
+ case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE:
+ pp_string (buffer, "firstprivate");
+ break;
+ default:
+ gcc_unreachable ();
}
pp_character (buffer, ')');
break;
@@ -367,6 +370,9 @@ dump_omp_clause (pretty_printer *buffer, tree clause, int spc, int flags)
case OMP_CLAUSE_SCHEDULE_RUNTIME:
pp_string (buffer, "runtime");
break;
+ case OMP_CLAUSE_SCHEDULE_AUTO:
+ pp_string (buffer, "auto");
+ break;
default:
gcc_unreachable ();
}
@@ -380,6 +386,18 @@ dump_omp_clause (pretty_printer *buffer, tree clause, int spc, int flags)
pp_character (buffer, ')');
break;
+ case OMP_CLAUSE_UNTIED:
+ pp_string (buffer, "untied");
+ break;
+
+ case OMP_CLAUSE_COLLAPSE:
+ pp_string (buffer, "collapse(");
+ dump_generic_node (buffer,
+ OMP_CLAUSE_COLLAPSE_EXPR (clause),
+ spc, flags, false);
+ pp_character (buffer, ')');
+ break;
+
default:
/* Should never happen. */
dump_generic_node (buffer, clause, spc, flags, false);
@@ -1863,12 +1881,41 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
is_expr = false;
break;
+ case OMP_TASK:
+ pp_string (buffer, "#pragma omp task");
+ dump_omp_clauses (buffer, OMP_TASK_CLAUSES (node), spc, flags);
+ if (OMP_TASK_FN (node))
+ {
+ pp_string (buffer, " [child fn: ");
+ dump_generic_node (buffer, OMP_TASK_FN (node), spc, flags, false);
+
+ pp_string (buffer, " (");
+
+ if (OMP_TASK_DATA_ARG (node))
+ dump_generic_node (buffer, OMP_TASK_DATA_ARG (node), spc, flags,
+ false);
+ else
+ pp_string (buffer, "???");
+
+ pp_character (buffer, ')');
+ if (OMP_TASK_COPYFN (node))
+ {
+ pp_string (buffer, ", copy fn: ");
+ dump_generic_node (buffer, OMP_TASK_COPYFN (node), spc,
+ flags, false);
+ }
+ pp_character (buffer, ']');
+ }
+ goto dump_omp_body;
+
case OMP_FOR:
pp_string (buffer, "#pragma omp for");
dump_omp_clauses (buffer, OMP_FOR_CLAUSES (node), spc, flags);
if (!(flags & TDF_SLIM))
{
+ int i;
+
if (OMP_FOR_PRE_BODY (node))
{
newline_and_indent (buffer, spc + 2);
@@ -1878,14 +1925,22 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
dump_generic_node (buffer, OMP_FOR_PRE_BODY (node),
spc, flags, false);
}
- newline_and_indent (buffer, spc);
- pp_string (buffer, "for (");
- dump_generic_node (buffer, OMP_FOR_INIT (node), spc, flags, false);
- pp_string (buffer, "; ");
- dump_generic_node (buffer, OMP_FOR_COND (node), spc, flags, false);
- pp_string (buffer, "; ");
- dump_generic_node (buffer, OMP_FOR_INCR (node), spc, flags, false);
- pp_string (buffer, ")");
+ spc -= 2;
+ for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (node)); i++)
+ {
+ spc += 2;
+ newline_and_indent (buffer, spc);
+ pp_string (buffer, "for (");
+ dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_INIT (node), i),
+ spc, flags, false);
+ pp_string (buffer, "; ");
+ dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_COND (node), i),
+ spc, flags, false);
+ pp_string (buffer, "; ");
+ dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_INCR (node), i),
+ spc, flags, false);
+ pp_string (buffer, ")");
+ }
if (OMP_FOR_BODY (node))
{
newline_and_indent (buffer, spc + 2);
@@ -1896,6 +1951,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '}');
}
+ spc -= 2 * TREE_VEC_LENGTH (OMP_FOR_INIT (node)) - 2;
if (OMP_FOR_PRE_BODY (node))
{
spc -= 4;
@@ -2253,7 +2309,7 @@ print_declaration (pretty_printer *buffer, tree t, int spc, int flags)
pp_character (buffer, ')');
}
- /* The initial value of a function serves to determine wether the function
+ /* The initial value of a function serves to determine whether the function
is declared or defined. So the following does not apply to function
nodes. */
if (TREE_CODE (t) != FUNCTION_DECL)
diff --git a/gcc/tree-profile.c b/gcc/tree-profile.c
index 6121837e496..9aac154b436 100644
--- a/gcc/tree-profile.c
+++ b/gcc/tree-profile.c
@@ -65,7 +65,7 @@ static GTY(()) tree ptr_void;
/* Add code:
static gcov* __gcov_indirect_call_counters; // pointer to actual counter
- static void* __gcov_indirect_call_callee; // actual callee addres
+ static void* __gcov_indirect_call_callee; // actual callee address
*/
static void
tree_init_ic_make_global_vars (void)
diff --git a/gcc/tree-scalar-evolution.c b/gcc/tree-scalar-evolution.c
index 2cc008020e2..57fe59b186e 100644
--- a/gcc/tree-scalar-evolution.c
+++ b/gcc/tree-scalar-evolution.c
@@ -1,5 +1,6 @@
/* Scalar evolution detector.
- Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software
+ Foundation, Inc.
Contributed by Sebastian Pop <s.pop@laposte.net>
This file is part of GCC.
@@ -2357,7 +2358,7 @@ end:
/* Returns the number of executions of the exit condition of LOOP,
i.e., the number by one higher than number_of_latch_executions.
- Note that unline number_of_latch_executions, this number does
+ Note that unlike number_of_latch_executions, this number does
not necessarily fit in the unsigned variant of the type of
the control variable -- if the number of iterations is a constant,
we return chrec_dont_know if adding one to number_of_latch_executions
@@ -2843,7 +2844,7 @@ scev_const_prop (void)
and avoided final value elimination if that is the case. The problem
is that it is hard to evaluate whether the expression is too
expensive, as we do not know what optimization opportunities the
- the elimination of the final value may reveal. Therefore, we now
+ elimination of the final value may reveal. Therefore, we now
eliminate the final values of induction variables unconditionally. */
if (niter == chrec_dont_know)
continue;
diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 244219ffe79..aea18d5a826 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -357,7 +357,7 @@ decl_can_be_decomposed_p (tree var)
/* HACK: if we decompose a va_list_type_node before inlining, then we'll
confuse tree-stdarg.c, and we won't be able to figure out which and
how many arguments are accessed. This really should be improved in
- tree-stdarg.c, as the decomposition is truely a win. This could also
+ tree-stdarg.c, as the decomposition is truly a win. This could also
be fixed if the stdarg pass ran early, but this can't be done until
we've aliasing information early too. See PR 30791. */
if (early_sra
@@ -2076,7 +2076,7 @@ generate_one_element_ref (struct sra_elt *elt, tree base)
{
tree field = elt->element;
- /* We can't test elt->in_bitfld_blk here because, when this is
+ /* We can't test elt->in_bitfld_block here because, when this is
called from instantiate_element, we haven't set this field
yet. */
if (TREE_CODE (field) == BIT_FIELD_REF)
@@ -2162,7 +2162,7 @@ sra_build_assignment (tree dst, tree src)
var = TREE_OPERAND (src, 0);
width = TREE_OPERAND (src, 1);
/* The offset needs to be adjusted to a right shift quantity
- depending on the endianess. */
+ depending on the endianness. */
if (BYTES_BIG_ENDIAN)
{
tree tmp = size_binop (PLUS_EXPR, width, TREE_OPERAND (src, 2));
diff --git a/gcc/tree-ssa-alias-warnings.c b/gcc/tree-ssa-alias-warnings.c
index aacaa0ac0ee..bf95258ed28 100644
--- a/gcc/tree-ssa-alias-warnings.c
+++ b/gcc/tree-ssa-alias-warnings.c
@@ -1,5 +1,5 @@
/* Strict aliasing checks.
- Copyright (C) 2007 Free Software Foundation, Inc.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
Contributed by Silvius Rus <rus@google.com>.
This file is part of GCC.
@@ -93,7 +93,7 @@
-Wstrict-aliasing=3 (default)
===================
Should have very few false positives and few false negatives.
- Takes care of the common punn+dereference pattern in the front end:
+ Takes care of the common pun+dereference pattern in the front end:
*(int*)&some_float.
Takes care of multiple statement cases in the back end,
using flow-sensitive points-to information (-O required).
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 455af4c4863..d4dfadbced0 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -1,5 +1,5 @@
/* Conditional constant propagation pass for the GNU compiler.
- Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
@@ -286,7 +286,7 @@ get_symbol_constant_value (tree sym)
return val;
}
/* Variables declared 'const' without an initializer
- have zero as the intializer if they may not be
+ have zero as the initializer if they may not be
overridden at link or run time. */
if (!val
&& targetm.binds_local_p (sym)
diff --git a/gcc/tree-ssa-coalesce.c b/gcc/tree-ssa-coalesce.c
index 172f1a2f829..ef28c8eff38 100644
--- a/gcc/tree-ssa-coalesce.c
+++ b/gcc/tree-ssa-coalesce.c
@@ -1,5 +1,6 @@
/* Coalesce SSA_NAMES together for the out-of-ssa pass.
- Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation,
+ Inc.
Contributed by Andrew MacLeod <amacleod@redhat.com>
This file is part of GCC.
@@ -582,7 +583,7 @@ ssa_conflicts_merge (ssa_conflicts_p ptr, unsigned x, unsigned y)
return;
/* Add a conflict between X and every one Y has. If the bitmap doesn't
- exist, then it has already been coalesced, and we dont need to add a
+ exist, then it has already been coalesced, and we don't need to add a
conflict. */
EXECUTE_IF_SET_IN_BITMAP (ptr->conflicts[y], 0, z, bi)
if (ptr->conflicts[z])
diff --git a/gcc/tree-ssa-dom.c b/gcc/tree-ssa-dom.c
index 255f24b98e7..ea99a021fb1 100644
--- a/gcc/tree-ssa-dom.c
+++ b/gcc/tree-ssa-dom.c
@@ -1,5 +1,5 @@
/* SSA Dominator optimizations for trees
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
Contributed by Diego Novillo <dnovillo@redhat.com>
@@ -283,7 +283,7 @@ tree_ssa_dominator_optimize (void)
loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);
/* We need accurate information regarding back edges in the CFG
- for jump threading; this may include back edes that are not part of
+ for jump threading; this may include back edges that are not part of
a single loop. */
mark_dfs_back_edges ();
@@ -610,7 +610,7 @@ dom_opt_finalize_block (struct dom_walk_data *walk_data, basic_block bb)
/* If we have an outgoing edge to a block with multiple incoming and
- outgoing edges, then we may be able to thread the edge. ie, we
+ outgoing edges, then we may be able to thread the edge, i.e., we
may be able to statically determine which of the outgoing edges
will be traversed when the incoming edge from BB is traversed. */
if (single_succ_p (bb)
@@ -1360,7 +1360,7 @@ record_edge_info (basic_block bb)
tree op1 = TREE_OPERAND (cond, 1);
/* Special case comparing booleans against a constant as we
- know the value of OP0 on both arms of the branch. i.e., we
+ know the value of OP0 on both arms of the branch, i.e., we
can record an equivalence for OP0 rather than COND. */
if ((TREE_CODE (cond) == EQ_EXPR || TREE_CODE (cond) == NE_EXPR)
&& TREE_CODE (op0) == SSA_NAME
diff --git a/gcc/tree-ssa-dse.c b/gcc/tree-ssa-dse.c
index f6f76d5b7c5..2f7e9238ab1 100644
--- a/gcc/tree-ssa-dse.c
+++ b/gcc/tree-ssa-dse.c
@@ -1,5 +1,6 @@
/* Dead store elimination
- Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation,
+ Inc.
This file is part of GCC.
@@ -207,7 +208,7 @@ memory_address_same (tree store1, tree store2)
}
/* Return true if there is a stmt that kills the lhs of STMT and is in the
- virtual def-use chain of STMT without a use inbetween the kill and STMT.
+ virtual def-use chain of STMT without a use in between the kill and STMT.
Returns false if no such stmt is found.
*FIRST_USE_P is set to the first use of the single virtual def of
STMT. *USE_P is set to the vop killed by *USE_STMT. */
@@ -691,7 +692,7 @@ execute_simple_dse (void)
if (TREE_THIS_VOLATILE (op))
dead = false;
- /* Look for possible occurence var = indirect_ref (...) where
+ /* Look for possible occurrence var = indirect_ref (...) where
indirect_ref itself is volatile. */
if (dead && TREE_THIS_VOLATILE (GIMPLE_STMT_OPERAND (stmt, 1)))
diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c
index cf6cf04ac1b..ac9d8514784 100644
--- a/gcc/tree-ssa-forwprop.c
+++ b/gcc/tree-ssa-forwprop.c
@@ -845,7 +845,7 @@ forward_propagate_comparison (tree cond, tree stmt)
TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1));
}
/* We can propagate the condition into X op CST where op
- is EQ_EXRP or NE_EXPR and CST is either one or zero. */
+ is EQ_EXPR or NE_EXPR and CST is either one or zero. */
else if (COMPARISON_CLASS_P (rhs)
&& TREE_CODE (TREE_OPERAND (rhs, 0)) == SSA_NAME
&& TREE_CODE (TREE_OPERAND (rhs, 1)) == INTEGER_CST)
diff --git a/gcc/tree-ssa-live.c b/gcc/tree-ssa-live.c
index ae4b909a5e3..c277980ed86 100644
--- a/gcc/tree-ssa-live.c
+++ b/gcc/tree-ssa-live.c
@@ -1,5 +1,6 @@
/* Liveness for SSA trees.
- Copyright (C) 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2007, 2008 Free Software Foundation,
+ Inc.
Contributed by Andrew MacLeod <amacleod@redhat.com>
This file is part of GCC.
@@ -46,7 +47,7 @@ static void verify_live_on_entry (tree_live_info_p);
At the end of out-of-ssa, each partition becomes a "real" variable and is
rewritten as a compiler variable.
- The var_map datat structure is used to manage these partitions. It allows
+ The var_map data structure is used to manage these partitions. It allows
partitions to be combined, and determines which partition belongs to what
ssa_name or variable, and vice versa. */
diff --git a/gcc/tree-ssa-live.h b/gcc/tree-ssa-live.h
index 46135ee16ac..4f021609b2f 100644
--- a/gcc/tree-ssa-live.h
+++ b/gcc/tree-ssa-live.h
@@ -1,5 +1,5 @@
/* Routines for liveness in SSA trees.
- Copyright (C) 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2007, 2008 Free Software Foundation, Inc.
Contributed by Andrew MacLeod <amacleod@redhat.com>
This file is part of GCC.
@@ -82,7 +82,7 @@ typedef struct _var_map
/* Partition number of a non ssa-name variable. */
#define VAR_ANN_PARTITION(ann) (ann->partition)
-/* Index iot the basevar table of a non ssa-name variable. */
+/* Index to the basevar table of a non ssa-name variable. */
#define VAR_ANN_BASE_INDEX(ann) (ann->base_index)
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index 2336263b499..8a2dd9de304 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -1,5 +1,6 @@
/* Loop invariant motion.
- Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software
+ Foundation, Inc.
This file is part of GCC.
@@ -131,7 +132,7 @@ typedef struct mem_ref
unsigned id; /* ID assigned to the memory reference
(its index in memory_accesses.refs_list) */
hashval_t hash; /* Its hash value. */
- bitmap stored; /* The set of loops in that this memory locatio
+ bitmap stored; /* The set of loops in that this memory location
is stored to. */
VEC (mem_ref_locs_p, heap) *accesses_in_loop;
/* The locations of the accesses. Vector
@@ -570,7 +571,7 @@ outermost_indep_loop (struct loop *outer, struct loop *loop, mem_ref_p ref)
}
/* If there is a simple load or store to a memory reference in STMT, returns
- the location of the memory reference, and sets IS_STORE accoring to whether
+ the location of the memory reference, and sets IS_STORE according to whether
it is a store or load. Otherwise, returns NULL. */
static tree *
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index 0247a1e71f5..41c37943656 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -1,5 +1,6 @@
/* Induction variable optimizations.
- Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software
+ Foundation, Inc.
This file is part of GCC.
@@ -3047,7 +3048,7 @@ get_address_cost (bool symbol_present, bool var_present,
{
base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (""));
/* ??? We can run into trouble with some backends by presenting
- it with symbols which havn't been properly passed through
+ it with symbols which haven't been properly passed through
targetm.encode_section_info. By setting the local bit, we
enhance the probability of things working. */
SYMBOL_REF_FLAGS (base) = SYMBOL_FLAG_LOCAL;
@@ -3978,7 +3979,7 @@ determine_iv_cost (struct ivopts_data *data, struct iv_cand *cand)
cost = cost_step + cost_base.cost / AVG_LOOP_NITER (current_loop);
/* Prefer the original ivs unless we may gain something by replacing it.
- The reason is to makee debugging simpler; so this is not relevant for
+ The reason is to make debugging simpler; so this is not relevant for
artificial ivs created by other optimization passes. */
if (cand->pos != IP_ORIGINAL
|| DECL_ARTIFICIAL (SSA_NAME_VAR (cand->var_before)))
diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c
index 934afa78d7d..74153fd294d 100644
--- a/gcc/tree-ssa-loop-niter.c
+++ b/gcc/tree-ssa-loop-niter.c
@@ -1,5 +1,6 @@
/* Functions to determine/estimate number of iterations of a loop.
- Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation,
+ Inc.
This file is part of GCC.
@@ -2313,7 +2314,7 @@ derive_constant_upper_bound (const_tree val)
}
/* Records that every statement in LOOP is executed I_BOUND times.
- REALISTIC is true if I_BOUND is expected to be close the the real number
+ REALISTIC is true if I_BOUND is expected to be close to the real number
of iterations. UPPER is true if we are sure the loop iterates at most
I_BOUND times. */
@@ -2342,7 +2343,7 @@ record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
/* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
is true if the loop is exited immediately after STMT, and this exit
is taken at last when the STMT is executed BOUND + 1 times.
- REALISTIC is true if BOUND is expected to be close the the real number
+ REALISTIC is true if BOUND is expected to be close to the real number
of iterations. UPPER is true if we are sure the loop iterates at most
BOUND times. I_BOUND is an unsigned double_int upper estimate on BOUND. */
diff --git a/gcc/tree-ssa-loop-prefetch.c b/gcc/tree-ssa-loop-prefetch.c
index 705848216be..48fafb1a97a 100644
--- a/gcc/tree-ssa-loop-prefetch.c
+++ b/gcc/tree-ssa-loop-prefetch.c
@@ -1,5 +1,5 @@
/* Array prefetching.
- Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
This file is part of GCC.
@@ -156,7 +156,7 @@ along with GCC; see the file COPYING3. If not see
/* In some cases we are only able to determine that there is a certain
probability that the two accesses hit the same cache line. In this
case, we issue the prefetches for both of them if this probability
- is less then (1000 - ACCEPTABLE_MISS_RATE) promile. */
+   is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */
#ifndef ACCEPTABLE_MISS_RATE
#define ACCEPTABLE_MISS_RATE 50
diff --git a/gcc/tree-ssa-operands.c b/gcc/tree-ssa-operands.c
index cdbc442a0ee..71fb883f84a 100644
--- a/gcc/tree-ssa-operands.c
+++ b/gcc/tree-ssa-operands.c
@@ -1523,7 +1523,8 @@ get_addr_dereference_operands (tree stmt, tree *addr, int flags, tree full_ref,
if (dump_file
&& TREE_CODE (ptr) == SSA_NAME
&& (pi == NULL
- || pi->name_mem_tag == NULL_TREE))
+ || (pi->name_mem_tag == NULL_TREE
+ && !pi->pt_anything)))
{
fprintf (dump_file,
"NOTE: no flow-sensitive alias info for ");
@@ -2092,17 +2093,22 @@ get_expr_operands (tree stmt, tree *expr_p, int flags)
case OMP_FOR:
{
- tree init = OMP_FOR_INIT (expr);
- tree cond = OMP_FOR_COND (expr);
- tree incr = OMP_FOR_INCR (expr);
tree c, clauses = OMP_FOR_CLAUSES (stmt);
+ int i;
- get_expr_operands (stmt, &GIMPLE_STMT_OPERAND (init, 0), opf_def);
- get_expr_operands (stmt, &GIMPLE_STMT_OPERAND (init, 1), opf_use);
- get_expr_operands (stmt, &TREE_OPERAND (cond, 1), opf_use);
- get_expr_operands (stmt,
- &TREE_OPERAND (GIMPLE_STMT_OPERAND (incr, 1), 1),
- opf_use);
+ for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (expr)); i++)
+ {
+ tree init = TREE_VEC_ELT (OMP_FOR_INIT (expr), i);
+ tree cond = TREE_VEC_ELT (OMP_FOR_COND (expr), i);
+ tree incr = TREE_VEC_ELT (OMP_FOR_INCR (expr), i);
+
+ get_expr_operands (stmt, &GIMPLE_STMT_OPERAND (init, 0), opf_def);
+ get_expr_operands (stmt, &GIMPLE_STMT_OPERAND (init, 1), opf_use);
+ get_expr_operands (stmt, &TREE_OPERAND (cond, 1), opf_use);
+ get_expr_operands (stmt,
+ &TREE_OPERAND (GIMPLE_STMT_OPERAND (incr, 1),
+ 1), opf_use);
+ }
c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
if (c)
diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c
index fbc7a683ec8..93b82d93ab1 100644
--- a/gcc/tree-ssa-phiopt.c
+++ b/gcc/tree-ssa-phiopt.c
@@ -1,5 +1,6 @@
/* Optimization of PHI nodes by converting them into straightline code.
- Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation,
+ Inc.
This file is part of GCC.
@@ -1129,7 +1130,7 @@ name_to_bb_eq (const void *p1, const void *p2)
return n1->ssa_name == n2->ssa_name && n1->store == n2->store;
}
-/* We see a the expression EXP in basic block BB. If it's an interesting
+/* We see the expression EXP in basic block BB. If it's an interesting
expression (an INDIRECT_REF through an SSA_NAME) possibly insert the
expression into the set NONTRAP or the hash table of seen expressions.
STORE is true if this expression is on the LHS, otherwise it's on
diff --git a/gcc/tree-ssa-phiprop.c b/gcc/tree-ssa-phiprop.c
index 2621dae1abd..f408939b07d 100644
--- a/gcc/tree-ssa-phiprop.c
+++ b/gcc/tree-ssa-phiprop.c
@@ -36,7 +36,7 @@ along with GCC; see the file COPYING3. If not see
#include "flags.h"
/* This pass propagates indirect loads through the PHI node for its
- address to make the load source possiby non-addressable and to
+ address to make the load source possibly non-addressable and to
allow for PHI optimization to trigger.
For example the pass changes
@@ -48,7 +48,7 @@ along with GCC; see the file COPYING3. If not see
# tmp_1 = PHI <a, b>
- but also handles more complex cenarios like
+ but also handles more complex scenarios like
D.2077_2 = &this_1(D)->a1;
...
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index c4c13f96ded..8e3de703866 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -1,5 +1,5 @@
/* SCC value numbering for trees
- Copyright (C) 2006, 2007
+ Copyright (C) 2006, 2007, 2008
Free Software Foundation, Inc.
Contributed by Daniel Berlin <dan@dberlin.org>
@@ -2009,7 +2009,7 @@ extract_and_process_scc_for_name (tree name)
Execution of this algorithm relies on the fact that the SCC's are
popped off the stack in topological order.
Returns true if successful, false if we stopped processing SCC's due
- to ressource constraints. */
+ to resource constraints. */
static bool
DFS (tree name)
@@ -2238,7 +2238,7 @@ free_scc_vn (void)
}
/* Do SCCVN. Returns true if it finished, false if we bailed out
- due to ressource constraints. */
+ due to resource constraints. */
bool
run_scc_vn (bool may_insert_arg)
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index 570c173b3a9..d66a4a82239 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -4721,32 +4721,6 @@ set_used_smts (void)
}
}
-/* Merge the necessary SMT's into the bitmap INTO, which is
- P's varinfo. This involves merging all SMT's that are a subset of
- the SMT necessary for P. */
-
-static void
-merge_smts_into (tree p, bitmap solution)
-{
- tree smt;
- bitmap aliases;
- tree var = p;
-
- if (TREE_CODE (p) == SSA_NAME)
- var = SSA_NAME_VAR (p);
-
- smt = var_ann (var)->symbol_mem_tag;
- if (smt)
- {
- /* The smt itself isn't included in its aliases. */
- bitmap_set_bit (solution, DECL_UID (smt));
-
- aliases = MTAG_ALIASES (smt);
- if (aliases)
- bitmap_ior_into (solution, aliases);
- }
-}
-
/* Given a pointer variable P, fill in its points-to set, or return
false if we can't.
Rather than return false for variables that point-to anything, we
@@ -4828,19 +4802,18 @@ find_what_p_points_to (tree p)
}
}
+ /* Instead of doing extra work, simply do not create
+ points-to information for pt_anything pointers. This
+ will cause the operand scanner to fall back to the
+	 type-based SMT and its aliases, which is the best
+	 we could do here for the points-to set as well.  */
+ if (was_pt_anything)
+ return false;
+
/* Share the final set of variables when possible. */
finished_solution = BITMAP_GGC_ALLOC ();
stats.points_to_sets_created++;
- /* Instead of using pt_anything, we merge in the SMT aliases
- for the underlying SMT. In addition, if they could have
- pointed to anything, they could point to global memory. */
- if (was_pt_anything)
- {
- merge_smts_into (p, finished_solution);
- pi->pt_global_mem = 1;
- }
-
set_uids_in_ptset (p, finished_solution, vi->solution,
vi->directly_dereferenced,
vi->no_tbaa_pruning);
diff --git a/gcc/tree-ssa-ter.c b/gcc/tree-ssa-ter.c
index 670d9637539..a93001a985b 100644
--- a/gcc/tree-ssa-ter.c
+++ b/gcc/tree-ssa-ter.c
@@ -1,5 +1,6 @@
/* Routines for performing Temporary Expression Replacement (TER) in SSA trees.
- Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation,
+ Inc.
Contributed by Andrew MacLeod <amacleod@redhat.com>
This file is part of GCC.
@@ -58,7 +59,7 @@ along with GCC; see the file COPYING3. If not see
v_9 = (b_5 + 6) * (C * 10)
which will then have the ssa_name assigned to regular variables, and the
- resulting code which will be passed ot the expander looks something like:
+ resulting code which will be passed to the expander looks something like:
v = (b + 6) * (C * 10)
@@ -70,7 +71,7 @@ along with GCC; see the file COPYING3. If not see
Although SSA_NAMES themselves don't change, this pass is performed after
coalescing has coalesced different SSA_NAMES together, so there could be a
definition of an SSA_NAME which is coalesced with a use that causes a
- problem. ie
+ problem, i.e.,
PHI b_5 = <b_8(2), b_14(1)>
<...>
@@ -95,8 +96,8 @@ along with GCC; see the file COPYING3. If not see
EXPR_DECL_UID bitmap is allocated and set to the base variable UID of the
def and any uses in the expression. non-NULL means the expression is being
tracked. The UID's themselves are used to prevent TER substitution into
- accumulating sequences.
- ie
+ accumulating sequences, i.e.,
+
x = x + y
x = x + z
x = x + w
@@ -124,7 +125,7 @@ along with GCC; see the file COPYING3. If not see
a block to clear out the KILL_LIST bitmaps at the end of each block.
NEW_REPLACEABLE_DEPENDENCIES is used as a temporary place to store
- dependencies which will be reused by the current definition. ALl the uses
+ dependencies which will be reused by the current definition. All the uses
on an expression are processed before anything else is done. If a use is
determined to be a replaceable expression AND the current stmt is also going
to be replaceable, all the dependencies of this replaceable use will be
@@ -137,8 +138,8 @@ along with GCC; see the file COPYING3. If not see
a_2's expression 'b_5 + 6' is determined to be replaceable at the use
location. It is dependent on the partition 'b_5' is in. This is cached into
- the NEW_REPLACEABLE_DEPENDENCIES bitmap. and when v_8 is examined for
- replaceablility, it is a candidate, and it is dependent on the partition
+ the NEW_REPLACEABLE_DEPENDENCIES bitmap, and when v_8 is examined for
+ replaceability, it is a candidate, and it is dependent on the partition
b_5 is in *NOT* a_2, as well as c_4's partition.
if v_8 is also replaceable:
@@ -520,7 +521,7 @@ kill_virtual_exprs (temp_expr_table_p tab)
/* Mark the expression associated with VAR as replaceable, and enter
- the defining stmt into the partition_dependencies table TAB. if
+ the defining stmt into the partition_dependencies table TAB. If
MORE_REPLACING is true, accumulate the pending partition dependencies. */
static void
diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c
index 359702161c1..54f87afaf68 100644
--- a/gcc/tree-ssa-threadupdate.c
+++ b/gcc/tree-ssa-threadupdate.c
@@ -1,5 +1,6 @@
/* Thread edges through blocks and update the control flow and SSA graphs.
- Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation,
+ Inc.
This file is part of GCC.
@@ -71,7 +72,7 @@ along with GCC; see the file COPYING3. If not see
7. Put the duplicated resources in B and all the B' blocks into SSA form.
Note that block duplication can be minimized by first collecting the
- the set of unique destination blocks that the incoming edges should
+ set of unique destination blocks that the incoming edges should
be threaded to. Block duplication can be further minimized by using
B instead of creating B' for one destination if all edges into B are
going to be threaded to a successor of B.
@@ -1089,7 +1090,7 @@ thread_through_all_blocks (bool may_peel_loop_headers)
threading opportunities discovered by a pass and update the CFG
and SSA form all at once.
- E is the edge we can thread, E2 is the new target edge. ie, we
+ E is the edge we can thread, E2 is the new target edge, i.e., we
are effectively recording that E->dest can be changed to E2->dest
after fixing the SSA graph. */
diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c
index f71e3af7ff9..8e6ea4cff7a 100644
--- a/gcc/tree-ssa.c
+++ b/gcc/tree-ssa.c
@@ -1,5 +1,6 @@
/* Miscellaneous SSA utility functions.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2007, 2008 Free Software
+ Foundation, Inc.
This file is part of GCC.
@@ -129,7 +130,7 @@ redirect_edge_var_map_dup (edge newe, edge olde)
}
-/* Return the varable mappings for a given edge. If there is none, return
+/* Return the variable mappings for a given edge. If there is none, return
NULL. */
edge_var_map_vector
diff --git a/gcc/tree-vect-analyze.c b/gcc/tree-vect-analyze.c
index 18d7bb8bab1..9f2640d09d6 100644
--- a/gcc/tree-vect-analyze.c
+++ b/gcc/tree-vect-analyze.c
@@ -1,5 +1,6 @@
/* Analysis Utilities for Loop Vectorization.
- Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software
+ Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com>
This file is part of GCC.
@@ -509,7 +510,7 @@ vect_analyze_operations (loop_vec_info loop_vinfo)
/* Groups of strided accesses whose size is not a power of 2 are
not vectorizable yet using loop-vectorization. Therefore, if
this stmt feeds non-SLP-able stmts (i.e., this stmt has to be
- both SLPed and loop-based vectorzed), the loop cannot be
+ both SLPed and loop-based vectorized), the loop cannot be
vectorized. */
if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
&& exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
@@ -690,7 +691,7 @@ exist_non_indexing_operands_for_use_p (tree use, tree stmt)
/* Function vect_analyze_scalar_cycles_1.
Examine the cross iteration def-use cycles of scalar variables
- in LOOP. LOOP_VINFO represents the loop that is noe being
+ in LOOP. LOOP_VINFO represents the loop that is now being
considered for vectorization (can be LOOP, or an outer-loop
enclosing LOOP). */
@@ -3542,8 +3543,8 @@ vect_stmt_relevant_p (tree stmt, loop_vec_info loop_vinfo,
Inputs:
- a USE in STMT in a loop represented by LOOP_VINFO
- LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
- that defined USE. This is dont by calling mark_relevant and passing it
- the WORKLIST (to add DEF_STMT to the WORKlist in case itis relevant).
+ that defined USE. This is done by calling mark_relevant and passing it
+ the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
Outputs:
Generally, LIVE_P and RELEVANT are used to define the liveness and
diff --git a/gcc/tree-vect-transform.c b/gcc/tree-vect-transform.c
index 72e87ef1e41..450af302403 100644
--- a/gcc/tree-vect-transform.c
+++ b/gcc/tree-vect-transform.c
@@ -216,7 +216,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
/* Add additional cost for the peeled instructions in prologue and epilogue
loop.
- FORNOW: If we dont know the value of peel_iters for prologue or epilogue
+ FORNOW: If we don't know the value of peel_iters for prologue or epilogue
at compile-time - we assume it's vf/2 (the worst would be vf-1).
TODO: Build an expression that represents peel_iters for prologue and
@@ -332,7 +332,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
branches.
TODO: The back end may reorder the BBS's differently and reverse
- conditions/branch directions. Change the stimates below to
+ conditions/branch directions. Change the estimates below to
something more reasonable. */
if (runtime_test)
@@ -1392,7 +1392,7 @@ vect_get_constant_vectors (slp_tree slp_node, VEC(tree,heap) **vec_oprnds,
created vectors. It is greater than 1 if unrolling is performed.
For example, we have two scalar operands, s1 and s2 (e.g., group of
- strided accesses of size two), while NUINTS is four (i.e., four scalars
+ strided accesses of size two), while NUNITS is four (i.e., four scalars
of this type can be packed in a vector). The output vector will contain
two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
will be 2).
@@ -1400,7 +1400,7 @@ vect_get_constant_vectors (slp_tree slp_node, VEC(tree,heap) **vec_oprnds,
If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
containing the operands.
- For example, NUINTS is four as before, and the group size is 8
+ For example, NUNITS is four as before, and the group size is 8
(s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
{s5, s6, s7, s8}. */
@@ -4481,7 +4481,7 @@ vectorizable_type_promotion (tree stmt, block_stmt_iterator *bsi,
/* Arguments are ready. Create the new vector stmt. We are creating
two vector defs because the widened result does not fit in one vector.
- The vectorized stmt can be expressed as a call to a taregt builtin,
+ The vectorized stmt can be expressed as a call to a target builtin,
or a using a tree-code. */
/* Generate first half of the widened result: */
new_stmt = vect_gen_widened_results_half (code1, vectype_out, decl1,
@@ -6199,7 +6199,7 @@ vectorizable_condition (tree stmt, block_stmt_iterator *bsi, tree *vec_stmt)
vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
- /* Arguments are ready. create the new vector stmt. */
+ /* Arguments are ready. Create the new vector stmt. */
vec_compare = build2 (TREE_CODE (cond_expr), vectype,
vec_cond_lhs, vec_cond_rhs);
vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
diff --git a/gcc/tree-vectorizer.c b/gcc/tree-vectorizer.c
index c513dda7652..be93e024576 100644
--- a/gcc/tree-vectorizer.c
+++ b/gcc/tree-vectorizer.c
@@ -1,5 +1,6 @@
/* Loop Vectorization
- Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software
+ Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com>
This file is part of GCC.
@@ -396,7 +397,7 @@ slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
I.E., the overall structure is:
loop1_preheader_bb:
- guard1 (goto loop1/merg1_bb)
+ guard1 (goto loop1/merge1_bb)
loop1
loop1_exit_bb:
guard2 (goto merge1_bb/merge2_bb)
@@ -463,7 +464,7 @@ slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
In the context of the overall structure, we have:
loop1_preheader_bb:
- guard1 (goto loop1/merg1_bb)
+ guard1 (goto loop1/merge1_bb)
LOOP-> loop1
loop1_exit_bb:
guard2 (goto merge1_bb/merge2_bb)
@@ -506,7 +507,7 @@ slpeel_update_phi_nodes_for_guard1 (edge guard_edge, struct loop *loop,
{
/* Virtual phi; Mark it for renaming. We actually want to call
mar_sym_for_renaming, but since all ssa renaming datastructures
- are going to be freed before we get to call ssa_upate, we just
+ are going to be freed before we get to call ssa_update, we just
record this name for now in a bitmap, and will mark it for
renaming later. */
name = PHI_RESULT (orig_phi);
@@ -590,7 +591,7 @@ slpeel_update_phi_nodes_for_guard1 (edge guard_edge, struct loop *loop,
In the context of the overall structure, we have:
loop1_preheader_bb:
- guard1 (goto loop1/merg1_bb)
+ guard1 (goto loop1/merge1_bb)
loop1
loop1_exit_bb:
guard2 (goto merge1_bb/merge2_bb)
@@ -979,13 +980,13 @@ slpeel_verify_cfg_after_peeling (struct loop *first_loop,
basic_block loop1_entry_bb = loop_preheader_edge (first_loop)->src;
/* A guard that controls whether the second_loop is to be executed or skipped
- is placed in first_loop->exit. first_loopt->exit therefore has two
+ is placed in first_loop->exit. first_loop->exit therefore has two
successors - one is the preheader of second_loop, and the other is a bb
after second_loop.
*/
gcc_assert (EDGE_COUNT (loop1_exit_bb->succs) == 2);
- /* 1. Verify that one of the successors of first_loopt->exit is the preheader
+ /* 1. Verify that one of the successors of first_loop->exit is the preheader
of second_loop. */
/* The preheader of new_loop is expected to have two predecessors:
@@ -997,7 +998,7 @@ slpeel_verify_cfg_after_peeling (struct loop *first_loop,
|| (EDGE_PRED (loop2_entry_bb, 1)->src == loop1_exit_bb
&& EDGE_PRED (loop2_entry_bb, 0)->src == loop1_entry_bb)));
- /* Verify that the other successor of first_loopt->exit is after the
+ /* Verify that the other successor of first_loop->exit is after the
second_loop. */
/* TODO */
}
@@ -1101,10 +1102,10 @@ set_prologue_iterations (basic_block bb_before_first_loop,
is false, the caller of this function may want to take care of this
(this can be useful if we don't want new stmts added to first-loop).
- TH: cost model profitability threshold of iterations for vectorization.
- - CHECK_PROFITABILITY: specify whether cost model check has not occured
+ - CHECK_PROFITABILITY: specify whether cost model check has not occurred
during versioning and hence needs to occur during
prologue generation or whether cost model check
- has not occured during prologue generation and hence
+ has not occurred during prologue generation and hence
needs to occur during epilogue generation.
@@ -1200,7 +1201,7 @@ slpeel_tree_peel_loop_to_edge (struct loop *loop,
/* 2. Add the guard code in one of the following ways:
2.a Add the guard that controls whether the first loop is executed.
- This occurs when this function is invoked for prologue or epilogiue
+ This occurs when this function is invoked for prologue or epilogue
generation and when the cost model check can be done at compile time.
Resulting CFG would be:
@@ -2120,7 +2121,7 @@ supportable_widening_operation (enum tree_code code, tree stmt, tree vectype,
/* The result of a vectorized widening operation usually requires two vectors
(because the widened results do not fit int one vector). The generated
vector results would normally be expected to be generated in the same
- order as in the original scalar computation. i.e. if 8 results are
+ order as in the original scalar computation, i.e. if 8 results are
generated in each vector iteration, they are to be organized as follows:
vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
@@ -2132,7 +2133,7 @@ supportable_widening_operation (enum tree_code code, tree stmt, tree vectype,
of {mult_even,mult_odd} generate the following vectors:
vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
- When vectorizaing outer-loops, we execute the inner-loop sequentially
+ When vectorizing outer-loops, we execute the inner-loop sequentially
(each vectorized inner-loop iteration contributes to VF outer-loop
iterations in parallel). We therefore don't allow to change the order
of the computation in the inner-loop during outer-loop vectorization. */
@@ -2493,7 +2494,7 @@ vect_is_simple_reduction (loop_vec_info loop_info, tree phi)
computation. This may change the behavior of the program in some
cases, so we need to check that this is ok. One exception is when
vectorizing an outer-loop: the inner-loop is executed sequentially,
- and therefore vectorizing reductions in the inner-loop durint
+ and therefore vectorizing reductions in the inner-loop during
outer-loop vectorization is safe. */
/* CHECKME: check for !flag_finite_math_only too? */
diff --git a/gcc/tree-vn.c b/gcc/tree-vn.c
index 1d2e5a55de0..4960ac5aea3 100644
--- a/gcc/tree-vn.c
+++ b/gcc/tree-vn.c
@@ -1,5 +1,6 @@
/* Value Numbering routines for tree expressions.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2007, 2008 Free Software
+ Foundation, Inc.
Contributed by Daniel Berlin <dan@dberlin.org>, Steven Bosscher
<stevenb@suse.de> and Diego Novillo <dnovillo@redhat.com>
@@ -211,7 +212,7 @@ vn_add (tree expr, tree val)
add_to_value (val, expr);
}
-/* Insert EXPR into the value numbering tables. with value VAL, and
+/* Insert EXPR into the value numbering tables with value VAL, and
add expression EXPR to the value set for value VAL. VUSES
represents the virtual use operands associated with EXPR. It is
used when computing a hash value for EXPR. */
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index fe39a24f096..404531f4504 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -1,5 +1,5 @@
/* Support routines for Value Range Propagation (VRP).
- Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
Contributed by Diego Novillo <dnovillo@redhat.com>.
This file is part of GCC.
@@ -1585,7 +1585,7 @@ extract_range_from_assert (value_range_t *vr_p, tree expr)
3a. If the high limit of the VR_ANTI_RANGE resides
within the VR_RANGE, then the result is a new
VR_RANGE starting at the high limit of the
- the VR_ANTI_RANGE + 1 and extending to the
+ VR_ANTI_RANGE + 1 and extending to the
high limit of the original VR_RANGE.
3b. If the low limit of the VR_ANTI_RANGE resides
@@ -6440,7 +6440,7 @@ simplify_stmt_for_jump_threading (tree stmt, tree within_stmt)
}
/* Blocks which have more than one predecessor and more than
- one successor present jump threading opportunities. ie,
+ one successor present jump threading opportunities, i.e.,
when the block is reached from a specific predecessor, we
may be able to determine which of the outgoing edges will
be traversed. When this optimization applies, we are able
diff --git a/gcc/tree.c b/gcc/tree.c
index 38e06938968..8bf2b7718af 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -177,7 +177,7 @@ unsigned const char omp_clause_num_ops[] =
1, /* OMP_CLAUSE_PRIVATE */
1, /* OMP_CLAUSE_SHARED */
1, /* OMP_CLAUSE_FIRSTPRIVATE */
- 1, /* OMP_CLAUSE_LASTPRIVATE */
+ 2, /* OMP_CLAUSE_LASTPRIVATE */
4, /* OMP_CLAUSE_REDUCTION */
1, /* OMP_CLAUSE_COPYIN */
1, /* OMP_CLAUSE_COPYPRIVATE */
@@ -186,7 +186,9 @@ unsigned const char omp_clause_num_ops[] =
1, /* OMP_CLAUSE_SCHEDULE */
0, /* OMP_CLAUSE_NOWAIT */
0, /* OMP_CLAUSE_ORDERED */
- 0 /* OMP_CLAUSE_DEFAULT */
+ 0, /* OMP_CLAUSE_DEFAULT */
+ 3, /* OMP_CLAUSE_COLLAPSE */
+ 0 /* OMP_CLAUSE_UNTIED */
};
const char * const omp_clause_code_name[] =
@@ -204,7 +206,9 @@ const char * const omp_clause_code_name[] =
"schedule",
"nowait",
"ordered",
- "default"
+ "default",
+ "collapse",
+ "untied"
};
/* Init tree.c. */
@@ -3946,7 +3950,7 @@ merge_dllimport_decl_attributes (tree old, tree new)
}
else if (DECL_DLLIMPORT_P (old) && !DECL_DLLIMPORT_P (new))
{
- /* Warn about overriding a symbol that has already been used. eg:
+ /* Warn about overriding a symbol that has already been used, e.g.:
extern int __attribute__ ((dllimport)) foo;
int* bar () {return &foo;}
int foo;
@@ -8452,7 +8456,6 @@ walk_tree_1 (tree *tp, walk_tree_fn func, void *data,
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_SHARED:
case OMP_CLAUSE_FIRSTPRIVATE:
- case OMP_CLAUSE_LASTPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_IF:
@@ -8464,8 +8467,22 @@ walk_tree_1 (tree *tp, walk_tree_fn func, void *data,
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_DEFAULT:
+ case OMP_CLAUSE_UNTIED:
WALK_SUBTREE_TAIL (OMP_CLAUSE_CHAIN (*tp));
+ case OMP_CLAUSE_LASTPRIVATE:
+ WALK_SUBTREE (OMP_CLAUSE_DECL (*tp));
+ WALK_SUBTREE (OMP_CLAUSE_LASTPRIVATE_STMT (*tp));
+ WALK_SUBTREE_TAIL (OMP_CLAUSE_CHAIN (*tp));
+
+ case OMP_CLAUSE_COLLAPSE:
+ {
+ int i;
+ for (i = 0; i < 3; i++)
+ WALK_SUBTREE (OMP_CLAUSE_OPERAND (*tp, i));
+ WALK_SUBTREE_TAIL (OMP_CLAUSE_CHAIN (*tp));
+ }
+
case OMP_CLAUSE_REDUCTION:
{
int i;
diff --git a/gcc/tree.def b/gcc/tree.def
index 73bf70904c9..7474f2e68f1 100644
--- a/gcc/tree.def
+++ b/gcc/tree.def
@@ -1,7 +1,7 @@
/* This file contains the definitions and documentation for the
tree codes used in GCC.
Copyright (C) 1987, 1988, 1993, 1995, 1997, 1998, 2000, 2001, 2004, 2005,
- 2006, 2007 Free Software Foundation, Inc.
+ 2006, 2007, 2008 Free Software Foundation, Inc.
This file is part of GCC.
@@ -942,7 +942,7 @@ DEFTREECODE (VALUE_HANDLE, "value_handle", tcc_exceptional, 0)
2- EXPR is a conditional expression and is known to be true.
Valid and to be expected forms of conditional expressions are
- valid GIMPLE condidional expressions (as defined by is_gimple_condexpr)
+ valid GIMPLE conditional expressions (as defined by is_gimple_condexpr)
and conditional expressions with the first operand being a
PLUS_EXPR with a variable possibly wrapped in a NOP_EXPR first
operand and an integer constant second operand.
@@ -1001,6 +1001,23 @@ DEFTREECODE (TARGET_MEM_REF, "target_mem_ref", tcc_reference, 7)
DEFTREECODE (OMP_PARALLEL, "omp_parallel", tcc_statement, 4)
+/* OpenMP - #pragma omp task [clause1 ... clauseN]
+ Operand 0: OMP_TASK_BODY: Code to be executed by all threads.
+ Operand 1: OMP_TASK_CLAUSES: List of clauses.
+ Operand 2: OMP_TASK_FN: FUNCTION_DECL used when outlining the
+ body of the task region. Only valid after
+ pass_lower_omp.
+ Operand 3: OMP_TASK_DATA_ARG: Local variable in the parent
+ function containing data to be shared with the child
+ function.
+ Operand 4: OMP_TASK_COPYFN: FUNCTION_DECL used for constructing
+ firstprivate variables.
+ Operand 5: OMP_TASK_ARG_SIZE: Length of the task argument block.
+ Operand 6: OMP_TASK_ARG_ALIGN: Required alignment of the task
+ argument block. */
+
+DEFTREECODE (OMP_TASK, "omp_task", tcc_statement, 7)
+
/* OpenMP - #pragma omp for [clause1 ... clauseN]
Operand 0: OMP_FOR_BODY: Loop body.
Operand 1: OMP_FOR_CLAUSES: List of clauses.
@@ -1096,7 +1113,7 @@ DEFTREECODE (REDUC_MAX_EXPR, "reduc_max_expr", tcc_unary, 1)
DEFTREECODE (REDUC_MIN_EXPR, "reduc_min_expr", tcc_unary, 1)
DEFTREECODE (REDUC_PLUS_EXPR, "reduc_plus_expr", tcc_unary, 1)
-/* Widenning dot-product.
+/* Widening dot-product.
The first two arguments are of type t1.
The third argument and the result are of type t2, such that t2 is at least
twice the size of t1. DOT_PROD_EXPR(arg1,arg2,arg3) is equivalent to:
@@ -1107,7 +1124,7 @@ DEFTREECODE (REDUC_PLUS_EXPR, "reduc_plus_expr", tcc_unary, 1)
arg3 = WIDEN_SUM_EXPR (tmp, arg3); */
DEFTREECODE (DOT_PROD_EXPR, "dot_prod_expr", tcc_expression, 3)
-/* Widenning summation.
+/* Widening summation.
The first argument is of type t1.
The second argument is of type t2, such that t2 is at least twice
the size of t1. The type of the entire expression is also t2.
@@ -1116,7 +1133,7 @@ DEFTREECODE (DOT_PROD_EXPR, "dot_prod_expr", tcc_expression, 3)
with the second argument. */
DEFTREECODE (WIDEN_SUM_EXPR, "widen_sum_expr", tcc_binary, 2)
-/* Widenning multiplication.
+/* Widening multiplication.
The two arguments are of type t1.
The result is of type t2, such that t2 is at least twice
the size of t1. WIDEN_MULT_EXPR is equivalent to first widening (promoting)
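As a hedged sketch (not part of this patch) of how a source-level task maps onto the OMP_TASK operands documented above: the front end fills in OMP_TASK_BODY and OMP_TASK_CLAUSES, while OMP_TASK_FN, OMP_TASK_DATA_ARG, OMP_TASK_COPYFN, OMP_TASK_ARG_SIZE and OMP_TASK_ARG_ALIGN stay null until the OpenMP lowering and expansion passes run. Variable names are arbitrary.

/* Sketch only: for the task below, OMP_TASK_BODY is the statement
   "data[i] *= 2;" and OMP_TASK_CLAUSES is the untied/firstprivate
   clause chain; operands 2-6 are filled in by pass_lower_omp and
   pass_expand_omp.  */
void
process (int *data, int n)
{
  int i;

#pragma omp parallel
#pragma omp single
  for (i = 0; i < n; i++)
    {
#pragma omp task untied firstprivate(i)
      data[i] *= 2;
    }
}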
diff --git a/gcc/tree.h b/gcc/tree.h
index 014f9e93680..c3d2abe6978 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -174,7 +174,7 @@ extern const enum tree_code_class tree_code_type[];
#define IS_EXPR_CODE_CLASS(CLASS)\
((CLASS) >= tcc_reference && (CLASS) <= tcc_expression)
-/* Returns nonzer iff CLASS is a GIMPLE statement. */
+/* Returns nonzero iff CLASS is a GIMPLE statement. */
#define IS_GIMPLE_STMT_CODE_CLASS(CLASS) ((CLASS) == tcc_gimple_stmt)
@@ -186,6 +186,7 @@ extern const enum tree_code_class tree_code_type[];
#define OMP_DIRECTIVE_P(NODE) \
(TREE_CODE (NODE) == OMP_PARALLEL \
+ || TREE_CODE (NODE) == OMP_TASK \
|| TREE_CODE (NODE) == OMP_FOR \
|| TREE_CODE (NODE) == OMP_SECTIONS \
|| TREE_CODE (NODE) == OMP_SECTIONS_SWITCH \
@@ -315,7 +316,7 @@ enum omp_clause_code
Operand 2: OMP_CLAUSE_REDUCTION_MERGE: Stmt-list to merge private var
into the shared one.
Operand 3: OMP_CLAUSE_REDUCTION_PLACEHOLDER: A dummy VAR_DECL
- placeholder used in OMP_CLAUSE_REDUCTION_MERGE. */
+ placeholder used in OMP_CLAUSE_REDUCTION_{INIT,MERGE}. */
OMP_CLAUSE_REDUCTION,
/* OpenMP clause: copyin (variable_list). */
@@ -340,7 +341,13 @@ enum omp_clause_code
OMP_CLAUSE_ORDERED,
/* OpenMP clause: default. */
- OMP_CLAUSE_DEFAULT
+ OMP_CLAUSE_DEFAULT,
+
+ /* OpenMP clause: collapse (constant-integer-expression). */
+ OMP_CLAUSE_COLLAPSE,
+
+ /* OpenMP clause: untied. */
+ OMP_CLAUSE_UNTIED
};
/* The definition of tree nodes fills the next several pages. */
@@ -524,6 +531,8 @@ struct gimple_stmt GTY(())
OMP_PARALLEL_COMBINED in
OMP_PARALLEL
+ OMP_CLAUSE_PRIVATE_OUTER_REF in
+ OMP_CLAUSE_PRIVATE
protected_flag:
@@ -1796,6 +1805,20 @@ struct tree_constructor GTY(())
#define OMP_PARALLEL_FN(NODE) TREE_OPERAND (OMP_PARALLEL_CHECK (NODE), 2)
#define OMP_PARALLEL_DATA_ARG(NODE) TREE_OPERAND (OMP_PARALLEL_CHECK (NODE), 3)
+#define OMP_TASK_BODY(NODE) TREE_OPERAND (OMP_TASK_CHECK (NODE), 0)
+#define OMP_TASK_CLAUSES(NODE) TREE_OPERAND (OMP_TASK_CHECK (NODE), 1)
+#define OMP_TASK_FN(NODE) TREE_OPERAND (OMP_TASK_CHECK (NODE), 2)
+#define OMP_TASK_DATA_ARG(NODE) TREE_OPERAND (OMP_TASK_CHECK (NODE), 3)
+#define OMP_TASK_COPYFN(NODE) TREE_OPERAND (OMP_TASK_CHECK (NODE), 4)
+#define OMP_TASK_ARG_SIZE(NODE) TREE_OPERAND (OMP_TASK_CHECK (NODE), 5)
+#define OMP_TASK_ARG_ALIGN(NODE) TREE_OPERAND (OMP_TASK_CHECK (NODE), 6)
+
+#define OMP_TASKREG_CHECK(NODE) TREE_RANGE_CHECK (NODE, OMP_PARALLEL, OMP_TASK)
+#define OMP_TASKREG_BODY(NODE) TREE_OPERAND (OMP_TASKREG_CHECK (NODE), 0)
+#define OMP_TASKREG_CLAUSES(NODE) TREE_OPERAND (OMP_TASKREG_CHECK (NODE), 1)
+#define OMP_TASKREG_FN(NODE) TREE_OPERAND (OMP_TASKREG_CHECK (NODE), 2)
+#define OMP_TASKREG_DATA_ARG(NODE) TREE_OPERAND (OMP_TASKREG_CHECK (NODE), 3)
+
#define OMP_FOR_BODY(NODE) TREE_OPERAND (OMP_FOR_CHECK (NODE), 0)
#define OMP_FOR_CLAUSES(NODE) TREE_OPERAND (OMP_FOR_CHECK (NODE), 1)
#define OMP_FOR_INIT(NODE) TREE_OPERAND (OMP_FOR_CHECK (NODE), 2)
@@ -1848,10 +1871,19 @@ struct tree_constructor GTY(())
#define OMP_CLAUSE_PRIVATE_DEBUG(NODE) \
(OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_PRIVATE)->base.public_flag)
+/* True on a PRIVATE clause if ctor needs access to outer region's
+ variable. */
+#define OMP_CLAUSE_PRIVATE_OUTER_REF(NODE) \
+ TREE_PRIVATE (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_PRIVATE))
+
/* True on a LASTPRIVATE clause if a FIRSTPRIVATE clause for the same
decl is present in the chain. */
#define OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE(NODE) \
(OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_LASTPRIVATE)->base.public_flag)
+#define OMP_CLAUSE_LASTPRIVATE_STMT(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, \
+ OMP_CLAUSE_LASTPRIVATE),\
+ 1)
#define OMP_CLAUSE_IF_EXPR(NODE) \
OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_IF), 0)
@@ -1860,6 +1892,13 @@ struct tree_constructor GTY(())
#define OMP_CLAUSE_SCHEDULE_CHUNK_EXPR(NODE) \
OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_SCHEDULE), 0)
+#define OMP_CLAUSE_COLLAPSE_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_COLLAPSE), 0)
+#define OMP_CLAUSE_COLLAPSE_ITERVAR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_COLLAPSE), 1)
+#define OMP_CLAUSE_COLLAPSE_COUNT(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_COLLAPSE), 2)
+
#define OMP_CLAUSE_REDUCTION_CODE(NODE) \
(OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_REDUCTION)->omp_clause.subcode.reduction_code)
#define OMP_CLAUSE_REDUCTION_INIT(NODE) \
@@ -1874,6 +1913,7 @@ enum omp_clause_schedule_kind
OMP_CLAUSE_SCHEDULE_STATIC,
OMP_CLAUSE_SCHEDULE_DYNAMIC,
OMP_CLAUSE_SCHEDULE_GUIDED,
+ OMP_CLAUSE_SCHEDULE_AUTO,
OMP_CLAUSE_SCHEDULE_RUNTIME
};
@@ -1885,7 +1925,8 @@ enum omp_clause_default_kind
OMP_CLAUSE_DEFAULT_UNSPECIFIED,
OMP_CLAUSE_DEFAULT_SHARED,
OMP_CLAUSE_DEFAULT_NONE,
- OMP_CLAUSE_DEFAULT_PRIVATE
+ OMP_CLAUSE_DEFAULT_PRIVATE,
+ OMP_CLAUSE_DEFAULT_FIRSTPRIVATE
};
#define OMP_CLAUSE_DEFAULT_KIND(NODE) \
@@ -3175,7 +3216,7 @@ extern void decl_debug_expr_insert (tree, tree);
#define SET_DECL_DEBUG_EXPR(NODE, VAL) \
(decl_debug_expr_insert (VAR_DECL_CHECK (NODE), VAL))
-/* An initializationp priority. */
+/* An initialization priority. */
typedef unsigned short priority_type;
extern priority_type decl_init_priority_lookup (tree);
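A hedged usage sketch (not part of this patch) for the new accessors: reading the collapse depth of an OMP_FOR and testing a task region for the untied clause via the OMP_TASKREG_* range macros. It assumes the usual GCC-internal interfaces such as find_omp_clause, which the rest of this patch also uses.

/* Sketch only: how the new tree.h macros are intended to be used.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"

static int
omp_for_collapse_depth (tree omp_for_stmt)
{
  tree c = find_omp_clause (OMP_FOR_CLAUSES (omp_for_stmt),
			    OMP_CLAUSE_COLLAPSE);
  if (c == NULL_TREE)
    return 1;			/* No collapse clause: a single loop.  */
  /* The clause keeps the original constant expression in operand 0;
     collapse arguments are required to be integer constants.  */
  return (int) TREE_INT_CST_LOW (OMP_CLAUSE_COLLAPSE_EXPR (c));
}

static bool
taskreg_is_untied (tree stmt)
{
  /* OMP_TASKREG_CLAUSES works for both OMP_PARALLEL and OMP_TASK;
     only tasks can actually carry an "untied" clause.  */
  return find_omp_clause (OMP_TASKREG_CLAUSES (stmt),
			  OMP_CLAUSE_UNTIED) != NULL_TREE;
}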
diff --git a/gcc/unwind-dw2-fde.c b/gcc/unwind-dw2-fde.c
index ac2e0d465ae..674976f2e56 100644
--- a/gcc/unwind-dw2-fde.c
+++ b/gcc/unwind-dw2-fde.c
@@ -1,5 +1,5 @@
/* Subroutines needed for unwinding stack frames for exception handling. */
-/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
+/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008
Free Software Foundation, Inc.
Contributed by Jason Merrill <jason@cygnus.com>.
@@ -947,7 +947,7 @@ search_object (struct object* ob, void *pc)
}
else
{
- /* Long slow labourious linear search, cos we've no memory. */
+ /* Long slow laborious linear search, cos we've no memory. */
if (ob->s.b.from_array)
{
fde **p;
diff --git a/gcc/unwind.inc b/gcc/unwind.inc
index 1b2346051d2..e0ab71c412f 100644
--- a/gcc/unwind.inc
+++ b/gcc/unwind.inc
@@ -1,5 +1,5 @@
/* Exception handling and frame unwind runtime interface routines. -*- C -*-
- Copyright (C) 2001, 2003 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2003, 2008 Free Software Foundation, Inc.
This file is part of GCC.
@@ -108,7 +108,7 @@ _Unwind_RaiseException(struct _Unwind_Exception *exc)
return _URC_END_OF_STACK;
if (code != _URC_NO_REASON)
- /* Some error encountered. Ususally the unwinder doesn't
+ /* Some error encountered. Usually the unwinder doesn't
diagnose these and merely crashes. */
return _URC_FATAL_PHASE1_ERROR;
diff --git a/gcc/value-prof.c b/gcc/value-prof.c
index fbefc97a46d..f1330def688 100644
--- a/gcc/value-prof.c
+++ b/gcc/value-prof.c
@@ -1,5 +1,6 @@
/* Transformations based on profile information for values.
- Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008 Free Software
+ Foundation, Inc.
This file is part of GCC.
@@ -1062,7 +1063,7 @@ find_func_by_pid (int pid)
/* Do transformation
- if (actual_callee_addres == addres_of_most_common_function/method)
+ if (actual_callee_address == address_of_most_common_function/method)
do direct call
else
old call
diff --git a/gcc/vmsdbgout.c b/gcc/vmsdbgout.c
index 27197c17f7e..751e44cc0d4 100644
--- a/gcc/vmsdbgout.c
+++ b/gcc/vmsdbgout.c
@@ -1,6 +1,6 @@
/* Output VMS debug format symbol table information from GCC.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008
Free Software Foundation, Inc.
Contributed by Douglas B. Rupp (rupp@gnat.com).
Updated by Bernard W. Giroud (bgiroud@users.sourceforge.net).
@@ -125,7 +125,7 @@ static unsigned int func_table_in_use;
static vms_func_ref func_table;
/* Local pointer to the name of the main input file. Initialized in
- avmdbgout_init. */
+ vmsdbgout_init. */
static const char *primary_filename;
static char *module_producer;