author      jakub <jakub@138bc75d-0d04-0410-961f-82ee72b054a4>  2008-06-06 13:01:54 +0000
committer   jakub <jakub@138bc75d-0d04-0410-961f-82ee72b054a4>  2008-06-06 13:01:54 +0000
commit      fd6481cf2e4413bca3ef43b1e504e1c78de6025d (patch)
tree        5d5537ea17855b77cca7b9c90a262e584c441592 /libgomp/testsuite
parent      cbdcfa59ffeb7d51f7cbdfe64e1a99e43c82b2ac (diff)
* c-cppbuiltin.c (c_cpp_builtins): Change _OPENMP value to
200805. * langhooks.h (struct lang_hooks_for_decls): Add omp_finish_clause. Add omp_private_outer_ref hook, add another argument to omp_clause_default_ctor hook. * langhooks-def.h (LANG_HOOKS_OMP_FINISH_CLAUSE): Define. (LANG_HOOKS_OMP_PRIVATE_OUTER_REF): Define. (LANG_HOOKS_OMP_CLAUSE_DEFAULT_CTOR): Change to hook_tree_tree_tree_tree_null. (LANG_HOOKS_DECLS): Add LANG_HOOKS_OMP_FINISH_CLAUSE and LANG_HOOKS_OMP_PRIVATE_OUTER_REF. * hooks.c (hook_tree_tree_tree_tree_null): New function. * hooks.h (hook_tree_tree_tree_tree_null): New prototype. * tree.def (OMP_TASK): New tree code. * tree.h (OMP_TASK_COPYFN, OMP_TASK_ARG_SIZE, OMP_TASK_ARG_ALIGN, OMP_CLAUSE_PRIVATE_OUTER_REF, OMP_CLAUSE_LASTPRIVATE_STMT, OMP_CLAUSE_COLLAPSE_ITERVAR, OMP_CLAUSE_COLLAPSE_COUNT, OMP_TASKREG_CHECK, OMP_TASKREG_BODY, OMP_TASKREG_CLAUSES, OMP_TASKREG_FN, OMP_TASKREG_DATA_ARG, OMP_TASK_BODY, OMP_TASK_CLAUSES, OMP_TASK_FN, OMP_TASK_DATA_ARG, OMP_CLAUSE_COLLAPSE_EXPR): Define. (enum omp_clause_default_kind): Add OMP_CLAUSE_DEFAULT_FIRSTPRIVATE. (OMP_DIRECTIVE_P): Add OMP_TASK. (OMP_CLAUSE_COLLAPSE, OMP_CLAUSE_UNTIED): New clause codes. (OMP_CLAUSE_SCHEDULE_AUTO): New schedule kind. * tree.c (omp_clause_code_name): Add OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED entries. (omp_clause_num_ops): Likewise. Increase OMP_CLAUSE_LASTPRIVATE num_ops to 2. (walk_tree_1): Handle OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED. Walk OMP_CLAUSE_LASTPRIVATE_STMT. * tree-pretty-print.c (dump_omp_clause): Handle OMP_CLAUSE_SCHEDULE_AUTO, OMP_CLAUSE_UNTIED, OMP_CLAUSE_COLLAPSE, OMP_CLAUSE_DEFAULT_FIRSTPRIVATE. (dump_generic_node): Handle OMP_TASK and collapsed OMP_FOR loops. * c-omp.c (c_finish_omp_for): Allow pointer iterators. Remove warning about unsigned iterators. Change decl/init/cond/incr arguments to TREE_VECs, check arguments for all collapsed loops. (c_finish_omp_taskwait): New function. (c_split_parallel_clauses): Put OMP_CLAUSE_COLLAPSE clause to ws_clauses. * c-parser.c (c_parser_omp_for_loop): Parse collapsed loops. Call default_function_array_conversion on init. Add par_clauses argument. If decl is present in parallel's lastprivate clause, change it to shared and add lastprivate clause for decl to OMP_FOR_CLAUSES. Add clauses argument, on success set OMP_FOR_CLAUSES to it. Look up collapse count in clauses. (c_parser_omp_for, c_parser_omp_parallel): Adjust c_parser_omp_for_loop callers. (OMP_FOR_CLAUSE_MASK): Add 1 << PRAGMA_OMP_CLAUSE_COLLAPSE. (c_parser_pragma): Handle PRAGMA_OMP_TASKWAIT. (c_parser_omp_clause_name): Handle collapse and untied clauses. (c_parser_omp_clause_collapse, c_parser_omp_clause_untied): New functions. (c_parser_omp_clause_schedule): Handle schedule(auto). Include correct location in the error message. (c_parser_omp_all_clauses): Handle PRAGMA_OMP_CLAUSE_COLLAPSE and PRAGMA_OMP_CLAUSE_UNTIED. (OMP_TASK_CLAUSE_MASK): Define. (c_parser_omp_task, c_parser_omp_taskwait): New functions. (c_parser_omp_construct): Handle PRAGMA_OMP_TASK. * tree-nested.c (convert_nonlocal_omp_clauses, convert_local_omp_clauses): Handle OMP_CLAUSE_LASTPRIVATE_STMT, OMP_CLAUSE_REDUCTION_INIT, OMP_CLAUSE_REDUCTION_MERGE, OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED. Don't handle TREE_STATIC or DECL_EXTERNAL VAR_DECLs in OMP_CLAUSE_DECL. (conver_nonlocal_reference, convert_local_reference, convert_call_expr): Handle OMP_TASK the same as OMP_PARALLEL. Use OMP_TASKREG_* macros rather than OMP_PARALLEL_*. (walk_omp_for): Adjust for OMP_FOR_{INIT,COND,INCR} changes. * tree-gimple.c (is_gimple_stmt): Handle OMP_TASK. 
* c-tree.h (c_begin_omp_task, c_finish_omp_task): New prototypes. * c-pragma.h (PRAGMA_OMP_TASK, PRAGMA_OMP_TASKWAIT): New. (PRAGMA_OMP_CLAUSE_COLLAPSE, PRAGMA_OMP_CLAUSE_UNTIED): New. * c-typeck.c (c_begin_omp_task, c_finish_omp_task): New functions. (c_finish_omp_clauses): Handle OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED. * c-pragma.c (init_pragma): Init omp task and omp taskwait pragmas. * c-common.h (c_finish_omp_taskwait): New prototype. * gimple-low.c (lower_stmt): Handle OMP_TASK. * tree-parloops.c (create_parallel_loop): Create 1 entry vectors for OMP_FOR_{INIT,COND,INCR}. * tree-cfg.c (remove_useless_stmts_1): Handle OMP_* containers. (make_edges): Handle OMP_TASK. * tree-ssa-operands.c (get_expr_operands): Handle collapsed OMP_FOR loops, adjust for OMP_FOR_{INIT,COND,INCR} changes. * tree-inline.c (estimate_num_insns_1): Handle OMP_TASK. * builtin-types.def (BT_PTR_ULONGLONG, BT_PTR_FN_VOID_PTR_PTR, BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR, BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): New. * omp-builtins.def (BUILT_IN_GOMP_TASK, BUILT_IN_GOMP_TASKWAIT, BUILT_IN_GOMP_LOOP_ULL_STATIC_START, BUILT_IN_GOMP_LOOP_ULL_DYNAMIC_START, BUILT_IN_GOMP_LOOP_ULL_GUIDED_START, BUILT_IN_GOMP_LOOP_ULL_RUNTIME_START, BUILT_IN_GOMP_LOOP_ULL_ORDERED_STATIC_START, BUILT_IN_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, BUILT_IN_GOMP_LOOP_ULL_ORDERED_GUIDED_START, BUILT_IN_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT, BUILT_IN_GOMP_LOOP_ULL_DYNAMIC_NEXT, BUILT_IN_GOMP_LOOP_ULL_GUIDED_NEXT, BUILT_IN_GOMP_LOOP_ULL_RUNTIME_NEXT, BUILT_IN_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, BUILT_IN_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, BUILT_IN_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, BUILT_IN_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT): New builtins. * gimplify.c (gimplify_omp_for): Allow pointer type for decl, handle POINTER_PLUS_EXPR. If loop counter has been replaced and original iterator is present in lastprivate clause or if collapse > 1, set OMP_CLAUSE_LASTPRIVATE_STMT. Handle collapsed OMP_FOR loops, adjust for OMP_FOR_{INIT,COND,INCR} changes. (gimplify_expr): Handle OMP_SECTIONS_SWITCH and OMP_TASK. (enum gimplify_omp_var_data): Add GOVD_PRIVATE_OUTER_REF. (omp_notice_variable): Set GOVD_PRIVATE_OUTER_REF if needed, if it is set, lookup var in outer contexts too. Handle OMP_CLAUSE_DEFAULT_FIRSTPRIVATE. Handle vars that are supposed to be implicitly determined firstprivate for task regions. (gimplify_scan_omp_clauses): Set GOVD_PRIVATE_OUTER_REF if needed, if it is set, lookup var in outer contexts too. Set OMP_CLAUSE_PRIVATE_OUTER_REF if GOVD_PRIVATE_OUTER_REF is set. Handle OMP_CLAUSE_LASTPRIVATE_STMT, OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED. Take region_type as last argument instead of in_parallel and in_combined_parallel. (gimplify_omp_parallel, gimplify_omp_for, gimplify_omp_workshare): Adjust callers. (gimplify_adjust_omp_clauses_1): Set OMP_CLAUSE_PRIVATE_OUTER_REF if GOVD_PRIVATE_OUTER_REF is set. Call omp_finish_clause langhook. (new_omp_context): Set default_kind to OMP_CLAUSE_DEFAULT_UNSPECIFIED for OMP_TASK regions. (omp_region_type): New enum. (struct gimplify_omp_ctx): Remove is_parallel and is_combined_parallel fields, add region_type. (new_omp_context): Take region_type as argument instead of is_parallel and is_combined_parallel. (gimple_add_tmp_var, omp_firstprivatize_variable, omp_notice_variable, omp_is_private, omp_check_private): Adjust ctx->is_parallel and ctx->is_combined_parallel checks. 
(gimplify_omp_task): New function. (gimplify_adjust_omp_clauses): Handle OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED. * omp-low.c (extract_omp_for_data): Use schedule(static) for schedule(auto). Handle pointer and unsigned iterators. Compute fd->iter_type. Handle POINTER_PLUS_EXPR increments. Add loops argument. Extract data for collapsed OMP_FOR loops. (expand_parallel_call): Assert sched_kind isn't auto, map runtime schedule to index 3. (struct omp_for_data_loop): New type. (struct omp_for_data): Remove v, n1, n2, step, cond_code fields. Add loop, loops, collapse and iter_type fields. (workshare_safe_to_combine_p): Disallow combined for if iter_type is unsigned long long. Don't combine collapse > 1 loops unless all bounds and steps are constant. Adjust extract_omp_for_data caller. (expand_omp_for_generic): Handle pointer, unsigned and long long iterators. Handle collapsed OMP_FOR loops. Adjust for struct omp_for_data changes. If libgomp function doesn't return boolean_type_node, add comparison of the return value with 0. (expand_omp_for_static_nochunk, expand_omp_for_static_chunk): Handle pointer, unsigned and long long iterators. Adjust for struct omp_for_data changes. (expand_omp_for): Assert sched_kind isn't auto, map runtime schedule to index 3. Use GOMP_loop_ull*{start,next} if iter_type is unsigned long long. Allocate loops array, pass it to extract_omp_for_data. For collapse > 1 loops use always expand_omp_for_generic. (omp_context): Add sfield_map and srecord_type fields. (is_task_ctx, lookup_sfield): New functions. (use_pointer_for_field): Use is_task_ctx helper. Change first argument's type from const_tree to tree. Clarify comment. In OMP_TASK disallow copy-in/out sharing. (build_sender_ref): Call lookup_sfield instead of lookup_field. (install_var_field): Add mask argument. Populate both record_type and srecord_type if needed. (delete_omp_context): Destroy sfield_map, clear DECL_ABSTRACT_ORIGIN in srecord_type. (fixup_child_record_type): Also remap FIELD_DECL's DECL_SIZE{,_UNIT} and DECL_FIELD_OFFSET. (scan_sharing_clauses): Adjust install_var_field callers. For firstprivate clauses on explicit tasks allocate the var by value in record_type unconditionally, rather than by reference. Handle OMP_CLAUSE_PRIVATE_OUTER_REF. Scan OMP_CLAUSE_LASTPRIVATE_STMT. Use is_taskreg_ctx instead of is_parallel_ctx. Handle OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED. (create_omp_child_function_name): Add task_copy argument, use *_omp_cpyfn* names if it is true. (create_omp_child_function): Add task_copy argument, if true create *_omp_cpyfn* helper function. (scan_omp_parallel): Adjust create_omp_child_function callers. Rename parallel_nesting_level to taskreg_nesting_level. (scan_omp_task): New function. (lower_rec_input_clauses): Don't run constructors for firstprivate explicit task vars which are initialized by *_omp_cpyfn*. Pass outer var ref to omp_clause_default_ctor hook if OMP_CLAUSE_PRIVATE_OUTER_REF or OMP_CLAUSE_LASTPRIVATE. Replace OMP_CLAUSE_REDUCTION_PLACEHOLDER decls in OMP_CLAUSE_REDUCTION_INIT. (lower_send_clauses): Clear DECL_ABSTRACT_ORIGIN if in task to avoid duplicate setting of fields. Handle OMP_CLAUSE_PRIVATE_OUTER_REF. (lower_send_shared_vars): Use srecord_type if non-NULL. Don't copy-out if TREE_READONLY, only copy-in. (expand_task_copyfn): New function. (expand_task_call): New function. (struct omp_taskcopy_context): New type. (task_copyfn_copy_decl, task_copyfn_remap_type, create_task_copyfn): New functions. (lower_omp_parallel): Rename to... (lower_omp_taskreg): ... 
this. Use OMP_TASKREG_* macros where needed. Call create_task_copyfn if srecord_type is needed. Adjust sender_decl type. (task_shared_vars): New variable. (check_omp_nesting_restrictions): Warn if work-sharing, barrier, master or ordered region is closely nested inside OMP_TASK. Add warnings for barrier if closely nested inside of work-sharing, ordered, or master region. (scan_omp_1): Call check_omp_nesting_restrictions even for GOMP_barrier calls. Rename parallel_nesting_level to taskreg_nesting_level. Handle OMP_TASK. (lower_lastprivate_clauses): Even if some lastprivate is found on a work-sharing construct, continue looking for them on parent parallel construct. (lower_omp_for_lastprivate): Add lastprivate clauses to the beginning of dlist rather than end. Adjust for struct omp_for_data changes. (lower_omp_for): Add rec input clauses before OMP_FOR_PRE_BODY, not after it. Handle collapsed OMP_FOR loops, adjust for OMP_FOR_{INIT,COND,INCR} changes, adjust extract_omp_for_data caller. (get_ws_args_for): Adjust extract_omp_for_data caller. (scan_omp_for): Handle collapsed OMP_FOR loops, adjust for OMP_FOR_{INIT,COND,INCR} changes. (lower_omp_single_simple): If libgomp function doesn't return boolean_type_node, add comparison of the return value with 0. (diagnose_sb_1, diagnose_sb_2): Handle collapsed OMP_FOR loops, adjust for OMP_FOR_{INIT,COND,INCR} changes. Handle OMP_TASK. (parallel_nesting_level): Rename to... (taskreg_nesting_level): ... this. (is_taskreg_ctx): New function. (build_outer_var_ref, omp_copy_decl): Use is_taskreg_ctx instead of is_parallel_ctx. (execute_lower_omp): Rename parallel_nesting_level to taskreg_nesting_level. (expand_omp_parallel): Rename to... (expand_omp_taskreg): ... this. Use OMP_TASKREG_* macros where needed. Call omp_task_call for OMP_TASK regions. (expand_omp): Adjust caller, handle OMP_TASK. (lower_omp_1): Adjust lower_omp_taskreg caller, handle OMP_TASK. * bitmap.c (bitmap_default_obstack_depth): New variable. (bitmap_obstack_initialize, bitmap_obstack_release): Do nothing if argument is NULL and bitmap_default_obstack is already initialized. * ipa-struct-reorg.c (do_reorg_1): Call bitmap_obstack_release at the end. * matrix-reorg.c (matrix_reorg): Likewise. cp/ * cp-tree.h (cxx_omp_finish_clause, cxx_omp_create_clause_info, dependent_omp_for_p, begin_omp_task, finish_omp_task, finish_omp_taskwait): New prototypes. (cxx_omp_clause_default_ctor): Add outer argument. (finish_omp_for): Add new clauses argument. * cp-gimplify.c (cxx_omp_finish_clause): New function. (cxx_omp_predetermined_sharing): Moved from semantics.c, rewritten. (cxx_omp_clause_default_ctor): Add outer argument. (cp_genericize_r): Walk OMP_CLAUSE_LASTPRIVATE_STMT. * cp-objcp-common.h (LANG_HOOKS_OMP_FINISH_CLAUSE): Define. * parser.c (cp_parser_omp_for_loop): Parse collapsed for loops. Add par_clauses argument. If decl is present in parallel's lastprivate clause, change that clause to shared and add a lastprivate clause for decl to OMP_FOR_CLAUSES. Fix wording of error messages. Adjust finish_omp_for caller. Add clauses argument. Parse loops with random access iterators. (cp_parser_omp_clause_collapse, cp_parser_omp_clause_untied): New functions. (cp_parser_omp_for, cp_parser_omp_parallel): Adjust cp_parser_omp_for_loop callers. (cp_parser_omp_for_cond, cp_parser_omp_for_incr): New helper functions. (cp_parser_omp_clause_name): Handle collapse and untied clauses. (cp_parser_omp_clause_schedule): Handle auto schedule. 
(cp_parser_omp_all_clauses): Handle PRAGMA_OMP_CLAUSE_COLLAPSE and PRAGMA_OMP_CLAUSE_UNTIED. (OMP_FOR_CLAUSE_MASK): Add PRAGMA_OMP_CLAUSE_COLLAPSE. (OMP_TASK_CLAUSE_MASK): Define. (cp_parser_omp_task, cp_parser_omp_taskwait): New functions. (cp_parser_omp_construct): Handle PRAGMA_OMP_TASK. (cp_parser_pragma): Handle PRAGMA_OMP_TASK and PRAGMA_OMP_TASKWAIT. * pt.c (tsubst_omp_clauses): Handle OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED. Handle OMP_CLAUSE_LASTPRIVATE_STMT. (tsubst_omp_for_iterator): New function. (dependent_omp_for_p): New function. (tsubst_expr) <case OMP_FOR>: Use it. Handle collapsed OMP_FOR loops. Adjust finish_omp_for caller. Handle loops with random access iterators. Adjust for OMP_FOR_{INIT,COND,INCR} changes. (tsubst_expr): Handle OMP_TASK. * semantics.c (cxx_omp_create_clause_info): New function. (finish_omp_clauses): Call it. Handle OMP_CLAUSE_UNTIED and OMP_CLAUSE_COLLAPSE. (cxx_omp_predetermined_sharing): Removed. * semantics.c (finish_omp_for): Allow pointer iterators. Use handle_omp_for_class_iterator and dependent_omp_for_p. Handle collapsed for loops. Adjust c_finish_omp_for caller. Add new clauses argument. Fix check for type dependent cond or incr. Set OMP_FOR_CLAUSES to clauses. Use cp_convert instead of fold_convert to convert incr amount to difference_type. Only fold if not in template. If decl is mentioned in lastprivate clause, set OMP_CLAUSE_LASTPRIVATE_STMT. Handle loops with random access iterators. Adjust for OMP_FOR_{INIT,COND,INCR} changes. (finish_omp_threadprivate): Allow static class members of the current class. (handle_omp_for_class_iterator, begin_omp_task, finish_omp_task, finish_omp_taskwait): New functions. * parser.c (cp_parser_binary_expression): Add prec argument. (cp_parser_assignment_expression): Adjust caller. * cp-tree.h (outer_curly_brace_block): New prototype. * decl.c (outer_curly_brace_block): No longer static. fortran/ * scanner.c (skip_free_comments, skip_fixed_comments): Handle tabs. * parse.c (next_free): Allow tab after !$omp. (decode_omp_directive): Handle !$omp task, !$omp taskwait and !$omp end task. (case_executable): Add ST_OMP_TASKWAIT. (case_exec_markers): Add ST_OMP_TASK. (gfc_ascii_statement): Handle ST_OMP_TASK, ST_OMP_END_TASK and ST_OMP_TASKWAIT. (parse_omp_structured_block, parse_executable): Handle ST_OMP_TASK. * gfortran.h (gfc_find_sym_in_expr): New prototype. (gfc_statement): Add ST_OMP_TASK, ST_OMP_END_TASK and ST_OMP_TASKWAIT. (gfc_omp_clauses): Add OMP_SCHED_AUTO to sched_kind, OMP_DEFAULT_FIRSTPRIVATE to default_sharing. Add collapse and untied fields. (gfc_exec_op): Add EXEC_OMP_TASK and EXEC_OMP_TASKWAIT. * f95-lang.c (LANG_HOOKS_OMP_CLAUSE_COPY_CTOR, LANG_HOOKS_OMP_CLAUSE_ASSIGN_OP, LANG_HOOKS_OMP_CLAUSE_DTOR, LANG_HOOKS_OMP_PRIVATE_OUTER_REF): Define. * trans.h (gfc_omp_clause_default_ctor): Add another argument. (gfc_omp_clause_copy_ctor, gfc_omp_clause_assign_op, gfc_omp_clause_dtor, gfc_omp_private_outer_ref): New prototypes. * types.def (BT_ULONGLONG, BT_PTR_ULONGLONG, BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR, BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR, BT_FN_VOID_PTR_PTR, BT_PTR_FN_VOID_PTR_PTR, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): New. (BT_BOOL): Use integer type with BOOL_TYPE_SIZE rather than boolean_type_node. * dump-parse-tree.c (gfc_show_omp_node): Handle EXEC_OMP_TASK, EXEC_OMP_TASKWAIT, OMP_SCHED_AUTO, OMP_DEFAULT_FIRSTPRIVATE, untied and collapse clauses. (gfc_show_code_node): Handle EXEC_OMP_TASK and EXEC_OMP_TASKWAIT. 
* trans.c (gfc_trans_code): Handle EXEC_OMP_TASK and EXEC_OMP_TASKWAIT. * st.c (gfc_free_statement): Likewise. * resolve.c (gfc_resolve_blocks, resolve_code): Likewise. (find_sym_in_expr): Rename to... (gfc_find_sym_in_expr): ... this. No longer static. (resolve_allocate_expr, resolve_ordinary_assign): Adjust caller. * match.h (gfc_match_omp_task, gfc_match_omp_taskwait): New prototypes. * openmp.c (resolve_omp_clauses): Allow allocatable arrays in firstprivate, lastprivate, reduction, copyprivate and copyin clauses. (omp_current_do_code): Made static. (omp_current_do_collapse): New variable. (gfc_resolve_omp_do_blocks): Compute omp_current_do_collapse, clear omp_current_do_code and omp_current_do_collapse on return. (gfc_resolve_do_iterator): Handle collapsed do loops. (resolve_omp_do): Likewise, diagnose errorneous collapsed do loops. (OMP_CLAUSE_COLLAPSE, OMP_CLAUSE_UNTIED): Define. (gfc_match_omp_clauses): Handle default (firstprivate), schedule (auto), untied and collapse (n) clauses. (OMP_DO_CLAUSES): Add OMP_CLAUSE_COLLAPSE. (OMP_TASK_CLAUSES): Define. (gfc_match_omp_task, gfc_match_omp_taskwait): New functions. * trans-openmp.c (gfc_omp_private_outer_ref): New function. (gfc_omp_clause_default_ctor): Add outer argument. For allocatable arrays allocate them with the bounds of the outer var if outer var is allocated. (gfc_omp_clause_copy_ctor, gfc_omp_clause_assign_op, gfc_omp_clause_dtor): New functions. (gfc_trans_omp_array_reduction): If decl is allocatable array, allocate it with outer var's bounds in OMP_CLAUSE_REDUCTION_INIT and deallocate it in OMP_CLAUSE_REDUCTION_MERGE. (gfc_omp_predetermined_sharing): Return OMP_CLAUSE_DEFAULT_SHARED for assumed-size arrays. (gfc_trans_omp_do): Add par_clauses argument. If dovar is present in lastprivate clause and do loop isn't simple, set OMP_CLAUSE_LASTPRIVATE_STMT. If dovar is present in parallel's lastprivate clause, change it to shared and add lastprivate clause to OMP_FOR_CLAUSES. Handle collapsed do loops. (gfc_trans_omp_directive): Adjust gfc_trans_omp_do callers. (gfc_trans_omp_parallel_do): Likewise. Move collapse clause to OMP_FOR from OMP_PARALLEL. (gfc_trans_omp_clauses): Handle OMP_SCHED_AUTO, OMP_DEFAULT_FIRSTPRIVATE, untied and collapse clauses. (gfc_trans_omp_task, gfc_trans_omp_taskwait): New functions. (gfc_trans_omp_directive): Handle EXEC_OMP_TASK and EXEC_OMP_TASKWAIT. gcc/testsuite/ * gcc.dg/gomp/collapse-1.c: New test. * gcc.dg/gomp/nesting-1.c: New test. * g++.dg/gomp/task-1.C: New test. * g++.dg/gomp/predetermined-1.C: New test. * g++.dg/gomp/tls-4.C: New test. * gfortran.dg/gomp/collapse1.f90: New test. * gfortran.dg/gomp/sharing-3.f90: New test. * gcc.dg/gomp/pr27499.c (foo): Remove is unsigned dg-warning. * g++.dg/gomp/pr27499.C (foo): Likewise. * g++.dg/gomp/for-16.C (foo): Likewise. * g++.dg/gomp/tls-3.C: Remove dg-error, add S::s definition. * g++.dg/gomp/pr34607.C: Adjust dg-error location. * g++.dg/gomp/for-16.C (foo): Add a new dg-error. * gcc.dg/gomp/appendix-a/a.35.4.c: Add dg-warning. * gcc.dg/gomp/appendix-a/a.35.6.c: Likewise. * gfortran.dg/gomp/appendix-a/a.35.4.f90: Likewise. * gfortran.dg/gomp/appendix-a/a.35.6.f90: Likewise. * gfortran.dg/gomp/omp_parse1.f90: Remove !$omp tab test. * gfortran.dg/gomp/appendix-a/a.33.4.f90: Remove dg-error about allocatable array. * gfortran.dg/gomp/reduction1.f90: Likewise. libgomp/ * configure.ac (LIBGOMP_GNU_SYMBOL_VERSIONING): New AC_DEFINE. Substitute also OMP_*LOCK_25*. * configure: Regenerated. * config.h.in: Regenerated. 
* Makefile.am (libgomp_la_SOURCES): Add loop_ull.c, iter_ull.c, ptrlock.c and task.c. * Makefile.in: Regenerated. * testsuite/Makefile.in: Regenerated. * task.c: New file. * loop_ull.c: New file. * iter_ull.c: New file. * libgomp.h: Include ptrlock.h. (enum gomp_task_kind): New type. (struct gomp_team): Add task_lock, task_queue, task_count, task_running_count, single_count fields. Add work_share_list_free_lock ifndef HAVE_SYNC_BUILTINS. Remove work_share_lock, generation_mask, oldest_live_gen, num_live_gen and init_work_shares fields, add work work_share_list_alloc, work_share_list_free and work_share_chunk fields. Change work_shares from pointer to pointers into an array. Change ordered_release field into gomp_sem_t ** from flexible array member. Add implicit_task and initial_work_shares fields. Move close to the end of the struct. (struct gomp_team_state): Add single_count, last_work_share, active_level and level fields, remove work_share_generation. (gomp_barrier_handle_tasks): New prototype. (gomp_finish_task): New inline function. (struct gomp_work_share): Move chunk_size, end, incr into transparent union/struct, add chunk_size_ull, end_ll, incr_ll and next_ll fields. Reshuffle fields. Add next_alloc, next_ws, next_free and inline_ordered_team_ids fields, change ordered_team_ids into pointer from flexible array member. Add mode field. Put lock and next into a different cache line from most of the write-once fields. (gomp_iter_ull_static_next, gomp_iter_ull_dynamic_next_locked, gomp_iter_ull_guided_next_locked, gomp_iter_ull_dynamic_next, gomp_iter_ull_guided_next): New prototypes. (gomp_new_icv): New prototype. (struct gomp_thread): Add thread_pool and task fields. (struct gomp_thread_pool): New type. (gomp_new_team): New prototype. (gomp_team_start): Change type of last argument. (gomp_new_work_share): Removed. (gomp_init_work_share, gomp_fini_work_share): New prototypes. (gomp_work_share_init_done): New static inline. (gomp_throttled_spin_count_var, gomp_available_cpus, gomp_managed_threads): New extern decls. (gomp_init_task): New prototype. (gomp_spin_count_var): New extern var decl. (LIBGOMP_GNU_SYMBOL_VERSIONING): Undef if no visibility or no alias support, or if not PIC. (gomp_init_lock_30, gomp_destroy_lock_30, gomp_set_lock_30, gomp_unset_lock_30, gomp_test_lock_30, gomp_init_nest_lock_30, gomp_destroy_nest_lock_30, gomp_set_nest_lock_30, gomp_unset_nest_lock_30, gomp_test_nest_lock_30, gomp_init_lock_25, gomp_destroy_lock_25, gomp_set_lock_25, gomp_unset_lock_25, gomp_test_lock_25, gomp_init_nest_lock_25, gomp_destroy_nest_lock_25, gomp_set_nest_lock_25, gomp_unset_nest_lock_25, gomp_test_nest_lock_25): New prototypes. (omp_lock_symver, strong_alias): Define. (gomp_remaining_threads_count, gomp_remaining_threads_lock): New decls. (gomp_end_task): New. (struct gomp_task_icv, gomp_global_icv): New. (gomp_thread_limit_var, gomp_max_active_levels_var): New. (struct gomp_task): New. (gomp_nthreads_var, gomp_dyn_var, gomp_nest_var, gomp_run_sched_var, gomp_run_sched_chunk): Remove. (gomp_icv): New. (gomp_schedule_type): Reorder enum to match omp_sched_t. * team.c (struct gomp_thread_start_data): Add thread_pool and task fields. (gomp_thread_start): Add gomp_team_barrier_wait call. For non-nested case remove clearing of docked thread thr fields. Use pool fields instead of global gomp_* variables. Use gomp_barrier_wait_last when needed. Initialize ts.active_level. Create tasks for each member thread. (free_team): Only destroy team barrier, task_lock here and free it. 
(gomp_free_thread): Free last_team if non-NULL. (gomp_team_end): Call gomp_team_barrier_wait instead of gomp_barrier_wait. For nested case call one extra gomp_barrier_wait. Move here some destruction from free_team. Call free_team on pool->last_team if any, rather than freeing current team. Destroy work_share_list_free_lock ifndef HAVE_SYNC_BUILTINS. (gomp_new_icv): New function. (gomp_threads, gomp_threads_size, gomp_threads_used, gomp_threads_dock): Removed. (gomp_thread_destructor): New variable. (gomp_new_thread_pool, gomp_free_pool_helper, gomp_free_thread): New functions. (gomp_team_start): Create new pool if current thread doesn't have one. Use pool fields instead of global gomp_* variables. Initialize thread_pool field for new threads. Clear single_count. Change last argument from ws to team, don't create new team, set ts.work_share to &team->work_shares[0] and clear ts.last_work_share. Don't clear ts.work_share_generation. If number of threads changed, adjust atomically gomp_managed_threads. Use gomp_init_task instead of gomp_new_task, set thr->task to the corresponding implicit_task array entry. Create tasks for each member thread. Initialize ts.level. (initialize_team): Call pthread_key_create on gomp_thread_destructor. (team_destructor): New function. (new_team): Removed. (gomp_new_team): New function. (free_team): Free gomp_work_share blocks chained through next_alloc, instead of freeing work_shares and destroying work_share_lock. (gomp_team_end): Call gomp_fini_work_share. If number of threads changed, adjust atomically gomp_managed_threads. Use gomp_end_task. * barrier.c (GOMP_barrier): Call gomp_team_barrier_wait instead of gomp_barrier_wait. * single.c (GOMP_single_copy_start): Call gomp_team_barrier_wait instead of gomp_barrier_wait. Call gomp_work_share_init_done if gomp_work_share_start returned true. Don't unlock ws->lock. (GOMP_single_copy_end): Call gomp_team_barrier_wait instead of gomp_barrier_wait. (GOMP_single_start): Rewritten if HAVE_SYNC_BUILTINS. Call gomp_work_share_init_done if gomp_work_share_start returned true. Don't unlock ws->lock. * work.c: Include stddef.h. (free_work_share): Use work_share_list_free_lock instead of atomic chaining ifndef HAVE_SYNC_BUILTINS. Add team argument. Call gomp_fini_work_share and then either free ws if orphaned, or put it into work_share_list_free list of the current team. (alloc_work_share, gomp_init_work_share, gomp_fini_work_share): New functions. (gomp_work_share_start, gomp_work_share_end, gomp_work_share_end_nowait): Rewritten. * omp_lib.f90.in Change some tabs to spaces to prevent warnings. (openmp_version): Set to 200805. (omp_sched_kind, omp_sched_static, omp_sched_dynamic, omp_sched_guided, omp_sched_auto): New parameters. (omp_set_schedule, omp_get_schedule, omp_get_thread_limit, omp_set_max_active_levels, omp_get_max_active_levels, omp_get_level, omp_get_ancestor_thread_num, omp_get_team_size, omp_get_active_level): New interfaces. * omp_lib.h.in (openmp_version): Set to 200805. (omp_sched_kind, omp_sched_static, omp_sched_dynamic, omp_sched_guided, omp_sched_auto): New parameters. (omp_set_schedule, omp_get_schedule, omp_get_thread_limit, omp_set_max_active_levels, omp_get_max_active_levels, omp_get_level, omp_get_ancestor_thread_num, omp_get_team_size, omp_get_active_level): New externals. * loop.c: Include limits.h. (GOMP_loop_runtime_next, GOMP_loop_ordered_runtime_next): Handle GFS_AUTO. (GOMP_loop_runtime_start, GOMP_loop_ordered_runtime_start): Likewise. Use gomp_icv. 
(gomp_loop_static_start, gomp_loop_dynamic_start): Clear ts.static_trip here. (gomp_loop_static_start, gomp_loop_ordered_static_start): Call gomp_work_share_init_done after gomp_loop_init. Don't unlock ws->lock. (gomp_loop_dynamic_start, gomp_loop_guided_start): Call gomp_work_share_init_done after gomp_loop_init. If HAVE_SYNC_BUILTINS, don't unlock ws->lock, otherwise lock it. (gomp_loop_ordered_dynamic_start, gomp_loop_ordered_guided_start): Call gomp_work_share_init_done after gomp_loop_init. Lock ws->lock. (gomp_parallel_loop_start): Call gomp_new_team instead of gomp_new_work_share. Call gomp_loop_init on &team->work_shares[0]. Adjust gomp_team_start caller. Pass 0 as second argument to gomp_resolve_num_threads. (gomp_loop_init): For GFS_DYNAMIC, multiply ws->chunk_size by incr. If adding ws->chunk_size nthreads + 1 times after end won't overflow, set ws->mode to 1. * libgomp_g.h (GOMP_loop_ull_static_start, GOMP_loop_ull_dynamic_start, GOMP_loop_ull_guided_start, GOMP_loop_ull_runtime_start, GOMP_loop_ull_ordered_static_start, GOMP_loop_ull_ordered_dynamic_start, GOMP_loop_ull_ordered_guided_start, GOMP_loop_ull_ordered_runtime_start, GOMP_loop_ull_static_next, GOMP_loop_ull_dynamic_next, GOMP_loop_ull_guided_next, GOMP_loop_ull_runtime_next, GOMP_loop_ull_ordered_static_next, GOMP_loop_ull_ordered_dynamic_next, GOMP_loop_ull_ordered_guided_next, GOMP_loop_ull_ordered_runtime_next, GOMP_task, GOMP_taskwait): New prototypes. * libgomp.map: Export lock routines also @@OMP_2.0. (GOMP_loop_ordered_dynamic_first, GOMP_loop_ordered_guided_first, GOMP_loop_ordered_runtime_first, GOMP_loop_ordered_static_first): Remove. (GOMP_loop_ull_dynamic_next, GOMP_loop_ull_dynamic_start, GOMP_loop_ull_guided_next, GOMP_loop_ull_guided_start, GOMP_loop_ull_ordered_dynamic_next, GOMP_loop_ull_ordered_dynamic_start, GOMP_loop_ull_ordered_guided_next, GOMP_loop_ull_ordered_guided_start, GOMP_loop_ull_ordered_runtime_next, GOMP_loop_ull_ordered_runtime_start, GOMP_loop_ull_ordered_static_next, GOMP_loop_ull_ordered_static_start, GOMP_loop_ull_runtime_next, GOMP_loop_ull_runtime_start, GOMP_loop_ull_static_next, GOMP_loop_ull_static_start, GOMP_task, GOMP_taskwait): Export @@GOMP_2.0. (omp_set_schedule, omp_get_schedule, omp_get_thread_limit, omp_set_max_active_levels, omp_get_max_active_levels, omp_get_level, omp_get_ancestor_thread_num, omp_get_team_size, omp_get_active_level, omp_set_schedule_, omp_set_schedule_8_, omp_get_schedule_, omp_get_schedule_8_, omp_get_thread_limit_, omp_set_max_active_levels_, omp_set_max_active_levels_8_, omp_get_max_active_levels_, omp_get_level_, omp_get_ancestor_thread_num_, omp_get_ancestor_thread_num_8_, omp_get_team_size_, omp_get_team_size_8_, omp_get_active_level_): New exports @@OMP_3.0. * omp.h.in (omp_sched_t): New type. (omp_set_schedule, omp_get_schedule, omp_get_thread_limit, omp_set_max_active_levels, omp_get_max_active_levels, omp_get_level, omp_get_ancestor_thread_num, omp_get_team_size, omp_get_active_level): New prototypes. * env.c (gomp_spin_count_var, gomp_throttled_spin_count_var, gomp_available_cpus, gomp_managed_threads, gomp_max_active_levels_var, gomp_thread_limit_var, gomp_remaining_threads_count, gomp_remaining_threads_lock): New variables. (parse_spincount): New function. (initialize_env): Call gomp_init_num_threads unconditionally. Initialize gomp_available_cpus. Call parse_spincount, initialize gomp_{,throttled_}spin_count_var depending on presence and value of OMP_WAIT_POLICY and GOMP_SPINCOUNT env vars. Handle GOMP_BLOCKTIME env var. 
Handle OMP_WAIT_POLICY, OMP_MAX_ACTIVE_LEVELS, OMP_THREAD_LIMIT, OMP_STACKSIZE env vars. Handle unit specification for GOMP_STACKSIZE. Initialize gomp_remaining_threads_count and gomp_remaining_threads_lock if needed. Use gomp_global_icv. (gomp_nthreads_var, gomp_dyn_var, gomp_nest_var, gomp_run_sched_var, gomp_run_sched_chunk): Remove. (gomp_global_icv): New. (parse_schedule): Use it. Parse "auto". (omp_set_num_threads): Use gomp_icv. (omp_set_dynamic, omp_get_dynamic, omp_set_nested, omp_get_nested): Likewise. (omp_get_max_threads): Move from parallel.c. (omp_set_schedule, omp_get_schedule, omp_get_thread_limit, omp_set_max_active_levels, omp_get_max_active_levels): New functions, add ialias. (parse_stacksize, parse_wait_policy): New functions. * fortran.c: Rewrite lock wrappers, if symbol versioning provide both wrappers for compatibility and new locks. (omp_set_schedule, omp_get_schedule, omp_get_thread_limit, omp_set_max_active_levels, omp_get_max_active_levels, omp_get_level, omp_get_ancestor_thread_num, omp_get_team_size, omp_get_active_level): New ialias_redirect. (omp_set_schedule_, omp_set_schedule_8_, omp_get_schedule_, omp_get_schedule_8_, omp_get_thread_limit_, omp_set_max_active_levels_, omp_set_max_active_levels_8_, omp_get_max_active_levels_, omp_get_level_, omp_get_ancestor_thread_num_, omp_get_ancestor_thread_num_8_, omp_get_team_size_, omp_get_team_size_8_, omp_get_active_level_): New functions. * parallel.c: Include limits.h. (gomp_resolve_num_threads): Add count argument. Rewritten. (GOMP_parallel_start): Call gomp_new_team and pass that as last argument to gomp_team_start. Pass 0 as second argument to gomp_resolve_num_threads. (GOMP_parallel_end): Decrease gomp_remaining_threads_count if gomp_thread_limit_var != ULONG_MAX. (omp_in_parallel): Implement using ts.active_level. (omp_get_max_threads): Move to env.c. (omp_get_level, omp_get_ancestor_thread_num, omp_get_team_size, omp_get_active_level): New functions, add ialias. * sections.c (GOMP_sections_start): Call gomp_work_share_init_done after gomp_sections_init. If HAVE_SYNC_BUILTINS, call gomp_iter_dynamic_next instead of the _locked variant and don't take lock around it, otherwise acquire it before calling gomp_iter_dynamic_next_locked. (GOMP_sections_next): If HAVE_SYNC_BUILTINS, call gomp_iter_dynamic_next instead of the _locked variant and don't take lock around it. (GOMP_parallel_sections_start): Call gomp_new_team instead of gomp_new_work_share. Call gomp_sections_init on &team->work_shares[0]. Adjust gomp_team_start caller. Pass count as second argument to gomp_resolve_num_threads, don't adjust num_threads after the call. Use gomp_icv. * iter.c (gomp_iter_dynamic_next_locked): Don't multiply ws->chunk_size by incr. (gomp_iter_dynamic_next): Likewise. If ws->mode, use more efficient code. * libgomp_f.h.in (omp_lock_25_arg_t, omp_nest_lock_25_arg_t): New types. (omp_lock_25_arg, omp_nest_lock_25_arg): New macros. (omp_check_defines): Check even the compat defines. * config/linux/ptrlock.c: New file. * config/linux/ptrlock.h: New file. * config/linux/wait.h: New file. * config/posix/ptrlock.c: New file. * config/posix/ptrlock.h: New file. * config/linux/bar.h (gomp_team_barrier_wait, gomp_team_barrier_wait_end, gomp_team_barrier_wake): New prototypes. (gomp_team_barrier_set_task_pending, gomp_team_barrier_clear_task_pending, gomp_team_barrier_set_waiting_for_tasks, gomp_team_barrier_waiting_for_tasks, gomp_team_barrier_done): New inlines. (gomp_barrier_t): Rewritten. (gomp_barrier_state_t): New typedef. 
(gomp_barrier_init, gomp_barrier_reinit, gomp_barrier_destroy, gomp_barrier_wait_start): Rewritten. (gomp_barrier_wait_end): Change second argument to gomp_barrier_state_t. (gomp_barrier_last_thread, gomp_barrier_wait_last): New static inlines. * config/linux/bar.c: Include wait.h instead of libgomp.h and futex.h. (gomp_barrier_wait_end): Rewritten. (gomp_team_barrier_wait, gomp_team_barrier_wait_end, gomp_team_barrier_wake, gomp_barrier_wait_last): New functions. * config/posix/bar.h (gomp_barrier_t): Add generation field. (gomp_barrier_state_t): New typedef. (gomp_team_barrier_wait, gomp_team_barrier_wait_end, gomp_team_barrier_wake): New prototypes. (gomp_barrier_wait_start): Or all but low 2 bits from generation into the return value. Return gomp_barrier_state_t. (gomp_team_barrier_set_task_pending, gomp_team_barrier_clear_task_pending, gomp_team_barrier_set_waiting_for_tasks, gomp_team_barrier_waiting_for_tasks, gomp_team_barrier_done): New inlines. (gomp_barrier_wait_end): Change second argument to gomp_barrier_state_t. (gomp_barrier_last_thread, gomp_barrier_wait_last): New static inlines. * config/posix/bar.c (gomp_barrier_init): Clear generation field. (gomp_barrier_wait_end): Change second argument to gomp_barrier_state_t. (gomp_team_barrier_wait, gomp_team_barrier_wait_end, gomp_team_barrier_wake): New functions. * config/linux/mutex.c: Include wait.h instead of libgomp.h and futex.h. (gomp_futex_wake, gomp_futex_wait): New variables. (gomp_mutex_lock_slow): Call do_wait instead of futex_wait. * config/linux/lock.c: Rewrite to make locks task owned, for backwards compatibility provide the old entrypoints if symbol versioning. Include wait.h instead of libgomp.h and futex.h. (gomp_set_nest_lock_25): Call do_wait instead of futex_wait. * config/posix95/lock.c: Rewrite to make locks task owned, for backwards compatibility provide the old entrypoints if symbol versioning. * config/posix/lock.c: Rewrite to make locks task owned, for backwards compatibility provide the old entrypoints if symbol versioning. * config/linux/proc.c (gomp_init_num_threads): Use gomp_global_icv. (get_num_procs, gomp_dynamic_max_threads): Use gomp_icv. * config/posix/proc.c, config/mingw32/proc.c: Similarly. * config/linux/powerpc/futex.h (FUTEX_WAIT, FUTEX_WAKE): Remove. (sys_futex0): Return error code. (futex_wake, futex_wait): If ENOSYS was returned, clear FUTEX_PRIVATE_FLAG in gomp_futex_wa{ke,it} and retry. (cpu_relax, atomic_write_barrier): New static inlines. * config/linux/alpha/futex.h (FUTEX_WAIT, FUTEX_WAKE): Remove. (futex_wake, futex_wait): If ENOSYS was returned, clear FUTEX_PRIVATE_FLAG in gomp_futex_wa{ke,it} and retry. (cpu_relax, atomic_write_barrier): New static inlines. * config/linux/x86/futex.h (FUTEX_WAIT, FUTEX_WAKE): Remove. (sys_futex0): Return error code. (futex_wake, futex_wait): If ENOSYS was returned, clear FUTEX_PRIVATE_FLAG in gomp_futex_wa{ke,it} and retry. (cpu_relax, atomic_write_barrier): New static inlines. * config/linux/s390/futex.h (FUTEX_WAIT, FUTEX_WAKE): Remove. (sys_futex0): Return error code. (futex_wake, futex_wait): If ENOSYS was returned, clear FUTEX_PRIVATE_FLAG in gomp_futex_wa{ke,it} and retry. (cpu_relax, atomic_write_barrier): New static inlines. * config/linux/ia64/futex.h (FUTEX_WAIT, FUTEX_WAKE): Remove. (sys_futex0): Return error code. (futex_wake, futex_wait): If ENOSYS was returned, clear FUTEX_PRIVATE_FLAG in gomp_futex_wa{ke,it} and retry. (cpu_relax, atomic_write_barrier): New static inlines. 
* config/linux/sparc/futex.h (FUTEX_WAIT, FUTEX_WAKE): Remove. (sys_futex0): Return error code. (futex_wake, futex_wait): If ENOSYS was returned, clear FUTEX_PRIVATE_FLAG in gomp_futex_wa{ke,it} and retry. (cpu_relax, atomic_write_barrier): New static inlines. * config/linux/sem.c: Include wait.h instead of libgomp.h and futex.h. (gomp_sem_wait_slow): Call do_wait instead of futex_wait. * config/linux/affinity.c: Assume HAVE_SYNC_BUILTINS. * config/linux/omp-lock.h (omp_lock_25_t, omp_nest_lock_25_t): New types. (omp_nest_lock_t): Change owner into void *, add lock field. * config/posix95/omp-lock.h: Include semaphore.h. (omp_lock_25_t, omp_nest_lock_25_t): New types. (omp_lock_t): Use sem_t instead of mutex if semaphores aren't broken. (omp_nest_lock_t): Likewise. Change owner to void *. * config/posix/omp-lock.h: Include semaphore.h. (omp_lock_25_t, omp_nest_lock_25_t): New types. (omp_lock_t): Use sem_t instead of mutex if semaphores aren't broken. (omp_nest_lock_t): Likewise. Add owner field. * testsuite/libgomp.c/collapse-1.c: New test. * testsuite/libgomp.c/collapse-2.c: New test. * testsuite/libgomp.c/collapse-3.c: New test. * testsuite/libgomp.c/icv-1.c: New test. * testsuite/libgomp.c/icv-2.c: New test. * testsuite/libgomp.c/lib-2.c: New test. * testsuite/libgomp.c/lock-1.c: New test. * testsuite/libgomp.c/lock-2.c: New test. * testsuite/libgomp.c/lock-3.c: New test. * testsuite/libgomp.c/loop-4.c: New test. * testsuite/libgomp.c/loop-5.c: New test. * testsuite/libgomp.c/loop-6.c: New test. * testsuite/libgomp.c/loop-7.c: New test. * testsuite/libgomp.c/loop-8.c: New test. * testsuite/libgomp.c/loop-9.c: New test. * testsuite/libgomp.c/nested-3.c: New test. * testsuite/libgomp.c/nestedfn-6.c: New test. * testsuite/libgomp.c/sort-1.c: New test. * testsuite/libgomp.c/task-1.c: New test. * testsuite/libgomp.c/task-2.c: New test. * testsuite/libgomp.c/task-3.c: New test. * testsuite/libgomp.c/task-4.c: New test. * testsuite/libgomp.c++/c++.exp: Add libstdc++-v3 build includes to C++ testsuite default compiler options. * testsuite/libgomp.c++/collapse-1.C: New test. * testsuite/libgomp.c++/collapse-2.C: New test. * testsuite/libgomp.c++/ctor-10.C: New test. * testsuite/libgomp.c++/for-1.C: New test. * testsuite/libgomp.c++/for-2.C: New test. * testsuite/libgomp.c++/for-3.C: New test. * testsuite/libgomp.c++/for-4.C: New test. * testsuite/libgomp.c++/for-5.C: New test. * testsuite/libgomp.c++/loop-8.C: New test. * testsuite/libgomp.c++/loop-9.C: New test. * testsuite/libgomp.c++/loop-10.C: New test. * testsuite/libgomp.c++/task-1.C: New test. * testsuite/libgomp.c++/task-2.C: New test. * testsuite/libgomp.c++/task-3.C: New test. * testsuite/libgomp.c++/task-4.C: New test. * testsuite/libgomp.c++/task-5.C: New test. * testsuite/libgomp.c++/task-6.C: New test. * testsuite/libgomp.fortran/allocatable1.f90: New test. * testsuite/libgomp.fortran/allocatable2.f90: New test. * testsuite/libgomp.fortran/allocatable3.f90: New test. * testsuite/libgomp.fortran/allocatable4.f90: New test. * testsuite/libgomp.fortran/collapse1.f90: New test. * testsuite/libgomp.fortran/collapse2.f90: New test. * testsuite/libgomp.fortran/collapse3.f90: New test. * testsuite/libgomp.fortran/collapse4.f90: New test. * testsuite/libgomp.fortran/lastprivate1.f90: New test. * testsuite/libgomp.fortran/lastprivate2.f90: New test. * testsuite/libgomp.fortran/lib4.f90: New test. * testsuite/libgomp.fortran/lock-1.f90: New test. * testsuite/libgomp.fortran/lock-2.f90: New test. 
* testsuite/libgomp.fortran/nested1.f90: New test. * testsuite/libgomp.fortran/nestedfn4.f90: New test. * testsuite/libgomp.fortran/strassen.f90: New test. * testsuite/libgomp.fortran/tabs1.f90: New test. * testsuite/libgomp.fortran/tabs2.f: New test. * testsuite/libgomp.fortran/task1.f90: New test. * testsuite/libgomp.fortran/task2.f90: New test. * testsuite/libgomp.fortran/vla4.f90: Add dg-warning. * testsuite/libgomp.fortran/vla5.f90: Likewise. * testsuite/libgomp.c/pr26943-2.c: Likewise. * testsuite/libgomp.c/pr26943-3.c: Likewise. * testsuite/libgomp.c/pr26943-4.c: Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@136433 138bc75d-0d04-0410-961f-82ee72b054a4
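
For orientation only: the ChangeLog above adds the core OpenMP 3.0 constructs (explicit tasks via "#pragma omp task", "#pragma omp taskwait", and the collapse, untied and schedule(auto) clauses). The sketch below is not part of the patch and does not come from any of the new tests; it is a minimal, hypothetical C++ example of the kind of source the new front-end and libgomp code paths are meant to accept.

#include <stdio.h>

/* Recursive Fibonacci with explicit tasks: "task", "untied" and
   "taskwait" are the new OpenMP 3.0 tasking constructs.  */
static int
fib (int n)
{
  int a, b;
  if (n < 2)
    return n;
  #pragma omp task shared(a) untied
  a = fib (n - 1);
  #pragma omp task shared(b) untied
  b = fib (n - 2);
  #pragma omp taskwait	/* Wait for the two child tasks.  */
  return a + b;
}

int
main ()
{
  int r, sum = 0;

  /* collapse(2) and schedule(auto) are likewise new in OpenMP 3.0;
     the two nested loops are collapsed into one iteration space.  */
  #pragma omp parallel for collapse(2) schedule(auto) reduction(+:sum)
  for (int i = 0; i < 4; i++)
    for (int j = 0; j < 4; j++)
      sum += i * 4 + j;

  /* Tasks only run in parallel when created inside a parallel region;
     "single" makes one thread generate them for the whole team.  */
  #pragma omp parallel
  #pragma omp single
  r = fib (10);

  printf ("sum=%d fib(10)=%d\n", sum, r);
  return 0;
}
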
Diffstat (limited to 'libgomp/testsuite')
-rw-r--r--  libgomp/testsuite/Makefile.in | 6
-rw-r--r--  libgomp/testsuite/libgomp.c++/c++.exp | 9
-rw-r--r--  libgomp/testsuite/libgomp.c++/collapse-1.C | 29
-rw-r--r--  libgomp/testsuite/libgomp.c++/collapse-2.C | 371
-rw-r--r--  libgomp/testsuite/libgomp.c++/ctor-10.C | 78
-rw-r--r--  libgomp/testsuite/libgomp.c++/for-1.C | 291
-rw-r--r--  libgomp/testsuite/libgomp.c++/for-2.C | 182
-rw-r--r--  libgomp/testsuite/libgomp.c++/for-3.C | 239
-rw-r--r--  libgomp/testsuite/libgomp.c++/for-4.C | 225
-rw-r--r--  libgomp/testsuite/libgomp.c++/for-5.C | 303
-rw-r--r--  libgomp/testsuite/libgomp.c++/loop-10.C | 105
-rw-r--r--  libgomp/testsuite/libgomp.c++/loop-8.C | 276
-rw-r--r--  libgomp/testsuite/libgomp.c++/loop-9.C | 387
-rw-r--r--  libgomp/testsuite/libgomp.c++/task-1.C | 83
-rw-r--r--  libgomp/testsuite/libgomp.c++/task-2.C | 70
-rw-r--r--  libgomp/testsuite/libgomp.c++/task-3.C | 90
-rw-r--r--  libgomp/testsuite/libgomp.c++/task-4.C | 37
-rw-r--r--  libgomp/testsuite/libgomp.c++/task-5.C | 90
-rw-r--r--  libgomp/testsuite/libgomp.c++/task-6.C | 86
-rw-r--r--  libgomp/testsuite/libgomp.c/collapse-1.c | 30
-rw-r--r--  libgomp/testsuite/libgomp.c/collapse-2.c | 30
-rw-r--r--  libgomp/testsuite/libgomp.c/collapse-3.c | 31
-rw-r--r--  libgomp/testsuite/libgomp.c/icv-1.c | 33
-rw-r--r--  libgomp/testsuite/libgomp.c/icv-2.c | 46
-rw-r--r--  libgomp/testsuite/libgomp.c/lib-2.c | 25
-rw-r--r--  libgomp/testsuite/libgomp.c/lock-1.c | 31
-rw-r--r--  libgomp/testsuite/libgomp.c/lock-2.c | 32
-rw-r--r--  libgomp/testsuite/libgomp.c/lock-3.c | 60
-rw-r--r--  libgomp/testsuite/libgomp.c/loop-4.c | 28
-rw-r--r--  libgomp/testsuite/libgomp.c/loop-5.c | 276
-rw-r--r--  libgomp/testsuite/libgomp.c/loop-6.c | 387
-rw-r--r--  libgomp/testsuite/libgomp.c/loop-7.c | 105
-rw-r--r--  libgomp/testsuite/libgomp.c/loop-8.c | 27
-rw-r--r--  libgomp/testsuite/libgomp.c/loop-9.c | 18
-rw-r--r--  libgomp/testsuite/libgomp.c/nested-3.c | 89
-rw-r--r--  libgomp/testsuite/libgomp.c/nestedfn-6.c | 21
-rw-r--r--  libgomp/testsuite/libgomp.c/pr26943-2.c | 4
-rw-r--r--  libgomp/testsuite/libgomp.c/pr26943-3.c | 4
-rw-r--r--  libgomp/testsuite/libgomp.c/pr26943-4.c | 4
-rw-r--r--  libgomp/testsuite/libgomp.c/sort-1.c | 379
-rw-r--r--  libgomp/testsuite/libgomp.c/task-1.c | 84
-rw-r--r--  libgomp/testsuite/libgomp.c/task-2.c | 53
-rw-r--r--  libgomp/testsuite/libgomp.c/task-3.c | 70
-rw-r--r--  libgomp/testsuite/libgomp.c/task-4.c | 40
-rw-r--r--  libgomp/testsuite/libgomp.fortran/allocatable1.f90 | 81
-rw-r--r--  libgomp/testsuite/libgomp.fortran/allocatable2.f90 | 47
-rw-r--r--  libgomp/testsuite/libgomp.fortran/allocatable3.f90 | 21
-rw-r--r--  libgomp/testsuite/libgomp.fortran/allocatable4.f90 | 47
-rw-r--r--  libgomp/testsuite/libgomp.fortran/collapse1.f90 | 26
-rw-r--r--  libgomp/testsuite/libgomp.fortran/collapse2.f90 | 53
-rw-r--r--  libgomp/testsuite/libgomp.fortran/collapse3.f90 | 204
-rw-r--r--  libgomp/testsuite/libgomp.fortran/collapse4.f90 | 12
-rw-r--r--  libgomp/testsuite/libgomp.fortran/lastprivate1.f90 | 126
-rw-r--r--  libgomp/testsuite/libgomp.fortran/lastprivate2.f90 | 141
-rw-r--r--  libgomp/testsuite/libgomp.fortran/lib4.f90 | 16
-rw-r--r--  libgomp/testsuite/libgomp.fortran/lock-1.f90 | 24
-rw-r--r--  libgomp/testsuite/libgomp.fortran/lock-2.f90 | 24
-rw-r--r--  libgomp/testsuite/libgomp.fortran/nested1.f90 | 87
-rw-r--r--  libgomp/testsuite/libgomp.fortran/nestedfn4.f90 | 41
-rw-r--r--  libgomp/testsuite/libgomp.fortran/strassen.f90 | 75
-rw-r--r--  libgomp/testsuite/libgomp.fortran/tabs1.f90 | 12
-rw-r--r--  libgomp/testsuite/libgomp.fortran/tabs2.f | 13
-rw-r--r--  libgomp/testsuite/libgomp.fortran/task1.f90 | 27
-rw-r--r--  libgomp/testsuite/libgomp.fortran/task2.f90 | 142
-rw-r--r--  libgomp/testsuite/libgomp.fortran/vla4.f90 | 2
-rw-r--r--  libgomp/testsuite/libgomp.fortran/vla5.f90 | 2
66 files changed, 6078 insertions, 9 deletions
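
The new icv-1.c and icv-2.c tests listed above presumably exercise the OpenMP 3.0 runtime entry points this patch exports from libgomp (omp_set_schedule, omp_get_schedule, omp_get_level, omp_get_ancestor_thread_num, omp_get_team_size, omp_get_active_level, omp_get_thread_limit, omp_set_max_active_levels, omp_get_max_active_levels). The following is a minimal, hypothetical usage sketch, independent of the actual test contents.

#include <omp.h>
#include <stdio.h>

int
main ()
{
  omp_sched_t kind;
  int chunk;

  omp_set_schedule (omp_sched_dynamic, 4);	/* Runtime schedule, new in 3.0.  */
  omp_get_schedule (&kind, &chunk);
  printf ("schedule kind=%d chunk=%d\n", (int) kind, chunk);

  omp_set_nested (1);
  omp_set_max_active_levels (2);

  /* Two nested parallel regions; the level/ancestor queries below are
     the new OpenMP 3.0 introspection routines.  */
  #pragma omp parallel num_threads (2)
  #pragma omp parallel num_threads (2)
  #pragma omp single
  printf ("level=%d active=%d ancestor(1)=%d team(1)=%d limit=%d max=%d\n",
	  omp_get_level (), omp_get_active_level (),
	  omp_get_ancestor_thread_num (1), omp_get_team_size (1),
	  omp_get_thread_limit (), omp_get_max_active_levels ());
  return 0;
}
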
diff --git a/libgomp/testsuite/Makefile.in b/libgomp/testsuite/Makefile.in
index 9c6163ba2bf..ae1806fb2da 100644
--- a/libgomp/testsuite/Makefile.in
+++ b/libgomp/testsuite/Makefile.in
@@ -112,9 +112,15 @@ MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@
MAKEINFO = @MAKEINFO@
NM = @NM@
OBJEXT = @OBJEXT@
+OMP_LOCK_25_ALIGN = @OMP_LOCK_25_ALIGN@
+OMP_LOCK_25_KIND = @OMP_LOCK_25_KIND@
+OMP_LOCK_25_SIZE = @OMP_LOCK_25_SIZE@
OMP_LOCK_ALIGN = @OMP_LOCK_ALIGN@
OMP_LOCK_KIND = @OMP_LOCK_KIND@
OMP_LOCK_SIZE = @OMP_LOCK_SIZE@
+OMP_NEST_LOCK_25_ALIGN = @OMP_NEST_LOCK_25_ALIGN@
+OMP_NEST_LOCK_25_KIND = @OMP_NEST_LOCK_25_KIND@
+OMP_NEST_LOCK_25_SIZE = @OMP_NEST_LOCK_25_SIZE@
OMP_NEST_LOCK_ALIGN = @OMP_NEST_LOCK_ALIGN@
OMP_NEST_LOCK_KIND = @OMP_NEST_LOCK_KIND@
OMP_NEST_LOCK_SIZE = @OMP_NEST_LOCK_SIZE@
diff --git a/libgomp/testsuite/libgomp.c++/c++.exp b/libgomp/testsuite/libgomp.c++/c++.exp
index f11482c7315..f3f42de6619 100644
--- a/libgomp/testsuite/libgomp.c++/c++.exp
+++ b/libgomp/testsuite/libgomp.c++/c++.exp
@@ -31,8 +31,15 @@ if { $lang_test_file_found } {
set ld_library_path "$always_ld_library_path:${blddir}/${lang_library_path}"
set_ld_library_path_env_vars
+ set flags_file "${blddir}/../libstdc++-v3/scripts/testsuite_flags"
+ if { [file exists $flags_file] } {
+ set libstdcxx_includes [exec sh $flags_file --build-includes]
+ } else {
+ set libstdcxx_includes ""
+ }
+
# Main loop.
- gfortran-dg-runtest $tests ""
+ gfortran-dg-runtest $tests $libstdcxx_includes
}
# All done.
diff --git a/libgomp/testsuite/libgomp.c++/collapse-1.C b/libgomp/testsuite/libgomp.c++/collapse-1.C
new file mode 100644
index 00000000000..132d35cf41d
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/collapse-1.C
@@ -0,0 +1,29 @@
+// { dg-do run }
+
+#include <string.h>
+#include <stdlib.h>
+
+int
+main ()
+{
+ int i, j, k, l = 0;
+ int a[3][3][3];
+
+ memset (a, '\0', sizeof (a));
+ #pragma omp parallel for collapse(4 - 1) schedule(static, 4)
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 2; j++)
+ for (k = 0; k < 2; k++)
+ a[i][j][k] = i + j * 4 + k * 16;
+ #pragma omp parallel
+ {
+ #pragma omp for collapse(2) reduction(|:l) private (k)
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 2; j++)
+ for (k = 0; k < 2; k++)
+ if (a[i][j][k] != i + j * 4 + k * 16)
+ l = 1;
+ }
+ if (l)
+ abort ();
+}
diff --git a/libgomp/testsuite/libgomp.c++/collapse-2.C b/libgomp/testsuite/libgomp.c++/collapse-2.C
new file mode 100644
index 00000000000..a42a1f07ffd
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/collapse-2.C
@@ -0,0 +1,371 @@
+// { dg-do run }
+
+#include <omp.h>
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+extern "C" void abort ();
+
+template <typename T>
+class I
+{
+public:
+ typedef ptrdiff_t difference_type;
+ I ();
+ ~I ();
+ I (T *);
+ I (const I &);
+ T &operator * ();
+ T *operator -> ();
+ T &operator [] (const difference_type &) const;
+ I &operator = (const I &);
+ I &operator ++ ();
+ I operator ++ (int);
+ I &operator -- ();
+ I operator -- (int);
+ I &operator += (const difference_type &);
+ I &operator -= (const difference_type &);
+ I operator + (const difference_type &) const;
+ I operator - (const difference_type &) const;
+ template <typename S> friend bool operator == (I<S> &, I<S> &);
+ template <typename S> friend bool operator == (const I<S> &, const I<S> &);
+ template <typename S> friend bool operator < (I<S> &, I<S> &);
+ template <typename S> friend bool operator < (const I<S> &, const I<S> &);
+ template <typename S> friend bool operator <= (I<S> &, I<S> &);
+ template <typename S> friend bool operator <= (const I<S> &, const I<S> &);
+ template <typename S> friend bool operator > (I<S> &, I<S> &);
+ template <typename S> friend bool operator > (const I<S> &, const I<S> &);
+ template <typename S> friend bool operator >= (I<S> &, I<S> &);
+ template <typename S> friend bool operator >= (const I<S> &, const I<S> &);
+ template <typename S> friend typename I<S>::difference_type operator - (I<S> &, I<S> &);
+ template <typename S> friend typename I<S>::difference_type operator - (const I<S> &, const I<S> &);
+ template <typename S> friend I<S> operator + (typename I<S>::difference_type , const I<S> &);
+private:
+ T *p;
+};
+template <typename T> I<T>::I () : p (0) {}
+template <typename T> I<T>::~I () { p = (T *) 0; }
+template <typename T> I<T>::I (T *x) : p (x) {}
+template <typename T> I<T>::I (const I &x) : p (x.p) {}
+template <typename T> T &I<T>::operator * () { return *p; }
+template <typename T> T *I<T>::operator -> () { return p; }
+template <typename T> T &I<T>::operator [] (const difference_type &x) const { return p[x]; }
+template <typename T> I<T> &I<T>::operator = (const I &x) { p = x.p; return *this; }
+template <typename T> I<T> &I<T>::operator ++ () { ++p; return *this; }
+template <typename T> I<T> I<T>::operator ++ (int) { return I (p++); }
+template <typename T> I<T> &I<T>::operator -- () { --p; return *this; }
+template <typename T> I<T> I<T>::operator -- (int) { return I (p--); }
+template <typename T> I<T> &I<T>::operator += (const difference_type &x) { p += x; return *this; }
+template <typename T> I<T> &I<T>::operator -= (const difference_type &x) { p -= x; return *this; }
+template <typename T> I<T> I<T>::operator + (const difference_type &x) const { return I (p + x); }
+template <typename T> I<T> I<T>::operator - (const difference_type &x) const { return I (p - x); }
+template <typename T> bool operator == (I<T> &x, I<T> &y) { return x.p == y.p; }
+template <typename T> bool operator == (const I<T> &x, const I<T> &y) { return x.p == y.p; }
+template <typename T> bool operator != (I<T> &x, I<T> &y) { return !(x == y); }
+template <typename T> bool operator != (const I<T> &x, const I<T> &y) { return !(x == y); }
+template <typename T> bool operator < (I<T> &x, I<T> &y) { return x.p < y.p; }
+template <typename T> bool operator < (const I<T> &x, const I<T> &y) { return x.p < y.p; }
+template <typename T> bool operator <= (I<T> &x, I<T> &y) { return x.p <= y.p; }
+template <typename T> bool operator <= (const I<T> &x, const I<T> &y) { return x.p <= y.p; }
+template <typename T> bool operator > (I<T> &x, I<T> &y) { return x.p > y.p; }
+template <typename T> bool operator > (const I<T> &x, const I<T> &y) { return x.p > y.p; }
+template <typename T> bool operator >= (I<T> &x, I<T> &y) { return x.p >= y.p; }
+template <typename T> bool operator >= (const I<T> &x, const I<T> &y) { return x.p >= y.p; }
+template <typename T> typename I<T>::difference_type operator - (I<T> &x, I<T> &y) { return x.p - y.p; }
+template <typename T> typename I<T>::difference_type operator - (const I<T> &x, const I<T> &y) { return x.p - y.p; }
+template <typename T> I<T> operator + (typename I<T>::difference_type x, const I<T> &y) { return I<T> (x + y.p); }
+
+template <typename T>
+class J
+{
+public:
+ J(const I<T> &x, const I<T> &y) : b (x), e (y) {}
+ const I<T> &begin ();
+ const I<T> &end ();
+private:
+ I<T> b, e;
+};
+
+template <typename T> const I<T> &J<T>::begin () { return b; }
+template <typename T> const I<T> &J<T>::end () { return e; }
+
+int results[2000];
+
+void
+f1 (J<int> x, J<int> y, J<int> z)
+{
+ I<int> i, j, k;
+ int l, f = 0, n = 0, m = 0;
+#pragma omp parallel shared (i, j, k, l) firstprivate (f) \
+ reduction (+:n, m) num_threads (8)
+ {
+ #pragma omp for lastprivate (i, j, k, l) schedule (static, 9) \
+ collapse (4)
+ for (i = x.begin (); i < x.end (); ++i)
+ for (j = y.begin (); j <= y.end (); j += 1)
+ for (l = 0; l < 1; l++)
+ for (k = z.begin () + 3; k < z.end () - 3; k++)
+ if (omp_get_num_threads () == 8
+ && ((*i + 2) * 12 + (*j + 5) * 4 + (*k - 13)
+ != (omp_get_thread_num () * 9 + f++)))
+ n++;
+ else
+ m++;
+ }
+ if (n || i != x.end () || j != y.end () + 1 || k != z.end () - 3
+ || m != 72 || l != 1)
+ abort ();
+}
+
+void
+f2 (J<int> x, J<int> y, J<int> z)
+{
+ int f = 0, n = 0, m = 0;
+#pragma omp parallel for firstprivate (f) reduction (+:n, m) \
+ num_threads (8) schedule (static, 9) \
+ collapse (6 - 2)
+ for (I<int> i = x.end () - 1; i >= x.begin (); --i)
+ for (int l = -131; l >= -131; l--)
+ for (I<int> j = y.end (); j > y.begin () - 1; j -= 1)
+ {
+ for (I<int> k = z.end () - 4; k >= z.begin () + 3; k--)
+ if (omp_get_num_threads () == 8
+ && ((3 - *i) * 12 + (-3 - *j) * 4 + (16 - *k)
+ != (omp_get_thread_num () * 9 + f++)))
+ n++;
+ else
+ m++;
+ }
+ if (n || m != 72)
+ abort ();
+}
+
+template <typename T>
+void
+f3 (J<int> x, J<int> y, J<int> z)
+{
+ I<int> i, j, k;
+ int l, f = 0, n = 0, m = 0;
+#pragma omp parallel shared (i, j, k, l) firstprivate (f) \
+ reduction (+:n, m) num_threads (8)
+ {
+ #pragma omp for lastprivate (i, j, k, l) schedule (static, 9) \
+ collapse (4)
+ for (i = x.begin (); i < x.end (); ++i)
+ for (j = y.begin (); j <= y.end (); j += 1)
+ for (k = z.begin () + 3; k < z.end () - 3; k++)
+ for (l = 7; l <= 7; l++)
+ if (omp_get_num_threads () == 8
+ && ((*i + 2) * 12 + (*j + 5) * 4 + (*k - 13)
+ != (omp_get_thread_num () * 9 + f++)))
+ n++;
+ else
+ m++;
+ }
+ if (n || i != x.end () || j != y.end () + 1 || k != z.end () - 3
+ || m != 72 || l != 8)
+ abort ();
+}
+
+template <typename T>
+void
+f4 (J<int> x, J<int> y, J<int> z)
+{
+ int f = 0, n = 0, m = 0;
+#pragma omp parallel for firstprivate (f) reduction (+:n, m) \
+ num_threads (8) schedule (static, 9) \
+ collapse (5 - 2)
+ for (I<int> i = x.end () - 1; i >= x.begin (); --i)
+ {
+ for (I<int> j = y.end (); j > y.begin () - 1; j -= 1)
+ {
+ for (I<int> k = z.end () - 4; k >= z.begin () + 3; k--)
+ if (omp_get_num_threads () == 8
+ && ((3 - *i) * 12 + (-3 - *j) * 4 + (16 - *k)
+ != (omp_get_thread_num () * 9 + f++)))
+ n++;
+ else
+ m++;
+ }
+ }
+ if (n || m != 72)
+ abort ();
+}
+
+template <typename T>
+void
+f5 (J<int> x, J<int> y, J<int> z)
+{
+ I<int> i, j, k;
+ int f = 0, n = 0, m = 0;
+#pragma omp parallel shared (i, j, k) firstprivate (f) \
+ reduction (+:n, m) num_threads (8)
+ {
+ #pragma omp for lastprivate (i, j, k) schedule (static, 9) \
+ collapse (3)
+ for (i = x.begin (); i < x.end (); ++i)
+ for (j = y.begin (); j <= y.end (); j += (T) 1)
+ {
+ for (k = z.begin () + 3; k < z.end () - 3; k++)
+ if (omp_get_num_threads () == 8
+ && ((*i + 2) * 12 + (*j + 5) * 4 + (*k - 13)
+ != (omp_get_thread_num () * 9 + f++)))
+ n++;
+ else
+ m++;
+ }
+ }
+ if (n || i != x.end () || j != y.end () + 1 || k != z.end () - 3
+ || m != 72)
+ abort ();
+}
+
+template <typename T>
+void
+f6 (J<int> x, J<int> y, J<int> z)
+{
+ int f = 0, n = 0, m = 0;
+#pragma omp parallel for firstprivate (f) reduction (+:n, m) \
+ num_threads (8) schedule (static, 9) \
+ collapse (5 - 2)
+ for (I<int> i = x.end () - 1; i >= x.begin (); --i)
+ {
+ for (I<int> j = y.end (); j > y.begin () - 1; j -= 1)
+ {
+ for (I<int> k = z.end () - 4; k >= z.begin () + (T) 3; k--)
+ if (omp_get_num_threads () == 8
+ && ((3 - *i) * 12 + (-3 - *j) * 4 + (16 - *k)
+ != (omp_get_thread_num () * 9 + f++)))
+ n++;
+ else
+ m++;
+ }
+ }
+ if (n || m != 72)
+ abort ();
+}
+
+template <typename T>
+void
+f7 (J<T> x, J<T> y, J<T> z)
+{
+ I<T> i, j, k, o = y.begin ();
+ T l, f = 0, n = 0, m = 0;
+#pragma omp parallel shared (i, j, k, l) firstprivate (f) \
+ reduction (+:n, m) num_threads (8)
+ {
+ #pragma omp for lastprivate (i, j, k, l) schedule (static, 9) \
+ collapse (4)
+ for (i = x.begin (); i < x.end (); ++i)
+ for (j = y.begin (); j <= y.end (); j += 1)
+ for (l = *o; l <= *o; l = 1 + l)
+ for (k = z.begin () + 3; k < z.end () - 3; k++)
+ if (omp_get_num_threads () == 8
+ && ((*i + 2) * 12 + (*j + 5) * 4 + (*k - 13)
+ != (omp_get_thread_num () * 9 + f++)))
+ n++;
+ else
+ m++;
+ }
+ if (n || i != x.end () || j != y.end () + 1 || k != z.end () - 3
+ || m != 72 || l != *o + 1)
+ abort ();
+}
+
+template <typename T>
+void
+f8 (J<T> x, J<T> y, J<T> z)
+{
+ T f = 0, n = 0, m = 0;
+#pragma omp parallel for firstprivate (f) reduction (+:n, m) \
+ num_threads (8) schedule (static, 9) \
+ collapse (6 - 2)
+ for (I<T> i = x.end () - 1; i >= x.begin (); --i)
+ for (T l = 0; l < 1; l++)
+ for (I<T> j = y.end (); j > y.begin () - 1; j -= 1)
+ {
+ for (I<T> k = z.end () - 4; k >= z.begin () + 3; k--)
+ if (omp_get_num_threads () == 8
+ && ((3 - *i) * 12 + (-3 - *j) * 4 + (16 - *k)
+ != (omp_get_thread_num () * 9 + f++)))
+ n++;
+ else
+ m++;
+ }
+ if (n || m != 72)
+ abort ();
+}
+
+template <typename S, typename T>
+void
+f9 (J<T> x, J<T> y, J<T> z)
+{
+ S i, j, k, o = y.begin ();
+ T l, f = 0, n = 0, m = 0;
+#pragma omp parallel shared (i, j, k, l) firstprivate (f) \
+ reduction (+:n, m) num_threads (8)
+ {
+ #pragma omp for lastprivate (i, j, k, l) schedule (static, 9) \
+ collapse (4)
+ for (i = x.begin (); i < x.end (); ++i)
+ for (j = y.begin (); j <= y.end (); j += 1)
+ for (l = *o; l <= *o; l = 1 + l)
+ for (k = z.begin () + 3; k < z.end () - 3; k++)
+ if (omp_get_num_threads () == 8
+ && ((*i + 2) * 12 + (*j + 5) * 4 + (*k - 13)
+ != (omp_get_thread_num () * 9 + f++)))
+ n++;
+ else
+ m++;
+ }
+ if (n || i != x.end () || j != y.end () + 1 || k != z.end () - 3
+ || m != 72 || l != *o + 1)
+ abort ();
+}
+
+template <typename S, typename T>
+void
+f10 (J<T> x, J<T> y, J<T> z)
+{
+ T f = 0, n = 0, m = 0;
+#pragma omp parallel for firstprivate (f) reduction (+:n, m) \
+ num_threads (8) schedule (static, 9) \
+ collapse (6 - 2)
+ for (S i = x.end () - 1; i >= x.begin (); --i)
+ for (T l = 0; l < 1; l++)
+ for (S j = y.end (); j > y.begin () - 1; j -= 1)
+ {
+ for (S k = z.end () - 4; k >= z.begin () + 3; k--)
+ if (omp_get_num_threads () == 8
+ && ((3 - *i) * 12 + (-3 - *j) * 4 + (16 - *k)
+ != (omp_get_thread_num () * 9 + f++)))
+ n++;
+ else
+ m++;
+ }
+ if (n || m != 72)
+ abort ();
+}
+
+int
+main ()
+{
+ int a[2000];
+ long b[2000];
+ for (int i = 0; i < 2000; i++)
+ {
+ a[i] = i - 1000;
+ b[i] = i - 1000;
+ }
+ J<int> x (&a[998], &a[1004]);
+ J<int> y (&a[995], &a[997]);
+ J<int> z (&a[1010], &a[1020]);
+ f1 (x, y, z);
+ f2 (x, y, z);
+ f3 <int> (x, y, z);
+ f4 <int> (x, y, z);
+ f5 <int> (x, y, z);
+ f6 <int> (x, y, z);
+ f7 <int> (x, y, z);
+ f8 <int> (x, y, z);
+ f9 <I<int>, int> (x, y, z);
+ f10 <I<int>, int> (x, y, z);
+}
diff --git a/libgomp/testsuite/libgomp.c++/ctor-10.C b/libgomp/testsuite/libgomp.c++/ctor-10.C
new file mode 100644
index 00000000000..f46e45ec418
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/ctor-10.C
@@ -0,0 +1,78 @@
+// { dg-do run }
+// { dg-require-effective-target tls_runtime }
+
+#include <omp.h>
+#include <assert.h>
+
+#define N 10
+#define THR 4
+
+struct B
+{
+ B();
+ B(const B &);
+ ~B();
+ B& operator=(const B &);
+ void doit();
+ static B *base;
+ static B *threadbase;
+#pragma omp threadprivate(threadbase)
+};
+
+B *B::base;
+B *B::threadbase;
+static unsigned cmask[THR];
+static unsigned dmask[THR];
+
+B::B()
+{
+ assert (base == 0);
+}
+
+B::B(const B &b)
+{
+ unsigned index = &b - base;
+ assert (index < N);
+ cmask[omp_get_thread_num()] |= 1u << index;
+}
+
+B::~B()
+{
+ if (threadbase)
+ {
+ unsigned index = this - threadbase;
+ assert (index < N);
+ dmask[omp_get_thread_num()] |= 1u << index;
+ }
+}
+
+void foo()
+{
+ B b[N];
+
+ B::base = b;
+
+ #pragma omp parallel firstprivate(b)
+ {
+ assert (omp_get_num_threads () == THR);
+ B::threadbase = b;
+ }
+
+ B::threadbase = 0;
+}
+
+int main()
+{
+ omp_set_dynamic (0);
+ omp_set_num_threads (THR);
+ foo();
+
+ for (int i = 0; i < THR; ++i)
+ {
+ unsigned xmask = (1u << N) - 1;
+ assert (cmask[i] == xmask);
+ assert (dmask[i] == xmask);
+ }
+
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c++/for-1.C b/libgomp/testsuite/libgomp.c++/for-1.C
new file mode 100644
index 00000000000..1c713464ebe
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/for-1.C
@@ -0,0 +1,291 @@
+// { dg-do run }
+
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+extern "C" void abort ();
+
+template <typename T>
+class I
+{
+public:
+ typedef ptrdiff_t difference_type;
+ I ();
+ ~I ();
+ I (T *);
+ I (const I &);
+ T &operator * ();
+ T *operator -> ();
+ T &operator [] (const difference_type &) const;
+ I &operator = (const I &);
+ I &operator ++ ();
+ I operator ++ (int);
+ I &operator -- ();
+ I operator -- (int);
+ I &operator += (const difference_type &);
+ I &operator -= (const difference_type &);
+ I operator + (const difference_type &) const;
+ I operator - (const difference_type &) const;
+ template <typename S> friend bool operator == (I<S> &, I<S> &);
+ template <typename S> friend bool operator == (const I<S> &, const I<S> &);
+ template <typename S> friend bool operator < (I<S> &, I<S> &);
+ template <typename S> friend bool operator < (const I<S> &, const I<S> &);
+ template <typename S> friend bool operator <= (I<S> &, I<S> &);
+ template <typename S> friend bool operator <= (const I<S> &, const I<S> &);
+ template <typename S> friend bool operator > (I<S> &, I<S> &);
+ template <typename S> friend bool operator > (const I<S> &, const I<S> &);
+ template <typename S> friend bool operator >= (I<S> &, I<S> &);
+ template <typename S> friend bool operator >= (const I<S> &, const I<S> &);
+ template <typename S> friend typename I<S>::difference_type operator - (I<S> &, I<S> &);
+ template <typename S> friend typename I<S>::difference_type operator - (const I<S> &, const I<S> &);
+ template <typename S> friend I<S> operator + (typename I<S>::difference_type , const I<S> &);
+private:
+ T *p;
+};
+template <typename T> I<T>::I () : p (0) {}
+template <typename T> I<T>::~I () {}
+template <typename T> I<T>::I (T *x) : p (x) {}
+template <typename T> I<T>::I (const I &x) : p (x.p) {}
+template <typename T> T &I<T>::operator * () { return *p; }
+template <typename T> T *I<T>::operator -> () { return p; }
+template <typename T> T &I<T>::operator [] (const difference_type &x) const { return p[x]; }
+template <typename T> I<T> &I<T>::operator = (const I &x) { p = x.p; return *this; }
+template <typename T> I<T> &I<T>::operator ++ () { ++p; return *this; }
+template <typename T> I<T> I<T>::operator ++ (int) { return I (p++); }
+template <typename T> I<T> &I<T>::operator -- () { --p; return *this; }
+template <typename T> I<T> I<T>::operator -- (int) { return I (p--); }
+template <typename T> I<T> &I<T>::operator += (const difference_type &x) { p += x; return *this; }
+template <typename T> I<T> &I<T>::operator -= (const difference_type &x) { p -= x; return *this; }
+template <typename T> I<T> I<T>::operator + (const difference_type &x) const { return I (p + x); }
+template <typename T> I<T> I<T>::operator - (const difference_type &x) const { return I (p - x); }
+template <typename T> bool operator == (I<T> &x, I<T> &y) { return x.p == y.p; }
+template <typename T> bool operator == (const I<T> &x, const I<T> &y) { return x.p == y.p; }
+template <typename T> bool operator != (I<T> &x, I<T> &y) { return !(x == y); }
+template <typename T> bool operator != (const I<T> &x, const I<T> &y) { return !(x == y); }
+template <typename T> bool operator < (I<T> &x, I<T> &y) { return x.p < y.p; }
+template <typename T> bool operator < (const I<T> &x, const I<T> &y) { return x.p < y.p; }
+template <typename T> bool operator <= (I<T> &x, I<T> &y) { return x.p <= y.p; }
+template <typename T> bool operator <= (const I<T> &x, const I<T> &y) { return x.p <= y.p; }
+template <typename T> bool operator > (I<T> &x, I<T> &y) { return x.p > y.p; }
+template <typename T> bool operator > (const I<T> &x, const I<T> &y) { return x.p > y.p; }
+template <typename T> bool operator >= (I<T> &x, I<T> &y) { return x.p >= y.p; }
+template <typename T> bool operator >= (const I<T> &x, const I<T> &y) { return x.p >= y.p; }
+template <typename T> typename I<T>::difference_type operator - (I<T> &x, I<T> &y) { return x.p - y.p; }
+template <typename T> typename I<T>::difference_type operator - (const I<T> &x, const I<T> &y) { return x.p - y.p; }
+template <typename T> I<T> operator + (typename I<T>::difference_type x, const I<T> &y) { return I<T> (x + y.p); }
+
+template <typename T>
+class J
+{
+public:
+ J(const I<T> &x, const I<T> &y) : b (x), e (y) {}
+ const I<T> &begin ();
+ const I<T> &end ();
+private:
+ I<T> b, e;
+};
+
+template <typename T> const I<T> &J<T>::begin () { return b; }
+template <typename T> const I<T> &J<T>::end () { return e; }
+
+int results[2000];
+
+template <typename T>
+void
+baz (I<T> &i)
+{
+ if (*i < 0 || *i >= 2000)
+ abort ();
+ results[*i]++;
+}
+
+void
+f1 (const I<int> &x, const I<int> &y)
+{
+#pragma omp parallel for
+ for (I<int> i = x; i <= y; i += 6)
+ baz (i);
+}
+
+void
+f2 (const I<int> &x, const I<int> &y)
+{
+ I<int> i;
+#pragma omp parallel for private(i)
+ for (i = x; i < y - 1; i = 1 - 6 + 7 + i)
+ baz (i);
+}
+
+template <typename T>
+void
+f3 (const I<int> &x, const I<int> &y)
+{
+#pragma omp parallel for
+ for (I<int> i = x; i <= y; i = i + 9 - 8)
+ baz (i);
+}
+
+template <typename T>
+void
+f4 (const I<int> &x, const I<int> &y)
+{
+ I<int> i;
+#pragma omp parallel for lastprivate(i)
+ for (i = x + 2000 - 64; i > y + 10; --i)
+ baz (i);
+}
+
+void
+f5 (const I<int> &x, const I<int> &y)
+{
+#pragma omp parallel for
+ for (I<int> i = x + 2000 - 64; i > y + 10; i -= 10)
+ baz (i);
+}
+
+template <int N>
+void
+f6 (const I<int> &x, const I<int> &y)
+{
+#pragma omp parallel for
+ for (I<int> i = x + 2000 - 64; i > y + 10; i = i - 12 + 2)
+ {
+ I<int> j = i + N;
+ baz (j);
+ }
+}
+
+template <int N>
+void
+f7 (I<int> i, const I<int> &x, const I<int> &y)
+{
+#pragma omp parallel for
+ for (i = x - 10; i <= y + 10; i += N)
+ baz (i);
+}
+
+template <int N>
+void
+f8 (J<int> j)
+{
+ I<int> i;
+#pragma omp parallel for
+ for (i = j.begin (); i <= j.end () + N; i += 2)
+ baz (i);
+}
+
+template <typename T, int N>
+void
+f9 (const I<T> &x, const I<T> &y)
+{
+#pragma omp parallel for
+ for (I<T> i = x; i <= y; i = i + N)
+ baz (i);
+}
+
+template <typename T, int N>
+void
+f10 (const I<T> &x, const I<T> &y)
+{
+ I<T> i;
+#pragma omp parallel for
+ for (i = x; i > y; i = i + N)
+ baz (i);
+}
+
+template <typename T>
+void
+f11 (const T &x, const T &y)
+{
+#pragma omp parallel
+ {
+#pragma omp for nowait
+ for (T i = x; i <= y; i += 3)
+ baz (i);
+#pragma omp single
+ {
+ T j = y + 3;
+ baz (j);
+ }
+ }
+}
+
+template <typename T>
+void
+f12 (const T &x, const T &y)
+{
+ T i;
+#pragma omp parallel for
+ for (i = x; i > y; --i)
+ baz (i);
+}
+
+template <int N>
+struct K
+{
+ template <typename T>
+ static void
+ f13 (const T &x, const T &y)
+ {
+#pragma omp parallel for
+ for (T i = x; i <= y + N; i += N)
+ baz (i);
+ }
+};
+
+#define check(expr) \
+ for (int i = 0; i < 2000; i++) \
+ if (expr) \
+ { \
+ if (results[i] != 1) \
+ abort (); \
+ results[i] = 0; \
+ } \
+ else if (results[i]) \
+ abort ()
+
+int
+main ()
+{
+ int a[2000];
+ long b[2000];
+ for (int i = 0; i < 2000; i++)
+ {
+ a[i] = i;
+ b[i] = i;
+ }
+ f1 (&a[10], &a[1990]);
+ check (i >= 10 && i <= 1990 && (i - 10) % 6 == 0);
+ f2 (&a[0], &a[1999]);
+ check (i < 1998 && (i & 1) == 0);
+ f3<char> (&a[20], &a[1837]);
+ check (i >= 20 && i <= 1837);
+ f4<int> (&a[0], &a[30]);
+ check (i > 40 && i <= 2000 - 64);
+ f5 (&a[0], &a[100]);
+ check (i >= 116 && i <= 2000 - 64 && (i - 116) % 10 == 0);
+ f6<-10> (&a[10], &a[110]);
+ check (i >= 116 && i <= 2000 - 64 && (i - 116) % 10 == 0);
+ f7<6> (I<int> (), &a[12], &a[1800]);
+ check (i >= 2 && i <= 1808 && (i - 2) % 6 == 0);
+ f8<121> (J<int> (&a[14], &a[1803]));
+ check (i >= 14 && i <= 1924 && (i & 1) == 0);
+ f9<int, 7> (&a[33], &a[1967]);
+ check (i >= 33 && i <= 1967 && (i - 33) % 7 == 0);
+ f10<int, -7> (&a[1939], &a[17]);
+ check (i >= 21 && i <= 1939 && (i - 21) % 7 == 0);
+ f11<I<int> > (&a[16], &a[1981]);
+ check (i >= 16 && i <= 1984 && (i - 16) % 3 == 0);
+ f12<I<int> > (&a[1761], &a[37]);
+ check (i > 37 && i <= 1761);
+ K<5>::f13<I<int> > (&a[1], &a[1935]);
+ check (i >= 1 && i <= 1936 && (i - 1) % 5 == 0);
+ f9<long, 7> (&b[33], &b[1967]);
+ check (i >= 33 && i <= 1967 && (i - 33) % 7 == 0);
+ f10<long, -7> (&b[1939], &b[17]);
+ check (i >= 21 && i <= 1939 && (i - 21) % 7 == 0);
+ f11<I<long> > (&b[16], &b[1981]);
+ check (i >= 16 && i <= 1984 && (i - 16) % 3 == 0);
+ f12<I<long> > (&b[1761], &b[37]);
+ check (i > 37 && i <= 1761);
+ K<5>::f13<I<long> > (&b[1], &b[1935]);
+ check (i >= 1 && i <= 1936 && (i - 1) % 5 == 0);
+}
diff --git a/libgomp/testsuite/libgomp.c++/for-2.C b/libgomp/testsuite/libgomp.c++/for-2.C
new file mode 100644
index 00000000000..98ffa1ae6f0
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/for-2.C
@@ -0,0 +1,182 @@
+// { dg-do run }
+
+extern "C" void abort ();
+
+template <typename T>
+class J
+{
+public:
+ J(T x, T y) : b (x), e (y) {}
+ T begin ();
+ T end ();
+private:
+ T b, e;
+};
+
+template <typename T> T J<T>::begin () { return b; }
+template <typename T> T J<T>::end () { return e; }
+
+int results[2000];
+
+void
+baz (int i)
+{
+ if (i < 0 || i >= 2000)
+ abort ();
+ results[i]++;
+}
+
+void
+f1 (int x, int y)
+{
+#pragma omp parallel for
+ for (int i = x; i <= y; i += 6)
+ baz (i);
+}
+
+void
+f2 (int x, int y)
+{
+ int i;
+#pragma omp parallel for private(i)
+ for (i = x; i < y - 1; i = 1 - 6 + 7 + i)
+ baz (i);
+}
+
+template <typename T>
+void
+f3 (int x, int y)
+{
+#pragma omp parallel for
+ for (int i = x; i <= y; i = i + 9 - 8)
+ baz (i);
+}
+
+template <typename T>
+void
+f4 (int x, int y)
+{
+ int i;
+#pragma omp parallel for lastprivate(i)
+ for (i = x + 2000 - 64; i > y + 10; --i)
+ baz (i);
+}
+
+void
+f5 (int x, int y)
+{
+#pragma omp parallel for
+ for (int i = x + 2000 - 64; i > y + 10L; i -= 10L)
+ baz (i);
+}
+
+template <int N>
+void
+f6 (int x, int y)
+{
+#pragma omp parallel for
+ for (int i = x + 2000 - 64; i > y + 10L; i = i - 12 + 2L)
+ baz (i + N);
+}
+
+template <long N>
+void
+f7 (int i, int x, int y)
+{
+#pragma omp parallel for
+ for (i = x - 10; i <= y + 10; i += N)
+ baz (i);
+}
+
+template <long N>
+void
+f8 (J<int> j)
+{
+ int i;
+#pragma omp parallel for
+ for (i = j.begin (); i <= j.end () + N; i += 2)
+ baz (i);
+}
+
+template <typename T, long N>
+void
+f9 (T x, T y)
+{
+#pragma omp parallel for
+ for (T i = x; i <= y; i = i + N)
+ baz (i);
+}
+
+template <typename T, long N>
+void
+f10 (T x, T y)
+{
+ T i;
+#pragma omp parallel for
+ for (i = x; i > y; i = i + N)
+ baz (i);
+}
+
+template <typename T>
+void
+f11 (T x, long y)
+{
+#pragma omp parallel
+ {
+#pragma omp for nowait
+ for (T i = x; i <= y; i += 3L)
+ baz (i);
+#pragma omp single
+ baz (y + 3);
+ }
+}
+
+template <typename T>
+void
+f12 (T x, T y)
+{
+ T i;
+#pragma omp parallel for
+ for (i = x; i > y; --i)
+ baz (i);
+}
+
+#define check(expr) \
+ for (int i = 0; i < 2000; i++) \
+ if (expr) \
+ { \
+ if (results[i] != 1) \
+ abort (); \
+ results[i] = 0; \
+ } \
+ else if (results[i]) \
+ abort ()
+
+int
+main ()
+{
+ f1 (10, 1990);
+ check (i >= 10 && i <= 1990 && (i - 10) % 6 == 0);
+ f2 (0, 1999);
+ check (i < 1998 && (i & 1) == 0);
+ f3<char> (20, 1837);
+ check (i >= 20 && i <= 1837);
+ f4<int> (0, 30);
+ check (i > 40 && i <= 2000 - 64);
+ f5 (0, 100);
+ check (i >= 116 && i <= 2000 - 64 && (i - 116) % 10 == 0);
+ f6<-10> (10, 110);
+ check (i >= 116 && i <= 2000 - 64 && (i - 116) % 10 == 0);
+ f7<6> (0, 12, 1800);
+ check (i >= 2 && i <= 1808 && (i - 2) % 6 == 0);
+ f8<121> (J<int> (14, 1803));
+ check (i >= 14 && i <= 1924 && (i & 1) == 0);
+ f9<int, 7> (33, 1967);
+ check (i >= 33 && i <= 1967 && (i - 33) % 7 == 0);
+ f10<int, -7> (1939, 17);
+ check (i >= 21 && i <= 1939 && (i - 21) % 7 == 0);
+ f11<int> (16, 1981);
+ check (i >= 16 && i <= 1984 && (i - 16) % 3 == 0);
+ f12<int> (1761, 37);
+ check (i > 37 && i <= 1761);
+}
diff --git a/libgomp/testsuite/libgomp.c++/for-3.C b/libgomp/testsuite/libgomp.c++/for-3.C
new file mode 100644
index 00000000000..235f83875ea
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/for-3.C
@@ -0,0 +1,239 @@
+// { dg-do run }
+
+#include <vector>
+#include <cstdlib>
+
+template <typename T>
+class J
+{
+public:
+ typedef typename std::vector<T>::const_iterator const_iterator;
+ J(const const_iterator &x, const const_iterator &y) : b (x), e (y) {}
+ const const_iterator &begin ();
+ const const_iterator &end ();
+private:
+ const_iterator b, e;
+};
+
+template <typename T>
+const typename std::vector<T>::const_iterator &J<T>::begin () { return b; }
+template <typename T>
+const typename std::vector<T>::const_iterator &J<T>::end () { return e; }
+
+int results[2000];
+
+template <typename T>
+void
+baz (T &i)
+{
+ if (*i < 0 || *i >= 2000)
+ std::abort ();
+ results[*i]++;
+}
+
+void
+f1 (const std::vector<int>::const_iterator &x,
+ const std::vector<int>::const_iterator &y)
+{
+#pragma omp parallel for
+ for (std::vector<int>::const_iterator i = x; i <= y; i += 6)
+ baz (i);
+}
+
+void
+f2 (const std::vector<int>::const_iterator &x,
+ const std::vector<int>::const_iterator &y)
+{
+ std::vector<int>::const_iterator i;
+#pragma omp parallel for private(i)
+ for (i = x; i < y - 1; i = 1 - 6 + 7 + i)
+ baz (i);
+}
+
+template <typename T>
+void
+f3 (const std::vector<int>::const_iterator &x,
+ const std::vector<int>::const_iterator &y)
+{
+#pragma omp parallel for schedule (dynamic, 6)
+ for (std::vector<int>::const_iterator i = x; i <= y; i = i + 9 - 8)
+ baz (i);
+}
+
+template <typename T>
+void
+f4 (const std::vector<int>::const_iterator &x,
+ const std::vector<int>::const_iterator &y)
+{
+ std::vector<int>::const_iterator i;
+#pragma omp parallel for lastprivate(i)
+ for (i = x + 2000 - 64; i > y + 10; --i)
+ baz (i);
+}
+
+void
+f5 (const std::vector<int>::const_iterator &x,
+ const std::vector<int>::const_iterator &y)
+{
+#pragma omp parallel for schedule (static, 10)
+ for (std::vector<int>::const_iterator i = x + 2000 - 64; i > y + 10; i -= 10)
+ baz (i);
+}
+
+template <int N>
+void
+f6 (const std::vector<int>::const_iterator &x,
+ const std::vector<int>::const_iterator &y)
+{
+#pragma omp parallel for schedule (runtime)
+ for (std::vector<int>::const_iterator i = x + 2000 - 64;
+ i > y + 10; i = i - 12 + 2)
+ {
+ std::vector<int>::const_iterator j = i + N;
+ baz (j);
+ }
+}
+
+template <int N>
+void
+f7 (std::vector<int>::const_iterator i,
+ const std::vector<int>::const_iterator &x,
+ const std::vector<int>::const_iterator &y)
+{
+#pragma omp parallel for schedule (dynamic, 6)
+ for (i = x - 10; i <= y + 10; i += N)
+ baz (i);
+}
+
+template <int N>
+void
+f8 (J<int> j)
+{
+ std::vector<int>::const_iterator i;
+#pragma omp parallel for schedule (dynamic, 40)
+ for (i = j.begin (); i <= j.end () + N; i += 2)
+ baz (i);
+}
+
+template <typename T, int N>
+void
+f9 (const typename std::vector<T>::const_iterator &x,
+ const typename std::vector<T>::const_iterator &y)
+{
+#pragma omp parallel for schedule (static, 25)
+ for (typename std::vector<T>::const_iterator i = x; i <= y; i = i + N)
+ baz (i);
+}
+
+template <typename T, int N>
+void
+f10 (const typename std::vector<T>::const_iterator &x,
+ const typename std::vector<T>::const_iterator &y)
+{
+ typename std::vector<T>::const_iterator i;
+#pragma omp parallel for
+ for (i = x; i > y; i = i + N)
+ baz (i);
+}
+
+template <typename T>
+void
+f11 (const T &x, const T &y)
+{
+#pragma omp parallel
+ {
+#pragma omp for nowait schedule (static, 2)
+ for (T i = x; i <= y; i += 3)
+ baz (i);
+#pragma omp single
+ {
+ T j = y + 3;
+ baz (j);
+ }
+ }
+}
+
+template <typename T>
+void
+f12 (const T &x, const T &y)
+{
+ T i;
+#pragma omp parallel for schedule (dynamic, 130)
+ for (i = x; i > y; --i)
+ baz (i);
+}
+
+template <int N>
+struct K
+{
+ template <typename T>
+ static void
+ f13 (const T &x, const T &y)
+ {
+#pragma omp parallel for schedule (runtime)
+ for (T i = x; i <= y + N; i += N)
+ baz (i);
+ }
+};
+
+#define check(expr) \
+ for (int i = 0; i < 2000; i++) \
+ if (expr) \
+ { \
+ if (results[i] != 1) \
+ std::abort (); \
+ results[i] = 0; \
+ } \
+ else if (results[i]) \
+ std::abort ()
+
+int
+main ()
+{
+ std::vector<int> a(2000);
+ std::vector<long> b(2000);
+ for (int i = 0; i < 2000; i++)
+ {
+ a[i] = i;
+ b[i] = i;
+ }
+ f1 (a.begin () + 10, a.begin () + 1990);
+ check (i >= 10 && i <= 1990 && (i - 10) % 6 == 0);
+ f2 (a.begin () + 0, a.begin () + 1999);
+ check (i < 1998 && (i & 1) == 0);
+ f3<char> (a.begin () + 20, a.begin () + 1837);
+ check (i >= 20 && i <= 1837);
+ f4<int> (a.begin () + 0, a.begin () + 30);
+ check (i > 40 && i <= 2000 - 64);
+ f5 (a.begin () + 0, a.begin () + 100);
+ check (i >= 116 && i <= 2000 - 64 && (i - 116) % 10 == 0);
+ f6<-10> (a.begin () + 10, a.begin () + 110);
+ check (i >= 116 && i <= 2000 - 64 && (i - 116) % 10 == 0);
+ f7<6> (std::vector<int>::const_iterator (), a.begin () + 12,
+ a.begin () + 1800);
+ check (i >= 2 && i <= 1808 && (i - 2) % 6 == 0);
+ f8<121> (J<int> (a.begin () + 14, a.begin () + 1803));
+ check (i >= 14 && i <= 1924 && (i & 1) == 0);
+ f9<int, 7> (a.begin () + 33, a.begin () + 1967);
+ check (i >= 33 && i <= 1967 && (i - 33) % 7 == 0);
+ f10<int, -7> (a.begin () + 1939, a.begin () + 17);
+ check (i >= 21 && i <= 1939 && (i - 21) % 7 == 0);
+ f11<std::vector<int>::const_iterator > (a.begin () + 16, a.begin () + 1981);
+ check (i >= 16 && i <= 1984 && (i - 16) % 3 == 0);
+ f12<std::vector<int>::const_iterator > (a.begin () + 1761, a.begin () + 37);
+ check (i > 37 && i <= 1761);
+ K<5>::f13<std::vector<int>::const_iterator > (a.begin () + 1,
+ a.begin () + 1935);
+ check (i >= 1 && i <= 1936 && (i - 1) % 5 == 0);
+ f9<long, 7> (b.begin () + 33, b.begin () + 1967);
+ check (i >= 33 && i <= 1967 && (i - 33) % 7 == 0);
+ f10<long, -7> (b.begin () + 1939, b.begin () + 17);
+ check (i >= 21 && i <= 1939 && (i - 21) % 7 == 0);
+ f11<std::vector<long>::const_iterator > (b.begin () + 16, b.begin () + 1981);
+ check (i >= 16 && i <= 1984 && (i - 16) % 3 == 0);
+ f12<std::vector<long>::const_iterator > (b.begin () + 1761, b.begin () + 37);
+ check (i > 37 && i <= 1761);
+ K<5>::f13<std::vector<long>::const_iterator > (b.begin () + 1,
+ b.begin () + 1935);
+ check (i >= 1 && i <= 1936 && (i - 1) % 5 == 0);
+}
diff --git a/libgomp/testsuite/libgomp.c++/for-4.C b/libgomp/testsuite/libgomp.c++/for-4.C
new file mode 100644
index 00000000000..c528ef9d1fa
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/for-4.C
@@ -0,0 +1,225 @@
+// { dg-do run }
+
+#include <string>
+#include <cstdlib>
+
+template <typename T>
+class J
+{
+public:
+ typedef typename std::basic_string<T>::iterator iterator;
+ J(const iterator &x, const iterator &y) : b (x), e (y) {}
+ const iterator &begin ();
+ const iterator &end ();
+private:
+ iterator b, e;
+};
+
+template <typename T>
+const typename std::basic_string<T>::iterator &J<T>::begin () { return b; }
+template <typename T>
+const typename std::basic_string<T>::iterator &J<T>::end () { return e; }
+
+template <typename T>
+void
+baz (T &i)
+{
+ if (*i < L'a' || *i >= L'a' + 2000)
+ std::abort ();
+ (*i)++;
+}
+
+void
+f1 (const std::basic_string<wchar_t>::iterator &x,
+ const std::basic_string<wchar_t>::iterator &y)
+{
+#pragma omp parallel for
+ for (std::basic_string<wchar_t>::iterator i = x; i <= y; i += 6)
+ baz (i);
+}
+
+void
+f2 (const std::basic_string<wchar_t>::iterator &x,
+ const std::basic_string<wchar_t>::iterator &y)
+{
+ std::basic_string<wchar_t>::iterator i;
+#pragma omp parallel for private(i)
+ for (i = x; i < y - 1; i = 1 - 6 + 7 + i)
+ baz (i);
+}
+
+template <typename T>
+void
+f3 (const std::basic_string<wchar_t>::iterator &x,
+ const std::basic_string<wchar_t>::iterator &y)
+{
+#pragma omp parallel for schedule (dynamic, 6)
+ for (std::basic_string<wchar_t>::iterator i = x; i <= y; i = i + 9 - 8)
+ baz (i);
+}
+
+template <typename T>
+void
+f4 (const std::basic_string<wchar_t>::iterator &x,
+ const std::basic_string<wchar_t>::iterator &y)
+{
+ std::basic_string<wchar_t>::iterator i;
+#pragma omp parallel for lastprivate(i)
+ for (i = x + 2000 - 64; i > y + 10; --i)
+ baz (i);
+}
+
+void
+f5 (const std::basic_string<wchar_t>::iterator &x,
+ const std::basic_string<wchar_t>::iterator &y)
+{
+#pragma omp parallel for schedule (static, 10)
+ for (std::basic_string<wchar_t>::iterator i = x + 2000 - 64;
+ i > y + 10; i -= 10)
+ baz (i);
+}
+
+template <int N>
+void
+f6 (const std::basic_string<wchar_t>::iterator &x,
+ const std::basic_string<wchar_t>::iterator &y)
+{
+#pragma omp parallel for schedule (runtime)
+ for (std::basic_string<wchar_t>::iterator i = x + 2000 - 64;
+ i > y + 10; i = i - 12 + 2)
+ {
+ std::basic_string<wchar_t>::iterator j = i + N;
+ baz (j);
+ }
+}
+
+template <int N>
+void
+f7 (std::basic_string<wchar_t>::iterator i,
+ const std::basic_string<wchar_t>::iterator &x,
+ const std::basic_string<wchar_t>::iterator &y)
+{
+#pragma omp parallel for schedule (dynamic, 6)
+ for (i = x - 10; i <= y + 10; i += N)
+ baz (i);
+}
+
+template <wchar_t N>
+void
+f8 (J<wchar_t> j)
+{
+ std::basic_string<wchar_t>::iterator i;
+#pragma omp parallel for schedule (dynamic, 40)
+ for (i = j.begin (); i <= j.end () + N; i += 2)
+ baz (i);
+}
+
+template <typename T, int N>
+void
+f9 (const typename std::basic_string<T>::iterator &x,
+ const typename std::basic_string<T>::iterator &y)
+{
+#pragma omp parallel for schedule (static, 25)
+ for (typename std::basic_string<T>::iterator i = x; i <= y; i = i + N)
+ baz (i);
+}
+
+template <typename T, int N>
+void
+f10 (const typename std::basic_string<T>::iterator &x,
+ const typename std::basic_string<T>::iterator &y)
+{
+ typename std::basic_string<T>::iterator i;
+#pragma omp parallel for
+ for (i = x; i > y; i = i + N)
+ baz (i);
+}
+
+template <typename T>
+void
+f11 (const T &x, const T &y)
+{
+#pragma omp parallel
+ {
+#pragma omp for nowait schedule (static, 2)
+ for (T i = x; i <= y; i += 3)
+ baz (i);
+#pragma omp single
+ {
+ T j = y + 3;
+ baz (j);
+ }
+ }
+}
+
+template <typename T>
+void
+f12 (const T &x, const T &y)
+{
+ T i;
+#pragma omp parallel for schedule (dynamic, 130)
+ for (i = x; i > y; --i)
+ baz (i);
+}
+
+template <int N>
+struct K
+{
+ template <typename T>
+ static void
+ f13 (const T &x, const T &y)
+ {
+#pragma omp parallel for schedule (runtime)
+ for (T i = x; i <= y + N; i += N)
+ baz (i);
+ }
+};
+
+#define check(expr) \
+ for (int i = 0; i < 2000; i++) \
+ if (expr) \
+ { \
+ if (a[i] != L'a' + i + 1) \
+ std::abort (); \
+ a[i] = L'a' + i; \
+ } \
+ else if (a[i] != L'a' + i) \
+ std::abort ()
+
+int
+main ()
+{
+ std::basic_string<wchar_t> a = L"";
+ for (int i = 0; i < 2000; i++)
+ a += L'a' + i;
+ f1 (a.begin () + 10, a.begin () + 1990);
+ check (i >= 10 && i <= 1990 && (i - 10) % 6 == 0);
+ f2 (a.begin () + 0, a.begin () + 1999);
+ check (i < 1998 && (i & 1) == 0);
+ f3<char> (a.begin () + 20, a.begin () + 1837);
+ check (i >= 20 && i <= 1837);
+ f4<int> (a.begin () + 0, a.begin () + 30);
+ check (i > 40 && i <= 2000 - 64);
+ f5 (a.begin () + 0, a.begin () + 100);
+ check (i >= 116 && i <= 2000 - 64 && (i - 116) % 10 == 0);
+ f6<-10> (a.begin () + 10, a.begin () + 110);
+ check (i >= 116 && i <= 2000 - 64 && (i - 116) % 10 == 0);
+ f7<6> (std::basic_string<wchar_t>::iterator (), a.begin () + 12,
+ a.begin () + 1800);
+ check (i >= 2 && i <= 1808 && (i - 2) % 6 == 0);
+ f8<121> (J<wchar_t> (a.begin () + 14, a.begin () + 1803));
+ check (i >= 14 && i <= 1924 && (i & 1) == 0);
+ f9<wchar_t, 7> (a.begin () + 33, a.begin () + 1967);
+ check (i >= 33 && i <= 1967 && (i - 33) % 7 == 0);
+ f10<wchar_t, -7> (a.begin () + 1939, a.begin () + 17);
+ check (i >= 21 && i <= 1939 && (i - 21) % 7 == 0);
+ f11<std::basic_string<wchar_t>::iterator > (a.begin () + 16,
+ a.begin () + 1981);
+ check (i >= 16 && i <= 1984 && (i - 16) % 3 == 0);
+ f12<std::basic_string<wchar_t>::iterator > (a.begin () + 1761,
+ a.begin () + 37);
+ check (i > 37 && i <= 1761);
+ K<5>::f13<std::basic_string<wchar_t>::iterator > (a.begin () + 1,
+ a.begin () + 1935);
+ check (i >= 1 && i <= 1936 && (i - 1) % 5 == 0);
+}
diff --git a/libgomp/testsuite/libgomp.c++/for-5.C b/libgomp/testsuite/libgomp.c++/for-5.C
new file mode 100644
index 00000000000..9b75bf379ce
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/for-5.C
@@ -0,0 +1,303 @@
+// { dg-do run }
+
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+extern "C" void abort ();
+
+template <typename T>
+class I
+{
+public:
+ typedef ptrdiff_t difference_type;
+ I ();
+ ~I ();
+ I (T *);
+ I (const I &);
+ T &operator * ();
+ T *operator -> ();
+ T &operator [] (const difference_type &) const;
+ I &operator = (const I &);
+ I &operator ++ ();
+ I operator ++ (int);
+ I &operator -- ();
+ I operator -- (int);
+ I &operator += (const difference_type &);
+ I &operator -= (const difference_type &);
+ I operator + (const difference_type &) const;
+ I operator - (const difference_type &) const;
+ template <typename S> friend bool operator == (I<S> &, I<S> &);
+ template <typename S> friend bool operator == (const I<S> &, const I<S> &);
+ template <typename S> friend bool operator < (I<S> &, I<S> &);
+ template <typename S> friend bool operator < (const I<S> &, const I<S> &);
+ template <typename S> friend bool operator <= (I<S> &, I<S> &);
+ template <typename S> friend bool operator <= (const I<S> &, const I<S> &);
+ template <typename S> friend bool operator > (I<S> &, I<S> &);
+ template <typename S> friend bool operator > (const I<S> &, const I<S> &);
+ template <typename S> friend bool operator >= (I<S> &, I<S> &);
+ template <typename S> friend bool operator >= (const I<S> &, const I<S> &);
+ template <typename S> friend typename I<S>::difference_type operator - (I<S> &, I<S> &);
+ template <typename S> friend typename I<S>::difference_type operator - (const I<S> &, const I<S> &);
+ template <typename S> friend I<S> operator + (typename I<S>::difference_type , const I<S> &);
+private:
+ T *p;
+};
+template <typename T> I<T>::I () : p (0) {}
+template <typename T> I<T>::~I () { p = (T *) 0; }
+template <typename T> I<T>::I (T *x) : p (x) {}
+template <typename T> I<T>::I (const I &x) : p (x.p) {}
+template <typename T> T &I<T>::operator * () { return *p; }
+template <typename T> T *I<T>::operator -> () { return p; }
+template <typename T> T &I<T>::operator [] (const difference_type &x) const { return p[x]; }
+template <typename T> I<T> &I<T>::operator = (const I &x) { p = x.p; return *this; }
+template <typename T> I<T> &I<T>::operator ++ () { ++p; return *this; }
+template <typename T> I<T> I<T>::operator ++ (int) { return I (p++); }
+template <typename T> I<T> &I<T>::operator -- () { --p; return *this; }
+template <typename T> I<T> I<T>::operator -- (int) { return I (p--); }
+template <typename T> I<T> &I<T>::operator += (const difference_type &x) { p += x; return *this; }
+template <typename T> I<T> &I<T>::operator -= (const difference_type &x) { p -= x; return *this; }
+template <typename T> I<T> I<T>::operator + (const difference_type &x) const { return I (p + x); }
+template <typename T> I<T> I<T>::operator - (const difference_type &x) const { return I (p - x); }
+template <typename T> bool operator == (I<T> &x, I<T> &y) { return x.p == y.p; }
+template <typename T> bool operator == (const I<T> &x, const I<T> &y) { return x.p == y.p; }
+template <typename T> bool operator != (I<T> &x, I<T> &y) { return !(x == y); }
+template <typename T> bool operator != (const I<T> &x, const I<T> &y) { return !(x == y); }
+template <typename T> bool operator < (I<T> &x, I<T> &y) { return x.p < y.p; }
+template <typename T> bool operator < (const I<T> &x, const I<T> &y) { return x.p < y.p; }
+template <typename T> bool operator <= (I<T> &x, I<T> &y) { return x.p <= y.p; }
+template <typename T> bool operator <= (const I<T> &x, const I<T> &y) { return x.p <= y.p; }
+template <typename T> bool operator > (I<T> &x, I<T> &y) { return x.p > y.p; }
+template <typename T> bool operator > (const I<T> &x, const I<T> &y) { return x.p > y.p; }
+template <typename T> bool operator >= (I<T> &x, I<T> &y) { return x.p >= y.p; }
+template <typename T> bool operator >= (const I<T> &x, const I<T> &y) { return x.p >= y.p; }
+template <typename T> typename I<T>::difference_type operator - (I<T> &x, I<T> &y) { return x.p - y.p; }
+template <typename T> typename I<T>::difference_type operator - (const I<T> &x, const I<T> &y) { return x.p - y.p; }
+template <typename T> I<T> operator + (typename I<T>::difference_type x, const I<T> &y) { return I<T> (x + y.p); }
+
+template <typename T>
+class J
+{
+public:
+ J(const I<T> &x, const I<T> &y) : b (x), e (y) {}
+ const I<T> &begin ();
+ const I<T> &end ();
+private:
+ I<T> b, e;
+};
+
+template <typename T> const I<T> &J<T>::begin () { return b; }
+template <typename T> const I<T> &J<T>::end () { return e; }
+
+int results[2000];
+
+template <typename T>
+void
+baz (I<T> &i)
+{
+ if (*i < 0 || *i >= 2000)
+ abort ();
+ results[*i]++;
+}
+
+I<int>
+f1 (const I<int> &x, const I<int> &y)
+{
+ I<int> i;
+#pragma omp parallel shared (i)
+ {
+ #pragma omp for lastprivate (i) schedule(runtime)
+ for (i = x; i < y - 1; ++i)
+ baz (i);
+ #pragma omp single
+ i += 3;
+ }
+ return I<int> (i);
+}
+
+I<int>
+f2 (const I<int> &x, const I<int> &y)
+{
+ I<int> i;
+#pragma omp parallel for lastprivate (i)
+ for (i = x; i < y - 1; i = 1 - 6 + 7 + i)
+ baz (i);
+ return I<int> (i);
+}
+
+template <typename T>
+I<int>
+f3 (const I<int> &x, const I<int> &y)
+{
+ I<int> i;
+#pragma omp parallel
+ #pragma omp for lastprivate (i)
+ for (i = x + 1000 - 64; i <= y - 10; i++)
+ baz (i);
+ return i;
+}
+
+template <typename T>
+I<int>
+f4 (const I<int> &x, const I<int> &y)
+{
+ I<int> i;
+#pragma omp parallel for lastprivate (i)
+ for (i = x + 2000 - 64; i > y + 10; --i)
+ baz (i);
+ return I<int> (i);
+}
+
+template <typename T>
+I<int>
+f5 (const I<int> &x, const I<int> &y)
+{
+ I<int> i;
+#pragma omp parallel for lastprivate (i)
+ for (i = x; i > y + T (6); i--)
+ baz (i);
+ return i;
+}
+
+template <typename T>
+I<int>
+f6 (const I<int> &x, const I<int> &y)
+{
+ I<int> i;
+#pragma omp parallel for lastprivate (i)
+ for (i = x - T (7); i > y; i -= T (2))
+ baz (i);
+ return I<int> (i);
+}
+
+template <int N>
+I<int>
+f7 (I<int> i, const I<int> &x, const I<int> &y)
+{
+#pragma omp parallel for lastprivate (i)
+ for (i = x - 10; i <= y + 10; i += N)
+ baz (i);
+ return I<int> (i);
+}
+
+template <int N>
+I<int>
+f8 (J<int> j)
+{
+ I<int> i;
+#pragma omp parallel shared (i)
+ #pragma omp for lastprivate (i)
+ for (i = j.begin (); i <= j.end () + N; i += 2)
+ baz (i);
+ return i;
+}
+
+I<int> i9;
+
+template <long N>
+I<int> &
+f9 (J<int> j)
+{
+#pragma omp parallel for lastprivate (i9)
+ for (i9 = j.begin () + N; i9 <= j.end () - N; i9 = i9 - N)
+ baz (i9);
+ return i9;
+}
+
+template <typename T, int N>
+I<T>
+f10 (const I<T> &x, const I<T> &y)
+{
+ I<T> i;
+#pragma omp parallel for lastprivate (i)
+ for (i = x; i > y; i = i + N)
+ baz (i);
+ return i;
+}
+
+template <typename T, typename U>
+T
+f11 (T i, const T &x, const T &y)
+{
+#pragma omp parallel
+ #pragma omp for lastprivate (i)
+ for (i = x + U (2); i <= y + U (1); i = U (2) + U (3) + i)
+ baz (i);
+ return T (i);
+}
+
+template <typename T>
+T
+f12 (const T &x, const T &y)
+{
+ T i;
+#pragma omp parallel for lastprivate (i)
+ for (i = x; i > y; --i)
+ baz (i);
+ return i;
+}
+
+#define check(expr) \
+ for (int i = 0; i < 2000; i++) \
+ if (expr) \
+ { \
+ if (results[i] != 1) \
+ abort (); \
+ results[i] = 0; \
+ } \
+ else if (results[i]) \
+ abort ()
+
+int
+main ()
+{
+ int a[2000];
+ long b[2000];
+ for (int i = 0; i < 2000; i++)
+ {
+ a[i] = i;
+ b[i] = i;
+ }
+ if (*f1 (&a[10], &a[1873]) != 1875)
+ abort ();
+ check (i >= 10 && i < 1872);
+ if (*f2 (&a[0], &a[1998]) != 1998)
+ abort ();
+ check (i < 1997 && (i & 1) == 0);
+ if (*f3<int> (&a[10], &a[1971]) != 1962)
+ abort ();
+ check (i >= 946 && i <= 1961);
+ if (*f4<int> (&a[0], &a[30]) != 40)
+ abort ();
+ check (i > 40 && i <= 2000 - 64);
+ if (*f5<short> (&a[1931], &a[17]) != 23)
+ abort ();
+ check (i > 23 && i <= 1931);
+ if (*f6<long> (&a[1931], &a[17]) != 16)
+ abort ();
+ check (i > 17 && i <= 1924 && (i & 1) == 0);
+ if (*f7<6> (I<int> (), &a[12], &a[1800]) != 1814)
+ abort ();
+ check (i >= 2 && i <= 1808 && (i - 2) % 6 == 0);
+ if (*f8<121> (J<int> (&a[14], &a[1803])) != 1926)
+ abort ();
+ check (i >= 14 && i <= 1924 && (i & 1) == 0);
+ if (*f9<-3L> (J<int> (&a[27], &a[1761])) != 1767)
+ abort ();
+ check (i >= 24 && i <= 1764 && (i % 3) == 0);
+ if (*f10<int, -7> (&a[1939], &a[17]) != 14)
+ abort ();
+ check (i >= 21 && i <= 1939 && i % 7 == 0);
+ if (*f11<I<int>, short> (I<int> (), &a[71], &a[1941]) != 1943)
+ abort ();
+ check (i >= 73 && i <= 1938 && (i - 73) % 5 == 0);
+ if (*f12<I<int> > (&a[1761], &a[37]) != 37)
+ abort ();
+ check (i > 37 && i <= 1761);
+ if (*f10<long, -7> (&b[1939], &b[17]) != 14)
+ abort ();
+ check (i >= 21 && i <= 1939 && i % 7 == 0);
+ if (*f11<I<long>, short> (I<long> (), &b[71], &b[1941]) != 1943)
+ abort ();
+ check (i >= 73 && i <= 1938 && (i - 73) % 5 == 0);
+ if (*f12<I<long> > (&b[1761], &b[37]) != 37)
+ abort ();
+ check (i > 37 && i <= 1761);
+}
diff --git a/libgomp/testsuite/libgomp.c++/loop-10.C b/libgomp/testsuite/libgomp.c++/loop-10.C
new file mode 100644
index 00000000000..9c0de25d56f
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/loop-10.C
@@ -0,0 +1,105 @@
+// { dg-do run }
+
+#include <omp.h>
+
+extern "C" void abort (void);
+
+#define LLONG_MAX __LONG_LONG_MAX__
+#define ULLONG_MAX (LLONG_MAX * 2ULL + 1)
+#define INT_MAX __INT_MAX__
+
+int v;
+
+int
+test1 (void)
+{
+ int e = 0, cnt = 0;
+ long long i;
+ unsigned long long j;
+ char buf[6], *p;
+
+ #pragma omp for schedule(dynamic,1) collapse(2) nowait
+ for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
+ for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
+ if ((i != LLONG_MAX - 30001
+ && i != LLONG_MAX - 20001
+ && i != LLONG_MAX - 10001)
+ || j != 20)
+ e = 1;
+ else
+ cnt++;
+ if (e || cnt != 3)
+ abort ();
+ else
+ cnt = 0;
+
+ #pragma omp for schedule(guided,1) collapse(2) nowait
+ for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
+ for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
+ if ((i != -LLONG_MAX + 30000
+ && i != -LLONG_MAX + 20000
+ && i != -LLONG_MAX + 10000)
+ || j != ULLONG_MAX - 3)
+ e = 1;
+ else
+ cnt++;
+ if (e || cnt != 3)
+ abort ();
+ else
+ cnt = 0;
+
+ #pragma omp for schedule(static,1) collapse(2) nowait
+ for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
+ for (j = 20; j <= LLONG_MAX - 70 + v; j += LLONG_MAX + 50ULL)
+ if ((i != LLONG_MAX - 30001
+ && i != LLONG_MAX - 20001
+ && i != LLONG_MAX - 10001)
+ || j != 20)
+ e = 1;
+ else
+ cnt++;
+ if (e || cnt != 3)
+ abort ();
+ else
+ cnt = 0;
+
+ #pragma omp for schedule(static) collapse(2) nowait
+ for (i = -LLONG_MAX + 30000 + v; i >= -LLONG_MAX + 10000; i -= 10000)
+ for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
+ if ((i != -LLONG_MAX + 30000
+ && i != -LLONG_MAX + 20000
+ && i != -LLONG_MAX + 10000)
+ || j != ULLONG_MAX - 3)
+ e = 1;
+ else
+ cnt++;
+ if (e || cnt != 3)
+ abort ();
+ else
+ cnt = 0;
+
+ #pragma omp for schedule(runtime) collapse(2) nowait
+ for (i = 10; i < 30; i++)
+ for (p = buf; p <= buf + 4; p += 2)
+ if (i < 10 || i >= 30 || (p != buf && p != buf + 2 && p != buf + 4))
+ e = 1;
+ else
+ cnt++;
+ if (e || cnt != 60)
+ abort ();
+ else
+ cnt = 0;
+
+ return 0;
+}
+
+int
+main (void)
+{
+ if (2 * sizeof (int) != sizeof (long long))
+ return 0;
+ asm volatile ("" : "+r" (v));
+ omp_set_schedule (omp_sched_dynamic, 1);
+ test1 ();
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c++/loop-8.C b/libgomp/testsuite/libgomp.c++/loop-8.C
new file mode 100644
index 00000000000..bc20c68a167
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/loop-8.C
@@ -0,0 +1,276 @@
+#include <omp.h>
+#include <stdlib.h>
+#include <string.h>
+
+int
+test1 ()
+{
+ short int buf[64], *p;
+ int i;
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[10]; p < &buf[54]; p++)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[3]; p <= &buf[63]; p += 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[16]; p < &buf[51]; p = 4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[53]; p > &buf[9]; --p)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[63]; p >= &buf[3]; p -= 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[48]; p > &buf[15]; p = -4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ return 0;
+}
+
+int
+test2 ()
+{
+ int buf[64], *p;
+ int i;
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[10]; p < &buf[54]; p++)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[3]; p <= &buf[63]; p += 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[16]; p < &buf[51]; p = 4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[53]; p > &buf[9]; --p)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[63]; p >= &buf[3]; p -= 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[48]; p > &buf[15]; p = -4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ return 0;
+}
+
+int
+test3 ()
+{
+ int buf[64], *p;
+ int i;
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[10]; p < &buf[54]; p++)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[3]; p <= &buf[63]; p += 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[16]; p < &buf[51]; p = 4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[53]; p > &buf[9]; --p)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[63]; p >= &buf[3]; p -= 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[48]; p > &buf[15]; p = -4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ return 0;
+}
+
+int
+test4 ()
+{
+ int buf[64], *p;
+ int i;
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[10]; p < &buf[54]; p++)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[3]; p <= &buf[63]; p += 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[16]; p < &buf[51]; p = 4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[53]; p > &buf[9]; --p)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[63]; p >= &buf[3]; p -= 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[48]; p > &buf[15]; p = -4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ return 0;
+}
+
+int
+main ()
+{
+ test1 ();
+ test2 ();
+ test3 ();
+ omp_set_schedule (omp_sched_static, 0);
+ test4 ();
+ omp_set_schedule (omp_sched_static, 3);
+ test4 ();
+ omp_set_schedule (omp_sched_dynamic, 5);
+ test4 ();
+ omp_set_schedule (omp_sched_guided, 2);
+ test4 ();
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c++/loop-9.C b/libgomp/testsuite/libgomp.c++/loop-9.C
new file mode 100644
index 00000000000..35daf2276e8
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/loop-9.C
@@ -0,0 +1,387 @@
+// { dg-do run }
+
+#include <omp.h>
+
+extern "C" void abort ();
+
+#define LLONG_MAX __LONG_LONG_MAX__
+#define ULLONG_MAX (LLONG_MAX * 2ULL + 1)
+#define INT_MAX __INT_MAX__
+
+int arr[6 * 5];
+
+void
+set (int loopidx, int idx)
+{
+#pragma omp atomic
+ arr[loopidx * 5 + idx]++;
+}
+
+#define check(var, val, loopidx, idx) \
+ if (var == (val)) set (loopidx, idx); else
+#define test(loopidx, count) \
+ for (idx = 0; idx < 5; idx++) \
+ if (arr[loopidx * 5 + idx] != idx < count) \
+ abort (); \
+ else \
+ arr[loopidx * 5 + idx] = 0
+
+int
+test1 ()
+{
+ int e = 0, idx;
+
+#pragma omp parallel reduction(+:e)
+ {
+ long long i;
+ unsigned long long j;
+ #pragma omp for schedule(dynamic,1) nowait
+ for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
+ {
+ check (i, LLONG_MAX - 30001, 0, 0)
+ check (i, LLONG_MAX - 20001, 0, 1)
+ check (i, LLONG_MAX - 10001, 0, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(dynamic,1) nowait
+ for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
+ {
+ check (i, -LLONG_MAX + 30000, 1, 0)
+ check (i, -LLONG_MAX + 20000, 1, 1)
+ check (i, -LLONG_MAX + 10000, 1, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(dynamic,1) nowait
+ for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
+ {
+ check (j, 20, 2, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(dynamic,1) nowait
+ for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
+ {
+ check (j, ULLONG_MAX - 3, 3, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(dynamic,1) nowait
+ for (j = LLONG_MAX - 20000ULL; j <= LLONG_MAX + 10000ULL; j += 10000ULL)
+ {
+ check (j, LLONG_MAX - 20000ULL, 4, 0)
+ check (j, LLONG_MAX - 10000ULL, 4, 1)
+ check (j, LLONG_MAX, 4, 2)
+ check (j, LLONG_MAX + 10000ULL, 4, 3)
+ e = 1;
+ }
+ #pragma omp for schedule(dynamic,1) nowait
+ for (i = -3LL * INT_MAX - 20000LL; i <= INT_MAX + 10000LL; i += INT_MAX + 200LL)
+ {
+ check (i, -3LL * INT_MAX - 20000LL, 5, 0)
+ check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
+ check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
+ check (i, -20000LL + 600LL, 5, 3)
+ check (i, INT_MAX - 20000LL + 800LL, 5, 4)
+ e = 1;
+ }
+ }
+ if (e)
+ abort ();
+ test (0, 3);
+ test (1, 3);
+ test (2, 1);
+ test (3, 1);
+ test (4, 4);
+ test (5, 5);
+ return 0;
+}
+
+int
+test2 ()
+{
+ int e = 0, idx;
+
+#pragma omp parallel reduction(+:e)
+ {
+ long long i;
+ unsigned long long j;
+ #pragma omp for schedule(guided,1) nowait
+ for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
+ {
+ check (i, LLONG_MAX - 30001, 0, 0)
+ check (i, LLONG_MAX - 20001, 0, 1)
+ check (i, LLONG_MAX - 10001, 0, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(guided,1) nowait
+ for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
+ {
+ check (i, -LLONG_MAX + 30000, 1, 0)
+ check (i, -LLONG_MAX + 20000, 1, 1)
+ check (i, -LLONG_MAX + 10000, 1, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(guided,1) nowait
+ for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
+ {
+ check (j, 20, 2, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(guided,1) nowait
+ for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
+ {
+ check (j, ULLONG_MAX - 3, 3, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(guided,1) nowait
+ for (j = LLONG_MAX - 20000ULL; j <= LLONG_MAX + 10000ULL; j += 10000ULL)
+ {
+ check (j, LLONG_MAX - 20000ULL, 4, 0)
+ check (j, LLONG_MAX - 10000ULL, 4, 1)
+ check (j, LLONG_MAX, 4, 2)
+ check (j, LLONG_MAX + 10000ULL, 4, 3)
+ e = 1;
+ }
+ #pragma omp for schedule(guided,1) nowait
+ for (i = -3LL * INT_MAX - 20000LL; i <= INT_MAX + 10000LL; i += INT_MAX + 200LL)
+ {
+ check (i, -3LL * INT_MAX - 20000LL, 5, 0)
+ check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
+ check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
+ check (i, -20000LL + 600LL, 5, 3)
+ check (i, INT_MAX - 20000LL + 800LL, 5, 4)
+ e = 1;
+ }
+ }
+ if (e)
+ abort ();
+ test (0, 3);
+ test (1, 3);
+ test (2, 1);
+ test (3, 1);
+ test (4, 4);
+ test (5, 5);
+ return 0;
+}
+
+int
+test3 ()
+{
+ int e = 0, idx;
+
+#pragma omp parallel reduction(+:e)
+ {
+ long long i;
+ unsigned long long j;
+ #pragma omp for schedule(static) nowait
+ for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
+ {
+ check (i, LLONG_MAX - 30001, 0, 0)
+ check (i, LLONG_MAX - 20001, 0, 1)
+ check (i, LLONG_MAX - 10001, 0, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(static) nowait
+ for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
+ {
+ check (i, -LLONG_MAX + 30000, 1, 0)
+ check (i, -LLONG_MAX + 20000, 1, 1)
+ check (i, -LLONG_MAX + 10000, 1, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(static) nowait
+ for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
+ {
+ check (j, 20, 2, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(static) nowait
+ for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
+ {
+ check (j, ULLONG_MAX - 3, 3, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(static) nowait
+ for (j = LLONG_MAX - 20000ULL; j <= LLONG_MAX + 10000ULL; j += 10000ULL)
+ {
+ check (j, LLONG_MAX - 20000ULL, 4, 0)
+ check (j, LLONG_MAX - 10000ULL, 4, 1)
+ check (j, LLONG_MAX, 4, 2)
+ check (j, LLONG_MAX + 10000ULL, 4, 3)
+ e = 1;
+ }
+ #pragma omp for schedule(static) nowait
+ for (i = -3LL * INT_MAX - 20000LL; i <= INT_MAX + 10000LL; i += INT_MAX + 200LL)
+ {
+ check (i, -3LL * INT_MAX - 20000LL, 5, 0)
+ check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
+ check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
+ check (i, -20000LL + 600LL, 5, 3)
+ check (i, INT_MAX - 20000LL + 800LL, 5, 4)
+ e = 1;
+ }
+ }
+ if (e)
+ abort ();
+ test (0, 3);
+ test (1, 3);
+ test (2, 1);
+ test (3, 1);
+ test (4, 4);
+ test (5, 5);
+ return 0;
+}
+
+int
+test4 ()
+{
+ int e = 0, idx;
+
+#pragma omp parallel reduction(+:e)
+ {
+ long long i;
+ unsigned long long j;
+ #pragma omp for schedule(static,1) nowait
+ for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
+ {
+ check (i, LLONG_MAX - 30001, 0, 0)
+ check (i, LLONG_MAX - 20001, 0, 1)
+ check (i, LLONG_MAX - 10001, 0, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(static,1) nowait
+ for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
+ {
+ check (i, -LLONG_MAX + 30000, 1, 0)
+ check (i, -LLONG_MAX + 20000, 1, 1)
+ check (i, -LLONG_MAX + 10000, 1, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(static,1) nowait
+ for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
+ {
+ check (j, 20, 2, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(static,1) nowait
+ for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
+ {
+ check (j, ULLONG_MAX - 3, 3, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(static,1) nowait
+ for (j = LLONG_MAX - 20000ULL; j <= LLONG_MAX + 10000ULL; j += 10000ULL)
+ {
+ check (j, LLONG_MAX - 20000ULL, 4, 0)
+ check (j, LLONG_MAX - 10000ULL, 4, 1)
+ check (j, LLONG_MAX, 4, 2)
+ check (j, LLONG_MAX + 10000ULL, 4, 3)
+ e = 1;
+ }
+ #pragma omp for schedule(static,1) nowait
+ for (i = -3LL * INT_MAX - 20000LL; i <= INT_MAX + 10000LL; i += INT_MAX + 200LL)
+ {
+ check (i, -3LL * INT_MAX - 20000LL, 5, 0)
+ check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
+ check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
+ check (i, -20000LL + 600LL, 5, 3)
+ check (i, INT_MAX - 20000LL + 800LL, 5, 4)
+ e = 1;
+ }
+ }
+ if (e)
+ abort ();
+ test (0, 3);
+ test (1, 3);
+ test (2, 1);
+ test (3, 1);
+ test (4, 4);
+ test (5, 5);
+ return 0;
+}
+
+int
+test5 ()
+{
+ int e = 0, idx;
+
+#pragma omp parallel reduction(+:e)
+ {
+ long long i;
+ unsigned long long j;
+ #pragma omp for schedule(runtime) nowait
+ for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
+ {
+ check (i, LLONG_MAX - 30001, 0, 0)
+ check (i, LLONG_MAX - 20001, 0, 1)
+ check (i, LLONG_MAX - 10001, 0, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(runtime) nowait
+ for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
+ {
+ check (i, -LLONG_MAX + 30000, 1, 0)
+ check (i, -LLONG_MAX + 20000, 1, 1)
+ check (i, -LLONG_MAX + 10000, 1, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(runtime) nowait
+ for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
+ {
+ check (j, 20, 2, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(runtime) nowait
+ for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
+ {
+ check (j, ULLONG_MAX - 3, 3, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(runtime) nowait
+ for (j = LLONG_MAX - 20000ULL; j <= LLONG_MAX + 10000ULL; j += 10000ULL)
+ {
+ check (j, LLONG_MAX - 20000ULL, 4, 0)
+ check (j, LLONG_MAX - 10000ULL, 4, 1)
+ check (j, LLONG_MAX, 4, 2)
+ check (j, LLONG_MAX + 10000ULL, 4, 3)
+ e = 1;
+ }
+ #pragma omp for schedule(runtime) nowait
+ for (i = -3LL * INT_MAX - 20000LL; i <= INT_MAX + 10000LL; i += INT_MAX + 200LL)
+ {
+ check (i, -3LL * INT_MAX - 20000LL, 5, 0)
+ check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
+ check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
+ check (i, -20000LL + 600LL, 5, 3)
+ check (i, INT_MAX - 20000LL + 800LL, 5, 4)
+ e = 1;
+ }
+ }
+ if (e)
+ abort ();
+ test (0, 3);
+ test (1, 3);
+ test (2, 1);
+ test (3, 1);
+ test (4, 4);
+ test (5, 5);
+ return 0;
+}
+
+int
+main ()
+{
+ if (2 * sizeof (int) != sizeof (long long))
+ return 0;
+ test1 ();
+ test2 ();
+ test3 ();
+ test4 ();
+ omp_set_schedule (omp_sched_static, 0);
+ test5 ();
+ omp_set_schedule (omp_sched_static, 3);
+ test5 ();
+ omp_set_schedule (omp_sched_dynamic, 5);
+ test5 ();
+ omp_set_schedule (omp_sched_guided, 2);
+ test5 ();
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c++/task-1.C b/libgomp/testsuite/libgomp.c++/task-1.C
new file mode 100644
index 00000000000..535a8287b0c
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/task-1.C
@@ -0,0 +1,83 @@
+extern "C" void abort ();
+
+int a = 18;
+
+void
+f1 (int i, int j, int k)
+{
+ int l = 6, m = 7, n = 8;
+#pragma omp task private(j, m) shared(k, n)
+ {
+ j = 6;
+ m = 5;
+ if (++a != 19 || ++i != 9 || j != 6 || ++l != 7 || m != 5 || ++n != 9)
+ #pragma omp atomic
+ k++;
+ }
+#pragma omp taskwait
+ if (a != 19 || i != 8 || j != 26 || k != 0 || l != 6 || m != 7 || n != 9)
+ abort ();
+}
+
+int v1 = 1, v2 = 2, v5 = 5;
+int err;
+
+void
+f2 (void)
+{
+ int v3 = 3;
+#pragma omp sections private (v1) firstprivate (v2)
+ {
+ #pragma omp section
+ {
+ int v4 = 4;
+ v1 = 7;
+ #pragma omp task
+ {
+ if (++v1 != 8 || ++v2 != 3 || ++v3 != 4 || ++v4 != 5 || ++v5 != 6)
+ err = 1;
+ }
+ #pragma omp taskwait
+ if (v1 != 7 || v2 != 2 || v3 != 3 || v4 != 4 || v5 != 6)
+ abort ();
+ if (err)
+ abort ();
+ }
+ }
+}
+
+void
+f3 (int i, int j, int k)
+{
+ int l = 6, m = 7, n = 8;
+#pragma omp task private(j, m) shared(k, n) untied
+ {
+ j = 6;
+ m = 5;
+ if (++a != 19 || ++i != 9 || j != 6 || ++l != 7 || m != 5 || ++n != 9)
+ #pragma omp atomic
+ k++;
+ }
+#pragma omp taskwait
+ if (a != 19 || i != 8 || j != 26 || k != 0 || l != 6 || m != 7 || n != 9)
+ abort ();
+}
+
+int
+main ()
+{
+ f1 (8, 26, 0);
+ f2 ();
+ a = 18;
+ f3 (8, 26, 0);
+ a = 18;
+#pragma omp parallel num_threads(4)
+ {
+ #pragma omp master
+ {
+ f1 (8, 26, 0);
+ a = 18;
+ f3 (8, 26, 0);
+ }
+ }
+}
diff --git a/libgomp/testsuite/libgomp.c++/task-2.C b/libgomp/testsuite/libgomp.c++/task-2.C
new file mode 100644
index 00000000000..a198cc721b5
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/task-2.C
@@ -0,0 +1,70 @@
+// { dg-do run }
+
+#include <omp.h>
+extern "C" void abort ();
+
+int l = 5;
+
+int
+foo (int i)
+{
+ int j = 7;
+ const int k = 8;
+ #pragma omp task firstprivate (i) shared (j, l)
+ {
+ #pragma omp critical
+ {
+ j += i;
+ l += k;
+ }
+ }
+ i++;
+ #pragma omp task firstprivate (i) shared (j, l)
+ {
+ #pragma omp critical
+ {
+ j += i;
+ l += k;
+ }
+ }
+ i++;
+ #pragma omp task firstprivate (i) shared (j, l)
+ {
+ #pragma omp critical
+ {
+ j += i;
+ l += k;
+ }
+ }
+ i++;
+ #pragma omp task firstprivate (i) shared (j, l)
+ {
+ #pragma omp critical
+ {
+ j += i;
+ l += k;
+ }
+ }
+ i++;
+ #pragma omp taskwait
+ return (i != 8 * omp_get_thread_num () + 4
+ || j != 4 * i - 3
+ || k != 8);
+}
+
+int
+main (void)
+{
+ int r = 0;
+ #pragma omp parallel num_threads (4) reduction(+:r)
+ if (omp_get_num_threads () != 4)
+ {
+ #pragma omp master
+ l = 133;
+ }
+ else if (foo (8 * omp_get_thread_num ()))
+ r++;
+ if (r || l != 133)
+ abort ();
+ return 0;
+}
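
Note on the constants checked in task-2.C above: foo is entered with i == 8 * omp_get_thread_num (), and each of the four tasks captures i by firstprivate as 8t, 8t + 1, 8t + 2 and 8t + 3 before adding it to the shared j. After the taskwait, i has been incremented to 8t + 4 and j == 7 + (8t) + (8t + 1) + (8t + 2) + (8t + 3) == 32t + 13, which is exactly 4 * i - 3. The shared l accumulates k == 8 from every task, so with four threads it ends at 5 + 4 * 4 * 8 == 133; when the team does not have 4 threads, the master branch sets l to 133 directly, so the final check still holds.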
diff --git a/libgomp/testsuite/libgomp.c++/task-3.C b/libgomp/testsuite/libgomp.c++/task-3.C
new file mode 100644
index 00000000000..e1ecb49654a
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/task-3.C
@@ -0,0 +1,90 @@
+// { dg-do run }
+
+extern "C" void abort ();
+
+struct A
+{
+ A ();
+ ~A ();
+ A (const A &);
+ unsigned long l;
+};
+
+int e;
+
+A::A ()
+{
+ l = 17;
+}
+
+A::~A ()
+{
+ if (l > 30)
+ #pragma omp atomic
+ e++;
+}
+
+A::A (const A &r)
+{
+ l = r.l;
+}
+
+void
+check (int i, A &a, int j, A &b)
+{
+ if (i != 6 || a.l != 21 || j != 0 || b.l != 23)
+ #pragma omp atomic
+ e++;
+}
+
+A b;
+int j;
+
+void
+foo (int i)
+{
+ A a;
+ a.l = 21;
+ #pragma omp task firstprivate (i, a, j, b)
+ check (i, a, j, b);
+}
+
+void
+bar (int i, A a)
+{
+ a.l = 21;
+ #pragma omp task firstprivate (i, a, j, b)
+ check (i, a, j, b);
+}
+
+A
+baz ()
+{
+ A a, c;
+ a.l = 21;
+ c.l = 23;
+ #pragma omp task firstprivate (a, c)
+ check (6, a, 0, c);
+ return a;
+}
+
+int
+main ()
+{
+ b.l = 23;
+ foo (6);
+ bar (6, A ());
+ baz ();
+ #pragma omp parallel num_threads (4)
+ {
+ #pragma omp single
+ for (int i = 0; i < 64; i++)
+ {
+ foo (6);
+ bar (6, A ());
+ baz ();
+ }
+ }
+ if (e)
+ abort ();
+}
diff --git a/libgomp/testsuite/libgomp.c++/task-4.C b/libgomp/testsuite/libgomp.c++/task-4.C
new file mode 100644
index 00000000000..f2e786a2fdd
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/task-4.C
@@ -0,0 +1,37 @@
+#include <omp.h>
+extern "C" void *memset (void *, int, __SIZE_TYPE__);
+extern "C" void abort (void);
+
+int e;
+
+void
+baz (int i, int *p, int j, int *q)
+{
+ if (p[0] != 1 || p[i] != 3 || q[0] != 2 || q[j] != 4)
+ #pragma omp atomic
+ e++;
+}
+
+void
+foo (int i, int j)
+{
+ int p[i + 1];
+ int q[j + 1];
+ memset (p, 0, sizeof (p));
+ memset (q, 0, sizeof (q));
+ p[0] = 1;
+ p[i] = 3;
+ q[0] = 2;
+ q[j] = 4;
+ #pragma omp task firstprivate (p, q)
+ baz (i, p, j, q);
+}
+
+int
+main ()
+{
+ #pragma omp parallel num_threads (4)
+ foo (5 + omp_get_thread_num (), 7 + omp_get_thread_num ());
+ if (e)
+ abort ();
+}
diff --git a/libgomp/testsuite/libgomp.c++/task-5.C b/libgomp/testsuite/libgomp.c++/task-5.C
new file mode 100644
index 00000000000..c882bfe1517
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/task-5.C
@@ -0,0 +1,90 @@
+// { dg-do run }
+
+extern "C" void abort ();
+
+struct A
+{
+ A ();
+ ~A ();
+ A (const A &);
+ unsigned long l;
+};
+
+int e;
+
+A::A ()
+{
+ l = 17;
+}
+
+A::~A ()
+{
+ if (l > 130)
+ #pragma omp atomic
+ e++;
+}
+
+A::A (const A &r)
+{
+ l = r.l + 64;
+}
+
+void
+check (int i, A &a, int j, A &b)
+{
+ if (i != 6 || a.l != 21 + 64 || j != 0 || b.l != 23 + 64)
+ #pragma omp atomic
+ e++;
+}
+
+A b;
+int j;
+
+void
+foo (int i)
+{
+ A a;
+ a.l = 21;
+ #pragma omp task firstprivate (j, b)
+ check (i, a, j, b);
+}
+
+void
+bar (int i, A a)
+{
+ a.l = 21;
+ #pragma omp task firstprivate (j, b)
+ check (i, a, j, b);
+}
+
+A
+baz ()
+{
+ A a, c;
+ a.l = 21;
+ c.l = 23;
+ #pragma omp task firstprivate (a, c)
+ check (6, a, 0, c);
+ return a;
+}
+
+int
+main ()
+{
+ b.l = 23;
+ foo (6);
+ bar (6, A ());
+ baz ();
+ #pragma omp parallel num_threads (4)
+ {
+ #pragma omp single
+ for (int i = 0; i < 64; i++)
+ {
+ foo (6);
+ bar (6, A ());
+ baz ();
+ }
+ }
+ if (e)
+ abort ();
+}
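
Note on task-5.C above: it repeats task-3.C with a copy constructor that adds 64 to l, and (in foo and bar) without listing a and i in the firstprivate clause, relying on the OpenMP 3.0 rule that such variables default to firstprivate on a task. check therefore expects exactly one copy construction per object (21 + 64 and 23 + 64), and the destructor's l > 130 guard would flag a spurious second copy, which would push l to at least 145.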
diff --git a/libgomp/testsuite/libgomp.c++/task-6.C b/libgomp/testsuite/libgomp.c++/task-6.C
new file mode 100644
index 00000000000..cc9072b9d1c
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c++/task-6.C
@@ -0,0 +1,86 @@
+extern "C" void abort ();
+
+int a = 18;
+
+template <typename T>
+void
+f1 (T i, T j, T k)
+{
+ T l = 6, m = 7, n = 8;
+#pragma omp task private(j, m) shared(k, n)
+ {
+ j = 6;
+ m = 5;
+ if (++a != 19 || ++i != 9 || j != 6 || ++l != 7 || m != 5 || ++n != 9)
+ #pragma omp atomic
+ k++;
+ }
+#pragma omp taskwait
+ if (a != 19 || i != 8 || j != 26 || k != 0 || l != 6 || m != 7 || n != 9)
+ abort ();
+}
+
+int v1 = 1, v2 = 2, v5 = 5;
+int err;
+
+template <typename T>
+void
+f2 (void)
+{
+ T v3 = 3;
+#pragma omp sections private (v1) firstprivate (v2)
+ {
+ #pragma omp section
+ {
+ T v4 = 4;
+ v1 = 7;
+ #pragma omp task
+ {
+ if (++v1 != 8 || ++v2 != 3 || ++v3 != 4 || ++v4 != 5 || ++v5 != 6)
+ err = 1;
+ }
+ #pragma omp taskwait
+ if (v1 != 7 || v2 != 2 || v3 != 3 || v4 != 4 || v5 != 6)
+ abort ();
+ if (err)
+ abort ();
+ }
+ }
+}
+
+template <typename T>
+void
+f3 (T i, T j, T k)
+{
+ T l = 6, m = 7, n = 8;
+#pragma omp task private(j, m) shared(k, n) untied
+ {
+ j = 6;
+ m = 5;
+ if (++a != 19 || ++i != 9 || j != 6 || ++l != 7 || m != 5 || ++n != 9)
+ #pragma omp atomic
+ k++;
+ }
+#pragma omp taskwait
+ if (a != 19 || i != 8 || j != 26 || k != 0 || l != 6 || m != 7 || n != 9)
+ abort ();
+}
+
+int
+main ()
+{
+ f1 <int> (8, 26, 0);
+ f2 <int> ();
+ a = 18;
+ f3 <int> (8, 26, 0);
+ a = 18;
+#pragma omp parallel num_threads(4)
+ {
+ #pragma omp master
+ {
+ f1 <int> (8, 26, 0);
+ a = 18;
+ f3 <int> (8, 26, 0);
+ }
+ }
+}
diff --git a/libgomp/testsuite/libgomp.c/collapse-1.c b/libgomp/testsuite/libgomp.c/collapse-1.c
new file mode 100644
index 00000000000..82becfa7952
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/collapse-1.c
@@ -0,0 +1,30 @@
+/* { dg-do run } */
+
+#include <string.h>
+#include <stdlib.h>
+
+int
+main (void)
+{
+ int i, j, k, l = 0;
+ int a[3][3][3];
+
+ memset (a, '\0', sizeof (a));
+ #pragma omp parallel for collapse(4 - 1) schedule(static, 4)
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 2; j++)
+ for (k = 0; k < 2; k++)
+ a[i][j][k] = i + j * 4 + k * 16;
+ #pragma omp parallel
+ {
+ #pragma omp for collapse(2) reduction(|:l)
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 2; j++)
+ for (k = 0; k < 2; k++)
+ if (a[i][j][k] != i + j * 4 + k * 16)
+ l = 1;
+ }
+ if (l)
+ abort ();
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/collapse-2.c b/libgomp/testsuite/libgomp.c/collapse-2.c
new file mode 100644
index 00000000000..b5c77d46143
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/collapse-2.c
@@ -0,0 +1,30 @@
+/* { dg-do run } */
+
+#include <stdlib.h>
+#include <omp.h>
+
+int
+main (void)
+{
+ int i, j, k, l = 0, f = 0;
+ int m1 = 4, m2 = -5, m3 = 17;
+
+ #pragma omp parallel for num_threads (8) collapse(3) \
+ schedule(static, 9) reduction(+:l) \
+ firstprivate(f)
+ for (i = -2; i < m1; i++)
+ for (j = m2; j < -2; j++)
+ {
+ for (k = 13; k < m3; k++)
+ {
+ if (omp_get_num_threads () == 8
+ && ((i + 2) * 12 + (j + 5) * 4 + (k - 13)
+ != (omp_get_thread_num () * 9
+ + f++)))
+ l++;
+ }
+ }
+ if (l)
+ abort ();
+ return 0;
+}
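
Note on the expected index in collapse-2.c above: the collapse(3) nest has 6 * 3 * 4 == 72 iterations (i in -2..3, j in -5..-3, k in 13..16), and the linear position of (i, j, k) in that space is ((i + 2) * 3 + (j + 5)) * 4 + (k - 13) == (i + 2) * 12 + (j + 5) * 4 + (k - 13). With schedule(static, 9) and 8 threads, thread t is assigned the nine consecutive positions 9t .. 9t + 8, and the firstprivate f counts how many of them that thread has already run, so every iteration must satisfy position == 9 * omp_get_thread_num () + f++. The check is only armed when exactly 8 threads were actually delivered.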
diff --git a/libgomp/testsuite/libgomp.c/collapse-3.c b/libgomp/testsuite/libgomp.c/collapse-3.c
new file mode 100644
index 00000000000..4674f83f4b6
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/collapse-3.c
@@ -0,0 +1,31 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -std=gnu99" } */
+
+#include <string.h>
+#include <stdlib.h>
+
+int
+main (void)
+{
+ int i2, l = 0;
+ int a[3][3][3];
+
+ memset (a, '\0', sizeof (a));
+ #pragma omp parallel for collapse(4 - 1) schedule(static, 4)
+ for (int i = 0; i < 2; i++)
+ for (int j = 0; j < 2; j++)
+ for (int k = 0; k < 2; k++)
+ a[i][j][k] = i + j * 4 + k * 16;
+ #pragma omp parallel
+ {
+ #pragma omp for collapse(2) reduction(|:l)
+ for (i2 = 0; i2 < 2; i2++)
+ for (int j = 0; j < 2; j++)
+ for (int k = 0; k < 2; k++)
+ if (a[i2][j][k] != i2 + j * 4 + k * 16)
+ l = 1;
+ }
+ if (l)
+ abort ();
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/icv-1.c b/libgomp/testsuite/libgomp.c/icv-1.c
new file mode 100644
index 00000000000..99708f82306
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/icv-1.c
@@ -0,0 +1,33 @@
+#include <omp.h>
+#include <stdlib.h>
+
+int
+main (void)
+{
+ int err = 0;
+
+ omp_set_num_threads (4);
+ if (omp_get_max_threads () != 4)
+ abort ();
+ #pragma omp parallel reduction(|: err) num_threads(1)
+ {
+ if (omp_get_max_threads () != 4)
+ err |= 1;
+ omp_set_num_threads (6);
+ #pragma omp task if(0) shared(err)
+ {
+ if (omp_get_max_threads () != 6)
+ err |= 2;
+ omp_set_num_threads (5);
+ if (omp_get_max_threads () != 5)
+ err |= 4;
+ }
+ if (omp_get_max_threads () != 6)
+ err |= 8;
+ }
+ if (err)
+ abort ();
+ if (omp_get_max_threads () != 4)
+ abort ();
+ return 0;
+}
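
Note on icv-1.c above: the checks encode the OpenMP 3.0 rule that nthreads-var is kept per data environment. The implicit task of the parallel region inherits the value 4, its omp_set_num_threads (6) is visible only within that task, the included task (if(0)) inherits 6 at creation and its own change to 5 does not leak back, and after the region the initial task still sees the original 4.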
diff --git a/libgomp/testsuite/libgomp.c/icv-2.c b/libgomp/testsuite/libgomp.c/icv-2.c
new file mode 100644
index 00000000000..326f8eb404a
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/icv-2.c
@@ -0,0 +1,46 @@
+/* { dg-do run { target *-*-linux* } } */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE 1
+#endif
+#include <pthread.h>
+#include <omp.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+pthread_barrier_t bar;
+
+void *tf (void *p)
+{
+ int l;
+ if (p)
+ omp_set_num_threads (3);
+ pthread_barrier_wait (&bar);
+ if (!p)
+ omp_set_num_threads (6);
+ pthread_barrier_wait (&bar);
+ omp_set_dynamic (0);
+ if (omp_get_max_threads () != (p ? 3 : 6))
+ abort ();
+ l = 0;
+ #pragma omp parallel num_threads (6) reduction (|:l)
+ {
+ l |= omp_get_max_threads () != (p ? 3 : 6);
+ omp_set_num_threads ((p ? 3 : 6) + omp_get_thread_num ());
+ l |= omp_get_max_threads () != ((p ? 3 : 6) + omp_get_thread_num ());
+ }
+ if (l)
+ abort ();
+ return NULL;
+}
+
+int
+main (void)
+{
+ pthread_t th;
+ pthread_barrier_init (&bar, NULL, 2);
+ pthread_create (&th, NULL, tf, NULL);
+ tf ("");
+ pthread_join (th, NULL);
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/lib-2.c b/libgomp/testsuite/libgomp.c/lib-2.c
new file mode 100644
index 00000000000..3a3b3f65517
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/lib-2.c
@@ -0,0 +1,25 @@
+#include <stdlib.h>
+#include <omp.h>
+
+int
+main (void)
+{
+ omp_sched_t kind;
+ int modifier;
+
+ omp_set_schedule (omp_sched_static, 32);
+ omp_get_schedule (&kind, &modifier);
+ if (kind != omp_sched_static || modifier != 32)
+ abort ();
+ omp_set_schedule (omp_sched_guided, 4);
+ omp_get_schedule (&kind, &modifier);
+ if (kind != omp_sched_guided || modifier != 4)
+ abort ();
+ if (omp_get_thread_limit () < 0)
+ abort ();
+ omp_set_max_active_levels (6);
+ if (omp_get_max_active_levels () != 6)
+ abort ();
+
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/lock-1.c b/libgomp/testsuite/libgomp.c/lock-1.c
new file mode 100644
index 00000000000..e09645dbc3f
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/lock-1.c
@@ -0,0 +1,31 @@
+#include <omp.h>
+#include <stdlib.h>
+
+int
+main (void)
+{
+ int l = 0;
+ omp_nest_lock_t lock;
+ omp_init_nest_lock (&lock);
+ if (omp_test_nest_lock (&lock) != 1)
+ abort ();
+ if (omp_test_nest_lock (&lock) != 2)
+ abort ();
+#pragma omp parallel if (0) reduction (+:l)
+ {
+ /* In OpenMP 2.5 this was supposed to return 3,
+ but in OpenMP 3.0 the parallel region has a different
+ task and omp_*_lock_t are owned by tasks, not by threads. */
+ if (omp_test_nest_lock (&lock) != 0)
+ l++;
+ }
+ if (l)
+ abort ();
+ if (omp_test_nest_lock (&lock) != 3)
+ abort ();
+ omp_unset_nest_lock (&lock);
+ omp_unset_nest_lock (&lock);
+ omp_unset_nest_lock (&lock);
+ omp_destroy_nest_lock (&lock);
+ return 0;
+}
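
Note on the return values in lock-1.c above: omp_test_nest_lock returns the new nesting count when the calling task acquires or re-acquires the lock and 0 when it cannot. The parallel if(0) region runs on the same thread but as a different implicit task, so under the OpenMP 3.0 ownership rule quoted in the comment its test returns 0 rather than 3; once the region ends, the original task re-acquires the lock and sees the count climb to 3.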
diff --git a/libgomp/testsuite/libgomp.c/lock-2.c b/libgomp/testsuite/libgomp.c/lock-2.c
new file mode 100644
index 00000000000..9009b12fe5d
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/lock-2.c
@@ -0,0 +1,32 @@
+#include <omp.h>
+#include <stdlib.h>
+
+int
+main (void)
+{
+ int l = 0;
+ omp_nest_lock_t lock;
+ omp_init_nest_lock (&lock);
+#pragma omp parallel reduction (+:l) num_threads (1)
+ {
+ if (omp_test_nest_lock (&lock) != 1)
+ l++;
+ if (omp_test_nest_lock (&lock) != 2)
+ l++;
+ #pragma omp task if (0) shared (lock, l)
+ {
+ if (omp_test_nest_lock (&lock) != 0)
+ l++;
+ }
+ #pragma omp taskwait
+ if (omp_test_nest_lock (&lock) != 3)
+ l++;
+ omp_unset_nest_lock (&lock);
+ omp_unset_nest_lock (&lock);
+ omp_unset_nest_lock (&lock);
+ }
+ if (l)
+ abort ();
+ omp_destroy_nest_lock (&lock);
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/lock-3.c b/libgomp/testsuite/libgomp.c/lock-3.c
new file mode 100644
index 00000000000..1fc83726d18
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/lock-3.c
@@ -0,0 +1,60 @@
+/* { dg-do run { target *-*-linux* } } */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE 1
+#endif
+#include <pthread.h>
+#include <omp.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+pthread_barrier_t bar;
+omp_nest_lock_t lock;
+
+void *tf (void *p)
+{
+ int l;
+ if (p)
+ {
+ if (omp_test_nest_lock (&lock) != 1)
+ abort ();
+ if (omp_test_nest_lock (&lock) != 2)
+ abort ();
+ }
+ pthread_barrier_wait (&bar);
+ if (!p && omp_test_nest_lock (&lock) != 0)
+ abort ();
+ pthread_barrier_wait (&bar);
+ if (p)
+ {
+ if (omp_test_nest_lock (&lock) != 3)
+ abort ();
+ omp_unset_nest_lock (&lock);
+ omp_unset_nest_lock (&lock);
+ omp_unset_nest_lock (&lock);
+ }
+ pthread_barrier_wait (&bar);
+ if (!p)
+ {
+ if (omp_test_nest_lock (&lock) != 1)
+ abort ();
+ if (omp_test_nest_lock (&lock) != 2)
+ abort ();
+ omp_unset_nest_lock (&lock);
+ omp_unset_nest_lock (&lock);
+ }
+ return NULL;
+}
+
+int
+main (void)
+{
+ pthread_t th;
+ omp_init_nest_lock (&lock);
+ pthread_barrier_init (&bar, NULL, 2);
+ pthread_create (&th, NULL, tf, NULL);
+ tf ("");
+ pthread_join (th, NULL);
+ omp_destroy_nest_lock (&lock);
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/loop-4.c b/libgomp/testsuite/libgomp.c/loop-4.c
new file mode 100644
index 00000000000..bc57c043aad
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/loop-4.c
@@ -0,0 +1,28 @@
+/* { dg-do run } */
+
+extern void abort (void);
+
+int
+main (void)
+{
+ int e = 0;
+#pragma omp parallel num_threads (4) reduction(+:e)
+ {
+ long i;
+ #pragma omp for schedule(dynamic,1)
+ for (i = __LONG_MAX__ - 30001; i <= __LONG_MAX__ - 10001; i += 10000)
+ if (i != __LONG_MAX__ - 30001
+ && i != __LONG_MAX__ - 20001
+ && i != __LONG_MAX__ - 10001)
+ e = 1;
+ #pragma omp for schedule(dynamic,1)
+ for (i = -__LONG_MAX__ + 30000; i >= -__LONG_MAX__ + 10000; i -= 10000)
+ if (i != -__LONG_MAX__ + 30000
+ && i != -__LONG_MAX__ + 20000
+ && i != -__LONG_MAX__ + 10000)
+ e = 1;
+ }
+ if (e)
+ abort ();
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/loop-5.c b/libgomp/testsuite/libgomp.c/loop-5.c
new file mode 100644
index 00000000000..3a5c7cf4556
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/loop-5.c
@@ -0,0 +1,276 @@
+#include <omp.h>
+#include <stdlib.h>
+#include <string.h>
+
+int
+test1 (void)
+{
+ short int buf[64], *p;
+ int i;
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[10]; p < &buf[54]; p++)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[3]; p <= &buf[63]; p += 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[16]; p < &buf[51]; p = 4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[53]; p > &buf[9]; --p)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[63]; p >= &buf[3]; p -= 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[48]; p > &buf[15]; p = -4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for
+ for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ return 0;
+}
+
+int
+test2 (void)
+{
+ int buf[64], *p;
+ int i;
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[10]; p < &buf[54]; p++)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[3]; p <= &buf[63]; p += 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[16]; p < &buf[51]; p = 4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[53]; p > &buf[9]; --p)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[63]; p >= &buf[3]; p -= 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[48]; p > &buf[15]; p = -4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (static, 3)
+ for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ return 0;
+}
+
+int
+test3 (void)
+{
+ int buf[64], *p;
+ int i;
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[10]; p < &buf[54]; p++)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[3]; p <= &buf[63]; p += 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[16]; p < &buf[51]; p = 4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[53]; p > &buf[9]; --p)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[63]; p >= &buf[3]; p -= 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[48]; p > &buf[15]; p = -4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (dynamic, 3)
+ for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ return 0;
+}
+
+int
+test4 (void)
+{
+ int buf[64], *p;
+ int i;
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[10]; p < &buf[54]; p++)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[3]; p <= &buf[63]; p += 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[16]; p < &buf[51]; p = 4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[16]; p <= &buf[40]; p = p + 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[53]; p > &buf[9]; --p)
+ *p = 5;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 5 * (i >= 10 && i < 54))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[63]; p >= &buf[3]; p -= 2)
+ p[-2] = 6;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 6 * ((i & 1) && i <= 61))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[48]; p > &buf[15]; p = -4 + p)
+ p[2] = 7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != 7 * ((i & 3) == 2 && i >= 18 && i < 53))
+ abort ();
+ memset (buf, '\0', sizeof (buf));
+#pragma omp parallel for schedule (runtime)
+ for (p = &buf[40]; p >= &buf[16]; p = p - 4ULL)
+ p[2] = -7;
+ for (i = 0; i < 64; i++)
+ if (buf[i] != -7 * ((i & 3) == 2 && i >= 18 && i <= 42))
+ abort ();
+ return 0;
+}
+
+int
+main (void)
+{
+ test1 ();
+ test2 ();
+ test3 ();
+ omp_set_schedule (omp_sched_static, 0);
+ test4 ();
+ omp_set_schedule (omp_sched_static, 3);
+ test4 ();
+ omp_set_schedule (omp_sched_dynamic, 5);
+ test4 ();
+ omp_set_schedule (omp_sched_guided, 2);
+ test4 ();
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/loop-6.c b/libgomp/testsuite/libgomp.c/loop-6.c
new file mode 100644
index 00000000000..9029e181bd2
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/loop-6.c
@@ -0,0 +1,387 @@
+/* { dg-do run } */
+
+#include <omp.h>
+
+extern void abort (void);
+
+#define LLONG_MAX __LONG_LONG_MAX__
+#define ULLONG_MAX (LLONG_MAX * 2ULL + 1)
+#define INT_MAX __INT_MAX__
+
+int arr[6 * 5];
+
+void
+set (int loopidx, int idx)
+{
+#pragma omp atomic
+ arr[loopidx * 5 + idx]++;
+}
+
+#define check(var, val, loopidx, idx) \
+ if (var == (val)) set (loopidx, idx); else
+#define test(loopidx, count) \
+ for (idx = 0; idx < 5; idx++) \
+ if (arr[loopidx * 5 + idx] != idx < count) \
+ abort (); \
+ else \
+ arr[loopidx * 5 + idx] = 0
+
+int
+test1 (void)
+{
+ int e = 0, idx;
+
+#pragma omp parallel reduction(+:e)
+ {
+ long long i;
+ unsigned long long j;
+ #pragma omp for schedule(dynamic,1) nowait
+ for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
+ {
+ check (i, LLONG_MAX - 30001, 0, 0)
+ check (i, LLONG_MAX - 20001, 0, 1)
+ check (i, LLONG_MAX - 10001, 0, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(dynamic,1) nowait
+ for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
+ {
+ check (i, -LLONG_MAX + 30000, 1, 0)
+ check (i, -LLONG_MAX + 20000, 1, 1)
+ check (i, -LLONG_MAX + 10000, 1, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(dynamic,1) nowait
+ for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
+ {
+ check (j, 20, 2, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(dynamic,1) nowait
+ for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
+ {
+ check (j, ULLONG_MAX - 3, 3, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(dynamic,1) nowait
+ for (j = LLONG_MAX - 20000ULL; j <= LLONG_MAX + 10000ULL; j += 10000ULL)
+ {
+ check (j, LLONG_MAX - 20000ULL, 4, 0)
+ check (j, LLONG_MAX - 10000ULL, 4, 1)
+ check (j, LLONG_MAX, 4, 2)
+ check (j, LLONG_MAX + 10000ULL, 4, 3)
+ e = 1;
+ }
+ #pragma omp for schedule(dynamic,1) nowait
+ for (i = -3LL * INT_MAX - 20000LL; i <= INT_MAX + 10000LL; i += INT_MAX + 200LL)
+ {
+ check (i, -3LL * INT_MAX - 20000LL, 5, 0)
+ check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
+ check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
+ check (i, -20000LL + 600LL, 5, 3)
+ check (i, INT_MAX - 20000LL + 800LL, 5, 4)
+ e = 1;
+ }
+ }
+ if (e)
+ abort ();
+ test (0, 3);
+ test (1, 3);
+ test (2, 1);
+ test (3, 1);
+ test (4, 4);
+ test (5, 5);
+ return 0;
+}
+
+int
+test2 (void)
+{
+ int e = 0, idx;
+
+#pragma omp parallel reduction(+:e)
+ {
+ long long i;
+ unsigned long long j;
+ #pragma omp for schedule(guided,1) nowait
+ for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
+ {
+ check (i, LLONG_MAX - 30001, 0, 0)
+ check (i, LLONG_MAX - 20001, 0, 1)
+ check (i, LLONG_MAX - 10001, 0, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(guided,1) nowait
+ for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
+ {
+ check (i, -LLONG_MAX + 30000, 1, 0)
+ check (i, -LLONG_MAX + 20000, 1, 1)
+ check (i, -LLONG_MAX + 10000, 1, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(guided,1) nowait
+ for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
+ {
+ check (j, 20, 2, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(guided,1) nowait
+ for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
+ {
+ check (j, ULLONG_MAX - 3, 3, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(guided,1) nowait
+ for (j = LLONG_MAX - 20000ULL; j <= LLONG_MAX + 10000ULL; j += 10000ULL)
+ {
+ check (j, LLONG_MAX - 20000ULL, 4, 0)
+ check (j, LLONG_MAX - 10000ULL, 4, 1)
+ check (j, LLONG_MAX, 4, 2)
+ check (j, LLONG_MAX + 10000ULL, 4, 3)
+ e = 1;
+ }
+ #pragma omp for schedule(guided,1) nowait
+ for (i = -3LL * INT_MAX - 20000LL; i <= INT_MAX + 10000LL; i += INT_MAX + 200LL)
+ {
+ check (i, -3LL * INT_MAX - 20000LL, 5, 0)
+ check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
+ check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
+ check (i, -20000LL + 600LL, 5, 3)
+ check (i, INT_MAX - 20000LL + 800LL, 5, 4)
+ e = 1;
+ }
+ }
+ if (e)
+ abort ();
+ test (0, 3);
+ test (1, 3);
+ test (2, 1);
+ test (3, 1);
+ test (4, 4);
+ test (5, 5);
+ return 0;
+}
+
+int
+test3 (void)
+{
+ int e = 0, idx;
+
+#pragma omp parallel reduction(+:e)
+ {
+ long long i;
+ unsigned long long j;
+ #pragma omp for schedule(static) nowait
+ for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
+ {
+ check (i, LLONG_MAX - 30001, 0, 0)
+ check (i, LLONG_MAX - 20001, 0, 1)
+ check (i, LLONG_MAX - 10001, 0, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(static) nowait
+ for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
+ {
+ check (i, -LLONG_MAX + 30000, 1, 0)
+ check (i, -LLONG_MAX + 20000, 1, 1)
+ check (i, -LLONG_MAX + 10000, 1, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(static) nowait
+ for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
+ {
+ check (j, 20, 2, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(static) nowait
+ for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
+ {
+ check (j, ULLONG_MAX - 3, 3, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(static) nowait
+ for (j = LLONG_MAX - 20000ULL; j <= LLONG_MAX + 10000ULL; j += 10000ULL)
+ {
+ check (j, LLONG_MAX - 20000ULL, 4, 0)
+ check (j, LLONG_MAX - 10000ULL, 4, 1)
+ check (j, LLONG_MAX, 4, 2)
+ check (j, LLONG_MAX + 10000ULL, 4, 3)
+ e = 1;
+ }
+ #pragma omp for schedule(static) nowait
+ for (i = -3LL * INT_MAX - 20000LL; i <= INT_MAX + 10000LL; i += INT_MAX + 200LL)
+ {
+ check (i, -3LL * INT_MAX - 20000LL, 5, 0)
+ check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
+ check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
+ check (i, -20000LL + 600LL, 5, 3)
+ check (i, INT_MAX - 20000LL + 800LL, 5, 4)
+ e = 1;
+ }
+ }
+ if (e)
+ abort ();
+ test (0, 3);
+ test (1, 3);
+ test (2, 1);
+ test (3, 1);
+ test (4, 4);
+ test (5, 5);
+ return 0;
+}
+
+int
+test4 (void)
+{
+ int e = 0, idx;
+
+#pragma omp parallel reduction(+:e)
+ {
+ long long i;
+ unsigned long long j;
+ #pragma omp for schedule(static,1) nowait
+ for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
+ {
+ check (i, LLONG_MAX - 30001, 0, 0)
+ check (i, LLONG_MAX - 20001, 0, 1)
+ check (i, LLONG_MAX - 10001, 0, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(static,1) nowait
+ for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
+ {
+ check (i, -LLONG_MAX + 30000, 1, 0)
+ check (i, -LLONG_MAX + 20000, 1, 1)
+ check (i, -LLONG_MAX + 10000, 1, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(static,1) nowait
+ for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
+ {
+ check (j, 20, 2, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(static,1) nowait
+ for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
+ {
+ check (j, ULLONG_MAX - 3, 3, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(static,1) nowait
+ for (j = LLONG_MAX - 20000ULL; j <= LLONG_MAX + 10000ULL; j += 10000ULL)
+ {
+ check (j, LLONG_MAX - 20000ULL, 4, 0)
+ check (j, LLONG_MAX - 10000ULL, 4, 1)
+ check (j, LLONG_MAX, 4, 2)
+ check (j, LLONG_MAX + 10000ULL, 4, 3)
+ e = 1;
+ }
+ #pragma omp for schedule(static,1) nowait
+ for (i = -3LL * INT_MAX - 20000LL; i <= INT_MAX + 10000LL; i += INT_MAX + 200LL)
+ {
+ check (i, -3LL * INT_MAX - 20000LL, 5, 0)
+ check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
+ check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
+ check (i, -20000LL + 600LL, 5, 3)
+ check (i, INT_MAX - 20000LL + 800LL, 5, 4)
+ e = 1;
+ }
+ }
+ if (e)
+ abort ();
+ test (0, 3);
+ test (1, 3);
+ test (2, 1);
+ test (3, 1);
+ test (4, 4);
+ test (5, 5);
+ return 0;
+}
+
+int
+test5 (void)
+{
+ int e = 0, idx;
+
+#pragma omp parallel reduction(+:e)
+ {
+ long long i;
+ unsigned long long j;
+ #pragma omp for schedule(runtime) nowait
+ for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
+ {
+ check (i, LLONG_MAX - 30001, 0, 0)
+ check (i, LLONG_MAX - 20001, 0, 1)
+ check (i, LLONG_MAX - 10001, 0, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(runtime) nowait
+ for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
+ {
+ check (i, -LLONG_MAX + 30000, 1, 0)
+ check (i, -LLONG_MAX + 20000, 1, 1)
+ check (i, -LLONG_MAX + 10000, 1, 2)
+ e = 1;
+ }
+ #pragma omp for schedule(runtime) nowait
+ for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
+ {
+ check (j, 20, 2, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(runtime) nowait
+ for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
+ {
+ check (j, ULLONG_MAX - 3, 3, 0)
+ e = 1;
+ }
+ #pragma omp for schedule(runtime) nowait
+ for (j = LLONG_MAX - 20000ULL; j <= LLONG_MAX + 10000ULL; j += 10000ULL)
+ {
+ check (j, LLONG_MAX - 20000ULL, 4, 0)
+ check (j, LLONG_MAX - 10000ULL, 4, 1)
+ check (j, LLONG_MAX, 4, 2)
+ check (j, LLONG_MAX + 10000ULL, 4, 3)
+ e = 1;
+ }
+ #pragma omp for schedule(runtime) nowait
+ for (i = -3LL * INT_MAX - 20000LL; i <= INT_MAX + 10000LL; i += INT_MAX + 200LL)
+ {
+ check (i, -3LL * INT_MAX - 20000LL, 5, 0)
+ check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
+ check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
+ check (i, -20000LL + 600LL, 5, 3)
+ check (i, INT_MAX - 20000LL + 800LL, 5, 4)
+ e = 1;
+ }
+ }
+ if (e)
+ abort ();
+ test (0, 3);
+ test (1, 3);
+ test (2, 1);
+ test (3, 1);
+ test (4, 4);
+ test (5, 5);
+ return 0;
+}
+
+int
+main (void)
+{
+ if (2 * sizeof (int) != sizeof (long long))
+ return 0;
+ test1 ();
+ test2 ();
+ test3 ();
+ test4 ();
+ omp_set_schedule (omp_sched_static, 0);
+ test5 ();
+ omp_set_schedule (omp_sched_static, 3);
+ test5 ();
+ omp_set_schedule (omp_sched_dynamic, 5);
+ test5 ();
+ omp_set_schedule (omp_sched_guided, 2);
+ test5 ();
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/loop-7.c b/libgomp/testsuite/libgomp.c/loop-7.c
new file mode 100644
index 00000000000..fc97f4a2907
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/loop-7.c
@@ -0,0 +1,105 @@
+/* { dg-do run } */
+
+#include <omp.h>
+
+extern void abort (void);
+
+#define LLONG_MAX __LONG_LONG_MAX__
+#define ULLONG_MAX (LLONG_MAX * 2ULL + 1)
+#define INT_MAX __INT_MAX__
+
+int v;
+
+int
+test1 (void)
+{
+ int e = 0, cnt = 0;
+ long long i;
+ unsigned long long j;
+ char buf[6], *p;
+
+ #pragma omp for schedule(dynamic,1) collapse(2) nowait
+ for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
+ for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
+ if ((i != LLONG_MAX - 30001
+ && i != LLONG_MAX - 20001
+ && i != LLONG_MAX - 10001)
+ || j != 20)
+ e = 1;
+ else
+ cnt++;
+ if (e || cnt != 3)
+ abort ();
+ else
+ cnt = 0;
+
+ #pragma omp for schedule(guided,1) collapse(2) nowait
+ for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
+ for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
+ if ((i != -LLONG_MAX + 30000
+ && i != -LLONG_MAX + 20000
+ && i != -LLONG_MAX + 10000)
+ || j != ULLONG_MAX - 3)
+ e = 1;
+ else
+ cnt++;
+ if (e || cnt != 3)
+ abort ();
+ else
+ cnt = 0;
+
+ #pragma omp for schedule(static,1) collapse(2) nowait
+ for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
+ for (j = 20; j <= LLONG_MAX - 70 + v; j += LLONG_MAX + 50ULL)
+ if ((i != LLONG_MAX - 30001
+ && i != LLONG_MAX - 20001
+ && i != LLONG_MAX - 10001)
+ || j != 20)
+ e = 1;
+ else
+ cnt++;
+ if (e || cnt != 3)
+ abort ();
+ else
+ cnt = 0;
+
+ #pragma omp for schedule(static) collapse(2) nowait
+ for (i = -LLONG_MAX + 30000 + v; i >= -LLONG_MAX + 10000; i -= 10000)
+ for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
+ if ((i != -LLONG_MAX + 30000
+ && i != -LLONG_MAX + 20000
+ && i != -LLONG_MAX + 10000)
+ || j != ULLONG_MAX - 3)
+ e = 1;
+ else
+ cnt++;
+ if (e || cnt != 3)
+ abort ();
+ else
+ cnt = 0;
+
+ #pragma omp for schedule(runtime) collapse(2) nowait
+ for (i = 10; i < 30; i++)
+ for (p = buf; p <= buf + 4; p += 2)
+ if (i < 10 || i >= 30 || (p != buf && p != buf + 2 && p != buf + 4))
+ e = 1;
+ else
+ cnt++;
+ if (e || cnt != 60)
+ abort ();
+ else
+ cnt = 0;
+
+ return 0;
+}
+
+int
+main (void)
+{
+ if (2 * sizeof (int) != sizeof (long long))
+ return 0;
+ asm volatile ("" : "+r" (v));
+ omp_set_schedule (omp_sched_dynamic, 1);
+ test1 ();
+ return 0;
+}
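
Note on the cnt checks in loop-7.c above: in each of the first four collapse(2) nests the outer loop contributes 3 iterations while the inner loop survives only its first, because the huge unsigned step (LLONG_MAX + 50ULL) jumps past the bound immediately, giving cnt == 3; the last nest runs 20 * 3 == 60 iterations. The omp for directives are orphaned, with no enclosing parallel region, so the initial thread executes every iteration and cnt sees the full counts. The asm in main and the global v are presumably there only to keep some loop bounds from being compile-time constants, so the runtime iteration-count path is exercised as well.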
diff --git a/libgomp/testsuite/libgomp.c/loop-8.c b/libgomp/testsuite/libgomp.c/loop-8.c
new file mode 100644
index 00000000000..25db25c3b43
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/loop-8.c
@@ -0,0 +1,27 @@
+extern void abort (void);
+
+int buf[256];
+
+void __attribute__((noinline))
+foo (void)
+{
+ int i;
+ #pragma omp for schedule (auto)
+ for (i = 0; i < 256; i++)
+ buf[i] += i;
+}
+
+int
+main (void)
+{
+ int i;
+ #pragma omp parallel for schedule (auto)
+ for (i = 0; i < 256; i++)
+ buf[i] = i;
+ #pragma omp parallel num_threads (4)
+ foo ();
+ for (i = 0; i < 256; i++)
+ if (buf[i] != 2 * i)
+ abort ();
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/loop-9.c b/libgomp/testsuite/libgomp.c/loop-9.c
new file mode 100644
index 00000000000..1f789e12ecb
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/loop-9.c
@@ -0,0 +1,18 @@
+extern void abort (void);
+
+char buf[8] = "01234567";
+char buf2[8] = "23456789";
+
+int
+main (void)
+{
+ char *p, *q;
+ int sum = 0;
+ #pragma omp parallel for collapse (2) reduction (+:sum) lastprivate (p, q)
+ for (p = buf; p < &buf[8]; p++)
+ for (q = &buf2[0]; q <= buf2 + 7; q++)
+ sum += (*p - '0') + (*q - '0');
+ if (p != &buf[8] || q != buf2 + 8 || sum != 576)
+ abort ();
+ return 0;
+}
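
Note on the expected sum in loop-9.c above: the collapse(2) nest visits all 8 * 8 ordered pairs of characters; the digits of "01234567" sum to 28 and those of "23456789" to 44, so sum == 8 * 28 + 8 * 44 == 576. lastprivate hands back the pointer values after the final iteration's increments, i.e. p == &buf[8] and q == buf2 + 8, one past the end of each array.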
diff --git a/libgomp/testsuite/libgomp.c/nested-3.c b/libgomp/testsuite/libgomp.c/nested-3.c
new file mode 100644
index 00000000000..618600633ac
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/nested-3.c
@@ -0,0 +1,89 @@
+#include <omp.h>
+#include <stdlib.h>
+#include <string.h>
+
+int
+main (void)
+{
+ int e[3];
+
+ memset (e, '\0', sizeof (e));
+ omp_set_nested (1);
+ omp_set_dynamic (0);
+ if (omp_in_parallel ()
+ || omp_get_level () != 0
+ || omp_get_ancestor_thread_num (0) != 0
+ || omp_get_ancestor_thread_num (-1) != -1
+ || omp_get_ancestor_thread_num (1) != -1
+ || omp_get_team_size (0) != 1
+ || omp_get_team_size (-1) != -1
+ || omp_get_team_size (1) != -1
+ || omp_get_active_level () != 0)
+ abort ();
+#pragma omp parallel num_threads (4)
+ {
+ int tn1 = omp_get_thread_num ();
+ if (omp_in_parallel () != 1
+ || omp_get_num_threads () != 4
+ || tn1 >= 4 || tn1 < 0
+ || omp_get_level () != 1
+ || omp_get_ancestor_thread_num (0) != 0
+ || omp_get_ancestor_thread_num (1) != tn1
+ || omp_get_ancestor_thread_num (-1) != -1
+ || omp_get_ancestor_thread_num (2) != -1
+ || omp_get_team_size (0) != 1
+ || omp_get_team_size (1) != omp_get_num_threads ()
+ || omp_get_team_size (-1) != -1
+ || omp_get_team_size (2) != -1
+ || omp_get_active_level () != 1)
+ #pragma omp atomic
+ e[0] += 1;
+ #pragma omp parallel if (0) num_threads(5) firstprivate(tn1)
+ {
+ int tn2 = omp_get_thread_num ();
+ if (omp_in_parallel () != 1
+ || omp_get_num_threads () != 1
+ || tn2 != 0
+ || omp_get_level () != 2
+ || omp_get_ancestor_thread_num (0) != 0
+ || omp_get_ancestor_thread_num (1) != tn1
+ || omp_get_ancestor_thread_num (2) != tn2
+ || omp_get_ancestor_thread_num (-1) != -1
+ || omp_get_ancestor_thread_num (3) != -1
+ || omp_get_team_size (0) != 1
+ || omp_get_team_size (1) != 4
+ || omp_get_team_size (2) != 1
+ || omp_get_team_size (-1) != -1
+ || omp_get_team_size (3) != -1
+ || omp_get_active_level () != 1)
+ #pragma omp atomic
+ e[1] += 1;
+ #pragma omp parallel num_threads(2) firstprivate(tn1, tn2)
+ {
+ int tn3 = omp_get_thread_num ();
+ if (omp_in_parallel () != 1
+ || omp_get_num_threads () != 2
+ || tn3 > 1 || tn3 < 0
+ || omp_get_level () != 3
+ || omp_get_ancestor_thread_num (0) != 0
+ || omp_get_ancestor_thread_num (1) != tn1
+ || omp_get_ancestor_thread_num (2) != tn2
+ || omp_get_ancestor_thread_num (3) != tn3
+ || omp_get_ancestor_thread_num (-1) != -1
+ || omp_get_ancestor_thread_num (4) != -1
+ || omp_get_team_size (0) != 1
+ || omp_get_team_size (1) != 4
+ || omp_get_team_size (2) != 1
+ || omp_get_team_size (3) != 2
+ || omp_get_team_size (-1) != -1
+ || omp_get_team_size (4) != -1
+ || omp_get_active_level () != 2)
+ #pragma omp atomic
+ e[2] += 1;
+ }
+ }
+ }
+ if (e[0] || e[1] || e[2])
+ abort ();
+ return 0;
+}
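
Note on the level checks in nested-3.c above: omp_get_level counts every enclosing parallel region (three at the innermost point) while omp_get_active_level counts only those executed by more than one thread, so the serialized parallel if(0) region raises the former to 2 but leaves the latter at 1, and only the innermost two-thread team reaches an active level of 2. Out-of-range arguments to omp_get_ancestor_thread_num and omp_get_team_size are expected to return -1, and level 0 always reports thread 0 in a team of 1.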
diff --git a/libgomp/testsuite/libgomp.c/nestedfn-6.c b/libgomp/testsuite/libgomp.c/nestedfn-6.c
new file mode 100644
index 00000000000..c0ace6b3fb8
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/nestedfn-6.c
@@ -0,0 +1,21 @@
+extern void abort (void);
+
+int j;
+
+int
+main (void)
+{
+ int i;
+ void nested (void) { i = 0; }
+#pragma omp parallel for lastprivate (i)
+ for (i = 0; i < 50; i += 3)
+ ;
+ if (i != 51)
+ abort ();
+#pragma omp parallel for lastprivate (j)
+ for (j = -50; j < 70; j += 7)
+ ;
+ if (j != 76)
+ abort ();
+ return 0;
+}
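
Note on the final values in nestedfn-6.c above: lastprivate yields the iteration variable as it stands after the last iteration's increment, so i, stepping by 3 from 0, last executes with 48 and finishes at 51, while j, stepping by 7 from -50, last executes with 69 and finishes at 76. The otherwise unused nested function is presumably there so that i is referenced through a static chain, exercising lastprivate on a variable that is addressable from a nested function.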
diff --git a/libgomp/testsuite/libgomp.c/pr26943-2.c b/libgomp/testsuite/libgomp.c/pr26943-2.c
index 778048492f6..c052e811288 100644
--- a/libgomp/testsuite/libgomp.c/pr26943-2.c
+++ b/libgomp/testsuite/libgomp.c/pr26943-2.c
@@ -20,7 +20,7 @@ main (void)
{
if (a != 8 || b != 12 || e[0] != 'a' || f[0] != 'b')
j++;
-#pragma omp barrier
+#pragma omp barrier /* { dg-warning "may not be closely nested" } */
#pragma omp atomic
a += i;
b += i;
@@ -31,7 +31,7 @@ main (void)
f[0] += i;
g[0] = 'g' + i;
h[0] = 'h' + i;
-#pragma omp barrier
+#pragma omp barrier /* { dg-warning "may not be closely nested" } */
if (a != 8 + 6 || b != 12 + i || c != i || d != i)
j += 8;
if (e[0] != 'a' + 6 || f[0] != 'b' + i || g[0] != 'g' + i)
diff --git a/libgomp/testsuite/libgomp.c/pr26943-3.c b/libgomp/testsuite/libgomp.c/pr26943-3.c
index be93cb479d1..dc3d5010da1 100644
--- a/libgomp/testsuite/libgomp.c/pr26943-3.c
+++ b/libgomp/testsuite/libgomp.c/pr26943-3.c
@@ -26,7 +26,7 @@ main (void)
{
if (a != 8 || b != 12 || e[0] != 'a' || f[0] != 'b')
j++;
-#pragma omp barrier
+#pragma omp barrier /* { dg-warning "may not be closely nested" } */
#pragma omp atomic
a += i;
b += i;
@@ -37,7 +37,7 @@ main (void)
f[0] += i;
g[0] = 'g' + i;
h[0] = 'h' + i;
-#pragma omp barrier
+#pragma omp barrier /* { dg-warning "may not be closely nested" } */
if (a != 8 + 6 || b != 12 + i || c != i || d != i)
j += 8;
if (e[0] != 'a' + 6 || f[0] != 'b' + i || g[0] != 'g' + i)
diff --git a/libgomp/testsuite/libgomp.c/pr26943-4.c b/libgomp/testsuite/libgomp.c/pr26943-4.c
index 33d368583dd..0f1d4197a5f 100644
--- a/libgomp/testsuite/libgomp.c/pr26943-4.c
+++ b/libgomp/testsuite/libgomp.c/pr26943-4.c
@@ -27,7 +27,7 @@ main (void)
{
if (a != 8 || b != 12 || e[0] != 'a' || f[0] != 'b')
j++;
-#pragma omp barrier
+#pragma omp barrier /* { dg-warning "may not be closely nested" } */
#pragma omp atomic
a += i;
b += i;
@@ -38,7 +38,7 @@ main (void)
f[0] += i;
g[0] = 'g' + i;
h[0] = 'h' + i;
-#pragma omp barrier
+#pragma omp barrier /* { dg-warning "may not be closely nested" } */
if (a != 8 + 6 || b != 12 + i || c != i || d != i)
j += 8;
if (e[0] != 'a' + 6 || f[0] != 'b' + i || g[0] != 'g' + i)
diff --git a/libgomp/testsuite/libgomp.c/sort-1.c b/libgomp/testsuite/libgomp.c/sort-1.c
new file mode 100644
index 00000000000..269d69da12c
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/sort-1.c
@@ -0,0 +1,379 @@
+/* Test and benchmark of a couple of parallel sorting algorithms.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include <limits.h>
+#include <omp.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int failures;
+
+#define THRESHOLD 100
+
+static void
+verify (const char *name, double stime, int *array, int count)
+{
+ int i;
+ double etime = omp_get_wtime ();
+
+ printf ("%s: %g\n", name, etime - stime);
+ for (i = 1; i < count; i++)
+ if (array[i] < array[i - 1])
+ {
+ printf ("%s: incorrectly sorted\n", name);
+ failures = 1;
+ }
+}
+
+static void
+insertsort (int *array, int s, int e)
+{
+ int i, j, val;
+ for (i = s + 1; i <= e; i++)
+ {
+ val = array[i];
+ j = i;
+ while (j-- > s && val < array[j])
+ array[j + 1] = array[j];
+ array[j + 1] = val;
+ }
+}
+
+struct int_pair
+{
+ int lo;
+ int hi;
+};
+
+struct int_pair_stack
+{
+ struct int_pair *top;
+#define STACK_SIZE 4 * CHAR_BIT * sizeof (int)
+ struct int_pair arr[STACK_SIZE];
+};
+
+static inline void
+init_int_pair_stack (struct int_pair_stack *stack)
+{
+ stack->top = &stack->arr[0];
+}
+
+static inline void
+push_int_pair_stack (struct int_pair_stack *stack, int lo, int hi)
+{
+ stack->top->lo = lo;
+ stack->top->hi = hi;
+ stack->top++;
+}
+
+static inline void
+pop_int_pair_stack (struct int_pair_stack *stack, int *lo, int *hi)
+{
+ stack->top--;
+ *lo = stack->top->lo;
+ *hi = stack->top->hi;
+}
+
+static inline int
+size_int_pair_stack (struct int_pair_stack *stack)
+{
+ return stack->top - &stack->arr[0];
+}
+
+static inline void
+busy_wait (void)
+{
+#if defined __i386__ || defined __x86_64__
+ __asm volatile ("rep; nop" : : : "memory");
+#elif defined __ia64__
+ __asm volatile ("hint @pause" : : : "memory");
+#elif defined __sparc__ && (defined __arch64__ || defined __sparc_v9__)
+ __asm volatile ("membar #LoadLoad" : : : "memory");
+#else
+ __asm volatile ("" : : : "memory");
+#endif
+}
+
+static inline void
+swap (int *array, int a, int b)
+{
+ int val = array[a];
+ array[a] = array[b];
+ array[b] = val;
+}
+
+static inline int
+choose_pivot (int *array, int lo, int hi)
+{
+ int mid = (lo + hi) / 2;
+
+ if (array[mid] < array[lo])
+ swap (array, lo, mid);
+ if (array[hi] < array[mid])
+ {
+ swap (array, mid, hi);
+ if (array[mid] < array[lo])
+ swap (array, lo, mid);
+ }
+ return array[mid];
+}
+
+static inline int
+partition (int *array, int lo, int hi)
+{
+ int pivot = choose_pivot (array, lo, hi);
+ int left = lo;
+ int right = hi;
+
+ for (;;)
+ {
+ while (array[++left] < pivot);
+ while (array[--right] > pivot);
+ if (left >= right)
+ break;
+ swap (array, left, right);
+ }
+ return left;
+}
+
+static void
+sort1 (int *array, int count)
+{
+ omp_lock_t lock;
+ struct int_pair_stack global_stack;
+ int busy = 1;
+ int num_threads;
+
+ omp_init_lock (&lock);
+ init_int_pair_stack (&global_stack);
+ #pragma omp parallel firstprivate (array, count)
+ {
+ int lo = 0, hi = 0, mid, next_lo, next_hi;
+ bool idle = true;
+ struct int_pair_stack local_stack;
+
+ init_int_pair_stack (&local_stack);
+ if (omp_get_thread_num () == 0)
+ {
+ num_threads = omp_get_num_threads ();
+ hi = count - 1;
+ idle = false;
+ }
+
+ for (;;)
+ {
+ if (hi - lo < THRESHOLD)
+ {
+ insertsort (array, lo, hi);
+ lo = hi;
+ }
+ if (lo >= hi)
+ {
+ if (size_int_pair_stack (&local_stack) == 0)
+ {
+ again:
+ omp_set_lock (&lock);
+ if (size_int_pair_stack (&global_stack) == 0)
+ {
+ if (!idle)
+ busy--;
+ if (busy == 0)
+ {
+ omp_unset_lock (&lock);
+ break;
+ }
+ omp_unset_lock (&lock);
+ idle = true;
+ while (size_int_pair_stack (&global_stack) == 0
+ && busy)
+ busy_wait ();
+ goto again;
+ }
+ if (idle)
+ busy++;
+ pop_int_pair_stack (&global_stack, &lo, &hi);
+ omp_unset_lock (&lock);
+ idle = false;
+ }
+ else
+ pop_int_pair_stack (&local_stack, &lo, &hi);
+ }
+
+ mid = partition (array, lo, hi);
+ if (mid - lo < hi - mid)
+ {
+ next_lo = mid;
+ next_hi = hi;
+ hi = mid - 1;
+ }
+ else
+ {
+ next_lo = lo;
+ next_hi = mid - 1;
+ lo = mid;
+ }
+
+ if (next_hi - next_lo < THRESHOLD)
+ insertsort (array, next_lo, next_hi);
+ else
+ {
+ if (size_int_pair_stack (&global_stack) < num_threads - 1)
+ {
+ int size;
+
+ omp_set_lock (&lock);
+ size = size_int_pair_stack (&global_stack);
+ if (size < num_threads - 1 && size < STACK_SIZE)
+ push_int_pair_stack (&global_stack, next_lo, next_hi);
+ else
+ push_int_pair_stack (&local_stack, next_lo, next_hi);
+ omp_unset_lock (&lock);
+ }
+ else
+ push_int_pair_stack (&local_stack, next_lo, next_hi);
+ }
+ }
+ }
+ omp_destroy_lock (&lock);
+}
+
+static void
+sort2_1 (int *array, int lo, int hi, int num_threads, int *busy)
+{
+ int mid;
+
+ if (hi - lo < THRESHOLD)
+ {
+ insertsort (array, lo, hi);
+ return;
+ }
+
+ mid = partition (array, lo, hi);
+
+ if (*busy >= num_threads)
+ {
+ sort2_1 (array, lo, mid - 1, num_threads, busy);
+ sort2_1 (array, mid, hi, num_threads, busy);
+ return;
+ }
+
+ #pragma omp atomic
+ *busy += 1;
+
+ #pragma omp parallel num_threads (2) \
+ firstprivate (array, lo, hi, mid, num_threads, busy)
+ {
+ if (omp_get_thread_num () == 0)
+ sort2_1 (array, lo, mid - 1, num_threads, busy);
+ else
+ {
+ sort2_1 (array, mid, hi, num_threads, busy);
+ #pragma omp atomic
+ *busy -= 1;
+ }
+ }
+}
+
+static void
+sort2 (int *array, int count)
+{
+ int num_threads;
+ int busy = 1;
+
+ #pragma omp parallel
+ #pragma omp single nowait
+ num_threads = omp_get_num_threads ();
+
+ sort2_1 (array, 0, count - 1, num_threads, &busy);
+}
+
+#if _OPENMP >= 200805
+static void
+sort3_1 (int *array, int lo, int hi)
+{
+ int mid;
+
+ if (hi - lo < THRESHOLD)
+ {
+ insertsort (array, lo, hi);
+ return;
+ }
+
+ mid = partition (array, lo, hi);
+ #pragma omp task
+ sort3_1 (array, lo, mid - 1);
+ sort3_1 (array, mid, hi);
+}
+
+static void
+sort3 (int *array, int count)
+{
+ #pragma omp parallel
+ #pragma omp single
+ sort3_1 (array, 0, count - 1);
+}
+#endif
+
+int
+main (int argc, char **argv)
+{
+ int i, count = 1000000;
+ double stime;
+ int *unsorted, *sorted, num_threads;
+ if (argc >= 2)
+ count = strtoul (argv[1], NULL, 0);
+
+ unsorted = malloc (count * sizeof (int));
+ sorted = malloc (count * sizeof (int));
+ if (unsorted == NULL || sorted == NULL)
+ {
+ puts ("allocation failure");
+ exit (1);
+ }
+
+ srand (0xdeadbeef);
+ for (i = 0; i < count; i++)
+ unsorted[i] = rand ();
+
+ omp_set_nested (1);
+ omp_set_dynamic (0);
+ #pragma omp parallel
+ #pragma omp single nowait
+ num_threads = omp_get_num_threads ();
+ printf ("Threads: %d\n", num_threads);
+
+ memcpy (sorted, unsorted, count * sizeof (int));
+ stime = omp_get_wtime ();
+ sort1 (sorted, count);
+ verify ("sort1", stime, sorted, count);
+
+ memcpy (sorted, unsorted, count * sizeof (int));
+ stime = omp_get_wtime ();
+ sort2 (sorted, count);
+ verify ("sort2", stime, sorted, count);
+
+#if _OPENMP >= 200805
+ memcpy (sorted, unsorted, count * sizeof (int));
+ stime = omp_get_wtime ();
+ sort3 (sorted, count);
+ verify ("sort3", stime, sorted, count);
+#endif
+
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/task-1.c b/libgomp/testsuite/libgomp.c/task-1.c
new file mode 100644
index 00000000000..66f58a29b87
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/task-1.c
@@ -0,0 +1,84 @@
+extern void abort (void);
+
+int a = 18;
+
+void
+f1 (int i, int j, int k)
+{
+ int l = 6, m = 7, n = 8;
+#pragma omp task private(j, m) shared(k, n)
+ {
+ j = 6;
+ m = 5;
+ if (++a != 19 || ++i != 9 || j != 6 || ++l != 7 || m != 5 || ++n != 9)
+ #pragma omp atomic
+ k++;
+ }
+#pragma omp taskwait
+ if (a != 19 || i != 8 || j != 26 || k != 0 || l != 6 || m != 7 || n != 9)
+ abort ();
+}
+
+int v1 = 1, v2 = 2, v5 = 5;
+int err;
+
+void
+f2 (void)
+{
+ int v3 = 3;
+#pragma omp sections private (v1) firstprivate (v2)
+ {
+ #pragma omp section
+ {
+ int v4 = 4;
+ v1 = 7;
+ #pragma omp task
+ {
+ if (++v1 != 8 || ++v2 != 3 || ++v3 != 4 || ++v4 != 5 || ++v5 != 6)
+ err = 1;
+ }
+ #pragma omp taskwait
+ if (v1 != 7 || v2 != 2 || v3 != 3 || v4 != 4 || v5 != 6)
+ abort ();
+ if (err)
+ abort ();
+ }
+ }
+}
+
+void
+f3 (int i, int j, int k)
+{
+ int l = 6, m = 7, n = 8;
+#pragma omp task private(j, m) shared(k, n) untied
+ {
+ j = 6;
+ m = 5;
+ if (++a != 19 || ++i != 9 || j != 6 || ++l != 7 || m != 5 || ++n != 9)
+ #pragma omp atomic
+ k++;
+ }
+#pragma omp taskwait
+ if (a != 19 || i != 8 || j != 26 || k != 0 || l != 6 || m != 7 || n != 9)
+ abort ();
+}
+
+int
+main (void)
+{
+ f1 (8, 26, 0);
+ f2 ();
+ a = 18;
+ f3 (8, 26, 0);
+ a = 18;
+#pragma omp parallel num_threads(4)
+ {
+ #pragma omp master
+ {
+ f1 (8, 26, 0);
+ a = 18;
+ f3 (8, 26, 0);
+ }
+ }
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/task-2.c b/libgomp/testsuite/libgomp.c/task-2.c
new file mode 100644
index 00000000000..ed6a09c3557
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/task-2.c
@@ -0,0 +1,53 @@
+extern void abort (void);
+
+int
+f1 (void)
+{
+ int a = 6, e = 0;
+ int nested (int x)
+ {
+ return x + a;
+ }
+ #pragma omp task
+ {
+ int n = nested (5);
+ if (n != 11)
+ #pragma omp atomic
+ e += 1;
+ }
+ #pragma omp taskwait
+ return e;
+}
+
+int
+f2 (void)
+{
+ int a = 6, e = 0;
+ int nested (int x)
+ {
+ return x + a;
+ }
+ a = nested (4);
+ #pragma omp task
+ {
+ if (a != 10)
+ #pragma omp atomic
+ e += 1;
+ }
+ #pragma omp taskwait
+ return e;
+}
+
+int
+main (void)
+{
+ int e = 0;
+ #pragma omp parallel num_threads(4) reduction(+:e)
+ {
+ e += f1 ();
+ e += f2 ();
+ }
+ if (e)
+ abort ();
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/task-3.c b/libgomp/testsuite/libgomp.c/task-3.c
new file mode 100644
index 00000000000..5657346bd15
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/task-3.c
@@ -0,0 +1,70 @@
+/* { dg-do run } */
+
+#include <omp.h>
+extern void abort ();
+
+int l = 5;
+
+int
+foo (int i)
+{
+ int j = 7;
+ const int k = 8;
+ #pragma omp task firstprivate (i) shared (j, l)
+ {
+ #pragma omp critical
+ {
+ j += i;
+ l += k;
+ }
+ }
+ i++;
+ #pragma omp task firstprivate (i) shared (j, l)
+ {
+ #pragma omp critical
+ {
+ j += i;
+ l += k;
+ }
+ }
+ i++;
+ #pragma omp task firstprivate (i) shared (j, l)
+ {
+ #pragma omp critical
+ {
+ j += i;
+ l += k;
+ }
+ }
+ i++;
+ #pragma omp task firstprivate (i) shared (j, l)
+ {
+ #pragma omp critical
+ {
+ j += i;
+ l += k;
+ }
+ }
+ i++;
+ #pragma omp taskwait
+ return (i != 8 * omp_get_thread_num () + 4
+ || j != 4 * i - 3
+ || k != 8);
+}
+
+int
+main (void)
+{
+ int r = 0;
+ #pragma omp parallel num_threads (4) reduction(+:r)
+ if (omp_get_num_threads () != 4)
+ {
+ #pragma omp master
+ l = 133;
+ }
+ else if (foo (8 * omp_get_thread_num ()))
+ r++;
+ if (r || l != 133)
+ abort ();
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.c/task-4.c b/libgomp/testsuite/libgomp.c/task-4.c
new file mode 100644
index 00000000000..18435930019
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c/task-4.c
@@ -0,0 +1,40 @@
+/* { dg-do run } */
+
+#include <omp.h>
+#include <stdlib.h>
+#include <string.h>
+
+int e;
+
+void __attribute__((noinline))
+baz (int i, int *p, int j, int *q)
+{
+ if (p[0] != 1 || p[i] != 3 || q[0] != 2 || q[j] != 4)
+ #pragma omp atomic
+ e++;
+}
+
+void __attribute__((noinline))
+foo (int i, int j)
+{
+ int p[i + 1];
+ int q[j + 1];
+ memset (p, 0, sizeof (p));
+ memset (q, 0, sizeof (q));
+ p[0] = 1;
+ p[i] = 3;
+ q[0] = 2;
+ q[j] = 4;
+ #pragma omp task firstprivate (p, q)
+ baz (i, p, j, q);
+}
+
+int
+main (void)
+{
+ #pragma omp parallel num_threads (4)
+ foo (5 + omp_get_thread_num (), 7 + omp_get_thread_num ());
+ if (e)
+ abort ();
+ return 0;
+}
diff --git a/libgomp/testsuite/libgomp.fortran/allocatable1.f90 b/libgomp/testsuite/libgomp.fortran/allocatable1.f90
new file mode 100644
index 00000000000..1efe2abe959
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/allocatable1.f90
@@ -0,0 +1,81 @@
+! { dg-do run }
+!$ use omp_lib
+
+ integer, allocatable :: a(:, :)
+ integer :: b(6, 3)
+ integer :: i, j
+ logical :: k, l
+ b(:, :) = 16
+ l = .false.
+ if (allocated (a)) call abort
+!$omp parallel private (a, b) reduction (.or.:l)
+ l = l.or.allocated (a)
+ allocate (a(3, 6))
+ l = l.or..not.allocated (a)
+ l = l.or.size(a).ne.18.or.size(a,1).ne.3.or.size(a,2).ne.6
+ a(3, 2) = 1
+ b(3, 2) = 1
+ deallocate (a)
+ l = l.or.allocated (a)
+!$omp end parallel
+ if (allocated (a).or.l) call abort
+ allocate (a(6, 3))
+ a(:, :) = 3
+ if (.not.allocated (a)) call abort
+ l = l.or.size(a).ne.18.or.size(a,1).ne.6.or.size(a,2).ne.3
+ if (l) call abort
+!$omp parallel private (a, b) reduction (.or.:l)
+ l = l.or..not.allocated (a)
+ a(3, 2) = 1
+ b(3, 2) = 1
+!$omp end parallel
+ if (l.or..not.allocated (a)) call abort
+!$omp parallel firstprivate (a, b) reduction (.or.:l)
+ l = l.or..not.allocated (a)
+ l = l.or.size(a).ne.18.or.size(a,1).ne.6.or.size(a,2).ne.3
+ do i = 1, 6
+ l = l.or.(a(i, 1).ne.3).or.(a(i, 2).ne.3)
+ l = l.or.(a(i, 3).ne.3).or.(b(i, 1).ne.16)
+ l = l.or.(b(i, 2).ne.16).or.(b(i, 3).ne.16)
+ end do
+ a(:, :) = omp_get_thread_num ()
+ b(:, :) = omp_get_thread_num ()
+!$omp end parallel
+ if (any (a.ne.3).or.any (b.ne.16).or.l) call abort
+ k = .true.
+!$omp parallel do firstprivate (a, b, k) lastprivate (a, b) &
+!$omp & reduction (.or.:l)
+ do i = 1, 36
+ l = l.or..not.allocated (a)
+ l = l.or.size(a).ne.18.or.size(a,1).ne.6.or.size(a,2).ne.3
+ if (k) then
+ do j = 1, 6
+ l = l.or.(a(j, 1).ne.3).or.(a(j, 2).ne.3)
+ l = l.or.(a(j, 3).ne.3).or.(b(j, 1).ne.16)
+ l = l.or.(b(j, 2).ne.16).or.(b(j, 3).ne.16)
+ end do
+ k = .false.
+ end if
+ a(:, :) = i + 2
+ b(:, :) = i
+ end do
+ if (any (a.ne.38).or.any (b.ne.36).or.l) call abort
+ deallocate (a)
+ if (allocated (a)) call abort
+ allocate (a (0:1, 0:3))
+ a(:, :) = 0
+!$omp parallel do reduction (+:a) reduction (.or.:l) &
+!$omp & num_threads(3) schedule(static)
+ do i = 0, 7
+ l = l.or..not.allocated (a)
+ l = l.or.size(a).ne.8.or.size(a,1).ne.2.or.size(a,2).ne.4
+ a(modulo (i, 2), i / 2) = a(modulo (i, 2), i / 2) + i
+ a(i / 4, modulo (i, 4)) = a(i / 4, modulo (i, 4)) + i
+ end do
+ if (l) call abort
+ do i = 0, 1
+ do j = 0, 3
+ if (a(i, j) .ne. (5*i + 3*j)) call abort
+ end do
+ end do
+end
diff --git a/libgomp/testsuite/libgomp.fortran/allocatable2.f90 b/libgomp/testsuite/libgomp.fortran/allocatable2.f90
new file mode 100644
index 00000000000..a37616b04b1
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/allocatable2.f90
@@ -0,0 +1,47 @@
+! { dg-do run }
+! { dg-require-effective-target tls_runtime }
+!$ use omp_lib
+
+ integer, save, allocatable :: a(:, :)
+ integer, allocatable :: b(:, :)
+ integer :: n
+ logical :: l
+!$omp threadprivate (a)
+ if (allocated (a)) call abort
+ call omp_set_dynamic (.false.)
+ l = .false.
+!$omp parallel num_threads (4) reduction(.or.:l)
+ allocate (a(-1:1, 7:10))
+ a(:, :) = omp_get_thread_num () + 6
+ l = l.or..not.allocated (a)
+ l = l.or.size(a).ne.12.or.size(a,1).ne.3.or.size(a,2).ne.4
+!$omp end parallel
+ if (l.or.any(a.ne.6)) call abort ()
+!$omp parallel num_threads (4) copyin (a) reduction(.or.:l) private (b)
+ l = l.or.allocated (b)
+ l = l.or..not.allocated (a)
+ l = l.or.size(a).ne.12.or.size(a,1).ne.3.or.size(a,2).ne.4
+ l = l.or.any(a.ne.6)
+ allocate (b(1, 3))
+ a(:, :) = omp_get_thread_num () + 36
+ b(:, :) = omp_get_thread_num () + 66
+ !$omp single
+ n = omp_get_thread_num ()
+ !$omp end single copyprivate (a, b)
+ l = l.or..not.allocated (a)
+ l = l.or.size(a).ne.12.or.size(a,1).ne.3.or.size(a,2).ne.4
+ l = l.or.any(a.ne.(n + 36))
+ l = l.or..not.allocated (b)
+ l = l.or.size(b).ne.3.or.size(b,1).ne.1.or.size(b,2).ne.3
+ l = l.or.any(b.ne.(n + 66))
+ deallocate (b)
+ l = l.or.allocated (b)
+!$omp end parallel
+ if (n.lt.0 .or. n.ge.4) call abort
+ if (l.or.any(a.ne.(n + 36))) call abort
+!$omp parallel num_threads (4) reduction(.or.:l)
+ deallocate (a)
+ l = l.or.allocated (a)
+!$omp end parallel
+ if (l.or.allocated (a)) call abort
+end
diff --git a/libgomp/testsuite/libgomp.fortran/allocatable3.f90 b/libgomp/testsuite/libgomp.fortran/allocatable3.f90
new file mode 100644
index 00000000000..fe3714a2b1f
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/allocatable3.f90
@@ -0,0 +1,21 @@
+! { dg-do run }
+
+ integer, allocatable :: a(:)
+ integer :: i
+ logical :: l
+ l = .false.
+ if (allocated (a)) call abort
+!$omp parallel private (a) reduction (.or.:l)
+ allocate (a (-7:-5))
+ l = l.or..not.allocated (a)
+ l = l.or.size(a).ne.3.or.size(a,1).ne.3
+ a(:) = 0
+ !$omp do private (a)
+ do i = 1, 7
+ a(:) = i
+ l = l.or.any (a.ne.i)
+ end do
+ l = l.or.any (a.ne.0)
+ deallocate (a)
+!$omp end parallel
+end
diff --git a/libgomp/testsuite/libgomp.fortran/allocatable4.f90 b/libgomp/testsuite/libgomp.fortran/allocatable4.f90
new file mode 100644
index 00000000000..996578c94fa
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/allocatable4.f90
@@ -0,0 +1,47 @@
+! { dg-do run }
+
+ integer, allocatable :: a(:, :)
+ integer :: b(6, 3)
+ integer :: i, j
+ logical :: k, l
+ b(:, :) = 16
+ l = .false.
+ if (allocated (a)) call abort
+!$omp task private (a, b) shared (l)
+ l = l.or.allocated (a)
+ allocate (a(3, 6))
+ l = l.or..not.allocated (a)
+ l = l.or.size(a).ne.18.or.size(a,1).ne.3.or.size(a,2).ne.6
+ a(3, 2) = 1
+ b(3, 2) = 1
+ deallocate (a)
+ l = l.or.allocated (a)
+!$omp end task
+!$omp taskwait
+ if (allocated (a).or.l) call abort
+ allocate (a(6, 3))
+ a(:, :) = 3
+ if (.not.allocated (a)) call abort
+ l = l.or.size(a).ne.18.or.size(a,1).ne.6.or.size(a,2).ne.3
+ if (l) call abort
+!$omp task private (a, b) shared (l)
+ l = l.or..not.allocated (a)
+ a(3, 2) = 1
+ b(3, 2) = 1
+!$omp end task
+!$omp taskwait
+ if (l.or..not.allocated (a)) call abort
+!$omp task firstprivate (a, b) shared (l)
+ l = l.or..not.allocated (a)
+ l = l.or.size(a).ne.18.or.size(a,1).ne.6.or.size(a,2).ne.3
+ do i = 1, 6
+ l = l.or.(a(i, 1).ne.3).or.(a(i, 2).ne.3)
+ l = l.or.(a(i, 3).ne.3).or.(b(i, 1).ne.16)
+ l = l.or.(b(i, 2).ne.16).or.(b(i, 3).ne.16)
+ end do
+ a(:, :) = 7
+ b(:, :) = 8
+!$omp end task
+!$omp taskwait
+ if (any (a.ne.3).or.any (b.ne.16).or.l) call abort
+end
diff --git a/libgomp/testsuite/libgomp.fortran/collapse1.f90 b/libgomp/testsuite/libgomp.fortran/collapse1.f90
new file mode 100644
index 00000000000..1ecfa0c9365
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/collapse1.f90
@@ -0,0 +1,26 @@
+! { dg-do run }
+
+program collapse1
+ integer :: i, j, k, a(1:3, 4:6, 5:7)
+ logical :: l
+ l = .false.
+ a(:, :, :) = 0
+ !$omp parallel do collapse(4 - 1) schedule(static, 4)
+ do i = 1, 3
+ do j = 4, 6
+ do k = 5, 7
+ a(i, j, k) = i + j + k
+ end do
+ end do
+ end do
+ !$omp parallel do collapse(2) reduction(.or.:l)
+ do i = 1, 3
+ do j = 4, 6
+ do k = 5, 7
+ if (a(i, j, k) .ne. (i + j + k)) l = .true.
+ end do
+ end do
+ end do
+ !$omp end parallel do
+ if (l) call abort
+end program collapse1
diff --git a/libgomp/testsuite/libgomp.fortran/collapse2.f90 b/libgomp/testsuite/libgomp.fortran/collapse2.f90
new file mode 100644
index 00000000000..77e0dee8260
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/collapse2.f90
@@ -0,0 +1,53 @@
+! { dg-do run }
+
+program collapse2
+ call test1
+ call test2
+contains
+ subroutine test1
+ integer :: i, j, k, a(1:3, 4:6, 5:7)
+ logical :: l
+ l = .false.
+ a(:, :, :) = 0
+ !$omp parallel do collapse(4 - 1) schedule(static, 4)
+ do 164 i = 1, 3
+ do 164 j = 4, 6
+ do 164 k = 5, 7
+ a(i, j, k) = i + j + k
+164 end do
+ !$omp parallel do collapse(2) reduction(.or.:l)
+firstdo: do i = 1, 3
+ do j = 4, 6
+ do k = 5, 7
+ if (a(i, j, k) .ne. (i + j + k)) l = .true.
+ end do
+ end do
+ end do firstdo
+ !$omp end parallel do
+ if (l) call abort
+ end subroutine test1
+
+ subroutine test2
+ integer :: a(3,3,3), k, kk, kkk, l, ll, lll
+ !$omp do collapse(3)
+ do 115 k=1,3
+ dokk: do kk=1,3
+ do kkk=1,3
+ a(k,kk,kkk) = 1
+ enddo
+ enddo dokk
+115 continue
+ if (any(a(1:3,1:3,1:3).ne.1)) call abort
+
+ !$omp do collapse(3)
+ dol: do 120 l=1,3
+ doll: do ll=1,3
+ do lll=1,3
+ a(l,ll,lll) = 2
+ enddo
+ enddo doll
+120 end do dol
+ if (any(a(1:3,1:3,1:3).ne.2)) call abort
+ end subroutine test2
+
+end program collapse2
diff --git a/libgomp/testsuite/libgomp.fortran/collapse3.f90 b/libgomp/testsuite/libgomp.fortran/collapse3.f90
new file mode 100644
index 00000000000..eac9eac651b
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/collapse3.f90
@@ -0,0 +1,204 @@
+! { dg-do run }
+
+program collapse3
+ call test1
+ call test2 (2, 6, -2, 4, 13, 18)
+ call test3 (2, 6, -2, 4, 13, 18, 1, 1, 1)
+ call test4
+ call test5 (2, 6, -2, 4, 13, 18)
+ call test6 (2, 6, -2, 4, 13, 18, 1, 1, 1)
+contains
+ subroutine test1
+ integer :: i, j, k, a(1:7, -3:5, 12:19), m
+ logical :: l
+ l = .false.
+ a(:, :, :) = 0
+ !$omp parallel do collapse (3) lastprivate (i, j, k, m) reduction (.or.:l)
+ do i = 2, 6
+ do j = -2, 4
+ do k = 13, 18
+ l = l.or.i.lt.2.or.i.gt.6.or.j.lt.-2.or.j.gt.4
+ l = l.or.k.lt.13.or.k.gt.18
+ if (.not.l) a(i, j, k) = a(i, j, k) + 1
+ m = i * 100 + j * 10 + k
+ end do
+ end do
+ end do
+ if (i.ne.7.or.j.ne.5.or.k.ne.19) call abort
+ if (m.ne.(600+40+18)) call abort
+ do i = 1, 7
+ do j = -3, 5
+ do k = 12, 19
+ if (i.eq.1.or.i.eq.7.or.j.eq.-3.or.j.eq.5.or.k.eq.12.or.k.eq.19) then
+ if (a(i, j, k).ne.0) print *, i, j, k
+ else
+ if (a(i, j, k).ne.1) print *, 'kk', i, j, k, a(i, j, k)
+ end if
+ end do
+ end do
+ end do
+ end subroutine test1
+
+ subroutine test2(v1, v2, v3, v4, v5, v6)
+ integer :: i, j, k, a(1:7, -3:5, 12:19), m
+ integer :: v1, v2, v3, v4, v5, v6
+ logical :: l
+ l = .false.
+ a(:, :, :) = 0
+ !$omp parallel do collapse (3) lastprivate (i, j, k, m) reduction (.or.:l)
+ do i = v1, v2
+ do j = v3, v4
+ do k = v5, v6
+ l = l.or.i.lt.2.or.i.gt.6.or.j.lt.-2.or.j.gt.4
+ l = l.or.k.lt.13.or.k.gt.18
+ if (.not.l) a(i, j, k) = a(i, j, k) + 1
+ m = i * 100 + j * 10 + k
+ end do
+ end do
+ end do
+ if (i.ne.7.or.j.ne.5.or.k.ne.19) call abort
+ if (m.ne.(600+40+18)) call abort
+ do i = 1, 7
+ do j = -3, 5
+ do k = 12, 19
+ if (i.eq.1.or.i.eq.7.or.j.eq.-3.or.j.eq.5.or.k.eq.12.or.k.eq.19) then
+ if (a(i, j, k).ne.0) print *, i, j, k
+ else
+ if (a(i, j, k).ne.1) print *, 'kk', i, j, k, a(i, j, k)
+ end if
+ end do
+ end do
+ end do
+ end subroutine test2
+
+ subroutine test3(v1, v2, v3, v4, v5, v6, v7, v8, v9)
+ integer :: i, j, k, a(1:7, -3:5, 12:19), m
+ integer :: v1, v2, v3, v4, v5, v6, v7, v8, v9
+ logical :: l
+ l = .false.
+ a(:, :, :) = 0
+ !$omp parallel do collapse (3) lastprivate (i, j, k, m) reduction (.or.:l)
+ do i = v1, v2, v7
+ do j = v3, v4, v8
+ do k = v5, v6, v9
+ l = l.or.i.lt.2.or.i.gt.6.or.j.lt.-2.or.j.gt.4
+ l = l.or.k.lt.13.or.k.gt.18
+ if (.not.l) a(i, j, k) = a(i, j, k) + 1
+ m = i * 100 + j * 10 + k
+ end do
+ end do
+ end do
+ if (i.ne.7.or.j.ne.5.or.k.ne.19) call abort
+ if (m.ne.(600+40+18)) call abort
+ do i = 1, 7
+ do j = -3, 5
+ do k = 12, 19
+ if (i.eq.1.or.i.eq.7.or.j.eq.-3.or.j.eq.5.or.k.eq.12.or.k.eq.19) then
+ if (a(i, j, k).ne.0) print *, i, j, k
+ else
+ if (a(i, j, k).ne.1) print *, 'kk', i, j, k, a(i, j, k)
+ end if
+ end do
+ end do
+ end do
+ end subroutine test3
+
+ subroutine test4
+ integer :: i, j, k, a(1:7, -3:5, 12:19), m
+ logical :: l
+ l = .false.
+ a(:, :, :) = 0
+ !$omp parallel do collapse (3) lastprivate (i, j, k, m) reduction (.or.:l) &
+ !$omp& schedule (dynamic, 5)
+ do i = 2, 6
+ do j = -2, 4
+ do k = 13, 18
+ l = l.or.i.lt.2.or.i.gt.6.or.j.lt.-2.or.j.gt.4
+ l = l.or.k.lt.13.or.k.gt.18
+ if (.not.l) a(i, j, k) = a(i, j, k) + 1
+ m = i * 100 + j * 10 + k
+ end do
+ end do
+ end do
+ if (i.ne.7.or.j.ne.5.or.k.ne.19) call abort
+ if (m.ne.(600+40+18)) call abort
+ do i = 1, 7
+ do j = -3, 5
+ do k = 12, 19
+ if (i.eq.1.or.i.eq.7.or.j.eq.-3.or.j.eq.5.or.k.eq.12.or.k.eq.19) then
+ if (a(i, j, k).ne.0) print *, i, j, k
+ else
+ if (a(i, j, k).ne.1) print *, 'kk', i, j, k, a(i, j, k)
+ end if
+ end do
+ end do
+ end do
+ end subroutine test4
+
+ subroutine test5(v1, v2, v3, v4, v5, v6)
+ integer :: i, j, k, a(1:7, -3:5, 12:19), m
+ integer :: v1, v2, v3, v4, v5, v6
+ logical :: l
+ l = .false.
+ a(:, :, :) = 0
+ !$omp parallel do collapse (3) lastprivate (i, j, k, m) reduction (.or.:l) &
+ !$omp & schedule (guided)
+ do i = v1, v2
+ do j = v3, v4
+ do k = v5, v6
+ l = l.or.i.lt.2.or.i.gt.6.or.j.lt.-2.or.j.gt.4
+ l = l.or.k.lt.13.or.k.gt.18
+ if (.not.l) a(i, j, k) = a(i, j, k) + 1
+ m = i * 100 + j * 10 + k
+ end do
+ end do
+ end do
+ if (i.ne.7.or.j.ne.5.or.k.ne.19) call abort
+ if (m.ne.(600+40+18)) call abort
+ do i = 1, 7
+ do j = -3, 5
+ do k = 12, 19
+ if (i.eq.1.or.i.eq.7.or.j.eq.-3.or.j.eq.5.or.k.eq.12.or.k.eq.19) then
+ if (a(i, j, k).ne.0) print *, i, j, k
+ else
+ if (a(i, j, k).ne.1) print *, 'kk', i, j, k, a(i, j, k)
+ end if
+ end do
+ end do
+ end do
+ end subroutine test5
+
+ subroutine test6(v1, v2, v3, v4, v5, v6, v7, v8, v9)
+ integer :: i, j, k, a(1:7, -3:5, 12:19), m
+ integer :: v1, v2, v3, v4, v5, v6, v7, v8, v9
+ logical :: l
+ l = .false.
+ a(:, :, :) = 0
+ !$omp parallel do collapse (3) lastprivate (i, j, k, m) reduction (.or.:l) &
+ !$omp & schedule (dynamic)
+ do i = v1, v2, v7
+ do j = v3, v4, v8
+ do k = v5, v6, v9
+ l = l.or.i.lt.2.or.i.gt.6.or.j.lt.-2.or.j.gt.4
+ l = l.or.k.lt.13.or.k.gt.18
+ if (.not.l) a(i, j, k) = a(i, j, k) + 1
+ m = i * 100 + j * 10 + k
+ end do
+ end do
+ end do
+ if (i.ne.7.or.j.ne.5.or.k.ne.19) call abort
+ if (m.ne.(600+40+18)) call abort
+ do i = 1, 7
+ do j = -3, 5
+ do k = 12, 19
+ if (i.eq.1.or.i.eq.7.or.j.eq.-3.or.j.eq.5.or.k.eq.12.or.k.eq.19) then
+ if (a(i, j, k).ne.0) print *, i, j, k
+ else
+ if (a(i, j, k).ne.1) print *, 'kk', i, j, k, a(i, j, k)
+ end if
+ end do
+ end do
+ end do
+ end subroutine test6
+
+end program collapse3
diff --git a/libgomp/testsuite/libgomp.fortran/collapse4.f90 b/libgomp/testsuite/libgomp.fortran/collapse4.f90
new file mode 100644
index 00000000000..f19b0f6c695
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/collapse4.f90
@@ -0,0 +1,12 @@
+! { dg-do run }
+
+ integer :: i, j, k
+ !$omp parallel do lastprivate (i, j, k) collapse (3)
+ do i = 0, 17
+ do j = 0, 6
+ do k = 0, 5
+ end do
+ end do
+ end do
+ if (i .ne. 18 .or. j .ne. 7 .or. k .ne. 6) call abort
+end
diff --git a/libgomp/testsuite/libgomp.fortran/lastprivate1.f90 b/libgomp/testsuite/libgomp.fortran/lastprivate1.f90
new file mode 100644
index 00000000000..91bb96ca75a
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/lastprivate1.f90
@@ -0,0 +1,126 @@
+program lastprivate
+ integer :: i
+ common /c/ i
+ !$omp parallel num_threads (4)
+ call test1
+ !$omp end parallel
+ if (i .ne. 21) call abort
+ !$omp parallel num_threads (4)
+ call test2
+ !$omp end parallel
+ if (i .ne. 64) call abort
+ !$omp parallel num_threads (4)
+ call test3
+ !$omp end parallel
+ if (i .ne. 14) call abort
+ call test4
+ call test5
+ call test6
+ call test7
+ call test8
+ call test9
+ call test10
+ call test11
+ call test12
+contains
+ subroutine test1
+ integer :: i
+ common /c/ i
+ !$omp do lastprivate (i)
+ do i = 1, 20
+ end do
+ end subroutine test1
+ subroutine test2
+ integer :: i
+ common /c/ i
+ !$omp do lastprivate (i)
+ do i = 7, 61, 3
+ end do
+ end subroutine test2
+ function ret3 ()
+ integer :: ret3
+ ret3 = 3
+ end function ret3
+ subroutine test3
+ integer :: i
+ common /c/ i
+ !$omp do lastprivate (i)
+ do i = -10, 11, ret3 ()
+ end do
+ end subroutine test3
+ subroutine test4
+ integer :: j
+ !$omp parallel do lastprivate (j) num_threads (4) default (none)
+ do j = 1, 20
+ end do
+ if (j .ne. 21) call abort
+ end subroutine test4
+ subroutine test5
+ integer :: j
+ !$omp parallel do lastprivate (j) num_threads (4) default (none)
+ do j = 7, 61, 3
+ end do
+ if (j .ne. 64) call abort
+ end subroutine test5
+ subroutine test6
+ integer :: j
+ !$omp parallel do lastprivate (j) num_threads (4) default (none)
+ do j = -10, 11, ret3 ()
+ end do
+ if (j .ne. 14) call abort
+ end subroutine test6
+ subroutine test7
+ integer :: i
+ common /c/ i
+ !$omp parallel do lastprivate (i) num_threads (4) default (none)
+ do i = 1, 20
+ end do
+ if (i .ne. 21) call abort
+ end subroutine test7
+ subroutine test8
+ integer :: i
+ common /c/ i
+ !$omp parallel do lastprivate (i) num_threads (4) default (none)
+ do i = 7, 61, 3
+ end do
+ if (i .ne. 64) call abort
+ end subroutine test8
+ subroutine test9
+ integer :: i
+ common /c/ i
+ !$omp parallel do lastprivate (i) num_threads (4) default (none)
+ do i = -10, 11, ret3 ()
+ end do
+ if (i .ne. 14) call abort
+ end subroutine test9
+ subroutine test10
+ integer :: i
+ common /c/ i
+ !$omp parallel num_threads (4) default (none) shared (i)
+ !$omp do lastprivate (i)
+ do i = 1, 20
+ end do
+ !$omp end parallel
+ if (i .ne. 21) call abort
+ end subroutine test10
+ subroutine test11
+ integer :: i
+ common /c/ i
+ !$omp parallel num_threads (4) default (none) shared (i)
+ !$omp do lastprivate (i)
+ do i = 7, 61, 3
+ end do
+ !$omp end parallel
+ if (i .ne. 64) call abort
+ end subroutine test11
+ subroutine test12
+ integer :: i
+ common /c/ i
+ !$omp parallel num_threads (4) default (none) shared (i)
+ !$omp do lastprivate (i)
+ do i = -10, 11, ret3 ()
+ end do
+ !$omp end parallel
+ if (i .ne. 14) call abort
+ end subroutine test12
+end program lastprivate
diff --git a/libgomp/testsuite/libgomp.fortran/lastprivate2.f90 b/libgomp/testsuite/libgomp.fortran/lastprivate2.f90
new file mode 100644
index 00000000000..6d7e11eab00
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/lastprivate2.f90
@@ -0,0 +1,141 @@
+program lastprivate
+ integer :: i, k
+ common /c/ i, k
+ !$omp parallel num_threads (4)
+ call test1
+ !$omp end parallel
+ if (i .ne. 21 .or. k .ne. 20) call abort
+ !$omp parallel num_threads (4)
+ call test2
+ !$omp end parallel
+ if (i .ne. 64 .or. k .ne. 61) call abort
+ !$omp parallel num_threads (4)
+ call test3
+ !$omp end parallel
+ if (i .ne. 14 .or. k .ne. 11) call abort
+ call test4
+ call test5
+ call test6
+ call test7
+ call test8
+ call test9
+ call test10
+ call test11
+ call test12
+contains
+ subroutine test1
+ integer :: i, k
+ common /c/ i, k
+ !$omp do lastprivate (i, k)
+ do i = 1, 20
+ k = i
+ end do
+ end subroutine test1
+ subroutine test2
+ integer :: i, k
+ common /c/ i, k
+ !$omp do lastprivate (i, k)
+ do i = 7, 61, 3
+ k = i
+ end do
+ end subroutine test2
+ function ret3 ()
+ integer :: ret3
+ ret3 = 3
+ end function ret3
+ subroutine test3
+ integer :: i, k
+ common /c/ i, k
+ !$omp do lastprivate (i, k)
+ do i = -10, 11, ret3 ()
+ k = i
+ end do
+ end subroutine test3
+ subroutine test4
+ integer :: j, l
+ !$omp parallel do lastprivate (j, l) num_threads (4)
+ do j = 1, 20
+ l = j
+ end do
+ if (j .ne. 21 .or. l .ne. 20) call abort
+ end subroutine test4
+ subroutine test5
+ integer :: j, l
+ l = 77
+ !$omp parallel do lastprivate (j, l) num_threads (4) firstprivate (l)
+ do j = 7, 61, 3
+ l = j
+ end do
+ if (j .ne. 64 .or. l .ne. 61) call abort
+ end subroutine test5
+ subroutine test6
+ integer :: j, l
+ !$omp parallel do lastprivate (j, l) num_threads (4)
+ do j = -10, 11, ret3 ()
+ l = j
+ end do
+ if (j .ne. 14 .or. l .ne. 11) call abort
+ end subroutine test6
+ subroutine test7
+ integer :: i, k
+ common /c/ i, k
+ !$omp parallel do lastprivate (i, k) num_threads (4)
+ do i = 1, 20
+ k = i
+ end do
+ if (i .ne. 21 .or. k .ne. 20) call abort
+ end subroutine test7
+ subroutine test8
+ integer :: i, k
+ common /c/ i, k
+ !$omp parallel do lastprivate (i, k) num_threads (4)
+ do i = 7, 61, 3
+ k = i
+ end do
+ if (i .ne. 64 .or. k .ne. 61) call abort
+ end subroutine test8
+ subroutine test9
+ integer :: i, k
+ common /c/ i, k
+ k = 77
+ !$omp parallel do lastprivate (i, k) num_threads (4) firstprivate (k)
+ do i = -10, 11, ret3 ()
+ k = i
+ end do
+ if (i .ne. 14 .or. k .ne. 11) call abort
+ end subroutine test9
+ subroutine test10
+ integer :: i, k
+ common /c/ i, k
+ !$omp parallel num_threads (4)
+ !$omp do lastprivate (i, k)
+ do i = 1, 20
+ k = i
+ end do
+ !$omp end parallel
+ if (i .ne. 21 .or. k .ne. 20) call abort
+ end subroutine test10
+ subroutine test11
+ integer :: i, k
+ common /c/ i, k
+ !$omp parallel num_threads (4)
+ !$omp do lastprivate (i, k)
+ do i = 7, 61, 3
+ k = i
+ end do
+ !$omp end parallel
+ if (i .ne. 64 .or. k .ne. 61) call abort
+ end subroutine test11
+ subroutine test12
+ integer :: i, k
+ common /c/ i, k
+ k = 77
+ !$omp parallel num_threads (4)
+ !$omp do lastprivate (i, k) firstprivate (k)
+ do i = -10, 11, ret3 ()
+ k = i
+ end do
+ !$omp end parallel
+ if (i .ne. 14 .or. k .ne. 11) call abort
+ end subroutine test12
+end program lastprivate
diff --git a/libgomp/testsuite/libgomp.fortran/lib4.f90 b/libgomp/testsuite/libgomp.fortran/lib4.f90
new file mode 100644
index 00000000000..cbb984574ff
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/lib4.f90
@@ -0,0 +1,16 @@
+! { dg-do run }
+
+program lib4
+ use omp_lib
+ integer (omp_sched_kind) :: kind
+ integer :: modifier
+ call omp_set_schedule (omp_sched_static, 32)
+ call omp_get_schedule (kind, modifier)
+ if (kind.ne.omp_sched_static.or.modifier.ne.32) call abort
+ call omp_set_schedule (omp_sched_dynamic, 4)
+ call omp_get_schedule (kind, modifier)
+ if (kind.ne.omp_sched_dynamic.or.modifier.ne.4) call abort
+ if (omp_get_thread_limit ().lt.0) call abort
+ call omp_set_max_active_levels (6)
+ if (omp_get_max_active_levels ().ne.6) call abort
+end program lib4
diff --git a/libgomp/testsuite/libgomp.fortran/lock-1.f90 b/libgomp/testsuite/libgomp.fortran/lock-1.f90
new file mode 100644
index 00000000000..d7d3e3fd6cc
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/lock-1.f90
@@ -0,0 +1,24 @@
+! { dg-do run }
+
+ use omp_lib
+
+ integer (kind = omp_nest_lock_kind) :: lock
+ logical :: l
+
+ l = .false.
+ call omp_init_nest_lock (lock)
+ if (omp_test_nest_lock (lock) .ne. 1) call abort
+ if (omp_test_nest_lock (lock) .ne. 2) call abort
+!$omp parallel if (.false.) reduction (.or.:l)
+ ! In OpenMP 2.5 this call was supposed to return 3, but in
+ ! OpenMP 3.0 the parallel region executes as a different task,
+ ! and omp_*_lock_t locks are owned by tasks, not by threads.
+ if (omp_test_nest_lock (lock) .ne. 0) l = .true.
+!$omp end parallel
+ if (l) call abort
+ if (omp_test_nest_lock (lock) .ne. 3) call abort
+ call omp_unset_nest_lock (lock)
+ call omp_unset_nest_lock (lock)
+ call omp_unset_nest_lock (lock)
+ call omp_destroy_nest_lock (lock)
+end
diff --git a/libgomp/testsuite/libgomp.fortran/lock-2.f90 b/libgomp/testsuite/libgomp.fortran/lock-2.f90
new file mode 100644
index 00000000000..9965139b9ba
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/lock-2.f90
@@ -0,0 +1,24 @@
+! { dg-do run }
+
+ use omp_lib
+
+ integer (kind = omp_nest_lock_kind) :: lock
+ logical :: l
+
+ l = .false.
+ call omp_init_nest_lock (lock)
+!$omp parallel num_threads (1) reduction (.or.:l)
+ if (omp_test_nest_lock (lock) .ne. 1) call abort
+ if (omp_test_nest_lock (lock) .ne. 2) call abort
+!$omp task if (.false.) shared (lock, l)
+ if (omp_test_nest_lock (lock) .ne. 0) l = .true.
+!$omp end task
+!$omp taskwait
+ if (omp_test_nest_lock (lock) .ne. 3) l = .true.
+ call omp_unset_nest_lock (lock)
+ call omp_unset_nest_lock (lock)
+ call omp_unset_nest_lock (lock)
+!$omp end parallel
+ if (l) call abort
+ call omp_destroy_nest_lock (lock)
+end
diff --git a/libgomp/testsuite/libgomp.fortran/nested1.f90 b/libgomp/testsuite/libgomp.fortran/nested1.f90
new file mode 100644
index 00000000000..98c4322d0bf
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/nested1.f90
@@ -0,0 +1,87 @@
+! { dg-do run }
+program nested1
+ use omp_lib
+ integer :: e1, e2, e3, e
+ integer :: tn1, tn2, tn3
+ e1 = 0
+ e2 = 0
+ e3 = 0
+ call omp_set_nested (.true.)
+ call omp_set_dynamic (.false.)
+ if (omp_in_parallel ()) call abort
+ if (omp_get_num_threads ().ne.1) call abort
+ if (omp_get_level ().ne.0) call abort
+ if (omp_get_ancestor_thread_num (0).ne.0) call abort
+ if (omp_get_ancestor_thread_num (-1).ne.-1) call abort
+ if (omp_get_ancestor_thread_num (1).ne.-1) call abort
+ if (omp_get_team_size (0).ne.1) call abort
+ if (omp_get_team_size (-1).ne.-1) call abort
+ if (omp_get_team_size (1).ne.-1) call abort
+ if (omp_get_active_level ().ne.0) call abort
+!$omp parallel num_threads (4) private (e, tn1)
+ e = 0
+ tn1 = omp_get_thread_num ()
+ if (.not.omp_in_parallel ()) e = e + 1
+ if (omp_get_num_threads ().ne.4) e = e + 1
+ if (tn1.lt.0.or.tn1.ge.4) e = e + 1
+ if (omp_get_level ().ne.1) e = e + 1
+ if (omp_get_ancestor_thread_num (0).ne.0) e = e + 1
+ if (omp_get_ancestor_thread_num (1).ne.tn1) e = e + 1
+ if (omp_get_ancestor_thread_num (-1).ne.-1) e = e + 1
+ if (omp_get_ancestor_thread_num (2).ne.-1) e = e + 1
+ if (omp_get_team_size (0).ne.1) e = e + 1
+ if (omp_get_team_size (1).ne.4) e = e + 1
+ if (omp_get_team_size (-1).ne.-1) e = e + 1
+ if (omp_get_team_size (2).ne.-1) e = e + 1
+ if (omp_get_active_level ().ne.1) e = e + 1
+ !$omp atomic
+ e1 = e1 + e
+!$omp parallel num_threads (5) if (.false.) firstprivate (tn1) &
+!$omp& private (e, tn2)
+ e = 0
+ tn2 = omp_get_thread_num ()
+ if (.not.omp_in_parallel ()) e = e + 1
+ if (omp_get_num_threads ().ne.1) e = e + 1
+ if (tn2.ne.0) e = e + 1
+ if (omp_get_level ().ne.2) e = e + 1
+ if (omp_get_ancestor_thread_num (0).ne.0) e = e + 1
+ if (omp_get_ancestor_thread_num (1).ne.tn1) e = e + 1
+ if (omp_get_ancestor_thread_num (2).ne.tn2) e = e + 1
+ if (omp_get_ancestor_thread_num (-1).ne.-1) e = e + 1
+ if (omp_get_ancestor_thread_num (3).ne.-1) e = e + 1
+ if (omp_get_team_size (0).ne.1) e = e + 1
+ if (omp_get_team_size (1).ne.4) e = e + 1
+ if (omp_get_team_size (2).ne.1) e = e + 1
+ if (omp_get_team_size (-1).ne.-1) e = e + 1
+ if (omp_get_team_size (3).ne.-1) e = e + 1
+ if (omp_get_active_level ().ne.1) e = e + 1
+ !$omp atomic
+ e2 = e2 + e
+!$omp parallel num_threads (2) firstprivate (tn1, tn2) &
+!$omp& private (e, tn3)
+ e = 0
+ tn3 = omp_get_thread_num ()
+ if (.not.omp_in_parallel ()) e = e + 1
+ if (omp_get_num_threads ().ne.2) e = e + 1
+ if (tn3.lt.0.or.tn3.ge.2) e = e + 1
+ if (omp_get_level ().ne.3) e = e + 1
+ if (omp_get_ancestor_thread_num (0).ne.0) e = e + 1
+ if (omp_get_ancestor_thread_num (1).ne.tn1) e = e + 1
+ if (omp_get_ancestor_thread_num (2).ne.tn2) e = e + 1
+ if (omp_get_ancestor_thread_num (3).ne.tn3) e = e + 1
+ if (omp_get_ancestor_thread_num (-1).ne.-1) e = e + 1
+ if (omp_get_ancestor_thread_num (4).ne.-1) e = e + 1
+ if (omp_get_team_size (0).ne.1) e = e + 1
+ if (omp_get_team_size (1).ne.4) e = e + 1
+ if (omp_get_team_size (2).ne.1) e = e + 1
+ if (omp_get_team_size (3).ne.2) e = e + 1
+ if (omp_get_team_size (-1).ne.-1) e = e + 1
+ if (omp_get_team_size (4).ne.-1) e = e + 1
+ if (omp_get_active_level ().ne.2) e = e + 1
+ !$omp atomic
+ e3 = e3 + e
+!$omp end parallel
+!$omp end parallel
+!$omp end parallel
+ if (e1.ne.0.or.e2.ne.0.or.e3.ne.0) call abort
+end program nested1
diff --git a/libgomp/testsuite/libgomp.fortran/nestedfn4.f90 b/libgomp/testsuite/libgomp.fortran/nestedfn4.f90
new file mode 100644
index 00000000000..c987bf440b0
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/nestedfn4.f90
@@ -0,0 +1,41 @@
+program foo
+ integer :: i, j, k
+ integer :: a(10), c(10)
+ k = 2
+ a(:) = 0
+ call test1
+ call test2
+ do i = 1, 10
+ if (a(i) .ne. 10 * i) call abort
+ end do
+ !$omp parallel do reduction (+:c)
+ do i = 1, 10
+ c = c + a
+ end do
+ do i = 1, 10
+ if (c(i) .ne. 10 * a(i)) call abort
+ end do
+ !$omp parallel do lastprivate (j)
+ do j = 1, 10, k
+ end do
+ if (j .ne. 11) call abort
+contains
+ subroutine test1
+ integer :: i
+ integer :: b(10)
+ do i = 1, 10
+ b(i) = i
+ end do
+ c(:) = 0
+ !$omp parallel do reduction (+:a)
+ do i = 1, 10
+ a = a + b
+ end do
+ end subroutine test1
+ subroutine test2
+ !$omp parallel do lastprivate (j)
+ do j = 1, 10, k
+ end do
+ if (j .ne. 11) call abort
+ end subroutine test2
+end program foo
diff --git a/libgomp/testsuite/libgomp.fortran/strassen.f90 b/libgomp/testsuite/libgomp.fortran/strassen.f90
new file mode 100644
index 00000000000..b44982665a6
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/strassen.f90
@@ -0,0 +1,75 @@
+! { dg-options "-O2" }
+
+program strassen_matmul
+ use omp_lib
+ integer, parameter :: N = 1024
+ double precision, save :: A(N,N), B(N,N), C(N,N), D(N,N)
+ double precision :: start, end
+
+ call random_seed
+ call random_number (A)
+ call random_number (B)
+ start = omp_get_wtime ()
+ C = matmul (A, B)
+ end = omp_get_wtime ()
+ write(*,'(a, f10.6)') ' Time for matmul = ', end - start
+ D = 0
+ start = omp_get_wtime ()
+ call strassen (A, B, D, N)
+ end = omp_get_wtime ()
+ write(*,'(a, f10.6)') ' Time for Strassen = ', end - start
+ if (sqrt (sum ((C - D) ** 2)) / N .gt. 0.1) call abort
+ D = 0
+ start = omp_get_wtime ()
+!$omp parallel
+!$omp single
+ call strassen (A, B, D, N)
+!$omp end single nowait
+!$omp end parallel
+ end = omp_get_wtime ()
+ write(*,'(a, f10.6)') ' Time for Strassen MP = ', end - start
+ if (sqrt (sum ((C - D) ** 2)) / N .gt. 0.1) call abort
+
+contains
+
+ recursive subroutine strassen (A, B, C, N)
+ integer, intent(in) :: N
+ double precision, intent(in) :: A(N,N), B(N,N)
+ double precision, intent(out) :: C(N,N)
+ double precision :: T(N/2,N/2,7)
+ integer :: K, L
+
+ if (iand (N,1) .ne. 0 .or. N < 64) then
+ C = matmul (A, B)
+ return
+ end if
+ K = N / 2
+ L = N / 2 + 1
+!$omp task shared (A, B, T)
+ call strassen (A(:K,:K) + A(L:,L:), B(:K,:K) + B(L:,L:), T(:,:,1), K)
+!$omp end task
+!$omp task shared (A, B, T)
+ call strassen (A(L:,:K) + A(L:,L:), B(:K,:K), T(:,:,2), K)
+!$omp end task
+!$omp task shared (A, B, T)
+ call strassen (A(:K,:K), B(:K,L:) - B(L:,L:), T(:,:,3), K)
+!$omp end task
+!$omp task shared (A, B, T)
+ call strassen (A(L:,L:), B(L:,:K) - B(:K,:K), T(:,:,4), K)
+!$omp end task
+!$omp task shared (A, B, T)
+ call strassen (A(:K,:K) + A(:K,L:), B(L:,L:), T(:,:,5), K)
+!$omp end task
+!$omp task shared (A, B, T)
+ call strassen (A(L:,:K) - A(:K,:K), B(:K,:K) + B(:K,L:), T(:,:,6), K)
+!$omp end task
+!$omp task shared (A, B, T)
+ call strassen (A(:K,L:) - A(L:,L:), B(L:,:K) + B(L:,L:), T(:,:,7), K)
+!$omp end task
+!$omp taskwait
+ C(:K,:K) = T(:,:,1) + T(:,:,4) - T(:,:,5) + T(:,:,7)
+ C(L:,:K) = T(:,:,2) + T(:,:,4)
+ C(:K,L:) = T(:,:,3) + T(:,:,5)
+ C(L:,L:) = T(:,:,1) - T(:,:,2) + T(:,:,3) + T(:,:,6)
+ end subroutine strassen
+end
diff --git a/libgomp/testsuite/libgomp.fortran/tabs1.f90 b/libgomp/testsuite/libgomp.fortran/tabs1.f90
new file mode 100644
index 00000000000..4f3d4f5b435
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/tabs1.f90
@@ -0,0 +1,12 @@
+ if (b().ne.2) call abort
+contains
+subroutine a
+!$omp parallel
+ !$omp end parallel
+ end subroutine a
+function b()
+ integer :: b
+ b = 1
+ !$ b = 2
+end function b
+ end
diff --git a/libgomp/testsuite/libgomp.fortran/tabs2.f b/libgomp/testsuite/libgomp.fortran/tabs2.f
new file mode 100644
index 00000000000..7aed5498d34
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/tabs2.f
@@ -0,0 +1,13 @@
+! { dg-options "-ffixed-form" }
+ if (b().ne.2) call abort
+ contains
+ subroutine a
+!$omp parallel
+!$omp end parallel
+ end subroutine a
+ function b()
+ integer :: b
+ b = 1
+!$ b = 2
+ end function b
+ end
diff --git a/libgomp/testsuite/libgomp.fortran/task1.f90 b/libgomp/testsuite/libgomp.fortran/task1.f90
new file mode 100644
index 00000000000..df57cb83168
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/task1.f90
@@ -0,0 +1,27 @@
+! { dg-do run }
+
+program tasktest
+ use omp_lib
+ integer :: i, j
+ common /tasktest_j/ j
+ j = 0
+ !$omp parallel private (i)
+ i = omp_get_thread_num ()
+ if (i.lt.2) then
+ !$omp task if (.false.) default(firstprivate)
+ call subr (i + 1)
+ !$omp end task
+ end if
+ !$omp end parallel
+ if (j.gt.0) call abort
+contains
+ subroutine subr (i)
+ use omp_lib
+ integer :: i, j
+ common /tasktest_j/ j
+ if (omp_get_thread_num ().ne.(i - 1)) then
+ !$omp atomic
+ j = j + 1
+ end if
+ end subroutine subr
+end program tasktest
diff --git a/libgomp/testsuite/libgomp.fortran/task2.f90 b/libgomp/testsuite/libgomp.fortran/task2.f90
new file mode 100644
index 00000000000..24ffee53ac8
--- /dev/null
+++ b/libgomp/testsuite/libgomp.fortran/task2.f90
@@ -0,0 +1,142 @@
+ integer :: err
+ err = 0
+!$omp parallel num_threads (4) default (none) shared (err)
+!$omp single
+ call test
+!$omp end single
+!$omp end parallel
+ if (err.ne.0) call abort
+contains
+ subroutine check (x, y, l)
+ integer :: x, y
+ logical :: l
+ l = l .or. x .ne. y
+ end subroutine check
+
+ subroutine foo (c, d, e, f, g, h, i, j, k, n)
+ use omp_lib
+ integer :: n
+ character (len = *) :: c
+ character (len = n) :: d
+ integer, dimension (2, 3:5, n) :: e
+ integer, dimension (2, 3:n, n) :: f
+ character (len = *), dimension (5, 3:n) :: g
+ character (len = n), dimension (5, 3:n) :: h
+ real, dimension (:, :, :) :: i
+ double precision, dimension (3:, 5:, 7:) :: j
+ integer, dimension (:, :, :) :: k
+ logical :: l
+ integer :: p, q, r
+ character (len = n) :: s
+ integer, dimension (2, 3:5, n) :: t
+ integer, dimension (2, 3:n, n) :: u
+ character (len = n), dimension (5, 3:n) :: v
+ character (len = 2 * n + 24) :: w
+ integer :: x, z
+ character (len = 1) :: y
+ s = 'PQRSTUV'
+ forall (p = 1:2, q = 3:5, r = 1:7) t(p, q, r) = -10 + p - q + 2 * r
+ forall (p = 1:2, q = 3:7, r = 1:7) u(p, q, r) = 30 - p + q - 2 * r
+ forall (p = 1:5, q = 3:7, p + q .le. 8) v(p, q) = '_+|/Oo_'
+ forall (p = 1:5, q = 3:7, p + q .gt. 8) v(p, q) = '///|||!'
+!$omp task default (none) firstprivate (c, d, e, f, g, h, i, j, k) &
+!$omp & firstprivate (s, t, u, v) private (l, p, q, r, w, x, y) shared (err)
+ l = .false.
+ l = l .or. c .ne. 'abcdefghijkl'
+ l = l .or. d .ne. 'ABCDEFG'
+ l = l .or. s .ne. 'PQRSTUV'
+ do 100, p = 1, 2
+ do 100, q = 3, 7
+ do 100, r = 1, 7
+ if (q .lt. 6) l = l .or. e(p, q, r) .ne. 5 + p + q + 2 * r
+ l = l .or. f(p, q, r) .ne. 25 + p + q + 2 * r
+ if (r .lt. 6 .and. q + r .le. 8) l = l .or. g(r, q) .ne. '0123456789AB'
+ if (r .lt. 6 .and. q + r .gt. 8) l = l .or. g(r, q) .ne. '9876543210ZY'
+ if (r .lt. 6 .and. q + r .le. 8) l = l .or. h(r, q) .ne. '0123456'
+ if (r .lt. 6 .and. q + r .gt. 8) l = l .or. h(r, q) .ne. '9876543'
+ if (q .lt. 6) l = l .or. t(p, q, r) .ne. -10 + p - q + 2 * r
+ l = l .or. u(p, q, r) .ne. 30 - p + q - 2 * r
+ if (r .lt. 6 .and. q + r .le. 8) l = l .or. v(r, q) .ne. '_+|/Oo_'
+ if (r .lt. 6 .and. q + r .gt. 8) l = l .or. v(r, q) .ne. '///|||!'
+100 continue
+ do 101, p = 3, 5
+ do 101, q = 2, 6
+ do 101, r = 1, 7
+ l = l .or. i(p - 2, q - 1, r) .ne. 7.5 * p * q * r
+ l = l .or. j(p, q + 3, r + 6) .ne. 9.5 * p * q * r
+101 continue
+ do 102, p = 1, 5
+ do 102, q = 4, 6
+ l = l .or. k(p, 1, q - 3) .ne. 19 + p + 7 + 3 * q
+102 continue
+ call check (size (e, 1), 2, l)
+ call check (size (e, 2), 3, l)
+ call check (size (e, 3), 7, l)
+ call check (size (e), 42, l)
+ call check (size (f, 1), 2, l)
+ call check (size (f, 2), 5, l)
+ call check (size (f, 3), 7, l)
+ call check (size (f), 70, l)
+ call check (size (g, 1), 5, l)
+ call check (size (g, 2), 5, l)
+ call check (size (g), 25, l)
+ call check (size (h, 1), 5, l)
+ call check (size (h, 2), 5, l)
+ call check (size (h), 25, l)
+ call check (size (i, 1), 3, l)
+ call check (size (i, 2), 5, l)
+ call check (size (i, 3), 7, l)
+ call check (size (i), 105, l)
+ call check (size (j, 1), 4, l)
+ call check (size (j, 2), 5, l)
+ call check (size (j, 3), 7, l)
+ call check (size (j), 140, l)
+ call check (size (k, 1), 5, l)
+ call check (size (k, 2), 1, l)
+ call check (size (k, 3), 3, l)
+ call check (size (k), 15, l)
+ if (l) then
+!$omp atomic
+ err = err + 1
+ end if
+!$omp end task
+ c = ''
+ d = ''
+ e(:, :, :) = 199
+ f(:, :, :) = 198
+ g(:, :) = ''
+ h(:, :) = ''
+ i(:, :, :) = 7.0
+ j(:, :, :) = 8.0
+ k(:, :, :) = 9
+ s = ''
+ t(:, :, :) = 10
+ u(:, :, :) = 11
+ v(:, :) = ''
+ end subroutine foo
+
+ subroutine test
+ character (len = 12) :: c
+ character (len = 7) :: d
+ integer, dimension (2, 3:5, 7) :: e
+ integer, dimension (2, 3:7, 7) :: f
+ character (len = 12), dimension (5, 3:7) :: g
+ character (len = 7), dimension (5, 3:7) :: h
+ real, dimension (3:5, 2:6, 1:7) :: i
+ double precision, dimension (3:6, 2:6, 1:7) :: j
+ integer, dimension (1:5, 7:7, 4:6) :: k
+ integer :: p, q, r
+ c = 'abcdefghijkl'
+ d = 'ABCDEFG'
+ forall (p = 1:2, q = 3:5, r = 1:7) e(p, q, r) = 5 + p + q + 2 * r
+ forall (p = 1:2, q = 3:7, r = 1:7) f(p, q, r) = 25 + p + q + 2 * r
+ forall (p = 1:5, q = 3:7, p + q .le. 8) g(p, q) = '0123456789AB'
+ forall (p = 1:5, q = 3:7, p + q .gt. 8) g(p, q) = '9876543210ZY'
+ forall (p = 1:5, q = 3:7, p + q .le. 8) h(p, q) = '0123456'
+ forall (p = 1:5, q = 3:7, p + q .gt. 8) h(p, q) = '9876543'
+ forall (p = 3:5, q = 2:6, r = 1:7) i(p, q, r) = 7.5 * p * q * r
+ forall (p = 3:6, q = 2:6, r = 1:7) j(p, q, r) = 9.5 * p * q * r
+ forall (p = 1:5, q = 7:7, r = 4:6) k(p, q, r) = 19 + p + q + 3 * r
+ call foo (c, d, e, f, g, h, i, j, k, 7)
+ end subroutine test
+end
diff --git a/libgomp/testsuite/libgomp.fortran/vla4.f90 b/libgomp/testsuite/libgomp.fortran/vla4.f90
index 58caabc6248..cdd4849b6ad 100644
--- a/libgomp/testsuite/libgomp.fortran/vla4.f90
+++ b/libgomp/testsuite/libgomp.fortran/vla4.f90
@@ -94,7 +94,7 @@ contains
forall (p = 1:2, q = 3:7, r = 1:7) u(p, q, r) = 30 - x - p + q - 2 * r
forall (p = 1:5, q = 3:7, p + q .le. 8) v(p, q) = w(1:7)
forall (p = 1:5, q = 3:7, p + q .gt. 8) v(p, q) = w(20:26)
-!$omp barrier
+!$omp barrier ! { dg-warning "may not be closely nested" }
y = ''
if (x .eq. 0) y = '0'
if (x .eq. 1) y = '1'
diff --git a/libgomp/testsuite/libgomp.fortran/vla5.f90 b/libgomp/testsuite/libgomp.fortran/vla5.f90
index 5c889f9923a..9b611505219 100644
--- a/libgomp/testsuite/libgomp.fortran/vla5.f90
+++ b/libgomp/testsuite/libgomp.fortran/vla5.f90
@@ -66,7 +66,7 @@ contains
forall (p = 1:2, q = 3:7, r = 1:7) u(p, q, r) = 30 - x - p + q - 2 * r
forall (p = 1:5, q = 3:7, p + q .le. 8) v(p, q) = w(1:7)
forall (p = 1:5, q = 3:7, p + q .gt. 8) v(p, q) = w(20:26)
-!$omp barrier
+!$omp barrier ! { dg-warning "may not be closely nested" }
y = ''
if (x .eq. 0) y = '0'
if (x .eq. 1) y = '1'