author     jakub <jakub@138bc75d-0d04-0410-961f-82ee72b054a4>   2013-10-11 09:26:50 +0000
committer  jakub <jakub@138bc75d-0d04-0410-961f-82ee72b054a4>   2013-10-11 09:26:50 +0000
commit     bc7bff742355562cd43792f0814bae55eb21d012 (patch)
tree       2a3d60fbf15f9346c02647762dcc441fe3841855 /libgomp/testsuite/libgomp.c/affinity-1.c
parent     cf3cae555d03f07e989fd18e4db778fba44d9abd (diff)
download   gcc-bc7bff742355562cd43792f0814bae55eb21d012.tar.gz
libgomp/
* target.c: New file.
* Makefile.am (libgomp_la_SOURCES): Add target.c.
* Makefile.in: Regenerated.
* libgomp_g.h (GOMP_task): Add depend argument.
(GOMP_barrier_cancel, GOMP_loop_end_cancel,
GOMP_sections_end_cancel, GOMP_target, GOMP_target_data,
GOMP_target_end_data, GOMP_target_update, GOMP_teams,
GOMP_parallel_loop_static, GOMP_parallel_loop_dynamic,
GOMP_parallel_loop_guided, GOMP_parallel_loop_runtime,
GOMP_parallel, GOMP_cancel, GOMP_cancellation_point,
GOMP_taskgroup_start, GOMP_taskgroup_end,
GOMP_parallel_sections): New prototypes.
* fortran.c (omp_is_initial_device): Add ialias_redirect.
(omp_is_initial_device_): New function.
(ULP, STR1, STR2, ialias_redirect): Removed.
(omp_get_cancellation_, omp_get_proc_bind_, omp_set_default_device_,
omp_set_default_device_8_, omp_get_default_device_,
omp_get_num_devices_, omp_get_num_teams_, omp_get_team_num_): New
functions.
* libgomp.map (GOMP_barrier_cancel, GOMP_loop_end_cancel,
GOMP_sections_end_cancel, GOMP_target, GOMP_target_data,
GOMP_target_end_data, GOMP_target_update, GOMP_teams): Export
@@GOMP_4.0.
(omp_is_initial_device, omp_is_initial_device_, omp_get_cancellation,
omp_get_cancellation_, omp_get_proc_bind, omp_get_proc_bind_,
omp_set_default_device, omp_set_default_device_,
omp_set_default_device_8_, omp_get_default_device,
omp_get_default_device_, omp_get_num_devices, omp_get_num_devices_,
omp_get_num_teams, omp_get_num_teams_, omp_get_team_num,
omp_get_team_num_): Export @@OMP_4.0.
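The routines exported at @@OMP_4.0 above are the new OpenMP 4.0 user-level API. As a quick illustration (this snippet is not part of the patch; it only calls the interfaces that omp.h now declares):

  #include <omp.h>
  #include <stdio.h>

  int
  main ()
  {
    /* New OpenMP 4.0 queries exported at OMP_4.0.  */
    printf ("cancellation enabled: %d\n", omp_get_cancellation ());
    printf ("proc bind: %d\n", (int) omp_get_proc_bind ());
    printf ("num devices: %d\n", omp_get_num_devices ());
    printf ("default device: %d\n", omp_get_default_device ());
    printf ("on initial device: %d\n", omp_is_initial_device ());
    printf ("team %d of %d teams\n", omp_get_team_num (), omp_get_num_teams ());
    return 0;
  }
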
* team.c (struct gomp_thread_start_data): Add place field.
(gomp_thread_start): Clear thr->thread_pool and
thr->task before returning. Use gomp_team_barrier_wait_final
instead of gomp_team_barrier_wait. Initialize thr->place.
(gomp_new_team): Initialize work_shares_to_free, work_share_cancelled,
team_cancelled and task_queued_count fields.
(gomp_free_pool_helper): Clear thr->thread_pool and thr->task
before calling pthread_exit.
(gomp_free_thread): No longer static. Use
gomp_managed_threads_lock instead of gomp_remaining_threads_lock.
(gomp_team_start): Add flags argument. Set
thr->thread_pool->threads_busy to nthreads immediately after creating
new pool. Use gomp_managed_threads_lock instead of
gomp_remaining_threads_lock. Handle OpenMP 4.0 affinity.
(gomp_team_end): Use gomp_managed_threads_lock instead of
gomp_remaining_threads_lock. Use gomp_team_barrier_wait_final instead
of gomp_team_barrier_wait. If team->team_cancelled, call
gomp_fini_work_share on ws chain starting at team->work_shares_to_free
rather than thr->ts.work_share.
(initialize_team): Don't call gomp_sem_init here.
* sections.c (GOMP_parallel_sections_start): Adjust gomp_team_start
caller.
(GOMP_parallel_sections, GOMP_sections_end_cancel): New functions.
* env.c (gomp_global_icv): Add default_device_var, target_data and
bind_var initializers.
(gomp_cpu_affinity, gomp_cpu_affinity_len): Remove.
(gomp_bind_var_list, gomp_bind_var_list_len, gomp_places_list,
gomp_places_list_len): New variables.
(parse_bind_var, parse_one_place, parse_places_var): New functions.
(parse_affinity): Rewritten to construct OMP_PLACES list with unit
sized places.
(gomp_cancel_var): New global variable.
(parse_int): New function.
(handle_omp_display_env): New function.
(initialize_env): Use it. Initialize default_device_var.
Parse OMP_CANCELLATION env var. Use parse_bind_var to parse
OMP_PROC_BIND instead of parse_boolean. Use parse_places_var for
OMP_PLACES parsing. Don't call parse_affinity if OMP_PLACES has
been successfully parsed (and call gomp_init_affinity in that case).
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
omp_get_team_num, omp_is_initial_device): New functions.
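For reference, the parsers added above accept settings of the following shape (the values are illustrative, not taken from the patch; the variables are read by libgomp at startup, so in practice they are set in the environment before the program is launched):

  #include <stdlib.h>

  int
  main ()
  {
    /* Shown only to illustrate the accepted syntax; setting these from
       within the program itself is too late to affect libgomp.  */
    setenv ("OMP_PLACES", "{0:4},{4:4}", 1);      /* parse_places_var */
    setenv ("OMP_PROC_BIND", "spread,close", 1);  /* parse_bind_var */
    setenv ("OMP_CANCELLATION", "true", 1);       /* gomp_cancel_var */
    setenv ("OMP_DISPLAY_ENV", "verbose", 1);     /* handle_omp_display_env */
    return 0;
  }
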
* libgomp.h: Include stdlib.h.
(ialias_ulp, ialias_str1, ialias_str2, ialias_redirect, ialias_call):
Define.
(struct target_mem_desc): Forward declare.
(struct gomp_task_icv): Add default_device_var, target_data, bind_var
and thread_limit_var fields.
(gomp_get_num_devices): New prototype.
(gomp_cancel_var): New extern decl.
(struct gomp_team): Add work_shares_to_free, work_share_cancelled,
team_cancelled and task_queued_count fields. Add comments about
task_{,queued_,running_}count.
(gomp_cancel_kind): New enum.
(gomp_work_share_end_cancel): New prototype.
(struct gomp_task): Add next_taskgroup, prev_taskgroup, taskgroup,
copy_ctors_done, dependers, depend_hash, depend_count, num_dependees
and depend fields.
(struct gomp_taskgroup): New type.
(struct gomp_task_depend_entry,
struct gomp_dependers_vec): New types.
(gomp_finish_task): Free depend_hash if non-NULL.
(struct gomp_team_state): Add place_partition_off
and place_partition_len fields.
(gomp_bind_var_list, gomp_bind_var_list_len, gomp_places_list,
gomp_places_list_len): New extern decls.
(struct gomp_thread): Add place field.
(gomp_cpu_affinity, gomp_cpu_affinity_len): Remove.
(gomp_init_thread_affinity): Add place argument.
(gomp_affinity_alloc, gomp_affinity_init_place, gomp_affinity_add_cpus,
gomp_affinity_remove_cpu, gomp_affinity_copy_place,
gomp_affinity_same_place, gomp_affinity_finalize_place_list,
gomp_affinity_init_level, gomp_affinity_print_place): New
prototypes.
(gomp_team_start): Add flags argument.
(gomp_thread_limit_var, gomp_remaining_threads_count,
gomp_remaining_threads_lock): Remove.
(gomp_managed_threads_lock): New variable.
(struct gomp_thread_pool): Add threads_busy field.
(gomp_free_thread): New prototype.
* task.c: Include hashtab.h.
(hash_entry_type): New typedef.
(htab_alloc, htab_free, htab_hash, htab_eq): New inlines.
(gomp_init_task): Clear dependers, depend_hash, depend_count,
copy_ctors_done and taskgroup fields.
(GOMP_task): Add depend argument, handle depend clauses. If
gomp_team_barrier_cancelled or if its taskgroup has been
cancelled, don't queue or start new tasks. Set copy_ctors_done
field if needed. Initialize taskgroup field. If copy_ctors_done
and already cancelled, don't discard the task. If taskgroup is
non-NULL, enqueue the task into taskgroup queue. Increment
num_children field in taskgroup. Increment task_queued_count.
(gomp_task_run_pre, gomp_task_run_post_remove_parent,
gomp_task_run_post_remove_taskgroup): New inline functions.
(gomp_task_run_post_handle_depend_hash,
gomp_task_run_post_handle_dependers,
gomp_task_run_post_handle_depend): New functions.
(GOMP_taskwait): Use them. If more than one new task
has been queued, wake other threads if needed.
(gomp_barrier_handle_tasks): Likewise. If
gomp_team_barrier_cancelled, don't start any new tasks, just free
all tasks.
(GOMP_taskgroup_start, GOMP_taskgroup_end): New functions.
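The depend and taskgroup support described above implements the following source-level constructs; this is a sketch of valid OpenMP 4.0 usage, not code from the patch:

  int
  dependent_tasks (void)
  {
    int x = 0, y = 0;
    #pragma omp parallel
    #pragma omp single
    {
      #pragma omp taskgroup
      {
        /* GOMP_task receives the depend vector; the second task waits
           for the first through the out/in dependence on x.  */
        #pragma omp task shared(x) depend(out: x)
        x = 1;
        #pragma omp task shared(x, y) depend(in: x)
        y = x + 1;
      }  /* GOMP_taskgroup_end waits for both tasks here.  */
    }
    return x + y;
  }
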
* omp_lib.f90.in
(omp_proc_bind_kind, omp_proc_bind_false,
omp_proc_bind_true, omp_proc_bind_master, omp_proc_bind_close,
omp_proc_bind_spread): New params.
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
omp_get_team_num, omp_is_initial_device): New interfaces.
(omp_get_dynamic, omp_get_nested, omp_in_parallel,
omp_get_max_threads, omp_get_num_procs, omp_get_num_threads,
omp_get_thread_num, omp_get_thread_limit, omp_set_max_active_levels,
omp_get_max_active_levels, omp_get_level, omp_get_ancestor_thread_num,
omp_get_team_size, omp_get_active_level, omp_in_final): Remove
useless use omp_lib_kinds.
* omp.h.in (omp_proc_bind_t): New typedef.
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
omp_get_team_num, omp_is_initial_device): New prototypes.
* loop.c (gomp_parallel_loop_start): Add flags argument, pass it
through to gomp_team_start.
(GOMP_parallel_loop_static_start, GOMP_parallel_loop_dynamic_start,
GOMP_parallel_loop_guided_start, GOMP_parallel_loop_runtime_start):
Adjust gomp_parallel_loop_start callers.
(GOMP_parallel_loop_static, GOMP_parallel_loop_dynamic,
GOMP_parallel_loop_guided, GOMP_parallel_loop_runtime,
GOMP_loop_end_cancel): New functions.
(GOMP_parallel_end): Add ialias_redirect.
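With the non-_start entry points a combined parallel loop becomes a single library call; a sketch of the source form and, per the combined-1.c adjustment later in this log, the call GCC is now expected to emit:

  void
  fill (int *a, int n)
  {
    int i;
    /* Expected to lower to one GOMP_parallel_loop_runtime (...) call
       instead of GOMP_parallel_loop_runtime_start + GOMP_parallel_end.  */
    #pragma omp parallel for schedule(runtime)
    for (i = 0; i < n; i++)
      a[i] = i;
  }
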
* hashtab.h: New file.
* libgomp.texi (Environment Variables): Minor cleanup,
update section refs to OpenMP 4.0rc2.
(OMP_DISPLAY_ENV, GOMP_SPINCOUNT): Document these
environment variables.
* work.c (gomp_work_share_end, gomp_work_share_end_nowait): Set
team->work_shares_to_free to thr->ts.work_share before calling
free_work_share.
(gomp_work_share_end_cancel): New function.
* config/linux/proc.c: Include errno.h.
(gomp_get_cpuset_size, gomp_cpuset_size, gomp_cpusetp): New variables.
(gomp_cpuset_popcount): Add cpusetsize argument, use it instead of
sizeof (cpu_set_t) to determine number of iterations. Fix up check
extern decl. Use CPU_COUNT_S if available, or CPU_COUNT if
gomp_cpuset_size is sizeof (cpu_set_t).
(gomp_init_num_threads): Initialize gomp_cpuset_size,
gomp_get_cpuset_size and gomp_cpusetp here, use gomp_cpusetp instead
of &cpuset and pass gomp_cpuset_size instead of sizeof (cpu_set_t)
to pthread_getaffinity_np. Free and clear gomp_cpusetp if it didn't
contain any logical CPUs.
(get_num_procs): Don't call pthread_getaffinity_np if gomp_cpusetp
is NULL. Use gomp_cpusetp instead of &cpuset and pass
gomp_get_cpuset_size instead of sizeof (cpu_set_t) to
pthread_getaffinity_np. Check gomp_places_list instead of
gomp_cpu_affinity. Adjust gomp_cpuset_popcount caller.
* config/linux/bar.c (gomp_barrier_wait_end,
gomp_barrier_wait_last): Use BAR_* defines.
(gomp_team_barrier_wait_end): Likewise. Clear BAR_CANCELLED
from state where needed. Set work_share_cancelled to 0 on last
thread.
(gomp_team_barrier_wait_final, gomp_team_barrier_wait_cancel_end,
gomp_team_barrier_wait_cancel, gomp_team_barrier_cancel): New
functions.
* config/linux/proc.h (gomp_cpuset_popcount): Add attribute_hidden.
Add cpusetsize argument.
(gomp_cpuset_size, gomp_cpusetp): Declare.
* config/linux/affinity.c: Include errno.h, stdio.h and string.h.
(affinity_counter): Remove.
(CPU_ISSET_S, CPU_ZERO_S, CPU_SET_S, CPU_CLR_S): Define
if CPU_ALLOC_SIZE isn't defined.
(gomp_init_affinity): Rewritten; if gomp_places_list is NULL, try to
silently create OMP_PLACES=threads, and if it is non-NULL afterwards,
bind the current thread to the first place.
(gomp_init_thread_affinity): Rewritten. Add place argument, just
call pthread_setaffinity_np with gomp_places_list[place].
(gomp_affinity_alloc, gomp_affinity_init_place, gomp_affinity_add_cpus,
gomp_affinity_remove_cpu, gomp_affinity_copy_place,
gomp_affinity_same_place, gomp_affinity_finalize_place_list,
gomp_affinity_init_level, gomp_affinity_print_place): New functions.
* config/linux/bar.h (BAR_TASK_PENDING, BAR_WAS_LAST,
BAR_WAITING_FOR_TASK, BAR_INCR, BAR_CANCELLED): Define.
(gomp_barrier_t): Add awaited_final field.
(gomp_barrier_init): Initialize awaited_final field.
(gomp_team_barrier_wait_final, gomp_team_barrier_wait_cancel,
gomp_team_barrier_wait_cancel_end, gomp_team_barrier_cancel): New
prototypes.
(gomp_barrier_wait_start): Preserve BAR_CANCELLED bit. Use BAR_*
defines.
(gomp_barrier_wait_cancel_start, gomp_team_barrier_wait_final_start,
gomp_team_barrier_cancelled): New inline functions.
(gomp_barrier_last_thread,
gomp_team_barrier_set_task_pending,
gomp_team_barrier_clear_task_pending,
gomp_team_barrier_set_waiting_for_tasks,
gomp_team_barrier_waiting_for_tasks,
gomp_team_barrier_done): Use BAR_* defines.
* config/posix/bar.c (gomp_barrier_init): Clear cancellable field.
(gomp_barrier_wait_end): Use BAR_* defines.
(gomp_team_barrier_wait_end): Clear BAR_CANCELLED from state.
Set work_share_cancelled to 0 on last thread, use __atomic_load_n.
Use BAR_* defines.
(gomp_team_barrier_wait_cancel_end, gomp_team_barrier_wait_cancel,
gomp_team_barrier_cancel): New functions.
* config/posix/affinity.c (gomp_init_thread_affinity): Add place
argument.
(gomp_affinity_alloc, gomp_affinity_init_place, gomp_affinity_add_cpus,
gomp_affinity_remove_cpu, gomp_affinity_copy_place,
gomp_affinity_same_place, gomp_affinity_finalize_place_list,
gomp_affinity_init_level, gomp_affinity_print_place): New stubs.
* config/posix/bar.h (BAR_TASK_PENDING, BAR_WAS_LAST,
BAR_WAITING_FOR_TASK, BAR_INCR, BAR_CANCELLED): Define.
(gomp_barrier_t): Add cancellable field.
(gomp_team_barrier_wait_cancel, gomp_team_barrier_wait_cancel_end,
gomp_team_barrier_cancel): New prototypes.
(gomp_barrier_wait_start): Preserve BAR_CANCELLED bit.
(gomp_barrier_wait_cancel_start, gomp_team_barrier_wait_final,
gomp_team_barrier_cancelled): New inline functions.
(gomp_barrier_wait_start, gomp_barrier_last_thread,
gomp_team_barrier_set_task_pending,
gomp_team_barrier_clear_task_pending,
gomp_team_barrier_set_waiting_for_tasks,
gomp_team_barrier_waiting_for_tasks,
gomp_team_barrier_done): Use BAR_* defines.
* barrier.c (GOMP_barrier_cancel): New function.
* omp_lib.h.in (omp_proc_bind_kind, omp_proc_bind_false,
omp_proc_bind_true, omp_proc_bind_master, omp_proc_bind_close,
omp_proc_bind_spread): New params.
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
omp_get_team_num, omp_is_initial_device): New externals.
* parallel.c (GOMP_parallel, GOMP_cancel, GOMP_cancellation_point):
New functions.
(gomp_resolve_num_threads): Adjust for thread_limit now being in
icv->thread_limit_var. Use UINT_MAX instead of ULONG_MAX as
infinity. If not nested, just return minimum of max_num_threads
and icv->thread_limit_var and if thr->thread_pool, set threads_busy
to the returned value. Otherwise, atomically update
thr->thread_pool->threads_busy rather than gomp_remaining_threads_count.
(GOMP_parallel_end): Adjust for thread_limit now being in
icv->thread_limit_var. Use UINT_MAX instead of ULONG_MAX as
infinity. Adjust threads_busy in the pool rather than
gomp_remaining_threads_count. Remember team->nthreads and call
gomp_team_end before adjusting threads_busy, if not nested
afterwards, just set it to 1 non-atomically. Add ialias.
(GOMP_parallel_start): Adjust gomp_team_start caller.
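GOMP_cancel and GOMP_cancellation_point back the new cancellation constructs; a minimal sketch of the source forms involved (illustrative only; cancellation is honoured only when OMP_CANCELLATION=true):

  int
  find_negative (int *a, int n)
  {
    int result = -1;
    #pragma omp parallel for shared(result)
    for (int i = 0; i < n; i++)
      {
        #pragma omp cancellation point for   /* GOMP_cancellation_point */
        if (a[i] < 0)
          {
            #pragma omp critical
            result = i;
            #pragma omp cancel for           /* GOMP_cancel */
          }
      }
    return result;  /* Best effort: some index with a[i] < 0, or -1.  */
  }
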
* testsuite/libgomp.c/atomic-14.c: Add parens to make it valid.
* testsuite/libgomp.c/affinity-1.c: New test.
* testsuite/libgomp.c/atomic-15.c: New test.
* testsuite/libgomp.c/atomic-16.c: New test.
* testsuite/libgomp.c/atomic-17.c: New test.
* testsuite/libgomp.c/cancel-for-1.c: New test.
* testsuite/libgomp.c/cancel-for-2.c: New test.
* testsuite/libgomp.c/cancel-parallel-1.c: New test.
* testsuite/libgomp.c/cancel-parallel-2.c: New test.
* testsuite/libgomp.c/cancel-parallel-3.c: New test.
* testsuite/libgomp.c/cancel-sections-1.c: New test.
* testsuite/libgomp.c/cancel-taskgroup-1.c: New test.
* testsuite/libgomp.c/cancel-taskgroup-2.c: New test.
* testsuite/libgomp.c/depend-1.c: New test.
* testsuite/libgomp.c/depend-2.c: New test.
* testsuite/libgomp.c/depend-3.c: New test.
* testsuite/libgomp.c/depend-4.c: New test.
* testsuite/libgomp.c/for-1.c: New test.
* testsuite/libgomp.c/for-1.h: New file.
* testsuite/libgomp.c/for-2.c: New test.
* testsuite/libgomp.c/for-2.h: New file.
* testsuite/libgomp.c/for-3.c: New test.
* testsuite/libgomp.c/pr58392.c: New test.
* testsuite/libgomp.c/simd-1.c: New test.
* testsuite/libgomp.c/simd-2.c: New test.
* testsuite/libgomp.c/simd-3.c: New test.
* testsuite/libgomp.c/simd-4.c: New test.
* testsuite/libgomp.c/simd-5.c: New test.
* testsuite/libgomp.c/simd-6.c: New test.
* testsuite/libgomp.c/target-1.c: New test.
* testsuite/libgomp.c/target-2.c: New test.
* testsuite/libgomp.c/target-3.c: New test.
* testsuite/libgomp.c/target-4.c: New test.
* testsuite/libgomp.c/target-5.c: New test.
* testsuite/libgomp.c/target-6.c: New test.
* testsuite/libgomp.c/target-7.c: New test.
* testsuite/libgomp.c/taskgroup-1.c: New test.
* testsuite/libgomp.c/thread-limit-1.c: New test.
* testsuite/libgomp.c/thread-limit-2.c: New test.
* testsuite/libgomp.c/thread-limit-3.c: New test.
* testsuite/libgomp.c/udr-1.c: New test.
* testsuite/libgomp.c/udr-2.c: New test.
* testsuite/libgomp.c/udr-3.c: New test.
* testsuite/libgomp.c++/affinity-1.C: New test.
* testsuite/libgomp.c++/atomic-10.C: New test.
* testsuite/libgomp.c++/atomic-11.C: New test.
* testsuite/libgomp.c++/atomic-12.C: New test.
* testsuite/libgomp.c++/atomic-13.C: New test.
* testsuite/libgomp.c++/atomic-14.C: New test.
* testsuite/libgomp.c++/atomic-15.C: New test.
* testsuite/libgomp.c++/cancel-for-1.C: New test.
* testsuite/libgomp.c++/cancel-for-2.C: New test.
* testsuite/libgomp.c++/cancel-parallel-1.C: New test.
* testsuite/libgomp.c++/cancel-parallel-2.C: New test.
* testsuite/libgomp.c++/cancel-parallel-3.C: New test.
* testsuite/libgomp.c++/cancel-sections-1.C: New test.
* testsuite/libgomp.c++/cancel-taskgroup-1.C: New test.
* testsuite/libgomp.c++/cancel-taskgroup-2.C: New test.
* testsuite/libgomp.c++/cancel-taskgroup-3.C: New test.
* testsuite/libgomp.c++/cancel-test.h: New file.
* testsuite/libgomp.c++/for-9.C: New test.
* testsuite/libgomp.c++/for-10.C: New test.
* testsuite/libgomp.c++/for-11.C: New test.
* testsuite/libgomp.c++/simd-1.C: New test.
* testsuite/libgomp.c++/simd-2.C: New test.
* testsuite/libgomp.c++/simd-3.C: New test.
* testsuite/libgomp.c++/simd-4.C: New test.
* testsuite/libgomp.c++/simd-5.C: New test.
* testsuite/libgomp.c++/simd-6.C: New test.
* testsuite/libgomp.c++/simd-7.C: New test.
* testsuite/libgomp.c++/simd-8.C: New test.
* testsuite/libgomp.c++/target-1.C: New test.
* testsuite/libgomp.c++/target-2.C: New test.
* testsuite/libgomp.c++/target-2-aux.cc: New file.
* testsuite/libgomp.c++/target-3.C: New test.
* testsuite/libgomp.c++/taskgroup-1.C: New test.
* testsuite/libgomp.c++/udr-1.C: New test.
* testsuite/libgomp.c++/udr-2.C: New test.
* testsuite/libgomp.c++/udr-3.C: New test.
* testsuite/libgomp.c++/udr-4.C: New test.
* testsuite/libgomp.c++/udr-5.C: New test.
* testsuite/libgomp.c++/udr-6.C: New test.
* testsuite/libgomp.c++/udr-7.C: New test.
* testsuite/libgomp.c++/udr-8.C: New test.
* testsuite/libgomp.c++/udr-9.C: New test.
gcc/
* tree-pretty-print.c (dump_omp_clause): Handle OMP_CLAUSE__LOOPTEMP_
and new OpenMP 4.0 clauses, handle UDR OMP_CLAUSE_REDUCTION,
formatting fixes, use pp_colon instead of pp_character (..., ':'),
similarly pp_right_paren.
(dump_generic_node): Handle OMP_DISTRIBUTE, OMP_TEAMS,
OMP_TARGET_DATA, OMP_TARGET, OMP_TARGET_UPDATE, OMP_TASKGROUP,
allow OMP_FOR_INIT to be NULL, handle OMP_ATOMIC_SEQ_CST.
* tree.c (omp_clause_num_ops, omp_clause_code_name): Add OpenMP 4.0
clauses.
(omp_declare_simd_clauses_equal,
omp_remove_redundant_declare_simd_attrs): New functions.
(attribute_value_equal): Use omp_declare_simd_clauses_equal.
(walk_tree_1): Handle new OpenMP 4.0 clauses.
* tree.h (OMP_LOOP_CHECK): Define.
(OMP_FOR_BODY, OMP_FOR_CLAUSES, OMP_FOR_INIT, OMP_FOR_COND,
OMP_FOR_INCR, OMP_FOR_PRE_BODY): Use it.
(OMP_TASKGROUP_BODY, OMP_TEAMS_BODY, OMP_TEAMS_CLAUSES,
OMP_TARGET_DATA_BODY, OMP_TARGET_DATA_CLAUSES, OMP_TARGET_BODY,
OMP_TARGET_CLAUSES, OMP_TARGET_UPDATE_CLAUSES, OMP_CLAUSE_SIZE,
OMP_ATOMIC_SEQ_CST, OMP_CLAUSE_DEPEND_KIND, OMP_CLAUSE_MAP_KIND,
OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION, OMP_CLAUSE_PROC_BIND_KIND,
OMP_CLAUSE_REDUCTION_OMP_ORIG_REF, OMP_CLAUSE_ALIGNED_ALIGNMENT,
OMP_CLAUSE_NUM_TEAMS_EXPR, OMP_CLAUSE_THREAD_LIMIT_EXPR,
OMP_CLAUSE_DEVICE_ID, OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR,
OMP_CLAUSE_SIMDLEN_EXPR): Define.
(OMP_CLAUSE_DECL): Change range up to OMP_CLAUSE__LOOPTEMP_.
(omp_remove_redundant_declare_simd_attrs): New prototype.
* gimple.def (GIMPLE_OMP_TASKGROUP, GIMPLE_OMP_TARGET,
GIMPLE_OMP_TEAMS): New codes.
(GIMPLE_OMP_RETURN): Use GSS_OMP_ATOMIC_STORE instead of GSS_BASE.
* omp-low.c (struct omp_context): Add cancel_label and cancellable
fields.
(target_nesting_level): New variable.
(extract_omp_for_data): Handle GF_OMP_FOR_KIND_DISTRIBUTE and
OMP_CLAUSE_DIST_SCHEDULE. Don't fall back to the library implementation
for collapse > 1 static schedule unless ordered.
(get_ws_args_for): Add par_stmt argument. Handle combined loops.
(determine_parallel_type): Adjust get_ws_args_for caller.
(install_var_field): Handle mask & 4 for double indirection.
(scan_sharing_clauses): Ignore shared clause on teams construct.
Handle OMP_CLAUSE__LOOPTEMP_ and new OpenMP 4.0 clauses.
(create_omp_child_function): If inside target or declare target
constructs, set "omp declare target" attribute on the child
function.
(find_combined_for): New function.
(scan_omp_parallel): Handle combined loops.
(scan_omp_target, scan_omp_teams): New functions.
(check_omp_nesting_restrictions): Check new OpenMP 4.0 nesting
restrictions and set ctx->cancellable for cancellable constructs.
(scan_omp_1_stmt): Call check_omp_nesting_restrictions also on
selected builtin calls. Handle GIMPLE_OMP_TASKGROUP,
GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS.
(build_omp_barrier): Add lhs argument, return gimple rather than
tree.
(omp_clause_aligned_alignment): New function.
(lower_rec_simd_input_clauses): Only call SET_DECL_VALUE_EXPR
on decls.
(lower_rec_input_clauses): Add FD argument. Ignore shared clauses
on teams constructs. Handle user defined reductions and new
OpenMP 4.0 clauses.
(lower_reduction_clauses): Don't set placeholder to address of ref
if it already has the right type.
(lower_send_clauses): Handle OMP_CLAUSE__LOOPTEMP_.
(expand_parallel_call): Use the new non-_start suffixed builtins,
handle OMP_CLAUSE_PROC_BIND, don't call the outlined function
and GOMP_parallel_end after the call.
(expand_task_call): Handle OMP_CLAUSE_DEPEND.
(expand_omp_for_init_counts): Handle combined loops.
(expand_omp_for_init_vars): Add inner_stmt argument, handle combined
loops.
(expand_omp_for_generic): Likewise. Use GOMP_loop_end_cancel at the
end of cancellable loops.
(expand_omp_for_static_nochunk, expand_omp_for_static_chunk):
Likewise. Handle collapse > 1 loops.
(expand_omp_simd): Handle combined loops.
(expand_omp_for): Add inner_stmt argument, adjust callers of
expand_omp_for* functions, use expand_omp_for_static*chunk even
for collapse > 1 unless ordered.
(expand_omp_sections): Use GOMP_sections_end_cancel at the end
of cancellable sections.
(expand_omp_single): Remove need_barrier variable, just rely on
gimple_omp_return_nowait_p. Adjust build_omp_barrier caller.
(expand_omp_synch): Allow GIMPLE_OMP_TASKGROUP and GIMPLE_OMP_TEAMS.
(expand_omp_atomic_load, expand_omp_atomic_store,
expand_omp_atomic_fetch_op): Handle gimple_omp_atomic_seq_cst_p.
(expand_omp_target): New function.
(expand_omp): Handle combined loops. Handle GIMPLE_OMP_TASKGROUP,
GIMPLE_OMP_TEAMS, GIMPLE_OMP_TARGET.
(build_omp_regions_1): Immediately close region for
GF_OMP_TARGET_KIND_UPDATE.
(maybe_add_implicit_barrier_cancel): New function.
(lower_omp_sections): Adjust lower_rec_input_clauses caller. Handle
cancellation.
(lower_omp_single): Likewise. Add clobber after the barrier.
(lower_omp_taskgroup): New function.
(lower_omp_for): Handle combined loops. Adjust
lower_rec_input_clauses caller. Handle cancellation.
(lower_depend_clauses): New function.
(lower_omp_taskreg): Lower depend clauses. Adjust
lower_rec_input_clauses caller. Add clobber after the call. Handle
cancellation.
(lower_omp_target, lower_omp_teams): New functions.
(lower_omp_1): Handle cancellation. Handle GIMPLE_OMP_TASKGROUP,
GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GOMP_barrier, GOMP_cancel
and GOMP_cancellation_point calls.
(lower_omp): Fold stmts inside of target region.
(diagnose_sb_1, diagnose_sb_2): Handle GIMPLE_OMP_TASKGROUP,
GIMPLE_OMP_TARGET and GIMPLE_OMP_TEAMS.
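The new GIMPLE codes and expanders above (expand_omp_target, lower_omp_teams, the combined-loop handling) take source forms like the following; this is an illustrative OpenMP 4.0 example, not code from the patch:

  void
  saxpy (int n, float a, float *x, float *y)
  {
    #pragma omp target data map(to: x[0:n]) map(tofrom: y[0:n])
    {
      #pragma omp target
      #pragma omp teams num_teams(4) thread_limit(64)
      #pragma omp distribute parallel for simd
      for (int i = 0; i < n; i++)
        y[i] = a * x[i] + y[i];

      #pragma omp target update from(y[0:n])
    }
  }
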
* builtin-types.def (DEF_FUNCTION_TYPE_8): Document.
(BT_FN_VOID_OMPFN_PTR_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG,
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): Remove.
(BT_FN_VOID_OMPFN_PTR_UINT_UINT_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT,
BT_FN_BOOL_INT, BT_FN_BOOL_INT_BOOL, BT_FN_VOID_UINT_UINT,
BT_FN_VOID_INT_PTR_SIZE_PTR_PTR_PTR,
BT_FN_VOID_INT_OMPFN_PTR_SIZE_PTR_PTR_PTR,
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR): New.
* tree-ssa-alias.c (ref_maybe_used_by_call_p_1,
call_may_clobber_ref_p_1): Handle BUILT_IN_GOMP_BARRIER_CANCEL,
BUILT_IN_GOMP_TASKGROUP_END, BUILT_IN_GOMP_LOOP_END_CANCEL,
BUILT_IN_GOMP_SECTIONS_END_CANCEL. Don't handle
BUILT_IN_GOMP_PARALLEL_END.
* gimple-low.c (lower_stmt): Handle GIMPLE_OMP_TASKGROUP,
GIMPLE_OMP_TARGET and GIMPLE_OMP_TEAMS.
* gimple-pretty-print.c (dump_gimple_omp_for): Handle
GF_OMP_FOR_KIND_DISTRIBUTE.
(dump_gimple_omp_target, dump_gimple_omp_teams): New functions.
(dump_gimple_omp_block): Handle GIMPLE_OMP_TASKGROUP.
(dump_gimple_omp_return): Print lhs if it has any.
(dump_gimple_omp_atomic_load, dump_gimple_omp_atomic_store): Handle
gimple_omp_atomic_seq_cst_p.
(pp_gimple_stmt_1): Handle GIMPLE_OMP_TASKGROUP, GIMPLE_OMP_TARGET
and GIMPLE_OMP_TEAMS.
* langhooks.c (lhd_omp_mappable_type): New function.
* tree-vectorizer.c (struct simd_array_to_simduid): Fix up comment.
* langhooks.h (struct lang_hooks_for_types): Add omp_mappable_type
hook.
* gimplify.c (enum gimplify_omp_var_data): Add GOVD_MAP,
GOVD_ALIGNED and GOVD_MAP_TO_ONLY.
(enum omp_region_type): Add ORT_TEAMS, ORT_TARGET_DATA and
ORT_TARGET.
(struct gimplify_omp_ctx): Add combined_loop field.
(gimplify_call_expr, gimplify_modify_expr): Don't call fold_stmt
on stmts inside of target region.
(is_gimple_stmt): Return true for OMP_DISTRIBUTE and OMP_TASKGROUP.
(omp_firstprivatize_variable): Handle GOVD_MAP, GOVD_ALIGNED,
ORT_TARGET and ORT_TARGET_DATA.
(omp_add_variable): Avoid checks on readding var for GOVD_ALIGNED.
Handle GOVD_MAP.
(omp_notice_threadprivate_variable): Complain about threadprivate
variables in target region.
(omp_notice_variable): Complain about vars with non-mappable type
in target region. Handle ORT_TEAMS, ORT_TARGET and ORT_TARGET_DATA.
(omp_check_private): Ignore ORT_TARGET* regions.
(gimplify_scan_omp_clauses, gimplify_adjust_omp_clauses_1,
gimplify_adjust_omp_clauses): Handle new OpenMP 4.0 clauses.
(find_combined_omp_for): New function.
(gimplify_omp_for): Handle gimplification of combined loops.
(gimplify_omp_workshare): Gimplify also OMP_TARGET, OMP_TARGET_DATA,
OMP_TEAMS.
(gimplify_omp_target_update): New function.
(gimplify_omp_atomic): Handle OMP_ATOMIC_SEQ_CST.
(gimplify_expr): Handle OMP_DISTRIBUTE, OMP_TARGET, OMP_TARGET_DATA,
OMP_TARGET_UPDATE, OMP_TEAMS, OMP_TASKGROUP.
(gimplify_body): If fndecl has "omp declare target" attribute, add
implicit ORT_TARGET context around it.
* tree.def (OMP_DISTRIBUTE, OMP_TEAMS, OMP_TARGET_DATA, OMP_TARGET,
OMP_TASKGROUP, OMP_TARGET_UPDATE): New tree codes.
* tree-nested.c (convert_nonlocal_reference_stmt,
convert_local_reference_stmt, convert_gimple_call): Handle
GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* omp-builtins.def (BUILT_IN_GOMP_TASK): Use
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR
instead of BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT.
(BUILT_IN_GOMP_TARGET, BUILT_IN_GOMP_TARGET_DATA,
BUILT_IN_GOMP_TARGET_END_DATA, BUILT_IN_GOMP_TARGET_UPDATE,
BUILT_IN_GOMP_TEAMS, BUILT_IN_GOMP_BARRIER_CANCEL,
BUILT_IN_GOMP_LOOP_END_CANCEL,
BUILT_IN_GOMP_SECTIONS_END_CANCEL, BUILT_IN_OMP_GET_TEAM_NUM,
BUILT_IN_OMP_GET_NUM_TEAMS, BUILT_IN_GOMP_TASKGROUP_START,
BUILT_IN_GOMP_TASKGROUP_END, BUILT_IN_GOMP_PARALLEL_LOOP_STATIC,
BUILT_IN_GOMP_PARALLEL_LOOP_DYNAMIC,
BUILT_IN_GOMP_PARALLEL_LOOP_GUIDED,
BUILT_IN_GOMP_PARALLEL_LOOP_RUNTIME, BUILT_IN_GOMP_PARALLEL,
BUILT_IN_GOMP_PARALLEL_SECTIONS, BUILT_IN_GOMP_CANCEL,
BUILT_IN_GOMP_CANCELLATION_POINT): New built-ins.
(BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START,
BUILT_IN_GOMP_PARALLEL_LOOP_DYNAMIC_START,
BUILT_IN_GOMP_PARALLEL_LOOP_GUIDED_START,
BUILT_IN_GOMP_PARALLEL_LOOP_RUNTIME_START,
BUILT_IN_GOMP_PARALLEL_START, BUILT_IN_GOMP_PARALLEL_END,
BUILT_IN_GOMP_PARALLEL_SECTIONS_START): Remove.
* tree-inline.c (remap_gimple_stmt, estimate_num_insns):
Handle GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* gimple.c (gimple_build_omp_taskgroup, gimple_build_omp_target,
gimple_build_omp_teams): New functions.
(walk_gimple_op): Handle GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and
GIMPLE_OMP_TASKGROUP. Walk optional lhs on GIMPLE_OMP_RETURN.
(walk_gimple_stmt, gimple_copy): Handle GIMPLE_OMP_TARGET,
GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* gimple.h (enum gf_mask): GF_OMP_FOR_KIND_DISTRIBUTE,
GF_OMP_FOR_COMBINED, GF_OMP_FOR_COMBINED_INTO,
GF_OMP_TARGET_KIND_MASK, GF_OMP_TARGET_KIND_REGION,
GF_OMP_TARGET_KIND_DATA, GF_OMP_TARGET_KIND_UPDATE,
GF_OMP_ATOMIC_SEQ_CST): New.
(gimple_build_omp_taskgroup, gimple_build_omp_target,
gimple_build_omp_teams): New prototypes.
(gimple_has_substatements): Handle GIMPLE_OMP_TARGET,
GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
(gimple_omp_subcode): Use GIMPLE_OMP_TEAMS instead of
GIMPLE_OMP_SINGLE as end of range.
(gimple_omp_return_set_lhs, gimple_omp_return_lhs,
gimple_omp_return_lhs_ptr, gimple_omp_atomic_seq_cst_p,
gimple_omp_atomic_set_seq_cst, gimple_omp_for_combined_p,
gimple_omp_for_set_combined_p, gimple_omp_for_combined_into_p,
gimple_omp_for_set_combined_into_p, gimple_omp_target_clauses,
gimple_omp_target_clauses_ptr, gimple_omp_target_set_clauses,
gimple_omp_target_kind, gimple_omp_target_set_kind,
gimple_omp_target_child_fn, gimple_omp_target_child_fn_ptr,
gimple_omp_target_set_child_fn, gimple_omp_target_data_arg,
gimple_omp_target_data_arg_ptr, gimple_omp_target_set_data_arg,
gimple_omp_teams_clauses, gimple_omp_teams_clauses_ptr,
gimple_omp_teams_set_clauses): New inlines.
(CASE_GIMPLE_OMP): Add GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS
and GIMPLE_OMP_TASKGROUP.
* tree-core.h (enum omp_clause_code): Add new OpenMP 4.0 clause
codes.
(enum omp_clause_depend_kind, enum omp_clause_map_kind,
enum omp_clause_proc_bind_kind): New.
(union omp_clause_subcode): Add depend_kind, map_kind and
proc_bind_kind fields.
* tree-cfg.c (make_edges): Handle GIMPLE_OMP_TARGET,
GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* langhooks-def.h (lhd_omp_mappable_type): New prototype.
(LANG_HOOKS_OMP_MAPPABLE_TYPE): Define.
(LANG_HOOKS_FOR_TYPES_INITIALIZER): Add it.
gcc/c-family/
* c-cppbuiltin.c (c_cpp_builtins): Predefine _OPENMP to
201307 instead of 201107.
* c-common.c (DEF_FUNCTION_TYPE_8): Define.
(c_common_attribute_table): Add "omp declare target" and
"omp declare simd" attributes.
(handle_omp_declare_target_attribute,
handle_omp_declare_simd_attribute): New functions.
* c-omp.c: Include c-pragma.h.
(c_finish_omp_taskgroup): New function.
(c_finish_omp_atomic): Add swapped argument, if true,
build the operation first with rhs, lhs arguments and use NOP_EXPR
in build_modify_expr.
(c_finish_omp_for): Add code argument, pass it down to make_code.
(c_omp_split_clauses): New function.
(c_split_parallel_clauses): Removed.
(c_omp_declare_simd_clause_cmp, c_omp_declare_simd_clauses_to_numbers,
c_omp_declare_simd_clauses_to_decls): New functions.
* c-common.h (omp_clause_mask): New type.
(OMP_CLAUSE_MASK_1): Define.
(omp_clause_mask::omp_clause_mask, omp_clause_mask::operator &=,
omp_clause_mask::operator |=, omp_clause_mask::operator ~,
omp_clause_mask::operator |, omp_clause_mask::operator &,
omp_clause_mask::operator <<, omp_clause_mask::operator >>,
omp_clause_mask::operator ==): New methods.
(enum c_omp_clause_split): New.
(c_finish_omp_taskgroup): New prototype.
(c_finish_omp_atomic): Add swapped argument.
(c_finish_omp_for): Add code argument.
(c_omp_split_clauses): New prototype.
(c_split_parallel_clauses): Removed.
(c_omp_declare_simd_clauses_to_numbers,
c_omp_declare_simd_clauses_to_decls): New prototypes.
* c-pragma.c (omp_pragmas): Add new OpenMP 4.0 constructs.
* c-pragma.h (enum pragma_kind): Add PRAGMA_OMP_CANCEL,
PRAGMA_OMP_CANCELLATION_POINT, PRAGMA_OMP_DECLARE_REDUCTION,
PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_END_DECLARE_TARGET, PRAGMA_OMP_SIMD,
PRAGMA_OMP_TARGET, PRAGMA_OMP_TASKGROUP and PRAGMA_OMP_TEAMS.
Remove PRAGMA_OMP_PARALLEL_FOR and PRAGMA_OMP_PARALLEL_SECTIONS.
(enum pragma_omp_clause): Add PRAGMA_OMP_CLAUSE_ALIGNED,
PRAGMA_OMP_CLAUSE_DEPEND, PRAGMA_OMP_CLAUSE_DEVICE,
PRAGMA_OMP_CLAUSE_DIST_SCHEDULE, PRAGMA_OMP_CLAUSE_FOR,
PRAGMA_OMP_CLAUSE_FROM, PRAGMA_OMP_CLAUSE_INBRANCH,
PRAGMA_OMP_CLAUSE_LINEAR, PRAGMA_OMP_CLAUSE_MAP,
PRAGMA_OMP_CLAUSE_NOTINBRANCH, PRAGMA_OMP_CLAUSE_NUM_TEAMS,
PRAGMA_OMP_CLAUSE_PARALLEL, PRAGMA_OMP_CLAUSE_PROC_BIND,
PRAGMA_OMP_CLAUSE_SAFELEN, PRAGMA_OMP_CLAUSE_SECTIONS,
PRAGMA_OMP_CLAUSE_SIMDLEN, PRAGMA_OMP_CLAUSE_TASKGROUP,
PRAGMA_OMP_CLAUSE_THREAD_LIMIT, PRAGMA_OMP_CLAUSE_TO and
PRAGMA_OMP_CLAUSE_UNIFORM.
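The new attributes and the updated _OPENMP value correspond to source like the following (an illustrative sketch, not from the patch):

  #if _OPENMP >= 201307   /* OpenMP 4.0 */
  /* Recorded as the "omp declare simd" attribute.  */
  #pragma omp declare simd uniform(x) linear(i) notinbranch
  float
  scale (float *x, int i)
  {
    return 2.0f * x[i];
  }

  /* Recorded as the "omp declare target" attribute.  */
  #pragma omp declare target
  int device_counter;
  #pragma omp end declare target
  #endif
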
gcc/ada/
* gcc-interface/utils.c (DEF_FUNCTION_TYPE_8): Define.
gcc/fortran/
* trans-openmp.c (gfc_omp_clause_default_ctor,
gfc_omp_clause_dtor): Return NULL for OMP_CLAUSE_REDUCTION.
* f95-lang.c (ATTR_NULL, DEF_FUNCTION_TYPE_8): Define.
* types.def (DEF_FUNCTION_TYPE_8): Document.
(BT_FN_VOID_OMPFN_PTR_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG,
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): Remove.
(BT_FN_VOID_OMPFN_PTR_UINT_UINT_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT,
BT_FN_BOOL_INT, BT_FN_BOOL_INT_BOOL, BT_FN_VOID_UINT_UINT,
BT_FN_VOID_INT_PTR_SIZE_PTR_PTR_PTR,
BT_FN_VOID_INT_OMPFN_PTR_SIZE_PTR_PTR_PTR,
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR): New.
gcc/lto/
* lto-lang.c (DEF_FUNCTION_TYPE_8): Define.
gcc/c/
* c-lang.h (current_omp_declare_target_attribute): New extern
decl.
* c-parser.c: Include c-lang.h.
(struct c_parser): Change tokens to c_token *.
Add tokens_buf field. Change tokens_avail type to unsigned int.
(c_parser_consume_token): If parser->tokens isn't
&parser->tokens_buf[0], increment parser->tokens.
(c_parser_consume_pragma): Likewise.
(enum pragma_context): Add pragma_struct and pragma_param.
(c_parser_external_declaration): Adjust
c_parser_declaration_or_fndef caller.
(c_parser_declaration_or_fndef): Add omp_declare_simd_clauses
argument, if it is non-vNULL vector, call c_finish_omp_declare_simd.
Adjust recursive call.
(c_parser_struct_or_union_specifier): Use pragma_struct instead
of pragma_external.
(c_parser_parameter_declaration): Use pragma_param instead of
pragma_external.
(c_parser_compound_statement_nostart, c_parser_label,
c_parser_for_statement): Adjust
c_parser_declaration_or_fndef callers.
(c_parser_expr_no_commas): Add omp_atomic_lhs argument, pass
it through to c_parser_conditional_expression.
(c_parser_conditional_expression): Add omp_atomic_lhs argument,
pass it through to c_parser_binary_expression. Adjust recursive
call.
(c_parser_binary_expression): Remove prec argument, add
omp_atomic_lhs argument instead. Always start from PREC_NONE, if
omp_atomic_lhs is non-NULL and one of the arguments of toplevel
binop matches it, use build2 instead of parser_build_binary_op.
(c_parser_pragma): Handle PRAGMA_OMP_CANCEL,
PRAGMA_OMP_CANCELLATION_POINT, PRAGMA_OMP_TARGET,
PRAGMA_OMP_END_DECLARE_TARGET, PRAGMA_OMP_DECLARE_REDUCTION.
Handle pragma_struct and pragma_param the same as pragma_external.
(c_parser_omp_clause_name): Parse new OpenMP 4.0 clause names.
(c_parser_omp_variable_list): Parse array sections for
OMP_CLAUSE_{DEPEND,MAP,TO,FROM} clauses.
(c_parser_omp_clause_collapse): Fully fold collapse expression.
(c_parser_omp_clause_reduction): Handle user defined reductions.
(c_parser_omp_clause_branch, c_parser_omp_clause_cancelkind,
c_parser_omp_clause_num_teams, c_parser_omp_clause_thread_limit,
c_parser_omp_clause_aligned, c_parser_omp_clause_linear,
c_parser_omp_clause_safelen, c_parser_omp_clause_simdlen,
c_parser_omp_clause_depend, c_parser_omp_clause_map,
c_parser_omp_clause_device, c_parser_omp_clause_dist_schedule,
c_parser_omp_clause_proc_bind, c_parser_omp_clause_to,
c_parser_omp_clause_from, c_parser_omp_clause_uniform): New functions.
(c_parser_omp_all_clauses): Add finish_p argument. Don't call
c_finish_omp_clauses if it is false. Handle new OpenMP 4.0 clauses.
(c_parser_omp_atomic): Parse seq_cst clause, pass true if it is
present to c_finish_omp_atomic. Handle OpenMP 4.0 atomic forms.
(c_parser_omp_for_loop): Add CODE argument, pass it through
to c_finish_omp_for. Change last argument to cclauses,
and adjust uses to grab parallel clauses from the array of all
the split clauses. Adjust c_parser_binary_expression,
c_parser_declaration_or_fndef and c_finish_omp_for callers.
(omp_split_clauses): New function.
(c_parser_omp_simd): New function.
(c_parser_omp_for): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined constructs,
and call c_parser_omp_simd when parsing for simd.
(c_parser_omp_sections_scope): If section-sequence doesn't start with
#pragma omp section, require exactly one structured-block instead of
sequence of statements.
(c_parser_omp_sections): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined constructs.
(c_parser_omp_parallel): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined
constructs.
(c_parser_omp_taskgroup, c_parser_omp_cancel,
c_parser_omp_cancellation_point, c_parser_omp_distribute,
c_parser_omp_teams, c_parser_omp_target_data,
c_parser_omp_target_update, c_parser_omp_target,
c_parser_omp_declare_simd, c_finish_omp_declare_simd,
c_parser_omp_declare_target, c_parser_omp_end_declare_target,
c_parser_omp_declare_reduction, c_parser_omp_declare): New functions.
(c_parser_omp_construct): Add p_name and mask vars. Handle
PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_SIMD, PRAGMA_OMP_TASKGROUP,
PRAGMA_OMP_TEAMS. Adjust c_parser_omp_for, c_parser_omp_parallel
and c_parser_omp_sections callers.
(c_parse_file): Initialize tparser.tokens and the_parser->tokens here.
(OMP_FOR_CLAUSE_MASK, OMP_SECTIONS_CLAUSE_MASK,
OMP_SINGLE_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1.
(OMP_PARALLEL_CLAUSE_MASK): Likewise. Add OMP_CLAUSE_PROC_BIND.
(OMP_TASK_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1. Add
OMP_CLAUSE_DEPEND.
(OMP_SIMD_CLAUSE_MASK, OMP_CANCEL_CLAUSE_MASK,
OMP_CANCELLATION_POINT_CLAUSE_MASK, OMP_DISTRIBUTE_CLAUSE_MASK,
OMP_TEAMS_CLAUSE_MASK, OMP_TARGET_DATA_CLAUSE_MASK,
OMP_TARGET_UPDATE_CLAUSE_MASK, OMP_TARGET_CLAUSE_MASK,
OMP_DECLARE_SIMD_CLAUSE_MASK): Define.
* c-typeck.c: Include tree-inline.h.
(c_finish_omp_cancel, c_finish_omp_cancellation_point,
handle_omp_array_sections_1, handle_omp_array_sections,
c_clone_omp_udr, c_find_omp_placeholder_r): New functions.
(c_finish_omp_clauses): Handle new OpenMP 4.0 clauses and
user defined reductions.
(c_tree_equal): New function.
* c-tree.h (temp_store_parm_decls, temp_pop_parm_decls,
c_finish_omp_cancel, c_finish_omp_cancellation_point, c_tree_equal,
c_omp_reduction_id, c_omp_reduction_decl, c_omp_reduction_lookup,
c_check_omp_declare_reduction_r): New prototypes.
* c-decl.c (current_omp_declare_target_attribute): New variable.
(c_decl_attributes): New function.
(start_decl, start_function): Use it instead of decl_attributes.
(temp_store_parm_decls, temp_pop_parm_decls, c_omp_reduction_id,
c_omp_reduction_decl, c_omp_reduction_lookup,
c_check_omp_declare_reduction_r): New functions.
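c_omp_reduction_decl, c_omp_reduction_lookup and c_check_omp_declare_reduction_r implement user defined reductions; the C form they handle looks like this (a sketch, not taken from the patch):

  #include <limits.h>

  #pragma omp declare reduction (maxint : int : \
    omp_out = omp_in > omp_out ? omp_in : omp_out) \
    initializer (omp_priv = INT_MIN)

  int
  array_max (int *a, int n)
  {
    int m = INT_MIN;
    #pragma omp parallel for reduction (maxint : m)
    for (int i = 0; i < n; i++)
      if (a[i] > m)
        m = a[i];
    return m;
  }
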
gcc/cp/
* decl.c (duplicate_decls): Error out for redeclaration of UDRs.
(declare_simd_adjust_this): New function.
(grokfndecl): If "omp declare simd" attribute is present,
call declare_simd_adjust_this if needed and
c_omp_declare_simd_clauses_to_numbers.
* cp-array-notation.c (expand_array_notation_exprs): Handle
OMP_TASKGROUP.
* cp-gimplify.c (cp_gimplify_expr): Handle OMP_SIMD and
OMP_DISTRIBUTE. Handle is_invisiref_parm decls in
OMP_CLAUSE_REDUCTION.
(cp_genericize_r): Handle OMP_SIMD and OMP_DISTRIBUTE like
OMP_FOR.
(cxx_omp_privatize_by_reference): Return true for
is_invisiref_parm decls.
(cxx_omp_finish_clause): Adjust cxx_omp_create_clause_info
caller.
* pt.c (apply_late_template_attributes): For "omp declare simd"
attribute call tsubst_omp_clauses,
c_omp_declare_simd_clauses_to_decls, finish_omp_clauses
and c_omp_declare_simd_clauses_to_numbers.
(instantiate_class_template_1): Call cp_check_omp_declare_reduction
for UDRs.
(tsubst_decl): Handle UDRs.
(tsubst_omp_clauses): Add declare_simd argument, if true don't
call finish_omp_clauses. Handle new OpenMP 4.0 clauses.
Handle non-NULL OMP_CLAUSE_REDUCTION_PLACEHOLDER on
OMP_CLAUSE_REDUCTION.
(tsubst_expr): For UDRs call pushdecl and
cp_check_omp_declare_reduction. Adjust tsubst_omp_clauses
callers. Handle OMP_SIMD, OMP_DISTRIBUTE, OMP_TEAMS,
OMP_TARGET_DATA, OMP_TARGET_UPDATE, OMP_TARGET, OMP_TASKGROUP.
Adjust finish_omp_atomic caller.
(tsubst_omp_udr): New function.
(instantiate_decl): For UDRs at block scope, don't call
start_preparsed_function/finish_function. Call tsubst_omp_udr.
* semantics.c (cxx_omp_create_clause_info): Add need_dtor argument,
use it instead of need_default_ctor || need_copy_ctor.
(struct cp_check_omp_declare_reduction_data): New type.
(handle_omp_array_sections_1, handle_omp_array_sections,
omp_reduction_id, omp_reduction_lookup,
cp_remove_omp_priv_cleanup_stmt, cp_check_omp_declare_reduction_r,
cp_check_omp_declare_reduction, clone_omp_udr,
find_omp_placeholder_r, finish_omp_reduction_clause): New functions.
(finish_omp_clauses): Handle new OpenMP 4.0 clauses and user defined
reductions.
(finish_omp_for): Add CODE argument, use it instead of hardcoded
OMP_FOR. Adjust c_finish_omp_for caller.
(finish_omp_atomic): Add seq_cst argument, adjust
c_finish_omp_atomic callers, handle seq_cst and new OpenMP 4.0
atomic variants.
(finish_omp_cancel, finish_omp_cancellation_point): New functions.
* decl2.c (mark_used): Force immediate instantiation of
DECL_OMP_DECLARE_REDUCTION_P decls.
(is_late_template_attribute): Return true for "omp declare simd"
attribute.
(cp_omp_mappable_type): New function.
(cplus_decl_attributes): Add implicit "omp declare target" attribute
if requested.
* parser.c (cp_debug_parser): Print
parser->colon_doesnt_start_class_def_p.
(cp_ensure_no_omp_declare_simd, cp_finalize_omp_declare_simd): New
functions.
(enum pragma_context): Add pragma_member and pragma_objc_icode.
(cp_parser_binary_expression): Handle no_toplevel_fold_p
even for binary operations other than comparison.
(cp_parser_linkage_specification): Call
cp_ensure_no_omp_declare_simd if needed.
(cp_parser_namespace_definition): Likewise.
(cp_parser_init_declarator): Call cp_finalize_omp_declare_simd.
(cp_parser_direct_declarator): Pass declarator to
cp_parser_late_return_type_opt.
(cp_parser_late_return_type_opt): Add declarator argument,
call cp_parser_late_parsing_omp_declare_simd for declare simd.
(cp_parser_class_specifier_1): Call cp_ensure_no_omp_declare_simd.
Parse UDRs before all other methods.
(cp_parser_member_specification_opt): Use pragma_member instead of
pragma_external.
(cp_parser_member_declaration): Call cp_finalize_omp_declare_simd.
(cp_parser_function_definition_from_specifiers_and_declarator,
cp_parser_save_member_function_body): Likewise.
(cp_parser_late_parsing_for_member): Handle UDRs specially.
(cp_parser_next_token_starts_class_definition_p): Don't allow
CPP_COLON if colon_doesnt_start_class_def_p flag is true.
(cp_parser_objc_interstitial_code): Use pragma_objc_icode
instead of pragma_external.
(cp_parser_omp_clause_name): Parse new OpenMP 4.0 clause names.
(cp_parser_omp_var_list_no_open): Parse array sections for
OMP_CLAUSE_{DEPEND,MAP,TO,FROM} clauses. Add COLON argument,
if non-NULL, allow parsing to end with a colon rather than close
paren.
(cp_parser_omp_var_list): Adjust cp_parser_omp_var_list_no_open
caller.
(cp_parser_omp_clause_reduction): Handle user defined reductions.
(cp_parser_omp_clause_branch, cp_parser_omp_clause_cancelkind,
cp_parser_omp_clause_num_teams, cp_parser_omp_clause_thread_limit,
cp_parser_omp_clause_aligned, cp_parser_omp_clause_linear,
cp_parser_omp_clause_safelen, cp_parser_omp_clause_simdlen,
cp_parser_omp_clause_depend, cp_parser_omp_clause_map,
cp_parser_omp_clause_device, cp_parser_omp_clause_dist_schedule,
cp_parser_omp_clause_proc_bind, cp_parser_omp_clause_to,
cp_parser_omp_clause_from, cp_parser_omp_clause_uniform): New
functions.
(cp_parser_omp_all_clauses): Add finish_p argument. Don't call
finish_omp_clauses if it is false. Handle new OpenMP 4.0 clauses.
(cp_parser_omp_atomic): Parse seq_cst clause, pass
true if it is present to finish_omp_atomic. Handle new OpenMP 4.0
atomic forms.
(cp_parser_omp_for_loop): Add CODE argument, pass it through
to finish_omp_for. Change last argument to cclauses,
and adjust uses to grab parallel clauses from the array of all
the split clauses.
(cp_omp_split_clauses): New function.
(cp_parser_omp_simd): New function.
(cp_parser_omp_for): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined constructs,
and call c_parser_omp_simd when parsing for simd.
(cp_parser_omp_sections_scope): If section-sequence doesn't start with
#pragma omp section, require exactly one structured-block instead of
sequence of statements.
(cp_parser_omp_sections): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined constructs.
(cp_parser_omp_parallel): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined
constructs.
(cp_parser_omp_taskgroup, cp_parser_omp_cancel,
cp_parser_omp_cancellation_point, cp_parser_omp_distribute,
cp_parser_omp_teams, cp_parser_omp_target_data,
cp_parser_omp_target_update, cp_parser_omp_target,
cp_parser_omp_declare_simd, cp_parser_late_parsing_omp_declare_simd,
cp_parser_omp_declare_target, cp_parser_omp_end_declare_target,
cp_parser_omp_declare_reduction_exprs, cp_parser_omp_declare_reduction,
cp_parser_omp_declare): New functions.
(cp_parser_omp_construct): Add p_name and mask vars. Handle
PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_SIMD, PRAGMA_OMP_TASKGROUP,
PRAGMA_OMP_TEAMS. Adjust cp_parser_omp_for, cp_parser_omp_parallel
and cp_parser_omp_sections callers.
(cp_parser_pragma): Handle PRAGMA_OMP_CANCEL,
PRAGMA_OMP_CANCELLATION_POINT, PRAGMA_OMP_DECLARE_REDUCTION,
PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_SIMD, PRAGMA_OMP_TASKGROUP,
PRAGMA_OMP_TEAMS, PRAGMA_OMP_TARGET, PRAGMA_OMP_END_DECLARE_TARGET.
Handle pragma_member and pragma_objc_icode like pragma_external.
(OMP_FOR_CLAUSE_MASK, OMP_SECTIONS_CLAUSE_MASK,
OMP_SINGLE_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1.
(OMP_PARALLEL_CLAUSE_MASK): Likewise. Add OMP_CLAUSE_PROC_BIND.
(OMP_TASK_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1. Add
OMP_CLAUSE_DEPEND.
(OMP_SIMD_CLAUSE_MASK, OMP_CANCEL_CLAUSE_MASK,
OMP_CANCELLATION_POINT_CLAUSE_MASK, OMP_DISTRIBUTE_CLAUSE_MASK,
OMP_TEAMS_CLAUSE_MASK, OMP_TARGET_DATA_CLAUSE_MASK,
OMP_TARGET_UPDATE_CLAUSE_MASK, OMP_TARGET_CLAUSE_MASK,
OMP_DECLARE_SIMD_CLAUSE_MASK): Define.
* parser.h (struct cp_omp_declare_simd_data): New type.
(struct cp_parser): Add colon_doesnt_start_class_def_p and
omp_declare_simd fields.
* cp-objcp-common.h (LANG_HOOKS_OMP_MAPPABLE_TYPE): Define.
* cp-tree.h (struct lang_decl_fn): Add omp_declare_reduction_p
bit.
(DECL_OMP_DECLARE_REDUCTION_P): Define.
(OMP_FOR_GIMPLIFYING_P): Use OMP_LOOP_CHECK macro.
(struct saved_scope): Add omp_declare_target_attribute field.
(cp_omp_mappable_type, omp_reduction_id,
cp_remove_omp_priv_cleanup_stmt, cp_check_omp_declare_reduction,
finish_omp_cancel, finish_omp_cancellation_point): New prototypes.
(finish_omp_for): Add CODE argument.
(finish_omp_atomic): Add seq_cst argument.
(cxx_omp_create_clause_info): Add need_dtor argument.
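finish_omp_atomic's new seq_cst argument covers the OpenMP 4.0 seq_cst clause; an illustrative form (not from the patch):

  int
  fetch_and_add (int *p, int v)
  {
    int old;
    /* The seq_cst clause is new in OpenMP 4.0 and requests sequentially
       consistent semantics for the atomic operation.  */
    #pragma omp atomic capture seq_cst
    { old = *p; *p += v; }
    return old;
  }
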
gcc/testsuite/
* c-c++-common/gomp/atomic-15.c: Adjust for C diagnostics.
Remove an error test for a construct that is now valid in OpenMP 4.0.
* c-c++-common/gomp/atomic-16.c: New test.
* c-c++-common/gomp/cancel-1.c: New test.
* c-c++-common/gomp/depend-1.c: New test.
* c-c++-common/gomp/depend-2.c: New test.
* c-c++-common/gomp/map-1.c: New test.
* c-c++-common/gomp/pr58472.c: New test.
* c-c++-common/gomp/sections1.c: New test.
* c-c++-common/gomp/simd1.c: New test.
* c-c++-common/gomp/simd2.c: New test.
* c-c++-common/gomp/simd3.c: New test.
* c-c++-common/gomp/simd4.c: New test.
* c-c++-common/gomp/simd5.c: New test.
* c-c++-common/gomp/single1.c: New test.
* g++.dg/gomp/block-0.C: Adjust for stricter #pragma omp sections
parser.
* g++.dg/gomp/block-3.C: Likewise.
* g++.dg/gomp/clause-3.C: Adjust error messages.
* g++.dg/gomp/declare-simd-1.C: New test.
* g++.dg/gomp/declare-simd-2.C: New test.
* g++.dg/gomp/depend-1.C: New test.
* g++.dg/gomp/depend-2.C: New test.
* g++.dg/gomp/target-1.C: New test.
* g++.dg/gomp/target-2.C: New test.
* g++.dg/gomp/taskgroup-1.C: New test.
* g++.dg/gomp/teams-1.C: New test.
* g++.dg/gomp/udr-1.C: New test.
* g++.dg/gomp/udr-2.C: New test.
* g++.dg/gomp/udr-3.C: New test.
* g++.dg/gomp/udr-4.C: New test.
* g++.dg/gomp/udr-5.C: New test.
* g++.dg/gomp/udr-6.C: New test.
* gcc.dg/autopar/outer-1.c: Expect 4 instead of 5 loopfn matches.
* gcc.dg/autopar/outer-2.c: Likewise.
* gcc.dg/autopar/outer-3.c: Likewise.
* gcc.dg/autopar/outer-4.c: Likewise.
* gcc.dg/autopar/outer-5.c: Likewise.
* gcc.dg/autopar/outer-6.c: Likewise.
* gcc.dg/autopar/parallelization-1.c: Likewise.
* gcc.dg/gomp/block-3.c: Adjust for stricter #pragma omp sections
parser.
* gcc.dg/gomp/clause-1.c: Adjust error messages.
* gcc.dg/gomp/combined-1.c: Look for GOMP_parallel_loop_runtime
instead of GOMP_parallel_loop_runtime_start.
* gcc.dg/gomp/declare-simd-1.c: New test.
* gcc.dg/gomp/declare-simd-2.c: New test.
* gcc.dg/gomp/nesting-1.c: Adjust for stricter #pragma omp sections
parser. Add further #pragma omp sections nesting tests.
* gcc.dg/gomp/target-1.c: New test.
* gcc.dg/gomp/target-2.c: New test.
* gcc.dg/gomp/taskgroup-1.c: New test.
* gcc.dg/gomp/teams-1.c: New test.
* gcc.dg/gomp/udr-1.c: New test.
* gcc.dg/gomp/udr-2.c: New test.
* gcc.dg/gomp/udr-3.c: New test.
* gcc.dg/gomp/udr-4.c: New test.
* gfortran.dg/gomp/appendix-a/a.35.5.f90: Add dg-error.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@203408 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libgomp/testsuite/libgomp.c/affinity-1.c')
-rw-r--r--   libgomp/testsuite/libgomp.c/affinity-1.c   1146
1 file changed, 1146 insertions, 0 deletions
diff --git a/libgomp/testsuite/libgomp.c/affinity-1.c b/libgomp/testsuite/libgomp.c/affinity-1.c new file mode 100644 index 00000000000..5d3e45d1df9 --- /dev/null +++ b/libgomp/testsuite/libgomp.c/affinity-1.c @@ -0,0 +1,1146 @@ +/* Affinity tests. + Copyright (C) 2013 Free Software Foundation, Inc. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 3, or (at your option) any later + version. + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + <http://www.gnu.org/licenses/>. */ + +/* { dg-do run } */ +/* { dg-set-target-env-var OMP_PROC_BIND "false" } */ +/* { dg-additional-options "-DINTERPOSE_GETAFFINITY -DDO_FORK -ldl" { target *-*-linux* } } */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include "config.h" +#include <alloca.h> +#include <omp.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +#ifdef DO_FORK +#include <signal.h> +#endif +#ifdef HAVE_PTHREAD_AFFINITY_NP +#include <sched.h> +#include <pthread.h> +#ifdef INTERPOSE_GETAFFINITY +#include <dlfcn.h> +#endif +#endif + +struct place +{ + int start, len; +}; +struct places +{ + char name[40]; + int count; + struct place places[8]; +} places_array[] = { + { "", 1, { { -1, -1 } } }, + { "{0}:8", 8, + { { 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1 }, + { 4, 1 }, { 5, 1 }, { 6, 1 }, { 7, 1 } } }, + { "{7,6}:2:-3", 2, { { 6, 2 }, { 3, 2 } } }, + { "{6,7}:4:-2,!{2,3}", 3, { { 6, 2 }, { 4, 2 }, { 0, 2 } } }, + { "{1}:7:1", 7, + { { 1, 1 }, { 2, 1 }, { 3, 1 }, + { 4, 1 }, { 5, 1 }, { 6, 1 }, { 7, 1 } } }, + { "{0,1},{3,2,4},{6,5,!6},{6},{7:2:-1,!6}", 5, + { { 0, 2 }, { 2, 3 }, { 5, 1 }, { 6, 1 }, { 7, 1 } } } +}; + +unsigned long contig_cpucount; +unsigned long min_cpusetsize; + +#if defined (HAVE_PTHREAD_AFFINITY_NP) && defined (_SC_NPROCESSORS_CONF) \ + && defined (CPU_ALLOC_SIZE) + +#if defined (RTLD_NEXT) && defined (INTERPOSE_GETAFFINITY) +int (*orig_getaffinity_np) (pthread_t, size_t, cpu_set_t *); + +int +pthread_getaffinity_np (pthread_t thread, size_t cpusetsize, cpu_set_t *cpuset) +{ + int ret; + unsigned long i, max; + if (orig_getaffinity_np == NULL) + { + orig_getaffinity_np = (int (*) (pthread_t, size_t, cpu_set_t *)) + dlsym (RTLD_NEXT, "pthread_getaffinity_np"); + if (orig_getaffinity_np == NULL) + exit (0); + } + ret = orig_getaffinity_np (thread, cpusetsize, cpuset); + if (ret != 0) + return ret; + if (contig_cpucount == 0) + { + max = 8 * cpusetsize; + for (i = 0; i < max; i++) + if (!CPU_ISSET_S (i, cpusetsize, cpuset)) + break; + contig_cpucount = i; + min_cpusetsize = cpusetsize; + } + return ret; +} +#endif + +void +print_affinity (struct place p) +{ + static unsigned long size; + if (size == 0) + { + if (min_cpusetsize) + size = min_cpusetsize; + else + { + size = sysconf (_SC_NPROCESSORS_CONF); + size = CPU_ALLOC_SIZE (size); + if (size < sizeof (cpu_set_t)) + size = sizeof (cpu_set_t); + } + } + cpu_set_t *cpusetp = (cpu_set_t *) alloca (size); + if (pthread_getaffinity_np (pthread_self (), size, cpusetp) == 0) + { + unsigned long i, len, max = 8 * size; + int notfirst = 0, unexpected = 1; + + printf (" bound to {"); + for (i = 0, len = 0; i < 
max; i++) + if (CPU_ISSET_S (i, size, cpusetp)) + { + if (len == 0) + { + if (notfirst) + { + unexpected = 1; + printf (","); + } + else if (i == (unsigned long) p.start) + unexpected = 0; + notfirst = 1; + printf ("%lu", i); + } + ++len; + } + else + { + if (len && len != (unsigned long) p.len) + unexpected = 1; + if (len > 1) + printf (":%lu", len); + len = 0; + } + if (len && len != (unsigned long) p.len) + unexpected = 1; + if (len > 1) + printf (":%lu", len); + printf ("}"); + if (p.start != -1 && unexpected) + { + printf (", expected {%d", p.start); + if (p.len != 1) + printf (":%d", p.len); + printf ("} instead"); + } + else if (p.start != -1) + printf (", verified"); + } +} +#else +void +print_affinity (struct place p) +{ + (void) p.start; + (void) p.len; +} +#endif + + +int +main () +{ + char *env_proc_bind = getenv ("OMP_PROC_BIND"); + int test_false = env_proc_bind && strcmp (env_proc_bind, "false") == 0; + int test_true = env_proc_bind && strcmp (env_proc_bind, "true") == 0; + int test_spread_master_close + = env_proc_bind && strcmp (env_proc_bind, "spread,master,close") == 0; + char *env_places = getenv ("OMP_PLACES"); + int test_places = 0; + +#ifdef DO_FORK + if (env_places == NULL && contig_cpucount >= 8 && test_false + && getenv ("GOMP_AFFINITY") == NULL) + { + int i, j, status; + pid_t pid; + for (j = 0; j < 2; j++) + { + if (setenv ("OMP_PROC_BIND", j ? "spread,master,close" : "true", 1) + < 0) + break; + for (i = sizeof (places_array) / sizeof (places_array[0]) - 1; + i; --i) + { + if (setenv ("OMP_PLACES", places_array[i].name, 1) < 0) + break; + pid = fork (); + if (pid == -1) + break; + if (pid == 0) + { + execl ("/proc/self/exe", "affinity-1.exe", NULL); + _exit (1); + } + if (waitpid (pid, &status, 0) < 0) + break; + if (WIFSIGNALED (status) && WTERMSIG (status) == SIGABRT) + abort (); + else if (!WIFEXITED (status) || WEXITSTATUS (status) != 0) + break; + } + if (i) + break; + } + } +#endif + + int first = 1; + if (env_proc_bind) + { + printf ("OMP_PROC_BIND='%s'", env_proc_bind); + first = 0; + } + if (env_places) + printf ("%sOMP_PLACES='%s'", first ? 
"" : " ", env_places); + printf ("\n"); + + if (env_places && contig_cpucount >= 8 + && (test_true || test_spread_master_close)) + { + for (test_places = sizeof (places_array) / sizeof (places_array[0]) - 1; + test_places; --test_places) + if (strcmp (env_places, places_array[test_places].name) == 0) + break; + } + +#define verify(if_true, if_s_m_c) \ + if (test_false && omp_get_proc_bind () != omp_proc_bind_false) \ + abort (); \ + if (test_true && omp_get_proc_bind () != if_true) \ + abort (); \ + if (test_spread_master_close && omp_get_proc_bind () != if_s_m_c) \ + abort (); + + verify (omp_proc_bind_true, omp_proc_bind_spread); + + printf ("Initial thread"); + print_affinity (places_array[test_places].places[0]); + printf ("\n"); + omp_set_nested (1); + omp_set_dynamic (0); + + #pragma omp parallel if (0) + { + verify (omp_proc_bind_true, omp_proc_bind_master); + #pragma omp parallel if (0) + { + verify (omp_proc_bind_true, omp_proc_bind_close); + #pragma omp parallel if (0) + { + verify (omp_proc_bind_true, omp_proc_bind_close); + } + #pragma omp parallel if (0) proc_bind (spread) + { + verify (omp_proc_bind_spread, omp_proc_bind_spread); + } + } + #pragma omp parallel if (0) proc_bind (master) + { + verify (omp_proc_bind_master, omp_proc_bind_close); + #pragma omp parallel if (0) + { + verify (omp_proc_bind_master, omp_proc_bind_close); + } + #pragma omp parallel if (0) proc_bind (spread) + { + verify (omp_proc_bind_spread, omp_proc_bind_spread); + } + } + } + + /* True/spread */ + #pragma omp parallel num_threads (4) + { + verify (omp_proc_bind_true, omp_proc_bind_master); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#1 thread %d", thr); + if (omp_get_num_threads () == 4 && test_spread_master_close) + switch (places_array[test_places].count) + { + case 8: + /* T = 4, P = 8, each subpartition has 2 places. */ + case 7: + /* T = 4, P = 7, each subpartition has 2 places, but + last partition, which has just one place. */ + p = places_array[test_places].places[2 * thr]; + break; + case 5: + /* T = 4, P = 5, first subpartition has 2 places, the + rest just one. */ + p = places_array[test_places].places[thr ? 1 + thr : 0]; + break; + case 3: + /* T = 4, P = 3, unit sized subpartitions, first gets + thr0 and thr3, second thr1, third thr2. */ + p = places_array[test_places].places[thr == 3 ? 0 : thr]; + break; + case 2: + /* T = 4, P = 2, unit sized subpartitions, each with + 2 threads. */ + p = places_array[test_places].places[thr / 2]; + break; + } + print_affinity (p); + printf ("\n"); + } + #pragma omp barrier + if (omp_get_thread_num () == 3) + { + /* True/spread, true/master. */ + #pragma omp parallel num_threads (3) + { + verify (omp_proc_bind_true, omp_proc_bind_close); + #pragma omp critical + { + struct place p = places_array[0].places[0]; + int thr = omp_get_thread_num (); + printf ("#1,#1 thread 3,%d", thr); + if (omp_get_num_threads () == 3 && test_spread_master_close) + /* Outer is spread, inner master, so just bind to the + place or the master thread, which is thr 3 above. */ + switch (places_array[test_places].count) + { + case 8: + case 7: + p = places_array[test_places].places[6]; + break; + case 5: + p = places_array[test_places].places[4]; + break; + case 3: + p = places_array[test_places].places[0]; + break; + case 2: + p = places_array[test_places].places[1]; + break; + } + print_affinity (p); + printf ("\n"); + } + } + /* True/spread, spread. 
+        #pragma omp parallel num_threads (5) proc_bind (spread)
+        {
+          verify (omp_proc_bind_spread, omp_proc_bind_close);
+          #pragma omp critical
+          {
+            struct place p = places_array[0].places[0];
+            int thr = omp_get_thread_num ();
+            printf ("#1,#2 thread 3,%d", thr);
+            if (omp_get_num_threads () == 5 && test_spread_master_close)
+              /* Outer is spread, inner spread. */
+              switch (places_array[test_places].count)
+                {
+                case 8:
+                  /* T = 5, P = 2, unit sized subpartitions. */
+                  p = places_array[test_places].places[thr == 4 ? 6
+                                                       : 6 + thr / 2];
+                  break;
+                /* The rest are T = 5, P = 1. */
+                case 7:
+                  p = places_array[test_places].places[6];
+                  break;
+                case 5:
+                  p = places_array[test_places].places[4];
+                  break;
+                case 3:
+                  p = places_array[test_places].places[0];
+                  break;
+                case 2:
+                  p = places_array[test_places].places[1];
+                  break;
+                }
+            print_affinity (p);
+            printf ("\n");
+          }
+          #pragma omp barrier
+          if (omp_get_thread_num () == 3)
+            {
+              /* True/spread, spread, close. */
+              #pragma omp parallel num_threads (5) proc_bind (close)
+              {
+                verify (omp_proc_bind_close, omp_proc_bind_close);
+                #pragma omp critical
+                {
+                  struct place p = places_array[0].places[0];
+                  int thr = omp_get_thread_num ();
+                  printf ("#1,#2,#1 thread 3,3,%d", thr);
+                  if (omp_get_num_threads () == 5 && test_spread_master_close)
+                    /* Outer is spread, inner spread, innermost close. */
+                    switch (places_array[test_places].count)
+                      {
+                      /* All are T = 5, P = 1. */
+                      case 8:
+                        p = places_array[test_places].places[7];
+                        break;
+                      case 7:
+                        p = places_array[test_places].places[6];
+                        break;
+                      case 5:
+                        p = places_array[test_places].places[4];
+                        break;
+                      case 3:
+                        p = places_array[test_places].places[0];
+                        break;
+                      case 2:
+                        p = places_array[test_places].places[1];
+                        break;
+                      }
+                  print_affinity (p);
+                  printf ("\n");
+                }
+              }
+            }
+        }
+        /* True/spread, master. */
+        #pragma omp parallel num_threads (4) proc_bind(master)
+        {
+          verify (omp_proc_bind_master, omp_proc_bind_close);
+          #pragma omp critical
+          {
+            struct place p = places_array[0].places[0];
+            int thr = omp_get_thread_num ();
+            printf ("#1,#3 thread 3,%d", thr);
+            if (omp_get_num_threads () == 4 && test_spread_master_close)
+              /* Outer is spread, inner master, so just bind to the
+                 place or the master thread, which is thr 3 above. */
+              switch (places_array[test_places].count)
+                {
+                case 8:
+                case 7:
+                  p = places_array[test_places].places[6];
+                  break;
+                case 5:
+                  p = places_array[test_places].places[4];
+                  break;
+                case 3:
+                  p = places_array[test_places].places[0];
+                  break;
+                case 2:
+                  p = places_array[test_places].places[1];
+                  break;
+                }
+            print_affinity (p);
+            printf ("\n");
+          }
+        }
+        /* True/spread, close. */
+        #pragma omp parallel num_threads (6) proc_bind (close)
+        {
+          verify (omp_proc_bind_close, omp_proc_bind_close);
+          #pragma omp critical
+          {
+            struct place p = places_array[0].places[0];
+            int thr = omp_get_thread_num ();
+            printf ("#1,#4 thread 3,%d", thr);
+            if (omp_get_num_threads () == 6 && test_spread_master_close)
+              /* Outer is spread, inner close. */
+              switch (places_array[test_places].count)
+                {
+                case 8:
+                  /* T = 6, P = 2, unit sized subpartitions. */
+                  p = places_array[test_places].places[6 + thr / 3];
+                  break;
+                /* The rest are T = 6, P = 1. */
+                case 7:
+                  p = places_array[test_places].places[6];
+                  break;
+                case 5:
+                  p = places_array[test_places].places[4];
+                  break;
+                case 3:
+                  p = places_array[test_places].places[0];
+                  break;
+                case 2:
+                  p = places_array[test_places].places[1];
+                  break;
+                }
+            print_affinity (p);
+            printf ("\n");
+          }
+        }
+      }
+  }
+
+  /* Spread. */
+  #pragma omp parallel num_threads (5) proc_bind(spread)
+  {
+    verify (omp_proc_bind_spread, omp_proc_bind_master);
+    #pragma omp critical
+    {
+      struct place p = places_array[0].places[0];
+      int thr = omp_get_thread_num ();
+      printf ("#2 thread %d", thr);
+      if (omp_get_num_threads () == 5
+          && (test_spread_master_close || test_true))
+        switch (places_array[test_places].count)
+          {
+          case 8:
+            /* T = 5, P = 8, first 3 subpartitions have 2 places, last
+               2 one place. */
+            p = places_array[test_places].places[thr < 3 ? 2 * thr : 3 + thr];
+            break;
+          case 7:
+            /* T = 5, P = 7, first 2 subpartitions have 2 places, last
+               3 one place. */
+            p = places_array[test_places].places[thr < 2 ? 2 * thr : 2 + thr];
+            break;
+          case 5:
+            /* T = 5, P = 5, unit sized subpartitions, each one with one
+               thread. */
+            p = places_array[test_places].places[thr];
+            break;
+          case 3:
+            /* T = 5, P = 3, unit sized subpartitions, first gets
+               thr0 and thr3, second thr1 and thr4, third thr2. */
+            p = places_array[test_places].places[thr >= 3 ? thr - 3 : thr];
+            break;
+          case 2:
+            /* T = 5, P = 2, unit sized subpartitions, first with
+               thr{0,1,4} and second with thr{2,3}. */
+            p = places_array[test_places].places[thr == 4 ? 0 : thr / 2];
+            break;
+          }
+      print_affinity (p);
+      printf ("\n");
+    }
+    #pragma omp barrier
+    if (omp_get_thread_num () == 3)
+      {
+        int pp = 0;
+        switch (places_array[test_places].count)
+          {
+          case 8: pp = 6; break;
+          case 7: pp = 5; break;
+          case 5: pp = 3; break;
+          case 2: pp = 1; break;
+          }
+        /* Spread, spread/master. */
+        #pragma omp parallel num_threads (3) firstprivate (pp)
+        {
+          verify (omp_proc_bind_spread, omp_proc_bind_close);
+          #pragma omp critical
+          {
+            struct place p = places_array[0].places[0];
+            int thr = omp_get_thread_num ();
+            printf ("#2,#1 thread 3,%d", thr);
+            if (test_spread_master_close || test_true)
+              /* Outer is spread, inner spread resp. master, bit we have
+                 just unit sized partitions. */
+              p = places_array[test_places].places[pp];
+            print_affinity (p);
+            printf ("\n");
+          }
+        }
+        /* Spread, spread. */
+        #pragma omp parallel num_threads (5) proc_bind (spread) \
+                    firstprivate (pp)
+        {
+          verify (omp_proc_bind_spread, omp_proc_bind_close);
+          #pragma omp critical
+          {
+            struct place p = places_array[0].places[0];
+            int thr = omp_get_thread_num ();
+            printf ("#2,#2 thread 3,%d", thr);
+            if (test_spread_master_close || test_true)
+              /* Outer is spread, inner spread, bit we have
+                 just unit sized partitions. */
+              p = places_array[test_places].places[pp];
+            print_affinity (p);
+            printf ("\n");
+          }
+        }
+        /* Spread, master. */
+        #pragma omp parallel num_threads (4) proc_bind(master) \
+                    firstprivate(pp)
+        {
+          verify (omp_proc_bind_master, omp_proc_bind_close);
+          #pragma omp critical
+          {
+            struct place p = places_array[0].places[0];
+            int thr = omp_get_thread_num ();
+            printf ("#2,#3 thread 3,%d", thr);
+            if (test_spread_master_close || test_true)
+              /* Outer is spread, inner master, bit we have
+                 just unit sized partitions. */
+              p = places_array[test_places].places[pp];
+            print_affinity (p);
+            printf ("\n");
+          }
+        }
+        /* Spread, close. */
+        #pragma omp parallel num_threads (6) proc_bind (close) \
+                    firstprivate (pp)
+        {
+          verify (omp_proc_bind_close, omp_proc_bind_close);
+          #pragma omp critical
+          {
+            struct place p = places_array[0].places[0];
+            int thr = omp_get_thread_num ();
+            printf ("#2,#4 thread 3,%d", thr);
+            if (test_spread_master_close || test_true)
+              /* Outer is spread, inner close, bit we have
+                 just unit sized partitions. */
+              p = places_array[test_places].places[pp];
+            print_affinity (p);
+            printf ("\n");
+          }
+        }
+      }
+  }
+
+  /* Master. */
+  #pragma omp parallel num_threads (3) proc_bind(master)
+  {
+    verify (omp_proc_bind_master, omp_proc_bind_master);
+    #pragma omp critical
+    {
+      struct place p = places_array[0].places[0];
+      int thr = omp_get_thread_num ();
+      printf ("#3 thread %d", thr);
+      if (test_spread_master_close || test_true)
+        p = places_array[test_places].places[0];
+      print_affinity (p);
+      printf ("\n");
+    }
+    #pragma omp barrier
+    if (omp_get_thread_num () == 2)
+      {
+        /* Master, master. */
+        #pragma omp parallel num_threads (4)
+        {
+          verify (omp_proc_bind_master, omp_proc_bind_close);
+          #pragma omp critical
+          {
+            struct place p = places_array[0].places[0];
+            int thr = omp_get_thread_num ();
+            printf ("#3,#1 thread 2,%d", thr);
+            if (test_spread_master_close || test_true)
+              /* Outer is master, inner is master. */
+              p = places_array[test_places].places[0];
+            print_affinity (p);
+            printf ("\n");
+          }
+        }
+        /* Master, spread. */
+        #pragma omp parallel num_threads (4) proc_bind (spread)
+        {
+          verify (omp_proc_bind_spread, omp_proc_bind_close);
+          #pragma omp critical
+          {
+            struct place p = places_array[0].places[0];
+            int thr = omp_get_thread_num ();
+            printf ("#3,#2 thread 2,%d", thr);
+            if (omp_get_num_threads () == 4
+                && (test_spread_master_close || test_true))
+              /* Outer is master, inner is spread. */
+              switch (places_array[test_places].count)
+                {
+                case 8:
+                  /* T = 4, P = 8, each subpartition has 2 places. */
+                case 7:
+                  /* T = 4, P = 7, each subpartition has 2 places, but
+                     last partition, which has just one place. */
+                  p = places_array[test_places].places[2 * thr];
+                  break;
+                case 5:
+                  /* T = 4, P = 5, first subpartition has 2 places, the
+                     rest just one. */
+                  p = places_array[test_places].places[thr ? 1 + thr : 0];
+                  break;
+                case 3:
+                  /* T = 4, P = 3, unit sized subpartitions, first gets
+                     thr0 and thr3, second thr1, third thr2. */
+                  p = places_array[test_places].places[thr == 3 ? 0 : thr];
+                  break;
+                case 2:
+                  /* T = 4, P = 2, unit sized subpartitions, each with
+                     2 threads. */
+                  p = places_array[test_places].places[thr / 2];
+                  break;
+                }
+            print_affinity (p);
+            printf ("\n");
+          }
+          #pragma omp barrier
+          if (omp_get_thread_num () == 0)
+            {
+              /* Master, spread, close. */
+              #pragma omp parallel num_threads (5) proc_bind (close)
+              {
+                verify (omp_proc_bind_close, omp_proc_bind_close);
+                #pragma omp critical
+                {
+                  struct place p = places_array[0].places[0];
+                  int thr = omp_get_thread_num ();
+                  printf ("#3,#2,#1 thread 2,0,%d", thr);
+                  if (omp_get_num_threads () == 5
+                      && (test_spread_master_close || test_true))
+                    /* Outer is master, inner spread, innermost close. */
+                    switch (places_array[test_places].count)
+                      {
+                      /* First 3 are T = 5, P = 2. */
+                      case 8:
+                      case 7:
+                      case 5:
+                        p = places_array[test_places].places[(thr & 2) / 2];
+                        break;
+                      /* All the rest are T = 5, P = 1. */
+                      case 3:
+                      case 2:
+                        p = places_array[test_places].places[0];
+                        break;
+                      }
+                  print_affinity (p);
+                  printf ("\n");
+                }
+              }
+            }
+          #pragma omp barrier
+          if (omp_get_thread_num () == 3)
+            {
+              /* Master, spread, close. */
+              #pragma omp parallel num_threads (5) proc_bind (close)
+              {
+                verify (omp_proc_bind_close, omp_proc_bind_close);
+                #pragma omp critical
+                {
+                  struct place p = places_array[0].places[0];
+                  int thr = omp_get_thread_num ();
+                  printf ("#3,#2,#2 thread 2,3,%d", thr);
+                  if (omp_get_num_threads () == 5
+                      && (test_spread_master_close || test_true))
+                    /* Outer is master, inner spread, innermost close. */
+                    switch (places_array[test_places].count)
+                      {
+                      case 8:
+                        /* T = 5, P = 2. */
+                        p = places_array[test_places].places[6
+                                                             + (thr & 2) / 2];
+                        break;
+                      /* All the rest are T = 5, P = 1. */
+                      case 7:
+                        p = places_array[test_places].places[6];
+                        break;
+                      case 5:
+                        p = places_array[test_places].places[4];
+                        break;
+                      case 3:
+                        p = places_array[test_places].places[0];
+                        break;
+                      case 2:
+                        p = places_array[test_places].places[1];
+                        break;
+                      }
+                  print_affinity (p);
+                  printf ("\n");
+                }
+              }
+            }
+        }
+        /* Master, master. */
+        #pragma omp parallel num_threads (4) proc_bind(master)
+        {
+          verify (omp_proc_bind_master, omp_proc_bind_close);
+          #pragma omp critical
+          {
+            struct place p = places_array[0].places[0];
+            int thr = omp_get_thread_num ();
+            printf ("#3,#3 thread 2,%d", thr);
+            if (test_spread_master_close || test_true)
+              /* Outer is master, inner master. */
+              p = places_array[test_places].places[0];
+            print_affinity (p);
+            printf ("\n");
+          }
+        }
+        /* Master, close. */
+        #pragma omp parallel num_threads (6) proc_bind (close)
+        {
+          verify (omp_proc_bind_close, omp_proc_bind_close);
+          #pragma omp critical
+          {
+            struct place p = places_array[0].places[0];
+            int thr = omp_get_thread_num ();
+            printf ("#3,#4 thread 2,%d", thr);
+            if (omp_get_num_threads () == 6
+                && (test_spread_master_close || test_true))
+              switch (places_array[test_places].count)
+                {
+                case 8:
+                  /* T = 6, P = 8. */
+                case 7:
+                  /* T = 6, P = 7. */
+                  p = places_array[test_places].places[thr];
+                  break;
+                case 5:
+                  /* T = 6, P = 5. thr{0,5} go into the first place. */
+                  p = places_array[test_places].places[thr == 5 ? 0 : thr];
+                  break;
+                case 3:
+                  /* T = 6, P = 3, two threads into each place. */
+                  p = places_array[test_places].places[thr / 2];
+                  break;
+                case 2:
+                  /* T = 6, P = 2, 3 threads into each place. */
+                  p = places_array[test_places].places[thr / 3];
+                  break;
+                }
+            print_affinity (p);
+            printf ("\n");
+          }
+        }
+      }
+  }
+
+  #pragma omp parallel num_threads (5) proc_bind(close)
+  {
+    verify (omp_proc_bind_close, omp_proc_bind_master);
+    #pragma omp critical
+    {
+      struct place p = places_array[0].places[0];
+      int thr = omp_get_thread_num ();
+      printf ("#4 thread %d", thr);
+      if (omp_get_num_threads () == 5
+          && (test_spread_master_close || test_true))
+        switch (places_array[test_places].count)
+          {
+          case 8:
+            /* T = 5, P = 8. */
+          case 7:
+            /* T = 5, P = 7. */
+          case 5:
+            /* T = 5, P = 5. */
+            p = places_array[test_places].places[thr];
+            break;
+          case 3:
+            /* T = 5, P = 3, thr{0,3} in first place, thr{1,4} in second,
+               thr2 in third. */
+            p = places_array[test_places].places[thr >= 3 ? thr - 3 : thr];
+            break;
+          case 2:
+            /* T = 5, P = 2, thr{0,1,4} in first place, thr{2,3} in second. */
+            p = places_array[test_places].places[thr == 4 ? 0 : thr / 2];
+            break;
+          }
+      print_affinity (p);
+      printf ("\n");
+    }
+    #pragma omp barrier
+    if (omp_get_thread_num () == 2)
+      {
+        int pp = 0;
+        switch (places_array[test_places].count)
+          {
+          case 8:
+          case 7:
+          case 5:
+          case 3:
+            pp = 2;
+            break;
+          case 2:
+            pp = 1;
+            break;
+          }
+        /* Close, close/master. */
+        #pragma omp parallel num_threads (4) firstprivate (pp)
+        {
+          verify (omp_proc_bind_close, omp_proc_bind_close);
+          #pragma omp critical
+          {
+            struct place p = places_array[0].places[0];
+            int thr = omp_get_thread_num ();
+            printf ("#4,#1 thread 2,%d", thr);
+            if (test_spread_master_close)
+              /* Outer is close, inner is master. */
+              p = places_array[test_places].places[pp];
+            else if (omp_get_num_threads () == 4 && test_true)
+              /* Outer is close, inner is close. */
+              switch (places_array[test_places].count)
+                {
+                case 8:
+                  /* T = 4, P = 8. */
+                case 7:
+                  /* T = 4, P = 7. */
+                  p = places_array[test_places].places[2 + thr];
+                  break;
+                case 5:
+                  /* T = 4, P = 5. There is wrap-around for thr3. */
+                  p = places_array[test_places].places[thr == 3 ? 0 : 2 + thr];
+                  break;
+                case 3:
+                  /* T = 4, P = 3, thr{0,3} go into p2, thr1 into p0, thr2
+                     into p1. */
+                  p = places_array[test_places].places[(2 + thr) % 3];
+                  break;
+                case 2:
+                  /* T = 4, P = 2, 2 threads into each place. */
+                  p = places_array[test_places].places[1 - thr / 2];
+                  break;
+                }
+
+            print_affinity (p);
+            printf ("\n");
+          }
+        }
+        /* Close, spread. */
+        #pragma omp parallel num_threads (4) proc_bind (spread)
+        {
+          verify (omp_proc_bind_spread, omp_proc_bind_close);
+          #pragma omp critical
+          {
+            struct place p = places_array[0].places[0];
+            int thr = omp_get_thread_num ();
+            printf ("#4,#2 thread 2,%d", thr);
+            if (omp_get_num_threads () == 4
+                && (test_spread_master_close || test_true))
+              /* Outer is close, inner is spread. */
+              switch (places_array[test_places].count)
+                {
+                case 8:
+                  /* T = 4, P = 8, each subpartition has 2 places. */
+                case 7:
+                  /* T = 4, P = 7, each subpartition has 2 places, but
+                     last partition, which has just one place. */
+                  p = places_array[test_places].places[thr == 3 ? 0
+                                                       : 2 + 2 * thr];
+                  break;
+                case 5:
+                  /* T = 4, P = 5, first subpartition has 2 places, the
+                     rest just one. */
+                  p = places_array[test_places].places[thr == 3 ? 0
+                                                       : 2 + thr];
+                  break;
+                case 3:
+                  /* T = 4, P = 3, unit sized subpartitions, third gets
+                     thr0 and thr3, first thr1, second thr2. */
+                  p = places_array[test_places].places[thr == 0 ? 2 : thr - 1];
+                  break;
+                case 2:
+                  /* T = 4, P = 2, unit sized subpartitions, each with
+                     2 threads. */
+                  p = places_array[test_places].places[1 - thr / 2];
+                  break;
+                }
+            print_affinity (p);
+            printf ("\n");
+          }
+          #pragma omp barrier
+          if (omp_get_thread_num () == 0)
+            {
+              /* Close, spread, close. */
+              #pragma omp parallel num_threads (5) proc_bind (close)
+              {
+                verify (omp_proc_bind_close, omp_proc_bind_close);
+                #pragma omp critical
+                {
+                  struct place p = places_array[0].places[0];
+                  int thr = omp_get_thread_num ();
+                  printf ("#4,#2,#1 thread 2,0,%d", thr);
+                  if (omp_get_num_threads () == 5
+                      && (test_spread_master_close || test_true))
+                    /* Outer is close, inner spread, innermost close. */
+                    switch (places_array[test_places].count)
+                      {
+                      case 8:
+                      case 7:
+                        /* T = 5, P = 2. */
+                        p = places_array[test_places].places[2
+                                                             + (thr & 2) / 2];
+                        break;
+                      /* All the rest are T = 5, P = 1. */
+                      case 5:
+                      case 3:
+                        p = places_array[test_places].places[2];
+                        break;
+                      case 2:
+                        p = places_array[test_places].places[1];
+                        break;
+                      }
+                  print_affinity (p);
+                  printf ("\n");
+                }
+              }
+            }
+          #pragma omp barrier
+          if (omp_get_thread_num () == 2)
+            {
+              /* Close, spread, close. */
+              #pragma omp parallel num_threads (5) proc_bind (close)
+              {
+                verify (omp_proc_bind_close, omp_proc_bind_close);
+                #pragma omp critical
+                {
+                  struct place p = places_array[0].places[0];
+                  int thr = omp_get_thread_num ();
+                  printf ("#4,#2,#2 thread 2,2,%d", thr);
+                  if (omp_get_num_threads () == 5
+                      && (test_spread_master_close || test_true))
+                    /* Outer is close, inner spread, innermost close. */
+                    switch (places_array[test_places].count)
+                      {
+                      case 8:
+                        /* T = 5, P = 2. */
+                        p = places_array[test_places].places[6
+                                                             + (thr & 2) / 2];
+                        break;
+                      /* All the rest are T = 5, P = 1. */
+                      case 7:
+                        p = places_array[test_places].places[6];
+                        break;
+                      case 5:
+                        p = places_array[test_places].places[4];
+                        break;
+                      case 3:
+                        p = places_array[test_places].places[1];
+                        break;
+                      case 2:
+                        p = places_array[test_places].places[0];
+                        break;
+                      }
+                  print_affinity (p);
+                  printf ("\n");
+                }
+              }
+            }
+          #pragma omp barrier
+          if (omp_get_thread_num () == 3)
+            {
+              /* Close, spread, close. */
+              #pragma omp parallel num_threads (5) proc_bind (close)
+              {
+                verify (omp_proc_bind_close, omp_proc_bind_close);
+                #pragma omp critical
+                {
+                  struct place p = places_array[0].places[0];
+                  int thr = omp_get_thread_num ();
+                  printf ("#4,#2,#3 thread 2,3,%d", thr);
+                  if (omp_get_num_threads () == 5
+                      && (test_spread_master_close || test_true))
+                    /* Outer is close, inner spread, innermost close. */
+                    switch (places_array[test_places].count)
+                      {
+                      case 8:
+                      case 7:
+                      case 5:
+                        /* T = 5, P = 2. */
+                        p = places_array[test_places].places[(thr & 2) / 2];
+                        break;
+                      /* All the rest are T = 5, P = 1. */
+                      case 3:
+                        p = places_array[test_places].places[2];
+                        break;
+                      case 2:
+                        p = places_array[test_places].places[0];
+                        break;
+                      }
+                  print_affinity (p);
+                  printf ("\n");
+                }
+              }
+            }
+        }
+        /* Close, master. */
+        #pragma omp parallel num_threads (4) proc_bind(master) \
+                    firstprivate (pp)
+        {
+          verify (omp_proc_bind_master, omp_proc_bind_close);
+          #pragma omp critical
+          {
+            struct place p = places_array[0].places[0];
+            int thr = omp_get_thread_num ();
+            printf ("#4,#3 thread 2,%d", thr);
+            if (test_spread_master_close || test_true)
+              /* Outer is close, inner master. */
+              p = places_array[test_places].places[pp];
+            print_affinity (p);
+            printf ("\n");
+          }
+        }
+        /* Close, close. */
+        #pragma omp parallel num_threads (6) proc_bind (close)
+        {
+          verify (omp_proc_bind_close, omp_proc_bind_close);
+          #pragma omp critical
+          {
+            struct place p = places_array[0].places[0];
+            int thr = omp_get_thread_num ();
+            printf ("#4,#4 thread 2,%d", thr);
+            if (omp_get_num_threads () == 6
+                && (test_spread_master_close || test_true))
+              switch (places_array[test_places].count)
+                {
+                case 8:
+                  /* T = 6, P = 8. */
+                  p = places_array[test_places].places[2 + thr];
+                  break;
+                case 7:
+                  /* T = 6, P = 7. */
+                  p = places_array[test_places].places[thr == 5 ? 0 : 2 + thr];
+                  break;
+                case 5:
+                  /* T = 6, P = 5. thr{0,5} go into the third place. */
+                  p = places_array[test_places].places[thr >= 3 ? thr - 3
+                                                       : 2 + thr];
+                  break;
+                case 3:
+                  /* T = 6, P = 3, two threads into each place. */
+                  p = places_array[test_places].places[thr < 2 ? 2
+                                                       : thr / 2 - 1];
+                  break;
+                case 2:
+                  /* T = 6, P = 2, 3 threads into each place. */
+                  p = places_array[test_places].places[1 - thr / 3];
+                  break;
+                }
+            print_affinity (p);
+            printf ("\n");
+          }
+        }
+      }
+  }
+
+  return 0;
+}
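Editor's note: the expected-place indices that the test checks (for example places[2 * thr] for 4 threads spread over 8 or 7 places) all follow the ceil/floor subpartition split described in the test's own comments: with T threads spread over P places and T <= P, the first P mod T subpartitions get one extra place and each thread is bound to the first place of its subpartition. The stand-alone sketch below, which is not part of the commit, reproduces that arithmetic; the helper name spread_start_place and the sample (T, P) pairs are assumptions chosen for illustration only.

/* Illustrative sketch only (editor's addition, not part of the commit):
   reproduce the "spread" place arithmetic that the test's comments
   describe for T threads over P places when T <= P.  */
#include <stdio.h>

/* First place index of thread THR's subpartition: the first P % T
   subpartitions get ceil (P / T) places, the remaining ones get
   floor (P / T), and each thread lands on the first place of its
   subpartition.  */
static int
spread_start_place (int t, int p, int thr)
{
  int small = p / t, big = small + 1, nbig = p % t;
  return thr < nbig ? thr * big : nbig * big + (thr - nbig) * small;
}

int
main (void)
{
  /* Sample (T, P) pairs matching the cases spelled out in the test's
     comments, e.g. T = 4, P = 7 gives subpartitions of 2, 2, 2 and 1
     places, so the masters sit on places 0, 2, 4, 6, which is exactly
     the places[2 * thr] index the test verifies.  */
  static const int cases[][2] = { { 4, 8 }, { 4, 7 }, { 4, 5 },
                                  { 5, 8 }, { 5, 7 } };
  for (unsigned i = 0; i < sizeof (cases) / sizeof (cases[0]); i++)
    {
      int t = cases[i][0], p = cases[i][1];
      printf ("T = %d, P = %d:", t, p);
      for (int thr = 0; thr < t; thr++)
        printf (" %d", spread_start_place (t, p, thr));
      printf ("\n");
    }
  return 0;
}

Running the sketch prints, for each sample pair, the starting place index of every thread; these match the indices the test compares against places_array when OMP_PROC_BIND selects spread.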