Mercurial > hg > CbC > CbC_gcc
diff libgomp/parallel.c @ 145:1830386684a0
gcc-9.2.0
author | anatofuz |
---|---|
date | Thu, 13 Feb 2020 11:34:05 +0900 |
parents | 84e7813d76e9 |
children |
line wrap: on
line diff
--- a/libgomp/parallel.c Thu Oct 25 07:37:49 2018 +0900 +++ b/libgomp/parallel.c Thu Feb 13 11:34:05 2020 +0900 @@ -1,4 +1,4 @@ -/* Copyright (C) 2005-2018 Free Software Foundation, Inc. +/* Copyright (C) 2005-2020 Free Software Foundation, Inc. Contributed by Richard Henderson <rth@redhat.com>. This file is part of the GNU Offloading and Multi Processing Library @@ -123,7 +123,8 @@ GOMP_parallel_start (void (*fn) (void *), void *data, unsigned num_threads) { num_threads = gomp_resolve_num_threads (num_threads, 0); - gomp_team_start (fn, data, num_threads, 0, gomp_new_team (num_threads)); + gomp_team_start (fn, data, num_threads, 0, gomp_new_team (num_threads), + NULL); } void @@ -161,14 +162,33 @@ ialias (GOMP_parallel_end) void -GOMP_parallel (void (*fn) (void *), void *data, unsigned num_threads, unsigned int flags) +GOMP_parallel (void (*fn) (void *), void *data, unsigned num_threads, + unsigned int flags) { num_threads = gomp_resolve_num_threads (num_threads, 0); - gomp_team_start (fn, data, num_threads, flags, gomp_new_team (num_threads)); + gomp_team_start (fn, data, num_threads, flags, gomp_new_team (num_threads), + NULL); fn (data); ialias_call (GOMP_parallel_end) (); } +unsigned +GOMP_parallel_reductions (void (*fn) (void *), void *data, + unsigned num_threads, unsigned int flags) +{ + struct gomp_taskgroup *taskgroup; + num_threads = gomp_resolve_num_threads (num_threads, 0); + uintptr_t *rdata = *(uintptr_t **)data; + taskgroup = gomp_parallel_reduction_register (rdata, num_threads); + gomp_team_start (fn, data, num_threads, flags, gomp_new_team (num_threads), + taskgroup); + fn (data); + ialias_call (GOMP_parallel_end) (); + gomp_sem_destroy (&taskgroup->taskgroup_sem); + free (taskgroup); + return num_threads; +} + bool GOMP_cancellation_point (int which) { @@ -185,8 +205,15 @@ } else if (which & GOMP_CANCEL_TASKGROUP) { - if (thr->task->taskgroup && thr->task->taskgroup->cancelled) - return true; + if (thr->task->taskgroup) + { + if (thr->task->taskgroup->cancelled) + return true; + if (thr->task->taskgroup->workshare + && thr->task->taskgroup->prev + && thr->task->taskgroup->prev->cancelled) + return true; + } /* FALLTHRU into the GOMP_CANCEL_PARALLEL case, as #pragma omp cancel parallel also cancels all explicit tasks. */ @@ -218,11 +245,17 @@ } else if (which & GOMP_CANCEL_TASKGROUP) { - if (thr->task->taskgroup && !thr->task->taskgroup->cancelled) + if (thr->task->taskgroup) { - gomp_mutex_lock (&team->task_lock); - thr->task->taskgroup->cancelled = true; - gomp_mutex_unlock (&team->task_lock); + struct gomp_taskgroup *taskgroup = thr->task->taskgroup; + if (taskgroup->workshare && taskgroup->prev) + taskgroup = taskgroup->prev; + if (!taskgroup->cancelled) + { + gomp_mutex_lock (&team->task_lock); + taskgroup->cancelled = true; + gomp_mutex_unlock (&team->task_lock); + } } return true; }