Mercurial > hg > CbC > CbC_gcc
annotate libgomp/libgomp.h @ 158:494b0b89df80 default tip
...
author | Shinji KONO <kono@ie.u-ryukyu.ac.jp> |
---|---|
date | Mon, 25 May 2020 18:13:55 +0900 |
parents | 1830386684a0 |
children |
rev | line source |
---|---|
145 | 1 /* Copyright (C) 2005-2020 Free Software Foundation, Inc. |
0 | 2 Contributed by Richard Henderson <rth@redhat.com>. |
3 | |
111 | 4 This file is part of the GNU Offloading and Multi Processing Library |
5 (libgomp). | |
0 | 6 |
7 Libgomp is free software; you can redistribute it and/or modify it | |
8 under the terms of the GNU General Public License as published by | |
9 the Free Software Foundation; either version 3, or (at your option) | |
10 any later version. | |
11 | |
12 Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY | |
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
14 FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
15 more details. | |
16 | |
17 Under Section 7 of GPL version 3, you are granted additional | |
18 permissions described in the GCC Runtime Library Exception, version | |
19 3.1, as published by the Free Software Foundation. | |
20 | |
21 You should have received a copy of the GNU General Public License and | |
22 a copy of the GCC Runtime Library Exception along with this program; | |
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
24 <http://www.gnu.org/licenses/>. */ | |
25 | |
26 /* This file contains data types and function declarations that are not | |
111 | 27 part of the official OpenACC or OpenMP user interfaces. There are |
28 declarations in here that are part of the GNU Offloading and Multi | |
29 Processing ABI, in that the compiler is required to know about them | |
30 and use them. | |
0 | 31 |
32 The convention is that the all caps prefix "GOMP" is used to group items |
33 that are part of the external ABI, and the lower case prefix "gomp" |
34 is used to group items that are completely private to the library. */ |
35 | |
36 #ifndef LIBGOMP_H | |
37 #define LIBGOMP_H 1 | |
38 | |
111 | 39 #ifndef _LIBGOMP_CHECKING_ |
40 /* Define to 1 to perform internal sanity checks. */ | |
41 #define _LIBGOMP_CHECKING_ 0 | |
42 #endif | |
43 | |
0 | 44 #include "config.h" |
145 | 45 #include <stdint.h> |
111 | 46 #include "libgomp-plugin.h" |
131 | 47 #include "gomp-constants.h" |
0 | 48 |
111 | 49 #ifdef HAVE_PTHREAD_H |
0 | 50 #include <pthread.h> |
111 | 51 #endif |
0 | 52 #include <stdbool.h> |
111 | 53 #include <stdlib.h> |
54 #include <stdarg.h> | |
55 | |
56 /* Needed for memset in priority_queue.c. */ | |
57 #if _LIBGOMP_CHECKING_ | |
58 # ifdef STRING_WITH_STRINGS | |
59 # include <string.h> | |
60 # include <strings.h> | |
61 # else | |
62 # ifdef HAVE_STRING_H | |
63 # include <string.h> | |
64 # else | |
65 # ifdef HAVE_STRINGS_H | |
66 # include <strings.h> | |
67 # endif | |
68 # endif | |
69 # endif | |
70 #endif | |
0 | 71 |
72 #ifdef HAVE_ATTRIBUTE_VISIBILITY | |
73 # pragma GCC visibility push(hidden) | |
74 #endif | |
75 | |
/* If we were a C++ library, we'd get this from <std/atomic>.
   These mirror GCC's __ATOMIC_* memory-order constants, so they can be
   passed straight to the __atomic_* builtins.  */
enum memmodel
{
  MEMMODEL_RELAXED = 0,	/* No ordering constraints.  */
  MEMMODEL_CONSUME = 1,	/* Data-dependence ordering only.  */
  MEMMODEL_ACQUIRE = 2,	/* Acquire barrier.  */
  MEMMODEL_RELEASE = 3,	/* Release barrier.  */
  MEMMODEL_ACQ_REL = 4,	/* Both acquire and release.  */
  MEMMODEL_SEQ_CST = 5	/* Sequentially consistent.  */
};
86 | |
87 /* alloc.c */ | |
88 | |
145 | 89 #if defined(HAVE_ALIGNED_ALLOC) \ |
90 || defined(HAVE__ALIGNED_MALLOC) \ | |
91 || defined(HAVE_POSIX_MEMALIGN) \ | |
92 || defined(HAVE_MEMALIGN) | |
93 /* Defined if gomp_aligned_alloc doesn't use fallback version | |
94 and free can be used instead of gomp_aligned_free. */ | |
95 #define GOMP_HAVE_EFFICIENT_ALIGNED_ALLOC 1 | |
96 #endif | |
97 | |
111 | 98 extern void *gomp_malloc (size_t) __attribute__((malloc)); |
99 extern void *gomp_malloc_cleared (size_t) __attribute__((malloc)); | |
100 extern void *gomp_realloc (void *, size_t); | |
145 | 101 extern void *gomp_aligned_alloc (size_t, size_t) |
102 __attribute__((malloc, alloc_size (2))); | |
103 extern void gomp_aligned_free (void *); | |
111 | 104 |
105 /* Avoid conflicting prototypes of alloca() in system headers by using | |
106 GCC's builtin alloca(). */ | |
107 #define gomp_alloca(x) __builtin_alloca(x) | |
108 | |
145 | 109 /* Optimized allocators for team-specific data that will die with the team. */ |
110 | |
111 #ifdef __AMDGCN__ | |
/* The arena is initialized in config/gcn/team.c.  */
#define TEAM_ARENA_SIZE 64*1024  /* Must match the value in plugin-gcn.c.  */
#define TEAM_ARENA_START 16 /* LDS offset of start pointer.  */
#define TEAM_ARENA_FREE 24  /* LDS offset of free pointer.  */
#define TEAM_ARENA_END 32   /* LDS offset of end pointer.  */
117 | |
/* Allocate SIZE bytes by bumping the arena free pointer in LDS; if the
   arena is exhausted, fall back to the heap.  Arena blocks are reclaimed
   wholesale when the kernel exits (see team_free for the fallback).  */
static inline void * __attribute__((malloc))
team_malloc (size_t size)
{
  /* 4-byte align the size.  */
  size = (size + 3) & ~3;

  /* Allocate directly from the arena.
     The compiler does not support DS atomics, yet.  */
  void *result;
  asm ("ds_add_rtn_u64 %0, %1, %2\n\ts_waitcnt 0"
       : "=v"(result) : "v"(TEAM_ARENA_FREE), "v"(size), "e"(1L) : "memory");

  /* Handle OOM.  */
  if (result + size > *(void * __lds *)TEAM_ARENA_END)
    {
      /* While this is experimental, let's make sure we know when OOM
	 happens.  */
      const char msg[] = "GCN team arena exhausted\n";
      write (2, msg, sizeof(msg)-1);

      /* Fall back to using the heap (slowly).  */
      result = gomp_malloc (size);
    }
  return result;
}
143 | |
144 static inline void * __attribute__((malloc)) | |
145 team_malloc_cleared (size_t size) | |
146 { | |
147 char *result = team_malloc (size); | |
148 | |
149 /* Clear the allocated memory. */ | |
150 __builtin_memset (result, 0, size); | |
151 | |
152 return result; | |
153 } | |
154 | |
/* Release PTR obtained from team_malloc.  Arena allocations are freed
   en masse at kernel exit, so only blocks outside the arena bounds
   (i.e. the heap fallback path in team_malloc) need an explicit free.  */
static inline void
team_free (void *ptr)
{
  /* The whole arena is freed when the kernel exits.
     However, if we fell back to using heap then we should free it.
     It would be better if this function could be a no-op, but at least
     LDS loads are cheap.  */
  if (ptr < *(void * __lds *)TEAM_ARENA_START
      || ptr >= *(void * __lds *)TEAM_ARENA_END)
    free (ptr);
}
166 #else | |
167 #define team_malloc(...) gomp_malloc (__VA_ARGS__) | |
168 #define team_malloc_cleared(...) gomp_malloc_cleared (__VA_ARGS__) | |
169 #define team_free(...) free (__VA_ARGS__) | |
170 #endif | |
171 | |
111 | 172 /* error.c */ |
173 | |
extern void gomp_vdebug (int, const char *, va_list);
extern void gomp_debug (int, const char *, ...)
	__attribute__ ((format (printf, 2, 3)));
/* The macros below deliberately shadow the functions of the same name so
   that the (usually false) gomp_debug_var test is inlined at every call
   site and the out-of-line call is skipped in the common case.  The
   parenthesized (gomp_vdebug)/(gomp_debug) forms suppress macro
   re-expansion and call the real functions.  */
#define gomp_vdebug(KIND, FMT, VALIST) \
  do { \
    if (__builtin_expect (gomp_debug_var, 0)) \
      (gomp_vdebug) ((KIND), (FMT), (VALIST)); \
  } while (0)
#define gomp_debug(KIND, ...) \
  do { \
    if (__builtin_expect (gomp_debug_var, 0)) \
      (gomp_debug) ((KIND), __VA_ARGS__); \
  } while (0)
187 extern void gomp_verror (const char *, va_list); | |
188 extern void gomp_error (const char *, ...) | |
189 __attribute__ ((format (printf, 1, 2))); | |
190 extern void gomp_vfatal (const char *, va_list) | |
191 __attribute__ ((noreturn)); | |
192 extern void gomp_fatal (const char *, ...) | |
193 __attribute__ ((noreturn, format (printf, 1, 2))); | |
194 | |
195 struct gomp_task; | |
196 struct gomp_taskgroup; | |
197 struct htab; | |
198 | |
199 #include "priority_queue.h" | |
0 | 200 #include "sem.h" |
201 #include "mutex.h" | |
202 #include "bar.h" | |
111 | 203 #include "simple-bar.h" |
0 | 204 #include "ptrlock.h" |
205 | |
206 | |
207 /* This structure contains the data to control one work-sharing construct, | |
208 either a LOOP (FOR/DO) or a SECTIONS. */ | |
209 | |
/* Loop schedule kinds; part of the GOMP ABI, so the numeric values of
   the compiler-generated SCHEDULE clause must match these.  */
enum gomp_schedule_type
{
  GFS_RUNTIME,
  GFS_STATIC,
  GFS_DYNAMIC,
  GFS_GUIDED,
  GFS_AUTO,
  GFS_MONOTONIC = 0x80000000U	/* Flag bit or'ed into one of the above.  */
};
219 | |
/* Extra per-workshare data for doacross loops (loops with sink/source
   dependences); referenced via gomp_work_share.doacross.  */
struct gomp_doacross_work_share
{
  union {
    /* chunk_size copy, as ws->chunk_size is multiplied by incr for
       GFS_DYNAMIC.  */
    long chunk_size;
    /* Likewise, but for ull implementation.  */
    unsigned long long chunk_size_ull;
    /* For schedule(static,0) this is the number
       of iterations assigned to the last thread, i.e. number of
       iterations / number of threads.  */
    long q;
    /* Likewise, but for ull implementation.  */
    unsigned long long q_ull;
  };
  /* Size of each array entry (padded to cache line size).  */
  unsigned long elt_sz;
  /* Number of dimensions in sink vectors.  */
  unsigned int ncounts;
  /* True if the iterations can be flattened.  */
  bool flattened;
  /* Actual array (of elt_sz sized units), aligned to cache line size.
     This is indexed by team_id for GFS_STATIC and outermost iteration
     / chunk_size for other schedules.  */
  unsigned char *array;
  /* These two are only used for schedule(static,0).  */
  /* This one is number of iterations % number of threads.  */
  long t;
  union {
    /* And this one is cached t * (q + 1).  */
    long boundary;
    /* Likewise, but for the ull implementation.  */
    unsigned long long boundary_ull;
  };
  /* Pointer to extra memory if needed for lastprivate(conditional).  */
  void *extra;
  /* Array of shift counts for each dimension if they can be flattened.  */
  unsigned int shift_counts[];
};
259 | |
struct gomp_work_share
{
  /* This member records the SCHEDULE clause to be used for this construct.
     The user specification of "runtime" will already have been resolved.
     If this is a SECTIONS construct, this value will always be DYNAMIC.  */
  enum gomp_schedule_type sched;

  /* Internal scheduling mode flag -- NOTE(review): semantics are set by
     the loop-init code (iter.c/loop.c), not visible in this header.  */
  int mode;

  union {
    struct {
      /* This is the chunk_size argument to the SCHEDULE clause.  */
      long chunk_size;

      /* This is the iteration end point.  If this is a SECTIONS construct,
	 this is the number of contained sections.  */
      long end;

      /* This is the iteration step.  If this is a SECTIONS construct, this
	 is always 1.  */
      long incr;
    };

    struct {
      /* The same as above, but for the unsigned long long loop variants.  */
      unsigned long long chunk_size_ull;
      unsigned long long end_ull;
      unsigned long long incr_ull;
    };
  };

  union {
    /* This is a circular queue that details which threads will be allowed
       into the ordered region and in which order.  When a thread allocates
       iterations on which it is going to work, it also registers itself at
       the end of the array.  When a thread reaches the ordered region, it
       checks to see if it is the one at the head of the queue.  If not, it
       blocks on its RELEASE semaphore.  */
    unsigned *ordered_team_ids;

    /* This is a pointer to DOACROSS work share data.  */
    struct gomp_doacross_work_share *doacross;
  };

  /* This is the number of threads that have registered themselves in
     the circular queue ordered_team_ids.  */
  unsigned ordered_num_used;

  /* This is the team_id of the currently acknowledged owner of the ordered
     section, or -1u if the ordered section has not been acknowledged by
     any thread.  This is distinguished from the thread that is *allowed*
     to take the section next.  */
  unsigned ordered_owner;

  /* This is the index into the circular queue ordered_team_ids of the
     current thread that's allowed into the ordered region.  */
  unsigned ordered_cur;

  /* This is a chain of allocated gomp_work_share blocks, valid only
     in the first gomp_work_share struct in the block.  */
  struct gomp_work_share *next_alloc;

  /* The above fields are written once during workshare initialization,
     or related to ordered worksharing.  Make sure the following fields
     are in a different cache line.  */

  /* This lock protects the update of the following members.  */
  gomp_mutex_t lock __attribute__((aligned (64)));

  /* This is the count of the number of threads that have exited the work
     share construct.  If the construct was marked nowait, they have moved on
     to other work; otherwise they're blocked on a barrier.  The last member
     of the team to exit the work share construct must deallocate it.  */
  unsigned threads_completed;

  union {
    /* This is the next iteration value to be allocated.  In the case of
       GFS_STATIC loops, this is the iteration start point and never
       changes.  */
    long next;

    /* The same, but with unsigned long long type.  */
    unsigned long long next_ull;

    /* This is the returned data structure for SINGLE COPYPRIVATE.  */
    void *copyprivate;
  };

  union {
    /* Link to gomp_work_share struct for next work sharing construct
       encountered after this one.  */
    gomp_ptrlock_t next_ws;

    /* gomp_work_share structs are chained in the free work share cache
       through this.  */
    struct gomp_work_share *next_free;
  };

  /* Task reductions for this work-sharing construct.  */
  uintptr_t *task_reductions;

  /* If only few threads are in the team, ordered_team_ids can point
     to this array which fills the padding at the end of this struct.  */
  unsigned inline_ordered_team_ids[0];
};
364 | |
/* This structure contains all of the thread-local data associated with
   a thread team.  This is the data that must be saved when a thread
   encounters a nested PARALLEL construct.  */

struct gomp_team_state
{
  /* This is the team of which the thread is currently a member.  */
  struct gomp_team *team;

  /* This is the work share construct which this thread is currently
     processing.  Recall that with NOWAIT, not all threads may be
     processing the same construct.  */
  struct gomp_work_share *work_share;

  /* This is the previous work share construct or NULL if there wasn't any.
     When all threads are done with the current work sharing construct,
     the previous one can be freed.  The current one can't, as its
     next_ws field is used.  */
  struct gomp_work_share *last_work_share;

  /* This is the ID of this thread within the team.  This value is
     guaranteed to be between 0 and N-1, where N is the number of
     threads in the team.  */
  unsigned team_id;

  /* Nesting level.  */
  unsigned level;

  /* Active nesting level.  Only active parallel regions are counted.  */
  unsigned active_level;

  /* Place-partition-var, offset and length into gomp_places_list array.  */
  unsigned place_partition_off;
  unsigned place_partition_len;

#ifdef HAVE_SYNC_BUILTINS
  /* Number of single stmts encountered.  Compared against the team-wide
     count to decide whether this thread executes a given SINGLE.  */
  unsigned long single_count;
#endif

  /* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the
     trip number through the loop.  So first time a particular loop
     is encountered this number is 0, the second time through the loop
     is 1, etc.  This is unused when the compiler knows in advance that
     the loop is statically scheduled.  */
  unsigned long static_trip;
};
412 | |
111 | 413 struct target_mem_desc; |
414 | |
415 /* These are the OpenMP 4.0 Internal Control Variables described in | |
0 | 416 section 2.3.1. Those described as having one copy per task are |
417 stored within the structure; those described as having one copy | |
418 for the whole program are (naturally) global variables. */ | |
111 | 419 |
/* One copy of the per-task Internal Control Variables; embedded in each
   gomp_task (icv member), with gomp_global_icv as the program default.  */
struct gomp_task_icv
{
  unsigned long nthreads_var;		/* nthreads-var ICV.  */
  enum gomp_schedule_type run_sched_var; /* run-sched-var: schedule kind.  */
  int run_sched_chunk_size;		/* run-sched-var: chunk size.  */
  int default_device_var;		/* default-device-var ICV.  */
  unsigned int thread_limit_var;	/* thread-limit-var ICV.  */
  bool dyn_var;				/* dyn-var ICV.  */
  bool nest_var;			/* nest-var ICV.  */
  char bind_var;			/* bind-var ICV.  */
  /* Internal ICV.  */
  struct target_mem_desc *target_data;
};
433 | |
434 extern struct gomp_task_icv gomp_global_icv; | |
435 #ifndef HAVE_SYNC_BUILTINS | |
111 | 436 extern gomp_mutex_t gomp_managed_threads_lock; |
0 | 437 #endif |
438 extern unsigned long gomp_max_active_levels_var; | |
111 | 439 extern bool gomp_cancel_var; |
440 extern int gomp_max_task_priority_var; | |
0 | 441 extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var; |
442 extern unsigned long gomp_available_cpus, gomp_managed_threads; | |
111 | 443 extern unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len; |
444 extern char *gomp_bind_var_list; | |
445 extern unsigned long gomp_bind_var_list_len; | |
446 extern void **gomp_places_list; | |
447 extern unsigned long gomp_places_list_len; | |
448 extern unsigned int gomp_num_teams_var; | |
449 extern int gomp_debug_var; | |
145 | 450 extern bool gomp_display_affinity_var; |
451 extern char *gomp_affinity_format_var; | |
452 extern size_t gomp_affinity_format_len; | |
111 | 453 extern int goacc_device_num; |
454 extern char *goacc_device_type; | |
131 | 455 extern int goacc_default_dims[GOMP_DIM_MAX]; |
0 | 456 |
/* Lifecycle states of a task.  */
enum gomp_task_kind
{
  /* Implicit task.  */
  GOMP_TASK_IMPLICIT,
  /* Undeferred task.  */
  GOMP_TASK_UNDEFERRED,
  /* Task created by GOMP_task and waiting to be run.  */
  GOMP_TASK_WAITING,
  /* Task currently executing or scheduled and about to execute.  */
  GOMP_TASK_TIED,
  /* Used for target tasks that have vars mapped and async run started,
     but not yet completed.  Once that completes, they will be re-added
     into the queues as GOMP_TASK_WAITING in order to perform the var
     unmapping.  */
  GOMP_TASK_ASYNC_RUNNING
};
473 | |
/* One element of a task's trailing DEPEND array; doubly linked into the
   per-address chain hanging off the parent's depend_hash.  */
struct gomp_task_depend_entry
{
  /* Address of dependency.  */
  void *addr;
  struct gomp_task_depend_entry *next;
  struct gomp_task_depend_entry *prev;
  /* Task that provides the dependency in ADDR.  */
  struct gomp_task *task;
  /* Depend entry is of type "IN".  */
  bool is_in;
  bool redundant;
  bool redundant_out;
};

/* Growable vector of tasks that depend on a given task.  */
struct gomp_dependers_vec
{
  size_t n_elem;	/* Number of entries in use.  */
  size_t allocated;	/* Capacity of ELEM.  */
  struct gomp_task *elem[];
};

/* Used when in GOMP_taskwait or in gomp_task_maybe_wait_for_dependencies.  */

struct gomp_taskwait
{
  bool in_taskwait;
  bool in_depend_wait;
  /* Number of tasks we are waiting for.  */
  size_t n_depend;
  gomp_sem_t taskwait_sem;
};
505 | |
/* This structure describes a "task" to be run by a thread.  */

struct gomp_task
{
  /* Parent of this task.  */
  struct gomp_task *parent;
  /* Children of this task.  */
  struct priority_queue children_queue;
  /* Taskgroup this task belongs in.  */
  struct gomp_taskgroup *taskgroup;
  /* Tasks that depend on this task.  */
  struct gomp_dependers_vec *dependers;
  /* Hash table over the addresses in DEPEND below.  */
  struct htab *depend_hash;
  struct gomp_taskwait *taskwait;
  /* Number of items in DEPEND.  */
  size_t depend_count;
  /* Number of tasks this task depends on.  Once this counter reaches
     0, we have no unsatisfied dependencies, and this task can be put
     into the various queues to be scheduled.  */
  size_t num_dependees;

  /* Priority of this task.  */
  int priority;
  /* The priority node for this task in each of the different queues.
     We put this here to avoid allocating space for each priority
     node.  Then we play offsetof() games to convert between pnode[]
     entries and the gomp_task in which they reside.  */
  struct priority_node pnode[3];

  /* ICVs inherited at task creation; see gomp_icv.  */
  struct gomp_task_icv icv;
  /* Task body and its argument block.  */
  void (*fn) (void *);
  void *fn_data;
  enum gomp_task_kind kind;
  bool in_tied_task;
  bool final_task;
  bool copy_ctors_done;
  /* Set for undeferred tasks with unsatisfied dependencies which
     block further execution of their parent until the dependencies
     are satisfied.  */
  bool parent_depends_on;
  /* Dependencies provided and/or needed for this task.  DEPEND_COUNT
     is the number of items available.  */
  struct gomp_task_depend_entry depend[];
};
550 | |
/* This structure describes a single #pragma omp taskgroup.  */

struct gomp_taskgroup
{
  /* Enclosing taskgroup, forming a stack.  */
  struct gomp_taskgroup *prev;
  /* Queue of tasks that belong in this taskgroup.  */
  struct priority_queue taskgroup_queue;
  /* Task reductions registered for this taskgroup.  */
  uintptr_t *reductions;
  bool in_taskgroup_wait;
  bool cancelled;
  /* True for the taskgroups created for taskloop/workshare constructs
     rather than an explicit #pragma omp taskgroup.  */
  bool workshare;
  gomp_sem_t taskgroup_sem;
  /* Number of tasks not yet completed in this taskgroup.  */
  size_t num_children;
};
565 | |
/* Various state of OpenMP async offloading tasks.  */
enum gomp_target_task_state
{
  GOMP_TARGET_TASK_DATA,
  GOMP_TARGET_TASK_BEFORE_MAP,
  GOMP_TARGET_TASK_FALLBACK,
  GOMP_TARGET_TASK_READY_TO_RUN,
  GOMP_TARGET_TASK_RUNNING,
  GOMP_TARGET_TASK_FINISHED
};

/* This structure describes a target task.  */

struct gomp_target_task
{
  /* Device to offload to.  */
  struct gomp_device_descr *devicep;
  void (*fn) (void *);
  /* Number of map entries; presumably the length of SIZES/KINDS and the
     trailing HOSTADDRS array -- confirm against target.c.  */
  size_t mapnum;
  size_t *sizes;
  unsigned short *kinds;
  unsigned int flags;
  enum gomp_target_task_state state;
  struct target_mem_desc *tgt;
  /* The gomp_task this target task is wrapped in.  */
  struct gomp_task *task;
  struct gomp_team *team;
  /* Device-specific target arguments.  */
  void **args;
  void *hostaddrs[];
};
595 | |
/* This structure describes a "team" of threads.  These are the threads
   that are spawned by a PARALLEL constructs, as well as the work sharing
   constructs that the team encounters.  */

struct gomp_team
{
  /* This is the number of threads in the current team.  */
  unsigned nthreads;

  /* This is number of gomp_work_share structs that have been allocated
     as a block last time.  */
  unsigned work_share_chunk;

  /* This is the saved team state that applied to a master thread before
     the current thread was created.  */
  struct gomp_team_state prev_ts;

  /* This semaphore should be used by the master thread instead of its
     "native" semaphore in the thread structure.  Required for nested
     parallels, as the master is a member of two teams.  */
  gomp_sem_t master_release;

  /* This points to an array with pointers to the release semaphore
     of the threads in the team.  */
  gomp_sem_t **ordered_release;

  /* List of work shares on which gomp_fini_work_share hasn't been
     called yet.  If the team hasn't been cancelled, this should be
     equal to each thr->ts.work_share, but otherwise it can be a possibly
     long list of workshares.  */
  struct gomp_work_share *work_shares_to_free;

  /* List of gomp_work_share structs chained through next_free fields.
     This is populated and taken off only by the first thread in the
     team encountering a new work sharing construct, in a critical
     section.  */
  struct gomp_work_share *work_share_list_alloc;

  /* List of gomp_work_share structs freed by free_work_share.  New
     entries are atomically added to the start of the list, and
     alloc_work_share can safely only move all but the first entry
     to work_share_list alloc, as free_work_share can happen concurrently
     with alloc_work_share.  */
  struct gomp_work_share *work_share_list_free;

#ifdef HAVE_SYNC_BUILTINS
  /* Number of simple single regions encountered by threads in this
     team.  */
  unsigned long single_count;
#else
  /* Mutex protecting addition of workshares to work_share_list_free.  */
  gomp_mutex_t work_share_list_free_lock;
#endif

  /* This barrier is used for most synchronization of the team.  */
  gomp_barrier_t barrier;

  /* Initial work shares, to avoid allocating any gomp_work_share
     structs in the common case.  */
  struct gomp_work_share work_shares[8];

  /* Lock guarding the task queue and counters below.  */
  gomp_mutex_t task_lock;
  /* Scheduled tasks.  */
  struct priority_queue task_queue;
  /* Number of all GOMP_TASK_{WAITING,TIED} tasks in the team.  */
  unsigned int task_count;
  /* Number of GOMP_TASK_WAITING tasks currently waiting to be scheduled.  */
  unsigned int task_queued_count;
  /* Number of GOMP_TASK_{WAITING,TIED} tasks currently running
     directly in gomp_barrier_handle_tasks; tasks spawned
     from e.g. GOMP_taskwait or GOMP_taskgroup_end don't count, even when
     that is called from a task run from gomp_barrier_handle_tasks.
     task_running_count should be always <= team->nthreads,
     and if current task isn't in_tied_task, then it will be
     even < team->nthreads.  */
  unsigned int task_running_count;
  int work_share_cancelled;
  int team_cancelled;

  /* This array contains structures for implicit tasks.  */
  struct gomp_task implicit_task[];
};
678 | |
/* This structure contains all data that is private to libgomp and is
   allocated per thread.  */

struct gomp_thread
{
  /* This is the function that the thread should run upon launch.  */
  void (*fn) (void *data);
  void *data;

  /* This is the current team state for this thread.  The ts.team member
     is NULL only if the thread is idle.  */
  struct gomp_team_state ts;

  /* This is the task that the thread is currently executing.  */
  struct gomp_task *task;

  /* This semaphore is used for ordered loops.  */
  gomp_sem_t release;

  /* Place this thread is bound to plus one, or zero if not bound
     to any place.  */
  unsigned int place;

  /* User pthread thread pool.  */
  struct gomp_thread_pool *thread_pool;

#if defined(LIBGOMP_USE_PTHREADS) \
    && (!defined(HAVE_TLS) \
	|| !defined(__GLIBC__) \
	|| !defined(USING_INITIAL_EXEC_TLS))
  /* pthread_t of the thread containing this gomp_thread.
     On Linux when using initial-exec TLS,
     (typeof (pthread_t)) gomp_thread () - pthread_self ()
     is constant in all threads, so we can optimize and not
     store it.  */
#define GOMP_NEEDS_THREAD_HANDLE 1
  pthread_t handle;
#endif
};
718 | |
719 | |
/* Pool of idle threads kept alive between parallel regions.  */
struct gomp_thread_pool
{
  /* This array manages threads spawned from the top level, which will
     return to the idle loop once the current PARALLEL construct ends.  */
  struct gomp_thread **threads;
  unsigned threads_size;
  unsigned threads_used;
  /* The last team is used for non-nested teams to delay their destruction to
     make sure all the threads in the team move on to the pool's barrier before
     the team's barrier is destroyed.  */
  struct gomp_team *last_team;
  /* Number of threads running in this contention group.  */
  unsigned long threads_busy;

  /* This barrier holds and releases threads waiting in thread pools.  */
  gomp_simple_barrier_t threads_dock;
};

/* Construct kinds a cancellation request can apply to; ABI values
   passed by the compiler to GOMP_cancel/GOMP_cancellation_point.  */
enum gomp_cancel_kind
{
  GOMP_CANCEL_PARALLEL = 1,
  GOMP_CANCEL_LOOP = 2,
  GOMP_CANCEL_FOR = GOMP_CANCEL_LOOP,
  GOMP_CANCEL_DO = GOMP_CANCEL_LOOP,
  GOMP_CANCEL_SECTIONS = 4,
  GOMP_CANCEL_TASKGROUP = 8
};
747 | |
/* ... and here is that TLS data.  Each target provides gomp_thread (),
   returning the calling thread's gomp_thread structure.  */

#if defined __nvptx__
/* NVPTX: per-thread structs live in a shared-memory array indexed by
   the %tid.y thread coordinate.  */
extern struct gomp_thread *nvptx_thrs __attribute__((shared));
static inline struct gomp_thread *gomp_thread (void)
{
  int tid;
  asm ("mov.u32 %0, %%tid.y;" : "=r" (tid));
  return nvptx_thrs + tid;
}
#elif defined __AMDGCN__
/* AMD GCN: a pointer to the array of gomp_thread structs is stored at a
   fixed LDS offset; index by __builtin_gcn_dim_pos (1).  */
static inline struct gomp_thread *gcn_thrs (void)
{
  /* The value is at the bottom of LDS.  */
  struct gomp_thread * __lds *thrs = (struct gomp_thread * __lds *)4;
  return *thrs;
}
static inline void set_gcn_thrs (struct gomp_thread *val)
{
  /* The value is at the bottom of LDS.  */
  struct gomp_thread * __lds *thrs = (struct gomp_thread * __lds *)4;
  *thrs = val;
}
static inline struct gomp_thread *gomp_thread (void)
{
  int tid = __builtin_gcn_dim_pos(1);
  return gcn_thrs () + tid;
}
#elif defined HAVE_TLS || defined USE_EMUTLS
/* Hosts with TLS: one thread-local instance per OS thread.  */
extern __thread struct gomp_thread gomp_tls_data;
static inline struct gomp_thread *gomp_thread (void)
{
  return &gomp_tls_data;
}
#else
/* No TLS support: fall back to pthread-specific data.  */
extern pthread_key_t gomp_tls_key;
static inline struct gomp_thread *gomp_thread (void)
{
  return pthread_getspecific (gomp_tls_key);
}
#endif
789 | |
790 extern struct gomp_task_icv *gomp_new_icv (void); | |
791 | |
792 /* Here's how to access the current copy of the ICVs. */ | |
793 | |
794 static inline struct gomp_task_icv *gomp_icv (bool write) | |
795 { | |
796 struct gomp_task *task = gomp_thread ()->task; | |
797 if (task) | |
798 return &task->icv; | |
799 else if (write) | |
800 return gomp_new_icv (); | |
801 else | |
802 return &gomp_global_icv; | |
803 } | |
804 | |
#ifdef LIBGOMP_USE_PTHREADS
/* The attributes to be used during thread creation.  */
extern pthread_attr_t gomp_thread_attr;

/* pthread key whose destructor performs per-thread cleanup at thread
   exit (registered elsewhere; not visible in this excerpt).  */
extern pthread_key_t gomp_thread_destructor;
#endif

/* Function prototypes.  */

/* affinity.c */

extern void gomp_init_affinity (void);
#ifdef LIBGOMP_USE_PTHREADS
extern void gomp_init_thread_affinity (pthread_attr_t *, unsigned int);
#endif
/* Place-list management; a "place" is passed around as an opaque
   void * whose representation is private to affinity.c.  */
extern void **gomp_affinity_alloc (unsigned long, bool);
extern void gomp_affinity_init_place (void *);
extern bool gomp_affinity_add_cpus (void *, unsigned long, unsigned long,
				    long, bool);
extern bool gomp_affinity_remove_cpu (void *, unsigned long);
extern bool gomp_affinity_copy_place (void *, void *, long);
extern bool gomp_affinity_same_place (void *, void *);
extern bool gomp_affinity_finalize_place_list (bool);
extern bool gomp_affinity_init_level (int, unsigned long, bool);
extern void gomp_affinity_print_place (void *);
extern void gomp_get_place_proc_ids_8 (int, int64_t *);
extern void gomp_display_affinity_place (char *, size_t, size_t *, int);

/* affinity-fmt.c */

extern bool gomp_print_string (const char *str, size_t len);
extern void gomp_set_affinity_format (const char *, size_t);
extern void gomp_display_string (char *, size_t, size_t *, const char *,
				 size_t);
#ifdef LIBGOMP_USE_PTHREADS
typedef pthread_t gomp_thread_handle;
#else
/* Dummy handle when pthreads are unavailable.  NOTE: an empty struct is
   a GNU extension, not standard C.  */
typedef struct {} gomp_thread_handle;
#endif
extern size_t gomp_display_affinity (char *, size_t, const char *,
				     gomp_thread_handle,
				     struct gomp_team_state *, unsigned int);
extern void gomp_display_affinity_thread (gomp_thread_handle,
					  struct gomp_team_state *,
					  unsigned int) __attribute__((cold));
0 | 850 |
/* iter.c */

extern int gomp_iter_static_next (long *, long *);
extern bool gomp_iter_dynamic_next_locked (long *, long *);
extern bool gomp_iter_guided_next_locked (long *, long *);

/* Lock-free variants of the above, available only when the compiler
   provides atomic sync builtins.  */
#ifdef HAVE_SYNC_BUILTINS
extern bool gomp_iter_dynamic_next (long *, long *);
extern bool gomp_iter_guided_next (long *, long *);
#endif

/* iter_ull.c -- the same iteration interfaces for unsigned long long
   loop bounds.  */

extern int gomp_iter_ull_static_next (unsigned long long *,
				      unsigned long long *);
extern bool gomp_iter_ull_dynamic_next_locked (unsigned long long *,
					       unsigned long long *);
extern bool gomp_iter_ull_guided_next_locked (unsigned long long *,
					      unsigned long long *);

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
extern bool gomp_iter_ull_dynamic_next (unsigned long long *,
					unsigned long long *);
extern bool gomp_iter_ull_guided_next (unsigned long long *,
				       unsigned long long *);
#endif

/* ordered.c */

extern void gomp_ordered_first (void);
extern void gomp_ordered_last (void);
extern void gomp_ordered_next (void);
extern void gomp_ordered_static_init (void);
extern void gomp_ordered_static_next (void);
extern void gomp_ordered_sync (void);
extern void gomp_doacross_init (unsigned, long *, long, size_t);
extern void gomp_doacross_ull_init (unsigned, unsigned long long *,
				    unsigned long long, size_t);
0 | 889 |
/* parallel.c */

extern unsigned gomp_resolve_num_threads (unsigned, unsigned);

/* proc.c (in config/) */

extern void gomp_init_num_threads (void);
extern unsigned gomp_dynamic_max_threads (void);

/* task.c */

extern void gomp_init_task (struct gomp_task *, struct gomp_task *,
			    struct gomp_task_icv *);
extern void gomp_end_task (void);
extern void gomp_barrier_handle_tasks (gomp_barrier_state_t);
extern void gomp_task_maybe_wait_for_dependencies (void **);
extern bool gomp_create_target_task (struct gomp_device_descr *,
				     void (*) (void *), size_t, void **,
				     size_t *, unsigned short *, unsigned int,
				     void **, void **,
				     enum gomp_target_task_state);
/* Task-reduction bookkeeping; the uintptr_t arrays are the
   compiler-generated reduction descriptors.  */
extern struct gomp_taskgroup *gomp_parallel_reduction_register (uintptr_t *,
								unsigned);
extern void gomp_workshare_taskgroup_start (void);
extern void gomp_workshare_task_reduction_register (uintptr_t *, uintptr_t *);
0 | 915 |
916 static void inline | |
917 gomp_finish_task (struct gomp_task *task) | |
918 { | |
111 | 919 if (__builtin_expect (task->depend_hash != NULL, 0)) |
920 free (task->depend_hash); | |
0 | 921 } |
922 | |
/* team.c */

extern struct gomp_team *gomp_new_team (unsigned);
extern void gomp_team_start (void (*) (void *), void *, unsigned,
			     unsigned, struct gomp_team *,
			     struct gomp_taskgroup *);
extern void gomp_team_end (void);
extern void gomp_free_thread (void *);
extern int gomp_pause_host (void);

/* target.c */

extern void gomp_init_targets_once (void);
extern int gomp_get_num_devices (void);
extern bool gomp_target_task_fn (void *);

/* Splay tree definitions.  The *_s structures are defined further down
   in this header; splay-tree.h is included after them.  */
typedef struct splay_tree_node_s *splay_tree_node;
typedef struct splay_tree_s *splay_tree;
typedef struct splay_tree_key_s *splay_tree_key;
943 | |
/* One mapped variable within a target region; target_mem_desc's
   trailing "list" array holds one of these per item to handle at the
   end of the region.  */
struct target_var_desc {
  /* Splay key.  */
  splay_tree_key key;
  /* True if data should be copied from device to host at the end.  */
  bool copy_from;
  /* True if data always should be copied from device to host at the end.  */
  bool always_copy_from;
  /* True if variable should be detached at end of region.  */
  bool do_detach;
  /* Relative offset against key host_start.  */
  uintptr_t offset;
  /* Actual length.  */
  uintptr_t length;
};
958 | |
/* Descriptor of one contiguous chunk of device memory backing a set of
   mapped variables.  */
struct target_mem_desc {
  /* Reference count.  */
  uintptr_t refcount;
  /* All the splay nodes allocated together.  */
  splay_tree_node array;
  /* Start of the target region.  */
  uintptr_t tgt_start;
  /* End of the target region.  */
  uintptr_t tgt_end;
  /* Handle to free.  */
  void *to_free;
  /* Previous target_mem_desc.  */
  struct target_mem_desc *prev;
  /* Number of items in following list.  */
  size_t list_count;

  /* Corresponding target device descriptor.  */
  struct gomp_device_descr *device_descr;

  /* List of target items to remove (or decrease refcount)
     at the end of region.  This is a C99 flexible array member; the
     structure is allocated with room for list_count entries.  */
  struct target_var_desc list[];
};
982 | |
/* Special value for refcount - infinity.  Marks mappings that must
   never be removed by reference counting.  */
#define REFCOUNT_INFINITY (~(uintptr_t) 0)
/* Special value for refcount - tgt_offset contains target address of the
   artificial pointer to "omp declare target link" object.  */
#define REFCOUNT_LINK (~(uintptr_t) 1)

/* Special offset values.  These are sentinels stored in the offset
   field, distinguishable from real offsets by being near ~0.  */
#define OFFSET_INLINED (~(uintptr_t) 0)
#define OFFSET_POINTER (~(uintptr_t) 1)
#define OFFSET_STRUCT (~(uintptr_t) 2)
993 | |
/* Auxiliary structure for infrequently-used or API-specific data.
   Hung off a splay tree key's "aux" pointer on demand.  */

struct splay_tree_aux {
  /* Pointer to the original mapping of "omp declare target link" object.  */
  splay_tree_key link_key;
  /* For a block with attached pointers, the attachment counters for each.
     Only used for OpenACC.  */
  uintptr_t *attach_count;
};
1003 | |
/* A splay tree key: a host-side address range [host_start, host_end)
   together with where it is mapped in target memory.  */
struct splay_tree_key_s {
  /* Address of the host object.  */
  uintptr_t host_start;
  /* Address immediately after the host object.  */
  uintptr_t host_end;
  /* Descriptor of the target memory.  */
  struct target_mem_desc *tgt;
  /* Offset from tgt->tgt_start to the start of the target object.  */
  uintptr_t tgt_offset;
  /* Reference count.  */
  uintptr_t refcount;
  /* Reference counts beyond those that represent genuine references in the
     linked splay tree key/target memory structures, e.g. for multiple OpenACC
     "present increment" operations (via "acc enter data") referring to the same
     host-memory block.  */
  uintptr_t virtual_refcount;
  /* Infrequently-used or API-specific data (see struct splay_tree_aux).  */
  struct splay_tree_aux *aux;
};
1022 | |
1023 /* The comparison function. */ | |
1024 | |
1025 static inline int | |
1026 splay_compare (splay_tree_key x, splay_tree_key y) | |
1027 { | |
1028 if (x->host_start == x->host_end | |
1029 && y->host_start == y->host_end) | |
1030 return 0; | |
1031 if (x->host_end <= y->host_start) | |
1032 return -1; | |
1033 if (x->host_start >= y->host_end) | |
1034 return 1; | |
1035 return 0; | |
1036 } | |
1037 | |
#include "splay-tree.h"

/* Dispatch table of OpenACC-specific entry points of an offloading
   plugin; each member holds a pointer with the type of the
   corresponding GOMP_OFFLOAD_openacc_* plugin function.  */
typedef struct acc_dispatch_t
{
  /* Execute.  */
  __typeof (GOMP_OFFLOAD_openacc_exec) *exec_func;

  /* Create/destroy TLS data.  */
  __typeof (GOMP_OFFLOAD_openacc_create_thread_data) *create_thread_data_func;
  __typeof (GOMP_OFFLOAD_openacc_destroy_thread_data)
    *destroy_thread_data_func;

  /* Asynchronous-queue bookkeeping and plugin hooks.  */
  struct {
    /* Once created and put into the "active" list, asyncqueues are then never
       destructed and removed from the "active" list, other than if the TODO
       device is shut down.  */
    gomp_mutex_t lock;
    int nasyncqueue;
    struct goacc_asyncqueue **asyncqueue;
    struct goacc_asyncqueue_list *active;

    __typeof (GOMP_OFFLOAD_openacc_async_construct) *construct_func;
    __typeof (GOMP_OFFLOAD_openacc_async_destruct) *destruct_func;
    __typeof (GOMP_OFFLOAD_openacc_async_test) *test_func;
    __typeof (GOMP_OFFLOAD_openacc_async_synchronize) *synchronize_func;
    __typeof (GOMP_OFFLOAD_openacc_async_serialize) *serialize_func;
    __typeof (GOMP_OFFLOAD_openacc_async_queue_callback) *queue_callback_func;

    __typeof (GOMP_OFFLOAD_openacc_async_exec) *exec_func;
    __typeof (GOMP_OFFLOAD_openacc_async_dev2host) *dev2host_func;
    __typeof (GOMP_OFFLOAD_openacc_async_host2dev) *host2dev_func;
  } async;

  __typeof (GOMP_OFFLOAD_openacc_get_property) *get_property_func;

  /* NVIDIA target specific routines.  */
  struct {
    __typeof (GOMP_OFFLOAD_openacc_cuda_get_current_device)
      *get_current_device_func;
    __typeof (GOMP_OFFLOAD_openacc_cuda_get_current_context)
      *get_current_context_func;
    __typeof (GOMP_OFFLOAD_openacc_cuda_get_stream) *get_stream_func;
    __typeof (GOMP_OFFLOAD_openacc_cuda_set_stream) *set_stream_func;
  } cuda;
} acc_dispatch_t;

/* Various state of the accelerator device.  */
enum gomp_device_state
{
  GOMP_DEVICE_UNINITIALIZED,
  GOMP_DEVICE_INITIALIZED,
  GOMP_DEVICE_FINALIZED
};
1091 | |
/* This structure describes accelerator device.
   It contains name of the corresponding libgomp plugin, function handlers for
   interaction with the device, ID-number of the device, and information about
   mapped memory.  */
struct gomp_device_descr
{
  /* Immutable data, which is only set during initialization, and which is not
     guarded by the lock.  */

  /* The name of the device.  */
  const char *name;

  /* Capabilities of device (supports OpenACC, OpenMP).  */
  unsigned int capabilities;

  /* This is the ID number of device among devices of the same type.  */
  int target_id;

  /* This is the TYPE of device.  */
  enum offload_target_type type;

  /* Function handlers.  Each pointer carries the type of the matching
     GOMP_OFFLOAD_* plugin entry point.  */
  __typeof (GOMP_OFFLOAD_get_name) *get_name_func;
  __typeof (GOMP_OFFLOAD_get_caps) *get_caps_func;
  __typeof (GOMP_OFFLOAD_get_type) *get_type_func;
  __typeof (GOMP_OFFLOAD_get_num_devices) *get_num_devices_func;
  __typeof (GOMP_OFFLOAD_init_device) *init_device_func;
  __typeof (GOMP_OFFLOAD_fini_device) *fini_device_func;
  __typeof (GOMP_OFFLOAD_version) *version_func;
  __typeof (GOMP_OFFLOAD_load_image) *load_image_func;
  __typeof (GOMP_OFFLOAD_unload_image) *unload_image_func;
  __typeof (GOMP_OFFLOAD_alloc) *alloc_func;
  __typeof (GOMP_OFFLOAD_free) *free_func;
  __typeof (GOMP_OFFLOAD_dev2host) *dev2host_func;
  __typeof (GOMP_OFFLOAD_host2dev) *host2dev_func;
  __typeof (GOMP_OFFLOAD_dev2dev) *dev2dev_func;
  __typeof (GOMP_OFFLOAD_can_run) *can_run_func;
  __typeof (GOMP_OFFLOAD_run) *run_func;
  __typeof (GOMP_OFFLOAD_async_run) *async_run_func;

  /* Splay tree containing information about mapped memory regions.  */
  struct splay_tree_s mem_map;

  /* Mutex for the mutable data.  */
  gomp_mutex_t lock;

  /* Current state of the device.  OpenACC allows to move from INITIALIZED state
     back to UNINITIALIZED state.  OpenMP allows only to move from INITIALIZED
     to FINALIZED state (at program shutdown).  */
  enum gomp_device_state state;

  /* OpenACC-specific data and functions.  */
  /* This is mutable because of its mutable target_data member.  */
  acc_dispatch_t openacc;
};
1147 | |
/* Kind of the pragma, for which gomp_map_vars () is called.  */
enum gomp_map_vars_kind
{
  GOMP_MAP_VARS_OPENACC,
  GOMP_MAP_VARS_OPENACC_ENTER_DATA,
  GOMP_MAP_VARS_TARGET,
  GOMP_MAP_VARS_DATA,
  GOMP_MAP_VARS_ENTER_DATA
};
1157 | |
extern void gomp_acc_declare_allocate (bool, size_t, void **, size_t *,
				       unsigned short *);
/* Opaque coalescing buffer for batched host<->device transfers; the
   definition is private to target.c.  */
struct gomp_coalesce_buf;
extern void gomp_copy_host2dev (struct gomp_device_descr *,
				struct goacc_asyncqueue *, void *, const void *,
				size_t, struct gomp_coalesce_buf *);
extern void gomp_copy_dev2host (struct gomp_device_descr *,
				struct goacc_asyncqueue *, void *, const void *,
				size_t);
extern uintptr_t gomp_map_val (struct target_mem_desc *, void **, size_t);
extern void gomp_attach_pointer (struct gomp_device_descr *,
				 struct goacc_asyncqueue *, splay_tree,
				 splay_tree_key, uintptr_t, size_t,
				 struct gomp_coalesce_buf *);
extern void gomp_detach_pointer (struct gomp_device_descr *,
				 struct goacc_asyncqueue *, splay_tree_key,
				 uintptr_t, bool, struct gomp_coalesce_buf *);

extern struct target_mem_desc *gomp_map_vars (struct gomp_device_descr *,
					      size_t, void **, void **,
					      size_t *, void *, bool,
					      enum gomp_map_vars_kind);
/* _async variants take an explicit asyncqueue instead of operating
   synchronously.  */
extern struct target_mem_desc *gomp_map_vars_async (struct gomp_device_descr *,
						    struct goacc_asyncqueue *,
						    size_t, void **, void **,
						    size_t *, void *, bool,
						    enum gomp_map_vars_kind);
extern void gomp_unmap_vars (struct target_mem_desc *, bool);
extern void gomp_unmap_vars_async (struct target_mem_desc *, bool,
				   struct goacc_asyncqueue *);
extern void gomp_init_device (struct gomp_device_descr *);
extern bool gomp_fini_device (struct gomp_device_descr *);
extern void gomp_unload_device (struct gomp_device_descr *);
extern bool gomp_remove_var (struct gomp_device_descr *, splay_tree_key);
extern void gomp_remove_var_async (struct gomp_device_descr *, splay_tree_key,
				   struct goacc_asyncqueue *);

/* work.c */

extern void gomp_init_work_share (struct gomp_work_share *, size_t, unsigned);
extern void gomp_fini_work_share (struct gomp_work_share *);
extern bool gomp_work_share_start (size_t);
extern void gomp_work_share_end (void);
extern bool gomp_work_share_end_cancel (void);
extern void gomp_work_share_end_nowait (void);
1203 | |
1204 static inline void | |
1205 gomp_work_share_init_done (void) | |
1206 { | |
1207 struct gomp_thread *thr = gomp_thread (); | |
1208 if (__builtin_expect (thr->ts.last_work_share != NULL, 1)) | |
1209 gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share); | |
1210 } | |
1211 | |
#ifdef HAVE_ATTRIBUTE_VISIBILITY
/* Matches a "visibility push(hidden)" earlier in this header (outside
   this excerpt).  */
# pragma GCC visibility pop
#endif

/* Now that we're back to default visibility, include the globals.  */
#include "libgomp_g.h"

/* Include omp.h by parts.  */
#include "omp-lock.h"
#define _LIBGOMP_OMP_LOCK_DEFINED 1
#include "omp.h.in"
1223 | |
/* GNU symbol versioning requires visibility and alias attributes, an
   assembler that understands .symver, PIC, and runtime support for
   renamed versioned symbols; lacking any of these, disable it.
   (This conditional was reconstructed: the extracted source had
   repository-annotation noise interleaved in the middle of it.)  */
#if !defined (HAVE_ATTRIBUTE_VISIBILITY) \
    || !defined (HAVE_ATTRIBUTE_ALIAS) \
    || !defined (HAVE_AS_SYMVER_DIRECTIVE) \
    || !defined (PIC) \
    || !defined (HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT)
# undef LIBGOMP_GNU_SYMBOL_VERSIONING
#endif
1231 | |
/* With symbol versioning, the omp lock API is provided twice: the
   gomp_*_30 entry points are exported at version OMP_3.0 and the
   gomp_*_25 ones keep the old OMP_1.0 ABI alive (see omp_lock_symver
   below).  Without versioning, the _30 names simply #define to the
   public omp_* names.  */
#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING
extern void gomp_init_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;

extern void gomp_init_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;

/* Emit .symver directives binding g<fn>_30 to <fn>@@OMP_3.0 (default
   version) and g<fn>_25 to <fn>@OMP_1.0 (compat version).  */
# define omp_lock_symver(fn) \
  __asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \
  __asm (".symver g" #fn "_25, " #fn "@OMP_1.0");
#else
# define gomp_init_lock_30 omp_init_lock
# define gomp_destroy_lock_30 omp_destroy_lock
# define gomp_set_lock_30 omp_set_lock
# define gomp_unset_lock_30 omp_unset_lock
# define gomp_test_lock_30 omp_test_lock
# define gomp_init_nest_lock_30 omp_init_nest_lock
# define gomp_destroy_nest_lock_30 omp_destroy_nest_lock
# define gomp_set_nest_lock_30 omp_set_nest_lock
# define gomp_unset_nest_lock_30 omp_unset_nest_lock
# define gomp_test_nest_lock_30 omp_test_nest_lock
#endif
1270 | |
/* Mark internal symbols as hidden where the compiler supports it.  */
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# define attribute_hidden __attribute__ ((visibility ("hidden")))
#else
# define attribute_hidden
#endif

/* attribute "copy" (GCC 9+) propagates attributes from the aliased
   declaration to its alias; used by the alias macros below.  */
#if __GNUC__ >= 9
# define HAVE_ATTRIBUTE_COPY
#endif

#ifdef HAVE_ATTRIBUTE_COPY
# define attribute_copy(arg) __attribute__ ((copy (arg)))
#else
# define attribute_copy(arg)
#endif
1286 | |
#ifdef HAVE_ATTRIBUTE_ALIAS
/* strong_alias: export AL as an alias of FN.
   ialias: create a hidden alias gomp_ialias_<fn> of FN so internal
   calls can bind locally; ialias_redirect makes references to FN
   resolve to that hidden alias (prefixed with the platform's user
   label prefix); ialias_call spells the alias name at a call site.  */
# define strong_alias(fn, al) \
  extern __typeof (fn) al __attribute__ ((alias (#fn))) attribute_copy (fn);

# define ialias_ulp	ialias_str1(__USER_LABEL_PREFIX__)
# define ialias_str1(x)	ialias_str2(x)
# define ialias_str2(x)	#x
# define ialias(fn) \
  extern __typeof (fn) gomp_ialias_##fn \
    __attribute__ ((alias (#fn))) attribute_hidden attribute_copy (fn);
# define ialias_redirect(fn) \
  extern __typeof (fn) fn __asm__ (ialias_ulp "gomp_ialias_" #fn) attribute_hidden;
# define ialias_call(fn) gomp_ialias_ ## fn
#else
# define ialias(fn)
# define ialias_redirect(fn)
# define ialias_call(fn) fn
#endif
1305 | |
111 | 1306 /* Helper function for priority_node_to_task() and |
1307 task_to_priority_node(). | |
1308 | |
1309 Return the offset from a task to its priority_node entry. The | |
1310 priority_node entry is has a type of TYPE. */ | |
1311 | |
1312 static inline size_t | |
1313 priority_queue_offset (enum priority_queue_type type) | |
1314 { | |
1315 return offsetof (struct gomp_task, pnode[(int) type]); | |
1316 } | |
1317 | |
1318 /* Return the task associated with a priority NODE of type TYPE. */ | |
1319 | |
1320 static inline struct gomp_task * | |
1321 priority_node_to_task (enum priority_queue_type type, | |
1322 struct priority_node *node) | |
1323 { | |
1324 return (struct gomp_task *) ((char *) node - priority_queue_offset (type)); | |
1325 } | |
1326 | |
1327 /* Return the priority node of type TYPE for a given TASK. */ | |
1328 | |
1329 static inline struct priority_node * | |
1330 task_to_priority_node (enum priority_queue_type type, | |
1331 struct gomp_task *task) | |
1332 { | |
1333 return (struct priority_node *) ((char *) task | |
1334 + priority_queue_offset (type)); | |
1335 } | |
145 | 1336 |
#ifdef LIBGOMP_USE_PTHREADS
/* Return a handle identifying the calling thread.  */
static inline gomp_thread_handle
gomp_thread_self (void)
{
  return pthread_self ();
}

/* Return the pthread_t of the thread whose libgomp state is THR.  */
static inline gomp_thread_handle
gomp_thread_to_pthread_t (struct gomp_thread *thr)
{
  struct gomp_thread *this_thr = gomp_thread ();
  if (thr == this_thr)
    return pthread_self ();
#ifdef GOMP_NEEDS_THREAD_HANDLE
  return thr->handle;
#else
  /* On Linux with initial-exec TLS, the pthread_t of the thread containing
     thr can be computed from thr, this_thr and pthread_self (),
     as the distance between this_thr and pthread_self () is constant.
     NOTE: this relies on pthread_t being an integer/pointer-sized
     scalar on such targets.  */
  return pthread_self () + ((uintptr_t) thr - (uintptr_t) this_thr);
#endif
}
#else
/* Threadless configuration: handles are empty dummies.  */
static inline gomp_thread_handle
gomp_thread_self (void)
{
  return (gomp_thread_handle) {};
}

static inline gomp_thread_handle
gomp_thread_to_pthread_t (struct gomp_thread *thr)
{
  (void) thr;
  return gomp_thread_self ();
}
#endif
1373 | |
0 | 1374 #endif /* LIBGOMP_H */ |