comparison libgomp/config/linux/bar.c @ 111:04ced10e8804

gcc 7
author kono
date Fri, 27 Oct 2017 22:46:09 +0900
parents a06113de4d67
children 84e7813d76e9
comparing 68:561a7518be6b with 111:04ced10e8804

-/* Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc.
+/* Copyright (C) 2005-2017 Free Software Foundation, Inc.
    Contributed by Richard Henderson <rth@redhat.com>.
 
-   This file is part of the GNU OpenMP Library (libgomp).
+   This file is part of the GNU Offloading and Multi Processing Library
+   (libgomp).
 
    Libgomp is free software; you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 3, or (at your option)
    any later version.
[...]
 
 void
 gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
 {
-  if (__builtin_expect ((state & 1) != 0, 0))
+  if (__builtin_expect (state & BAR_WAS_LAST, 0))
     {
       /* Next time we'll be awaiting TOTAL threads again.  */
       bar->awaited = bar->total;
-      atomic_write_barrier ();
-      bar->generation += 4;
+      __atomic_store_n (&bar->generation, bar->generation + BAR_INCR,
+                        MEMMODEL_RELEASE);
       futex_wake ((int *) &bar->generation, INT_MAX);
     }
   else
     {
-      unsigned int generation = state;
-
       do
-        do_wait ((int *) &bar->generation, generation);
-      while (bar->generation == generation);
+        do_wait ((int *) &bar->generation, state);
+      while (__atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE) == state);
     }
 }
 
 void
 gomp_barrier_wait (gomp_barrier_t *bar)
[...]
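For orientation: this is the classic centralized generation barrier over a futex. Arriving threads are counted down elsewhere (gomp_barrier_wait_start, not in this hunk); the last one re-arms the count, advances bar->generation with a release store and futex-wakes everyone, while the others sleep until the generation word no longer equals the value they captured on entry. A minimal standalone sketch of the same scheme, assuming C11 atomics and the raw Linux futex syscall (toy_barrier_t and its helpers are illustrative names, not libgomp's):

#include <limits.h>
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

typedef struct
{
  _Atomic unsigned generation;   /* advanced once per completed round */
  _Atomic unsigned awaited;      /* arrivals still missing this round */
  unsigned total;                /* team size */
} toy_barrier_t;

static void
toy_barrier_init (toy_barrier_t *bar, unsigned nthreads)
{
  atomic_init (&bar->generation, 0);
  atomic_init (&bar->awaited, nthreads);
  bar->total = nthreads;
}

static void
toy_futex_wait (_Atomic unsigned *addr, unsigned expected)
{
  /* Blocks only while *addr still equals EXPECTED.  */
  syscall (SYS_futex, addr, FUTEX_WAIT, expected, NULL, NULL, 0);
}

static void
toy_futex_wake_all (_Atomic unsigned *addr)
{
  syscall (SYS_futex, addr, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
}

void
toy_barrier_wait (toy_barrier_t *bar)
{
  unsigned gen = atomic_load_explicit (&bar->generation, memory_order_acquire);

  if (atomic_fetch_sub_explicit (&bar->awaited, 1, memory_order_acq_rel) == 1)
    {
      /* Last arrival: re-arm the count, start the next generation, wake.  */
      atomic_store_explicit (&bar->awaited, bar->total, memory_order_relaxed);
      atomic_store_explicit (&bar->generation, gen + 1, memory_order_release);
      toy_futex_wake_all (&bar->generation);
    }
  else
    {
      /* Sleep until the generation moves on; re-check after every wakeup.  */
      while (atomic_load_explicit (&bar->generation, memory_order_acquire)
             == gen)
        toy_futex_wait (&bar->generation, gen);
    }
}

The release store pairs with the acquire loads so that writes made before the barrier are visible after it, and the loop absorbs spurious futex wakeups. libgomp layers more on top of this skeleton: do_wait spins adaptively before sleeping, and the low bits of the generation word double as flag bits, as the team-barrier code below shows.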
 
 void
 gomp_barrier_wait_last (gomp_barrier_t *bar)
 {
   gomp_barrier_state_t state = gomp_barrier_wait_start (bar);
-  if (state & 1)
+  if (state & BAR_WAS_LAST)
     gomp_barrier_wait_end (bar, state);
 }
 
 void
 gomp_team_barrier_wake (gomp_barrier_t *bar, int count)
[...]
 }
 
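The BAR_* names replace the bare magic numbers of the old code and come from the companion bar.h. From how they are used here, with the counter part of bar->generation advancing by BAR_INCR each round, BAR_WAS_LAST or'd into the state handed back to the last arriving thread, and the remaining bits flagging pending tasks and cancellation, a plausible layout keeps the flags in the low bits of the word, something like the following (assumed values for illustration only; the authoritative definitions are in libgomp/config/linux/bar.h):

/* Assumed flag layout, not the real bar.h.  BAR_WAS_LAST can share a bit
   with BAR_TASK_PENDING because the former lives in the state value a
   thread gets from gomp_barrier_wait_start, while the latter lives in the
   shared bar->generation word.  */
#define BAR_TASK_PENDING      1
#define BAR_WAS_LAST          1
#define BAR_WAITING_FOR_TASK  2
#define BAR_CANCELLED         4
#define BAR_INCR              8

With flags kept below BAR_INCR, bumping the generation by BAR_INCR leaves them untouched, and the new BAR_CANCELLED bit is presumably why the old hard-coded increment of 4 could not simply be kept.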
 void
 gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
 {
-  unsigned int generation;
+  unsigned int generation, gen;
 
-  if (__builtin_expect ((state & 1) != 0, 0))
+  if (__builtin_expect (state & BAR_WAS_LAST, 0))
     {
       /* Next time we'll be awaiting TOTAL threads again.  */
       struct gomp_thread *thr = gomp_thread ();
       struct gomp_team *team = thr->ts.team;
+
       bar->awaited = bar->total;
-      atomic_write_barrier ();
+      team->work_share_cancelled = 0;
       if (__builtin_expect (team->task_count, 0))
         {
           gomp_barrier_handle_tasks (state);
-          state &= ~1;
+          state &= ~BAR_WAS_LAST;
         }
       else
         {
-          bar->generation = state + 3;
+          state &= ~BAR_CANCELLED;
+          state += BAR_INCR - BAR_WAS_LAST;
+          __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
           futex_wake ((int *) &bar->generation, INT_MAX);
           return;
         }
     }
 
   generation = state;
+  state &= ~BAR_CANCELLED;
   do
     {
       do_wait ((int *) &bar->generation, generation);
-      if (__builtin_expect (bar->generation & 1, 0))
-        gomp_barrier_handle_tasks (state);
-      if ((bar->generation & 2))
-        generation |= 2;
+      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
+        {
+          gomp_barrier_handle_tasks (state);
+          gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+        }
+      generation |= gen & BAR_WAITING_FOR_TASK;
     }
-  while (bar->generation != state + 4);
+  while (gen != state + BAR_INCR);
 }
 
 void
 gomp_team_barrier_wait (gomp_barrier_t *bar)
 {
   gomp_team_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
 }
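One subtlety in the rewritten wait loop: do_wait presumably bottoms out in a futex wait on &bar->generation with generation as the expected value, and a futex only blocks while the word still holds that value. That is why the new code re-reads gen after handling tasks and folds BAR_WAITING_FOR_TASK into generation: if the expected value went stale (say, only the waiting-for-task bit changed), the kernel would keep returning immediately and the thread would spin instead of sleeping. A tiny standalone, Linux-only demonstration of that compare-and-block rule:

#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int
main (void)
{
  uint32_t word = 42;

  /* Expected value (41) does not match *addr (42): FUTEX_WAIT refuses
     to block and reports EAGAIN instead.  */
  if (syscall (SYS_futex, &word, FUTEX_WAIT, 41, NULL, NULL, 0) == -1)
    printf ("futex with a stale expected value: %s\n", strerror (errno));
  return 0;
}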
+
+void
+gomp_team_barrier_wait_final (gomp_barrier_t *bar)
+{
+  gomp_barrier_state_t state = gomp_barrier_wait_final_start (bar);
+  if (__builtin_expect (state & BAR_WAS_LAST, 0))
+    bar->awaited_final = bar->total;
+  gomp_team_barrier_wait_end (bar, state);
+}
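gomp_team_barrier_wait_final is new in this revision: it behaves like gomp_team_barrier_wait except that the last arriving thread also re-arms a separate awaited_final counter used by gomp_barrier_wait_final_start. In OpenMP terms the team barrier entry points sit behind both the explicit barrier directive and the implicit barrier that closes a parallel region; a trivial program exercising both follows (which libgomp entry point each construct reaches is an assumption here, not something this diff shows):

#include <omp.h>
#include <stdio.h>

int
main (void)
{
#pragma omp parallel num_threads (4)
  {
    printf ("thread %d: before the explicit barrier\n", omp_get_thread_num ());
    /* Explicit team barrier; plausibly serviced by gomp_team_barrier_wait.  */
#pragma omp barrier
    printf ("thread %d: after the explicit barrier\n", omp_get_thread_num ());
    /* The implicit barrier closing the parallel region follows; the final
       team-barrier variant above is a candidate for that one.  */
  }
  return 0;
}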
+
+bool
+gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
+                                   gomp_barrier_state_t state)
+{
+  unsigned int generation, gen;
+
+  if (__builtin_expect (state & BAR_WAS_LAST, 0))
+    {
+      /* Next time we'll be awaiting TOTAL threads again.  */
+      /* BAR_CANCELLED should never be set in state here, because
+         cancellation means that at least one of the threads has been
+         cancelled, thus on a cancellable barrier we should never see
+         all threads to arrive.  */
+      struct gomp_thread *thr = gomp_thread ();
+      struct gomp_team *team = thr->ts.team;
+
+      bar->awaited = bar->total;
+      team->work_share_cancelled = 0;
+      if (__builtin_expect (team->task_count, 0))
+        {
+          gomp_barrier_handle_tasks (state);
+          state &= ~BAR_WAS_LAST;
+        }
+      else
+        {
+          state += BAR_INCR - BAR_WAS_LAST;
+          __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
+          futex_wake ((int *) &bar->generation, INT_MAX);
+          return false;
+        }
+    }
+
+  if (__builtin_expect (state & BAR_CANCELLED, 0))
+    return true;
+
+  generation = state;
+  do
+    {
+      do_wait ((int *) &bar->generation, generation);
+      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+      if (__builtin_expect (gen & BAR_CANCELLED, 0))
+        return true;
+      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
+        {
+          gomp_barrier_handle_tasks (state);
+          gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
+        }
+      generation |= gen & BAR_WAITING_FOR_TASK;
+    }
+  while (gen != state + BAR_INCR);
+
+  return false;
+}
+
+bool
+gomp_team_barrier_wait_cancel (gomp_barrier_t *bar)
+{
+  return gomp_team_barrier_wait_cancel_end (bar, gomp_barrier_wait_start (bar));
+}
+
+void
+gomp_team_barrier_cancel (struct gomp_team *team)
+{
+  gomp_mutex_lock (&team->task_lock);
+  if (team->barrier.generation & BAR_CANCELLED)
+    {
+      gomp_mutex_unlock (&team->task_lock);
+      return;
+    }
+  team->barrier.generation |= BAR_CANCELLED;
+  gomp_mutex_unlock (&team->task_lock);
+  futex_wake ((int *) &team->barrier.generation, INT_MAX);
+}
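The remaining additions wire up cancellation: gomp_team_barrier_cancel sets BAR_CANCELLED under the team's task lock and futex-wakes the whole team, while gomp_team_barrier_wait_cancel and its _end helper return true as soon as that bit is observed so callers can unwind instead of waiting for threads that will never arrive. On the user side this is the machinery behind OpenMP cancellation, which is only honoured when OMP_CANCELLATION=true in the environment. A small example (the exact libgomp entry points these pragmas reach is an assumption here):

#include <omp.h>
#include <stdio.h>

int
main (void)
{
#pragma omp parallel num_threads (4)
  {
#pragma omp for
    for (int i = 0; i < 1000; i++)
      {
        if (i == 10)
          {
            /* Request cancellation of the enclosing for region; only
               honoured when OMP_CANCELLATION=true at program start.  */
#pragma omp cancel for
          }
        /* Threads poll for a pending cancellation here and, if one is
           seen, leave the worksharing region early.  */
#pragma omp cancellation point for
      }
  }
  printf ("cancellation support: %s\n",
          omp_get_cancellation () ? "enabled" : "disabled");
  return 0;
}

Run with OMP_CANCELLATION=true and the team bails out of the loop early at the next cancellation point; without it the cancel request is ignored and every iteration runs.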