//===-- tsan_rtl_thread.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(int tid)
    : ThreadContextBase(tid)
    , thr()
    , sync()
    , epoch0()
    , epoch1() {
}

#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif

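// Called by the registry when the context is moved to the dead list.
// All sync state must have been consumed by a join or detach by now.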
void ThreadContext::OnDead() {
  CHECK_EQ(sync.size(), 0);
}

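// Join: the joining thread acquires this thread's clock so that
// everything the finished thread did happens-before the join.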
void ThreadContext::OnJoined(void *arg) {
  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
  AcquireImpl(caller_thr, 0, &sync);
  sync.Reset(&caller_thr->proc()->clock_cache);
}

struct OnCreatedArgs {
  ThreadState *thr;
  uptr pc;
};

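// Thread creation: the parent releases its clock into this context's
// sync clock; the child acquires it in OnStarted(), which establishes
// the create->start happens-before edge.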
void ThreadContext::OnCreated(void *arg) {
  thr = 0;
  if (tid == 0)
    return;
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  if (!args->thr)  // GCD workers don't have a parent thread.
    return;
  args->thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(args->thr, 0, &sync);
  creation_stack_id = CurrentStackId(args->thr, args->pc);
  if (reuse_count == 0)
    StatInc(args->thr, StatThreadMaxTid);
}

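// The context is recycled for a fresh thread, so the trace memory of
// the previous incarnation can be returned to the OS.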
void ThreadContext::OnReset() {
  CHECK_EQ(sync.size(), 0);
  uptr trace_p = GetThreadTrace(tid);
  ReleaseMemoryPagesToOS(trace_p, trace_p + TraceSize() * sizeof(Event));
  //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
}

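// Detach: the thread will never be joined, so the clock stored in sync
// will not be consumed; drop it.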
void ThreadContext::OnDetached(void *arg) {
  ThreadState *thr1 = static_cast<ThreadState*>(arg);
  sync.Reset(&thr1->proc()->clock_cache);
}

struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};

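// Thread start: constructs the ThreadState in place for the new
// incarnation and acquires the creator's clock released in OnCreated().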
void ThreadContext::OnStarted(void *arg) {
  OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
  thr = args->thr;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
  epoch1 = (u64)-1;
  new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
      args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
#if !SANITIZER_GO
  thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
#else
  // Set up the dynamic shadow stack.
  const int kInitStackSize = 8;
  thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
      kInitStackSize * sizeof(uptr));
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
  if (common_flags()->detect_deadlocks)
    thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
  thr->fast_state.SetHistorySize(flags()->history_size);
  // Commit switch to the new part of the trace.
  // TraceAddEvent will reset stack0/mset0 in the new part for us.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);

  thr->fast_synch_epoch = epoch0;
  AcquireImpl(thr, 0, &sync);
  StatInc(thr, StatSyncAcquire);
  sync.Reset(&thr->proc()->clock_cache);
  thr->is_inited = true;
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
          "tls_addr=%zx tls_size=%zx\n",
          tid, (uptr)epoch0, args->stk_addr, args->stk_size,
          args->tls_addr, args->tls_size);
}

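// Thread finish: for joinable threads, releases the final clock into
// sync (consumed by OnJoined()), then tears down the ThreadState.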
void ThreadContext::OnFinished() {
#if SANITIZER_GO
  internal_free(thr->shadow_stack);
  thr->shadow_stack = nullptr;
  thr->shadow_stack_pos = nullptr;
  thr->shadow_stack_end = nullptr;
#endif
  if (!detached) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    ReleaseImpl(thr, 0, &sync);
  }
  epoch1 = thr->fast_state.epoch();

  if (common_flags()->detect_deadlocks)
    ctx->dd->DestroyLogicalThread(thr->dd_lt);
  thr->clock.ResetCached(&thr->proc()->clock_cache);
#if !SANITIZER_GO
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
#endif
#if TSAN_COLLECT_STATS
  // Aggregate the stats before the ThreadState is destroyed, not after.
  StatAggregate(ctx->stat, thr->stat);
#endif
  thr->~ThreadState();
  thr = 0;
}

#if !SANITIZER_GO
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

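// Registry callback that collects finished but not joined threads.
// Leaks are deduplicated by creation stack so that many threads leaked
// from the same spot produce a single report with a count.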
static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
  Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  ThreadLeak leak = {tctx, 1};
  leaks.PushBack(leak);
}
#endif

#if !SANITIZER_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  if (tctx->tid == 0) {
    Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  } else {
    Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
           " created at:\n", tctx->tid, tctx->name);
    PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  }
  Printf("  One of the following ignores was not ended"
         " (in order of probability)\n");
  for (uptr i = 0; i < set->Size(); i++) {
    Printf("  Ignore was enabled at:\n");
    PrintStack(SymbolizeStackId(set->At(i)));
  }
  Die();
}

static void ThreadCheckIgnore(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
#else
static void ThreadCheckIgnore(ThreadState *thr) {}
#endif

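// Called once at process shutdown: checks that the calling thread has
// no ignores left enabled and reports leaked (never joined) threads.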
void ThreadFinalize(ThreadState *thr) {
  ThreadCheckIgnore(thr);
#if !SANITIZER_GO
  if (!flags()->report_thread_leaks)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  Vector<ThreadLeak> leaks;
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      MaybeReportThreadLeak, &leaks);
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx, true);
    rep.SetCount(leaks[i].count);
    OutputReport(thr, rep);
  }
#endif
}

int ThreadCount(ThreadState *thr) {
  uptr result;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
  return (int)result;
}

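// Registers a new thread with the registry and returns its tid.
// uid is the user-level id (e.g. pthread_t); thr is null for GCD
// workers, which have no parent thread.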
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  StatInc(thr, StatThreadCreate);
  OnCreatedArgs args = { thr, pc };
  u32 parent_tid = thr ? thr->tid : kInvalidTid;  // No parent for GCD workers.
  int tid =
      ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
  StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
  return tid;
}

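// Attaches the calling OS thread (or fiber) to the freshly created tid.
// Stack and TLS ranges get imitated writes so that stale shadow values
// left by a previous user of the memory do not produce false races.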
void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
                 ThreadType thread_type) {
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
#if !SANITIZER_GO
  if (thread_type != ThreadType::Fiber)
    GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

  if (tid) {
    if (stk_addr && stk_size)
      MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);

    if (tls_addr && tls_size) ImitateTlsWrite(thr, tls_addr, tls_size);
  }
#endif

  ThreadRegistry *tr = ctx->thread_registry;
  OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
  tr->StartThread(tid, os_id, thread_type, &args);

  tr->Lock();
  thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
  tr->Unlock();

#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, 0);
    ThreadIgnoreSyncBegin(thr, 0);
  }
#endif
}

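// Finishes the current thread: its stack/TLS shadow is returned to the
// OS, and the registry fires ThreadContext::OnFinished().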
void ThreadFinish(ThreadState *thr) {
  ThreadCheckIgnore(thr);
  StatInc(thr, StatThreadFinish);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_dead = true;
  ctx->thread_registry->FinishThread(thr->tid);
}

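// Registry predicate: finds a live context by user id and consumes the
// id, so that a pthread_t reused by the system cannot match twice.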
static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
  uptr uid = (uptr)arg;
  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
    tctx->user_id = 0;
    return true;
  }
  return false;
}

int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
  int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
  return res;
}

void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  ctx->thread_registry->JoinThread(tid, thr);
}

void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  ctx->thread_registry->DetachThread(tid, thr);
}

void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  ctx->thread_registry->SetThreadUserId(tid, uid);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  ctx->thread_registry->SetThreadName(thr->tid, name);
}

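// Instruments a [addr, addr+size) range access as a series of shadow
// updates: a byte-granular unaligned head, whole 8-byte shadow cells in
// the middle, and a byte-granular tail (the three loops below).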
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
    return;

  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
      thr->tid, (void*)pc, (void*)addr,
      (int)size, is_write);

#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%zx)\n",
           shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }
#endif

  StatInc(thr, StatMopRange);

  if (*shadow_mem == kShadowRodata) {
    DCHECK(!is_write);
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMopRangeRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
}

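// Fiber support. A fiber is modeled as a full ThreadState; switching
// fibers rewires the current Processor and the cur_thread pointer.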
#if !SANITIZER_GO
void FiberSwitchImpl(ThreadState *from, ThreadState *to) {
  Processor *proc = from->proc();
  ProcUnwire(proc, from);
  ProcWire(proc, to);
  set_cur_thread(to);
}

ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags) {
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadState));
  ThreadState *fiber = static_cast<ThreadState *>(mem);
  internal_memset(fiber, 0, sizeof(*fiber));
  int tid = ThreadCreate(thr, pc, 0, true);
  FiberSwitchImpl(thr, fiber);
  ThreadStart(fiber, tid, 0, ThreadType::Fiber);
  FiberSwitchImpl(fiber, thr);
  return fiber;
}

void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber) {
  FiberSwitchImpl(thr, fiber);
  ThreadFinish(fiber);
  FiberSwitchImpl(fiber, thr);
  internal_free(fiber);
}

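// Unless FiberSwitchFlagNoSync is passed, the switch itself
// synchronizes the two fibers via a release/acquire pair on the fiber
// address.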
void FiberSwitch(ThreadState *thr, uptr pc,
                 ThreadState *fiber, unsigned flags) {
  if (!(flags & FiberSwitchFlagNoSync))
    Release(thr, pc, (uptr)fiber);
  FiberSwitchImpl(thr, fiber);
  if (!(flags & FiberSwitchFlagNoSync))
    Acquire(fiber, pc, (uptr)fiber);
}
#endif

}  // namespace __tsan