libsanitizer/tsan/tsan_rtl_report.cpp @ 145:1830386684a0 (gcc-9.2.0)
author: anatofuz
date: Thu, 13 Feb 2020 11:34:05 +0900

//===-- tsan_rtl_report.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;

static ReportStack *SymbolizeStack(StackTrace trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is a high probability that interceptors will check-fail as well;
  // on the other hand, there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  cur_thread()->ignore_sync++;
  cur_thread()->ignore_reads_and_writes++;
#endif
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}

static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == 0)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

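// Symbolizes each PC in the trace (shifting return addresses back to the
// previous instruction, unless the PC is external) and strips the frames
// above main() or the internal thread start routine from the result.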
static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  ReportStack *stack = ReportStack::New();
  stack->frames = top;
  return stack;
}

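// The report mutex (ctx->report_mtx) is held for the entire lifetime of a
// ScopedReportBase, so at most one report is constructed and printed at a
// time.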
ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
}

ScopedReportBase::~ScopedReportBase() {
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
  rep_ = nullptr;
}

void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

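// Records one racing memory access: tid, address, size and access type are
// decoded from the shadow value; the access stack is symbolized, and the
// mutexes held at the time of the access are attached via AddMutex.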
void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                       StackTrace stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReportBase::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread;
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->thread_type = tctx->thread_type;
  rt->stack = 0;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

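// Thread lookup helpers; all of them require the thread registry lock to be
// held by the caller.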
#if !SANITIZER_GO
static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
  int unique_id = *(int *)arg;
  return tctx->unique_id == (u32)unique_id;
}

static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry->FindThreadContextLocked(
          FindThreadByUidLockedCallback, &unique_id));
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReportBase::AddThread(int unique_tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}

void ScopedReportBase::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

u64 ScopedReportBase::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReportBase::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}

void ScopedReportBase::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  int creat_tid = kInvalidTid;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#if !SANITIZER_GO
void ScopedReportBase::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReportBase::SetCount(int count) { rep_->count = count; }

const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }

ScopedReport::ScopedReport(ReportType typ, uptr tag)
    : ScopedReportBase(typ, tag) {}

ScopedReport::~ScopedReport() {}

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset, uptr *tag) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  Trace* trace = ThreadTrace(tid);
  ReadLock l(&trace->mtx);
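  // The trace is a circular buffer divided into parts of kTracePartSize
  // events; locate the part that should contain the requested epoch and
  // give up if that part has already been reused for newer events.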
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
    return;
  CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  Vector<uptr> stack;
  stack.Resize(hdr->stack0.size + 64);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02zu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> kEventPCBits);
    uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      if (stack.Size() < pos + 2)
        stack.Resize(pos + 2);
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(&stack[0], pos);
  ExtractTagFromStack(stk, tag);
}

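// Returns true if this race duplicates an already-reported one (an identical
// pair of stack hashes, or an overlapping address range, depending on the
// suppress_equal_stacks/suppress_equal_addresses flags) and should therefore
// be suppressed.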
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  {
    ReadLock lock(&ctx->racy_mtx);
    if (flags()->suppress_equal_stacks) {
      hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
      hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
      for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
        if (hash == ctx->racy_stacks[i]) {
          VPrintf(2,
                  "ThreadSanitizer: suppressing report as doubled (stack)\n");
          equal_stack = true;
          break;
        }
      }
    }
    if (flags()->suppress_equal_addresses) {
      for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
        RacyAddress ra2 = ctx->racy_addresses[i];
        uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
        uptr minend = min(ra0.addr_max, ra2.addr_max);
        if (maxbeg < minend) {
          VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
          equal_address = true;
          break;
        }
      }
    }
  }
  if (!equal_stack && !equal_address)
    return false;
  if (!equal_stack) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_stacks.PushBack(hash);
  }
  if (!equal_address) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_addresses.PushBack(ra0);
  }
  return true;
}

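// Remembers the stacks and address range of a race that has just been
// reported, so that HandleRacyStacks can suppress subsequent duplicates.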
static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  Lock lock(&ctx->racy_mtx);
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

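// Matches the finished report against suppressions, lets the OnReport hook
// veto it, then prints it. Returns true if the report was actually printed.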
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

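// Decides whether a race is still reportable when report_atomic_races is
// disabled: only races with no atomic accesses at all, or races between an
// atomic access and a free, pass through.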
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

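// Entry point for race reporting: reconstructs both racing accesses from
// thr->racy_state[] and thr->racy_shadow_addr, restores the second access's
// stack from the thread trace, and emits the report.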
void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, typ, addr))
    return;

  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  uptr tags[kMop] = {kExternalTagNone};
  uptr toppc = TraceTopPC(thr);
  if (toppc >> kEventPCBits) {
    // This is a work-around for a known issue.
    // The scenario where this happens is rather elaborate and requires
    // an instrumented __sanitizer_report_error_summary callback and
    // a __tsan_symbolize_external callback and a race during a range memory
    // access larger than 8 bytes. MemoryAccessRange adds the current PC to
    // the trace and starts processing memory accesses. A first memory access
    // triggers a race, we report it and call the instrumented
    // __sanitizer_report_error_summary, which adds more stuff to the trace
    // since it is instrumented. Then a second memory access in
    // MemoryAccessRange also triggers a race and we get here and call
    // TraceTopPC to get the current PC, however now it contains some
    // unrelated events from the callback. Most likely, TraceTopPC will now
    // return an EventTypeFuncExit event. Later we subtract 1 from it
    // (in GetPreviousInstructionPc) and the resulting PC has kExternalPCBit
    // set, so we pass it to __tsan_symbolize_external_ex.
    // __tsan_symbolize_external_ex is within its rights to crash since
    // the PC is completely bogus.
    // test/tsan/double_race.cpp contains a test case for this.
    toppc = 0;
  }
  ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, typ, traces[0]))
    return;

  // MutexSet is too large to live on stack.
  Vector<u64> mset_buffer;
  mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
  MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();

  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
  if (IsFiredSuppression(ctx, typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ThreadRegistryLock l0(ctx->thread_registry);
  ScopedReport rep(typ, tag);
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, tags[i], s, traces[i],
                        i == 0 ? &thr->mset : mset2);
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#if !SANITIZER_GO
  {
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inline PrintCurrentStackSlow, because LocatePcInTrace assumes that
// __sanitizer_print_stack_trace exists in the actual unwound stack, but a
// tail call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after the tail call.
// However, this solution is not reliable enough; please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comments 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE
void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
  uptr bp = GET_CURRENT_FRAME();
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(pc, bp, nullptr, false);

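  // Unwind fills the buffer with the innermost frame first; reverse it in
  // place so the outermost frame comes first, the order SymbolizeStack and
  // StackStripMain expect.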
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"