//=-- lsan_common.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

Flags lsan_flags;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0)

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0)
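
// Pointer and thread logging are off by default; both channels can be enabled
// at run time through LSAN_OPTIONS, e.g.
// LSAN_OPTIONS=log_pointers=1:log_threads=1.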

ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
static const char kStdSuppressions[] =
#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#if SANITIZER_MAC
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
#endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
  suppression_ctx->Parse(kStdSuppressions);
}
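
// Each suppression rule has the form "leak:<pattern>", where <pattern> is
// matched (with '*' wildcards) against module, function, and file names; see
// kStdSuppressions above for examples. A user-provided suppressions file
// could hypothetically contain:
//   leak:libfoo.so
//   leak:*LeakyHelper*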

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

static InternalMmapVector<RootRegion> *root_regions;

InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new (placeholder) InternalMmapVector<RootRegion>();
}

const char *MaybeCallLsanDefaultOptions() {
  return (&__lsan_default_options) ? __lsan_default_options() : "";
}
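
// __lsan_default_options() is a weak hook (a default definition appears at
// the bottom of this file); an application can override it to bake in
// defaults. A hypothetical sketch:
//   extern "C" const char *__lsan_default_options() { return "verbosity=1"; }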

void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#elif defined(__aarch64__)
  unsigned runtimeVMA =
      (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#else
  return true;
#endif
}
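
// For example, on x86_64 with 48-bit virtual addresses the kernel owns the
// upper half of the address space, so a canonical user-space pointer has bits
// 47..63 clear and (p >> 47) == 0 holds; small integers below kMinAddress and
// kernel addresses are rejected cheaply before the allocator lookup.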

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}
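
// The round-up above aligns the scan cursor to the configured pointer
// alignment before the first read. For example, with begin = 0x1003 and
// alignment = 8, pp starts at 0x1008 = 0x1003 + 8 - (0x1003 % 8), so every
// dereference of pp reads a naturally aligned word.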

// Scans a global range for pointers
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}
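
// The allocator's own global state is carved out of the scanned range above
// so that pointers the allocator keeps for internal bookkeeping do not
// spuriously anchor user chunks as reachable.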

void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end =
      reinterpret_cast<uptr>(registers.data() + registers.size());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, stack_begin, stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
        // If the tls and cache ranges don't overlap, scan full tls range,
        // otherwise, only scan the non-overlapping portions
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
      if (dtls && !DTLSInDestruction(dtls)) {
        for (uptr j = 0; j < dtls->dtv_size; ++j) {
          uptr dtls_beg = dtls->dtv[j].beg;
          uptr dtls_end = dtls_beg + dtls->dtv[j].size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        }
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
      }
    }
  }
}
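
// Note: |cache_begin|..|cache_end| delimit the thread's allocator cache
// within the TLS block; it is skipped by the TLS scan above so that pointers
// the allocator retains to free blocks are not treated as live references.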

void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
                    uptr region_begin, uptr region_end, bool is_readable) {
  uptr intersection_begin = Max(root_region.begin, region_begin);
  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
  if (intersection_begin >= intersection_end) return;
  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
               root_region.begin, root_region.begin + root_region.size,
               region_begin, region_end,
               is_readable ? "readable" : "unreadable");
  if (is_readable)
    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
                         kReachable);
}

static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    ScanRootRegion(frontier, root_region, segment.start, segment.end,
                   segment.IsReadable());
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    ProcessRootRegion(frontier, (*root_regions)[i]);
  }
}
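
// Root regions are extra roots registered explicitly through the public API
// defined near the bottom of this file. Hypothetical usage: a program with a
// custom arena could call
//   __lsan_register_root_region(arena_base, arena_size);
// so that pointers stored in the arena keep their targets alive during the
// leak check.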

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}
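
// |frontier| acts as a LIFO worklist: every chunk popped here was tagged by
// an earlier ScanRangeForPointers call, and scanning it may push further
// chunks. Already-tagged chunks are skipped during scanning, so each chunk is
// pushed at most once and the loop terminates after the transitive closure of
// reachable chunks has been tagged.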

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

struct InvalidPCParam {
  Frontier *frontier;
  StackDepotReverseMap *stack_depot_reverse_map;
  bool skip_linker_allocations;
};

// ForEachChunk callback. If the caller pc is invalid or is within the linker,
// mark as reachable. Called by ProcessPlatformSpecificAllocations.
static void MarkInvalidPCCb(uptr chunk, void *arg) {
  CHECK(arg);
  InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)
      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
    // it as reachable, as we can't properly report its allocation stack anyway.
    if (caller_pc == 0 || (param->skip_linker_allocations &&
                           GetLinker()->containsAddress(caller_pc))) {
      m.set_tag(kReachable);
      param->frontier->push_back(chunk);
    }
  }
}

// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, the dynamic loader's internal
// accounting of loaded modules, etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
void ProcessPC(Frontier *frontier) {
  StackDepotReverseMap stack_depot_reverse_map;
  InvalidPCParam arg;
  arg.frontier = frontier;
  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
  arg.skip_linker_allocations =
      flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
  ForEachChunk(MarkInvalidPCCb, &arg);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier;

  ForEachChunk(CollectIgnoredCb, &frontier);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  CHECK_EQ(0, frontier.size());
  ProcessPC(&frontier);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}
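
// After ClassifyAllChunks completes, every allocated chunk carries one of
// three tags: kReachable (found from a root), kIgnored (excluded via
// __lsan_ignore_object), or a leak tag: kDirectlyLeaked when no live pointer
// reaches it, kIndirectlyLeaked when it is only reachable from other leaked
// chunks (e.g. a leaked list head keeping its nodes alive).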

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}
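
// When the "resolution" flag is set to N > 0, the allocation stack is
// truncated to its top N frames before being re-interned in the stack depot.
// Leaks whose stacks share the same N-frame prefix then collapse into a
// single report entry, trading report precision for brevity.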

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct CheckForLeaksParam {
  bool success;
  LeakReport leak_report;
};

static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
  const InternalMmapVector<tid_t> &suspended_threads =
      *(const InternalMmapVector<tid_t> *)arg;
  if (tctx->status == ThreadStatusRunning) {
    uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
                                tctx->os_id, CompareLess<int>());
    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
      Report("Running thread %d was not suspended. False leaks are possible.\n",
             tctx->os_id);
  }
}

static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      &ReportIfNotSuspended, &threads);
}
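
// The thread IDs are sorted once so that ReportIfNotSuspended can use a
// binary search (InternalLowerBound) per registered thread instead of a
// linear scan over the suspended list.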

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  param.success = false;
  LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Report(
        "HINT: For debugging, try setting environment variable "
        "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
    Report(
        "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    return true;
  }
  return false;
}

static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks) HandleLeaks();
}

static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
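
// DoLeakCheck runs at most once per process and hands any reported leaks to
// the platform-specific HandleLeaks (which typically terminates the process
// when the exitcode flag is non-zero); DoRecoverableLeakCheck may be invoked
// repeatedly and only reports, backing __lsan_do_recoverable_leak_check()
// below.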

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return nullptr;
}
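
// GetPreviousInstructionPc is used because every non-top frame in a stack
// trace holds a return address, which points to the instruction after the
// call; stepping back ensures the symbolizer attributes the frame to the
// call site itself.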

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}
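
// Chunks are deduplicated by (stack_trace_id, is_directly_leaked): repeated
// allocations leaking from the same call site fold into one Leak record with
// an incremented hit_count and an accumulated total_size. The linear search
// makes each insertion O(number of distinct leaks), which kMaxLeaksConsidered
// caps at 5000, as the FIXME above notes.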

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}
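
// LeakComparator orders direct leaks before indirect ones, and within each
// kind sorts by descending total_size, so the report leads with the leaks
// that waste the most memory outright.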

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
                                              leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

} // namespace __lsan
#else // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DoRecoverableLeakCheckVoid() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
}
#endif // CAN_SANITIZE_LEAKS

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
               "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}
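
// Hypothetical usage from application code: to silence the report for an
// object that is intentionally never freed, call
//   __lsan_ignore_object(singleton);
// after allocating it. The chunk is tagged kIgnored and also serves as a
// root, so anything reachable from it is not reported either.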

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %llu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif // CAN_SANITIZE_LEAKS
}
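
// Unregistration uses swap-with-last removal: the matched entry is replaced
// by the final element and the vector shrinks by one, giving O(1) removal at
// the cost of not preserving registration order (which nothing here relies
// on).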

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
  return 0;
}
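
// __lsan_disable() and __lsan_enable() adjust a per-thread disable counter
// and must be paired; an unmatched __lsan_enable() trips
// DisableCounterUnderflow() above. Allocations made while disabled are
// treated as ignored. A hypothetical usage sketch:
//   __lsan_disable();
//   cached_buffer = malloc(size);  // intentionally held, not reported
//   __lsan_enable();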

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char * __lsan_default_options() {
  return "";
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions() {
  return "";
}
#endif
} // extern "C"