//=-- lsan_common.h -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Private LSan header.
//
//===----------------------------------------------------------------------===//
12
|
|
13 #ifndef LSAN_COMMON_H
|
|
14 #define LSAN_COMMON_H
|
|
15
|
|
16 #include "sanitizer_common/sanitizer_allocator.h"
|
|
17 #include "sanitizer_common/sanitizer_common.h"
|
|
18 #include "sanitizer_common/sanitizer_internal_defs.h"
|
|
19 #include "sanitizer_common/sanitizer_platform.h"
|
|
20 #include "sanitizer_common/sanitizer_stoptheworld.h"
|
|
21 #include "sanitizer_common/sanitizer_symbolizer.h"
|
|
22
|
|
// LeakSanitizer relies on some Glibc's internals (e.g. TLS machinery) thus
// supported for Linux only. Also, LSan doesn't like 32 bit architectures
// because of "small" (4 bytes) pointer size that leads to high false negative
// ratio on large leaks. But we still want to have it for some 32 bit arches
// (e.g. x86), see https://github.com/google/sanitizers/issues/403.
// To enable LeakSanitizer on new architecture, one need to implement
// internal_clone function as well as (probably) adjust TLS machinery for
// new architecture inside sanitizer library.
#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
    (SANITIZER_WORDSIZE == 64) &&                                \
    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
     defined(__powerpc64__))
#define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && \
    (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
// 32-bit x86 is supported despite the higher false-negative risk noted above.
#define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && \
    SANITIZER_LINUX && !SANITIZER_ANDROID
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
#endif

|
|
// Forward declarations of sanitizer-common types used by the interface below.
namespace __sanitizer {
class FlagParser;
struct DTLS;
}

|
|
51 namespace __lsan {
|
|
52
|
|
53 // Chunk tags.
|
|
54 enum ChunkTag {
|
|
55 kDirectlyLeaked = 0, // default
|
|
56 kIndirectlyLeaked = 1,
|
|
57 kReachable = 2,
|
|
58 kIgnored = 3
|
|
59 };
|
|
60
|
|
61 const u32 kInvalidTid = (u32) -1;
|
|
62
|
|
63 struct Flags {
|
|
64 #define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
|
|
65 #include "lsan_flags.inc"
|
|
66 #undef LSAN_FLAG
|
|
67
|
|
68 void SetDefaults();
|
|
69 uptr pointer_alignment() const {
|
|
70 return use_unaligned ? 1 : sizeof(uptr);
|
|
71 }
|
|
72 };
|
|
73
|
|
74 extern Flags lsan_flags;
|
|
75 inline Flags *flags() { return &lsan_flags; }
|
|
76 void RegisterLsanFlags(FlagParser *parser, Flags *f);
|
|
77
|
|
78 struct Leak {
|
|
79 u32 id;
|
|
80 uptr hit_count;
|
|
81 uptr total_size;
|
|
82 u32 stack_trace_id;
|
|
83 bool is_directly_leaked;
|
|
84 bool is_suppressed;
|
|
85 };
|
|
86
|
|
87 struct LeakedObject {
|
|
88 u32 leak_id;
|
|
89 uptr addr;
|
|
90 uptr size;
|
|
91 };
|
|
92
|
|
93 // Aggregates leaks by stack trace prefix.
|
|
94 class LeakReport {
|
|
95 public:
|
|
96 LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {}
|
|
97 void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
|
|
98 ChunkTag tag);
|
|
99 void ReportTopLeaks(uptr max_leaks);
|
|
100 void PrintSummary();
|
|
101 void ApplySuppressions();
|
|
102 uptr UnsuppressedLeakCount();
|
|
103
|
|
104
|
|
105 private:
|
|
106 void PrintReportForLeak(uptr index);
|
|
107 void PrintLeakedObjectsForLeak(uptr index);
|
|
108
|
|
109 u32 next_id_;
|
|
110 InternalMmapVector<Leak> leaks_;
|
|
111 InternalMmapVector<LeakedObject> leaked_objects_;
|
|
112 };
|
|
113
|
|
114 typedef InternalMmapVector<uptr> Frontier;
|
|
115
|
|
116 // Platform-specific functions.
|
|
117 void InitializePlatformSpecificModules();
|
|
118 void ProcessGlobalRegions(Frontier *frontier);
|
|
119 void ProcessPlatformSpecificAllocations(Frontier *frontier);
|
|
120
|
|
121 struct RootRegion {
|
|
122 uptr begin;
|
|
123 uptr size;
|
|
124 };
|
|
125
|
|
126 InternalMmapVector<RootRegion> const *GetRootRegions();
|
|
127 void ScanRootRegion(Frontier *frontier, RootRegion const ®ion,
|
|
128 uptr region_begin, uptr region_end, bool is_readable);
|
|
129 // Run stoptheworld while holding any platform-specific locks.
|
|
130 void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
|
|
131
|
|
132 void ScanRangeForPointers(uptr begin, uptr end,
|
|
133 Frontier *frontier,
|
|
134 const char *region_type, ChunkTag tag);
|
|
135 void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
|
|
136
|
|
// Outcome of an attempt to ignore an object via __lsan_ignore_object().
enum IgnoreObjectResult {
  kIgnoreObjectSuccess,
  kIgnoreObjectAlreadyIgnored,
  kIgnoreObjectInvalid
};

|
|
// Functions called from the parent tool.
const char *MaybeCallLsanDefaultOptions();
void InitCommonLsan();
void DoLeakCheck();
void DoRecoverableLeakCheckVoid();
void DisableCounterUnderflow();
bool DisabledInThisThread();

// Used to implement __lsan::ScopedDisabler.
void DisableInThisThread();
void EnableInThisThread();
|
|
154 // Can be used to ignore memory allocated by an intercepted
|
|
155 // function.
|
|
156 struct ScopedInterceptorDisabler {
|
|
157 ScopedInterceptorDisabler() { DisableInThisThread(); }
|
|
158 ~ScopedInterceptorDisabler() { EnableInThisThread(); }
|
|
159 };
|
|
160
|
|
161 // According to Itanium C++ ABI array cookie is a one word containing
|
|
162 // size of allocated array.
|
|
163 static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
|
|
164 uptr addr) {
|
|
165 return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
|
|
166 *reinterpret_cast<uptr *>(chunk_beg) == 0;
|
|
167 }
|
|
168
|
|
169 // According to ARM C++ ABI array cookie consists of two words:
|
|
170 // struct array_cookie {
|
|
171 // std::size_t element_size; // element_size != 0
|
|
172 // std::size_t element_count;
|
|
173 // };
|
|
174 static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
|
|
175 uptr addr) {
|
|
176 return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
|
|
177 *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
|
|
178 }
|
|
179
|
|
180 // Special case for "new T[0]" where T is a type with DTOR.
|
|
181 // new T[0] will allocate a cookie (one or two words) for the array size (0)
|
|
182 // and store a pointer to the end of allocated chunk. The actual cookie layout
|
|
183 // varies between platforms according to their C++ ABI implementation.
|
|
184 inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
|
|
185 uptr addr) {
|
|
186 #if defined(__arm__)
|
|
187 return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
|
|
188 #else
|
|
189 return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
|
|
190 #endif
|
|
191 }
|
|
192
|
|
193 // The following must be implemented in the parent tool.
|
|
194
|
|
195 void ForEachChunk(ForEachChunkCallback callback, void *arg);
|
|
196 // Returns the address range occupied by the global allocator object.
|
|
197 void GetAllocatorGlobalRange(uptr *begin, uptr *end);
|
|
198 // Wrappers for allocator's ForceLock()/ForceUnlock().
|
|
199 void LockAllocator();
|
|
200 void UnlockAllocator();
|
|
201 // Returns true if [addr, addr + sizeof(void *)) is poisoned.
|
|
202 bool WordIsPoisoned(uptr addr);
|
|
203 // Wrappers for ThreadRegistry access.
|
|
204 void LockThreadRegistry();
|
|
205 void UnlockThreadRegistry();
|
|
206 bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
|
|
207 uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
|
|
208 uptr *cache_end, DTLS **dtls);
|
|
209 void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
|
|
210 void *arg);
|
|
211 // If called from the main thread, updates the main thread's TID in the thread
|
|
212 // registry. We need this to handle processes that fork() without a subsequent
|
|
213 // exec(), which invalidates the recorded TID. To update it, we must call
|
|
214 // gettid() from the main thread. Our solution is to call this function before
|
|
215 // leak checking and also before every call to pthread_create() (to handle cases
|
|
216 // where leak checking is initiated from a non-main thread).
|
|
217 void EnsureMainThreadIDIsCorrect();
|
|
218 // If p points into a chunk that has been allocated to the user, returns its
|
|
219 // user-visible address. Otherwise, returns 0.
|
|
220 uptr PointsIntoChunk(void *p);
|
|
221 // Returns address of user-visible chunk contained in this allocator chunk.
|
|
222 uptr GetUserBegin(uptr chunk);
|
|
223 // Helper for __lsan_ignore_object().
|
|
224 IgnoreObjectResult IgnoreObjectLocked(const void *p);
|
|
225
|
|
226 // Return the linker module, if valid for the platform.
|
|
227 LoadedModule *GetLinker();
|
|
228
|
|
229 // Return true if LSan has finished leak checking and reported leaks.
|
|
230 bool HasReportedLeaks();
|
|
231
|
|
232 // Run platform-specific leak handlers.
|
|
233 void HandleLeaks();
|
|
234
|
|
235 // Wrapper for chunk metadata operations.
|
|
236 class LsanMetadata {
|
|
237 public:
|
|
238 // Constructor accepts address of user-visible chunk.
|
|
239 explicit LsanMetadata(uptr chunk);
|
|
240 bool allocated() const;
|
|
241 ChunkTag tag() const;
|
|
242 void set_tag(ChunkTag value);
|
|
243 uptr requested_size() const;
|
|
244 u32 stack_trace_id() const;
|
|
245 private:
|
|
246 void *metadata_;
|
|
247 };
|
|
248
|
|
249 } // namespace __lsan
|
|
250
|
|
251 extern "C" {
|
|
252 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
|
|
253 const char *__lsan_default_options();
|
|
254
|
|
255 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
|
|
256 int __lsan_is_turned_off();
|
|
257
|
|
258 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
|
|
259 const char *__lsan_default_suppressions();
|
|
260 } // extern "C"
|
|
261
|
|
262 #endif // LSAN_COMMON_H
|