145
|
1 //===-- tsan_platform_posix.cpp -------------------------------------------===//
|
|
2 //
|
|
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
4 // See https://llvm.org/LICENSE.txt for license information.
|
|
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
6 //
|
|
7 //===----------------------------------------------------------------------===//
|
|
8 //
|
|
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
|
|
10 //
|
|
11 // POSIX-specific code.
|
|
12 //===----------------------------------------------------------------------===//
|
|
13
|
|
14 #include "sanitizer_common/sanitizer_platform.h"
|
|
15 #if SANITIZER_POSIX
|
|
16
|
|
17 #include "sanitizer_common/sanitizer_common.h"
|
|
18 #include "sanitizer_common/sanitizer_errno.h"
|
|
19 #include "sanitizer_common/sanitizer_libc.h"
|
|
20 #include "sanitizer_common/sanitizer_procmaps.h"
|
|
21 #include "tsan_platform.h"
|
|
22 #include "tsan_rtl.h"
|
|
23
|
|
24 namespace __tsan {
|
|
25
|
|
26 static const char kShadowMemoryMappingWarning[] =
|
|
27 "FATAL: %s can not madvise shadow region [%zx, %zx] with %s (errno: %d)\n";
|
|
28 static const char kShadowMemoryMappingHint[] =
|
|
29 "HINT: if %s is not supported in your environment, you may set "
|
|
30 "TSAN_OPTIONS=%s=0\n";
|
|
31
|
|
// Asks the kernel to avoid huge pages for the given shadow address range.
// Delegates to sanitizer_common's SetShadowRegionHugePageMode; per the
// comment in InitializeShadowMemory, avoiding huge pages on sparsely-used
// shadow can cut memory usage noticeably (~20% on some programs).
static void NoHugePagesInShadow(uptr addr, uptr size) {
  SetShadowRegionHugePageMode(addr, size);
}
|
|
35
|
|
36 static void DontDumpShadow(uptr addr, uptr size) {
|
|
37 if (common_flags()->use_madv_dontdump)
|
|
38 if (!DontDumpShadowMemory(addr, size)) {
|
|
39 Printf(kShadowMemoryMappingWarning, SanitizerToolName, addr, addr + size,
|
|
40 "MADV_DONTDUMP", errno);
|
|
41 Printf(kShadowMemoryMappingHint, "MADV_DONTDUMP", "use_madv_dontdump");
|
|
42 Die();
|
|
43 }
|
|
44 }
|
|
45
|
|
46 #if !SANITIZER_GO
|
|
// Maps the main shadow and meta-shadow regions at their fixed addresses and
// applies madvise tuning (no huge pages for selected ranges, MADV_DONTDUMP
// for the whole shadow). Dies on mapping failure, which typically means the
// binary was not built/linked as PIE. Finishes with the per-OS hook
// InitializeShadowMemoryPlatform().
void InitializeShadowMemory() {
  // Map memory shadow.
  if (!MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(), "shadow")) {
    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
    Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
    Die();
  }
  // This memory range is used for thread stacks and large user mmaps.
  // Frequently a thread uses only a small part of stack and similarly
  // a program uses a small part of large mmap. On some programs
  // we see 20% memory usage reduction without huge pages for this range.
  // FIXME: don't use constants here.
#if defined(__x86_64__)
  const uptr kMadviseRangeBeg  = 0x7f0000000000ull;
  const uptr kMadviseRangeSize = 0x010000000000ull;
#elif defined(__mips64)
  const uptr kMadviseRangeBeg  = 0xff00000000ull;
  const uptr kMadviseRangeSize = 0x0100000000ull;
#elif defined(__aarch64__) && defined(__APPLE__)
  // On Apple aarch64 the madvise range covers the whole low app region.
  uptr kMadviseRangeBeg = LoAppMemBeg();
  uptr kMadviseRangeSize = LoAppMemEnd() - LoAppMemBeg();
#elif defined(__aarch64__)
  // aarch64 Linux: the address-space layout depends on the kernel VMA size
  // (vmaSize, detected elsewhere at startup), so the range is chosen at
  // runtime. Only 39- and 42-bit layouts are handled here; other values
  // leave the range at 0 in release builds (DCHECK fires in debug).
  uptr kMadviseRangeBeg = 0;
  uptr kMadviseRangeSize = 0;
  if (vmaSize == 39) {
    kMadviseRangeBeg  = 0x7d00000000ull;
    kMadviseRangeSize = 0x0300000000ull;
  } else if (vmaSize == 42) {
    kMadviseRangeBeg  = 0x3f000000000ull;
    kMadviseRangeSize = 0x01000000000ull;
  } else {
    DCHECK(0);
  }
#elif defined(__powerpc64__)
  // powerpc64: same runtime selection by VMA size (44- and 46-bit layouts).
  uptr kMadviseRangeBeg = 0;
  uptr kMadviseRangeSize = 0;
  if (vmaSize == 44) {
    kMadviseRangeBeg  = 0x0f60000000ull;
    kMadviseRangeSize = 0x0010000000ull;
  } else if (vmaSize == 46) {
    kMadviseRangeBeg  = 0x3f0000000000ull;
    kMadviseRangeSize = 0x010000000000ull;
  } else {
    DCHECK(0);
  }
#endif
  // Disable huge pages for the shadow of the madvise range; the shadow is
  // kShadowMultiplier times larger than the app range it mirrors.
  NoHugePagesInShadow(MemToShadow(kMadviseRangeBeg),
      kMadviseRangeSize * kShadowMultiplier);
  DontDumpShadow(ShadowBeg(), ShadowEnd() - ShadowBeg());
  DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
      ShadowBeg(), ShadowEnd(),
      (ShadowEnd() - ShadowBeg()) >> 30);

  // Map meta shadow.
  const uptr meta = MetaShadowBeg();
  const uptr meta_size = MetaShadowEnd() - meta;
  if (!MmapFixedNoReserve(meta, meta_size, "meta shadow")) {
    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
    Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
    Die();
  }
  NoHugePagesInShadow(meta, meta_size);
  DontDumpShadow(meta, meta_size);
  DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
      meta, meta + meta_size, meta_size >> 30);

  // Platform-specific shadow setup hook (implemented per-OS elsewhere).
  InitializeShadowMemoryPlatform();
}
|
|
115
|
|
116 static void ProtectRange(uptr beg, uptr end) {
|
|
117 CHECK_LE(beg, end);
|
|
118 if (beg == end)
|
|
119 return;
|
|
120 if (beg != (uptr)MmapFixedNoAccess(beg, end - beg)) {
|
|
121 Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
|
|
122 Printf("FATAL: Make sure you are not using unlimited stack\n");
|
|
123 Die();
|
|
124 }
|
|
125 }
|
|
126
|
|
// Walks the process memory map to verify that every existing mapping lies in
// a region TSan expects (dies otherwise — the usual cause is a non-PIE
// binary), then mprotects all gaps between the app/shadow/meta/trace regions
// so the program cannot map anything into them later.
void CheckAndProtect() {
  // Ensure that the binary is indeed compiled with -pie.
  MemoryMappingLayout proc_maps(true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    if (IsAppMem(segment.start)) continue;
    // NOTE(review): segments in [HeapMemEnd, HeapEnd) are tolerated here —
    // presumably a heap tail region distinct from app memory; confirm
    // against the platform's address-layout definitions.
    if (segment.start >= HeapMemEnd() && segment.start < HeapEnd()) continue;
    if (segment.protection == 0) // Zero page or mprotected.
      continue;
    // Segments at or above VdsoBeg() (vdso and beyond) end the scan rather
    // than being treated as errors.
    if (segment.start >= VdsoBeg()) // vdso
      break;
    // Anything else is an unexpected mapping: report and abort.
    Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n",
        segment.start, segment.end);
    Die();
  }

  // Protect the gaps between known regions. The region order differs per
  // platform, so each branch lists its gaps explicitly, low to high.
#if defined(__aarch64__) && defined(__APPLE__)
  ProtectRange(HeapMemEnd(), ShadowBeg());
  ProtectRange(ShadowEnd(), MetaShadowBeg());
  ProtectRange(MetaShadowEnd(), TraceMemBeg());
#else
  ProtectRange(LoAppMemEnd(), ShadowBeg());
  ProtectRange(ShadowEnd(), MetaShadowBeg());
#ifdef TSAN_MID_APP_RANGE
  // Platforms with a middle app range leave it unprotected between the
  // meta shadow and the trace region.
  ProtectRange(MetaShadowEnd(), MidAppMemBeg());
  ProtectRange(MidAppMemEnd(), TraceMemBeg());
#else
  ProtectRange(MetaShadowEnd(), TraceMemBeg());
#endif
  // Memory for traces is mapped lazily in MapThreadTrace.
  // Protect the whole range for now, so that user does not map something here.
  ProtectRange(TraceMemBeg(), TraceMemEnd());
  ProtectRange(TraceMemEnd(), HeapMemBeg());
  ProtectRange(HeapEnd(), HiAppMemBeg());
#endif
}
|
|
163 #endif
|
|
164
|
|
165 } // namespace __tsan
|
|
166
|
|
167 #endif // SANITIZER_POSIX
|