145
|
1 //===-- sanitizer_stoptheworld_netbsd_libcdep.cpp -------------------------===//
|
|
2 //
|
|
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
4 // See https://llvm.org/LICENSE.txt for license information.
|
|
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
6 //
|
|
7 //===----------------------------------------------------------------------===//
|
|
8 //
|
|
9 // See sanitizer_stoptheworld.h for details.
|
|
10 // This implementation was inspired by Markus Gutschke's linuxthreads.cc.
|
|
11 //
|
|
12 // This is a NetBSD variation of Linux stoptheworld implementation
|
|
13 // See sanitizer_stoptheworld_linux_libcdep.cpp for code comments.
|
|
14 //
|
|
15 //===----------------------------------------------------------------------===//
|
|
16
|
|
17 #include "sanitizer_platform.h"
|
|
18
|
|
19 #if SANITIZER_NETBSD
|
|
20
|
|
21 #include "sanitizer_stoptheworld.h"
|
|
22
|
|
23 #include "sanitizer_atomic.h"
|
|
24 #include "sanitizer_platform_limits_posix.h"
|
|
25
|
|
26 #include <sys/types.h>
|
|
27
|
|
28 #include <sys/ptrace.h>
|
|
29 #include <sys/uio.h>
|
|
30 #include <sys/wait.h>
|
|
31
|
|
32 #include <machine/reg.h>
|
|
33
|
|
34 #include <elf.h>
|
|
35 #include <errno.h>
|
|
36 #include <sched.h>
|
|
37 #include <signal.h>
|
|
38 #include <stddef.h>
|
|
39
|
|
40 #define internal_sigaction_norestorer internal_sigaction
|
|
41
|
|
42 #include "sanitizer_common.h"
|
|
43 #include "sanitizer_flags.h"
|
|
44 #include "sanitizer_libc.h"
|
|
45 #include "sanitizer_linux.h"
|
|
46 #include "sanitizer_mutex.h"
|
|
47 #include "sanitizer_placement_new.h"
|
|
48
|
|
49 namespace __sanitizer {
|
|
50
|
|
// NetBSD-specific list of suspended threads (LWP ids), populated by
// ThreadSuspender::SuspendAllThreads() while the target process is stopped.
class SuspendedThreadsListNetBSD : public SuspendedThreadsList {
 public:
  // Reserve up front so Append() rarely needs to reallocate while the
  // traced process is ptrace-stopped.
  SuspendedThreadsListNetBSD() { thread_ids_.reserve(1024); }

  tid_t GetThreadID(uptr index) const;
  uptr ThreadCount() const;
  bool ContainsTid(tid_t thread_id) const;
  void Append(tid_t tid);

  // Fetches the register file and stack pointer of the thread at |index|
  // via ptrace; see the out-of-line definition below for failure modes.
  PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
                                          uptr *sp) const;
  uptr RegisterCount() const;

 private:
  InternalMmapVector<tid_t> thread_ids_;
};
|
|
67
|
|
// Arguments passed to the clone()d tracer task (see TracerThread below).
struct TracerThreadArgument {
  StopTheWorldCallback callback;
  void *callback_argument;
  // Held by the parent while it finishes preparations; the tracer blocks on
  // it before doing any work (see StopTheWorld).
  BlockingMutex mutex;
  // Set to 1 by the tracer once the callback has run and threads resumed;
  // the parent spins on this flag.
  atomic_uintptr_t done;
  uptr parent_pid;
};
|
|
75
|
|
// Drives suspension of the traced process: attaches with ptrace, records its
// LWPs, and later detaches (resuming it) or kills it.
class ThreadSuspender {
 public:
  // |pid| is the process to trace; |arg| is shared with the parent and is
  // used by the tracer's signal handler to publish completion.
  explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)
      : arg(arg), pid_(pid) {
    CHECK_GE(pid, 0);
  }
  bool SuspendAllThreads();
  void ResumeAllThreads();
  void KillAllThreads();
  SuspendedThreadsListNetBSD &suspended_threads_list() {
    return suspended_threads_list_;
  }
  TracerThreadArgument *arg;

 private:
  SuspendedThreadsListNetBSD suspended_threads_list_;
  pid_t pid_;
};
|
|
94
|
|
95 void ThreadSuspender::ResumeAllThreads() {
|
|
96 int pterrno;
|
|
97 if (!internal_iserror(internal_ptrace(PT_DETACH, pid_, (void *)(uptr)1, 0),
|
|
98 &pterrno)) {
|
|
99 VReport(2, "Detached from process %d.\n", pid_);
|
|
100 } else {
|
|
101 VReport(1, "Could not detach from process %d (errno %d).\n", pid_, pterrno);
|
|
102 }
|
|
103 }
|
|
104
|
|
// Last-resort teardown: terminate the traced process with PT_KILL so its
// threads are not left stopped forever. Errors are deliberately ignored —
// there is nothing further we could do about them here.
void ThreadSuspender::KillAllThreads() {
  internal_ptrace(PT_KILL, pid_, nullptr, 0);
}
|
|
108
|
|
// Attaches to the target process with ptrace (stopping all of its threads)
// and records every LWP id into suspended_threads_list_.
// Returns false if the initial attach fails; LWP enumeration errors are not
// treated as fatal.
bool ThreadSuspender::SuspendAllThreads() {
  int pterrno;
  if (internal_iserror(internal_ptrace(PT_ATTACH, pid_, nullptr, 0),
                       &pterrno)) {
    Printf("Could not attach to process %d (errno %d).\n", pid_, pterrno);
    return false;
  }

  int status;
  uptr waitpid_status;
  // PT_ATTACH delivers a stop to the tracee; reap that stop (retrying on
  // EINTR) before issuing further ptrace requests.
  HANDLE_EINTR(waitpid_status, internal_waitpid(pid_, &status, 0));

  VReport(2, "Attached to process %d.\n", pid_);

  // Walk the LWP list: each successful PT_LWPINFO call advances pl.pl_lwpid
  // to the next LWP; the walk ends when pl_lwpid comes back as 0.
  struct ptrace_lwpinfo pl;
  int val;
  pl.pl_lwpid = 0;
  // NOTE(review): this uses the libc ptrace(), unlike the internal_ptrace()
  // wrapper used everywhere else in this file — confirm that is intentional.
  while ((val = ptrace(PT_LWPINFO, pid_, (void *)&pl, sizeof(pl))) != -1 &&
         pl.pl_lwpid != 0) {
    suspended_threads_list_.Append(pl.pl_lwpid);
    VReport(2, "Appended thread %d in process %d.\n", pl.pl_lwpid, pid_);
  }
  return true;
}
|
|
133
|
|
// Pointer to the ThreadSuspender instance for use in signal handler; set and
// cleared by the tracer task (see TracerThread below).
static ThreadSuspender *thread_suspender_instance = nullptr;

// Synchronous signals that should not be blocked in the tracer, so that a
// crash inside it is caught by TracerThreadSignalHandler.
static const int kSyncSignals[] = {SIGABRT, SIGILL, SIGFPE, SIGSEGV,
                                   SIGBUS, SIGXCPU, SIGXFSZ};
|
|
140
|
|
141 static void TracerThreadDieCallback() {
|
|
142 ThreadSuspender *inst = thread_suspender_instance;
|
|
143 if (inst && stoptheworld_tracer_pid == internal_getpid()) {
|
|
144 inst->KillAllThreads();
|
|
145 thread_suspender_instance = nullptr;
|
|
146 }
|
|
147 }
|
|
148
|
|
// Signal handler to wake up suspended threads when the tracer thread dies.
// Runs on the tracer's alternate stack; always terminates the tracer task.
static void TracerThreadSignalHandler(int signum, __sanitizer_siginfo *siginfo,
                                      void *uctx) {
  SignalContext ctx(siginfo, uctx);
  Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
         ctx.addr, ctx.pc, ctx.sp);
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst) {
    // On SIGABRT kill the tracee outright; for other (crash) signals detach
    // so the tracee's threads can run again.
    if (signum == SIGABRT)
      inst->KillAllThreads();
    else
      inst->ResumeAllThreads();
    // Unregister the die callback and publish completion so the parent
    // stops spinning in StopTheWorld().
    RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
    thread_suspender_instance = nullptr;
    atomic_store(&inst->arg->done, 1, memory_order_relaxed);
  }
  // Distinct exit codes distinguish abort (1) from other fatal signals (2).
  internal__exit((signum == SIGABRT) ? 1 : 2);
}
|
|
167
|
|
// Size of alternative stack for signal handlers in the tracer thread.
static const int kHandlerStackSize = 8192;

// This function will be run as a cloned task.
// It attaches to the parent process, collects its threads, runs the user
// callback, then detaches. Returns 0 on success, nonzero on failure.
static int TracerThread(void *argument) {
  TracerThreadArgument *tracer_thread_argument =
      (TracerThreadArgument *)argument;

  // Check if parent is already dead.
  if (internal_getppid() != tracer_thread_argument->parent_pid)
    internal__exit(4);

  // Wait for the parent thread to finish preparations.
  tracer_thread_argument->mutex.Lock();
  tracer_thread_argument->mutex.Unlock();

  RAW_CHECK(AddDieCallback(TracerThreadDieCallback));

  // The tracee is our parent: we were clone()d directly from it.
  ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);
  // Global pointer for the signal handler.
  thread_suspender_instance = &thread_suspender;

  // Alternate stack for signal handling, so the handler can run even if the
  // tracer's own stack is unusable.
  InternalMmapVector<char> handler_stack_memory(kHandlerStackSize);
  stack_t handler_stack;
  internal_memset(&handler_stack, 0, sizeof(handler_stack));
  handler_stack.ss_sp = handler_stack_memory.data();
  handler_stack.ss_size = kHandlerStackSize;
  internal_sigaltstack(&handler_stack, nullptr);

  // Install our handler for synchronous signals. Other signals should be
  // blocked by the mask we inherited from the parent thread.
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
    __sanitizer_sigaction act;
    internal_memset(&act, 0, sizeof(act));
    act.sigaction = TracerThreadSignalHandler;
    act.sa_flags = SA_ONSTACK | SA_SIGINFO;
    internal_sigaction_norestorer(kSyncSignals[i], &act, 0);
  }

  int exit_code = 0;
  if (!thread_suspender.SuspendAllThreads()) {
    VReport(1, "Failed suspending threads.\n");
    exit_code = 3;
  } else {
    tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
                                     tracer_thread_argument->callback_argument);
    thread_suspender.ResumeAllThreads();
    exit_code = 0;
  }
  // Clear the global and unregister the die callback before signaling
  // completion, so neither acts on a suspender that is about to go away.
  RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
  thread_suspender_instance = nullptr;
  atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);
  return exit_code;
}
|
|
223
|
|
224 class ScopedStackSpaceWithGuard {
|
|
225 public:
|
|
226 explicit ScopedStackSpaceWithGuard(uptr stack_size) {
|
|
227 stack_size_ = stack_size;
|
|
228 guard_size_ = GetPageSizeCached();
|
|
229 // FIXME: Omitting MAP_STACK here works in current kernels but might break
|
|
230 // in the future.
|
|
231 guard_start_ =
|
|
232 (uptr)MmapOrDie(stack_size_ + guard_size_, "ScopedStackWithGuard");
|
|
233 CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));
|
|
234 }
|
|
235 ~ScopedStackSpaceWithGuard() {
|
|
236 UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
|
|
237 }
|
|
238 void *Bottom() const {
|
|
239 return (void *)(guard_start_ + stack_size_ + guard_size_);
|
|
240 }
|
|
241
|
|
242 private:
|
|
243 uptr stack_size_;
|
|
244 uptr guard_size_;
|
|
245 uptr guard_start_;
|
|
246 };
|
|
247
|
|
// Signal masks saved/installed around spawning the tracer in StopTheWorld().
static __sanitizer_sigset_t blocked_sigset;
static __sanitizer_sigset_t old_sigset;
|
|
250
|
|
// RAII helper that publishes the tracer's pid (and our own pid as the
// tracer's parent) in globals for the duration of the stop-the-world region,
// then clears them.
struct ScopedSetTracerPID {
  explicit ScopedSetTracerPID(uptr tracer_pid) {
    stoptheworld_tracer_pid = tracer_pid;
    stoptheworld_tracer_ppid = internal_getpid();
  }
  ~ScopedSetTracerPID() {
    stoptheworld_tracer_pid = 0;
    stoptheworld_tracer_ppid = 0;
  }
};
|
|
261
|
|
// Suspends every thread of the current process, invokes |callback| on the
// resulting thread list, then resumes them. The work happens in a clone()d
// tracer task so that the caller's own thread can be stopped too.
void StopTheWorld(StopTheWorldCallback callback, void *argument) {
  // Prepare the arguments for TracerThread.
  struct TracerThreadArgument tracer_thread_argument;
  tracer_thread_argument.callback = callback;
  tracer_thread_argument.callback_argument = argument;
  tracer_thread_argument.parent_pid = internal_getpid();
  atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);
  const uptr kTracerStackSize = 2 * 1024 * 1024;
  ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);

  // Hold the mutex so the tracer blocks until our preparations are done.
  tracer_thread_argument.mutex.Lock();

  // Block every signal except the synchronous ones; the tracer inherits
  // this mask across clone(), shielding it from async signals.
  internal_sigfillset(&blocked_sigset);
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
    internal_sigdelset(&blocked_sigset, kSyncSignals[i]);
  int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
  CHECK_EQ(rv, 0);
  uptr tracer_pid = internal_clone(TracerThread, tracer_stack.Bottom(),
                                   CLONE_VM | CLONE_FS | CLONE_FILES,
                                   &tracer_thread_argument);
  // Restore our own mask; the tracer keeps the restricted one.
  internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);
  int local_errno = 0;
  if (internal_iserror(tracer_pid, &local_errno)) {
    VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
    tracer_thread_argument.mutex.Unlock();
  } else {
    ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);

    // Release the tracer; it will attach to us and run the callback.
    tracer_thread_argument.mutex.Unlock();

    // Busy-wait until the tracer publishes completion.
    while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0)
      sched_yield();

    // Reap the tracer task, retrying on EINTR.
    for (;;) {
      uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);
      if (!internal_iserror(waitpid_status, &local_errno))
        break;
      if (local_errno == EINTR)
        continue;
      VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
              local_errno);
      break;
    }
  }
}
|
|
307
|
|
// Returns the LWP id stored at |index|; aborts if the index is out of range.
tid_t SuspendedThreadsListNetBSD::GetThreadID(uptr index) const {
  CHECK_LT(index, thread_ids_.size());
  return thread_ids_[index];
}
|
|
312
|
|
// Number of threads recorded by SuspendAllThreads().
uptr SuspendedThreadsListNetBSD::ThreadCount() const {
  return thread_ids_.size();
}
|
|
316
|
|
317 bool SuspendedThreadsListNetBSD::ContainsTid(tid_t thread_id) const {
|
|
318 for (uptr i = 0; i < thread_ids_.size(); i++) {
|
|
319 if (thread_ids_[i] == thread_id)
|
|
320 return true;
|
|
321 }
|
|
322 return false;
|
|
323 }
|
|
324
|
|
// Records one LWP id; called from the tracer during SuspendAllThreads().
void SuspendedThreadsListNetBSD::Append(tid_t tid) {
  thread_ids_.push_back(tid);
}
|
|
328
|
|
329 PtraceRegistersStatus SuspendedThreadsListNetBSD::GetRegistersAndSP(
|
|
330 uptr index, uptr *buffer, uptr *sp) const {
|
|
331 lwpid_t tid = GetThreadID(index);
|
|
332 pid_t ppid = internal_getppid();
|
|
333 struct reg regs;
|
|
334 int pterrno;
|
|
335 bool isErr =
|
|
336 internal_iserror(internal_ptrace(PT_GETREGS, ppid, ®s, tid), &pterrno);
|
|
337 if (isErr) {
|
|
338 VReport(1,
|
|
339 "Could not get registers from process %d thread %d (errno %d).\n",
|
|
340 ppid, tid, pterrno);
|
|
341 return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL
|
|
342 : REGISTERS_UNAVAILABLE;
|
|
343 }
|
|
344
|
|
345 *sp = PTRACE_REG_SP(®s);
|
|
346 internal_memcpy(buffer, ®s, sizeof(regs));
|
|
347
|
|
348 return REGISTERS_AVAILABLE;
|
|
349 }
|
|
350
|
|
// Number of uptr-sized words GetRegistersAndSP() writes into its buffer
// (the machine-dependent struct reg, measured in words).
uptr SuspendedThreadsListNetBSD::RegisterCount() const {
  return sizeof(struct reg) / sizeof(uptr);
}
|
|
354 } // namespace __sanitizer
|
|
355
|
|
356 #endif
|