comparison libmudflap/mf-hooks1.c @ 0:a06113de4d67

first commit
author kent <kent@cr.ie.u-ryukyu.ac.jp>
date Fri, 17 Jul 2009 14:47:48 +0900
parents
children 77e2b8dfacca
/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2009 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#include "config.h"

#ifndef HAVE_SOCKLEN_T
#define socklen_t int
#endif


/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <time.h>

#include "mf-runtime.h"
#include "mf-impl.h"

#ifdef _MUDFLAP
#error "Do not compile this file with -fmudflap!"
#endif
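
/* (If this file were itself instrumented, every load and store inside
   the runtime would call back into its own checking machinery,
   recursing into these very hooks.)  */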


/* Memory allocation related hook functions.  Some of these are
   intercepted via linker wrapping or symbol interposition.  Others
   use plain macros in mf-runtime.h.  */


#if PIC

enum { BS = 4096, NB=10 };
static char __mf_0fn_bufs[NB][BS];
static unsigned __mf_0fn_bufs_used[NB];


/* A special bootstrap variant. */
void *
__mf_0fn_malloc (size_t c)
{
  unsigned i;

  for (i=0; i<NB; i++)
    {
      if (! __mf_0fn_bufs_used[i] && c < BS)
        {
          __mf_0fn_bufs_used[i] = 1;
          return & __mf_0fn_bufs[i][0];
        }
    }
  return NULL;
}
#endif
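
/* The bootstrap allocator above serves allocations made before the
   real malloc has been located, typically those issued by dlsym
   itself while the runtime resolves symbols.  It hands out at most NB
   slots, each to one request smaller than BS bytes; a sketch of its
   behavior:

     void *p = __mf_0fn_malloc (64);    // gets a slot of __mf_0fn_bufs
     void *q = __mf_0fn_malloc (8192);  // 8192 >= BS, so NULL

   Slots are never recycled: __mf_0fn_bufs_used[i] is set exactly
   once, and the free wrapper below deliberately skips pointers that
   fall inside this pool.  */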


#undef malloc
WRAPPER(void *, malloc, size_t c)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, malloc, size_t c);
  void *result;
  BEGIN_PROTECT (malloc, c);

  size_with_crumple_zones =
    CLAMPADD(c,CLAMPADD(__mf_opts.crumple_zone,
                        __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mf_register (result, c, __MF_TYPE_HEAP, "malloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  return result;
}
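
/* Resulting layout for malloc(c), with a crumple zone of CZ bytes on
   each side (illustrative):

     |<- CZ ->|<------- c bytes ------->|<- CZ ->|
     ^        ^
     raw      pointer returned to the caller; only the middle
     result   c bytes are registered as __MF_TYPE_HEAP.

   Accesses straying into either crumple zone hit unregistered memory
   and are reported as violations.  */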


#ifdef PIC
/* A special bootstrap variant. */
void *
__mf_0fn_calloc (size_t c, size_t n)
{
  return __mf_0fn_malloc (c * n);
}
#endif
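
/* (No explicit zeroing is needed in the bootstrap calloc: the
   __mf_0fn_bufs pool has static storage duration and is therefore
   already zero, and its slots are never reused.)  */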


#undef calloc
WRAPPER(void *, calloc, size_t c, size_t n)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, calloc, size_t, size_t);
  DECLARE(void *, malloc, size_t);
  DECLARE(void *, memset, void *, int, size_t);
  char *result;
  BEGIN_PROTECT (calloc, c, n);

  size_with_crumple_zones =
    CLAMPADD((c * n), /* XXX: CLAMPMUL */
             CLAMPADD(__mf_opts.crumple_zone,
                      __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  if (LIKELY(result))
    memset (result, 0, size_with_crumple_zones);

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mf_register (result, c*n /* XXX: clamp */, __MF_TYPE_HEAP_I, "calloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  return result;
}
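
/* The XXX: CLAMPMUL notes above flag a real hazard: c * n can wrap
   around, so calloc would zero and register fewer bytes than the
   caller asked for.  A saturating multiply in the spirit of CLAMPADD
   might look like this (a sketch only, not part of this file's
   interface):

     static size_t
     clampmul (size_t a, size_t b)
     {
       if (b != 0 && a > (size_t) -1 / b)
         return (size_t) -1;  // saturate rather than wrap
       return a * b;
     }
*/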


#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_realloc (void *buf, size_t c)
{
  return NULL;
}
#endif


#undef realloc
WRAPPER(void *, realloc, void *buf, size_t c)
{
  DECLARE(void *, realloc, void *, size_t);
  size_t size_with_crumple_zones;
  char *base = buf;
  unsigned saved_wipe_heap;
  char *result;
  BEGIN_PROTECT (realloc, buf, c);

  if (LIKELY(buf))
    base -= __mf_opts.crumple_zone;

  size_with_crumple_zones =
    CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
                         __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (realloc, base, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  /* Ensure heap wiping doesn't occur during this peculiar
     unregister/reregister pair.  */
  LOCKTH ();
  __mf_set_state (reentrant);
  saved_wipe_heap = __mf_opts.wipe_heap;
  __mf_opts.wipe_heap = 0;

  if (LIKELY(buf))
    __mfu_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP.  */

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mfu_register (result, c, __MF_TYPE_HEAP_I, "realloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  /* Restore previous setting.  */
  __mf_opts.wipe_heap = saved_wipe_heap;

  __mf_set_state (active);
  UNLOCKTH ();

  return result;
}
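
/* Why wipe_heap is suppressed here: with heap wiping enabled,
   unregistering an object normally scribbles over its contents.  By
   this point the old bytes have already been handed to the real
   realloc and may live on inside the new block, so wiping during this
   bookkeeping unregister would corrupt the caller's data.  */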


#if PIC
/* A special bootstrap variant. */
void
__mf_0fn_free (void *buf)
{
  return;
}
#endif

#undef free
WRAPPER(void, free, void *buf)
{
  /* Use a circular queue to delay some number (__mf_opts.free_queue_length) of free()s.  */
  static void *free_queue [__MF_FREEQ_MAX];
  static unsigned free_ptr = 0;
  static int freeq_initialized = 0;
  DECLARE(void, free, void *);

  BEGIN_PROTECT (free, buf);

  if (UNLIKELY(buf == NULL))
    return;

#if PIC
  /* Check whether the given buffer might have come from a
     __mf_0fn_malloc/calloc call that for whatever reason was not
     redirected back to __mf_0fn_free.  If so, we just ignore the
     call.  */
  if (UNLIKELY((uintptr_t) buf >= (uintptr_t) __mf_0fn_bufs &&
               (uintptr_t) buf < ((uintptr_t) __mf_0fn_bufs + sizeof(__mf_0fn_bufs))))
    {
      VERBOSE_TRACE ("skipping free of boot (0fn) alloc buffer %p\n", buf);
      return;
    }
#endif

  LOCKTH ();
  if (UNLIKELY(!freeq_initialized))
    {
      memset (free_queue, 0,
              __MF_FREEQ_MAX * sizeof (void *));
      freeq_initialized = 1;
    }
  UNLOCKTH ();

  __mf_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP.  */

  if (UNLIKELY(__mf_opts.free_queue_length > 0))
    {
      char *freeme = NULL;
      LOCKTH ();
      if (free_queue [free_ptr] != NULL)
        {
          freeme = free_queue [free_ptr];
          freeme -= __mf_opts.crumple_zone;
        }
      free_queue [free_ptr] = buf;
      free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
      UNLOCKTH ();
      if (freeme)
        {
          if (__mf_opts.trace_mf_calls)
            {
              VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
                             (void *) freeme,
                             __mf_opts.crumple_zone);
            }
          BEGIN_MALLOC_PROTECT ();
          CALL_REAL (free, freeme);
          END_MALLOC_PROTECT ();
        }
    }
  else
    {
      /* back pointer up a bit to the beginning of crumple zone */
      char *base = (char *)buf;
      base -= __mf_opts.crumple_zone;
      if (__mf_opts.trace_mf_calls)
        {
          VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
                         (void *) base,
                         (void *) buf,
                         __mf_opts.crumple_zone);
        }
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, base);
      END_MALLOC_PROTECT ();
    }
}
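
/* Effect of the deferral queue, taking free_queue_length = 4 as an
   example:

     free (a);                       // a unregistered, then queued
     free (b); free (c); free (d);
     free (e);                       // slot reused: a, minus its crumple
                                     // zone, now reaches the real free;
                                     // e takes its place in the queue

   A use-after-free of `a' inside that window is still caught, since
   `a' was unregistered immediately even though its storage had not
   yet been recycled.  */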


/* We can only wrap mmap if the target supports it.  Likewise for munmap.
   We assume we have both if we have mmap.  */
#ifdef HAVE_MMAP

#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_mmap (void *start, size_t l, int prot, int f, int fd, off_t off)
{
  return (void *) -1;
}
#endif


#undef mmap
WRAPPER(void *, mmap,
        void *start, size_t length, int prot,
        int flags, int fd, off_t offset)
{
  DECLARE(void *, mmap, void *, size_t, int,
          int, int, off_t);
  void *result;
  BEGIN_PROTECT (mmap, start, length, prot, flags, fd, offset);

  result = CALL_REAL (mmap, start, length, prot,
                      flags, fd, offset);

  /*
  VERBOSE_TRACE ("mmap (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);
  */

  if (result != (void *)-1)
    {
      /* Register each page as a heap object.  Why not register it all
         as a single segment?  That's so that a later munmap() call
         can unmap individual pages.  XXX: would __MF_TYPE_GUESS make
         this more automatic?  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) result;
      uintptr_t offset;

      for (offset=0; offset<length; offset+=ps)
        {
          /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS.  */
          /* XXX: Unaccessed HEAP pages are reported as leaks.  Is this
             appropriate for unaccessed mmap pages?  */
          __mf_register ((void *) CLAMPADD (base, offset), ps,
                         __MF_TYPE_HEAP_I, "mmap page");
        }
    }

  return result;
}
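
/* Since registration is page-by-page, unmapping part of a mapping
   later unregisters only the affected pages.  For example, with
   4096-byte pages (and assuming MAP_ANONYMOUS is available):

     void *p = mmap (0, 8192, PROT_READ|PROT_WRITE,
                     MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
     // two separate "mmap page" objects are registered
     munmap ((char *) p + 4096, 4096);
     // ... and the second one alone is unregistered
*/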


#if PIC
/* A special bootstrap variant. */
int
__mf_0fn_munmap (void *start, size_t length)
{
  return -1;
}
#endif


#undef munmap
WRAPPER(int, munmap, void *start, size_t length)
{
  DECLARE(int, munmap, void *, size_t);
  int result;
  BEGIN_PROTECT (munmap, start, length);

  result = CALL_REAL (munmap, start, length);

  /*
  VERBOSE_TRACE ("munmap (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);
  */

  if (result == 0)
    {
      /* Unregister each page as a heap object.  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) start & (~ (ps - 1)); /* page align */
      uintptr_t offset;

      for (offset=0; offset<length; offset+=ps)
        __mf_unregister ((void *) CLAMPADD (base, offset), ps, __MF_TYPE_HEAP_I);
    }
  return result;
}
#endif /* HAVE_MMAP */


/* This wrapper is a little different: it is also called indirectly
   from __mf_fini, to clean up pending allocations.  */
void *
__mf_wrap_alloca_indirect (size_t c)
{
  DECLARE (void *, malloc, size_t);
  DECLARE (void, free, void *);

  /* This struct, a linked list, tracks alloca'd objects.  The newest
     object is at the head of the list.  If we detect that we've
     popped a few levels of stack, then the listed objects are freed
     as needed.  NB: Both the tracking struct and the user data come
     from the real malloc; the user data is then registered by hand
     below, so it behaves as if it had come from the wrapped malloc.  */
  struct alloca_tracking { void *ptr; void *stack; struct alloca_tracking *next; };
  static struct alloca_tracking *alloca_history = NULL;

  void *stack = __builtin_frame_address (0);
  void *result;
  struct alloca_tracking *track;

  TRACE ("%s\n", __PRETTY_FUNCTION__);
  VERBOSE_TRACE ("alloca stack level %p\n", (void *) stack);

  /* XXX: thread locking!  */

  /* Free any previously alloca'd blocks that belong to deeper-nested functions,
     which must therefore have exited by now.  */

#define DEEPER_THAN < /* XXX: for x86; steal find_stack_direction() from libiberty/alloca.c */

  while (alloca_history &&
         ((uintptr_t) alloca_history->stack DEEPER_THAN (uintptr_t) stack))
    {
      struct alloca_tracking *next = alloca_history->next;
      __mf_unregister (alloca_history->ptr, 0, __MF_TYPE_HEAP);
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, alloca_history->ptr);
      CALL_REAL (free, alloca_history);
      END_MALLOC_PROTECT ();
      alloca_history = next;
    }

  /* Allocate new block.  */
  result = NULL;
  if (LIKELY (c > 0)) /* alloca(0) causes no allocation.  */
    {
      BEGIN_MALLOC_PROTECT ();
      track = (struct alloca_tracking *) CALL_REAL (malloc,
                                                    sizeof (struct alloca_tracking));
      END_MALLOC_PROTECT ();
      if (LIKELY (track != NULL))
        {
          BEGIN_MALLOC_PROTECT ();
          result = CALL_REAL (malloc, c);
          END_MALLOC_PROTECT ();
          if (UNLIKELY (result == NULL))
            {
              BEGIN_MALLOC_PROTECT ();
              CALL_REAL (free, track);
              END_MALLOC_PROTECT ();
              /* Too bad.  XXX: What about errno?  */
            }
          else
            {
              __mf_register (result, c, __MF_TYPE_HEAP, "alloca region");
              track->ptr = result;
              track->stack = stack;
              track->next = alloca_history;
              alloca_history = track;
            }
        }
    }

  return result;
}
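
/* The lifetime rule implemented above, in brief: a tracked block is
   not released when its frame exits, but lazily, on the next wrapped
   alloca call from a shallower frame (per the downward-growing-stack
   assumption encoded in DEEPER_THAN):

     void g (void) { char *t = (char *) alloca (16); }  // tracked with
                                                        // g's frame
     void f (void) { g (); alloca (1); }  // f's alloca first frees the
                                          // block recorded in g's frame

   As the header comment notes, __mf_fini reuses this same path to
   drain whatever blocks are still pending at program exit.  */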


#undef alloca
WRAPPER(void *, alloca, size_t c)
{
  return __mf_wrap_alloca_indirect (c);
}
