libmudflap/mf-runtime.c @ changeset 0:a06113de4d67

commit:   first commit
author:   kent <kent@cr.ie.u-ryukyu.ac.jp>
date:     Fri, 17 Jul 2009 14:47:48 +0900
parents:  (none; initial revision)
children: 3bfb6c00c1e0
1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004, 2005, 2006, 2008, 2009
3 Free Software Foundation, Inc.
4 Contributed by Frank Ch. Eigler <fche@redhat.com>
5 and Graydon Hoare <graydon@redhat.com>
6 Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
7 adapted from libiberty.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 Under Section 7 of GPL version 3, you are granted additional
22 permissions described in the GCC Runtime Library Exception, version
23 3.1, as published by the Free Software Foundation.
24
25 You should have received a copy of the GNU General Public License and
26 a copy of the GCC Runtime Library Exception along with this program;
27 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
28 <http://www.gnu.org/licenses/>. */
29
30 #include "config.h"
31
32 /* These attempt to coax various unix flavours to declare all our
33 needed tidbits in the system headers. */
34 #if !defined(__FreeBSD__) && !defined(__APPLE__)
35 #define _POSIX_SOURCE
36 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
37 #define _GNU_SOURCE
38 #define _XOPEN_SOURCE
39 #define _BSD_TYPES
40 #define __EXTENSIONS__
41 #define _ALL_SOURCE
42 #define _LARGE_FILE_API
43 #define _XOPEN_SOURCE_EXTENDED 1
44
45 #include <stdio.h>
46 #include <stdlib.h>
47 #include <sys/types.h>
48 #include <sys/time.h>
49 #include <time.h>
50 #include <unistd.h>
51 #ifdef HAVE_EXECINFO_H
52 #include <execinfo.h>
53 #endif
54 #ifdef HAVE_SIGNAL_H
55 #include <signal.h>
56 #endif
57 #include <assert.h>
58
59 #include <string.h>
60 #include <limits.h>
61 #include <sys/types.h>
62 #include <signal.h>
63 #include <errno.h>
64 #include <ctype.h>
65
66 #include "mf-runtime.h"
67 #include "mf-impl.h"
68
69
70 /* ------------------------------------------------------------------------ */
71 /* Splay-tree implementation. */
72
73 typedef uintptr_t mfsplay_tree_key;
74 typedef void *mfsplay_tree_value;
75
76 /* Forward declaration for a node in the tree. */
77 typedef struct mfsplay_tree_node_s *mfsplay_tree_node;
78
79 /* The type of a function used to iterate over the tree. */
80 typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);
81
82 /* The nodes in the splay tree. */
83 struct mfsplay_tree_node_s
84 {
85 /* Data. */
86 mfsplay_tree_key key;
87 mfsplay_tree_value value;
88 /* Children. */
89 mfsplay_tree_node left;
90 mfsplay_tree_node right;
91 /* XXX: The addition of a parent pointer may eliminate some recursion. */
92 };
93
94 /* The splay tree itself. */
95 struct mfsplay_tree_s
96 {
97 /* The root of the tree. */
98 mfsplay_tree_node root;
99
100 /* The last key value for which the tree has been splayed, but not
101 since modified. */
102 mfsplay_tree_key last_splayed_key;
103 int last_splayed_key_p;
104
105 /* Statistics. */
106 unsigned num_keys;
107
108 /* Traversal recursion control flags. */
109 unsigned max_depth;
110 unsigned depth;
111 unsigned rebalance_p;
112 };
113 typedef struct mfsplay_tree_s *mfsplay_tree;
114
115 static mfsplay_tree mfsplay_tree_new (void);
116 static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
117 static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
118 static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
119 static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
120 static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
121 static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
122 static void mfsplay_tree_rebalance (mfsplay_tree sp);
123
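/* A minimal sketch of how the runtime below drives this API (keys are
   object base addresses, values are __mf_object_t pointers); see
   __mf_link_object, __mf_unlink_object and __mf_find_objects2 further
   down for the real call sites (obj and addr here are placeholders):

     mfsplay_tree t = mfsplay_tree_new ();
     mfsplay_tree_insert (t, (mfsplay_tree_key) obj->low,
                          (mfsplay_tree_value) obj);
     mfsplay_tree_node n = mfsplay_tree_lookup (t, (mfsplay_tree_key) addr);
     __mf_object_t *hit = n ? (__mf_object_t *) n->value : NULL;
     mfsplay_tree_remove (t, (mfsplay_tree_key) obj->low);
*/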
124 /* ------------------------------------------------------------------------ */
125 /* Utility macros */
126
127 #define CTOR __attribute__ ((constructor))
128 #define DTOR __attribute__ ((destructor))
129
130
131 /* Codes to describe the context in which a violation occurs. */
132 #define __MF_VIOL_UNKNOWN 0
133 #define __MF_VIOL_READ 1
134 #define __MF_VIOL_WRITE 2
135 #define __MF_VIOL_REGISTER 3
136 #define __MF_VIOL_UNREGISTER 4
137 #define __MF_VIOL_WATCH 5
138
139 /* Protect against recursive calls. */
140
141 static void
142 begin_recursion_protect1 (const char *pf)
143 {
144 if (__mf_get_state () == reentrant)
145 {
146 write (2, "mf: erroneous reentrancy detected in `", 38);
147 write (2, pf, strlen(pf));
148 write (2, "'\n", 2);
149 abort ();
150 }
151 __mf_set_state (reentrant);
152 }
153
154 #define BEGIN_RECURSION_PROTECT() \
155 begin_recursion_protect1 (__PRETTY_FUNCTION__)
156
157 #define END_RECURSION_PROTECT() \
158 __mf_set_state (active)
159
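/* Every exported entry point below (__mf_check, __mf_register,
   __mf_unregister, __mf_report, ...) wraps its unlocked __mfu_* worker in
   the same sequence: take the big lock, flip the state to reentrant so
   that libc calls made by the runtime itself pass through the hooks
   unchecked, do the work, then restore the active state and unlock.
   Schematically:

     LOCKTH ();
     BEGIN_RECURSION_PROTECT ();
     __mfu_check (ptr, sz, type, location);
     END_RECURSION_PROTECT ();
     UNLOCKTH ();
*/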
160 /* ------------------------------------------------------------------------ */
161 /* Required globals. */
162
163 #define LOOKUP_CACHE_MASK_DFL 1023
164 #define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
165 #define LOOKUP_CACHE_SHIFT_DFL 2
166
167 struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
168 uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
169 unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
170 #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
171
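/* The lookup cache is a direct-mapped table indexed by a shifted, masked
   pointer value.  Roughly (the __MF_CACHE_INDEX macro in the mudflap
   headers is the authoritative definition):

     unsigned idx = (((uintptr_t) ptr) >> __mf_lc_shift) & __mf_lc_mask;
     struct __mf_cache *entry = & __mf_lookup_cache [idx];
     int hit = (entry->low <= (uintptr_t) ptr
                && (uintptr_t) ptr + sz - 1 <= entry->high);

   __mf_adapt_cache below periodically re-tunes the mask and shift. */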
172 struct __mf_options __mf_opts;
173 int __mf_starting_p = 1;
174
175 #ifdef LIBMUDFLAPTH
176 #if defined(HAVE_TLS) && !defined(USE_EMUTLS)
177 __thread enum __mf_state_enum __mf_state_1 = reentrant;
178 #endif
179 #else
180 enum __mf_state_enum __mf_state_1 = reentrant;
181 #endif
182
183 #ifdef LIBMUDFLAPTH
184 pthread_mutex_t __mf_biglock =
185 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
186 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
187 #else
188 PTHREAD_MUTEX_INITIALIZER;
189 #endif
190 #endif
191
192 /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
193 the plain libmudflap.la (built without threading support) can diagnose
194 whether the application is linked with -lpthread. See __mf_usage() below. */
195 #if HAVE_PTHREAD_H
196 #ifdef _POSIX_THREADS
197 #pragma weak pthread_join
198 #else
199 #define pthread_join NULL
200 #endif
201 #endif
202
203
204 /* ------------------------------------------------------------------------ */
205 /* stats-related globals. */
206
207 static unsigned long __mf_count_check;
208 static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
209 static unsigned long __mf_count_register;
210 static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
211 static unsigned long __mf_count_unregister;
212 static unsigned long __mf_total_unregister_size;
213 static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
214 static unsigned long __mf_sigusr1_received;
215 static unsigned long __mf_sigusr1_handled;
216 /* not static */ unsigned long __mf_reentrancy;
217 #ifdef LIBMUDFLAPTH
218 /* not static */ unsigned long __mf_lock_contention;
219 #endif
220
221
222 /* ------------------------------------------------------------------------ */
223 /* mode-check-related globals. */
224
225 typedef struct __mf_object
226 {
227 uintptr_t low, high; /* __mf_register parameters */
228 const char *name;
229 char type; /* __MF_TYPE_something */
230 char watching_p; /* Trigger a VIOL_WATCH on access? */
231 unsigned read_count; /* Number of times __mf_check/read was called on this object. */
232 unsigned write_count; /* Likewise for __mf_check/write. */
233 unsigned liveness; /* A measure of recent checking activity. */
234 unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */
235
236 uintptr_t alloc_pc;
237 struct timeval alloc_time;
238 char **alloc_backtrace;
239 size_t alloc_backtrace_size;
240 #ifdef LIBMUDFLAPTH
241 pthread_t alloc_thread;
242 #endif
243
244 int deallocated_p;
245 uintptr_t dealloc_pc;
246 struct timeval dealloc_time;
247 char **dealloc_backtrace;
248 size_t dealloc_backtrace_size;
249 #ifdef LIBMUDFLAPTH
250 pthread_t dealloc_thread;
251 #endif
252 } __mf_object_t;
253
254 /* Live objects: splay trees, separated by type, ordered on .low (base address). */
255 /* Actually stored as static vars within lookup function below. */
256
257 /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
258 static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
259 static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
260
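/* Each cemetary row is a circular buffer holding up to
   __mf_opts.persistent_count recently unregistered objects of one type;
   __mf_object_dead_head[row] is the next plot to be overwritten.  Burial,
   as done in __mfu_unregister below, is schematically:

     unsigned plot = __mf_object_dead_head [row];
     del_obj = __mf_object_cemetary [row][plot];
     __mf_object_cemetary [row][plot] = old_obj;
     plot ++;
     if (plot == __mf_opts.persistent_count) plot = 0;
     __mf_object_dead_head [row] = plot;

   __mf_find_dead_objects searches these rows to explain accesses to
   already-freed memory. */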
261
262 /* ------------------------------------------------------------------------ */
263 /* Forward function declarations */
264
265 void __mf_init () CTOR;
266 static void __mf_sigusr1_respond ();
267 static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
268 __mf_object_t **objs, unsigned max_objs);
269 static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
270 __mf_object_t **objs, unsigned max_objs, int type);
271 static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
272 __mf_object_t **objs, unsigned max_objs);
273 static void __mf_adapt_cache ();
274 static void __mf_describe_object (__mf_object_t *obj);
275 static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
276 static mfsplay_tree __mf_object_tree (int type);
277 static void __mf_link_object (__mf_object_t *node);
278 static void __mf_unlink_object (__mf_object_t *node);
279
280
281 /* ------------------------------------------------------------------------ */
282 /* Configuration engine */
283
284 static void
285 __mf_set_default_options ()
286 {
287 memset (& __mf_opts, 0, sizeof (__mf_opts));
288
289 __mf_opts.adapt_cache = 1000003;
290 __mf_opts.abbreviate = 1;
291 __mf_opts.verbose_violations = 1;
292 __mf_opts.free_queue_length = 4;
293 __mf_opts.persistent_count = 100;
294 __mf_opts.crumple_zone = 32;
295 __mf_opts.backtrace = 4;
296 __mf_opts.timestamps = 1;
297 __mf_opts.mudflap_mode = mode_check;
298 __mf_opts.violation_mode = viol_nop;
299 #ifdef HAVE___LIBC_FREERES
300 __mf_opts.call_libc_freeres = 1;
301 #endif
302 __mf_opts.heur_std_data = 1;
303 #ifdef LIBMUDFLAPTH
304 __mf_opts.thread_stack = 0;
305 #endif
306 }
307
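/* Expressed with the option names from the table below, the defaults set
   above amount to roughly:

     MUDFLAP_OPTIONS='-mode-check -viol-nop -abbreviate -verbose-violations
                      -timestamps -heur-stdlib -lc-adapt=1000003
                      -free-queue-length=4 -persistent-count=100
                      -crumple-zone=32 -backtrace=4'

   (plus -libc-freeres where __libc_freeres is available). */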
308 static struct mudoption
309 {
310 char *name;
311 char *description;
312 enum
313 {
314 set_option,
315 read_integer_option,
316 } type;
317 unsigned value;
318 unsigned *target;
319 }
320 options [] =
321 {
322 {"mode-nop",
323 "mudflaps do nothing",
324 set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
325 {"mode-populate",
326 "mudflaps populate object tree",
327 set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
328 {"mode-check",
329 "mudflaps check for memory violations",
330 set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
331 {"mode-violate",
332 "mudflaps always cause violations (diagnostic)",
333 set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},
334
335 {"viol-nop",
336 "violations do not change program execution",
337 set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
338 {"viol-abort",
339 "violations cause a call to abort()",
340 set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
341 {"viol-segv",
342 "violations are promoted to SIGSEGV signals",
343 set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
344 {"viol-gdb",
345 "violations fork a gdb process attached to current program",
346 set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
347 {"trace-calls",
348 "trace calls to mudflap runtime library",
349 set_option, 1, &__mf_opts.trace_mf_calls},
350 {"verbose-trace",
351 "trace internal events within mudflap runtime library",
352 set_option, 1, &__mf_opts.verbose_trace},
353 {"collect-stats",
354 "collect statistics on mudflap's operation",
355 set_option, 1, &__mf_opts.collect_stats},
356 #ifdef SIGUSR1
357 {"sigusr1-report",
358 "print report upon SIGUSR1",
359 set_option, 1, &__mf_opts.sigusr1_report},
360 #endif
361 {"internal-checking",
362 "perform more expensive internal checking",
363 set_option, 1, &__mf_opts.internal_checking},
364 {"print-leaks",
365 "print any memory leaks at program shutdown",
366 set_option, 1, &__mf_opts.print_leaks},
367 #ifdef HAVE___LIBC_FREERES
368 {"libc-freeres",
369 "call glibc __libc_freeres at shutdown for better leak data",
370 set_option, 1, &__mf_opts.call_libc_freeres},
371 #endif
372 {"check-initialization",
373 "detect uninitialized object reads",
374 set_option, 1, &__mf_opts.check_initialization},
375 {"verbose-violations",
376 "print verbose messages when memory violations occur",
377 set_option, 1, &__mf_opts.verbose_violations},
378 {"abbreviate",
379 "abbreviate repetitive listings",
380 set_option, 1, &__mf_opts.abbreviate},
381 {"timestamps",
382 "track object lifetime timestamps",
383 set_option, 1, &__mf_opts.timestamps},
384 {"ignore-reads",
385 "ignore read accesses - assume okay",
386 set_option, 1, &__mf_opts.ignore_reads},
387 {"wipe-stack",
388 "wipe stack objects at unwind",
389 set_option, 1, &__mf_opts.wipe_stack},
390 {"wipe-heap",
391 "wipe heap objects at free",
392 set_option, 1, &__mf_opts.wipe_heap},
393 {"heur-proc-map",
394 "support /proc/self/map heuristics",
395 set_option, 1, &__mf_opts.heur_proc_map},
396 {"heur-stack-bound",
397 "enable a simple upper stack bound heuristic",
398 set_option, 1, &__mf_opts.heur_stack_bound},
399 {"heur-start-end",
400 "support _start.._end heuristics",
401 set_option, 1, &__mf_opts.heur_start_end},
402 {"heur-stdlib",
403 "register standard library data (argv, errno, stdin, ...)",
404 set_option, 1, &__mf_opts.heur_std_data},
405 {"free-queue-length",
406 "queue N deferred free() calls before performing them",
407 read_integer_option, 0, &__mf_opts.free_queue_length},
408 {"persistent-count",
409 "keep a history of N unregistered regions",
410 read_integer_option, 0, &__mf_opts.persistent_count},
411 {"crumple-zone",
412 "surround allocations with crumple zones of N bytes",
413 read_integer_option, 0, &__mf_opts.crumple_zone},
414 /* XXX: not type-safe.
415 {"lc-mask",
416 "set lookup cache size mask to N (2**M - 1)",
417 read_integer_option, 0, (int *)(&__mf_lc_mask)},
418 {"lc-shift",
419 "set lookup cache pointer shift",
420 read_integer_option, 0, (int *)(&__mf_lc_shift)},
421 */
422 {"lc-adapt",
423 "adapt mask/shift parameters after N cache misses",
424 read_integer_option, 1, &__mf_opts.adapt_cache},
425 {"backtrace",
426 "keep an N-level stack trace of each call context",
427 read_integer_option, 0, &__mf_opts.backtrace},
428 #ifdef LIBMUDFLAPTH
429 {"thread-stack",
430 "override thread stacks allocation: N kB",
431 read_integer_option, 0, &__mf_opts.thread_stack},
432 #endif
433 {0, 0, set_option, 0, NULL}
434 };
435
436 static void
437 __mf_usage ()
438 {
439 struct mudoption *opt;
440
441 fprintf (stderr,
442 "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
443 "Mudflap is Copyright (C) 2002-2009 Free Software Foundation, Inc.\n"
444 "\n"
445 "The mudflap code can be controlled by an environment variable:\n"
446 "\n"
447 "$ export MUDFLAP_OPTIONS='<options>'\n"
448 "$ <mudflapped_program>\n"
449 "\n"
450 "where <options> is a space-separated list of \n"
451 "any of the following options. Use `-no-OPTION' to disable options.\n"
452 "\n",
453 #if HAVE_PTHREAD_H
454 (pthread_join ? "multi-threaded " : "single-threaded "),
455 #else
456 "",
457 #endif
458 #if LIBMUDFLAPTH
459 "thread-aware "
460 #else
461 "thread-unaware "
462 #endif
463 );
464 /* XXX: The multi-threaded thread-unaware combination is bad. */
465
466 for (opt = options; opt->name; opt++)
467 {
468 int default_p = (opt->value == * opt->target);
469
470 switch (opt->type)
471 {
472 char buf[128];
473 case set_option:
474 fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
475 if (default_p)
476 fprintf (stderr, " [active]\n");
477 else
478 fprintf (stderr, "\n");
479 break;
480 case read_integer_option:
481 strncpy (buf, opt->name, 128);
482 strcpy (buf + strlen (opt->name), "=N");
483 fprintf (stderr, "-%-23.23s %s", buf, opt->description);
484 fprintf (stderr, " [%d]\n", * opt->target);
485 break;
486 default: abort();
487 }
488 }
489
490 fprintf (stderr, "\n");
491 }
492
493
494 int
495 __mf_set_options (const char *optstr)
496 {
497 int rc;
498 LOCKTH ();
499 BEGIN_RECURSION_PROTECT ();
500 rc = __mfu_set_options (optstr);
501 /* XXX: It's not really that easy. A change to a bunch of parameters
502 can require updating auxiliary state or risk crashing:
503 free_queue_length, crumple_zone ... */
504 END_RECURSION_PROTECT ();
505 UNLOCKTH ();
506 return rc;
507 }
508
509
510 int
511 __mfu_set_options (const char *optstr)
512 {
513 struct mudoption *opts = 0;
514 char *nxt = 0;
515 long tmp = 0;
516 int rc = 0;
517 const char *saved_optstr = optstr;
518
519 /* XXX: bounds-check for optstr! */
520
521 while (*optstr)
522 {
523 switch (*optstr) {
524 case ' ':
525 case '\t':
526 case '\n':
527 optstr++;
528 break;
529
530 case '-':
531 if (*optstr+1)
532 {
533 int negate = 0;
534 optstr++;
535
536 if (*optstr == '?' ||
537 strncmp (optstr, "help", 4) == 0)
538 {
539 /* Caller will print help and exit. */
540 return -1;
541 }
542
543 if (strncmp (optstr, "no-", 3) == 0)
544 {
545 negate = 1;
546 optstr = & optstr[3];
547 }
548
549 for (opts = options; opts->name; opts++)
550 {
551 if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
552 {
553 optstr += strlen (opts->name);
554 assert (opts->target);
555 switch (opts->type)
556 {
557 case set_option:
558 if (negate)
559 *(opts->target) = 0;
560 else
561 *(opts->target) = opts->value;
562 break;
563 case read_integer_option:
564 if (! negate && (*optstr == '=' && *(optstr+1)))
565 {
566 optstr++;
567 tmp = strtol (optstr, &nxt, 10);
568 if ((optstr != nxt) && (tmp != LONG_MAX))
569 {
570 optstr = nxt;
571 *(opts->target) = (int)tmp;
572 }
573 }
574 else if (negate)
575 * opts->target = 0;
576 break;
577 }
578 }
579 }
580 }
581 break;
582
583 default:
584 fprintf (stderr,
585 "warning: unrecognized string '%s' in mudflap options\n",
586 optstr);
587 optstr += strlen (optstr);
588 rc = -1;
589 break;
590 }
591 }
592
593 /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
594 __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
595 __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);
596
597 /* Clear the lookup cache, in case the parameters got changed. */
598 /* XXX: race */
599 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
600 /* void slot 0 */
601 __mf_lookup_cache[0].low = MAXPTR;
602
603 TRACE ("set options from `%s'\n", saved_optstr);
604
605 /* Call this unconditionally, in case -sigusr1-report was toggled. */
606 __mf_sigusr1_respond ();
607
608 return rc;
609 }
610
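/* Example: with the parser above, a setting such as

     MUDFLAP_OPTIONS='-mode-check -collect-stats -backtrace=8 -no-abbreviate'

   selects mode_check, enables statistics collection, keeps 8 backtrace
   levels and clears the abbreviate flag.  An unrecognized word prints a
   warning and makes this function return -1, which __mf_init turns into
   the usage message followed by exit(1). */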
611
612 #ifdef PIC
613
614 void
615 __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
616 {
617 char *err;
618
619 assert (e);
620 if (e->pointer) return;
621
622 #if HAVE_DLVSYM
623 if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
624 e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
625 else
626 #endif
627 e->pointer = dlsym (RTLD_NEXT, e->name);
628
629 err = dlerror ();
630
631 if (err)
632 {
633 fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
634 e->name, err);
635 abort ();
636 }
637 if (! e->pointer)
638 {
639 fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
640 abort ();
641 }
642 }
643
644
645 static void
646 __mf_resolve_dynamics ()
647 {
648 int i;
649 for (i = 0; i < dyn_INITRESOLVE; i++)
650 __mf_resolve_single_dynamic (& __mf_dynamic[i]);
651 }
652
653
654 /* NB: order must match enums in mf-impl.h */
655 struct __mf_dynamic_entry __mf_dynamic [] =
656 {
657 {NULL, "calloc", NULL},
658 {NULL, "free", NULL},
659 {NULL, "malloc", NULL},
660 {NULL, "mmap", NULL},
661 {NULL, "munmap", NULL},
662 {NULL, "realloc", NULL},
663 {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
664 #ifdef LIBMUDFLAPTH
665 {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
666 {NULL, "pthread_join", NULL},
667 {NULL, "pthread_exit", NULL}
668 #endif
669 };
670
671 #endif /* PIC */
672
673
674
675 /* ------------------------------------------------------------------------ */
676
677 /* Lookup & manage automatic initialization of the five or so splay trees. */
678 static mfsplay_tree
679 __mf_object_tree (int type)
680 {
681 static mfsplay_tree trees [__MF_TYPE_MAX+1];
682 assert (type >= 0 && type <= __MF_TYPE_MAX);
683 if (UNLIKELY (trees[type] == NULL))
684 trees[type] = mfsplay_tree_new ();
685 return trees[type];
686 }
687
688
689 /* not static */void
690 __mf_init ()
691 {
692 char *ov = 0;
693
694 /* Return if initialization has already been done. */
695 if (LIKELY (__mf_starting_p == 0))
696 return;
697
698 /* This initial bootstrap phase requires that __mf_starting_p = 1. */
699 #ifdef PIC
700 __mf_resolve_dynamics ();
701 #endif
702 __mf_starting_p = 0;
703
704 __mf_set_state (active);
705
706 __mf_set_default_options ();
707
708 ov = getenv ("MUDFLAP_OPTIONS");
709 if (ov)
710 {
711 int rc = __mfu_set_options (ov);
712 if (rc < 0)
713 {
714 __mf_usage ();
715 exit (1);
716 }
717 }
718
719 /* Initialize to a non-zero description epoch. */
720 __mf_describe_object (NULL);
721
722 #define REG_RESERVED(obj) \
723 __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
724
725 REG_RESERVED (__mf_lookup_cache);
726 REG_RESERVED (__mf_lc_mask);
727 REG_RESERVED (__mf_lc_shift);
728 /* XXX: others of our statics? */
729
730 /* Prevent access to *NULL. */
731 __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
732 __mf_lookup_cache[0].low = (uintptr_t) -1;
733 }
734
735
736
737 int
738 __wrap_main (int argc, char* argv[])
739 {
740 extern char **environ;
741 extern int main ();
742 extern int __real_main ();
743 static int been_here = 0;
744
745 if (__mf_opts.heur_std_data && ! been_here)
746 {
747 unsigned i;
748
749 been_here = 1;
750 __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
751 for (i=0; i<argc; i++)
752 {
753 unsigned j = strlen (argv[i]);
754 __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
755 }
756
757 for (i=0; ; i++)
758 {
759 char *e = environ[i];
760 unsigned j;
761 if (e == NULL) break;
762 j = strlen (environ[i]);
763 __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
764 }
765 __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");
766
767 __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");
768
769 __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
770 __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
771 __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
772
773 /* Make some effort to register ctype.h static arrays. */
774 /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
775 /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
776 with in mf-hooks2.c. */
777 }
778
779 #ifdef PIC
780 return main (argc, argv, environ);
781 #else
782 return __real_main (argc, argv, environ);
783 #endif
784 }
785
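/* __wrap_main depends on the linker's symbol wrapping: when the program is
   linked with something like

     gcc -fmudflap ... -Wl,--wrap,main

   (the GCC driver is expected to arrange this for -fmudflap links),
   references to main resolve to __wrap_main and the original function
   stays reachable as __real_main, so the registrations above run before
   any user code. */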
786
787
788 extern void __mf_fini () DTOR;
789 void __mf_fini ()
790 {
791 TRACE ("__mf_fini\n");
792 __mfu_report ();
793
794 #ifndef PIC
795 /* Since we didn't populate the tree for allocations in constructors
796 before __mf_init, we cannot check destructors after __mf_fini. */
797 __mf_opts.mudflap_mode = mode_nop;
798 #endif
799 }
800
801
802
803 /* ------------------------------------------------------------------------ */
804 /* __mf_check */
805
806 void __mf_check (void *ptr, size_t sz, int type, const char *location)
807 {
808 LOCKTH ();
809 BEGIN_RECURSION_PROTECT ();
810 __mfu_check (ptr, sz, type, location);
811 END_RECURSION_PROTECT ();
812 UNLOCKTH ();
813 }
814
815
816 void __mfu_check (void *ptr, size_t sz, int type, const char *location)
817 {
818 unsigned entry_idx = __MF_CACHE_INDEX (ptr);
819 struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
820 int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
821 uintptr_t ptr_low = (uintptr_t) ptr;
822 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
823 struct __mf_cache old_entry = *entry;
824
825 if (UNLIKELY (__mf_opts.sigusr1_report))
826 __mf_sigusr1_respond ();
827 if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
828 return;
829
830 TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
831 ptr, entry_idx, (unsigned long)sz,
832 (type == 0 ? "read" : "write"), location);
833
834 switch (__mf_opts.mudflap_mode)
835 {
836 case mode_nop:
837 /* It is tempting to poison the cache here similarly to
838 mode_populate. However that eliminates a valuable
839 distinction between these two modes. mode_nop is useful to
840 let a user count & trace every single check / registration
841 call. mode_populate is useful to let a program run fast
842 while unchecked.
843 */
844 judgement = 1;
845 break;
846
847 case mode_populate:
848 entry->low = ptr_low;
849 entry->high = ptr_high;
850 judgement = 1;
851 break;
852
853 case mode_check:
854 {
855 unsigned heuristics = 0;
856
857 /* Advance aging/adaptation counters. */
858 static unsigned adapt_count;
859 adapt_count ++;
860 if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
861 adapt_count > __mf_opts.adapt_cache))
862 {
863 adapt_count = 0;
864 __mf_adapt_cache ();
865 }
866
867 /* Looping only occurs if heuristics were triggered. */
868 while (judgement == 0)
869 {
870 DECLARE (void, free, void *p);
871 __mf_object_t* ovr_obj[1];
872 unsigned obj_count;
873 __mf_object_t** all_ovr_obj = NULL;
874 __mf_object_t** dealloc_me = NULL;
875 unsigned i;
876
877 /* Find all overlapping objects. Be optimistic that there is just one. */
878 obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
879 if (UNLIKELY (obj_count > 1))
880 {
881 /* Allocate a real buffer and do the search again. */
882 DECLARE (void *, malloc, size_t c);
883 unsigned n;
884 all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
885 obj_count));
886 if (all_ovr_obj == NULL) abort ();
887 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
888 assert (n == obj_count);
889 dealloc_me = all_ovr_obj;
890 }
891 else
892 {
893 all_ovr_obj = ovr_obj;
894 dealloc_me = NULL;
895 }
896
897 /* Update object statistics. */
898 for (i = 0; i < obj_count; i++)
899 {
900 __mf_object_t *obj = all_ovr_obj[i];
901 assert (obj != NULL);
902 if (type == __MF_CHECK_READ)
903 obj->read_count ++;
904 else
905 obj->write_count ++;
906 obj->liveness ++;
907 }
908
909 /* Iterate over the various objects. There are a number of special cases. */
910 for (i = 0; i < obj_count; i++)
911 {
912 __mf_object_t *obj = all_ovr_obj[i];
913
914 /* Any __MF_TYPE_NOACCESS hit is bad. */
915 if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
916 judgement = -1;
917
918 /* Any object with a watch flag is bad. */
919 if (UNLIKELY (obj->watching_p))
920 judgement = -2; /* trigger VIOL_WATCH */
921
922 /* A read from an uninitialized object is bad. */
923 if (UNLIKELY (__mf_opts.check_initialization
924 /* reading */
925 && type == __MF_CHECK_READ
926 /* not written */
927 && obj->write_count == 0
928 /* uninitialized (heap) */
929 && obj->type == __MF_TYPE_HEAP))
930 judgement = -1;
931 }
932
933 /* We now know that the access spans no invalid objects. */
934 if (LIKELY (judgement >= 0))
935 for (i = 0; i < obj_count; i++)
936 {
937 __mf_object_t *obj = all_ovr_obj[i];
938
939 /* Is this access entirely contained within this object? */
940 if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
941 {
942 /* Valid access. */
943 entry->low = obj->low;
944 entry->high = obj->high;
945 judgement = 1;
946 }
947 }
948
949 /* This access runs off the end of one valid object. That
950 could be okay, if other valid objects fill in all the
951 holes. We allow this only for HEAP and GUESS type
952 objects. Accesses to STATIC and STACK variables
953 should not be allowed to span. */
954 if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
955 {
956 unsigned uncovered = 0;
957 for (i = 0; i < obj_count; i++)
958 {
959 __mf_object_t *obj = all_ovr_obj[i];
960 int j, uncovered_low_p, uncovered_high_p;
961 uintptr_t ptr_lower, ptr_higher;
962
963 uncovered_low_p = ptr_low < obj->low;
964 ptr_lower = CLAMPSUB (obj->low, 1);
965 uncovered_high_p = ptr_high > obj->high;
966 ptr_higher = CLAMPADD (obj->high, 1);
967
968 for (j = 0; j < obj_count; j++)
969 {
970 __mf_object_t *obj2 = all_ovr_obj[j];
971
972 if (i == j) continue;
973
974 /* Filter out objects that cannot be spanned across. */
975 if (obj2->type == __MF_TYPE_STACK
976 || obj2->type == __MF_TYPE_STATIC)
977 continue;
978
979 /* Consider a side "covered" if obj2 includes
980 the next byte on that side. */
981 if (uncovered_low_p
982 && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
983 uncovered_low_p = 0;
984 if (uncovered_high_p
985 && (ptr_high >= obj2->low && ptr_higher <= obj2->high))
986 uncovered_high_p = 0;
987 }
988
989 if (uncovered_low_p || uncovered_high_p)
990 uncovered ++;
991 }
992
993 /* Success if no overlapping objects are uncovered. */
994 if (uncovered == 0)
995 judgement = 1;
996 }
997
998
999 if (dealloc_me != NULL)
1000 CALL_REAL (free, dealloc_me);
1001
1002 /* If the judgment is still unknown at this stage, loop
1003 around at most one more time. */
1004 if (judgement == 0)
1005 {
1006 if (heuristics++ < 2) /* XXX parametrize this number? */
1007 judgement = __mf_heuristic_check (ptr_low, ptr_high);
1008 else
1009 judgement = -1;
1010 }
1011 }
1012
1013 }
1014 break;
1015
1016 case mode_violate:
1017 judgement = -1;
1018 break;
1019 }
1020
1021 if (__mf_opts.collect_stats)
1022 {
1023 __mf_count_check ++;
1024
1025 if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
1026 /* && (old_entry.low != 0) && (old_entry.high != 0)) */
1027 __mf_lookup_cache_reusecount [entry_idx] ++;
1028 }
1029
1030 if (UNLIKELY (judgement < 0))
1031 __mf_violation (ptr, sz,
1032 (uintptr_t) __builtin_return_address (0), location,
1033 ((judgement == -1) ?
1034 (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
1035 __MF_VIOL_WATCH));
1036 }
1037
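/* The cache updates above are what make checking cheap in the common case:
   code instrumented by the compiler consults __mf_lookup_cache before
   calling __mf_check at all, roughly

     struct __mf_cache *e = & __mf_lookup_cache [__MF_CACHE_INDEX (ptr)];
     if (UNLIKELY (e->low > (uintptr_t) ptr
                   || e->high < (uintptr_t) ptr + sz - 1))
       __mf_check (ptr, sz, type, location);

   (a sketch only; the exact sequence is emitted by the compiler's mudflap
   instrumentation pass), so repeated accesses to an already validated
   object cost just a pair of comparisons. */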
1038
1039 static __mf_object_t *
1040 __mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
1041 const char *name, uintptr_t pc)
1042 {
1043 DECLARE (void *, calloc, size_t c, size_t n);
1044
1045 __mf_object_t *new_obj;
1046 new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
1047 new_obj->low = low;
1048 new_obj->high = high;
1049 new_obj->type = type;
1050 new_obj->name = name;
1051 new_obj->alloc_pc = pc;
1052 #if HAVE_GETTIMEOFDAY
1053 if (__mf_opts.timestamps)
1054 gettimeofday (& new_obj->alloc_time, NULL);
1055 #endif
1056 #if LIBMUDFLAPTH
1057 new_obj->alloc_thread = pthread_self ();
1058 #endif
1059
1060 if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
1061 new_obj->alloc_backtrace_size =
1062 __mf_backtrace (& new_obj->alloc_backtrace,
1063 (void *) pc, 2);
1064
1065 __mf_link_object (new_obj);
1066 return new_obj;
1067 }
1068
1069
1070 static void
1071 __mf_uncache_object (__mf_object_t *old_obj)
1072 {
1073 /* Remove any low/high pointers for this object from the lookup cache. */
1074
1075 /* Can it possibly exist in the cache? */
1076 if (LIKELY (old_obj->read_count + old_obj->write_count))
1077 {
1078 uintptr_t low = old_obj->low;
1079 uintptr_t high = old_obj->high;
1080 struct __mf_cache *entry;
1081 unsigned i;
1082 if ((high - low) >= (__mf_lc_mask << __mf_lc_shift))
1083 {
1084 /* For large objects (>= cache size - 1) check the whole cache. */
1085 entry = & __mf_lookup_cache [0];
1086 for (i = 0; i <= __mf_lc_mask; i++, entry++)
1087 {
1088 /* NB: the "||" in the following test permits this code to
1089 tolerate the situation introduced by __mf_check over
1090 contiguous objects, where a cache entry spans several
1091 objects. */
1092 if (entry->low == low || entry->high == high)
1093 {
1094 entry->low = MAXPTR;
1095 entry->high = MINPTR;
1096 }
1097 }
1098 }
1099 else
1100 {
1101 /* Object is smaller than the cache size. */
1102 unsigned entry_low_idx = __MF_CACHE_INDEX (low);
1103 unsigned entry_high_idx = __MF_CACHE_INDEX (high);
1104 if (entry_low_idx <= entry_high_idx)
1105 {
1106 entry = & __mf_lookup_cache [entry_low_idx];
1107 for (i = entry_low_idx; i <= entry_high_idx; i++, entry++)
1108 {
1109 /* NB: the "||" in the following test permits this code to
1110 tolerate the situation introduced by __mf_check over
1111 contiguous objects, where a cache entry spans several
1112 objects. */
1113 if (entry->low == low || entry->high == high)
1114 {
1115 entry->low = MAXPTR;
1116 entry->high = MINPTR;
1117 }
1118 }
1119 }
1120 else
1121 {
1122 /* Object wrapped around the end of the cache. First search
1123 from low to end of cache and then from 0 to high. */
1124 entry = & __mf_lookup_cache [entry_low_idx];
1125 for (i = entry_low_idx; i <= __mf_lc_mask; i++, entry++)
1126 {
1127 /* NB: the "||" in the following test permits this code to
1128 tolerate the situation introduced by __mf_check over
1129 contiguous objects, where a cache entry spans several
1130 objects. */
1131 if (entry->low == low || entry->high == high)
1132 {
1133 entry->low = MAXPTR;
1134 entry->high = MINPTR;
1135 }
1136 }
1137 entry = & __mf_lookup_cache [0];
1138 for (i = 0; i <= entry_high_idx; i++, entry++)
1139 {
1140 /* NB: the "||" in the following test permits this code to
1141 tolerate the situation introduced by __mf_check over
1142 contiguous objects, where a cache entry spans several
1143 objects. */
1144 if (entry->low == low || entry->high == high)
1145 {
1146 entry->low = MAXPTR;
1147 entry->high = MINPTR;
1148 }
1149 }
1150 }
1151 }
1152 }
1153 }
1154
1155
1156 void
1157 __mf_register (void *ptr, size_t sz, int type, const char *name)
1158 {
1159 LOCKTH ();
1160 BEGIN_RECURSION_PROTECT ();
1161 __mfu_register (ptr, sz, type, name);
1162 END_RECURSION_PROTECT ();
1163 UNLOCKTH ();
1164 }
1165
1166
1167 void
1168 __mfu_register (void *ptr, size_t sz, int type, const char *name)
1169 {
1170 TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
1171 ptr, (unsigned long) sz, type, name ? name : "");
1172
1173 if (__mf_opts.collect_stats)
1174 {
1175 __mf_count_register ++;
1176 __mf_total_register_size [(type < 0) ? 0 :
1177 (type > __MF_TYPE_MAX) ? 0 :
1178 type] += sz;
1179 }
1180
1181 if (UNLIKELY (__mf_opts.sigusr1_report))
1182 __mf_sigusr1_respond ();
1183
1184 switch (__mf_opts.mudflap_mode)
1185 {
1186 case mode_nop:
1187 break;
1188
1189 case mode_violate:
1190 __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
1191 __MF_VIOL_REGISTER);
1192 break;
1193
1194 case mode_populate:
1195 /* Clear the cache. */
1196 /* XXX: why the entire cache? */
1197 /* XXX: race */
1198 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1199 /* void slot 0 */
1200 __mf_lookup_cache[0].low = MAXPTR;
1201 break;
1202
1203 case mode_check:
1204 {
1205 __mf_object_t *ovr_objs [1];
1206 unsigned num_overlapping_objs;
1207 uintptr_t low = (uintptr_t) ptr;
1208 uintptr_t high = CLAMPSZ (ptr, sz);
1209 uintptr_t pc = (uintptr_t) __builtin_return_address (0);
1210
1211 /* Treat unknown size indication as 1. */
1212 if (UNLIKELY (sz == 0)) sz = 1;
1213
1214 /* Look only for objects of the same type. This permits, e.g., registration
1215 of a STATIC overlapping with a GUESS, or a HEAP with a NOACCESS. Harmful
1216 overlaps will still be detected at __mf_check time. */
1217 num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);
1218
1219 /* Handle overlaps. */
1220 if (UNLIKELY (num_overlapping_objs > 0))
1221 {
1222 __mf_object_t *ovr_obj = ovr_objs[0];
1223
1224 /* Accept certain specific duplication pairs. */
1225 if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
1226 && ovr_obj->low == low
1227 && ovr_obj->high == high
1228 && ovr_obj->type == type)
1229 {
1230 /* Duplicate registration for static objects may come
1231 from distinct compilation units. */
1232 VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
1233 (void *) low, (void *) high,
1234 (ovr_obj->name ? ovr_obj->name : ""));
1235 break;
1236 }
1237
1238 /* Alas, a genuine violation. */
1239 else
1240 {
1241 /* Two or more *real* mappings here. */
1242 __mf_violation ((void *) ptr, sz,
1243 (uintptr_t) __builtin_return_address (0), NULL,
1244 __MF_VIOL_REGISTER);
1245 }
1246 }
1247 else /* No overlapping objects: AOK. */
1248 __mf_insert_new_object (low, high, type, name, pc);
1249
1250 /* We could conceivably call __mf_check() here to prime the cache,
1251 but then the read_count/write_count field is not reliable. */
1252 break;
1253 }
1254 } /* end switch (__mf_opts.mudflap_mode) */
1255 }
1256
1257
1258 void
1259 __mf_unregister (void *ptr, size_t sz, int type)
1260 {
1261 LOCKTH ();
1262 BEGIN_RECURSION_PROTECT ();
1263 __mfu_unregister (ptr, sz, type);
1264 END_RECURSION_PROTECT ();
1265 UNLOCKTH ();
1266 }
1267
1268
1269 void
1270 __mfu_unregister (void *ptr, size_t sz, int type)
1271 {
1272 DECLARE (void, free, void *ptr);
1273
1274 if (UNLIKELY (__mf_opts.sigusr1_report))
1275 __mf_sigusr1_respond ();
1276
1277 TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);
1278
1279 switch (__mf_opts.mudflap_mode)
1280 {
1281 case mode_nop:
1282 break;
1283
1284 case mode_violate:
1285 __mf_violation (ptr, sz,
1286 (uintptr_t) __builtin_return_address (0), NULL,
1287 __MF_VIOL_UNREGISTER);
1288 break;
1289
1290 case mode_populate:
1291 /* Clear the cache. */
1292 /* XXX: race */
1293 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1294 /* void slot 0 */
1295 __mf_lookup_cache[0].low = MAXPTR;
1296 break;
1297
1298 case mode_check:
1299 {
1300 __mf_object_t *old_obj = NULL;
1301 __mf_object_t *del_obj = NULL; /* Object to actually delete. */
1302 __mf_object_t *objs[1] = {NULL};
1303 unsigned num_overlapping_objs;
1304
1305 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1306 CLAMPSZ (ptr, sz), objs, 1, type);
1307
1308 /* Special case for HEAP_I - see free & realloc hook. They don't
1309 know whether the input region was HEAP or HEAP_I before
1310 unmapping it. Here we give HEAP a try in case HEAP_I
1311 failed. */
1312 if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
1313 {
1314 num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1315 CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
1316 }
1317
1318 old_obj = objs[0];
1319 if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
1320 || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
1321 || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
1322 {
1323 __mf_violation (ptr, sz,
1324 (uintptr_t) __builtin_return_address (0), NULL,
1325 __MF_VIOL_UNREGISTER);
1326 break;
1327 }
1328
1329 __mf_unlink_object (old_obj);
1330 __mf_uncache_object (old_obj);
1331
1332 /* Wipe buffer contents if desired. */
1333 if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
1334 || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
1335 || old_obj->type == __MF_TYPE_HEAP_I)))
1336 {
1337 memset ((void *) old_obj->low,
1338 0,
1339 (size_t) (old_obj->high - old_obj->low + 1));
1340 }
1341
1342 /* Manage the object cemetary. */
1343 if (__mf_opts.persistent_count > 0
1344 && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
1345 {
1346 old_obj->deallocated_p = 1;
1347 old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
1348 #if HAVE_GETTIMEOFDAY
1349 if (__mf_opts.timestamps)
1350 gettimeofday (& old_obj->dealloc_time, NULL);
1351 #endif
1352 #ifdef LIBMUDFLAPTH
1353 old_obj->dealloc_thread = pthread_self ();
1354 #endif
1355
1356 if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
1357 old_obj->dealloc_backtrace_size =
1358 __mf_backtrace (& old_obj->dealloc_backtrace,
1359 NULL, 2);
1360
1361 /* Encourage this object to be displayed again in current epoch. */
1362 old_obj->description_epoch --;
1363
1364 /* Put this object into the cemetary. This may require this plot to
1365 be recycled, and the previous resident to be designated del_obj. */
1366 {
1367 unsigned row = old_obj->type;
1368 unsigned plot = __mf_object_dead_head [row];
1369
1370 del_obj = __mf_object_cemetary [row][plot];
1371 __mf_object_cemetary [row][plot] = old_obj;
1372 plot ++;
1373 if (plot == __mf_opts.persistent_count) plot = 0;
1374 __mf_object_dead_head [row] = plot;
1375 }
1376 }
1377 else
1378 del_obj = old_obj;
1379
1380 if (__mf_opts.print_leaks)
1381 {
1382 if ((old_obj->read_count + old_obj->write_count) == 0 &&
1383 (old_obj->type == __MF_TYPE_HEAP
1384 || old_obj->type == __MF_TYPE_HEAP_I))
1385 {
1386 /* The problem with a warning message here is that we may not
1387 be privy to accesses to such objects that occur within
1388 uninstrumented libraries. */
1389 #if 0
1390 fprintf (stderr,
1391 "*******\n"
1392 "mudflap warning: unaccessed registered object:\n");
1393 __mf_describe_object (old_obj);
1394 #endif
1395 }
1396 }
1397
1398 if (del_obj != NULL) /* May or may not equal old_obj. */
1399 {
1400 if (__mf_opts.backtrace > 0)
1401 {
1402 CALL_REAL(free, del_obj->alloc_backtrace);
1403 if (__mf_opts.persistent_count > 0)
1404 {
1405 CALL_REAL(free, del_obj->dealloc_backtrace);
1406 }
1407 }
1408 CALL_REAL(free, del_obj);
1409 }
1410
1411 break;
1412 }
1413 } /* end switch (__mf_opts.mudflap_mode) */
1414
1415
1416 if (__mf_opts.collect_stats)
1417 {
1418 __mf_count_unregister ++;
1419 __mf_total_unregister_size += sz;
1420 }
1421 }
1422
1423
1424
1425 struct tree_stats
1426 {
1427 unsigned obj_count;
1428 unsigned long total_size;
1429 unsigned live_obj_count;
1430 double total_weight;
1431 double weighted_size;
1432 unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
1433 };
1434
1435
1436
1437 static int
1438 __mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
1439 {
1440 __mf_object_t *obj = (__mf_object_t *) n->value;
1441 struct tree_stats *s = (struct tree_stats *) param;
1442
1443 assert (obj != NULL && s != NULL);
1444
1445 /* Exclude never-accessed objects. */
1446 if (obj->read_count + obj->write_count)
1447 {
1448 s->obj_count ++;
1449 s->total_size += (obj->high - obj->low + 1);
1450
1451 if (obj->liveness)
1452 {
1453 unsigned i;
1454 uintptr_t addr;
1455
1456 /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1457 (void *) obj->low, obj->liveness, obj->name); */
1458
1459 s->live_obj_count ++;
1460 s->total_weight += (double) obj->liveness;
1461 s->weighted_size +=
1462 (double) (obj->high - obj->low + 1) *
1463 (double) obj->liveness;
1464
1465 addr = obj->low;
1466 for (i=0; i<sizeof(uintptr_t) * 8; i++)
1467 {
1468 unsigned bit = addr & 1;
1469 s->weighted_address_bits[i][bit] += obj->liveness;
1470 addr = addr >> 1;
1471 }
1472
1473 /* Age the liveness value. */
1474 obj->liveness >>= 1;
1475 }
1476 }
1477
1478 return 0;
1479 }
1480
1481
1482 static void
1483 __mf_adapt_cache ()
1484 {
1485 struct tree_stats s;
1486 uintptr_t new_mask = 0;
1487 unsigned char new_shift;
1488 float cache_utilization;
1489 float max_value;
1490 static float smoothed_new_shift = -1.0;
1491 unsigned i;
1492
1493 memset (&s, 0, sizeof (s));
1494
1495 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
1496 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
1497 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
1498 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
1499 mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);
1500
1501 /* Maybe we're dealing with funny aging/adaptation parameters, or an
1502 empty tree. Just leave the cache alone in such cases, rather
1503 than risk dying by division-by-zero. */
1504 if (! (s.obj_count > 0 && s.live_obj_count > 0 && s.total_weight > 0.0))
1505 return;
1506
1507 /* Guess a good value for the shift parameter by finding an address bit that is a
1508 good discriminant of lively objects. */
1509 max_value = 0.0;
1510 for (i=0; i<sizeof (uintptr_t)*8; i++)
1511 {
1512 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1513 if (max_value < value) max_value = value;
1514 }
1515 for (i=0; i<sizeof (uintptr_t)*8; i++)
1516 {
1517 float shoulder_factor = 0.7; /* Include slightly less popular bits too. */
1518 float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1519 if (value >= max_value * shoulder_factor)
1520 break;
1521 }
1522 if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
1523 /* Converge toward this slowly to reduce flapping. */
1524 smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
1525 new_shift = (unsigned) (smoothed_new_shift + 0.5);
1526 assert (new_shift < sizeof (uintptr_t)*8);
1527
1528 /* Count number of used buckets. */
1529 cache_utilization = 0.0;
1530 for (i = 0; i < (1 + __mf_lc_mask); i++)
1531 if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
1532 cache_utilization += 1.0;
1533 cache_utilization /= (1 + __mf_lc_mask);
1534
1535 new_mask |= 0xffff; /* XXX: force a large cache. */
1536 new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
1537
1538 VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1539 "util=%u%% m=%p s=%u\n",
1540 s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
1541 (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);
1542
1543 /* We should reinitialize cache if its parameters have changed. */
1544 if (new_mask != __mf_lc_mask ||
1545 new_shift != __mf_lc_shift)
1546 {
1547 __mf_lc_mask = new_mask;
1548 __mf_lc_shift = new_shift;
1549 /* XXX: race */
1550 memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1551 /* void slot 0 */
1552 __mf_lookup_cache[0].low = MAXPTR;
1553 }
1554 }
1555
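/* A worked example of the shift heuristic above, with illustrative numbers:
   if most live heap objects sit on 16-byte boundaries, then for address
   bits 0..3 one of weighted_address_bits[i][0] or [i][1] stays near zero,
   so their product is tiny; the first bit whose product reaches 70% of the
   maximum is typically bit 4, and smoothed_new_shift drifts toward 4.
   Discarding those constant low bits before masking spreads distinct
   objects over distinct cache slots instead of wasting index bits that
   never vary. */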
1556
1557
1558 /* __mf_find_object[s] */
1559
1560 /* Find overlapping live objects within [low,high]. Return up to
1561 max_objs of their pointers in objs[]. Return total count of
1562 overlaps (may exceed max_objs). */
1563
1564 unsigned
1565 __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
1566 __mf_object_t **objs, unsigned max_objs, int type)
1567 {
1568 unsigned count = 0;
1569 mfsplay_tree t = __mf_object_tree (type);
1570 mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
1571 int direction;
1572
1573 mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
1574 /* An exact match for base address implies a hit. */
1575 if (n != NULL)
1576 {
1577 if (count < max_objs)
1578 objs[count] = (__mf_object_t *) n->value;
1579 count ++;
1580 }
1581
1582 /* Iterate to both sides of this key value to find all overlapping objects. */
1583 for (direction = 0; direction < 2; direction ++)
1584 {
1585 /* Reset search origin. */
1586 k = (mfsplay_tree_key) ptr_low;
1587
1588 while (1)
1589 {
1590 __mf_object_t *obj;
1591
1592 n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
1593 if (n == NULL) break;
1594 obj = (__mf_object_t *) n->value;
1595
1596 if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
1597 break;
1598
1599 if (count < max_objs)
1600 objs[count] = (__mf_object_t *) n->value;
1601 count ++;
1602
1603 k = (mfsplay_tree_key) obj->low;
1604 }
1605 }
1606
1607 return count;
1608 }
1609
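/* The return value deliberately reports the full overlap count even when
   objs[] is too small, so callers can retry with a right-sized buffer.
   __mfu_check above uses exactly that two-pass pattern with
   __mf_find_objects (defined just below); names here are placeholders:

     n = __mf_find_objects (lo, hi, one_slot, 1);
     if (n > 1)
       {
         buf = CALL_REAL (malloc, n * sizeof (__mf_object_t *));
         __mf_find_objects (lo, hi, buf, n);
       }
*/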
1610
1611 unsigned
1612 __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
1613 __mf_object_t **objs, unsigned max_objs)
1614 {
1615 int type;
1616 unsigned count = 0;
1617
1618 /* Search each splay tree for overlaps. */
1619 for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
1620 {
1621 unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
1622 if (c > max_objs)
1623 {
1624 max_objs = 0;
1625 objs = NULL;
1626 }
1627 else /* NB: c may equal 0 */
1628 {
1629 max_objs -= c;
1630 objs += c;
1631 }
1632 count += c;
1633 }
1634
1635 return count;
1636 }
1637
1638
1639
1640 /* __mf_link_object */
1641
1642 static void
1643 __mf_link_object (__mf_object_t *node)
1644 {
1645 mfsplay_tree t = __mf_object_tree (node->type);
1646 mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
1647 }
1648
1649 /* __mf_unlink_object */
1650
1651 static void
1652 __mf_unlink_object (__mf_object_t *node)
1653 {
1654 mfsplay_tree t = __mf_object_tree (node->type);
1655 mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
1656 }
1657
1658 /* __mf_find_dead_objects */
1659
1660 /* Find overlapping dead objects within [low,high]. Return up to
1661 max_objs of their pointers in objs[]. Return total count of
1662 overlaps (may exceed max_objs). */
1663
1664 static unsigned
1665 __mf_find_dead_objects (uintptr_t low, uintptr_t high,
1666 __mf_object_t **objs, unsigned max_objs)
1667 {
1668 if (__mf_opts.persistent_count > 0)
1669 {
1670 unsigned count = 0;
1671 unsigned recollection = 0;
1672 unsigned row = 0;
1673
1674 assert (low <= high);
1675 assert (max_objs == 0 || objs != NULL);
1676
1677 /* Widen the search from the most recent plots in each row, looking
1678 backward in time. */
1679 recollection = 0;
1680 while (recollection < __mf_opts.persistent_count)
1681 {
1682 count = 0;
1683
1684 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1685 {
1686 unsigned plot;
1687 unsigned i;
1688
1689 plot = __mf_object_dead_head [row];
1690 for (i = 0; i <= recollection; i ++)
1691 {
1692 __mf_object_t *obj;
1693
1694 /* Look backward through row: it's a circular buffer. */
1695 if (plot > 0) plot --;
1696 else plot = __mf_opts.persistent_count - 1;
1697
1698 obj = __mf_object_cemetary [row][plot];
1699 if (obj && obj->low <= high && obj->high >= low)
1700 {
1701 /* Found an overlapping dead object! */
1702 if (count < max_objs)
1703 objs [count] = obj;
1704 count ++;
1705 }
1706 }
1707 }
1708
1709 if (count)
1710 break;
1711
1712 /* Look farther back in time. */
1713 recollection = (recollection * 2) + 1;
1714 }
1715
1716 return count;
1717 } else {
1718 return 0;
1719 }
1720 }
1721
1722 /* __mf_describe_object */
1723
1724 static void
1725 __mf_describe_object (__mf_object_t *obj)
1726 {
1727 static unsigned epoch = 0;
1728 if (obj == NULL)
1729 {
1730 epoch ++;
1731 return;
1732 }
1733
1734 if (__mf_opts.abbreviate && obj->description_epoch == epoch)
1735 {
1736 fprintf (stderr,
1737 "mudflap %sobject %p: name=`%s'\n",
1738 (obj->deallocated_p ? "dead " : ""),
1739 (void *) obj, (obj->name ? obj->name : ""));
1740 return;
1741 }
1742 else
1743 obj->description_epoch = epoch;
1744
1745 fprintf (stderr,
1746 "mudflap %sobject %p: name=`%s'\n"
1747 "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
1748 "alloc time=%lu.%06lu pc=%p"
1749 #ifdef LIBMUDFLAPTH
1750 " thread=%u"
1751 #endif
1752 "\n",
1753 (obj->deallocated_p ? "dead " : ""),
1754 (void *) obj, (obj->name ? obj->name : ""),
1755 (void *) obj->low, (void *) obj->high,
1756 (unsigned long) (obj->high - obj->low + 1),
1757 (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
1758 obj->type == __MF_TYPE_HEAP ? "heap" :
1759 obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
1760 obj->type == __MF_TYPE_STACK ? "stack" :
1761 obj->type == __MF_TYPE_STATIC ? "static" :
1762 obj->type == __MF_TYPE_GUESS ? "guess" :
1763 "unknown"),
1764 obj->read_count, obj->write_count, obj->liveness,
1765 obj->watching_p ? " watching" : "",
1766 obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
1767 (void *) obj->alloc_pc
1768 #ifdef LIBMUDFLAPTH
1769 , (unsigned) obj->alloc_thread
1770 #endif
1771 );
1772
1773 if (__mf_opts.backtrace > 0)
1774 {
1775 unsigned i;
1776 for (i=0; i<obj->alloc_backtrace_size; i++)
1777 fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
1778 }
1779
1780 if (__mf_opts.persistent_count > 0)
1781 {
1782 if (obj->deallocated_p)
1783 {
1784 fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
1785 #ifdef LIBMUDFLAPTH
1786 " thread=%u"
1787 #endif
1788 "\n",
1789 obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
1790 (void *) obj->dealloc_pc
1791 #ifdef LIBMUDFLAPTH
1792 , (unsigned) obj->dealloc_thread
1793 #endif
1794 );
1795
1796
1797 if (__mf_opts.backtrace > 0)
1798 {
1799 unsigned i;
1800 for (i=0; i<obj->dealloc_backtrace_size; i++)
1801 fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
1802 }
1803 }
1804 }
1805 }
1806
1807
1808 static int
1809 __mf_report_leaks_fn (mfsplay_tree_node n, void *param)
1810 {
1811 __mf_object_t *node = (__mf_object_t *) n->value;
1812 unsigned *count = (unsigned *) param;
1813
1814 if (count != NULL)
1815 (*count) ++;
1816
1817 fprintf (stderr, "Leaked object %u:\n", (*count));
1818 __mf_describe_object (node);
1819
1820 return 0;
1821 }
1822
1823
1824 static unsigned
1825 __mf_report_leaks ()
1826 {
1827 unsigned count = 0;
1828
1829 (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
1830 __mf_report_leaks_fn, & count);
1831 (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
1832 __mf_report_leaks_fn, & count);
1833
1834 return count;
1835 }
1836
1837 /* ------------------------------------------------------------------------ */
1838 /* __mf_report */
1839
1840 void
1841 __mf_report ()
1842 {
1843 LOCKTH ();
1844 BEGIN_RECURSION_PROTECT ();
1845 __mfu_report ();
1846 END_RECURSION_PROTECT ();
1847 UNLOCKTH ();
1848 }
1849
1850 void
1851 __mfu_report ()
1852 {
1853 if (__mf_opts.collect_stats)
1854 {
1855 fprintf (stderr,
1856 "*******\n"
1857 "mudflap stats:\n"
1858 "calls to __mf_check: %lu\n"
1859 " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
1860 " __mf_unregister: %lu [%luB]\n"
1861 " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
1862 __mf_count_check,
1863 __mf_count_register,
1864 __mf_total_register_size[0], __mf_total_register_size[1],
1865 __mf_total_register_size[2], __mf_total_register_size[3],
1866 __mf_total_register_size[4], /* XXX */
1867 __mf_count_unregister, __mf_total_unregister_size,
1868 __mf_count_violation[0], __mf_count_violation[1],
1869 __mf_count_violation[2], __mf_count_violation[3],
1870 __mf_count_violation[4]);
1871
1872 fprintf (stderr,
1873 "calls with reentrancy: %lu\n", __mf_reentrancy);
1874 #ifdef LIBMUDFLAPTH
1875 fprintf (stderr,
1876 " lock contention: %lu\n", __mf_lock_contention);
1877 #endif
1878
1879 /* Lookup cache stats. */
1880 {
1881 unsigned i;
1882 unsigned max_reuse = 0;
1883 unsigned num_used = 0;
1884 unsigned num_unused = 0;
1885
1886 for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
1887 {
1888 if (__mf_lookup_cache_reusecount[i])
1889 num_used ++;
1890 else
1891 num_unused ++;
1892 if (max_reuse < __mf_lookup_cache_reusecount[i])
1893 max_reuse = __mf_lookup_cache_reusecount[i];
1894 }
1895 fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
1896 num_used, num_unused, max_reuse);
1897 }
1898
1899 {
1900 unsigned live_count;
1901 live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
1902 fprintf (stderr, "number of live objects: %u\n", live_count);
1903 }
1904
1905 if (__mf_opts.persistent_count > 0)
1906 {
1907 unsigned dead_count = 0;
1908 unsigned row, plot;
1909 for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1910 for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
1911 if (__mf_object_cemetary [row][plot] != 0)
1912 dead_count ++;
1913 fprintf (stderr, " zombie objects: %u\n", dead_count);
1914 }
1915 }
1916 if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
1917 {
1918 unsigned l;
1919 extern void * __mf_wrap_alloca_indirect (size_t c);
1920
1921 /* Free up any remaining alloca()'d blocks. */
1922 __mf_wrap_alloca_indirect (0);
1923 #ifdef HAVE___LIBC_FREERES
1924 if (__mf_opts.call_libc_freeres)
1925 {
1926 extern void __libc_freeres (void);
1927 __libc_freeres ();
1928 }
1929 #endif
1930
1931 __mf_describe_object (NULL); /* Reset description epoch. */
1932 l = __mf_report_leaks ();
1933 fprintf (stderr, "number of leaked objects: %u\n", l);
1934 }
1935 }
1936
1937 /* __mf_backtrace */
1938
1939 size_t
1940 __mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
1941 {
1942 void ** pc_array;
1943 unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
1944 unsigned remaining_size;
1945 unsigned omitted_size = 0;
1946 unsigned i;
1947 DECLARE (void, free, void *ptr);
1948 DECLARE (void *, calloc, size_t c, size_t n);
1949 DECLARE (void *, malloc, size_t n);
1950
1951 pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
1952 #ifdef HAVE_BACKTRACE
1953 pc_array_size = backtrace (pc_array, pc_array_size);
1954 #else
1955 #define FETCH(n) do { if (pc_array_size >= n) { \
1956 pc_array[n] = __builtin_return_address(n); \
1957 if (pc_array[n] == 0) pc_array_size = n; } } while (0)
1958
1959 /* Unroll some calls to __builtin_return_address because that function
1960 only accepts a literal integer argument. */
1961 FETCH (0);
1962 #if 0
1963 /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
1964 rather than simply returning 0. :-( */
1965 FETCH (1);
1966 FETCH (2);
1967 FETCH (3);
1968 FETCH (4);
1969 FETCH (5);
1970 FETCH (6);
1971 FETCH (7);
1972 FETCH (8);
1973 if (pc_array_size > 8) pc_array_size = 9;
1974 #else
1975 if (pc_array_size > 0) pc_array_size = 1;
1976 #endif
1977
1978 #undef FETCH
1979 #endif
1980
1981 /* We want to trim the first few levels of the stack traceback,
1982 since they contain libmudflap wrappers and junk. If pc_array[]
1983 ends up containing a non-NULL guess_pc, then trim everything
1984 before that. Otherwise, omit the first guess_omit_levels
1985 entries. */
1986
1987 if (guess_pc != NULL)
1988 for (i=0; i<pc_array_size; i++)
1989 if (pc_array [i] == guess_pc)
1990 omitted_size = i;
1991
1992 if (omitted_size == 0) /* No match? */
1993 if (pc_array_size > guess_omit_levels)
1994 omitted_size = guess_omit_levels;
1995
1996 remaining_size = pc_array_size - omitted_size;
1997
1998 #ifdef HAVE_BACKTRACE_SYMBOLS
1999 *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
2000 #else
2001 {
2002 /* Let's construct a buffer by hand. It will have <remaining_size>
2003 char*'s at the front, pointing at individual strings immediately
2004 afterwards. */
2005 void *buffer;
2006 char *chars;
2007 char **pointers;
2008 enum { perline = 30 };
2009 buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
2010 pointers = (char **) buffer;
2011 chars = (char *)buffer + (remaining_size * sizeof (char *));
2012 for (i = 0; i < remaining_size; i++)
2013 {
2014 pointers[i] = chars;
2015 sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
2016 chars = chars + perline;
2017 }
2018 *symbols = pointers;
2019 }
2020 #endif
2021 CALL_REAL (free, pc_array);
2022
2023 return remaining_size;
2024 }
2025
2026 /* ------------------------------------------------------------------------ */
2027 /* __mf_violation */
2028
2029 void
2030 __mf_violation (void *ptr, size_t sz, uintptr_t pc,
2031 const char *location, int type)
2032 {
2033 char buf [128];
2034 static unsigned violation_number;
2035 DECLARE(void, free, void *ptr);
2036
2037 TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
2038 (void *) pc,
2039 (location != NULL ? location : ""), type, ptr, (unsigned long) sz);
2040
2041 if (__mf_opts.collect_stats)
2042 __mf_count_violation [(type < 0) ? 0 :
2043 (type > __MF_VIOL_WATCH) ? 0 :
2044 type] ++;
2045
2046 /* Print out a basic warning message. */
2047 if (__mf_opts.verbose_violations)
2048 {
2049 unsigned dead_p;
2050 unsigned num_helpful = 0;
2051 struct timeval now = { 0, 0 };
2052 #if HAVE_GETTIMEOFDAY
2053 gettimeofday (& now, NULL);
2054 #endif
2055
2056 violation_number ++;
2057 fprintf (stderr,
2058 "*******\n"
2059 "mudflap violation %u (%s): time=%lu.%06lu "
2060 "ptr=%p size=%lu\npc=%p%s%s%s\n",
2061 violation_number,
2062 ((type == __MF_VIOL_READ) ? "check/read" :
2063 (type == __MF_VIOL_WRITE) ? "check/write" :
2064 (type == __MF_VIOL_REGISTER) ? "register" :
2065 (type == __MF_VIOL_UNREGISTER) ? "unregister" :
2066 (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
2067 now.tv_sec, now.tv_usec,
2068 (void *) ptr, (unsigned long)sz, (void *) pc,
2069 (location != NULL ? " location=`" : ""),
2070 (location != NULL ? location : ""),
2071 (location != NULL ? "'" : ""));
2072
2073 if (__mf_opts.backtrace > 0)
2074 {
2075 char ** symbols;
2076 unsigned i, num;
2077
2078 num = __mf_backtrace (& symbols, (void *) pc, 2);
2079 /* Note: backtrace_symbols calls malloc(). But since we're in
2080 __mf_violation and presumably __mf_check, it'll detect
2081 recursion, and not put the new string into the database. */
2082
2083 for (i=0; i<num; i++)
2084 fprintf (stderr, " %s\n", symbols[i]);
2085
2086 /* Calling free() here would trigger a violation. */
2087 CALL_REAL(free, symbols);
2088 }
2089
2090
2091 /* Look for nearby objects. For this, we start with s_low/s_high
2092 pointing to the given area, looking for overlapping objects.
2093 If none show up, widen the search area and keep looking. */
2094
2095 if (sz == 0) sz = 1;
2096
2097 for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
2098 {
2099 enum {max_objs = 3}; /* magic */
2100 __mf_object_t *objs[max_objs];
2101 unsigned num_objs = 0;
2102 uintptr_t s_low, s_high;
2103 unsigned tries = 0;
2104 unsigned i;
2105
2106 s_low = (uintptr_t) ptr;
2107 s_high = CLAMPSZ (ptr, sz);
2108
2109 while (tries < 16) /* magic */
2110 {
2111 if (dead_p)
2112 num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
2113 else
2114 num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);
2115
2116 if (num_objs) /* good enough */
2117 break;
2118
2119 tries ++;
2120
2121 /* XXX: tune this search strategy. It's too dependent on
2122 sz, which can range from 1 to very large values (e.g. when
2123 checking array indexing). */
2124 s_low = CLAMPSUB (s_low, (sz * tries * tries));
2125 s_high = CLAMPADD (s_high, (sz * tries * tries));
2126 }
2127
2128 for (i = 0; i < min (num_objs, max_objs); i++)
2129 {
2130 __mf_object_t *obj = objs[i];
2131 uintptr_t low = (uintptr_t) ptr;
2132 uintptr_t high = CLAMPSZ (ptr, sz);
2133 unsigned before1 = (low < obj->low) ? obj->low - low : 0;
2134 unsigned after1 = (low > obj->high) ? low - obj->high : 0;
2135 unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
2136 unsigned before2 = (high < obj->low) ? obj->low - high : 0;
2137 unsigned after2 = (high > obj->high) ? high - obj->high : 0;
2138 unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;
2139
2140 fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
2141 num_helpful + i + 1,
2142 (before1 ? before1 : after1 ? after1 : into1),
2143 (before1 ? "before" : after1 ? "after" : "into"),
2144 (before2 ? before2 : after2 ? after2 : into2),
2145 (before2 ? "before" : after2 ? "after" : "into"));
2146 __mf_describe_object (obj);
2147 }
2148 num_helpful += num_objs;
2149 }
2150
2151 fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
2152 }
2153
2154 /* How to finally handle this violation? */
2155 switch (__mf_opts.violation_mode)
2156 {
2157 case viol_nop:
2158 break;
2159 case viol_segv:
2160 kill (getpid(), SIGSEGV);
2161 break;
2162 case viol_abort:
2163 abort ();
2164 break;
2165 case viol_gdb:
2166
2167 snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
2168 system (buf);
2169 /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
2170 instead, and let the forked child execlp() gdb. That way, this
2171 subject process can be resumed under the supervision of gdb.
2172 This can't happen now, since system() only returns when gdb
2173 dies. In that case, we need to beware of starting a second
2174 concurrent gdb child upon the next violation. (But if the first
2175 gdb dies, then starting a new one is appropriate.) */
2176 break;
2177 }
2178 }
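#if 0
/* Editorial sketch, not part of the original file: a minimal program
   that, when compiled with -fmudflap and linked against libmudflap in
   checking mode (mode_check), reaches __mf_check on the store below and
   ends up in __mf_violation with type __MF_VIOL_WRITE.  The
   violation_mode switch above then decides whether to continue, raise
   SIGSEGV, abort, or attach gdb. */
#include <stdlib.h>

int
main (void)
{
  char *p = malloc (4);   /* registered as a __MF_TYPE_HEAP object */
  p[4] = 'x';             /* one byte past the object: a check/write violation */
  free (p);
  return 0;
}
#endif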
2179
2180 /* ------------------------------------------------------------------------ */
2181
2182
2183 unsigned __mf_watch (void *ptr, size_t sz)
2184 {
2185 unsigned rc;
2186 LOCKTH ();
2187 BEGIN_RECURSION_PROTECT ();
2188 rc = __mf_watch_or_not (ptr, sz, 1);
2189 END_RECURSION_PROTECT ();
2190 UNLOCKTH ();
2191 return rc;
2192 }
2193
2194 unsigned __mf_unwatch (void *ptr, size_t sz)
2195 {
2196 unsigned rc;
2197 LOCKTH ();
2198 rc = __mf_watch_or_not (ptr, sz, 0);
2199 UNLOCKTH ();
2200 return rc;
2201 }
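#if 0
/* Editorial sketch, not part of the original file: __mf_watch and
   __mf_unwatch are public entry points that mark every registered
   object overlapping [ptr, ptr+sz) as watched; a later access to a
   watched object is reported via __mf_violation as __MF_VIOL_WATCH
   instead of being satisfied silently from the lookup cache. */
#include <stddef.h>
extern unsigned __mf_watch (void *ptr, size_t sz);
extern unsigned __mf_unwatch (void *ptr, size_t sz);

static void
watch_buffer (char *buf, size_t len)
{
  unsigned n = __mf_watch (buf, len);   /* number of objects newly watched */
  /* ... run the code suspected of touching buf ... */
  __mf_unwatch (buf, len);
  (void) n;
}
#endif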
2202
2203
2204 static unsigned
2205 __mf_watch_or_not (void *ptr, size_t sz, char flag)
2206 {
2207 uintptr_t ptr_high = CLAMPSZ (ptr, sz);
2208 uintptr_t ptr_low = (uintptr_t) ptr;
2209 unsigned count = 0;
2210
2211 TRACE ("%s ptr=%p size=%lu\n",
2212 (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2213
2214 switch (__mf_opts.mudflap_mode)
2215 {
2216 case mode_nop:
2217 case mode_populate:
2218 case mode_violate:
2219 count = 0;
2220 break;
2221
2222 case mode_check:
2223 {
2224 __mf_object_t **all_ovr_objs;
2225 unsigned obj_count;
2226 unsigned n;
2227 DECLARE (void *, malloc, size_t c);
2228 DECLARE (void, free, void *p);
2229
2230 obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2231 VERBOSE_TRACE (" %u:", obj_count);
2232
2233 all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
2234 if (all_ovr_objs == NULL) abort ();
2235 n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2236 assert (n == obj_count);
2237
2238 for (n = 0; n < obj_count; n ++)
2239 {
2240 __mf_object_t *obj = all_ovr_objs[n];
2241
2242 VERBOSE_TRACE (" [%p]", (void *) obj);
2243 if (obj->watching_p != flag)
2244 {
2245 obj->watching_p = flag;
2246 count ++;
2247
2248 /* Remove object from cache, to ensure next access
2249 goes through __mf_check(). */
2250 if (flag)
2251 __mf_uncache_object (obj);
2252 }
2253 }
2254 CALL_REAL (free, all_ovr_objs);
2255 }
2256 break;
2257 }
2258
2259 return count;
2260 }
2261
2262
2263 void
2264 __mf_sigusr1_handler (int num)
2265 {
2266 __mf_sigusr1_received ++;
2267 }
2268
2269 /* Install or remove SIGUSR1 handler as necessary.
2270 Also, respond to a received pending SIGUSR1. */
2271 void
2272 __mf_sigusr1_respond ()
2273 {
2274 static int handler_installed;
2275
2276 #ifdef SIGUSR1
2277 /* Manage handler */
2278 if (__mf_opts.sigusr1_report && ! handler_installed)
2279 {
2280 signal (SIGUSR1, __mf_sigusr1_handler);
2281 handler_installed = 1;
2282 }
2283 else if(! __mf_opts.sigusr1_report && handler_installed)
2284 {
2285 signal (SIGUSR1, SIG_DFL);
2286 handler_installed = 0;
2287 }
2288 #endif
2289
2290 /* Manage enqueued signals */
2291 if (__mf_sigusr1_received > __mf_sigusr1_handled)
2292 {
2293 __mf_sigusr1_handled ++;
2294 assert (__mf_get_state () == reentrant);
2295 __mfu_report ();
2296 handler_installed = 0; /* We may need to re-install the handler; this might be a SysV-style signal() that resets it. */
2297 }
2298 }
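#if 0
/* Editorial sketch, not part of the original file: with the
   -sigusr1-report option enabled, a statistics report can be requested
   from outside the process ("kill -USR1 <pid>") or from the program
   itself, as below.  The report is actually emitted the next time a
   libmudflap entry point calls __mf_sigusr1_respond. */
#include <signal.h>

static void
request_mudflap_report (void)
{
  raise (SIGUSR1);   /* recorded by __mf_sigusr1_handler, handled later */
}
#endif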
2299
2300
2301 /* Provide an alternative __assert_fail function that cannot itself fail
2302 due to libmudflap infinite recursion, since it avoids stdio and malloc. */
2303 #ifndef NDEBUG
2304
2305 static void
2306 write_itoa (int fd, unsigned n)
2307 {
2308 enum x { bufsize = sizeof(n)*4 };
2309 char buf [bufsize];
2310 unsigned i;
2311
2312 for (i=0; i<bufsize-1; i++)
2313 {
2314 unsigned digit = n % 10;
2315 buf[bufsize-2-i] = digit + '0';
2316 n /= 10;
2317 if (n == 0)
2318 {
2319 char *m = & buf [bufsize-2-i];
2320 buf[bufsize-1] = '\0';
2321 write (fd, m, strlen(m));
2322 break;
2323 }
2324 }
2325 }
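/* Editorial note, not part of the original file: write_itoa emits the
   decimal form of N with a single raw write(2) call, e.g.
   write_itoa (2, 1234) writes "1234" to stderr.  It deliberately avoids
   the printf family and malloc, so that __assert_fail below cannot
   trigger the libmudflap infinite recursion mentioned above. */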
2326
2327
2328 void
2329 __assert_fail (const char *msg, const char *file, unsigned line, const char *func)
2330 {
2331 #define write2(string) write (2, (string), strlen ((string)));
2332 write2("mf");
2333 #ifdef LIBMUDFLAPTH
2334 write2("(");
2335 write_itoa (2, (unsigned) pthread_self ());
2336 write2(")");
2337 #endif
2338 write2(": assertion failure: `");
2339 write (2, msg, strlen (msg));
2340 write2("' in ");
2341 write (2, func, strlen (func));
2342 write2(" at ");
2343 write (2, file, strlen (file));
2344 write2(":");
2345 write_itoa (2, line);
2346 write2("\n");
2347 #undef write2
2348 abort ();
2349 }
2350
2351
2352 #endif
2353
2354
2355
2356 /* Adapted splay tree code, originally from libiberty. It has been
2357 specialized for libmudflap as requested by RMS. */
2358
2359 static void
2360 mfsplay_tree_free (void *p)
2361 {
2362 DECLARE (void, free, void *p);
2363 CALL_REAL (free, p);
2364 }
2365
2366 static void *
2367 mfsplay_tree_xmalloc (size_t s)
2368 {
2369 DECLARE (void *, malloc, size_t s);
2370 return CALL_REAL (malloc, s);
2371 }
2372
2373
2374 static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
2375 static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
2376 mfsplay_tree_key,
2377 mfsplay_tree_node *,
2378 mfsplay_tree_node *,
2379 mfsplay_tree_node *);
2380
2381
2382 /* Help splay SP around KEY. PARENT and GRANDPARENT are the parent
2383 and grandparent, respectively, of NODE. */
2384
2385 static mfsplay_tree_node
2386 mfsplay_tree_splay_helper (mfsplay_tree sp,
2387 mfsplay_tree_key key,
2388 mfsplay_tree_node * node,
2389 mfsplay_tree_node * parent,
2390 mfsplay_tree_node * grandparent)
2391 {
2392 mfsplay_tree_node *next;
2393 mfsplay_tree_node n;
2394 int comparison;
2395
2396 n = *node;
2397
2398 if (!n)
2399 return *parent;
2400
2401 comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));
2402
2403 if (comparison == 0)
2404 /* We've found the target. */
2405 next = 0;
2406 else if (comparison < 0)
2407 /* The target is to the left. */
2408 next = &n->left;
2409 else
2410 /* The target is to the right. */
2411 next = &n->right;
2412
2413 if (next)
2414 {
2415 /* Check whether our recursion depth is too high. If so, abort this
2416 search and signal that a rebalance is required to continue. */
2417 if (sp->depth > sp->max_depth)
2418 {
2419 sp->rebalance_p = 1;
2420 return n;
2421 }
2422
2423 /* Continue down the tree. */
2424 sp->depth ++;
2425 n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
2426 sp->depth --;
2427
2428 /* The recursive call will change the place to which NODE
2429 points. */
2430 if (*node != n || sp->rebalance_p)
2431 return n;
2432 }
2433
2434 if (!parent)
2435 /* NODE is the root. We are done. */
2436 return n;
2437
2438 /* First, handle the case where there is no grandparent (i.e.,
2439 *PARENT is the root of the tree.) */
2440 if (!grandparent)
2441 {
2442 if (n == (*parent)->left)
2443 {
2444 *node = n->right;
2445 n->right = *parent;
2446 }
2447 else
2448 {
2449 *node = n->left;
2450 n->left = *parent;
2451 }
2452 *parent = n;
2453 return n;
2454 }
2455
2456 /* Next handle the cases where both N and *PARENT are left children,
2457 or where both are right children. */
2458 if (n == (*parent)->left && *parent == (*grandparent)->left)
2459 {
2460 mfsplay_tree_node p = *parent;
2461
2462 (*grandparent)->left = p->right;
2463 p->right = *grandparent;
2464 p->left = n->right;
2465 n->right = p;
2466 *grandparent = n;
2467 return n;
2468 }
2469 else if (n == (*parent)->right && *parent == (*grandparent)->right)
2470 {
2471 mfsplay_tree_node p = *parent;
2472
2473 (*grandparent)->right = p->left;
2474 p->left = *grandparent;
2475 p->right = n->left;
2476 n->left = p;
2477 *grandparent = n;
2478 return n;
2479 }
2480
2481 /* Finally, deal with the case where N is a left child, but *PARENT
2482 is a right child, or vice versa. */
2483 if (n == (*parent)->left)
2484 {
2485 (*parent)->left = n->right;
2486 n->right = *parent;
2487 (*grandparent)->right = n->left;
2488 n->left = *grandparent;
2489 *grandparent = n;
2490 return n;
2491 }
2492 else
2493 {
2494 (*parent)->right = n->left;
2495 n->left = *parent;
2496 (*grandparent)->left = n->right;
2497 n->right = *grandparent;
2498 *grandparent = n;
2499 return n;
2500 }
2501 }
2502
2503
2504
2505 static int
2506 mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
2507 {
2508 mfsplay_tree_node **p = array_ptr;
2509 *(*p) = n;
2510 (*p)++;
2511 return 0;
2512 }
2513
2514
2515 static mfsplay_tree_node
2516 mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
2517 unsigned high)
2518 {
2519 unsigned middle = low + (high - low) / 2;
2520 mfsplay_tree_node n = array[middle];
2521
2522 /* Note that since we're producing a balanced binary tree, it is not a problem
2523 that this function is recursive. */
2524 if (low + 1 <= middle)
2525 n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
2526 else
2527 n->left = NULL;
2528
2529 if (middle + 1 <= high)
2530 n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
2531 else
2532 n->right = NULL;
2533
2534 return n;
2535 }
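/* Editorial note (worked example), not part of the original file: for a
   sorted array of seven node pointers a[0..6], the recursion above picks
   a[3] as the root, builds its left subtree from a[0..2] (rooted at a[1])
   and its right subtree from a[4..6] (rooted at a[5]), producing a tree
   of height 2 regardless of how unbalanced the splay tree was before. */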
2536
2537
2538 /* Rebalance the entire tree. Do this by copying all the node
2539 pointers into an array, then cleverly re-linking them. */
2540 static void
2541 mfsplay_tree_rebalance (mfsplay_tree sp)
2542 {
2543 mfsplay_tree_node *all_nodes, *all_nodes_1;
2544
2545 if (sp->num_keys <= 2)
2546 return;
2547
2548 all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);
2549
2550 /* Traverse all nodes to copy their addresses into this array. */
2551 all_nodes_1 = all_nodes;
2552 mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
2553 (void *) &all_nodes_1);
2554
2555 /* Relink all the nodes. */
2556 sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);
2557
2558 mfsplay_tree_free (all_nodes);
2559 }
2560
2561
2562 /* Splay SP around KEY. */
2563 static void
2564 mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
2565 {
2566 if (sp->root == 0)
2567 return;
2568
2569 /* If we just splayed the tree with the same key, do nothing. */
2570 if (sp->last_splayed_key_p &&
2571 (sp->last_splayed_key == key))
2572 return;
2573
2574 /* Set a maximum recursion depth for the splay operation.
2575 The idea is to limit excessive stack usage if we're facing
2576 degenerate access patterns. Unfortunately such patterns can occur
2577 e.g. during static initialization, where many static objects might
2578 be registered in increasing address sequence, or when large
2579 tree-like heap data structures are allocated quickly.
2580
2581 On x86, this depth corresponds to roughly 200K of stack usage.
2582 XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack. */
2583 sp->max_depth = 2500;
2584 sp->rebalance_p = sp->depth = 0;
2585
2586 mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
2587 if (sp->rebalance_p)
2588 {
2589 mfsplay_tree_rebalance (sp);
2590
2591 sp->rebalance_p = sp->depth = 0;
2592 mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
2593
2594 if (sp->rebalance_p)
2595 abort ();
2596 }
2597
2598
2599 /* Cache this splay key. */
2600 sp->last_splayed_key = key;
2601 sp->last_splayed_key_p = 1;
2602 }
2603
2604
2605
2606 /* Allocate a new splay tree. */
2607 static mfsplay_tree
2608 mfsplay_tree_new ()
2609 {
2610 mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
2611 sp->root = NULL;
2612 sp->last_splayed_key_p = 0;
2613 sp->num_keys = 0;
2614
2615 return sp;
2616 }
2617
2618
2619
2620 /* Insert a new node (associating KEY with VALUE) into SP. If a
2621 previous node with the indicated KEY exists, its value is replaced
2622 with the new one. Returns the node now at the root. */
2623 static mfsplay_tree_node
2624 mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
2625 {
2626 int comparison = 0;
2627
2628 mfsplay_tree_splay (sp, key);
2629
2630 if (sp->root)
2631 comparison = ((sp->root->key > key) ? 1 :
2632 ((sp->root->key < key) ? -1 : 0));
2633
2634 if (sp->root && comparison == 0)
2635 {
2636 /* If the root of the tree already has the indicated KEY, just
2637 replace the value with VALUE. */
2638 sp->root->value = value;
2639 }
2640 else
2641 {
2642 /* Create a new node, and insert it at the root. */
2643 mfsplay_tree_node node;
2644
2645 node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
2646 node->key = key;
2647 node->value = value;
2648 sp->num_keys++;
2649 if (!sp->root)
2650 node->left = node->right = 0;
2651 else if (comparison < 0)
2652 {
2653 node->left = sp->root;
2654 node->right = node->left->right;
2655 node->left->right = 0;
2656 }
2657 else
2658 {
2659 node->right = sp->root;
2660 node->left = node->right->left;
2661 node->right->left = 0;
2662 }
2663
2664 sp->root = node;
2665 sp->last_splayed_key_p = 0;
2666 }
2667
2668 return sp->root;
2669 }
2670
2671 /* Remove KEY from SP. It is not an error if it did not exist. */
2672
2673 static void
2674 mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
2675 {
2676 mfsplay_tree_splay (sp, key);
2677 sp->last_splayed_key_p = 0;
2678 if (sp->root && (sp->root->key == key))
2679 {
2680 mfsplay_tree_node left, right;
2681 left = sp->root->left;
2682 right = sp->root->right;
2683 /* Delete the root node itself. */
2684 mfsplay_tree_free (sp->root);
2685 sp->num_keys--;
2686 /* One of the children is now the root. Doesn't matter much
2687 which, so long as we preserve the properties of the tree. */
2688 if (left)
2689 {
2690 sp->root = left;
2691 /* If there was a right child as well, hang it off the
2692 right-most leaf of the left child. */
2693 if (right)
2694 {
2695 while (left->right)
2696 left = left->right;
2697 left->right = right;
2698 }
2699 }
2700 else
2701 sp->root = right;
2702 }
2703 }
2704
2705 /* Look up KEY in SP, returning the node that holds it if present,
2706 and NULL otherwise. */
2707
2708 static mfsplay_tree_node
2709 mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
2710 {
2711 mfsplay_tree_splay (sp, key);
2712 if (sp->root && (sp->root->key == key))
2713 return sp->root;
2714 else
2715 return 0;
2716 }
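#if 0
/* Editorial sketch, not part of the original file: how the static
   splay-tree primitives above fit together.  This mirrors the way the
   libmudflap object database keys each node by an object's low address
   and stores the __mf_object_t pointer as its value. */
static void
splay_tree_example (void)
{
  mfsplay_tree t = mfsplay_tree_new ();
  int payload = 42;
  mfsplay_tree_node n;

  mfsplay_tree_insert (t, (mfsplay_tree_key) 0x1000, (mfsplay_tree_value) &payload);
  n = mfsplay_tree_lookup (t, (mfsplay_tree_key) 0x1000);
  if (n != NULL && n->value == &payload)
    {
      /* The lookup also splayed key 0x1000 to the root of T. */
    }
  mfsplay_tree_remove (t, (mfsplay_tree_key) 0x1000);
}
#endif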
2717
2718
2719 /* Return the node holding the immediate predecessor of KEY, or NULL
2720 if there is none. KEY need not be present in the tree. */
2721
2722 static mfsplay_tree_node
2723 mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
2724 {
2725 int comparison;
2726 mfsplay_tree_node node;
2727 /* If the tree is empty, there is certainly no predecessor. */
2728 if (!sp->root)
2729 return NULL;
2730 /* Splay the tree around KEY. That will leave either the KEY
2731 itself, its predecessor, or its successor at the root. */
2732 mfsplay_tree_splay (sp, key);
2733 comparison = ((sp->root->key > key) ? 1 :
2734 ((sp->root->key < key) ? -1 : 0));
2735
2736 /* If the predecessor is at the root, just return it. */
2737 if (comparison < 0)
2738 return sp->root;
2739 /* Otherwise, find the rightmost element of the left subtree. */
2740 node = sp->root->left;
2741 if (node)
2742 while (node->right)
2743 node = node->right;
2744 return node;
2745 }
2746
2747 /* Return the node holding the immediate successor of KEY, or NULL
2748 if there is none. KEY need not be present in the tree. */
2749
2750 static mfsplay_tree_node
2751 mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
2752 {
2753 int comparison;
2754 mfsplay_tree_node node;
2755 /* If the tree is empty, there is certainly no successor. */
2756 if (!sp->root)
2757 return NULL;
2758 /* Splay the tree around KEY. That will leave either the KEY
2759 itself, its predecessor, or its successor at the root. */
2760 mfsplay_tree_splay (sp, key);
2761 comparison = ((sp->root->key > key) ? 1 :
2762 ((sp->root->key < key) ? -1 : 0));
2763 /* If the successor is at the root, just return it. */
2764 if (comparison > 0)
2765 return sp->root;
2766 /* Otherwise, find the leftmost element of the right subtree. */
2767 node = sp->root->right;
2768 if (node)
2769 while (node->left)
2770 node = node->left;
2771 return node;
2772 }
2773
2774 /* Call FN, passing it the DATA, for every node in SP, following an
2775 in-order traversal. If FN ever returns a non-zero value, the
2776 iteration ceases immediately, and that value is returned.
2777 Otherwise, this function returns 0.
2778
2779 This function simulates recursion using dynamically allocated
2780 arrays, since it may be called from mfsplay_tree_rebalance(), which
2781 in turn means that the tree is already uncomfortably deep for stack
2782 space limits. */
2783 static int
2784 mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
2785 {
2786 mfsplay_tree_node *stack1;
2787 char *stack2;
2788 unsigned sp;
2789 int val = 0;
2790 enum s { s_left, s_here, s_right, s_up };
2791
2792 if (st->root == NULL) /* => num_keys == 0 */
2793 return 0;
2794
2795 stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
2796 stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);
2797
2798 sp = 0;
2799 stack1 [sp] = st->root;
2800 stack2 [sp] = s_left;
2801
2802 while (1)
2803 {
2804 mfsplay_tree_node n;
2805 enum s s;
2806
2807 n = stack1 [sp];
2808 s = stack2 [sp];
2809
2810 /* Handle each of the four possible states separately. */
2811
2812 /* 1: We're here to traverse the left subtree (if any). */
2813 if (s == s_left)
2814 {
2815 stack2 [sp] = s_here;
2816 if (n->left != NULL)
2817 {
2818 sp ++;
2819 stack1 [sp] = n->left;
2820 stack2 [sp] = s_left;
2821 }
2822 }
2823
2824 /* 2: We're here to traverse this node. */
2825 else if (s == s_here)
2826 {
2827 stack2 [sp] = s_right;
2828 val = (*fn) (n, data);
2829 if (val) break;
2830 }
2831
2832 /* 3: We're here to traverse the right subtree (if any). */
2833 else if (s == s_right)
2834 {
2835 stack2 [sp] = s_up;
2836 if (n->right != NULL)
2837 {
2838 sp ++;
2839 stack1 [sp] = n->right;
2840 stack2 [sp] = s_left;
2841 }
2842 }
2843
2844 /* 4: We're here after both subtrees (if any) have been traversed. */
2845 else if (s == s_up)
2846 {
2847 /* Pop the stack. */
2848 if (sp == 0) break; /* Popping off the root node: we're finished! */
2849 sp --;
2850 }
2851
2852 else
2853 abort ();
2854 }
2855
2856 mfsplay_tree_free (stack1);
2857 mfsplay_tree_free (stack2);
2858 return val;
2859 }
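#if 0
/* Editorial sketch, not part of the original file: a typical foreach
   callback, in the same style as __mf_report_leaks_fn and
   mfsplay_tree_rebalance_helper1 used earlier.  Returning non-zero from
   the callback stops the traversal early, and that value becomes the
   return value of mfsplay_tree_foreach. */
static int
count_nodes_fn (mfsplay_tree_node n, void *data)
{
  unsigned *counter = (unsigned *) data;
  (*counter) ++;
  return 0;   /* keep iterating over the whole tree */
}

/* Usage: unsigned c = 0; (void) mfsplay_tree_foreach (tree, count_nodes_fn, &c); */
#endif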