diff gcc/ggc-page.c @ 111:04ced10e8804

gcc 7
author kono
date Fri, 27 Oct 2017 22:46:09 +0900
parents f6334be47118
children 84e7813d76e9
--- a/gcc/ggc-page.c	Sun Aug 21 07:07:55 2011 +0900
+++ b/gcc/ggc-page.c	Fri Oct 27 22:46:09 2017 +0900
@@ -1,6 +1,5 @@
 /* "Bag-of-pages" garbage collector for the GNU compiler.
-   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009,
-   2010 Free Software Foundation, Inc.
+   Copyright (C) 1999-2017 Free Software Foundation, Inc.
 
 This file is part of GCC.
 
@@ -21,17 +20,18 @@
 #include "config.h"
 #include "system.h"
 #include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
+#include "alias.h"
 #include "tree.h"
 #include "rtl.h"
+#include "memmodel.h"
 #include "tm_p.h"
 #include "diagnostic-core.h"
 #include "flags.h"
-#include "ggc.h"
 #include "ggc-internal.h"
 #include "timevar.h"
 #include "params.h"
-#include "tree-flow.h"
+#include "cgraph.h"
 #include "cfgloop.h"
 #include "plugin.h"
 
@@ -50,6 +50,11 @@
 #define USING_MALLOC_PAGE_GROUPS
 #endif
 
+#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
+    && defined(USING_MMAP)
+# define USING_MADVISE
+#endif
+
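
The new USING_MADVISE configuration enables the page-discarding path in release_pages further down: the mapping stays in place, only the backing memory is handed back with MADV_DONTNEED, and touching the range later faults in fresh zero-filled pages. A minimal standalone sketch of that behaviour (an illustration assuming a Linux-style anonymous mmap, not part of the patch):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <assert.h>

    int
    main (void)
    {
      size_t len = 16 * (size_t) sysconf (_SC_PAGESIZE);
      char *p = (char *) mmap (NULL, len, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert (p != MAP_FAILED);
      p[0] = 1;                          /* Page becomes resident.  */
      madvise (p, len, MADV_DONTNEED);   /* Give memory back, keep the address range.  */
      p[0] = 1;                          /* Reuse: the kernel faults in a zero page.  */
      munmap (p, len);
      return 0;
    }
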
 /* Strategy:
 
    This garbage-collecting allocator allocates objects on one of a set
@@ -116,14 +121,14 @@
 
 #define PAGE_L1_BITS	(8)
 #define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
-#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
-#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)
+#define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
+#define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)
 
 #define LOOKUP_L1(p) \
-  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
+  (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
 
 #define LOOKUP_L2(p) \
-  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
+  (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
 
 /* The number of objects per allocation page, for objects on a page of
    the indicated ORDER.  */
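
A worked example of the LOOKUP_L1 / LOOKUP_L2 arithmetic just above, assuming a 32-bit host with 4 KB pages (so G.lg_pagesize == 12 and PAGE_L2_BITS == 12; the numbers are purely illustrative):

    /* For p == 0x12345678:
         LOOKUP_L1 (p) = (0x12345678 >> 24) & 0xff  = 0x12   -> index into the L1 table
         LOOKUP_L2 (p) = (0x12345678 >> 12) & 0xfff = 0x345  -> index into the L2 table
       The remaining low 12 bits (0x678) are the object's offset within
       its page, so they are not needed to find the page_entry.  */
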
@@ -152,7 +157,7 @@
 struct max_alignment {
   char c;
   union {
-    HOST_WIDEST_INT i;
+    int64_t i;
     void *p;
   } u;
 };
@@ -196,7 +201,7 @@
   sizeof (struct tree_field_decl),
   sizeof (struct tree_parm_decl),
   sizeof (struct tree_var_decl),
-  sizeof (struct tree_type),
+  sizeof (struct tree_type_non_common),
   sizeof (struct function),
   sizeof (struct basic_block_def),
   sizeof (struct cgraph_node),
@@ -212,9 +217,9 @@
 
 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
 
-/* Compute the smallest multiple of F that is >= X.  */
-
-#define ROUND_UP(x, f) (CEIL (x, f) * (f))
+/* Round X to the next multiple of the page size.  */
+
+#define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)
 
 /* The Ith entry is the number of objects on a page or order I.  */
 
@@ -237,7 +242,7 @@
 
 /* A page_entry records the status of an allocation page.  This
    structure is dynamically sized to fit the bitmap in_use_p.  */
-typedef struct page_entry
+struct page_entry
 {
   /* The next page-entry with objects of the same size, or NULL if
      this is the last page-entry.  */
@@ -277,16 +282,19 @@
   /* The lg of size of objects allocated from this page.  */
   unsigned char order;
 
+  /* Discarded page? */
+  bool discarded;
+
   /* A bit vector indicating whether or not objects are in use.  The
      Nth bit is one if the Nth object on this page is allocated.  This
      array is dynamically sized.  */
   unsigned long in_use_p[1];
-} page_entry;
+};
 
 #ifdef USING_MALLOC_PAGE_GROUPS
 /* A page_group describes a large allocation from malloc, from which
    we parcel out aligned pages.  */
-typedef struct page_group
+struct page_group
 {
   /* A linked list of all extant page groups.  */
   struct page_group *next;
@@ -299,7 +307,7 @@
 
   /* A bitmask of pages in use.  */
   unsigned int in_use;
-} page_group;
+};
 #endif
 
 #if HOST_BITS_PER_PTR <= 32
@@ -321,6 +329,41 @@
 
 #endif
 
+class finalizer
+{
+public:
+  finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}
+
+  void *addr () const { return m_addr; }
+
+  void call () const { m_function (m_addr); }
+
+private:
+  void *m_addr;
+  void (*m_function)(void *);
+};
+
+class vec_finalizer
+{
+public:
+  vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
+    m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}
+
+  void call () const
+    {
+      for (size_t i = 0; i < m_n_objects; i++)
+	m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
+    }
+
+  void *addr () const { return reinterpret_cast<void *> (m_addr); }
+
+private:
+  uintptr_t m_addr;
+  void (*m_function)(void *);
+  size_t m_object_size;
+  size_t m_n_objects;
+};
+
 #ifdef ENABLE_GC_ALWAYS_COLLECT
 /* List of free objects to be verified as actually free on the
    next collection.  */
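
The two classes added above only record an address and a callback; the collector later invokes call () on entries whose address is no longer marked. A minimal usage sketch of vec_finalizer (my_type, finalize_one and sketch are illustrative assumptions, not part of the patch; it relies on the class definition above plus <stdint.h>):

    /* Hypothetical type, only for illustration.  */
    struct my_type { bool closed; };

    static void
    finalize_one (void *p)
    {
      static_cast<my_type *> (p)->closed = true;  /* Per-object cleanup.  */
    }

    /* Registered for a block of four my_type objects: when the collector
       later finds the block unmarked, fin.call () runs finalize_one on
       block[0] .. block[3], stepping sizeof (my_type) bytes each time.  */
    static void
    sketch (my_type *block)
    {
      vec_finalizer fin (reinterpret_cast<uintptr_t> (block),
                         finalize_one, sizeof (my_type), 4);
      fin.call ();
    }
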
@@ -332,7 +375,7 @@
 #endif
 
 /* The rest of the global variables.  */
-static struct globals
+static struct ggc_globals
 {
   /* The Nth element in this array is a page with objects of size 2^N.
      If there are any pages with free objects, they will be at the
@@ -414,13 +457,18 @@
      better runtime data access pattern.  */
   unsigned long **save_in_use;
 
+  /* Finalizers for single objects.  The first index is collection_depth.  */
+  vec<vec<finalizer> > finalizers;
+
+  /* Finalizers for vectors of objects.  */
+  vec<vec<vec_finalizer> > vec_finalizers;
+
 #ifdef ENABLE_GC_ALWAYS_COLLECT
   /* List of free objects to be verified as actually free on the
      next collection.  */
   struct free_object *free_object_list;
 #endif
 
-#ifdef GATHER_STATISTICS
   struct
   {
     /* Total GC-allocated memory.  */
@@ -447,13 +495,16 @@
     /* The overhead for each of the allocation orders.  */
     unsigned long long total_overhead_per_order[NUM_ORDERS];
   } stats;
-#endif
 } G;
 
+/* True if a gc is currently taking place.  */
+
+static bool in_gc = false;
+
 /* The size in bytes required to maintain a bitmap for the objects
    on a page-entry.  */
 #define BITMAP_SIZE(Num_objects) \
-  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
+  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
 
 /* Allocate pages in chunks of this size, to throttle calls to memory
    allocation routines.  The first page is used, the rest go onto the
@@ -462,7 +513,7 @@
    can override this by defining GGC_QUIRE_SIZE explicitly.  */
 #ifndef GGC_QUIRE_SIZE
 # ifdef USING_MMAP
-#  define GGC_QUIRE_SIZE 256
+#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
 # else
 #  define GGC_QUIRE_SIZE 16
 # endif
@@ -471,11 +522,10 @@
 /* Initial guess as to how many page table entries we might need.  */
 #define INITIAL_PTE_COUNT 128
 
-static int ggc_allocated_p (const void *);
 static page_entry *lookup_page_table_entry (const void *);
 static void set_page_table_entry (void *, page_entry *);
 #ifdef USING_MMAP
-static char *alloc_anon (char *, size_t);
+static char *alloc_anon (char *, size_t, bool check);
 #endif
 #ifdef USING_MALLOC_PAGE_GROUPS
 static size_t page_group_index (char *, char *);
@@ -536,10 +586,11 @@
 #define save_in_use_p(__p) \
   (save_in_use_p_i (__p->index_by_depth))
 
-/* Returns nonzero if P was allocated in GC'able memory.  */
-
-static inline int
-ggc_allocated_p (const void *p)
+/* Traverse the page table and find the entry for a page.
+   If the object was not allocated in GC memory, return NULL.  */
+
+static inline page_entry *
+safe_lookup_page_table_entry (const void *p)
 {
   page_entry ***base;
   size_t L1, L2;
@@ -548,11 +599,11 @@
   base = &G.lookup[0];
 #else
   page_table table = G.lookup;
-  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
+  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
   while (1)
     {
       if (table == NULL)
-	return 0;
+	return NULL;
       if (table->high_bits == high_bits)
 	break;
       table = table->next;
@@ -563,8 +614,10 @@
   /* Extract the level 1 and 2 indices.  */
   L1 = LOOKUP_L1 (p);
   L2 = LOOKUP_L2 (p);
-
-  return base[L1] && base[L1][L2];
+  if (! base[L1])
+    return NULL;
+
+  return base[L1][L2];
 }
 
 /* Traverse the page table and find the entry for a page.
@@ -580,7 +633,7 @@
   base = &G.lookup[0];
 #else
   page_table table = G.lookup;
-  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
+  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
   while (table->high_bits != high_bits)
     table = table->next;
   base = &table->table[0];
@@ -605,7 +658,7 @@
   base = &G.lookup[0];
 #else
   page_table table;
-  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
+  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
   for (table = G.lookup; table; table = table->next)
     if (table->high_bits == high_bits)
       goto found;
@@ -654,7 +707,7 @@
    compile error unless exactly one of the HAVE_* is defined.  */
 
 static inline char *
-alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
+alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
 {
 #ifdef HAVE_MMAP_ANON
   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
@@ -667,6 +720,8 @@
 
   if (page == (char *) MAP_FAILED)
     {
+      if (!check)
+        return NULL;
       perror ("virtual memory exhausted");
       exit (FATAL_EXIT_CODE);
     }
@@ -729,6 +784,7 @@
   entry_size = num_objects * OBJECT_SIZE (order);
   if (entry_size < G.pagesize)
     entry_size = G.pagesize;
+  entry_size = PAGE_ALIGN (entry_size);
 
   entry = NULL;
   page = NULL;
@@ -740,6 +796,10 @@
 
   if (p != NULL)
     {
+      if (p->discarded)
+        G.bytes_mapped += p->bytes;
+      p->discarded = false;
+
       /* Recycle the allocated memory from this page ...  */
       *pp = p->next;
       page = p->page;
@@ -764,13 +824,18 @@
 	 extras on the freelist.  (Can only do this optimization with
 	 mmap for backing store.)  */
       struct page_entry *e, *f = G.free_pages;
-      int i;
-
-      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
+      int i, entries = GGC_QUIRE_SIZE;
+
+      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
+      if (page == NULL)
+	{
+	  page = alloc_anon (NULL, G.pagesize, true);
+	  entries = 1;
+	}
 
       /* This loop counts down so that the chain will be in ascending
 	 memory order.  */
-      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
+      for (i = entries - 1; i >= 1; i--)
 	{
 	  e = XCNEWVAR (struct page_entry, page_entry_size);
 	  e->order = order;
@@ -783,7 +848,7 @@
       G.free_pages = f;
     }
   else
-    page = alloc_anon (NULL, entry_size);
+    page = alloc_anon (NULL, entry_size, true);
 #endif
 #ifdef USING_MALLOC_PAGE_GROUPS
   else
@@ -802,7 +867,7 @@
 	alloc_size = entry_size + G.pagesize - 1;
       allocation = XNEWVEC (char, alloc_size);
 
-      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
+      page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
       head_slop = page - allocation;
       if (multiple_pages)
 	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
@@ -956,7 +1021,90 @@
 static void
 release_pages (void)
 {
-#ifdef USING_MMAP
+#ifdef USING_MADVISE
+  page_entry *p, *start_p;
+  char *start;
+  size_t len;
+  size_t mapped_len;
+  page_entry *next, *prev, *newprev;
+  size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
+
+  /* First free larger continuous areas to the OS.
+     This allows other allocators to grab these areas if needed.
+     This is only done on larger chunks to avoid fragmentation. 
+     This does not always work because the free_pages list is only
+     approximately sorted. */
+
+  p = G.free_pages;
+  prev = NULL;
+  while (p)
+    {
+      start = p->page;
+      start_p = p;
+      len = 0;
+      mapped_len = 0;
+      newprev = prev;
+      while (p && p->page == start + len)
+        {
+          len += p->bytes;
+	  if (!p->discarded)
+	      mapped_len += p->bytes;
+	  newprev = p;
+          p = p->next;
+        }
+      if (len >= free_unit)
+        {
+          while (start_p != p)
+            {
+              next = start_p->next;
+              free (start_p);
+              start_p = next;
+            }
+          munmap (start, len);
+	  if (prev)
+	    prev->next = p;
+          else
+            G.free_pages = p;
+          G.bytes_mapped -= mapped_len;
+	  continue;
+        }
+      prev = newprev;
+    }
+
+  /* Now give back the fragmented pages to the OS, but keep the address 
+     space to reuse it next time. */
+
+  for (p = G.free_pages; p; )
+    {
+      if (p->discarded)
+        {
+          p = p->next;
+          continue;
+        }
+      start = p->page;
+      len = p->bytes;
+      start_p = p;
+      p = p->next;
+      while (p && p->page == start + len)
+        {
+          len += p->bytes;
+          p = p->next;
+        }
+      /* Give the page back to the kernel, but don't free the mapping.
+	 This avoids fragmentation in the virtual memory map of the
+	 process.  Next time we can reuse it by just touching it.  */
+      madvise (start, len, MADV_DONTNEED);
+      /* Don't count those pages as mapped to not touch the garbage collector
+         unnecessarily. */
+      G.bytes_mapped -= len;
+      while (start_p != p)
+        {
+          start_p->discarded = true;
+          start_p = start_p->next;
+        }
+    }
+#endif
+#if defined(USING_MMAP) && !defined(USING_MADVISE)
   page_entry *p, *next;
   char *start;
   size_t len;
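
For orientation, the threshold separating the two phases of the USING_MADVISE code above, assuming the common 4 KB page size:

    free_unit = (GGC_QUIRE_SIZE / 2) * G.pagesize
              = (512 / 2) * 4096
              = 1 MiB

so contiguous free runs of at least 1 MiB are unmapped outright, while smaller runs are only discarded with MADV_DONTNEED and flagged p->discarded, which is why alloc_page adds their size back to G.bytes_mapped when such a page is reused.
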
@@ -1054,35 +1202,77 @@
   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
 };
 
-/* Typed allocation function.  Does nothing special in this collector.  */
-
-void *
-ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
-		      MEM_STAT_DECL)
+/* For a given size of memory requested for allocation, return the
+   actual size that is going to be allocated, as well as the size
+   order.  */
+
+static void
+ggc_round_alloc_size_1 (size_t requested_size,
+			size_t *size_order,
+			size_t *alloced_size)
 {
-  return ggc_internal_alloc_stat (size PASS_MEM_STAT);
+  size_t order, object_size;
+
+  if (requested_size < NUM_SIZE_LOOKUP)
+    {
+      order = size_lookup[requested_size];
+      object_size = OBJECT_SIZE (order);
+    }
+  else
+    {
+      order = 10;
+      while (requested_size > (object_size = OBJECT_SIZE (order)))
+        order++;
+    }
+
+  if (size_order)
+    *size_order = order;
+  if (alloced_size)
+    *alloced_size = object_size;
+}
+
+/* For a given size of memory requested for allocation, return the
+   actual size that is going to be allocated.  */
+
+size_t
+ggc_round_alloc_size (size_t requested_size)
+{
+  size_t size = 0;
+  
+  ggc_round_alloc_size_1 (requested_size, NULL, &size);
+  return size;
+}
+
+/* Push a finalizer onto the appropriate vec.  */
+
+static void
+add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
+{
+  if (f == NULL)
+    /* No finalizer.  */;
+  else if (n == 1)
+    {
+      finalizer fin (result, f);
+      G.finalizers[G.context_depth].safe_push (fin);
+    }
+  else
+    {
+      vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
+      G.vec_finalizers[G.context_depth].safe_push (fin);
+    }
 }
 
 /* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */
 
 void *
-ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
+ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
+		    MEM_STAT_DECL)
 {
   size_t order, word, bit, object_offset, object_size;
   struct page_entry *entry;
   void *result;
 
-  if (size < NUM_SIZE_LOOKUP)
-    {
-      order = size_lookup[size];
-      object_size = OBJECT_SIZE (order);
-    }
-  else
-    {
-      order = 10;
-      while (size > (object_size = OBJECT_SIZE (order)))
-	order++;
-    }
+  ggc_round_alloc_size_1 (size, &order, &object_size);
 
   /* If there are non-full pages for this size allocation, they are at
      the head of the list.  */
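
The split into ggc_round_alloc_size_1 / ggc_round_alloc_size lets callers ask up front how much usable space an allocation will really get, so they can grow into the bucket padding instead of wasting it. A hypothetical caller (the concrete rounded values depend on the OBJECT_SIZE tables above):

    size_t want = 100;
    size_t real = ggc_round_alloc_size (want);  /* Smallest bucket >= 100 bytes.  */
    /* Request 'real' bytes instead of 'want'; the allocation lands in the
       same bucket either way, so the extra real - want bytes come for free.  */
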
@@ -1186,10 +1376,9 @@
 
   /* Calculate the object's address.  */
   result = entry->page + object_offset;
-#ifdef GATHER_STATISTICS
-  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
-		       result PASS_MEM_STAT);
-#endif
+  if (GATHER_STATISTICS)
+    ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
+			 result FINAL_PASS_MEM_STAT);
 
 #ifdef ENABLE_GC_CHECKING
   /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
@@ -1220,32 +1409,34 @@
   /* For timevar statistics.  */
   timevar_ggc_mem_total += object_size;
 
-#ifdef GATHER_STATISTICS
-  {
-    size_t overhead = object_size - size;
-
-    G.stats.total_overhead += overhead;
-    G.stats.total_allocated += object_size;
-    G.stats.total_overhead_per_order[order] += overhead;
-    G.stats.total_allocated_per_order[order] += object_size;
-
-    if (size <= 32)
-      {
-	G.stats.total_overhead_under32 += overhead;
-	G.stats.total_allocated_under32 += object_size;
-      }
-    if (size <= 64)
-      {
-	G.stats.total_overhead_under64 += overhead;
-	G.stats.total_allocated_under64 += object_size;
-      }
-    if (size <= 128)
-      {
-	G.stats.total_overhead_under128 += overhead;
-	G.stats.total_allocated_under128 += object_size;
-      }
-  }
-#endif
+  if (f)
+    add_finalizer (result, f, s, n);
+
+  if (GATHER_STATISTICS)
+    {
+      size_t overhead = object_size - size;
+
+      G.stats.total_overhead += overhead;
+      G.stats.total_allocated += object_size;
+      G.stats.total_overhead_per_order[order] += overhead;
+      G.stats.total_allocated_per_order[order] += object_size;
+
+      if (size <= 32)
+	{
+	  G.stats.total_overhead_under32 += overhead;
+	  G.stats.total_allocated_under32 += object_size;
+	}
+      if (size <= 64)
+	{
+	  G.stats.total_overhead_under64 += overhead;
+	  G.stats.total_allocated_under64 += object_size;
+	}
+      if (size <= 128)
+	{
+	  G.stats.total_overhead_under128 += overhead;
+	  G.stats.total_allocated_under128 += object_size;
+	}
+    }
 
   if (GGC_DEBUG_LEVEL >= 3)
     fprintf (G.debug_file,
@@ -1266,12 +1457,14 @@
   unsigned long mask;
   unsigned long offset;
 
-  if (!p || !ggc_allocated_p (p))
+  if (!p)
     return;
 
-  /* Look up the page on which the object is alloced.  .  */
-  entry = lookup_page_table_entry (p);
-  gcc_assert (entry);
+  /* Look up the page on which the object is alloced.  If it was not
+     GC allocated, gracefully bail out.  */
+  entry = safe_lookup_page_table_entry (p);
+  if (!entry)
+    return;
 
   /* Calculate the index of the object on the page; this is its bit
      position in the in_use_p bitmap.  Note that because a char* might
@@ -1307,6 +1500,26 @@
   return;
 }
 
+
+/* User-callable entry points for marking string X.  */
+
+void
+gt_ggc_mx (const char *& x)
+{
+  gt_ggc_m_S (x);
+}
+
+void
+gt_ggc_mx (unsigned char *& x)
+{
+  gt_ggc_m_S (x);
+}
+
+void
+gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
+{
+}
+
 /* If P is not marked, marks it and return false.  Otherwise return true.
    P must have been allocated by the GC allocator; it mustn't point to
    static objects, stack variables, or memory allocated with malloc.  */
@@ -1382,13 +1595,15 @@
 void
 ggc_free (void *p)
 {
+  if (in_gc)
+    return;
+
   page_entry *pe = lookup_page_table_entry (p);
   size_t order = pe->order;
   size_t size = OBJECT_SIZE (order);
 
-#ifdef GATHER_STATISTICS
-  ggc_free_overhead (p);
-#endif
+  if (GATHER_STATISTICS)
+    ggc_free_overhead (p);
 
   if (GGC_DEBUG_LEVEL >= 3)
     fprintf (G.debug_file,
@@ -1499,9 +1714,14 @@
 void
 init_ggc (void)
 {
+  static bool init_p = false;
   unsigned order;
 
-  G.pagesize = getpagesize();
+  if (init_p)
+    return;
+  init_p = true;
+
+  G.pagesize = getpagesize ();
   G.lg_pagesize = exact_log2 (G.pagesize);
 
 #ifdef HAVE_MMAP_DEV_ZERO
@@ -1522,15 +1742,15 @@
      believe, is an unaligned page allocation, which would cause us to
      hork badly if we tried to use it.  */
   {
-    char *p = alloc_anon (NULL, G.pagesize);
+    char *p = alloc_anon (NULL, G.pagesize, true);
     struct page_entry *e;
-    if ((size_t)p & (G.pagesize - 1))
+    if ((uintptr_t)p & (G.pagesize - 1))
       {
 	/* How losing.  Discard this one and try another.  If we still
 	   can't get something useful, give up.  */
 
-	p = alloc_anon (NULL, G.pagesize);
-	gcc_assert (!((size_t)p & (G.pagesize - 1)));
+	p = alloc_anon (NULL, G.pagesize, true);
+	gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
       }
 
     /* We have a good page, might as well hold onto it...  */
@@ -1589,6 +1809,11 @@
   G.by_depth_max = INITIAL_PTE_COUNT;
   G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
   G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
+
+  /* Allocate space for the depth 0 finalizers.  */
+  G.finalizers.safe_push (vNULL);
+  G.vec_finalizers.safe_push (vNULL);
+  gcc_assert (G.finalizers.length() == 1);
 }
 
 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
@@ -1644,7 +1869,7 @@
 	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
 
 	  /* The data should be page-aligned.  */
-	  gcc_assert (!((size_t) p->page & (G.pagesize - 1)));
+	  gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
 
 	  /* Pages that aren't in the topmost context are not collected;
 	     nevertheless, we need their in-use bit vectors to store GC
@@ -1668,6 +1893,53 @@
     }
 }
 
+/* Check if any blocks with a registered finalizer have become unmarked.  If
+   so, run the finalizer and unregister it, because the block is about to be
+   freed.  Note that no guarantee is made about the order in which finalizers
+   run, so touching other objects in GC memory is extremely unwise.  */
+
+static void
+ggc_handle_finalizers ()
+{
+  unsigned dlen = G.finalizers.length();
+  for (unsigned d = G.context_depth; d < dlen; ++d)
+    {
+      vec<finalizer> &v = G.finalizers[d];
+      unsigned length = v.length ();
+      for (unsigned int i = 0; i < length;)
+	{
+	  finalizer &f = v[i];
+	  if (!ggc_marked_p (f.addr ()))
+	    {
+	      f.call ();
+	      v.unordered_remove (i);
+	      length--;
+	    }
+	  else
+	    i++;
+	}
+    }
+
+  gcc_assert (dlen == G.vec_finalizers.length());
+  for (unsigned d = G.context_depth; d < dlen; ++d)
+    {
+      vec<vec_finalizer> &vv = G.vec_finalizers[d];
+      unsigned length = vv.length ();
+      for (unsigned int i = 0; i < length;)
+	{
+	  vec_finalizer &f = vv[i];
+	  if (!ggc_marked_p (f.addr ()))
+	    {
+	      f.call ();
+	      vv.unordered_remove (i);
+	      length--;
+	    }
+	  else
+	    i++;
+	}
+    }
+}
+
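
The slightly unusual loop shape in ggc_handle_finalizers comes from vec::unordered_remove, which fills the hole at index i with the last element; a removal therefore re-examines slot i instead of advancing. The same pattern in isolation, with should_remove standing in for the !ggc_marked_p test (v and length as in the loops above):

    for (unsigned i = 0; i < length;)
      {
        if (should_remove (v[i]))
          {
            v.unordered_remove (i);  /* Last element moves into slot i.  */
            length--;                /* Stay at i and look at it again.  */
          }
        else
          i++;
      }
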
 /* Free all empty pages.  Partially empty pages need no attention
    because the `mark' bit doubles as an `unused' bit.  */
 
@@ -1907,7 +2179,6 @@
     MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
 
   float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
-
   if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
     return;
 
@@ -1930,15 +2201,19 @@
 
   invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
 
+  in_gc = true;
   clear_marks ();
   ggc_mark_roots ();
-#ifdef GATHER_STATISTICS
-  ggc_prune_overhead_list ();
-#endif
+  ggc_handle_finalizers ();
+
+  if (GATHER_STATISTICS)
+    ggc_prune_overhead_list ();
+
   poison_pages ();
   validate_free_objects ();
   sweep_pages ();
 
+  in_gc = false;
   G.allocated_last_gc = G.allocated;
 
   invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
@@ -1951,6 +2226,22 @@
     fprintf (G.debug_file, "END COLLECTING\n");
 }
 
+/* Assume that all GGC memory is reachable and grow the limits for the next
+   collection.  With checking enabled, trigger a real collection instead, so
+   that compiling with -Q reports how much memory really is reachable.  */
+
+void
+ggc_grow (void)
+{
+  if (!flag_checking)
+    G.allocated_last_gc = MAX (G.allocated_last_gc,
+			       G.allocated);
+  else
+    ggc_collect ();
+  if (!quiet_flag)
+    fprintf (stderr, " {GC start %luk} ", (unsigned long) G.allocated / 1024);
+}
+
 /* Print allocation statistics.  */
 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
 		  ? (x) \
@@ -1983,7 +2274,7 @@
      allocation.  */
   fprintf (stderr,
            "Memory still allocated at the end of the compilation process\n");
-  fprintf (stderr, "%-5s %10s  %10s  %10s\n",
+  fprintf (stderr, "%-8s %10s  %10s  %10s\n",
 	   "Size", "Allocated", "Used", "Overhead");
   for (i = 0; i < NUM_ORDERS; ++i)
     {
@@ -2010,52 +2301,55 @@
 	  overhead += (sizeof (page_entry) - sizeof (long)
 		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
 	}
-      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
+      fprintf (stderr, "%-8lu %10lu%c %10lu%c %10lu%c\n",
 	       (unsigned long) OBJECT_SIZE (i),
 	       SCALE (allocated), STAT_LABEL (allocated),
 	       SCALE (in_use), STAT_LABEL (in_use),
 	       SCALE (overhead), STAT_LABEL (overhead));
       total_overhead += overhead;
     }
-  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
+  fprintf (stderr, "%-8s %10lu%c %10lu%c %10lu%c\n", "Total",
 	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
-	   SCALE (G.allocated), STAT_LABEL(G.allocated),
+	   SCALE (G.allocated), STAT_LABEL (G.allocated),
 	   SCALE (total_overhead), STAT_LABEL (total_overhead));
 
-#ifdef GATHER_STATISTICS
-  {
-    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
-
-    fprintf (stderr, "Total Overhead:                        %10lld\n",
-             G.stats.total_overhead);
-    fprintf (stderr, "Total Allocated:                       %10lld\n",
-             G.stats.total_allocated);
-
-    fprintf (stderr, "Total Overhead  under  32B:            %10lld\n",
-             G.stats.total_overhead_under32);
-    fprintf (stderr, "Total Allocated under  32B:            %10lld\n",
-             G.stats.total_allocated_under32);
-    fprintf (stderr, "Total Overhead  under  64B:            %10lld\n",
-             G.stats.total_overhead_under64);
-    fprintf (stderr, "Total Allocated under  64B:            %10lld\n",
-             G.stats.total_allocated_under64);
-    fprintf (stderr, "Total Overhead  under 128B:            %10lld\n",
-             G.stats.total_overhead_under128);
-    fprintf (stderr, "Total Allocated under 128B:            %10lld\n",
-             G.stats.total_allocated_under128);
-
-    for (i = 0; i < NUM_ORDERS; i++)
-      if (G.stats.total_allocated_per_order[i])
-        {
-          fprintf (stderr, "Total Overhead  page size %7lu:     %10lld\n",
-                   (unsigned long) OBJECT_SIZE (i),
-		   G.stats.total_overhead_per_order[i]);
-          fprintf (stderr, "Total Allocated page size %7lu:     %10lld\n",
-                   (unsigned long) OBJECT_SIZE (i),
-		   G.stats.total_allocated_per_order[i]);
-        }
+  if (GATHER_STATISTICS)
+    {
+      fprintf (stderr, "\nTotal allocations and overheads during "
+	       "the compilation process\n");
+
+      fprintf (stderr, "Total Overhead:                          %10"
+	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead);
+      fprintf (stderr, "Total Allocated:                         %10"
+	       HOST_LONG_LONG_FORMAT "d\n",
+	       G.stats.total_allocated);
+
+      fprintf (stderr, "Total Overhead  under  32B:              %10"
+	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under32);
+      fprintf (stderr, "Total Allocated under  32B:              %10"
+	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under32);
+      fprintf (stderr, "Total Overhead  under  64B:              %10"
+	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under64);
+      fprintf (stderr, "Total Allocated under  64B:              %10"
+	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under64);
+      fprintf (stderr, "Total Overhead  under 128B:              %10"
+	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under128);
+      fprintf (stderr, "Total Allocated under 128B:              %10"
+	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under128);
+
+      for (i = 0; i < NUM_ORDERS; i++)
+	if (G.stats.total_allocated_per_order[i])
+	  {
+	    fprintf (stderr, "Total Overhead  page size %9lu:     %10"
+		     HOST_LONG_LONG_FORMAT "d\n",
+		     (unsigned long) OBJECT_SIZE (i),
+		     G.stats.total_overhead_per_order[i]);
+	    fprintf (stderr, "Total Allocated page size %9lu:     %10"
+		     HOST_LONG_LONG_FORMAT "d\n",
+		     (unsigned long) OBJECT_SIZE (i),
+		     G.stats.total_allocated_per_order[i]);
+	  }
   }
-#endif
 }
 
 struct ggc_pch_ondisk
@@ -2066,7 +2360,7 @@
 struct ggc_pch_data
 {
   struct ggc_pch_ondisk d;
-  size_t base[NUM_ORDERS];
+  uintptr_t base[NUM_ORDERS];
   size_t written[NUM_ORDERS];
 };
 
@@ -2078,8 +2372,7 @@
 
 void
 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
-		      size_t size, bool is_string ATTRIBUTE_UNUSED,
-		      enum gt_types_enum type ATTRIBUTE_UNUSED)
+		      size_t size, bool is_string ATTRIBUTE_UNUSED)
 {
   unsigned order;
 
@@ -2102,28 +2395,27 @@
   unsigned i;
 
   for (i = 0; i < NUM_ORDERS; i++)
-    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
+    a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
   return a;
 }
 
 void
 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
 {
-  size_t a = (size_t) base;
+  uintptr_t a = (uintptr_t) base;
   unsigned i;
 
   for (i = 0; i < NUM_ORDERS; i++)
     {
       d->base[i] = a;
-      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
+      a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
     }
 }
 
 
 char *
 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
-		      size_t size, bool is_string ATTRIBUTE_UNUSED,
-		      enum gt_types_enum type ATTRIBUTE_UNUSED)
+		      size_t size, bool is_string ATTRIBUTE_UNUSED)
 {
   unsigned order;
   char *result;
@@ -2150,7 +2442,7 @@
 }
 
 void
-ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
+ggc_pch_write_object (struct ggc_pch_data *d,
 		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
 {
@@ -2167,29 +2459,29 @@
     }
 
   if (fwrite (x, size, 1, f) != 1)
-    fatal_error ("can%'t write PCH file: %m");
+    fatal_error (input_location, "can%'t write PCH file: %m");
 
   /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
      object out to OBJECT_SIZE(order).  This happens for strings.  */
 
   if (size != OBJECT_SIZE (order))
     {
-      unsigned padding = OBJECT_SIZE(order) - size;
+      unsigned padding = OBJECT_SIZE (order) - size;
 
       /* To speed small writes, we use a nulled-out array that's larger
          than most padding requests as the source for our null bytes.  This
          permits us to do the padding with fwrite() rather than fseek(), and
          limits the chance the OS may try to flush any outstanding writes.  */
-      if (padding <= sizeof(emptyBytes))
+      if (padding <= sizeof (emptyBytes))
         {
           if (fwrite (emptyBytes, 1, padding, f) != padding)
-            fatal_error ("can%'t write PCH file");
+            fatal_error (input_location, "can%'t write PCH file");
         }
       else
         {
           /* Larger than our buffer?  Just default to fseek.  */
           if (fseek (f, padding, SEEK_CUR) != 0)
-            fatal_error ("can%'t write PCH file");
+            fatal_error (input_location, "can%'t write PCH file");
         }
     }
 
@@ -2198,14 +2490,14 @@
       && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
 				   G.pagesize),
 		SEEK_CUR) != 0)
-    fatal_error ("can%'t write PCH file: %m");
+    fatal_error (input_location, "can%'t write PCH file: %m");
 }
 
 void
 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
 {
   if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
-    fatal_error ("can%'t write PCH file: %m");
+    fatal_error (input_location, "can%'t write PCH file: %m");
   free (d);
 }
 
@@ -2215,8 +2507,6 @@
 static void
 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
 {
-  unsigned i;
-
   /* First, we swap the new entries to the front of the varrays.  */
   page_entry **new_by_depth;
   unsigned long **new_save_in_use;
@@ -2244,10 +2534,10 @@
   G.save_in_use = new_save_in_use;
 
   /* Now update all the index_by_depth fields.  */
-  for (i = G.by_depth_in_use; i > 0; --i)
+  for (unsigned i = G.by_depth_in_use; i--;)
     {
-      page_entry *p = G.by_depth[i-1];
-      p->index_by_depth = i-1;
+      page_entry *p = G.by_depth[i];
+      p->index_by_depth = i;
     }
 
   /* And last, we update the depth pointers in G.depth.  The first
@@ -2278,13 +2568,17 @@
 #endif
   /* Since we free all the allocated objects, the free list becomes
      useless.  Validate it now, which will also clear it.  */
-  validate_free_objects();
+  validate_free_objects ();
 
   /* No object read from a PCH file should ever be freed.  So, set the
      context depth to 1, and set the depth of all the currently-allocated
      pages to be 1 too.  PCH pages will have depth 0.  */
   gcc_assert (!G.context_depth);
   G.context_depth = 1;
+  /* Allocate space for the depth 1 finalizers.  */
+  G.finalizers.safe_push (vNULL);
+  G.vec_finalizers.safe_push (vNULL);
+  gcc_assert (G.finalizers.length() == 2);
   for (i = 0; i < NUM_ORDERS; i++)
     {
       page_entry *p;
@@ -2295,7 +2589,7 @@
   /* Allocate the appropriate page-table entries for the pages read from
      the PCH file.  */
   if (fread (&d, sizeof (d), 1, f) != 1)
-    fatal_error ("can%'t read PCH file: %m");
+    fatal_error (input_location, "can%'t read PCH file: %m");
 
   for (i = 0; i < NUM_ORDERS; i++)
     {
@@ -2308,7 +2602,7 @@
       if (d.totals[i] == 0)
 	continue;
 
-      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
+      bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
       num_objs = bytes / OBJECT_SIZE (i);
       entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
 					    - sizeof (long)
@@ -2355,12 +2649,3 @@
   /* Update the statistics.  */
   G.allocated = G.allocated_last_gc = offs - (char *)addr;
 }
-
-struct alloc_zone
-{
-  int dummy;
-};
-
-struct alloc_zone rtl_zone;
-struct alloc_zone tree_zone;
-struct alloc_zone tree_id_zone;