
plt-scheme  4.2.1
os_dep.c
00001 /*
00002  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
00003  * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
00004  * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
00005  * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
00006  *
00007  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
00008  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
00009  *
00010  * Permission is hereby granted to use or copy this program
00011  * for any purpose,  provided the above notices are retained on all copies.
00012  * Permission to modify the code and to distribute modified code is granted,
00013  * provided the above notices are retained, and a notice that the code was
00014  * modified is included with the above copyright notice.
00015  */
00016 
00017 # include "private/gc_priv.h"
00018 
00019 # if defined(LINUX) && !defined(POWERPC)
00020 #   include <linux/version.h>
00021 #   if (LINUX_VERSION_CODE <= 0x10400)
00022       /* Ugly hack to get struct sigcontext_struct definition.  Required      */
00023       /* for some early 1.3.X releases.  Will hopefully go away soon. */
00024       /* in some later Linux releases, asm/sigcontext.h may have to   */
00025       /* be included instead.                                         */
00026 #     define __KERNEL__
00027 #     include <asm/signal.h>
00028 #     undef __KERNEL__
00029 #   else
00030       /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
00031       /* struct sigcontext.  libc6 (glibc2) uses "struct sigcontext" in     */
00032       /* prototypes, so we have to include the top-level sigcontext.h to    */
00033       /* make sure the former gets defined to be the latter if appropriate. */
00034 #     include <features.h>
00035 #     if 2 <= __GLIBC__
00036 #       if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
00037          /* glibc 2.1 no longer has sigcontext.h.  But signal.h       */
00038          /* has the right declaration for glibc 2.1.                  */
00039 #         include <sigcontext.h>
00040 #       endif /* 0 == __GLIBC_MINOR__ */
00041 #     else /* not 2 <= __GLIBC__ */
00042         /* libc5 doesn't have <sigcontext.h>: go directly with the kernel   */
00043         /* one.  Check LINUX_VERSION_CODE to see which we should reference. */
00044 #       include <asm/sigcontext.h>
00045 #     endif /* 2 <= __GLIBC__ */
00046 #   endif
00047 # endif
00048 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
00049     && !defined(MSWINCE)
00050 #   include <sys/types.h>
00051 #   if !defined(MSWIN32) && !defined(SUNOS4)
00052 #      include <unistd.h>
00053 #   endif
00054 # endif
00055 
00056 # include <stdio.h>
00057 # if defined(MSWINCE)
00058 #   define SIGSEGV 0 /* value is irrelevant */
00059 # else
00060 #   include <signal.h>
00061 # endif
00062 
00063 #if defined(LINUX) || defined(LINUX_STACKBOTTOM)
00064 # include <ctype.h>
00065 #endif
00066 
00067 /* Blatantly OS dependent routines, except for those that are related        */
00068 /* to dynamic loading.                                                */
00069 
00070 # if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START)
00071 #   define NEED_FIND_LIMIT
00072 # endif
00073 
00074 # if !defined(STACKBOTTOM) && defined(HEURISTIC2)
00075 #   define NEED_FIND_LIMIT
00076 # endif
00077 
00078 # if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
00079 #   define NEED_FIND_LIMIT
00080 # endif
00081 
00082 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
00083       || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
00084 #   define NEED_FIND_LIMIT
00085 # endif
00086 
00087 #if defined(FREEBSD) && (defined(I386) || defined(powerpc) || defined(__powerpc__))
00088 #  include <machine/trap.h>
00089 #  if !defined(PCR)
00090 #    define NEED_FIND_LIMIT
00091 #  endif
00092 #endif
00093 
00094 #if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__) \
00095     && !defined(NEED_FIND_LIMIT)
00096    /* Used by GC_init_netbsd_elf() below. */
00097 #  define NEED_FIND_LIMIT
00098 #endif
00099 
00100 #ifdef NEED_FIND_LIMIT
00101 #   include <setjmp.h>
00102 #endif
00103 
00104 #ifdef AMIGA
00105 # define GC_AMIGA_DEF
00106 # include "AmigaOS.c"
00107 # undef GC_AMIGA_DEF
00108 #endif
00109 
00110 #if defined(MSWIN32) || defined(MSWINCE)
00111 # define WIN32_LEAN_AND_MEAN
00112 # define NOSERVICE
00113 # include <windows.h>
00114 #endif
00115 
00116 #ifdef MACOS
00117 # include <Processes.h>
00118 #endif
00119 
00120 #ifdef IRIX5
00121 # include <sys/uio.h>
00122 # include <malloc.h>   /* for locking */
00123 #endif
00124 #if defined(USE_MMAP) || defined(USE_MUNMAP)
00125 # ifndef USE_MMAP
00126     --> USE_MUNMAP requires USE_MMAP
00127 # endif
00128 # include <sys/types.h>
00129 # include <sys/mman.h>
00130 # include <sys/stat.h>
00131 # include <errno.h>
00132 #endif
00133 
00134 #ifdef UNIX_LIKE
00135 # include <fcntl.h>
00136 # if defined(SUNOS5SIGS) && !defined(FREEBSD)
00137 #  include <sys/siginfo.h>
00138 # endif
00139   /* Define SETJMP and friends to be the version that restores */
00140   /* the signal mask.                                          */
00141 # define SETJMP(env) sigsetjmp(env, 1)
00142 # define LONGJMP(env, val) siglongjmp(env, val)
00143 # define JMP_BUF sigjmp_buf
00144 #else
00145 # define SETJMP(env) setjmp(env)
00146 # define LONGJMP(env, val) longjmp(env, val)
00147 # define JMP_BUF jmp_buf
00148 #endif
00149 
00150 #ifdef DARWIN
00151 /* for get_etext and friends */
00152 #include <mach-o/getsect.h>
00153 #endif
00154 
00155 #ifdef DJGPP
00156   /* Apparently necessary for djgpp 2.01.  May cause problems with    */
00157   /* other versions.                                           */
00158   typedef long unsigned int caddr_t;
00159 #endif
00160 
00161 #ifdef PCR
00162 # include "il/PCR_IL.h"
00163 # include "th/PCR_ThCtl.h"
00164 # include "mm/PCR_MM.h"
00165 #endif
00166 
00167 #if !defined(NO_EXECUTE_PERMISSION)
00168 # define OPT_PROT_EXEC PROT_EXEC
00169 #else
00170 # define OPT_PROT_EXEC 0
00171 #endif
00172 
00173 #if defined(LINUX) && \
00174     (defined(USE_PROC_FOR_LIBRARIES) || defined(IA64) || !defined(SMALL_CONFIG))
00175 
00176 /* We need to parse /proc/self/maps, either to find dynamic libraries */
00177 /* or to find the register backing store base (IA64).  Do it once     */
00178 /* here.                                                               */
00179 
00180 #define READ read
00181 
00182 /* Repeatedly perform a read call until the buffer is filled or       */
00183 /* we encounter EOF.                                    */
00184 ssize_t GC_repeat_read(int fd, char *buf, size_t count)
00185 {
00186     ssize_t num_read = 0;
00187     ssize_t result;
00188     
00189     while (num_read < count) {
00190        result = READ(fd, buf + num_read, count - num_read);
00191        if (result < 0) return result;
00192        if (result == 0) break;
00193        num_read += result;
00194     }
00195     return num_read;
00196 }
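
A minimal usage sketch (not part of os_dep.c; the helper name is hypothetical): a negative return from GC_repeat_read means the underlying read() failed, while a count shorter than requested simply means EOF was reached first.

    static int slurp_fd_demo(int fd, char *buf, size_t sz)
    {
        ssize_t n = GC_repeat_read(fd, buf, sz - 1);

        if (n < 0) return -1;     /* underlying read() failed            */
        buf[n] = '\0';            /* n < sz - 1 means we stopped at EOF  */
        return 0;
    }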
00197 
00198 /*
00199  * Apply fn to a buffer containing the contents of /proc/self/maps.
00200  * Return the result of fn or, if we failed, 0.
00201  * We currently do nothing to /proc/self/maps other than simply read
00202  * it.  This code could be simplified if we could determine its size
00203  * ahead of time.
00204  */
00205 
00206 word GC_apply_to_maps(word (*fn)(char *))
00207 {
00208     int f;
00209     int result;
00210     size_t maps_size = 4000;  /* Initial guess.  */
00211     static char init_buf[1];
00212     static char *maps_buf = init_buf;
00213     static size_t maps_buf_sz = 1;
00214 
00215     /* Read /proc/self/maps, growing maps_buf as necessary.    */
00216         /* Note that we may not allocate conventionally, and   */
00217         /* thus can't use stdio.                        */
00218        do {
00219            if (maps_size >= maps_buf_sz) {
00220              /* Grow only by powers of 2, since we leak "too small" buffers. */
00221              while (maps_size >= maps_buf_sz) maps_buf_sz *= 2;
00222              maps_buf = GC_scratch_alloc(maps_buf_sz);
00223              if (maps_buf == 0) return 0;
00224            }
00225            f = open("/proc/self/maps", O_RDONLY);
00226            if (-1 == f) return 0;
00227            maps_size = 0;
00228            do {
00229                result = GC_repeat_read(f, maps_buf, maps_buf_sz-1);
00230                if (result <= 0) return 0;
00231                maps_size += result;
00232            } while (result == maps_buf_sz-1);
00233            close(f);
00234        } while (maps_size >= maps_buf_sz);
00235         maps_buf[maps_size] = '\0';
00236        
00237     /* Apply fn to result. */
00238        return fn(maps_buf);
00239 }
00240 
00241 #endif /* Need GC_apply_to_maps */
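
A hedged sketch of how a caller plugs a parser into GC_apply_to_maps (the callback name is hypothetical; word is the integer type from gc_priv.h used throughout this file):

    static word count_map_lines_demo(char *maps)
    {
        word n = 0;

        for (; *maps != '\0'; ++maps) {
            if (*maps == '\n') ++n;   /* one /proc/self/maps entry per line */
        }
        return n;
    }

    /* word nlines = GC_apply_to_maps(count_map_lines_demo);           */
    /* A result of 0 means the maps file could not be read.            */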
00242 
00243 #if defined(LINUX) && (defined(USE_PROC_FOR_LIBRARIES) || defined(IA64))
00244 //
00245 //  GC_parse_map_entry parses an entry from /proc/self/maps so we can
00246 //  locate all writable data segments that belong to shared libraries.
00247 //  The format of one of these entries and the fields we care about
00248 //  is as follows:
00249 //  XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537     name of mapping...\n
00250 //  ^^^^^^^^ ^^^^^^^^ ^^^^          ^^
00251 //  start    end      prot          maj_dev
00252 //
00253 //  Note that since about August 2003 kernels, the columns no longer have
00254 //  fixed offsets on 64-bit kernels.  Hence we no longer rely on fixed offsets
00255 //  anywhere, which is safer anyway.
00256 //
00257 
00258 /*
00259  * Assign various fields of the first line in buf_ptr to *start, *end,
00260  * *prot_buf and *maj_dev.  Only *prot_buf may be set for unwritable maps.
00261  */
00262 char *GC_parse_map_entry(char *buf_ptr, word *start, word *end,
00263                                 char *prot_buf, unsigned int *maj_dev)
00264 {
00265     char *start_start, *end_start, *prot_start, *maj_dev_start;
00266     char *p;
00267     char *endp;
00268 
00269     if (buf_ptr == NULL || *buf_ptr == '\0') {
00270         return NULL;
00271     }
00272 
00273     p = buf_ptr;
00274     while (isspace(*p)) ++p;
00275     start_start = p;
00276     GC_ASSERT(isxdigit(*start_start));
00277     *start = strtoul(start_start, &endp, 16); p = endp;
00278     GC_ASSERT(*p=='-');
00279 
00280     ++p;
00281     end_start = p;
00282     GC_ASSERT(isxdigit(*end_start));
00283     *end = strtoul(end_start, &endp, 16); p = endp;
00284     GC_ASSERT(isspace(*p));
00285 
00286     while (isspace(*p)) ++p;
00287     prot_start = p;
00288     GC_ASSERT(*prot_start == 'r' || *prot_start == '-');
00289     memcpy(prot_buf, prot_start, 4);
00290     prot_buf[4] = '\0';
00291     if (prot_buf[1] == 'w') {/* we can skip the rest if it's not writable. */
00292        /* Skip past protection field to offset field */
00293           while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
00294           GC_ASSERT(isxdigit(*p));
00295        /* Skip past offset field, which we ignore */
00296           while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
00297        maj_dev_start = p;
00298         GC_ASSERT(isxdigit(*maj_dev_start));
00299         *maj_dev = strtoul(maj_dev_start, NULL, 16);
00300     }
00301 
00302     while (*p && *p++ != '\n');
00303 
00304     return p;
00305 }
00306 
00307 #endif /* Need to parse /proc/self/maps. */      
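
A sketch (hypothetical helper, operating on the buffer produced by GC_apply_to_maps above) of driving GC_parse_map_entry across a whole buffer; the IA64 code further below does essentially this to locate the register backing store:

    static void walk_writable_maps_demo(char *maps)
    {
        char prot[5];
        word start, end;
        unsigned int maj_dev;
        char *p = maps;

        while ((p = GC_parse_map_entry(p, &start, &end, prot, &maj_dev)) != 0) {
            if (prot[1] == 'w') {
                /* [start, end) is a writable mapping.  maj_dev is only */
                /* meaningful here, since the parser skips the device   */
                /* field for unwritable entries.                        */
            }
        }
    }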
00308 
00309 #if defined(SEARCH_FOR_DATA_START)
00310   /* The I386 case can be handled without a search.  The Alpha case   */
00311   /* used to be handled differently as well, but the rules changed    */
00312   /* for recent Linux versions.  This seems to be the easiest way to  */
00313   /* cover all versions.                                       */
00314 
00315 # ifdef LINUX
00316     /* Some Linux distributions arrange to define __data_start.  Some */
00317     /* define data_start as a weak symbol.  The latter is technically */
00318     /* broken, since the user program may define data_start, in which */
00319     /* case we lose.  Nonetheless, we try both, preferring __data_start. */
00320     /* We assume gcc-compatible pragmas.  */
00321 #   pragma weak __data_start
00322     extern int __data_start[];
00323 #   pragma weak data_start
00324     extern int data_start[];
00325 # endif /* LINUX */
00326   extern int _end[];
00327 
00328   ptr_t GC_data_start;
00329 
00330   void GC_init_linux_data_start()
00331   {
00332     extern ptr_t GC_find_limit();
00333 
00334 #   ifdef LINUX
00335       /* Try the easy approaches first:   */
00336       if ((ptr_t)__data_start != 0) {
00337          GC_data_start = (ptr_t)(__data_start);
00338          return;
00339       }
00340       if ((ptr_t)data_start != 0) {
00341          GC_data_start = (ptr_t)(data_start);
00342          return;
00343       }
00344 #   endif /* LINUX */
00345     GC_data_start = GC_find_limit((ptr_t)(_end), FALSE);
00346   }
00347 #endif
00348 
00349 # ifdef ECOS
00350 
00351 # ifndef ECOS_GC_MEMORY_SIZE
00352 # define ECOS_GC_MEMORY_SIZE (448 * 1024)
00353 # endif /* ECOS_GC_MEMORY_SIZE */
00354 
00355 // setjmp() function, as described in ANSI para 7.6.1.1
00356 #undef SETJMP
00357 #define SETJMP( __env__ )  hal_setjmp( __env__ )
00358 
00359 // FIXME: This is a simple way of allocating memory which is
00360 // compatible with ECOS early releases.  Later releases use a more
00361 // sophisticated means of allocating memory than this simple static
00362 // allocator, but this method is at least bound to work.
00363 static char memory[ECOS_GC_MEMORY_SIZE];
00364 static char *brk = memory;
00365 
00366 static void *tiny_sbrk(ptrdiff_t increment)
00367 {
00368   void *p = brk;
00369 
00370   brk += increment;
00371 
00372   if (brk >  memory + sizeof memory)
00373     {
00374       brk -= increment;
00375       return NULL;
00376     }
00377 
00378   return p;
00379 }
00380 #define sbrk tiny_sbrk
00381 # endif /* ECOS */
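
A small sketch (hypothetical function) of how the static allocator above behaves once sbrk has been redefined to tiny_sbrk:

    static void tiny_sbrk_demo(void)
    {
        void *p = sbrk((ptrdiff_t)4096);  /* carve 4K out of `memory'      */
        void *q = sbrk((ptrdiff_t)0);     /* current break, nothing taken  */

        if (p == NULL) {
            /* The ECOS_GC_MEMORY_SIZE pool is exhausted; treat this   */
            /* exactly like a failing sbrk().                          */
        }
        (void)q;
    }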
00382 
00383 #if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)
00384   ptr_t GC_data_start;
00385 
00386   void GC_init_netbsd_elf()
00387   {
00388     extern ptr_t GC_find_limit();
00389     extern char **environ;
00390        /* This may need to be environ, without the underscore, for    */
00391        /* some versions.                                       */
00392     GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
00393   }
00394 #endif
00395 
00396 # ifdef OS2
00397 
00398 # include <stddef.h>
00399 
00400 # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
00401 
00402 struct exe_hdr {
00403     unsigned short      magic_number;
00404     unsigned short      padding[29];
00405     long                new_exe_offset;
00406 };
00407 
00408 #define E_MAGIC(x)      (x).magic_number
00409 #define EMAGIC          0x5A4D  
00410 #define E_LFANEW(x)     (x).new_exe_offset
00411 
00412 struct e32_exe {
00413     unsigned char       magic_number[2]; 
00414     unsigned char       byte_order; 
00415     unsigned char       word_order; 
00416     unsigned long       exe_format_level;
00417     unsigned short      cpu;       
00418     unsigned short      os;
00419     unsigned long       padding1[13];
00420     unsigned long       object_table_offset;
00421     unsigned long       object_count;    
00422     unsigned long       padding2[31];
00423 };
00424 
00425 #define E32_MAGIC1(x)   (x).magic_number[0]
00426 #define E32MAGIC1       'L'
00427 #define E32_MAGIC2(x)   (x).magic_number[1]
00428 #define E32MAGIC2       'X'
00429 #define E32_BORDER(x)   (x).byte_order
00430 #define E32LEBO         0
00431 #define E32_WORDER(x)   (x).word_order
00432 #define E32LEWO         0
00433 #define E32_CPU(x)      (x).cpu
00434 #define E32CPU286       1
00435 #define E32_OBJTAB(x)   (x).object_table_offset
00436 #define E32_OBJCNT(x)   (x).object_count
00437 
00438 struct o32_obj {
00439     unsigned long       size;  
00440     unsigned long       base;
00441     unsigned long       flags;  
00442     unsigned long       pagemap;
00443     unsigned long       mapsize; 
00444     unsigned long       reserved;
00445 };
00446 
00447 #define O32_FLAGS(x)    (x).flags
00448 #define OBJREAD         0x0001L
00449 #define OBJWRITE        0x0002L
00450 #define OBJINVALID      0x0080L
00451 #define O32_SIZE(x)     (x).size
00452 #define O32_BASE(x)     (x).base
00453 
00454 # else  /* IBM's compiler */
00455 
00456 /* A kludge to get around what appears to be a header file bug */
00457 # ifndef WORD
00458 #   define WORD unsigned short
00459 # endif
00460 # ifndef DWORD
00461 #   define DWORD unsigned long
00462 # endif
00463 
00464 # define EXE386 1
00465 # include <newexe.h>
00466 # include <exe386.h>
00467 
00468 # endif  /* __IBMC__ */
00469 
00470 # define INCL_DOSEXCEPTIONS
00471 # define INCL_DOSPROCESS
00472 # define INCL_DOSERRORS
00473 # define INCL_DOSMODULEMGR
00474 # define INCL_DOSMEMMGR
00475 # include <os2.h>
00476 
00477 
00478 /* Disable and enable signals during nontrivial allocations    */
00479 
00480 void GC_disable_signals(void)
00481 {
00482     ULONG nest;
00483     
00484     DosEnterMustComplete(&nest);
00485     if (nest != 1) ABORT("nested GC_disable_signals");
00486 }
00487 
00488 void GC_enable_signals(void)
00489 {
00490     ULONG nest;
00491     
00492     DosExitMustComplete(&nest);
00493     if (nest != 0) ABORT("GC_enable_signals");
00494 }
00495 
00496 
00497 # else
00498 
00499 #  if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
00500       && !defined(MSWINCE) \
00501       && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW) \
00502       && !defined(NOSYS) && !defined(ECOS)
00503 
00504 #   if defined(sigmask) && !defined(UTS4) && !defined(HURD)
00505        /* Use the traditional BSD interface */
00506 #      define SIGSET_T int
00507 #      define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
00508 #      define SIG_FILL(set)  (set) = 0x7fffffff
00509          /* Setting the leading bit appears to provoke a bug in some  */
00510          /* longjmp implementations.  Most systems appear not to have */
00511          /* a signal 32.                                       */
00512 #      define SIGSETMASK(old, new) (old) = sigsetmask(new)
00513 #   else
00514        /* Use POSIX/SYSV interface */
00515 #      define SIGSET_T sigset_t
00516 #      define SIG_DEL(set, signal) sigdelset(&(set), (signal))
00517 #      define SIG_FILL(set) sigfillset(&set)
00518 #      define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
00519 #   endif
00520 
00521 static GC_bool mask_initialized = FALSE;
00522 
00523 static SIGSET_T new_mask;
00524 
00525 static SIGSET_T old_mask;
00526 
00527 static SIGSET_T dummy;
00528 
00529 #if defined(PRINTSTATS) && !defined(THREADS)
00530 # define CHECK_SIGNALS
00531   int GC_sig_disabled = 0;
00532 #endif
00533 
00534 void GC_disable_signals()
00535 {
00536     if (!mask_initialized) {
00537        SIG_FILL(new_mask);
00538 
00539        SIG_DEL(new_mask, SIGSEGV);
00540        SIG_DEL(new_mask, SIGILL);
00541        SIG_DEL(new_mask, SIGQUIT);
00542 #      ifdef SIGBUS
00543            SIG_DEL(new_mask, SIGBUS);
00544 #      endif
00545 #      ifdef SIGIOT
00546            SIG_DEL(new_mask, SIGIOT);
00547 #      endif
00548 #      ifdef SIGEMT
00549            SIG_DEL(new_mask, SIGEMT);
00550 #      endif
00551 #      ifdef SIGTRAP
00552            SIG_DEL(new_mask, SIGTRAP);
00553 #      endif 
00554        mask_initialized = TRUE;
00555     }
00556 #   ifdef CHECK_SIGNALS
00557        if (GC_sig_disabled != 0) ABORT("Nested disables");
00558        GC_sig_disabled++;
00559 #   endif
00560     SIGSETMASK(old_mask,new_mask);
00561 }
00562 
00563 void GC_enable_signals()
00564 {
00565 #   ifdef CHECK_SIGNALS
00566        if (GC_sig_disabled != 1) ABORT("Unmatched enable");
00567        GC_sig_disabled--;
00568 #   endif
00569     SIGSETMASK(dummy,old_mask);
00570 }
00571 
00572 #  endif  /* !PCR */
00573 
00574 # endif 
00576 /* Ivan Demakov: simplest way (to me) */
00577 #if defined (DOS4GW)
00578   void GC_disable_signals() { }
00579   void GC_enable_signals() { }
00580 #endif
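
All of the implementations above share one contract, sketched here in a hypothetical wrapper (the real call sites are the allocation paths elsewhere in the collector): the calls must pair up exactly and must not nest, as the OS/2 nesting check and the CHECK_SIGNALS counters above enforce.

    static void signal_bracket_demo(void)
    {
        GC_disable_signals();
        /* ... allocator work that must not be interrupted by signals ... */
        GC_enable_signals();
    }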
00581 
00582 /* Find the page size */
00583 word GC_page_size;
00584 
00585 # if defined(MSWIN32) || defined(MSWINCE)
00586   void GC_setpagesize()
00587   {
00588     GetSystemInfo(&GC_sysinfo);
00589     GC_page_size = GC_sysinfo.dwPageSize;
00590   }
00591 
00592 # else
00593 #   if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
00594        || defined(USE_MUNMAP)
00595        void GC_setpagesize()
00596        {
00597            GC_page_size = GETPAGESIZE();
00598        }
00599 #   else
00600        /* It's acceptable to fake it. */
00601        void GC_setpagesize()
00602        {
00603            GC_page_size = HBLKSIZE;
00604        }
00605 #   endif
00606 # endif
00607 
00608 /* 
00609  * Find the base of the stack. 
00610  * Used only in single-threaded environment.
00611  * With threads, GC_mark_roots needs to know how to do this.
00612  * Called with allocator lock held.
00613  */
00614 # if defined(MSWIN32) || defined(MSWINCE)
00615 # define is_writable(prot) ((prot) == PAGE_READWRITE \
00616                          || (prot) == PAGE_WRITECOPY \
00617                          || (prot) == PAGE_EXECUTE_READWRITE \
00618                          || (prot) == PAGE_EXECUTE_WRITECOPY)
00619 /* Return the number of bytes that are writable starting at p. */
00620 /* The pointer p is assumed to be page aligned.                */
00621 /* If base is not 0, *base becomes the beginning of the        */
00622 /* allocation region containing p.                      */
00623 word GC_get_writable_length(ptr_t p, ptr_t *base)
00624 {
00625     MEMORY_BASIC_INFORMATION buf;
00626     word result;
00627     word protect;
00628     
00629     result = VirtualQuery(p, &buf, sizeof(buf));
00630     if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
00631     if (base != 0) *base = (ptr_t)(buf.AllocationBase);
00632     protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
00633     if (!is_writable(protect)) {
00634         return(0);
00635     }
00636     if (buf.State != MEM_COMMIT) return(0);
00637     return(buf.RegionSize);
00638 }
00639 
00640 ptr_t GC_get_stack_base()
00641 {
00642   /* PLTSCHEME: set page size if it's not ready (so I can use this
00643      function before a GC happens). */
00644   if (!GC_page_size) GC_setpagesize();
00645   {
00646 
00647     int dummy;
00648     ptr_t sp = (ptr_t)(&dummy);
00649     ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
00650     word size = GC_get_writable_length(trunc_sp, 0);
00651    
00652     return(trunc_sp + size);
00653 
00654   } /* PLTSCHEME: close brace */
00655 }
00656 
00657 
00658 # endif /* MS Windows */
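
A sketch (hypothetical, MSWIN32/MSWINCE builds only) of how the two routines above fit together: GC_get_stack_base is essentially GC_get_writable_length applied to the page holding the current stack pointer.

    static void win32_stack_probe_demo(void)
    {
        int marker;
        ptr_t sp = (ptr_t)(&marker);
        ptr_t page = (ptr_t)((word)sp & ~(GC_page_size - 1));
        ptr_t region_base;
        word writable = GC_get_writable_length(page, &region_base);

        /* [page, page + writable) is committed and writable; the value  */
        /* returned by GC_get_stack_base() is page + writable.           */
        (void)writable; (void)region_base;
    }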
00659 
00660 # ifdef BEOS
00661 # include <kernel/OS.h>
00662 ptr_t GC_get_stack_base(){
00663        thread_info th;
00664        get_thread_info(find_thread(NULL),&th);
00665        return th.stack_end;
00666 }
00667 # endif /* BEOS */
00668 
00669 
00670 # ifdef OS2
00671 
00672 ptr_t GC_get_stack_base()
00673 {
00674     PTIB ptib;
00675     PPIB ppib;
00676     
00677     if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
00678        GC_err_printf0("DosGetInfoBlocks failed\n");
00679        ABORT("DosGetInfoBlocks failed\n");
00680     }
00681     return((ptr_t)(ptib -> tib_pstacklimit));
00682 }
00683 
00684 # endif /* OS2 */
00685 
00686 # ifdef AMIGA
00687 #   define GC_AMIGA_SB
00688 #   include "AmigaOS.c"
00689 #   undef GC_AMIGA_SB
00690 # endif /* AMIGA */
00691 
00692 # if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)
00693 
00694 #   ifdef __STDC__
00695        typedef void (*handler)(int);
00696 #   else
00697        typedef void (*handler)();
00698 #   endif
00699 
00700 #   if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
00701     || defined(HURD) || defined(NETBSD)
00702        static struct sigaction old_segv_act;
00703 #      if defined(IRIX5) || defined(HPUX) \
00704        || defined(HURD) || defined(NETBSD)
00705            static struct sigaction old_bus_act;
00706 #      endif
00707 #   else
00708         static handler old_segv_handler, old_bus_handler;
00709 #   endif
00710     
00711 #   ifdef __STDC__
00712       void GC_set_and_save_fault_handler(handler h)
00713 #   else
00714       void GC_set_and_save_fault_handler(h)
00715       handler h;
00716 #   endif
00717     {
00718 #      if defined(SUNOS5SIGS) || defined(IRIX5)  \
00719         || defined(OSF1) || defined(HURD) || defined(NETBSD)
00720          struct sigaction   act;
00721 
00722          act.sa_handler     = h;
00723 #        if 0 /* Was necessary for Solaris 2.3 and very temporary     */
00724               /* NetBSD bugs.                                         */
00725             act.sa_flags          = SA_RESTART | SA_NODEFER;
00726 #         else
00727             act.sa_flags          = SA_RESTART;
00728 #        endif
00729 
00730          (void) sigemptyset(&act.sa_mask);
00731 #        ifdef GC_IRIX_THREADS
00732               /* Older versions have a bug related to retrieving and  */
00733               /* setting a handler at the same time.                  */
00734                (void) sigaction(SIGSEGV, 0, &old_segv_act);
00735                (void) sigaction(SIGSEGV, &act, 0);
00736                (void) sigaction(SIGBUS, 0, &old_bus_act);
00737                (void) sigaction(SIGBUS, &act, 0);
00738 #        else
00739                (void) sigaction(SIGSEGV, &act, &old_segv_act);
00740 #             if defined(IRIX5) \
00741                  || defined(HPUX) || defined(HURD) || defined(NETBSD)
00742                   /* Under Irix 5.x or HP/UX, we may get SIGBUS.      */
00743                   /* Pthreads doesn't exist under Irix 5.x, so we     */
00744                   /* don't have to worry in the threads case.         */
00745                   (void) sigaction(SIGBUS, &act, &old_bus_act);
00746 #             endif
00747 #        endif       /* GC_IRIX_THREADS */
00748 #      else
00749          old_segv_handler = signal(SIGSEGV, h);
00750 #        ifdef SIGBUS
00751            old_bus_handler = signal(SIGBUS, h);
00752 #        endif
00753 #      endif
00754     }
00755 # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
00756 
00757 # ifdef NEED_FIND_LIMIT
00758   /* Some tools to implement HEURISTIC2   */
00759 #   define MIN_PAGE_SIZE 256       /* Smallest conceivable page size, bytes */
00760     /* static */ JMP_BUF GC_jmp_buf;
00761     
00762     /*ARGSUSED*/
00763     void GC_fault_handler(sig)
00764     int sig;
00765     {
00766         LONGJMP(GC_jmp_buf, 1);
00767     }
00768 
00769     void GC_setup_temporary_fault_handler()
00770     {
00771        GC_set_and_save_fault_handler(GC_fault_handler);
00772     }
00773     
00774     void GC_reset_fault_handler()
00775     {
00776 #       if defined(SUNOS5SIGS) || defined(IRIX5) \
00777           || defined(OSF1) || defined(HURD) || defined(NETBSD)
00778          (void) sigaction(SIGSEGV, &old_segv_act, 0);
00779 #        if defined(IRIX5) \
00780             || defined(HPUX) || defined(HURD) || defined(NETBSD)
00781              (void) sigaction(SIGBUS, &old_bus_act, 0);
00782 #        endif
00783 #       else
00784          (void) signal(SIGSEGV, old_segv_handler);
00785 #        ifdef SIGBUS
00786            (void) signal(SIGBUS, old_bus_handler);
00787 #        endif
00788 #       endif
00789     }
00790 
00791     /* Return the first nonaddressable location > p (up) or    */
00792     /* the smallest location q s.t. [q,p) is addressable (!up).       */
00793     /* We assume that p (up) or p-1 (!up) is addressable.      */
00794     ptr_t GC_find_limit(p, up)
00795     ptr_t p;
00796     GC_bool up;
00797     {
00798         static VOLATILE ptr_t result;
00799               /* Needs to be static, since otherwise it may not be    */
00800               /* preserved across the longjmp.  Can safely be  */
00801               /* static since it's only called once, with the         */
00802               /* allocation lock held.                         */
00803 
00804 
00805        GC_setup_temporary_fault_handler();
00806        if (SETJMP(GC_jmp_buf) == 0) {
00807            result = (ptr_t)(((word)(p))
00808                            & ~(MIN_PAGE_SIZE-1));
00809            for (;;) {
00810                if (up) {
00811                   result += MIN_PAGE_SIZE;
00812                } else {
00813                   result -= MIN_PAGE_SIZE;
00814                }
00815               GC_noop1((word)(*result));
00816            }
00817        }
00818        GC_reset_fault_handler();
00819        if (!up) {
00820            result += MIN_PAGE_SIZE;
00821        }
00822        return(result);
00823     }
00824 # endif
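
A reduced sketch of the probe pattern GC_find_limit is built on, assuming the same NEED_FIND_LIMIT machinery (the helper is hypothetical): install the temporary handler, touch the address, and treat a longjmp back as "not mapped".

    static GC_bool addr_is_mapped_demo(ptr_t p)
    {
        static VOLATILE GC_bool ok;   /* static: must survive the longjmp */

        ok = FALSE;
        GC_setup_temporary_fault_handler();
        if (SETJMP(GC_jmp_buf) == 0) {
            GC_noop1((word)(*p));     /* faults, then longjmps, if unmapped */
            ok = TRUE;
        }
        GC_reset_fault_handler();
        return ok;
    }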
00825 
00826 #if defined(ECOS) || defined(NOSYS)
00827   ptr_t GC_get_stack_base()
00828   {
00829     return STACKBOTTOM;
00830   }
00831 #endif
00832 
00833 #ifdef HPUX_STACKBOTTOM
00834 
00835 #include <sys/param.h>
00836 #include <sys/pstat.h>
00837 
00838   ptr_t GC_get_register_stack_base(void)
00839   {
00840     struct pst_vm_status vm_status;
00841 
00842     int i = 0;
00843     while (pstat_getprocvm(&vm_status, sizeof(vm_status), 0, i++) == 1) {
00844       if (vm_status.pst_type == PS_RSESTACK) {
00845         return (ptr_t) vm_status.pst_vaddr;
00846       }
00847     }
00848 
00849     /* old way to get the register stackbottom */
00850     return (ptr_t)(((word)GC_stackbottom - BACKING_STORE_DISPLACEMENT - 1)
00851                    & ~(BACKING_STORE_ALIGNMENT - 1));
00852   }
00853 
00854 #endif /* HPUX_STACK_BOTTOM */
00855 
00856 #ifdef LINUX_STACKBOTTOM
00857 
00858 #include <sys/types.h>
00859 #include <sys/stat.h>
00860 
00861 # define STAT_SKIP 27   /* Number of fields preceding startstack      */
00862                      /* field in /proc/self/stat               */
00863 
00864 #ifdef USE_LIBC_PRIVATES
00865 # pragma weak __libc_stack_end
00866   extern ptr_t __libc_stack_end;
00867 #endif
00868 
00869 # ifdef IA64
00870     /* Try to read the backing store base from /proc/self/maps.       */
00871     /* We look for the writable mapping with a 0 major device,  */
00872     /* which is as close to our frame as possible, but below it.      */
00873     static word backing_store_base_from_maps(char *maps)
00874     {
00875       char prot_buf[5];
00876       char *buf_ptr = maps;
00877       word start, end;
00878       unsigned int maj_dev;
00879       word current_best = 0;
00880       word dummy;
00881   
00882       for (;;) {
00883         buf_ptr = GC_parse_map_entry(buf_ptr, &start, &end, prot_buf, &maj_dev);
00884        if (buf_ptr == NULL) return current_best;
00885        if (prot_buf[1] == 'w' && maj_dev == 0) {
00886            if (end < (word)(&dummy) && start > current_best) current_best = start;
00887        }
00888       }
00889       return current_best;
00890     }
00891 
00892     static word backing_store_base_from_proc(void)
00893     {
00894         return GC_apply_to_maps(backing_store_base_from_maps);
00895     }
00896 
00897 #   ifdef USE_LIBC_PRIVATES
00898 #     pragma weak __libc_ia64_register_backing_store_base
00899       extern ptr_t __libc_ia64_register_backing_store_base;
00900 #   endif
00901 
00902     ptr_t GC_get_register_stack_base(void)
00903     {
00904 #     ifdef USE_LIBC_PRIVATES
00905         if (0 != &__libc_ia64_register_backing_store_base
00906            && 0 != __libc_ia64_register_backing_store_base) {
00907          /* Glibc 2.2.4 has a bug such that for dynamically linked    */
00908          /* executables __libc_ia64_register_backing_store_base is    */
00909          /* defined but uninitialized during constructor calls.       */
00910          /* Hence we check for both nonzero address and value.        */
00911          return __libc_ia64_register_backing_store_base;
00912         }
00913 #     endif
00914       word result = backing_store_base_from_proc();
00915       if (0 == result) {
00916          /* Use dumb heuristics.  Works only for default configuration. */
00917          result = (word)GC_stackbottom - BACKING_STORE_DISPLACEMENT;
00918          result += BACKING_STORE_ALIGNMENT - 1;
00919          result &= ~(BACKING_STORE_ALIGNMENT - 1);
00920          /* Verify that it's at least readable.  If not, we goofed. */
00921          GC_noop1(*(word *)result); 
00922       }
00923       return (ptr_t)result;
00924     }
00925 # endif
00926 
00927   ptr_t GC_linux_stack_base(void)
00928   {
00929     /* We read the stack base value from /proc/self/stat.  We do this */
00930     /* using direct I/O system calls in order to avoid calling malloc   */
00931     /* in case REDIRECT_MALLOC is defined.                            */ 
00932 #   define STAT_BUF_SIZE 4096
00933 #   define STAT_READ read
00934          /* Should probably call the real read, if read is wrapped.   */
00935     char stat_buf[STAT_BUF_SIZE];
00936     int f;
00937     char c;
00938     word result = 0;
00939     size_t i, buf_offset = 0;
00940 
00941     /* First try the easy way.  This should work for glibc 2.2 */
00942     /* This fails in a prelinked ("prelink" command) executable */
00943     /* since the correct value of __libc_stack_end never       */
00944     /* becomes visible to us.  The second test works around    */
00945     /* this.                                            */  
00946 #   ifdef USE_LIBC_PRIVATES
00947       if (0 != &__libc_stack_end && 0 != __libc_stack_end ) {
00948 #       ifdef IA64
00949          /* Some versions of glibc set the address 16 bytes too       */
00950          /* low while the initialization code is running.             */
00951          if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) {
00952            return __libc_stack_end + 0x10;
00953          } /* Otherwise it's not safe to add 16 bytes and we fall     */
00954            /* back to using /proc.                             */
00955 #      else 
00956 #      ifdef SPARC
00957          /* Older versions of glibc for 64-bit Sparc do not set
00958           * this variable correctly; it gets set to either zero
00959           * or one.
00960           */
00961          if (__libc_stack_end != (ptr_t) (unsigned long)0x1)
00962            return __libc_stack_end;
00963 #      else
00964          return __libc_stack_end;
00965 #      endif
00966 #      endif
00967       }
00968 #   endif
00969     f = open("/proc/self/stat", O_RDONLY);
00970     if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) {
00971        ABORT("Couldn't read /proc/self/stat");
00972     }
00973     c = stat_buf[buf_offset++];
00974     /* Skip the required number of fields.  This number is hopefully  */
00975     /* constant across all Linux implementations.                     */
00976       for (i = 0; i < STAT_SKIP; ++i) {
00977        while (isspace(c)) c = stat_buf[buf_offset++];
00978        while (!isspace(c)) c = stat_buf[buf_offset++];
00979       }
00980     while (isspace(c)) c = stat_buf[buf_offset++];
00981     while (isdigit(c)) {
00982       result *= 10;
00983       result += c - '0';
00984       c = stat_buf[buf_offset++];
00985     }
00986     close(f);
00987     if (result < 0x10000000) ABORT("Absurd stack bottom value");
00988     return (ptr_t)result;
00989   }
00990 
00991 #endif /* LINUX_STACKBOTTOM */
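
A hedged sanity check (hypothetical helper, and it assumes a downward-growing stack): any automatic variable should lie below the stack bottom computed above.

    static void linux_stack_bottom_check_demo(void)
    {
        int marker;
        ptr_t bottom = GC_linux_stack_base();

        GC_ASSERT((ptr_t)(&marker) < bottom);
    }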
00992 
00993 #ifdef FREEBSD_STACKBOTTOM
00994 
00995 /* This uses an undocumented sysctl call, but at least one expert     */
00996 /* believes it will stay.                                      */
00997 
00998 #include <unistd.h>
00999 #include <sys/types.h>
01000 #include <sys/sysctl.h>
01001 
01002   ptr_t GC_freebsd_stack_base(void)
01003   {
01004     int nm[2] = {CTL_KERN, KERN_USRSTACK};
01005     ptr_t base;
01006     size_t len = sizeof(ptr_t);
01007     int r = sysctl(nm, 2, &base, &len, NULL, 0);
01008     
01009     if (r) ABORT("Error getting stack base");
01010 
01011     return base;
01012   }
01013 
01014 #endif /* FREEBSD_STACKBOTTOM */
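
For comparison, a sketch of the same query made by name; sysctlbyname is a documented FreeBSD interface, but this variant is not what the collector actually uses.

    static ptr_t freebsd_stack_base_by_name_demo(void)
    {
        ptr_t base = 0;
        size_t len = sizeof(base);

        if (sysctlbyname("kern.usrstack", &base, &len, NULL, 0) != 0)
            ABORT("Error getting stack base");
        return base;
    }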
01015 
01016 #if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
01017     && !defined(MSWINCE) && !defined(OS2) && !defined(NOSYS) && !defined(ECOS)
01018 
01019 ptr_t GC_get_stack_base()
01020 {
01021 #   if defined(HEURISTIC1) || defined(HEURISTIC2) || \
01022        defined(LINUX_STACKBOTTOM) || defined(FREEBSD_STACKBOTTOM)
01023     word dummy;
01024     ptr_t result;
01025 #   endif
01026 
01027 #   define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
01028 
01029 #   ifdef STACKBOTTOM
01030        return(STACKBOTTOM);
01031 #   else
01032 #      ifdef HEURISTIC1
01033 #         ifdef STACK_GROWS_DOWN
01034             result = (ptr_t)((((word)(&dummy))
01035                             + STACKBOTTOM_ALIGNMENT_M1)
01036                            & ~STACKBOTTOM_ALIGNMENT_M1);
01037 #         else
01038             result = (ptr_t)(((word)(&dummy))
01039                            & ~STACKBOTTOM_ALIGNMENT_M1);
01040 #         endif
01041 #      endif /* HEURISTIC1 */
01042 #      ifdef LINUX_STACKBOTTOM
01043           result = GC_linux_stack_base();
01044 #      endif
01045 #      ifdef FREEBSD_STACKBOTTOM
01046           result = GC_freebsd_stack_base();
01047 #      endif
01048 #      ifdef HEURISTIC2
01049 #          ifdef STACK_GROWS_DOWN
01050               result = GC_find_limit((ptr_t)(&dummy), TRUE);
01051 #             ifdef HEURISTIC2_LIMIT
01052                   if (result > HEURISTIC2_LIMIT
01053                       && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
01054                           result = HEURISTIC2_LIMIT;
01055                   }
01056 #              endif
01057 #          else
01058               result = GC_find_limit((ptr_t)(&dummy), FALSE);
01059 #             ifdef HEURISTIC2_LIMIT
01060                   if (result < HEURISTIC2_LIMIT
01061                       && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
01062                           result = HEURISTIC2_LIMIT;
01063                   }
01064 #              endif
01065 #          endif
01066 
01067 #      endif /* HEURISTIC2 */
01068 #      ifdef STACK_GROWS_DOWN
01069            if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
01070 #      endif
01071        return(result);
01072 #   endif /* STACKBOTTOM */
01073 }
01074 
01075 # endif /* ! AMIGA, !OS 2, ! MS Windows, !BEOS, !NOSYS, !ECOS */
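
A sketch of how the computed value feeds the marker, assuming a downward-growing stack: the hot end is approximated by a local's address and everything up to the stack bottom gets pushed.  GC_push_all_stack is declared in gc_priv.h; the wrapper itself is hypothetical.

    static void push_my_stack_demo(void)
    {
        word dummy;
        ptr_t hot = (ptr_t)(&dummy);        /* approximate stack pointer   */
        ptr_t cold = GC_get_stack_base();   /* highest address to scan     */

        GC_push_all_stack(hot, cold);       /* push everything in between  */
    }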
01076 
01077 /*
01078  * Register static data segment(s) as roots.
01079  * If more data segments are added later then they need to be registered
01080  * at that point (as we do with SunOS dynamic loading),
01081  * or GC_mark_roots needs to check for them (as we do with PCR).
01082  * Called with allocator lock held.
01083  */
01084 
01085 # ifdef OS2
01086 
01087 void GC_register_data_segments()
01088 {
01089     PTIB ptib;
01090     PPIB ppib;
01091     HMODULE module_handle;
01092 #   define PBUFSIZ 512
01093     UCHAR path[PBUFSIZ];
01094     FILE * myexefile;
01095     struct exe_hdr hdrdos;  /* MSDOS header.     */
01096     struct e32_exe hdr386;  /* Real header for my executable */
01097     struct o32_obj seg;     /* Current segment */
01098     int nsegs;
01099     
01100     
01101     if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
01102        GC_err_printf0("DosGetInfoBlocks failed\n");
01103        ABORT("DosGetInfoBlocks failed\n");
01104     }
01105     module_handle = ppib -> pib_hmte;
01106     if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
01107        GC_err_printf0("DosQueryModuleName failed\n");
01108        ABORT("DosQueryModuleName failed\n");
01109     }
01110     myexefile = fopen(path, "rb");
01111     if (myexefile == 0) {
01112         GC_err_puts("Couldn't open executable ");
01113         GC_err_puts(path); GC_err_puts("\n");
01114         ABORT("Failed to open executable\n");
01115     }
01116     if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
01117         GC_err_puts("Couldn't read MSDOS header from ");
01118         GC_err_puts(path); GC_err_puts("\n");
01119         ABORT("Couldn't read MSDOS header");
01120     }
01121     if (E_MAGIC(hdrdos) != EMAGIC) {
01122         GC_err_puts("Executable has wrong DOS magic number: ");
01123         GC_err_puts(path); GC_err_puts("\n");
01124         ABORT("Bad DOS magic number");
01125     }
01126     if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
01127         GC_err_puts("Seek to new header failed in ");
01128         GC_err_puts(path); GC_err_puts("\n");
01129         ABORT("Seek to new header failed");
01130     }
01131     if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
01132         GC_err_puts("Couldn't read OS/2 header from ");
01133         GC_err_puts(path); GC_err_puts("\n");
01134         ABORT("Couldn't read OS/2 header");
01135     }
01136     if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
01137         GC_err_puts("Executable has wrong OS/2 magic number:");
01138         GC_err_puts(path); GC_err_puts("\n");
01139         ABORT("Bad OS/2 magic number");
01140     }
01141     if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
01142         GC_err_puts("Executable has wrong byte order: ");
01143         GC_err_puts(path); GC_err_puts("\n");
01144         ABORT("Bad byte order");
01145     }
01146     if ( E32_CPU(hdr386) == E32CPU286) {
01147         GC_err_puts("GC can't handle 80286 executables: ");
01148         GC_err_puts(path); GC_err_puts("\n");
01149         EXIT();
01150     }
01151     if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
01152              SEEK_SET) != 0) {
01153         GC_err_puts("Seek to object table failed: ");
01154         GC_err_puts(path); GC_err_puts("\n");
01155         ABORT("Seek to object table failed");
01156     }
01157     for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
01158       int flags;
01159       if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
01160         GC_err_puts("Couldn't read obj table entry from ");
01161         GC_err_puts(path); GC_err_puts("\n");
01162         ABORT("Couldn't read obj table entry");
01163       }
01164       flags = O32_FLAGS(seg);
01165       if (!(flags & OBJWRITE)) continue;
01166       if (!(flags & OBJREAD)) continue;
01167       if (flags & OBJINVALID) {
01168           GC_err_printf0("Object with invalid pages?\n");
01169           continue;
01170       } 
01171       GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
01172     }
01173 }
01174 
01175 # else /* !OS2 */
01176 
01177 # if defined(MSWIN32) || defined(MSWINCE)
01178 
01179 # ifdef MSWIN32
01180   /* Unfortunately, we have to handle win32s very differently from NT, */
01181   /* since VirtualQuery has very different semantics.  In particular,  */
01182   /* under win32s a VirtualQuery call on an unmapped page returns an  */
01183   /* invalid result.  Under NT, GC_register_data_segments is a noop and      */
01184   /* all real work is done by GC_register_dynamic_libraries.  Under   */
01185   /* win32s, we cannot find the data segments associated with dll's.  */
01186   /* We register the main data segment here.                          */
01187   GC_bool GC_no_win32_dlls = FALSE;        
01188        /* This used to be set for gcc, to avoid dealing with          */
01189        /* the structured exception handling issues.  But we now have  */
01190        /* assembly code to do that right.                      */
01191   GC_bool GC_wnt = FALSE;
01192         /* This is a Windows NT derivative, i.e. NT, W2K, XP or later.  */
01193   
01194   void GC_init_win32()
01195   {
01196     /* if we're running under win32s, assume that no DLLs will be loaded */
01197     DWORD v = GetVersion();
01198     GC_wnt = !(v & 0x80000000);
01199     GC_no_win32_dlls |= ((!GC_wnt) && (v & 0xff) <= 3);
01200   }
01201 
01202   /* Return the smallest address a such that VirtualQuery             */
01203   /* returns correct results for all addresses between a and start.   */
01204   /* Assumes VirtualQuery returns correct information for start.      */
01205   ptr_t GC_least_described_address(ptr_t start)
01206   {  
01207     MEMORY_BASIC_INFORMATION buf;
01208     DWORD result;
01209     LPVOID limit;
01210     ptr_t p;
01211     LPVOID q;
01212     
01213     limit = GC_sysinfo.lpMinimumApplicationAddress;
01214     p = (ptr_t)((word)start & ~(GC_page_size - 1));
01215     for (;;) {
01216        q = (LPVOID)(p - GC_page_size);
01217        if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
01218        result = VirtualQuery(q, &buf, sizeof(buf));
01219        if (result != sizeof(buf) || buf.AllocationBase == 0) break;
01220        p = (ptr_t)(buf.AllocationBase);
01221     }
01222     return(p);
01223   }
01224 # endif
01225 
01226 # ifndef REDIRECT_MALLOC
01227   /* We maintain a linked list of AllocationBase values that we know  */
01228   /* correspond to malloc heap sections.  Currently this is only called */
01229   /* during a GC.  But there is some hope that for long running              */
01230   /* programs we will eventually see most heap sections.              */
01231 
01232   /* In the long run, it would be more reliable to occasionally walk  */
01233   /* the malloc heap with HeapWalk on the default heap.  But that     */
01234   /* apparently works only for NT-based Windows.                      */ 
01235 
01236   /* In the long run, a better data structure would also be nice ...  */
01237   struct GC_malloc_heap_list {
01238     void * allocation_base;
01239     struct GC_malloc_heap_list *next;
01240   } *GC_malloc_heap_l = 0;
01241 
01242   /* Is p the base of one of the malloc heap sections we already know */
01243   /* about?                                                    */
01244   GC_bool GC_is_malloc_heap_base(ptr_t p)
01245   {
01246     struct GC_malloc_heap_list *q = GC_malloc_heap_l;
01247 
01248     while (0 != q) {
01249       if (q -> allocation_base == p) return TRUE;
01250       q = q -> next;
01251     }
01252     return FALSE;
01253   }
01254 
01255   void *GC_get_allocation_base(void *p)
01256   {
01257     MEMORY_BASIC_INFORMATION buf;
01258     DWORD result = VirtualQuery(p, &buf, sizeof(buf));
01259     if (result != sizeof(buf)) {
01260       ABORT("Weird VirtualQuery result");
01261     }
01262     return buf.AllocationBase;
01263   }
01264 
01265   size_t GC_max_root_size = 100000;       /* Appr. largest root size. */
01266 
01267   void GC_add_current_malloc_heap()
01268   {
01269     struct GC_malloc_heap_list *new_l =
01270                  malloc(sizeof(struct GC_malloc_heap_list));
01271     void * candidate = GC_get_allocation_base(new_l);
01272 
01273     if (new_l == 0) return;
01274     if (GC_is_malloc_heap_base(candidate)) {
01275       /* Try a little harder to find malloc heap.                     */
01276        size_t req_size = 10000;
01277        do {
01278          void *p = malloc(req_size);
01279          if (0 == p) { free(new_l); return; }
01280          candidate = GC_get_allocation_base(p);
01281          free(p);
01282          req_size *= 2;
01283        } while (GC_is_malloc_heap_base(candidate)
01284                 && req_size < GC_max_root_size/10 && req_size < 500000);
01285        if (GC_is_malloc_heap_base(candidate)) {
01286          free(new_l); return;
01287        }
01288     }
01289 #   ifdef CONDPRINT
01290       if (GC_print_stats)
01291          GC_printf1("Found new system malloc AllocationBase at 0x%lx\n",
01292                      candidate);
01293 #   endif
01294     new_l -> allocation_base = candidate;
01295     new_l -> next = GC_malloc_heap_l;
01296     GC_malloc_heap_l = new_l;
01297   }
01298 # endif /* REDIRECT_MALLOC */
01299   
01300   /* Is p the start of either the malloc heap, or of one of our */
01301   /* heap sections?                                     */
01302   GC_bool GC_is_heap_base (ptr_t p)
01303   {
01304      
01305      unsigned i;
01306      
01307 #    ifndef REDIRECT_MALLOC
01308        static word last_gc_no = -1;
01309      
01310        if (last_gc_no != GC_gc_no) {
01311         GC_add_current_malloc_heap();
01312         last_gc_no = GC_gc_no;
01313        }
01314        if (GC_root_size > GC_max_root_size) GC_max_root_size = GC_root_size;
01315        if (GC_is_malloc_heap_base(p)) return TRUE;
01316 #    endif
01317      for (i = 0; i < GC_n_heap_bases; i++) {
01318          if (GC_heap_bases[i] == p) return TRUE;
01319      }
01320      return FALSE ;
01321   }
01322 
01323 # ifdef MSWIN32
01324   void GC_register_root_section(ptr_t static_root)
01325   {
01326       MEMORY_BASIC_INFORMATION buf;
01327       DWORD result;
01328       DWORD protect;
01329       LPVOID p;
01330       char * base;
01331       char * limit, * new_limit;
01332     
01333       if (!GC_no_win32_dlls) return;
01334       p = base = limit = GC_least_described_address(static_root);
01335       while (p < GC_sysinfo.lpMaximumApplicationAddress) {
01336         result = VirtualQuery(p, &buf, sizeof(buf));
01337         if (result != sizeof(buf) || buf.AllocationBase == 0
01338             || GC_is_heap_base(buf.AllocationBase)) break;
01339         new_limit = (char *)p + buf.RegionSize;
01340         protect = buf.Protect;
01341         if (buf.State == MEM_COMMIT
01342             && is_writable(protect)) {
01343             if ((char *)p == limit) {
01344                 limit = new_limit;
01345             } else {
01346                 if (base != limit) GC_add_roots_inner(base, limit, FALSE);
01347                 base = p;
01348                 limit = new_limit;
01349             }
01350         }
01351         if (p > (LPVOID)new_limit /* overflow */) break;
01352         p = (LPVOID)new_limit;
01353       }
01354       if (base != limit) GC_add_roots_inner(base, limit, FALSE);
01355   }
01356 #endif
01357   
01358   void GC_register_data_segments()
01359   {
01360 #     ifdef MSWIN32
01361       static char dummy;
01362       GC_register_root_section((ptr_t)(&dummy));
01363 #     endif
01364   }
01365 
01366 # else /* !OS2 && !Windows */
01367 
01368 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
01369       || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
01370 ptr_t GC_SysVGetDataStart(max_page_size, etext_addr)
01371 int max_page_size;
01372 int * etext_addr;
01373 {
01374     word text_end = ((word)(etext_addr) + sizeof(word) - 1)
01375                   & ~(sizeof(word) - 1);
01376        /* etext rounded to word boundary  */
01377     word next_page = ((text_end + (word)max_page_size - 1)
01378                     & ~((word)max_page_size - 1));
01379     word page_offset = (text_end & ((word)max_page_size - 1));
01380     VOLATILE char * result = (char *)(next_page + page_offset);
01381     /* Note that this isn't equivalent to just adding          */
01382     /* max_page_size to &etext if &etext is at a page boundary */
01383     
01384     GC_setup_temporary_fault_handler();
01385     if (SETJMP(GC_jmp_buf) == 0) {
01386        /* Try writing to the address.     */
01387        *result = *result;
01388         GC_reset_fault_handler();
01389     } else {
01390         GC_reset_fault_handler();
01391        /* We got here via a longjmp.  The address is not readable.    */
01392        /* This is known to happen under Solaris 2.4 + gcc, which places */
01393        /* string constants in the text segment, but after etext.      */
01394        /* Use plan B.  Note that we now know there is a gap between   */
01395        /* text and data segments, so plan A bought us something.      */
01396        result = (char *)GC_find_limit((ptr_t)(DATAEND), FALSE);
01397     }
01398     return((ptr_t)result);
01399 }
01400 # endif
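
gcconfig.h typically points DATASTART at this routine; the rough shape is sketched below, but the page-size constant and the etext symbol vary from port to port, so treat this as illustrative only.

    /*    extern int _etext[];                                             */
    /*    #define DATASTART ((ptr_t)GC_SysVGetDataStart(0x10000, _etext))  */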
01401 
01402 # if defined(FREEBSD) && (defined(I386) || defined(powerpc) || defined(__powerpc__)) && !defined(PCR)
01403 /* It's unclear whether this should be identical to the above, or     */
01404 /* whether it should apply to non-X86 architectures.                  */
01405 /* For now we don't assume that there is always an empty page after   */
01406 /* etext.  But in some cases there actually seems to be slightly more.  */
01407 /* This also deals with holes between read-only data and writable data.      */
01408 ptr_t GC_FreeBSDGetDataStart(max_page_size, etext_addr)
01409 int max_page_size;
01410 int * etext_addr;
01411 {
01412     word text_end = ((word)(etext_addr) + sizeof(word) - 1)
01413                    & ~(sizeof(word) - 1);
01414        /* etext rounded to word boundary  */
01415     VOLATILE word next_page = (text_end + (word)max_page_size - 1)
01416                            & ~((word)max_page_size - 1);
01417     VOLATILE ptr_t result = (ptr_t)text_end;
01418     GC_setup_temporary_fault_handler();
01419     if (SETJMP(GC_jmp_buf) == 0) {
01420        /* Try reading at the address.                          */
01421        /* This should happen before there is another thread.   */
01422        for (; next_page < (word)(DATAEND); next_page += (word)max_page_size)
01423            *(VOLATILE char *)next_page;
01424        GC_reset_fault_handler();
01425     } else {
01426        GC_reset_fault_handler();
01427        /* As above, we go to plan B       */
01428        result = GC_find_limit((ptr_t)(DATAEND), FALSE);
01429     }
01430     return(result);
01431 }
01432 
01433 # endif
01434 
01435 
01436 #ifdef AMIGA
01437 
01438 #  define GC_AMIGA_DS
01439 #  include "AmigaOS.c"
01440 #  undef GC_AMIGA_DS
01441 
01442 #else /* !OS2 && !Windows && !AMIGA */
01443 
01444 void GC_register_data_segments()
01445 {
01446 #   if !defined(PCR) && !defined(SRC_M3) && !defined(MACOS)
01447 #     if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
01448        /* As of Solaris 2.3, the Solaris threads implementation       */
01449        /* allocates the data structure for the initial thread with    */
01450        /* sbrk at process startup.  It needs to be scanned, so that   */
01451        /* we don't lose some malloc allocated data structures         */
01452        /* hanging from it.  We're on thin ice here ...                */
01453         extern caddr_t sbrk();
01454 
01455        GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
01456 #     else
01457        GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
01458 #       if defined(DATASTART2)
01459          GC_add_roots_inner(DATASTART2, (char *)(DATAEND2), FALSE);
01460 #       endif
01461 #     endif
01462 #   endif
01463 #   if defined(MACOS)
01464     {
01465 #   if defined(THINK_C)
01466        extern void* GC_MacGetDataStart(void);
01467        /* globals begin above stack and end at a5. */
01468        GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
01469                         (ptr_t)LMGetCurrentA5(), FALSE);
01470 #   else
01471 #     if defined(__MWERKS__)
01472 #       if !__POWERPC__
01473          extern void* GC_MacGetDataStart(void);
01474          /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
01475 #         if __option(far_data)
01476          extern void* GC_MacGetDataEnd(void);
01477 #         endif
01478          /* globals begin above stack and end at a5. */
01479          GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
01480                           (ptr_t)LMGetCurrentA5(), FALSE);
01481          /* MATTHEW: Handle Far Globals */                          
01482 #         if __option(far_data)
01483       /* Far globals follow the QD globals: */
01484          GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
01485                           (ptr_t)GC_MacGetDataEnd(), FALSE);
01486 #         endif
01487 #       else
01488          extern char __data_start__[], __data_end__[];
01489          GC_add_roots_inner((ptr_t)&__data_start__,
01490                           (ptr_t)&__data_end__, FALSE);
01491 #       endif /* __POWERPC__ */
01492 #     endif /* __MWERKS__ */
01493 #   endif /* !THINK_C */
01494     }
01495 #   endif /* MACOS */
01496 
01497     /* Dynamic libraries are added at every collection, since they may  */
01498     /* change.                                                        */
01499 }
01500 
01501 # endif  /* ! AMIGA */
01502 # endif  /* ! MSWIN32 && ! MSWINCE*/
01503 # endif  /* ! OS2 */
01504 
01505 /*
01506  * Auxiliary routines for obtaining memory from OS.
01507  */
01508 
01509 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
01510        && !defined(MSWIN32) && !defined(MSWINCE) \
01511        && !defined(MACOS) && !defined(DOS4GW) && !defined(NONSTOP)
01512 
01513 # ifdef SUNOS4
01514     extern caddr_t sbrk();
01515 # endif
01516 # ifdef __STDC__
01517 #   define SBRK_ARG_T ptrdiff_t
01518 # else
01519 #   define SBRK_ARG_T int
01520 # endif
01521 
01522 
01523 # if 0 && defined(RS6000)  /* We now use mmap */
01524 /* The compiler seems to generate speculative reads one past the end of      */
01525 /* an allocated object.  Hence we need to make sure that the page     */
01526 /* following the last heap page is also mapped.                       */
01527 ptr_t GC_unix_get_mem(bytes)
01528 word bytes;
01529 {
01530     caddr_t cur_brk = (caddr_t)sbrk(0);
01531     caddr_t result;
01532     SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
01533     static caddr_t my_brk_val = 0;
01534     
01535     if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
01536     if (lsbs != 0) {
01537         if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
01538     }
01539     if (cur_brk == my_brk_val) {
01540        /* Use the extra block we allocated last time. */
01541         result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
01542         if (result == (caddr_t)(-1)) return(0);
01543         result -= GC_page_size;
01544     } else {
01545         result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
01546         if (result == (caddr_t)(-1)) return(0);
01547     }
01548     my_brk_val = result + bytes + GC_page_size;  /* Always page aligned */
01549     return((ptr_t)result);
01550 }
01551 
01552 #else  /* Not RS6000 */
01553 
01554 #if defined(USE_MMAP) || defined(USE_MUNMAP)
01555 
01556 #ifdef USE_MMAP_FIXED
01557 #   define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
01558        /* Seems to yield better performance on Solaris 2, but can     */
01559        /* be unreliable if something is already mapped at the address.       */
01560 #else
01561 #   define GC_MMAP_FLAGS MAP_PRIVATE
01562 #endif
01563 
01564 #ifdef USE_MMAP_ANON
01565 # define zero_fd -1
01566 # if defined(MAP_ANONYMOUS)
01567 #   define OPT_MAP_ANON MAP_ANONYMOUS
01568 # else
01569 #   define OPT_MAP_ANON MAP_ANON
01570 # endif
01571 #else
01572   static int zero_fd;
01573 # define OPT_MAP_ANON 0
01574 #endif 
01575 
01576 #endif /* defined(USE_MMAP) || defined(USE_MUNMAP) */
01577 
01578 #if defined(USE_MMAP)
01579 /* Tested only under Linux, IRIX5 and Solaris 2 */
01580 
01581 #ifndef HEAP_START
01582 #   define HEAP_START 0
01583 #endif
01584 
01585 ptr_t GC_unix_get_mem(bytes)
01586 word bytes;
01587 {
01588     void *result;
01589     static ptr_t last_addr = HEAP_START;
01590 
01591 #   ifndef USE_MMAP_ANON
01592       static GC_bool initialized = FALSE;
01593 
01594       if (!initialized) {
01595          zero_fd = open("/dev/zero", O_RDONLY);
01596          fcntl(zero_fd, F_SETFD, FD_CLOEXEC);
01597          initialized = TRUE;
01598       }
01599 #   endif
01600 
01601     if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
01602     result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
01603                 GC_MMAP_FLAGS | OPT_MAP_ANON, zero_fd, 0/* offset */);
01604     if (result == MAP_FAILED) return(0);
01605     last_addr = (ptr_t)result + bytes + GC_page_size - 1;
01606     last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
01607 #   if !defined(LINUX)
01608       if (last_addr == 0) {
01609         /* Oops.  We got the end of the address space.  This isn't    */
01610        /* usable by arbitrary C code, since one-past-end pointers     */
01611        /* don't work, so we discard it and try again.                 */
01612        munmap(result, (size_t)(-GC_page_size) - (size_t)result);
01613                      /* Leave last page mapped, so we can't repeat. */
01614        return GC_unix_get_mem(bytes);
01615       }
01616 #   else
01617       GC_ASSERT(last_addr != 0);
01618 #   endif
01619     return((ptr_t)result);
01620 }
01621 
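/* The rounding idiom used for last_addr above works for any power-of-   */
/* two page size: add (page_size - 1) and mask off the low bits to round */
/* an address up to the next page boundary.  A minimal sketch, assuming  */
/* a 4096-byte page; round_up_to_page is illustrative and not part of    */
/* the collector:                                                        */
#if 0
static unsigned long round_up_to_page(unsigned long addr,
                                      unsigned long page_size)
{
    /* page_size must be a power of two. */
    return (addr + page_size - 1) & ~(page_size - 1);
}
/* round_up_to_page(0x12345, 4096) == 0x13000 */
#endif
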
01622 #else /* Not RS6000, not USE_MMAP */
01623 ptr_t GC_unix_get_mem(bytes)
01624 word bytes;
01625 {
01626   ptr_t result;
01627 # ifdef IRIX5
01628     /* Bare sbrk isn't thread safe.  Play by malloc rules.     */
01629     /* The equivalent may be needed on other systems as well.  */
01630     __LOCK_MALLOC();
01631 # endif
01632   {
01633     ptr_t cur_brk = (ptr_t)sbrk(0);
01634     SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
01635     
01636     if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
01637     if (lsbs != 0) {
01638         if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
01639     }
01640     result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
01641     if (result == (ptr_t)(-1)) result = 0;
01642   }
01643 # ifdef IRIX5
01644     __UNLOCK_MALLOC();
01645 # endif
01646   return(result);
01647 }
01648 
01649 #endif /* Not USE_MMAP */
01650 #endif /* Not RS6000 */
01651 
01652 # endif /* UN*X */
01653 
01654 # ifdef OS2
01655 
01656 void * os2_alloc(size_t bytes)
01657 {
01658     void * result;
01659 
01660     if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
01661                                 PAG_WRITE | PAG_COMMIT)
01662                   != NO_ERROR) {
01663        return(0);
01664     }
01665     if (result == 0) return(os2_alloc(bytes));
01666     return(result);
01667 }
01668 
01669 # endif /* OS2 */
01670 
01671 
01672 # if defined(MSWIN32) || defined(MSWINCE)
01673 SYSTEM_INFO GC_sysinfo;
01674 # endif
01675 
01676 # ifdef MSWIN32
01677 
01678 # ifdef USE_GLOBAL_ALLOC
01679 #   define GLOBAL_ALLOC_TEST 1
01680 # else
01681 #   define GLOBAL_ALLOC_TEST GC_no_win32_dlls
01682 # endif
01683 
01684 word GC_n_heap_bases = 0;
01685 
01686 ptr_t GC_win32_get_mem(bytes)
01687 word bytes;
01688 {
01689     ptr_t result;
01690 
01691     if (GLOBAL_ALLOC_TEST) {
01692        /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE.    */
01693        /* There are also unconfirmed rumors of other           */
01694        /* problems, so we dodge the issue.                     */
01695         result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
01696         result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
01697     } else {
01698        /* VirtualProtect only works on regions returned by a   */
01699        /* single VirtualAlloc call.  Thus we allocate one      */
01700        /* extra page, which will prevent merging of blocks     */
01701        /* in separate regions, and eliminate any temptation    */
01702        /* to call VirtualProtect on a range spanning regions.  */
01703        /* This wastes a small amount of memory, and risks      */
01704        /* increased fragmentation.  But better alternatives    */
01705        /* would require effort.                         */
01706         /* PLTSCHEME: use more conservative PAGE_READWRITE */
01707         result = (ptr_t) VirtualAlloc(NULL, bytes + 1,
01708                                   MEM_COMMIT | MEM_RESERVE,
01709                                   PAGE_READWRITE);
01710     }
01711     if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
01712        /* If I read the documentation correctly, this can      */
01713        /* only happen if HBLKSIZE > 64k or not a power of 2.   */
01714     if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
01715     GC_heap_bases[GC_n_heap_bases++] = result;
01716     return(result);                  
01717 }
01718 
01719 void GC_win32_free_heap ()
01720 {
01721     if (GC_no_win32_dlls) {
01722        while (GC_n_heap_bases > 0) {
01723            GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
01724            GC_heap_bases[GC_n_heap_bases] = 0;
01725        }
01726     }
01727 }
01728 # endif
01729 
01730 #ifdef AMIGA
01731 # define GC_AMIGA_AM
01732 # include "AmigaOS.c"
01733 # undef GC_AMIGA_AM
01734 #endif
01735 
01736 
01737 # ifdef MSWINCE
01738 word GC_n_heap_bases = 0;
01739 
01740 ptr_t GC_wince_get_mem(bytes)
01741 word bytes;
01742 {
01743     ptr_t result;
01744     word i;
01745 
01746     /* Round up allocation size to multiple of page size */
01747     bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);
01748 
01749     /* Try to find reserved, uncommitted pages */
01750     for (i = 0; i < GC_n_heap_bases; i++) {
01751        if (((word)(-(signed_word)GC_heap_lengths[i])
01752             & (GC_sysinfo.dwAllocationGranularity-1))
01753            >= bytes) {
01754            result = GC_heap_bases[i] + GC_heap_lengths[i];
01755            break;
01756        }
01757     }
01758 
01759     if (i == GC_n_heap_bases) {
01760        /* Reserve more pages */
01761        word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
01762                       & ~(GC_sysinfo.dwAllocationGranularity-1);
01763        /* If we ever support MPROTECT_VDB here, we will probably need to     */
01764        /* ensure that res_bytes is strictly > bytes, so that VirtualProtect  */
01765        /* never spans regions.  It seems to be OK for a VirtualFree argument */
01766        /* to span regions, so we should be OK for now.                       */
01767        result = (ptr_t) VirtualAlloc(NULL, res_bytes,
01768                                   MEM_RESERVE | MEM_TOP_DOWN,
01769                                   PAGE_EXECUTE_READWRITE);
01770        if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
01771            /* If I read the documentation correctly, this can  */
01772            /* only happen if HBLKSIZE > 64k or not a power of 2.      */
01773        if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
01774        GC_heap_bases[GC_n_heap_bases] = result;
01775        GC_heap_lengths[GC_n_heap_bases] = 0;
01776        GC_n_heap_bases++;
01777     }
01778 
01779     /* Commit pages */
01780     result = (ptr_t) VirtualAlloc(result, bytes,
01781                               MEM_COMMIT,
01782                               PAGE_EXECUTE_READWRITE);
01783     if (result != NULL) {
01784        if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
01785        GC_heap_lengths[i] += bytes;
01786     }
01787 
01788     return(result);                  
01789 }
01790 # endif
01791 
01792 #ifdef USE_MUNMAP
01793 
01794 /* For now, this only works on Win32/WinCE and some Unix-like  */
01795 /* systems.  If you have something else, don't define          */
01796 /* USE_MUNMAP.                                                 */
01797 /* We assume ANSI C to support this feature.                   */
01798 
01799 #if !defined(MSWIN32) && !defined(MSWINCE)
01800 
01801 #include <unistd.h>
01802 #include <sys/mman.h>
01803 #include <sys/stat.h>
01804 #include <sys/types.h>
01805 
01806 #endif
01807 
01808 /* Compute a page aligned starting address for the unmap       */
01809 /* operation on a block of size bytes starting at start.       */
01810 /* Return 0 if the block is too small to make this feasible.   */
01811 ptr_t GC_unmap_start(ptr_t start, word bytes)
01812 {
01813     ptr_t result = start;
01814     /* Round start to next page boundary.       */
01815         result += GC_page_size - 1;
01816         result = (ptr_t)((word)result & ~(GC_page_size - 1));
01817     if (result + GC_page_size > start + bytes) return 0;
01818     return result;
01819 }
01820 
01821 /* Compute end address for an unmap operation on the indicated */
01822 /* block.                                               */
01823 ptr_t GC_unmap_end(ptr_t start, word bytes)
01824 {
01825     ptr_t end_addr = start + bytes;
01826     end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
01827     return end_addr;
01828 }
01829 
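/* Together these two helpers pick out the largest run of whole pages    */
/* lying strictly inside [start, start+bytes); only that run is ever     */
/* handed to the unmap machinery below.  A usage sketch, assuming        */
/* GC_page_size is 4096; the addresses and the wrapper function are      */
/* illustrative:                                                         */
#if 0
static void unmap_bounds_example(void)
{
    ptr_t blk = (ptr_t)0x10100;         /* block start, not page aligned */
    word  len = 3 * 4096;               /* three pages worth of data     */
    ptr_t s = GC_unmap_start(blk, len); /* 0x11000: next page boundary   */
    ptr_t e = GC_unmap_end(blk, len);   /* 0x13000: last page boundary   */
    /* Only [s, e) is unmapped.  A block too small to contain a whole    */
    /* aligned page yields s == 0 and is simply left mapped.             */
}
#endif
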
01830 /* Under Win32/WinCE we commit (map) and decommit (unmap)      */
01831 /* memory using VirtualAlloc and VirtualFree.  These functions         */
01832 /* work on individual allocations of virtual memory, made      */
01833 /* previously using VirtualAlloc with the MEM_RESERVE flag.    */
01834 /* The ranges we need to (de)commit may span several of these  */
01835 /* allocations; therefore we use VirtualQuery to check         */
01836 /* allocation lengths, and split up the range as necessary.    */
01837 
01838 /* We assume that GC_remap is called on exactly the same range */
01839 /* as a previous call to GC_unmap.  It is safe to consistently */
01840 /* round the endpoints in both places.                         */
01841 void GC_unmap(ptr_t start, word bytes)
01842 {
01843     ptr_t start_addr = GC_unmap_start(start, bytes);
01844     ptr_t end_addr = GC_unmap_end(start, bytes);
01845     word len = end_addr - start_addr;
01846     if (0 == start_addr) return;
01847 #   if defined(MSWIN32) || defined(MSWINCE)
01848       while (len != 0) {
01849           MEMORY_BASIC_INFORMATION mem_info;
01850          GC_word free_len;
01851          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
01852              != sizeof(mem_info))
01853              ABORT("Weird VirtualQuery result");
01854          free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
01855          if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
01856              ABORT("VirtualFree failed");
01857          GC_unmapped_bytes += free_len;
01858          start_addr += free_len;
01859          len -= free_len;
01860       }
01861 #   else
01862       /* We immediately remap it to prevent an intervening mmap from  */
01863       /* accidentally grabbing the same address space.                */
01864       {
01865        void * result;
01866         result = mmap(start_addr, len, PROT_NONE,
01867                     MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
01868                     zero_fd, 0/* offset */);
01869         if (result != (void *)start_addr) ABORT("mmap(...PROT_NONE...) failed");
01870       }
01871       GC_unmapped_bytes += len;
01872 #   endif
01873 }
01874 
01875 
01876 void GC_remap(ptr_t start, word bytes)
01877 {
01878     ptr_t start_addr = GC_unmap_start(start, bytes);
01879     ptr_t end_addr = GC_unmap_end(start, bytes);
01880     word len = end_addr - start_addr;
01881 
01882 #   if defined(MSWIN32) || defined(MSWINCE)
01883       ptr_t result;
01884 
01885       if (0 == start_addr) return;
01886       while (len != 0) {
01887           MEMORY_BASIC_INFORMATION mem_info;
01888          GC_word alloc_len;
01889          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
01890              != sizeof(mem_info))
01891              ABORT("Weird VirtualQuery result");
01892          alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
01893          result = VirtualAlloc(start_addr, alloc_len,
01894                             MEM_COMMIT,
01895                             PAGE_EXECUTE_READWRITE);
01896          if (result != start_addr) {
01897              ABORT("VirtualAlloc remapping failed");
01898          }
01899          GC_unmapped_bytes -= alloc_len;
01900          start_addr += alloc_len;
01901          len -= alloc_len;
01902       }
01903 #   else
01904       /* It was already remapped with PROT_NONE. */
01905       int result; 
01906 
01907       if (0 == start_addr) return;
01908       result = mprotect(start_addr, len,
01909                       PROT_READ | PROT_WRITE | OPT_PROT_EXEC);
01910       if (result != 0) {
01911          GC_err_printf3(
01912               "Mprotect failed at 0x%lx (length %ld) with errno %ld\n",
01913                start_addr, len, errno);
01914          ABORT("Mprotect remapping failed");
01915       }
01916       GC_unmapped_bytes -= len;
01917 #   endif
01918 }
01919 
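/* On the Unix path "unmapping" is really a PROT_NONE re-reservation:    */
/* the address range stays claimed, so no unrelated mmap can land in it, */
/* and remapping is just an mprotect back to read/write.  A stand-alone  */
/* sketch of that pattern, with error handling reduced to asserts;       */
/* MAP_ANON is spelled MAP_ANONYMOUS on some systems, and the function   */
/* names are illustrative rather than collector code:                    */
#if 0
#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>

static void discard_pages(void *p, size_t len)
{
    /* Drop the backing store but keep the address range reserved. */
    void *r = mmap(p, len, PROT_NONE,
                   MAP_PRIVATE | MAP_FIXED | MAP_ANON, -1, 0);
    assert(r == p);
}

static void reuse_pages(void *p, size_t len)
{
    /* Make the (now zero-filled) pages accessible again. */
    int r = mprotect(p, len, PROT_READ | PROT_WRITE);
    assert(r == 0);
}
#endif
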
01920 /* Two adjacent blocks have already been unmapped and are about to    */
01921 /* be merged.  Unmap the whole block.  This typically requires        */
01922 /* that we unmap a small section in the middle that was not previously       */
01923 /* unmapped due to alignment constraints.                      */
01924 void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
01925 {
01926     ptr_t start1_addr = GC_unmap_start(start1, bytes1);
01927     ptr_t end1_addr = GC_unmap_end(start1, bytes1);
01928     ptr_t start2_addr = GC_unmap_start(start2, bytes2);
01929     ptr_t end2_addr = GC_unmap_end(start2, bytes2);
01930     ptr_t start_addr = end1_addr;
01931     ptr_t end_addr = start2_addr;
01932     word len;
01933     GC_ASSERT(start1 + bytes1 == start2);
01934     if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
01935     if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
01936     if (0 == start_addr) return;
01937     len = end_addr - start_addr;
01938 #   if defined(MSWIN32) || defined(MSWINCE)
01939       while (len != 0) {
01940           MEMORY_BASIC_INFORMATION mem_info;
01941          GC_word free_len;
01942          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
01943              != sizeof(mem_info))
01944              ABORT("Weird VirtualQuery result");
01945          free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
01946          if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
01947              ABORT("VirtualFree failed");
01948          GC_unmapped_bytes += free_len;
01949          start_addr += free_len;
01950          len -= free_len;
01951       }
01952 #   else
01953       if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
01954       GC_unmapped_bytes += len;
01955 #   endif
01956 }
01957 
01958 #endif /* USE_MUNMAP */
01959 
01960 /* Routine for pushing any additional roots.  In THREADS       */
01961 /* environment, this is also responsible for marking from      */
01962 /* thread stacks.                                       */
01963 #ifndef THREADS
01964 void (*GC_push_other_roots)() = 0;
01965 #else /* THREADS */
01966 
01967 # ifdef PCR
01968 PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
01969 {
01970     struct PCR_ThCtl_TInfoRep info;
01971     PCR_ERes result;
01972     
01973     info.ti_stkLow = info.ti_stkHi = 0;
01974     result = PCR_ThCtl_GetInfo(t, &info);
01975     GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
01976     return(result);
01977 }
01978 
01979 /* Push the contents of an old object. We treat this as stack  */
01980 /* data only because that makes it robust against mark stack   */
01981 /* overflow.                                            */
01982 PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
01983 {
01984     GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
01985     return(PCR_ERes_okay);
01986 }
01987 
01988 
01989 void GC_default_push_other_roots GC_PROTO((void))
01990 {
01991     /* Traverse data allocated by previous memory managers.           */
01992        {
01993          extern struct PCR_MM_ProcsRep * GC_old_allocator;
01994          
01995          if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
01996                                              GC_push_old_obj, 0)
01997              != PCR_ERes_okay) {
01998              ABORT("Old object enumeration failed");
01999          }
02000        }
02001     /* Traverse all thread stacks. */
02002        if (PCR_ERes_IsErr(
02003                 PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
02004               || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
02005               ABORT("Thread stack marking failed\n");
02006        }
02007 }
02008 
02009 # endif /* PCR */
02010 
02011 # ifdef SRC_M3
02012 
02013 # ifdef ALL_INTERIOR_POINTERS
02014     --> misconfigured
02015 # endif
02016 
02017 void GC_push_thread_structures GC_PROTO((void))
02018 {
02019     /* Not our responsibility. */
02020 }
02021 
02022 extern void ThreadF__ProcessStacks();
02023 
02024 void GC_push_thread_stack(start, stop)
02025 word start, stop;
02026 {
02027    GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
02028 }
02029 
02030 /* Push routine with M3 specific calling convention. */
02031 GC_m3_push_root(dummy1, p, dummy2, dummy3)
02032 word *p;
02033 ptr_t dummy1, dummy2;
02034 int dummy3;
02035 {
02036     word q = *p;
02037     
02038     GC_PUSH_ONE_STACK(q, p);
02039 }
02040 
02041 /* M3 set equivalent to RTHeap.TracedRefTypes */
02042 typedef struct { int elts[1]; }  RefTypeSet;
02043 RefTypeSet GC_TracedRefTypes = {{0x1}};
02044 
02045 void GC_default_push_other_roots GC_PROTO((void))
02046 {
02047     /* Use the M3 provided routine for finding static roots.    */
02048     /* This is a bit dubious, since it presumes no C roots.     */
02049     /* We handle the collector roots explicitly in GC_push_roots */
02050        RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
02051        if (GC_words_allocd > 0) {
02052            ThreadF__ProcessStacks(GC_push_thread_stack);
02053        }
02054        /* Otherwise this isn't absolutely necessary, and we have      */
02055        /* startup ordering problems.                                  */
02056 }
02057 
02058 # endif /* SRC_M3 */
02059 
02060 # if defined(GC_SOLARIS_THREADS) || defined(GC_PTHREADS) || \
02061      defined(GC_WIN32_THREADS)
02062 
02063 extern void GC_push_all_stacks();
02064 
02065 void GC_default_push_other_roots GC_PROTO((void))
02066 {
02067     GC_push_all_stacks();
02068 }
02069 
02070 # endif /* GC_SOLARIS_THREADS || GC_PTHREADS */
02071 
02072 void (*GC_push_other_roots) GC_PROTO((void)) = GC_default_push_other_roots;
02073 
02074 #endif /* THREADS */
02075 
02076 /*
02077  * Routines for accessing dirty bits on virtual pages.
02078  * We plan to eventually implement four strategies for doing so:
02079  * DEFAULT_VDB:      A simple dummy implementation that treats every page
02080  *            as possibly dirty.  This makes incremental collection
02081  *            useless, but the implementation is still correct.
02082  * PCR_VDB:   Use PPCR's virtual dirty bit facility.
02083  * PROC_VDB:  Use the /proc facility for reading dirty bits.  Only
02084  *            works under some SVR4 variants.  Even then, it may be
02085  *            too slow to be entirely satisfactory.  Requires reading
02086  *            dirty bits for entire address space.  Implementations tend
02087  *            to assume that the client is a (slow) debugger.
02088  * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
02089  *            dirtied pages.  The implementation (and implementability)
02090  *            is highly system dependent.  This usually fails when system
02091  *            calls write to a protected page.  We prevent the read system
02092  *            call from doing so.  It is the client's responsibility to
02093  *            make sure that other system calls are similarly protected
02094  *            or write only to the stack.
02095  */
02096 GC_bool GC_dirty_maintained = FALSE;
02097 
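/* Whichever strategy is compiled in, the rest of the collector sees the */
/* same small interface: GC_dirty_init() once, GC_read_dirty() at the    */
/* start of a partial collection, then GC_page_was_dirty(h) per block.   */
/* A sketch of that calling pattern; first_block, next_block and         */
/* rescan_block are hypothetical helpers, not collector code:            */
#if 0
static void partial_mark_pass_example(void)
{
    struct hblk *h;

    GC_read_dirty();                 /* snapshot the bits, reprotect heap */
    for (h = first_block(); h != 0; h = next_block(h)) {
        if (GC_page_was_dirty(h)) {
            rescan_block(h);         /* re-mark only pages written since  */
        }                            /* the previous collection           */
    }
}
#endif
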
02098 # ifdef DEFAULT_VDB
02099 
02100 /* All of the following assume the allocation lock is held, and       */
02101 /* signals are disabled.                                */
02102 
02103 /* The client asserts that unallocated pages in the heap are never    */
02104 /* written.                                                    */
02105 
02106 /* Initialize virtual dirty bit implementation.                */
02107 void GC_dirty_init()
02108 {
02109 #   ifdef PRINTSTATS
02110       GC_printf0("Initializing DEFAULT_VDB...\n");
02111 #   endif
02112     GC_dirty_maintained = TRUE;
02113 }
02114 
02115 /* Retrieve system dirty bits for heap to a local buffer.      */
02116 /* Restore the system's notion of which pages are dirty.       */
02117 void GC_read_dirty()
02118 {}
02119 
02120 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer?  */
02121 /* If the actual page size is different, this returns TRUE if any     */
02122 /* of the pages overlapping h are dirty.  This routine may err on the */
02123 /* side of labelling pages as dirty (and this implementation does).   */
02124 /*ARGSUSED*/
02125 GC_bool GC_page_was_dirty(h)
02126 struct hblk *h;
02127 {
02128     return(TRUE);
02129 }
02130 
02131 /*
02132  * The following two routines are typically less crucial.  They matter
02133  * most with large dynamic libraries, or if we can't accurately identify
02134  * stacks, e.g. under Solaris 2.X.  Otherwise the following default
02135  * versions are adequate.
02136  */
02137  
02138 /* Could any valid GC heap pointer ever have been written to this page?      */
02139 /*ARGSUSED*/
02140 GC_bool GC_page_was_ever_dirty(h)
02141 struct hblk *h;
02142 {
02143     return(TRUE);
02144 }
02145 
02146 /* Reset the n pages starting at h to "was never dirty" status.       */
02147 void GC_is_fresh(h, n)
02148 struct hblk *h;
02149 word n;
02150 {
02151 }
02152 
02153 /* A call that:                                         */
02154 /* I) hints that [h, h+nblocks) is about to be written. */
02155 /* II) guarantees that protection is removed.           */
02156 /* (I) may speed up some dirty bit implementations.     */
02157 /* (II) may be essential if we need to ensure that      */
02158 /* pointer-free system call buffers in the heap are     */
02159 /* not protected.                                */
02160 /*ARGSUSED*/
02161 void GC_remove_protection(h, nblocks, is_ptrfree)
02162 struct hblk *h;
02163 word nblocks;
02164 GC_bool is_ptrfree;
02165 {
02166 }
02167 
02168 # endif /* DEFAULT_VDB */
02169 
02170 
02171 # ifdef MPROTECT_VDB
02172 
02173 /*
02174  * See DEFAULT_VDB for interface descriptions.
02175  */
02176 
02177 /*
02178  * This implementation maintains dirty bits itself by catching write
02179  * faults and keeping track of them.  We assume nobody else catches
02180  * SIGBUS or SIGSEGV.  We assume no write faults occur in system calls.
02181  * This means that clients must ensure that system calls don't write
02182  * to the write-protected heap.  Probably the best way to do this is to
02183  * ensure that system calls write at most to POINTERFREE objects in the
02184  * heap, and do even that only if we are on a platform on which those
02185  * are not protected.  Another alternative is to wrap system calls
02186  * (see example for read below), but the current implementation holds
02187  * a lock across blocking calls, making it problematic for multithreaded
02188  * applications. 
02189  * We assume the page size is a multiple of HBLKSIZE.
02190  * We prefer them to be the same.  We avoid protecting POINTERFREE
02191  * objects only if they are the same.
02192  */
02193 
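/* The practical consequence for clients: never hand a pointer into the  */
/* write-protected heap directly to a system call.  One safe pattern is  */
/* to read into a stack buffer and copy, since ordinary stores are       */
/* caught by the fault handler.  A sketch only, assuming a request that  */
/* fits the buffer; the function name is illustrative:                   */
#if 0
#include <string.h>
#include <unistd.h>

static ssize_t read_into_heap_obj(int fd, void *heap_obj, size_t n)
{
    char tmp[4096];                 /* the stack is never protected       */
    ssize_t got;

    if (n > sizeof tmp) n = sizeof tmp;
    got = read(fd, tmp, n);         /* kernel writes to the stack only    */
    if (got > 0) memcpy(heap_obj, tmp, (size_t)got);
                                    /* ordinary store; faults are handled */
    return got;
}
#endif
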
02194 # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(DARWIN)
02195 
02196 #   include <sys/mman.h>
02197 #   include <signal.h>
02198 #   include <sys/syscall.h>
02199 
02200 #   define PROTECT(addr, len) \
02201          if (mprotect((caddr_t)(addr), (size_t)(len), \
02202                      PROT_READ | OPT_PROT_EXEC) < 0) { \
02203            ABORT("mprotect failed"); \
02204          }
02205 #   define UNPROTECT(addr, len) \
02206          if (mprotect((caddr_t)(addr), (size_t)(len), \
02207                      PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
02208            ABORT("un-mprotect failed"); \
02209          }
02210          
02211 # else
02212 
02213 # ifdef DARWIN
02214     /* Using vm_protect (mach syscall) over mprotect (BSD syscall) seems to
02215        decrease the likelihood of some of the problems described below. */
02216     #include <mach/vm_map.h>
02217     static mach_port_t GC_task_self;
02218     #define PROTECT(addr,len) \
02219         if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
02220                 FALSE,VM_PROT_READ) != KERN_SUCCESS) { \
02221             ABORT("vm_protect failed"); \
02222         }
02223     #define UNPROTECT(addr,len) \
02224         if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
02225                 FALSE,VM_PROT_READ|VM_PROT_WRITE) != KERN_SUCCESS) { \
02226             ABORT("vm_protect failed"); \
02227         }
02228 # else
02229     
02230 #   ifndef MSWINCE
02231 #     include <signal.h>
02232 #   endif
02233 
02234     static DWORD protect_junk;
02235 #   define PROTECT(addr, len) \
02236          if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
02237                            &protect_junk)) { \
02238            DWORD last_error = GetLastError(); \
02239            GC_printf1("Last error code: %lx\n", last_error); \
02240            ABORT("VirtualProtect failed"); \
02241          }
02242 #   define UNPROTECT(addr, len) \
02243          if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
02244                            &protect_junk)) { \
02245            ABORT("un-VirtualProtect failed"); \
02246          }
02247 # endif /* !DARWIN */
02248 # endif /* MSWIN32 || MSWINCE || DARWIN */
02249 
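/* The macros above are the entire write barrier: PROTECT makes a range  */
/* read-only, and the fault handler below records the page and           */
/* UNPROTECTs it so the write can proceed.  A self-contained toy version */
/* of that cycle using plain POSIX calls; the names and the single-range */
/* scope are illustrative, not the collector's:                          */
#if 0
#include <signal.h>
#include <string.h>
#include <sys/mman.h>

static volatile sig_atomic_t page_dirtied;
static char *watched_start;
static size_t watched_len;

static void toy_fault_handler(int sig, siginfo_t *si, void *ctx)
{
    char *a = (char *)si->si_addr;
    (void)sig; (void)ctx;
    if (a >= watched_start && a < watched_start + watched_len) {
        page_dirtied = 1;                        /* remember the write    */
        mprotect(watched_start, watched_len,
                 PROT_READ | PROT_WRITE);        /* let it proceed        */
    }
}

static void watch_range(void *start, size_t len)
{
    struct sigaction act;

    memset(&act, 0, sizeof act);
    act.sa_sigaction = toy_fault_handler;
    act.sa_flags = SA_SIGINFO | SA_RESTART;
    sigemptyset(&act.sa_mask);
    sigaction(SIGSEGV, &act, 0);
    watched_start = (char *)start;
    watched_len = len;
    mprotect(start, len, PROT_READ);             /* arm the barrier       */
}
#endif
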
02250 #if defined(SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
02251     typedef void (* SIG_PF)();
02252 #endif /* SUNOS4 || (FREEBSD && !SUNOS5SIGS) */
02253 
02254 #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX) \
02255     || defined(HURD)
02256 # ifdef __STDC__
02257     typedef void (* SIG_PF)(int);
02258 # else
02259     typedef void (* SIG_PF)();
02260 # endif
02261 #endif /* SUNOS5SIGS || OSF1 || LINUX || HURD */
02262 
02263 #if defined(MSWIN32)
02264     typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
02265 #   undef SIG_DFL
02266 #   define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
02267 #endif
02268 #if defined(MSWINCE)
02269     typedef LONG (WINAPI *SIG_PF)(struct _EXCEPTION_POINTERS *);
02270 #   undef SIG_DFL
02271 #   define SIG_DFL (SIG_PF) (-1)
02272 #endif
02273 
02274 #if defined(IRIX5) || defined(OSF1) || defined(HURD)
02275     typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
02276 #endif /* IRIX5 || OSF1 || HURD */
02277 
02278 #if defined(SUNOS5SIGS)
02279 # if defined(HPUX) || defined(FREEBSD)
02280 #   define SIGINFO_T siginfo_t
02281 # else
02282 #   define SIGINFO_T struct siginfo
02283 # endif
02284 # ifdef __STDC__
02285     typedef void (* REAL_SIG_PF)(int, SIGINFO_T *, void *);
02286 # else
02287     typedef void (* REAL_SIG_PF)();
02288 # endif
02289 #endif /* SUNOS5SIGS */
02290 
02291 #if defined(LINUX)
02292 #   if __GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 2
02293       typedef struct sigcontext s_c;
02294 #   else  /* glibc < 2.2 */
02295 #     include <linux/version.h>
02296 #     if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(ARM32)
02297         typedef struct sigcontext s_c;
02298 #     else
02299         typedef struct sigcontext_struct s_c;
02300 #     endif
02301 #   endif  /* glibc < 2.2 */
02302 #   if defined(ALPHA) || defined(M68K)
02303       typedef void (* REAL_SIG_PF)(int, int, s_c *);
02304 #   else
02305 #     if defined(IA64) || defined(HP_PA) || defined(X86_64)
02306         typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
02307        /* FIXME:                                          */
02308        /* According to SUSV3, the last argument should have type */
02309        /* void * or ucontext_t *                          */
02310 #     else
02311         typedef void (* REAL_SIG_PF)(int, s_c);
02312 #     endif
02313 #   endif
02314 #   ifdef ALPHA
02315     /* Retrieve fault address from sigcontext structure by decoding   */
02316     /* instruction.                                            */
02317     char * get_fault_addr(s_c *sc) {
02318         unsigned instr;
02319        word faultaddr;
02320 
02321        instr = *((unsigned *)(sc->sc_pc));
02322        faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
02323        faultaddr += (word) (((int)instr << 16) >> 16);
02324        return (char *)faultaddr;
02325     }
02326 #   endif /* !ALPHA */
02327 # endif /* LINUX */
02328 
02329 #ifndef DARWIN
02330 SIG_PF GC_old_bus_handler;
02331 SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
02332 #endif /* !DARWIN */
02333 
02334 #if defined(THREADS)
02335 /* We need to lock around the bitmap update in the write fault handler       */
02336 /* in order to avoid the risk of losing a bit.  We do this with a     */
02337 /* test-and-set spin lock if we know how to do that.  Otherwise we    */
02338 /* check whether we are already in the handler and use the dumb but   */
02339 /* safe fallback algorithm of setting all bits in the word.           */
02340 /* Contention should be very rare, so we do the minimum to handle it  */
02341 /* correctly.                                                  */
02342 #ifdef GC_TEST_AND_SET_DEFINED
02343   static VOLATILE unsigned int fault_handler_lock = 0;
02344   void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
02345     while (GC_test_and_set(&fault_handler_lock)) {}
02346     /* Could also revert to set_pht_entry_from_index_safe if initial  */
02347     /* GC_test_and_set fails.                                         */
02348     set_pht_entry_from_index(db, index);
02349     GC_clear(&fault_handler_lock);
02350   }
02351 #else /* !GC_TEST_AND_SET_DEFINED */
02352   /* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong,       */
02353   /* just before we notice the conflict and correct it. We may end up   */
02354   /* looking at it while it's wrong.  But this requires contention    */
02355   /* exactly when a GC is triggered, which seems far less likely to   */
02356   /* fail than the old code, which had no reported failures.  Thus we */
02357   /* leave it this way while we think of something better, or support */
02358   /* GC_test_and_set on the remaining platforms.               */
02359   static VOLATILE word currently_updating = 0;
02360   void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
02361     unsigned int update_dummy;
02362     currently_updating = (word)(&update_dummy);
02363     set_pht_entry_from_index(db, index);
02364     /* If we get contention in the 10 or so instruction window here,  */
02365     /* and we get stopped by a GC between the two updates, we lose!   */
02366     if (currently_updating != (word)(&update_dummy)) {
02367        set_pht_entry_from_index_safe(db, index);
02368        /* We claim that if two threads concurrently try to update the */
02369        /* dirty bit vector, the first one to execute UPDATE_START     */
02370        /* will see it changed when UPDATE_END is executed.  (Note that       */
02371        /* &update_dummy must differ in two distinct threads.)  It     */
02372        /* will then execute set_pht_entry_from_index_safe, thus       */
02373        /* returning us to a safe state, though not soon enough.       */
02374     }
02375   }
02376 #endif /* !GC_TEST_AND_SET_DEFINED */
02377 #else /* !THREADS */
02378 # define async_set_pht_entry_from_index(db, index) \
02379        set_pht_entry_from_index(db, index)
02380 #endif /* !THREADS */
02381 
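/* The locked path above is a plain test-and-set spin lock around a      */
/* one-bit update.  In portable modern C the same pattern can be written */
/* with C11 atomics, as in the sketch below; this is purely              */
/* illustrative, since the collector relies on its own                   */
/* GC_test_and_set/GC_clear primitives:                                  */
#if 0
#include <stdatomic.h>

static atomic_flag fh_lock = ATOMIC_FLAG_INIT;

static void set_bit_locked(unsigned long *bits, int index)
{
    while (atomic_flag_test_and_set(&fh_lock)) { /* spin */ }
    bits[index / (8 * sizeof *bits)] |= 1UL << (index % (8 * sizeof *bits));
    atomic_flag_clear(&fh_lock);
}
#endif
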
02382 /*ARGSUSED*/
02383 #if !defined(DARWIN)
02384 # if defined (SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
02385     void GC_write_fault_handler(sig, code, scp, addr)
02386     int sig, code;
02387     struct sigcontext *scp;
02388     char * addr;
02389 #   ifdef SUNOS4
02390 #     define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
02391 #     define CODE_OK (FC_CODE(code) == FC_PROT \
02392                          || (FC_CODE(code) == FC_OBJERR \
02393                             && FC_ERRNO(code) == FC_PROT))
02394 #   endif
02395 #   ifdef FREEBSD
02396 #     define SIG_OK (sig == SIGBUS)
02397 #     define CODE_OK TRUE
02398 #   endif
02399 # endif /* SUNOS4 || (FREEBSD && !SUNOS5SIGS) */
02400 
02401 # if defined(IRIX5) || defined(OSF1) || defined(HURD)
02402 #   include <errno.h>
02403     void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
02404 #   ifdef OSF1
02405 #     define SIG_OK (sig == SIGSEGV)
02406 #     define CODE_OK (code == 2 /* experimentally determined */)
02407 #   endif
02408 #   ifdef IRIX5
02409 #     define SIG_OK (sig == SIGSEGV)
02410 #     define CODE_OK (code == EACCES)
02411 #   endif
02412 #   ifdef HURD
02413 #     define SIG_OK (sig == SIGBUS || sig == SIGSEGV)   
02414 #     define CODE_OK  TRUE
02415 #   endif
02416 # endif /* IRIX5 || OSF1 || HURD */
02417 
02418 # if defined(LINUX)
02419 #   if defined(ALPHA) || defined(M68K)
02420       void GC_write_fault_handler(int sig, int code, s_c * sc)
02421 #   else
02422 #     if defined(IA64) || defined(HP_PA) || defined(X86_64)
02423         void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
02424 #     else
02425 #       if defined(ARM32)
02426           void GC_write_fault_handler(int sig, int a2, int a3, int a4, s_c sc)
02427 #       else
02428           void GC_write_fault_handler(int sig, s_c sc)
02429 #       endif
02430 #     endif
02431 #   endif
02432 #   define SIG_OK (sig == SIGSEGV)
02433 #   define CODE_OK TRUE
02434        /* Empirically c.trapno == 14, on IA32, but is that useful?     */
02435        /* Should probably consider alignment issues on other          */
02436        /* architectures.                                       */
02437 # endif /* LINUX */
02438 
02439 # if defined(SUNOS5SIGS)
02440 #  ifdef __STDC__
02441     void GC_write_fault_handler(int sig, SIGINFO_T *scp, void * context)
02442 #  else
02443     void GC_write_fault_handler(sig, scp, context)
02444     int sig;
02445     SIGINFO_T *scp;
02446     void * context;
02447 #  endif
02448 #   ifdef HPUX
02449 #     define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
02450 #     define CODE_OK (scp -> si_code == SEGV_ACCERR) \
02451                    || (scp -> si_code == BUS_ADRERR) \
02452                    || (scp -> si_code == BUS_UNKNOWN) \
02453                    || (scp -> si_code == SEGV_UNKNOWN) \
02454                    || (scp -> si_code == BUS_OBJERR)
02455 #   else
02456 #     ifdef FREEBSD
02457 #       define SIG_OK (sig == SIGBUS)
02458 #       define CODE_OK (scp -> si_code == BUS_PAGE_FAULT)
02459 #     else
02460 #       define SIG_OK (sig == SIGSEGV)
02461 #       define CODE_OK (scp -> si_code == SEGV_ACCERR)
02462 #     endif
02463 #   endif    
02464 # endif /* SUNOS5SIGS */
02465 
02466 # if defined(MSWIN32) || defined(MSWINCE)
02467     LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
02468 #   define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
02469                      STATUS_ACCESS_VIOLATION)
02470 #   define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
02471                      /* Write fault */
02472 # endif /* MSWIN32 || MSWINCE */
02473 {
02474     register unsigned i;
02475 #   if defined(HURD) 
02476        char *addr = (char *) code;
02477 #   endif
02478 #   ifdef IRIX5
02479        char * addr = (char *) (size_t) (scp -> sc_badvaddr);
02480 #   endif
02481 #   if defined(OSF1) && defined(ALPHA)
02482        char * addr = (char *) (scp -> sc_traparg_a0);
02483 #   endif
02484 #   ifdef SUNOS5SIGS
02485        char * addr = (char *) (scp -> si_addr);
02486 #   endif
02487 #   ifdef LINUX
02488 #     if defined(I386)
02489        char * addr = (char *) (sc.cr2);
02490 #     else
02491 #      if defined(M68K)
02492           char * addr = NULL;
02493 
02494          struct sigcontext *scp = (struct sigcontext *)(sc);
02495 
02496          int format = (scp->sc_formatvec >> 12) & 0xf;
02497          unsigned long *framedata = (unsigned long *)(scp + 1); 
02498          unsigned long ea;
02499 
02500          if (format == 0xa || format == 0xb) {
02501               /* 68020/030 */
02502               ea = framedata[2];
02503          } else if (format == 7) {
02504               /* 68040 */
02505               ea = framedata[3];
02506               if (framedata[1] & 0x08000000) {
02507                      /* correct addr on misaligned access */
02508                      ea = (ea+4095)&(~4095);
02509               }
02510          } else if (format == 4) {
02511               /* 68060 */
02512               ea = framedata[0];
02513               if (framedata[1] & 0x08000000) {
02514                      /* correct addr on misaligned access */
02515                      ea = (ea+4095)&(~4095);
02516               }
02517          }    
02518          addr = (char *)ea;
02519 #      else
02520 #        ifdef ALPHA
02521             char * addr = get_fault_addr(sc);
02522 #        else
02523 #          if defined(IA64) || defined(HP_PA) || defined(X86_64)
02524              char * addr = si -> si_addr;
02525              /* I believe this is claimed to work on all platforms for       */
02526              /* Linux 2.3.47 and later.  Hopefully we don't have to   */
02527              /* worry about earlier kernels on IA64.                  */
02528 #          else
02529 #             if defined(POWERPC)
02530                 char * addr = (char *) (sc.regs->dar);
02531 #            else
02532 #               if defined(ARM32)
02533                   char * addr = (char *)sc.fault_address;
02534 #               else
02535 #               if defined(CRIS)
02536                   char * addr = (char *)sc.regs.csraddr;
02537 #               else
02538                   --> architecture not supported
02539 #               endif
02540 #               endif
02541 #            endif
02542 #          endif
02543 #        endif
02544 #      endif
02545 #     endif
02546 #   endif
02547 #   if defined(MSWIN32) || defined(MSWINCE)
02548        char * addr = (char *) (exc_info -> ExceptionRecord
02549                             -> ExceptionInformation[1]);
02550 #      define sig SIGSEGV
02551 #   endif
02552     
02553     if (SIG_OK && CODE_OK) {
02554         register struct hblk * h =
02555                      (struct hblk *)((word)addr & ~(GC_page_size-1));
02556         GC_bool in_allocd_block;
02557         
02558 #      ifdef SUNOS5SIGS
02559            /* Address is only within the correct physical page.       */
02560            in_allocd_block = FALSE;
02561             for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
02562               if (HDR(h+i) != 0) {
02563                 in_allocd_block = TRUE;
02564               }
02565             }
02566 #      else
02567            in_allocd_block = (HDR(addr) != 0);
02568 #      endif
02569         if (!in_allocd_block) {
02570            /* FIXME - We should make sure that we invoke the   */
02571            /* old handler with the appropriate calling  */
02572            /* sequence, which often depends on SA_SIGINFO.     */
02573 
02574            /* Heap blocks now begin and end on page boundaries */
02575             SIG_PF old_handler;
02576             
02577             if (sig == SIGSEGV) {
02578               old_handler = GC_old_segv_handler;
02579             } else {
02580                 old_handler = GC_old_bus_handler;
02581             }
02582             if (old_handler == SIG_DFL) {
02583 #             if !defined(MSWIN32) && !defined(MSWINCE)
02584                   GC_err_printf1("Segfault at 0x%lx\n", addr);
02585                     ABORT("Unexpected bus error or segmentation fault");
02586 #             else
02587                   return(EXCEPTION_CONTINUE_SEARCH);
02588 #             endif
02589             } else {
02590 #             if defined (SUNOS4) \
02591                     || (defined(FREEBSD) && !defined(SUNOS5SIGS))
02592                   (*old_handler) (sig, code, scp, addr);
02593                   return;
02594 #             endif
02595 #             if defined (SUNOS5SIGS)
02596                     /*
02597                      * FIXME: For FreeBSD, this code should check if the 
02598                      * old signal handler used the traditional BSD style and
02599                      * if so call it using that style.
02600                      */
02601                   (*(REAL_SIG_PF)old_handler) (sig, scp, context);
02602                   return;
02603 #             endif
02604 #             if defined (LINUX)
02605 #                 if defined(ALPHA) || defined(M68K)
02606                       (*(REAL_SIG_PF)old_handler) (sig, code, sc);
02607 #                 else 
02608 #                   if defined(IA64) || defined(HP_PA) || defined(X86_64)
02609                       (*(REAL_SIG_PF)old_handler) (sig, si, scp);
02610 #                   else
02611                       (*(REAL_SIG_PF)old_handler) (sig, sc);
02612 #                   endif
02613 #                 endif
02614                   return;
02615 #             endif
02616 #             if defined (IRIX5) || defined(OSF1) || defined(HURD)
02617                   (*(REAL_SIG_PF)old_handler) (sig, code, scp);
02618                   return;
02619 #             endif
02620 #             ifdef MSWIN32
02621                   return((*old_handler)(exc_info));
02622 #             endif
02623             }
02624         }
02625         UNPROTECT(h, GC_page_size);
02626        /* We need to make sure that no collection occurs between      */
02627        /* the UNPROTECT and the setting of the dirty bit.  Otherwise  */
02628        /* a write by a third thread might go unnoticed.  Reversing    */
02629        /* the order is just as bad, since we would end up unprotecting       */
02630        /* a page in a GC cycle during which it's not marked.          */
02631        /* Currently we do this by disabling the thread stopping       */
02632        /* signals while this handler is running.  An alternative might       */
02633        /* be to record the fact that we're about to unprotect, or     */
02634        /* have just unprotected a page in the GC's thread structure,  */
02635        /* and then to have the thread stopping code set the dirty     */
02636        /* flag, if necessary.                                         */
02637         for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
02638             register int index = PHT_HASH(h+i);
02639             
02640             async_set_pht_entry_from_index(GC_dirty_pages, index);
02641         }
02642 #      if defined(OSF1)
02643            /* These reset the signal handler each time by default. */
02644            signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
02645 #      endif
02646        /* The write may not take place before dirty bits are read.    */
02647        /* But then we'll fault again ...                       */
02648 #      if defined(MSWIN32) || defined(MSWINCE)
02649            return(EXCEPTION_CONTINUE_EXECUTION);
02650 #      else
02651            return;
02652 #      endif
02653     }
02654 #if defined(MSWIN32) || defined(MSWINCE)
02655     return EXCEPTION_CONTINUE_SEARCH;
02656 #else
02657     GC_err_printf1("Segfault at 0x%lx\n", addr);
02658     ABORT("Unexpected bus error or segmentation fault");
02659 #endif
02660 }
02661 #endif /* !DARWIN */
02662 
02663 /*
02664  * We hold the allocation lock.  We expect block h to be written
02665  * shortly.  Ensure that all pages containing any part of the n hblks
02666  * starting at h are no longer protected.  If is_ptrfree is false,
02667  * also ensure that they will subsequently appear to be dirty.
02668  */
02669 void GC_remove_protection(h, nblocks, is_ptrfree)
02670 struct hblk *h;
02671 word nblocks;
02672 GC_bool is_ptrfree;
02673 {
02674     struct hblk * h_trunc;  /* Truncated to page boundary */
02675     struct hblk * h_end;    /* Page boundary following block end */
02676     struct hblk * current;
02677     GC_bool found_clean;
02678     
02679     if (!GC_dirty_maintained) return;
02680     h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
02681     h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size-1)
02682                            & ~(GC_page_size-1));
02683     found_clean = FALSE;
02684     for (current = h_trunc; current < h_end; ++current) {
02685         int index = PHT_HASH(current);
02686             
02687         if (!is_ptrfree || current < h || current >= h + nblocks) {
02688             async_set_pht_entry_from_index(GC_dirty_pages, index);
02689         }
02690     }
02691     UNPROTECT(h_trunc, (ptr_t)h_end - (ptr_t)h_trunc);
02692 }
02693 
02694 #if !defined(DARWIN)
02695 void GC_dirty_init()
02696 {
02697 #   if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) || \
02698        defined(OSF1) || defined(HURD)
02699       struct sigaction      act, oldact;
02700       /* We should probably specify SA_SIGINFO for Linux, and handle  */
02701       /* the different architectures more uniformly.                  */
02702 #     if defined(IRIX5) || defined(LINUX) && !defined(X86_64) \
02703         || defined(OSF1) || defined(HURD)
02704        act.sa_flags  = SA_RESTART;
02705         act.sa_handler  = (SIG_PF)GC_write_fault_handler;
02706 #     else
02707        act.sa_flags  = SA_RESTART | SA_SIGINFO;
02708         act.sa_sigaction = GC_write_fault_handler;
02709 #     endif
02710       (void)sigemptyset(&act.sa_mask);
02711 #     ifdef SIG_SUSPEND
02712         /* Arrange to postpone SIG_SUSPEND while we're in a write fault      */
02713         /* handler.  This effectively makes the handler atomic w.r.t. */
02714         /* stopping the world for GC.                                 */
02715         (void)sigaddset(&act.sa_mask, SIG_SUSPEND);
02716 #     endif /* SIG_SUSPEND */
02717 #    endif
02718 #   ifdef PRINTSTATS
02719        GC_printf0("Initializing mprotect virtual dirty bit implementation\n");
02720 #   endif
02721     GC_dirty_maintained = TRUE;
02722     if (GC_page_size % HBLKSIZE != 0) {
02723         GC_err_printf0("Page size not multiple of HBLKSIZE\n");
02724         ABORT("Page size not multiple of HBLKSIZE");
02725     }
02726 #   if defined(SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
02727       GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
02728       if (GC_old_bus_handler == SIG_IGN) {
02729         GC_err_printf0("Previously ignored bus error!?");
02730         GC_old_bus_handler = SIG_DFL;
02731       }
02732       if (GC_old_bus_handler != SIG_DFL) {
02733 #      ifdef PRINTSTATS
02734           GC_err_printf0("Replaced other SIGBUS handler\n");
02735 #      endif
02736       }
02737 #   endif
02738 #   if defined(SUNOS4)
02739       GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
02740       if (GC_old_segv_handler == SIG_IGN) {
02741         GC_err_printf0("Previously ignored segmentation violation!?");
02742         GC_old_segv_handler = SIG_DFL;
02743       }
02744       if (GC_old_segv_handler != SIG_DFL) {
02745 #      ifdef PRINTSTATS
02746           GC_err_printf0("Replaced other SIGSEGV handler\n");
02747 #      endif
02748       }
02749 #   endif
02750 #   if (defined(SUNOS5SIGS) && !defined(FREEBSD)) || defined(IRIX5) \
02751        || defined(LINUX) || defined(OSF1) || defined(HURD)
02752       /* SUNOS5SIGS includes HPUX */
02753 #     if defined(GC_IRIX_THREADS)
02754        sigaction(SIGSEGV, 0, &oldact);
02755        sigaction(SIGSEGV, &act, 0);
02756 #     else 
02757        {
02758          int res = sigaction(SIGSEGV, &act, &oldact);
02759          if (res != 0) ABORT("Sigaction failed");
02760        }
02761 #     endif
02762 #     if defined(_sigargs) || defined(HURD) || !defined(SA_SIGINFO)
02763        /* This is Irix 5.x, not 6.x.  Irix 5.x does not have   */
02764        /* sa_sigaction.                                 */
02765        GC_old_segv_handler = oldact.sa_handler;
02766 #     else /* Irix 6.x or SUNOS5SIGS or LINUX */
02767         if (oldact.sa_flags & SA_SIGINFO) {
02768           GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
02769         } else {
02770           GC_old_segv_handler = oldact.sa_handler;
02771         }
02772 #     endif
02773       if (GC_old_segv_handler == SIG_IGN) {
02774             GC_err_printf0("Previously ignored segmentation violation!?");
02775             GC_old_segv_handler = SIG_DFL;
02776       }
02777       if (GC_old_segv_handler != SIG_DFL) {
02778 #       ifdef PRINTSTATS
02779          GC_err_printf0("Replaced other SIGSEGV handler\n");
02780 #       endif
02781       }
02782 #   endif /* (SUNOS5SIGS && !FREEBSD) || IRIX5 || LINUX || OSF1 || HURD */
02783 #   if defined(HPUX) || defined(LINUX) || defined(HURD) \
02784       || (defined(FREEBSD) && defined(SUNOS5SIGS))
02785       sigaction(SIGBUS, &act, &oldact);
02786       GC_old_bus_handler = oldact.sa_handler;
02787       if (GC_old_bus_handler == SIG_IGN) {
02788             GC_err_printf0("Previously ignored bus error!?");
02789             GC_old_bus_handler = SIG_DFL;
02790       }
02791       if (GC_old_bus_handler != SIG_DFL) {
02792 #       ifdef PRINTSTATS
02793          GC_err_printf0("Replaced other SIGBUS handler\n");
02794 #       endif
02795       }
02796 #   endif /* HPUX || LINUX || HURD || (FREEBSD && SUNOS5SIGS) */
02797 #   if defined(MSWIN32)
02798       GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
02799       if (GC_old_segv_handler != NULL) {
02800 #      ifdef PRINTSTATS
02801           GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
02802 #      endif
02803       } else {
02804           GC_old_segv_handler = SIG_DFL;
02805       }
02806 #   endif
02807 }
02808 #endif /* !DARWIN */
02809 
02810 int GC_incremental_protection_needs()
02811 {
02812     if (GC_page_size == HBLKSIZE) {
02813        return GC_PROTECTS_POINTER_HEAP;
02814     } else {
02815        return GC_PROTECTS_POINTER_HEAP | GC_PROTECTS_PTRFREE_HEAP;
02816     }
02817 }
02818 
02819 #define HAVE_INCREMENTAL_PROTECTION_NEEDS
02820 
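/* Clients use the returned flags to decide whether even pointer-free    */
/* (atomic) objects may be write-protected, and hence unsafe to pass as  */
/* system-call buffers.  A hedged usage sketch; the allocation policy is */
/* the illustrative part, while GC_malloc_atomic and the GC_PROTECTS_*   */
/* flags come from gc.h:                                                 */
#if 0
#include <stdlib.h>

static void *alloc_io_buffer(size_t n)
{
    if (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP) {
        /* Atomic objects may also fault on a syscall write: keep I/O    */
        /* buffers outside the collected heap.                           */
        return malloc(n);
    }
    return GC_malloc_atomic(n);      /* never protected in this config   */
}
#endif
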
02821 #define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)
02822 
02823 #define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))
02824 void GC_protect_heap()
02825 {
02826     ptr_t start;
02827     word len;
02828     struct hblk * current;
02829     struct hblk * current_start;  /* Start of block to be protected. */
02830     struct hblk * limit;
02831     unsigned i;
02832     GC_bool protect_all = 
02833          (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));
02834     for (i = 0; i < GC_n_heap_sects; i++) {
02835         start = GC_heap_sects[i].hs_start;
02836         len = GC_heap_sects[i].hs_bytes;
02837        if (protect_all) {
02838           PROTECT(start, len);
02839        } else {
02840          GC_ASSERT(PAGE_ALIGNED(len))
02841          GC_ASSERT(PAGE_ALIGNED(start))
02842          current_start = current = (struct hblk *)start;
02843          limit = (struct hblk *)(start + len);
02844          while (current < limit) {
02845             hdr * hhdr;
02846            word nhblks;
02847            GC_bool is_ptrfree;
02848 
02849            GC_ASSERT(PAGE_ALIGNED(current));
02850            GET_HDR(current, hhdr);
02851            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
02852              /* This can happen only if we're at the beginning of a   */
02853              /* heap segment, and a block spans heap segments.        */
02854              /* We will handle that block as part of the preceding    */
02855              /* segment.                                       */
02856              GC_ASSERT(current_start == current);
02857              current_start = ++current;
02858              continue;
02859            }
02860            if (HBLK_IS_FREE(hhdr)) {
02861              GC_ASSERT(PAGE_ALIGNED(hhdr -> hb_sz));
02862              nhblks = divHBLKSZ(hhdr -> hb_sz);
02863              is_ptrfree = TRUE;    /* dirty on alloc */
02864            } else {
02865              nhblks = OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
02866              is_ptrfree = IS_PTRFREE(hhdr);
02867            }
02868            if (is_ptrfree) {
02869              if (current_start < current) {
02870               PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
02871              }
02872              current_start = (current += nhblks);
02873            } else {
02874              current += nhblks;
02875            }
02876          } 
02877          if (current_start < current) {
02878            PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
02879          }
02880        }
02881     }
02882 }
02883 
02884 /* We assume that either the world is stopped or it's OK to lose dirty */
02885 /* bits while this is happening (as in GC_enable_incremental).         */
02886 void GC_read_dirty()
02887 {
02888     BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
02889           (sizeof GC_dirty_pages));
02890     BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
02891     GC_protect_heap();
02892 }
02893 
02894 GC_bool GC_page_was_dirty(h)
02895 struct hblk * h;
02896 {
02897     register word index = PHT_HASH(h);
02898     
02899     return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
02900 }
02901 
02902 /*
02903  * Acquiring the allocation lock here is dangerous, since this
02904  * can be called from within GC_call_with_alloc_lock, and the cord
02905  * package does so.  On systems that allow nested lock acquisition, this
02906  * happens to work.
02907  * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
02908  */
02909 
02910 static GC_bool syscall_acquired_lock = FALSE;    /* Protected by GC lock. */
02911  
02912 void GC_begin_syscall()
02913 {
02914     if (!I_HOLD_LOCK()) {
02915        LOCK();
02916        syscall_acquired_lock = TRUE;
02917     }
02918 }
02919 
02920 void GC_end_syscall()
02921 {
02922     if (syscall_acquired_lock) {
02923        syscall_acquired_lock = FALSE;
02924        UNLOCK();
02925     }
02926 }
02927 
02928 void GC_unprotect_range(addr, len)
02929 ptr_t addr;
02930 word len;
02931 {
02932     struct hblk * start_block;
02933     struct hblk * end_block;
02934     register struct hblk *h;
02935     ptr_t obj_start;
02936     
02937     if (!GC_dirty_maintained) return;
02938     obj_start = GC_base(addr);
02939     if (obj_start == 0) return;
02940     if (GC_base(addr + len - 1) != obj_start) {
02941         ABORT("GC_unprotect_range(range bigger than object)");
02942     }
02943     start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
02944     end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
02945     end_block += GC_page_size/HBLKSIZE - 1;
02946     for (h = start_block; h <= end_block; h++) {
02947         register word index = PHT_HASH(h);
02948         
02949         async_set_pht_entry_from_index(GC_dirty_pages, index);
02950     }
02951     UNPROTECT(start_block,
02952              ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
02953 }
02954 
02955 #if 0
02956 
02957 /* We no longer wrap read by default, since that was causing too many */
02958 /* problems.  It is preferred that the client instead avoid writing   */
02959 /* to the write-protected heap with a system call.                    */
02960 /* This still serves as sample code if you do want to wrap system calls.*/
02961 
02962 #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(GC_USE_LD_WRAP)
02963 /* Replacement for UNIX system call.                                    */
02964 /* Other calls that write to the heap should be handled similarly.      */
02965 /* Note that this doesn't work well for blocking reads:  It will hold   */
02966 /* the allocation lock for the entire duration of the call. Multithreaded */
02967 /* clients should really ensure that it won't block, either by setting         */
02968 /* the descriptor nonblocking, or by calling select or poll first, to   */
02969 /* make sure that input is available.                                   */
02970 /* Another, preferred alternative is to ensure that system calls never         */
02971 /* write to the protected heap (see above).                             */
02972 # if defined(__STDC__) && !defined(SUNOS4)
02973 #   include <unistd.h>
02974 #   include <sys/uio.h>
02975     ssize_t read(int fd, void *buf, size_t nbyte)
02976 # else
02977 #   ifndef LINT
02978       int read(fd, buf, nbyte)
02979 #   else
02980       int GC_read(fd, buf, nbyte)
02981 #   endif
02982     int fd;
02983     char *buf;
02984     int nbyte;
02985 # endif
02986 {
02987     int result;
02988     
02989     GC_begin_syscall();
02990     GC_unprotect_range(buf, (word)nbyte);
02991 #   if defined(IRIX5) || defined(GC_LINUX_THREADS)
02992        /* Indirect system call may not always be easily available.    */
02993        /* We could call _read, but that would interfere with the      */
02994        /* libpthread interception of read.                            */
02995        /* On Linux, we have to be careful with the linuxthreads       */
02996        /* read interception.                                          */
02997        {
02998            struct iovec iov;
02999 
03000            iov.iov_base = buf;
03001            iov.iov_len = nbyte;
03002            result = readv(fd, &iov, 1);
03003        }
03004 #   else
03005 #     if defined(HURD)      
03006        result = __read(fd, buf, nbyte);
03007 #     else
03008        /* The two zero args at the end of this list are because one
03009           IA-64 syscall() implementation actually requires six args
03010           to be passed, even though they aren't always used. */
03011        result = syscall(SYS_read, fd, buf, nbyte, 0, 0);
03012 #     endif /* !HURD */
03013 #   endif
03014     GC_end_syscall();
03015     return(result);
03016 }
03017 #endif /* !MSWIN32 && !MSWINCE && !GC_USE_LD_WRAP */
03018 
03019 #if defined(GC_USE_LD_WRAP) && !defined(THREADS)
03020     /* We use the GNU ld call wrapping facility.               */
03021     /* This requires that the linker be invoked with "--wrap read".   */
03022     /* This can be done by passing -Wl,"--wrap read" to gcc.          */
03023     /* I'm not sure that this actually wraps whatever version of read */
03024     /* is called by stdio.  That code also mentions __read.           */
03025 #   include <unistd.h>
03026     ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
03027     {
03028        int result;
03029 
03030        GC_begin_syscall();
03031        GC_unprotect_range(buf, (word)nbyte);
03032        result = __real_read(fd, buf, nbyte);
03033        GC_end_syscall();
03034        return(result);
03035     }
03036 
03037     /* We should probably also do this for __read, or whatever stdio  */
03038     /* actually calls.                                                */
03039 #endif
03040 
03041 #endif /* 0 */
03042 
03043 /*ARGSUSED*/
03044 GC_bool GC_page_was_ever_dirty(h)
03045 struct hblk *h;
03046 {
03047     return(TRUE);
03048 }
03049 
03050 /* Reset the n pages starting at h to "was never dirty" status.       */
03051 /*ARGSUSED*/
03052 void GC_is_fresh(h, n)
03053 struct hblk *h;
03054 word n;
03055 {
03056 }
03057 
03058 # endif /* MPROTECT_VDB */
03059 
03060 # ifdef PROC_VDB
03061 
03062 /*
03063  * See DEFAULT_VDB for interface descriptions.
03064  */
03065  
03066 /*
03067  * This implementation assumes a Solaris 2.X-like /proc pseudo-file-system
03068  * from which we can read page modified bits.  This facility is far from
03069  * optimal (e.g. we would like to get the info for only some of the
03070  * address space), but it avoids intercepting system calls.
03071  */
03072 
03073 #include <errno.h>
03074 #include <sys/types.h>
03075 #include <sys/signal.h>
03076 #include <sys/fault.h>
03077 #include <sys/syscall.h>
03078 #include <sys/procfs.h>
03079 #include <sys/stat.h>
03080 
03081 #define INITIAL_BUF_SZ 16384
03082 word GC_proc_buf_size = INITIAL_BUF_SZ;
03083 char *GC_proc_buf;
03084 
03085 #ifdef GC_SOLARIS_THREADS
03086 /* We don't have exact sp values for threads.  So we count on  */
03087 /* occasionally declaring stack pages to be fresh.  Thus we    */
03088 /* need a real implementation of GC_is_fresh.  We can't clear  */
03089 /* entries in GC_written_pages, since that would declare all   */
03090 /* pages with the given hash address to be fresh.              */
03091 #   define MAX_FRESH_PAGES 8*1024  /* Must be power of 2 */
03092     struct hblk ** GC_fresh_pages; /* A direct mapped cache.   */
03093                                    /* Collisions are dropped.  */
03094 
03095 #   define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
03096 #   define ADD_FRESH_PAGE(h) \
03097        GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
03098 #   define PAGE_IS_FRESH(h) \
03099        (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
03100 #endif
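
/* Worked example (illustrative, assuming HBLKSIZE == 4096):          */
/* a block at address 0x40012000 maps to slot                         */
/*    divHBLKSZ(0x40012000) & (MAX_FRESH_PAGES-1)                     */
/*      = 0x40012 & 0x1fff = 0x12.                                    */
/* A later block hashing to the same slot simply overwrites it, which */
/* is safe: losing an entry only means the page is no longer          */
/* considered fresh.                                                  */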
03101 
03102 /* Add all pages in pht2 to pht1 */
03103 void GC_or_pages(pht1, pht2)
03104 page_hash_table pht1, pht2;
03105 {
03106     register int i;
03107     
03108     for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
03109 }
03110 
03111 int GC_proc_fd;
03112 
03113 void GC_dirty_init()
03114 {
03115     int fd;
03116     char buf[30];
03117 
03118     GC_dirty_maintained = TRUE;
03119     if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
03120        register int i;
03121     
03122         for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
03123 #       ifdef PRINTSTATS
03124            GC_printf1("Allocated words:%lu:all pages may have been written\n",
03125                      (unsigned long)
03126                             (GC_words_allocd + GC_words_allocd_before_gc));
03127 #      endif       
03128     }
03129     sprintf(buf, "/proc/%d", getpid());
03130     fd = open(buf, O_RDONLY);
03131     if (fd < 0) {
03132        ABORT("/proc open failed");
03133     }
03134     GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
03135     close(fd);
03136     syscall(SYS_fcntl, GC_proc_fd, F_SETFD, FD_CLOEXEC);
03137     if (GC_proc_fd < 0) {
03138        ABORT("/proc ioctl failed");
03139     }
03140     GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
03141 #   ifdef GC_SOLARIS_THREADS
03142        GC_fresh_pages = (struct hblk **)
03143          GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
03144        if (GC_fresh_pages == 0) {
03145            GC_err_printf0("No space for fresh pages\n");
03146            EXIT();
03147        }
03148        BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
03149 #   endif
03150 }
03151 
03152 /* Ignore write hints. They don't help us here.  */
03153 /*ARGSUSED*/
03154 void GC_remove_protection(h, nblocks, is_ptrfree)
03155 struct hblk *h;
03156 word nblocks;
03157 GC_bool is_ptrfree;
03158 {
03159 }
03160 
03161 #ifdef GC_SOLARIS_THREADS
03162 #   define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
03163 #else
03164 #   define READ(fd,buf,nbytes) read(fd, buf, nbytes)
03165 #endif
03166 
03167 void GC_read_dirty()
03168 {
03169     unsigned long ps, np;
03170     int nmaps;
03171     ptr_t vaddr;
03172     struct prasmap * map;
03173     char * bufp;
03174     ptr_t current_addr, limit;
03175     int i;
03176     int dummy;
03177 
03178     BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
03179     
03180     bufp = GC_proc_buf;
03181     if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
03182 #      ifdef PRINTSTATS
03183             GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
03184                      GC_proc_buf_size);
03185 #      endif       
03186         {
03187             /* Retry with larger buffer. */
03188             word new_size = 2 * GC_proc_buf_size;
03189             char * new_buf = GC_scratch_alloc(new_size);
03190             
03191             if (new_buf != 0) {
03192                 GC_proc_buf = bufp = new_buf;
03193                 GC_proc_buf_size = new_size;
03194             }
03195             if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
03196                 WARN("Insufficient space for /proc read\n", 0);
03197                 /* Punt:    */
03198               memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
03199               memset(GC_written_pages, 0xff, sizeof(page_hash_table));
03200 #             ifdef GC_SOLARIS_THREADS
03201                   BZERO(GC_fresh_pages,
03202                        MAX_FRESH_PAGES * sizeof (struct hblk *)); 
03203 #             endif
03204               return;
03205             }
03206         }
03207     }
03208     /* Copy dirty bits into GC_grungy_pages */
03209        nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
03210        /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
03211                    nmaps, PG_REFERENCED, PG_MODIFIED); */
03212        bufp = bufp + sizeof(struct prpageheader);
03213        for (i = 0; i < nmaps; i++) {
03214            map = (struct prasmap *)bufp;
03215            vaddr = (ptr_t)(map -> pr_vaddr);
03216            ps = map -> pr_pagesize;
03217            np = map -> pr_npage;
03218            /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
03219            limit = vaddr + ps * np;
03220            bufp += sizeof (struct prasmap);
03221            for (current_addr = vaddr;
03222                 current_addr < limit; current_addr += ps){
03223                if ((*bufp++) & PG_MODIFIED) {
03224                    register struct hblk * h = (struct hblk *) current_addr;
03225                    
03226                    while ((ptr_t)h < current_addr + ps) {
03227                        register word index = PHT_HASH(h);
03228                        
03229                        set_pht_entry_from_index(GC_grungy_pages, index);
03230 #                    ifdef GC_SOLARIS_THREADS
03231                        {
03232                          register int slot = FRESH_PAGE_SLOT(h);
03233                          
03234                          if (GC_fresh_pages[slot] == h) {
03235                              GC_fresh_pages[slot] = 0;
03236                          }
03237                        }
03238 #                    endif
03239                        h++;
03240                    }
03241                }
03242            }
03243            bufp += sizeof(long) - 1;
03244            bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
03245        }
03246     /* Update GC_written_pages. */
03247         GC_or_pages(GC_written_pages, GC_grungy_pages);
03248 #   ifdef GC_SOLARIS_THREADS
03249       /* Make sure that old stacks are considered completely clean    */
03250       /* unless written again.                                        */
03251        GC_old_stacks_are_fresh();
03252 #   endif
03253 }
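
/* For reference, the layout of the /proc buffer as parsed above (one */
/* flag byte per page; each map's flag bytes are padded up to a long  */
/* boundary before the next prasmap):                                 */
/*                                                                    */
/*    struct prpageheader                                             */
/*    struct prasmap (map 0) | pr_npage flag bytes | padding          */
/*    struct prasmap (map 1) | pr_npage flag bytes | padding          */
/*    ...  (pr_nmap maps in all)                                      */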
03254 
03255 #undef READ
03256 
03257 GC_bool GC_page_was_dirty(h)
03258 struct hblk *h;
03259 {
03260     register word index = PHT_HASH(h);
03261     register GC_bool result;
03262     
03263     result = get_pht_entry_from_index(GC_grungy_pages, index);
03264 #   ifdef GC_SOLARIS_THREADS
03265        if (result && PAGE_IS_FRESH(h)) result = FALSE;
03266        /* This happens only if page was declared fresh since   */
03267        /* the read_dirty call, e.g. because it's in an unused  */
03268        /* thread stack.  It's OK to treat it as clean, in      */
03269        /* that case.  And it's consistent with          */
03270        /* GC_page_was_ever_dirty.                       */
03271 #   endif
03272     return(result);
03273 }
03274 
03275 GC_bool GC_page_was_ever_dirty(h)
03276 struct hblk *h;
03277 {
03278     register word index = PHT_HASH(h);
03279     register GC_bool result;
03280     
03281     result = get_pht_entry_from_index(GC_written_pages, index);
03282 #   ifdef GC_SOLARIS_THREADS
03283        if (result && PAGE_IS_FRESH(h)) result = FALSE;
03284 #   endif
03285     return(result);
03286 }
03287 
03288 /* Caller holds allocation lock.   */
03289 void GC_is_fresh(h, n)
03290 struct hblk *h;
03291 word n;
03292 {
03293 
03294     register word index;
03295     
03296 #   ifdef GC_SOLARIS_THREADS
03297       register word i;
03298       
03299       if (GC_fresh_pages != 0) {
03300         for (i = 0; i < n; i++) {
03301           ADD_FRESH_PAGE(h + i);
03302         }
03303       }
03304 #   endif
03305 }
03306 
03307 # endif /* PROC_VDB */
03308 
03309 
03310 # ifdef PCR_VDB
03311 
03312 # include "vd/PCR_VD.h"
03313 
03314 # define NPAGES (32*1024)   /* 128 MB */
03315 
03316 PCR_VD_DB  GC_grungy_bits[NPAGES];
03317 
03318 ptr_t GC_vd_base;    /* Address corresponding to GC_grungy_bits[0]    */
03319                      /* HBLKSIZE aligned.                      */
03320 
03321 void GC_dirty_init()
03322 {
03323     GC_dirty_maintained = TRUE;
03324     /* For the time being, we assume the heap generally grows up */
03325     GC_vd_base = GC_heap_sects[0].hs_start;
03326     if (GC_vd_base == 0) {
03327        ABORT("Bad initial heap segment");
03328     }
03329     if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
03330        != PCR_ERes_okay) {
03331        ABORT("dirty bit initialization failed");
03332     }
03333 }
03334 
03335 void GC_read_dirty()
03336 {
03337     /* lazily enable dirty bits on newly added heap sects */
03338     {
03339         static int onhs = 0;
03340         int nhs = GC_n_heap_sects;
03341         for( ; onhs < nhs; onhs++ ) {
03342             PCR_VD_WriteProtectEnable(
03343                     GC_heap_sects[onhs].hs_start,
03344                     GC_heap_sects[onhs].hs_bytes );
03345         }
03346     }
03347 
03348 
03349     if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
03350         != PCR_ERes_okay) {
03351        ABORT("dirty bit read failed");
03352     }
03353 }
03354 
03355 GC_bool GC_page_was_dirty(h)
03356 struct hblk *h;
03357 {
03358     if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
03359        return(TRUE);
03360     }
03361     return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
03362 }
03363 
03364 /*ARGSUSED*/
03365 void GC_remove_protection(h, nblocks, is_ptrfree)
03366 struct hblk *h;
03367 word nblocks;
03368 GC_bool is_ptrfree;
03369 {
03370     PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
03371     PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
03372 }
03373 
03374 # endif /* PCR_VDB */
03375 
03376 #if defined(MPROTECT_VDB) && defined(DARWIN)
03377 /* The following sources were used as a *reference* for this exception handling
03378    code:
03379       1. Apple's mach/xnu documentation
03380       2. Timothy J. Wood's "Mach Exception Handlers 101" post to the
03381          omnigroup's macosx-dev list. 
03382          www.omnigroup.com/mailman/archive/macosx-dev/2000-June/002030.html
03383       3. macosx-nat.c from Apple's GDB source code.
03384 */
03385    
03386 /* The bug that caused all this trouble should now be fixed. This should
03387    eventually be removed if all goes well. */
03388 /* define BROKEN_EXCEPTION_HANDLING */
03389     
03390 #include <mach/mach.h>
03391 #include <mach/mach_error.h>
03392 #include <mach/thread_status.h>
03393 #include <mach/exception.h>
03394 #include <mach/task.h>
03395 #include <pthread.h>
03396 
03397 /* These are not defined in any header, although they are documented */
03398 extern boolean_t exc_server(mach_msg_header_t *,mach_msg_header_t *);
03399 extern kern_return_t exception_raise(
03400     mach_port_t,mach_port_t,mach_port_t,
03401     exception_type_t,exception_data_t,mach_msg_type_number_t);
03402 extern kern_return_t exception_raise_state(
03403     mach_port_t,mach_port_t,mach_port_t,
03404     exception_type_t,exception_data_t,mach_msg_type_number_t,
03405     thread_state_flavor_t*,thread_state_t,mach_msg_type_number_t,
03406     thread_state_t,mach_msg_type_number_t*);
03407 extern kern_return_t exception_raise_state_identity(
03408     mach_port_t,mach_port_t,mach_port_t,
03409     exception_type_t,exception_data_t,mach_msg_type_number_t,
03410     thread_state_flavor_t*,thread_state_t,mach_msg_type_number_t,
03411     thread_state_t,mach_msg_type_number_t*);
03412 
03413 
03414 #define MAX_EXCEPTION_PORTS 16
03415 
03416 static struct {
03417     mach_msg_type_number_t count;
03418     exception_mask_t      masks[MAX_EXCEPTION_PORTS];
03419     exception_handler_t   ports[MAX_EXCEPTION_PORTS];
03420     exception_behavior_t  behaviors[MAX_EXCEPTION_PORTS];
03421     thread_state_flavor_t flavors[MAX_EXCEPTION_PORTS];
03422 } GC_old_exc_ports;
03423 
03424 static struct {
03425     mach_port_t exception;
03426 #if defined(THREADS)
03427     mach_port_t reply;
03428 #endif
03429 } GC_ports;
03430 
03431 typedef struct {
03432     mach_msg_header_t head;
03433 } GC_msg_t;
03434 
03435 typedef enum {
03436     GC_MP_NORMAL, GC_MP_DISCARDING, GC_MP_STOPPED
03437 } GC_mprotect_state_t;
03438 
03439 /* FIXME: 1 and 2 seem to be safe to use in the msgh_id field,
03440    but this isn't documented.  Check the xnu source to see whether
03441    they are OK. */
03442 #define ID_STOP 1
03443 #define ID_RESUME 2
03444 
03445 /* These values are only used on the reply port */
03446 #define ID_ACK 3
03447 
03448 #if defined(THREADS)
03449 
03450 GC_mprotect_state_t GC_mprotect_state;
03451 
03452 /* The following should ONLY be called when the world is stopped  */
03453 static void GC_mprotect_thread_notify(mach_msg_id_t id) {
03454     struct {
03455         GC_msg_t msg;
03456         mach_msg_trailer_t trailer;
03457     } buf;
03458     mach_msg_return_t r;
03459     /* remote, local */
03460     buf.msg.head.msgh_bits = 
03461         MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,0);
03462     buf.msg.head.msgh_size = sizeof(buf.msg);
03463     buf.msg.head.msgh_remote_port = GC_ports.exception;
03464     buf.msg.head.msgh_local_port = MACH_PORT_NULL;
03465     buf.msg.head.msgh_id = id;
03466             
03467     r = mach_msg(
03468         &buf.msg.head,
03469         MACH_SEND_MSG|MACH_RCV_MSG|MACH_RCV_LARGE,
03470         sizeof(buf.msg),
03471         sizeof(buf),
03472         GC_ports.reply,
03473         MACH_MSG_TIMEOUT_NONE,
03474         MACH_PORT_NULL);
03475     if(r != MACH_MSG_SUCCESS)
03476        ABORT("mach_msg failed in GC_mprotect_thread_notify");
03477     if(buf.msg.head.msgh_id != ID_ACK)
03478         ABORT("invalid ack in GC_mprotect_thread_notify");
03479 }
03480 
03481 /* Should only be called by the mprotect thread */
03482 static void GC_mprotect_thread_reply() {
03483     GC_msg_t msg;
03484     mach_msg_return_t r;
03485     /* remote, local */
03486     msg.head.msgh_bits = 
03487         MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,0);
03488     msg.head.msgh_size = sizeof(msg);
03489     msg.head.msgh_remote_port = GC_ports.reply;
03490     msg.head.msgh_local_port = MACH_PORT_NULL;
03491     msg.head.msgh_id = ID_ACK;
03492             
03493     r = mach_msg(
03494         &msg.head,
03495         MACH_SEND_MSG,
03496         sizeof(msg),
03497         0,
03498         MACH_PORT_NULL,
03499         MACH_MSG_TIMEOUT_NONE,
03500         MACH_PORT_NULL);
03501     if(r != MACH_MSG_SUCCESS)
03502        ABORT("mach_msg failed in GC_mprotect_thread_reply");
03503 }
03504 
03505 void GC_mprotect_stop() {
03506     GC_mprotect_thread_notify(ID_STOP);
03507 }
03508 void GC_mprotect_resume() {
03509     GC_mprotect_thread_notify(ID_RESUME);
03510 }
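
/* Summary of the stop/resume handshake implemented above and in      */
/* GC_mprotect_thread's loop below:                                   */
/*   GC_mprotect_stop():   send ID_STOP; the handler thread switches  */
/*     to GC_MP_DISCARDING, drains pending faults until its receive   */
/*     times out, enters GC_MP_STOPPED and replies with ID_ACK.       */
/*   GC_mprotect_resume(): send ID_RESUME; the handler returns to     */
/*     GC_MP_NORMAL and replies with ID_ACK.                          */
/* The sender blocks in mach_msg() on GC_ports.reply until the ACK    */
/* arrives, so both operations are synchronous.                       */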
03511 
03512 #else /* !THREADS */
03513 /* The compiler should optimize away any GC_mprotect_state computations */
03514 #define GC_mprotect_state GC_MP_NORMAL
03515 #endif
03516 
03517 static void *GC_mprotect_thread(void *arg) {
03518     mach_msg_return_t r;
03519     /* These two structures contain some private kernel data. We don't need to
03520        access any of it so we don't bother defining a proper struct. The
03521        correct definitions are in the xnu source code. */
03522     struct {
03523         mach_msg_header_t head;
03524         char data[256];
03525     } reply;
03526     struct {
03527         mach_msg_header_t head;
03528         mach_msg_body_t msgh_body;
03529         char data[1024];
03530     } msg;
03531 
03532     mach_msg_id_t id;
03533 
03534     /* PLTSCHEME: only needed when THREADS? */
03535 #if defined(THREADS)
03536     GC_darwin_register_mach_handler_thread(mach_thread_self());
03537 #endif
03538     
03539     for(;;) {
03540         r = mach_msg(
03541             &msg.head,
03542             MACH_RCV_MSG|MACH_RCV_LARGE|
03543                 (GC_mprotect_state == GC_MP_DISCARDING ? MACH_RCV_TIMEOUT : 0),
03544             0,
03545             sizeof(msg),
03546             GC_ports.exception,
03547             GC_mprotect_state == GC_MP_DISCARDING ? 0 : MACH_MSG_TIMEOUT_NONE,
03548             MACH_PORT_NULL);
03549         
03550         id = r == MACH_MSG_SUCCESS ? msg.head.msgh_id : -1;
03551         
03552 #if defined(THREADS)
03553         if(GC_mprotect_state == GC_MP_DISCARDING) {
03554             if(r == MACH_RCV_TIMED_OUT) {
03555                 GC_mprotect_state = GC_MP_STOPPED;
03556                 GC_mprotect_thread_reply();
03557                 continue;
03558             }
03559             if(r == MACH_MSG_SUCCESS && (id == ID_STOP || id == ID_RESUME))
03560                 ABORT("out of order mprotect thread request");
03561         }
03562 #endif
03563         
03564         if(r != MACH_MSG_SUCCESS) {
03565             GC_err_printf2("mach_msg failed with %d %s\n", 
03566                 (int)r,mach_error_string(r));
03567             ABORT("mach_msg failed");
03568         }
03569         
03570         switch(id) {
03571 #if defined(THREADS)
03572             case ID_STOP:
03573                 if(GC_mprotect_state != GC_MP_NORMAL)
03574                     ABORT("Called mprotect_stop when state wasn't normal");
03575                 GC_mprotect_state = GC_MP_DISCARDING;
03576                 break;
03577             case ID_RESUME:
03578                 if(GC_mprotect_state != GC_MP_STOPPED)
03579                     ABORT("Called mprotect_resume when state wasn't stopped");
03580                 GC_mprotect_state = GC_MP_NORMAL;
03581                 GC_mprotect_thread_reply();
03582                 break;
03583 #endif /* THREADS */
03584             default:
03585                    /* Handle the message (calls catch_exception_raise) */
03586                if(!exc_server(&msg.head,&reply.head))
03587                     ABORT("exc_server failed");
03588                 /* Send the reply */
03589                 r = mach_msg(
03590                     &reply.head,
03591                     MACH_SEND_MSG,
03592                     reply.head.msgh_size,
03593                     0,
03594                     MACH_PORT_NULL,
03595                     MACH_MSG_TIMEOUT_NONE,
03596                     MACH_PORT_NULL);
03597                if(r != MACH_MSG_SUCCESS) {
03598                      /* This will fail if the thread dies, but the thread shouldn't
03599                         die... */
03600                      #ifdef BROKEN_EXCEPTION_HANDLING
03601                      GC_err_printf2(
03602                         "mach_msg failed with %d %s while sending exc reply\n",
03603                         (int)r,mach_error_string(r));
03604                #else
03605                      ABORT("mach_msg failed while sending exception reply");
03606                #endif
03607               }
03608         } /* switch */
03609     } /* for(;;) */
03610     /* NOT REACHED */
03611     return NULL;
03612 }
03613 
03614 /* All this SIGBUS code shouldn't be necessary. All protection faults should
03615    be going through the mach exception handler. However, it seems a SIGBUS is
03616    occasionally sent for some unknown reason. Even more odd, it seems to be
03617    meaningless and safe to ignore. */
03618 #ifdef BROKEN_EXCEPTION_HANDLING
03619 
03620 typedef void (* SIG_PF)();
03621 static SIG_PF GC_old_bus_handler;
03622 
03623 /* Updates to this aren't atomic, but the SIGBUSs seem pretty rare.
03624    Even if this doesn't get updated properly, it isn't really a problem. */
03625 static int GC_sigbus_count;
03626 
03627 static void GC_darwin_sigbus(int num,siginfo_t *sip,void *context) {
03628     if(num != SIGBUS) ABORT("Got a non-sigbus signal in the sigbus handler");
03629     
03630     /* Ugh... some seem safe to ignore, but too many in a row probably means
03631        trouble. GC_sigbus_count is reset for each mach exception that is
03632        handled */
03633     if(GC_sigbus_count >= 8) {
03634         ABORT("Got more than 8 SIGBUSs in a row!");
03635     } else {
03636         GC_sigbus_count++;
03637         GC_err_printf0("GC: WARNING: Ignoring SIGBUS.\n");
03638     }
03639 }
03640 #endif /* BROKEN_EXCEPTION_HANDLING */
03641 
03642 void GC_dirty_init() {
03643     kern_return_t r;
03644     mach_port_t me;
03645     pthread_t thread;
03646     pthread_attr_t attr;
03647     exception_mask_t mask;
03648     
03649 #   ifdef PRINTSTATS
03650         GC_printf0("Initializing mach/darwin mprotect virtual dirty bit "
03651             "implementation\n");
03652 #   endif  
03653 #      ifdef BROKEN_EXCEPTION_HANDLING
03654         GC_err_printf0("GC: WARNING: Enabling workarounds for various darwin "
03655             "exception handling bugs.\n");
03656 #      endif
03657     GC_dirty_maintained = TRUE;
03658     if (GC_page_size % HBLKSIZE != 0) {
03659         GC_err_printf0("Page size not multiple of HBLKSIZE\n");
03660         ABORT("Page size not multiple of HBLKSIZE");
03661     }
03662     
03663     GC_task_self = me = mach_task_self();
03664     
03665     r = mach_port_allocate(me,MACH_PORT_RIGHT_RECEIVE,&GC_ports.exception);
03666     if(r != KERN_SUCCESS) ABORT("mach_port_allocate failed (exception port)");
03667     
03668     r = mach_port_insert_right(me,GC_ports.exception,GC_ports.exception,
03669        MACH_MSG_TYPE_MAKE_SEND);
03670     if(r != KERN_SUCCESS)
03671        ABORT("mach_port_insert_right failed (exception port)");
03672 
03673     #if defined(THREADS)
03674         r = mach_port_allocate(me,MACH_PORT_RIGHT_RECEIVE,&GC_ports.reply);
03675         if(r != KERN_SUCCESS) ABORT("mach_port_allocate failed (reply port)");
03676     #endif
03677 
03678     /* The exceptions we want to catch */  
03679     mask = EXC_MASK_BAD_ACCESS;
03680 
03681     r = task_get_exception_ports(
03682         me,
03683         mask,
03684         GC_old_exc_ports.masks,
03685         &GC_old_exc_ports.count,
03686         GC_old_exc_ports.ports,
03687         GC_old_exc_ports.behaviors,
03688         GC_old_exc_ports.flavors
03689     );
03690     if(r != KERN_SUCCESS) ABORT("task_get_exception_ports failed");
03691         
03692     r = task_set_exception_ports(
03693         me,
03694         mask,
03695         GC_ports.exception,
03696         EXCEPTION_DEFAULT,
03697         MACHINE_THREAD_STATE
03698     );
03699     if(r != KERN_SUCCESS) ABORT("task_set_exception_ports failed");
03700 
03701     if(pthread_attr_init(&attr) != 0) ABORT("pthread_attr_init failed");
03702     if(pthread_attr_setdetachstate(&attr,PTHREAD_CREATE_DETACHED) != 0) 
03703         ABORT("pthread_attr_setdetachstate failed");
03704 
03705 #      undef pthread_create
03706     /* This will call the real pthread function, not our wrapper */
03707     if(pthread_create(&thread,&attr,GC_mprotect_thread,NULL) != 0)
03708         ABORT("pthread_create failed");
03709     pthread_attr_destroy(&attr);
03710     
03711     /* Set up the SIGBUS handler for ignoring the meaningless SIGBUSs */
03712     #ifdef BROKEN_EXCEPTION_HANDLING 
03713     {
03714         struct sigaction sa, oldsa;
03715         sa.sa_handler = (SIG_PF)GC_darwin_sigbus;
03716         sigemptyset(&sa.sa_mask);
03717         sa.sa_flags = SA_RESTART|SA_SIGINFO;
03718         if(sigaction(SIGBUS,&sa,&oldsa) < 0) ABORT("sigaction");
03719         GC_old_bus_handler = (SIG_PF)oldsa.sa_handler;
03720         if (GC_old_bus_handler != SIG_DFL) {
03721 #             ifdef PRINTSTATS
03722                 GC_err_printf0("Replaced other SIGBUS handler\n");
03723 #             endif
03724         }
03725     }
03726     #endif /* BROKEN_EXCEPTION_HANDLING  */
03727 }
03728  
03729 /* The source code for Apple's GDB was used as a reference for the exception
03730    forwarding code. This code is similar to the GDB code only because there is
03731    only one way to do it. */
03732 static kern_return_t GC_forward_exception(
03733         mach_port_t thread,
03734         mach_port_t task,
03735         exception_type_t exception,
03736         exception_data_t data,
03737         mach_msg_type_number_t data_count
03738 ) {
03739     int i;
03740     kern_return_t r;
03741     mach_port_t port;
03742     exception_behavior_t behavior;
03743     thread_state_flavor_t flavor;
03744     
03745     thread_state_t thread_state;
03746     mach_msg_type_number_t thread_state_count = THREAD_STATE_MAX;
03747         
03748     for(i=0;i<GC_old_exc_ports.count;i++)
03749         if(GC_old_exc_ports.masks[i] & (1 << exception))
03750             break;
03751     if(i==GC_old_exc_ports.count) ABORT("No handler for exception!");
03752     
03753     port = GC_old_exc_ports.ports[i];
03754     behavior = GC_old_exc_ports.behaviors[i];
03755     flavor = GC_old_exc_ports.flavors[i];
03756 
03757     if(behavior != EXCEPTION_DEFAULT) {
03758         r = thread_get_state(thread,flavor,thread_state,&thread_state_count);
03759         if(r != KERN_SUCCESS)
03760             ABORT("thread_get_state failed in forward_exception");
03761     }
03762     
03763     switch(behavior) {
03764         case EXCEPTION_DEFAULT:
03765             r = exception_raise(port,thread,task,exception,data,data_count);
03766             break;
03767         case EXCEPTION_STATE:
03768             r = exception_raise_state(port,thread,task,exception,data,
03769                 data_count,&flavor,thread_state,thread_state_count,
03770                 thread_state,&thread_state_count);
03771             break;
03772         case EXCEPTION_STATE_IDENTITY:
03773             r = exception_raise_state_identity(port,thread,task,exception,data,
03774                 data_count,&flavor,thread_state,thread_state_count,
03775                 thread_state,&thread_state_count);
03776             break;
03777         default:
03778             r = KERN_FAILURE; /* make gcc happy */
03779             ABORT("forward_exception: unknown behavior");
03780             break;
03781     }
03782     
03783     if(behavior != EXCEPTION_DEFAULT) {
03784         r = thread_set_state(thread,flavor,thread_state,thread_state_count);
03785         if(r != KERN_SUCCESS)
03786             ABORT("thread_set_state failed in forward_exception");
03787     }
03788     
03789     return r;
03790 }
03791 
03792 #define FWD() GC_forward_exception(thread,task,exception,code,code_count)
03793 
03794 /* This violates the namespace rules but there isn't anything that can be done
03795    about it. The exception handling stuff is hard-coded to call this. */
03796 kern_return_t
03797 catch_exception_raise(
03798    mach_port_t exception_port,mach_port_t thread,mach_port_t task,
03799    exception_type_t exception,exception_data_t code,
03800    mach_msg_type_number_t code_count
03801 ) {
03802     kern_return_t r;
03803     char *addr;
03804     struct hblk *h;
03805     int i;
03806 #   if defined(POWERPC)
03807 #     if CPP_WORDSZ == 32
03808         thread_state_flavor_t flavor = PPC_EXCEPTION_STATE;
03809         mach_msg_type_number_t exc_state_count = PPC_EXCEPTION_STATE_COUNT;
03810         ppc_exception_state_t exc_state;
03811 #     else
03812         thread_state_flavor_t flavor = PPC_EXCEPTION_STATE64;
03813         mach_msg_type_number_t exc_state_count = PPC_EXCEPTION_STATE64_COUNT;
03814         ppc_exception_state64_t exc_state;
03815 #     endif
03816 #   elif defined(I386)
03817         thread_state_flavor_t flavor = i386_EXCEPTION_STATE;
03818         mach_msg_type_number_t exc_state_count = i386_EXCEPTION_STATE_COUNT;
03819         i386_exception_state_t exc_state;
03820 #   else
03821 #      error FIXME for non-ppc/x86 darwin
03822 #   endif
03823 
03824     
03825     if(exception != EXC_BAD_ACCESS || code[0] != KERN_PROTECTION_FAILURE) {
03826         #ifdef DEBUG_EXCEPTION_HANDLING
03827         /* We aren't interested, pass it on to the old handler */
03828         GC_printf3("Exception: 0x%x Code: 0x%x 0x%x in catch....\n",
03829             exception,
03830             code_count > 0 ? code[0] : -1,
03831             code_count > 1 ? code[1] : -1); 
03832         #endif
03833         return FWD();
03834     }
03835 
03836     r = thread_get_state(thread,flavor,
03837         (natural_t*)&exc_state,&exc_state_count);
03838     if(r != KERN_SUCCESS) {
03839         /* The thread is supposed to be suspended while the exception handler
03840            is called. This shouldn't fail. */
03841         #ifdef BROKEN_EXCEPTION_HANDLING
03842             GC_err_printf0("thread_get_state failed in "
03843                 "catch_exception_raise\n");
03844             return KERN_SUCCESS;
03845         #else
03846             ABORT("thread_get_state failed in catch_exception_raise");
03847         #endif
03848     }
03849     
03850     /* This is the address that caused the fault */
03851 #if defined(POWERPC)
03852     addr = (char*) exc_state.dar;
03853 #elif defined (I386)
03854     addr = (char*) exc_state.faultvaddr;
03855 #else
03856 #   error FIXME for non POWERPC/I386
03857 #endif
03858         
03859     if((HDR(addr)) == 0) {
03860         /* Ugh... just like the SIGBUS problem above, it seems we get a bogus 
03861            KERN_PROTECTION_FAILURE every once in a while. We wait till we get
03862            a bunch in a row before doing anything about it. If a "real" fault
03863            ever occurs, it'll just keep faulting over and over and we'll hit
03864            the limit pretty quickly. */
03865         #ifdef BROKEN_EXCEPTION_HANDLING
03866             static char *last_fault;
03867             static int last_fault_count;
03868             
03869             if(addr != last_fault) {
03870                 last_fault = addr;
03871                 last_fault_count = 0;
03872             }
03873             if(++last_fault_count < 32) {
03874                 if(last_fault_count == 1)
03875                     GC_err_printf1(
03876                         "GC: WARNING: Ignoring KERN_PROTECTION_FAILURE at %p\n",
03877                         addr);
03878                 return KERN_SUCCESS;
03879             }
03880             
03881             GC_err_printf1("Unexpected KERN_PROTECTION_FAILURE at %p\n",addr);
03882             /* Can't pass it along to the signal handler because that is
03883                ignoring SIGBUS signals. We also shouldn't call ABORT here as
03884                signals don't always work too well from the exception handler. */
03885             GC_err_printf0("Aborting\n");
03886             exit(EXIT_FAILURE);
03887         #else /* BROKEN_EXCEPTION_HANDLING */
03888             /* Pass it along to the next exception handler 
03889                (which should call SIGBUS/SIGSEGV) */
03890             return FWD();
03891         #endif /* !BROKEN_EXCEPTION_HANDLING */
03892     }
03893 
03894     #ifdef BROKEN_EXCEPTION_HANDLING
03895         /* Reset the number of consecutive SIGBUSs */
03896         GC_sigbus_count = 0;
03897     #endif
03898     
03899     if(GC_mprotect_state == GC_MP_NORMAL) { /* common case */
03900         h = (struct hblk*)((word)addr & ~(GC_page_size-1));
03901         UNPROTECT(h, GC_page_size);       
03902         for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
03903             register int index = PHT_HASH(h+i);
03904             async_set_pht_entry_from_index(GC_dirty_pages, index);
03905         }
03906     } else if(GC_mprotect_state == GC_MP_DISCARDING) {
03907         /* Lie to the thread for now. No sense UNPROTECT()ing the memory
03908            when we're just going to PROTECT() it again later. The thread
03909            will just fault again once it resumes */
03910     } else {
03911         /* Shouldn't happen, I don't think */
03912         GC_printf0("KERN_PROTECTION_FAILURE while world is stopped\n");
03913         return FWD();
03914     }
03915     return KERN_SUCCESS;
03916 }
03917 #undef FWD
03918 
03919 /* These should never be called, but just in case...  */
03920 kern_return_t catch_exception_raise_state(mach_port_name_t exception_port,
03921     int exception, exception_data_t code, mach_msg_type_number_t codeCnt,
03922     int flavor, thread_state_t old_state, int old_stateCnt,
03923     thread_state_t new_state, int new_stateCnt)
03924 {
03925     ABORT("catch_exception_raise_state");
03926     return(KERN_INVALID_ARGUMENT);
03927 }
03928 kern_return_t catch_exception_raise_state_identity(
03929     mach_port_name_t exception_port, mach_port_t thread, mach_port_t task,
03930     int exception, exception_data_t code, mach_msg_type_number_t codeCnt,
03931     int flavor, thread_state_t old_state, int old_stateCnt, 
03932     thread_state_t new_state, int new_stateCnt)
03933 {
03934     ABORT("catch_exception_raise_state_identity");
03935     return(KERN_INVALID_ARGUMENT);
03936 }
03937 
03938 
03939 #endif /* DARWIN && MPROTECT_VDB */
03940 
03941 # ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS
03942   int GC_incremental_protection_needs()
03943   {
03944     return GC_PROTECTS_NONE;
03945   }
03946 # endif /* !HAVE_INCREMENTAL_PROTECTION_NEEDS */
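
/* Illustrative client-side use of GC_incremental_protection_needs()  */
/* (a sketch only; whether such a check is needed depends on how the  */
/* client writes to the heap):                                        */
#if 0
    static void example_check_protection_needs(void)
    {
        if (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP) {
            /* Even pointer-free (atomic) objects may be write-        */
            /* protected, so heap writes from system calls need the    */
            /* same care for them as for pointer-containing objects.   */
        }
    }
#endif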
03947 
03948 /*
03949  * Call stack save code for debugging.
03950  * Should probably be in mach_dep.c, but that requires reorganization.
03951  */
03952 
03953 /* I suspect the following works for most X86 *nix variants, so       */
03954 /* long as the frame pointer is explicitly stored.  In the case of gcc,      */
03955 /* compiler flags (e.g. -fomit-frame-pointer) determine whether it is.       */
03956 #if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
03957 #   include <features.h>
03958 
03959     struct frame {
03960        struct frame *fr_savfp;
03961        long   fr_savpc;
03962         long  fr_arg[NARGS];  /* All the arguments go here.    */
03963     };
03964 #endif
03965 
03966 #if defined(SPARC)
03967 #  if defined(LINUX)
03968 #    include <features.h>
03969 
03970      struct frame {
03971        long   fr_local[8];
03972        long   fr_arg[6];
03973        struct frame *fr_savfp;
03974        long   fr_savpc;
03975 #       ifndef __arch64__
03976          char *fr_stret;
03977 #       endif
03978        long   fr_argd[6];
03979        long   fr_argx[0];
03980      };
03981 #  else
03982 #    if defined(SUNOS4)
03983 #      include <machine/frame.h>
03984 #    else
03985 #      if defined (DRSNX)
03986 #       include <sys/sparc/frame.h>
03987 #      else
03988 #       if defined(OPENBSD)
03989 #         include <frame.h>
03990 #       else
03991 #         if defined(FREEBSD) || defined(NETBSD)
03992 #           include <machine/frame.h>
03993 #         else
03994 #           include <sys/frame.h>
03995 #         endif
03996 #       endif
03997 #      endif
03998 #    endif
03999 #  endif
04000 #  if NARGS > 6
04001        --> We only know how to get the first 6 arguments
04002 #  endif
04003 #endif /* SPARC */
04004 
04005 #ifdef  NEED_CALLINFO
04006 /* Fill in the pc and argument information for up to NFRAMES of my    */
04007 /* callers.  Ignore my frame and my caller's frame.                   */
04008 
04009 #ifdef LINUX
04010 #   include <unistd.h>
04011 #endif
04012 
04013 #endif /* NEED_CALLINFO */
04014 
04015 #if defined(GC_HAVE_BUILTIN_BACKTRACE)
04016 # include <execinfo.h>
04017 #endif
04018 
04019 #ifdef SAVE_CALL_CHAIN
04020 
04021 #if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \
04022     && defined(GC_HAVE_BUILTIN_BACKTRACE)
04023 
04024 #ifdef REDIRECT_MALLOC
04025   /* Deal with possible malloc calls in backtrace by omitting  */
04026   /* the infinitely recursing backtrace.                */
04027 # ifdef THREADS
04028     __thread  /* If your compiler doesn't understand this */
04029               /* you could use something like pthread_getspecific.    */
04030 # endif
04031   GC_bool GC_in_save_callers = FALSE;
04032 #endif
04033 
04034 void GC_save_callers (info) 
04035 struct callinfo info[NFRAMES];
04036 {
04037   void * tmp_info[NFRAMES + 1];
04038   int npcs, i;
04039 # define IGNORE_FRAMES 1
04040   
04041   /* We retrieve NFRAMES+1 pc values, but discard the first, since it */
04042   /* points to our own frame.                                         */
04043 # ifdef REDIRECT_MALLOC
04044     if (GC_in_save_callers) {
04045       info[0].ci_pc = (word)(&GC_save_callers);
04046       for (i = 1; i < NFRAMES; ++i) info[i].ci_pc = 0;
04047       return;
04048     }
04049     GC_in_save_callers = TRUE;
04050 # endif
04051   GC_ASSERT(sizeof(struct callinfo) == sizeof(void *));
04052   npcs = backtrace((void **)tmp_info, NFRAMES + IGNORE_FRAMES);
04053   BCOPY(tmp_info+IGNORE_FRAMES, info, (npcs - IGNORE_FRAMES) * sizeof(void *));
04054   for (i = npcs - IGNORE_FRAMES; i < NFRAMES; ++i) info[i].ci_pc = 0;
04055 # ifdef REDIRECT_MALLOC
04056     GC_in_save_callers = FALSE;
04057 # endif
04058 }
04059 
04060 #else /* No builtin backtrace; do it ourselves */
04061 
04062 #if (defined(OPENBSD) || defined(NETBSD) || defined(FREEBSD)) && defined(SPARC)
04063 #  define FR_SAVFP fr_fp
04064 #  define FR_SAVPC fr_pc
04065 #else
04066 #  define FR_SAVFP fr_savfp
04067 #  define FR_SAVPC fr_savpc
04068 #endif
04069 
04070 #if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
04071 #   define BIAS 2047
04072 #else
04073 #   define BIAS 0
04074 #endif
04075 
04076 void GC_save_callers (info) 
04077 struct callinfo info[NFRAMES];
04078 {
04079   struct frame *frame;
04080   struct frame *fp;
04081   int nframes = 0;
04082 # ifdef I386
04083     /* We assume this is turned on only with gcc as the compiler. */
04084     asm("movl %%ebp,%0" : "=r"(frame));
04085     fp = frame;
04086 # else
04087     frame = (struct frame *) GC_save_regs_in_stack ();
04088     fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
04089 #endif
04090   
04091    for (; (!(fp HOTTER_THAN frame) && !(GC_stackbottom HOTTER_THAN (ptr_t)fp)
04092           && (nframes < NFRAMES));
04093        fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
04094       register int i;
04095       
04096       info[nframes].ci_pc = fp->FR_SAVPC;
04097 #     if NARGS > 0
04098         for (i = 0; i < NARGS; i++) {
04099          info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
04100         }
04101 #     endif /* NARGS > 0 */
04102   }
04103   if (nframes < NFRAMES) info[nframes].ci_pc = 0;
04104 }
04105 
04106 #endif /* No builtin backtrace */
04107 
04108 #endif /* SAVE_CALL_CHAIN */
04109 
04110 #ifdef NEED_CALLINFO
04111 
04112 /* Print info to stderr.  We do NOT hold the allocation lock */
04113 void GC_print_callers (info)
04114 struct callinfo info[NFRAMES];
04115 {
04116     register int i;
04117     static int reentry_count = 0;
04118     GC_bool stop = FALSE;
04119 
04120     /* FIXME: This should probably use a different lock, so that we   */
04121     /* become callable with or without the allocation lock.           */
04122     LOCK();
04123       ++reentry_count;
04124     UNLOCK();
04125     
04126 #   if NFRAMES == 1
04127       GC_err_printf0("\tCaller at allocation:\n");
04128 #   else
04129       GC_err_printf0("\tCall chain at allocation:\n");
04130 #   endif
04131     for (i = 0; i < NFRAMES && !stop ; i++) {
04132        if (info[i].ci_pc == 0) break;
04133 #      if NARGS > 0
04134        {
04135          int j;
04136 
04137          GC_err_printf0("\t\targs: ");
04138          for (j = 0; j < NARGS; j++) {
04139            if (j != 0) GC_err_printf0(", ");
04140            GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
04141                                    ~(info[i].ci_arg[j]));
04142          }
04143          GC_err_printf0("\n");
04144        }
04145 #      endif
04146         if (reentry_count > 1) {
04147            /* We were called during an allocation during       */
04148            /* a previous GC_print_callers call; punt.          */
04149            GC_err_printf1("\t\t##PC##= 0x%lx\n", info[i].ci_pc);
04150            continue;
04151        }
04152        {
04153 #        ifdef LINUX
04154            FILE *pipe;
04155 #        endif
04156 #        if defined(GC_HAVE_BUILTIN_BACKTRACE) \
04157             && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
04158            char **sym_name =
04159              backtrace_symbols((void **)(&(info[i].ci_pc)), 1);
04160            char *name = sym_name[0];
04161 #        else
04162            char buf[40];
04163            char *name = buf;
04164            sprintf(buf, "##PC##= 0x%lx", info[i].ci_pc);
04165 #        endif
04166 #        if defined(LINUX) && !defined(SMALL_CONFIG)
04167            /* Try for a line number. */
04168            {
04169 #              define EXE_SZ 100
04170               static char exe_name[EXE_SZ];
04171 #             define CMD_SZ 200
04172               char cmd_buf[CMD_SZ];
04173 #             define RESULT_SZ 200
04174               static char result_buf[RESULT_SZ];
04175               size_t result_len;
04176               char *old_preload;
04177 #             define PRELOAD_SZ 200
04178               char preload_buf[PRELOAD_SZ];
04179               static GC_bool found_exe_name = FALSE;
04180               static GC_bool will_fail = FALSE;
04181               int ret_code;
04182               /* Try to get it via a hairy and expensive scheme.      */
04183               /* First we get the name of the executable:             */
04184               if (will_fail) goto out;
04185               if (!found_exe_name) { 
04186                 ret_code = readlink("/proc/self/exe", exe_name, EXE_SZ);
04187                 if (ret_code < 0 || ret_code >= EXE_SZ
04188                     || exe_name[0] != '/') {
04189                   will_fail = TRUE;       /* Don't try again. */
04190                   goto out;
04191                 }
04192                 exe_name[ret_code] = '\0';
04193                 found_exe_name = TRUE;
04194               }
04195               /* Then we use popen to start addr2line -e <exe> <addr> */
04196               /* There are faster ways to do this, but hopefully this */
04197               /* isn't time critical.                                 */
04198               sprintf(cmd_buf, "/usr/bin/addr2line -f -e %s 0x%lx", exe_name,
04199                              (unsigned long)info[i].ci_pc);
04200               old_preload = getenv ("LD_PRELOAD");
04201                if (0 != old_preload) {
04202                 if (strlen (old_preload) >= PRELOAD_SZ) {
04203                   will_fail = TRUE;
04204                   goto out;
04205                 }
04206                 strcpy (preload_buf, old_preload);
04207                 unsetenv ("LD_PRELOAD");
04208                }
04209               pipe = popen(cmd_buf, "r");
04210               if (0 != old_preload
04211                   && 0 != setenv ("LD_PRELOAD", preload_buf, 0)) {
04212                 WARN("Failed to reset LD_PRELOAD\n", 0);
04213               }
04214               if (pipe == NULL
04215                   || (result_len = fread(result_buf, 1, RESULT_SZ - 1, pipe))
04216                      == 0) {
04217                 if (pipe != NULL) pclose(pipe);
04218                 will_fail = TRUE;
04219                 goto out;
04220               }
04221               if (result_buf[result_len - 1] == '\n') --result_len;
04222               result_buf[result_len] = 0;
04223               if (result_buf[0] == '?'
04224                   || (result_buf[result_len-2] == ':'
04225                       && result_buf[result_len-1] == '0')) {
04226                   pclose(pipe);
04227                   goto out;
04228               }
04229               /* Get rid of embedded newline, if any.  Test for "main" */
04230               {
04231                  char * nl = strchr(result_buf, '\n');
04232                  if (nl != NULL && nl < result_buf + result_len) {
04233                    *nl = ':';
04234                  }
04235                  if (strncmp(result_buf, "main", nl == 0 ? result_len : (size_t)(nl - result_buf)) == 0) {
04236                    stop = TRUE;
04237                  }
04238               }
04239               if (result_len < RESULT_SZ - 25) {
04240                 /* Add in hex address     */
04241                   sprintf(result_buf + result_len, " [0x%lx]",
04242                        (unsigned long)info[i].ci_pc);
04243               }
04244               name = result_buf;
04245               pclose(pipe);
04246               out:;
04247            }
04248 #        endif /* LINUX */
04249          GC_err_printf1("\t\t%s\n", name);
04250 #        if defined(GC_HAVE_BUILTIN_BACKTRACE) \
04251             && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
04252            free(sym_name);  /* May call GC_free; that's OK */
04253 #         endif
04254        }
04255     }
04256     LOCK();
04257       --reentry_count;
04258     UNLOCK();
04259 }
04260 
04261 #endif /* NEED_CALLINFO */
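
/* Illustrative use of the call-chain machinery above (a sketch; the  */
/* actual callers are in the collector's debugging allocation code):  */
#if 0
    static void example_print_call_chain(void)
    {
        struct callinfo ci[NFRAMES];

        GC_save_callers(ci);    /* Record up to NFRAMES return PCs.    */
        GC_print_callers(ci);   /* Symbolize and write them to stderr. */
    }
#endif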
04262 
04263 
04264 
04265 #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
04266 
04267 /* Dump /proc/self/maps to GC_stderr, to enable looking up names for
04268    addresses in FIND_LEAK output. */
04269 
04270 static word dump_maps(char *maps)
04271 {
04272     GC_err_write(maps, strlen(maps));
04273     return 1;
04274 }
04275 
04276 void GC_print_address_map()
04277 {
04278     GC_err_printf0("---------- Begin address map ----------\n");
04279     GC_apply_to_maps(dump_maps);
04280     GC_err_printf0("---------- End address map ----------\n");
04281 }
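
/* Each line written by GC_print_address_map comes straight from      */
/* /proc/self/maps; a typical entry looks something like (fields may  */
/* vary by kernel version):                                           */
/*   08048000-080c6000 r-xp 00000000 03:01 65538    /usr/bin/foo      */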
04282 
04283 #endif
04284 
04285