
plt-scheme  4.2.1
alloc_cache.c
/* 
   Provides:
      vm_malloc_pages --- usual interface
      vm_free_pages --- usual interface
      vm_flush_freed_pages --- usual interface
   Requires (defined earlier):
      page_size --- in bytes
      my_qsort --- possibly from my_qsort.c
*/
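
/* Also assumed to be defined earlier, judging from their uses below:
      FreeBlock --- cache entry with start, len, age, and zeroed fields
      BLOCKFREE_CACHE_SIZE --- number of entries in the freeblocks array
      VM --- allocator state carrying a freeblocks array
      vm_memory_allocated_inc/dec --- accounting for mapped memory
   This note is only a reading aid; the authoritative definitions appear
   in the source that precedes or includes this file. */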

/* interface to GC */
/*
static void *vm_malloc_pages(size_t len, size_t alignment, int dirty_ok);
static void vm_free_pages(void *p, size_t len);
static void vm_flush_freed_pages(void);
static void vm_protect_pages(void *p, size_t len, int writable);
*/

/* interface to OS */
/*
static void os_vm_free_pages(void *p, size_t len);
static void *os_vm_alloc_pages(size_t len);
*/
#define BLOCKFREE_UNMAP_AGE 1

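/* Comparator for my_qsort: orders cache entries by start address, so
   that entries covering adjacent address ranges end up next to each
   other (see alloc_cache_collapse_pages below). */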
static int free_block_compare(const void *a, const void *b)
{
  if ((unsigned long)((FreeBlock *)a)->start < (unsigned long)((FreeBlock *)b)->start)
    return -1;
  else
    return 1;
}

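/* Merge cache entries that describe adjacent address ranges: after
   sorting by start address, any entry that begins exactly where the
   previous block ends is folded into it, and its own slot is emptied
   (start = NULL, len = 0). The merged block counts as zeroed only if
   both pieces were zeroed. */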
static void alloc_cache_collapse_pages(FreeBlock *blockfree)
{
  int i;
  int j;

  /* sort by FreeBlock->start */
  my_qsort(blockfree, BLOCKFREE_CACHE_SIZE, sizeof(FreeBlock), free_block_compare);

  /* collapse adjacent: */
  j = 0;
  for (i = 1; i < BLOCKFREE_CACHE_SIZE; i++) {
    if ((blockfree[j].start + blockfree[j].len) == blockfree[i].start) {
      blockfree[j].len += blockfree[i].len;
      blockfree[i].start = NULL;
      blockfree[i].len = 0;
      if (!blockfree[i].zeroed)
        blockfree[j].zeroed = 0;
    } else
      j = i;
  }
}

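/* Search the cache for a block satisfying len and alignment. The first
   pass looks for an exact-length block with a suitable start; the
   second carves len bytes out of a larger block, trying the start and
   then the end. The test ((unsigned long)r & (alignment - 1)) assumes
   alignment is zero or a power of two; for example, with alignment
   0x10000 a start of 0x520000 passes and 0x528000 does not. Unless the
   caller accepts dirty memory, a block not known to be zeroed is
   cleared before it is returned. Returns NULL when nothing in the
   cache fits. */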
inline static void *alloc_cache_find_pages(FreeBlock *blockfree, size_t len, size_t alignment, int dirty_ok)
{
  int i;
  void *r;

  /* Try an exact fit: */
  for (i = 0; i < BLOCKFREE_CACHE_SIZE; i++) {
    if (blockfree[i].len == len) {
      r = blockfree[i].start;
      if (!alignment || !((unsigned long)r & (alignment - 1))) {
        blockfree[i].start = NULL;
        blockfree[i].len = 0;
        if (!blockfree[i].zeroed && !dirty_ok)
          memset(r, 0, len);
        return r;
      }
    }
  }

  /* Try a first fit: */
  for (i = 0; i < BLOCKFREE_CACHE_SIZE; i++) {
    if (blockfree[i].len > len) {
      /* Align at start? */
      r = blockfree[i].start;
      if (!alignment || !((unsigned long)r & (alignment - 1))) {
        blockfree[i].start += len;
        blockfree[i].len -= len;
        if (!blockfree[i].zeroed && !dirty_ok)
          memset(r, 0, len);
        return r;
      }

      /* Align at end? */
      r = blockfree[i].start + (blockfree[i].len - len);
      if (!((unsigned long)r & (alignment - 1))) {
        blockfree[i].len -= len;
        if (!blockfree[i].zeroed && !dirty_ok)
          memset(r, 0, len);
        return r;
      }

      /* We don't try a middle alignment, because that would
         split the block into three. */
    }
  }

  /* Nothing usable in the cache... */
  return NULL;
}

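/* Put a block of freed pages back into the cache. The length is
   rounded up to a page multiple; then we try to graft the block onto
   the end or the front of an existing cached block (only entries
   currently under 1 MB are extended), then to claim an empty slot.
   If the cache is full, collapse adjacent entries so the next call has
   a better chance, and hand these pages straight back to the OS. */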
static void alloc_cache_return_mem(VM *vm, void *p, size_t len, int zeroed)
{
  int i;
  FreeBlock *blockfree = vm->freeblocks;

  /* Round up to nearest page: */
  if (len & (page_size - 1))
    len += page_size - (len & (page_size - 1));

  /* Try to free pages in larger blocks, since the OS may be slow. */

  for (i = 0; i < BLOCKFREE_CACHE_SIZE; i++)
    if(blockfree[i].start && (blockfree[i].len < (1024 * 1024))) {
      if (p == blockfree[i].start + blockfree[i].len) {
        blockfree[i].len += len;
        if (!zeroed)
          blockfree[i].zeroed = 0;
        return;
      }
      if (p + len == blockfree[i].start) {
        blockfree[i].start = p;
        blockfree[i].len += len;
        if (!zeroed)
          blockfree[i].zeroed = 0;
        return;
      }
    }

  for (i = 0; i < BLOCKFREE_CACHE_SIZE; i++) {
    if (!blockfree[i].start) {
      blockfree[i].start = p;
      blockfree[i].len = len;
      blockfree[i].age = 0;
      blockfree[i].zeroed = zeroed;
      return;
    }
  }

  /* Might help next time around: */
  alloc_cache_collapse_pages(blockfree);

  os_vm_free_pages(p, len);

  vm_memory_allocated_dec(vm, len);
}

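/* GC-facing free: the pages go into the cache rather than back to the
   OS, and they are recorded as dirty (zeroed = 0) because nothing is
   known about their contents at this point. */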
static void vm_free_pages(VM *vm, void *p, size_t len)
{
  alloc_cache_return_mem(vm, p, len, 0);
}

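/* Called once per collection cycle (see the comment below): cached
   blocks that have already survived BLOCKFREE_UNMAP_AGE cycles are
   returned to the OS and deducted from the memory-use count; younger
   blocks just have their age bumped. */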
static void vm_flush_freed_pages(VM *vm)
{
  int i;
  FreeBlock *blockfree = vm->freeblocks;

  alloc_cache_collapse_pages(blockfree);

  for (i = 0; i < BLOCKFREE_CACHE_SIZE; i++) {
    if (blockfree[i].start) {
      if (blockfree[i].age == BLOCKFREE_UNMAP_AGE) {
        os_vm_free_pages(blockfree[i].start, blockfree[i].len);
        vm_memory_allocated_dec(vm, blockfree[i].len);
        blockfree[i].start = NULL;
        blockfree[i].len = 0;
      } else
        blockfree[i].age++;
    }
  }
}

/* Instead of immediately freeing pages with munmap---only to mmap
   them again---we cache BLOCKFREE_CACHE_SIZE freed pages. A page is
   cached unused for at most BLOCKFREE_UNMAP_AGE cycles of the
   collector. (A max age of 1 seems useful, anything more seems
   dangerous.)

   The cache is small enough that we don't need an elaborate search
   mechanism, but we do a bit of work to collapse adjacent pages in
   the cache. */

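/* GC-facing allocation. The length is rounded up to a page multiple
   and the cache is consulted first. On a cache miss we over-allocate
   by alignment bytes from the OS and trim the slack: any unaligned
   prefix is unmapped, and the leftover suffix is either cached (when
   the OS result was already aligned) or unmapped. */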
static void *vm_malloc_pages(VM *vm, size_t len, size_t alignment, int dirty_ok)
{
  void *r;
  FreeBlock *blockfree = vm->freeblocks;

  if (!page_size)
    page_size = getpagesize();

  /* Round up to nearest page: */
  if (len & (page_size - 1))
    len += page_size - (len & (page_size - 1));

  /* Something from the cache, perhaps? */
  r = alloc_cache_find_pages(blockfree, len, alignment, dirty_ok);
  if(!r) {
    /* attempt to allocate from OS */
    r = os_vm_alloc_pages(len + alignment);
    if(r == (void *)-1) { return NULL; }

    if (alignment) {
      /* We allocated extra space so that we can choose the alignment: */
      size_t extra    = alignment;
      void *real_r    = (void *)(((unsigned long)r + (alignment - 1)) & (~(alignment - 1)));
      long pre_extra  = real_r - r;

      /* Free any extra space in front of the aligned start: */
      if (pre_extra) { os_vm_free_pages(r, pre_extra); }
      /* Deal with any extra space after the requested length: */
      if (pre_extra < extra) {
        if (pre_extra == 0) {
          /* Instead of actually unmapping, put it in the cache, and there's
             a good chance we can use it next time: */
          vm_memory_allocated_inc(vm, extra);
          alloc_cache_return_mem(vm, real_r + len, extra, 1);
        }
        else { os_vm_free_pages(real_r + len, extra - pre_extra); }
      }
      r = real_r;
    }

    vm_memory_allocated_inc(vm, len);
  }

  return r;
}
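
/* A hypothetical caller (not part of this file) might drive the cache
   roughly as follows; the vm handle and the sizes here are assumptions
   for illustration only:

     void *p = vm_malloc_pages(vm, 5000, 0, 0);
         -- len is rounded up to a page multiple; no alignment is
            requested, and dirty_ok = 0 means the memory comes back
            zero-filled
     vm_free_pages(vm, p, 5000);
         -- the pages land in the free-block cache instead of being
            unmapped immediately
     vm_flush_freed_pages(vm);
         -- once per collection cycle; cached blocks older than
            BLOCKFREE_UNMAP_AGE are returned to the OS
*/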