
tor  0.2.3.19-rc
mempool.h File Reference

Headers for mempool.c.


Defines

#define MEMPOOL_STATS

Typedefs

typedef struct mp_pool_t mp_pool_t
 A memory pool is a context in which a large number of fixed-sized objects can be allocated efficiently.

Functions

void * mp_pool_get (mp_pool_t *pool)
 Return a newly allocated item from pool.
void mp_pool_release (void *item)
 Return an allocated memory item to its memory pool.
mp_pool_t * mp_pool_new (size_t item_size, size_t chunk_capacity)
 Allocate a new memory pool to hold items of size item_size.
void mp_pool_clean (mp_pool_t *pool, int n_to_keep, int keep_recently_used)
 If there are more than n_to_keep empty chunks in pool, free the excess ones that have been empty for the longest.
void mp_pool_destroy (mp_pool_t *pool)
 Free all space held in pool. This makes all pointers returned from mp_pool_get(pool) invalid.
void mp_pool_assert_ok (mp_pool_t *pool)
 Fail with an assertion if pool is not internally consistent.
void mp_pool_log_status (mp_pool_t *pool, int severity)
 Dump information about pool's memory usage to the Tor log at level severity.

Detailed Description

Headers for mempool.c.

Definition in file mempool.h.


Define Documentation

#define MEMPOOL_STATS

If defined, mempool.c keeps usage statistics (total items allocated, total chunks allocated, and total chunks freed) that mp_pool_log_status() reports.

Definition at line 25 of file mempool.h.


Typedef Documentation

typedef struct mp_pool_t mp_pool_t

A memory pool is a context in which a large number of fixed-sized objects can be allocated efficiently.

See mempool.c for implementation details.

Definition at line 15 of file mempool.h.
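
A minimal lifecycle sketch using the functions documented below; the cell_t type, the function name, and the sizes are illustrative, not taken from Tor:

#include "mempool.h"

/* Illustrative fixed-size item type (hypothetical). */
typedef struct cell_t { char body[512]; } cell_t;

void
cell_pool_example(void)
{
  /* One pool of fixed-size items, aiming for roughly 64 KiB per chunk. */
  mp_pool_t *pool = mp_pool_new(sizeof(cell_t), 64 * 1024);

  cell_t *cell = mp_pool_get(pool);  /* Allocate one item from the pool. */
  /* ... use cell ... */
  mp_pool_release(cell);             /* Hand it back; no pool argument needed. */

  mp_pool_assert_ok(pool);           /* Optional internal-consistency check. */
  mp_pool_destroy(pool);             /* Frees all chunks; every item becomes invalid. */
}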


Function Documentation

void mp_pool_assert_ok (mp_pool_t *pool)

Fail with an assertion if pool is not internally consistent.

Definition at line 568 of file mempool.c.

{
  int n_empty;

  n_empty = assert_chunks_ok(pool, pool->empty_chunks, 1, 0);
  assert_chunks_ok(pool, pool->full_chunks, 0, 1);
  assert_chunks_ok(pool, pool->used_chunks, 0, 0);

  ASSERT(pool->n_empty_chunks == n_empty);
}

void mp_pool_clean (mp_pool_t *pool, int n_to_keep, int keep_recently_used)

If there are more than n_to_keep empty chunks in pool, free the excess ones that have been empty for the longest.

If keep_recently_used is true, do not free chunks unless they have been empty since the last call to this function.

Definition at line 460 of file mempool.c.

{
  mp_chunk_t *chunk, **first_to_free;

  mp_pool_sort_used_chunks(pool);
  ASSERT(n_to_keep >= 0);

  if (keep_recently_used) {
    int n_recently_used = pool->n_empty_chunks - pool->min_empty_chunks;
    if (n_to_keep < n_recently_used)
      n_to_keep = n_recently_used;
  }

  ASSERT(n_to_keep >= 0);

  first_to_free = &pool->empty_chunks;
  while (*first_to_free && n_to_keep > 0) {
    first_to_free = &(*first_to_free)->next;
    --n_to_keep;
  }
  if (!*first_to_free) {
    pool->min_empty_chunks = pool->n_empty_chunks;
    return;
  }

  chunk = *first_to_free;
  while (chunk) {
    mp_chunk_t *next = chunk->next;
    chunk->magic = 0xdeadbeef;
    FREE(chunk);
#ifdef MEMPOOL_STATS
    ++pool->total_chunks_freed;
#endif
    --pool->n_empty_chunks;
    chunk = next;
  }

  pool->min_empty_chunks = pool->n_empty_chunks;
  *first_to_free = NULL;
}
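
A hedged usage sketch: a periodic housekeeping hook that trims the pool back toward a small reserve of empty chunks, sparing anything emptied since the previous call. The function and pool names are hypothetical:

/* Hypothetical periodic cleanup hook. */
static void
trim_cell_pool(mp_pool_t *cell_pool)
{
  /* Keep at most 4 empty chunks, but never free a chunk that has not
   * been empty since the last call (keep_recently_used == 1). */
  mp_pool_clean(cell_pool, 4, 1);
}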

void mp_pool_destroy (mp_pool_t *pool)

Free all space held in pool. This makes all pointers returned from mp_pool_get(pool) invalid.

Definition at line 517 of file mempool.c.

{
  destroy_chunks(pool->empty_chunks);
  destroy_chunks(pool->used_chunks);
  destroy_chunks(pool->full_chunks);
  memset(pool, 0xe0, sizeof(mp_pool_t));
  FREE(pool);
}

void * mp_pool_get (mp_pool_t *pool)

Return a newly allocated item from pool.

Definition at line 207 of file mempool.c.

{
  mp_chunk_t *chunk;
  mp_allocated_t *allocated;

  if (PREDICT_LIKELY(pool->used_chunks != NULL)) {
    /* Common case: there is some chunk that is neither full nor empty.  Use
     * that one. (We can't use the full ones, obviously, and we should fill
     * up the used ones before we start on any empty ones.) */
    chunk = pool->used_chunks;

  } else if (pool->empty_chunks) {
    /* We have no used chunks, but we have an empty chunk that we haven't
     * freed yet: use that.  (We pull from the front of the list, which should
     * get us the most recently emptied chunk.) */
    chunk = pool->empty_chunks;

    /* Remove the chunk from the empty list. */
    pool->empty_chunks = chunk->next;
    if (chunk->next)
      chunk->next->prev = NULL;

    /* Put the chunk on the 'used' list. */
    add_newly_used_chunk_to_used_list(pool, chunk);

    ASSERT(!chunk->prev);
    --pool->n_empty_chunks;
    if (pool->n_empty_chunks < pool->min_empty_chunks)
      pool->min_empty_chunks = pool->n_empty_chunks;
  } else {
    /* We have no used or empty chunks: allocate a new chunk. */
    chunk = mp_chunk_new(pool);
    CHECK_ALLOC(chunk);

    /* Add the new chunk to the used list. */
    add_newly_used_chunk_to_used_list(pool, chunk);
  }

  ASSERT(chunk->n_allocated < chunk->capacity);

  if (chunk->first_free) {
    /* If there's anything on the chunk's freelist, unlink it and use it. */
    allocated = chunk->first_free;
    chunk->first_free = allocated->u.next_free;
    allocated->u.next_free = NULL; /* For debugging; not really needed. */
    ASSERT(allocated->in_chunk == chunk);
  } else {
    /* Otherwise, the chunk had better have some free space left on it. */
    ASSERT(chunk->next_mem + pool->item_alloc_size <=
           chunk->mem + chunk->mem_size);

    /* Good, it did.  Let's carve off a bit of that free space, and use
     * that. */
    allocated = (void*)chunk->next_mem;
    chunk->next_mem += pool->item_alloc_size;
    allocated->in_chunk = chunk;
    allocated->u.next_free = NULL; /* For debugging; not really needed. */
  }

  ++chunk->n_allocated;
#ifdef MEMPOOL_STATS
  ++pool->total_items_allocated;
#endif

  if (PREDICT_UNLIKELY(chunk->n_allocated == chunk->capacity)) {
    /* This chunk just became full. */
    ASSERT(chunk == pool->used_chunks);
    ASSERT(chunk->prev == NULL);

    /* Take it off the used list. */
    pool->used_chunks = chunk->next;
    if (chunk->next)
      chunk->next->prev = NULL;

    /* Put it on the full list. */
    chunk->next = pool->full_chunks;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->full_chunks = chunk;
  }
  /* And return the memory portion of the mp_allocated_t. */
  return A2M(allocated);
}
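
The PREDICT_LIKELY/PREDICT_UNLIKELY annotations above are branch-prediction hints that keep the common case (a partially used chunk at the head of used_chunks) on the fall-through path. On GCC-compatible compilers they are, roughly, thin wrappers around __builtin_expect; compat.h has the authoritative definitions:

/* Approximate definitions; see compat.h for the real ones. */
#define PREDICT_LIKELY(exp)   __builtin_expect(!!(exp), 1)
#define PREDICT_UNLIKELY(exp) __builtin_expect(!!(exp), 0)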

void mp_pool_log_status (mp_pool_t *pool, int severity)

Dump information about pool's memory usage to the Tor log at level severity.

Definition at line 584 of file mempool.c.

{
  uint64_t bytes_used = 0;
  uint64_t bytes_allocated = 0;
  uint64_t bu = 0, ba = 0;
  mp_chunk_t *chunk;
  int n_full = 0, n_used = 0;

  ASSERT(pool);

  for (chunk = pool->empty_chunks; chunk; chunk = chunk->next) {
    bytes_allocated += chunk->mem_size;
  }
  log_fn(severity, LD_MM, U64_FORMAT" bytes in %d empty chunks",
         U64_PRINTF_ARG(bytes_allocated), pool->n_empty_chunks);
  for (chunk = pool->used_chunks; chunk; chunk = chunk->next) {
    ++n_used;
    bu += chunk->n_allocated * pool->item_alloc_size;
    ba += chunk->mem_size;
    log_fn(severity, LD_MM, "   used chunk: %d items allocated",
           chunk->n_allocated);
  }
  log_fn(severity, LD_MM, U64_FORMAT"/"U64_FORMAT
         " bytes in %d partially full chunks",
         U64_PRINTF_ARG(bu), U64_PRINTF_ARG(ba), n_used);
  bytes_used += bu;
  bytes_allocated += ba;
  bu = ba = 0;
  for (chunk = pool->full_chunks; chunk; chunk = chunk->next) {
    ++n_full;
    bu += chunk->n_allocated * pool->item_alloc_size;
    ba += chunk->mem_size;
  }
  log_fn(severity, LD_MM, U64_FORMAT"/"U64_FORMAT
         " bytes in %d full chunks",
         U64_PRINTF_ARG(bu), U64_PRINTF_ARG(ba), n_full);
  bytes_used += bu;
  bytes_allocated += ba;

  log_fn(severity, LD_MM, "Total: "U64_FORMAT"/"U64_FORMAT" bytes allocated "
         "for cell pools are full.",
         U64_PRINTF_ARG(bytes_used), U64_PRINTF_ARG(bytes_allocated));

#ifdef MEMPOOL_STATS
  log_fn(severity, LD_MM, U64_FORMAT" cell allocations ever; "
         U64_FORMAT" chunk allocations ever; "
         U64_FORMAT" chunk frees ever.",
         U64_PRINTF_ARG(pool->total_items_allocated),
         U64_PRINTF_ARG(pool->total_chunks_allocated),
         U64_PRINTF_ARG(pool->total_chunks_freed));
#endif
}
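
A hedged usage sketch; LOG_NOTICE is one of Tor's standard log severities, and the pool variable is hypothetical:

/* Dump usage statistics for a (hypothetical) cell pool at notice level. */
mp_pool_log_status(cell_pool, LOG_NOTICE);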

mp_pool_t * mp_pool_new (size_t item_size, size_t chunk_capacity)

Allocate a new memory pool to hold items of size item_size.

We'll try to fit about chunk_capacity bytes in each chunk.

Definition at line 354 of file mempool.c.

{
  mp_pool_t *pool;
  size_t alloc_size, new_chunk_cap;

  tor_assert(item_size < SIZE_T_CEILING);
  tor_assert(chunk_capacity < SIZE_T_CEILING);
  tor_assert(SIZE_T_CEILING / item_size > chunk_capacity);

  pool = ALLOC(sizeof(mp_pool_t));
  CHECK_ALLOC(pool);
  memset(pool, 0, sizeof(mp_pool_t));

  /* First, we figure out how much space to allow per item.  We'll want to
   * make sure we have enough for the overhead plus the item size. */
  alloc_size = (size_t)(STRUCT_OFFSET(mp_allocated_t, u.mem) + item_size);
  /* If the item_size is less than sizeof(next_free), we need to make
   * the allocation bigger. */
  if (alloc_size < sizeof(mp_allocated_t))
    alloc_size = sizeof(mp_allocated_t);

  /* If we're not an even multiple of ALIGNMENT, round up. */
  if (alloc_size % ALIGNMENT) {
    alloc_size = alloc_size + ALIGNMENT - (alloc_size % ALIGNMENT);
  }
  if (alloc_size < ALIGNMENT)
    alloc_size = ALIGNMENT;
  ASSERT((alloc_size % ALIGNMENT) == 0);

  /* Now we figure out how many items fit in each chunk.  We need to fit at
   * least 2 items per chunk. No chunk can be more than MAX_CHUNK bytes long,
   * or less than MIN_CHUNK. */
  if (chunk_capacity > MAX_CHUNK)
    chunk_capacity = MAX_CHUNK;
  /* Try to be around a power of 2 in size, since that's what allocators like
   * handing out. 512K-1 byte is a lot better than 512K+1 byte. */
  chunk_capacity = (size_t) round_to_power_of_2(chunk_capacity);
  while (chunk_capacity < alloc_size * 2 + CHUNK_OVERHEAD)
    chunk_capacity *= 2;
  if (chunk_capacity < MIN_CHUNK)
    chunk_capacity = MIN_CHUNK;

  new_chunk_cap = (chunk_capacity-CHUNK_OVERHEAD) / alloc_size;
  tor_assert(new_chunk_cap < INT_MAX);
  pool->new_chunk_capacity = (int)new_chunk_cap;

  pool->item_alloc_size = alloc_size;

  log_debug(LD_MM, "Capacity is %lu, item size is %lu, alloc size is %lu",
            (unsigned long)pool->new_chunk_capacity,
            (unsigned long)pool->item_alloc_size,
            (unsigned long)(pool->new_chunk_capacity*pool->item_alloc_size));

  return pool;
}
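
As a worked example of the sizing arithmetic, assume a 64-bit build where the mp_allocated_t header occupies 8 bytes before u.mem and ALIGNMENT is 8 (both assumptions; the exact figures depend on the platform and on CHUNK_OVERHEAD):

/* Hypothetical sizing for mp_pool_new(512, 64*1024), assuming an 8-byte
 * per-item header and ALIGNMENT == 8:
 *
 *   alloc_size      = 8 (header) + 512 (item) = 520 bytes
 *   520 % 8 == 0, so no alignment padding is added.
 *   chunk_capacity  = round_to_power_of_2(65536) = 65536 bytes
 *   new_chunk_cap  ~= (65536 - CHUNK_OVERHEAD) / 520, roughly 125 items.
 */
mp_pool_t *pool = mp_pool_new(512, 64 * 1024);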

void mp_pool_release (void *item)

Return an allocated memory item to its memory pool.

Definition at line 293 of file mempool.c.

{
  mp_allocated_t *allocated = (void*) M2A(item);
  mp_chunk_t *chunk = allocated->in_chunk;

  ASSERT(chunk);
  ASSERT(chunk->magic == MP_CHUNK_MAGIC);
  ASSERT(chunk->n_allocated > 0);

  allocated->u.next_free = chunk->first_free;
  chunk->first_free = allocated;

  if (PREDICT_UNLIKELY(chunk->n_allocated == chunk->capacity)) {
    /* This chunk was full and is about to be used. */
    mp_pool_t *pool = chunk->pool;
    /* unlink from the full list  */
    if (chunk->prev)
      chunk->prev->next = chunk->next;
    if (chunk->next)
      chunk->next->prev = chunk->prev;
    if (chunk == pool->full_chunks)
      pool->full_chunks = chunk->next;

    /* link to the used list. */
    chunk->next = pool->used_chunks;
    chunk->prev = NULL;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->used_chunks = chunk;
  } else if (PREDICT_UNLIKELY(chunk->n_allocated == 1)) {
    /* This was used and is about to be empty. */
    mp_pool_t *pool = chunk->pool;

    /* Unlink from the used list */
    if (chunk->prev)
      chunk->prev->next = chunk->next;
    if (chunk->next)
      chunk->next->prev = chunk->prev;
    if (chunk == pool->used_chunks)
      pool->used_chunks = chunk->next;

    /* Link to the empty list */
    chunk->next = pool->empty_chunks;
    chunk->prev = NULL;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->empty_chunks = chunk;

    /* Reset the guts of this chunk to defragment it, in case it gets
     * used again. */
    chunk->first_free = NULL;
    chunk->next_mem = chunk->mem;

    ++pool->n_empty_chunks;
  }
  --chunk->n_allocated;
}
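
mp_pool_release() needs no pool argument because every item returned by mp_pool_get() is the u.mem member of an mp_allocated_t header that records its chunk; M2A() steps back from the user pointer to that header, and A2M() goes the other way. A minimal sketch of the idea, with names mirroring mempool.c but not authoritative:

#include <stddef.h>  /* offsetof */

/* Sketch of the per-item header; see mempool.c for the real definition. */
typedef struct mp_allocated_t {
  struct mp_chunk_t *in_chunk;        /* Chunk this item was carved from. */
  union {
    struct mp_allocated_t *next_free; /* When free: next item on the freelist. */
    char mem[1];                      /* When in use: start of the user's memory. */
  } u;
} mp_allocated_t;

/* Header <-> user-pointer conversions. */
#define A2M(a) (&(a)->u.mem[0])
#define M2A(p) ((mp_allocated_t*)(((char*)(p)) - offsetof(mp_allocated_t, u.mem)))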
