--- /dev/null
+#define SLAB_MAX_IDX 12
+
+#define SLAB_ALLOC_SIZE (8192 * 16)
+#define SLAB_MAX_SLAB_SIZE 16384
+//#define SLAB_USE_MMAP_DIRECTLY
+#define SLAB_MAIN
+
+#define NDEBUG
+
+#include "postgres.h"
+#include "utils/memutils.h"
+#include "slab.h"
+
+#include "ilist.h"
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <sys/mman.h>
+
+#define likely(x) __builtin_expect(!!(x),1)
+#define unlikely(x) __builtin_expect(!!(x),0)
+
+struct SlabGlobalData;
+typedef struct SlabGlobalData SlabGlobalData;
+struct AllocSlabContext;
+typedef struct AllocSlabContext AllocSlabContext;
+struct AllocSlabBlock;
+typedef struct AllocSlabBlock AllocSlabBlock;
+struct AllocSlabChunk;
+typedef struct AllocSlabChunk AllocSlabChunk;
+
+struct SlabGlobalData{
+ ilist_head freelist;
+
+ uint32_t num_free_blocks;
+ const uint32_t max_free_blocks;
+ const size_t slab_idx_to_size[SLAB_MAX_IDX];
+ const size_t slab_idx_to_count[SLAB_MAX_IDX];
+};
+
+struct AllocSlabContext{
+ MemoryContextData header;
+
+ /*
+ * One list of blocks with free space per slab size.
+ *
+ * A block is taken off its list as soon as it has no free space left.
+ *
+ * XXX: As an optimization we could move a block to the front of its list
+ * whenever one of its chunks is freed, as its caches are likely hot, but
+ * the cost of updating the list pointers of the adjacent blocks would
+ * likely cancel out any benefit.
+ */
+ ilist_head slab_idx[SLAB_MAX_IDX];
+
+ /*
+ * A list of blocks that either have no free space left or are used for
+ * large (over-sized) allocations
+ */
+ ilist_head full_blocks_head;
+};
+
+enum AllocSlabBlockFlags{
+ /* block is currently on the context's full_blocks_head list */
+ SLAB_BLOCK_ON_FULLLIST = 1<<0,
+ /* block holds a single over-sized allocation, not slab chunks */
+ SLAB_BLOCK_IS_LARGE_ALLOC = 1<<1,
+};
+
+struct AllocSlabBlock{
+ /*
+ * When != NULL the block belongs to this context. It may remain unset if
+ * the block is merely held in reserve for future use.
+ */
+ AllocSlabContext* context;
+
+ /*
+ * The size of the actually allocated memory per chunk, i.e.
+ * data size + sizeof(AllocSlabChunk) + alignment padding.
+ * As a consequence every chunk's data pointer is properly aligned.
+ */
+ size_t element_size;
+
+ /*
+ * Flag bits from AllocSlabBlockFlags describing the block's state.
+ */
+ size_t flags;
+
+ /*
+ * Our slab index. Needed to put the block back on the list of blocks
+ * with free space when one of its chunks is freed.
+ */
+ size_t slab_idx;
+
+ /* number of chunks currently handed out from this block */
+ uint32 num_allocated;
+
+ /* maximum number of chunks that fit on this block */
+ uint32 max_allocated;
+
+ /* ----
+ * Head of the list of chunks that have been freed and can be recycled.
+ * A freshly initialized block does *not* have its chunks on this
+ * freelist; the next never-used chunk is instead found via
+ * num_allocated (chunks are handed out sequentially from data[]).
+ * This avoids walking the whole block and trashing the CPU cache
+ * upon initialization.
+ * ----
+ */
+ AllocSlabChunk* chunk_freelist;
+
+ /*
+ * This node links the block into a list: enough information is thereby
+ * maintained to free all blocks when a context is reset or deleted. The
+ * same node is used to keep unused blocks on a global freelist, avoiding
+ * constant deallocation/allocation upon frequent resets.
+ */
+ ilist_node block_node;
+
+ /*
+ * The chunk data itself: the block's reason for existence.
+ */
+ char data[] __attribute__ ((aligned (MAXIMUM_ALIGNOF)));
+};
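+
+/* ----
+ * Block layout (a sketch of the design above, not a new invariant):
+ *
+ * [AllocSlabBlock header][cell 0][cell 1] ... [cell max_allocated-1][tail]
+ *
+ * Each cell is element_size bytes and starts with an AllocSlabChunk header
+ * followed by the user data. Cells are handed out sequentially via
+ * num_allocated until every cell has been touched once; freed cells are
+ * recycled through chunk_freelist from then on.
+ * ----
+ */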
+
+struct AllocSlabChunk {
+ union{
+ /* while allocated: the block this chunk belongs to */
+ AllocSlabBlock* block;
+ /* while free: the next chunk on the block's chunk_freelist */
+ AllocSlabChunk* next_chunk;
+ };
+ /* we might need this again to be compatible with multiple context types */
+ /*StandardChunkHeader header;*/
+
+ /*
+ * FIXME: Use a more general mechanism to enforce alignment
+ */
+ char data[] __attribute__ ((aligned (MAXIMUM_ALIGNOF)));
+};
+
+#define SLAB_COUNT(size) ((SLAB_ALLOC_SIZE - offsetof(AllocSlabBlock, data))/((size) + sizeof(AllocSlabChunk)))
+SlabGlobalData slab_global_data = {
+ /* .freelist is initialized at runtime by slab_init_global() */
+ .num_free_blocks = 0,
+ .max_free_blocks = 1024,//FIXME
+ .slab_idx_to_size = {8, //SIZE_8
+ 16, //SIZE_16
+ 32, //SIZE_32
+ 64, //SIZE_64
+ 128, //SIZE_128
+ 256, //SIZE_256
+ 512, //SIZE_512
+ 1024, //SIZE_1024
+ 2048, //SIZE_2048
+ 4096, //SIZE_4096
+ 8192, //SIZE_8192
+ 16384, //SIZE_16384
+// 32768, //SIZE_32768
+// 18, //LIST_CELL, list.h
+ },
+ .slab_idx_to_count = {SLAB_COUNT(8), //SIZE_8
+ SLAB_COUNT(16), //SIZE_16
+ SLAB_COUNT(32), //SIZE_32
+ SLAB_COUNT(64), //SIZE_64
+ SLAB_COUNT(128), //SIZE_128
+ SLAB_COUNT(256), //SIZE_256
+ SLAB_COUNT(512), //SIZE_512
+ SLAB_COUNT(1024), //SIZE_1024
+ SLAB_COUNT(2048), //SIZE_2048
+ SLAB_COUNT(4096), //SIZE_4096
+ SLAB_COUNT(8192), //SIZE_8192
+ SLAB_COUNT(16384), //SIZE_16384
+// 32768, //SIZE_32768
+// 18, //LIST_CELL, list.h
+ },
+
+};
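+
+/*
+ * A worked example of the resulting geometry, assuming a typical 64-bit
+ * build where sizeof(AllocSlabChunk) == 8 and a block header well under a
+ * hundred bytes: with SLAB_ALLOC_SIZE = 128 KB, SLAB_COUNT(8) comes out at
+ * roughly 8000 chunks per block, while SLAB_COUNT(16384) yields 7 chunks
+ * of the largest slab size.
+ */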
+
+
+/*
+ * Get an AllocSlabBlock from the operating system's allocator
+ */
+static AllocSlabBlock* slab_alloc_block(void);
+
+/*
+ * Reset a block's per-context state so it can be (re)used by `context`
+ */
+static inline void slab_reset_block(AllocSlabContext* context, AllocSlabBlock *block);
+
+/*
+ * Get a block, either from the freelist of unused blocks or from the
+ * operating system if that list is empty
+ */
+static AllocSlabBlock* slab_get_block(AllocSlabContext* context);
+
+static inline void
+slab_init_block(AllocSlabContext* context, AllocSlabBlock* block,
+ size_t slab_idx);
+
+/*
+ * Put a block on the list of free blocks, or free it if that list is full
+ */
+static void slab_put_block(AllocSlabBlock* block);
+
+/*
+ * Free a block (and thus return it to the operating system's allocator)
+ */
+static void slab_free_block(AllocSlabBlock* block);
+
+
+static inline void
+slab_init_context(MemoryContext context);
+
+MemoryContext slab_create_context(void);
+
+void slab_free_context(MemoryContext context);
+
+void*
+//__attribute__((noinline))
+slab_alloc_dyn(MemoryContext context, Size element_size);
+
+/*
+ * Allocate a chunk of `element_size` bytes from the slab with index
+ * `slab_idx`
+ */
+static void*
+__attribute__((always_inline))
+slab_alloc_in(AllocSlabContext* context, size_t slab_idx, Size element_size);
+
+//static inline
+size_t
+//__attribute__((noinline))
+slab_size_to_slab_idx(size_t size);
+
+void slab_reset_context(MemoryContext context);
+
+void slab_free(void* p);
+void slab_free_in(MemoryContext context, void* p);
+
+void* slab_realloc(void* p, size_t new_size);
+void* slab_realloc_in(MemoryContext context, void* p, size_t new_size);
+
+Size slab_get_chunk_space(MemoryContext context, void *p);
+
+void slab_stats(MemoryContext context, int level);
+void slab_check_context(MemoryContext context_o);
+
+bool slab_is_empty_context(MemoryContext c);
+
+extern AllocSlabChunk* slab_ptr_to_chunk(void *ptr);
+
+
+/* For reference: the PostgreSQL method table the list below maps onto */
+#if 0
+typedef struct MemoryContextMethods
+{
+ void *(*alloc) (MemoryContext context, Size size);
+ /* call this free_p in case someone #define's free() */
+ void (*free_p) (MemoryContext context, void *pointer);
+ void *(*realloc) (MemoryContext context, void *pointer, Size size);
+ void (*init) (MemoryContext context);
+ void (*reset) (MemoryContext context);
+ void (*delete_context) (MemoryContext context);
+ Size (*get_chunk_space) (MemoryContext context, void *pointer);
+ bool (*is_empty) (MemoryContext context);
+ void (*stats) (MemoryContext context, int level);
+#ifdef MEMORY_CONTEXT_CHECKING
+ void (*check) (MemoryContext context);
+#endif
+} AllocSlabMethods;
+#endif
+
+MemoryContextMethods AllocSlabMethods = {
+ slab_alloc_dyn,
+ slab_free_in,//AllocSlabFree,
+ slab_realloc_in,//AllocSlabRealloc,
+ slab_init_context,//AllocSlabInit,
+ slab_reset_context,//AllocSlabReset,
+ slab_free_context,//AllocSlabDelete,
+ slab_get_chunk_space,//AllocSlabGetChunkSpace,
+ slab_is_empty_context,//AllocSlabIsEmpty,//XXX
+ slab_stats,//AllocSlabStats
+#ifdef MEMORY_CONTEXT_CHECKING
+ slab_check_context,//,AllocSlabCheck
+#endif
+};
+
+static AllocSlabBlock* slab_alloc_block(void){
+ /*XXX Allocation Policy*/
+ AllocSlabBlock* block;
+ size_t alloc_size = SLAB_ALLOC_SIZE;
+#ifdef SLAB_USE_MMAP_DIRECTLY
+ size_t off;
+ size_t nr_prealloc = 16;
+ /*
+ * Map a batch of blocks in one go, push all but the last onto the
+ * global freelist and return the last one to the caller.
+ */
+ block = mmap(NULL, alloc_size*nr_prealloc, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ if(block == MAP_FAILED)
+ abort();
+ for(off = 1; off < nr_prealloc; off++){
+ slab_put_block(block);
+ block = (AllocSlabBlock*)((char*)block + alloc_size);
+ }
+#else
+ block = malloc(alloc_size);
+
+ if (block == NULL)
+ {
+ #if 0
+ MemoryContextStats(TopMemoryContext);
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory"),
+ errdetail("Failed while creating memory context \"%s\".",
+ "unknown")));/*XXX: Report actual name*/
+ #endif
+ abort();
+ }
+#endif
+ return block;
+}
+
+static
+AllocSlabBlock*
+__attribute__((noinline))
+slab_get_block(AllocSlabContext* context){
+ AllocSlabBlock* block;
+ if(!ilist_is_empty(&slab_global_data.freelist)){
+ ilist_node* first = slab_global_data.freelist.head.next;
+ ilist_remove(&slab_global_data.freelist, first);
+ assert(slab_global_data.num_free_blocks);
+ block = ilist_container(AllocSlabBlock, block_node, first);
+ //printf("reusing block %p from %u\n", block, slab_global_data.num_free_blocks);
+ --slab_global_data.num_free_blocks;
+ }
+ else{
+ block = slab_alloc_block();
+ //printf("new block %p\n", block);
+ }
+ slab_reset_block(context, block);
+ return block;
+}
+
+static inline
+void slab_reset_block(AllocSlabContext* context, AllocSlabBlock* block){
+ block->context = context;
+ block->num_allocated = 0;
+ block->flags = 0;
+ block->chunk_freelist = NULL;
+}
+
+static void slab_free_block(AllocSlabBlock* block){
+ //printf("free block %p\n", block);
+#ifdef SLAB_USE_MMAP_DIRECTLY
+ munmap(block, SLAB_ALLOC_SIZE);
+#else
+ free(block);
+#endif
+}
+
+static void slab_put_block(AllocSlabBlock* block){
+ /*XXX: Deallocation policy should be improved*/
+ if(slab_global_data.num_free_blocks >= slab_global_data.max_free_blocks){
+ //printf("really freeing block %p\n", block);
+ slab_free_block(block);
+ }
+ else{
+ ilist_push_back(&slab_global_data.freelist, &block->block_node);
+ ++slab_global_data.num_free_blocks;
+ //printf("pushing block %p to freelist of %u\n", block, slab_global_data.num_free_blocks);
+ }
+}
+
+AllocSlabChunk* slab_ptr_to_chunk(void *ptr){
+ return (AllocSlabChunk*)((char*)ptr-offsetof(AllocSlabChunk, data));
+}
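+
+/*
+ * Since the user data is immediately preceded by its AllocSlabChunk header,
+ * mapping a pointer back to its chunk, block and context is pure pointer
+ * arithmetic. A minimal sketch (`ctx` is assumed to be a slab context):
+ *
+ * void *p = slab_alloc_dyn(ctx, 32);
+ * AllocSlabChunk *c = slab_ptr_to_chunk(p);
+ * assert(c->block->context == (AllocSlabContext*)ctx);
+ */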
+
+//static inline
+size_t
+//__attribute__((noinline))
+slab_size_to_slab_idx(size_t size){
+#define LT16(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
+ /*
+ * Table for AllocSetFreeIdx
+ */
+ static const unsigned char LogTable256[256] =
+ {
+ 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ LT16(5), LT16(6), LT16(6), LT16(7), LT16(7), LT16(7), LT16(7),
+ LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8), LT16(8)
+ };
+#undef LT16
+ int idx;
+ unsigned int t;
+ unsigned int tsize;
+ const int ALLOC_MINBITS = 3; /* smallest chunk size is 8 bytes */
+ if (size > (1 << ALLOC_MINBITS))
+ {
+ tsize = (size - 1) >> ALLOC_MINBITS;
+ /*
+ * At this point we need to obtain log2(tsize)+1, ie, the number of
+ * not-all-zero bits at the right. We used to do this with a
+ * shift-and-count loop, but this function is enough of a hotspot to
+ * justify micro-optimization effort. The best approach seems to be
+ * to use a lookup table. Note that this code assumes that
+ * ALLOCSET_NUM_FREELISTS <= 17, since we only cope with two bytes of
+ * the tsize value.
+ */
+ t = tsize >> 8;
+ idx = t ? LogTable256[t] + 8 : LogTable256[tsize];
+ }
+ else
+ idx = 0;
+ return idx;
+}
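+
+/*
+ * Worked example: slab_size_to_slab_idx(100) computes
+ * tsize = (100 - 1) >> 3 = 12, t = 12 >> 8 = 0, idx = LogTable256[12] = 4,
+ * i.e. the 128-byte slab: the smallest supported power of two >= 100.
+ */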
+
+static void* slab_alloc_large(AllocSlabContext* context, Size element_size){
+ AllocSlabChunk *chunk;
+ AllocSlabBlock* block = malloc(sizeof(AllocSlabBlock)
+ + sizeof(AllocSlabChunk)
+ + element_size);
+ if(!block)
+ abort();
+
+ block->context = context;
+ ilist_push_back(&context->full_blocks_head,
+ &block->block_node);
+
+ block->element_size = element_size + sizeof(AllocSlabChunk);
+ block->num_allocated = 1;
+ block->flags = SLAB_BLOCK_IS_LARGE_ALLOC;
+
+ chunk = (AllocSlabChunk*)block->data;
+ chunk->block = block;
+
+ return chunk->data;
+}
+
+static void slab_free_large(AllocSlabContext* context, AllocSlabBlock *block){
+ assert(block->flags & SLAB_BLOCK_IS_LARGE_ALLOC);
+
+ ilist_remove(&context->full_blocks_head,
+ &block->block_node);
+
+ /* poison the whole allocation to catch use-after-free */
+ memset(block, 0xef, sizeof(AllocSlabBlock) + block->element_size);
+ free(block);
+#ifndef NDEBUG
+ slab_check_context((MemoryContext)context);
+#endif
+}
+void*
+//__attribute__((noinline))
+slab_alloc_dyn(MemoryContext context, Size element_size){
+ void *p;
+ if(unlikely(element_size > SLAB_MAX_SLAB_SIZE)){//XXX
+ p = slab_alloc_large((AllocSlabContext*)context, element_size);
+ }
+ else{
+ size_t slab_idx = slab_size_to_slab_idx(element_size);
+ p = slab_alloc_in((AllocSlabContext*)context, slab_idx, element_size);
+ }
+ assert((slab_ptr_to_chunk(p)->block->element_size - sizeof(AllocSlabChunk)) >= element_size);
+ return p;
+}
+
+static void*
+// __attribute__((noinline))
+__attribute__((always_inline))
+slab_alloc_in(AllocSlabContext* context, size_t slab_idx,
+ Size orig_element_size){
+ AllocSlabBlock* top_slab = ilist_front(AllocSlabBlock, block_node,
+ &context->slab_idx[slab_idx]);
+
+ AllocSlabChunk* chunk;
+
+ //assert(slab_idx);
+
+ if(unlikely(!top_slab)){
+ /* no block with free space for this slab, get a fresh one */
+ top_slab = slab_get_block(context);
+
+ slab_init_block(context, top_slab, slab_idx);
+
+ ilist_push_front(&context->slab_idx[slab_idx],
+ &top_slab->block_node);
+
+ top_slab->num_allocated = 1;
+ chunk = (AllocSlabChunk*)top_slab->data;
+ chunk->block = top_slab;
+
+ //printf("block: %p allocation from newspace with %lu allocated, max %lu el %lu\n",
+ // top_slab, top_slab->num_allocated, top_slab->max_allocated,
+ // top_slab->element_size);
+
+ }
+ else{
+ bool is_full;
+ if(top_slab->chunk_freelist){
+ ++top_slab->num_allocated;
+ chunk = top_slab->chunk_freelist;
+ top_slab->chunk_freelist = chunk->next_chunk;
+ //printf("block: %p allocation from freelist with %lu allocated, max %lu el %lu\n",
+ // top_slab, top_slab->num_allocated, top_slab->max_allocated,
+ // top_slab->element_size);
+ }
+ else{
+ /* hand out the next never-used chunk, found via num_allocated */
+ chunk = (AllocSlabChunk*)&top_slab->data[top_slab->element_size * top_slab->num_allocated];
+ ++top_slab->num_allocated;
+
+ //printf("block: %p allocation from freespace with %lu allocated, max %lu el %lu\n",
+ // top_slab, top_slab->num_allocated, top_slab->max_allocated,
+ // top_slab->element_size);
+ }
+ is_full = top_slab->num_allocated == top_slab->max_allocated;
+ chunk->block = top_slab;
+
+ /*
+ * No more free space on block
+ *
+ * We assume that this case cannot be hit if we have allocated a new
+ * block above as we won't ever use slab allocation for sizes where
+ * only one chunk fits on a block
+ */
+ if(unlikely(is_full)){
+ //printf("block: %p block is full with %lu allocated, max %lu el %lu\n",
+ // top_slab, top_slab->num_allocated, top_slab->max_allocated,
+ // top_slab->element_size);
+ ilist_remove(&context->slab_idx[slab_idx],
+ &top_slab->block_node);
+ ilist_push_front(&context->full_blocks_head,
+ &top_slab->block_node);
+ top_slab->flags |= SLAB_BLOCK_ON_FULLLIST;
+ }
+ }
+#ifndef NDEBUG
+ slab_check_context((MemoryContext)context);
+#endif
+ return chunk->data;
+}
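+
+/*
+ * To summarize the three paths through slab_alloc_in (a recap of the code
+ * above, not additional behaviour):
+ *
+ * 1. no block with free space exists: fetch/reset a block, hand out cell 0
+ * 2. the top block has recycled chunks: pop one off chunk_freelist
+ * 3. otherwise: bump-allocate cell number num_allocated from data[]
+ *
+ * A block that becomes full is moved to full_blocks_head and only returns
+ * to its slab list once one of its chunks is freed.
+ */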
+
+void slab_free(void* p){
+ slab_free_in(NULL, p);
+}
+
+void slab_free_in(MemoryContext __attribute__((unused))context_o, void *p){
+ AllocSlabChunk *chunk = slab_ptr_to_chunk(p);
+ AllocSlabBlock *block = chunk->block;
+
+ if(block->flags & SLAB_BLOCK_IS_LARGE_ALLOC){
+ AllocSlabContext* context = block->context;
+ slab_free_large(context, block);
+ return;
+ }
+ --block->num_allocated;
+
+ if(unlikely(block->num_allocated == 0)){
+ /*
+ * block is empty, free it
+ */
+
+ AllocSlabContext* context = block->context;
+ /*
+ * Keep the block if it is the only one left on its list, so the
+ * next allocation does not have to fetch a new block first.
+ */
+ ilist_head* head = &context->slab_idx[block->slab_idx];
+ if(ilist_has_next(head, &block->block_node) ||
+ ilist_has_prev(head, &block->block_node)){
+ ilist_remove(head, &block->block_node);
+ slab_put_block(block);
+ return;
+ }
+ else{
+ //printf("skipping delete\n");
+ }
+
+#ifndef NDEBUG
+ slab_check_context((MemoryContext)context);
+#endif
+ }
+ else if(unlikely(block->flags & SLAB_BLOCK_ON_FULLLIST)){
+ /* ----
+ * The block was on the list of full blocks; move it back to the list
+ * of blocks with free space.
+ *
+ * We put it at the back of that list to make it less likely that the
+ * block keeps bouncing between the two lists. One could argue that
+ * cache usage would make adding it to the front a better idea.
+ * ----
+ */
+ AllocSlabContext* context = block->context;
+ block->flags &= ~SLAB_BLOCK_ON_FULLLIST;
+ ilist_remove(&context->full_blocks_head,
+ &block->block_node);
+
+ ilist_push_back(&context->slab_idx[block->slab_idx],
+ &block->block_node);
+ }
+
+ chunk->next_chunk = block->chunk_freelist;
+ block->chunk_freelist = chunk;
+#ifndef NDEBUG
+ slab_check_context((MemoryContext)block->context);
+#endif
+}
+
+void* slab_realloc_in(MemoryContext __attribute__((unused))context_o, void* p, size_t new_size){
+ AllocSlabChunk *chunk = slab_ptr_to_chunk(p);
+ AllocSlabBlock *block = chunk->block;
+ AllocSlabContext* context = block->context;
+ void *new_ptr;
+
+ if(block->element_size >= (new_size + sizeof(AllocSlabChunk)))
+ return p;
+ //printf("slab realloc %p to %lu\n", p, new_size);
+
+ new_ptr = slab_alloc_dyn((MemoryContext)context, new_size);
+ memcpy(new_ptr, p, block->element_size - sizeof(AllocSlabChunk));
+ slab_free(p);
+ return new_ptr;
+}
+
+void* slab_realloc(void* p, size_t new_size){
+ return slab_realloc_in(NULL, p, new_size);
+}
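+
+/*
+ * Realloc only moves data when the request outgrows the chunk's slab. A
+ * minimal usage sketch (`ctx` assumed to be a slab context):
+ *
+ * char *s = slab_alloc_dyn(ctx, 60); // lands in the 64-byte slab
+ * s = slab_realloc(s, 64); // still fits, pointer unchanged
+ * s = slab_realloc(s, 80); // outgrows it, moves to the 128-byte slab
+ */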
+
+void slab_reset_context(MemoryContext context_o){
+ AllocSlabContext* context = (AllocSlabContext*)context_o;
+ AllocSlabBlock *block;
+ uint8 slab_idx;
+ ilist_node *cur, *next;
+ //printf("reset context: %s\n", context_o->name);
+
+#ifndef NDEBUG
+ slab_check_context(context_o);
+#endif
+ ilist_foreach_modify(cur, next, &context->full_blocks_head){
+ ilist_remove(&context->full_blocks_head, cur);
+ block = ilist_container(AllocSlabBlock, block_node, cur);
+ if(block->flags & SLAB_BLOCK_IS_LARGE_ALLOC){
+ slab_free_large(context, block);
+ }
+ else{
+ assert(block->flags & SLAB_BLOCK_ON_FULLLIST);
+ slab_put_block(block);
+ }
+ }
+
+ for(slab_idx = 0; slab_idx < SLAB_MAX_IDX; slab_idx++){
+ bool first = true;
+ ilist_foreach_modify(cur, next, &context->slab_idx[slab_idx]){
+ /*
+ * Keep the first block around, merely resetting it, so the
+ * next allocation after the reset does not have to fetch a
+ * new block
+ */
+ block = ilist_container(AllocSlabBlock, block_node, cur);
+ if(first){
+ slab_reset_block(context, block);
+ first = false;
+ }
+ else{
+ ilist_remove(&context->slab_idx[slab_idx], cur);
+ slab_put_block(block);
+ }
+ }
+ }
+ //printf("slab reset\n");
+#ifndef NDEBUG
+ slab_check_context(context_o);
+#endif
+}
+
+static void slab_reset_global(){
+ ilist_node *cur, *next;
+ //printf("freelist: %u\n", slab_global_data.num_free_blocks);
+ ilist_foreach_modify(cur, next, &slab_global_data.freelist){
+ AllocSlabBlock *block = ilist_container(AllocSlabBlock, block_node, cur);
+ //printf("removing from freelist: %p\n", cur);
+ --slab_global_data.num_free_blocks;
+ ilist_remove(&slab_global_data.freelist, cur);
+ slab_free_block(block);
+ }
+ assert(slab_global_data.num_free_blocks == 0);
+}
+
+static inline void
+slab_init_context(MemoryContext context_o){
+ AllocSlabContext *context = (AllocSlabContext*)context_o;
+ int idx;
+ for(idx = 0; idx < SLAB_MAX_IDX; idx++){
+ ilist_init(&context->slab_idx[idx]);
+ }
+ ilist_init(&context->full_blocks_head);
+}
+
+#ifndef SLAB_MAIN
+MemoryContext slab_create_context_r(MemoryContext parent,
+ const char* name){
+ //printf("create context %s\n", name);
+
+ AllocSlabContext* context;
+
+ /* Do the type-independent part of context creation */
+ context = (AllocSlabContext*) MemoryContextCreate(T_AllocSlabContext,
+ sizeof(AllocSlabContext),
+ &AllocSlabMethods,
+ parent,
+ name);
+ slab_init_context(context);
+ return (MemoryContext) context;
+}
+#else
+MemoryContext slab_create_context(){
+ AllocSlabContext* context = malloc(sizeof(AllocSlabContext));
+ if(!context)
+ abort();
+ memset(context, 0, sizeof(AllocSlabContext));
+ slab_init_context((MemoryContext)context);
+
+ return (MemoryContext)context;
+}
+#endif
+
+void slab_free_context(MemoryContext context_o){
+ AllocSlabContext* context = (AllocSlabContext*)context_o;
+ ilist_node *cur, *next;
+ size_t idx;
+
+ slab_reset_context(context_o);
+
+ for(idx = 0; idx < SLAB_MAX_IDX; idx++){
+ AllocSlabBlock *block;
+ ilist_foreach_modify(cur, next, &context->slab_idx[idx]){
+ block = ilist_container(AllocSlabBlock, block_node, cur);
+ ilist_remove(&context->slab_idx[idx], cur);
+ slab_put_block(block);
+ }
+ }
+
+ #ifdef SLAB_MAIN
+ free(context);
+ #endif
+}
+
+Size slab_get_chunk_space(MemoryContext __attribute__((unused))context_o, void *p){
+ AllocSlabChunk *chunk = slab_ptr_to_chunk(p);
+ AllocSlabBlock *block = chunk->block;
+ return block->element_size;
+}
+
+void slab_stats(MemoryContext __attribute__((unused))context,
+ int __attribute__((unused))level){
+ /* not implemented */
+}
+
+void slab_check_context(MemoryContext context_o){
+ AllocSlabContext* context = (AllocSlabContext*)context_o;
+ AllocSlabBlock *block;
+ ilist_node *cur;
+ size_t slab_idx;
+
+ assert(context_o);
+
+ //printf("checking head %p of context %p\n", &context->full_blocks_head, context);
+ ilist_foreach(cur, &context->full_blocks_head){
+ block = ilist_container(AllocSlabBlock, block_node, cur);
+ //printf("checking block %p\n", block);
+ if(!(block->flags & (SLAB_BLOCK_IS_LARGE_ALLOC |
+ SLAB_BLOCK_ON_FULLLIST))){
+ //printf("block: %p cur: %p\n", block, cur);
+ assert(false);
+ }
+ }
+
+ for(slab_idx = 0; slab_idx < SLAB_MAX_IDX; slab_idx++){
+ ilist_foreach(cur, &context->slab_idx[slab_idx]){
+ block = ilist_container(AllocSlabBlock, block_node, cur);
+ assert(!(block->flags & SLAB_BLOCK_ON_FULLLIST));
+ }
+ }
+}
+
+MemoryContext slab_ptr_to_context(void *p){
+ return (MemoryContext)slab_ptr_to_chunk(p)->block->context;
+}
+
+bool slab_is_empty_context(MemoryContext context){
+ return context->isReset;
+}
+
+static inline void
+slab_init_block(AllocSlabContext* context, AllocSlabBlock* block,
+ size_t slab_idx){
+ block->slab_idx = slab_idx;
+ /*
+ * FIXME: Ensure proper alignment by adding enough space to ensure a chunk
+ * is always aligned
+ */
+ block->element_size = slab_global_data.slab_idx_to_size[slab_idx]
+ + sizeof(AllocSlabChunk);
+ block->flags = 0;
+ block->num_allocated = 0;
+ block->max_allocated = slab_global_data.slab_idx_to_count[slab_idx];
+ /* slab allocation is never used for sizes where only a few chunks fit */
+ assert(block->max_allocated > 5);
+}
+
+void slab_init_global(){
+ printf("initialized slab global state\n");
+ ilist_init(&slab_global_data.freelist);
+}
+
+#ifdef SLAB_MAIN
+//#define TEST_MALLOC
+#define TEST_DYN
+int main(){
+ void* x1;
+ void* x2;
+ void* x3;
+ void* x4;
+ void* x5;
+ void* x6;
+ MemoryContext context;
+ MemoryContext context2;
+ uint64_t allocations = 0;
+
+ int i3, i, i2;
+
+ slab_init_global();
+
+ context = slab_create_context();
+ context2 = slab_create_context();
+
+#if 0
+ for(i3 = 0; i3 < 10000000;i3++){
+ for(i = 0; i < 2;i++){
+ for(i2 = 0; i2 < 2;i2++){
+#else
+ for(i3 = 0; i3 < 500;i3++){
+ for(i = 0; i < 10000;i++){
+ for(i2 = 0; i2 < 6;i2++){
+#endif
+
+#ifndef TEST_MALLOC
+ x1 = slab_alloc_dyn(context, 8);
+ x2 = slab_alloc_dyn(context, 16);
+ x3 = slab_alloc_dyn(context, 32);
+ x4 = slab_alloc_dyn(context, 128);
+ x5 = slab_alloc_dyn(context, 60);
+ x5 = slab_realloc(x5, 64);
+
+ x6 = slab_alloc_dyn(context2, 8);
+#else
+ x1 = malloc(8);
+ x2 = malloc(16);
+ x3 = malloc(32);
+ x4 = malloc(128);
+ x5 = malloc(64);
+ x6 = malloc(8);
+#endif //TEST_MALLOC
+ allocations += 6;
+#ifndef TEST_MALLOC
+ slab_free(x5);
+#else
+ free(x5);
+#endif
+ }
+#ifndef TEST_MALLOC
+ slab_free(x1);
+ slab_free(x2);
+ slab_free(x3);
+ slab_free(x4);
+#else
+ free(x1);
+ free(x2);
+ free(x3);
+ free(x4);
+#endif
+ }
+ slab_reset_context(context);
+ }
+ slab_reset_context(context2);
+ slab_free_context(context);
+ slab_free_context(context2);
+ slab_reset_global();
+ printf("allocated last chunk at %p\n", x1);
+ printf("allocated last chunk at %p\n", x2);
+ printf("allocated last chunk at %p\n", x3);
+ printf("allocated last chunk at %p\n", x4);
+ printf("allocated last chunk at %p\n", x6);
+ printf("allocated %lu chunks\n", allocations);
+
+ return 0;
+}
+#endif