/* Helper functions. */
static mspan_context *mspan_allocate_context_descriptor(char *base,
mspan_manager *mgr);
+static void mspan_destroy_span(char *base, mspan *span);
static mspan *mspan_find_free_span(char *base, mspan_manager *mgr,
Size minpages, Size maxpages);
+static void mspan_recycle_span(char *base, mspan_manager *mgr,
+ mspan *span);
+static void mspan_unlink_span(char *base, mspan *span);
static void mspan_update_page_map(char *base, mspan_manager *mgr,
Size first_page, Size npages, Size value);
/* All lists of allocations begin empty. */
memset(cxt, 0, sizeof(mspan_context));
+ /* Increment the number of active contexts. */
+ ++mgr->ncontexts;
+
return cxt;
}
static mspan_context *
mspan_allocate_context_descriptor(char *base, mspan_manager *mgr)
{
- mspan_context *cxt;
+ mspan_context *cxt = NULL;
mspan *span;
Size pageno;
+ Size i;
/* Outside of a dynamic shared memory segment, just allocate from OS. */
if (base == NULL)
pageno = span->first_page;
}
}
-
- /* Remove the page map entry for the start of the span. */
- mspan_update_page_map(base, mgr, pageno, 1, 0);
+ if (span != NULL)
+ {
+ /*
+ * If the span is just one page, deallocate it completely (see
+ * function header comments for why this is OK). Otherwise, remove
+ * the first page from the span and put the rest back on the
+ * appropriate free list. Also adjust the page map entries as
+ * appropriate.
+ */
+ mspan_update_page_map(base, mgr, pageno, 1, 0);
+ if (span->npages == 1)
+ mspan_destroy_span(base, span);
+ else
+ {
+ mspan_unlink_span(base, span);
+ ++span->first_page;
+ --span->npages;
+ mspan_recycle_span(base, mgr, span);
+
+ /*
+ * The last-page entry for this span is still OK, so no need to
+ * update that. Technically, the first-page entry isn't needed
+ * any more since the page we just stole will never be freed, but
+ * let's do it just to be consistent.
+ */
+ mspan_update_page_map(base, mgr, span->first_page, 1,
+ ((char *) span) - base);
+ }
+ }
/*
- * XXX. If the span is exactly one page long, we want to free it.
- * Presumably this means the mspace_manager (not the context) must
- * manage the list of free spans. Alternatively, we could leak it,
- * which is no worse than what would happen for a garden-variety
- * dsm-lifetime span allocation.
- *
- * If the span is more than one page long, we want to increment the
- * first page, decrement the page count, and make a page map entry
- * for whatever's left over. Technically the last doesn't matter,
- * since the adjacent "span" is one which can never be freed, but
- * let's do it anyway for the sake of tidiness.
+ * OK, we have a page, either from a span or from the boundary. Carve
+ * it up into chunks of just the right size.
*/
+ for (i = 0; i + MAXALIGN(sizeof(mspan_context)) <= MSPAN_PAGE_SIZE;
+ i += MAXALIGN(sizeof(mspan_context)))
+ {
+ Size offset = pageno * MSPAN_PAGE_SIZE + i;
+
+ /* Plan to return the first object as the context. */
+ if (i == 0)
+ {
+ cxt = (mspan_context *) (base + offset);
+ continue;
+ }
+
+ /*
+ * Push the remaining objects onto the free list, storing the offset
+ * of the previous list head in the first few bytes of each object.
+ */
+ *(Size *) (base + offset) = mgr->freecontext.relptr_off;
+ mgr->freecontext.relptr_off = offset;
+ }
+
+ Assert(cxt != NULL);
+ return cxt;
+}
+
+/*
+ * Deallocate a span descriptor.
+ */
+static void
+mspan_destroy_span(char *base, mspan *span)
+{
/*
- * XXX. Once we've worked things out on a span level, we need to carve
- * up the page, put all but one item on the manager-level context free
- * list, and return the last item.
+ * XXX. As a special case, the superblock descriptor for a span of
+ * spans is always stored within the span itself. Return the span
+ * to be destroyed to the superblock; then, if there's only 1 remaining
+ * span outstanding, nuke the whole superblock.
*/
}
return NULL;
}
+/*
+ * Put a span on the appropriate free list.
+ */
+static void
+mspan_recycle_span(char *base, mspan_manager *mgr, mspan *span)
+{
+ int fidx;
+ mspan *head;
+
+ Assert(relptr_is_null(span->nextspan));
+ Assert(relptr_is_null(span->prevspan));
+
+ /* Spans too large for a dedicated list all share the last free list. */
+ fidx = span->npages >= MSPAN_NUM_FREE_LISTS ?
+ MSPAN_NUM_FREE_LISTS - 1 : span->npages - 1;
+
+ /*
+ * Push the span onto the front of the list. The lists are circular
+ * and doubly linked; an empty list is represented by a null head
+ * pointer, and a one-element list points at itself.
+ */
+ head = relptr_access(base, mgr->freelist[fidx]);
+ if (head == NULL)
+ {
+ relptr_store(base, span->nextspan, span);
+ relptr_store(base, span->prevspan, span);
+ }
+ else
+ {
+ mspan *tail = relptr_access(base, head->prevspan);
+
+ relptr_store(base, span->nextspan, head);
+ relptr_store(base, span->prevspan, tail);
+ relptr_store(base, tail->nextspan, span);
+ relptr_store(base, head->prevspan, span);
+ }
+ relptr_store(base, mgr->freelist[fidx], span);
+}
+
/*
* Update the page map.
*/
mspan_update_page_map(char *base, mspan_manager *mgr, Size first_page,
Size npages, Size value)
{
- aspace_map_handle h;
-
- h.as_map = &mgr->page_map;
- h.as_base = base;
- h.as_allocator = NULL; /* XXX FIXME */
- h.as_allocator_private = NULL; /* XXX FIXME */
-
- aspace_map_set_range(&h, first_page, npages, value);
+ aspace_map_set_range(&mgr->page_map, first_page, npages, value,
+ base, NULL, NULL);
+ /* XXX: Last two args should not be NULL! */
}
/*
{
mspan *next;
mspan *prev;
- mspan *null = NULL;
next = relptr_access(base, span->nextspan);
prev = relptr_access(base, span->prevspan);
Assert(next != NULL && prev != NULL);
next->prevspan.relptr_off = span->prevspan.relptr_off;
prev->nextspan.relptr_off = span->nextspan.relptr_off;
+
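+ /*
+ * In assert-enabled builds, also clear the unlinked span's own pointers
+ * so that the Asserts in mspan_recycle_span can detect a span that was
+ * never properly unlinked.
+ */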
#ifdef USE_ASSERT_CHECKING
- relptr_store(base, span->prevspan, null);
- relptr_store(base, span->nextspan, null);
+ {
+ mspan *null = NULL;
+
+ relptr_store(base, span->prevspan, null);
+ relptr_store(base, span->nextspan, null);
+ }
#endif
}
-
-static mspan *mspan_allocate_span_descriptor();
-static mspan *mspan_allocate_span();
-static void mspan_free_span(mspan *);
-
-static void mspan_init_superblock(mspan *);
-static void *mspan_allocate_from_superblock(mspan_context *);
-static void mspan_free_to_superblock();
typedef void *(*aspace_map_allocator)(void *private, Size);
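+
+/*
+ * The allocator callback above is how the map obtains memory, presumably
+ * for its own internal structures; the void * argument receives the
+ * allocator_private value supplied by the caller.
+ */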
-/*
- * An aspace_map might be located within a dynamic shared memory segment,
- * so we need to separate the shared state from the backend-private state.
- * There's no special API for initializing an aspace_map_handle; callers
- * are expected to construct a suitable object by filling in the necessary
- * fields.
- */
-typedef struct
-{
- aspace_map *as_map;
- char *as_base;
- aspace_map_allocator as_allocator;
- void *as_allocator_private;
-} aspace_map_handle;
-
/* API functions. */
extern void aspace_map_initialize(aspace_map *, uint64 key_limit, int flags);
-extern void aspace_map_set(aspace_map_handle *map, uint64 key, uint64 value);
-extern void aspace_map_set_range(aspace_map_handle *map,
- uint64 first_key, uint64 nkeys, uint64 value);
-extern uint64 aspace_map_get(aspace_map_handle *map, uint64 key);
+extern void aspace_map_set_range(aspace_map *map,
+ uint64 first_key, uint64 nkeys, uint64 value,
+ char *base, aspace_map_allocator allocator,
+ void *allocator_private);
+extern uint64 aspace_map_get(aspace_map *, uint64 key, char *base);
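+
+/*
+ * With aspace_map_handle gone, callers pass the segment base and allocator
+ * on each call; for example, mspan_update_page_map now calls:
+ *
+ * aspace_map_set_range(&mgr->page_map, first_page, npages, value,
+ * base, NULL, NULL);
+ *
+ * (The NULL allocator arguments there are a known XXX and remain to be
+ * filled in.)
+ */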
#endif