From dd74602c053d14a53ec79a120b998a3d628c6a8c Mon Sep 17 00:00:00 2001
From: Robert Haas
Date: Mon, 12 May 2014 15:22:15 -0400
Subject: [PATCH] More renaming, somewhat incomplete.

sb_alloc.c/h -> balloc.c/h.
---
 contrib/test_sballoc/test_sballoc.c        |  26 ++---
 src/backend/access/nbtree/nbtsort.c        |   8 +-
 src/backend/utils/mmgr/Makefile            |   2 +-
 src/backend/utils/mmgr/aregion.c           |  18 +--
 .../utils/mmgr/{sb_alloc.c => balloc.c}    | 103 +++++++++---------
 src/backend/utils/sort/tuplesort.c         |  18 +--
 src/include/utils/aregion.h                |  10 +-
 src/include/utils/{sb_alloc.h => balloc.h} |  28 ++---
 8 files changed, 108 insertions(+), 105 deletions(-)
 rename src/backend/utils/mmgr/{sb_alloc.c => balloc.c} (92%)
 rename src/include/utils/{sb_alloc.h => balloc.h} (55%)

diff --git a/contrib/test_sballoc/test_sballoc.c b/contrib/test_sballoc/test_sballoc.c
index 38c03da78b..9a9545f6f9 100644
--- a/contrib/test_sballoc/test_sballoc.c
+++ b/contrib/test_sballoc/test_sballoc.c
@@ -15,8 +15,8 @@
 
 #include "fmgr.h"
 #include "utils/memutils.h"
-#include "utils/sb_alloc.h"
-#include "utils/sb_region.h"
+#include "utils/aregion.h"
+#include "utils/balloc.h"
 
 typedef struct llnode
 {
@@ -36,16 +36,16 @@ alloc(PG_FUNCTION_ARGS)
 	int64		count = PG_GETARG_INT64(1);
 	int64		i;
 	int64	   *p;
-	sb_allocator *a;
+	BlockAllocatorContext *context;
 
-	a = sb_create_private_allocator();
+	context = BlockAllocatorContextCreate();
 	for (i = 0; i < count; ++i)
 	{
-		p = sb_alloc(a, size, 0);
+		p = BlockAllocatorAlloc(context, size, 0);
 		*p = i;
 	}
-	sb_reset_allocator(a);
-	sb_dump_regions();
+	BlockAllocatorReset(context);
+	DumpAllocatorRegions();
 
 	PG_RETURN_VOID();
 }
@@ -83,26 +83,26 @@ alloc_list(PG_FUNCTION_ARGS)
 	int64		i;
 	llnode	   *h = NULL;
 	llnode	   *p;
-	sb_allocator *a;
+	BlockAllocatorContext *context;
 
 	if (size < sizeof(llnode))
 		elog(ERROR, "size too small");
 
-	a = sb_create_private_allocator();
+	context = BlockAllocatorContextCreate();
 	for (i = 0; i < count; ++i)
 	{
-		p = sb_alloc(a, size, 0);
+		p = BlockAllocatorAlloc(context, size, 0);
 		p->next = h;
 		h = p;
 	}
 	while (h != NULL)
 	{
 		p = h->next;
-		sb_free(h);
+		BlockAllocatorFree(h);
 		h = p;
 	}
-	sb_dump_regions();
-	sb_reset_allocator(a);
+	DumpAllocatorRegions();
+	BlockAllocatorReset(context);
 
 	PG_RETURN_VOID();
 }
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 1fe1a9bed5..be5e46d29c 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -72,7 +72,7 @@
 #include "storage/smgr.h"
 #include "tcop/tcopprot.h"
 #include "utils/rel.h"
-#include "utils/sb_alloc.h"
+#include "utils/balloc.h"
 #include "utils/tuplesort.h"
 
 
@@ -770,7 +770,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
 			{
 				_bt_buildadd(wstate, state, itup);
 				if (should_free)
-					sb_free(itup);
+					BlockAllocatorFree(itup);
 				itup = tuplesort_getindextuple(btspool->sortstate,
 											   true, &should_free);
 			}
@@ -778,7 +778,7 @@
 			{
 				_bt_buildadd(wstate, state, itup2);
 				if (should_free2)
-					sb_free(itup2);
+					BlockAllocatorFree(itup2);
 				itup2 = tuplesort_getindextuple(btspool2->sortstate,
 												true, &should_free2);
 			}
@@ -797,7 +797,7 @@
 
 		_bt_buildadd(wstate, state, itup);
 		if (should_free)
-			sb_free(itup);
+			BlockAllocatorFree(itup);
 	}
 }
diff --git a/src/backend/utils/mmgr/Makefile b/src/backend/utils/mmgr/Makefile
index e686a90ad7..4c63c0efcd 100644
--- a/src/backend/utils/mmgr/Makefile
+++ b/src/backend/utils/mmgr/Makefile
@@ -12,6 +12,6 @@ subdir = src/backend/utils/mmgr
 top_builddir = ../../../..
 include $(top_builddir)/src/Makefile.global
 
-OBJS = aregion.o aset.o freepage.o mcxt.o portalmem.o sb_alloc.o balloc_map.o
+OBJS = aregion.o aset.o balloc.o balloc_map.o freepage.o mcxt.o portalmem.o
 
 include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/utils/mmgr/aregion.c b/src/backend/utils/mmgr/aregion.c
index 2ef538e71b..e5054c0495 100644
--- a/src/backend/utils/mmgr/aregion.c
+++ b/src/backend/utils/mmgr/aregion.c
@@ -1,16 +1,16 @@
 /*-------------------------------------------------------------------------
  *
  * aregion.c
- *	  Superblock allocator memory region manager.
+ *	  Management of allocator memory regions.
  *
- * The superblock allocator operates on ranges of pages managed by a
+ * The block allocator operates on ranges of pages managed by a
  * FreePageManager and reverse-mapped by a BlockAllocatorMap.  When it's
  * asked to free an object, it just gets a pointer address; our job is to
  * figure out which page range contains that object and locate the
- * FreePageManager, BlockAllocatorMap, and other metadata that the superblock
- * allocator will need to do its thing.  Moreover, when allocating an
- * object, the caller is only required to provide the superblock allocator
- * with a pointer to the sb_allocator object, which could be in either
+ * FreePageManager, BlockAllocatorMap, and other metadata that the block
+ * allocator will need to do its thing.  Moreover, when allocating an
+ * object, the caller is only required to provide the block allocator
+ * with a pointer to the BlockAllocatorContext object, which could be in either
 * shared or backend-private memory; our job again is to know which it
 * is and provide pointers to the appropriate supporting data structures.
 * To do all this, we have to keep track of where all dynamic shared memory
@@ -267,9 +267,9 @@
 }
 
 /*
- * When a backend-private sb_allocator needs more memory, it calls this
- * function.  We search the existing backend-private regions for one capable
- * of satisfying the request; if none found, we must create a new region.
+ * Provide a backend-private AllocatorRegion capable of satisfying a request
+ * for a given number of pages.  If no existing region has enough contiguous
+ * freespace, we'll create a new one.
 */
 AllocatorRegion *
 GetRegionForPrivateAllocation(Size npages)
@@ -399,7 +399,7 @@
 	region->usable_pages = new_region_net_pages;
 	aregion_private_pages_allocated += region->usable_pages;
 	region->seg = NULL;
-	region->allocator = NULL;
+	region->context = NULL;
 	region->fpm = (FreePageManager *)
 		(region_start + MAXALIGN(sizeof(AllocatorRegion)));
 	region->pagemap = (BlockAllocatorMap *)
@@ -436,7 +436,7 @@ ReportRegionContiguousFreespace(AllocatorRegion *region, Size npages)
 
 	/* This should only be called for private regions. */
 	Assert(region->seg == NULL);
-	Assert(region->allocator == NULL);
+	Assert(region->context == NULL);
 
 	/*
 	 * If there have been allocations from the region since the last report,
diff --git a/src/backend/utils/mmgr/sb_alloc.c b/src/backend/utils/mmgr/balloc.c
similarity index 92%
rename from src/backend/utils/mmgr/sb_alloc.c
rename to src/backend/utils/mmgr/balloc.c
index 83dcc15915..46aed1cfbe 100644
--- a/src/backend/utils/mmgr/sb_alloc.c
+++ b/src/backend/utils/mmgr/balloc.c
@@ -1,12 +1,12 @@
 /*-------------------------------------------------------------------------
  *
- * sb_alloc.c
- *	  Superblock-based memory allocator.
+ * balloc.c
+ *	  Block-based memory allocator.
  *
  * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * src/backend/utils/mmgr/sb_alloc.c
+ * src/backend/utils/mmgr/balloc.c
  *
  *-------------------------------------------------------------------------
  */
@@ -126,8 +126,8 @@ static char sb_size_class_map[] = {
  * For large objects, we just stick all of the allocations in fullness class
  * 0.  Since we can just return the space directly to the free page manager,
  * we don't really need them on a list at all, except that if someone wants
- * to bulk release everything allocated using this sb_allocator, we have no
- * other way of finding them.
+ * to bulk release everything allocated using this BlockAllocatorContext, we
+ * have no other way of finding them.
  */
 
 #define SB_FULLNESS_CLASSES		4
@@ -143,11 +143,11 @@ struct sb_heap
 };
 
 /*
- * An sb_allocator is basically just a group of heaps, one per size class.
- * If locking is required, then we've also got an array of LWLocks, one per
- * heap.
+ * A BlockAllocatorContext is basically just a group of heaps, one per size
+ * class.  If locking is required, then we've also got an array of LWLocks,
+ * one per heap.
  */
-struct sb_allocator
+struct BlockAllocatorContext
 {
 	bool		private;
 	relptr(LWLock) locks;
@@ -155,14 +155,14 @@
 };
 
 /* Helper functions. */
-static char *sb_alloc_guts(char *base, AllocatorRegion *region,
-			  sb_allocator *a, int size_class);
+static char *BlockAllocatorAllocGuts(char *base, AllocatorRegion *region,
+			  BlockAllocatorContext *context, int size_class);
 static bool sb_ensure_active_superblock(char *base, AllocatorRegion *region,
-						sb_allocator *a, sb_heap *heap,
+						BlockAllocatorContext *context, sb_heap *heap,
 						int size_class);
 static void sb_init_span(char *base, sb_span *span, sb_heap *heap,
 			 char *ptr, Size npages, uint16 size_class);
-static void sb_out_of_memory_error(sb_allocator *a);
+static void sb_out_of_memory_error(BlockAllocatorContext *context);
 static bool sb_transfer_first_span(char *base, sb_heap *heap,
 						int fromclass, int toclass);
 static void sb_unlink_span(char *base, sb_heap *heap, sb_span *span);
@@ -170,41 +170,41 @@ static void sb_unlink_span(char *base, sb_heap *heap, sb_span *span);
 
 /*
 * Create a backend-private allocator.
 */
-sb_allocator *
-sb_create_private_allocator(void)
+BlockAllocatorContext *
+BlockAllocatorContextCreate(void)
 {
 	Size		allocator_size;
 	int			heapno;
 	int			fclass;
-	sb_allocator *a;
+	BlockAllocatorContext *context;
 	char	   *base = NULL;
 
-	allocator_size = offsetof(sb_allocator, heaps);
+	allocator_size = offsetof(BlockAllocatorContext, heaps);
 	allocator_size += sizeof(sb_heap) * SB_NUM_SIZE_CLASSES;
-	a = malloc(allocator_size);
-	if (a == NULL)
+	context = malloc(allocator_size);
+	if (context == NULL)
 		ereport(ERROR,
 				(errcode(ERRCODE_OUT_OF_MEMORY),
 				 errmsg("out of memory")));
-	a->private = true;
+	context->private = true;
 	for (heapno = 0; heapno < SB_NUM_SIZE_CLASSES; ++heapno)
 	{
-		sb_heap    *heap = &a->heaps[heapno];
+		sb_heap    *heap = &context->heaps[heapno];
 
 		relptr_store(base, heap->lock, (LWLock *) NULL);
 		for (fclass = 0; fclass < SB_FULLNESS_CLASSES; ++fclass)
 			relptr_store(base, heap->spans[fclass], (sb_span *) NULL);
 	}
 
-	return a;
+	return context;
 }
 
 /*
 * Allocate memory.
 */
 void *
-sb_alloc(sb_allocator *a, Size size, int flags)
+BlockAllocatorAlloc(BlockAllocatorContext *context, Size size, int flags)
 {
 	AllocatorRegion *region = NULL;
 	char	   *base = NULL;
@@ -220,9 +220,9 @@
 	 * a region; we'll only need to grab a region if we can't allocate out of
 	 * an existing superblock.
 	 */
-	if (!a->private)
+	if (!context->private)
 	{
-		region = LookupAllocatorRegion(a);
+		region = LookupAllocatorRegion(context);
 		if (region == NULL)
 			elog(ERROR, "AllocatorRegion not found");
 		base = region->region_start;
@@ -234,17 +234,17 @@
 		Size		npages = fpm_size_to_pages(size);
 		Size		first_page;
 		sb_span    *span;
-		sb_heap    *heap = &a->heaps[SB_SCLASS_SPAN_LARGE];
+		sb_heap    *heap = &context->heaps[SB_SCLASS_SPAN_LARGE];
 		LWLock	   *lock = relptr_access(base, heap->lock);
 		void	   *ptr;
 
 		/* Obtain a span object. */
-		span = (sb_span *) sb_alloc_guts(base, region, a,
-										 SB_SCLASS_SPAN_OF_SPANS);
+		span = (sb_span *) BlockAllocatorAllocGuts(base, region, context,
+												   SB_SCLASS_SPAN_OF_SPANS);
 		if (span == NULL)
 		{
 			if ((flags & SB_ALLOC_SOFT_FAIL) == 0)
-				sb_out_of_memory_error(a);
+				sb_out_of_memory_error(context);
 			return NULL;
 		}
@@ -258,7 +258,7 @@
 		{
 			/* XXX. Free the span. */
 			if ((flags & SB_ALLOC_SOFT_FAIL) == 0)
-				sb_out_of_memory_error(a);
+				sb_out_of_memory_error(context);
 			return NULL;
 		}
 		ptr = fpm_page_to_pointer(fpm_segment_base(region->fpm), first_page);
@@ -305,9 +305,9 @@
 	Assert(size_class == 0 || size > sb_size_classes[size_class - 1]);
 
 	/* Attempt the actual allocation. */
-	result = sb_alloc_guts(base, region, a, size_class);
+	result = BlockAllocatorAllocGuts(base, region, context, size_class);
 	if (result == NULL && (flags & SB_ALLOC_SOFT_FAIL) == 0)
-		sb_out_of_memory_error(a);
+		sb_out_of_memory_error(context);
 
 	return result;
 }
 
 /*
- * Free memory allocated via sb_alloc.
+ * Free memory allocated via BlockAllocatorAlloc.
 */
 void
-sb_free(void *ptr)
+BlockAllocatorFree(void *ptr)
 {
 	AllocatorRegion *region;
 	char	   *fpm_base;
@@ -360,7 +360,7 @@
 		first_page = fpm_pointer_to_page(fpm_base,
 										 relptr_access(base, span->start));
 		FreePageManagerPut(region->fpm, first_page, span->npages);
-		sb_free(span);
+		BlockAllocatorFree(span);
 
 		/* We're done, but must release any lock first. */
 		if (lock != NULL)
@@ -423,7 +423,7 @@
 		 * type, we need to separately free the span object also.
 		 */
 		if (size_class != SB_SCLASS_SPAN_OF_SPANS)
-			sb_free(span);
+			BlockAllocatorFree(span);
 	}
 
 	/* If we locked the heap, release the lock. */
@@ -436,7 +436,7 @@
 * allocation.
 */
 Size
-sb_alloc_space(Size size)
+BlockAllocatorGetAllocSpace(Size size)
 {
 	uint16		size_class;
@@ -490,7 +490,7 @@
 * overhead of its own.
 */
 Size
-sb_chunk_space(void *ptr)
+BlockAllocatorGetChunkSpace(void *ptr)
 {
 	AllocatorRegion *region;
 	char	   *fpm_base;
@@ -518,7 +518,7 @@
 * NB: It's not safe to do this while the allocator is in use!
 */
 void
-sb_reset_allocator(sb_allocator *a)
+BlockAllocatorReset(BlockAllocatorContext *context)
 {
 	char	   *base = NULL;
 	int			heapno;
@@ -527,9 +527,9 @@
 	 * For shared memory allocation, pointers are relative to the start of the
 	 * region.
 	 */
-	if (!a->private)
+	if (!context->private)
 	{
-		AllocatorRegion *region = LookupAllocatorRegion(a);
+		AllocatorRegion *region = LookupAllocatorRegion(context);
 		if (region == NULL)
 			elog(ERROR, "AllocatorRegion not found");
 		base = region->region_start;
@@ -541,7 +541,7 @@
 	 */
 	for (heapno = SB_NUM_SIZE_CLASSES - 1; heapno >= 0; --heapno)
 	{
-		sb_heap *heap = &a->heaps[heapno];
+		sb_heap    *heap = &context->heaps[heapno];
 		int			fclass;
 
 		for (fclass = 0; fclass < SB_FULLNESS_CLASSES; ++fclass)
@@ -575,9 +575,10 @@
 * If necessary, steal or create another superblock.
 */
 static char *
-sb_alloc_guts(char *base, AllocatorRegion *region, sb_allocator *a, int size_class)
+BlockAllocatorAllocGuts(char *base, AllocatorRegion *region,
+						BlockAllocatorContext *context, int size_class)
 {
-	sb_heap *heap = &a->heaps[size_class];
+	sb_heap    *heap = &context->heaps[size_class];
 	LWLock	   *lock = relptr_access(base, heap->lock);
 	sb_span    *active_sb;
 	char	   *superblock;
@@ -593,7 +594,8 @@ sb_alloc_guts(char *base, AllocatorRegion *region, sb_allocator *a, int size_cla
 	 * fail the request.
 	 */
 	if (relptr_is_null(heap->spans[1])
-		&& !sb_ensure_active_superblock(base, region, a, heap, size_class))
+		&& !sb_ensure_active_superblock(base, region, context,
+										heap, size_class))
 	{
 		if (lock != NULL)
 			LWLockRelease(lock);
@@ -657,7 +659,8 @@
 * superblock that would otherwise become empty soon.
 */
 static bool
-sb_ensure_active_superblock(char *base, AllocatorRegion *region, sb_allocator *a,
+sb_ensure_active_superblock(char *base, AllocatorRegion *region,
+							BlockAllocatorContext *context,
 							sb_heap *heap, int size_class)
 {
 	Size		obsize = sb_size_classes[size_class];
@@ -755,10 +758,10 @@
 	 */
 	if (size_class != SB_SCLASS_SPAN_OF_SPANS)
 	{
-		AllocatorRegion *span_region = a->private ? NULL : region;
+		AllocatorRegion *span_region = context->private ? NULL : region;
 
-		span = (sb_span *) sb_alloc_guts(base, span_region, a,
-										 SB_SCLASS_SPAN_OF_SPANS);
+		span = (sb_span *) BlockAllocatorAllocGuts(base, span_region, context,
+												   SB_SCLASS_SPAN_OF_SPANS);
 		if (span == NULL)
 			return false;
 		npages = BLOCK_ALLOCATOR_PAGES_PER_CHUNK;
@@ -767,7 +770,7 @@
 
 	/* Find a region from which to allocate the superblock. */
 	if (region == NULL)
 	{
-		Assert(a->private);
+		Assert(context->private);
 		region = GetRegionForPrivateAllocation(npages);
 	}
@@ -838,9 +841,9 @@ sb_init_span(char *base, sb_span *span, sb_heap *heap, char *ptr,
 * Report an out-of-memory condition.
 */
 static void
-sb_out_of_memory_error(sb_allocator *a)
+sb_out_of_memory_error(BlockAllocatorContext *context)
 {
-	if (a->private)
+	if (context->private)
 		ereport(ERROR,
 				(errcode(ERRCODE_OUT_OF_MEMORY),
 				 errmsg("out of memory")));
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 993eef65cd..aa5f375f56 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -113,7 +113,7 @@
 #include "utils/memutils.h"
 #include "utils/pg_rusage.h"
 #include "utils/rel.h"
-#include "utils/sb_alloc.h"
+#include "utils/balloc.h"
 #include "utils/sortsupport.h"
 #include "utils/tuplesort.h"
 
@@ -217,7 +217,7 @@ struct Tuplesortstate
 	int			maxTapes;		/* number of tapes (Knuth's T) */
 	int			tapeRange;		/* maxTapes-1 (Knuth's P) */
 	MemoryContext sortcontext;	/* memory context holding all sort data */
-	sb_allocator *sortallocator;	/* superblock allocator holding sort data */
+	BlockAllocatorContext *sortallocator;	/* block allocator for sort data */
 	LogicalTapeSet *tapeset;	/* logtape.c object for tapes in a temp file */
 
 	/*
@@ -730,7 +730,7 @@ tuplesort_begin_index_btree(Relation heapRel,
 {
 	Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
 	MemoryContext oldcontext;
 
-	state->sortallocator = sb_create_private_allocator();
+	state->sortallocator = BlockAllocatorContextCreate();
 	oldcontext = MemoryContextSwitchTo(state->sortcontext);
@@ -3350,9 +3350,9 @@ copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
 	IndexTuple	newtuple;
 
 	/* copy the tuple into sort storage */
-	newtuple = (IndexTuple) sb_alloc(state->sortallocator, tuplen, 0);
+	newtuple = (IndexTuple) BlockAllocatorAlloc(state->sortallocator, tuplen, 0);
 	memcpy(newtuple, tuple, tuplen);
-	USEMEM(state, sb_alloc_space(tuplen));
+	USEMEM(state, BlockAllocatorGetAllocSpace(tuplen));
 	stup->tuple = (void *) newtuple;
 	/* set up first-column key value */
 	stup->datum1 = index_getattr(newtuple,
@@ -3376,8 +3376,8 @@ writetup_index(Tuplesortstate *state, int tapenum, SortTuple *stup)
 	LogicalTapeWrite(state->tapeset, tapenum,
 					 (void *) &tuplen, sizeof(tuplen));
 
-	FREEMEM(state, sb_chunk_space(tuple));
-	sb_free(tuple);
+	FREEMEM(state, BlockAllocatorGetChunkSpace(tuple));
+	BlockAllocatorFree(tuple);
 }
 
 static void
@@ -3385,9 +3385,9 @@ readtup_index(Tuplesortstate *state, SortTuple *stup,
 			  int tapenum, unsigned int len)
 {
 	unsigned int tuplen = len - sizeof(unsigned int);
-	IndexTuple	tuple = (IndexTuple) sb_alloc(state->sortallocator, tuplen, 0);
+	IndexTuple	tuple = (IndexTuple) BlockAllocatorAlloc(state->sortallocator, tuplen, 0);
 
-	USEMEM(state, sb_chunk_space(tuple));
+	USEMEM(state, BlockAllocatorGetChunkSpace(tuple));
 	LogicalTapeReadExact(state->tapeset, tapenum,
 						 tuple, tuplen);
 	if (state->randomAccess)	/* need trailing length word? */
diff --git a/src/include/utils/aregion.h b/src/include/utils/aregion.h
index 7081e7ff24..2309a38a27 100644
--- a/src/include/utils/aregion.h
+++ b/src/include/utils/aregion.h
@@ -18,7 +18,7 @@
 #include "storage/dsm.h"
 #include "storage/shm_toc.h"
 #include "utils/freepage.h"
-#include "utils/sb_alloc.h"
+#include "utils/balloc.h"
 #include "utils/balloc_map.h"
 
 /*
@@ -30,8 +30,8 @@ typedef struct AllocatorRegion
 	char	   *region_start;	/* Address of region. */
 	Size		region_size;	/* Number of bytes in region. */
 	Size		usable_pages;	/* Number of usable pages in region. */
-	dsm_segment *seg;			/* If not backend-private, DSM handle. */
-	sb_allocator *allocator;	/* If not backend-private, shared allocator. */
+	dsm_segment *seg;			/* DSM handle (if not private). */
+	BlockAllocatorContext *context;		/* Shared allocator (if not private). */
 	FreePageManager *fpm;		/* Free page manager for region (if any). */
 	BlockAllocatorMap *pagemap;	/* Page map for region (if any). */
 	Size		contiguous_pages;	/* Last reported contiguous free pages. */
@@ -48,7 +48,7 @@ typedef struct AllocatorSharedRegion
 {
 	relptr(FreePageManager) fpm;
 	relptr(BlockAllocatorMap) pagemap;
-	relptr(sb_allocator) allocator;
+	relptr(BlockAllocatorContext) allocator;
 	int			lwlock_tranche_id;
 	char		lwlock_tranche_name[FLEXIBLE_ARRAY_MEMBER];
 } AllocatorSharedRegion;
@@ -58,7 +58,7 @@ extern AllocatorSharedRegion *CreateAllocatorSharedRegion(dsm_segment *seg,
 					shm_toc *toc, Size size, int lwlock_tranche_id,
 					char *lwlock_tranche_name);
-extern sb_allocator *AttachAllocatorSharedRegion(dsm_segment *,
+extern BlockAllocatorContext *AttachAllocatorSharedRegion(dsm_segment *,
 					AllocatorSharedRegion *);
 
 extern void DumpAllocatorRegions(void);
diff --git a/src/include/utils/sb_alloc.h b/src/include/utils/balloc.h
similarity index 55%
rename from src/include/utils/sb_alloc.h
rename to src/include/utils/balloc.h
index 1ce53340e5..2048847a83 100644
--- a/src/include/utils/sb_alloc.h
+++ b/src/include/utils/balloc.h
@@ -1,23 +1,23 @@
 /*-------------------------------------------------------------------------
  *
- * sb_alloc.h
- *	  Superblock-based memory allocator.
+ * balloc.h
+ *	  Block-based memory allocator.
  *
  * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * src/include/utils/sb_alloc.h
+ * src/include/utils/balloc.h
  *
  *-------------------------------------------------------------------------
  */
-#ifndef SB_ALLOC_H
-#define SB_ALLOC_H
+#ifndef BALLOC_H
+#define BALLOC_H
 
 #include "storage/lwlock.h"
 #include "utils/relptr.h"
 
-typedef struct sb_allocator sb_allocator;
+typedef struct BlockAllocatorContext BlockAllocatorContext;
 
 /* Number of pages (see FPM_PAGE_SIZE) per block-allocator chunk. */
 #define BLOCK_ALLOCATOR_PAGES_PER_CHUNK 16
@@ -27,16 +27,16 @@ typedef struct sb_allocator sb_allocator;
 #define SB_ALLOC_SOFT_FAIL	0x0002	/* return NULL if no mem */
 
 /* Functions to manipulate allocators. */
-extern sb_allocator *sb_create_private_allocator(void);
-extern void sb_reset_allocator(sb_allocator *a);
-extern void sb_destroy_private_allocator(sb_allocator *);
+extern BlockAllocatorContext *BlockAllocatorContextCreate(void);
+extern void BlockAllocatorReset(BlockAllocatorContext *);
+extern void BlockAllocatorDelete(BlockAllocatorContext *);
 
 /* Functions to allocate and free memory. */
-extern void *sb_alloc(sb_allocator *, Size, int flags);
-extern void sb_free(void *ptr);
+extern void *BlockAllocatorAlloc(BlockAllocatorContext *, Size, int flags);
+extern void BlockAllocatorFree(void *ptr);
 
 /* Reporting functions. */
-extern Size sb_alloc_space(Size size);
-extern Size sb_chunk_space(void *ptr);
+extern Size BlockAllocatorGetAllocSpace(Size size);
+extern Size BlockAllocatorGetChunkSpace(void *ptr);
 
-#endif   /* SB_ALLOC_H */
+#endif   /* BALLOC_H */
-- 
2.39.5
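
Reviewer note, not part of the patch: the renamed API is exercised end to end by
contrib/test_sballoc above. For reference, a minimal backend-private usage sketch
looks like the following. It assumes only the balloc.h declarations in this patch;
the function name balloc_example is hypothetical, and error handling is elided.

    #include "postgres.h"
    #include "utils/balloc.h"

    static void
    balloc_example(void)
    {
        BlockAllocatorContext *context;
        int64      *p;

        /* Create a backend-private block allocator context. */
        context = BlockAllocatorContextCreate();

        /*
         * flags = 0 raises an out-of-memory error on failure;
         * SB_ALLOC_SOFT_FAIL would return NULL instead.
         */
        p = BlockAllocatorAlloc(context, sizeof(int64), 0);
        *p = 42;

        /* Objects can be freed by address alone; the region is found via lookup... */
        BlockAllocatorFree(p);

        /* ...or the whole context can be released in bulk (not safe while in use). */
        BlockAllocatorReset(context);
    }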