From f04f98540d6921a5a1a3a6e78ca5dd57fc33df11 Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Tue, 25 Mar 2014 16:23:55 -0700 Subject: [PATCH] Fix bugs. --- src/backend/utils/mmgr/sb_alloc.c | 48 +++++++++++++++++------------- src/backend/utils/mmgr/sb_map.c | 40 ++++++++++++++++--------- src/backend/utils/mmgr/sb_region.c | 4 +-- src/include/utils/sb_map.h | 8 ++--- 4 files changed, 59 insertions(+), 41 deletions(-) diff --git a/src/backend/utils/mmgr/sb_alloc.c b/src/backend/utils/mmgr/sb_alloc.c index 4c707f73e2..954992fb63 100644 --- a/src/backend/utils/mmgr/sb_alloc.c +++ b/src/backend/utils/mmgr/sb_alloc.c @@ -115,7 +115,7 @@ static char *sb_alloc_from_heap(char *base, sb_heap *heap, Size obsize); static char *sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class); static void sb_init_span(char *base, sb_span *span, sb_heap *heap, - Size first_page, Size npages, uint16 size_class); + char *ptr, Size npages, uint16 size_class); static void sb_out_of_memory_error(sb_allocator *a); static bool sb_transfer_first_span(char *base, sb_heap *heap, int fromclass, int toclass); @@ -169,6 +169,8 @@ sb_alloc(sb_allocator *a, Size size, int flags) uint16 size_class; char *result; + Assert(size > 0); + /* * For shared memory allocation, pointers are relative to the start of the * region, so finding out that information is essential. For @@ -194,6 +196,7 @@ sb_alloc(sb_allocator *a, Size size, int flags) int heapno = heapproc * SB_NUM_SIZE_CLASSES + size_class; sb_heap *heap = &a->heaps[heapno]; LWLock *lock = relptr_access(base, heap->lock); + void *ptr; /* Obtain a span object. */ span = (sb_span *) sb_alloc_guts(base, region, a, @@ -218,26 +221,26 @@ sb_alloc(sb_allocator *a, Size size, int flags) sb_out_of_memory_error(a); return NULL; } + ptr = fpm_page_to_pointer(fpm_segment_base(region->fpm), first_page); /* Initialize span and pagemap. 
*/ if (lock != NULL) LWLockAcquire(lock, LW_EXCLUSIVE); - sb_init_span(base, span, heap, first_page, npages, size_class); + sb_init_span(base, span, heap, ptr, npages, size_class); if (lock != NULL) LWLockRelease(lock); - sb_map_set(region->pagemap, first_page, ((char *) span) - base); + sb_map_set(region->pagemap, first_page, span); - return fpm_page_to_pointer(fpm_segment_base(region->fpm), - first_page); + return ptr; } /* Map allocation to a size class. */ - if (size < lengthof(sb_size_class_map) * SB_SIZE_CLASS_MAP_QUANTUM) + if (size <= lengthof(sb_size_class_map) * SB_SIZE_CLASS_MAP_QUANTUM) { int mapidx; - mapidx = (size + SB_SIZE_CLASS_MAP_QUANTUM - 1) / - SB_SIZE_CLASS_MAP_QUANTUM; + mapidx = ((size + SB_SIZE_CLASS_MAP_QUANTUM - 1) / + SB_SIZE_CLASS_MAP_QUANTUM) - 1; size_class = sb_size_class_map[mapidx]; } else @@ -259,6 +262,7 @@ sb_alloc(sb_allocator *a, Size size, int flags) size_class = min; } Assert(size <= sb_size_classes[size_class]); + Assert(size_class == 0 || size > sb_size_classes[size_class - 1]); size_class += SB_SCLASS_FIRST_REGULAR; /* Attempt the actual allocation. */ @@ -374,7 +378,7 @@ sb_alloc_from_heap(char *base, sb_heap *heap, Size obsize) superblock = base + active_sb->first_page * FPM_PAGE_SIZE; Assert(active_sb != NULL); Assert(active_sb->nused < nmax); - Assert(active_sb->nused < active_sb->ninitialized); + Assert(active_sb->nused <= active_sb->ninitialized); if (active_sb->firstfree < nmax) { result = superblock + active_sb->firstfree * obsize; @@ -395,7 +399,7 @@ sb_alloc_from_heap(char *base, sb_heap *heap, Size obsize) } /* - * Allocate an object of the requeted size class from the given allocator. + * Allocate an object of the requested size class from the given allocator. * If necessary, steal or create another superblock. 
*/ static char * @@ -445,6 +449,7 @@ sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class) Size npages = 1; Size first_page; Size i; + void *ptr; /* * Get an sb_span object to describe the new superblock... unless @@ -454,7 +459,9 @@ sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class) */ if (size_class != SB_SCLASS_SPAN_OF_SPANS) { - span = (sb_span *) sb_alloc_guts(base, region, a, + sb_region *span_region = a->private ? NULL : region; + + span = (sb_span *) sb_alloc_guts(base, span_region, a, SB_SCLASS_SPAN_OF_SPANS); if (span == NULL) return NULL; @@ -472,22 +479,20 @@ sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class) /* XXX. Free the span, if any. */ return NULL; } + ptr = fpm_page_to_pointer(fpm_segment_base(region->fpm), + first_page); /* * If this is a span-of-spans, carve the descriptor right out of * the allocated space. */ if (size_class == SB_SCLASS_SPAN_OF_SPANS) - { - char *fpm_base = fpm_segment_base(region->fpm); - span = (sb_span *) fpm_page_to_pointer(fpm_base, first_page); - } + span = (sb_span *) ptr; /* Initialize span and pagemap. */ - sb_init_span(base, span, heap, first_page, npages, size_class); + sb_init_span(base, span, heap, ptr, npages, size_class); for (i = 0; i < npages; ++i) - sb_map_set(region->pagemap, first_page + i, - ((char *) span) - base); + sb_map_set(region->pagemap, first_page + i, span); /* For a span-of-spans, record that we allocated ourselves. */ if (size_class == SB_SCLASS_SPAN_OF_SPANS) @@ -503,14 +508,14 @@ sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class) if (lock != NULL) LWLockRelease(lock); - return NULL; + return result; } /* * Add a new span to fullness class 1 of the indicated heap. 
*/ static void -sb_init_span(char *base, sb_span *span, sb_heap *heap, Size first_page, +sb_init_span(char *base, sb_span *span, sb_heap *heap, char *ptr, Size npages, uint16 size_class) { sb_span *head = relptr_access(base, heap->spans[1]); @@ -520,7 +525,8 @@ sb_init_span(char *base, sb_span *span, sb_heap *heap, Size first_page, relptr_store(base, span->parent, heap); relptr_store(base, span->nextspan, head); relptr_store(base, span->prevspan, (sb_span *) NULL); - span->first_page = first_page; + relptr_store(base, heap->spans[1], span); + span->first_page = (ptr - base) / FPM_PAGE_SIZE; span->npages = npages; span->size_class = size_class; span->ninitialized = 0; diff --git a/src/backend/utils/mmgr/sb_map.c b/src/backend/utils/mmgr/sb_map.c index 84f45edc39..e66c2c076b 100644 --- a/src/backend/utils/mmgr/sb_map.c +++ b/src/backend/utils/mmgr/sb_map.c @@ -40,7 +40,9 @@ const uint64 maxpages_4b = UINT64CONST(0x100000000) / FPM_PAGE_SIZE; struct sb_map { + relptr(sb_map) self; Size npages; + bool use64; }; /* Map layout for segments less than 4GB. */ @@ -57,17 +59,21 @@ typedef struct sb_map64 uint64 map[FLEXIBLE_ARRAY_MEMBER]; } sb_map64; +#define sb_map_base(m) \ + (((char *) m) - m->self.relptr_off) + /* * Compute the amount of space required for an sb_map covering a given - * number of pages. Note we assume that the maximum offset we'll be asked - * to store is governed by that number of pages also. + * number of pages. Note that for shared memory (i.e. when base != NULL), + * we assume that the pointers will always point to addresses within that + * same segment, but for backend-private memory that might not be the case. */ Size -sb_map_size(Size npages) +sb_map_size(char *base, Size npages) { Size map_bytes; - if (npages < maxpages_4b) + if (sizeof(Size) <= 4 || (base != NULL && npages < maxpages_4b)) map_bytes = add_size(offsetof(sb_map32, map), mul_size(npages, sizeof(uint32))); else @@ -83,37 +89,43 @@ sb_map_size(Size npages) * been set. 
*/ void -sb_map_initialize(sb_map *m, Size npages) +sb_map_initialize(sb_map *m, char *base, Size npages) { + relptr_store(base, m->self, m); m->npages = npages; + if (sizeof(Size) <= 4 || (base != NULL && npages < maxpages_4b)) + m->use64 = false; + else + m->use64 = true; } /* * Store a value into an sb_map. */ void -sb_map_set(sb_map *m, Size pageno, Size offset) +sb_map_set(sb_map *m, Size pageno, void *ptr) { + char *base = sb_map_base(m); Assert(pageno < m->npages); - Assert(offset / FPM_PAGE_SIZE < m->npages); - if (m->npages < maxpages_4b) - ((sb_map32 *) m)->map[pageno] = (uint32) offset; + if (m->use64) + ((sb_map64 *) m)->map[pageno] = (uint64) (((char *) ptr) - base); else - ((sb_map64 *) m)->map[pageno] = (uint32) offset; + ((sb_map32 *) m)->map[pageno] = (uint32) (((char *) ptr) - base); } /* * Get a value from an sb_map. Getting a value not previously stored will * produce an undefined result, so don't do that. */ -Size +void * sb_map_get(sb_map *m, Size pageno) { + char *base = sb_map_base(m); Assert(pageno < m->npages); - if (m->npages < maxpages_4b) - return ((sb_map32 *) m)->map[pageno]; + if (m->use64) + return base + ((sb_map64 *) m)->map[pageno]; else - return ((sb_map64 *) m)->map[pageno]; + return base + ((sb_map32 *) m)->map[pageno]; } diff --git a/src/backend/utils/mmgr/sb_region.c b/src/backend/utils/mmgr/sb_region.c index a0d2f8c8e5..b695bcf8c0 100644 --- a/src/backend/utils/mmgr/sb_region.c +++ b/src/backend/utils/mmgr/sb_region.c @@ -258,7 +258,7 @@ sb_private_region_for_allocator(Size npages) */ metadata_bytes = MAXALIGN(sizeof(sb_region)); metadata_bytes += MAXALIGN(sizeof(FreePageManager)); - metadata_bytes += MAXALIGN(sb_map_size(new_region_net_pages)); + metadata_bytes += MAXALIGN(sb_map_size(NULL, new_region_net_pages)); if (metadata_bytes % FPM_PAGE_SIZE != 0) metadata_bytes += FPM_PAGE_SIZE - (metadata_bytes % FPM_PAGE_SIZE); region_size = new_region_net_pages * FPM_PAGE_SIZE + metadata_bytes; @@ -299,7 +299,7 @@ 
sb_private_region_for_allocator(Size npages) FreePageManagerInitialize(region->fpm, region->region_start, NULL, false); FreePageManagerPut(region->fpm, metadata_bytes / FPM_PAGE_SIZE, new_region_net_pages); - sb_map_initialize(region->pagemap, new_region_net_pages); + sb_map_initialize(region->pagemap, NULL, new_region_net_pages); region->contiguous_pages = new_region_net_pages; /* Now fix the value. */ freelist = Min(fls(new_region_net_pages), NUM_PRIVATE_FREELISTS); dlist_push_head(&private_freelist[freelist], &region->fl_node); diff --git a/src/include/utils/sb_map.h b/src/include/utils/sb_map.h index 402a452794..a0683537c4 100644 --- a/src/include/utils/sb_map.h +++ b/src/include/utils/sb_map.h @@ -16,9 +16,9 @@ typedef struct sb_map sb_map; -extern Size sb_map_size(Size npages); -extern void sb_map_initialize(sb_map *, Size npages); -extern void sb_map_set(sb_map *, Size pageno, Size offset); -extern Size sb_map_get(sb_map *, Size pageno); +extern Size sb_map_size(char *base, Size npages); +extern void sb_map_initialize(sb_map *, char *base, Size npages); +extern void sb_map_set(sb_map *, Size pageno, void *ptr); +extern void *sb_map_get(sb_map *, Size pageno); #endif /* SB_MAP_H */ -- 2.39.5