From: Robert Haas
Date: Thu, 10 Apr 2014 22:57:20 +0000 (+0000)
Subject: sb_region: Improve private freelist management.
X-Git-Url: http://git.postgresql.org/gitweb/static/developers.postgresql.org?a=commitdiff_plain;h=f83f74240a07fa0e65b5fbd168dc5a83ebf21f9a;p=users%2Frhaas%2Fpostgres.git

sb_region: Improve private freelist management.
---

diff --git a/src/backend/utils/mmgr/sb_region.c b/src/backend/utils/mmgr/sb_region.c
index 4de545a687..5d686c8d80 100644
--- a/src/backend/utils/mmgr/sb_region.c
+++ b/src/backend/utils/mmgr/sb_region.c
@@ -270,28 +270,23 @@ sb_private_region_for_allocator(Size npages)
 
 		region = dlist_container(sb_region, fl_node, iter.cur);
 
-		/* Skip regions that are certain not to have space. */
-		if (region->contiguous_pages < npages)
+		/*
+		 * Quickly skip regions which appear to have enough space to
+		 * belong on this freelist but which don't have enough space to
+		 * satisfy the request, to avoid probing every region on the list
+		 * for its exact free space on every trip through.
+		 */
+		if (region->contiguous_pages >= threshold &&
+			region->contiguous_pages < npages)
 			continue;
 
 		/*
-		 * The region we're examining was at one point reported to
-		 * have adequate space, but subsequent allocations might have
-		 * eroded that, so recheck.  If there's enough, we're done!
-		 *
-		 * NB: For larger allocations this might be suboptimal, because
-		 * we might carve space out of a chunk that's bigger than we
-		 * really need rather than locating the best fit across all
-		 * chunks.  It shouldn't be too far off, though, because
-		 * chunks with way more contiguous space available will be on
-		 * a higher-numbered freelist.  For really large allocations,
-		 * it's probably better to malloc() directly than go through
-		 * this machinery.
+		 * We're going to either use this region or move it to a
+		 * lower-numbered freelist or both, so determine the precise size
+		 * of the largest remaining run of pages.
		 */
 		largest = FreePageManagerInquireLargest(region->fpm);
 		region->contiguous_pages = largest;
-		if (largest >= npages)
-			return region;
 
 		/*
 		 * The region we're examining not only doesn't have enough
@@ -300,12 +295,26 @@
 		 */
 		if (largest < threshold)
 		{
-			int		new_freelist = Min(fls(npages), NUM_PRIVATE_FREELISTS);
+			int		new_freelist = Min(fls(largest), NUM_PRIVATE_FREELISTS);
 
 			dlist_delete(iter.cur);
 			dlist_push_head(&private_freelist[new_freelist],
							&region->fl_node);
 		}
+
+		/*
+		 * If the region is big enough, use it.  For larger allocations
+		 * this might be suboptimal, because we might carve space out of a
+		 * chunk that's bigger than we really need rather than locating
+		 * the best fit across all chunks.  It shouldn't be too far off,
+		 * though, because chunks with way more contiguous space available
+		 * will be on a higher-numbered freelist.
+		 *
+		 * NB: For really large backend-private allocations, it's probably
+		 * better to malloc() directly than go through this machinery.
+		 */
+		if (largest >= npages)
+			return region;
 	}
 
 	/* Try next freelist. */
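
Below is a small standalone C sketch (not part of the commit) of the bucketing idea the new skip test depends on: regions sit on private freelists indexed by fls() of their largest contiguous run, so a cached contiguous_pages value that is at or above the current list's threshold but still below the request can be skipped without calling FreePageManagerInquireLargest(). The threshold formula, the fls_sketch() helper, and the sample numbers are assumptions for illustration only, not code from sb_region.c.

/*
 * Sketch of fls()-bucketed freelists and the skip heuristic.
 * Assumption: a request for npages starts at freelist fls(npages), and the
 * smallest run belonging on that list is 1 << (fls(npages) - 1).
 */
#include <stdio.h>

#define NUM_PRIVATE_FREELISTS 16

/* "find last set": 1-based position of the highest set bit; 0 for 0. */
static int
fls_sketch(unsigned int x)
{
	int		bit = 0;

	while (x != 0)
	{
		bit++;
		x >>= 1;
	}
	return bit;
}

int
main(void)
{
	unsigned int npages = 37;	/* hypothetical allocation request */

	/* Freelist to start searching, capped at the number of lists. */
	int		fl = fls_sketch(npages) < NUM_PRIVATE_FREELISTS ?
		fls_sketch(npages) : NUM_PRIVATE_FREELISTS;

	/* Assumed threshold: smallest run that still belongs on freelist fl. */
	unsigned int threshold = 1u << (fl - 1);

	/* Cached contiguous_pages for four hypothetical regions on this list. */
	unsigned int cached[] = {40, 33, 64, 20};

	printf("request %u pages -> freelist %d, threshold %u\n",
		   npages, fl, threshold);

	for (int i = 0; i < 4; i++)
	{
		if (cached[i] >= threshold && cached[i] < npages)
			printf("region %d (cached %u): skip without probing\n",
				   i, cached[i]);
		else if (cached[i] < threshold)
			/* Real code probes first and refiles by the probed value;
			 * here the cached value stands in for that result. */
			printf("region %d (cached %u): probe, refile on freelist %d\n",
				   i, cached[i], fls_sketch(cached[i]));
		else
			printf("region %d (cached %u): probe, may satisfy request\n",
				   i, cached[i]);
	}
	return 0;
}

With these sample values, the region caching 33 pages is skipped outright (enough to stay on the list, not enough for the request), while the regions caching 40 and 64 are probed as candidates and the stale 20-page entry is probed and moved to a lower-numbered list, mirroring the flow of the patched loop.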