From: Robert Haas
Date: Thu, 20 Feb 2014 01:47:13 +0000 (-0500)
Subject: More hacking.
X-Git-Url: http://git.postgresql.org/gitweb/static/developers.postgresql.org?a=commitdiff_plain;h=70ca936dbba87cd9365ae8e56e1ffe2d25008776;p=users%2Frhaas%2Fpostgres.git

More hacking.
---

diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c
index 2e4dda83fc..eeb2ab23a8 100644
--- a/src/backend/utils/mmgr/freepage.c
+++ b/src/backend/utils/mmgr/freepage.c
@@ -158,6 +158,7 @@ FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
     if (lock != NULL)
         LWLockAcquire(lock, LW_EXCLUSIVE);
     result = FreePageManagerGetInternal(fpm, npages, first_page);
+    /* XXX. Try to softly PutInternal recycled pages? */
     if (lock != NULL)
         LWLockRelease(lock);
 
@@ -177,22 +178,9 @@ FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
     if (lock != NULL)
         LWLockAcquire(lock, LW_EXCLUSIVE);
 
-    /*
-     * As a special case, we store the very first range in the FreePageManager
-     * itself, so that a request for the entire number of pages will succeed.
-     * Otherwise, we must build or update a btree.
-     */
-    if (fpm->btree_depth == 0 && fpm->singleton_npages == 0)
-    {
-        fpm->singleton_first_page = first_page;
-        fpm->singleton_npages = npages;
-    }
-    else if (fpm->btree_depth == 0)
-    {
-        /* XXX Create the btree. */
-    }
-    else
-        FreePageManagerPutInternal(fpm, first_page, npages, false);
+    FreePageManagerPutInternal(fpm, first_page, npages, false);
+
+    /* XXX. Try to softly PutInternal recycled pages? */
 
     /* Release lock (if there is one). */
     if (lock != NULL)
@@ -368,31 +356,47 @@ FreePageManagerGetInternal(FreePageManager *fpm, Size npages, Size *first_page)
         relptr_copy(next->prev, victim->prev);
 
     /*
-     * If the span we found is exactly the right size, remove it from the
-     * btree completely. Otherwise, adjust the btree entry to reflect the
-     * still-unallocated portion of the span, and put that portion on the
-     * appropriate free list.
+     * If we haven't initialized the btree yet, the victim must be the single
+     * span stored within the FreePageManager itself. Otherwise, we need
+     * to update the btree.
      */
-    FreePageBtreeSearch(fpm, victim_page, &result);
-    Assert(result.page_exact != NULL);
-    if (victim->npages == npages)
-        FreePageBtreeRemove(fpm, result.page_exact, result.index_exact);
+    if (relptr_is_null(fpm->btree_root))
+    {
+        Assert(fpm_pointer_to_page(base, victim) == fpm->singleton_first_page);
+        Assert(victim->npages == fpm->singleton_npages);
+        Assert(victim->npages >= npages);
+        fpm->singleton_first_page += npages;
+        fpm->singleton_npages -= npages;
+    }
     else
     {
-        FreePageBtreeLeafKey *key;
-
-        /* Adjust btree to reflect remaining pages. */
-        Assert(victim->npages > npages);
-        key = &result.page_exact->u.leaf_key[result.index_exact];
-        Assert(key->npages == victim->npages);
-        key->first_page += npages;
-        key->npages -= npages;
-        if (result.index_exact == 0)
-            FreePageBtreeAdjustAncestorKeys(fpm, result.page_exact);
-
-        /* Put the unallocated pages back on the appropriate free list. */
-        FreePagePushSpanLeader(fpm, victim_page + npages,
-                               victim->npages - npages);
+        /*
+         * If the span we found is exactly the right size, remove it from the
+         * btree completely. Otherwise, adjust the btree entry to reflect the
+         * still-unallocated portion of the span, and put that portion on the
+         * appropriate free list.
+         */
+        FreePageBtreeSearch(fpm, victim_page, &result);
+        Assert(result.page_exact != NULL);
+        if (victim->npages == npages)
+            FreePageBtreeRemove(fpm, result.page_exact, result.index_exact);
+        else
+        {
+            FreePageBtreeLeafKey *key;
+
+            /* Adjust btree to reflect remaining pages. */
+            Assert(victim->npages > npages);
+            key = &result.page_exact->u.leaf_key[result.index_exact];
+            Assert(key->npages == victim->npages);
+            key->first_page += npages;
+            key->npages -= npages;
+            if (result.index_exact == 0)
+                FreePageBtreeAdjustAncestorKeys(fpm, result.page_exact);
+
+            /* Put the unallocated pages back on the appropriate free list. */
+            FreePagePushSpanLeader(fpm, victim_page + npages,
+                                   victim->npages - npages);
+        }
     }
 
     /* Return results to caller. */
@@ -408,8 +412,10 @@ FreePageBtreeGetRecycled(FreePageManager *fpm)
 {
     char       *base = fpm_segment_base(fpm);
     FreePageSpanLeader *victim = relptr_access(base, fpm->btree_recycle);
-    FreePageSpanLeader *newhead = relptr_access(base, victim->next);
+    FreePageSpanLeader *newhead;
 
+    Assert(victim != NULL);
+    newhead = relptr_access(base, victim->next);
     relptr_copy(newhead->prev, victim->prev);
     relptr_store(base, fpm->btree_recycle, newhead);
     Assert(fpm_pointer_is_page_aligned(base, victim));
@@ -782,6 +788,50 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
     FreePageBtree *btp;
     Size        index;
 
+    /*
+     * As a special case, we store the very first range in the FreePageManager
+     * itself, so that a request for the entire number of pages will succeed.
+     * Otherwise, we must build or update a btree.
+     */
+    if (fpm->btree_depth == 0 && fpm->singleton_npages == 0)
+    {
+        fpm->singleton_first_page = first_page;
+        fpm->singleton_npages = npages;
+        return true;
+    }
+
+    /*
+     * When we see the second range, we need to initialize the btree for
+     * real.
+     */
+    if (fpm->btree_depth == 0)
+    {
+        char       *base = fpm_segment_base(fpm);
+        Size        root_page;
+        FreePageBtree *root;
+
+        if (!relptr_is_null(fpm->btree_recycle))
+            root = FreePageBtreeGetRecycled(fpm);
+        else if (FreePageManagerGetInternal(fpm, 1, &root_page))
+            root = (FreePageBtree *) fpm_page_to_pointer(base, root_page);
+        else
+        {
+            /* We'd better be able to get a page from the existing range. */
+            elog(FATAL, "free page manager btree is corrupt");
+        }
+
+        /* Create the btree and move the preexisting range into it. */
+        root->hdr.magic = FREE_PAGE_LEAF_MAGIC;
+        root->hdr.nused = 1;
+        relptr_store(base, root->hdr.parent, (FreePageBtree *) NULL);
+        root->u.leaf_key[0].first_page = fpm->singleton_first_page;
+        root->u.leaf_key[0].npages = fpm->singleton_npages;
+        fpm->singleton_first_page = 0;
+        fpm->singleton_npages = 0;
+
+        /* Fall through to insert the new key. */
+    }
+
     /* Search the btree. */
     FreePageBtreeSearch(fpm, first_page, &result);
     Assert(result.page_exact == NULL);  /* can't already be there */
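
The pattern the revised FreePageManagerPutInternal follows -- keep the very first free range inline in the FreePageManager and only build the btree once a second range shows up -- can be sketched in a few lines of standalone C. The sketch below is illustrative only, not PostgreSQL code: ToyManager, ToyRange, toy_put(), and the fixed-size array standing in for the btree are all hypothetical simplifications.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_MAX_RANGES 64

typedef struct
{
    size_t first_page;
    size_t npages;
} ToyRange;

typedef struct
{
    ToyRange singleton;              /* inline storage for the very first range */
    int      nranges;                /* entries used in the stand-in "btree" */
    ToyRange index[TOY_MAX_RANGES];  /* flat array standing in for the btree */
} ToyManager;

static void
toy_put(ToyManager *m, size_t first_page, size_t npages)
{
    if (m->nranges == 0 && m->singleton.npages == 0)
    {
        /* First range ever: store it inline, no index needed yet. */
        m->singleton.first_page = first_page;
        m->singleton.npages = npages;
        return;
    }

    if (m->nranges == 0)
    {
        /* Second range: build the index and move the inline range into it. */
        m->index[m->nranges++] = m->singleton;
        m->singleton.first_page = 0;
        m->singleton.npages = 0;
    }

    /* Insert the new range into the index. */
    assert(m->nranges < TOY_MAX_RANGES);
    m->index[m->nranges].first_page = first_page;
    m->index[m->nranges].npages = npages;
    m->nranges++;
}

int
main(void)
{
    ToyManager m = {0};

    toy_put(&m, 10, 5);    /* stored inline in m.singleton */
    toy_put(&m, 100, 3);   /* promotes the inline range into the index */
    printf("index entries: %d\n", m.nranges);  /* prints 2 */
    return 0;
}

In the real code the page holding the btree root comes out of the free space being managed (via FreePageBtreeGetRecycled or FreePageManagerGetInternal) rather than from a preallocated array, but the promote-on-second-put control flow is the same.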