cbm_flags -> cbm_entries_in_newest_index_segment
author     Robert Haas <[email protected]>
           Fri, 1 Oct 2021 18:57:58 +0000 (14:57 -0400)
committer  Robert Haas <[email protected]>
           Fri, 1 Oct 2021 18:57:58 +0000 (14:57 -0400)
src/backend/access/conveyor/cbmetapage.c
src/include/access/cbmetapage.h
src/include/access/cbmetapage_format.h

index 2641dda293a8e4d924bea213328580661db15914..4076c5cf2c2827075e2aa5f152d8843b8f99acae 100644 (file)
@@ -26,6 +26,7 @@
 #include "postgres.h"
 
 #include "access/cbfsmpage.h"
+#include "access/cbindexpage.h"
 #include "access/cbmetapage.h"
 #include "access/cbmetapage_format.h"
 
@@ -139,8 +140,11 @@ cb_metapage_find_logical_page(CBMetapageData *meta,
  * will be set to the segment number of the newest index segment, or
  * CB_INVALID_SEGMENT if there is none.
  *
- * If the return value is CBM_INSERT_OK, there is an unfilled payload segment,
- * and *blkno will be set to the block number of the first unused page in that
+ * If the return value is CBM_INSERT_OK, *blkno will be set to the block number
+ * of the first unused page in the unfilled payload segment.
+ *
+ * If the return value is CBM_INSERT_NEEDS_INDEX_ENTRIES_RELOCATED, *blkno
+ * will be set to the first not-entirely-filled page in the newest index
  * segment.
  */
 CBMInsertState
@@ -172,22 +176,38 @@ cb_metapage_get_insert_state(CBMetapageData *meta,
         * metapage that now precede the logical truncation point, but that would
         * require a cleanup lock on the metapage, and it normally isn't going to
         * be possible, because typically the last truncate operation will have
-        * afterward done any such work that is possible. We might miss an
+        * afterwards done any such work that is possible. We might miss an
         * opportunity in the case where the last truncate operation didn't clean
         * up fully, but hopefully that's rare enough that we don't need to stress
         * about it.
         *
         * If the newest index segment is already full, then a new index segment
         * will need to be created. Otherwise, some entries can be copied into the
-        * existing index segment. To make things easier for the caller, there is
-        * a metapage flag to tell us which situation prevails.
+        * existing index segment.
         */
        if (relp >= CB_METAPAGE_INDEX_ENTRIES * meta->cbm_pages_per_segment)
        {
-               if ((meta->cbm_flags & CBM_FLAG_INDEX_SEGMENT_FULL) != 0)
+               unsigned        entries;
+               unsigned        maxentries;
+
+               entries = meta->cbm_entries_in_newest_index_segment;
+               maxentries = CB_INDEXPAGE_INDEX_ENTRIES * meta->cbm_pages_per_segment;
+
+               if (entries > maxentries)
+                       elog(ERROR,
+                                "newest index segment listed as using %u of %u entries",
+                                entries, maxentries);
+               else if (entries == maxentries)
                        return CBM_INSERT_NEEDS_INDEX_SEGMENT;
                else
+               {
+                       /* Figure out which block should be targeted. */
+                       *blkno = cb_segment_to_block(meta->cbm_pages_per_segment,
+                                                                                meta->cbm_newest_index_segment,
+                                                                                entries / CB_INDEXPAGE_INDEX_ENTRIES);
+
                        return CBM_INSERT_NEEDS_INDEX_ENTRIES_RELOCATED;
+               }
        }
 
        /* Compute current insertion segment and offset. */
@@ -374,7 +394,10 @@ cb_metapage_remove_index_entries(CBMetapageData *meta, unsigned count,
        meta->cbm_index_metapage_start +=
                count * meta->cbm_pages_per_segment;
        if (relocating)
+       {
                meta->cbm_index_start = meta->cbm_index_metapage_start;
+               meta->cbm_entries_in_newest_index_segment += count;
+       }
 }
 
 /*
@@ -425,6 +448,7 @@ void
 cb_metapage_add_index_segment(CBMetapageData *meta, CBSegNo segno)
 {
        meta->cbm_newest_index_segment = segno;
+       meta->cbm_entries_in_newest_index_segment = 0;
        if (meta->cbm_oldest_index_segment == CB_INVALID_SEGMENT)
                meta->cbm_oldest_index_segment = segno;
 }
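
The following standalone sketch, which is not part of the patch, restates the decision this hunk adds to cb_metapage_get_insert_state() once the metapage itself has no room for more index entries: compare the stored entry count against the capacity of the newest index segment and, while relocation is still possible, target the first index page that still has free entry slots. The constant values below are assumptions for illustration, and the real cb_segment_to_block() mapping from segment and page to block number is not reproduced.

#include <stdio.h>

#define ENTRIES_PER_INDEX_PAGE   64     /* assumed stand-in for CB_INDEXPAGE_INDEX_ENTRIES */
#define PAGES_PER_SEGMENT        16     /* assumed stand-in for cbm_pages_per_segment */

int
main(void)
{
	unsigned	entries = 130;		/* cbm_entries_in_newest_index_segment */
	unsigned	maxentries = ENTRIES_PER_INDEX_PAGE * PAGES_PER_SEGMENT;

	if (entries > maxentries)
		fprintf(stderr, "newest index segment listed as using %u of %u entries\n",
				entries, maxentries);
	else if (entries == maxentries)
		printf("metapage and newest index segment full: need a new index segment\n");
	else
		printf("relocate metapage entries into page %u of the newest index segment\n",
			   entries / ENTRIES_PER_INDEX_PAGE);	/* 130 / 64 = page 2 */

	return 0;
}
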
index d910afde50d86c057784881bb6ba2860e17b2183..9157677bfe02ed05bb4eb6767bb6cd3495654fd4 100644 (file)
@@ -72,7 +72,7 @@ typedef struct CBMetapageData CBMetapageData;
  * in the metapage for additional index entries, but there is room in the
  * newest index segment for entries to be relocated from the metapage.
  *
- * CBM_INSERT_NEEDS_INDEX_ENTRY_SPACE means that there is no more room in
+ * CBM_INSERT_NEEDS_INDEX_SEGMENT means that there is no more room in
  * the metapage for additional index entries, and the newest index segment
  * is full, too.
  */
index 081f570604a4499b1f952f543a6e657eecb5e240..a39ab44949b93d722ac83fe35157eb839ba3ba36 100644 (file)
@@ -43,14 +43,10 @@ struct CBMetapageData
         * larger reduces the number of index and freespace map segments required
         * and decreases fragmentation at the storage level, but it also increases
         * the granularity of space reuse.
-        *
-        * cbm_flags stores flags. Currently, the only flag is
-        * CBM_FLAG_INDEX_SEGMENT_FULL.
         */
        uint32          cbm_magic;
        uint32          cbm_version;
        uint16          cbm_pages_per_segment;
-       uint16          cbm_flags;
 
        /*
         * Logical start and end of the conveyor belt.
@@ -81,13 +77,11 @@ struct CBMetapageData
         * cbm_oldest_index_segment and cbm_newest_index_segment are the oldest
         * and newest index segments that exist. Both values will be
         * CB_INVALID_SEGMENT if there are no index segments. Otherwise, the
-        * mapping for cbm_oldest_logical_page is stored in the first entry in the
+        * mapping for cbm_index_start is stored in the first entry in the
         * first page of cbm_oldest_index_segment.
         *
-        * Note that the end of the newest index segment will often be unused, and
-        * its contents undefined. That's because new index entries are always
-        * inserted directly into the metapage, and later moved into index
-        * segments in bulk.
+        * cbm_entries_in_newest_index_segment is the number of index entries
+        * in the newest index segment, or 0 if there are no index segments.
         *
         * cbm_index_segments_moved is the total number of times in the history
         * of this conveyor belt that an index segment has been physically
@@ -102,6 +96,7 @@ struct CBMetapageData
        CBPageNo        cbm_index_metapage_start;
        CBSegNo         cbm_oldest_index_segment;
        CBSegNo         cbm_newest_index_segment;
+       unsigned        cbm_entries_in_newest_index_segment;
        uint64          cbm_index_segments_moved;
        CBSegNo         cbm_next_segment;
 
@@ -112,12 +107,4 @@ struct CBMetapageData
        uint8           cbm_freespace_map[CB_METAPAGE_FREESPACE_BYTES];
 };
 
-/*
- * Conveyor belt metapage flags.
- *
- * CBM_FLAG_INDEX_SEGMENT_FULL indicates that there is no room in the
- * newest index segment for any more index entries.
- */
-#define        CBM_FLAG_INDEX_SEGMENT_FULL                     0x0001
-
 #endif
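
Taking the three hunks together, the lifecycle of the new field can be sketched as follows: cb_metapage_add_index_segment() resets the count to zero, the relocating branch of cb_metapage_remove_index_entries() adds the number of relocated entries, and the newest index segment counts as full once the count reaches CB_INDEXPAGE_INDEX_ENTRIES * cbm_pages_per_segment, which is the condition the removed CBM_FLAG_INDEX_SEGMENT_FULL flag used to record. The struct, helper names, and constant values in this standalone sketch are illustrative assumptions, not the real definitions.

#include <stdbool.h>
#include <stdio.h>

#define ENTRIES_PER_INDEX_PAGE   64     /* assumed stand-in for CB_INDEXPAGE_INDEX_ENTRIES */
#define PAGES_PER_SEGMENT        16     /* assumed stand-in for cbm_pages_per_segment */

struct meta_sketch
{
	unsigned	entries_in_newest_index_segment;	/* the new counter */
};

static void
add_index_segment(struct meta_sketch *meta)
{
	/* mirrors cb_metapage_add_index_segment(): a new segment starts empty */
	meta->entries_in_newest_index_segment = 0;
}

static void
relocate_index_entries(struct meta_sketch *meta, unsigned count)
{
	/* mirrors the "relocating" branch of cb_metapage_remove_index_entries() */
	meta->entries_in_newest_index_segment += count;
}

static bool
newest_index_segment_full(const struct meta_sketch *meta)
{
	/* the condition that replaces the old CBM_FLAG_INDEX_SEGMENT_FULL flag */
	return meta->entries_in_newest_index_segment ==
		ENTRIES_PER_INDEX_PAGE * PAGES_PER_SEGMENT;
}

int
main(void)
{
	struct meta_sketch meta;

	add_index_segment(&meta);
	while (!newest_index_segment_full(&meta))
		relocate_index_entries(&meta, 64);
	printf("relocated %u entries; the newest index segment is now full\n",
		   meta.entries_in_newest_index_segment);
	return 0;
}
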