summaryrefslogtreecommitdiff
path: root/src/backend/access/nbtree/nbtpage.c
diff options
context:
space:
mode:
author: Tom Lane <tgl@sss.pgh.pa.us>  2013-01-11 12:56:58 -0500
committer: Tom Lane <tgl@sss.pgh.pa.us>  2013-01-11 12:56:58 -0500
commit31f38f28b00cbe2b9267205359e3cf7bafa1cb97 (patch)
treed6220cc699b6e265924ba5546e1e0eb83ec287c3 /src/backend/access/nbtree/nbtpage.c
parente1b735ae35f225927c95746babafaa84e39c3325 (diff)
downloadpostgresql-31f38f28b00cbe2b9267205359e3cf7bafa1cb97.tar.gz
Redesign the planner's handling of index-descent cost estimation.
Historically we've used a couple of very ad-hoc fudge factors to try to get the right results when indexes of different sizes would satisfy a query with the same number of index leaf tuples being visited. In commit 21a39de5809cd3050a37d2554323cc1d0cbeed9d I tweaked one of these fudge factors, with results that proved disastrous for larger indexes. Commit bf01e34b556ff37982ba2d882db424aa484c0d07 fudged it some more, but still with not a lot of principle behind it. What seems like a better way to address these issues is to explicitly model index-descent costs, since that's what's really at stake when considering different indexes with similar leaf-page-level costs. We tried that once long ago, and found that charging random_page_cost per page descended through was way too much, because upper btree levels tend to stay in cache in real-world workloads. However, there's still CPU costs to think about, and the previous fudge factors can be seen as a crude attempt to account for those costs. So this patch replaces those fudge factors with explicit charges for the number of tuple comparisons needed to descend the index tree, plus a small charge per page touched in the descent. The cost multipliers are chosen so that the resulting charges are in the vicinity of the historical (pre-9.2) fudge factors for indexes of up to about a million tuples, while not ballooning unreasonably beyond that, as the old fudge factor did (even more so in 9.2). To make this work accurately for btree indexes, add some code that allows extraction of the known root-page height from a btree. There's no equivalent number readily available for other index types, but we can use the log of the number of index pages as an approximate substitute. This seems like too much of a behavioral change to risk back-patching, but it should improve matters going forward. In 9.2 I'll just revert the fudge-factor change.
Diffstat (limited to 'src/backend/access/nbtree/nbtpage.c')
-rw-r--r--  src/backend/access/nbtree/nbtpage.c | 76
1 file changed, 76 insertions, 0 deletions
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 6612948769..9013f41727 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -412,6 +412,82 @@ _bt_gettrueroot(Relation rel)
}
/*
+ * _bt_getrootheight() -- Get the height of the btree search tree.
+ *
+ * We return the level (counting from zero) of the current fast root.
+ * This represents the number of tree levels we'd have to descend through
+ * to start any btree index search.
+ *
+ * This is used by the planner for cost-estimation purposes. Since it's
+ * only an estimate, slightly-stale data is fine, hence we don't worry
+ * about updating previously cached data.
+ */
+int
+_bt_getrootheight(Relation rel)
+{
+ BTMetaPageData *metad;
+
+ /*
+ * We can get what we need from the cached metapage data. If it's not
+ * cached yet, load it. Sanity checks here must match _bt_getroot().
+ */
+ if (rel->rd_amcache == NULL)
+ {
+ Buffer metabuf;
+ Page metapg;
+ BTPageOpaque metaopaque;
+
+ metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
+ metapg = BufferGetPage(metabuf);
+ metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg);
+ metad = BTPageGetMeta(metapg);
+
+ /* sanity-check the metapage before trusting its contents */
+ if (!(metaopaque->btpo_flags & BTP_META) ||
+ metad->btm_magic != BTREE_MAGIC)
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("index \"%s\" is not a btree",
+ RelationGetRelationName(rel))));
+
+ if (metad->btm_version != BTREE_VERSION)
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("version mismatch in index \"%s\": file version %d, code version %d",
+ RelationGetRelationName(rel),
+ metad->btm_version, BTREE_VERSION)));
+
+ /*
+ * If there's no root page yet, _bt_getroot() doesn't expect a cache
+ * to be made, so just stop here and report the index height is zero.
+ * (XXX perhaps _bt_getroot() should be changed to allow this case.)
+ */
+ if (metad->btm_root == P_NONE)
+ {
+ _bt_relbuf(rel, metabuf);
+ return 0;
+ }
+
+ /*
+ * Cache the metapage data for next time (in the index's relcache context)
+ */
+ rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
+ sizeof(BTMetaPageData));
+ memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));
+
+ _bt_relbuf(rel, metabuf);
+ }
+
+ metad = (BTMetaPageData *) rel->rd_amcache;
+ /* Sanity checks: we shouldn't have cached a metapage failing these */
+ Assert(metad->btm_magic == BTREE_MAGIC);
+ Assert(metad->btm_version == BTREE_VERSION);
+ Assert(metad->btm_fastroot != P_NONE);
+
+ return metad->btm_fastlevel;
+}
+
+/*
* _bt_checkpage() -- Verify that a freshly-read page looks sane.
*/
void