summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--mysql-test/main/index_intersect.result10
-rw-r--r--mysql-test/main/index_intersect_innodb.result8
-rw-r--r--mysql-test/main/mysqld--help.result24
-rw-r--r--mysql-test/main/mysqld--help.test1
-rw-r--r--mysql-test/main/opt_trace_index_merge.result16
-rw-r--r--mysql-test/main/selectivity_innodb.result2
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_server_embedded.result70
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result62
-rw-r--r--sql/filesort.cc7
-rw-r--r--sql/filesort_utils.cc35
-rw-r--r--sql/filesort_utils.h2
-rw-r--r--sql/ha_partition.cc9
-rw-r--r--sql/ha_partition.h1
-rw-r--r--sql/handler.cc43
-rw-r--r--sql/handler.h19
-rw-r--r--sql/multi_range_read.cc9
-rw-r--r--sql/opt_range.cc53
-rw-r--r--sql/opt_subselect.cc4
-rw-r--r--sql/optimizer_costs.h69
-rw-r--r--sql/rowid_filter.cc13
-rw-r--r--sql/rowid_filter.h3
-rw-r--r--sql/sql_class.h6
-rw-r--r--sql/sql_select.cc45
-rw-r--r--sql/sys_vars.cc50
-rw-r--r--sql/table.cc18
-rw-r--r--sql/table.h5
-rw-r--r--sql/uniques.cc8
-rw-r--r--sql/uniques.h2
28 files changed, 451 insertions, 143 deletions
diff --git a/mysql-test/main/index_intersect.result b/mysql-test/main/index_intersect.result
index 79cd7a32c1a..1f31636860f 100644
--- a/mysql-test/main/index_intersect.result
+++ b/mysql-test/main/index_intersect.result
@@ -80,7 +80,7 @@ EXPLAIN
SELECT * FROM City
WHERE Name LIKE 'M%' AND Population > 7000000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Name Population,Name 4,35 NULL # Using sort_intersect(Population,Name); Using where
+1 SIMPLE City range Population,Name Population 4 NULL # Using index condition; Using where
SELECT * FROM City USE INDEX ()
WHERE Name LIKE 'C%' AND Population > 1000000;
ID Name Country Population
@@ -335,8 +335,8 @@ ID Name Country Population
SELECT * FROM City
WHERE Name LIKE 'M%' AND Population > 7000000;
ID Name Country Population
-1024 Mumbai (Bombay) IND 10500000
3580 Moscow RUS 8389200
+1024 Mumbai (Bombay) IND 10500000
SELECT COUNT(*) FROM City WHERE Name BETWEEN 'M' AND 'N';
COUNT(*)
301
@@ -372,7 +372,7 @@ EXPLAIN
SELECT * FROM City
WHERE Name BETWEEN 'G' AND 'K' AND Population > 500000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Name,Country Name,Country,Population # NULL # Using sort_intersect(Name,Country,Population); Using where
+1 SIMPLE City index_merge Population,Name,Country Name,Country # NULL # Using sort_intersect(Name,Country); Using where
SELECT * FROM City USE INDEX ()
WHERE Name BETWEEN 'M' AND 'N' AND Population > 1000000 AND Country LIKE 'C%';
ID Name Country Population
@@ -463,12 +463,12 @@ EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 501 AND 1000 AND Population > 700000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country Population,PRIMARY,Country 4,4,3 NULL # Using sort_intersect(Population,PRIMARY,Country); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country Population,PRIMARY 4,4 NULL # Using sort_intersect(Population,PRIMARY); Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country Population,PRIMARY,Country 4,4,3 NULL # Using sort_intersect(Population,PRIMARY,Country); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country Population,PRIMARY 4,4 NULL # Using sort_intersect(Population,PRIMARY); Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 2001 AND 2500 AND Population > 300000 AND Country LIKE 'H%';
diff --git a/mysql-test/main/index_intersect_innodb.result b/mysql-test/main/index_intersect_innodb.result
index 5ee4f88fddd..efe74389119 100644
--- a/mysql-test/main/index_intersect_innodb.result
+++ b/mysql-test/main/index_intersect_innodb.result
@@ -469,12 +469,12 @@ EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 501 AND 1000 AND Population > 700000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population,Country 4,4,7 NULL # Using sort_intersect(PRIMARY,Population,Country); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population 4,4 NULL # Using sort_intersect(PRIMARY,Population); Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population,Country 4,4,7 NULL # Using sort_intersect(PRIMARY,Population,Country); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population 4,4 NULL # Using sort_intersect(PRIMARY,Population); Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 2001 AND 2500 AND Population > 300000 AND Country LIKE 'H%';
@@ -718,12 +718,12 @@ EXPLAIN
SELECT * FROM City
WHERE Name BETWEEN 'G' AND 'J' AND Population > 500000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Country,Name Name,Population,Country 35,4,3 NULL # Using sort_intersect(Name,Population,Country); Using where
+1 SIMPLE City index_merge Population,Country,Name Name,Population 35,4 NULL # Using sort_intersect(Name,Population); Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population,Country 4,4,7 NULL # Using sort_intersect(PRIMARY,Population,Country); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population 4,4 NULL # Using sort_intersect(PRIMARY,Population); Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 3001 AND 4000 AND Population > 600000
diff --git a/mysql-test/main/mysqld--help.result b/mysql-test/main/mysqld--help.result
index f5e1340feda..bece8b777be 100644
--- a/mysql-test/main/mysqld--help.result
+++ b/mysql-test/main/mysqld--help.result
@@ -724,11 +724,22 @@ The following specify which files/extra groups are read (specified before remain
Expected hit rate of the row and index cache in storage
engines. The value should be an integer between 0 and 99,
where 0 means cache is empty and 99 means that value is
- almost always in the cache
+ almost always in the cache.
--optimizer-extra-pruning-depth=#
If the optimizer needs to enumerate join prefix of this
size or larger, then it will try agressively prune away
the search space.
+ --optimizer-index-block-copy-cost=#
+ Cost of copying a key block from the cache to intern
+ storage as part of an index scan.
+ --optimizer-key-compare-cost=#
+ Cost of checking a key against the end key condition.
+ --optimizer-key-copy-cost=#
+ Cost of finding the next key in the engine and copying it
+ to the SQL layer.
+ --optimizer-key-next-find-cost=#
+ Cost of finding the next key and rowid when using
+ filters.
--optimizer-max-sel-arg-weight=#
The maximum weight of the SEL_ARG graph. Set to 0 for no
limit
@@ -739,6 +750,9 @@ The following specify which files/extra groups are read (specified before remain
heuristic, thus perform exhaustive search: 1 - prune
plans based on cost and number of retrieved rows eq_ref:
2 - prune also if we find an eq_ref chain
+ --optimizer-row-copy-cost=#
+ Cost of copying a row from the engine or the join cache
+ to the SQL layer.
--optimizer-search-depth=#
Maximum depth of search performed by the query optimizer.
Values larger than the number of relations in a query
@@ -791,6 +805,8 @@ The following specify which files/extra groups are read (specified before remain
the cardinality of a partial join.5 - additionally use
selectivity of certain non-range predicates calculated on
record samples
+ --optimizer-where-cost=#
+ Cost of checking the row against the WHERE clause.
--performance-schema
Enable the performance schema.
--performance-schema-accounts-size=#
@@ -1705,14 +1721,20 @@ old-passwords FALSE
old-style-user-limits FALSE
optimizer-cache-hit-ratio 50
optimizer-extra-pruning-depth 8
+optimizer-index-block-copy-cost 0.2
+optimizer-key-compare-cost 0.05
+optimizer-key-copy-cost 0.025
+optimizer-key-next-find-cost 0.0125
optimizer-max-sel-arg-weight 32000
optimizer-prune-level 2
+optimizer-row-copy-cost 0.05
optimizer-search-depth 62
optimizer-selectivity-sampling-limit 100
optimizer-switch index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
optimizer-trace
optimizer-trace-max-mem-size 1048576
optimizer-use-condition-selectivity 4
+optimizer-where-cost 0.2
performance-schema FALSE
performance-schema-accounts-size -1
performance-schema-consumer-events-stages-current FALSE
diff --git a/mysql-test/main/mysqld--help.test b/mysql-test/main/mysqld--help.test
index a73562e4249..ad2307f43c6 100644
--- a/mysql-test/main/mysqld--help.test
+++ b/mysql-test/main/mysqld--help.test
@@ -29,6 +29,7 @@ perl;
collation-server character-set-server log-tc-size table-cache
table-open-cache table-open-cache-instances max-connections
tls-version version.* password-reuse-check
+ provider-bzip2 provider-lzma provider-lzo
password-reuse-check-interval/;
# Plugins which may or may not be there:
diff --git a/mysql-test/main/opt_trace_index_merge.result b/mysql-test/main/opt_trace_index_merge.result
index 5407af41fd5..c4ca31fcad9 100644
--- a/mysql-test/main/opt_trace_index_merge.result
+++ b/mysql-test/main/opt_trace_index_merge.result
@@ -158,7 +158,7 @@ explain select * from t1 where a=1 or b=1 {
}
}
],
- "index_roworder_union_cost": 2.595171589,
+ "index_roworder_union_cost": 2.601171589,
"members": 2,
"chosen": true
}
@@ -187,7 +187,7 @@ explain select * from t1 where a=1 or b=1 {
]
},
"rows_for_plan": 2,
- "cost_for_plan": 2.595171589,
+ "cost_for_plan": 2.601171589,
"chosen": true
}
}
@@ -220,14 +220,14 @@ explain select * from t1 where a=1 or b=1 {
"rows": 2,
"rows_after_scan": 2,
"rows_after_filter": 2,
- "cost": 2.595171589,
+ "cost": 2.601171589,
"chosen": true
}
],
"chosen_access_method": {
"type": "index_merge",
"records": 2,
- "cost": 2.595171589,
+ "cost": 2.601171589,
"uses_join_buffering": false
}
}
@@ -238,14 +238,14 @@ explain select * from t1 where a=1 or b=1 {
"plan_prefix": [],
"table": "t1",
"rows_for_plan": 2,
- "cost_for_plan": 2.595171589
+ "cost_for_plan": 2.601171589
}
]
},
{
"best_join_order": ["t1"],
"rows": 2,
- "cost": 2.595171589
+ "cost": 2.601171589
},
{
"substitute_best_equal": {
@@ -630,7 +630,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
}
}
],
- "index_roworder_union_cost": 332.5637481,
+ "index_roworder_union_cost": 333.0257481,
"members": 2,
"chosen": true
}
@@ -697,7 +697,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
]
},
"rows_for_plan": 154,
- "cost_for_plan": 332.5637481,
+ "cost_for_plan": 333.0257481,
"chosen": true
}
]
diff --git a/mysql-test/main/selectivity_innodb.result b/mysql-test/main/selectivity_innodb.result
index f8c14a14dbf..02dd7c993eb 100644
--- a/mysql-test/main/selectivity_innodb.result
+++ b/mysql-test/main/selectivity_innodb.result
@@ -88,7 +88,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DEPENDENT SUBQUERY region eq_ref PRIMARY PRIMARY 4 dbt3_s001.nation.n_regionkey 1 20.00 Using where
Warnings:
Note 1276 Field or reference 'dbt3_s001.part.p_partkey' of SELECT #2 was resolved in SELECT #1
-Note 1003 /* select#1 */ select `dbt3_s001`.`supplier`.`s_acctbal` AS `s_acctbal`,`dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`nation`.`n_name` AS `n_name`,`dbt3_s001`.`part`.`p_partkey` AS `p_partkey`,`dbt3_s001`.`part`.`p_mfgr` AS `p_mfgr`,`dbt3_s001`.`supplier`.`s_address` AS `s_address`,`dbt3_s001`.`supplier`.`s_phone` AS `s_phone`,`dbt3_s001`.`supplier`.`s_comment` AS `s_comment` from `dbt3_s001`.`part` join `dbt3_s001`.`supplier` join `dbt3_s001`.`partsupp` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`nation`.`n_regionkey` = `dbt3_s001`.`region`.`r_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and (`dbt3_s001`.`part`.`p_size` = 9 or `dbt3_s001`.`part`.`p_size` = 19999) and `dbt3_s001`.`part`.`p_type` like '%TIN' and `dbt3_s001`.`partsupp`.`ps_supplycost` = <expr_cache><`dbt3_s001`.`part`.`p_partkey`>((/* select#2 */ select min(`dbt3_s001`.`partsupp`.`ps_supplycost`) from `dbt3_s001`.`partsupp` join `dbt3_s001`.`supplier` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`nation`.`n_regionkey` = `dbt3_s001`.`region`.`r_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey`)) order by `dbt3_s001`.`supplier`.`s_acctbal` desc,`dbt3_s001`.`nation`.`n_name`,`dbt3_s001`.`supplier`.`s_name`,`dbt3_s001`.`part`.`p_partkey`
+Note 1003 /* select#1 */ select `dbt3_s001`.`supplier`.`s_acctbal` AS `s_acctbal`,`dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`nation`.`n_name` AS `n_name`,`dbt3_s001`.`part`.`p_partkey` AS `p_partkey`,`dbt3_s001`.`part`.`p_mfgr` AS `p_mfgr`,`dbt3_s001`.`supplier`.`s_address` AS `s_address`,`dbt3_s001`.`supplier`.`s_phone` AS `s_phone`,`dbt3_s001`.`supplier`.`s_comment` AS `s_comment` from `dbt3_s001`.`part` join `dbt3_s001`.`supplier` join `dbt3_s001`.`partsupp` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`nation`.`n_regionkey` = `dbt3_s001`.`region`.`r_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and (`dbt3_s001`.`part`.`p_size` = 9 or `dbt3_s001`.`part`.`p_size` = 19999) and `dbt3_s001`.`part`.`p_type` like '%TIN' and `dbt3_s001`.`partsupp`.`ps_supplycost` = <expr_cache><`dbt3_s001`.`part`.`p_partkey`>((/* select#2 */ select min(`dbt3_s001`.`partsupp`.`ps_supplycost`) from `dbt3_s001`.`partsupp` join `dbt3_s001`.`supplier` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`region`.`r_regionkey` = `dbt3_s001`.`nation`.`n_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey`)) order by `dbt3_s001`.`supplier`.`s_acctbal` desc,`dbt3_s001`.`nation`.`n_name`,`dbt3_s001`.`supplier`.`s_name`,`dbt3_s001`.`part`.`p_partkey`
set optimizer_use_condition_selectivity=4;
explain extended
select
diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result
index aa3cc62edab..c8b05b6599c 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result
@@ -2292,6 +2292,16 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_CACHE_HIT_RATIO
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE INT UNSIGNED
+VARIABLE_COMMENT Expected hit rate of the row and index cache in storage engines. The value should be an integer between 0 and 99, where 0 means cache is empty and 99 means that value is almost always in the cache.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 99
+NUMERIC_BLOCK_SIZE 1
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_EXTRA_PRUNING_DEPTH
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
@@ -2302,6 +2312,46 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_INDEX_BLOCK_COPY_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of copying a key block from the cache to intern storage as part of an index scan.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_KEY_COMPARE_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of checking a key against the end key condition.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_KEY_COPY_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of finding the next key in the engine and copying it to the SQL layer.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_KEY_NEXT_FIND_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of finding the next key and rowid when using filters.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_MAX_SEL_ARG_WEIGHT
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
@@ -2322,6 +2372,16 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_ROW_COPY_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of copying a row from the engine or the join cache to the SQL layer.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_SEARCH_DEPTH
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
@@ -2382,6 +2442,16 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_WHERE_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of checking the row against the WHERE clause.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BOOLEAN
diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result
index 294cf957d35..63288a493dd 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result
@@ -2465,7 +2465,7 @@ COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_CACHE_HIT_RATIO
VARIABLE_SCOPE SESSION
VARIABLE_TYPE INT UNSIGNED
-VARIABLE_COMMENT Expected hit rate of the row and index cache in storage engines. The value should be an integer between 0 and 99, where 0 means cache is empty and 99 means that value is almost always in the cache
+VARIABLE_COMMENT Expected hit rate of the row and index cache in storage engines. The value should be an integer between 0 and 99, where 0 means cache is empty and 99 means that value is almost always in the cache.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 99
NUMERIC_BLOCK_SIZE 1
@@ -2482,6 +2482,46 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_INDEX_BLOCK_COPY_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of copying a key block from the cache to intern storage as part of an index scan.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_KEY_COMPARE_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of checking a key against the end key condition.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_KEY_COPY_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of finding the next key in the engine and copying it to the SQL layer.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_KEY_NEXT_FIND_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of finding the next key and rowid when using filters.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_MAX_SEL_ARG_WEIGHT
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
@@ -2502,6 +2542,16 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_ROW_COPY_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of copying a row from the engine or the join cache to the SQL layer.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_SEARCH_DEPTH
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
@@ -2562,6 +2612,16 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_WHERE_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of checking the row against the WHERE clause.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BOOLEAN
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 1a29208392d..c141490acde 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -1594,17 +1594,18 @@ static bool check_if_pq_applicable(Sort_param *param,
scanning the table and comparing the rows with the where clause!
*/
const double sort_merge_cost=
- get_merge_many_buffs_cost_fast(num_rows,
+ get_merge_many_buffs_cost_fast(table->in_use, num_rows,
num_available_keys,
(uint)row_length);
/*
PQ has cost:
- (insert + qsort) * log(queue size) / TIME_FOR_COMPARE_ROWID +
+ (insert + qsort) * log(queue size) * ROWID_COMPARE_COST
cost of rowid lookup of the original row to find the addon fields.
*/
const double pq_cpu_cost=
(PQ_slowness * num_rows + param->max_keys_per_buffer) *
- log((double) param->max_keys_per_buffer) / TIME_FOR_COMPARE_ROWID;
+ log((double) param->max_keys_per_buffer) *
+ ROWID_COMPARE_COST_THD(table->in_use);
const double pq_io_cost= table->file->ha_rnd_pos_time(param->max_rows);
const double pq_cost= pq_cpu_cost + pq_io_cost;
diff --git a/sql/filesort_utils.cc b/sql/filesort_utils.cc
index 341e43ea6cf..6790016c838 100644
--- a/sql/filesort_utils.cc
+++ b/sql/filesort_utils.cc
@@ -23,17 +23,16 @@
PSI_memory_key key_memory_Filesort_buffer_sort_keys;
-namespace {
/**
A local helper function. See comments for get_merge_buffers_cost().
- */
-double get_merge_cost(ha_rows num_elements, ha_rows num_buffers, uint elem_size)
+*/
+
+static double get_merge_cost(double rowid_compare_cost, ha_rows num_elements,
+ ha_rows num_buffers, uint elem_size)
{
- return
- 2.0 * ((double) num_elements * elem_size) / IO_SIZE
- + (double) num_elements * log((double) num_buffers) /
- (TIME_FOR_COMPARE_ROWID * M_LN2);
-}
+ return (2.0 * ((double) num_elements * elem_size) / IO_SIZE
+ + (double) num_elements * log((double) num_buffers) *
+ rowid_compare_cost / M_LN2);
}
/**
@@ -43,20 +42,21 @@ double get_merge_cost(ha_rows num_elements, ha_rows num_buffers, uint elem_size)
see comments for get_merge_buffers_cost().
TODO: Use this function for Unique::get_use_cost().
*/
-double get_merge_many_buffs_cost_fast(ha_rows num_rows,
+
+double get_merge_many_buffs_cost_fast(THD *thd,
+ ha_rows num_rows,
ha_rows num_keys_per_buffer,
uint elem_size)
{
ha_rows num_buffers= num_rows / num_keys_per_buffer;
ha_rows last_n_elems= num_rows % num_keys_per_buffer;
- double total_cost;
+ double total_cost, rowid_compare_cost= ROWID_COMPARE_COST_THD(thd);
// Calculate CPU cost of sorting buffers.
total_cost=
((num_buffers * num_keys_per_buffer * log(1.0 + num_keys_per_buffer) +
- last_n_elems * log(1.0 + last_n_elems)) /
- TIME_FOR_COMPARE_ROWID);
-
+ last_n_elems * log(1.0 + last_n_elems)) * rowid_compare_cost);
+
// Simulate behavior of merge_many_buff().
while (num_buffers >= MERGEBUFF2)
{
@@ -69,14 +69,16 @@ double get_merge_many_buffs_cost_fast(ha_rows num_rows,
// Cost of merge sort 'num_merge_calls'.
total_cost+=
num_merge_calls *
- get_merge_cost(num_keys_per_buffer * MERGEBUFF, MERGEBUFF, elem_size);
+ get_merge_cost(rowid_compare_cost, num_keys_per_buffer * MERGEBUFF,
+ MERGEBUFF, elem_size);
// # of records in remaining buffers.
last_n_elems+= num_remaining_buffs * num_keys_per_buffer;
// Cost of merge sort of remaining buffers.
total_cost+=
- get_merge_cost(last_n_elems, 1 + num_remaining_buffs, elem_size);
+ get_merge_cost(rowid_compare_cost, last_n_elems, 1 + num_remaining_buffs,
+ elem_size);
num_buffers= num_merge_calls;
num_keys_per_buffer*= MERGEBUFF;
@@ -84,7 +86,8 @@ double get_merge_many_buffs_cost_fast(ha_rows num_rows,
// Simulate final merge_buff call.
last_n_elems+= num_keys_per_buffer * num_buffers;
- total_cost+= get_merge_cost(last_n_elems, 1 + num_buffers, elem_size);
+ total_cost+= get_merge_cost(rowid_compare_cost, last_n_elems, 1 + num_buffers,
+ elem_size);
return total_cost;
}
diff --git a/sql/filesort_utils.h b/sql/filesort_utils.h
index 1962f149893..c9f6e045fd3 100644
--- a/sql/filesort_utils.h
+++ b/sql/filesort_utils.h
@@ -39,7 +39,7 @@ class Sort_param;
See also comments get_merge_many_buffs_cost().
*/
-double get_merge_many_buffs_cost_fast(ha_rows num_rows,
+double get_merge_many_buffs_cost_fast(THD *thd, ha_rows num_rows,
ha_rows num_keys_per_buffer,
uint elem_size);
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index a984dea8836..35fb6be53b7 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -12145,6 +12145,15 @@ ha_partition::can_convert_nocopy(const Field &field,
return true;
}
+void ha_partition::set_optimizer_costs(THD *thd)
+{
+ handler::set_optimizer_costs(thd);
+ for (uint i= bitmap_get_first_set(&m_part_info->read_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
+ m_file[i]->set_optimizer_costs(thd);
+}
+
struct st_mysql_storage_engine partition_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 37529073391..8aaf87b699c 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -1637,5 +1637,6 @@ public:
bool can_convert_nocopy(const Field &field,
const Column_definition &new_field) const override;
+ void set_optimizer_costs(THD *thd) override;
};
#endif /* HA_PARTITION_INCLUDED */
diff --git a/sql/handler.cc b/sql/handler.cc
index 61e2ab7a4ee..f2cc502a6e9 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -3249,7 +3249,7 @@ LEX_CSTRING *handler::engine_name()
those key entries into the engine buffers.
This function doesn't take in account into copying the key to record
- (KEY_COPY_COST) or comparing the key to the where clause (TIME_FOR_COMPARE)
+ (KEY_COPY_COST) or comparing the key to the where clause (WHERE_COST)
*/
double handler::keyread_time(uint index, uint ranges, ha_rows rows)
@@ -3261,7 +3261,8 @@ double handler::keyread_time(uint index, uint ranges, ha_rows rows)
if (table->file->is_clustering_key(index))
len= table->s->stored_rec_length;
- cost= ((double)rows*len/(stats.block_size+1)*INDEX_BLOCK_COPY_COST);
+ cost= ((double)rows*len/(stats.block_size+1) *
+ INDEX_BLOCK_COPY_COST(table->in_use));
/*
We divide the cost with optimizer_cache_cost as ha_keyread_time()
and ha_key_scan_time() will multiply the result value with
@@ -3413,9 +3414,18 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
else
dup_ref=ref+ALIGN_SIZE(ref_length);
cached_table_flags= table_flags();
+
+ /* Copy current optimizer costs. Needed in case clone() is used */
+ set_optimizer_costs(table->in_use);
+ DBUG_ASSERT(optimizer_key_copy_cost >= 0.0);
+ DBUG_ASSERT(optimizer_key_next_find_cost >= 0.0);
+ DBUG_ASSERT(optimizer_row_copy_cost >= 0.0);
+ DBUG_ASSERT(optimizer_where_cost >= 0.0);
+ DBUG_ASSERT(optimizer_key_cmp_cost >= 0.0);
+ reset_statistics();
}
- reset_statistics();
internal_tmp_table= MY_TEST(test_if_locked & HA_OPEN_INTERNAL_TABLE);
+
DBUG_RETURN(error);
}
@@ -8755,3 +8765,30 @@ Table_scope_and_contents_source_st::fix_period_fields(THD *thd,
}
return false;
}
+
+/*
+ Copy common optimizer cost variables to the engine
+
+ This is needed to provide fast acccess to these variables during
+ optimization (as we refer to them multiple times).
+
+ The other option would be to access them from thd, but that
+ would require a function call (as we cannot access THD from
+ an inline handler function) and two extra memory accesses
+ for each variable.
+
+ index_block_copy_cost is not copied as it is used so seldom.
+*/
+
+
+void handler::set_optimizer_costs(THD *thd)
+{
+ optimizer_key_copy_cost= thd->variables.optimizer_key_copy_cost;
+ optimizer_key_next_find_cost=
+ thd->variables.optimizer_key_next_find_cost;
+ optimizer_row_copy_cost= thd->variables.optimizer_row_copy_cost;
+ optimizer_where_cost= thd->variables.optimizer_where_cost;
+ optimizer_key_cmp_cost= thd->variables.optimizer_key_cmp_cost;
+ set_optimizer_cache_cost(cache_hit_cost(thd->variables.
+ optimizer_cache_hit_ratio));
+}
diff --git a/sql/handler.h b/sql/handler.h
index 09299cc99e9..72ab1ed00a3 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -3225,6 +3225,10 @@ public:
Updated from THD in open_tables()
*/
double optimizer_cache_cost;
+ double optimizer_key_next_find_cost;
+ double optimizer_row_copy_cost, optimizer_key_copy_cost;
+ double optimizer_where_cost, optimizer_key_cmp_cost;
+
ha_copy_info copy_info;
private:
@@ -3360,7 +3364,7 @@ public:
ref_length(sizeof(my_off_t)),
ft_handler(0), inited(NONE), pre_inited(NONE),
pushed_cond(0), next_insert_id(0), insert_id_for_cur_row(0),
- optimizer_cache_cost((100-CACHE_HIT_RATIO)/100.0),
+ optimizer_cache_cost((100-DEFAULT_CACHE_HIT_RATIO)/100.0),
tracker(NULL),
pushed_idx_cond(NULL),
pushed_idx_cond_keyno(MAX_KEY),
@@ -3624,7 +3628,7 @@ public:
inline double ha_scan_and_compare_time(ha_rows records)
{
return (ha_scan_time() +
- (double) records * (ROW_COPY_COST + 1/TIME_FOR_COMPARE));
+ (double) records * (ROW_COPY_COST + WHERE_COST));
}
virtual double avg_io_cost()
@@ -3632,6 +3636,13 @@ public:
return 1.0;
}
+ virtual void set_optimizer_costs(THD *thd);
+
+ /*
+ Set cost for finding a row in the engine cache
+ This allows the handler to override the cost if there is no
+ caching of rows, like in heap or federatedx.
+ */
virtual void set_optimizer_cache_cost(double cost)
{
optimizer_cache_cost= cost;
@@ -3678,7 +3689,7 @@ public:
inline double ha_read_and_compare_time(uint index, uint ranges, ha_rows rows)
{
return (ha_read_time(index, ranges, rows) +
- rows2double(rows) * (ROW_COPY_COST + 1/TIME_FOR_COMPARE));
+ rows2double(rows) * (ROW_COPY_COST + WHERE_COST));
}
/* Cost of reading a row with rowid */
@@ -3754,7 +3765,7 @@ public:
inline double ha_key_scan_and_compare_time(uint index, ha_rows rows)
{
return (ha_key_scan_time(index) +
- (double) rows * (KEY_COPY_COST + 1/TIME_FOR_COMPARE));
+ (double) rows * (KEY_COPY_COST + WHERE_COST));
}
virtual const key_map *keys_to_use_for_scanning() { return &key_map_empty; }
diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc
index f8fd4e91f83..234c4cdfd7a 100644
--- a/sql/multi_range_read.cc
+++ b/sql/multi_range_read.cc
@@ -343,7 +343,7 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
cost->cpu_cost= ha_read_time(keyno, 0, total_rows);
cost->copy_cost= rows2double(total_rows) * ROW_COPY_COST;
}
- cost->comp_cost= (rows2double(total_rows) / TIME_FOR_COMPARE +
+ cost->comp_cost= (rows2double(total_rows) * WHERE_COST +
MULTI_RANGE_READ_SETUP_COST);
}
DBUG_PRINT("statistics",
@@ -427,7 +427,8 @@ ha_rows handler::multi_range_read_info(uint keyno, uint n_ranges, uint n_rows,
else
{
/*
- Same as above, but take into account copying the key to the upper level.
+ Same as above, but take into account copying the key to the upper
+ level.
*/
cost->idx_cpu_cost= (keyread_time(keyno, 1, n_rows) +
(n_ranges-1) * range_lookup_cost);
@@ -440,7 +441,7 @@ ha_rows handler::multi_range_read_info(uint keyno, uint n_ranges, uint n_rows,
cost->cpu_cost= read_time(keyno, n_ranges, n_rows);
cost->copy_cost= rows2double(n_rows) * ROW_COPY_COST;
}
- cost->comp_cost= rows2double(n_rows) / TIME_FOR_COMPARE;
+ cost->comp_cost= rows2double(n_rows) * WHERE_COST;
return 0;
}
@@ -2067,7 +2068,7 @@ void get_sort_and_sweep_cost(TABLE *table, ha_rows nrows, Cost_estimate *cost)
{
get_sweep_read_cost(table, nrows, FALSE, cost);
/* Add cost of qsort call: n * log2(n) * cost(rowid_comparison) */
- double cmp_op= rows2double(nrows) * (1.0 / TIME_FOR_COMPARE_ROWID);
+ double cmp_op= rows2double(nrows) * ROWID_COMPARE_COST_THD(table->in_use);
if (cmp_op < 3)
cmp_op= 3;
cost->cpu_cost += cmp_op * log2(cmp_op);
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index ba2497c48a8..ae1524d1fb2 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -5069,7 +5069,7 @@ static void dbug_print_singlepoint_range(SEL_ARG **start, uint num)
get_sweep_read_cost()
param Parameter from test_quick_select
records # of records to be retrieved
- add_time_for_compare If set, add cost of WHERE clause (TIME_FOR_COMPARE)
+ add_time_for_compare If set, add cost of WHERE clause (WHERE_COST)
RETURN
cost of sweep
*/
@@ -5080,7 +5080,8 @@ static double get_sweep_read_cost(const PARAM *param, ha_rows records,
DBUG_ENTER("get_sweep_read_cost");
#ifndef OLD_SWEEP_COST
double cost= (param->table->file->ha_rnd_pos_time(records) +
- (add_time_for_compare ? records / TIME_FOR_COMPARE : 0));
+ (add_time_for_compare ?
+ records * param->thd->variables.optimizer_where_cost : 0));
DBUG_PRINT("return", ("cost: %g", cost));
DBUG_RETURN(cost);
#else
@@ -5132,7 +5133,7 @@ static double get_sweep_read_cost(const PARAM *param, ha_rows records,
*/
result= busy_blocks;
}
- result+= rows2double(n_rows) * ROW_COPY_COST;
+ result+= rows2double(n_rows) * ROW_COPY_COST_THD(param->table->thd);
}
DBUG_PRINT("return",("cost: %g", result));
DBUG_RETURN(result);
@@ -5345,8 +5346,8 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
Add one ROWID comparison for each row retrieved on non-CPK scan. (it
is done in QUICK_RANGE_SELECT::row_in_ranges)
*/
- double rid_comp_cost= (rows2double(non_cpk_scan_records) /
- TIME_FOR_COMPARE_ROWID);
+ double rid_comp_cost= (rows2double(non_cpk_scan_records) *
+ ROWID_COMPARE_COST_THD(param->thd));
imerge_cost+= rid_comp_cost;
trace_best_disjunct.add("cost_of_mapping_rowid_in_non_clustered_pk_scan",
rid_comp_cost);
@@ -5354,7 +5355,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
/* Calculate cost(rowid_to_row_scan) */
{
- /* imerge_cost already includes TIME_FOR_COMPARE */
+ /* imerge_cost already includes WHERE_COST */
double sweep_cost= get_sweep_read_cost(param, non_cpk_scan_records, 0);
imerge_cost+= sweep_cost;
trace_best_disjunct.
@@ -5392,7 +5393,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
param->imerge_cost_buff, (uint)non_cpk_scan_records,
param->table->file->ref_length,
(size_t)param->thd->variables.sortbuff_size,
- TIME_FOR_COMPARE_ROWID,
+ ROWID_COMPARE_COST_THD(param->thd),
FALSE, NULL);
imerge_cost+= dup_removal_cost;
if (unlikely(trace_best_disjunct.trace_started()))
@@ -5506,8 +5507,8 @@ skip_to_ror_scan:
double roru_total_cost;
roru_total_cost= (roru_index_costs +
- rows2double(roru_total_records)*log((double)n_child_scans) /
- (TIME_FOR_COMPARE_ROWID * M_LN2) +
+ rows2double(roru_total_records)*log((double)n_child_scans) *
+ ROWID_COMPARE_COST_THD(param->thd) / M_LN2 +
get_sweep_read_cost(param, roru_total_records, 0));
DBUG_PRINT("info", ("ROR-union: cost %g, %zu members",
@@ -5663,8 +5664,8 @@ typedef struct st_common_index_intersect_info
{
PARAM *param; /* context info for range optimizations */
uint key_size; /* size of a ROWID element stored in Unique object */
- double compare_factor; /* 1/compare - cost to compare two ROWIDs */
- size_t max_memory_size; /* maximum space allowed for Unique objects */
+ double compare_factor; /* cost to compare two ROWIDs */
+ size_t max_memory_size; /* maximum space allowed for Unique objects */
ha_rows table_cardinality; /* estimate of the number of records in table */
double cutoff_cost; /* discard index intersects with greater costs */
INDEX_SCAN_INFO *cpk_scan; /* clustered primary key used in intersection */
@@ -5877,7 +5878,7 @@ bool prepare_search_best_index_intersect(PARAM *param,
common->param= param;
common->key_size= table->file->ref_length;
- common->compare_factor= TIME_FOR_COMPARE_ROWID;
+ common->compare_factor= ROWID_COMPARE_COST_THD(param->thd);
common->max_memory_size= (size_t)param->thd->variables.sortbuff_size;
common->cutoff_cost= cutoff_cost;
common->cpk_scan= NULL;
@@ -5934,7 +5935,7 @@ bool prepare_search_best_index_intersect(PARAM *param,
continue;
}
- cost= table->opt_range[(*index_scan)->keynr].index_only_fetch_cost();
+ cost= table->opt_range[(*index_scan)->keynr].index_only_fetch_cost(thd);
idx_scan.add("cost", cost);
@@ -6298,8 +6299,8 @@ double get_cpk_filter_cost(ha_rows filtered_records,
INDEX_SCAN_INFO *cpk_scan,
double compare_factor)
{
- return log((double) (cpk_scan->range_count+1)) / (compare_factor * M_LN2) *
- filtered_records;
+ return (log((double) (cpk_scan->range_count+1)) * compare_factor / M_LN2 *
+ filtered_records);
}
@@ -7054,11 +7055,11 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
{
/*
CPK scan is used to filter out rows. We apply filtering for
- each record of every scan. Assuming 1/TIME_FOR_COMPARE_ROWID
+ each record of every scan. Assuming ROWID_COMPARE_COST
per check this gives us:
*/
- const double idx_cost= (rows2double(info->index_records) /
- TIME_FOR_COMPARE_ROWID);
+ const double idx_cost= (rows2double(info->index_records) *
+ ROWID_COMPARE_COST_THD(info->param->thd));
info->index_scan_costs+= idx_cost;
trace_costs->add("index_scan_cost", idx_cost);
}
@@ -7367,7 +7368,7 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
Adjust row count and add the cost of comparing the final rows to the
WHERE clause
*/
- cmp_cost= intersect_best->out_rows/TIME_FOR_COMPARE;
+ cmp_cost= intersect_best->out_rows * thd->variables.optimizer_where_cost;
/* Ok, return ROR-intersect plan if we have found one */
TRP_ROR_INTERSECT *trp= NULL;
@@ -7536,9 +7537,9 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
tree->ror_scans, ror_scan_mark););
/* Add priority queue use cost. */
- total_cost += rows2double(records)*
- log((double)(ror_scan_mark - tree->ror_scans)) /
- (TIME_FOR_COMPARE_ROWID * M_LN2);
+ total_cost += (rows2double(records) *
+ log((double)(ror_scan_mark - tree->ror_scans)) *
+ ROWID_COMPARE_COST_THD(param->thd) / M_LN2);
DBUG_PRINT("info", ("Covering ROR-intersect full cost: %g", total_cost));
/* TODO: Add TIME_FOR_COMPARE cost to total_cost */
@@ -15093,7 +15094,7 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
group. To make the CPU cost reflect this, we estimate the CPU cost
as the sum of:
1. Cost for evaluating the condition for each num_group
- (1/TIME_FOR_COMPARE_IDX) (similarly as for index scan).
+ KEY_COMPARE_COST (similarly as for index scan).
2. Cost for navigating the index structure (assuming a b-tree).
Note: We only add the cost for one index comparision per block. For a
b-tree the number of comparisons will be larger. However the cost
@@ -15102,13 +15103,15 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
TODO: This cost should be provided by the storage engine.
3. Cost for comparing the row with the where clause
*/
+ const THD *thd= table->in_use;
const double tree_traversal_cost=
ceil(log(static_cast<double>(table_records))/
log(static_cast<double>(keys_per_block))) *
- 1/(TIME_FOR_COMPARE_IDX);
+ thd->variables.optimizer_key_cmp_cost;
const double cpu_cost= (num_groups *
- (tree_traversal_cost + 1/TIME_FOR_COMPARE));
+ (tree_traversal_cost +
+ thd->variables.optimizer_where_cost));
*read_cost= io_cost + cpu_cost;
*records= num_groups;
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index 6f2de3796c4..bd0a187db63 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -1449,6 +1449,7 @@ void get_delayed_table_estimates(TABLE *table,
Item_in_subselect *item= table->pos_in_table_list->jtbm_subselect;
Table_function_json_table *table_function=
table->pos_in_table_list->table_function;
+ handler *file= table->file;
if (table_function)
{
@@ -1468,9 +1469,10 @@ void get_delayed_table_estimates(TABLE *table,
/* Calculate cost of scanning the temptable */
double data_size= COST_MULT(item->jtbm_record_count,
hash_sj_engine->tmp_table->s->reclength);
+
/* Do like in handler::ha_scan_and_compare_time, but ignore the where cost */
*scan_time= ((data_size/table->file->stats.block_size+2) *
- table->file->avg_io_cost()) + *out_rows * ROW_COPY_COST;
+ table->file->avg_io_cost()) + *out_rows * file->ROW_COPY_COST;
}
diff --git a/sql/optimizer_costs.h b/sql/optimizer_costs.h
index 8634285319a..41ed42a99f8 100644
--- a/sql/optimizer_costs.h
+++ b/sql/optimizer_costs.h
@@ -20,14 +20,6 @@
/* This file includes costs variables used by the optimizer */
-/**
- The following is used to decide if MariaDB should use table scanning
- instead of reading with keys. The number says how many evaluation of the
- WHERE clause is comparable to reading one extra row from a table.
-*/
-#define TIME_FOR_COMPARE 5.0 // 5 WHERE compares == one read
-#define TIME_FOR_COMPARE_IDX 20.0
-
/*
The table/index cache hit ratio in %. 0 means that a searched for key or row
will never be in the cache while 100 means it always in the cache.
@@ -43,7 +35,7 @@
Note that avg_io_cost() is multipled with this constant!
*/
-#define CACHE_HIT_RATIO 50
+#define DEFAULT_CACHE_HIT_RATIO 50
/* Convert ratio to cost */
@@ -53,14 +45,23 @@ static inline double cache_hit_cost(uint ratio)
}
/*
- Cost of finding and copying keys of a total length of 'blocksize'
- used in handler::keyread_time()
- */
-#define INDEX_BLOCK_COPY_COST ((double) 1 / 5.0)
+ Base cost for finding keys and rows from the engine is 1.0
+ All other costs should be proportional to these
+*/
+
/* Cost for finding the first key in a key scan */
#define KEY_LOOKUP_COST ((double) 1.0)
/* Cost of finding a key from a row_ID (not used for clustered keys) */
#define ROW_LOOKUP_COST ((double) 1.0)
+
+/*
+ Cost of finding and copying keys from the storage engine index cache to
+ an internal cache as part of an index scan.
+ Used in handler::keyread_time()
+*/
+#define DEFAULT_INDEX_BLOCK_COPY_COST ((double) 1 / 5.0)
+#define INDEX_BLOCK_COPY_COST(THD) ((THD)->variables.optimizer_index_block_copy_cost)
+
/*
Cost of finding the next row during table scan and copying it to
'table->record'.
@@ -70,7 +71,10 @@ static inline double cache_hit_cost(uint ratio)
too big then MariaDB will used key lookup even when table scan is
better.
*/
-#define ROW_COPY_COST ((double) 1.0 / 20.0)
+#define DEFAULT_ROW_COPY_COST ((double) 1.0 / 20.0)
+#define ROW_COPY_COST optimizer_row_copy_cost
+#define ROW_COPY_COST_THD(THD) ((THD)->variables.optimizer_row_copy_cost)
+
/*
Cost of finding the next key during index scan and copying it to
'table->record'
@@ -79,13 +83,40 @@ static inline double cache_hit_cost(uint ratio)
as with table scans there are no key read (KEY_LOOKUP_COST) and
fewer disk reads.
*/
-#define KEY_COPY_COST ((double) 1.0 / 40.0)
+#define DEFAULT_KEY_COPY_COST ((double) 1.0 / 40.0)
+#define KEY_COPY_COST optimizer_key_copy_cost
+#define KEY_COPY_COST_THD(THD) ((THD)->variables.optimizer_key_copy_cost)
+
/*
Cost of finding the next index entry and checking it against filter
This cost is very low as it's done inside the storage engine.
Should be smaller than KEY_COPY_COST.
*/
-#define KEY_NEXT_FIND_COST ((double) 1.0 / 80.0)
+#define DEFAULT_KEY_NEXT_FIND_COST ((double) 1.0 / 80.0)
+#define KEY_NEXT_FIND_COST optimizer_key_next_find_cost
+
+/**
+ The following is used to decide if MariaDB should use table scanning
+ instead of reading with keys. The number says how many evaluation of the
+ WHERE clause is comparable to reading one extra row from a table.
+*/
+#define DEFAULT_WHERE_COST (1 / 5.0)
+#define WHERE_COST optimizer_where_cost
+#define WHERE_COST_THD(THD) ((THD)->variables.optimizer_where_cost)
+
+#define DEFAULT_KEY_COMPARE_COST (1 / 20.0)
+#define KEY_COMPARE_COST optimizer_key_cmp_cost
+
+/*
+ Cost of comparing two rowids. This is set relative to KEY_COMPARE_COST
+  This is usually just a memcmp!
+*/
+#define ROWID_COMPARE_COST (KEY_COMPARE_COST / 10.0)
+#define ROWID_COMPARE_COST_THD(THD) ((THD)->variables.KEY_COMPARE_COST / 10.0)
+
+/*
+ Setup cost for different operations
+*/
/* Extra cost for doing a range scan. Used to prefer 'ref' over range */
#define MULTI_RANGE_READ_SETUP_COST (double) (1.0 / 50.0)
@@ -105,12 +136,6 @@ static inline double cache_hit_cost(uint ratio)
*/
#define MIN_ROWS_AFTER_FILTERING 1.0
-/**
- Number of comparisons of table rowids equivalent to reading one row from a
- table.
-*/
-#define TIME_FOR_COMPARE_ROWID (TIME_FOR_COMPARE*100)
-
/*
cost1 is better that cost2 only if cost1 + COST_EPS < cost2
The main purpose of this is to ensure we use the first index or plan
diff --git a/sql/rowid_filter.cc b/sql/rowid_filter.cc
index 24d2b0e5c73..1926b43188a 100644
--- a/sql/rowid_filter.cc
+++ b/sql/rowid_filter.cc
@@ -23,7 +23,7 @@
#include "opt_trace.h"
/*
- KEY_NEXT_FIND_COST below is the cost of finding the next possible key
+ key_next_find_cost below is the cost of finding the next possible key
and calling handler_rowid_filter_check() to check it against the filter
*/
@@ -32,7 +32,7 @@ lookup_cost(Rowid_filter_container_type cont_type)
{
switch (cont_type) {
case SORTED_ARRAY_CONTAINER:
- return log(est_elements)*0.01+KEY_NEXT_FIND_COST;
+ return log(est_elements)*0.01+key_next_find_cost;
default:
DBUG_ASSERT(0);
return 0;
@@ -51,7 +51,7 @@ double Range_rowid_filter_cost_info::
avg_access_and_eval_gain_per_row(Rowid_filter_container_type cont_type,
double cost_of_row_fetch)
{
- return (cost_of_row_fetch+1.0/TIME_FOR_COMPARE) * (1 - selectivity) -
+ return (cost_of_row_fetch + where_cost) * (1 - selectivity) -
lookup_cost(cont_type);
}
@@ -125,6 +125,8 @@ void Range_rowid_filter_cost_info::init(Rowid_filter_container_type cont_type,
key_no= idx;
est_elements= (ulonglong) table->opt_range[key_no].rows;
cost_of_building_range_filter= build_cost(container_type);
+ where_cost= tab->in_use->variables.optimizer_where_cost;
+ key_next_find_cost= tab->in_use->variables.optimizer_key_next_find_cost;
selectivity= est_elements/((double) table->stat_records());
gain= avg_access_and_eval_gain_per_row(container_type,
tab->file->optimizer_cache_cost);
@@ -147,7 +149,7 @@ Range_rowid_filter_cost_info::build_cost(Rowid_filter_container_type cont_type)
double cost;
DBUG_ASSERT(table->opt_range_keys.is_set(key_no));
- cost= table->opt_range[key_no].index_only_fetch_cost();
+ cost= table->opt_range[key_no].index_only_fetch_cost(table->in_use);
switch (cont_type) {
@@ -523,7 +525,8 @@ TABLE::best_range_rowid_filter_for_partial_join(uint access_key_no,
cost_of_rejected_rows= index_only_cost * (1 - filter->selectivity);
new_cost= (cost_of_accepted_rows + cost_of_rejected_rows +
records * filter->lookup_cost());
- new_total_cost= ((new_cost + new_records/TIME_FOR_COMPARE) * prev_records +
+ new_total_cost= ((new_cost + new_records *
+ in_use->variables.optimizer_where_cost) * prev_records +
filter->get_setup_cost());
if (best_filter_gain > new_total_cost)
diff --git a/sql/rowid_filter.h b/sql/rowid_filter.h
index 97c9522c9d3..dc64dc876ce 100644
--- a/sql/rowid_filter.h
+++ b/sql/rowid_filter.h
@@ -406,6 +406,7 @@ class Range_rowid_filter_cost_info final: public Sql_alloc
/* The index whose range scan would be used to build the range filter */
uint key_no;
double cost_of_building_range_filter;
+ double where_cost, key_next_find_cost;
/*
(gain*row_combinations)-cost_of_building_range_filter yields the gain of
the filter for 'row_combinations' key tuples of the index key_no
@@ -466,7 +467,7 @@ public:
*/
inline double get_cmp_gain(double row_combinations)
{
- return row_combinations * (1 - selectivity) / TIME_FOR_COMPARE;
+ return (row_combinations * (1 - selectivity) * where_cost);
}
Rowid_filter_container *create_container();
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 7a26eba24cd..62bd7c46b66 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -692,6 +692,10 @@ typedef struct system_variables
ulonglong slave_skip_counter;
ulonglong max_relay_log_size;
+ double optimizer_index_block_copy_cost, optimizer_key_next_find_cost;
+ double optimizer_row_copy_cost, optimizer_key_copy_cost;
+ double optimizer_where_cost, optimizer_key_cmp_cost;
+
ha_rows select_limit;
ha_rows max_join_size;
ha_rows expensive_subquery_limit;
@@ -776,7 +780,7 @@ typedef struct system_variables
uint group_concat_max_len;
uint eq_range_index_dive_limit;
- uint optimizer_cache_hit_ratio;
+ uint optimizer_cache_hit_ratio; // Stored in handler::optimizer_cache_cost
int max_user_connections;
/**
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 3609e757e11..54992cb438f 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -7825,7 +7825,7 @@ static double matching_candidates_in_table(JOIN_TAB *s,
KEY_COPY_COST as for filtering there is no copying of not accepted
keys.
- TIME_FOR_COMPARE cost is not added to any result.
+ WHERE_COST cost is not added to any result.
*/
INDEX_READ_COST cost_for_index_read(const THD *thd, const TABLE *table,
@@ -7850,14 +7850,14 @@ INDEX_READ_COST cost_for_index_read(const THD *thd, const TABLE *table,
records to compute the record copy cost.
*/
cost.read_cost= (cost.index_only_cost +
- rows2double(records) * ROW_COPY_COST);
+ rows2double(records) * ROW_COPY_COST_THD(thd));
}
else if (table->covering_keys.is_set(key) && !table->no_keyread)
{
cost.index_only_cost= file->ha_keyread_time(key, 1, rows_adjusted);
/* Same computation as in ha_keyread_and_copy_time() */
cost.read_cost= (cost.index_only_cost +
- rows2double(records) * KEY_COPY_COST);
+ rows2double(records) * KEY_COPY_COST_THD(thd));
}
else
{
@@ -7868,7 +7868,7 @@ INDEX_READ_COST cost_for_index_read(const THD *thd, const TABLE *table,
*/
cost.read_cost= (cost.index_only_cost +
file->ha_read_time(key, 0, rows_adjusted) +
- rows2double(records) * ROW_COPY_COST);
+ rows2double(records) * ROW_COPY_COST_THD(thd));
}
DBUG_PRINT("statistics", ("index_cost: %.3f full_cost: %.3f",
cost.index_only_cost, cost.read_cost));
@@ -7882,7 +7882,7 @@ INDEX_READ_COST cost_for_index_read(const THD *thd, const TABLE *table,
@param thd Thread handler
@param table Table
@param cost Pointer to cost for *records_arg rows, not including
- TIME_FOR_COMPARE cost.
+ WHERE_COST cost.
Will be updated to new cost if filter is used.
@param records_arg Pointer to number of records for the current key.
Will be updated to records after filter, if filter is
@@ -7940,11 +7940,11 @@ apply_filter(THD *thd, TABLE *table, double *cost, double *records_arg,
ranges * KEY_LOOKUP_COST * io_cost *
table->file->optimizer_cache_cost) +
cost_of_rejected_rows + filter_lookup_cost);
- new_total_cost= ((new_cost + new_records/TIME_FOR_COMPARE) * prev_records +
- filter_startup_cost);
+ new_total_cost= ((new_cost + new_records * WHERE_COST_THD(thd)) *
+ prev_records + filter_startup_cost);
DBUG_ASSERT(new_cost >= 0 && new_records >= 0);
- use_filter= ((*cost + records/TIME_FOR_COMPARE) * prev_records >
+ use_filter= ((*cost + records * WHERE_COST_THD(thd)) * prev_records >
new_total_cost);
if (unlikely(thd->trace_started()))
@@ -8638,7 +8638,7 @@ best_access_path(JOIN *join,
tmp, index_only_cost,
1, record_count);
}
- tmp= COST_ADD(tmp, records_after_filter/TIME_FOR_COMPARE);
+ tmp= COST_ADD(tmp, records_after_filter * WHERE_COST_THD(thd));
tmp= COST_MULT(tmp, record_count);
tmp= COST_ADD(tmp, startup_cost);
if (unlikely(trace_access_idx.trace_started()))
@@ -8753,7 +8753,8 @@ best_access_path(JOIN *join,
We assume here that, thanks to the hash, we don't have to compare all
row combinations, only a HASH_FANOUT (10%) rows in the cache.
*/
- cmp_time= (rnd_records * record_count * HASH_FANOUT / TIME_FOR_COMPARE);
+ cmp_time= (rnd_records * record_count * HASH_FANOUT *
+ WHERE_COST_THD(thd));
tmp= COST_ADD(tmp, cmp_time);
best_cost= tmp;
@@ -8840,7 +8841,7 @@ best_access_path(JOIN *join,
- skip rows which does not satisfy WHERE constraints
Note that s->quick->read_time includes the cost of comparing
- the row with the where clause (TIME_FOR_COMPARE)
+ the row with the where clause (WHERE_COST)
TODO:
We take into account possible use of join cache for ALL/index
@@ -8889,8 +8890,9 @@ best_access_path(JOIN *join,
if (filter)
{
tmp= filter_cost;
- /* Filter returns cost without TIME_FOR_COMPARE */
- tmp= COST_ADD(tmp, records_after_filter / TIME_FOR_COMPARE);
+ /* Filter returns cost without WHERE_COST */
+ tmp= COST_ADD(tmp, records_after_filter *
+ WHERE_COST_THD(thd));
tmp= COST_MULT(tmp, record_count);
tmp= COST_ADD(tmp, startup_cost);
startup_cost= 0; // Avoid adding it later
@@ -8927,7 +8929,7 @@ best_access_path(JOIN *join,
type= JT_NEXT;
tmp= cost.read_cost;
/* Calculate cost of checking the attached WHERE */
- tmp= COST_ADD(cost.read_cost, s->records / TIME_FOR_COMPARE);
+ tmp= COST_ADD(cost.read_cost, s->records * WHERE_COST_THD(thd));
}
else // table scan
{
@@ -8979,8 +8981,8 @@ best_access_path(JOIN *join,
row.
*/
cmp_time= (rnd_records * record_count *
- (ROW_COPY_COST * (idx - join->const_tables) +
- 1 / TIME_FOR_COMPARE));
+ (ROW_COPY_COST_THD(thd) * (idx - join->const_tables) +
+ WHERE_COST_THD(thd)));
tmp= COST_ADD(tmp, cmp_time);
}
}
@@ -14848,7 +14850,8 @@ void JOIN_TAB::cleanup()
void JOIN_TAB::estimate_scan_time()
{
- double copy_cost= ROW_COPY_COST;
+ THD *thd= join->thd;
+ double copy_cost= ROW_COPY_COST_THD(thd);
cached_covering_key= MAX_KEY;
if (table->is_created())
@@ -14873,7 +14876,7 @@ void JOIN_TAB::estimate_scan_time()
{
cached_covering_key= find_shortest_key(table, &table->covering_keys);
read_time= table->file->ha_key_scan_time(cached_covering_key);
- copy_cost= KEY_COPY_COST;
+ copy_cost= KEY_COPY_COST_THD(thd);
}
else
read_time= table->file->ha_scan_time();
@@ -14888,7 +14891,7 @@ void JOIN_TAB::estimate_scan_time()
found_records= records;
cached_scan_time= read_time;
cached_scan_and_compare_time= (read_time + records *
- (copy_cost + 1/TIME_FOR_COMPARE));
+ (copy_cost + WHERE_COST_THD(thd)));
}
@@ -29967,7 +29970,7 @@ static bool get_range_limit_read_cost(const JOIN_TAB *tab,
best_cost *= rows_limit_for_quick / best_rows;
best_rows = rows_limit_for_quick;
}
- *read_time= best_cost + best_rows/TIME_FOR_COMPARE;
+ *read_time= best_cost + best_rows * WHERE_COST_THD(table->in_use);
*read_rows= best_rows;
res= true;
}
@@ -30280,7 +30283,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
(ha_rows) tab->worst_seeks :
HA_ROWS_MAX);
index_scan_time= (cost.read_cost +
- select_limit / TIME_FOR_COMPARE);
+ select_limit * WHERE_COST_THD(thd));
}
#else
/*
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index b32edefa7f9..f60ac57e310 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -6968,7 +6968,8 @@ static Sys_var_uint Sys_in_subquery_conversion_threshold(
"IN predicate that triggers its conversion to IN subquery. Set to "
"0 to disable the conversion.",
SESSION_VAR(in_subquery_conversion_threshold), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, UINT_MAX), DEFAULT(IN_SUBQUERY_CONVERSION_THRESHOLD), BLOCK_SIZE(1));
+ VALID_RANGE(0, UINT_MAX), DEFAULT(IN_SUBQUERY_CONVERSION_THRESHOLD),
+ BLOCK_SIZE(1));
static Sys_var_ulong Sys_optimizer_max_sel_arg_weight(
"optimizer_max_sel_arg_weight",
@@ -6985,7 +6986,50 @@ static Sys_var_uint Sys_optimizer_cache_hit_ratio(
"optimizer_cache_hit_ratio",
"Expected hit rate of the row and index cache in storage engines. "
"The value should be an integer between 0 and 99, where 0 means cache is "
- "empty and 99 means that value is almost always in the cache",
+ "empty and 99 means that value is almost always in the cache.",
SESSION_VAR(optimizer_cache_hit_ratio), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, 99), DEFAULT(CACHE_HIT_RATIO), 1, NO_MUTEX_GUARD,
+ VALID_RANGE(0, 99), DEFAULT(DEFAULT_CACHE_HIT_RATIO), 1, NO_MUTEX_GUARD,
+ NOT_IN_BINLOG);
+
+static Sys_var_double Sys_optimizer_key_copy_cost(
+ "optimizer_key_copy_cost",
+ "Cost of finding the next key in the engine and copying it to the SQL layer.",
+ SESSION_VAR(optimizer_key_copy_cost), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, 1), DEFAULT(DEFAULT_KEY_COPY_COST), NO_MUTEX_GUARD,
+ NOT_IN_BINLOG);
+
+static Sys_var_double Sys_optimizer_index_block_copy_cost(
+ "optimizer_index_block_copy_cost",
+  "Cost of copying a key block from the cache to internal storage as part of "
+  "an index scan.",
+ SESSION_VAR(optimizer_index_block_copy_cost), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, 1), DEFAULT(DEFAULT_INDEX_BLOCK_COPY_COST), NO_MUTEX_GUARD,
+ NOT_IN_BINLOG);
+
+static Sys_var_double Sys_optimizer_key_next_find_cost(
+ "optimizer_key_next_find_cost",
+ "Cost of finding the next key and rowid when using filters.",
+ SESSION_VAR(optimizer_key_next_find_cost), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, 1), DEFAULT(DEFAULT_KEY_NEXT_FIND_COST), NO_MUTEX_GUARD,
+ NOT_IN_BINLOG);
+
+static Sys_var_double Sys_optimizer_row_copy_cost(
+ "optimizer_row_copy_cost",
+ "Cost of copying a row from the engine or the join cache to the SQL layer.",
+ SESSION_VAR(optimizer_row_copy_cost), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, 1), DEFAULT(DEFAULT_ROW_COPY_COST), NO_MUTEX_GUARD,
+ NOT_IN_BINLOG);
+
+static Sys_var_double Sys_optimizer_where_cost(
+ "optimizer_where_cost",
+ "Cost of checking the row against the WHERE clause.",
+ SESSION_VAR(optimizer_where_cost), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, 1), DEFAULT(DEFAULT_WHERE_COST), NO_MUTEX_GUARD,
+ NOT_IN_BINLOG);
+
+static Sys_var_double Sys_optimizer_key_cmp_cost(
+ "optimizer_key_compare_cost",
+ "Cost of checking a key against the end key condition.",
+ SESSION_VAR(optimizer_key_cmp_cost), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, 1), DEFAULT(DEFAULT_KEY_COMPARE_COST), NO_MUTEX_GUARD,
NOT_IN_BINLOG);
diff --git a/sql/table.cc b/sql/table.cc
index 17611a1e2fb..16dcb48970d 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -5672,12 +5672,14 @@ void TABLE::init(THD *thd, TABLE_LIST *tl)
opt_range_condition_rows=0;
no_cache= false;
initialize_opt_range_structures();
+
+
/*
- Update optimizer_cache_cost to ensure that a SET STATEMENT of
- the variable it will work.
+    Update optimizer costs to ensure that a SET STATEMENT of the
+    variables will work.
*/
- file->set_optimizer_cache_cost(cache_hit_cost(thd->variables.
- optimizer_cache_hit_ratio));
+ file->set_optimizer_costs(thd);
+
#ifdef HAVE_REPLICATION
/* used in RBR Triggers */
master_had_triggers= 0;
@@ -10434,6 +10436,14 @@ inline void TABLE::initialize_opt_range_structures()
TRASH_ALLOC(const_key_parts, s->keys * sizeof(*const_key_parts));
}
+
+double TABLE::OPT_RANGE::index_only_fetch_cost(THD *thd)
+{
+ return (index_only_cost + (double) rows *
+ thd->variables.optimizer_key_copy_cost);
+}
+
+
/*
Mark table to be reopened after query
*/
diff --git a/sql/table.h b/sql/table.h
index 147fc38dd7c..bd1816175df 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -1420,10 +1420,7 @@ public:
Cost of fetching keys with index only read and returning them to the
sql level.
*/
- double index_only_fetch_cost()
- {
- return index_only_cost + (double) rows * KEY_COPY_COST;
- }
+ double index_only_fetch_cost(THD *thd);
} *opt_range;
/*
Bitmaps of key parts that =const for the duration of join execution. If
diff --git a/sql/uniques.cc b/sql/uniques.cc
index 572d80f0b64..09fed128793 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -156,7 +156,7 @@ inline double log2_n_fact(double x)
the same length, so each of total_buf_size elements will be added to a sort
heap with (n_buffers-1) elements. This gives the comparison cost:
- total_buf_elems* log2(n_buffers) / TIME_FOR_COMPARE_ROWID;
+ total_buf_elems* log2(n_buffers) * ROWID_COMPARE_COST;
*/
static double get_merge_buffers_cost(uint *buff_elems, uint elem_size,
@@ -171,8 +171,8 @@ static double get_merge_buffers_cost(uint *buff_elems, uint elem_size,
size_t n_buffers= last - first + 1;
/* Using log2(n)=log(n)/log(2) formula */
- return 2*((double)total_buf_elems*elem_size) / IO_SIZE +
- total_buf_elems*log((double) n_buffers) / (compare_factor * M_LN2);
+ return (2*((double)total_buf_elems*elem_size) / IO_SIZE +
+ total_buf_elems*log((double) n_buffers) * compare_factor / M_LN2);
}
@@ -327,7 +327,7 @@ double Unique::get_use_cost(uint *buffer, size_t nkeys, uint key_size,
result= 2*log2_n_fact(last_tree_elems + 1.0);
if (n_full_trees)
result+= n_full_trees * log2_n_fact(max_elements_in_tree + 1.0);
- result /= compare_factor;
+ result *= compare_factor;
DBUG_PRINT("info",("unique trees sizes: %u=%u*%u + %u", (uint)nkeys,
(uint)n_full_trees,
diff --git a/sql/uniques.h b/sql/uniques.h
index 7e12a391fbd..f4c45cde095 100644
--- a/sql/uniques.h
+++ b/sql/uniques.h
@@ -75,7 +75,7 @@ public:
inline static double get_search_cost(ulonglong tree_elems,
double compare_factor)
{
- return log((double) tree_elems) / (compare_factor * M_LN2);
+ return log((double) tree_elems) * compare_factor / M_LN2;
}
static double get_use_cost(uint *buffer, size_t nkeys, uint key_size,