| field | value | date |
|---|---|---|
| author | Sergei Petrunia <psergey@askmonty.org> | 2019-08-03 23:15:44 +0300 |
| committer | Sergei Petrunia <psergey@askmonty.org> | 2019-08-04 17:25:17 +0300 |
| commit | 09a85692a65634cda40214b6693f34bec2f5b73b | |
| tree | b26231e501e91ed5a721482ac106d7bf6d4865a3 /sql/opt_range.cc | |
| parent | 05b35cf4c1cc11f2078ebf0a16a1aa82cfe04b25 | |
| download | mariadb-git-09a85692a65634cda40214b6693f34bec2f5b73b.tar.gz | |
Post-merge fixes for rocksdb.group_min_max test
- Fix the LooseScan code to support storage engines that return
HA_ERR_END_OF_FILE if the index scan goes out of the provided range
bounds (see the sketch after this list)
- Add a DBUG_EXECUTE_IF("force_group_by",...) to allow a test to
force a LooseScan
- Adjust the rocksdb.group_min_max test to not use features that are
not present in MariaDB 10.2 (e.g. optimizer_trace: it is present in
MariaDB 10.4, but it doesn't meet the assumptions the test makes about it)
- Adjust the test result file:
= MariaDB doesn't support the "Enhanced Loose Scan" that FB/MySQL has
= MariaDB has different cost calculations.
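
The first bullet describes an error-handling contract: a range-respecting storage engine (such as MyRocks) may answer an index read with HA_ERR_KEY_NOT_FOUND or HA_ERR_END_OF_FILE as soon as the scan leaves the current range's bounds, and the range-scan loop must treat those two codes as "this range is exhausted, move to the next one" rather than as a failure. Below is a minimal, self-contained C++ sketch of that contract, not MariaDB source: FakeEngine, read_next and scan_all_ranges are hypothetical stand-ins for the handler API, while the HA_ERR_* values follow MariaDB's include/my_base.h.

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

// Error-code values as in MariaDB's include/my_base.h.
static const int HA_ERR_KEY_NOT_FOUND = 120;
static const int HA_ERR_END_OF_FILE   = 137;

// Hypothetical stand-in for a range-respecting storage engine: once the
// scan position leaves the current range's bounds, it reports
// HA_ERR_END_OF_FILE instead of returning rows from the next range.
struct FakeEngine
{
  std::vector<std::vector<int>> ranges;  // pre-split "index ranges"
  size_t pos;

  void start_range() { pos = 0; }

  int read_next(size_t range_no, int *row)
  {
    if (range_no >= ranges.size() || pos >= ranges[range_no].size())
      return HA_ERR_END_OF_FILE;         // scan went out of range bounds
    *row = ranges[range_no][pos++];
    return 0;
  }
};

// The contract the fix restores: "key not found" / "end of file" mean
// "this range is exhausted, continue with the next one"; only any other
// non-zero code is a hard error that must be propagated.
int scan_all_ranges(FakeEngine &h, size_t n_ranges)
{
  for (size_t r = 0; r < n_ranges; r++)
  {
    h.start_range();
    int row, res;
    while ((res = h.read_next(r, &row)) == 0)
      std::printf("range %zu -> row %d\n", r, row);
    if (res != HA_ERR_KEY_NOT_FOUND && res != HA_ERR_END_OF_FILE)
      return res;                        // hard error: propagate
    // otherwise fall through and start the next range
  }
  return HA_ERR_END_OF_FILE;             // all ranges consumed
}

int main()
{
  FakeEngine h;
  h.ranges = {{1, 2}, {10}};
  return scan_all_ranges(h, h.ranges.size()) == HA_ERR_END_OF_FILE ? 0 : 1;
}
```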
Diffstat (limited to 'sql/opt_range.cc')
-rw-r--r-- | sql/opt_range.cc | 38 |
1 file changed, 30 insertions(+), 8 deletions(-)
```diff
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 3ba8c44d790..3f6c18dca5c 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -2428,6 +2428,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
   KEY_PART *key_parts;
   KEY *key_info;
   PARAM param;
+  bool force_group_by = false;
 
   if (check_stack_overrun(thd, 2*STACK_MIN_SIZE + sizeof(PARAM), buff))
     DBUG_RETURN(0);                           // Fatal error flag is set
@@ -2555,15 +2556,20 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
       Try to construct a QUICK_GROUP_MIN_MAX_SELECT.
       Notice that it can be constructed no matter if there is a range tree.
     */
+    DBUG_EXECUTE_IF("force_group_by", force_group_by = true; );
     group_trp= get_best_group_min_max(&param, tree, best_read_time);
     if (group_trp)
     {
       param.table->quick_condition_rows= MY_MIN(group_trp->records,
                                                 head->stat_records());
-      if (group_trp->read_cost < best_read_time)
+      if (group_trp->read_cost < best_read_time || force_group_by)
       {
         best_trp= group_trp;
         best_read_time= best_trp->read_cost;
+        if (force_group_by)
+        {
+          goto force_plan;
+        }
       }
     }
@@ -2663,6 +2669,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
     }
   }
 
+force_plan:
   thd->mem_root= param.old_root;
 
   /* If we got a read plan, create a quick select from it. */
@@ -11509,13 +11516,28 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length,
     DBUG_ASSERT(cur_prefix != NULL);
     result= file->ha_index_read_map(record, cur_prefix, keypart_map,
                                     HA_READ_AFTER_KEY);
-    if (result || last_range->max_keypart_map == 0)
-      DBUG_RETURN(result);
-
-    key_range previous_endpoint;
-    last_range->make_max_endpoint(&previous_endpoint, prefix_length, keypart_map);
-    if (file->compare_key(&previous_endpoint) <= 0)
-      DBUG_RETURN(0);
+    if (result || last_range->max_keypart_map == 0) {
+      /*
+        Only return if actual failure occurred. For HA_ERR_KEY_NOT_FOUND
+        or HA_ERR_END_OF_FILE, we just want to continue to reach the next
+        set of ranges. It is possible for the storage engine to return
+        HA_ERR_KEY_NOT_FOUND/HA_ERR_END_OF_FILE even when there are more
+        keys if it respects the end range set by the read_range_first call
+        below.
+      */
+      if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE)
+        DBUG_RETURN(result);
+    } else {
+      /*
+        For storage engines that don't respect end range, check if we've
+        moved past the current range.
+      */
+      key_range previous_endpoint;
+      last_range->make_max_endpoint(&previous_endpoint, prefix_length,
+                                    keypart_map);
+      if (file->compare_key(&previous_endpoint) <= 0)
+        DBUG_RETURN(0);
+    }
   }
 
   uint count= ranges.elements - (uint)(cur_range - (QUICK_RANGE**) ranges.buffer);
```
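
The first three hunks wire in the plan-forcing debug keyword: DBUG_EXECUTE_IF("force_group_by", ...) flips a flag, the flag overrides the cost comparison, and goto force_plan jumps past the remaining candidate plans. Below is a hedged, self-contained sketch of that control flow, not the server code: Plan, choose_plan, debug_keyword_set and the DEBUG_KEYWORDS environment variable are hypothetical stand-ins (the server uses the DBUG library rather than getenv).

```cpp
#include <cstdio>
#include <cstdlib>
#include <cstring>

struct Plan { const char *name; double read_cost; };

// Stand-in for DBUG_EXECUTE_IF("force_group_by", ...): reads a hypothetical
// DEBUG_KEYWORDS environment variable instead of the server's DBUG state.
static bool debug_keyword_set(const char *kw)
{
  const char *v = std::getenv("DEBUG_KEYWORDS");
  return v && std::strstr(v, kw) != nullptr;
}

// Normally the group-min-max (LooseScan) plan wins only if it is cheaper;
// under the debug keyword it is taken unconditionally, and the early return
// mirrors the 'goto force_plan' that skips the remaining candidate plans.
const Plan *choose_plan(const Plan *range_plan, const Plan *group_plan)
{
  bool force_group_by = debug_keyword_set("force_group_by");
  const Plan *best = range_plan;

  if (group_plan &&
      (group_plan->read_cost < best->read_cost || force_group_by))
  {
    best = group_plan;
    if (force_group_by)
      return best;                 // jump past all further cost comparisons
  }
  /* ... further candidate plans would be costed here ... */
  return best;
}

int main()
{
  Plan range_scan = { "range scan", 10.0 };
  Plan loose_scan = { "group-min-max (LooseScan)", 25.0 };
  std::printf("picked: %s\n", choose_plan(&range_scan, &loose_scan)->name);
  return 0;
}
```

In a debug build of the server itself, a test would typically turn the keyword on with `SET debug_dbug='+d,force_group_by';` before running the query, which is how a test can force a LooseScan regardless of the cost estimates.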