summary refs log tree commit diff
path: root/sql/opt_range.cc
diff options
context:
space:
mode:
Diffstat (limited to 'sql/opt_range.cc')
-rw-r--r--  sql/opt_range.cc  36
1 files changed, 20 insertions, 16 deletions
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index cb5a0604733..8c69f7f5b0b 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -5227,7 +5227,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
/* Add Unique operations cost */
unique_calc_buff_size=
- Unique::get_cost_calc_buff_size((ulong)non_cpk_scan_records,
+ Unique_impl::get_cost_calc_buff_size((ulong)non_cpk_scan_records,
param->table->file->ref_length,
(size_t)param->thd->variables.sortbuff_size);
if (param->imerge_cost_buff_size < unique_calc_buff_size)
@@ -5239,7 +5239,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
}
{
- const double dup_removal_cost= Unique::get_use_cost(
+ const double dup_removal_cost= Unique_impl::get_use_cost(
param->imerge_cost_buff, (uint)non_cpk_scan_records,
param->table->file->ref_length,
(size_t)param->thd->variables.sortbuff_size,
@@ -5849,7 +5849,7 @@ bool prepare_search_best_index_intersect(PARAM *param,
return TRUE;
size_t calc_cost_buff_size=
- Unique::get_cost_calc_buff_size((size_t)records_in_scans,
+ Unique_impl::get_cost_calc_buff_size((size_t)records_in_scans,
common->key_size,
common->max_memory_size);
if (!(common->buff_elems= (uint *) alloc_root(param->mem_root,
@@ -6198,7 +6198,7 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
*/
ha_rows elems_in_tree= common_info->search_scans[0]->records-
common_info->search_scans[0]->filtered_out ;
- next->in_memory_cost+= Unique::get_search_cost(elems_in_tree,
+ next->in_memory_cost+= Unique_impl::get_search_cost(elems_in_tree,
common_info->compare_factor)*
ext_index_scan_records;
cost= next->in_memory_cost;
@@ -6211,7 +6211,7 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
size_t max_memory_size= common_info->max_memory_size;
records_sent_to_unique+= ext_index_scan_records;
- cost= Unique::get_use_cost(buff_elems, (size_t) records_sent_to_unique, key_size,
+ cost= Unique_impl::get_use_cost(buff_elems, (size_t) records_sent_to_unique, key_size,
max_memory_size, compare_factor, TRUE,
&next->in_memory);
if (records_filtered_out_by_cpk)
@@ -6221,7 +6221,7 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
double cost2;
bool in_memory2;
ha_rows records2= records_sent_to_unique-records_filtered_out_by_cpk;
- cost2= Unique::get_use_cost(buff_elems, (size_t) records2, key_size,
+ cost2= Unique_impl::get_use_cost(buff_elems, (size_t) records2, key_size,
max_memory_size, compare_factor, TRUE,
&in_memory2);
cost2+= get_cpk_filter_cost(ext_index_scan_records, common_info->cpk_scan,
@@ -11746,12 +11746,12 @@ int read_keys_and_merge_scans(THD *thd,
READ_RECORD *read_record,
bool intersection,
key_map *filtered_scans,
- Unique **unique_ptr)
+ Unique_impl **unique_ptr)
{
List_iterator_fast<QUICK_RANGE_SELECT> cur_quick_it(quick_selects);
QUICK_RANGE_SELECT* cur_quick;
int result;
- Unique *unique= *unique_ptr;
+ Unique_impl *unique= *unique_ptr;
handler *file= head->file;
bool with_cpk_filter= pk_quick_select != NULL;
DBUG_ENTER("read_keys_and_merge");
@@ -11778,10 +11778,14 @@ int read_keys_and_merge_scans(THD *thd,
DBUG_EXECUTE_IF("only_one_Unique_may_be_created",
DBUG_SET("+d,index_merge_may_not_create_a_Unique"); );
- unique= new Unique(refpos_order_cmp, (void *)file,
- file->ref_length,
- (size_t)thd->variables.sortbuff_size,
- intersection ? quick_selects.elements : 0);
+ Descriptor *desc= new Fixed_size_keys_for_rowids(file);
+
+ if (!desc)
+ goto err;
+ unique= new Unique_impl(refpos_cmp, (void *)desc,
+ file->ref_length,
+ (size_t)thd->variables.sortbuff_size,
+ intersection ? quick_selects.elements : 0, desc);
if (!unique)
goto err;
*unique_ptr= unique;
@@ -11792,7 +11796,7 @@ int read_keys_and_merge_scans(THD *thd,
}
DBUG_ASSERT(file->ref_length == unique->get_size());
- DBUG_ASSERT(thd->variables.sortbuff_size == unique->get_max_in_memory_size());
+ DBUG_ASSERT(thd->variables.sortbuff_size <= unique->get_max_in_memory_size());
for (;;)
{
@@ -11850,7 +11854,7 @@ int read_keys_and_merge_scans(THD *thd,
*/
head->file->ha_end_keyread();
if (init_read_record(read_record, thd, head, (SQL_SELECT*) 0,
- &unique->sort, 1 , 1, TRUE))
+ unique->get_sort(), 1 , 1, TRUE))
result= 1;
DBUG_RETURN(result);
@@ -11893,7 +11897,7 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
result= HA_ERR_END_OF_FILE;
end_read_record(&read_record);
// Free things used by sort early. Shouldn't be strictly necessary
- unique->sort.reset();
+ unique->get_sort()->reset();
/* All rows from Unique have been retrieved, do a clustered PK scan */
if (pk_quick_select)
{
@@ -11928,7 +11932,7 @@ int QUICK_INDEX_INTERSECT_SELECT::get_next()
{
result= HA_ERR_END_OF_FILE;
end_read_record(&read_record);
- unique->sort.reset(); // Free things early
+ unique->get_sort()->reset(); // Free things early
}
DBUG_RETURN(result);