Diffstat (limited to 'sql/sql_select.cc')
-rw-r--r-- | sql/sql_select.cc | 188
1 files changed, 142 insertions, 46 deletions
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 5cd3f88352a..7dc3612c011 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -260,7 +260,8 @@ static TABLE *get_sort_by_table(ORDER *a,ORDER *b,List<TABLE_LIST> &tables,
 static void calc_group_buffer(JOIN *join, ORDER *group);
 static bool make_group_fields(JOIN *main_join, JOIN *curr_join);
 static bool alloc_group_fields(JOIN *join, ORDER *group);
-static bool alloc_order_fields(JOIN *join, ORDER *group);
+static bool alloc_order_fields(JOIN *join, ORDER *group,
+                               uint max_number_of_elements);
 // Create list for using with tempory table
 static bool change_to_use_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array,
                                      List<Item> &new_list1,
@@ -1393,9 +1394,23 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num,
 
   if (order)
   {
-    bool real_order= FALSE;
-    ORDER *ord;
-    for (ord= order; ord; ord= ord->next)
+    bool requires_sorting= FALSE;
+    /*
+      WITH TIES forces the results to be sorted, even if it's not sanely
+      sortable.
+    */
+    if (select_lex->limit_params.with_ties)
+      requires_sorting= true;
+
+    /*
+      Go through each ORDER BY item and perform the following:
+      1. Detect if none of the items contain meaningful data, which means we
+         can drop the sorting altogether.
+      2. Split any columns with aggregation functions or window functions into
+         their base components and store them as separate fields.
+         (see split_sum_func) for more details.
+    */
+    for (ORDER *ord= order; ord; ord= ord->next)
     {
       Item *item= *ord->item;
       /*
@@ -1404,7 +1419,7 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num,
         zero length NOT NULL string functions there.
         Such tuples don't contain any data to sort.
       */
-      if (!real_order &&
+      if (!requires_sorting &&
           /* Not a zero length NOT NULL field */
          ((item->type() != Item::FIELD_ITEM ||
            ((Item_field *) item)->field->maybe_null() ||
@@ -1414,15 +1429,26 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num,
            item->maybe_null ||
            item->result_type() != STRING_RESULT ||
            item->max_length)))
-        real_order= TRUE;
+        requires_sorting= TRUE;
 
       if ((item->with_sum_func() && item->type() != Item::SUM_FUNC_ITEM) ||
           item->with_window_func)
         item->split_sum_func(thd, ref_ptrs, all_fields, SPLIT_SUM_SELECT);
     }
-    if (!real_order)
+    /* Drop the ORDER BY clause if none of the columns contain any data that
+       can produce a meaningful sorted set. */
+    if (!requires_sorting)
       order= NULL;
   }
+  else
+  {
+    /* The current select does not have an ORDER BY */
+    if (select_lex->limit_params.with_ties)
+    {
+      my_error(ER_WITH_TIES_NEEDS_ORDER, MYF(0));
+      DBUG_RETURN(-1);
+    }
+  }
 
   if (having && having->with_sum_func())
     having->split_sum_func2(thd, ref_ptrs, all_fields,
@@ -2615,6 +2641,19 @@ int JOIN::optimize_stage2()
     if (!order && org_order)
       skip_sort_order= 1;
   }
+
+  /*
+    For FETCH ... WITH TIES save how many items order by had, after we've
+    removed constant items that have no relevance on the final sorting.
+  */
+  if (unit->lim.is_with_ties())
+  {
+    DBUG_ASSERT(with_ties_order_count == 0);
+    for (ORDER *it= order; it; it= it->next)
+      with_ties_order_count+= 1;
+  }
+
   /*
     Check if we can optimize away GROUP BY/DISTINCT.
     We can do that if there are no aggregate functions, the
@@ -2802,7 +2841,7 @@ int JOIN::optimize_stage2()
   if (test_if_subpart(group_list, order) ||
       (!group_list && tmp_table_param.sum_func_count))
   {
-    order=0;
+    order= 0;
     if (is_indexed_agg_distinct(this, NULL))
       sort_and_group= 0;
   }
@@ -3738,6 +3777,9 @@ bool JOIN::make_aggr_tables_info()
       sort_tab->filesort->limit=
         (has_group_by || (join_tab + top_join_tab_count > curr_tab + 1)) ?
         select_limit : unit->lim.get_select_limit();
+
+      if (unit->lim.is_with_ties())
+        sort_tab->filesort->limit= HA_POS_ERROR;
     }
     if (!only_const_tables() && !join_tab[const_tables].filesort &&
@@ -3774,6 +3816,18 @@ bool JOIN::make_aggr_tables_info()
   if (select_lex->custom_agg_func_used())
     status_var_increment(thd->status_var.feature_custom_aggregate_functions);
 
+  /*
+    Allocate Cached_items of ORDER BY for FETCH FIRST .. WITH TIES.
+    The order list might have been modified prior to this, but we are
+    only interested in the initial order by columns, after all const
+    elements are removed.
+  */
+  if (unit->lim.is_with_ties())
+  {
+    if (alloc_order_fields(this, order, with_ties_order_count))
+      DBUG_RETURN(true);
+  }
+
   fields= curr_fields_list;
   // Reset before execution
   set_items_ref_array(items0);
@@ -21982,6 +22036,19 @@ end_send(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
       DBUG_RETURN(NESTED_LOOP_ERROR);
     DBUG_RETURN(NESTED_LOOP_OK);
   }
+
+  if (join->send_records >= join->unit->lim.get_select_limit() &&
+      join->unit->lim.is_with_ties())
+  {
+    /*
+      Stop sending rows if the order fields corresponding to WITH TIES
+      have changed.
+    */
+    int idx= test_if_item_cache_changed(join->order_fields);
+    if (idx >= 0)
+      join->do_send_rows= false;
+  }
+
   if (join->do_send_rows)
   {
     int error;
@@ -21998,27 +22065,36 @@ end_send(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
   }
 
   ++join->send_records;
-  if (join->send_records >= join->unit->lim.get_select_limit() &&
-      !join->do_send_rows)
+  if (join->send_records >= join->unit->lim.get_select_limit())
   {
-    /*
-      If we have used Priority Queue for optimizing order by with limit,
-      then stop here, there are no more records to consume.
-      When this optimization is used, end_send is called on the next
-      join_tab.
-    */
-    if (join->order &&
-        join->select_options & OPTION_FOUND_ROWS &&
-        join_tab > join->join_tab &&
-        (join_tab - 1)->filesort && (join_tab - 1)->filesort->using_pq)
+    if (!join->do_send_rows)
     {
-      DBUG_PRINT("info", ("filesort NESTED_LOOP_QUERY_LIMIT"));
-      DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT);
+      /*
+        If we have used Priority Queue for optimizing order by with limit,
+        then stop here, there are no more records to consume.
+        When this optimization is used, end_send is called on the next
+        join_tab.
+      */
+      if (join->order &&
+          join->select_options & OPTION_FOUND_ROWS &&
+          join_tab > join->join_tab &&
+          (join_tab - 1)->filesort && (join_tab - 1)->filesort->using_pq)
+      {
+        DBUG_PRINT("info", ("filesort NESTED_LOOP_QUERY_LIMIT"));
+        DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT);
+      }
+      DBUG_RETURN(NESTED_LOOP_OK);
+    }
+
+    /* For WITH TIES we keep sending rows until a group has changed. */
+    if (join->unit->lim.is_with_ties())
+    {
+      /* Prepare the order_fields comparison for with ties. */
+      if (join->send_records == join->unit->lim.get_select_limit())
+        (void) test_if_group_changed(join->order_fields);
+      /* One more loop, to check if the next row matches with_ties or not. */
+      DBUG_RETURN(NESTED_LOOP_OK);
     }
-  }
-  if (join->send_records >= join->unit->lim.get_select_limit() &&
-      join->do_send_rows)
-  {
     if (join->select_options & OPTION_FOUND_ROWS)
     {
       JOIN_TAB *jt=join->join_tab;
@@ -22095,6 +22171,7 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
   {
     if (join->procedure)
       join->procedure->end_group();
+    /* Test if there was a group change. */
     if (idx < (int) join->send_group_parts)
     {
       int error=0;
@@ -22113,6 +22190,7 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
     }
     else
     {
+      /* Reset all sum functions on group change. */
      if (!join->first_record)
      {
        List_iterator_fast<Item> it(*join->fields);
@@ -22152,21 +22230,27 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
         DBUG_RETURN(NESTED_LOOP_ERROR);        /* purecov: inspected */
       if (end_of_records)
         DBUG_RETURN(NESTED_LOOP_OK);
-      if (join->send_records >= join->unit->lim.get_select_limit() &&
-          join->do_send_rows)
-      {
-        if (!(join->select_options & OPTION_FOUND_ROWS))
-          DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); // Abort nicely
-        join->do_send_rows=0;
-        join->unit->lim.set_unlimited();
+      if (join->send_records >= join->unit->lim.get_select_limit() &&
+          join->do_send_rows)
+      {
+        /* WITH TIES can be computed during end_send_group if
+           the order by is a subset of group by and we had an index
+           available to compute group by order directly. */
+        if (!join->unit->lim.is_with_ties() ||
+            idx < (int)join->with_ties_order_count)
+        {
+          if (!(join->select_options & OPTION_FOUND_ROWS))
+            DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); // Abort nicely
+          join->do_send_rows= 0;
+          join->unit->lim.set_unlimited();
+        }
       }
       else if (join->send_records >= join->fetch_limit)
       {
         /*
           There is a server side cursor and all rows
           for this fetch request are sent.
-        */
-        /*
+
           Preventing code duplication. When finished with the group reset
           the group functions and copy_fields. We fall through. bug #11904
         */
@@ -25221,6 +25305,19 @@ make_group_fields(JOIN *main_join, JOIN *curr_join)
   return (0);
 }
 
+static bool
+fill_cached_item_list(THD *thd, List<Cached_item> *list, ORDER *order,
+                      uint max_number_of_elements = UINT_MAX)
+{
+  for (; order && max_number_of_elements ;
+       order= order->next, max_number_of_elements--)
+  {
+    Cached_item *tmp= new_Cached_item(thd, *order->item, true);
+    if (!tmp || list->push_front(tmp))
+      return true;
+  }
+  return false;
+}
 
 /**
   Get a list of buffers for saving last group.
@@ -25229,21 +25326,20 @@ make_group_fields(JOIN *main_join, JOIN *curr_join)
 */
 
 static bool
-alloc_group_fields(JOIN *join,ORDER *group)
+alloc_group_fields(JOIN *join, ORDER *group)
 {
-  if (group)
-  {
-    for (; group ; group=group->next)
-    {
-      Cached_item *tmp=new_Cached_item(join->thd, *group->item, TRUE);
-      if (!tmp || join->group_fields.push_front(tmp))
-        return TRUE;
-    }
-  }
+  if (fill_cached_item_list(join->thd, &join->group_fields, group))
+    return true;
   join->sort_and_group=1;                       /* Mark for do_select */
-  return FALSE;
+  return false;
 }
 
+static bool
+alloc_order_fields(JOIN *join, ORDER *order, uint max_number_of_elements)
+{
+  return fill_cached_item_list(join->thd, &join->order_fields, order,
+                               max_number_of_elements);
+}
 
 /*
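
For orientation, the behaviour the end_send() changes implement can be read as: once send_records reaches the limit, further rows are sent only while the cached ORDER BY values in join->order_fields compare equal to those of the last row inside the limit; the first change stops the stream. Below is a minimal standalone sketch of that semantics in plain C++ (Row, send_with_ties and the hard-coded data are illustrative, not server code):

// Sketch of WITH TIES: after `limit` rows, keep emitting rows only while
// the ORDER BY key still equals the key of the last row inside the limit.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct Row { std::string key; int payload; };    // key = ORDER BY column(s)

static void send_with_ties(const std::vector<Row> &sorted_rows,
                           std::size_t limit)
{
  if (limit == 0)
    return;
  std::string boundary_key;                      // key of the limit-th row
  for (std::size_t sent= 0; sent < sorted_rows.size(); sent++)
  {
    const Row &row= sorted_rows[sent];
    if (sent >= limit && row.key != boundary_key)
      break;                                     // first non-tied row: stop
    std::cout << row.key << " " << row.payload << "\n";
    if (sent == limit - 1)
      boundary_key= row.key;                     // remember the tie value
  }
}

int main()
{
  // Rows already sorted on `key`, as ORDER BY guarantees in the server.
  std::vector<Row> rows= {{"a", 1}, {"b", 2}, {"b", 3}, {"b", 4}, {"c", 5}};
  send_with_ties(rows, 2);        // prints: a 1, b 2, b 3, b 4 (ties kept)
  return 0;
}

At the SQL level this corresponds to FETCH FIRST n ROWS WITH TIES; as the JOIN::prepare() hunk shows, the clause is rejected with ER_WITH_TIES_NEEDS_ORDER when the query has no ORDER BY.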