Diffstat (limited to 'sql/sql_select.cc')
-rw-r--r--  sql/sql_select.cc | 70
1 file changed, 51 insertions(+), 19 deletions(-)
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index a88b8a54a08..ccec6f1215b 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -3801,15 +3801,26 @@ bool JOIN::make_aggr_tables_info()
/*
If we have different sort & group then we must sort the data by group
- and copy it to another tmp table
+ and copy it to another tmp table.
+
This code is also used if we are using DISTINCT on something that
we haven't been able to store in the temporary table yet,
like SEC_TO_TIME(SUM(...)).
+
+ 3. Also, this is used when
+ - the query has window functions, and
+ - the GROUP BY operation is done with the OrderedGroupBy algorithm.
+ In this case, the first temporary table will contain the pre-GROUP-BY
+ data. Force the creation of the second temporary table: the post-GROUP-BY
+ dataset will be written there, and the window function processing code
+ will then be able to process it.
*/
if ((group_list &&
(!test_if_subpart(group_list, order) || select_distinct)) ||
- (select_distinct && tmp_table_param.using_outer_summary_function))
- { /* Must copy to another table */
+ (select_distinct && tmp_table_param.using_outer_summary_function) ||
+ (group_list && !tmp_table_param.quick_group && // (3)
+ select_lex->have_window_funcs())) // (3)
+ { /* Must copy to another table */
DBUG_PRINT("info",("Creating group table"));
calc_group_buffer(this, group_list);
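
For illustration, a hypothetical query of the kind that case (3) covers (table t1 and its data are made up): GROUP BY can be done with OrderedGroupBy by scanning the index on column a, and the window function must then run over the post-GROUP-BY rows, which is why the second temporary table is forced:

    CREATE TABLE t1 (a INT, b INT, KEY(a));
    INSERT INTO t1 VALUES (1,10),(1,20),(2,30);

    -- GROUP BY a can walk the index (OrderedGroupBy, no quick_group);
    -- RANK() must see the grouped rows, so they go to a second temptable:
    SELECT a, SUM(b), RANK() OVER (ORDER BY SUM(b)) AS r
    FROM t1 GROUP BY a;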
@@ -4235,7 +4246,7 @@ JOIN::optimize_distinct()
}
/* Optimize "select distinct b from t1 order by key_part_1 limit #" */
- if (order && skip_sort_order)
+ if (order && skip_sort_order && !unit->lim.is_with_ties())
{
/* Should already have been optimized away */
DBUG_ASSERT(ordered_index_usage == ordered_index_order_by);
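
The added !unit->lim.is_with_ties() check matters for queries like the following hedged sketch (t1 is hypothetical): WITH TIES has to examine rows in ORDER BY order to decide where the tie ends, so the "already sorted, skip the sort" shortcut must not fire:

    CREATE TABLE t1 (a INT, b INT, KEY(b));
    INSERT INTO t1 VALUES (1,1),(2,1),(3,2);

    -- The limit may be extended over rows tied on b, so the ORDER BY
    -- sort cannot be optimized away:
    SELECT DISTINCT b FROM t1 ORDER BY b FETCH FIRST 1 ROWS WITH TIES;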
@@ -24138,11 +24149,17 @@ end_send(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
/*
@brief
- Perform a GROUP BY operation over a stream of rows ordered by their group.
- The result is sent into join->result.
+ Perform OrderedGroupBy operation and write the output into join->result.
@detail
- Also applies HAVING, etc.
+ The input stream is ordered by the GROUP BY expression, so groups come
+ one after another. We only need to accumulate the aggregate value; when
+ a GROUP BY group ends, check the HAVING clause and send the group.
+
+ Note that the output comes in the GROUP BY order, which is required by
+ MySQL's GROUP BY semantics. No further sorting is needed.
+
+ @seealso end_write_group() also implements OrderedGroupBy
*/
enum_nested_loop_state
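
A sketch of a query that can take the end_send_group() path described above (names are hypothetical): the index on the GROUP BY column delivers rows in group order, so each group is aggregated, checked against HAVING, and sent as soon as it ends, with no temporary table needed:

    CREATE TABLE t1 (a INT, b INT, KEY(a));
    INSERT INTO t1 VALUES (1,10),(1,20),(2,30);

    -- Input arrives ordered by a; each group is sent when it completes:
    SELECT a, SUM(b) FROM t1 GROUP BY a HAVING SUM(b) > 15;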
@@ -24339,13 +24356,26 @@ end:
/*
@brief
- Perform a GROUP BY operation over rows coming in arbitrary order.
-
- This is done by looking up the group in a temp.table and updating group
- values.
+ Perform GROUP BY operation over rows coming in arbitrary order: use
+ the TemporaryTableWithPartialSums algorithm.
+
+ @detail
+ The TemporaryTableWithPartialSums algorithm is:
+
+ CREATE TEMPORARY TABLE tmp (
+ group_by_columns PRIMARY KEY,
+ partial_sum
+ );
+
+ for each row R in join output {
+ INSERT INTO tmp VALUES (R.group_by_columns, R.sum_value)
+ ON DUPLICATE KEY UPDATE partial_sum = partial_sum + R.sum_value;
+ }
Also applies HAVING, etc.
+
+ @seealso end_unique_update()
*/
static enum_nested_loop_state
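
Spelled out as plain SQL, the TemporaryTableWithPartialSums idea above looks like the following sketch; it is only an illustration of the algorithm, not what the server literally executes (tmp, g and partial_sum are illustrative names):

    CREATE TEMPORARY TABLE tmp (g INT PRIMARY KEY, partial_sum BIGINT);

    -- One insert-or-update per joined row; input order does not matter:
    INSERT INTO tmp VALUES (1,10) ON DUPLICATE KEY UPDATE partial_sum = partial_sum + 10;
    INSERT INTO tmp VALUES (2,30) ON DUPLICATE KEY UPDATE partial_sum = partial_sum + 30;
    INSERT INTO tmp VALUES (1,20) ON DUPLICATE KEY UPDATE partial_sum = partial_sum + 20;

    SELECT * FROM tmp;   -- yields (1,30), (2,30)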
@@ -24498,13 +24528,15 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
/*
@brief
- Perform a GROUP BY operation over a stream of rows ordered by their group.
- Write the result into a temporary table.
+ Perform OrderedGroupBy operation and write the output into the temporary
+ table (join_tab->table).
@detail
- Also applies HAVING, etc.
+ The input stream is ordered by the GROUP BY expression, so groups come
+ one after another. We only need to accumulate the aggregate value; when
+ a GROUP BY group ends, check the HAVING clause and write the group.
- The rows are written into temptable so e.g. filesort can read them.
+ @seealso end_send_group() also implements OrderedGroupBy
*/
enum_nested_loop_state
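
A hedged sketch of when end_write_group() is used instead of end_send_group() (t1 is hypothetical): the ORDER BY is not a prefix of the GROUP BY, so each finished group is written to the temporary table and e.g. filesort orders the groups afterwards:

    CREATE TABLE t1 (a INT, b INT, KEY(a));
    INSERT INTO t1 VALUES (1,10),(1,20),(2,5);

    -- Groups are produced in index (GROUP BY) order but must be returned
    -- in SUM(b) order, hence the write-to-temptable-then-sort step:
    SELECT a, SUM(b) FROM t1 GROUP BY a ORDER BY SUM(b);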
@@ -30493,7 +30525,9 @@ static bool get_range_limit_read_cost(const POSITION *pos,
cond_selectivity= best_rows / range_rows;
else
cond_selectivity= 1.0;
+#if 0 // FIXME: cond_selectivity=8/4 = 2 in main.update_use_source
DBUG_ASSERT(cond_selectivity <= 1.000000001);
+#endif
set_if_smaller(cond_selectivity, 1.0);
/*
@@ -31435,7 +31469,6 @@ void JOIN::make_notnull_conds_for_range_scans()
{
DBUG_ENTER("JOIN::make_notnull_conds_for_range_scans");
-
if (impossible_where ||
!optimizer_flag(thd, OPTIMIZER_SWITCH_NOT_NULL_RANGE_SCAN))
{
@@ -31515,7 +31548,6 @@ bool build_notnull_conds_for_range_scans(JOIN *join, Item *cond,
table_map allowed)
{
THD *thd= join->thd;
-
DBUG_ENTER("build_notnull_conds_for_range_scans");
for (JOIN_TAB *s= join->join_tab;
@@ -31523,13 +31555,13 @@ bool build_notnull_conds_for_range_scans(JOIN *join, Item *cond,
{
/* Clear all needed bitmaps to mark found fields */
if ((allowed & s->table->map) &&
- !(s->table->map && join->const_table_map))
+ !(s->table->map & join->const_table_map))
bitmap_clear_all(&s->table->tmp_set);
}
/*
Find all null-rejected fields assuming that cond is null-rejected and
- only formulas over tables from 'allowed' are to be taken into account
+ only formulas over tables from 'allowed' are to be taken into account
*/
if (cond->find_not_null_fields(allowed))
DBUG_RETURN(true);
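
For context on what build_notnull_conds_for_range_scans() feeds into, a hypothetical session (t1, t2 and the index are made up): with the not_null_range_scan switch on, a null-rejecting join condition lets the optimizer infer IS NOT NULL on the joined column and consider a range scan for it:

    SET optimizer_switch='not_null_range_scan=on';

    CREATE TABLE t1 (a INT);
    CREATE TABLE t2 (b INT, KEY(b));

    -- t2.b = t1.a rejects NULLs in t2.b, so a "t2.b IS NOT NULL" range
    -- over the index on b becomes a candidate access path:
    SELECT * FROM t1, t2 WHERE t2.b = t1.a;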