summaryrefslogtreecommitdiff
path: root/sql/item_sum.cc
diff options
context:
space:
mode:
authorVarun Gupta <varun.gupta@mariadb.com>2020-06-08 11:25:30 +0530
committerVarun Gupta <varun.gupta@mariadb.com>2020-06-09 17:55:29 +0530
commit81a08c5462154e169d0a35e52c023d066065b175 (patch)
treea1cd346b7aba441371435743ef6db49b1ec83017 /sql/item_sum.cc
parentbefb0bed68b555852e01859a846bf7ac40f15dbb (diff)
downloadmariadb-git-81a08c5462154e169d0a35e52c023d066065b175.tar.gz
MDEV-11563: GROUP_CONCAT(DISTINCT ...) may produce a non-distinct list
Backported from MYSQL Bug #25331425: DISTINCT CLAUSE DOES NOT WORK IN GROUP_CONCAT Issue: ------ The problem occurs when: 1) GROUP_CONCAT (DISTINCT ....) is used in the query. 2) Data size greater than value of system variable: tmp_table_size. The result would contain values that are non-unique. Root cause: ----------- An in-memory structure is used to filter out non-unique values. When the data size exceeds tmp_table_size, the overflow is written to disk as a separate file. The expectation here is that when all such files are merged, the full set of unique values can be obtained. But the Item_func_group_concat::add function is in a bit of a hurry. Even as it is adding values to the tree, it wants to decide if a value is unique and write it to the result buffer. This works fine if the configured maximum size is greater than the size of the data. But since tmp_table_size is set to a low value, the size of the tree is smaller and hence requires the creation of multiple copies on disk. Item_func_group_concat currently has no mechanism to merge all the copies on disk and then generate the result. This results in duplicate values. Solution: --------- In case of the DISTINCT clause, don't write to the result buffer immediately. Do the merge and only then put the unique values in the result buffer. This has been done in Item_func_group_concat::val_str. Note regarding result file changes: ----------------------------------- Earlier, when a unique value was seen in Item_func_group_concat::add, it was dumped to the output. So the result is in the order stored in the SE. But with this fix, we wait until all the data is read and the final set of unique values is written to the output buffer. So the data appears in sorted order. This only fixes the cases where we have DISTINCT without an ORDER BY clause in GROUP_CONCAT.
Diffstat (limited to 'sql/item_sum.cc')
-rw-r--r--sql/item_sum.cc40
1 files changed, 25 insertions, 15 deletions
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index d843e87fa03..8283ccd562d 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -3619,23 +3619,25 @@ int dump_leaf_key(void* key_arg, element_count count __attribute__((unused)),
ulonglong *offset_limit= &item->copy_offset_limit;
ulonglong *row_limit = &item->copy_row_limit;
if (item->limit_clause && !(*row_limit))
+ {
+ item->result_finalized= true;
return 1;
-
- if (item->no_appended)
- item->no_appended= FALSE;
- else
- result->append(*item->separator);
+ }
tmp.length(0);
if (item->limit_clause && (*offset_limit))
{
item->row_count++;
- item->no_appended= TRUE;
(*offset_limit)--;
return 0;
}
+ if (!item->result_finalized)
+ item->result_finalized= true;
+ else
+ result->append(*item->separator);
+
for (; arg < arg_end; arg++)
{
String *res;
@@ -3890,7 +3892,7 @@ void Item_func_group_concat::clear()
result.copy();
null_value= TRUE;
warning_for_row= FALSE;
- no_appended= TRUE;
+ result_finalized= FALSE;
if (offset_limit)
copy_offset_limit= offset_limit->val_int();
if (row_limit)
@@ -4023,13 +4025,12 @@ bool Item_func_group_concat::add()
return 1;
tree_len+= row_str_len;
}
+
/*
- If the row is not a duplicate (el->count == 1)
- we can dump the row here in case of GROUP_CONCAT(DISTINCT...)
- instead of doing tree traverse later.
+ In case of GROUP_CONCAT with DISTINCT or ORDER BY (or both) don't dump the
+ row to the output buffer here. That will be done in val_str.
*/
- if (row_eligible && !warning_for_row &&
- (!tree || (el->count == 1 && distinct && !arg_count_order)))
+ if (row_eligible && !warning_for_row && (!tree && !distinct))
dump_leaf_key(table->record[0] + table->s->null_bytes, 1, this);
return 0;
@@ -4264,9 +4265,18 @@ String* Item_func_group_concat::val_str(String* str)
DBUG_ASSERT(fixed == 1);
if (null_value)
return 0;
- if (no_appended && tree)
- /* Tree is used for sorting as in ORDER BY */
- tree_walk(tree, &dump_leaf_key, this, left_root_right);
+
+ if (!result_finalized) // Result yet to be written.
+ {
+ if (tree != NULL) // order by
+ tree_walk(tree, &dump_leaf_key, this, left_root_right);
+ else if (distinct) // distinct (and no order by).
+ unique_filter->walk(table, &dump_leaf_key, this);
+ else if (row_limit && copy_row_limit == (ulonglong)row_limit->val_int())
+ return &result;
+ else
+ DBUG_ASSERT(false); // Can't happen
+ }
if (table && table->blob_storage &&
table->blob_storage->is_truncated_value())