path: root/mysql-test/r/group_by.result
author    Monty <monty@mariadb.org>  2015-07-05 12:39:46 +0300
committer Monty <monty@mariadb.org>  2015-07-05 12:39:46 +0300
commit    86377d078ef130d3da32c5da31131e519822e139 (patch)
tree      ea6ab5a2dbf0672aab54c270c09d2b6d2919300d /mysql-test/r/group_by.result
parent    79af0b3823aa91d30914a7e3378bf644995bea7f (diff)
download  mariadb-git-86377d078ef130d3da32c5da31131e519822e139.tar.gz
Fixes done while working on MDEV-4119:
Fixed several optimizer issues related to GROUP BY:

a) Referring to a SELECT column in HAVING sometimes calculated it twice,
   which caused problems with non-deterministic functions.
b) Removing duplicate fields and constants from GROUP BY was done too late
   for the "using index for group by" optimization to work.
c) EXPLAIN SELECT ... GROUP BY wrongly showed 'Using filesort' in some cases
   involving "Using index for group-by".

a) was fixed by:
- Changed the last argument of Item::split_sum_func2() from bool to int to
  allow more flags.
- Added a flag argument to Item::split_sum_func() to allow one to specify
  whether the item was in the SELECT part.
- Mark all split_sum_func() calls from SELECT with SPLIT_SUM_SELECT.
- Changed split_sum_func2() to do nothing if called with an argument that is
  not a sum function and doesn't include sum functions, when we are not an
  argument to SELECT. This ensures that in a case like
    select a*sum(b) as f1 from t1 where a=1 group by c having f1 <= 10;
  the 'a' in the SELECT part is stored as a reference in the temporary table
  together with sum(b), while the 'a' in HAVING isn't (not needed, as 'a' is
  already a reference to a column in the result).

b) was fixed by:
- Added an extra remove_const() pass for GROUP BY arguments before
  make_join_statistics() in the case of a one-table SELECT. This allows
  get_best_group_min_max() to optimize things better.

c) was fixed by:
- Added a test for the group by optimization in JOIN::exec_inner for
  select->quick->get_type() == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX.

item.cc:
- Simplified Item::split_sum_func2().
- Split tests to make them faster and easier to read.
- Changed the last argument of Item::split_sum_func2() from bool to int to
  allow more flags.
- Added a flag argument to Item::split_sum_func() to allow one to specify
  whether the item was in the SELECT part.
- Changed split_sum_func2() to do nothing if called with an argument that is
  not a sum function and doesn't include sum functions, when we are not an
  argument to SELECT.

opt_range.cc:
- Simplified get_best_group_min_max() by first calculating how many GROUP BY
  elements there are.
- Use join->group instead of join->group_list to test for GROUP BY, as
  join->group_list may be NULL if everything was optimized away.

sql_select.cc:
- Added an extra remove_const() pass for GROUP BY arguments before
  make_join_statistics() in the case of a one-table SELECT.
- Use group instead of group_list to test for GROUP BY, as group_list may be
  NULL if everything was optimized away.
- Moved printing of "Error in remove_const" into remove_const() instead of
  having it in the caller.
- Simplified some if tests by re-ordering code.
- Fixed update_depend_map_for_order() and remove_const() to handle the case
  where make_join_statistics() has not yet been called (join->join_tab is 0
  in this case).
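A minimal sketch of the behaviour behind these fixes, assuming the t1/t2 schemas
used by the tests in the diff below (the exact EXPLAIN plan depends on the
server version, data and statistics):

-- Fix a): a SELECT alias (f1, or the non-deterministic r2) referenced in
-- HAVING reuses the value already computed for the SELECT list instead of
-- evaluating the expression a second time per group.
create table t1 (a int, b int, c int);
insert into t1 (a,b) values (1,2),(1,3),(2,5);
select a, round(rand(100)*10) r2, sum(1) r1 from t1 where a = 1
group by a having r1 > 1 and r2 <= 2;
select a*sum(b) as f1 from t1 where a = 1 group by c having f1 <= 10;

-- Fixes b) and c): duplicates and constants are stripped from GROUP BY early
-- enough for "Using index for group-by" to be considered, and EXPLAIN no
-- longer reports a spurious "Using filesort" for such plans.
create table t2 (col1 int, col2 int, key idx (col1, col2));
explain select col1 f1, col1 f2 from t2 group by f1, 1+1;
drop table t1, t2;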
Diffstat (limited to 'mysql-test/r/group_by.result')
-rw-r--r--  mysql-test/r/group_by.result  88
1 file changed, 85 insertions, 3 deletions
diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result
index 38abbfef261..483d4d1ca8e 100644
--- a/mysql-test/r/group_by.result
+++ b/mysql-test/r/group_by.result
@@ -522,6 +522,7 @@ NULL 9
3
b 1
drop table t1;
+set big_tables=0;
create table t1 (a int not null, b int not null);
insert into t1 values (1,1),(1,2),(3,1),(3,2),(2,2),(2,1);
create table t2 (a int not null, b int not null, key(a));
@@ -659,7 +660,10 @@ insert into t1 (a,b) values (1,2),(1,3),(2,5);
select a, 0.1*0+1 r2, sum(1) r1 from t1 where a = 1 group by a having r1>1 and r2=1;
a r2 r1
1 1.0 2
-select a, round(rand(100)*10) r2, sum(1) r1 from t1 where a = 1 group by a having r1>1 and r2<=2;
+select a, round(rand(100)*10) r2, sum(1) r1 from t1 where a = 1 group by a having r1>1 and r2<=2;
+a r2 r1
+1 2 2
+select a, round(rand(100)*10) r2, sum(1) r1 from t1 group by a having r1>1 and r2<=2;
a r2 r1
1 2 2
select a,sum(b) from t1 where a=1 group by c;
@@ -668,6 +672,12 @@ a sum(b)
select a*sum(b) from t1 where a=1 group by c;
a*sum(b)
5
+select a*sum(b) as f1 from t1 where a=1 group by c having f1 <= 10;
+f1
+5
+select a,a*sum(b) as f1 from t1 where a=1 group by c having a*sum(b)+0 <= 10;
+a f1
+1 5
select sum(a)*sum(b) from t1 where a=1 group by c;
sum(a)*sum(b)
10
@@ -1988,12 +1998,12 @@ SHOW SESSION STATUS LIKE 'Sort_scan%';
Variable_name Value
Sort_scan 0
EXPLAIN SELECT SQL_BIG_RESULT col1 AS field1, col1 AS field2
-FROM t1 GROUP BY field1, field2;;
+FROM t1 GROUP BY field1, field2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL idx 5 NULL 20 Using index; Using filesort
FLUSH STATUS;
SELECT SQL_BIG_RESULT col1 AS field1, col1 AS field2
-FROM t1 GROUP BY field1, field2;;
+FROM t1 GROUP BY field1, field2;
field1 field2
1 1
2 2
@@ -2082,6 +2092,58 @@ f1 f2
19 19
20 20
explain
+select col1 f1, col1 f2 from t1 group by f1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL idx 5 NULL 20 Using index
+select col1 f1, col1 f2 from t1 group by f1;
+f1 f2
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+10 10
+11 11
+12 12
+13 13
+14 14
+15 15
+16 16
+17 17
+18 18
+19 19
+20 20
+explain
+select col1 f1, col1 f2 from t1 group by f1, f2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL idx 5 NULL 20 Using index
+select col1 f1, col1 f2 from t1 group by f1, f2;
+f1 f2
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+10 10
+11 11
+12 12
+13 13
+14 14
+15 15
+16 16
+17 17
+18 18
+19 19
+20 20
+explain
select col1 f1, col1 f2 from t1 group by f2 order by f2, f1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL idx 5 NULL 20 Using index
@@ -2141,6 +2203,22 @@ INSERT INTO t2(col1, col2) VALUES
(1,20),(2,19),(3,18),(4,17),(5,16),(6,15),(7,14),(8,13),(9,12),(10,11),
(11,10),(12,9),(13,8),(14,7),(15,6),(16,5),(17,4),(18,3),(19,2),(20,1);
explain
+select col1 f1, col2 f2, col1 f3 from t2 group by f1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range NULL idx 5 NULL 7 Using index for group-by
+explain
+select SQL_BIG_RESULT col1 f1, col2 f2, col1 f3 from t2 group by f1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range NULL idx 5 NULL 7 Using index for group-by
+explain
+select col1 f1, col2 f2, col1 f3 from t2 group by f1, f2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index NULL idx 10 NULL 20 Using index
+explain
+select col1 f1, col1 f2 from t2 group by f1, 1+1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range NULL idx 5 NULL 7 Using index for group-by
+explain
select col1 f1, col2 f2, col1 f3 from t2 group by f1, f2, f3+0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL idx 10 NULL 20 Using index; Using temporary; Using filesort
@@ -2167,6 +2245,10 @@ f1 f2 f3
19 2 19
20 1 20
explain
+select col1 f1, col2 f2, col1 f3 from t2 order by f1,f2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index NULL idx 10 NULL 20 Using index
+explain
select col1 f1, col2 f2, col1 f3 from t2 order by f1, f2, f3+0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL idx 10 NULL 20 Using index; Using filesort