From 42802ad66c49b6de11b37c7ea4e4658ccc5a94aa Mon Sep 17 00:00:00 2001
From: Brandon Nesterenko
Date: Wed, 14 Sep 2022 15:08:12 -0600
Subject: MDEV-25616 XA PREPARE event group is not binlogged when..

the only query of the XA transaction, which is on a non-transactional
table, errors out:

XA BEGIN 'x';
--error ER_DUP_ENTRY
INSERT INTO t1 VALUES (1),(1);
XA END 'x';
XA PREPARE 'x';

The binlogging pattern is correctly started as expected with the
errored-out Query or its ROW format events, but there is no empty
XA_prepare_log_event group. The following XA COMMIT 'x'; therefore
should not be logged either, but it is logged anyway.

The bug is fixed by properly maintaining a read-write binlog hton
property and using it to enforce correct binlogging decisions.
Specifically, in the case from the bug description, XA COMMIT won't be
binlogged, whether it is issued in the same connection or externally
after a disconnect. The same continues to apply to an empty XA
transaction that does not change any data in any of the transactional
engines involved.
---
 .../suite/binlog/r/binlog_empty_xa_prepared.result | 108 ++
 .../suite/binlog/t/binlog_empty_xa_prepared.test | 52 +
 .../suite/rpl/include/rpl_xa_empty_transaction.inc | 10 +
 .../include/rpl_xa_empty_transaction_test_case.inc | 131 +++
 .../suite/rpl/r/rpl_xa_empty_transaction.result | 1169 ++++++++++++++++++++
 .../suite/rpl/t/rpl_xa_empty_transaction.cnf | 18 +
 .../suite/rpl/t/rpl_xa_empty_transaction.test | 175 +++
 7 files changed, 1663 insertions(+)
 create mode 100644 mysql-test/suite/rpl/include/rpl_xa_empty_transaction.inc
 create mode 100644 mysql-test/suite/rpl/include/rpl_xa_empty_transaction_test_case.inc
 create mode 100644 mysql-test/suite/rpl/r/rpl_xa_empty_transaction.result
 create mode 100644 mysql-test/suite/rpl/t/rpl_xa_empty_transaction.cnf
 create mode 100644 mysql-test/suite/rpl/t/rpl_xa_empty_transaction.test
(limited to 'mysql-test')

diff --git a/mysql-test/suite/binlog/r/binlog_empty_xa_prepared.result b/mysql-test/suite/binlog/r/binlog_empty_xa_prepared.result
index 9f998e049c0..589570d8300 100644
--- a/mysql-test/suite/binlog/r/binlog_empty_xa_prepared.result
+++ b/mysql-test/suite/binlog/r/binlog_empty_xa_prepared.result
@@ -108,3 +108,111 @@ master-bin.000001 # Gtid # # GTID #-#-#
 master-bin.000001 # Query # # use `test`; DROP SEQUENCE `s` /* generated by server */
 master-bin.000001 # Gtid # # GTID #-#-#
 master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+connect con1,localhost,root,,;
+XA START '1';
+INSERT INTO t1 VALUES (2),(1);
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+SELECT * FROM t1 WHERE a = 2;
+a
+XA END '1';
+XA PREPARE '1';
+disconnect con1;
+connection default;
+XA RECOVER;
+formatID gtrid_length bqual_length data
+1 1 0 1
+XA COMMIT '1';
+ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back
+Must be no XA PREPARE group nor XA completion one:
+include/show_binlog_events.inc
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB
+master-bin.000001 # Gtid # # BEGIN GTID #-#-#
+master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1)
+master-bin.000001 # Table_map # # table_id: # (test.t1)
+master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Gtid # # GTID #-#-#
+master-bin.000001 # Query # # use `test`; CREATE SEQUENCE s
ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # SELECT NEXT VALUE FOR s +master-bin.000001 # Table_map # # table_id: # (test.s) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +master-bin.000001 # Gtid # # XA START X'32',X'',1 GTID #-#-# +master-bin.000001 # Query # # XA END X'32',X'',1 +master-bin.000001 # XA_prepare # # XA PREPARE X'32',X'',1 +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # XA ROLLBACK X'32',X'',1 +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP SEQUENCE `s` /* generated by server */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +DROP TABLE t1; +connect con2,localhost,root,,; +CREATE TABLE tm (a INT PRIMARY KEY) ENGINE=MyISAM; +XA START '1'; +INSERT INTO tm VALUES (1),(1); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +SELECT * FROM tm WHERE a = 2; +a +XA END '1'; +XA PREPARE '1'; +disconnect con2; +connection default; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 1 +XA ROLLBACK '1'; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +Must be no XA PREPARE group nor XA completion one: +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE SEQUENCE s ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # SELECT NEXT VALUE FOR s +master-bin.000001 # Table_map # # table_id: # (test.s) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +master-bin.000001 # Gtid # # XA START X'32',X'',1 GTID #-#-# +master-bin.000001 # Query # # XA END X'32',X'',1 +master-bin.000001 # XA_prepare # # XA PREPARE X'32',X'',1 +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # XA ROLLBACK X'32',X'',1 +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP SEQUENCE `s` /* generated by server */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # INSERT INTO t1 VALUES (1) +master-bin.000001 # Table_map # # table_id: # (test.t1) +master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Xid # # COMMIT /* XID */ 
+master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */ +master-bin.000001 # Gtid # # GTID #-#-# +master-bin.000001 # Query # # use `test`; CREATE TABLE tm (a INT PRIMARY KEY) ENGINE=MyISAM +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Query # # use `test`; INSERT INTO tm VALUES (1),(1) +master-bin.000001 # Query # # COMMIT +DROP TABLE tm; diff --git a/mysql-test/suite/binlog/t/binlog_empty_xa_prepared.test b/mysql-test/suite/binlog/t/binlog_empty_xa_prepared.test index 443feb60627..2890c42a087 100644 --- a/mysql-test/suite/binlog/t/binlog_empty_xa_prepared.test +++ b/mysql-test/suite/binlog/t/binlog_empty_xa_prepared.test @@ -80,3 +80,55 @@ DROP TABLE t1; --echo # Proof of correct logging incl empty XA-PREPARE --source include/show_binlog_events.inc + + +# MDEV-25616 Binlog event for XA COMMIT is generated without matching XA START + +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1); + +--source include/count_sessions.inc +--connect(con1,localhost,root,,) + +XA START '1'; + --error ER_DUP_ENTRY + INSERT INTO t1 VALUES (2),(1); + SELECT * FROM t1 WHERE a = 2; +XA END '1'; +XA PREPARE '1'; + +--disconnect con1 + +--connection default +--source include/wait_until_count_sessions.inc +XA RECOVER; + +--error ER_XA_RBROLLBACK +XA COMMIT '1'; +--echo Must be no XA PREPARE group nor XA completion one: +--source include/show_binlog_events.inc +DROP TABLE t1; + +--source include/count_sessions.inc + +--connect(con2,localhost,root,,) +CREATE TABLE tm (a INT PRIMARY KEY) ENGINE=MyISAM; +XA START '1'; + --error ER_DUP_ENTRY + INSERT INTO tm VALUES (1),(1); + SELECT * FROM tm WHERE a = 2; +XA END '1'; +XA PREPARE '1'; + +--disconnect con2 + +--connection default +--source include/wait_until_count_sessions.inc +XA RECOVER; + +--error ER_XA_RBROLLBACK +XA ROLLBACK '1'; +--echo Must be no XA PREPARE group nor XA completion one: +--source include/show_binlog_events.inc +DROP TABLE tm; + diff --git a/mysql-test/suite/rpl/include/rpl_xa_empty_transaction.inc b/mysql-test/suite/rpl/include/rpl_xa_empty_transaction.inc new file mode 100644 index 00000000000..4cb4fe8962f --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_xa_empty_transaction.inc @@ -0,0 +1,10 @@ +# +# Helper file to run each empty-due-to-err XA transaction test case both with +# and without detaching from the connection when the transaction is prepared. +# + +--let $use_disconnect=0 +--source rpl_xa_empty_transaction_test_case.inc + +--let $use_disconnect=1 +--source rpl_xa_empty_transaction_test_case.inc diff --git a/mysql-test/suite/rpl/include/rpl_xa_empty_transaction_test_case.inc b/mysql-test/suite/rpl/include/rpl_xa_empty_transaction_test_case.inc new file mode 100644 index 00000000000..6368336b8e3 --- /dev/null +++ b/mysql-test/suite/rpl/include/rpl_xa_empty_transaction_test_case.inc @@ -0,0 +1,131 @@ +# +# Helper script to create an XA transaction and validate it was not +# binlogged +# +# Parameters +# $xa_completion_action : The action to end the XA transaction, either +# COMMIT or ROLLBACK +# $trx_statements : A comma separated list specifying how to build +# the statements of the transaction. Each item in +# the list is either T (for transactional) or N +# (for non-transactional). An empty list will not +# add any statements to the transaction. 
+# $use_disconnect : When TRUE, disconnect after preparing the XA +# transaction to test the detach/rollback case +# + +# +# Setup +--let $generic_assert_text= should not binlog XA transaction + +--connection server_1 +--let server_1_datadir=`select @@datadir` + +--connection server_2 +--let server_2_datadir=`select @@datadir` + +--connection server_3 +--let server_3_datadir=`select @@datadir` + +--let assert_file=$MYSQLTEST_VARDIR/tmp/binlog_decoded.out + +--connection server_1 +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +--source include/save_master_gtid.inc + +--connection server_3 +--source include/sync_with_master_gtid.inc + +--connection server_1 + +if ($use_disconnect) +{ + --source include/count_sessions.inc + --connect(con1,localhost,root,,) +} + +XA START 'x'; +--let $_stmt_items= $trx_statements +--let $_ctr= 1 +while($_stmt_items) +{ + --let $_cur_stmt= `SELECT SUBSTRING_INDEX('$_stmt_items', ',', 1)` + --let $_stmt_items= `SELECT LTRIM(SUBSTRING('$_stmt_items', LENGTH('$_cur_stmt') + 2))` + + if (`SELECT strcmp("$_cur_stmt","T") = 0`) + { + --let $target_table= ti + } + + if (`SELECT strcmp("$_cur_stmt","N") = 0`) + { + --let $target_table= tm + } + + --error ER_DUP_ENTRY + --eval INSERT INTO $target_table VALUES ($_ctr),($_ctr); + inc $_ctr; + +} +XA END 'x'; +XA PREPARE 'x'; + +if ($use_disconnect) +{ + --disconnect con1 + --connection server_1 + --source include/wait_until_count_sessions.inc + XA RECOVER; + + --error ER_XA_RBROLLBACK + --eval XA $xa_completion_action 'x'; +} +if (!$use_disconnect) +{ + --eval XA $xa_completion_action 'x'; +} + +--source include/save_master_gtid.inc + +--let binlog_filename= query_get_value(SHOW MASTER STATUS, File, 1) +FLUSH LOGS; + +--echo # MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +--exec $MYSQL_BINLOG $server_1_datadir/$binlog_filename --result-file=$assert_file + +--let assert_text= server_1 $generic_assert_text +--let assert_count= 0 +--let assert_select= XA START|XA END|XA PREPARE|XA COMMIT|XA ROLLBACK +--source include/assert_grep.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc +--let binlog_filename= query_get_value(SHOW MASTER STATUS, File, 1) +FLUSH LOGS; + +--echo # MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +--exec $MYSQL_BINLOG $server_2_datadir/$binlog_filename --result-file=$assert_file + +--let assert_text= server_2 $generic_assert_text +--source include/assert_grep.inc + +--connection server_3 +--source include/sync_with_master_gtid.inc +--let binlog_filename= query_get_value(SHOW MASTER STATUS, File, 1) +FLUSH LOGS; + +--echo # MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +--exec $MYSQL_BINLOG $server_3_datadir/$binlog_filename --result-file=$assert_file + +--let assert_text= server_3 $generic_assert_text +--source include/assert_grep.inc + +# +# Cleanup +--connection server_1 +DROP TABLE ti,tm; +--source include/save_master_gtid.inc + +--connection server_3 +--source include/sync_with_master_gtid.inc diff --git a/mysql-test/suite/rpl/r/rpl_xa_empty_transaction.result b/mysql-test/suite/rpl/r/rpl_xa_empty_transaction.result new file mode 100644 index 00000000000..f3ea53c219a --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_xa_empty_transaction.result @@ -0,0 +1,1169 @@ +include/rpl_init.inc [topology=1->2->3] +connection server_1; +connection server_2; +connection server_3; +connection server_1; +# +# Test Case 1: An XA transaction without any statements 
should not be +# binlogged +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc 
+connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +# +# Test Case 2: An XA transaction consisting of a successfully rolled back +# statement should not be binlogged +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG 
server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +# +# Test Case 3: An XA transaction with a statement that cannot be rolled +# back should be binlogged +connection server_1; +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; 
+include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +Warnings: +Warning 1196 Some non-transactional changed tables couldn't be rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; 
+include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; +# +# Test Case 4: An XA transaction with multiple statements that can all +# be rolled back should not be binlogged +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; 
+connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection 
server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +# +# Test Case 5: A mixed XA transaction consisting of one statement that +# can successfully be rolled back (first statement), and another that +# can not (second statement) should be binlogged +connection server_1; +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: 
Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +Warnings: +Warning 1196 Some non-transactional changed tables couldn't be rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO ti VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; +# +# Test Case 6: A mixed XA transaction consisting of one statement that +# cannot successfully be rolled back (first statement), and another that +# can (second statement) should be binlogged +connection server_1; +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename 
--result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +Warnings: +Warning 1196 Some non-transactional changed tables couldn't be rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection 
server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO ti VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; +# +# Test Case 7: An XA transaction consisting of two failed +# non-transactional statements should be binlogged +connection server_1; +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA COMMIT 'x';; +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' 
for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA COMMIT 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +XA ROLLBACK 'x';; +Warnings: +Warning 1196 Some non-transactional changed tables couldn't be rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connection server_2; +connection server_3; +connection server_1; +CREATE TABLE tm (a INT PRIMARY KEY) engine=myisam; +CREATE TABLE ti (a INT PRIMARY KEY) engine=innodb; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +connect con1,localhost,root,,; +XA START 'x'; +INSERT INTO tm VALUES (1),(1);; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +INSERT INTO tm VALUES (2),(2);; +ERROR 23000: Duplicate entry '2' for key 'PRIMARY' +XA END 'x'; +XA PREPARE 'x'; +disconnect con1; +connection server_1; +XA RECOVER; +formatID gtrid_length bqual_length data +1 1 0 x +XA ROLLBACK 'x';; +ERROR XA100: XA_RBROLLBACK: Transaction branch was rolled back +include/save_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_1_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_1 should not binlog XA transaction] +connection server_2; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# MYSQL_BINLOG server_2_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_2 should not binlog XA transaction] +connection server_3; +include/sync_with_master_gtid.inc +FLUSH LOGS; +# 
MYSQL_BINLOG server_3_datadir/binlog_filename --result-file=assert_file +include/assert_grep.inc [server_3 should not binlog XA transaction] +connection server_1; +DROP TABLE ti,tm; +include/save_master_gtid.inc +connection server_3; +include/sync_with_master_gtid.inc +connection server_1; +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; +connection server_1; +include/rpl_end.inc +# End of rpl_xa_empty_transaction.test diff --git a/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.cnf b/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.cnf new file mode 100644 index 00000000000..92acd0c73a6 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.cnf @@ -0,0 +1,18 @@ +!include include/default_mysqld.cnf + +[mysqld.1] +log-slave-updates +innodb + +[mysqld.2] +log-slave-updates +innodb + +[mysqld.3] +log-slave-updates +innodb + +[ENV] +SERVER_MYPORT_1= @mysqld.1.port +SERVER_MYPORT_2= @mysqld.2.port +SERVER_MYPORT_3= @mysqld.3.port diff --git a/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.test b/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.test new file mode 100644 index 00000000000..61cc0621d5a --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_xa_empty_transaction.test @@ -0,0 +1,175 @@ +# +# Purpose: +# This test ensures consistency in binlogging behavior for XA transactions +# that have all statements error and rollback, effectively leaving an "empty" +# transaction. In such cases, an empty XA transaction should be binlogged. The +# bug reported by MDEV-25616 revealed that an "empty" XA transaction would +# binlog an XA ROLLBACK or XA COMMIT event without a preceding setup, i.e. +# XA START through XA PREPARE. The bug presented differently for XA +# transactions consisting of transactional and non-transactional statements. +# Therefore, this test validates that an entire XA transaction is binlogged +# for different combinations of transactional or non-transactional statements. +# Note that the behavior changes when binlogging empty XA transactions +# depending on the binlog_row_format variables. That is, when the content of +# the transaction consists of errored transactional statements, in row format, +# an empty XA transaction will be binlogged; however, in mixed and statement +# formats, nothing will be written into the binary log. +# +# Methodology: +# Create XA transactions with various combinations of erroring transactional +# or non-transactional statements. The binary log is examined to ensure all +# XA components are written. Chain replication is used, i.e. +# (primary->replica->replica), to ensure replica binlogging is consistent with +# manual execution. The transactional and non-transactional tables use InnoDB +# and MyISAM, respectively. +# +# Parameters +# $expect_transactional_xa_binlog : Boolean indicating whether or not an +# errored transactional statement should result in XA statements written +# into the binary log. 
+# +# References: +# MDEV-25616: Binlog event for XA COMMIT is generated without matching +# XA START, replication aborts +# +--source include/have_log_bin.inc + +--let $rpl_server_count= 3 +--let $rpl_topology= 1->2->3 +--source include/rpl_init.inc + +--connection server_1 +-- source include/have_innodb.inc +--connection server_2 +-- source include/have_innodb.inc +--connection server_3 +-- source include/have_innodb.inc +--connection server_1 + +--echo # +--echo # Test Case 1: An XA transaction without any statements should not be +--echo # binlogged +--let $trx_statements= + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + + +--echo # +--echo # Test Case 2: An XA transaction consisting of a successfully rolled back +--echo # statement should not be binlogged +--let $trx_statements= T + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + + +--echo # +--echo # Test Case 3: An XA transaction with a statement that cannot be rolled +--echo # back should be binlogged + +# TODO: remove work-around MDEV-24654 when fixed. +--connection server_1 +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +--let $trx_statements= N + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + +--connection server_1 +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; + + +--echo # +--echo # Test Case 4: An XA transaction with multiple statements that can all +--echo # be rolled back should not be binlogged +--let $trx_statements= T,T + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + + +--echo # +--echo # Test Case 5: A mixed XA transaction consisting of one statement that +--echo # can successfully be rolled back (first statement), and another that +--echo # can not (second statement) should be binlogged + +--connection server_1 +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +--let $trx_statements= T,N + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + +--connection server_1 +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; + + +--echo # +--echo # Test Case 6: A mixed XA transaction consisting of one statement that +--echo # cannot successfully be rolled back (first statement), and another that +--echo # can (second statement) should be binlogged + +--connection server_1 +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +--let $trx_statements= N,T + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + +--connection server_1 +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; + +--echo # +--echo # Test Case 7: An XA transaction consisting of two failed +--echo # non-transactional statements should be 
binlogged + +--connection server_1 +set @sav_binlog_format = @@binlog_format; +set @@binlog_format = row; +set @@global.binlog_format = row; +--let $trx_statements= N,N + +--let $xa_completion_action= COMMIT +--source include/rpl_xa_empty_transaction.inc + +--let $xa_completion_action= ROLLBACK +--source include/rpl_xa_empty_transaction.inc + +--connection server_1 +set @@binlog_format = @sav_binlog_format; +set @@global.binlog_format = @sav_binlog_format; + +# +# Cleanup +--connection server_1 +--source include/rpl_end.inc + +--echo # End of rpl_xa_empty_transaction.test -- cgit v1.2.1 From 8c5d323326d9d527e9a5e08c69eb6085953eb130 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Tue, 25 Oct 2022 07:33:35 +0300 Subject: Additional fixes * galera_many_rows : reduce the time used * wsrep_thd.cc : remove incorrect assertion * disabled.def : disable failing test cases --- mysql-test/suite/galera/disabled.def | 7 +++++-- mysql-test/suite/galera/r/galera_many_rows.result | 14 +++++++------- mysql-test/suite/galera/t/galera_many_rows.test | 6 +++--- 3 files changed, 15 insertions(+), 12 deletions(-) (limited to 'mysql-test') diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index 7902f7ef537..d9f4924756e 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -25,6 +25,9 @@ galera_var_ignore_apply_errors : MDEV-26770 galera_var_ignore_apply_errors fails galera_var_node_address : MDEV-20485 Galera test failure galera_var_retry_autocommit: MDEV-18181 Galera test failure on galera.galera_var_retry_autocommit partition : MDEV-19958 Galera test failure on galera.partition -query_cache: MDEV-15805 Test failure on galera.query_cache -versioning_trx_id: MDEV-18590: galera.versioning_trx_id: Test failure: mysqltest: Result content mismatch +query_cache : MDEV-15805 Test failure on galera.query_cache +versioning_trx_id : MDEV-18590: galera.versioning_trx_id: Test failure: mysqltest: Result content mismatch galera_bf_abort_at_after_statement : Unstable +galera.MW-284 : MDEV-29861: Galera test case hangs +galera.galera_binlog_checksum : MDEV-29861: Galera test case hangs +galera_var_notify_ssl_ipv6 : MDEV-29861: Galera test case hangs diff --git a/mysql-test/suite/galera/r/galera_many_rows.result b/mysql-test/suite/galera/r/galera_many_rows.result index 566bc59f8ab..b34c2484aea 100644 --- a/mysql-test/suite/galera/r/galera_many_rows.result +++ b/mysql-test/suite/galera/r/galera_many_rows.result @@ -5,32 +5,32 @@ connection node_2; connection node_1; SET SESSION innodb_lock_wait_timeout=600; SET SESSION lock_wait_timeout=600; -CREATE TABLE ten (f1 INTEGER) engine=InnoDB; +CREATE TABLE ten (f1 INTEGER NOT NULL PRIMARY KEY) engine=InnoDB; INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) Engine=InnoDB; -INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5; +INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4; connection node_2; SET SESSION wsrep_sync_wait = 0; SET SESSION wsrep_sync_wait = 15; SET GLOBAL wsrep_provider_options = 'repl.causal_read_timeout=PT1H'; SELECT COUNT(*) FROM t1; COUNT(*) -100000 -INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5; +10000 +INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4; connection node_1; SELECT COUNT(*) FROM t1; COUNT(*) -200000 +20000 UPDATE t1 SET 
f2 = 1; connection node_2; SELECT COUNT(*) FROM t1 WHERE f2 = 1; COUNT(*) -200000 +20000 connection node_1; START TRANSACTION; SELECT COUNT(*) FROM t1; COUNT(*) -200000 +20000 UPDATE t1 SET f2 = 3; connection node_2; START TRANSACTION; diff --git a/mysql-test/suite/galera/t/galera_many_rows.test b/mysql-test/suite/galera/t/galera_many_rows.test index bc9e99db8da..3623b3f33b0 100644 --- a/mysql-test/suite/galera/t/galera_many_rows.test +++ b/mysql-test/suite/galera/t/galera_many_rows.test @@ -10,11 +10,11 @@ SET SESSION innodb_lock_wait_timeout=600; SET SESSION lock_wait_timeout=600; -CREATE TABLE ten (f1 INTEGER) engine=InnoDB; +CREATE TABLE ten (f1 INTEGER NOT NULL PRIMARY KEY) engine=InnoDB; INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) Engine=InnoDB; -INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5; +INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4; --connection node_2 SET SESSION wsrep_sync_wait = 0; @@ -24,7 +24,7 @@ SET SESSION wsrep_sync_wait = 15; SET GLOBAL wsrep_provider_options = 'repl.causal_read_timeout=PT1H'; SELECT COUNT(*) FROM t1; -INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5; +INSERT INTO t1 (f2) SELECT a1.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4; --connection node_1 SELECT COUNT(*) FROM t1; -- cgit v1.2.1 From 32158be720b85a3ae0e0eeebe1277c36f86dca38 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Fri, 21 Oct 2022 19:50:07 +0200 Subject: MDEV-29811 server advertises ssl even if it's unusable. Abort startup, if SSL setup fails. Also, for the server always check that certificate matches private key (even if ssl_cert is not set, OpenSSL will try to use default one) --- mysql-test/main/bad_startup_options.result | 1 + mysql-test/main/bad_startup_options.test | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) create mode 100644 mysql-test/main/bad_startup_options.result create mode 100644 mysql-test/main/bad_startup_options.test (limited to 'mysql-test') diff --git a/mysql-test/main/bad_startup_options.result b/mysql-test/main/bad_startup_options.result new file mode 100644 index 00000000000..72c05cf5235 --- /dev/null +++ b/mysql-test/main/bad_startup_options.result @@ -0,0 +1 @@ +FOUND 1 /\[ERROR\] SSL error: Unable to get certificate/ in errorlog.err diff --git a/mysql-test/main/bad_startup_options.test b/mysql-test/main/bad_startup_options.test new file mode 100644 index 00000000000..bd0b6283854 --- /dev/null +++ b/mysql-test/main/bad_startup_options.test @@ -0,0 +1,19 @@ +--source include/not_embedded.inc +--source include/have_ssl_communication.inc + +--source include/shutdown_mysqld.inc + +# Try to start the server, with bad values for some options. 
+# Make sure, the starts fails, and expected message is in the error log + +--let errorlog=$MYSQL_TMP_DIR/errorlog.err +--let SEARCH_FILE=$errorlog + +# Bad ssl-cert +--error 1 +--exec $MYSQLD --defaults-group-suffix=.1 --defaults-file=$MYSQLTEST_VARDIR/my.cnf --ssl-cert=bad --log-error=$errorlog +--let SEARCH_PATTERN=\[ERROR\] SSL error: Unable to get certificate +--source include/search_pattern_in_file.inc +--remove_file $SEARCH_FILE + +--source include/start_mysqld.inc -- cgit v1.2.1 From 1ff476b415cacc616f68af542b75793d064367ae Mon Sep 17 00:00:00 2001 From: Lawrin Novitsky Date: Mon, 12 Sep 2022 14:39:12 +0200 Subject: MDEV-29490 Renaming internally used client API to avoid name conflicts with C/C. The patch introduces mariadb_capi_rename.h which is included into mysql.h. The hew header contains macro definitions for the names being renamed. In versions 10.6+(i.e. where sql service exists) the renaming condition in the mariadb_capi_rename.h should be added with && !defined(MYSQL_DYNAMIC_PLUGIN) and look like The patch also contains removal of mysql.h from the api check. Disabling false_duper-6543 test for embedded. ha_federated.so uses C API. C API functions are being renamed in the server, but not renamed in embedded, since embedded server library should have proper C API, as expected by programs using it. Thus the same ha_federated.so cannot work both for server and embedded server library. As all federated tests are already disabled for embedded, federated isn't supposed to work for embedded anyway, and thus the test is being disabled. --- mysql-test/suite/plugins/t/false_dupes-6543.test | 1 + 1 file changed, 1 insertion(+) (limited to 'mysql-test') diff --git a/mysql-test/suite/plugins/t/false_dupes-6543.test b/mysql-test/suite/plugins/t/false_dupes-6543.test index ebdbe00e47c..ca278685967 100644 --- a/mysql-test/suite/plugins/t/false_dupes-6543.test +++ b/mysql-test/suite/plugins/t/false_dupes-6543.test @@ -1,3 +1,4 @@ +source include/not_embedded.inc; # # MDEV-6543 Crash if enable 'federatedx' when 'federated' plugin already enabled, and vice-versa # -- cgit v1.2.1 From f1bbc1cd19d0d81fee5433efcb570a8845172241 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Tue, 25 Oct 2022 11:53:39 +0400 Subject: MDEV-28545 MyISAM reorganize partition corrupt older table format The ALTER related code cannot do at the same time both: - modify partitions - change column data types Explicit changing of a column data type together with a partition change is prohibited by the parter, so this is not allowed and returns a syntax error: ALTER TABLE t MODIFY ts BIGINT, DROP PARTITION p1; This fix additionally disables implicit data type upgrade (e.g. from "MariaDB 5.3 TIME" to "MySQL 5.6 TIME", or the other way around according to the current mysql56_temporal_format) in case of an ALTER modifying partitions, e.g.: ALTER TABLE t DROP PARTITION p1; In such commands now only the partition change happens, while the data types stay unchanged. One can additionally run: ALTER TABLE t FORCE; either before or after the ALTER modifying partitions to upgrade data types according to mysql56_temporal_format. 
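For illustration only, the behaviour described above condensed into a few statements; this mirrors the new partition_alter test case added below (table, column and partition values are taken from that test, nothing here is an additional interface):

SET GLOBAL mysql56_temporal_format=OFF;
CREATE TABLE t (ts timestamp, KEY (ts)) ENGINE=MyISAM
PARTITION BY RANGE (unix_timestamp(ts)) (
  PARTITION p1 VALUES LESS THAN (1645398000),
  PARTITION pn VALUES LESS THAN MAXVALUE
);
SET GLOBAL mysql56_temporal_format=ON;
# With this fix only the partition change happens; the ts column silently
# keeps the old temporal storage format:
ALTER TABLE t DROP PARTITION p1;
# An explicit rebuild can still be used to upgrade the data types
# according to the current mysql56_temporal_format:
ALTER TABLE t FORCE;
DROP TABLE t;
SET GLOBAL mysql56_temporal_format=DEFAULT;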
--- mysql-test/main/partition_alter.result | 23 +++++++++++++++++++++++ mysql-test/main/partition_alter.test | 26 ++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) (limited to 'mysql-test') diff --git a/mysql-test/main/partition_alter.result b/mysql-test/main/partition_alter.result index 2b0a09d2653..37e975deb0e 100644 --- a/mysql-test/main/partition_alter.result +++ b/mysql-test/main/partition_alter.result @@ -212,3 +212,26 @@ test.t check status OK delete from t order by b limit 1; drop table t; # End of 10.3 tests +# +# Start of 10.4 tests +# +# +# MDEV-28545 MyISAM reorganize partition corrupt older table format +# +SET GLOBAL mysql56_temporal_format=OFF; +CREATE TABLE t (ts timestamp, KEY (ts)) ENGINE=MyISAM +PARTITION BY RANGE (unix_timestamp(ts)) ( +PARTITION p1 VALUES LESS THAN (1645398000), +PARTITION pn VALUES LESS THAN MAXVALUE +); +SET GLOBAL mysql56_temporal_format=ON; +FLUSH TABLES; +ALTER TABLE t DROP PARTITION p1; +CHECK TABLE t; +Table Op Msg_type Msg_text +test.t check status OK +DROP TABLE t; +SET GLOBAL mysql56_temporal_format=DEFAULT; +# +# End of 10.4 tests +# diff --git a/mysql-test/main/partition_alter.test b/mysql-test/main/partition_alter.test index 7a80779e386..23ad5ece0e4 100644 --- a/mysql-test/main/partition_alter.test +++ b/mysql-test/main/partition_alter.test @@ -197,3 +197,29 @@ delete from t order by b limit 1; drop table t; --echo # End of 10.3 tests + +--echo # +--echo # Start of 10.4 tests +--echo # + +--echo # +--echo # MDEV-28545 MyISAM reorganize partition corrupt older table format +--echo # + +SET GLOBAL mysql56_temporal_format=OFF; +CREATE TABLE t (ts timestamp, KEY (ts)) ENGINE=MyISAM +PARTITION BY RANGE (unix_timestamp(ts)) ( + PARTITION p1 VALUES LESS THAN (1645398000), + PARTITION pn VALUES LESS THAN MAXVALUE +); + +SET GLOBAL mysql56_temporal_format=ON; +FLUSH TABLES; +ALTER TABLE t DROP PARTITION p1; +CHECK TABLE t; +DROP TABLE t; +SET GLOBAL mysql56_temporal_format=DEFAULT; + +--echo # +--echo # End of 10.4 tests +--echo # -- cgit v1.2.1 From 58cd0bd59ef011be54f162237f2ff017c3148e7b Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Mon, 17 Oct 2022 16:44:10 -0700 Subject: MDEV-28846 Poor performance when rowid filter contains no elements When a range rowid filter was used with an index ref access the cost of accessing the index entries for the records rejected by the filter was not taken into account. For a ref access by an index with big average number of records per key this led to poor execution plans if selectivity of the used filter was high. The patch resolves this problem. It also introduces a minor optimization that skips look-ups into a filter that turns out to be empty. With this patch the output of ANALYZE stmt reports the number of look-ups into used rowid filters. The patch also back-ports from 10.5 the code that properly sets the field TABLE::file::table for opened temporary tables. The test cases that were supposed to use rowid filters have been adjusted in order to use similar execution plans after this fix. 
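As a rough reading guide for the extended ANALYZE output (the sample figures are the ones produced by the adjusted rowid_filter test below; they depend on the collected statistics and are not guaranteed values):

ANALYZE FORMAT=JSON
SELECT * FROM t1 WHERE nm LIKE '75%' AND fl2 = 0;
# The "rowid_filter" node of the plan now also reports, for example:
#   "rows": 115             # estimated rows used to fill the filter container
#   "r_lookups": 100        # how many times the ref access probed the filter
#   "r_selectivity_pct": 2  # share of those probes that passed the filter
# If the range that fills the filter returns no rows, the filter is not
# probed at all, so "r_lookups" is reported as 0.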
Approved by Oleksandr Byelkin --- mysql-test/include/rowid_filter_debug_kill.inc | 9 +- mysql-test/main/join_cache.result | 24 +- mysql-test/main/join_nested_jcl6.result | 2 +- mysql-test/main/rowid_filter.result | 494 +++++++++++- mysql-test/main/rowid_filter.test | 210 ++++- mysql-test/main/rowid_filter_innodb.result | 968 +++++++++++++++++++++-- mysql-test/main/rowid_filter_innodb.test | 98 ++- mysql-test/main/rowid_filter_innodb_debug.result | 20 +- mysql-test/main/rowid_filter_innodb_debug.test | 8 + mysql-test/main/rowid_filter_myisam_debug.result | 10 +- mysql-test/main/select.result | 14 +- mysql-test/main/select_jcl6.result | 14 +- mysql-test/main/select_pkeycache.result | 14 +- mysql-test/main/subselect2.result | 2 +- 14 files changed, 1724 insertions(+), 163 deletions(-) (limited to 'mysql-test') diff --git a/mysql-test/include/rowid_filter_debug_kill.inc b/mysql-test/include/rowid_filter_debug_kill.inc index 6a8c5d3f70d..c701d206297 100644 --- a/mysql-test/include/rowid_filter_debug_kill.inc +++ b/mysql-test/include/rowid_filter_debug_kill.inc @@ -9,9 +9,6 @@ create table t0(a int); insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -create table t1(a int); -insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; - # 100 rows create table t2(a int); insert into t2 select A.a + B.a* 10 from t0 A, t0 B; @@ -30,10 +27,10 @@ where table_schema=database() and table_name='t3'; insert into t3 select A.a, - A.a, + B.a, 'filler-data-filler-data' from - t0 A, t1 B; + t2 A, t2 B; analyze table t2,t3; @@ -63,6 +60,6 @@ disconnect con1; reap; set debug_sync='RESET'; -drop table t0,t1,t2,t3; +drop table t0,t2,t3; --source include/wait_until_count_sessions.inc diff --git a/mysql-test/main/join_cache.result b/mysql-test/main/join_cache.result index 1837576e719..f337ab6509b 100644 --- a/mysql-test/main/join_cache.result +++ b/mysql-test/main/join_cache.result @@ -853,7 +853,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join) -1 SIMPLE CountryLanguage hash_ALL|filter PRIMARY,Percentage #hash#PRIMARY|Percentage 3|4 world.City.Country 984 (19%) Using where; Using join buffer (flat, BNLH join); Using rowid filter +1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (flat, BNLH join) SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -1053,7 +1053,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join) -1 SIMPLE CountryLanguage hash_ALL|filter PRIMARY,Percentage #hash#PRIMARY|Percentage 3|4 world.City.Country 984 (19%) Using where; Using join buffer (incremental, BNLH join); Using rowid filter +1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (incremental, BNLH join) SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -1312,7 +1312,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type 
possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -1509,7 +1509,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -1706,7 +1706,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -1903,7 +1903,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2104,7 +2104,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table 
type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join) -1 SIMPLE CountryLanguage hash_ALL|filter PRIMARY,Percentage #hash#PRIMARY|Percentage 3|4 world.City.Country 984 (19%) Using where; Using join buffer (flat, BNLH join); Using rowid filter +1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (flat, BNLH join) SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2208,7 +2208,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join) -1 SIMPLE CountryLanguage hash_ALL|filter PRIMARY,Percentage #hash#PRIMARY|Percentage 3|4 world.City.Country 984 (19%) Using where; Using join buffer (incremental, BNLH join); Using rowid filter +1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (incremental, BNLH join) SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2312,7 +2312,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2416,7 +2416,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2520,7 +2520,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered 
Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND @@ -2624,7 +2624,7 @@ LENGTH(Language) < LENGTH(City.Name) - 2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where 1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan -1 SIMPLE CountryLanguage ref|filter PRIMARY,Percentage PRIMARY|Percentage 3|4 world.City.Country 4 (19%) Using index condition(BKA); Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan SELECT City.Name, Country.Name, CountryLanguage.Language FROM City,Country,CountryLanguage WHERE City.Country=Country.Code AND diff --git a/mysql-test/main/join_nested_jcl6.result b/mysql-test/main/join_nested_jcl6.result index 3293f20aa17..26fa772dfd1 100644 --- a/mysql-test/main/join_nested_jcl6.result +++ b/mysql-test/main/join_nested_jcl6.result @@ -2033,7 +2033,7 @@ ON t6.b >= 2 AND t5.b=t7.b AND (t8.a > 0 OR t8.c IS NULL) AND t6.a>0 AND t7.a>0; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t5 ALL NULL NULL NULL NULL 3 -1 SIMPLE t7 ref|filter PRIMARY,b_i b_i|PRIMARY 5|4 test.t5.b 2 (29%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE t7 ref PRIMARY,b_i b_i 5 test.t5.b 2 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan 1 SIMPLE t6 range PRIMARY,b_i PRIMARY 4 NULL 3 Using where; Rowid-ordered scan; Using join buffer (incremental, BNL join) 1 SIMPLE t8 ref b_i b_i 5 test.t5.b 2 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan SELECT t5.a,t5.b,t6.a,t6.b,t7.a,t7.b,t8.a,t8.b diff --git a/mysql-test/main/rowid_filter.result b/mysql-test/main/rowid_filter.result index 2a014b04a90..0a7497f1055 100644 --- a/mysql-test/main/rowid_filter.result +++ b/mysql-test/main/rowid_filter.result @@ -128,6 +128,7 @@ ANALYZE "rows": 702, "selectivity_pct": 11.69, "r_rows": 605, + "r_lookups": 510, "r_selectivity_pct": 11.765, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -435,6 +436,7 @@ ANALYZE "rows": 69, "selectivity_pct": 4.6, "r_rows": 71, + "r_lookups": 96, "r_selectivity_pct": 10.417, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -686,6 +688,7 @@ ANALYZE "rows": 702, "selectivity_pct": 11.69, "r_rows": 605, + "r_lookups": 510, "r_selectivity_pct": 11.765, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -715,6 +718,7 @@ ANALYZE "rows": 139, "selectivity_pct": 9.2667, "r_rows": 144, + "r_lookups": 59, "r_selectivity_pct": 25.424, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -887,7 +891,7 @@ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND 
o_totalprice between 200000 and 230000; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 69 Using index condition -1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (8%) Using where; Using rowid filter +1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (8%) Using where; Using rowid filter set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice FROM orders JOIN lineitem ON o_orderkey=l_orderkey WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND @@ -916,7 +920,7 @@ EXPLAIN "i_l_orderkey", "i_l_orderkey_quantity" ], - "key": "PRIMARY", + "key": "i_l_orderkey", "key_length": "4", "used_key_parts": ["l_orderkey"], "ref": ["dbt3_s001.orders.o_orderkey"], @@ -940,7 +944,7 @@ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND o_totalprice between 200000 and 230000; id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra 1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 69 71.00 100.00 100.00 Using index condition -1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (8%) 0.52 (7%) 8.48 100.00 Using where; Using rowid filter +1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (8%) 0.52 (7%) 8.48 100.00 Using where; Using rowid filter set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice FROM orders JOIN lineitem ON o_orderkey=l_orderkey WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND @@ -975,7 +979,7 @@ ANALYZE "i_l_orderkey", "i_l_orderkey_quantity" ], - "key": "PRIMARY", + "key": "i_l_orderkey", "key_length": "4", "used_key_parts": ["l_orderkey"], "ref": ["dbt3_s001.orders.o_orderkey"], @@ -987,6 +991,7 @@ ANALYZE "rows": 509, "selectivity_pct": 8.4763, "r_rows": 510, + "r_lookups": 476, "r_selectivity_pct": 7.7731, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -2019,7 +2024,7 @@ EXPLAIN EXTENDED SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING -2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL no matching row in const table +2 SUBQUERY t2 ref i1,i2 i1 5 const 1 100.00 Using index condition; Using where Warnings: Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk` from `test`.`t1` having 0 DROP TABLE t1,t2; @@ -2028,7 +2033,7 @@ DROP TABLE t1,t2; # that uses in expensive subquery # CREATE TABLE t1 ( -pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(b1) +pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(a1), KEY(b1) ) ENGINE=MyISAM; INSERT INTO t1 VALUES (10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'), @@ -2047,21 +2052,31 @@ INSERT INTO t1 VALUES (101,0,'u'),(102,7,'r'),(103,2,'x'),(104,8,'e'),(105,8,'i'),(106,5,'q'), (107,8,'z'),(108,3,'k'),(109,65,NULL); CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM; -INSERT INTO t2 VALUES (1,1,'x'); +INSERT INTO t2 VALUES (1,1,'i'); 
INSERT INTO t2 SELECT * FROM t1; -SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +INSERT INTO t1 SELECT pk1+200, a1, b1 FROM t1; +INSERT INTO t1 SELECT pk1+400, a1, b1 FROM t1; +ANALYZE TABLE t1,t2 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK +SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); pk1 a1 b1 pk2 a2 b2 -65 2 a 109 65 NULL -EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +17 1 f 16 1 j +37 3 g 36 3 a +105 8 i 104 8 e +EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 101 100.00 Using where -1 PRIMARY t1 eq_ref|filter PRIMARY,b1 PRIMARY|b1 4|4 test.t2.a2 1 (87%) 87.00 Using where; Using rowid filter +1 PRIMARY t1 ref|filter a1,b1 a1|b1 5|4 test.t2.a2 36 (29%) 28.75 Using where; Using rowid filter 2 SUBQUERY t2 range PRIMARY PRIMARY 4 NULL 1 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`pk1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t2`.`a2` <> `test`.`t2`.`pk2` -EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t1`.`pk1` + 1 = `test`.`t2`.`pk2` + 2 +EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); EXPLAIN { @@ -2072,27 +2087,27 @@ EXPLAIN "access_type": "ALL", "rows": 101, "filtered": 100, - "attached_condition": "t2.a2 <> t2.pk2 and t2.a2 is not null" + "attached_condition": "t2.a2 is not null" }, "table": { "table_name": "t1", - "access_type": "eq_ref", - "possible_keys": ["PRIMARY", "b1"], - "key": "PRIMARY", - "key_length": "4", - "used_key_parts": ["pk1"], + "access_type": "ref", + "possible_keys": ["a1", "b1"], + "key": "a1", + "key_length": "5", + "used_key_parts": ["a1"], "ref": ["test.t2.a2"], "rowid_filter": { "range": { "key": "b1", "used_key_parts": ["b1"] }, - "rows": 87, - "selectivity_pct": 87 + "rows": 115, + "selectivity_pct": 28.75 }, - "rows": 1, - "filtered": 87, - "attached_condition": "t1.b1 <= (subquery#2)" + "rows": 36, + "filtered": 28.75, + "attached_condition": "t1.b1 <= (subquery#2) and t1.pk1 + 1 = t2.pk2 + 2" }, "subqueries": [ { @@ -2159,13 +2174,442 @@ set @save_optimizer_switch= @@optimizer_switch; SET @@optimizer_switch="index_merge_sort_union=OFF"; CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b)); INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 
SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +ANALYZE table t1 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK explain SELECT * FROM t1 WHERE a > 0 AND b=0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref|filter a,b b|a 5|5 const 2 (14%) Using where; Using rowid filter +1 SIMPLE t1 ref|filter a,b b|a 5|5 const 151 (17%) Using where; Using rowid filter SELECT * FROM t1 WHERE a > 0 AND b=0; a b 1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 drop table t1; SET @@optimizer_switch=@save_optimizer_switch; +# +# MDEV-28846: Poor performance when rowid filter contains no elements +# +create table t1 ( +pk int primary key auto_increment, +nm varchar(32), +fl1 tinyint default 0, +fl2 tinyint default 0, +index idx1(nm, fl1), +index idx2(fl2) +) engine=myisam; +create table name ( +pk int primary key auto_increment, +nm bigint +) engine=myisam; +create table flag2 ( +pk int primary key auto_increment, +fl2 tinyint +) engine=myisam; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '500%' as a; +a +500% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '500%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +analyze format=json +select * from t1 where nm like '500%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "35", + "used_key_parts": ["nm"], + "r_loops": 1, + "rows": 1, + "r_rows": 1, + "r_total_time_ms": "REPLACED", + "filtered": 49.2, + "r_filtered": 100, + "index_condition": "t1.nm like '500%'", + "attached_condition": "t1.fl2 = 0" + } + } +} +select * from t1 where nm like '500%' AND fl2 = 0; +pk nm fl1 fl2 +517 500 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +set optimizer_switch='rowid_filter=off'; +explain +select * from t1 where nm like '500%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +analyze format=json +select * from t1 where nm like '500%' AND 
fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "35", + "used_key_parts": ["nm"], + "r_loops": 1, + "rows": 1, + "r_rows": 1, + "r_total_time_ms": "REPLACED", + "filtered": 49.2, + "r_filtered": 100, + "index_condition": "t1.nm like '500%'", + "attached_condition": "t1.fl2 = 0" + } + } +} +select * from t1 where nm like '500%' AND fl2 = 0; +pk nm fl1 fl2 +517 500 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '607%' as a; +a +607% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '607%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +select * from t1 where nm like '607%' AND fl2 = 0; +pk nm fl1 fl2 +721 607 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 100 from seq_1_to_10000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '75%' as a; +a +75% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '75%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref|filter idx1,idx2 idx2|idx1 2|35 const 55 (1%) Using where; Using rowid filter +analyze format=json +select * from t1 where nm like '75%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "2", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 115, + "selectivity_pct": 1.15, + "r_rows": 111, + "r_lookups": 100, + "r_selectivity_pct": 2, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 55, + "r_rows": 2, + "r_total_time_ms": "REPLACED", + "filtered": 1.15, + "r_filtered": 100, + "attached_condition": "t1.nm like '75%'" + } + } +} +select * from t1 where nm like '75%' AND fl2 = 0; +pk nm fl1 fl2 +4543 7503 0 0 +7373 7518 0 0 +drop table name, flag2; +drop table t1; +create table t1 ( +pk int primary key auto_increment, +nm varchar(32), +fl1 tinyint default 0, +fl2 int default 0, +index idx1(nm, fl1), +index idx2(fl2) +) engine=myisam; +create table name ( +pk int primary key auto_increment, +nm bigint +) engine=myisam; +create table flag2 ( +pk int primary key auto_increment, +fl2 int +) engine=myisam; +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 10 
from seq_1_to_10000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +pk nm fl1 fl2 +analyze format=json select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "5", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 44, + "selectivity_pct": 0.44, + "r_rows": 44, + "r_lookups": 1000, + "r_selectivity_pct": 0, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 921, + "r_rows": 0, + "r_total_time_ms": "REPLACED", + "filtered": 0.44, + "r_filtered": 100, + "attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or 
t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'" + } + } +} +create table t0 select * from t1 where nm like '34%'; +delete from t1 using t1,t0 where t1.nm=t0.nm; +analyze format=json select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "5", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 44, + "selectivity_pct": 0.44, + "r_rows": 0, + "r_lookups": 0, + "r_selectivity_pct": 0, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 911, + "r_rows": 0, + "filtered": 0.44, + "r_filtered": 100, + "attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'" + } + } +} +drop table t0; +set optimizer_switch='rowid_filter=default'; +drop table name, flag2; +drop table t1; set @@use_stat_tables=@save_use_stat_tables; diff --git a/mysql-test/main/rowid_filter.test b/mysql-test/main/rowid_filter.test index a68c32cf0de..1dd99097556 100644 --- a/mysql-test/main/rowid_filter.test +++ b/mysql-test/main/rowid_filter.test @@ -320,7 +320,7 @@ DROP TABLE t1,t2; --echo # CREATE TABLE t1 ( - pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(b1) + pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(a1), KEY(b1) ) ENGINE=MyISAM; INSERT INTO t1 VALUES (10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'), @@ -340,11 +340,16 @@ INSERT INTO t1 
VALUES (107,8,'z'),(108,3,'k'),(109,65,NULL); CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM; -INSERT INTO t2 VALUES (1,1,'x'); +INSERT INTO t2 VALUES (1,1,'i'); INSERT INTO t2 SELECT * FROM t1; +INSERT INTO t1 SELECT pk1+200, a1, b1 FROM t1; +INSERT INTO t1 SELECT pk1+400, a1, b1 FROM t1; + +ANALYZE TABLE t1,t2 PERSISTENT FOR ALL; + let $q= -SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); eval $q; @@ -399,6 +404,15 @@ set @save_optimizer_switch= @@optimizer_switch; SET @@optimizer_switch="index_merge_sort_union=OFF"; CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b)); INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; + +ANALYZE table t1 PERSISTENT FOR ALL; + explain SELECT * FROM t1 WHERE a > 0 AND b=0; SELECT * FROM t1 WHERE a > 0 AND b=0; @@ -406,4 +420,194 @@ drop table t1; SET @@optimizer_switch=@save_optimizer_switch; +--echo # +--echo # MDEV-28846: Poor performance when rowid filter contains no elements +--echo # + +--source include/have_sequence.inc + +create table t1 ( + pk int primary key auto_increment, + nm varchar(32), + fl1 tinyint default 0, + fl2 tinyint default 0, + index idx1(nm, fl1), + index idx2(fl2) +) engine=myisam; + +create table name ( + pk int primary key auto_increment, + nm bigint +) engine=myisam; + +create table flag2 ( + pk int primary key auto_increment, + fl2 tinyint +) engine=myisam; + +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); + +insert into t1(nm,fl2) + select nm, fl2 from name, flag2 where name.pk = flag2.pk; + +analyze table t1 persistent for all; + +let $a= +`select concat((select nm from t1 where fl2=0 order by RAND(13) limit 1),'%')`; +eval select '$a' as a; + +set optimizer_switch='rowid_filter=on'; +eval +explain +select * from t1 where nm like '$a' AND fl2 = 0; +--source include/analyze-format.inc +eval +analyze format=json +select * from t1 where nm like '$a' AND fl2 = 0; +eval +select * from t1 where nm like '$a' AND fl2 = 0; + +truncate table name; +truncate table flag2; +truncate table t1; + +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); + +insert into t1(nm,fl2) + select nm, fl2 from name, flag2 where name.pk = flag2.pk; + +analyze table t1 persistent for all; + +set optimizer_switch='rowid_filter=off'; +eval +explain +select * from t1 where nm like '$a' AND fl2 = 0; +--source include/analyze-format.inc +eval +analyze format=json +select * from t1 where nm like '$a' AND fl2 = 0; +eval +select * from t1 where nm like '$a' AND fl2 = 0; + +truncate table name; +truncate table flag2; +truncate table t1; + +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_1000 order by rand(19); + +insert into t1(nm,fl2) + select nm, fl2 from name, flag2 where name.pk = flag2.pk; + +analyze table t1 persistent for all; + +let $a= +`select concat((select nm from t1 where fl2=0 order by RAND(13) limit 1),'%')`; +eval select '$a' as a; + +set optimizer_switch='rowid_filter=on'; +eval +explain +select 
* from t1 where nm like '$a' AND fl2 = 0; +eval +select * from t1 where nm like '$a' AND fl2 = 0; + +truncate table name; +truncate table flag2; +truncate table t1; + +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 100 from seq_1_to_10000 order by rand(19); + +insert into t1(nm,fl2) + select nm, fl2 from name, flag2 where name.pk = flag2.pk; + +analyze table t1 persistent for all; + +let $a= +`select concat(left((select nm from t1 where fl2=0 order by RAND(13) limit 1),2),'%')`; +eval select '$a' as a; + +set optimizer_switch='rowid_filter=on'; +eval +explain +select * from t1 where nm like '$a' AND fl2 = 0; +--source include/analyze-format.inc +eval +analyze format=json +select * from t1 where nm like '$a' AND fl2 = 0; +eval +select * from t1 where nm like '$a' AND fl2 = 0; + +drop table name, flag2; +drop table t1; + +# This test shows that if the container is empty there are no lookups into it + +create table t1 ( + pk int primary key auto_increment, + nm varchar(32), + fl1 tinyint default 0, + fl2 int default 0, + index idx1(nm, fl1), + index idx2(fl2) +) engine=myisam; + +create table name ( + pk int primary key auto_increment, + nm bigint +) engine=myisam; + +create table flag2 ( + pk int primary key auto_increment, + fl2 int +) engine=myisam; + +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_10000 order by rand(19); + +insert into t1(nm,fl2) + select nm, fl2 from name, flag2 where name.pk = flag2.pk; + +analyze table t1 persistent for all; + +let $q= +select * from t1 +where +( + nm like '3400%' or nm like '3402%' or nm like '3403%' or + nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or + nm like '3409%' or + nm like '3411%' or nm like '3412%' or nm like '3413%' or + nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or + nm like '3418%' or nm like '3419%' or + nm like '3421%' or nm like '3422%' or nm like '3423%' or + nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or + nm like '3428%' or nm like '3429%' or + nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or + nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or + nm like '3439%' or + nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or + nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or + nm like '3448%' +) and fl2 = 0; + +eval $q; +--source include/analyze-format.inc +eval analyze format=json $q; + +create table t0 select * from t1 where nm like '34%'; +delete from t1 using t1,t0 where t1.nm=t0.nm; +--source include/analyze-format.inc +eval analyze format=json $q; + +drop table t0; + +set optimizer_switch='rowid_filter=default'; + +drop table name, flag2; +drop table t1; + set @@use_stat_tables=@save_use_stat_tables; diff --git a/mysql-test/main/rowid_filter_innodb.result b/mysql-test/main/rowid_filter_innodb.result index b6be3d32aa6..d7f1fe4a0d3 100644 --- a/mysql-test/main/rowid_filter_innodb.result +++ b/mysql-test/main/rowid_filter_innodb.result @@ -129,6 +129,7 @@ ANALYZE "rows": 605, "selectivity_pct": 10.075, "r_rows": 605, + "r_lookups": 510, "r_selectivity_pct": 11.765, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" @@ -1948,7 +1949,7 @@ EXPLAIN EXTENDED SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3); id select_type table type possible_keys key key_len ref rows 
filtered Extra 1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING -2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL no matching row in const table +2 SUBQUERY t2 ref i1,i2 i1 5 const 1 100.00 Using index condition; Using where Warnings: Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk` from `test`.`t1` having 0 DROP TABLE t1,t2; @@ -1957,7 +1958,7 @@ DROP TABLE t1,t2; # that uses in expensive subquery # CREATE TABLE t1 ( -pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(b1) +pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(a1), KEY(b1) ) ENGINE=MyISAM; INSERT INTO t1 VALUES (10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'), @@ -1976,21 +1977,31 @@ INSERT INTO t1 VALUES (101,0,'u'),(102,7,'r'),(103,2,'x'),(104,8,'e'),(105,8,'i'),(106,5,'q'), (107,8,'z'),(108,3,'k'),(109,65,NULL); CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM; -INSERT INTO t2 VALUES (1,1,'x'); +INSERT INTO t2 VALUES (1,1,'i'); INSERT INTO t2 SELECT * FROM t1; -SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +INSERT INTO t1 SELECT pk1+200, a1, b1 FROM t1; +INSERT INTO t1 SELECT pk1+400, a1, b1 FROM t1; +ANALYZE TABLE t1,t2 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK +SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); pk1 a1 b1 pk2 a2 b2 -65 2 a 109 65 NULL -EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +17 1 f 16 1 j +37 3 g 36 3 a +105 8 i 104 8 e +EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 101 100.00 Using where -1 PRIMARY t1 eq_ref|filter PRIMARY,b1 PRIMARY|b1 4|4 test.t2.a2 1 (87%) 87.00 Using where; Using rowid filter +1 PRIMARY t1 ref|filter a1,b1 a1|b1 5|4 test.t2.a2 36 (29%) 28.75 Using where; Using rowid filter 2 SUBQUERY t2 range PRIMARY PRIMARY 4 NULL 1 100.00 Using index condition Warnings: -Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`pk1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t2`.`a2` <> `test`.`t2`.`pk2` -EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 ) +Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t1`.`pk1` + 1 = `test`.`t2`.`pk2` + 2 +EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 ) WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 ); EXPLAIN { @@ -2001,27 +2012,27 @@ EXPLAIN "access_type": "ALL", "rows": 101, "filtered": 100, - "attached_condition": "t2.a2 <> t2.pk2 and t2.a2 is not null" + "attached_condition": "t2.a2 is not 
null" }, "table": { "table_name": "t1", - "access_type": "eq_ref", - "possible_keys": ["PRIMARY", "b1"], - "key": "PRIMARY", - "key_length": "4", - "used_key_parts": ["pk1"], + "access_type": "ref", + "possible_keys": ["a1", "b1"], + "key": "a1", + "key_length": "5", + "used_key_parts": ["a1"], "ref": ["test.t2.a2"], "rowid_filter": { "range": { "key": "b1", "used_key_parts": ["b1"] }, - "rows": 87, - "selectivity_pct": 87 + "rows": 115, + "selectivity_pct": 28.75 }, - "rows": 1, - "filtered": 87, - "attached_condition": "t1.b1 <= (subquery#2)" + "rows": 36, + "filtered": 28.75, + "attached_condition": "t1.b1 <= (subquery#2) and t1.pk1 + 1 = t2.pk2 + 2" }, "subqueries": [ { @@ -2088,15 +2099,444 @@ set @save_optimizer_switch= @@optimizer_switch; SET @@optimizer_switch="index_merge_sort_union=OFF"; CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b)); INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +ANALYZE table t1 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK explain SELECT * FROM t1 WHERE a > 0 AND b=0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref|filter a,b b|a 5|5 const 2 (14%) Using where; Using rowid filter +1 SIMPLE t1 ref|filter a,b b|a 5|5 const 128 (14%) Using where; Using rowid filter SELECT * FROM t1 WHERE a > 0 AND b=0; a b 1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 +1 0 drop table t1; SET @@optimizer_switch=@save_optimizer_switch; +# +# MDEV-28846: Poor performance when rowid filter contains no elements +# +create table t1 ( +pk int primary key auto_increment, +nm varchar(32), +fl1 tinyint default 0, +fl2 tinyint default 0, +index idx1(nm, fl1), +index idx2(fl2) +) engine=myisam; +create table name ( +pk int primary key auto_increment, +nm bigint +) engine=myisam; +create table flag2 ( +pk int primary key auto_increment, +fl2 tinyint +) engine=myisam; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '500%' as a; +a +500% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '500%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +analyze format=json +select * from t1 where nm like '500%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "35", + "used_key_parts": ["nm"], + "r_loops": 1, + "rows": 1, + "r_rows": 1, + "r_total_time_ms": "REPLACED", + "filtered": 49.2, 
+ "r_filtered": 100, + "index_condition": "t1.nm like '500%'", + "attached_condition": "t1.fl2 = 0" + } + } +} +select * from t1 where nm like '500%' AND fl2 = 0; +pk nm fl1 fl2 +517 500 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +set optimizer_switch='rowid_filter=off'; +explain +select * from t1 where nm like '500%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +analyze format=json +select * from t1 where nm like '500%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["idx1", "idx2"], + "key": "idx1", + "key_length": "35", + "used_key_parts": ["nm"], + "r_loops": 1, + "rows": 1, + "r_rows": 1, + "r_total_time_ms": "REPLACED", + "filtered": 49.2, + "r_filtered": 100, + "index_condition": "t1.nm like '500%'", + "attached_condition": "t1.fl2 = 0" + } + } +} +select * from t1 where nm like '500%' AND fl2 = 0; +pk nm fl1 fl2 +517 500 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_1000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_1000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '607%' as a; +a +607% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '607%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where +select * from t1 where nm like '607%' AND fl2 = 0; +pk nm fl1 fl2 +721 607 0 0 +truncate table name; +truncate table flag2; +truncate table t1; +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 100 from seq_1_to_10000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select '75%' as a; +a +75% +set optimizer_switch='rowid_filter=on'; +explain +select * from t1 where nm like '75%' AND fl2 = 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref|filter idx1,idx2 idx2|idx1 2|35 const 55 (1%) Using where; Using rowid filter +analyze format=json +select * from t1 where nm like '75%' AND fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "2", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + 
"key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 115, + "selectivity_pct": 1.15, + "r_rows": 111, + "r_lookups": 100, + "r_selectivity_pct": 2, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 55, + "r_rows": 2, + "r_total_time_ms": "REPLACED", + "filtered": 1.15, + "r_filtered": 100, + "attached_condition": "t1.nm like '75%'" + } + } +} +select * from t1 where nm like '75%' AND fl2 = 0; +pk nm fl1 fl2 +4543 7503 0 0 +7373 7518 0 0 +drop table name, flag2; +drop table t1; +create table t1 ( +pk int primary key auto_increment, +nm varchar(32), +fl1 tinyint default 0, +fl2 int default 0, +index idx1(nm, fl1), +index idx2(fl2) +) engine=myisam; +create table name ( +pk int primary key auto_increment, +nm bigint +) engine=myisam; +create table flag2 ( +pk int primary key auto_increment, +fl2 int +) engine=myisam; +insert into name(nm) select seq from seq_1_to_10000 order by rand(17); +insert into flag2(fl2) select seq mod 10 from seq_1_to_10000 order by rand(19); +insert into t1(nm,fl2) +select nm, fl2 from name, flag2 where name.pk = flag2.pk; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status Table is already up to date +select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +pk nm fl1 fl2 +analyze format=json select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "5", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 44, + "selectivity_pct": 0.44, + "r_rows": 44, + "r_lookups": 1000, 
+ "r_selectivity_pct": 0, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 921, + "r_rows": 0, + "r_total_time_ms": "REPLACED", + "filtered": 0.44, + "r_filtered": 100, + "attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'" + } + } +} +create table t0 select * from t1 where nm like '34%'; +delete from t1 using t1,t0 where t1.nm=t0.nm; +analyze format=json select * from t1 +where +( +nm like '3400%' or nm like '3402%' or nm like '3403%' or +nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or +nm like '3409%' or +nm like '3411%' or nm like '3412%' or nm like '3413%' or +nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or +nm like '3418%' or nm like '3419%' or +nm like '3421%' or nm like '3422%' or nm like '3423%' or +nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or +nm like '3428%' or nm like '3429%' or +nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or +nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or +nm like '3439%' or +nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or +nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or +nm like '3448%' +) and fl2 = 0; +ANALYZE +{ + "query_block": { + "select_id": 1, + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["idx1", "idx2"], + "key": "idx2", + "key_length": "5", + "used_key_parts": ["fl2"], + "ref": ["const"], + "rowid_filter": { + "range": { + "key": "idx1", + "used_key_parts": ["nm"] + }, + "rows": 44, + "selectivity_pct": 0.44, + "r_rows": 0, + "r_lookups": 0, + "r_selectivity_pct": 0, + "r_buffer_size": "REPLACED", + "r_filling_time_ms": "REPLACED" + }, + "r_loops": 1, + "rows": 911, + "r_rows": 0, + "filtered": 0.44, + "r_filtered": 100, + "attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like 
'3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'" + } + } +} +drop table t0; +set optimizer_switch='rowid_filter=default'; +drop table name, flag2; +drop table t1; set @@use_stat_tables=@save_use_stat_tables; # # MDEV-18755: possible RORI-plan and possible plan with range filter @@ -2121,6 +2561,11 @@ insert into t1 values (81,'a','a',20),(82,'a','a',0),(83,'a','a',0),(84,'a','a',null), (85,'a','a',-1),(86,'a','a',5),(87,'a','a',null),(88,'a','a',160), (89,null,null,null),(90,'a','a',14785),(91,'a','a',0),(92,'a','a',null); +insert into t1 select pk+100, f1, f2, a from t1; +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK ( select * from t1 where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a'))) union @@ -2169,7 +2614,7 @@ EXPLAIN } }, "rows": 1, - "filtered": 1.5873, + "filtered": 3.1746, "attached_condition": "t1.f1 is null and t1.f2 is null and (t1.f2 between 'a' and 'z' or t1.f1 = 'a')" } } @@ -2196,7 +2641,7 @@ EXPLAIN } }, "rows": 1, - "filtered": 1.5873, + "filtered": 3.1746, "attached_condition": "t1.f1 is null and t1.f2 is null and (t1.f2 between 'a' and 'z' or t1.f1 = 'a')" } } @@ -2231,46 +2676,44 @@ drop table t1, t2; # create table t1 (a int, b int, key (b), key (a)) engine=innodb; insert into t1 -select (rand(1)*1000)/10, (rand(1001)*1000)/50 from seq_1_to_1000; +select (rand(1)*1000)/10, (rand(1001)*1000)/20 from seq_1_to_1000; analyze table t1; Table Op Msg_type Msg_text test.t1 analyze status Engine-independent statistics collected test.t1 analyze status OK set @save_optimizer_switch= @@optimizer_switch; set optimizer_switch='rowid_filter=off'; -select count(*) from t1 where a in (22,83,11) and b=2; +select count(*) from t1 where a between 21 and 30 and b=2; count(*) -6 -explain extended select count(*) from t1 where a in (22,83,11) and b=2; +5 +explain extended select count(*) from t1 where a between 21 and 30 and b=2; id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE t1 ref b,a b 5 const 59 3.30 Using where +1 SIMPLE t1 ref b,a b 5 const 24 9.60 Using where Warnings: -Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` in (22,83,11) -select * from t1 where a in (22,83,11) and b=2; +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` between 21 and 30 +select * from t1 where a between 21 and 30 and b=2; a b -11 2 -11 2 -83 2 -11 2 -83 2 +30 2 +21 2 22 2 +26 2 +25 2 set optimizer_switch='rowid_filter=on'; -select count(*) from t1 where a in (22,83,11) and b=2; +select count(*) from t1 where a between 21 and 30 and b=2; count(*) -6 -explain extended select count(*) from t1 where a in (22,83,11) and b=2; +5 +explain extended select count(*) from t1 where a between 21 and 30 and b=2; id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE t1 ref|filter b,a b|a 5|5 const 59 (3%) 3.30 Using where; Using rowid filter +1 SIMPLE t1 ref|filter b,a b|a 5|5 const 24 (10%) 9.60 Using where; Using rowid filter Warnings: -Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` in (22,83,11) -select * from t1 where a in 
(22,83,11) and b=2; +Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` between 21 and 30 +select * from t1 where a between 21 and 30 and b=2; a b -11 2 -11 2 -83 2 -11 2 -83 2 +30 2 +21 2 22 2 +26 2 +25 2 drop table t1; set optimizer_switch=@save_optimizer_switch; SET SESSION STORAGE_ENGINE=DEFAULT; @@ -2425,7 +2868,7 @@ set global innodb_stats_persistent= @stats.save; # CREATE TABLE t1 ( id int(11) unsigned NOT NULL AUTO_INCREMENT, -domain varchar(255) NOT NULL, +domain varchar(32) NOT NULL, registrant_name varchar(255) DEFAULT NULL, registrant_organization varchar(255) DEFAULT NULL, registrant_street1 varchar(255) DEFAULT NULL, @@ -2516,21 +2959,216 @@ null, 'SUELZBURGSTRASSE 158A', null, null, null, null, 'KOELN', '50937', 'MAXIMILIAN V. KETELHODT', null, 'SUELZBURGSTRASSE 158A', null, null, null, null, 'KOELN', '50937', 'GERMANY', 'ICANN@EXPIRES-2009.WEBCARE24.COM', '492214307580', '', '2017-01-30 10:08:29'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, 
administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, 
+technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, null, null, null, null, null, null, null, null, null, null, null, +null, null, '', '2016-12-22 09:18:28'); +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +SELECT +domain, registrant_name, 
registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp +FROM t1; +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +SELECT +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp +FROM t1; +ANALYZE TABLE t1 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze Warning Engine-independent statistics are not collected for column 'json' +test.t1 analyze status OK SET @save_optimizer_switch=@@optimizer_switch; SET optimizer_switch='mrr=on,mrr_sort_keys=on'; SELECT * FROM t1 WHERE 1 = 1 AND domain = 'www.mailhost.i-dev.fr' AND -timestamp >= DATE_ADD(CURRENT_TIMESTAMP, INTERVAL -1 MONTH) +timestamp >= DATE_ADD('2017-01-30 08:24:51', INTERVAL -1 MONTH) ORDER BY timestamp DESC; id domain registrant_name registrant_organization registrant_street1 registrant_street2 registrant_street3 registrant_street4 registrant_street5 registrant_city registrant_postal_code registrant_country registrant_email registrant_telephone administrative_name administrative_organization administrative_street1 administrative_street2 administrative_street3 administrative_street4 administrative_street5 administrative_city administrative_postal_code administrative_country administrative_email administrative_telephone technical_name technical_organization 
technical_street1 technical_street2 technical_street3 technical_street4 technical_street5 technical_city technical_postal_code technical_country technical_email technical_telephone json timestamp +80551 www.mailhost.i-dev.fr NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 2017-01-30 10:00:56 +80579 www.mailhost.i-dev.fr NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 2017-01-30 10:00:56 +80594 www.mailhost.i-dev.fr NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 2017-01-30 10:00:56 +80609 www.mailhost.i-dev.fr NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 2017-01-30 10:00:56 EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1 = 1 AND domain = 'www.mailhost.i-dev.fr' AND -timestamp >= DATE_ADD(CURRENT_TIMESTAMP, INTERVAL -1 MONTH) +timestamp >= DATE_ADD('2017-01-30 08:24:51', INTERVAL -1 MONTH) ORDER BY timestamp DESC; id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE t1 ref|filter ixEventWhoisDomainDomain,ixEventWhoisDomainTimestamp ixEventWhoisDomainDomain|ixEventWhoisDomainTimestamp 767|4 const 2 (14%) 14.29 Using index condition; Using where; Using filesort; Using rowid filter +1 SIMPLE t1 ref|filter ixEventWhoisDomainDomain,ixEventWhoisDomainTimestamp ixEventWhoisDomainDomain|ixEventWhoisDomainTimestamp 98|4 const 40 (33%) 33.33 Using index condition; Using where; Using filesort; Using rowid filter Warnings: -Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`domain` AS `domain`,`test`.`t1`.`registrant_name` AS `registrant_name`,`test`.`t1`.`registrant_organization` AS `registrant_organization`,`test`.`t1`.`registrant_street1` AS `registrant_street1`,`test`.`t1`.`registrant_street2` AS `registrant_street2`,`test`.`t1`.`registrant_street3` AS `registrant_street3`,`test`.`t1`.`registrant_street4` AS `registrant_street4`,`test`.`t1`.`registrant_street5` AS `registrant_street5`,`test`.`t1`.`registrant_city` AS `registrant_city`,`test`.`t1`.`registrant_postal_code` AS `registrant_postal_code`,`test`.`t1`.`registrant_country` AS `registrant_country`,`test`.`t1`.`registrant_email` AS `registrant_email`,`test`.`t1`.`registrant_telephone` AS `registrant_telephone`,`test`.`t1`.`administrative_name` AS `administrative_name`,`test`.`t1`.`administrative_organization` AS `administrative_organization`,`test`.`t1`.`administrative_street1` AS `administrative_street1`,`test`.`t1`.`administrative_street2` AS `administrative_street2`,`test`.`t1`.`administrative_street3` AS `administrative_street3`,`test`.`t1`.`administrative_street4` AS `administrative_street4`,`test`.`t1`.`administrative_street5` AS `administrative_street5`,`test`.`t1`.`administrative_city` AS `administrative_city`,`test`.`t1`.`administrative_postal_code` AS `administrative_postal_code`,`test`.`t1`.`administrative_country` AS `administrative_country`,`test`.`t1`.`administrative_email` AS `administrative_email`,`test`.`t1`.`administrative_telephone` AS `administrative_telephone`,`test`.`t1`.`technical_name` AS `technical_name`,`test`.`t1`.`technical_organization` AS 
`technical_organization`,`test`.`t1`.`technical_street1` AS `technical_street1`,`test`.`t1`.`technical_street2` AS `technical_street2`,`test`.`t1`.`technical_street3` AS `technical_street3`,`test`.`t1`.`technical_street4` AS `technical_street4`,`test`.`t1`.`technical_street5` AS `technical_street5`,`test`.`t1`.`technical_city` AS `technical_city`,`test`.`t1`.`technical_postal_code` AS `technical_postal_code`,`test`.`t1`.`technical_country` AS `technical_country`,`test`.`t1`.`technical_email` AS `technical_email`,`test`.`t1`.`technical_telephone` AS `technical_telephone`,`test`.`t1`.`json` AS `json`,`test`.`t1`.`timestamp` AS `timestamp` from `test`.`t1` where `test`.`t1`.`domain` = 'www.mailhost.i-dev.fr' and `test`.`t1`.`timestamp` >= (current_timestamp() + interval -1 month) order by `test`.`t1`.`timestamp` desc +Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`domain` AS `domain`,`test`.`t1`.`registrant_name` AS `registrant_name`,`test`.`t1`.`registrant_organization` AS `registrant_organization`,`test`.`t1`.`registrant_street1` AS `registrant_street1`,`test`.`t1`.`registrant_street2` AS `registrant_street2`,`test`.`t1`.`registrant_street3` AS `registrant_street3`,`test`.`t1`.`registrant_street4` AS `registrant_street4`,`test`.`t1`.`registrant_street5` AS `registrant_street5`,`test`.`t1`.`registrant_city` AS `registrant_city`,`test`.`t1`.`registrant_postal_code` AS `registrant_postal_code`,`test`.`t1`.`registrant_country` AS `registrant_country`,`test`.`t1`.`registrant_email` AS `registrant_email`,`test`.`t1`.`registrant_telephone` AS `registrant_telephone`,`test`.`t1`.`administrative_name` AS `administrative_name`,`test`.`t1`.`administrative_organization` AS `administrative_organization`,`test`.`t1`.`administrative_street1` AS `administrative_street1`,`test`.`t1`.`administrative_street2` AS `administrative_street2`,`test`.`t1`.`administrative_street3` AS `administrative_street3`,`test`.`t1`.`administrative_street4` AS `administrative_street4`,`test`.`t1`.`administrative_street5` AS `administrative_street5`,`test`.`t1`.`administrative_city` AS `administrative_city`,`test`.`t1`.`administrative_postal_code` AS `administrative_postal_code`,`test`.`t1`.`administrative_country` AS `administrative_country`,`test`.`t1`.`administrative_email` AS `administrative_email`,`test`.`t1`.`administrative_telephone` AS `administrative_telephone`,`test`.`t1`.`technical_name` AS `technical_name`,`test`.`t1`.`technical_organization` AS `technical_organization`,`test`.`t1`.`technical_street1` AS `technical_street1`,`test`.`t1`.`technical_street2` AS `technical_street2`,`test`.`t1`.`technical_street3` AS `technical_street3`,`test`.`t1`.`technical_street4` AS `technical_street4`,`test`.`t1`.`technical_street5` AS `technical_street5`,`test`.`t1`.`technical_city` AS `technical_city`,`test`.`t1`.`technical_postal_code` AS `technical_postal_code`,`test`.`t1`.`technical_country` AS `technical_country`,`test`.`t1`.`technical_email` AS `technical_email`,`test`.`t1`.`technical_telephone` AS `technical_telephone`,`test`.`t1`.`json` AS `json`,`test`.`t1`.`timestamp` AS `timestamp` from `test`.`t1` where `test`.`t1`.`domain` = 'www.mailhost.i-dev.fr' and `test`.`t1`.`timestamp` >= ('2017-01-30 08:24:51' + interval -1 month) order by `test`.`t1`.`timestamp` desc SET optimizer_switch=@save_optimizer_switch; DROP TABLE t1; # @@ -2692,6 +3330,10 @@ insert into filt(id,aceid,clid,fh) values (6341490487802728361,6341490487802728360,1,1291319099896431785), (6341490487802728362,6341490487802728360,1,8948400944397203540), 
(6341490487802728363,6341490487802728361,1,6701841652906431497); +insert into filt select id+10000,aceid,clid,fh from filt; +insert into filt select id+20000,aceid,clid,fh from filt; +insert into filt select id+40000,aceid,clid,fh from filt; +insert into filt select id+80000,aceid,clid,fh from filt; analyze table filt, acei, acli; Table Op Msg_type Msg_text test.filt analyze status Engine-independent statistics collected @@ -2716,7 +3358,7 @@ fi.fh in (6311439873746261694,-397087483897438286, id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t index_merge PRIMARY,acli_rid,acli_tp acli_tp,acli_rid 2,767 NULL 2 100.00 Using intersect(acli_tp,acli_rid); Using where; Using index 1 SIMPLE a ref PRIMARY,acei_aclid acei_aclid 8 test.t.id 1 100.00 Using where -1 SIMPLE fi ref filt_aceid,filt_fh filt_aceid 8 test.a.id 1 17.14 Using where +1 SIMPLE fi ref filt_aceid,filt_fh filt_aceid 8 test.a.id 24 14.46 Using where Warnings: Note 1003 select `test`.`t`.`id` AS `id`,`test`.`fi`.`id` AS `id`,`test`.`fi`.`aceid` AS `aceid`,`test`.`fi`.`clid` AS `clid`,`test`.`fi`.`fh` AS `fh` from `test`.`acli` `t` join `test`.`acei` `a` join `test`.`filt` `fi` where `test`.`t`.`tp` = 121 and `test`.`a`.`atp` = 1 and `test`.`fi`.`aceid` = `test`.`a`.`id` and `test`.`a`.`aclid` = `test`.`t`.`id` and `test`.`t`.`rid` = 'B5FCC8C7111E4E3CBC21AAF5012F59C2' and `test`.`fi`.`fh` in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774) set statement optimizer_switch='rowid_filter=off' for select t.id, fi.* @@ -2731,6 +3373,36 @@ fi.fh in (6311439873746261694,-397087483897438286, id id aceid clid fh 3080602882609775594 3080602882609775600 3080602882609775598 1 6311439873746261694 3080602882609775594 3080602882609775601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885600 3080602882609775598 1 
6311439873746261694 +3080602882609775594 3080602882609885601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925601 3080602882609775598 1 6311439873746261694 set statement optimizer_switch='rowid_filter=on' for explain extended select t.id, fi.* from (acli t inner join acei a on a.aclid = t.id) inner join filt fi on a.id = fi.aceid @@ -2743,7 +3415,7 @@ fi.fh in (6311439873746261694,-397087483897438286, id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t index_merge PRIMARY,acli_rid,acli_tp acli_tp,acli_rid 2,767 NULL 2 100.00 Using intersect(acli_tp,acli_rid); Using where; Using index 1 SIMPLE a ref PRIMARY,acei_aclid acei_aclid 8 test.t.id 1 100.00 Using where -1 SIMPLE fi ref|filter filt_aceid,filt_fh filt_aceid|filt_fh 8|8 test.a.id 1 (17%) 17.14 Using where; Using rowid filter +1 SIMPLE fi ref|filter filt_aceid,filt_fh filt_aceid|filt_fh 8|8 test.a.id 24 (14%) 14.46 Using where; Using rowid filter Warnings: Note 1003 select `test`.`t`.`id` AS `id`,`test`.`fi`.`id` AS `id`,`test`.`fi`.`aceid` AS `aceid`,`test`.`fi`.`clid` AS `clid`,`test`.`fi`.`fh` AS `fh` from `test`.`acli` `t` join `test`.`acei` `a` join `test`.`filt` `fi` where `test`.`t`.`tp` = 121 and `test`.`a`.`atp` = 1 and `test`.`fi`.`aceid` = `test`.`a`.`id` and `test`.`a`.`aclid` = `test`.`t`.`id` and `test`.`t`.`rid` = 'B5FCC8C7111E4E3CBC21AAF5012F59C2' and `test`.`fi`.`fh` in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774) set statement optimizer_switch='rowid_filter=on' for select t.id, fi.* @@ -2758,6 +3430,36 @@ fi.fh in (6311439873746261694,-397087483897438286, id id aceid clid fh 3080602882609775594 3080602882609775600 3080602882609775598 1 6311439873746261694 3080602882609775594 3080602882609775601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845601 
3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925601 3080602882609775598 1 6311439873746261694 set optimizer_switch='mrr=on'; set join_cache_level=6; set statement optimizer_switch='rowid_filter=off' for explain extended select t.id, fi.* @@ -2772,7 +3474,7 @@ fi.fh in (6311439873746261694,-397087483897438286, id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t index_merge PRIMARY,acli_rid,acli_tp acli_tp,acli_rid 2,767 NULL 2 100.00 Using intersect(acli_tp,acli_rid); Using where; Using index 1 SIMPLE a ref PRIMARY,acei_aclid acei_aclid 8 test.t.id 1 100.00 Using where; Using join buffer (flat, BKA join); Rowid-ordered scan -1 SIMPLE fi ref filt_aceid,filt_fh filt_aceid 8 test.a.id 1 17.14 Using where; Using join buffer (incremental, BKA join); Rowid-ordered scan +1 SIMPLE fi ref filt_aceid,filt_fh filt_aceid 8 test.a.id 24 14.46 Using where; Using join buffer (incremental, BKA join); Rowid-ordered scan Warnings: Note 1003 select `test`.`t`.`id` AS `id`,`test`.`fi`.`id` AS `id`,`test`.`fi`.`aceid` AS `aceid`,`test`.`fi`.`clid` AS `clid`,`test`.`fi`.`fh` AS `fh` from `test`.`acli` `t` join `test`.`acei` `a` join `test`.`filt` `fi` where `test`.`t`.`tp` = 121 and `test`.`a`.`atp` = 1 and `test`.`fi`.`aceid` = `test`.`a`.`id` and `test`.`a`.`aclid` = `test`.`t`.`id` and `test`.`t`.`rid` = 'B5FCC8C7111E4E3CBC21AAF5012F59C2' and `test`.`fi`.`fh` in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774) set statement optimizer_switch='rowid_filter=off' for select t.id, fi.* @@ -2787,6 +3489,36 @@ fi.fh in (6311439873746261694,-397087483897438286, id id aceid clid fh 3080602882609775594 3080602882609775600 3080602882609775598 1 6311439873746261694 3080602882609775594 3080602882609775601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805601 
3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925601 3080602882609775598 1 6311439873746261694 set statement optimizer_switch='rowid_filter=on' for explain extended select t.id, fi.* from (acli t inner join acei a on a.aclid = t.id) inner join filt fi on a.id = fi.aceid @@ -2799,7 +3531,7 @@ fi.fh in (6311439873746261694,-397087483897438286, id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t index_merge PRIMARY,acli_rid,acli_tp acli_tp,acli_rid 2,767 NULL 2 100.00 Using intersect(acli_tp,acli_rid); Using where; Using index 1 SIMPLE a ref PRIMARY,acei_aclid acei_aclid 8 test.t.id 1 100.00 Using where; Using join buffer (flat, BKA join); Rowid-ordered scan -1 SIMPLE fi ref|filter filt_aceid,filt_fh filt_aceid|filt_fh 8|8 test.a.id 1 (17%) 17.14 Using where; Using join buffer (incremental, BKA join); Rowid-ordered scan; Using rowid filter +1 SIMPLE fi ref|filter filt_aceid,filt_fh filt_aceid|filt_fh 8|8 test.a.id 24 (14%) 14.46 Using where; Using join buffer (incremental, BKA join); Rowid-ordered scan; Using rowid filter Warnings: Note 1003 select `test`.`t`.`id` AS `id`,`test`.`fi`.`id` AS `id`,`test`.`fi`.`aceid` AS `aceid`,`test`.`fi`.`clid` AS `clid`,`test`.`fi`.`fh` AS `fh` from `test`.`acli` `t` join `test`.`acei` `a` join `test`.`filt` `fi` where `test`.`t`.`tp` = 121 and `test`.`a`.`atp` = 1 and `test`.`fi`.`aceid` = `test`.`a`.`id` and `test`.`a`.`aclid` = `test`.`t`.`id` and `test`.`t`.`rid` = 'B5FCC8C7111E4E3CBC21AAF5012F59C2' and `test`.`fi`.`fh` in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774) set statement 
optimizer_switch='rowid_filter=on' for select t.id, fi.* @@ -2814,6 +3546,36 @@ fi.fh in (6311439873746261694,-397087483897438286, id id aceid clid fh 3080602882609775594 3080602882609775600 3080602882609775598 1 6311439873746261694 3080602882609775594 3080602882609775601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609785601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609795601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609805601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609815601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609825601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609835601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609845601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609855601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609865601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609875601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609885601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609895601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609905601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609915601 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925600 3080602882609775598 1 6311439873746261694 +3080602882609775594 3080602882609925601 3080602882609775598 1 6311439873746261694 set statement optimizer_switch='rowid_filter=on' for analyze format=json select t.id, fi.* from (acli t inner join acei a on a.aclid = t.id) inner join filt fi on a.id = fi.aceid @@ -2892,22 +3654,23 @@ ANALYZE "key": "filt_fh", "used_key_parts": ["fh"] }, - "rows": 6, - "selectivity_pct": 17.143, - "r_rows": 5, + "rows": 81, + "selectivity_pct": 14.464, + "r_rows": 80, + "r_lookups": 80, "r_selectivity_pct": 40, "r_buffer_size": "REPLACED", "r_filling_time_ms": "REPLACED" }, "r_loops": 1, - "rows": 1, - "r_rows": 2, + "rows": 24, + "r_rows": 32, "r_total_time_ms": "REPLACED", - "filtered": 17.143, + "filtered": 14.464, "r_filtered": 100 }, "buffer_type": "incremental", - "buffer_size": "603", + "buffer_size": "4Kb", "join_type": "BKA", 
"mrr_type": "Rowid-ordered scan", "attached_condition": "fi.fh in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774)", @@ -2926,38 +3689,99 @@ CREATE TABLE t1 (pk int NOT NULL, c1 varchar(1)) engine=innodb; INSERT INTO t1 VALUES (1,NULL),(15,'o'),(16,'x'),(19,'t'),(35,'k'),(36,'h'),(42,'t'),(43,'h'), (53,'l'),(62,'a'),(71,NULL),(79,'u'),(128,'y'),(129,NULL),(133,NULL); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; CREATE TABLE t2 ( -i1 int, c1 varchar(1) NOT NULL, KEY c1 (c1), KEY i1 (i1) +i1 int, c1 varchar(1) NOT NULL, +filler1 char(255) default '0', filler2 char(255) default '0', +KEY c1 (c1), KEY i1 (i1) ) engine=innodb; -INSERT INTO t2 VALUES -(1,'1'),(NULL,'1'),(42,'t'),(NULL,'1'),(79,'u'),(NULL,'1'), -(NULL,'4'),(NULL,'4'),(NULL,'1'),(NULL,'u'),(2,'1'),(NULL,'w'); +INSERT INTO t2(i1,c1) VALUES +(NULL,'1'),(1,'1'),(2,'t'),(3,'1'),(4,'u'),(5,'1'), +(6,'4'),(7,'4'),(8,'1'),(1,'u'),(2,'1'),(NULL,'w'); +INSERT INTO t2 SELECT * FROM t2; INSERT INTO t2 SELECT * FROM t2; +INSERT INTO t2 SELECT * FROM t2; +INSERT INTO t2 SELECT * FROM t2; +ANALYZE TABLE t1,t2 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +test.t2 analyze status Engine-independent statistics collected +test.t2 analyze status OK SELECT * FROM t1 WHERE t1.c1 NOT IN (SELECT t2.c1 FROM t2, t1 AS a1 -WHERE t2.i1 = t1.pk AND t2.i1 IS NOT NULL); +WHERE t2.i1 = t1.pk AND t2.i1 BETWEEN 3 AND 5); pk c1 +1 NULL +15 o +16 x +19 t +35 k +36 h +42 t +43 h +53 l +62 a +71 NULL +79 u +128 y +129 NULL +133 NULL +1 NULL +15 o +16 x +19 t +35 k +36 h +42 t +43 h +53 l +62 a +71 NULL +79 u +128 y +129 NULL +133 NULL +1 NULL +15 o +16 x +19 t +35 k +36 h +42 t +43 h +53 l +62 a +71 NULL +79 u +128 y +129 NULL +133 NULL +1 NULL 15 o 16 x 19 t 35 k 36 h +42 t 43 h 53 l 62 a 71 NULL +79 u 128 y 129 NULL 133 NULL EXPLAIN EXTENDED SELECT * FROM t1 WHERE t1.c1 NOT IN (SELECT t2.c1 FROM t2, t1 AS a1 -WHERE t2.i1 = t1.pk AND t2.i1 IS NOT NULL); +WHERE t2.i1 = t1.pk AND t2.i1 BETWEEN 3 AND 5); id select_type table type possible_keys key key_len ref rows filtered Extra -1 PRIMARY t1 ALL NULL NULL NULL NULL 15 100.00 Using where -2 DEPENDENT SUBQUERY t2 ref|filter c1,i1 c1|i1 3|5 func 6 (33%) 33.33 Using where; Full scan on NULL key; Using rowid filter -2 DEPENDENT SUBQUERY a1 ALL NULL NULL NULL NULL 15 100.00 Using join buffer (flat, BNL join) +1 PRIMARY t1 ALL NULL NULL NULL NULL 60 100.00 Using where +2 DEPENDENT SUBQUERY t2 ref|filter c1,i1 c1|i1 3|5 func 38 (25%) 25.00 Using where; Full scan on NULL key; Using rowid filter +2 DEPENDENT SUBQUERY a1 ALL NULL NULL NULL NULL 60 100.00 Using join buffer (flat, BNL join) Warnings: Note 1276 Field or reference 'test.t1.pk' of SELECT #2 was resolved in SELECT #1 -Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1` AS `c1` from `test`.`t1` where !<`test`.`t1`.`c1`,`test`.`t1`.`pk`>((`test`.`t1`.`c1`,(/* select#2 */ select `test`.`t2`.`c1` from `test`.`t2` join `test`.`t1` `a1` where `test`.`t2`.`i1` = `test`.`t1`.`pk` and `test`.`t2`.`i1` is not null and trigcond((`test`.`t1`.`c1`) = `test`.`t2`.`c1`)))) +Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1` AS `c1` from `test`.`t1` where !<`test`.`t1`.`c1`,`test`.`t1`.`pk`>((`test`.`t1`.`c1`,(/* select#2 */ select `test`.`t2`.`c1` from `test`.`t2` join `test`.`t1` `a1` where `test`.`t2`.`i1` = `test`.`t1`.`pk` and `test`.`t2`.`i1` between 3 and 5 and 
trigcond((`test`.`t1`.`c1`) = `test`.`t2`.`c1`)))) DROP TABLE t1,t2; # End of 10.4 tests diff --git a/mysql-test/main/rowid_filter_innodb.test b/mysql-test/main/rowid_filter_innodb.test index dc8b1ddbca5..f4d0b241d11 100644 --- a/mysql-test/main/rowid_filter_innodb.test +++ b/mysql-test/main/rowid_filter_innodb.test @@ -32,6 +32,10 @@ insert into t1 values (85,'a','a',-1),(86,'a','a',5),(87,'a','a',null),(88,'a','a',160), (89,null,null,null),(90,'a','a',14785),(91,'a','a',0),(92,'a','a',null); +insert into t1 select pk+100, f1, f2, a from t1; + +analyze table t1; + let $q= ( select * from t1 where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a'))) @@ -73,13 +77,13 @@ drop table t1, t2; create table t1 (a int, b int, key (b), key (a)) engine=innodb; insert into t1 -select (rand(1)*1000)/10, (rand(1001)*1000)/50 from seq_1_to_1000; +select (rand(1)*1000)/10, (rand(1001)*1000)/20 from seq_1_to_1000; analyze table t1; let $q= -select count(*) from t1 where a in (22,83,11) and b=2; +select count(*) from t1 where a between 21 and 30 and b=2; let $q1= -select * from t1 where a in (22,83,11) and b=2; +select * from t1 where a between 21 and 30 and b=2; set @save_optimizer_switch= @@optimizer_switch; @@ -224,7 +228,7 @@ set global innodb_stats_persistent= @stats.save; CREATE TABLE t1 ( id int(11) unsigned NOT NULL AUTO_INCREMENT, - domain varchar(255) NOT NULL, + domain varchar(32) NOT NULL, registrant_name varchar(255) DEFAULT NULL, registrant_organization varchar(255) DEFAULT NULL, registrant_street1 varchar(255) DEFAULT NULL, @@ -317,6 +321,66 @@ technical_email, technical_telephone, json, timestamp) VALUES null, 'KOELN', '50937', 'GERMANY', 'ICANN@EXPIRES-2009.WEBCARE24.COM', '492214307580', '', '2017-01-30 10:08:29'); +let $sqi= +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +VALUES +('www.mailhost.i-dev.fr', null, null, null, null, null, null, null, null, + null, null, null, null, null, null, null, null, null, null, null, null, null, + null, null, null, null, null, null, null, null, null, null, null, null, null, + null, null, '', '2016-12-22 09:18:28'); + +eval $sqi; +eval $sqi; +eval $sqi; +eval $sqi; +eval $sqi; +eval $sqi; +eval $sqi; +eval $sqi; + +let $qi= +INSERT INTO t1 ( +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, 
+administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp) +SELECT +domain, registrant_name, registrant_organization, registrant_street1, +registrant_street2, registrant_street3, registrant_street4, registrant_street5, +registrant_city, registrant_postal_code, registrant_country, registrant_email, +registrant_telephone, administrative_name, administrative_organization, +administrative_street1, administrative_street2, administrative_street3, +administrative_street4, administrative_street5, administrative_city, +administrative_postal_code, administrative_country, administrative_email, +administrative_telephone, technical_name, technical_organization, +technical_street1, technical_street2, technical_street3, technical_street4, +technical_street5, technical_city, technical_postal_code, technical_country, +technical_email, technical_telephone, json, timestamp +FROM t1; + +eval $qi; +eval $qi; + +ANALYZE TABLE t1 PERSISTENT FOR ALL; + SET @save_optimizer_switch=@@optimizer_switch; SET optimizer_switch='mrr=on,mrr_sort_keys=on'; @@ -324,7 +388,7 @@ SET optimizer_switch='mrr=on,mrr_sort_keys=on'; let $q= SELECT * FROM t1 WHERE 1 = 1 AND domain = 'www.mailhost.i-dev.fr' AND - timestamp >= DATE_ADD(CURRENT_TIMESTAMP, INTERVAL -1 MONTH) + timestamp >= DATE_ADD('2017-01-30 08:24:51', INTERVAL -1 MONTH) ORDER BY timestamp DESC; eval $q; @@ -497,6 +561,11 @@ insert into filt(id,aceid,clid,fh) values (6341490487802728362,6341490487802728360,1,8948400944397203540), (6341490487802728363,6341490487802728361,1,6701841652906431497); +insert into filt select id+10000,aceid,clid,fh from filt; +insert into filt select id+20000,aceid,clid,fh from filt; +insert into filt select id+40000,aceid,clid,fh from filt; +insert into filt select id+80000,aceid,clid,fh from filt; + analyze table filt, acei, acli; let $q= @@ -545,19 +614,28 @@ CREATE TABLE t1 (pk int NOT NULL, c1 varchar(1)) engine=innodb; INSERT INTO t1 VALUES (1,NULL),(15,'o'),(16,'x'),(19,'t'),(35,'k'),(36,'h'),(42,'t'),(43,'h'), (53,'l'),(62,'a'),(71,NULL),(79,'u'),(128,'y'),(129,NULL),(133,NULL); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; CREATE TABLE t2 ( -i1 int, c1 varchar(1) NOT NULL, KEY c1 (c1), KEY i1 (i1) +i1 int, c1 varchar(1) NOT NULL, +filler1 char(255) default '0', filler2 char(255) default '0', +KEY c1 (c1), KEY i1 (i1) ) engine=innodb; -INSERT INTO t2 VALUES -(1,'1'),(NULL,'1'),(42,'t'),(NULL,'1'),(79,'u'),(NULL,'1'), -(NULL,'4'),(NULL,'4'),(NULL,'1'),(NULL,'u'),(2,'1'),(NULL,'w'); +INSERT INTO t2(i1,c1) VALUES +(NULL,'1'),(1,'1'),(2,'t'),(3,'1'),(4,'u'),(5,'1'), +(6,'4'),(7,'4'),(8,'1'),(1,'u'),(2,'1'),(NULL,'w'); +INSERT INTO t2 SELECT * FROM t2; +INSERT INTO t2 SELECT * FROM t2; INSERT INTO t2 SELECT * FROM t2; +INSERT INTO t2 SELECT * FROM t2; + +ANALYZE TABLE t1,t2 PERSISTENT FOR ALL; let $q= SELECT * FROM t1 WHERE t1.c1 NOT IN (SELECT t2.c1 FROM t2, t1 AS a1 - WHERE t2.i1 = t1.pk AND t2.i1 IS NOT NULL); + WHERE t2.i1 = t1.pk AND t2.i1 BETWEEN 3 AND 5); eval $q; eval EXPLAIN EXTENDED $q; diff --git a/mysql-test/main/rowid_filter_innodb_debug.result b/mysql-test/main/rowid_filter_innodb_debug.result index 6fd75294bdb..f989e00919b 100644 --- a/mysql-test/main/rowid_filter_innodb_debug.result +++ b/mysql-test/main/rowid_filter_innodb_debug.result @@ -4,8 +4,6 @@ set default_storage_engine=innodb; # 
create table t0(a int); insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -create table t1(a int); -insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; create table t2(a int); insert into t2 select A.a + B.a* 10 from t0 A, t0 B; CREATE TABLE t3 ( @@ -22,10 +20,10 @@ InnoDB insert into t3 select A.a, -A.a, +B.a, 'filler-data-filler-data' from -t0 A, t1 B; +t2 A, t2 B; analyze table t2,t3; Table Op Msg_type Msg_text test.t2 analyze status Engine-independent statistics collected @@ -38,7 +36,7 @@ where t3.key1=t2.a and t3.key2 in (2,3); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 100 Using where -1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 1000 (20%) Using where; Using rowid filter +1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 100 (2%) Using where; Using rowid filter set debug_sync='handler_rowid_filter_check SIGNAL at_rowid_filter_check WAIT_FOR go'; select * from t2, t3 where @@ -52,7 +50,7 @@ connection default; disconnect con1; ERROR 70100: Query execution was interrupted set debug_sync='RESET'; -drop table t0,t1,t2,t3; +drop table t0,t2,t3; set default_storage_engine=default; set @save_optimizer_switch= @@optimizer_switch; set @save_use_stat_tables= @@use_stat_tables; @@ -66,6 +64,16 @@ set optimizer_switch='rowid_filter=on'; # CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b)) ENGINE=InnoDB; INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +ANALYZE TABLE t1 PERSISTENT FOR ALL; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK set debug_sync='handler_rowid_filter_check SIGNAL killme WAIT_FOR go'; SELECT * FROM t1 WHERE a > 0 AND b=0; connect con1, localhost, root,,; diff --git a/mysql-test/main/rowid_filter_innodb_debug.test b/mysql-test/main/rowid_filter_innodb_debug.test index 31fbd937304..74deaa8ccc9 100644 --- a/mysql-test/main/rowid_filter_innodb_debug.test +++ b/mysql-test/main/rowid_filter_innodb_debug.test @@ -24,6 +24,14 @@ set optimizer_switch='rowid_filter=on'; CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b)) ENGINE=InnoDB; INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4); +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; +INSERT INTO t1 SELECT * FROM t1; + +ANALYZE TABLE t1 PERSISTENT FOR ALL; let $ID= `SELECT @id := CONNECTION_ID()`; diff --git a/mysql-test/main/rowid_filter_myisam_debug.result b/mysql-test/main/rowid_filter_myisam_debug.result index 16fcb2a416e..32a989f50da 100644 --- a/mysql-test/main/rowid_filter_myisam_debug.result +++ b/mysql-test/main/rowid_filter_myisam_debug.result @@ -3,8 +3,6 @@ # create table t0(a int); insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -create table t1(a int); -insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; create table t2(a int); insert into t2 select A.a + B.a* 10 from t0 A, t0 B; CREATE TABLE t3 ( @@ -21,10 +19,10 @@ MyISAM insert into t3 select A.a, -A.a, +B.a, 'filler-data-filler-data' from -t0 A, t1 B; +t2 A, t2 B; analyze table t2,t3; Table Op Msg_type Msg_text test.t2 analyze status Engine-independent statistics collected @@ -37,7 +35,7 
@@ where t3.key1=t2.a and t3.key2 in (2,3); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 ALL NULL NULL NULL NULL 100 Using where -1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 1000 (18%) Using where; Using rowid filter +1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 100 (2%) Using where; Using rowid filter set debug_sync='handler_rowid_filter_check SIGNAL at_rowid_filter_check WAIT_FOR go'; select * from t2, t3 where @@ -51,4 +49,4 @@ connection default; disconnect con1; ERROR 70100: Query execution was interrupted set debug_sync='RESET'; -drop table t0,t1,t2,t3; +drop table t0,t2,t3; diff --git a/mysql-test/main/select.result b/mysql-test/main/select.result index 189775aa5aa..1562144b164 100644 --- a/mysql-test/main/select.result +++ b/mysql-test/main/select.result @@ -3474,13 +3474,13 @@ INSERT INTO t2 VALUES EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition +1 SIMPLE t2 ref c c 5 test.t1.a 2 EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6 AND a > 0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Using where +1 SIMPLE t2 ref c c 5 test.t1.a 2 DROP TABLE t1, t2; create table t1 ( a int unsigned not null auto_increment primary key, @@ -3616,7 +3616,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee'); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where -1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter +1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where EXPLAIN SELECT t3.a FROM t1,t2,t3 WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND @@ -3624,7 +3624,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee') ; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where -1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter +1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where EXPLAIN SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3 WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND @@ -3744,7 +3744,7 @@ EXPLAIN SELECT * FROM t1 WHERE ID_better=1 AND ID1_with_null IS NULL AND (ID2_with_null=1 OR ID2_with_null=2); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter +1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where DROP TABLE t1; CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts)); INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00"); diff --git a/mysql-test/main/select_jcl6.result b/mysql-test/main/select_jcl6.result index 37277f07ff4..e144477b66e 100644 --- a/mysql-test/main/select_jcl6.result +++ b/mysql-test/main/select_jcl6.result @@ 
-3485,13 +3485,13 @@ INSERT INTO t2 VALUES EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Rowid-ordered scan +1 SIMPLE t2 ref c c 5 test.t1.a 2 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6 AND a > 0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Using where; Rowid-ordered scan +1 SIMPLE t2 ref c c 5 test.t1.a 2 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan DROP TABLE t1, t2; create table t1 ( a int unsigned not null auto_increment primary key, @@ -3627,7 +3627,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee'); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where; Rowid-ordered scan -1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan EXPLAIN SELECT t3.a FROM t1,t2,t3 WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND @@ -3635,7 +3635,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee') ; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where; Rowid-ordered scan -1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter +1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan EXPLAIN SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3 WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND @@ -3755,7 +3755,7 @@ EXPLAIN SELECT * FROM t1 WHERE ID_better=1 AND ID1_with_null IS NULL AND (ID2_with_null=1 OR ID2_with_null=2); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter +1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where DROP TABLE t1; CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts)); INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00"); diff --git a/mysql-test/main/select_pkeycache.result b/mysql-test/main/select_pkeycache.result index 189775aa5aa..1562144b164 100644 --- a/mysql-test/main/select_pkeycache.result +++ b/mysql-test/main/select_pkeycache.result @@ -3474,13 +3474,13 @@ INSERT INTO t2 VALUES EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 
ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition +1 SIMPLE t2 ref c c 5 test.t1.a 2 EXPLAIN SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6 AND a > 0; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where -1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter +1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Using where +1 SIMPLE t2 ref c c 5 test.t1.a 2 DROP TABLE t1, t2; create table t1 ( a int unsigned not null auto_increment primary key, @@ -3616,7 +3616,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee'); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where -1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter +1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where EXPLAIN SELECT t3.a FROM t1,t2,t3 WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND @@ -3624,7 +3624,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee') ; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where -1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter +1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where EXPLAIN SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3 WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND @@ -3744,7 +3744,7 @@ EXPLAIN SELECT * FROM t1 WHERE ID_better=1 AND ID1_with_null IS NULL AND (ID2_with_null=1 OR ID2_with_null=2); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter +1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where DROP TABLE t1; CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts)); INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00"); diff --git a/mysql-test/main/subselect2.result b/mysql-test/main/subselect2.result index c54d635230f..55ac483157f 100644 --- a/mysql-test/main/subselect2.result +++ b/mysql-test/main/subselect2.result @@ -132,7 +132,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where 1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where 1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where -1 PRIMARY t3 ref|filter PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX FFOLDERID_IDX|CMFLDRPARNT_IDX 34|35 test.t3.PARENTID 1 (29%) Using where; Using rowid filter +1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where drop table t1, t2, t3, t4; CREATE TABLE t1 (a int(10) , PRIMARY KEY (a)) Engine=InnoDB; INSERT INTO t1 VALUES (1),(2); -- cgit v1.2.1 From e910dff81ebaa84d0028705d20a40abe8f779afd Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 25 Oct 2022 21:21:19 +0200 Subject: MDEV-26161 crash in Gis_point::calculate_haversine return an error on invalid gis data --- 
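A brief illustration of the behaviour this change enforces (a sketch for the reader, not part of the patch; the malformed hex literal is copied from the test added below, while the valid call uses made-up coordinates): st_distance_sphere must keep working on well-formed geometries and must reject bad geometry data with an error instead of crashing in Gis_point::calculate_haversine.

# Well-formed geometries: returns the spherical distance in meters (roughly 111 km here,
# depending on the sphere radius used).
select st_distance_sphere(point(0,0), point(0,1));
# Malformed geometry data (literal taken from the test case): with the fix this fails with
# "Cannot get geometry object from data you send to the GEOMETRY field" instead of crashing.
select st_distance_sphere(x'01030000000400000004000000000000', multipoint(point(124,204)), 10);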
mysql-test/main/gis.result | 5 +++++ mysql-test/main/gis.test | 5 +++++ 2 files changed, 10 insertions(+) (limited to 'mysql-test') diff --git a/mysql-test/main/gis.result b/mysql-test/main/gis.result index bfe1d3f40a5..3d8c64b0ce8 100644 --- a/mysql-test/main/gis.result +++ b/mysql-test/main/gis.result @@ -4977,5 +4977,10 @@ ERROR HY000: Illegal parameter data type geometry for operation 'is_free_lock' SELECT IS_USED_LOCK(POINT(1,1)); ERROR HY000: Illegal parameter data type geometry for operation 'is_used_lock' # +# MDEV-26161 crash in Gis_point::calculate_haversine +# +select st_distance_sphere(x'01030000000400000004000000000000', multipoint(point(124,204)), 10); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +# # End of 10.3 tests # diff --git a/mysql-test/main/gis.test b/mysql-test/main/gis.test index c7bdb366124..1d202e9be08 100644 --- a/mysql-test/main/gis.test +++ b/mysql-test/main/gis.test @@ -3090,6 +3090,11 @@ SELECT IS_FREE_LOCK(POINT(1,1)); --error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION SELECT IS_USED_LOCK(POINT(1,1)); +--echo # +--echo # MDEV-26161 crash in Gis_point::calculate_haversine +--echo # +--error ER_CANT_CREATE_GEOMETRY_OBJECT +select st_distance_sphere(x'01030000000400000004000000000000', multipoint(point(124,204)), 10); --echo # --echo # End of 10.3 tests -- cgit v1.2.1 From 77951dd7102381385093209a1f2597d28e39900a Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Tue, 25 Oct 2022 23:48:54 +0400 Subject: MDEV-26161 crash in Gis_point::calculate_haversine More checks for bad geometry data added. --- mysql-test/main/gis.result | 4 ++++ mysql-test/main/gis.test | 4 ++++ 2 files changed, 8 insertions(+) (limited to 'mysql-test') diff --git a/mysql-test/main/gis.result b/mysql-test/main/gis.result index 3d8c64b0ce8..358be520b06 100644 --- a/mysql-test/main/gis.result +++ b/mysql-test/main/gis.result @@ -4981,6 +4981,10 @@ ERROR HY000: Illegal parameter data type geometry for operation 'is_used_lock' # select st_distance_sphere(x'01030000000400000004000000000000', multipoint(point(124,204)), 10); ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +select st_distance_sphere(x'010300000004000000040000', multipoint(point(124,204)), 10); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +select st_distance_sphere(x'010300000001000000040000', multipoint(point(124,204)), 10); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field # # End of 10.3 tests # diff --git a/mysql-test/main/gis.test b/mysql-test/main/gis.test index 1d202e9be08..716fab9bfeb 100644 --- a/mysql-test/main/gis.test +++ b/mysql-test/main/gis.test @@ -3095,6 +3095,10 @@ SELECT IS_USED_LOCK(POINT(1,1)); --echo # --error ER_CANT_CREATE_GEOMETRY_OBJECT select st_distance_sphere(x'01030000000400000004000000000000', multipoint(point(124,204)), 10); +--error ER_CANT_CREATE_GEOMETRY_OBJECT +select st_distance_sphere(x'010300000004000000040000', multipoint(point(124,204)), 10); +--error ER_CANT_CREATE_GEOMETRY_OBJECT +select st_distance_sphere(x'010300000001000000040000', multipoint(point(124,204)), 10); --echo # --echo # End of 10.3 tests -- cgit v1.2.1 From 5f296f3a181eb63b6112153c6d4f9186180e6c50 Mon Sep 17 00:00:00 2001 From: Oleg Smirnov Date: Tue, 25 Oct 2022 19:30:42 +0700 Subject: MDEV-29640 FederatedX does not properly handle pushdown in case of difference in local and remote table names FederatedX table may refer to a table with a different name on the remote server: test> 
CREATE TABLE t2 (...) ENGINE="FEDERATEDX" CONNECTION="mysql://user:pass@192.168.1.111:9308/federatedx/t1"; test> select * from t2 where ...; This could cause an issue with federated_pushdown=1, because FederatedX pushes the query (or derived table's) text to the remote server. The remote server will try to read from table t2 (while it should read from t1). Solution: do not allow pushing down queries with tables that have different db_name.table name on the local and remote server. This patch also fixes: MDEV-29863 Server crashes in federatedx_txn::acquire after select from the FederatedX table with partitions Solution: disallow pushdown when partitioned FederatedX tables are used. --- .../federated/federatedx_create_handlers.result | 49 ++++++++++++++++++++++ .../federated/federatedx_create_handlers.test | 48 +++++++++++++++++++++ 2 files changed, 97 insertions(+) (limited to 'mysql-test') diff --git a/mysql-test/suite/federated/federatedx_create_handlers.result b/mysql-test/suite/federated/federatedx_create_handlers.result index b115cc73b87..29ce2c4348b 100644 --- a/mysql-test/suite/federated/federatedx_create_handlers.result +++ b/mysql-test/suite/federated/federatedx_create_handlers.result @@ -420,6 +420,55 @@ SELECT * FROM (SELECT * FROM federated.t1 LIMIT 70000) dt; SELECT COUNT(DISTINCT a) FROM federated.t2; COUNT(DISTINCT a) 70000 +# +# MDEV-29640 FederatedX does not properly handle pushdown +# in case of difference in local and remote table names +# +connection master; +# Use tables from the previous test. Make sure pushdown works: +EXPLAIN SELECT COUNT(DISTINCT a) FROM federated.t2; +id select_type table type possible_keys key key_len ref rows Extra +1 PUSHED SELECT NULL NULL NULL NULL NULL NULL NULL NULL +SELECT COUNT(DISTINCT a) FROM federated.t2; +COUNT(DISTINCT a) +70000 +# Link remote table `federated.t1` with the local table named `t1_local` +CREATE TABLE federated.t1_local ENGINE="FEDERATED" +CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'; +# No pushdown here due to table names mismatch, retrieve data as usual: +EXPLAIN SELECT COUNT(DISTINCT a) FROM federated.t1_local; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1_local ALL NULL NULL NULL NULL 70000 +SELECT COUNT(DISTINCT a) FROM federated.t1_local; +COUNT(DISTINCT a) +70000 +# +# MDEV-29863 Server crashes in federatedx_txn::acquire after select from +# the Federated table with partitions and federated_pushdown=1 +# in case of difference in local and remote table names +# +connection slave; +CREATE TABLE federated.t3 (a INT); +INSERT INTO federated.t3 VALUES (1),(2),(3); +CREATE TABLE federated.t4 (a INT); +connection master; +CREATE SERVER fedlink FOREIGN DATA WRAPPER mysql +OPTIONS (USER 'root', HOST '127.0.0.1', DATABASE 'federated', +PORT SLAVE_PORT); +CREATE TABLE federated.t3 (a INT) +ENGINE=FEDERATED +CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t3' + PARTITION BY list (a) +(PARTITION p1 VALUES IN (1) CONNECTION='fedlink/t3', +PARTITION p2 VALUES IN (2) CONNECTION='fedlink/t4'); +EXPLAIN SELECT * FROM federated.t3; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t3 ALL NULL NULL NULL NULL 3 +SELECT * FROM federated.t3; +a +1 +2 +3 set global federated_pushdown=0; connection master; DROP TABLE IF EXISTS federated.t1; diff --git a/mysql-test/suite/federated/federatedx_create_handlers.test b/mysql-test/suite/federated/federatedx_create_handlers.test index 8863a057b47..2d6c2bc4197 100644 --- 
a/mysql-test/suite/federated/federatedx_create_handlers.test +++ b/mysql-test/suite/federated/federatedx_create_handlers.test @@ -1,6 +1,7 @@ --source have_federatedx.inc --source include/federated.inc --source include/no_valgrind_without_big.inc +--source include/have_partition.inc connection default; @@ -266,6 +267,53 @@ INSERT INTO federated.t2 SELECT * FROM (SELECT * FROM federated.t1 LIMIT 70000) dt; SELECT COUNT(DISTINCT a) FROM federated.t2; + +--echo # +--echo # MDEV-29640 FederatedX does not properly handle pushdown +--echo # in case of difference in local and remote table names +--echo # +connection master; +--echo # Use tables from the previous test. Make sure pushdown works: +EXPLAIN SELECT COUNT(DISTINCT a) FROM federated.t2; +SELECT COUNT(DISTINCT a) FROM federated.t2; + +--echo # Link remote table `federated.t1` with the local table named `t1_local` +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval +CREATE TABLE federated.t1_local ENGINE="FEDERATED" +CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; + +--echo # No pushdown here due to table names mismatch, retrieve data as usual: +EXPLAIN SELECT COUNT(DISTINCT a) FROM federated.t1_local; +SELECT COUNT(DISTINCT a) FROM federated.t1_local; + + +--echo # +--echo # MDEV-29863 Server crashes in federatedx_txn::acquire after select from +--echo # the Federated table with partitions and federated_pushdown=1 +--echo # in case of difference in local and remote table names +--echo # +connection slave; +CREATE TABLE federated.t3 (a INT); +INSERT INTO federated.t3 VALUES (1),(2),(3); +CREATE TABLE federated.t4 (a INT); + +connection master; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval CREATE SERVER fedlink FOREIGN DATA WRAPPER mysql + OPTIONS (USER 'root', HOST '127.0.0.1', DATABASE 'federated', + PORT $SLAVE_MYPORT); + +CREATE TABLE federated.t3 (a INT) + ENGINE=FEDERATED + CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t3' + PARTITION BY list (a) + (PARTITION p1 VALUES IN (1) CONNECTION='fedlink/t3', + PARTITION p2 VALUES IN (2) CONNECTION='fedlink/t4'); + +EXPLAIN SELECT * FROM federated.t3; +SELECT * FROM federated.t3; + set global federated_pushdown=0; source include/federated_cleanup.inc; -- cgit v1.2.1 From f90d9c347fdac35720f874070797559ede066598 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Tue, 4 Oct 2022 11:44:14 +0400 Subject: MDEV-28822 Table from older version requires table rebuild when adding column to table with multi-column index This problem was earlier fixed in 10.4 by the patch for MDEV-29481. Adding MTR tests only. 
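For readers unfamiliar with the scenario, a minimal sketch of the DDL pattern being protected follows (the table name is made up; the authoritative test is the one added below, which additionally swaps in an .frm file apparently written by a 10.4.27 server, judging by the file name, before running the ALTER):

CREATE TABLE t_multi_idx (
  id INT NOT NULL PRIMARY KEY,
  msg VARCHAR(10),
  INDEX(id, msg)
) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
# On a freshly created table this instant ADD COLUMN succeeds in any case; the point of the
# regression test is that it must also succeed when the .frm comes from the older server.
ALTER TABLE t_multi_idx ADD i1 INTEGER, ALGORITHM=INSTANT;
DROP TABLE t_multi_idx;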
--- .../mysql_upgrade/mdev28822_100427_innodb.frm | Bin 0 -> 1443 bytes mysql-test/suite/innodb/r/instant_alter.result | 11 +++++++++++ mysql-test/suite/innodb/t/instant_alter.test | 18 ++++++++++++++++++ 3 files changed, 29 insertions(+) create mode 100644 mysql-test/std_data/mysql_upgrade/mdev28822_100427_innodb.frm (limited to 'mysql-test') diff --git a/mysql-test/std_data/mysql_upgrade/mdev28822_100427_innodb.frm b/mysql-test/std_data/mysql_upgrade/mdev28822_100427_innodb.frm new file mode 100644 index 00000000000..d9cc5e6cc69 Binary files /dev/null and b/mysql-test/std_data/mysql_upgrade/mdev28822_100427_innodb.frm differ diff --git a/mysql-test/suite/innodb/r/instant_alter.result b/mysql-test/suite/innodb/r/instant_alter.result index 6744f0fe061..4a67e04205b 100644 --- a/mysql-test/suite/innodb/r/instant_alter.result +++ b/mysql-test/suite/innodb/r/instant_alter.result @@ -2929,3 +2929,14 @@ t1 CREATE TABLE `t1` ( KEY `f2` (`f2`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci DROP TABLE t1, t2; +# +# MDEV-28822 Table from older version requires table rebuild when adding column to table with multi-column index +# +CREATE TABLE mdev28822_100427_innodb ( +id int not null primary key, +msg varchar(10), +index(id, msg) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +FLUSH TABLES; +ALTER TABLE mdev28822_100427_innodb ADD i1 INTEGER, ALGORITHM=INSTANT; +DROP TABLE mdev28822_100427_innodb; diff --git a/mysql-test/suite/innodb/t/instant_alter.test b/mysql-test/suite/innodb/t/instant_alter.test index 22815798f69..8e333e3bb72 100644 --- a/mysql-test/suite/innodb/t/instant_alter.test +++ b/mysql-test/suite/innodb/t/instant_alter.test @@ -1,6 +1,8 @@ --source include/innodb_page_size.inc --source include/have_sequence.inc +let $datadir=`select @@datadir`; + --echo # --echo # MDEV-11369: Instant ADD COLUMN for InnoDB --echo # @@ -949,3 +951,19 @@ ALTER TABLE t1 DROP COLUMN f3, ADD FOREIGN KEY fk (f1) ALTER TABLE t1 DROP COLUMN f5; SHOW CREATE TABLE t1; DROP TABLE t1, t2; + + +--echo # +--echo # MDEV-28822 Table from older version requires table rebuild when adding column to table with multi-column index +--echo # + +CREATE TABLE mdev28822_100427_innodb ( + id int not null primary key, + msg varchar(10), + index(id, msg) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +FLUSH TABLES; +remove_file $datadir/test/mdev28822_100427_innodb.frm; +copy_file std_data/mysql_upgrade/mdev28822_100427_innodb.frm $datadir/test/mdev28822_100427_innodb.frm; +ALTER TABLE mdev28822_100427_innodb ADD i1 INTEGER, ALGORITHM=INSTANT; +DROP TABLE mdev28822_100427_innodb; -- cgit v1.2.1 From 1a3859fff09986a8ffc7b1b466ef565ce2b0bf42 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 1 Nov 2022 13:22:34 +0100 Subject: MDEV-29924 Assertion `(((nr) % (1LL << 24)) % (int) log_10_int[6 - dec]) == 0' failed in my_time_packed_to_binary on SELECT when using TIME field when assigning the cached item to the Item_cache for the first time make sure to use Item_cache::setup(), not Item_cache::store(). Because the former copies the metadata (and allocates memory, in case of Item_cache_row), and Item_cache::decimal must be set for comparisons to work correctly. 
--- mysql-test/main/type_time_hires.result | 13 ++++++++++++- mysql-test/main/type_time_hires.test | 15 +++++++++++++-- 2 files changed, 25 insertions(+), 3 deletions(-) (limited to 'mysql-test') diff --git a/mysql-test/main/type_time_hires.result b/mysql-test/main/type_time_hires.result index 5fa9d11591a..cf7dce59f1f 100644 --- a/mysql-test/main/type_time_hires.result +++ b/mysql-test/main/type_time_hires.result @@ -360,7 +360,7 @@ select cast(1e-6 as time(6)); cast(1e-6 as time(6)) 00:00:00.000001 # -# Start of 10.4 tests +# End of 5.5 tests # # # MDEV-20397 Support TIMESTAMP, DATETIME, TIME in ROUND() and TRUNCATE() @@ -907,3 +907,14 @@ a CEILING(a) CEILING_SP(a) CEILING(a)=CEILING_SP(a) DROP FUNCTION FLOOR_SP; DROP FUNCTION CEILING_SP; DROP TABLE t1; +# +# MDEV-29924 Assertion `(((nr) % (1LL << 24)) % (int) log_10_int[6 - dec]) == 0' failed in my_time_packed_to_binary on SELECT when using TIME field +# +create table t1 (c decimal(3,1),d time(6)); +insert into t1 values (null,0.1),(null,0.1), (0.1,0.2); +select c from t1 where c Date: Wed, 2 Nov 2022 12:49:24 +0100 Subject: MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query Make sure that EXPLAIN object allocated on runtime arena. --- mysql-test/main/subselect.result | 11 +++++++++++ mysql-test/main/subselect.test | 14 ++++++++++++++ mysql-test/main/subselect_no_exists_to_in.result | 11 +++++++++++ mysql-test/main/subselect_no_mat.result | 11 +++++++++++ mysql-test/main/subselect_no_opts.result | 11 +++++++++++ mysql-test/main/subselect_no_scache.result | 11 +++++++++++ mysql-test/main/subselect_no_semijoin.result | 11 +++++++++++ 7 files changed, 80 insertions(+) (limited to 'mysql-test') diff --git a/mysql-test/main/subselect.result b/mysql-test/main/subselect.result index 1d49e178c68..281ac022318 100644 --- a/mysql-test/main/subselect.result +++ b/mysql-test/main/subselect.result @@ -7369,3 +7369,14 @@ a 1 drop table t1,t2,t3; # End of 10.2 tests +# +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# diff --git a/mysql-test/main/subselect.test b/mysql-test/main/subselect.test index 20358ed5d60..27092a60ca6 100644 --- a/mysql-test/main/subselect.test +++ b/mysql-test/main/subselect.test @@ -6308,3 +6308,17 @@ select a from t3 drop table t1,t2,t3; --echo # End of 10.2 tests + +--echo # +--echo # MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +--echo # + +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; + +# Cleanup +DROP TABLE t; + +--echo # +--echo # End of 10.3 tests +--echo # diff --git a/mysql-test/main/subselect_no_exists_to_in.result b/mysql-test/main/subselect_no_exists_to_in.result index 737636359ba..7e694b52c85 100644 --- a/mysql-test/main/subselect_no_exists_to_in.result +++ b/mysql-test/main/subselect_no_exists_to_in.result @@ -7369,6 +7369,17 @@ a 1 drop table t1,t2,t3; # End of 10.2 tests +# +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# set optimizer_switch=default; select @@optimizer_switch like '%exists_to_in=off%'; @@optimizer_switch like '%exists_to_in=off%' diff --git 
a/mysql-test/main/subselect_no_mat.result b/mysql-test/main/subselect_no_mat.result index 66586cf5f3a..fd3f234b4e0 100644 --- a/mysql-test/main/subselect_no_mat.result +++ b/mysql-test/main/subselect_no_mat.result @@ -7362,6 +7362,17 @@ a 1 drop table t1,t2,t3; # End of 10.2 tests +# +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# set optimizer_switch=default; select @@optimizer_switch like '%materialization=on%'; @@optimizer_switch like '%materialization=on%' diff --git a/mysql-test/main/subselect_no_opts.result b/mysql-test/main/subselect_no_opts.result index f55978a591c..dc0e690b957 100644 --- a/mysql-test/main/subselect_no_opts.result +++ b/mysql-test/main/subselect_no_opts.result @@ -7360,4 +7360,15 @@ a 1 drop table t1,t2,t3; # End of 10.2 tests +# +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# set @optimizer_switch_for_subselect_test=null; diff --git a/mysql-test/main/subselect_no_scache.result b/mysql-test/main/subselect_no_scache.result index 895a68338d8..6c8dbc40c3b 100644 --- a/mysql-test/main/subselect_no_scache.result +++ b/mysql-test/main/subselect_no_scache.result @@ -7375,6 +7375,17 @@ a 1 drop table t1,t2,t3; # End of 10.2 tests +# +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# set optimizer_switch=default; select @@optimizer_switch like '%subquery_cache=on%'; @@optimizer_switch like '%subquery_cache=on%' diff --git a/mysql-test/main/subselect_no_semijoin.result b/mysql-test/main/subselect_no_semijoin.result index 8a0e6a6c325..1731e934c6c 100644 --- a/mysql-test/main/subselect_no_semijoin.result +++ b/mysql-test/main/subselect_no_semijoin.result @@ -7361,6 +7361,17 @@ a drop table t1,t2,t3; # End of 10.2 tests # +# MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query +# +CREATE TABLE t (a VARCHAR(1)) CHARACTER SET utf8mb3; +EXECUTE IMMEDIATE "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')"; +COUNT(*) +0 +DROP TABLE t; +# +# End of 10.3 tests +# +# # MDEV-19714: JOIN::pseudo_bits_cond is not visible in EXPLAIN FORMAT=JSON # CREATE TABLE t1 ( a INT ); -- cgit v1.2.1
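A usage note on the reproducer in the last patch (a sketch, not part of the patch; stmt is an arbitrary name): EXECUTE IMMEDIATE is MariaDB's shorthand for a one-shot prepared statement, so the test's statement can equally be written with an explicit PREPARE/EXECUTE, which is the code path where the statement-arena versus runtime-arena distinction addressed by the fix comes into play.

# Equivalent explicit prepared-statement form of the EXECUTE IMMEDIATE used in the test
# (assumes the table t created by the test still exists):
PREPARE stmt FROM "SELECT COUNT(*) FROM t WHERE a < (SELECT 'x')";
EXECUTE stmt;
DEALLOCATE PREPARE stmt;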