author     Marko Mäkelä <marko.makela@mariadb.com>  2021-08-18 18:22:35 +0300
committer  Marko Mäkelä <marko.makela@mariadb.com>  2021-08-18 18:22:35 +0300
commit     4a2595727465648f2d4e794d1b2f182345f0bee8 (patch)
tree       8d4734e6c5b2795455416191ca50d5a0fbd23cd9
parent     da171182b7d79d21177d113d2bbaecbca21d8bbc (diff)
parent     f84e28c119b495da77e197f7cd18af4048fc3126 (diff)
download   mariadb-git-4a2595727465648f2d4e794d1b2f182345f0bee8.tar.gz
Merge 10.4 into 10.5
75 files changed, 1425 insertions, 400 deletions
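
Among the merged changes, the first hunk below (cmake/plugin.cmake) renames the MYSQL_ADD_PLUGIN keyword DEPENDENCIES to DEPENDS and also forwards those dependencies to the embedded target. A minimal sketch of how a plugin's CMakeLists.txt would call the macro after this rename (the plugin name, source file, and the GenMyHeaders target are hypothetical; only the keyword usage follows the macro as patched below):

    # Hypothetical plugin registration; GenMyHeaders stands in for any
    # code-generation target the plugin sources depend on.
    MYSQL_ADD_PLUGIN(example_engine ha_example.cc
      STORAGE_ENGINE MODULE_ONLY
      LINK_LIBRARIES mysys
      DEPENDS GenMyHeaders)  # was DEPENDENCIES before this merge
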
diff --git a/cmake/plugin.cmake b/cmake/plugin.cmake index 6bc1de3e52a..813d8ef6e42 100644 --- a/cmake/plugin.cmake +++ b/cmake/plugin.cmake @@ -30,13 +30,13 @@ INCLUDE(CMakeParseArguments) # [CONFIG cnf_file_name] # [VERSION version_string] # [LINK_LIBRARIES lib1...libN] -# [DEPENDENCIES target1...targetN] +# [DEPENDS target1...targetN] MACRO(MYSQL_ADD_PLUGIN) CMAKE_PARSE_ARGUMENTS(ARG "STORAGE_ENGINE;STATIC_ONLY;MODULE_ONLY;MANDATORY;DEFAULT;DISABLED;NOT_EMBEDDED;RECOMPILE_FOR_EMBEDDED;CLIENT" "MODULE_OUTPUT_NAME;STATIC_OUTPUT_NAME;COMPONENT;CONFIG;VERSION" - "LINK_LIBRARIES;DEPENDENCIES" + "LINK_LIBRARIES;DEPENDS" ${ARGN} ) IF(NOT WITHOUT_SERVER OR ARG_CLIENT) @@ -115,8 +115,8 @@ MACRO(MYSQL_ADD_PLUGIN) ENDIF() UNSET(${with_var} CACHE) - IF(NOT ARG_DEPENDENCIES) - SET(ARG_DEPENDENCIES) + IF(NOT ARG_DEPENDS) + SET(ARG_DEPENDS) ENDIF() IF(ARG_VERSION) @@ -146,7 +146,7 @@ MACRO(MYSQL_ADD_PLUGIN) ADD_LIBRARY(${target} STATIC ${SOURCES}) DTRACE_INSTRUMENT(${target}) - ADD_DEPENDENCIES(${target} GenError ${ARG_DEPENDENCIES}) + ADD_DEPENDENCIES(${target} GenError ${ARG_DEPENDS}) RESTRICT_SYMBOL_EXPORTS(${target}) IF(WITH_EMBEDDED_SERVER AND (NOT ARG_NOT_EMBEDDED)) # Embedded library should contain PIC code and be linkable @@ -160,7 +160,7 @@ MACRO(MYSQL_ADD_PLUGIN) SET_TARGET_PROPERTIES(${target}_embedded PROPERTIES COMPILE_DEFINITIONS "EMBEDDED_LIBRARY${version_string}") ENDIF() - ADD_DEPENDENCIES(${target}_embedded GenError) + ADD_DEPENDENCIES(${target}_embedded GenError ${ARG_DEPENDS}) ENDIF() ENDIF() @@ -235,7 +235,7 @@ MACRO(MYSQL_ADD_PLUGIN) TARGET_LINK_LIBRARIES (${target} "-Wl,--no-undefined") ENDIF() - ADD_DEPENDENCIES(${target} GenError ${ARG_DEPENDENCIES}) + ADD_DEPENDENCIES(${target} GenError ${ARG_DEPENDS}) SET_TARGET_PROPERTIES(${target} PROPERTIES OUTPUT_NAME "${ARG_MODULE_OUTPUT_NAME}") @@ -285,15 +285,20 @@ MACRO(MYSQL_ADD_PLUGIN) INSTALL_MYSQL_TEST("${CMAKE_CURRENT_SOURCE_DIR}/mysql-test/" "plugin/${subpath}") ENDIF() - GET_TARGET_PROPERTY(plugin_type ${target} TYPE) - STRING(REGEX REPLACE "_LIBRARY$" "" plugin_type ${plugin_type}) - STRING(REGEX REPLACE "^NO$" "" plugin_type ${plugin_type}) + IF(TARGET ${target}) + GET_TARGET_PROPERTY(plugin_type ${target} TYPE) + STRING(REPLACE "_LIBRARY" "" plugin_type ${plugin_type}) + SET(have_target 1) + ELSE() + SET(plugin_type) + SET(have_target 0) + ENDIF() IF(ARG_STORAGE_ENGINE) - ADD_FEATURE_INFO(${plugin} PLUGIN_${plugin} "Storage Engine ${plugin_type}") + ADD_FEATURE_INFO(${plugin} ${have_target} "Storage Engine ${plugin_type}") ELSEIF(ARG_CLIENT) - ADD_FEATURE_INFO(${plugin} PLUGIN_${plugin} "Client plugin ${plugin_type}") + ADD_FEATURE_INFO(${plugin} ${have_target} "Client plugin ${plugin_type}") ELSE() - ADD_FEATURE_INFO(${plugin} PLUGIN_${plugin} "Server plugin ${plugin_type}") + ADD_FEATURE_INFO(${plugin} ${have_target} "Server plugin ${plugin_type}") ENDIF() ENDIF(NOT WITHOUT_SERVER OR ARG_CLIENT) ENDMACRO() diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 801f046addd..535568f7822 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -3471,7 +3471,7 @@ next_file: if (err == ERROR_NO_MORE_FILES) { status = 1; } else { - msg("readdir_next_file in %s returned %lu", dir, err); + msg("FindNextFile in %s returned %lu", dirname, err); status = -1; } } diff --git a/mysql-test/suite/binlog/r/show_concurrent_rotate.result b/mysql-test/suite/binlog/r/show_concurrent_rotate.result index cee5de33973..b830b75eeef 100644 --- 
a/mysql-test/suite/binlog/r/show_concurrent_rotate.result +++ b/mysql-test/suite/binlog/r/show_concurrent_rotate.result @@ -2,9 +2,10 @@ connect con1,localhost,root,,; FLUSH LOGS; FLUSH LOGS; FLUSH LOGS; -SET DEBUG_SYNC= "at_after_lock_index WAIT_FOR con1_go"; +SET DEBUG_SYNC= "at_after_lock_index SIGNAL con1_ready WAIT_FOR con1_go"; SHOW BINARY LOGS; connect con2,localhost,root,,; +SET DEBUG_SYNC= "now WAIT_FOR con1_ready"; RESET MASTER; FLUSH LOGS; SET DEBUG_SYNC= "now SIGNAL con1_go"; diff --git a/mysql-test/suite/binlog/t/show_concurrent_rotate.test b/mysql-test/suite/binlog/t/show_concurrent_rotate.test index 79d36c30a86..b5758e3a883 100644 --- a/mysql-test/suite/binlog/t/show_concurrent_rotate.test +++ b/mysql-test/suite/binlog/t/show_concurrent_rotate.test @@ -8,10 +8,15 @@ FLUSH LOGS; FLUSH LOGS; FLUSH LOGS; -SET DEBUG_SYNC= "at_after_lock_index WAIT_FOR con1_go"; +# This forced synchronization pattern ensures con1 will execute its retry +# path. More specifically, con1 should see that the cache of log files it +# creates during SHOW BINARY LOGS becomes invalidated after con2 completes +# RESET MASTER. +SET DEBUG_SYNC= "at_after_lock_index SIGNAL con1_ready WAIT_FOR con1_go"; --send SHOW BINARY LOGS connect(con2,localhost,root,,); +SET DEBUG_SYNC= "now WAIT_FOR con1_ready"; RESET MASTER; FLUSH LOGS; SET DEBUG_SYNC= "now SIGNAL con1_go"; diff --git a/mysql-test/suite/encryption/r/innodb_import.result b/mysql-test/suite/encryption/r/innodb_import.result new file mode 100644 index 00000000000..169af37f404 --- /dev/null +++ b/mysql-test/suite/encryption/r/innodb_import.result @@ -0,0 +1,21 @@ +# +# MDEV-26131 SEGV in ha_innobase::discard_or_import_tablespace +# +CREATE TABLE t1(f1 int,f2 text)ENGINE=InnoDB; +INSERT INTO t1 VALUES(1, "InnoDB"); +CREATE TABLE t2 LIKE t1; +ALTER TABLE t2 ADD KEY idx (f2(13)); +ALTER TABLE t2 DISCARD TABLESPACE; +FLUSH TABLES t1 FOR EXPORT; +UNLOCK TABLES; +ALTER TABLE t2 IMPORT TABLESPACE; +ERROR HY000: Internal error: Drop all secondary indexes before importing table test/t2 when .cfg file is missing. 
+ALTER TABLE t2 DROP KEY idx; +ALTER TABLE t2 IMPORT TABLESPACE; +Warnings: +Warning 1814 Tablespace has been discarded for table `t2` +Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t2.cfg', will attempt to import without schema verification +SELECT * FROM t2; +f1 f2 +1 InnoDB +DROP TABLE t1, t2; diff --git a/mysql-test/suite/encryption/t/innodb_import.combinations b/mysql-test/suite/encryption/t/innodb_import.combinations new file mode 100644 index 00000000000..75458949582 --- /dev/null +++ b/mysql-test/suite/encryption/t/innodb_import.combinations @@ -0,0 +1,7 @@ +[page_compressed] +innodb-compression-default=1 +[encryption] +innodb-encrypt-tables=1 +[page_compressed_encryption] +innodb-compression-default=1 +innodb-encrypt-tables=1 diff --git a/mysql-test/suite/encryption/t/innodb_import.opt b/mysql-test/suite/encryption/t/innodb_import.opt new file mode 100644 index 00000000000..c44c611ed60 --- /dev/null +++ b/mysql-test/suite/encryption/t/innodb_import.opt @@ -0,0 +1 @@ +--innodb-checksum-algorithm=crc32 diff --git a/mysql-test/suite/encryption/t/innodb_import.test b/mysql-test/suite/encryption/t/innodb_import.test new file mode 100644 index 00000000000..791a1757878 --- /dev/null +++ b/mysql-test/suite/encryption/t/innodb_import.test @@ -0,0 +1,22 @@ +--source include/have_innodb.inc +-- source include/have_example_key_management_plugin.inc +--echo # +--echo # MDEV-26131 SEGV in ha_innobase::discard_or_import_tablespace +--echo # +let $MYSQLD_DATADIR = `SELECT @@datadir`; +CREATE TABLE t1(f1 int,f2 text)ENGINE=InnoDB; +INSERT INTO t1 VALUES(1, "InnoDB"); +CREATE TABLE t2 LIKE t1; +ALTER TABLE t2 ADD KEY idx (f2(13)); +ALTER TABLE t2 DISCARD TABLESPACE; +FLUSH TABLES t1 FOR EXPORT; +--copy_file $MYSQLD_DATADIR/test/t1.ibd $MYSQLD_DATADIR/test/t2.ibd +UNLOCK TABLES; +--error ER_INTERNAL_ERROR +ALTER TABLE t2 IMPORT TABLESPACE; + +ALTER TABLE t2 DROP KEY idx; +--replace_regex /opening '.*\/test\//opening '.\/test\// +ALTER TABLE t2 IMPORT TABLESPACE; +SELECT * FROM t2; +DROP TABLE t1, t2; diff --git a/mysql-test/suite/federated/federated_partition.result b/mysql-test/suite/federated/federated_partition.result index c8a61d825b6..374dba515c7 100644 --- a/mysql-test/suite/federated/federated_partition.result +++ b/mysql-test/suite/federated/federated_partition.result @@ -47,6 +47,42 @@ connection slave; drop table federated.t1_1; drop table federated.t1_2; End of 5.1 tests +# +# MDEV-18734 ASAN heap-use-after-free upon sorting by blob column from partitioned table +# +connection slave; +use federated; +create table t1_1 (x int, b text, key(x)); +create table t1_2 (x int, b text, key(x)); +connection master; +create table t1 (x int, b text, key(x)) engine=federated +partition by range columns (x) ( +partition p1 values less than (40) connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1_1', +partition pn values less than (maxvalue) connection='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1_2' +); +insert t1 values (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8); +insert t1 select x + 8, x + 8 from t1; +insert t1 select x + 16, x + 16 from t1; +insert t1 select x + 49, repeat(x + 49, 100) from t1; +flush tables; +# This produces wrong result before MDEV-17573 +select x, left(b, 10) from t1 where x > 30 and x < 60 order by b; +x left(b, 10) +31 31 +32 32 +50 5050505050 +51 5151515151 +52 5252525252 +53 5353535353 +54 5454545454 +55 5555555555 +56 5656565656 +57 5757575757 +58 5858585858 +59 5959595959 +drop table t1; +connection slave; 
+drop table t1_1, t1_2; connection master; DROP TABLE IF EXISTS federated.t1; DROP DATABASE IF EXISTS federated; diff --git a/mysql-test/suite/federated/federated_partition.test b/mysql-test/suite/federated/federated_partition.test index 47110b5eebf..42f56134279 100644 --- a/mysql-test/suite/federated/federated_partition.test +++ b/mysql-test/suite/federated/federated_partition.test @@ -51,4 +51,29 @@ drop table federated.t1_2; --echo End of 5.1 tests +--echo # +--echo # MDEV-18734 ASAN heap-use-after-free upon sorting by blob column from partitioned table +--echo # +connection slave; +use federated; +create table t1_1 (x int, b text, key(x)); +create table t1_2 (x int, b text, key(x)); +connection master; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval create table t1 (x int, b text, key(x)) engine=federated + partition by range columns (x) ( + partition p1 values less than (40) connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1_1', + partition pn values less than (maxvalue) connection='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1_2' +); +insert t1 values (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8); +insert t1 select x + 8, x + 8 from t1; +insert t1 select x + 16, x + 16 from t1; +insert t1 select x + 49, repeat(x + 49, 100) from t1; +flush tables; +--echo # This produces wrong result before MDEV-17573 +select x, left(b, 10) from t1 where x > 30 and x < 60 order by b; +drop table t1; +connection slave; +drop table t1_1, t1_2; + source include/federated_cleanup.inc; diff --git a/mysql-test/suite/galera/r/galera_as_slave_replay.result b/mysql-test/suite/galera/r/galera_as_slave_replay.result index 3c2cea19179..d81795eeed9 100644 --- a/mysql-test/suite/galera/r/galera_as_slave_replay.result +++ b/mysql-test/suite/galera/r/galera_as_slave_replay.result @@ -84,11 +84,21 @@ SET GLOBAL wsrep_provider_options = 'dbug='; SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync'; SET DEBUG_SYNC = "RESET"; connection node_2a; -set session wsrep_sync_wait=15; -SELECT COUNT(*) = 1 FROM test.t1 WHERE f2 = 'e'; -COUNT(*) = 1 -1 set session wsrep_sync_wait=0; +SELECT * from test.t1; +f1 f2 +1 a +2 b +3 e +4 d +connection node_1; +SELECT * from test.t1; +f1 f2 +1 a +2 b +3 e +4 d +connection node_2a; STOP SLAVE; RESET SLAVE; DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_as_slave_replay.test b/mysql-test/suite/galera/t/galera_as_slave_replay.test index 47f70bda721..2e8f45a047b 100644 --- a/mysql-test/suite/galera/t/galera_as_slave_replay.test +++ b/mysql-test/suite/galera/t/galera_as_slave_replay.test @@ -185,11 +185,17 @@ SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb"; SET DEBUG_SYNC = "RESET"; --connection node_2a - -set session wsrep_sync_wait=15; -SELECT COUNT(*) = 1 FROM test.t1 WHERE f2 = 'e'; set session wsrep_sync_wait=0; +--let $wait_condition = SELECT COUNT(*) = 1 FROM test.t1 where f2 = 'e' +--source include/wait_condition.inc +SELECT * from test.t1; +--connection node_1 +--let $wait_condition = SELECT COUNT(*) = 1 FROM test.t1 where f2 = 'e' +--source include/wait_condition.inc +SELECT * from test.t1; + +--connection node_2a STOP SLAVE; RESET SLAVE; diff --git a/mysql-test/suite/galera_sr/r/MDEV-25717.result b/mysql-test/suite/galera_sr/r/MDEV-25717.result new file mode 100644 index 00000000000..22f8d5eb5db --- /dev/null +++ b/mysql-test/suite/galera_sr/r/MDEV-25717.result @@ -0,0 +1,47 @@ +connection node_2; +connection node_1; +connection node_1; +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB; +INSERT INTO t1 VALUES 
(1), (2), (3); +connection node_2; +SET SESSION wsrep_trx_fragment_size = 1; +START TRANSACTION; +INSERT INTO t1 VALUES (4); +connection node_1; +SELECT COUNT(*) FROM t1; +COUNT(*) +3 +connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2; +connection node_2a; +SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_toi"; +connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connection node_1a; +SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_bf_abort"; +connection node_1; +TRUNCATE TABLE t1; +connection node_1a; +SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_bf_abort_reached"; +connection node_2a; +SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_toi_reached"; +connection node_2; +INSERT INTO t1 VALUES (5); +connection node_2a; +SET SESSION wsrep_sync_wait = 0; +SET SESSION wsrep_sync_wait = DEFAULT; +SET GLOBAL DEBUG_DBUG = ""; +SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_toi"; +connection node_2; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +connection node_1a; +SET SESSION wsrep_sync_wait=0; +SET GLOBAL DEBUG_DBUG = "+d,sync.wsrep_log_dummy_write_set"; +SET DEBUG_SYNC = "now SIGNAL signal.wsrep_bf_abort"; +SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_log_dummy_write_set_reached"; +connection node_1; +connection node_2; +SET GLOBAL DEBUG_DBUG = ""; +SET DEBUG_SYNC = "RESET"; +connection node_1; +SET GLOBAL DEBUG_DBUG = ""; +SET DEBUG_SYNC = "RESET"; +DROP TABLE t1; diff --git a/mysql-test/suite/galera_sr/t/MDEV-25717.test b/mysql-test/suite/galera_sr/t/MDEV-25717.test new file mode 100644 index 00000000000..7188f8bb172 --- /dev/null +++ b/mysql-test/suite/galera_sr/t/MDEV-25717.test @@ -0,0 +1,113 @@ +# +# MDEV-25717 Assertion `owning_thread_id_ == wsrep::this_thread::get_id()' +# +# This test exposes a race condition between rollbacker thread and rollback +# fragment processing. +# + +--source include/galera_cluster.inc +--source include/have_debug_sync.inc + +--connection node_1 +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB; +INSERT INTO t1 VALUES (1), (2), (3); + +# +# On node_2 we start a SR transaction, it going to +# be BF aborted later on +# +--connection node_2 +SET SESSION wsrep_trx_fragment_size = 1; +START TRANSACTION; +INSERT INTO t1 VALUES (4); + +--connection node_1 +SELECT COUNT(*) FROM t1; # Sync wait + +# +# Issue a conflicting TRUNCATE statement on node_1: +# - on node_2, block it before it is going to apply +# - on node_1, block before the before it BF aborts the INSERT +# +--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2 +--connection node_2a +SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_toi"; + +--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1 +--connection node_1a +SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_bf_abort"; + +--connection node_1 +--send TRUNCATE TABLE t1 + +--connection node_1a +SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_bf_abort_reached"; + +--connection node_2a +SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_toi_reached"; + +# +# Generate one more fragment on the SR transaction. +# This is going to fail certification and results +# in a rollback fragment. +# +--connection node_2 +--let $expected_cert_failures = `SELECT VARIABLE_VALUE + 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_cert_failures'` + +--send INSERT INTO t1 VALUES (5) + +# +# Wait until after certify and observe the certification +# failure. Let both continue and we are done on node_2. 
+# +--connection node_2a +SET SESSION wsrep_sync_wait = 0; +--let $wait_condition = SELECT VARIABLE_VALUE = $expected_cert_failures FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_cert_failures' +--source include/wait_condition.inc +SET SESSION wsrep_sync_wait = DEFAULT; + +SET GLOBAL DEBUG_DBUG = ""; +SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_toi"; + +--connection node_2 +--error ER_LOCK_DEADLOCK +--reap + +# +# On node_1 we expect the following things: +# - the TRUNCATE should successfully bf abort the transaction +# - A rollback fragment should be delivered as a result of +# certification failure. We expect the rollback fragment to +# be delivered after TRUNCATE has bf aborted, therefore rollback +# fragment logs a dummy writeset. +# +--connection node_1a +SET SESSION wsrep_sync_wait=0; +SET GLOBAL DEBUG_DBUG = "+d,sync.wsrep_log_dummy_write_set"; + +# Signal the TRUNCATE to continue and observe the BF abort +--let $expected_bf_aborts = `SELECT VARIABLE_VALUE + 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'` +SET DEBUG_SYNC = "now SIGNAL signal.wsrep_bf_abort"; + +# Expect a timeout if bug is present +--let $wait_condition = SELECT VARIABLE_VALUE = $expected_bf_aborts FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts' +--source include/wait_condition.inc + +# Observe logging of dummy writeset +SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_log_dummy_write_set_reached"; + +# TRUNCATE succeeds +--connection node_1 +--reap + +# +# Cleanup +# +--connection node_2 +SET GLOBAL DEBUG_DBUG = ""; +SET DEBUG_SYNC = "RESET"; + +--connection node_1 +SET GLOBAL DEBUG_DBUG = ""; +SET DEBUG_SYNC = "RESET"; +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/full_crc32_import.result b/mysql-test/suite/innodb/r/full_crc32_import.result index 06da5716aae..6fec6107f2c 100644 --- a/mysql-test/suite/innodb/r/full_crc32_import.result +++ b/mysql-test/suite/innodb/r/full_crc32_import.result @@ -36,19 +36,21 @@ db.opt t1.frm restore: t1 .ibd and .cfg files ALTER TABLE t1 IMPORT TABLESPACE; +ERROR HY000: Internal error: Drop all secondary indexes before importing table test/t1 when .cfg file is missing. +ALTER TABLE t1 DROP INDEX b; +ALTER TABLE t1 IMPORT TABLESPACE; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) NOT NULL AUTO_INCREMENT, `b` blob DEFAULT NULL, `c` blob DEFAULT NULL, - PRIMARY KEY (`a`), - KEY `b` (`b`(200)) + PRIMARY KEY (`a`) ) ENGINE=InnoDB AUTO_INCREMENT=46 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC UPDATE t1 set b = repeat("de", 100) where b = repeat("cd", 200); explain SELECT a FROM t1 where b = repeat("de", 100); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref b b 203 const # Using where +1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where SELECT a FROM t1 where b = repeat("de", 100); a 3 @@ -112,14 +114,19 @@ ALTER TABLE t1 ROW_FORMAT=DYNAMIC; ALTER TABLE t1 DISCARD TABLESPACE; restore: t1 .ibd and .cfg files ALTER TABLE t1 IMPORT TABLESPACE; +ERROR HY000: Internal error: Drop all secondary indexes before importing table test/t1 when .cfg file is missing. 
+ALTER TABLE t1 DROP INDEX idx1; +ALTER TABLE t1 IMPORT TABLESPACE; +Warnings: +Warning 1814 Tablespace has been discarded for table `t1` +Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t1.cfg', will attempt to import without schema verification SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` int(11) NOT NULL AUTO_INCREMENT, `c2` point NOT NULL, `c3` linestring NOT NULL, - PRIMARY KEY (`c1`), - SPATIAL KEY `idx1` (`c2`) + PRIMARY KEY (`c1`) ) ENGINE=InnoDB AUTO_INCREMENT=14325 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC UPDATE t1 SET C2 = ST_GeomFromText('POINT(0 0)'); SELECT COUNT(*) FROM t1; diff --git a/mysql-test/suite/innodb/r/implicit_gap_lock_convertion.result b/mysql-test/suite/innodb/r/implicit_gap_lock_convertion.result new file mode 100644 index 00000000000..fd197324c3e --- /dev/null +++ b/mysql-test/suite/innodb/r/implicit_gap_lock_convertion.result @@ -0,0 +1,17 @@ +CREATE TABLE t(a INT UNSIGNED PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t VALUES (10), (30); +connect con1,localhost,root,,; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +BEGIN; +INSERT INTO t VALUES (20); +SELECT * FROM t WHERE a BETWEEN 10 AND 30; +a +10 +20 +30 +connection default; +SET session innodb_lock_wait_timeout=1; +INSERT INTO t VALUES (15); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +disconnect con1; +DROP TABLE t; diff --git a/mysql-test/suite/innodb/r/import_corrupted.result b/mysql-test/suite/innodb/r/import_corrupted.result new file mode 100644 index 00000000000..fe431e62eef --- /dev/null +++ b/mysql-test/suite/innodb/r/import_corrupted.result @@ -0,0 +1,30 @@ +call mtr.add_suppression("Table `test`.`t2` should have 2 indexes but the tablespace has 1 indexes"); +call mtr.add_suppression("Index for table 't2' is corrupt; try to repair it"); +call mtr.add_suppression("Trying to read .* bytes at .* outside the bounds of the file: ./test/t2.ibd"); +CREATE TABLE t1 ( +id INT AUTO_INCREMENT PRIMARY KEY, +not_id INT, +data CHAR(255), +data2 BLOB +) ENGINE=INNODB; +ALTER TABLE t1 MODIFY not_id INT UNIQUE KEY; +connect purge_control,localhost,root,,; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connection default; +DELETE FROM t1 WHERE id % 2 = 1; +FLUSH TABLES t1 FOR EXPORT; +UNLOCK TABLES; +connection purge_control; +COMMIT; +connection default; +DROP TABLE t1; +CREATE TABLE t2 ( +id INT AUTO_INCREMENT PRIMARY KEY, +not_id INT UNIQUE KEY, +data CHAR(255), +data2 BLOB +) ENGINE=INNODB; +ALTER TABLE t2 DISCARD TABLESPACE; +ALTER TABLE t2 IMPORT TABLESPACE; +ERROR HY000: Index for table 't2' is corrupt; try to repair it +DROP TABLE t2; diff --git a/mysql-test/suite/innodb/r/innodb_information_schema.result b/mysql-test/suite/innodb/r/innodb_information_schema.result index 70458758437..6325917c236 100644 --- a/mysql-test/suite/innodb/r/innodb_information_schema.result +++ b/mysql-test/suite/innodb/r/innodb_information_schema.result @@ -45,7 +45,7 @@ trx_last_foreign_key_error varchar(256) YES NULL trx_is_read_only int(1) NO 0 trx_autocommit_non_locking int(1) NO 0 trx_state trx_weight trx_tables_in_use trx_tables_locked trx_rows_locked trx_rows_modified trx_concurrency_tickets trx_isolation_level trx_unique_checks trx_foreign_key_checks -RUNNING 3 0 1 5 1 0 REPEATABLE READ 1 1 +RUNNING 3 0 1 6 1 0 REPEATABLE READ 1 1 trx_isolation_level trx_unique_checks trx_foreign_key_checks SERIALIZABLE 0 0 trx_state trx_isolation_level trx_last_foreign_key_error diff --git a/mysql-test/suite/innodb/t/full_crc32_import.test 
b/mysql-test/suite/innodb/t/full_crc32_import.test index aa4db3f9bf7..1034282d992 100644 --- a/mysql-test/suite/innodb/t/full_crc32_import.test +++ b/mysql-test/suite/innodb/t/full_crc32_import.test @@ -53,6 +53,9 @@ ib_restore_tablespaces("test", "t1"); EOF --remove_file $MYSQLD_DATADIR/test/t1.cfg +--error ER_INTERNAL_ERROR +ALTER TABLE t1 IMPORT TABLESPACE; +ALTER TABLE t1 DROP INDEX b; --disable_warnings ALTER TABLE t1 IMPORT TABLESPACE; --enable_warnings @@ -131,9 +134,12 @@ ib_restore_tablespaces("test", "t1"); EOF --remove_file $MYSQLD_DATADIR/test/t1.cfg ---disable_warnings +--error ER_INTERNAL_ERROR ALTER TABLE t1 IMPORT TABLESPACE; --enable_warnings +ALTER TABLE t1 DROP INDEX idx1; +ALTER TABLE t1 IMPORT TABLESPACE; +--disable_warnings SHOW CREATE TABLE t1; UPDATE t1 SET C2 = ST_GeomFromText('POINT(0 0)'); SELECT COUNT(*) FROM t1; diff --git a/mysql-test/suite/innodb/t/implicit_gap_lock_convertion.test b/mysql-test/suite/innodb/t/implicit_gap_lock_convertion.test new file mode 100644 index 00000000000..bf2d09ffb2e --- /dev/null +++ b/mysql-test/suite/innodb/t/implicit_gap_lock_convertion.test @@ -0,0 +1,21 @@ +--source include/have_innodb.inc +--source include/count_sessions.inc + +CREATE TABLE t(a INT UNSIGNED PRIMARY KEY) ENGINE=InnoDB; + +INSERT INTO t VALUES (10), (30); + +--connect (con1,localhost,root,,) +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +BEGIN; +INSERT INTO t VALUES (20); +SELECT * FROM t WHERE a BETWEEN 10 AND 30; + +--connection default +SET session innodb_lock_wait_timeout=1; +--error ER_LOCK_WAIT_TIMEOUT +INSERT INTO t VALUES (15); + +--disconnect con1 +DROP TABLE t; +--source include/wait_until_count_sessions.inc diff --git a/mysql-test/suite/innodb/t/import_corrupted.test b/mysql-test/suite/innodb/t/import_corrupted.test new file mode 100644 index 00000000000..fcdb03b4601 --- /dev/null +++ b/mysql-test/suite/innodb/t/import_corrupted.test @@ -0,0 +1,68 @@ +--source include/have_innodb.inc + +call mtr.add_suppression("Table `test`.`t2` should have 2 indexes but the tablespace has 1 indexes"); +call mtr.add_suppression("Index for table 't2' is corrupt; try to repair it"); +call mtr.add_suppression("Trying to read .* bytes at .* outside the bounds of the file: ./test/t2.ibd"); + +let MYSQLD_DATADIR = `SELECT @@datadir`; + +CREATE TABLE t1 ( + id INT AUTO_INCREMENT PRIMARY KEY, + not_id INT, + data CHAR(255), + data2 BLOB +) ENGINE=INNODB; + +--disable_query_log +--let i = 0 +while ($i != 1000) { + eval INSERT INTO t1 VALUES (DEFAULT, $i, REPEAT('b', 255), REPEAT('a', 5000)); + --inc $i +} +--enable_query_log + +ALTER TABLE t1 MODIFY not_id INT UNIQUE KEY; + +connect (purge_control,localhost,root,,); +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connection default; + +DELETE FROM t1 WHERE id % 2 = 1; + +FLUSH TABLES t1 FOR EXPORT; + +--copy_file $MYSQLD_DATADIR/test/t1.ibd $MYSQLD_DATADIR/test/tmp.ibd +--copy_file $MYSQLD_DATADIR/test/t1.cfg $MYSQLD_DATADIR/test/tmp.cfg + +perl; +use strict; +die unless open(FILE, "+<$ENV{MYSQLD_DATADIR}/test/tmp.ibd"); +die unless truncate(FILE, 16384*23); +close(FILE); +EOF + +UNLOCK TABLES; +connection purge_control; +COMMIT; +connection default; +DROP TABLE t1; + +CREATE TABLE t2 ( + id INT AUTO_INCREMENT PRIMARY KEY, + not_id INT UNIQUE KEY, + data CHAR(255), + data2 BLOB +) ENGINE=INNODB; + +ALTER TABLE t2 DISCARD TABLESPACE; + +--copy_file $MYSQLD_DATADIR/test/tmp.ibd $MYSQLD_DATADIR/test/t2.ibd +--copy_file $MYSQLD_DATADIR/test/tmp.cfg $MYSQLD_DATADIR/test/t2.cfg + +--error ER_NOT_KEYFILE +ALTER TABLE t2 IMPORT 
TABLESPACE; + +DROP TABLE t2; + +--remove_file $MYSQLD_DATADIR/test/tmp.ibd +--remove_file $MYSQLD_DATADIR/test/tmp.cfg diff --git a/mysql-test/suite/innodb_gis/r/alter_spatial_index.result b/mysql-test/suite/innodb_gis/r/alter_spatial_index.result index 52420da2409..caacde7fecb 100644 --- a/mysql-test/suite/innodb_gis/r/alter_spatial_index.result +++ b/mysql-test/suite/innodb_gis/r/alter_spatial_index.result @@ -252,6 +252,16 @@ UNLOCK TABLES; ALTER TABLE tab DISCARD TABLESPACE; SELECT c1,ST_Astext(c2),ST_Astext(c4) FROM tab; ERROR HY000: Tablespace has been discarded for table `tab` +ERROR HY000: Internal error: Drop all secondary indexes before importing table test/tab when .cfg file is missing. +Table Create Table +tab CREATE TABLE `tab` ( + `c1` int(11) NOT NULL, + `c2` point NOT NULL, + `c3` linestring NOT NULL, + `c4` polygon NOT NULL, + `c5` geometry NOT NULL, + PRIMARY KEY (`c2`(25)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 CHECK TABLE tab; Table Op Msg_type Msg_text test.tab check status OK @@ -282,9 +292,6 @@ INSERT INTO tab SELECT * FROM tab1; ALTER TABLE tab DROP PRIMARY KEY; affected rows: 1 info: Records: 1 Duplicates: 0 Warnings: 0 -ALTER TABLE tab DROP INDEX idx2; -affected rows: 0 -info: Records: 0 Duplicates: 0 Warnings: 0 SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR CREATE TEMPORARY TABLE temp_tab AS SELECT * FROM tab where c1 = c2; ERROR HY000: Illegal parameter data types int and point for operation '=' @@ -325,18 +332,10 @@ tab CREATE TABLE `tab` ( `c2` point NOT NULL, `c3` linestring NOT NULL, `c4` polygon NOT NULL, - `c5` geometry NOT NULL, - SPATIAL KEY `idx3` (`c3`), - SPATIAL KEY `idx4` (`c4`) COMMENT 'testing spatial index on Polygon', - SPATIAL KEY `idx5` (`c5`) COMMENT 'testing spatial index on Geometry', - KEY `idx6` (`c4`(10)) USING BTREE + `c5` geometry NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 SHOW INDEX FROM tab; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment -tab 1 idx3 1 c3 A # 32 NULL SPATIAL -tab 1 idx4 1 c4 A # 32 NULL SPATIAL testing spatial index on Polygon -tab 1 idx5 1 c5 A # 32 NULL SPATIAL testing spatial index on Geometry -tab 1 idx6 1 c4 A # 10 NULL BTREE DELETE FROM tab; ALTER TABLE tab ADD PRIMARY KEY(c2); affected rows: 0 @@ -357,20 +356,12 @@ tab CREATE TABLE `tab` ( `c5` geometry NOT NULL, PRIMARY KEY (`c2`(25)), UNIQUE KEY `const_1` (`c2`(25)), - SPATIAL KEY `idx3` (`c3`), - SPATIAL KEY `idx4` (`c4`) COMMENT 'testing spatial index on Polygon', - SPATIAL KEY `idx5` (`c5`) COMMENT 'testing spatial index on Geometry', - KEY `idx6` (`c4`(10)) USING BTREE, SPATIAL KEY `idx2` (`c2`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 SHOW INDEX FROM tab; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment tab 0 PRIMARY 1 c2 A # 25 NULL BTREE tab 0 const_1 1 c2 A # 25 NULL BTREE -tab 1 idx3 1 c3 A # 32 NULL SPATIAL -tab 1 idx4 1 c4 A # 32 NULL SPATIAL testing spatial index on Polygon -tab 1 idx5 1 c5 A # 32 NULL SPATIAL testing spatial index on Geometry -tab 1 idx6 1 c4 A # 10 NULL BTREE tab 1 idx2 1 c2 A # 32 NULL SPATIAL INSERT INTO tab(c1,c2,c3,c4,c5) VALUES(1,ST_GeomFromText('POINT(10 10)'),ST_GeomFromText('LINESTRING(5 5,20 20,30 30)'), @@ -399,20 +390,12 @@ tab CREATE TABLE `tab` ( `c5` geometry NOT NULL, PRIMARY KEY (`c5`(10)), UNIQUE KEY `const_1` (`c5`(10)), - SPATIAL KEY `idx3` (`c3`), - SPATIAL KEY `idx4` (`c4`) COMMENT 'testing spatial index on Polygon', - SPATIAL KEY `idx5` 
(`c5`) COMMENT 'testing spatial index on Geometry', - KEY `idx6` (`c4`(10)) USING BTREE, SPATIAL KEY `idx2` (`c2`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 SHOW INDEX FROM tab; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment tab 0 PRIMARY 1 c5 A # 10 NULL BTREE tab 0 const_1 1 c5 A # 10 NULL BTREE -tab 1 idx3 1 c3 A # 32 NULL SPATIAL -tab 1 idx4 1 c4 A # 32 NULL SPATIAL testing spatial index on Polygon -tab 1 idx5 1 c5 A # 32 NULL SPATIAL testing spatial index on Geometry -tab 1 idx6 1 c4 A # 10 NULL BTREE tab 1 idx2 1 c2 A # 32 NULL SPATIAL INSERT INTO tab(c1,c2,c3,c4,c5) VALUES(1,ST_GeomFromText('POINT(10 10)'),ST_GeomFromText('LINESTRING(5 5,20 20,30 30)'), diff --git a/mysql-test/suite/innodb_gis/t/alter_spatial_index.test b/mysql-test/suite/innodb_gis/t/alter_spatial_index.test index b7eac432fcc..de1d301245f 100644 --- a/mysql-test/suite/innodb_gis/t/alter_spatial_index.test +++ b/mysql-test/suite/innodb_gis/t/alter_spatial_index.test @@ -277,8 +277,17 @@ SELECT c1,ST_Astext(c2),ST_Astext(c4) FROM tab; --disable_query_log +--error ER_INTERNAL_ERROR ALTER TABLE tab IMPORT TABLESPACE; +ALTER TABLE tab DROP INDEX idx2; +ALTER TABLE tab DROP INDEX idx3; +ALTER TABLE tab DROP INDEX idx4; +ALTER TABLE tab DROP INDEX idx5; +ALTER TABLE tab DROP INDEX idx6; + +SHOW CREATE TABLE tab; +ALTER TABLE tab IMPORT TABLESPACE; --enable_query_log CHECK TABLE tab; @@ -308,7 +317,6 @@ INSERT INTO tab SELECT * FROM tab1; --enable_info ALTER TABLE tab DROP PRIMARY KEY; -ALTER TABLE tab DROP INDEX idx2; --disable_info # Check spatial index on temp tables diff --git a/mysql-test/suite/vcol/r/partition.result b/mysql-test/suite/vcol/r/partition.result index bd1353fa145..d7c5052b72a 100644 --- a/mysql-test/suite/vcol/r/partition.result +++ b/mysql-test/suite/vcol/r/partition.result @@ -28,3 +28,76 @@ set statement sql_mode= '' for update t1 set i= 1, v= 2; Warnings: Warning 1906 The value specified for generated column 'v' in table 't1' has been ignored drop table t1; +# +# MDEV-18734 ASAN heap-use-after-free in my_strnxfrm_simple_internal upon update on versioned partitioned table +# +# Cover queue_fix() in ha_partition::handle_ordered_index_scan() +create or replace table t1 ( +x int auto_increment primary key, +b text, v mediumtext as (b) virtual, +index (v(10)) +) partition by range columns (x) ( +partition p1 values less than (3), +partition p2 values less than (6), +partition p3 values less than (9), +partition p4 values less than (12), +partition p5 values less than (15), +partition p6 values less than (17), +partition p7 values less than (19), +partition p8 values less than (21), +partition p9 values less than (23), +partition p10 values less than (25), +partition p11 values less than (27), +partition p12 values less than (29), +partition p13 values less than (31), +partition p14 values less than (33), +partition p15 values less than (35), +partition pn values less than (maxvalue)); +insert into t1 (b) values +(repeat('q', 8192)), (repeat('z', 8192)), (repeat('a', 8192)), (repeat('b', 8192)), +(repeat('x', 8192)), (repeat('y', 8192)); +insert t1 (b) select b from t1; +insert t1 (b) select b from t1; +insert t1 (b) select b from t1; +insert t1 (b) select b from t1; +select x, left(b, 10), left(v, 10) from t1 where x > 30 and x < 60 order by v; +x left(b, 10) left(v, 10) +33 aaaaaaaaaa aaaaaaaaaa +39 aaaaaaaaaa aaaaaaaaaa +45 aaaaaaaaaa aaaaaaaaaa +51 aaaaaaaaaa aaaaaaaaaa +57 aaaaaaaaaa aaaaaaaaaa +34 bbbbbbbbbb bbbbbbbbbb +40 
bbbbbbbbbb bbbbbbbbbb +46 bbbbbbbbbb bbbbbbbbbb +52 bbbbbbbbbb bbbbbbbbbb +58 bbbbbbbbbb bbbbbbbbbb +31 qqqqqqqqqq qqqqqqqqqq +37 qqqqqqqqqq qqqqqqqqqq +43 qqqqqqqqqq qqqqqqqqqq +49 qqqqqqqqqq qqqqqqqqqq +55 qqqqqqqqqq qqqqqqqqqq +35 xxxxxxxxxx xxxxxxxxxx +41 xxxxxxxxxx xxxxxxxxxx +47 xxxxxxxxxx xxxxxxxxxx +53 xxxxxxxxxx xxxxxxxxxx +59 xxxxxxxxxx xxxxxxxxxx +36 yyyyyyyyyy yyyyyyyyyy +42 yyyyyyyyyy yyyyyyyyyy +48 yyyyyyyyyy yyyyyyyyyy +54 yyyyyyyyyy yyyyyyyyyy +32 zzzzzzzzzz zzzzzzzzzz +38 zzzzzzzzzz zzzzzzzzzz +44 zzzzzzzzzz zzzzzzzzzz +50 zzzzzzzzzz zzzzzzzzzz +56 zzzzzzzzzz zzzzzzzzzz +update t1 set b= 'bar' where v > 'a' limit 20; +drop table t1; +# Cover return_top_record() in ha_partition::handle_ordered_index_scan() +create table t1 (x int primary key, b tinytext, v text as (b) virtual) +partition by range columns (x) ( +partition p1 values less than (4), +partition pn values less than (maxvalue)); +insert into t1 (x, b) values (1, ''), (2, ''), (3, 'a'), (4, 'b'); +update t1 set b= 'bar' where x > 0 order by v limit 2; +drop table t1; diff --git a/mysql-test/suite/vcol/t/partition.test b/mysql-test/suite/vcol/t/partition.test index 889724fb1c5..408990b20a6 100644 --- a/mysql-test/suite/vcol/t/partition.test +++ b/mysql-test/suite/vcol/t/partition.test @@ -30,3 +30,51 @@ subpartition by hash(v) subpartitions 3 ( insert t1 set i= 0; set statement sql_mode= '' for update t1 set i= 1, v= 2; drop table t1; + +--echo # +--echo # MDEV-18734 ASAN heap-use-after-free in my_strnxfrm_simple_internal upon update on versioned partitioned table +--echo # +--echo # Cover queue_fix() in ha_partition::handle_ordered_index_scan() +create or replace table t1 ( + x int auto_increment primary key, + b text, v mediumtext as (b) virtual, + index (v(10)) +) partition by range columns (x) ( + partition p1 values less than (3), + partition p2 values less than (6), + partition p3 values less than (9), + partition p4 values less than (12), + partition p5 values less than (15), + partition p6 values less than (17), + partition p7 values less than (19), + partition p8 values less than (21), + partition p9 values less than (23), + partition p10 values less than (25), + partition p11 values less than (27), + partition p12 values less than (29), + partition p13 values less than (31), + partition p14 values less than (33), + partition p15 values less than (35), + partition pn values less than (maxvalue)); +insert into t1 (b) values +(repeat('q', 8192)), (repeat('z', 8192)), (repeat('a', 8192)), (repeat('b', 8192)), +(repeat('x', 8192)), (repeat('y', 8192)); + +insert t1 (b) select b from t1; +insert t1 (b) select b from t1; +insert t1 (b) select b from t1; +insert t1 (b) select b from t1; + +select x, left(b, 10), left(v, 10) from t1 where x > 30 and x < 60 order by v; +update t1 set b= 'bar' where v > 'a' limit 20; + +drop table t1; + +--echo # Cover return_top_record() in ha_partition::handle_ordered_index_scan() +create table t1 (x int primary key, b tinytext, v text as (b) virtual) +partition by range columns (x) ( + partition p1 values less than (4), + partition pn values less than (maxvalue)); +insert into t1 (x, b) values (1, ''), (2, ''), (3, 'a'), (4, 'b'); +update t1 set b= 'bar' where x > 0 order by v limit 2; +drop table t1; diff --git a/mysql-test/suite/versioning/r/foreign.result b/mysql-test/suite/versioning/r/foreign.result index 07e133e1eeb..3c8508507fd 100644 --- a/mysql-test/suite/versioning/r/foreign.result +++ b/mysql-test/suite/versioning/r/foreign.result @@ -446,6 +446,19 @@ pk f1 f2 left(f3, 
4) check_row_ts(row_start, row_end) 2 8 8 LONG HISTORICAL ROW drop table t1; # +# MDEV-21555 Assertion secondary index is out of sync on delete from versioned table +# +create table t1 (a int, b int as (a + 1) virtual, key(a)) engine=innodb with system versioning; +set foreign_key_checks= off; +insert into t1 (a) values (1), (2); +alter table t1 add foreign key (b) references t1 (a), algorithm=copy; +update t1 set a= null where a = 1; +delete from t1 where a is null; +set foreign_key_checks= on; +delete history from t1; +delete from t1; +drop table t1; +# # MDEV-20729 Fix REFERENCES constraint in column definition # create or replace table t1( diff --git a/mysql-test/suite/versioning/t/foreign.test b/mysql-test/suite/versioning/t/foreign.test index ed2ed4dd122..441491e461f 100644 --- a/mysql-test/suite/versioning/t/foreign.test +++ b/mysql-test/suite/versioning/t/foreign.test @@ -477,6 +477,24 @@ select pk, f1, f2, left(f3, 4), check_row_ts(row_start, row_end) from t1 for sys drop table t1; --echo # +--echo # MDEV-21555 Assertion secondary index is out of sync on delete from versioned table +--echo # +create table t1 (a int, b int as (a + 1) virtual, key(a)) engine=innodb with system versioning; + +set foreign_key_checks= off; +insert into t1 (a) values (1), (2); +alter table t1 add foreign key (b) references t1 (a), algorithm=copy; +update t1 set a= null where a = 1; +delete from t1 where a is null; +set foreign_key_checks= on; + +delete history from t1; +delete from t1; + +# cleanup +drop table t1; + +--echo # --echo # MDEV-20729 Fix REFERENCES constraint in column definition --echo # create or replace table t1( diff --git a/scripts/wsrep_sst_common.sh b/scripts/wsrep_sst_common.sh index 562f9dc3aac..67244a7c622 100644 --- a/scripts/wsrep_sst_common.sh +++ b/scripts/wsrep_sst_common.sh @@ -1010,7 +1010,13 @@ check_port() lsof -Pnl -i ":$port" 2>/dev/null | \ grep -q -E "^($utils)[^[:space:]]*[[:space:]]+$pid[[:space:]].*\\(LISTEN\\)" && rc=0 elif [ $sockstat_available -ne 0 ]; then - sockstat -p "$port" 2>/dev/null | \ + local opts='-p' + if [ "$OS" = 'FreeBSD' ]; then + # sockstat on FreeBSD requires the "-s" option + # to display the connection state: + opts='-sp' + fi + sockstat "$opts" "$port" 2>/dev/null | \ grep -q -E "[[:space:]]+($utils)[^[:space:]]*[[:space:]]+$pid[[:space:]].*[[:space:]]LISTEN" && rc=0 elif [ $ss_available -ne 0 ]; then ss -nlpH "( sport = :$port )" 2>/dev/null | \ diff --git a/scripts/wsrep_sst_mariabackup.sh b/scripts/wsrep_sst_mariabackup.sh index 46804c9dce4..562b9b929f2 100644 --- a/scripts/wsrep_sst_mariabackup.sh +++ b/scripts/wsrep_sst_mariabackup.sh @@ -166,7 +166,8 @@ get_keys() fi if [ -z "$ekey" -a ! 
-r "$ekeyfile" ]; then - wsrep_log_error "FATAL: Either key or keyfile must be readable" + wsrep_log_error "FATAL: Either key must be specified " \ + "or keyfile must be readable" exit 3 fi @@ -448,9 +449,30 @@ encgroups='--mysqld|sst|xtrabackup' check_server_ssl_config() { - tcert=$(parse_cnf "$encgroups" 'ssl-ca') - tpem=$(parse_cnf "$encgroups" 'ssl-cert') - tkey=$(parse_cnf "$encgroups" 'ssl-key') + # backward-compatible behavior: + tcert=$(parse_cnf 'sst' 'tca') + tpem=$(parse_cnf 'sst' 'tcert') + tkey=$(parse_cnf 'sst' 'tkey') + # reading new ssl configuration options: + local tcert2=$(parse_cnf "$encgroups" 'ssl-ca') + local tpem2=$(parse_cnf "$encgroups" 'ssl-cert') + local tkey2=$(parse_cnf "$encgroups" 'ssl-key') + # if there are no old options, then we take new ones: + if [ -z "$tcert" -a -z "$tpem" -a -z "$tkey" ]; then + tcert="$tcert2" + tpem="$tpem2" + tkey="$tkey2" + # checking for presence of the new-style SSL configuration: + elif [ -n "$tcert2" -o -n "$tpem2" -o -n "$tkey2" ]; then + if [ "$tcert" != "$tcert2" -o \ + "$tpem" != "$tpem2" -o \ + "$tkey" != "$tkey2" ] + then + wsrep_log_info "new ssl configuration options (ssl-ca, ssl-cert " \ + "and ssl-key) are ignored by SST due to presence " \ + "of the tca, tcert and/or tkey in the [sst] section" + fi + fi } read_cnf() @@ -463,18 +485,10 @@ read_cnf() if [ $encrypt -eq 0 -o $encrypt -ge 2 ] then - if [ "$tmode" != 'DISABLED' -o $encrypt -ge 2 ] - then - tcert=$(parse_cnf 'sst' 'tca') - tpem=$(parse_cnf 'sst' 'tcert') - tkey=$(parse_cnf 'sst' 'tkey') + if [ "$tmode" != 'DISABLED' -o $encrypt -ge 2 ]; then + check_server_ssl_config fi if [ "$tmode" != 'DISABLED' ]; then - # backward-incompatible behavior - if [ -z "$tpem" -a -z "$tkey" -a -z "$tcert" ]; then - # no old-style SSL config in [sst] - check_server_ssl_config - fi if [ 0 -eq $encrypt -a -n "$tpem" -a -n "$tkey" ] then encrypt=3 # enable cert/key SSL encyption @@ -489,7 +503,11 @@ read_cnf() ealgo=$(parse_cnf "$encgroups" 'encrypt-algo') eformat=$(parse_cnf "$encgroups" 'encrypt-format' 'openssl') ekey=$(parse_cnf "$encgroups" 'encrypt-key') - ekeyfile=$(parse_cnf "$encgroups" 'encrypt-key-file') + # The keyfile should be read only when the key + # is not specified or empty: + if [ -z "$ekey" ]; then + ekeyfile=$(parse_cnf "$encgroups" 'encrypt-key-file') + fi fi wsrep_log_info "SSL configuration: CA='$tcert', CERT='$tpem'," \ diff --git a/scripts/wsrep_sst_rsync.sh b/scripts/wsrep_sst_rsync.sh index fc9f5017937..d90e87b68f2 100644 --- a/scripts/wsrep_sst_rsync.sh +++ b/scripts/wsrep_sst_rsync.sh @@ -93,7 +93,15 @@ check_pid_and_port() else local filter='([^[:space:]]+[[:space:]]+){4}[^[:space:]]+' if [ $sockstat_available -eq 1 ]; then - port_info=$(sockstat -p "$port" 2>/dev/null | \ + local opts='-p' + if [ "$OS" = 'FreeBSD' ]; then + # sockstat on FreeBSD requires the "-s" option + # to display the connection state: + opts='-sp' + # in addition, sockstat produces an additional column: + filter='([^[:space:]]+[[:space:]]+){5}[^[:space:]]+' + fi + port_info=$(sockstat "$opts" "$port" 2>/dev/null | \ grep -E '[[:space:]]LISTEN' | grep -o -E "$filter") else port_info=$(ss -nlpH "( sport = :$port )" 2>/dev/null | \ @@ -388,7 +396,7 @@ EOF # Use deltaxfer only for WAN inv=$(basename "$0") WHOLE_FILE_OPT="" - if [ "${inv%wsrep_sst_rsync_wan*}" != "$inv" ]; then + if [ "${inv%wsrep_sst_rsync_wan*}" = "$inv" ]; then WHOLE_FILE_OPT="--whole-file" fi diff --git a/sql/field.cc b/sql/field.cc index 71ca27c5ad0..08bde5f58e1 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ 
-1,6 +1,6 @@ /* Copyright (c) 2000, 2017, Oracle and/or its affiliates. - Copyright (c) 2008, 2020, MariaDB + Copyright (c) 2008, 2021, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -8612,6 +8612,7 @@ int Field_blob::store(const char *from,size_t length,CHARSET_INFO *cs) rc= well_formed_copy_with_check((char*) value.ptr(), (uint) new_length, cs, from, length, length, true, ©_len); + value.length(copy_len); Field_blob::store_length(copy_len); bmove(ptr+packlength,(uchar*) &tmp,sizeof(char*)); diff --git a/sql/field.h b/sql/field.h index 9fe4db3b521..6ada7f94507 100644 --- a/sql/field.h +++ b/sql/field.h @@ -4554,7 +4554,13 @@ public: uchar *new_ptr, uint32 length, uchar *new_null_ptr, uint new_null_bit) override; void sql_type(String &str) const override; - inline bool copy() + /** + Copy blob buffer into internal storage "value" and update record pointer. + + @retval true Memory allocation error + @retval false Success + */ + bool copy() { uchar *tmp= get_ptr(); if (value.copy((char*) tmp, get_length(), charset())) @@ -4566,6 +4572,33 @@ public: memcpy(ptr+packlength, &tmp, sizeof(char*)); return 0; } + void swap(String &inout, bool set_read_value) + { + if (set_read_value) + read_value.swap(inout); + else + value.swap(inout); + } + /** + Return pointer to blob cache or NULL if not cached. + */ + String * cached(bool *set_read_value) + { + char *tmp= (char *) get_ptr(); + if (!value.is_empty() && tmp == value.ptr()) + { + *set_read_value= false; + return &value; + } + + if (!read_value.is_empty() && tmp == read_value.ptr()) + { + *set_read_value= true; + return &read_value; + } + + return NULL; + } /* store value for the duration of the current read record */ inline void swap_value_and_read_value() { diff --git a/sql/filesort.cc b/sql/filesort.cc index 4eea588007e..766415f58fb 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -713,6 +713,15 @@ const char* dbug_print_table_row(TABLE *table) } +const char* dbug_print_row(TABLE *table, uchar *rec) +{ + table->move_fields(table->field, rec, table->record[0]); + const char* ret= dbug_print_table_row(table); + table->move_fields(table->field, table->record[0], rec); + return ret; +} + + /* Print a text, SQL-like record representation into dbug trace. diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 2c60a98de33..73e9f8ee84f 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -5417,59 +5417,69 @@ bool ha_partition::init_record_priority_queue() /* Initialize the ordered record buffer. */ - if (!m_ordered_rec_buffer) - { - size_t alloc_len; - uint used_parts= bitmap_bits_set(&m_part_info->read_partitions); - - if (used_parts == 0) /* Do nothing since no records expected. */ - DBUG_RETURN(false); + size_t alloc_len; + uint used_parts= bitmap_bits_set(&m_part_info->read_partitions); - /* Allocate record buffer for each used partition. */ - m_priority_queue_rec_len= m_rec_length + PARTITION_BYTES_IN_POS; - if (!m_using_extended_keys) - m_priority_queue_rec_len += get_open_file_sample()->ref_length; - alloc_len= used_parts * m_priority_queue_rec_len; - /* Allocate a key for temporary use when setting up the scan. */ - alloc_len+= table_share->max_key_length; + if (used_parts == 0) /* Do nothing since no records expected. */ + DBUG_RETURN(false); - if (!(m_ordered_rec_buffer= (uchar*)my_malloc(key_memory_partition_sort_buffer, - alloc_len, MYF(MY_WME)))) - DBUG_RETURN(true); + /* Allocate record buffer for each used partition. 
*/ + m_priority_queue_rec_len= m_rec_length + ORDERED_REC_OFFSET; + if (!m_using_extended_keys) + m_priority_queue_rec_len+= get_open_file_sample()->ref_length; + alloc_len= used_parts * m_priority_queue_rec_len; + /* Allocate a key for temporary use when setting up the scan. */ + alloc_len+= table_share->max_key_length; + Ordered_blob_storage **blob_storage; + Ordered_blob_storage *objs; + const size_t n_all= used_parts * table->s->blob_fields; + + if (!my_multi_malloc(key_memory_partition_sort_buffer, MYF(MY_WME), + &m_ordered_rec_buffer, alloc_len, + &blob_storage, n_all * sizeof *blob_storage, + &objs, n_all * sizeof *objs, NULL)) + DBUG_RETURN(true); - /* - We set-up one record per partition and each record has 2 bytes in - front where the partition id is written. This is used by ordered - index_read. - We also set-up a reference to the first record for temporary use in - setting up the scan. - */ - char *ptr= (char*) m_ordered_rec_buffer; - uint i; - for (i= bitmap_get_first_set(&m_part_info->read_partitions); - i < m_tot_parts; - i= bitmap_get_next_set(&m_part_info->read_partitions, i)) + /* + We set-up one record per partition and each record has 2 bytes in + front where the partition id is written. This is used by ordered + index_read. + We also set-up a reference to the first record for temporary use in + setting up the scan. + */ + char *ptr= (char*) m_ordered_rec_buffer; + uint i; + for (i= bitmap_get_first_set(&m_part_info->read_partitions); + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->read_partitions, i)) + { + DBUG_PRINT("info", ("init rec-buf for part %u", i)); + if (table->s->blob_fields) { - DBUG_PRINT("info", ("init rec-buf for part %u", i)); - int2store(ptr, i); - ptr+= m_priority_queue_rec_len; + for (uint j= 0; j < table->s->blob_fields; ++j, ++objs) + blob_storage[j]= new (objs) Ordered_blob_storage; + *((Ordered_blob_storage ***) ptr)= blob_storage; + blob_storage+= table->s->blob_fields; } - m_start_key.key= (const uchar*)ptr; + int2store(ptr + sizeof(String **), i); + ptr+= m_priority_queue_rec_len; + } + m_start_key.key= (const uchar*)ptr; - /* Initialize priority queue, initialized to reading forward. */ - int (*cmp_func)(void *, uchar *, uchar *); - void *cmp_arg= (void*) this; - if (!m_using_extended_keys && !(table_flags() & HA_SLOW_CMP_REF)) - cmp_func= cmp_key_rowid_part_id; - else - cmp_func= cmp_key_part_id; - DBUG_PRINT("info", ("partition queue_init(1) used_parts: %u", used_parts)); - if (init_queue(&m_queue, used_parts, 0, 0, cmp_func, cmp_arg, 0, 0)) - { - my_free(m_ordered_rec_buffer); - m_ordered_rec_buffer= NULL; - DBUG_RETURN(true); - } + /* Initialize priority queue, initialized to reading forward. 
*/ + int (*cmp_func)(void *, uchar *, uchar *); + void *cmp_arg= (void*) this; + if (!m_using_extended_keys && !(table_flags() & HA_SLOW_CMP_REF)) + cmp_func= cmp_key_rowid_part_id; + else + cmp_func= cmp_key_part_id; + DBUG_PRINT("info", ("partition queue_init(1) used_parts: %u", used_parts)); + if (init_queue(&m_queue, used_parts, ORDERED_PART_NUM_OFFSET, + 0, cmp_func, cmp_arg, 0, 0)) + { + my_free(m_ordered_rec_buffer); + m_ordered_rec_buffer= NULL; + DBUG_RETURN(true); } DBUG_RETURN(false); } @@ -5484,6 +5494,20 @@ void ha_partition::destroy_record_priority_queue() DBUG_ENTER("ha_partition::destroy_record_priority_queue"); if (m_ordered_rec_buffer) { + if (table->s->blob_fields) + { + char *ptr= (char *) m_ordered_rec_buffer; + for (uint i= bitmap_get_first_set(&m_part_info->read_partitions); + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->read_partitions, i)) + { + Ordered_blob_storage **blob_storage= *((Ordered_blob_storage ***) ptr); + for (uint b= 0; b < table->s->blob_fields; ++b) + blob_storage[b]->blob.free(); + ptr+= m_priority_queue_rec_len; + } + } + delete_queue(&m_queue); my_free(m_ordered_rec_buffer); m_ordered_rec_buffer= NULL; @@ -5711,12 +5735,10 @@ static int cmp_part_ids(uchar *ref1, uchar *ref2) extern "C" int cmp_key_part_id(void *ptr, uchar *ref1, uchar *ref2) { ha_partition *file= (ha_partition*)ptr; - int res; - if ((res= key_rec_cmp(file->m_curr_key_info, ref1 + PARTITION_BYTES_IN_POS, - ref2 + PARTITION_BYTES_IN_POS))) - { + if (int res= key_rec_cmp(file->m_curr_key_info, + ref1 + PARTITION_BYTES_IN_POS, + ref2 + PARTITION_BYTES_IN_POS)) return res; - } return cmp_part_ids(ref1, ref2); } @@ -6964,6 +6986,48 @@ int ha_partition::pre_ft_end() } +void ha_partition::swap_blobs(uchar * rec_buf, Ordered_blob_storage ** storage, bool restore) +{ + uint *ptr, *end; + uint blob_n= 0; + table->move_fields(table->field, rec_buf, table->record[0]); + for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields; + ptr != end; ++ptr, ++blob_n) + { + DBUG_ASSERT(*ptr < table->s->fields); + Field_blob *blob= (Field_blob*) table->field[*ptr]; + DBUG_ASSERT(blob->flags & BLOB_FLAG); + DBUG_ASSERT(blob->field_index == *ptr); + if (!bitmap_is_set(table->read_set, *ptr) || blob->is_null()) + continue; + + Ordered_blob_storage &s= *storage[blob_n]; + + if (restore) + { + /* + We protect only blob cache (value or read_value). If the cache was + empty that doesn't mean the blob was empty. Blobs allocated by a + storage engine should work just fine. + */ + if (!s.blob.is_empty()) + blob->swap(s.blob, s.set_read_value); + } + else + { + bool set_read_value; + String *cached= blob->cached(&set_read_value); + if (cached) + { + cached->swap(s.blob); + s.set_read_value= set_read_value; + } + } + } + table->move_fields(table->field, table->record[0], rec_buf); +} + + /** Initialize a full text search using the extended API. 
@@ -7671,8 +7735,8 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) { DBUG_PRINT("info", ("reading from part %u (scan_type: %u)", i, m_index_scan_type)); - DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr)); - uchar *rec_buf_ptr= part_rec_buf_ptr + PARTITION_BYTES_IN_POS; + DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr + ORDERED_PART_NUM_OFFSET)); + uchar *rec_buf_ptr= part_rec_buf_ptr + ORDERED_REC_OFFSET; handler *file= m_file[i]; switch (m_index_scan_type) { @@ -7752,6 +7816,12 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) Initialize queue without order first, simply insert */ queue_element(&m_queue, j++)= part_rec_buf_ptr; + if (table->s->blob_fields) + { + Ordered_blob_storage **storage= + *((Ordered_blob_storage ***) part_rec_buf_ptr); + swap_blobs(rec_buf_ptr, storage, false); + } } else if (error == HA_ERR_KEY_NOT_FOUND) { @@ -7794,7 +7864,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) DBUG_PRINT("info", ("partition !bitmap_is_set(&m_mrr_used_partitions, i)")); continue; } - DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr)); + DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr + ORDERED_PART_NUM_OFFSET)); if (smallest_range_seq == m_stock_range_seq[i]) { m_stock_range_seq[i]= 0; @@ -7841,12 +7911,17 @@ void ha_partition::return_top_record(uchar *buf) { uint part_id; uchar *key_buffer= queue_top(&m_queue); - uchar *rec_buffer= key_buffer + PARTITION_BYTES_IN_POS; + uchar *rec_buffer= key_buffer + ORDERED_REC_OFFSET; DBUG_ENTER("ha_partition::return_top_record"); DBUG_PRINT("enter", ("partition this: %p", this)); - part_id= uint2korr(key_buffer); + part_id= uint2korr(key_buffer + ORDERED_PART_NUM_OFFSET); memcpy(buf, rec_buffer, m_rec_length); + if (table->s->blob_fields) + { + Ordered_blob_storage **storage= *((Ordered_blob_storage ***) key_buffer); + swap_blobs(buf, storage, true); + } m_last_part= part_id; DBUG_PRINT("info", ("partition m_last_part: %u", m_last_part)); m_top_entry= part_id; @@ -7898,7 +7973,7 @@ int ha_partition::handle_ordered_index_scan_key_not_found() This partition is used and did return HA_ERR_KEY_NOT_FOUND in index_read_map. */ - curr_rec_buf= part_buf + PARTITION_BYTES_IN_POS; + curr_rec_buf= part_buf + ORDERED_REC_OFFSET; error= m_file[i]->ha_index_next(curr_rec_buf); /* HA_ERR_KEY_NOT_FOUND is not allowed from index_next! 
*/ DBUG_ASSERT(error != HA_ERR_KEY_NOT_FOUND); @@ -7949,7 +8024,8 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same) DBUG_RETURN(HA_ERR_END_OF_FILE); uint part_id= m_top_entry; - uchar *rec_buf= queue_top(&m_queue) + PARTITION_BYTES_IN_POS; + uchar *part_rec_buf_ptr= queue_top(&m_queue); + uchar *rec_buf= part_rec_buf_ptr + ORDERED_REC_OFFSET; handler *file; if (m_key_not_found) @@ -7991,7 +8067,16 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same) if (m_index_scan_type == partition_read_range) { error= file->read_range_next(); - memcpy(rec_buf, table->record[0], m_rec_length); + if (likely(!error)) + { + memcpy(rec_buf, table->record[0], m_rec_length); + if (table->s->blob_fields) + { + Ordered_blob_storage **storage= + *((Ordered_blob_storage ***) part_rec_buf_ptr); + swap_blobs(rec_buf, storage, false); + } + } } else if (m_index_scan_type == partition_read_multi_range) { @@ -8028,6 +8113,11 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same) DBUG_PRINT("info", ("m_mrr_range_current->id: %u", m_mrr_range_current->id)); memcpy(rec_buf, table->record[0], m_rec_length); + if (table->s->blob_fields) + { + Ordered_blob_storage **storage= *((Ordered_blob_storage ***) part_rec_buf_ptr); + swap_blobs(rec_buf, storage, false); + } if (((PARTITION_KEY_MULTI_RANGE *) m_range_info[part_id])->id != m_mrr_range_current->id) { @@ -8078,9 +8168,8 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same) DBUG_PRINT("info",("partition !bitmap_is_set(&m_mrr_used_partitions, i)")); continue; } - DBUG_PRINT("info",("partition uint2korr: %u", - uint2korr(part_rec_buf_ptr))); - DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr)); + DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr + + ORDERED_PART_NUM_OFFSET)); DBUG_PRINT("info", ("partition m_stock_range_seq[%u]: %u", i, m_stock_range_seq[i])); if (smallest_range_seq == m_stock_range_seq[i]) @@ -8169,7 +8258,7 @@ int ha_partition::handle_ordered_prev(uchar *buf) DBUG_RETURN(HA_ERR_END_OF_FILE); uint part_id= m_top_entry; - uchar *rec_buf= queue_top(&m_queue) + PARTITION_BYTES_IN_POS; + uchar *rec_buf= queue_top(&m_queue) + ORDERED_REC_OFFSET; handler *file= m_file[part_id]; if (unlikely((error= file->ha_index_prev(rec_buf)))) diff --git a/sql/ha_partition.h b/sql/ha_partition.h index b77e20f3e9c..c082274b1be 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -21,8 +21,19 @@ #include "sql_partition.h" /* part_id_range, partition_element */ #include "queues.h" /* QUEUE */ -#define PARTITION_BYTES_IN_POS 2 +struct Ordered_blob_storage +{ + String blob; + bool set_read_value; + Ordered_blob_storage() : set_read_value(false) + {} +}; + #define PAR_EXT ".par" +#define PARTITION_BYTES_IN_POS 2 +#define ORDERED_PART_NUM_OFFSET sizeof(Ordered_blob_storage **) +#define ORDERED_REC_OFFSET (ORDERED_PART_NUM_OFFSET + PARTITION_BYTES_IN_POS) + /** Struct used for partition_name_hash */ typedef struct st_part_name_def @@ -933,6 +944,7 @@ private: int handle_ordered_next(uchar * buf, bool next_same); int handle_ordered_prev(uchar * buf); void return_top_record(uchar * buf); + void swap_blobs(uchar* rec_buf, Ordered_blob_storage ** storage, bool restore); public: /* ------------------------------------------------------------------------- diff --git a/sql/item_jsonfunc.h b/sql/item_jsonfunc.h index 0026fce21b2..d5c385b81fe 100644 --- a/sql/item_jsonfunc.h +++ b/sql/item_jsonfunc.h @@ -444,7 +444,7 @@ public: const char *func_name() const { return mode_insert ? - (mode_replace ? 
"json_set" : "json_insert") : "json_update"; + (mode_replace ? "json_set" : "json_insert") : "json_replace"; } Item *get_copy(THD *thd) { return get_item_copy<Item_func_json_insert>(thd, this); } diff --git a/sql/log.cc b/sql/log.cc index d9a271c56bd..3cfe6bbfd37 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -3392,7 +3392,7 @@ MYSQL_BIN_LOG::MYSQL_BIN_LOG(uint *sync_period) checksum_alg_reset(BINLOG_CHECKSUM_ALG_UNDEF), relay_log_checksum_alg(BINLOG_CHECKSUM_ALG_UNDEF), description_event_for_exec(0), description_event_for_queue(0), - current_binlog_id(0) + current_binlog_id(0), reset_master_count(0) { /* We don't want to initialize locks here as such initialization depends on @@ -4485,6 +4485,7 @@ err: } mysql_cond_broadcast(&COND_xid_list); reset_master_pending--; + reset_master_count++; mysql_mutex_unlock(&LOCK_xid_list); } diff --git a/sql/log.h b/sql/log.h index eaf7cde1c07..e7df904d49e 100644 --- a/sql/log.h +++ b/sql/log.h @@ -675,6 +675,11 @@ public: my_off_t last_commit_pos_offset; ulong current_binlog_id; + /* + Tracks the number of times that the master has been reset + */ + Atomic_counter<uint64> reset_master_count; + MYSQL_BIN_LOG(uint *sync_period); /* note that there's no destructor ~MYSQL_BIN_LOG() ! @@ -888,6 +893,7 @@ public: inline mysql_mutex_t* get_log_lock() { return &LOCK_log; } inline mysql_cond_t* get_bin_log_cond() { return &COND_bin_log_updated; } inline IO_CACHE* get_log_file() { return &log_file; } + inline uint64 get_reset_master_count() { return reset_master_count; } inline void lock_index() { mysql_mutex_lock(&LOCK_index);} inline void unlock_index() { mysql_mutex_unlock(&LOCK_index);} diff --git a/sql/sql_audit.h b/sql/sql_audit.h index 40276c86a78..64500067699 100644 --- a/sql/sql_audit.h +++ b/sql/sql_audit.h @@ -155,7 +155,7 @@ void mysql_audit_general(THD *thd, uint event_subtype, DBUG_ENTER("mysql_audit_general"); if (mysql_audit_general_enabled()) { - char user_buff[MAX_USER_HOST_SIZE]; + char user_buff[MAX_USER_HOST_SIZE+1]; mysql_event_general event; event.event_subclass= event_subtype; diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 37580568de5..df241afc838 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -5403,8 +5403,8 @@ extern "C" bool thd_is_strict_mode(const MYSQL_THD thd) */ void thd_get_query_start_data(THD *thd, char *buf) { - LEX_CSTRING field_name; - Field_timestampf f((uchar *)buf, NULL, 0, Field::NONE, &field_name, NULL, 6); + Field_timestampf f((uchar *)buf, nullptr, 0, Field::NONE, &empty_clex_str, + nullptr, 6); f.store_TIME(thd->query_start(), thd->query_start_sec_part()); } diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 5a39b380855..03dcd825de7 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -3536,7 +3536,7 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num) prepared statement */ Query_arena *arena= thd->stmt_arena; - const uint n_elems= (n_sum_items + + const size_t n_elems= (n_sum_items + n_child_sum_items + item_list.elements + select_n_reserved + @@ -3544,7 +3544,8 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num) select_n_where_fields + order_group_num + hidden_bit_fields + - fields_in_window_functions) * 5; + fields_in_window_functions) * (size_t) 5; + DBUG_ASSERT(n_elems % 5 == 0); if (!ref_pointer_array.is_null()) { /* diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index b1a5a3e7199..a6fb17eaa74 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -3445,6 +3445,11 @@ static void mysql_stmt_execute_common(THD *thd, stmt_id == LAST_STMT_ID, read_types)) 
{ my_error(ER_MALFORMED_PACKET, MYF(0)); + /* + Let's set the thd->query_string so the audit plugin + can report the executed query that failed. + */ + thd->set_query_inner(stmt->query_string); DBUG_VOID_RETURN; } diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index ff2faca5ecf..c49ea7d5908 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -4375,6 +4375,7 @@ bool show_binlogs(THD* thd) Protocol *protocol= thd->protocol; uint retry_count= 0; size_t cur_dir_len; + uint64 expected_reset_masters; DBUG_ENTER("show_binlogs"); if (!mysql_bin_log.is_open()) @@ -4399,6 +4400,7 @@ retry: mysql_mutex_lock(mysql_bin_log.get_log_lock()); mysql_bin_log.lock_index(); mysql_bin_log.raw_get_current_log(&cur); + expected_reset_masters= mysql_bin_log.get_reset_master_count(); mysql_mutex_unlock(mysql_bin_log.get_log_lock()); /* The following call unlocks lock_index */ @@ -4419,6 +4421,16 @@ retry: cur_link->name.str+= dir_len; cur_link->name.length-= dir_len; + if (mysql_bin_log.get_reset_master_count() > expected_reset_masters) + { + /* + Reset master was called after we cached filenames. + Reinitialize the cache. + */ + free_root(&mem_root, MYF(MY_MARK_BLOCKS_FREE)); + goto retry; + } + if (!(strncmp(fname+dir_len, cur.log_file_name+cur_dir_len, length))) cur_link->size= cur.pos; /* The active log, use the active position */ else diff --git a/sql/sql_view.cc b/sql/sql_view.cc index b7f64389f02..a95e52a72a0 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -845,7 +845,7 @@ int mariadb_fix_view(THD *thd, TABLE_LIST *view, bool wrong_checksum, if ((view->md5.str= (char *)thd->alloc(32 + 1)) == NULL) DBUG_RETURN(HA_ADMIN_FAILED); } - view->calc_md5(view->md5.str); + view->calc_md5(const_cast<char*>(view->md5.str)); view->md5.length= 32; } view->mariadb_version= MYSQL_VERSION_ID; diff --git a/sql/table.cc b/sql/table.cc index 2197fa9dd7b..91a94238514 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -5532,12 +5532,12 @@ void TABLE::reset_item_list(List<Item> *item_list, uint skip) const buffer buffer for md5 writing */ -void TABLE_LIST::calc_md5(const char *buffer) +void TABLE_LIST::calc_md5(char *buffer) { uchar digest[16]; compute_md5_hash(digest, select_stmt.str, select_stmt.length); - sprintf((char *) buffer, + sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", digest[0], digest[1], digest[2], digest[3], digest[4], digest[5], digest[6], digest[7], diff --git a/sql/table.h b/sql/table.h index d063a3f27be..31b25e7de61 100644 --- a/sql/table.h +++ b/sql/table.h @@ -2625,7 +2625,7 @@ struct TABLE_LIST List<String> *partition_names; #endif /* WITH_PARTITION_STORAGE_ENGINE */ - void calc_md5(const char *buffer); + void calc_md5(char *buffer); int view_check_option(THD *thd, bool ignore_failure); bool create_field_translation(THD *thd); bool setup_underlying(THD *thd); diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt index b7e7fb93bf3..1329cd95117 100644 --- a/storage/innobase/CMakeLists.txt +++ b/storage/innobase/CMakeLists.txt @@ -1,6 +1,6 @@ # Copyright (c) 2006, 2017, Oracle and/or its affiliates. All rights reserved. -# Copyright (c) 2014, 2020, MariaDB Corporation. +# Copyright (c) 2014, 2021, MariaDB Corporation. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc index de87ad02e68..4c832eb77e8 100644 --- a/storage/innobase/btr/btr0btr.cc +++ b/storage/innobase/btr/btr0btr.cc @@ -3144,8 +3144,8 @@ func_exit: @param[in] block page to remove @param[in] index index tree @param[in,out] mtr mini-transaction */ -void btr_level_list_remove(const buf_block_t& block, const dict_index_t& index, - mtr_t* mtr) +dberr_t btr_level_list_remove(const buf_block_t& block, + const dict_index_t& index, mtr_t* mtr) { ut_ad(mtr->memo_contains_flagged(&block, MTR_MEMO_PAGE_X_FIX)); ut_ad(block.zip_size() == index.table->space->zip_size()); @@ -3177,6 +3177,10 @@ void btr_level_list_remove(const buf_block_t& block, const dict_index_t& index, buf_block_t* next_block = btr_block_get( index, next_page_no, RW_X_LATCH, page_is_leaf(page), mtr); + + if (!next_block) { + return DB_ERROR; + } #ifdef UNIV_BTR_DEBUG ut_a(page_is_comp(next_block->frame) == page_is_comp(page)); static_assert(FIL_PAGE_PREV % 4 == 0, "alignment"); @@ -3187,6 +3191,8 @@ void btr_level_list_remove(const buf_block_t& block, const dict_index_t& index, btr_page_set_prev(next_block, prev_page_no, mtr); } + + return DB_SUCCESS; } /*************************************************************//** @@ -3565,7 +3571,9 @@ retry: btr_search_drop_page_hash_index(block); /* Remove the page from the level list */ - btr_level_list_remove(*block, *index, mtr); + if (DB_SUCCESS != btr_level_list_remove(*block, *index, mtr)) { + goto err_exit; + } if (dict_index_is_spatial(index)) { rec_t* my_rec = father_cursor.page_cur.rec; @@ -3692,7 +3700,9 @@ retry: #endif /* UNIV_BTR_DEBUG */ /* Remove the page from the level list */ - btr_level_list_remove(*block, *index, mtr); + if (DB_SUCCESS != btr_level_list_remove(*block, *index, mtr)) { + goto err_exit; + } ut_ad(btr_node_ptr_get_child_page_no( btr_cur_get_rec(&father_cursor), offsets) @@ -4073,7 +4083,7 @@ btr_discard_page( } /* Remove the page from the level list */ - btr_level_list_remove(*block, *index, mtr); + ut_a(DB_SUCCESS == btr_level_list_remove(*block, *index, mtr)); #ifdef UNIV_ZIP_DEBUG { diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index 5bb2a0e22d3..a3351800231 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -330,10 +330,12 @@ btr_cur_latch_leaves( true, mtr); latch_leaves.blocks[2] = get_block; #ifdef UNIV_BTR_DEBUG - ut_a(page_is_comp(get_block->frame) - == page_is_comp(block->frame)); - ut_a(btr_page_get_prev(get_block->frame) - == block->page.id().page_no()); + if (get_block) { + ut_a(page_is_comp(get_block->frame) + == page_is_comp(block->frame)); + ut_a(btr_page_get_prev(get_block->frame) + == block->page.id().page_no()); + } #endif /* UNIV_BTR_DEBUG */ if (spatial) { cursor->rtr_info->tree_blocks[ diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc index ebe9854b5dc..d8c9cadb2b2 100644 --- a/storage/innobase/btr/btr0defragment.cc +++ b/storage/innobase/btr/btr0defragment.cc @@ -529,7 +529,8 @@ btr_defragment_merge_pages( lock_update_merge_left(to_block, orig_pred, from_block); btr_search_drop_page_hash_index(from_block); - btr_level_list_remove(*from_block, *index, mtr); + ut_a(DB_SUCCESS == btr_level_list_remove(*from_block, *index, + mtr)); btr_page_get_father(index, from_block, mtr, &parent); 
btr_cur_node_ptr_delete(&parent, mtr); /* btr_blob_dbg_remove(from_page, index, diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index b658bdfc540..f2f443dd4b0 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -711,8 +711,7 @@ bool buf_is_zeroes(span<const byte> buf) /** Check if a page is corrupt. @param[in] check_lsn whether the LSN should be checked @param[in] read_buf database page -@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0 -@param[in] space tablespace +@param[in] fsp_flags tablespace flags @return whether the page is corrupted */ bool buf_page_is_corrupted( @@ -3008,6 +3007,10 @@ lookup: } } + if (local_err == DB_IO_ERROR) { + return NULL; + } + ib::fatal() << "Unable to read page " << page_id << " into the buffer pool after " << BUF_PAGE_READ_MAX_RETRIES diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index 253a2542760..42b3f674f81 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -345,7 +345,8 @@ nothing_read: *err= fio.err; if (UNIV_UNLIKELY(fio.err != DB_SUCCESS)) { - if (!sync || fio.err == DB_TABLESPACE_DELETED) { + if (!sync || fio.err == DB_TABLESPACE_DELETED + || fio.err == DB_IO_ERROR) { buf_pool.corrupted_evict(bpage); return false; } diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index a2591dd9172..1f3f73f3f8e 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -3216,17 +3216,17 @@ func_exit: /*============================ FILE I/O ================================*/ /** Report information about an invalid page access. */ -ATTRIBUTE_COLD __attribute__((noreturn)) -static void -fil_report_invalid_page_access(const char *name, - os_offset_t offset, ulint len, bool is_read) +ATTRIBUTE_COLD +static std::string fil_invalid_page_access_msg(const char *name, + os_offset_t offset, ulint len, + bool is_read) { - ib::fatal() << "Trying to " << (is_read ? "read " : "write ") << len - << " bytes at " << offset - << " outside the bounds of the file: " << name; + std::stringstream ss; + ss << "Trying to " << (is_read ? "read " : "write ") << len << " bytes at " + << offset << " outside the bounds of the file: " << name; + return ss.str(); } - /** Update the data structures on write completion */ inline void fil_node_t::complete_write() { @@ -3294,9 +3294,9 @@ fil_io_t fil_space_t::io(const IORequest &type, os_offset_t offset, size_t len, release(); return {DB_ERROR, nullptr}; } - fil_report_invalid_page_access(name, offset, - len, - type.is_read()); + ib::fatal() + << fil_invalid_page_access_msg(name, + offset, len, type.is_read()); } } @@ -3312,7 +3312,16 @@ fil_io_t fil_space_t::io(const IORequest &type, os_offset_t offset, size_t len, return {DB_ERROR, nullptr}; } - fil_report_invalid_page_access( + if (node->space->purpose == FIL_TYPE_IMPORT) { + release(); + ib::error() << fil_invalid_page_access_msg( + node->name, offset, len, type.is_read()); + + + return {DB_IO_ERROR, nullptr}; + } + + ib::fatal() << fil_invalid_page_access_msg( node->name, offset, len, type.is_read()); } diff --git a/storage/innobase/fil/fil0pagecompress.cc b/storage/innobase/fil/fil0pagecompress.cc index 909e8092f99..25b039aa9f1 100644 --- a/storage/innobase/fil/fil0pagecompress.cc +++ b/storage/innobase/fil/fil0pagecompress.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (C) 2013, 2020, MariaDB Corporation. 
+Copyright (C) 2013, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -597,6 +597,7 @@ ulint fil_page_decompress_for_non_full_crc32( /** Decompress a page that may be subject to page_compressed compression. @param[in,out] tmp_buf temporary buffer (of innodb_page_size) @param[in,out] buf possibly compressed page buffer +@param[in] flags tablespace flags @return size of the compressed data @retval 0 if decompression failed @retval srv_page_size if the page was not compressed */ diff --git a/storage/innobase/fts/fts0config.cc b/storage/innobase/fts/fts0config.cc index 9e2b40911ae..f95159dc5b7 100644 --- a/storage/innobase/fts/fts0config.cc +++ b/storage/innobase/fts/fts0config.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -97,7 +97,7 @@ fts_config_get_value( fts_table->suffix = "CONFIG"; fts_get_table_name(fts_table, table_name); - pars_info_bind_id(info, true, "table_name", table_name); + pars_info_bind_id(info, "table_name", table_name); graph = fts_parse_sql( fts_table, @@ -217,7 +217,7 @@ fts_config_set_value( fts_table->suffix = "CONFIG"; fts_get_table_name(fts_table, table_name, dict_locked); - pars_info_bind_id(info, true, "table_name", table_name); + pars_info_bind_id(info, "table_name", table_name); graph = fts_parse_sql( fts_table, info, @@ -245,7 +245,7 @@ fts_config_set_value( info, "value", value->f_str, value->f_len); fts_get_table_name(fts_table, table_name, dict_locked); - pars_info_bind_id(info, true, "table_name", table_name); + pars_info_bind_id(info, "table_name", table_name); graph = fts_parse_sql( fts_table, info, diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index 96ad0570052..ff99a2d7e2a 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -483,7 +483,7 @@ cleanup: pars_info_t* info = pars_info_create(); - pars_info_bind_id(info, TRUE, "table_stopword", stopword_table_name); + pars_info_bind_id(info, "table_stopword", stopword_table_name); pars_info_bind_function(info, "my_func", fts_read_stopword, stopword_info); @@ -1899,7 +1899,7 @@ fts_create_common_tables( fts_table.suffix = "CONFIG"; fts_get_table_name(&fts_table, fts_name, true); - pars_info_bind_id(info, true, "config_table", fts_name); + pars_info_bind_id(info, "config_table", fts_name); graph = fts_parse_sql_no_dict_lock( info, fts_config_table_insert_values_sql); @@ -2627,7 +2627,7 @@ retry: info, "my_func", fts_fetch_store_doc_id, doc_id); fts_get_table_name(&fts_table, table_name); - pars_info_bind_id(info, true, "config_table", table_name); + pars_info_bind_id(info, "config_table", table_name); graph = fts_parse_sql( &fts_table, info, @@ -2755,7 +2755,7 @@ fts_update_sync_doc_id( fts_get_table_name(&fts_table, fts_name, table->fts->dict_locked); - pars_info_bind_id(info, true, "table_name", fts_name); + pars_info_bind_id(info, "table_name", fts_name); graph = fts_parse_sql( &fts_table, info, @@ -2898,7 +2898,7 @@ fts_delete( fts_table.suffix = "DELETED"; fts_get_table_name(&fts_table, table_name); - pars_info_bind_id(info, true, 
"deleted", table_name); + pars_info_bind_id(info, "deleted", table_name); graph = fts_parse_sql( &fts_table, @@ -3704,7 +3704,7 @@ fts_doc_fetch_by_doc_id( pars_info_bind_function(info, "my_func", callback, arg); select_str = fts_get_select_columns_str(index, info, info->heap); - pars_info_bind_id(info, TRUE, "table_name", index->table->name.m_name); + pars_info_bind_id(info, "table_name", index->table->name.m_name); if (!get_doc || !get_doc->get_document_graph) { if (option == FTS_FETCH_DOC_BY_ID_EQUAL) { @@ -3811,7 +3811,7 @@ fts_write_node( info = pars_info_create(); fts_get_table_name(fts_table, table_name); - pars_info_bind_id(info, true, "index_table_name", table_name); + pars_info_bind_id(info, "index_table_name", table_name); } pars_info_bind_varchar_literal(info, "token", word->f_str, word->f_len); @@ -3886,7 +3886,7 @@ fts_sync_add_deleted_cache( &fts_table, "DELETED_CACHE", FTS_COMMON_TABLE, sync->table); fts_get_table_name(&fts_table, table_name); - pars_info_bind_id(info, true, "table_name", table_name); + pars_info_bind_id(info, "table_name", table_name); graph = fts_parse_sql( &fts_table, @@ -4883,7 +4883,7 @@ fts_get_rows_count( pars_info_bind_function(info, "my_func", fts_read_ulint, &count); fts_get_table_name(fts_table, table_name); - pars_info_bind_id(info, true, "table_name", table_name); + pars_info_bind_id(info, "table_name", table_name); graph = fts_parse_sql( fts_table, diff --git a/storage/innobase/fts/fts0opt.cc b/storage/innobase/fts/fts0opt.cc index df10cf63f36..52a7428f941 100644 --- a/storage/innobase/fts/fts0opt.cc +++ b/storage/innobase/fts/fts0opt.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2018, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2016, 2020, MariaDB Corporation. +Copyright (c) 2016, 2021, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -498,7 +498,7 @@ fts_index_fetch_nodes( fts_get_table_name(fts_table, table_name); - pars_info_bind_id(info, true, "table_name", table_name); + pars_info_bind_id(info, "table_name", table_name); } pars_info_bind_function(info, "my_func", fetch->read_record, fetch); @@ -827,7 +827,7 @@ fts_index_fetch_words( info, "word", word->f_str, word->f_len); fts_get_table_name(&optim->fts_index_table, table_name); - pars_info_bind_id(info, true, "table_name", table_name); + pars_info_bind_id(info, "table_name", table_name); graph = fts_parse_sql( &optim->fts_index_table, @@ -983,7 +983,7 @@ fts_table_fetch_doc_ids( pars_info_bind_function(info, "my_func", fts_fetch_doc_ids, doc_ids); fts_get_table_name(fts_table, table_name); - pars_info_bind_id(info, true, "table_name", table_name); + pars_info_bind_id(info, "table_name", table_name); graph = fts_parse_sql( fts_table, @@ -1447,7 +1447,7 @@ fts_optimize_write_word( fts_table->suffix = fts_get_suffix(selected); fts_get_table_name(fts_table, table_name); - pars_info_bind_id(info, true, "table_name", table_name); + pars_info_bind_id(info, "table_name", table_name); graph = fts_parse_sql( fts_table, @@ -2039,11 +2039,11 @@ fts_optimize_purge_deleted_doc_ids( used in the fts_delete_doc_ids_sql */ optim->fts_common_table.suffix = fts_common_tables[3]; fts_get_table_name(&optim->fts_common_table, deleted); - pars_info_bind_id(info, true, fts_common_tables[3], deleted); + pars_info_bind_id(info, fts_common_tables[3], deleted); optim->fts_common_table.suffix = fts_common_tables[4]; fts_get_table_name(&optim->fts_common_table, deleted_cache); - pars_info_bind_id(info, true, fts_common_tables[4], deleted_cache); + pars_info_bind_id(info, fts_common_tables[4], deleted_cache); graph = fts_parse_sql(NULL, info, fts_delete_doc_ids_sql); @@ -2096,12 +2096,11 @@ fts_optimize_purge_deleted_doc_id_snapshot( used in the fts_end_delete_sql */ optim->fts_common_table.suffix = fts_common_tables[0]; fts_get_table_name(&optim->fts_common_table, being_deleted); - pars_info_bind_id(info, true, fts_common_tables[0], being_deleted); + pars_info_bind_id(info, fts_common_tables[0], being_deleted); optim->fts_common_table.suffix = fts_common_tables[1]; fts_get_table_name(&optim->fts_common_table, being_deleted_cache); - pars_info_bind_id(info, true, fts_common_tables[1], - being_deleted_cache); + pars_info_bind_id(info, fts_common_tables[1], being_deleted_cache); /* Delete the doc ids that were copied to delete pending state at the start of optimize. 
*/ @@ -2157,20 +2156,19 @@ fts_optimize_create_deleted_doc_id_snapshot( used in the fts_init_delete_sql */ optim->fts_common_table.suffix = fts_common_tables[0]; fts_get_table_name(&optim->fts_common_table, being_deleted); - pars_info_bind_id(info, true, fts_common_tables[0], being_deleted); + pars_info_bind_id(info, fts_common_tables[0], being_deleted); optim->fts_common_table.suffix = fts_common_tables[3]; fts_get_table_name(&optim->fts_common_table, deleted); - pars_info_bind_id(info, true, fts_common_tables[3], deleted); + pars_info_bind_id(info, fts_common_tables[3], deleted); optim->fts_common_table.suffix = fts_common_tables[1]; fts_get_table_name(&optim->fts_common_table, being_deleted_cache); - pars_info_bind_id(info, true, fts_common_tables[1], - being_deleted_cache); + pars_info_bind_id(info, fts_common_tables[1], being_deleted_cache); optim->fts_common_table.suffix = fts_common_tables[4]; fts_get_table_name(&optim->fts_common_table, deleted_cache); - pars_info_bind_id(info, true, fts_common_tables[4], deleted_cache); + pars_info_bind_id(info, fts_common_tables[4], deleted_cache); /* Move doc_ids that are to be deleted to state being deleted. */ graph = fts_parse_sql(NULL, info, fts_init_delete_sql); diff --git a/storage/innobase/fts/fts0que.cc b/storage/innobase/fts/fts0que.cc index 8e2cb838e5a..304a3315342 100644 --- a/storage/innobase/fts/fts0que.cc +++ b/storage/innobase/fts/fts0que.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2020, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2147,7 +2147,7 @@ fts_query_find_term( query->fts_index_table.suffix = fts_get_suffix(selected); fts_get_table_name(&query->fts_index_table, table_name); - pars_info_bind_id(info, true, "index_table_name", table_name); + pars_info_bind_id(info, "index_table_name", table_name); } select.found = FALSE; @@ -2287,7 +2287,7 @@ fts_query_total_docs_containing_term( fts_get_table_name(&query->fts_index_table, table_name); - pars_info_bind_id(info, true, "index_table_name", table_name); + pars_info_bind_id(info, "index_table_name", table_name); graph = fts_parse_sql( &query->fts_index_table, @@ -2370,7 +2370,7 @@ fts_query_terms_in_document( fts_get_table_name(&query->fts_index_table, table_name); - pars_info_bind_id(info, true, "index_table_name", table_name); + pars_info_bind_id(info, "index_table_name", table_name); graph = fts_parse_sql( &query->fts_index_table, diff --git a/storage/innobase/fts/fts0sql.cc b/storage/innobase/fts/fts0sql.cc index 180500f64a5..a4234f7b376 100644 --- a/storage/innobase/fts/fts0sql.cc +++ b/storage/innobase/fts/fts0sql.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2019, 2020, MariaDB Corporation. +Copyright (c) 2019, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -248,7 +248,7 @@ fts_get_select_columns_str( sel_str = mem_heap_printf(heap, "sel%lu", (ulong) i); /* Set copy_name to TRUE since it's dynamic. 
*/ - pars_info_bind_id(info, TRUE, sel_str, field->name); + pars_info_bind_id(info, sel_str, field->name); str = mem_heap_printf( heap, "%s%s$%s", str, (*str) ? ", " : "", sel_str); diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc index 420e7eac9e1..7683f69b492 100644 --- a/storage/innobase/handler/i_s.cc +++ b/storage/innobase/handler/i_s.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2014, 2020, MariaDB Corporation. +Copyright (c) 2014, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2946,7 +2946,7 @@ i_s_fts_index_table_fill_selected( FTS_INIT_INDEX_TABLE(&fts_table, fts_get_suffix(selected), FTS_INDEX_TABLE, index); fts_get_table_name(&fts_table, table_name); - pars_info_bind_id(info, true, "table_name", table_name); + pars_info_bind_id(info, "table_name", table_name); graph = fts_parse_sql( &fts_table, info, diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h index 7fae1ad163b..8b2859bd770 100644 --- a/storage/innobase/include/btr0btr.h +++ b/storage/innobase/include/btr0btr.h @@ -2,7 +2,7 @@ Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2014, 2020, MariaDB Corporation. +Copyright (c) 2014, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -728,8 +728,9 @@ btr_validate_index( @param[in] block page to remove @param[in] index index tree @param[in,out] mtr mini-transaction */ -void btr_level_list_remove(const buf_block_t& block, const dict_index_t& index, - mtr_t* mtr); +dberr_t btr_level_list_remove(const buf_block_t& block, + const dict_index_t& index, mtr_t* mtr) + MY_ATTRIBUTE((warn_unused_result)); /*************************************************************//** If page is the only on its level, this function moves its records to the diff --git a/storage/innobase/include/fil0pagecompress.h b/storage/innobase/include/fil0pagecompress.h index c6ba24faaad..a22867ad56a 100644 --- a/storage/innobase/include/fil0pagecompress.h +++ b/storage/innobase/include/fil0pagecompress.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (C) 2013, 2019 MariaDB Corporation. +Copyright (C) 2013, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -48,7 +48,7 @@ ulint fil_page_compress( /** Decompress a page that may be subject to page_compressed compression. 
@param[in,out] tmp_buf temporary buffer (of innodb_page_size) @param[in,out] buf compressed page buffer -@param[in] flags talespace flags +@param[in] flags tablespace flags @return size of the compressed data @retval 0 if decompression failed @retval srv_page_size if the page was not compressed */ diff --git a/storage/innobase/include/pars0pars.h b/storage/innobase/include/pars0pars.h index 03aa72d3be8..4c588dca061 100644 --- a/storage/innobase/include/pars0pars.h +++ b/storage/innobase/include/pars0pars.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -491,7 +491,6 @@ void pars_info_bind_id( /*=============*/ pars_info_t* info, /*!< in: info struct */ - ibool copy_name,/* in: make a copy of name if TRUE */ const char* name, /*!< in: name */ const char* id); /*!< in: id */ /****************************************************************//** @@ -538,15 +537,6 @@ pars_info_bind_ull_literal( MY_ATTRIBUTE((nonnull)); /****************************************************************//** -Add bound id. */ -void -pars_info_add_id( -/*=============*/ - pars_info_t* info, /*!< in: info struct */ - const char* name, /*!< in: name */ - const char* id); /*!< in: id */ - -/****************************************************************//** Get bound literal with the given name. @return bound literal, or NULL if not found */ pars_bound_lit_t* diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 8dc2d7c585a..e46a0a91ac5 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -5643,7 +5643,8 @@ lock_sec_rec_read_check_and_lock( if (!page_rec_is_supremum(rec) && page_get_max_trx_id(block->frame) >= trx_sys.get_min_trx_id() && lock_rec_convert_impl_to_expl(thr_get_trx(thr), block, rec, - index, offsets)) { + index, offsets) + && gap_mode == LOCK_REC_NOT_GAP) { /* We already hold an implicit exclusive lock. */ return DB_SUCCESS; } @@ -5725,7 +5726,8 @@ lock_clust_rec_read_check_and_lock( if (heap_no != PAGE_HEAP_NO_SUPREMUM && lock_rec_convert_impl_to_expl(thr_get_trx(thr), block, rec, - index, offsets)) { + index, offsets) + && gap_mode == LOCK_REC_NOT_GAP) { /* We already hold an implicit exclusive lock. */ return DB_SUCCESS; } diff --git a/storage/innobase/pars/pars0pars.cc b/storage/innobase/pars/pars0pars.cc index b955b94b4df..2981e31c05f 100644 --- a/storage/innobase/pars/pars0pars.cc +++ b/storage/innobase/pars/pars0pars.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2018, 2020, MariaDB Corporation. +Copyright (c) 2018, 2021, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2357,7 +2357,6 @@ void pars_info_bind_id( /*==============*/ pars_info_t* info, /*!< in: info struct */ - ibool copy_name, /* in: copy name if TRUE */ const char* name, /*!< in: name */ const char* id) /*!< in: id */ { @@ -2380,8 +2379,7 @@ pars_info_bind_id( bid = static_cast<pars_bound_id_t*>( ib_vector_push(info->bound_ids, NULL)); - bid->name = (copy_name) - ? mem_heap_strdup(info->heap, name) : name; + bid->name = name; } bid->id = id; diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index 219e0e9381c..b57c2df549c 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -225,6 +225,19 @@ struct row_import { found and was readable */ }; +struct fil_iterator_t { + pfs_os_file_t file; /*!< File handle */ + const char* filepath; /*!< File path name */ + os_offset_t start; /*!< From where to start */ + os_offset_t end; /*!< Where to stop */ + os_offset_t file_size; /*!< File size in bytes */ + ulint n_io_buffers; /*!< Number of pages to use + for IO */ + byte* io_buffer; /*!< Buffer to use for IO */ + fil_space_crypt_t *crypt_data; /*!< Crypt data (if encrypted) */ + byte* crypt_io_buffer; /*!< IO buffer when encrypted */ +}; + /** Use the page cursor to iterate over records in a block. */ class RecIterator { public: @@ -468,6 +481,10 @@ public: ? block->page.zip.data : block->frame; } + /** Invoke the functionality for the callback */ + virtual dberr_t run(const fil_iterator_t& iter, + buf_block_t* block) UNIV_NOTHROW = 0; + protected: /** Get the physical offset of the extent descriptor within the page. @param page_no page number of the extent descriptor @@ -628,6 +645,24 @@ AbstractCallback::init( } /** +TODO: This can be made parallel trivially by chunking up the file +and creating a callback per thread.. Main benefit will be to use +multiple CPUs for checksums and compressed tables. We have to do +compressed tables block by block right now. Secondly we need to +decompress/compress and copy too much of data. These are +CPU intensive. + +Iterate over all the pages in the tablespace. +@param iter - Tablespace iterator +@param block - block to use for IO +@param callback - Callback to inspect and update page contents +@retval DB_SUCCESS or error code */ +static dberr_t fil_iterate( + const fil_iterator_t& iter, + buf_block_t* block, + AbstractCallback& callback); + +/** Try and determine the index root pages by checking if the next/prev pointers are both FIL_NULL. We need to ensure that skip deleted pages. 
*/ struct FetchIndexRootPages : public AbstractCallback { @@ -644,19 +679,24 @@ struct FetchIndexRootPages : public AbstractCallback { ulint m_page_no; /*!< Root page number */ }; - typedef std::vector<Index, ut_allocator<Index> > Indexes; - /** Constructor @param trx covering (user) transaction @param table table definition in server .*/ FetchIndexRootPages(const dict_table_t* table, trx_t* trx) : AbstractCallback(trx, ULINT_UNDEFINED), - m_table(table) UNIV_NOTHROW { } + m_table(table), m_index(0, 0) UNIV_NOTHROW { } /** Destructor */ ~FetchIndexRootPages() UNIV_NOTHROW override { } + /** Fetch the clustered index root page in the tablespace + @param iter Tablespace iterator + @param block Block to use for IO + @retval DB_SUCCESS or error code */ + dberr_t run(const fil_iterator_t& iter, + buf_block_t* block) UNIV_NOTHROW; + /** Called for each block as it is read from the file. @param block block to convert, it is not from the buffer pool. @retval DB_SUCCESS or error code. */ @@ -670,7 +710,7 @@ struct FetchIndexRootPages : public AbstractCallback { const dict_table_t* m_table; /** Index information */ - Indexes m_indexes; + Index m_index; }; /** Called for each block as it is read from the file. Check index pages to @@ -685,39 +725,27 @@ dberr_t FetchIndexRootPages::operator()(buf_block_t* block) UNIV_NOTHROW const page_t* page = get_frame(block); - ulint page_type = fil_page_get_type(page); - - if (page_type == FIL_PAGE_TYPE_XDES) { - return set_current_xdes(block->page.id().page_no(), page); - } else if (fil_page_index_page_check(page) - && !is_free(block->page.id().page_no()) - && !page_has_siblings(page)) { - - index_id_t id = btr_page_get_index_id(page); + m_index.m_id = btr_page_get_index_id(page); + m_index.m_page_no = block->page.id().page_no(); - m_indexes.push_back(Index(id, block->page.id().page_no())); - - if (m_indexes.size() == 1) { - /* Check that the tablespace flags match the table flags. */ - ulint expected = dict_tf_to_fsp_flags(m_table->flags); - if (!fsp_flags_match(expected, m_space_flags)) { - ib_errf(m_trx->mysql_thd, IB_LOG_LEVEL_ERROR, - ER_TABLE_SCHEMA_MISMATCH, - "Expected FSP_SPACE_FLAGS=0x%x, .ibd " - "file contains 0x%x.", - unsigned(expected), - unsigned(m_space_flags)); - return(DB_CORRUPTION); - } - } + /* Check that the tablespace flags match the table flags. */ + ulint expected = dict_tf_to_fsp_flags(m_table->flags); + if (!fsp_flags_match(expected, m_space_flags)) { + ib_errf(m_trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_TABLE_SCHEMA_MISMATCH, + "Expected FSP_SPACE_FLAGS=0x%x, .ibd " + "file contains 0x%x.", + unsigned(expected), + unsigned(m_space_flags)); + return(DB_CORRUPTION); + } - if (!page_is_comp(block->frame) != - !dict_table_is_comp(m_table)) { - ib_errf(m_trx->mysql_thd, IB_LOG_LEVEL_ERROR, - ER_TABLE_SCHEMA_MISMATCH, - "ROW_FORMAT mismatch"); - return DB_CORRUPTION; - } + if (!page_is_comp(block->frame) != + !dict_table_is_comp(m_table)) { + ib_errf(m_trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_TABLE_SCHEMA_MISMATCH, + "ROW_FORMAT mismatch"); + return DB_CORRUPTION; } return DB_SUCCESS; @@ -729,11 +757,9 @@ Update the import configuration that will be used to import the tablespace. 
dberr_t FetchIndexRootPages::build_row_import(row_import* cfg) const UNIV_NOTHROW { - Indexes::const_iterator end = m_indexes.end(); - ut_a(cfg->m_table == m_table); cfg->m_zip_size = m_zip_size; - cfg->m_n_indexes = m_indexes.size(); + cfg->m_n_indexes = 1; if (cfg->m_n_indexes == 0) { @@ -759,37 +785,32 @@ FetchIndexRootPages::build_row_import(row_import* cfg) const UNIV_NOTHROW row_index_t* cfg_index = cfg->m_indexes; - for (Indexes::const_iterator it = m_indexes.begin(); - it != end; - ++it, ++cfg_index) { - - char name[BUFSIZ]; + char name[BUFSIZ]; - snprintf(name, sizeof(name), "index" IB_ID_FMT, it->m_id); + snprintf(name, sizeof(name), "index" IB_ID_FMT, m_index.m_id); - ulint len = strlen(name) + 1; + ulint len = strlen(name) + 1; - cfg_index->m_name = UT_NEW_ARRAY_NOKEY(byte, len); + cfg_index->m_name = UT_NEW_ARRAY_NOKEY(byte, len); - /* Trigger OOM */ - DBUG_EXECUTE_IF( - "ib_import_OOM_12", - UT_DELETE_ARRAY(cfg_index->m_name); - cfg_index->m_name = NULL; - ); + /* Trigger OOM */ + DBUG_EXECUTE_IF( + "ib_import_OOM_12", + UT_DELETE_ARRAY(cfg_index->m_name); + cfg_index->m_name = NULL; + ); - if (cfg_index->m_name == NULL) { - return(DB_OUT_OF_MEMORY); - } + if (cfg_index->m_name == NULL) { + return(DB_OUT_OF_MEMORY); + } - memcpy(cfg_index->m_name, name, len); + memcpy(cfg_index->m_name, name, len); - cfg_index->m_id = it->m_id; + cfg_index->m_id = m_index.m_id; - cfg_index->m_space = m_space; + cfg_index->m_space = m_space; - cfg_index->m_page_no = it->m_page_no; - } + cfg_index->m_page_no = m_index.m_page_no; return(DB_SUCCESS); } @@ -845,6 +866,11 @@ public: } } + dberr_t run(const fil_iterator_t& iter, buf_block_t* block) UNIV_NOTHROW + { + return fil_iterate(iter, block, *this); + } + /** Called for each block as it is read from the file. @param block block to convert, it is not from the buffer pool. @retval DB_SUCCESS or error code. */ @@ -1902,8 +1928,10 @@ PageConverter::update_index_page( row_index_t* index = find_index(id); if (UNIV_UNLIKELY(!index)) { - ib::warn() << "Unknown index id " << id - << " on page " << page_id.page_no(); + if (!m_cfg->m_missing) { + ib::warn() << "Unknown index id " << id + << " on page " << page_id.page_no(); + } return DB_SUCCESS; } @@ -3337,20 +3365,6 @@ dberr_t row_import_update_discarded_flag(trx_t* trx, table_id_t table_id, return(err); } -struct fil_iterator_t { - pfs_os_file_t file; /*!< File handle */ - const char* filepath; /*!< File path name */ - os_offset_t start; /*!< From where to start */ - os_offset_t end; /*!< Where to stop */ - os_offset_t file_size; /*!< File size in bytes */ - ulint n_io_buffers; /*!< Number of pages to use - for IO */ - byte* io_buffer; /*!< Buffer to use for IO */ - fil_space_crypt_t *crypt_data; /*!< Crypt data (if encrypted) */ - byte* crypt_io_buffer; /*!< IO buffer when encrypted */ -}; - - /** InnoDB writes page by page when there is page compressed tablespace involved. It does help to save the disk space when punch hole is enabled @@ -3408,22 +3422,89 @@ dberr_t fil_import_compress_fwrite(const fil_iterator_t &iter, return DB_SUCCESS; } -/********************************************************************//** -TODO: This can be made parallel trivially by chunking up the file and creating -a callback per thread. . Main benefit will be to use multiple CPUs for -checksums and compressed tables. We have to do compressed tables block by -block right now. Secondly we need to decompress/compress and copy too much -of data. These are CPU intensive. 
+dberr_t FetchIndexRootPages::run(const fil_iterator_t& iter, + buf_block_t* block) UNIV_NOTHROW +{ + const unsigned zip_size= fil_space_t::zip_size(m_space_flags); + const unsigned size= zip_size ? zip_size : unsigned(srv_page_size); + const ulint buf_size= +#ifdef HAVE_LZO + LZO1X_1_15_MEM_COMPRESS+ +#elif defined HAVE_SNAPPY + snappy_max_compressed_length(srv_page_size) + +#endif + srv_page_size; + byte* page_compress_buf = static_cast<byte*>(malloc(buf_size)); + ut_ad(!srv_read_only_mode); -Iterate over all the pages in the tablespace. -@param iter - Tablespace iterator -@param block - block to use for IO -@param callback - Callback to inspect and update page contents -@retval DB_SUCCESS or error code */ -static -dberr_t -fil_iterate( -/*========*/ + if (!page_compress_buf) + return DB_OUT_OF_MEMORY; + + const bool encrypted= iter.crypt_data != NULL && + iter.crypt_data->should_encrypt(); + byte* const readptr= iter.io_buffer; + block->frame= readptr; + + if (block->page.zip.data) + block->page.zip.data= readptr; + + bool page_compressed= false; + + dberr_t err= os_file_read_no_error_handling( + IORequestReadPartial, iter.file, readptr, 3 * size, size, 0); + if (err != DB_SUCCESS) + { + ib::error() << iter.filepath << ": os_file_read() failed"; + goto func_exit; + } + + if (page_get_page_no(readptr) != 3) + { +page_corrupted: + ib::warn() << filename() << ": Page 3 at offset " + << 3 * size << " looks corrupted."; + err= DB_CORRUPTION; + goto func_exit; + } + + block->page.id_.set_page_no(3); + switch (fil_page_get_type(readptr)) { + case FIL_PAGE_PAGE_COMPRESSED: + case FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED: + if (block->page.zip.data) + goto page_corrupted; + page_compressed= true; + } + + if (encrypted) + { + if (!fil_space_verify_crypt_checksum(readptr, zip_size)) + goto page_corrupted; + + if (!fil_space_decrypt(get_space_id(), iter.crypt_data, readptr, + size, m_space_flags, readptr, &err) || + err != DB_SUCCESS) + goto func_exit; + } + + if (page_compressed) + { + ulint compress_length= fil_page_decompress(page_compress_buf, readptr, + m_space_flags); + ut_ad(compress_length != srv_page_size); + if (compress_length == 0) + goto page_corrupted; + } + else if (buf_page_is_corrupted(false, readptr, m_space_flags)) + goto page_corrupted; + + err= this->operator()(block); +func_exit: + free(page_compress_buf); + return err; +} + +static dberr_t fil_iterate( const fil_iterator_t& iter, buf_block_t* block, AbstractCallback& callback) @@ -3886,7 +3967,7 @@ fil_tablespace_iterate( block->page.zip.data = block->frame + srv_page_size; } - err = fil_iterate(iter, block, callback); + err = callback.run(iter, block); if (iter.crypt_data) { fil_space_destroy_crypt_data(&iter.crypt_data); @@ -4030,6 +4111,16 @@ row_import_for_mysql( cfg.m_zip_size = 0; + if (UT_LIST_GET_LEN(table->indexes) > 1) { + ib_errf(trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_INTERNAL_ERROR, + "Drop all secondary indexes before importing " + "table %s when .cfg file is missing.", + table->name.m_name); + err = DB_ERROR; + return row_import_error(prebuilt, trx, err); + } + FetchIndexRootPages fetchIndexRootPages(table, trx); err = fil_tablespace_iterate( diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index 6c2edd1933f..60f4e4d19e1 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -1665,23 +1665,6 @@ row_ins_check_foreign_constraint( cmp = cmp_dtuple_rec(entry, rec, offsets); if (cmp == 0) { - if (check_table->versioned()) { - bool history_row = false; - - if 
(check_index->is_primary()) { - history_row = check_index-> - vers_history_row(rec, offsets); - } else if (check_index-> - vers_history_row(rec, history_row)) - { - break; - } - - if (history_row) { - continue; - } - } - if (rec_get_deleted_flag(rec, rec_offs_comp(offsets))) { /* In delete-marked records, DB_TRX_ID must @@ -1703,6 +1686,23 @@ row_ins_check_foreign_constraint( goto end_scan; } } else { + if (check_table->versioned()) { + bool history_row = false; + + if (check_index->is_primary()) { + history_row = check_index-> + vers_history_row(rec, + offsets); + } else if (check_index-> + vers_history_row(rec, + history_row)) { + break; + } + + if (history_row) { + continue; + } + } /* Found a matching record. Lock only a record because we can allow inserts into gaps */ diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 6998a5730f9..ea71c59c344 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -99,25 +99,6 @@ static ib_mutex_t row_drop_list_mutex; /** Flag: has row_mysql_drop_list been initialized? */ static bool row_mysql_drop_list_inited; -/*******************************************************************//** -Determine if the given name is a name reserved for MySQL system tables. -@return TRUE if name is a MySQL system table name */ -static -ibool -row_mysql_is_system_table( -/*======================*/ - const char* name) -{ - if (strncmp(name, "mysql/", 6) != 0) { - - return(FALSE); - } - - return(0 == strcmp(name + 6, "host") - || 0 == strcmp(name + 6, "user") - || 0 == strcmp(name + 6, "db")); -} - #ifdef UNIV_DEBUG /** Wait for the background drop list to become empty. */ void @@ -2364,25 +2345,12 @@ row_create_table_for_mysql( DBUG_EXECUTE_IF( "ib_create_table_fail_at_start_of_row_create_table_for_mysql", - goto err_exit; - ); - - trx->op_info = "creating table"; - - if (row_mysql_is_system_table(table->name.m_name)) { - - ib::error() << "Trying to create a MySQL system table " - << table->name << " of type InnoDB. MySQL system" - " tables must be of the MyISAM type!"; -#ifndef DBUG_OFF -err_exit: -#endif /* !DBUG_OFF */ dict_mem_table_free(table); - trx->op_info = ""; + return DB_ERROR; + ); - return(DB_ERROR); - } + trx->op_info = "creating table"; trx_start_if_not_started_xa(trx, true); @@ -4190,14 +4158,6 @@ row_rename_table_for_mysql( if (high_level_read_only) { return(DB_READ_ONLY); - - } else if (row_mysql_is_system_table(new_name)) { - - ib::error() << "Trying to create a MySQL system table " - << new_name << " of type InnoDB. 
MySQL system tables" - " must be of the MyISAM type!"; - - goto funct_exit; } trx->op_info = "renaming table"; diff --git a/storage/perfschema/CMakeLists.txt b/storage/perfschema/CMakeLists.txt index d5625afbb1a..894c4585013 100644 --- a/storage/perfschema/CMakeLists.txt +++ b/storage/perfschema/CMakeLists.txt @@ -341,9 +341,8 @@ FAIL_REGEX "warning: incompatible pointer to integer conversion" CONFIGURE_FILE(pfs_config.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/pfs_config.h) MYSQL_ADD_PLUGIN(perfschema ${PERFSCHEMA_SOURCES} STORAGE_ENGINE DEFAULT - STATIC_ONLY RECOMPILE_FOR_EMBEDDED) + STATIC_ONLY RECOMPILE_FOR_EMBEDDED DEPENDS GenServerSource) IF (TARGET perfschema) - ADD_DEPENDENCIES(perfschema GenServerSource) IF(WITH_UNIT_TESTS) ADD_SUBDIRECTORY(unittest) ENDIF(WITH_UNIT_TESTS) diff --git a/storage/spider/mysql-test/spider/bugfix/include/mdev_24523_deinit.inc b/storage/spider/mysql-test/spider/bugfix/include/mdev_24523_deinit.inc new file mode 100644 index 00000000000..e8d30523978 --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/include/mdev_24523_deinit.inc @@ -0,0 +1,9 @@ +--let $MASTER_1_COMMENT_P_2_1= $MASTER_1_COMMENT_P_2_1_BACKUP +--let $CHILD2_1_CREATE_TABLES= $CHILD2_1_CREATE_TABLES_BACKUP +--disable_warnings +--disable_query_log +--disable_result_log +--source ../t/test_deinit.inc +--enable_result_log +--enable_query_log +--enable_warnings diff --git a/storage/spider/mysql-test/spider/bugfix/include/mdev_24523_init.inc b/storage/spider/mysql-test/spider/bugfix/include/mdev_24523_init.inc new file mode 100644 index 00000000000..989faa54c16 --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/include/mdev_24523_init.inc @@ -0,0 +1,31 @@ +--disable_warnings +--disable_query_log +--disable_result_log +--source ../t/test_init.inc +--enable_result_log +--enable_query_log +--enable_warnings +--let $MASTER_1_COMMENT_P_2_1_BACKUP= $MASTER_1_COMMENT_P_2_1 +let $MASTER_1_COMMENT_P_2_1= + PARTITION BY RANGE(i) ( + PARTITION pt1 VALUES LESS THAN (5) COMMENT='srv "s_2_1", table "ta_r2"', + PARTITION pt2 VALUES LESS THAN (10) COMMENT='srv "s_2_1", table "ta_r3"', + PARTITION pt3 VALUES LESS THAN MAXVALUE COMMENT='srv "s_2_1", table "ta_r4"' + ); +--let $CHILD2_1_CREATE_TABLES_BACKUP= $CHILD2_1_CREATE_TABLES +let $CHILD2_1_CREATE_TABLES= + CREATE TABLE ta_r2 ( + i INT, + j JSON, + PRIMARY KEY(i) + ) $CHILD2_1_ENGINE $CHILD2_1_CHARSET $STR_SEMICOLON + CREATE TABLE ta_r3 ( + i INT, + j JSON, + PRIMARY KEY(i) + ) $CHILD2_1_ENGINE $CHILD2_1_CHARSET $STR_SEMICOLON + CREATE TABLE ta_r4 ( + i INT, + j JSON, + PRIMARY KEY(i) + ) $CHILD2_1_ENGINE $CHILD2_1_CHARSET; diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_24523.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_24523.result new file mode 100644 index 00000000000..0b3d6c3142b --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_24523.result @@ -0,0 +1,58 @@ +for master_1 +for child2 +child2_1 +child2_2 +child2_3 +for child3 + +this test is for MDEV-24523 + +drop and create databases +connection master_1; +CREATE DATABASE auto_test_local; +USE auto_test_local; +connection child2_1; +CREATE DATABASE auto_test_remote; +USE auto_test_remote; + +create table and insert +connection child2_1; +CHILD2_1_CREATE_TABLES +connection master_1; +CREATE TABLE tbl_a ( +i INT, +j JSON, +PRIMARY KEY(i) +) ENGINE=Spider PARTITION BY RANGE(i) ( +PARTITION pt1 VALUES LESS THAN (5) COMMENT='srv "s_2_1", table "ta_r2"', +PARTITION pt2 VALUES LESS THAN (10) COMMENT='srv "s_2_1", table "ta_r3"', +PARTITION pt3 VALUES LESS THAN 
MAXVALUE COMMENT='srv "s_2_1", table "ta_r4"' + ) +INSERT INTO tbl_a VALUES (1, '{ "a": 1, "b": [2, 3]}'); + +test 1 +connection master_1; +UPDATE tbl_a SET j = JSON_REPLACE(j, '$.a', 10, '$.c', '[1, 2]'); +SELECT * FROM tbl_a; +i j +1 {"a": 10, "b": [2, 3]} +TRUNCATE TABLE tbl_a; +INSERT INTO tbl_a VALUES (1, '{ "a": 1, "b": [2, 3]}'); +UPDATE tbl_a SET j = JSON_REPLACE(j, '$.a', 10, '$.b', '[1, 2]'); +SELECT * FROM tbl_a; +i j +1 {"a": 10, "b": "[1, 2]"} + +deinit +connection master_1; +DROP DATABASE IF EXISTS auto_test_local; +connection child2_1; +DROP DATABASE IF EXISTS auto_test_remote; +for master_1 +for child2 +child2_1 +child2_2 +child2_3 +for child3 + +end of test diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_24523.cnf b/storage/spider/mysql-test/spider/bugfix/t/mdev_24523.cnf new file mode 100644 index 00000000000..05dfd8a0bce --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_24523.cnf @@ -0,0 +1,3 @@ +!include include/default_mysqld.cnf +!include ../my_1_1.cnf +!include ../my_2_1.cnf diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_24523.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_24523.test new file mode 100644 index 00000000000..00c0c873f20 --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_24523.test @@ -0,0 +1,66 @@ +--source ../include/mdev_24523_init.inc +--echo +--echo this test is for MDEV-24523 +--echo +--echo drop and create databases + +--connection master_1 +--disable_warnings +CREATE DATABASE auto_test_local; +USE auto_test_local; + +--connection child2_1 +CREATE DATABASE auto_test_remote; +USE auto_test_remote; +--enable_warnings + +--echo +--echo create table and insert + +--connection child2_1 +--disable_query_log +--disable_ps_protocol +echo CHILD2_1_CREATE_TABLES; +eval $CHILD2_1_CREATE_TABLES; +--enable_ps_protocol +--enable_query_log + +--connection master_1 +--disable_query_log +echo CREATE TABLE tbl_a ( + i INT, + j JSON, + PRIMARY KEY(i) +) $MASTER_1_ENGINE $MASTER_1_COMMENT_P_2_1; +eval CREATE TABLE tbl_a ( + i INT, + j JSON, + PRIMARY KEY(i) +) $MASTER_1_ENGINE $MASTER_1_COMMENT_P_2_1; +--enable_query_log +INSERT INTO tbl_a VALUES (1, '{ "a": 1, "b": [2, 3]}'); + +--echo +--echo test 1 + +--connection master_1 +UPDATE tbl_a SET j = JSON_REPLACE(j, '$.a', 10, '$.c', '[1, 2]'); +SELECT * FROM tbl_a; +TRUNCATE TABLE tbl_a; +INSERT INTO tbl_a VALUES (1, '{ "a": 1, "b": [2, 3]}'); +UPDATE tbl_a SET j = JSON_REPLACE(j, '$.a', 10, '$.b', '[1, 2]'); +SELECT * FROM tbl_a; +--echo +--echo deinit +--disable_warnings + +--connection master_1 +DROP DATABASE IF EXISTS auto_test_local; + +--connection child2_1 +DROP DATABASE IF EXISTS auto_test_remote; + +--enable_warnings +--source ../include/mdev_24523_deinit.inc +--echo +--echo end of test diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 9614d6a8880..87e1304387b 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -5256,10 +5256,10 @@ static void test_manual_sample() { unsigned int param_count; MYSQL_STMT *stmt; - short small_data; - int int_data; + short small_data= 1; + int int_data= 2; int rc; - char str_data[50]; + char str_data[50]= "std_data"; ulonglong affected_rows; MYSQL_BIND my_bind[3]; my_bool is_null; @@ -18209,9 +18209,9 @@ static void test_bug40365(void) if (!opt_silent) fprintf(stdout, "\ntime[%d]: %02d-%02d-%02d ", i, tm[i].year, tm[i].month, tm[i].day); - DIE_UNLESS(tm[i].year == 0); - DIE_UNLESS(tm[i].month == 0); - DIE_UNLESS(tm[i].day == 0); + DIE_UNLESS(tm[i].year == 0); + 
DIE_UNLESS(tm[i].month == 0); + DIE_UNLESS(tm[i].day == 0); } mysql_stmt_close(stmt); rc= mysql_commit(mysql);
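
A minimal standalone sketch of the retry pattern that the sql_repl.cc and log.h hunks above introduce (SHOW BINARY LOGS snapshots reset_master_count under the log lock and re-reads the index if RESET MASTER bumped it meanwhile). The names BinlogIndex, generation and list_logs below are illustrative stand-ins, not MariaDB APIs, and the sketch only mirrors the control flow, not the server code.

#include <atomic>
#include <cstdint>
#include <mutex>
#include <string>
#include <vector>

// Stand-in for MYSQL_BIN_LOG: a cached file list guarded by a mutex plus a
// generation counter that RESET MASTER increments (role of reset_master_count).
struct BinlogIndex {
  std::mutex lock;                          // role of LOCK_log / LOCK_index
  std::atomic<std::uint64_t> generation{0}; // role of reset_master_count
  std::vector<std::string> files{"binlog.000001"};

  void reset_master() {                     // RESET MASTER: start over with one log
    std::lock_guard<std::mutex> g(lock);
    files.assign(1, "binlog.000001");
    ++generation;                           // lets unlocked readers detect the reset
  }

  // SHOW BINARY LOGS-style reader: copy the names under the lock, do the
  // expensive per-file work unlocked, and retry if RESET MASTER raced us.
  std::vector<std::string> list_logs() {
    for (;;) {
      std::uint64_t expected;
      std::vector<std::string> snapshot;
      {
        std::lock_guard<std::mutex> g(lock);
        expected = generation.load();
        snapshot = files;                   // cheap copy of the cached names
      }
      // ... per-file size/position lookups would happen here, unlocked ...
      if (generation.load() > expected)
        continue;                           // cache went stale: rebuild from scratch
      return snapshot;
    }
  }
};

The counter is atomic only because the second check happens outside the lock; the diff uses Atomic_counter<uint64> for the same reason.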
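
The ha_partition.h hunk replaces the old two-byte queue-element prefix with a three-part layout: a pointer to the per-partition Ordered_blob_storage array, the two-byte partition number at ORDERED_PART_NUM_OFFSET, and the record image at ORDERED_REC_OFFSET. The sketch below reproduces only that offset arithmetic under illustrative names (Blob_slot, store_part_num, load_part_num); it is not the handler code and assumes a little-endian int2store/uint2korr-style encoding.

#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

// Illustrative per-partition blob holder (role of Ordered_blob_storage).
struct Blob_slot { std::string blob; bool set_read_value = false; };

// Same arithmetic as ORDERED_PART_NUM_OFFSET / ORDERED_REC_OFFSET:
// [Blob_slot** slot pointer][2-byte partition number][record image]
constexpr std::size_t PART_NUM_OFFSET = sizeof(Blob_slot **);
constexpr std::size_t REC_OFFSET      = PART_NUM_OFFSET + 2;

inline void store_part_num(unsigned char *elem, std::uint16_t part) {
  elem[PART_NUM_OFFSET]     = static_cast<unsigned char>(part & 0xff); // int2store-style
  elem[PART_NUM_OFFSET + 1] = static_cast<unsigned char>(part >> 8);
}

inline std::uint16_t load_part_num(const unsigned char *elem) {        // uint2korr-style
  return static_cast<std::uint16_t>(elem[PART_NUM_OFFSET] |
                                    (elem[PART_NUM_OFFSET + 1] << 8));
}

int main() {
  const std::size_t rec_length = 16;                  // pretend row image size
  std::vector<unsigned char> elem(REC_OFFSET + rec_length, 0);

  Blob_slot  slots[1];                                // one slot per blob field
  Blob_slot *slot_array = slots;
  Blob_slot **indirect  = &slot_array;
  std::memcpy(elem.data(), &indirect, sizeof indirect);  // pointer prefix at offset 0

  store_part_num(elem.data(), 3);                     // partition number
  unsigned char *rec_buf = elem.data() + REC_OFFSET;  // where the row bytes live
  std::memset(rec_buf, 0, rec_length);

  return load_part_num(elem.data()) == 3 ? 0 : 1;
}

Keeping the pointer first and padding the partition number to a fixed two bytes is what lets the diff read both the blob storage and the partition id from the same queue element without changing the record copy path.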