From 8e3e87d2fc1e63d287f203d441dcb9360775c6b7 Mon Sep 17 00:00:00 2001 From: sjaakola Date: Tue, 8 Dec 2020 18:02:42 +0200 Subject: MDEV-23851 MDEV-24229 BF-BF conflict issues Issues MDEV-23851 and MDEV-24229 are probably duplicates and are caused by the new self-asserting function lock0lock.cc:wsrep_assert_no_bf_bf_wait(). The criteria for asserting is too strict and does not take in consideration scenarios of "false positive" lock conflicts, which are resolved by replaying the local transaction. As a fix, this PR is relaxing the assert criteria by two conditions, which skip assert if high priority transactions are locking in correct order or if conflicting high priority lock holder is aborting and has just not yet released the lock. Alternative fix would be to remove wsrep_assert_no_bf_bf_wait() altogether, or remove the assert in this function and let it only print warnings in error log. But in my high conflict rate multi-master test scenario, this relaxed asserting appears to be safe. This PR also removes two wsrep_report_bf_lock_wait() calls in innodb lock manager, which cause mutex access assert in debug builds. Foreign key appending missed handling of data types of float and double in INSERT execution. This is not directly related to the actual issue here but is fixed in this PR nevertheless. Missing these foreign keys values in certification could cause problems in some multi-master load scenarios. Finally, some problem reports suggest that some of the issues reported in MDEV-23851 might relate to false positive lock conflicts over unique secondary index gaps. There is separate work for relaxing UK index gap locking of replication appliers, and separate PR will be submitted for it, with a related mtr test as well. --- storage/innobase/lock/lock0lock.cc | 19 ++++++++++++++----- storage/innobase/rem/rem0rec.cc | 17 ++++++++++++++++- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 913c80027bf..ec37f9029f7 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -666,6 +666,20 @@ static void wsrep_assert_no_bf_bf_wait( if (UNIV_LIKELY(!wsrep_thd_is_BF(lock_rec2->trx->mysql_thd, FALSE))) return; + /* if BF - BF order is honored, we can keep trx1 waiting for the lock */ + if (wsrep_trx_order_before(trx1->mysql_thd, lock_rec2->trx->mysql_thd)) + return; + + /* avoiding BF-BF conflict assert, if victim is already aborting + or rolling back for replaying + */ + wsrep_thd_LOCK(lock_rec2->trx->mysql_thd); + if (wsrep_trx_is_aborting(lock_rec2->trx->mysql_thd)) { + wsrep_thd_UNLOCK(lock_rec2->trx->mysql_thd); + return; + } + wsrep_thd_UNLOCK(lock_rec2->trx->mysql_thd); + mtr_t mtr; if (lock_rec1) { @@ -1522,11 +1536,6 @@ lock_rec_create_low( trx_mutex_exit(c_lock->trx); - if (UNIV_UNLIKELY(wsrep_debug)) { - wsrep_report_bf_lock_wait(trx->mysql_thd, trx->id); - wsrep_report_bf_lock_wait(c_lock->trx->mysql_thd, c_lock->trx->id); - } - /* have to bail out here to avoid lock_set_lock... 
*/ return(lock); } diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc index 962de60f555..fa6a6c738e6 100644 --- a/storage/innobase/rem/rem0rec.cc +++ b/storage/innobase/rem/rem0rec.cc @@ -2283,9 +2283,24 @@ wsrep_rec_get_foreign_key( break; case DATA_BLOB: case DATA_BINARY: + case DATA_FIXBINARY: + case DATA_GEOMETRY: memcpy(buf, data, len); break; - default: + + case DATA_FLOAT: + { + float f = mach_float_read(data); + memcpy(buf, &f, sizeof(float)); + } + break; + case DATA_DOUBLE: + { + double d = mach_double_read(data); + memcpy(buf, &d, sizeof(double)); + } + break; + default: break; } -- cgit v1.2.1 From 5b9ee8d8193a8c7a8ebdd35eedcadc3ae78e7fc1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 28 Dec 2020 12:06:22 +0200 Subject: MDEV-24449 Corruption of system tablespace or last recovered page This corresponds to 10.5 commit 39378e1366f78b38c05e45103b9fb9c829cc5f4f. With a patched version of the test innodb.ibuf_not_empty (so that it would trigger crash recovery after using the change buffer), and patched code that would modify the os_thread_sleep() in recv_apply_hashed_log_recs() to be 1ms as well as add a sleep of the same duration to the end of recv_recover_page() when recv_sys->n_addrs=0, we can demonstrate a race condition. After disabling some debug checks in buf_all_freed_instance(), buf_pool_invalidate_instance() and buf_validate(), we managed to trigger an assertion failure in fseg_free_step(), on the XDES_FREE_BIT. In other words, an trx_undo_seg_free() call during trx_rollback_resurrected() was attempting a double-free of a page. This was repeated about once in 400 to 500 test runs. With the fix applied, the test passed 2,000 runs. recv_apply_hashed_log_recs(): Do not only wait for recv_sys->n_addrs to reach 0, but also wait for buf_get_n_pending_read_ios() to reach 0, to guarantee that buf_page_io_complete() will not be executing ibuf_merge_or_delete_for_page(). --- storage/innobase/log/log0recv.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 4c3886caeaf..95179ec2271 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -2501,7 +2501,7 @@ apply: /* Wait until all the pages have been processed */ - while (recv_sys->n_addrs != 0) { + while (recv_sys->n_addrs || buf_get_n_pending_read_ios()) { const bool abort = recv_sys->found_corrupt_log || recv_sys->found_corrupt_fs; -- cgit v1.2.1 From 78292047a4747ccd9210ba36a185a1dbe825de89 Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Mon, 28 Dec 2020 15:12:32 +0400 Subject: MDEV-19442 server_audit plugin doesn't consider proxy users in server_audit_excl_users/server_audit_incl_users. Check the proxy user just as the connection user against the incl_users_list and excl_users_list. 
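
The intended decision rule can be pictured with a small standalone sketch (C++, illustrative only; the names should_log/listed/incl_users are made up here, while the real code uses do_log_user() and coll_search() over incl_user_coll/excl_user_coll as in the diff that follows):

    #include <string>
    #include <vector>

    // stand-ins for the plugin's incl_user_coll / excl_user_coll
    static std::vector<std::string> incl_users;
    static std::vector<std::string> excl_users;

    static bool listed(const std::vector<std::string> &coll, const char *name)
    {
      if (!name)
        return false;
      for (const std::string &u : coll)
        if (u == name)
          return true;
      return false;
    }

    /* Include list: log if either the connection user or the proxy user is listed.
       Exclude list: log only if neither the connection user nor the proxy user is listed. */
    static bool should_log(const char *user, const char *proxy)
    {
      if (!incl_users.empty())
        return listed(incl_users, user) || listed(incl_users, proxy);
      if (!excl_users.empty())
        return !listed(excl_users, user) && !listed(excl_users, proxy);
      return true;   // no filter configured: log everyone
    }
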
--- mysql-test/suite/plugins/r/server_audit.result | 5 ++++- mysql-test/suite/plugins/t/server_audit.test | 1 + plugin/server_audit/server_audit.c | 22 +++++++++++++++------- 3 files changed, 20 insertions(+), 8 deletions(-) diff --git a/mysql-test/suite/plugins/r/server_audit.result b/mysql-test/suite/plugins/r/server_audit.result index f309b6c95a3..3fce3346f29 100644 --- a/mysql-test/suite/plugins/r/server_audit.result +++ b/mysql-test/suite/plugins/r/server_audit.result @@ -227,6 +227,7 @@ set global server_audit_logging= on; disconnect cn1; drop user user1@localhost; set global server_audit_events=''; +set global server_audit_incl_users='root, plug_dest'; CREATE USER plug IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest'; CREATE USER plug_dest IDENTIFIED BY 'plug_dest_passwd'; connect(localhost,plug,plug_dest,test,MYSQL_PORT,MYSQL_SOCK); @@ -277,7 +278,7 @@ server_audit_file_path server_audit_file_rotate_now OFF server_audit_file_rotate_size 1000000 server_audit_file_rotations 9 -server_audit_incl_users root +server_audit_incl_users root, plug_dest server_audit_logging ON server_audit_mode 1 server_audit_output_type file @@ -419,6 +420,7 @@ TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,procs_priv, TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,proxies_priv, TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,roles_mapping, TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global server_audit_events=\'\'',0 +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global server_audit_incl_users=\'root, plug_dest\'',0 TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,user, TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,db, TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,tables_priv, @@ -442,6 +444,7 @@ TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,proxies_priv, TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'GRANT PROXY ON plug_dest TO plug',0 TIME,HOSTNAME,plug,localhost,ID,0,CONNECT,test,,0 TIME,HOSTNAME,plug,localhost,ID,0,PROXY_CONNECT,test,`plug_dest`@`%`,0 +TIME,HOSTNAME,plug,localhost,ID,ID,QUERY,test,'select USER(),CURRENT_USER()',0 TIME,HOSTNAME,plug,localhost,ID,0,DISCONNECT,test,,0 TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,user, TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,db, diff --git a/mysql-test/suite/plugins/t/server_audit.test b/mysql-test/suite/plugins/t/server_audit.test index 397dd554962..fa5bd7e1349 100644 --- a/mysql-test/suite/plugins/t/server_audit.test +++ b/mysql-test/suite/plugins/t/server_audit.test @@ -173,6 +173,7 @@ source include/wait_until_count_sessions.inc; drop user user1@localhost; set global server_audit_events=''; +set global server_audit_incl_users='root, plug_dest'; CREATE USER plug IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest'; CREATE USER plug_dest IDENTIFIED BY 'plug_dest_passwd'; diff --git a/plugin/server_audit/server_audit.c b/plugin/server_audit/server_audit.c index fe8bf4478fc..ebb2c6c0a05 100644 --- a/plugin/server_audit/server_audit.c +++ b/plugin/server_audit/server_audit.c @@ -1578,22 +1578,27 @@ no_password: -static int do_log_user(const char *name, int take_lock) +static int do_log_user(const char *name, int len, + const char *proxy, int proxy_len, int take_lock) { - size_t len; int result; if (!name) return 0; - len= strlen(name); if (take_lock) flogger_mutex_lock(&lock_operations); if (incl_user_coll.n_users) - result= coll_search(&incl_user_coll, name, len) != 0; + { + result= coll_search(&incl_user_coll, name, len) != 0 || + (proxy && coll_search(&incl_user_coll, proxy, proxy_len) != 0); + } else if (excl_user_coll.n_users) 
- result= coll_search(&excl_user_coll, name, len) == 0; + { + result= coll_search(&excl_user_coll, name, len) == 0 && + (proxy && coll_search(&excl_user_coll, proxy, proxy_len) == 0); + } else result= 1; @@ -2134,7 +2139,9 @@ void auditing(MYSQL_THD thd, unsigned int event_class, const void *ev) } if (event_class == MYSQL_AUDIT_GENERAL_CLASS && FILTER(EVENT_QUERY) && - cn && (cn->log_always || do_log_user(cn->user, 1))) + cn && (cn->log_always || do_log_user(cn->user, cn->user_length, + cn->proxy, cn->proxy_length, + 1))) { const struct mysql_event_general *event = (const struct mysql_event_general *) ev; @@ -2154,7 +2161,8 @@ void auditing(MYSQL_THD thd, unsigned int event_class, const void *ev) { const struct mysql_event_table *event = (const struct mysql_event_table *) ev; - if (do_log_user(event->user, 1)) + if (do_log_user(event->user, SAFE_STRLEN(event->user), + cn->proxy, cn->proxy_length, 1)) { switch (event->event_subclass) { -- cgit v1.2.1 From 4601e6e565dacd074e211108acf48c067f951b4f Mon Sep 17 00:00:00 2001 From: Teemu Ollakka Date: Thu, 29 Oct 2020 16:30:52 +0200 Subject: MDEV-24255 MTR test galera_bf_abort fails with --ps-protocol MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Under ps-protocol, commands like COM_STMT_FETCH, COM_STMT_CLOSE and COM_STMT_SEND_LONG_DATA are not supposed to return errors. Therefore, if a transaction is BF aborted and the client is processing one of those commands, then we should not return a deadlock error immediately. Instead, wait for a subsequent client interaction which permits errors to be returned. To handle this, wsrep_before_command() now accepts a parameter keep_command_error. If set to true, keep_command_error will cause the wsrep-lib side to skip result handling and to keep the current error for the next interaction with the client.
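
In sketch form (C++, simplified; THD and the wsrep client state are stubbed out here, and only wsrep_command_no_result() mirrors the actual helper added to sql_parse.cc in the diff below):

    enum Command { COM_QUERY, COM_STMT_PREPARE, COM_STMT_FETCH,
                   COM_STMT_SEND_LONG_DATA, COM_STMT_CLOSE };

    // Mirrors the helper added to sql_parse.cc: these commands cannot
    // deliver an error result to the client.
    static bool wsrep_command_no_result(Command command)
    {
      return command == COM_STMT_PREPARE ||
             command == COM_STMT_FETCH ||
             command == COM_STMT_SEND_LONG_DATA ||
             command == COM_STMT_CLOSE;
    }

    struct ClientState                 // stand-in for thd->wsrep_cs()
    {
      bool keep_command_error= false;
      int before_command(bool keep)
      {
        keep_command_error= keep;      // the error stays pending instead of being reported now
        return 0;
      }
    };

    static int wsrep_before_command(ClientState &cs, Command command)
    {
      /* If the current command cannot return a result, keep the BF-abort
         error until a later command that can deliver it to the client. */
      return cs.before_command(wsrep_command_no_result(command));
    }
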
Reviewed-by: Jan Lindström --- mysql-test/suite/galera/r/galera#500.result | 2 - .../suite/galera/r/galera_bf_abort_ps.result | 16 +++++++ .../galera/r/galera_bf_abort_ps_threadpool.result | 22 +++++++++ mysql-test/suite/galera/t/galera_bf_abort_ps.cnf | 3 ++ mysql-test/suite/galera/t/galera_bf_abort_ps.test | 34 ++++++++++++++ .../galera/t/galera_bf_abort_ps_threadpool.cnf | 7 +++ .../galera/t/galera_bf_abort_ps_threadpool.test | 54 ++++++++++++++++++++++ sql/sql_parse.cc | 31 +++++++++---- sql/wsrep_trans_observer.h | 10 +++- wsrep-lib | 2 +- 10 files changed, 166 insertions(+), 15 deletions(-) create mode 100644 mysql-test/suite/galera/r/galera_bf_abort_ps.result create mode 100644 mysql-test/suite/galera/r/galera_bf_abort_ps_threadpool.result create mode 100644 mysql-test/suite/galera/t/galera_bf_abort_ps.cnf create mode 100644 mysql-test/suite/galera/t/galera_bf_abort_ps.test create mode 100644 mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.cnf create mode 100644 mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.test diff --git a/mysql-test/suite/galera/r/galera#500.result b/mysql-test/suite/galera/r/galera#500.result index d983e844d76..a5ab0b19718 100644 --- a/mysql-test/suite/galera/r/galera#500.result +++ b/mysql-test/suite/galera/r/galera#500.result @@ -1,5 +1,3 @@ -connection node_1; -connection node_2; connection node_2; connection node_1; connection node_1; diff --git a/mysql-test/suite/galera/r/galera_bf_abort_ps.result b/mysql-test/suite/galera/r/galera_bf_abort_ps.result new file mode 100644 index 00000000000..42292cb20a0 --- /dev/null +++ b/mysql-test/suite/galera/r/galera_bf_abort_ps.result @@ -0,0 +1,16 @@ +connection node_2; +connection node_1; +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(6)) ENGINE=InnoDB; +connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2; +connection node_2; +START TRANSACTION; +INSERT INTO t1 VALUES (1,'node_2'); +connection node_1; +INSERT INTO t1 VALUES (1,'node_1'); +connection node_2a; +connection node_2; +INSERT INTO t1 VALUES (2, 'node_2'); +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +wsrep_local_aborts_increment +1 +DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_bf_abort_ps_threadpool.result b/mysql-test/suite/galera/r/galera_bf_abort_ps_threadpool.result new file mode 100644 index 00000000000..7482e76778e --- /dev/null +++ b/mysql-test/suite/galera/r/galera_bf_abort_ps_threadpool.result @@ -0,0 +1,22 @@ +connection node_2; +connection node_1; +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(6)) ENGINE=InnoDB; +connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2; +connection node_2; +START TRANSACTION; +INSERT INTO t1 VALUES (1,'node_2'); +connection node_2a; +SET GLOBAL debug_dbug = "+d,sync.wsrep_apply_cb"; +connection node_1; +INSERT INTO t1 VALUES (1,'node_1'); +connection node_2a; +SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached"; +connection node_2; +SET DEBUG_SYNC = "wsrep_before_before_command SIGNAL signal.wsrep_apply_cb WAIT_FOR bf_abort"; +INSERT INTO t1 VALUES (2, 'node_2'); +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +wsrep_local_aborts_increment +1 +SET DEBUG_SYNC = 'RESET'; +SET GLOBAL debug_dbug = DEFAULT; +DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_bf_abort_ps.cnf b/mysql-test/suite/galera/t/galera_bf_abort_ps.cnf new file mode 100644 index 00000000000..34c1a8cc3cf --- /dev/null +++ b/mysql-test/suite/galera/t/galera_bf_abort_ps.cnf @@ -0,0 +1,3 @@ +!include ../galera_2nodes.cnf 
+[mysqltest] +ps-protocol \ No newline at end of file diff --git a/mysql-test/suite/galera/t/galera_bf_abort_ps.test b/mysql-test/suite/galera/t/galera_bf_abort_ps.test new file mode 100644 index 00000000000..d2dfb92651e --- /dev/null +++ b/mysql-test/suite/galera/t/galera_bf_abort_ps.test @@ -0,0 +1,34 @@ +# +# MDEV-24255 +# Test BF abort of a transaction that has ps-protocol enabled +# + +--source include/galera_cluster.inc + +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(6)) ENGINE=InnoDB; +--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2 + +--connection node_2 +--let $wsrep_local_bf_aborts_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'` + +START TRANSACTION; +INSERT INTO t1 VALUES (1,'node_2'); + +--connection node_1 +INSERT INTO t1 VALUES (1,'node_1'); + +--connection node_2a +--let $wait_condition = SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'node_1' +--source include/wait_condition.inc + +--connection node_2 +--error ER_LOCK_DEADLOCK +INSERT INTO t1 VALUES (2, 'node_2'); + +--let $wsrep_local_bf_aborts_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'` + +--disable_query_log +--eval SELECT $wsrep_local_bf_aborts_after - $wsrep_local_bf_aborts_before = 1 AS wsrep_local_aborts_increment; +--enable_query_log + +DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.cnf b/mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.cnf new file mode 100644 index 00000000000..83baa995c17 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.cnf @@ -0,0 +1,7 @@ +!include ../galera_2nodes.cnf + +[mysqld] +thread-handling=pool-of-threads + +[mysqltest] +ps-protocol diff --git a/mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.test b/mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.test new file mode 100644 index 00000000000..56348a6f527 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_bf_abort_ps_threadpool.test @@ -0,0 +1,54 @@ +# +# MDEV-24255 +# Test BF abort of a transaction that has ps-protocol enabled +# This test stresses the case where wsrep_before_command() +# finds the transaction in state s_must_abort. This only +# possible when the server is using the thread pool. +# + +--source include/galera_cluster.inc +--source include/have_debug_sync.inc + +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(6)) ENGINE=InnoDB; + +--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2 + +--connection node_2 +--let $wsrep_local_bf_aborts_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'` + +START TRANSACTION; +INSERT INTO t1 VALUES (1,'node_2'); + +--connection node_2a +SET GLOBAL debug_dbug = "+d,sync.wsrep_apply_cb"; + +--connection node_1 +INSERT INTO t1 VALUES (1,'node_1'); + +--connection node_2a +SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached"; + +--connection node_2 +SET DEBUG_SYNC = "wsrep_before_before_command SIGNAL signal.wsrep_apply_cb WAIT_FOR bf_abort"; + +# +# The following INSERT is expected to enter +# wsrep_before_command() and find its transaction +# in state s_must_abort. +# Notice that the test appears more complicated +# than it needs to... 
however we cannot use +# --send for this INSERT, otherwise mysqltest +# will not use ps-protocol +# +--error ER_LOCK_DEADLOCK +INSERT INTO t1 VALUES (2, 'node_2'); + +--let $wsrep_local_bf_aborts_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'` + +--disable_query_log +--eval SELECT $wsrep_local_bf_aborts_after - $wsrep_local_bf_aborts_before = 1 AS wsrep_local_aborts_increment; +--enable_query_log + +SET DEBUG_SYNC = 'RESET'; +SET GLOBAL debug_dbug = DEFAULT; +DROP TABLE t1; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index e03b98177d0..209caa9460e 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1164,6 +1164,14 @@ static bool wsrep_tables_accessible_when_detached(const TABLE_LIST *tables) } return true; } + +static bool wsrep_command_no_result(char command) +{ + return (command == COM_STMT_PREPARE || + command == COM_STMT_FETCH || + command == COM_STMT_SEND_LONG_DATA || + command == COM_STMT_CLOSE); +} #endif /* WITH_WSREP */ #ifndef EMBEDDED_LIBRARY @@ -1287,12 +1295,20 @@ bool do_command(THD *thd) #ifdef WITH_WSREP DEBUG_SYNC(thd, "wsrep_before_before_command"); /* - Aborted by background rollbacker thread. - Handle error here and jump straight to out + If this command does not return a result, then we + instruct wsrep_before_command() to skip result handling. + This causes BF aborted transaction to roll back but keep + the error state until next command which is able to return + a result to the client. */ - if (wsrep_before_command(thd)) + if (wsrep_before_command(thd, wsrep_command_no_result(command))) { - thd->store_globals(); + /* + Aborted by background rollbacker thread. + Handle error here and jump straight to out. + Notice that thd->store_globals() is called + in wsrep_before_command(). + */ WSREP_LOG_THD(thd, "enter found BF aborted"); DBUG_ASSERT(!thd->mdl_context.has_locks()); DBUG_ASSERT(!thd->get_stmt_da()->is_set()); @@ -2384,12 +2400,7 @@ dispatch_end: WSREP_DEBUG("THD is killed at dispatch_end"); } wsrep_after_command_before_result(thd); - if (wsrep_current_error(thd) && - !(command == COM_STMT_PREPARE || - command == COM_STMT_FETCH || - command == COM_STMT_SEND_LONG_DATA || - command == COM_STMT_CLOSE - )) + if (wsrep_current_error(thd) && !wsrep_command_no_result(command)) { /* todo: Pass wsrep client state current error to override */ wsrep_override_error(thd, wsrep_current_error(thd), diff --git a/sql/wsrep_trans_observer.h b/sql/wsrep_trans_observer.h index 05970e8b12f..dbe710a0256 100644 --- a/sql/wsrep_trans_observer.h +++ b/sql/wsrep_trans_observer.h @@ -442,11 +442,17 @@ wsrep_wait_rollback_complete_and_acquire_ownership(THD *thd) DBUG_VOID_RETURN; } -static inline int wsrep_before_command(THD* thd) +static inline int wsrep_before_command(THD* thd, bool keep_command_error) { return (thd->wsrep_cs().state() != wsrep::client_state::s_none ? - thd->wsrep_cs().before_command() : 0); + thd->wsrep_cs().before_command(keep_command_error) : 0); +} + +static inline int wsrep_before_command(THD* thd) +{ + return wsrep_before_command(thd, false); } + /* Called after each command. 
diff --git a/wsrep-lib b/wsrep-lib index 41a6e9dad79..dcf3ce91cdb 160000 --- a/wsrep-lib +++ b/wsrep-lib @@ -1 +1 @@ -Subproject commit 41a6e9dad79c921134e44cf974b6b7ca3b84e538 +Subproject commit dcf3ce91cdb9d254ae04ecc6a2f91f46b171280d -- cgit v1.2.1 From e59c1cef3bc4016f9fa9d7a0f6935463b7283a58 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Mon, 28 Dec 2020 21:20:13 -0800 Subject: Correction of the merge 10.2 into 10.3 for MDEV-23619 (correction for commit 6fed6de93f120b5e311b79892e7865639e9613a4) --- mysql-test/main/cte_recursive.result | 4 ++-- sql/sql_lex.cc | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/mysql-test/main/cte_recursive.result b/mysql-test/main/cte_recursive.result index 62354a619c1..21c6b225a89 100644 --- a/mysql-test/main/cte_recursive.result +++ b/mysql-test/main/cte_recursive.result @@ -1301,7 +1301,7 @@ select ancestors.name, ancestors.dob from ancestors; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY ALL NULL NULL NULL NULL 24 4 DERIVED folks ALL NULL NULL NULL NULL 12 Using where -6 RECURSIVE UNION ALL NULL NULL NULL NULL 12 +6 UNION ALL NULL NULL NULL NULL 12 5 RECURSIVE UNION ALL NULL NULL NULL NULL 24 NULL UNION RESULT ALL NULL NULL NULL NULL NULL 3 DERIVED folks ALL NULL NULL NULL NULL 12 Using where @@ -4029,7 +4029,7 @@ id select_type table type possible_keys key key_len ref rows Extra 3 RECURSIVE UNION t1 ALL NULL NULL NULL NULL 4 Using where 3 RECURSIVE UNION ref key0 key0 9 test.t1.c 2 NULL UNION RESULT ALL NULL NULL NULL NULL NULL -4 RECURSIVE UNION ALL NULL NULL NULL NULL 4 +4 UNION ALL NULL NULL NULL NULL 4 with recursive r_cte as ( select * from t1 as s union diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index dc0662e11b8..6116dee6e7e 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -4712,14 +4712,14 @@ void st_select_lex::set_explain_type(bool on_the_fly) /* pos_in_table_list=NULL for e.g. post-join aggregation JOIN_TABs. */ - if (!tab->table); - else if (const TABLE_LIST *pos= tab->table->pos_in_table_list) + if (!(tab->table && tab->table->pos_in_table_list)) + continue; + TABLE_LIST *tbl= tab->table->pos_in_table_list; + if (tbl->with && tbl->with->is_recursive && + tbl->is_with_table_recursive_reference()) { - if (pos->with && pos->with->is_recursive) - { - uses_cte= true; - break; - } + uses_cte= true; + break; } } if (uses_cte) -- cgit v1.2.1 From 4f5d5a7857b48b6c34608f1e8dc2fe354cacd373 Mon Sep 17 00:00:00 2001 From: Rucha Deodhar Date: Mon, 28 Dec 2020 21:27:27 +0530 Subject: MDEV-23875: select into outfile not respect UMASK and UMASK_DIR Analysis: select into outfile creates files everytime with 666 permission, regardsless if umask environment variables and umask settings on OS level. It seems hardcoded. Fix: change 0666 to 0644 which will let anybody consume the file but not change it. 
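
At the POSIX level the intent is simply the following (illustrative sketch; the server itself goes through mysql_file_create() and init_io_cache() as shown in the diff below):

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int create_outfile(const char *path)
    {
      /* 0644: owner read/write, group and others read-only.
         O_EXCL preserves the "fail if the file already exists" behaviour
         of SELECT ... INTO OUTFILE. */
      int fd= open(path, O_WRONLY | O_CREAT | O_EXCL, 0644);
      if (fd < 0)
        return fd;
    #ifdef HAVE_FCHMOD
      (void) fchmod(fd, 0644);         /* because open() masks the mode with umask() */
    #else
      (void) chmod(path, 0644);
    #endif
      return fd;
    }
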
--- sql/sql_class.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 7eaafbd9044..b16a750aa9b 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -2859,12 +2859,12 @@ static File create_file(THD *thd, char *path, sql_exchange *exchange, } /* Create the file world readable */ if ((file= mysql_file_create(key_select_to_file, - path, 0666, O_WRONLY|O_EXCL, MYF(MY_WME))) < 0) + path, 0644, O_WRONLY|O_EXCL, MYF(MY_WME))) < 0) return file; #ifdef HAVE_FCHMOD - (void) fchmod(file, 0666); // Because of umask() + (void) fchmod(file, 0644); // Because of umask() #else - (void) chmod(path, 0666); + (void) chmod(path, 0644; #endif if (init_io_cache(cache, file, 0L, WRITE_CACHE, 0L, 1, MYF(MY_WME))) { -- cgit v1.2.1 From c1a7a82bcac4b8694bdb79c665b42de7a3865d05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 1 Jan 2021 19:15:46 +0200 Subject: WolfSSL v4.6.0-stable --- extra/wolfssl/CMakeLists.txt | 2 +- extra/wolfssl/wolfssl | 2 +- include/ssl_compat.h | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/extra/wolfssl/CMakeLists.txt b/extra/wolfssl/CMakeLists.txt index 953d377ebcf..566106abb86 100644 --- a/extra/wolfssl/CMakeLists.txt +++ b/extra/wolfssl/CMakeLists.txt @@ -44,7 +44,7 @@ ADD_DEFINITIONS(-DWOLFSSL_LIB -DBUILDING_WOLFSSL) INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/wolfssl) IF(MSVC) # size_t to long truncation warning - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -wd4267 -wd4334 -wd4028") + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -wd4267 -wd4334 -wd4028 -wd4244") ENDIF() ADD_CONVENIENCE_LIBRARY(wolfssl ${WOLFSSL_SOURCES}) diff --git a/extra/wolfssl/wolfssl b/extra/wolfssl/wolfssl index e116c89a58a..9c87f979a7f 160000 --- a/extra/wolfssl/wolfssl +++ b/extra/wolfssl/wolfssl @@ -1 +1 @@ -Subproject commit e116c89a58af750421d82ece13f80516d2bde02e +Subproject commit 9c87f979a7f1d3a6d786b260653d566c1d31a1c4 diff --git a/include/ssl_compat.h b/include/ssl_compat.h index 8cc0e6a9a2b..9f4b6be8d95 100644 --- a/include/ssl_compat.h +++ b/include/ssl_compat.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2016, 2017 MariaDB Corporation + Copyright (c) 2016, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -73,19 +73,19 @@ #define EVP_MD_CTX_SIZE sizeof(EVP_MD_CTX) #endif -#define OPENSSL_init_ssl(X,Y) SSL_library_init() #define DH_set0_pqg(D,P,Q,G) ((D)->p= (P), (D)->g= (G)) #define EVP_CIPHER_CTX_buf_noconst(ctx) ((ctx)->buf) #define EVP_CIPHER_CTX_encrypting(ctx) ((ctx)->encrypt) #define EVP_CIPHER_CTX_SIZE sizeof(EVP_CIPHER_CTX) #ifndef HAVE_WOLFSSL +#define OPENSSL_init_ssl(X,Y) SSL_library_init() #define EVP_MD_CTX_reset(X) EVP_MD_CTX_cleanup(X) #define EVP_CIPHER_CTX_reset(X) EVP_CIPHER_CTX_cleanup(X) -#endif #define X509_get0_notBefore(X) X509_get_notBefore(X) #define X509_get0_notAfter(X) X509_get_notAfter(X) #endif +#endif #ifndef TLS1_3_VERSION #define SSL_CTX_set_ciphersuites(X,Y) 0 -- cgit v1.2.1 From 734c587f684828fc4ffa725cb75375ac8fc65cd4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 1 Jan 2021 19:17:03 +0200 Subject: MDEV-20386: Allow RDRAND, RDSEED WITH_MSAN Let us use Intel intrinsic functions in WolfSSL whenever possible. This allows such code to be compiled WITH_MSAN. 
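
For illustration, this is what using the intrinsics means in practice (C++, not WolfSSL's actual code; it needs -mrdrnd and -mrdseed, the same flags the CMake change below probes for). Unlike inline assembly, the compiler sees the intrinsic's write to *out, which is what allows MemorySanitizer to track the value.

    #include <immintrin.h>

    // Returns 1 on success, 0 if the hardware did not produce a value.
    static int hw_random64(unsigned long long *out)
    {
      /* Try RDSEED first (direct entropy source), then fall back to RDRAND;
         both intrinsics return 1 when a value was written to *out. */
      for (int i= 0; i < 10; i++)
        if (_rdseed64_step(out))
          return 1;
      for (int i= 0; i < 10; i++)
        if (_rdrand64_step(out))
          return 1;
      return 0;
    }
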
--- extra/wolfssl/CMakeLists.txt | 55 ++++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/extra/wolfssl/CMakeLists.txt b/extra/wolfssl/CMakeLists.txt index 566106abb86..c99fb155dd6 100644 --- a/extra/wolfssl/CMakeLists.txt +++ b/extra/wolfssl/CMakeLists.txt @@ -9,20 +9,32 @@ ENDIF() IF(CMAKE_SIZEOF_VOID_P MATCHES 8) IF(MSVC) SET(WOLFSSL_INTELASM ON) + SET(WOLFSSL_X86_64_BUILD 1) + SET(HAVE_INTEL_RDSEED 1) + SET(HAVE_INTEL_RDRAND 1) ELSEIF(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64") + SET(WOLFSSL_X86_64_BUILD 1) IF(CMAKE_C_COMPILER_ID MATCHES GNU AND CMAKE_C_COMPILER_VERSION VERSION_LESS 4.9) MESSAGE_ONCE(NO_INTEL_ASSEMBLY "Disable Intel assembly for WolfSSL - compiler is too old") - ELSEIF(WITH_MSAN) - MESSAGE_ONCE(MSAN_CANT_HANDLE_IT - "Disable Intel assembly for WolfSSL - MSAN can't handle it") ELSE() - MY_CHECK_C_COMPILER_FLAG(-maes) - MY_CHECK_C_COMPILER_FLAG(-msse4) - MY_CHECK_C_COMPILER_FLAG(-mpclmul) + IF(WITH_MSAN) + MESSAGE_ONCE(MSAN_CANT_HANDLE_IT + "Disable Intel assembly for WolfSSL - MSAN can't handle it") + ELSE() + MY_CHECK_C_COMPILER_FLAG(-maes) + MY_CHECK_C_COMPILER_FLAG(-msse4) + MY_CHECK_C_COMPILER_FLAG(-mpclmul) + IF(have_C__maes AND have_C__msse4 AND have_C__mpclmul) + SET(WOLFSSL_INTELASM ON) + ENDIF() + ENDIF() MY_CHECK_C_COMPILER_FLAG(-mrdrnd) MY_CHECK_C_COMPILER_FLAG(-mrdseed) - IF(have_C__maes AND have_C__msse4 AND have_C__mpclmul) - SET(WOLFSSL_INTELASM ON) + IF(have_C__mrdrnd) + SET(HAVE_INTEL_RDRAND ON) + ENDIF() + IF(have_C__mrdseed) + SET(HAVE_INTEL_RDSEED ON) ENDIF() ENDIF() ENDIF() @@ -109,33 +121,22 @@ ELSE() SET(WOLFCRYPT_SOURCES ${WOLFCRYPT_SOURCES} ${WOLFCRYPT_SRCDIR}/integer.c) ENDIF() -IF(WOLFSSL_INTELASM) - SET(WOLFSSL_AESNI 1) - +IF(WOLFSSL_X86_64_BUILD) LIST(APPEND WOLFCRYPT_SOURCES ${WOLFCRYPT_SRCDIR}/cpuid.c) IF(MSVC) + SET(WOLFSSL_AESNI 1) LIST(APPEND WOLFCRYPT_SOURCES ${WOLFCRYPT_SRCDIR}/aes_asm.asm) - SET(WOLFSSL_X86_64_BUILD 1) - SET(HAVE_INTEL_RDSEED 1) - SET(HAVE_INTEL_RDRAND 1) IF(CMAKE_C_COMPILER_ID MATCHES Clang) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maes -msse4.2 -mpclmul -mrdrnd -mrdseed") ENDIF() - ELSEIF(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64") + ELSEIF(WOLFSSL_INTELASM) + SET(WOLFSSL_AESNI 1) SET(USE_INTEL_SPEEDUP 1) LIST(APPEND WOLFCRYPT_SOURCES - ${WOLFCRYPT_SRCDIR}/aes_asm.S - ${WOLFCRYPT_SRCDIR}/sha512_asm.S - ${WOLFCRYPT_SRCDIR}/sha256_asm.S) - ADD_DEFINITIONS(-maes -msse4 -mpclmul) - IF(have_C__mrdrnd) - SET(HAVE_INTEL_RDRAND 1) - ADD_DEFINITIONS(-mrdrnd) - ENDIF() - IF(have_C__mrdseed) - SET(HAVE_INTEL_RDSEED 1) - ADD_DEFINITIONS(-mrdseed) - ENDIF() + ${WOLFCRYPT_SRCDIR}/aes_asm.S + ${WOLFCRYPT_SRCDIR}/sha512_asm.S + ${WOLFCRYPT_SRCDIR}/sha256_asm.S) + ADD_DEFINITIONS(-maes -msse4.2 -mpclmul) ENDIF() ENDIF() -- cgit v1.2.1 From d67e17bb812ac1198d916aed3da8573d0f620fb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Sat, 2 Jan 2021 16:11:55 +0200 Subject: MDEV-24512 Assertion failed in rec_is_metadata() in btr_discard_only_page_on_level() btr_discard_only_page_on_level(): Attempt to read the MDEV-15562 metadata record from the leaf page, not the root page. In the root, the leftmost (in this case, the only) node pointer would look like a metadata record. This corruption bug was introduced in commit 0e5a4ac2532c64a545796c787354dc41d61d0e62 (MDEV-15562). 
The scenario is rare: a column was dropped instantly or the order of columns was changed instantly, and then the table became empty in such a way that in the last step, the root page had one child page. Normally, a non-leaf B-tree page would always contain at least 2 children. --- mysql-test/suite/innodb/r/instant_alter_debug.result | 16 +++++++++++++++- mysql-test/suite/innodb/t/instant_alter_debug.test | 20 ++++++++++++++++++-- storage/innobase/btr/btr0btr.cc | 8 +++----- 3 files changed, 36 insertions(+), 8 deletions(-) diff --git a/mysql-test/suite/innodb/r/instant_alter_debug.result b/mysql-test/suite/innodb/r/instant_alter_debug.result index 9fcb8b05a34..6f45f072fb6 100644 --- a/mysql-test/suite/innodb/r/instant_alter_debug.result +++ b/mysql-test/suite/innodb/r/instant_alter_debug.result @@ -327,7 +327,6 @@ FROM information_schema.global_status WHERE variable_name = 'innodb_instant_alter_column'; instants 22 -SET GLOBAL innodb_purge_rseg_truncate_frequency = @save_frequency; # # MDEV-21045 AddressSanitizer: use-after-poison in mem_heap_dup / row_log_table_get_pk_col # @@ -370,3 +369,18 @@ b SET DEBUG_SYNC='RESET'; disconnect con2; DROP TABLE t1; +# +# MDEV-24512 Assertion failed in rec_is_metadata() +# in btr_discard_only_page_on_level() +# +SET @save_limit= @@GLOBAL.innodb_limit_optimistic_insert_debug; +SET GLOBAL innodb_limit_optimistic_insert_debug=2; +CREATE TABLE t1 (c CHAR(1) UNIQUE) ENGINE=InnoDB; +ALTER TABLE t1 ADD c2 INT NOT NULL DEFAULT 0 FIRST; +INSERT INTO t1 (c) VALUES ('x'),('d'),('r'),('f'),('y'),('u'),('m'),('d'); +ERROR 23000: Duplicate entry 'd' for key 'c' +SET GLOBAL innodb_limit_optimistic_insert_debug=@save_limit; +SELECT * FROM t1; +c2 c +DROP TABLE t1; +SET GLOBAL innodb_purge_rseg_truncate_frequency = @save_frequency; diff --git a/mysql-test/suite/innodb/t/instant_alter_debug.test b/mysql-test/suite/innodb/t/instant_alter_debug.test index fe80de2ca51..774de9202b5 100644 --- a/mysql-test/suite/innodb/t/instant_alter_debug.test +++ b/mysql-test/suite/innodb/t/instant_alter_debug.test @@ -361,8 +361,6 @@ SELECT variable_value-@old_instant instants FROM information_schema.global_status WHERE variable_name = 'innodb_instant_alter_column'; -SET GLOBAL innodb_purge_rseg_truncate_frequency = @save_frequency; - --echo # --echo # MDEV-21045 AddressSanitizer: use-after-poison in mem_heap_dup / row_log_table_get_pk_col --echo # @@ -414,3 +412,21 @@ SELECT * FROM t1; SET DEBUG_SYNC='RESET'; --disconnect con2 DROP TABLE t1; + +--echo # +--echo # MDEV-24512 Assertion failed in rec_is_metadata() +--echo # in btr_discard_only_page_on_level() +--echo # + +SET @save_limit= @@GLOBAL.innodb_limit_optimistic_insert_debug; +SET GLOBAL innodb_limit_optimistic_insert_debug=2; +CREATE TABLE t1 (c CHAR(1) UNIQUE) ENGINE=InnoDB; + +ALTER TABLE t1 ADD c2 INT NOT NULL DEFAULT 0 FIRST; +--error ER_DUP_ENTRY +INSERT INTO t1 (c) VALUES ('x'),('d'),('r'),('f'),('y'),('u'),('m'),('d'); +SET GLOBAL innodb_limit_optimistic_insert_debug=@save_limit; +SELECT * FROM t1; +DROP TABLE t1; + +SET GLOBAL innodb_purge_rseg_truncate_frequency = @save_frequency; diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc index 8ee7d167805..2bda1ad372d 100644 --- a/storage/innobase/btr/btr0btr.cc +++ b/storage/innobase/btr/btr0btr.cc @@ -4094,12 +4094,13 @@ btr_discard_only_page_on_level( mtr_t* mtr) /*!< in: mtr */ { ulint page_level = 0; - trx_id_t max_trx_id; ut_ad(!index->is_dummy); /* Save the PAGE_MAX_TRX_ID from the leaf page. 
*/ - max_trx_id = page_get_max_trx_id(buf_block_get_frame(block)); + const trx_id_t max_trx_id = page_get_max_trx_id(block->frame); + const rec_t* r = page_rec_get_next(page_get_infimum_rec(block->frame)); + ut_ad(rec_is_metadata(r, *index) == index->is_instant()); while (block->page.id.page_no() != dict_index_get_page(index)) { btr_cur_t cursor; @@ -4154,9 +4155,6 @@ btr_discard_only_page_on_level( const rec_t* rec = NULL; rec_offs* offsets = NULL; if (index->table->instant) { - const rec_t* r = page_rec_get_next(page_get_infimum_rec( - block->frame)); - ut_ad(rec_is_metadata(r, *index) == index->is_instant()); if (rec_is_alter_metadata(r, *index)) { heap = mem_heap_create(srv_page_size); offsets = rec_get_offsets(r, index, NULL, true, -- cgit v1.2.1 From 25db9ffa8bdab8a2f2af3c7f154343dd6c6d238f Mon Sep 17 00:00:00 2001 From: Rucha Deodhar Date: Mon, 4 Jan 2021 14:57:15 +0530 Subject: MDEV-23875 is failing to build on windows. --- sql/sql_class.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/sql_class.cc b/sql/sql_class.cc index b16a750aa9b..2595717572a 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -2864,7 +2864,7 @@ static File create_file(THD *thd, char *path, sql_exchange *exchange, #ifdef HAVE_FCHMOD (void) fchmod(file, 0644); // Because of umask() #else - (void) chmod(path, 0644; + (void) chmod(path, 0644); #endif if (init_io_cache(cache, file, 0L, WRITE_CACHE, 0L, 1, MYF(MY_WME))) { -- cgit v1.2.1 From 608b0ee52ef3e854ce14a407e64e936adbbeba23 Mon Sep 17 00:00:00 2001 From: Sujatha Date: Thu, 31 Dec 2020 18:15:31 +0530 Subject: MDEV-23033: All slaves crash once in ~24 hours and loop restart with signal 11 Problem: ======= Upon deleting or updating a row in a parent table (with primary key), if the child table has virtual column and an associated key with ON UPDATE CASCADE/ON DELETE CASCADE, it will result in slave crash. Analysis: ======== Tables which are related through foreign key require prelocking similar to triggers. i.e If a table has triggers/foreign keys we should add all tables and routines used by them to the prelocking set. This prelocking happens during 'open_and_lock_tables' call. Each table being opened is checked for foreign key references. If foreign key reference exists then the child table is opened and it is linked to the table_list. Upon any modification to parent table its corresponding child tables are retried from table_list and they are updated accordingly. This prelocking work fine on master. On slave prelocking works for following cases. - Statement/mixed based replication - In row based replication when trigger execution is enabled through 'slave_run_triggers_for_rbr=YES/LOGGING/ENFORCE' Otherwise it results in an assert/crash, as the parent table will not find the corresponding child table and it will be NULL. Dereferencing NULL pointer leads to slave server exit. Fix: === Introduce a new 'slave_fk_event_map' flag similar to 'trg_event_map'. This flag will ensure that when foreign key is enabled in row based replication all the parent and child tables are prelocked, so that parent is able to locate the child table. Note: This issue is specific to slave, hence only slave needs to be upgraded. 
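
Conceptually the prelocking addition looks like the sketch below (C++, purely illustrative: TableRef and PrelockSet are invented names, and the real change operates on TABLE_LIST in sql_base.cc and log_event.cc via the new slave_fk_event_map flag):

    #include <algorithm>
    #include <string>
    #include <vector>

    struct TableRef
    {
      std::string name;
      std::vector<TableRef*> fk_children;   // tables whose foreign keys reference this one
    };

    struct PrelockSet
    {
      std::vector<TableRef*> tables;

      bool contains(TableRef *t) const
      {
        return std::find(tables.begin(), tables.end(), t) != tables.end();
      }

      /* Row-based applier: open a table together with every table that
         references it through a foreign key, so that a cascading
         DELETE/UPDATE on the parent can locate its child tables instead
         of dereferencing a NULL pointer. */
      void add_with_fk_children(TableRef *t)
      {
        if (contains(t))                    // guard against circular references
          return;
        tables.push_back(t);
        for (TableRef *child : t->fk_children)
          add_with_fk_children(child);
      }
    };
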
--- mysql-test/suite/rpl/r/rpl_row_vcol_crash.result | 380 ++++++++++++++++++++ mysql-test/suite/rpl/t/rpl_row_vcol_crash.test | 425 +++++++++++++++++++++++ sql/log_event.cc | 42 +-- sql/sql_base.cc | 118 ++++--- sql/table.h | 6 +- 5 files changed, 906 insertions(+), 65 deletions(-) create mode 100644 mysql-test/suite/rpl/r/rpl_row_vcol_crash.result create mode 100644 mysql-test/suite/rpl/t/rpl_row_vcol_crash.test diff --git a/mysql-test/suite/rpl/r/rpl_row_vcol_crash.result b/mysql-test/suite/rpl/r/rpl_row_vcol_crash.result new file mode 100644 index 00000000000..f76d8935fa8 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_row_vcol_crash.result @@ -0,0 +1,380 @@ +include/master-slave.inc +[connection master] +# +# Test case 1: KEY on a virtual column with ON DELETE CASCADE +# +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1),(2),(3); +CREATE TABLE t2 (id INT NOT NULL PRIMARY KEY, +t1_id INT NOT NULL, +v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), KEY (t1_id), +CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE +) ENGINE=InnoDB; +INSERT INTO t2 VALUES (90,1,NULL); +INSERT INTO t2 VALUES (91,2,default); +DELETE FROM t1 WHERE id=1; +connection slave; +# +# Verify data consistency on slave +# +include/diff_tables.inc [master:test.t1, slave:test.t1] +include/diff_tables.inc [master:test.t2, slave:test.t2] +connection master; +DROP TABLE t2,t1; +connection slave; +# +# Test case 2: Verify "ON DELETE CASCADE" for parent->child->child scenario +# Parent table: users +# Child tables: matchmaking_groups, matchmaking_group_users +# Parent table: matchmaking_groups +# Child tables: matchmaking_group_users, matchmaking_group_maps +# +# Deleting a row from parent table should be reflected in +# child tables. 
+# matchmaking_groups->matchmaking_group_users->matchmaking_group_maps +# users->matchmaking_group_users->matchmaking_group_maps +# +connection master; +CREATE TABLE users (id INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, +name VARCHAR(32) NOT NULL DEFAULT '' +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +CREATE TABLE matchmaking_groups ( +id BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, +host_user_id INT UNSIGNED NOT NULL UNIQUE, +v_col INT AS (host_user_id+1) VIRTUAL, KEY (v_col), +CONSTRAINT FOREIGN KEY (host_user_id) REFERENCES users (id) +ON DELETE CASCADE ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +CREATE TABLE matchmaking_group_users ( +matchmaking_group_id BIGINT UNSIGNED NOT NULL, +user_id INT UNSIGNED NOT NULL, +v_col1 int as (user_id+1) virtual, KEY (v_col1), +PRIMARY KEY (matchmaking_group_id,user_id), +UNIQUE KEY user_id (user_id), +CONSTRAINT FOREIGN KEY (matchmaking_group_id) +REFERENCES matchmaking_groups (id) ON DELETE CASCADE ON UPDATE CASCADE, +CONSTRAINT FOREIGN KEY (user_id) +REFERENCES users (id) ON DELETE CASCADE ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +CREATE TABLE matchmaking_group_maps ( +matchmaking_group_id BIGINT UNSIGNED NOT NULL, +map_id TINYINT UNSIGNED NOT NULL, +v_col2 INT AS (map_id+1) VIRTUAL, KEY (v_col2), +PRIMARY KEY (matchmaking_group_id,map_id), +CONSTRAINT FOREIGN KEY (matchmaking_group_id) +REFERENCES matchmaking_groups (id) ON DELETE CASCADE ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +connection slave; +connection master; +INSERT INTO users VALUES (NULL,'foo'),(NULL,'bar'); +INSERT INTO matchmaking_groups VALUES (10,1,default),(11,2,default); +INSERT INTO matchmaking_group_users VALUES (10,1,default),(11,2,default); +INSERT INTO matchmaking_group_maps VALUES (10,55,default),(11,66,default); +DELETE FROM matchmaking_groups WHERE id = 10; +connection slave; +# +# No rows should be returned as ON DELETE CASCASE should have removed +# corresponding rows from child tables. There should not any mismatch +# of 'id' field between parent->child. 
+# +SELECT * FROM matchmaking_group_users WHERE matchmaking_group_id NOT IN (SELECT id FROM matchmaking_groups); +matchmaking_group_id user_id v_col1 +SELECT * FROM matchmaking_group_maps WHERE matchmaking_group_id NOT IN (SELECT id FROM matchmaking_groups); +matchmaking_group_id map_id v_col2 +# +# Rows with id=11 should be present +# +SELECT * FROM matchmaking_group_users; +matchmaking_group_id user_id v_col1 +11 2 3 +SELECT * FROM matchmaking_group_maps; +matchmaking_group_id map_id v_col2 +11 66 67 +connection master; +DELETE FROM users WHERE id = 2; +connection slave; +# +# No rows should be present in both the child tables +# +SELECT * FROM matchmaking_group_users; +matchmaking_group_id user_id v_col1 +SELECT * FROM matchmaking_group_maps; +matchmaking_group_id map_id v_col2 +connection master; +DROP TABLE matchmaking_group_maps, matchmaking_group_users, matchmaking_groups, users; +connection slave; +# +# Test case 3: KEY on a virtual column with ON UPDATE CASCADE +# +connection master; +CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b INT NOT NULL) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1, 80); +CREATE TABLE t2 (a INT KEY, b INT, +v_col int as (b+1) virtual, KEY (v_col), +CONSTRAINT b FOREIGN KEY (b) REFERENCES t1(a) ON UPDATE CASCADE +) ENGINE=InnoDB; +INSERT INTO t2 VALUES (51, 1, default); +connection slave; +connection master; +UPDATE t1 SET a = 50 WHERE a = 1; +# +# Master: Verify that ON UPDATE CASCADE works fine +# old_row: (51, 1, 2) ON UPDATE New_row: (51, 50, 51) +# +SELECT * FROM t2 WHERE b=50; +a b v_col +51 50 51 +connection slave; +# +# Slave: Verify that ON UPDATE CASCADE works fine +# old_row: (51, 1, 2) ON UPDATE New_row: (51, 50, 51) +# +SELECT * FROM t2 WHERE b=50; +a b v_col +51 50 51 +connection master; +DROP TABLE t2, t1; +connection slave; +# +# Test case 4: Define triggers on master, their results should be +# replicated as part of row events and they should be +# applied on slave with the default +# slave_run_triggers_for_rbr=NO +# +connection master; +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t2 (count INT NOT NULL) ENGINE=InnoDB; +CREATE TRIGGER trg AFTER INSERT ON t1 FOR EACH ROW INSERT INTO t2 VALUES (1); +INSERT INTO t1 VALUES (2),(3); +connection slave; +SHOW GLOBAL VARIABLES LIKE 'slave_run_triggers_for_rbr'; +Variable_name Value +slave_run_triggers_for_rbr NO +# +# As two rows are inserted in table 't1', two rows should get inserted +# into table 't2' as part of trigger. +# +include/assert.inc [Table t2 should have two rows.] +connection master; +DROP TABLE t1,t2; +connection slave; +# +# Test case 5: Define triggers + Foreign Keys on master, their results +# should be replicated as part of row events and master +# and slave should be in sync. +# +connection master; +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t2 (t1_id INT NOT NULL, +v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), KEY (t1_id), +CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE +) ENGINE=InnoDB; +CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB; +CREATE TRIGGER trg AFTER INSERT ON t1 FOR EACH ROW INSERT INTO t3 VALUES (1); +INSERT INTO t1 VALUES (2),(3); +INSERT INTO t2 VALUES (2, default), (3, default); +connection slave; +# +# As two rows are inserted in table 't1', two rows should get inserted +# into table 't3' as part of trigger. +# +include/assert.inc [Table t3 should have two rows.] 
+# +# Verify ON DELETE CASCASE correctness +# +connection master; +DELETE FROM t1 WHERE id=2; +connection slave; +connection master; +include/diff_tables.inc [master:test.t1, slave:test.t1] +include/diff_tables.inc [master:test.t2, slave:test.t2] +include/diff_tables.inc [master:test.t3, slave:test.t3] +DROP TABLE t3,t2,t1; +connection slave; +# +# Test case 6: Triggers are present only on slave and +# 'slave_run_triggers_for_rbr=NO' +# +connection slave; +SET @save_slave_run_triggers_for_rbr= @@GLOBAL.slave_run_triggers_for_rbr; +SET GLOBAL slave_run_triggers_for_rbr= NO;; +SHOW GLOBAL VARIABLES LIKE '%slave_run_triggers_for_rbr%'; +Variable_name Value +slave_run_triggers_for_rbr NO +connection master; +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t2 (t1_id INT NOT NULL, +v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), +KEY (t1_id), CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE +) ENGINE=InnoDB; +CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB; +connection slave; +CREATE TRIGGER trg AFTER INSERT ON t2 FOR EACH ROW INSERT INTO t3 VALUES (1); +connection master; +INSERT INTO t1 VALUES (2),(3); +INSERT INTO t2 VALUES (2, default), (3, default); +connection slave; +# +# Count must be 0 +# +include/assert.inc [Table t3 should have zero rows.] +connection master; +DELETE FROM t1 WHERE id=2; +connection slave; +SET GLOBAL slave_run_triggers_for_rbr= @save_slave_run_triggers_for_rbr; +# +# Verify t1, t2 are consistent on slave. +# +include/diff_tables.inc [master:test.t1, slave:test.t1] +include/diff_tables.inc [master:test.t2, slave:test.t2] +connection master; +DROP TABLE t3,t2,t1; +connection slave; +# +# Test case 7: Triggers are present only on slave and +# 'slave_run_triggers_for_rbr=YES' +# +connection slave; +SET @save_slave_run_triggers_for_rbr= @@GLOBAL.slave_run_triggers_for_rbr; +SET GLOBAL slave_run_triggers_for_rbr= YES;; +SHOW GLOBAL VARIABLES LIKE '%slave_run_triggers_for_rbr%'; +Variable_name Value +slave_run_triggers_for_rbr YES +connection master; +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t2 (t1_id INT NOT NULL, +v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), +KEY (t1_id), CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE +) ENGINE=InnoDB; +CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB; +connection slave; +CREATE TRIGGER trg AFTER INSERT ON t2 FOR EACH ROW INSERT INTO t3 VALUES (1); +connection master; +INSERT INTO t1 VALUES (2),(3); +INSERT INTO t2 VALUES (2, default), (3, default); +connection slave; +# +# Count must be 2 +# +include/assert.inc [Table t3 should have two rows.] +connection master; +DELETE FROM t1 WHERE id=2; +connection slave; +SET GLOBAL slave_run_triggers_for_rbr= @save_slave_run_triggers_for_rbr; +# +# Verify t1, t2 are consistent on slave. 
+# +include/diff_tables.inc [master:test.t1, slave:test.t1] +include/diff_tables.inc [master:test.t2, slave:test.t2] +connection master; +DROP TABLE t3,t2,t1; +connection slave; +# +# Test case 8: Triggers and Foreign Keys are present only on slave and +# 'slave_run_triggers_for_rbr=NO' +# +connection slave; +SET @save_slave_run_triggers_for_rbr= @@GLOBAL.slave_run_triggers_for_rbr; +SET GLOBAL slave_run_triggers_for_rbr= NO;; +SHOW GLOBAL VARIABLES LIKE '%slave_run_triggers_for_rbr%'; +Variable_name Value +slave_run_triggers_for_rbr NO +connection master; +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB; +SET sql_log_bin=0; +CREATE TABLE t2 (t1_id INT NOT NULL,v_col INT AS (t1_id+1) VIRTUAL) ENGINE=INNODB; +SET sql_log_bin=1; +CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB; +connection slave; +CREATE TABLE t2 (t1_id INT NOT NULL, +v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), KEY (t1_id), +CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE +) ENGINE=InnoDB; +CREATE TRIGGER trg AFTER INSERT ON t2 FOR EACH ROW INSERT INTO t3 VALUES (1); +connection master; +INSERT INTO t1 VALUES (2),(3); +INSERT INTO t2 VALUES (2, default), (3, default); +connection slave; +# +# Count must be 0 +# +include/assert.inc [Table t3 should have zero rows.] +connection master; +DELETE FROM t1 WHERE id=2; +# t1: Should have one row +SELECT * FROM t1; +id +3 +# t2: Should have two rows +SELECT * FROM t2; +t1_id v_col +2 3 +3 4 +connection slave; +# t1: Should have one row +SELECT * FROM t1; +id +3 +# t2: Should have one row on slave due to ON DELETE CASCASE +SELECT * FROM t2; +t1_id v_col +3 4 +SET GLOBAL slave_run_triggers_for_rbr= @save_slave_run_triggers_for_rbr; +connection master; +DROP TABLE t3,t2,t1; +connection slave; +# +# Test case 9: Triggers are Foreign Keys are present only on slave and +# 'slave_run_triggers_for_rbr=YES' +# +connection slave; +SET @save_slave_run_triggers_for_rbr= @@GLOBAL.slave_run_triggers_for_rbr; +SET GLOBAL slave_run_triggers_for_rbr= YES;; +SHOW GLOBAL VARIABLES LIKE '%slave_run_triggers_for_rbr%'; +Variable_name Value +slave_run_triggers_for_rbr YES +connection master; +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB; +SET sql_log_bin=0; +CREATE TABLE t2 (t1_id INT NOT NULL,v_col INT AS (t1_id+1) VIRTUAL) ENGINE=INNODB; +SET sql_log_bin=1; +CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB; +connection slave; +CREATE TABLE t2 (t1_id INT NOT NULL, +v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), KEY (t1_id), +CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE +) ENGINE=InnoDB; +CREATE TRIGGER trg AFTER INSERT ON t2 FOR EACH ROW INSERT INTO t3 VALUES (1); +connection master; +INSERT INTO t1 VALUES (2),(3); +INSERT INTO t2 VALUES (2, default), (3, default); +connection slave; +# +# Count must be 2 +# +include/assert.inc [Table t3 should have two rows.] 
+connection master; +DELETE FROM t1 WHERE id=2; +# t1: Should have one row +SELECT * FROM t1; +id +3 +# t2: Should have two rows +SELECT * FROM t2; +t1_id v_col +2 3 +3 4 +connection slave; +# t1: Should have one row +SELECT * FROM t1; +id +3 +# t2: Should have one row on slave due to ON DELETE CASCASE +SELECT * FROM t2; +t1_id v_col +3 4 +SET GLOBAL slave_run_triggers_for_rbr= @save_slave_run_triggers_for_rbr; +connection master; +DROP TABLE t3,t2,t1; +connection slave; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_row_vcol_crash.test b/mysql-test/suite/rpl/t/rpl_row_vcol_crash.test new file mode 100644 index 00000000000..84ee14977f3 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_row_vcol_crash.test @@ -0,0 +1,425 @@ +# ==== Purpose ==== +# +# Test verifies that, slave doesn't report any assert on UPDATE or DELETE of +# row which tries to update the virtual columns with associated KEYs. +# +# Test scenarios are listed below. +# 1) KEY on a virtual column with ON DELETE CASCADE +# 2) Verify "ON DELETE CASCADE" for parent->child->child scenario +# 3) KEY on a virtual column with ON UPDATE CASCADE +# 4) Define triggers on master, their results should be replicated +# as part of row events and they should be applied on slave with +# the default slave_run_triggers_for_rbr=NO +# 5) Define triggers + Foreign Keys on master, their results should be +# replicated as part of row events and master and slave should be in sync. +# 6) Triggers are present only on slave and 'slave_run_triggers_for_rbr=NO' +# 7) Triggers are present only on slave and 'slave_run_triggers_for_rbr=YES' +# 8) Triggers and Foreign Keys are present only on slave and +# 'slave_run_triggers_for_rbr=NO' +# 9) Triggers are Foreign Keys are present only on slave and +# 'slave_run_triggers_for_rbr=YES' +# +# ==== References ==== +# +# MDEV-23033: All slaves crash once in ~24 hours and loop restart with signal 11 +# + +--source include/have_binlog_format_row.inc +--source include/have_innodb.inc +--source include/master-slave.inc + + +--echo # +--echo # Test case 1: KEY on a virtual column with ON DELETE CASCADE +--echo # +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1),(2),(3); + +CREATE TABLE t2 (id INT NOT NULL PRIMARY KEY, + t1_id INT NOT NULL, + v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), KEY (t1_id), + CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE +) ENGINE=InnoDB; + +INSERT INTO t2 VALUES (90,1,NULL); +INSERT INTO t2 VALUES (91,2,default); + +# Following query results in an assert on slave +DELETE FROM t1 WHERE id=1; +--sync_slave_with_master + +--echo # +--echo # Verify data consistency on slave +--echo # +--let $diff_tables= master:test.t1, slave:test.t1 +--source include/diff_tables.inc +--let $diff_tables= master:test.t2, slave:test.t2 +--source include/diff_tables.inc + +--connection master +DROP TABLE t2,t1; +--sync_slave_with_master + +--echo # +--echo # Test case 2: Verify "ON DELETE CASCADE" for parent->child->child scenario +--echo # Parent table: users +--echo # Child tables: matchmaking_groups, matchmaking_group_users +--echo # Parent table: matchmaking_groups +--echo # Child tables: matchmaking_group_users, matchmaking_group_maps +--echo # +--echo # Deleting a row from parent table should be reflected in +--echo # child tables. 
+--echo # matchmaking_groups->matchmaking_group_users->matchmaking_group_maps +--echo # users->matchmaking_group_users->matchmaking_group_maps +--echo # + +--connection master +CREATE TABLE users (id INT UNSIGNED AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(32) NOT NULL DEFAULT '' +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE matchmaking_groups ( + id BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY, + host_user_id INT UNSIGNED NOT NULL UNIQUE, + v_col INT AS (host_user_id+1) VIRTUAL, KEY (v_col), + CONSTRAINT FOREIGN KEY (host_user_id) REFERENCES users (id) + ON DELETE CASCADE ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE matchmaking_group_users ( + matchmaking_group_id BIGINT UNSIGNED NOT NULL, + user_id INT UNSIGNED NOT NULL, + v_col1 int as (user_id+1) virtual, KEY (v_col1), + PRIMARY KEY (matchmaking_group_id,user_id), + UNIQUE KEY user_id (user_id), + CONSTRAINT FOREIGN KEY (matchmaking_group_id) + REFERENCES matchmaking_groups (id) ON DELETE CASCADE ON UPDATE CASCADE, + CONSTRAINT FOREIGN KEY (user_id) + REFERENCES users (id) ON DELETE CASCADE ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE matchmaking_group_maps ( + matchmaking_group_id BIGINT UNSIGNED NOT NULL, + map_id TINYINT UNSIGNED NOT NULL, + v_col2 INT AS (map_id+1) VIRTUAL, KEY (v_col2), + PRIMARY KEY (matchmaking_group_id,map_id), + CONSTRAINT FOREIGN KEY (matchmaking_group_id) + REFERENCES matchmaking_groups (id) ON DELETE CASCADE ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +--sync_slave_with_master + +--connection master +INSERT INTO users VALUES (NULL,'foo'),(NULL,'bar'); +INSERT INTO matchmaking_groups VALUES (10,1,default),(11,2,default); +INSERT INTO matchmaking_group_users VALUES (10,1,default),(11,2,default); +INSERT INTO matchmaking_group_maps VALUES (10,55,default),(11,66,default); + +DELETE FROM matchmaking_groups WHERE id = 10; +--sync_slave_with_master + +--echo # +--echo # No rows should be returned as ON DELETE CASCASE should have removed +--echo # corresponding rows from child tables. There should not any mismatch +--echo # of 'id' field between parent->child. 
+--echo # +SELECT * FROM matchmaking_group_users WHERE matchmaking_group_id NOT IN (SELECT id FROM matchmaking_groups); +SELECT * FROM matchmaking_group_maps WHERE matchmaking_group_id NOT IN (SELECT id FROM matchmaking_groups); + +--echo # +--echo # Rows with id=11 should be present +--echo # +SELECT * FROM matchmaking_group_users; +SELECT * FROM matchmaking_group_maps; + +--connection master +DELETE FROM users WHERE id = 2; +--sync_slave_with_master + +--echo # +--echo # No rows should be present in both the child tables +--echo # +SELECT * FROM matchmaking_group_users; +SELECT * FROM matchmaking_group_maps; + +--connection master +DROP TABLE matchmaking_group_maps, matchmaking_group_users, matchmaking_groups, users; +--sync_slave_with_master + +--echo # +--echo # Test case 3: KEY on a virtual column with ON UPDATE CASCADE +--echo # + +--connection master +CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b INT NOT NULL) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1, 80); + +CREATE TABLE t2 (a INT KEY, b INT, + v_col int as (b+1) virtual, KEY (v_col), + CONSTRAINT b FOREIGN KEY (b) REFERENCES t1(a) ON UPDATE CASCADE +) ENGINE=InnoDB; +INSERT INTO t2 VALUES (51, 1, default); +--sync_slave_with_master + +--connection master +UPDATE t1 SET a = 50 WHERE a = 1; + +--echo # +--echo # Master: Verify that ON UPDATE CASCADE works fine +--echo # old_row: (51, 1, 2) ON UPDATE New_row: (51, 50, 51) +--echo # +SELECT * FROM t2 WHERE b=50; +--sync_slave_with_master + +--echo # +--echo # Slave: Verify that ON UPDATE CASCADE works fine +--echo # old_row: (51, 1, 2) ON UPDATE New_row: (51, 50, 51) +--echo # +SELECT * FROM t2 WHERE b=50; + +--connection master +DROP TABLE t2, t1; +--sync_slave_with_master + +--echo # +--echo # Test case 4: Define triggers on master, their results should be +--echo # replicated as part of row events and they should be +--echo # applied on slave with the default +--echo # slave_run_triggers_for_rbr=NO +--echo # + +# In row-based replication, the binary log contains row changes. It will have +# both the changes made by the statement itself, and the changes made by the +# triggers that were invoked by the statement. Slave server(s) do not need to +# run triggers for row changes they are applying. Hence verify that this +# property remains the same and data should be available as if trigger was +# executed. Please note by default slave_run_triggers_for_rbr=NO. + +--connection master +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t2 (count INT NOT NULL) ENGINE=InnoDB; +CREATE TRIGGER trg AFTER INSERT ON t1 FOR EACH ROW INSERT INTO t2 VALUES (1); +INSERT INTO t1 VALUES (2),(3); +--sync_slave_with_master + +SHOW GLOBAL VARIABLES LIKE 'slave_run_triggers_for_rbr'; +--echo # +--echo # As two rows are inserted in table 't1', two rows should get inserted +--echo # into table 't2' as part of trigger. +--echo # +--let $assert_cond= COUNT(*) = 2 FROM t2 +--let $assert_text= Table t2 should have two rows. +--source include/assert.inc + +--connection master +DROP TABLE t1,t2; +--sync_slave_with_master + +--echo # +--echo # Test case 5: Define triggers + Foreign Keys on master, their results +--echo # should be replicated as part of row events and master +--echo # and slave should be in sync. 
+--echo # +--connection master +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t2 (t1_id INT NOT NULL, + v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), KEY (t1_id), + CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE +) ENGINE=InnoDB; +CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB; +CREATE TRIGGER trg AFTER INSERT ON t1 FOR EACH ROW INSERT INTO t3 VALUES (1); + +INSERT INTO t1 VALUES (2),(3); +INSERT INTO t2 VALUES (2, default), (3, default); +--sync_slave_with_master + +--echo # +--echo # As two rows are inserted in table 't1', two rows should get inserted +--echo # into table 't3' as part of trigger. +--echo # +--let $assert_cond= COUNT(*) = 2 FROM t3 +--let $assert_text= Table t3 should have two rows. +--source include/assert.inc + +--echo # +--echo # Verify ON DELETE CASCASE correctness +--echo # +--connection master +DELETE FROM t1 WHERE id=2; +--sync_slave_with_master + +--connection master +--let $diff_tables= master:test.t1, slave:test.t1 +--source include/diff_tables.inc +--let $diff_tables= master:test.t2, slave:test.t2 +--source include/diff_tables.inc +--let $diff_tables= master:test.t3, slave:test.t3 +--source include/diff_tables.inc + +DROP TABLE t3,t2,t1; +--sync_slave_with_master + +# +# Test case: Triggers only on slave +# +--write_file $MYSQLTEST_VARDIR/tmp/trig_on_slave.inc PROCEDURE + if ($slave_run_triggers_for_rbr == '') { + --die !!!ERROR IN TEST: you must set $slave_run_triggers_for_rbr + } + +--connection slave +SET @save_slave_run_triggers_for_rbr= @@GLOBAL.slave_run_triggers_for_rbr; +--eval SET GLOBAL slave_run_triggers_for_rbr= $slave_run_triggers_for_rbr; +SHOW GLOBAL VARIABLES LIKE '%slave_run_triggers_for_rbr%'; + +--connection master +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t2 (t1_id INT NOT NULL, + v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), + KEY (t1_id), CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE +) ENGINE=InnoDB; +CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB; +--sync_slave_with_master + +CREATE TRIGGER trg AFTER INSERT ON t2 FOR EACH ROW INSERT INTO t3 VALUES (1); + +--connection master +INSERT INTO t1 VALUES (2),(3); +INSERT INTO t2 VALUES (2, default), (3, default); +--sync_slave_with_master + +if ($slave_run_triggers_for_rbr == 'NO') { +--echo # +--echo # Count must be 0 +--echo # +--let $assert_cond= COUNT(*) = 0 FROM t3 +--let $assert_text= Table t3 should have zero rows. +--source include/assert.inc +} +if ($slave_run_triggers_for_rbr == 'YES') { +--echo # +--echo # Count must be 2 +--echo # +--let $assert_cond= COUNT(*) = 2 FROM t3 +--let $assert_text= Table t3 should have two rows. +--source include/assert.inc +} + +--connection master +DELETE FROM t1 WHERE id=2; +--sync_slave_with_master +SET GLOBAL slave_run_triggers_for_rbr= @save_slave_run_triggers_for_rbr; + +--echo # +--echo # Verify t1, t2 are consistent on slave. 
+--echo # +--let $diff_tables= master:test.t1, slave:test.t1 +--source include/diff_tables.inc +--let $diff_tables= master:test.t2, slave:test.t2 +--source include/diff_tables.inc + +--connection master +DROP TABLE t3,t2,t1; +--sync_slave_with_master +#END OF +PROCEDURE + +--echo # +--echo # Test case 6: Triggers are present only on slave and +--echo # 'slave_run_triggers_for_rbr=NO' +--echo # +--let $slave_run_triggers_for_rbr=NO +--source $MYSQLTEST_VARDIR/tmp/trig_on_slave.inc + +--echo # +--echo # Test case 7: Triggers are present only on slave and +--echo # 'slave_run_triggers_for_rbr=YES' +--echo # +--let $slave_run_triggers_for_rbr=YES +--source $MYSQLTEST_VARDIR/tmp/trig_on_slave.inc +--remove_file $MYSQLTEST_VARDIR/tmp/trig_on_slave.inc + +# +# Test case: Trigger and Foreign Key are present only on slave +# +--write_file $MYSQLTEST_VARDIR/tmp/trig_fk_on_slave.inc PROCEDURE + if ($slave_run_triggers_for_rbr == '') { + --die !!!ERROR IN TEST: you must set $slave_run_triggers_for_rbr + } + +--connection slave +SET @save_slave_run_triggers_for_rbr= @@GLOBAL.slave_run_triggers_for_rbr; +--eval SET GLOBAL slave_run_triggers_for_rbr= $slave_run_triggers_for_rbr; +SHOW GLOBAL VARIABLES LIKE '%slave_run_triggers_for_rbr%'; + +--connection master +CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB; +SET sql_log_bin=0; +CREATE TABLE t2 (t1_id INT NOT NULL,v_col INT AS (t1_id+1) VIRTUAL) ENGINE=INNODB; +SET sql_log_bin=1; +CREATE TABLE t3 (count INT NOT NULL) ENGINE=InnoDB; +--sync_slave_with_master + +# Have foreign key and trigger on slave. +CREATE TABLE t2 (t1_id INT NOT NULL, + v_col INT AS (t1_id+1) VIRTUAL, KEY (v_col), KEY (t1_id), + CONSTRAINT a FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE +) ENGINE=InnoDB; +CREATE TRIGGER trg AFTER INSERT ON t2 FOR EACH ROW INSERT INTO t3 VALUES (1); + +--connection master +INSERT INTO t1 VALUES (2),(3); +INSERT INTO t2 VALUES (2, default), (3, default); +--sync_slave_with_master + +if ($slave_run_triggers_for_rbr == 'NO') { +--echo # +--echo # Count must be 0 +--echo # +--let $assert_cond= COUNT(*) = 0 FROM t3 +--let $assert_text= Table t3 should have zero rows. +--source include/assert.inc +} +if ($slave_run_triggers_for_rbr == 'YES') { +--echo # +--echo # Count must be 2 +--echo # +--let $assert_cond= COUNT(*) = 2 FROM t3 +--let $assert_text= Table t3 should have two rows. 
+--source include/assert.inc +} + +--connection master +DELETE FROM t1 WHERE id=2; +--echo # t1: Should have one row +SELECT * FROM t1; +--echo # t2: Should have two rows +SELECT * FROM t2; +--sync_slave_with_master +--echo # t1: Should have one row +SELECT * FROM t1; +--echo # t2: Should have one row on slave due to ON DELETE CASCASE +SELECT * FROM t2; +SET GLOBAL slave_run_triggers_for_rbr= @save_slave_run_triggers_for_rbr; + +--connection master +DROP TABLE t3,t2,t1; +--sync_slave_with_master +#END OF +PROCEDURE + +--echo # +--echo # Test case 8: Triggers and Foreign Keys are present only on slave and +--echo # 'slave_run_triggers_for_rbr=NO' +--echo # +--let $slave_run_triggers_for_rbr=NO +--source $MYSQLTEST_VARDIR/tmp/trig_fk_on_slave.inc + +--echo # +--echo # Test case 9: Triggers are Foreign Keys are present only on slave and +--echo # 'slave_run_triggers_for_rbr=YES' +--echo # +--let $slave_run_triggers_for_rbr=YES +--source $MYSQLTEST_VARDIR/tmp/trig_fk_on_slave.inc +--remove_file $MYSQLTEST_VARDIR/tmp/trig_fk_on_slave.inc + +--source include/rpl_end.inc diff --git a/sql/log_event.cc b/sql/log_event.cc index c649e1f64fa..c32f31db1f6 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -10718,7 +10718,7 @@ int Rows_log_event::do_add_row_data(uchar *row_data, size_t length) There was the same problem with MERGE MYISAM tables and so here we try to go the same way. */ -static void restore_empty_query_table_list(LEX *lex) +inline void restore_empty_query_table_list(LEX *lex) { if (lex->first_not_own_table()) (*lex->first_not_own_table()->prev_global)= NULL; @@ -10733,6 +10733,8 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) TABLE* table; DBUG_ENTER("Rows_log_event::do_apply_event(Relay_log_info*)"); int error= 0; + LEX *lex= thd->lex; + uint8 new_trg_event_map= get_trg_event_map(); /* If m_table_id == ~0ULL, then we have a dummy event that does not contain any data. In that case, we just remove all tables in the @@ -10823,27 +10825,29 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) DBUG_ASSERT(!debug_sync_set_action(thd, STRING_WITH_LEN(action))); };); - if (slave_run_triggers_for_rbr) - { - LEX *lex= thd->lex; - uint8 new_trg_event_map= get_trg_event_map(); - - /* - Trigger's procedures work with global table list. So we have to add - rgi->tables_to_lock content there to get trigger's in the list. + /* + Trigger's procedures work with global table list. So we have to add + rgi->tables_to_lock content there to get trigger's in the list. 
- Then restore_empty_query_table_list() restore the list as it was - */ - DBUG_ASSERT(lex->query_tables == NULL); - if ((lex->query_tables= rgi->tables_to_lock)) - rgi->tables_to_lock->prev_global= &lex->query_tables; + Then restore_empty_query_table_list() restore the list as it was + */ + DBUG_ASSERT(lex->query_tables == NULL); + if ((lex->query_tables= rgi->tables_to_lock)) + rgi->tables_to_lock->prev_global= &lex->query_tables; - for (TABLE_LIST *tables= rgi->tables_to_lock; tables; - tables= tables->next_global) + for (TABLE_LIST *tables= rgi->tables_to_lock; tables; + tables= tables->next_global) + { + if (slave_run_triggers_for_rbr) { tables->trg_event_map= new_trg_event_map; lex->query_tables_last= &tables->next_global; } + else if (!WSREP_ON) + { + tables->slave_fk_event_map= new_trg_event_map; + lex->query_tables_last= &tables->next_global; + } } if (open_and_lock_tables(thd, rgi->tables_to_lock, FALSE, 0)) { @@ -11193,8 +11197,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) } /* remove trigger's tables */ - if (slave_run_triggers_for_rbr) - restore_empty_query_table_list(thd->lex); + restore_empty_query_table_list(thd->lex); #if defined(WITH_WSREP) && defined(HAVE_QUERY_CACHE) if (WSREP(thd) && thd->wsrep_exec_mode == REPL_RECV) @@ -11212,8 +11215,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) DBUG_RETURN(error); err: - if (slave_run_triggers_for_rbr) - restore_empty_query_table_list(thd->lex); + restore_empty_query_table_list(thd->lex); rgi->slave_close_thread_tables(thd); DBUG_RETURN(error); } diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 8e57ea437b6..b8d18abb50c 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -4341,6 +4341,70 @@ bool table_already_fk_prelocked(TABLE_LIST *tl, LEX_STRING *db, return false; } +/** + Extend the table_list to include foreign tables for prelocking. + + @param[in] thd Thread context. + @param[in] prelocking_ctx Prelocking context of the statement. + @param[in] table_list Table list element for table. + @param[in] sp Routine body. + @param[out] need_prelocking Set to TRUE if method detects that prelocking + required, not changed otherwise. + + @retval FALSE Success. + @retval TRUE Failure (OOM). 
+*/ +inline bool +prepare_fk_prelocking_list(THD *thd, Query_tables_list *prelocking_ctx, + TABLE_LIST *table_list, bool *need_prelocking, + uint8 op) +{ + List fk_list; + List_iterator fk_list_it(fk_list); + FOREIGN_KEY_INFO *fk; + Query_arena *arena, backup; + + arena= thd->activate_stmt_arena_if_needed(&backup); + + table_list->table->file->get_parent_foreign_key_list(thd, &fk_list); + if (thd->is_error()) + { + if (arena) + thd->restore_active_arena(arena, &backup); + return TRUE; + } + + *need_prelocking= TRUE; + + while ((fk= fk_list_it++)) + { + // FK_OPTION_RESTRICT and FK_OPTION_NO_ACTION only need read access + static bool can_write[]= { true, false, true, true, false, true }; + thr_lock_type lock_type; + + if ((op & (1 << TRG_EVENT_DELETE) && can_write[fk->delete_method]) + || (op & (1 << TRG_EVENT_UPDATE) && can_write[fk->update_method])) + lock_type= TL_WRITE_ALLOW_WRITE; + else + lock_type= TL_READ; + + if (table_already_fk_prelocked(prelocking_ctx->query_tables, + fk->foreign_db, fk->foreign_table, + lock_type)) + continue; + + TABLE_LIST *tl= (TABLE_LIST *) thd->alloc(sizeof(TABLE_LIST)); + tl->init_one_table_for_prelocking(fk->foreign_db->str, fk->foreign_db->length, + fk->foreign_table->str, fk->foreign_table->length, + NULL, lock_type, false, table_list->belong_to_view, + op, &prelocking_ctx->query_tables_last); + } + if (arena) + thd->restore_active_arena(arena, &backup); + + return FALSE; +} + /** Defines how prelocking algorithm for DML statements should handle table list @@ -4381,55 +4445,21 @@ handle_table(THD *thd, Query_tables_list *prelocking_ctx, add_tables_and_routines_for_triggers(thd, prelocking_ctx, table_list)) return TRUE; } - if (table_list->table->file->referenced_by_foreign_key()) { - List fk_list; - List_iterator fk_list_it(fk_list); - FOREIGN_KEY_INFO *fk; - Query_arena *arena, backup; - - arena= thd->activate_stmt_arena_if_needed(&backup); - - table_list->table->file->get_parent_foreign_key_list(thd, &fk_list); - if (thd->is_error()) - { - if (arena) - thd->restore_active_arena(arena, &backup); - return TRUE; - } - - *need_prelocking= TRUE; - - while ((fk= fk_list_it++)) - { - // FK_OPTION_RESTRICT and FK_OPTION_NO_ACTION only need read access - static bool can_write[]= { true, false, true, true, false, true }; - uint8 op= table_list->trg_event_map; - thr_lock_type lock_type; - - if ((op & (1 << TRG_EVENT_DELETE) && can_write[fk->delete_method]) - || (op & (1 << TRG_EVENT_UPDATE) && can_write[fk->update_method])) - lock_type= TL_WRITE_ALLOW_WRITE; - else - lock_type= TL_READ; - - if (table_already_fk_prelocked(prelocking_ctx->query_tables, - fk->foreign_db, fk->foreign_table, - lock_type)) - continue; - - TABLE_LIST *tl= (TABLE_LIST *) thd->alloc(sizeof(TABLE_LIST)); - tl->init_one_table_for_prelocking(fk->foreign_db->str, fk->foreign_db->length, - fk->foreign_table->str, fk->foreign_table->length, - NULL, lock_type, false, table_list->belong_to_view, - op, &prelocking_ctx->query_tables_last); - } - if (arena) - thd->restore_active_arena(arena, &backup); + return (prepare_fk_prelocking_list(thd, prelocking_ctx, table_list, + need_prelocking, + table_list->trg_event_map)); } } + else if (table_list->slave_fk_event_map && + table_list->table->file->referenced_by_foreign_key()) + { + return (prepare_fk_prelocking_list(thd, prelocking_ctx, + table_list, need_prelocking, + table_list->slave_fk_event_map)); + } return FALSE; } diff --git a/sql/table.h b/sql/table.h index 9a864f7ce9f..57706655d9b 100644 --- a/sql/table.h +++ b/sql/table.h @@ -2277,8 
+2277,12 @@ struct TABLE_LIST Indicates what triggers we need to pre-load for this TABLE_LIST when opening an associated TABLE. This is filled after the parsed tree is created. + + slave_fk_event_map is filled on the slave side with bitmaps value + representing row-based event operation to help find and prelock + possible FK constrain-related child tables. */ - uint8 trg_event_map; + uint8 trg_event_map, slave_fk_event_map; /* TRUE <=> this table is a const one and was optimized away. */ bool optimized_away; -- cgit v1.2.1 From cd529ae8ef1ac1c46396d4cba3fcdc336cffbd96 Mon Sep 17 00:00:00 2001 From: Stepan Patryshev Date: Wed, 30 Dec 2020 20:51:55 +0200 Subject: MDEV-24462: Added wait condition to make sure table t1 is replicated to node_2. --- mysql-test/suite/galera/t/galera_truncate.test | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mysql-test/suite/galera/t/galera_truncate.test b/mysql-test/suite/galera/t/galera_truncate.test index 79f9bad1f1b..3c3ee56a23f 100644 --- a/mysql-test/suite/galera/t/galera_truncate.test +++ b/mysql-test/suite/galera/t/galera_truncate.test @@ -14,6 +14,9 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB; INSERT INTO t1 VALUES (1); --connection node_2 +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t1'; +--source include/wait_condition.inc + TRUNCATE TABLE t1; SELECT COUNT(*) = 0 FROM t1; -- cgit v1.2.1 From 9de9e0c7817d3efb040e3d25ea305830dd36ee8e Mon Sep 17 00:00:00 2001 From: Stepan Patryshev Date: Wed, 30 Dec 2020 21:13:17 +0200 Subject: MDEV-24447: Added wait condition to make sure table t1 is replicated to node_2. --- mysql-test/suite/galera/t/galera_toi_lock_shared.test | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mysql-test/suite/galera/t/galera_toi_lock_shared.test b/mysql-test/suite/galera/t/galera_toi_lock_shared.test index 6857a0e08ca..6b7feec6031 100644 --- a/mysql-test/suite/galera/t/galera_toi_lock_shared.test +++ b/mysql-test/suite/galera/t/galera_toi_lock_shared.test @@ -10,6 +10,9 @@ CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); --connection node_2 +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t1'; +--source include/wait_condition.inc + ALTER TABLE t1 ADD COLUMN f2 INTEGER, LOCK=SHARED; --connection node_1 -- cgit v1.2.1 From 1284e6c30d51443c8747c161a1d6449460616524 Mon Sep 17 00:00:00 2001 From: Stepan Patryshev Date: Wed, 30 Dec 2020 22:42:34 +0200 Subject: MDEV-24464: Added wait condition to make sure table t1 is replicated to node_2. 
--- mysql-test/suite/galera/t/galera_toi_ddl_sequential.test | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mysql-test/suite/galera/t/galera_toi_ddl_sequential.test b/mysql-test/suite/galera/t/galera_toi_ddl_sequential.test index 51eae7005df..89a1af845c9 100644 --- a/mysql-test/suite/galera/t/galera_toi_ddl_sequential.test +++ b/mysql-test/suite/galera/t/galera_toi_ddl_sequential.test @@ -9,6 +9,9 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); --connection node_2 +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t1'; +--source include/wait_condition.inc + ALTER TABLE t1 ADD COLUMN f2 INTEGER; INSERT INTO t1 VALUES (2, 3); -- cgit v1.2.1 From 06644f704a5af72f00083ff2472c97068f42ca2d Mon Sep 17 00:00:00 2001 From: Stepan Patryshev Date: Wed, 30 Dec 2020 22:52:13 +0200 Subject: MDEV-24465: Added wait condition to make sure table t1 is replicated to node_2. --- mysql-test/suite/galera/t/galera_rsu_error.test | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mysql-test/suite/galera/t/galera_rsu_error.test b/mysql-test/suite/galera/t/galera_rsu_error.test index cad8154ac76..6de7607b6ec 100644 --- a/mysql-test/suite/galera/t/galera_rsu_error.test +++ b/mysql-test/suite/galera/t/galera_rsu_error.test @@ -9,6 +9,9 @@ CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB; INSERT INTO t1 VALUES (1), (1); --connection node_2 +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t1'; +--source include/wait_condition.inc + SET SESSION wsrep_OSU_method = "RSU"; --error ER_DUP_ENTRY ALTER TABLE t1 ADD PRIMARY KEY (f1); -- cgit v1.2.1 From 51b7438d23c02d19efa9ecd050963d2d11db0538 Mon Sep 17 00:00:00 2001 From: Stepan Patryshev Date: Wed, 30 Dec 2020 23:32:31 +0200 Subject: MDEV-24482: Added wait condition to make sure table t1 is replicated to node_2. --- mysql-test/suite/galera/t/galera_truncate_temporary.test | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mysql-test/suite/galera/t/galera_truncate_temporary.test b/mysql-test/suite/galera/t/galera_truncate_temporary.test index 3ad94eb9930..ea20911bd5d 100644 --- a/mysql-test/suite/galera/t/galera_truncate_temporary.test +++ b/mysql-test/suite/galera/t/galera_truncate_temporary.test @@ -67,6 +67,9 @@ CREATE TEMPORARY TABLE t1 (f1 INTEGER) ENGINE=InnoDB; INSERT INTO t1 VALUES (2); --connection node_2 +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t1'; +--source include/wait_condition.inc + TRUNCATE TABLE t1; SELECT COUNT(*) = 0 FROM t1; -- cgit v1.2.1 From 7edbd27258cd2f73887b1723e515139e654cf37e Mon Sep 17 00:00:00 2001 From: Stepan Patryshev Date: Mon, 4 Jan 2021 18:24:06 +0200 Subject: MDEV-24500: Added wait condition to make sure table t1 is replicated to node_2. 
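All of the wait-condition fixes in this group (MDEV-24462, MDEV-24447, MDEV-24464, MDEV-24465, MDEV-24482 and this MDEV-24500 change, whose hunk follows) apply the same mysqltest idiom, so a single consolidated sketch is given here. It is illustrative only and is assembled from the galera_truncate.test hunk above; the node names, table t1 and the follow-up TRUNCATE belong to that test rather than to any one patch:

--connection node_1
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);

--connection node_2
# Poll until the table created on node_1 has been applied on this node;
# 'test/t1' is the InnoDB-internal name of table t1 in schema test.
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t1';
--source include/wait_condition.inc

# Only now is it safe to issue the statement the test actually exercises.
TRUNCATE TABLE t1;
SELECT COUNT(*) = 0 FROM t1;
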
--- mysql-test/suite/galera_sr/t/GCF-900.test | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mysql-test/suite/galera_sr/t/GCF-900.test b/mysql-test/suite/galera_sr/t/GCF-900.test index 3f1b53630b6..b44423c5013 100644 --- a/mysql-test/suite/galera_sr/t/GCF-900.test +++ b/mysql-test/suite/galera_sr/t/GCF-900.test @@ -15,6 +15,9 @@ START TRANSACTION; INSERT INTO t1 VALUES (2, 0); --connection node_2 +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t1'; +--source include/wait_condition.inc + ALTER TABLE t1 DROP COLUMN f2; --connection node_1 -- cgit v1.2.1 From a81fbbc63eecd5d6e08abbd1002e18706a1194e8 Mon Sep 17 00:00:00 2001 From: Nikita Malyavin Date: Wed, 23 Dec 2020 17:00:22 +1000 Subject: handler0alter.cc: extract cache eviction and stats drop to functions --- storage/innobase/handler/handler0alter.cc | 86 ++++++++++++++++--------------- 1 file changed, 44 insertions(+), 42 deletions(-) diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 008f3b7d3fc..a479a96ce61 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -8762,6 +8762,44 @@ innobase_page_compression_try( DBUG_RETURN(false); } +static +void +dict_stats_try_drop_table(THD *thd, const table_name_t &name, + const LEX_CSTRING &table_name) +{ + char errstr[1024]; + if (dict_stats_drop_table(name.m_name, errstr, sizeof(errstr)) != DB_SUCCESS) + { + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_ALTER_INFO, + "Deleting persistent statistics" + " for table '%s' in InnoDB failed: %s", + table_name.str, + errstr); + } +} + +/** Evict the table from cache and reopen it. Drop outdated statistics. + @param thd mariadb THD entity + @param table innodb table + @param maria_table_name user-friendly table name for errors + @return newly opened table */ +static +dict_table_t* +innobase_reload_table(THD *thd, dict_table_t *table, + const LEX_CSTRING &table_name) +{ + char *tb_name= strdup(table->name.m_name); + dict_table_close(table, true, false); + dict_table_remove_from_cache(table); + table= dict_table_open_on_name(tb_name, TRUE, TRUE, + DICT_ERR_IGNORE_FK_NOKEY); + + /* Drop outdated table stats. */ + dict_stats_try_drop_table(thd, table->name, table_name); + free(tb_name); + return table; +} + /** Commit the changes made during prepare_inplace_alter_table() and inplace_alter_table() inside the data dictionary tables, when not rebuilding the table. @@ -9862,35 +9900,12 @@ foreign_fail: } if (ctx0->num_to_drop_vcol || ctx0->num_to_add_vcol) { - /* FIXME: this workaround does not seem to work with - partitioned tables */ DBUG_ASSERT(ctx0->old_table->get_ref_count() == 1); - trx_commit_for_mysql(m_prebuilt->trx); - char tb_name[NAME_LEN * 2 + 1 + 1]; - strcpy(tb_name, m_prebuilt->table->name.m_name); - dict_table_close(m_prebuilt->table, true, false); - dict_table_remove_from_cache(m_prebuilt->table); - m_prebuilt->table = dict_table_open_on_name( - tb_name, TRUE, TRUE, DICT_ERR_IGNORE_FK_NOKEY); - - /* Drop outdated table stats. 
*/ - char errstr[1024]; - if (dict_stats_drop_table( - m_prebuilt->table->name.m_name, - errstr, sizeof(errstr)) - != DB_SUCCESS) { - push_warning_printf( - m_user_thd, - Sql_condition::WARN_LEVEL_WARN, - ER_ALTER_INFO, - "Deleting persistent statistics" - " for table '%s' in" - " InnoDB failed: %s", - table->s->table_name.str, - errstr); - } + m_prebuilt->table = innobase_reload_table(m_user_thd, + m_prebuilt->table, + table->s->table_name); row_mysql_unlock_data_dictionary(trx); trx->free(); @@ -9950,8 +9965,6 @@ foreign_fail: old copy of the table (which was renamed to ctx->tmp_name). */ - char errstr[1024]; - DBUG_ASSERT(0 == strcmp(ctx->old_table->name.m_name, ctx->tmp_name)); @@ -9960,20 +9973,9 @@ foreign_fail: DBUG_SET("+d,innodb_report_deadlock"); ); - if (dict_stats_drop_table( - ctx->new_table->name.m_name, - errstr, sizeof(errstr)) - != DB_SUCCESS) { - push_warning_printf( - m_user_thd, - Sql_condition::WARN_LEVEL_WARN, - ER_ALTER_INFO, - "Deleting persistent statistics" - " for rebuilt table '%s' in" - " InnoDB failed: %s", - table->s->table_name.str, - errstr); - } + dict_stats_try_drop_table(m_user_thd, + ctx->new_table->name, + table->s->table_name); DBUG_EXECUTE_IF( "ib_rename_index_fail3", -- cgit v1.2.1 From f0baa8648493a6368f45c6cbf459832d5027aaff Mon Sep 17 00:00:00 2001 From: Nikita Malyavin Date: Wed, 23 Dec 2020 23:59:00 +1000 Subject: ut_ad(err != DB_DUPLICATE_KEY) in row_rename_table_for_mysql --- storage/innobase/row/row0mysql.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index c643a907722..a1620bc3212 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -4349,6 +4349,8 @@ row_rename_table_for_mysql( "END;\n" , FALSE, trx); + ut_ad(err != DB_DUPLICATE_KEY); + /* SYS_TABLESPACES and SYS_DATAFILES need to be updated if the table is in a single-table tablespace. */ if (err != DB_SUCCESS || !dict_table_is_file_per_table(table)) { -- cgit v1.2.1 From 9a645dae9e59ec398cfda33529c44002625ddc87 Mon Sep 17 00:00:00 2001 From: Nikita Malyavin Date: Mon, 21 Dec 2020 22:54:27 +1000 Subject: MDEV-23632 ALTER TABLE...ADD KEY creates corrupted index on virtual column mysql_col_offset was not updated after the new column has been added by an INSTANT ALTER TABLE -- table data dictionary had been remaining the same. When the virtual column is added or removed, table was usually evicted and then reopened, which triggered vcol info rebuild on the next open. However this also should be done when the usual column is added or removed: mariadb always stores virtual field at the end of maria record, so the shift should always happen. 
Fix: expand the eviction condition to the case when usual fields are added/removed Note: this should happen only in the case of !new_clustered: * When new_clustered is true, a new data dictionary is created, and vcol metadata is rebuilt in `alter_rebuild_apply_log()` * We can't do it in `new_clustered` case, because the old table is not yet subctituted correctly --- .../suite/innodb/r/innodb-virtual-columns2.result | 25 ++++++++++++++++++++++ .../suite/innodb/t/innodb-virtual-columns2.test | 20 +++++++++++++++++ storage/innobase/handler/handler0alter.cc | 5 ++++- 3 files changed, 49 insertions(+), 1 deletion(-) diff --git a/mysql-test/suite/innodb/r/innodb-virtual-columns2.result b/mysql-test/suite/innodb/r/innodb-virtual-columns2.result index 3574ba72849..99a1c610bd3 100644 --- a/mysql-test/suite/innodb/r/innodb-virtual-columns2.result +++ b/mysql-test/suite/innodb/r/innodb-virtual-columns2.result @@ -62,3 +62,28 @@ INSERT INTO t1 (i) VALUES (1),(2); SELECT * FROM t1 WHERE y BETWEEN 2012 AND 2016 FOR UPDATE; y i b vi DROP TABLE t1; +# +# MDEV-23632 ALTER TABLE...ADD KEY creates corrupted index on virtual column +# +CREATE TABLE t1(a INT PRIMARY KEY, b INT, g INT GENERATED ALWAYS AS(b)VIRTUAL) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1,1,default); +ALTER TABLE t1 ADD COLUMN c INT; +ALTER TABLE t1 ADD KEY(g); +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT g FROM t1 FORCE INDEX (g); +g +1 +DROP TABLE t1; +CREATE TABLE t1(a INT, b INT, g INT GENERATED ALWAYS AS(b)VIRTUAL) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1,1,default); +ALTER TABLE t1 ADD COLUMN c INT PRIMARY KEY; +ALTER TABLE t1 ADD KEY(g); +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT g FROM t1 FORCE INDEX (g); +g +1 +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/innodb-virtual-columns2.test b/mysql-test/suite/innodb/t/innodb-virtual-columns2.test index 474a6354576..13ecffcc896 100644 --- a/mysql-test/suite/innodb/t/innodb-virtual-columns2.test +++ b/mysql-test/suite/innodb/t/innodb-virtual-columns2.test @@ -52,3 +52,23 @@ SELECT * FROM t1 WHERE y BETWEEN 2012 AND 2016 FOR UPDATE; INSERT INTO t1 (i) VALUES (1),(2); SELECT * FROM t1 WHERE y BETWEEN 2012 AND 2016 FOR UPDATE; DROP TABLE t1; + +--echo # +--echo # MDEV-23632 ALTER TABLE...ADD KEY creates corrupted index on virtual column +--echo # + +CREATE TABLE t1(a INT PRIMARY KEY, b INT, g INT GENERATED ALWAYS AS(b)VIRTUAL) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1,1,default); +ALTER TABLE t1 ADD COLUMN c INT; +ALTER TABLE t1 ADD KEY(g); +CHECK TABLE t1; +SELECT g FROM t1 FORCE INDEX (g); +DROP TABLE t1; + +CREATE TABLE t1(a INT, b INT, g INT GENERATED ALWAYS AS(b)VIRTUAL) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1,1,default); +ALTER TABLE t1 ADD COLUMN c INT PRIMARY KEY; # Triggers `new_clustered` +ALTER TABLE t1 ADD KEY(g); +CHECK TABLE t1; +SELECT g FROM t1 FORCE INDEX (g); +DROP TABLE t1; diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index a479a96ce61..a1c11247e66 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -9899,7 +9899,10 @@ foreign_fail: } } - if (ctx0->num_to_drop_vcol || ctx0->num_to_add_vcol) { + if (ctx0->num_to_drop_vcol || ctx0->num_to_add_vcol + || (ctx0->new_table->n_v_cols && !new_clustered + && (ha_alter_info->alter_info->drop_list.elements + || ha_alter_info->alter_info->create_list.elements))) { DBUG_ASSERT(ctx0->old_table->get_ref_count() == 1); trx_commit_for_mysql(m_prebuilt->trx); -- 
cgit v1.2.1 From f319c4265f0ae18e2703adaff951a6a6c87bcace Mon Sep 17 00:00:00 2001 From: Andrei Elkin Date: Wed, 6 Jan 2021 19:55:55 +0200 Subject: MDEV-19442 add-on fixing windows build. --- plugin/server_audit/server_audit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/server_audit/server_audit.c b/plugin/server_audit/server_audit.c index ebb2c6c0a05..47878e2e667 100644 --- a/plugin/server_audit/server_audit.c +++ b/plugin/server_audit/server_audit.c @@ -2161,7 +2161,7 @@ void auditing(MYSQL_THD thd, unsigned int event_class, const void *ev) { const struct mysql_event_table *event = (const struct mysql_event_table *) ev; - if (do_log_user(event->user, SAFE_STRLEN(event->user), + if (do_log_user(event->user, (int) SAFE_STRLEN(event->user), cn->proxy, cn->proxy_length, 1)) { switch (event->event_subclass) -- cgit v1.2.1 From d846b55d9b899b4ecd318567e75c9713ca44ba23 Mon Sep 17 00:00:00 2001 From: Nikita Malyavin Date: Tue, 5 Mar 2019 21:12:54 +1000 Subject: MDEV-17891 Assertion failure upon attempt to replace into a full table Problem: Assertion `transactional_table || !changed || thd->transaction.stmt.modified_non_trans_table' failed due REPLACE into a versioned table. It is not specific to system versioning/pertitioning/heap, but this combination makes it much easier to reproduce. The thing is to make first ha_update_row call succeed to make info->deleted != 0. And then make REPLACE fail by any reason. In this scenario we overflow versioned partition, so next ha_update_row succeeds, but corresponding ha_write_row fails to insert history record. Fix: modified_non_trans_table is set in one missed place --- mysql-test/suite/versioning/r/partition.result | 36 +++++++++++++++++++++ mysql-test/suite/versioning/r/replace.result | 13 ++++++++ mysql-test/suite/versioning/t/partition.test | 45 ++++++++++++++++++++++++++ mysql-test/suite/versioning/t/replace.test | 18 +++++++++++ sql/sql_insert.cc | 2 ++ 5 files changed, 114 insertions(+) diff --git a/mysql-test/suite/versioning/r/partition.result b/mysql-test/suite/versioning/r/partition.result index 06aa0ded32d..b152cbd89e9 100644 --- a/mysql-test/suite/versioning/r/partition.result +++ b/mysql-test/suite/versioning/r/partition.result @@ -530,6 +530,7 @@ set timestamp=1523466002.799571; insert into t1 values (11),(12); set timestamp=1523466004.169435; delete from t1 where pk in (11, 12); +set timestamp= default; # # MDEV-18136 Server crashes in Item_func_dyncol_create::prepare_arguments # @@ -682,6 +683,41 @@ create table t1 (a int) with system versioning partition by system_time (partition p1 history, partition pn current); alter table t1 add partition (partition p2); ERROR HY000: Wrong partitioning type, expected type: `SYSTEM_TIME` +# MDEV-17891 Assertion failures in select_insert::abort_result_set and +# mysql_load upon attempt to replace into a full table +set @@max_heap_table_size= 1024*1024; +create or replace table t1 ( +pk integer auto_increment, +primary key (pk), +f varchar(45000) +) with system versioning engine=memory +partition by system_time interval 1 year (partition p1 history, +partition pn current); +# fill the table until full +insert into t1 () values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); +insert into t1 (f) select f from t1; +ERROR HY000: The table 't1' is full +# leave space for exactly one record in current partition +delete from t1 where pk = 1; +# copy all data into history partition +replace into t1 select * from t1; +replace into t1 select * from t1; +ERROR HY000: The table 't1' is 
full +create or replace table t1 ( +pk integer auto_increment, +primary key (pk), +f varchar(45000) +) with system versioning engine=memory +partition by system_time interval 1 year (partition p1 history, +partition pn current); +insert into t1 () values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); +select * from t1 into outfile 'load.data'; +load data infile 'load.data' replace into table t1; +load data infile 'load.data' replace into table t1; +ERROR HY000: The table 't1' is full +load data infile 'load.data' replace into table t1; +ERROR HY000: The table 't1' is full +set @@max_heap_table_size= 1048576; drop table t1; # # MDEV-22178 Assertion `info->alias.str' failed in partition_info::check_partition_info instead of ER_VERS_WRONG_PARTS diff --git a/mysql-test/suite/versioning/r/replace.result b/mysql-test/suite/versioning/r/replace.result index bda61f118b0..57a992cce49 100644 --- a/mysql-test/suite/versioning/r/replace.result +++ b/mysql-test/suite/versioning/r/replace.result @@ -48,3 +48,16 @@ INSERT INTO t1 () VALUES (),(),(),(),(),(); UPDATE IGNORE t1 SET f = 1; REPLACE t1 SELECT * FROM t1; DROP TABLE t1; +# MDEV-22540 ER_DUP_ENTRY upon REPLACE or Assertion failed +set timestamp=1589245268.41934; +create table t1 (a int primary key) with system versioning; +insert into t1 values (1),(2); +connect con1,localhost,root,,test; +set timestamp=1589245268.52093; +replace into t1 values (1),(2); +connection default; +replace into t1 values (1),(2); +connection con1; +replace into t1 values (1),(2); +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +drop table t1; diff --git a/mysql-test/suite/versioning/t/partition.test b/mysql-test/suite/versioning/t/partition.test index 454e4068e15..d131bcfcc21 100644 --- a/mysql-test/suite/versioning/t/partition.test +++ b/mysql-test/suite/versioning/t/partition.test @@ -482,6 +482,7 @@ set timestamp=1523466002.799571; insert into t1 values (11),(12); set timestamp=1523466004.169435; delete from t1 where pk in (11, 12); +set timestamp= default; --echo # --echo # MDEV-18136 Server crashes in Item_func_dyncol_create::prepare_arguments @@ -649,7 +650,51 @@ create table t1 (a int) with system versioning partition by system_time --error ER_PARTITION_WRONG_TYPE alter table t1 add partition (partition p2); +--echo # MDEV-17891 Assertion failures in select_insert::abort_result_set and +--echo # mysql_load upon attempt to replace into a full table + +--let $max_heap_table_size_orig= `select @@max_heap_table_size;` +set @@max_heap_table_size= 1024*1024; +create or replace table t1 ( + pk integer auto_increment, + primary key (pk), + f varchar(45000) +) with system versioning engine=memory + partition by system_time interval 1 year (partition p1 history, + partition pn current); + +--echo # fill the table until full +insert into t1 () values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); +--error ER_RECORD_FILE_FULL +insert into t1 (f) select f from t1; +--echo # leave space for exactly one record in current partition +delete from t1 where pk = 1; +--echo # copy all data into history partition +replace into t1 select * from t1; +--error ER_RECORD_FILE_FULL +replace into t1 select * from t1; + +create or replace table t1 ( + pk integer auto_increment, + primary key (pk), + f varchar(45000) +) with system versioning engine=memory + partition by system_time interval 1 year (partition p1 history, + partition pn current); + +insert into t1 () values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); + +select * from t1 into outfile 'load.data'; +load data infile 
'load.data' replace into table t1; +--error ER_RECORD_FILE_FULL +load data infile 'load.data' replace into table t1; +--error ER_RECORD_FILE_FULL +load data infile 'load.data' replace into table t1; + # Cleanup +--let $datadir= `select @@datadir` +--remove_file $datadir/test/load.data +eval set @@max_heap_table_size= $max_heap_table_size_orig; drop table t1; --echo # diff --git a/mysql-test/suite/versioning/t/replace.test b/mysql-test/suite/versioning/t/replace.test index 392c0ffcf35..83489f4a4b9 100644 --- a/mysql-test/suite/versioning/t/replace.test +++ b/mysql-test/suite/versioning/t/replace.test @@ -59,4 +59,22 @@ UPDATE IGNORE t1 SET f = 1; REPLACE t1 SELECT * FROM t1; DROP TABLE t1; +--echo # MDEV-22540 ER_DUP_ENTRY upon REPLACE or Assertion failed +set timestamp=1589245268.41934; +create table t1 (a int primary key) with system versioning; +insert into t1 values (1),(2); + +--connect (con1,localhost,root,,test) +set timestamp=1589245268.52093; +replace into t1 values (1),(2); + +--connection default +replace into t1 values (1),(2); + +--connection con1 +--error ER_DUP_ENTRY +replace into t1 values (1),(2); + +drop table t1; + --source suite/versioning/common_finish.inc diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 8115b3c6397..9db6acf73f8 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1959,6 +1959,8 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) if (likely(!error)) { info->deleted++; + if (!table->file->has_transactions()) + thd->transaction.stmt.modified_non_trans_table= TRUE; if (table->versioned(VERS_TIMESTAMP)) { store_record(table, record[2]); -- cgit v1.2.1 From 188b328335d5c2a61d21f528fad19a685f9511ef Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Wed, 6 Jan 2021 11:25:19 +0100 Subject: Urgent fix of MDEV-23446 fix: Use the same variable in both scopes (from where we have "goto error" and target of the goto) --- sql/sql_update.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sql/sql_update.cc b/sql/sql_update.cc index c6eba609967..f24706fba19 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -2347,6 +2347,7 @@ int multi_update::send_data(List ¬_used_values) { TABLE_LIST *cur_table; DBUG_ENTER("multi_update::send_data"); + int error= 0; for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local) { @@ -2393,7 +2394,6 @@ int multi_update::send_data(List ¬_used_values) found++; if (!can_compare_record || compare_record(table)) { - int error; if ((error= cur_table->view_check_option(thd, ignore)) != VIEW_CHECK_OK) @@ -2476,7 +2476,6 @@ error: } else { - int error; TABLE *tmp_table= tmp_tables[offset]; if (copy_funcs(tmp_table_param[offset].items_to_copy, thd)) DBUG_RETURN(1); -- cgit v1.2.1 From 033f8d13ce6bc2a8003ada5bb51b28b47cd64949 Mon Sep 17 00:00:00 2001 From: Alexey Yurchenko Date: Wed, 30 Dec 2020 23:51:29 +0200 Subject: Update wsrep-lib (new logger interface) Ensure consistent use of logging macros in wsrep-related code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Jan Lindström --- sql/wsrep_mysqld.cc | 36 +++++++++++++++++++----------------- sql/wsrep_mysqld.h | 28 ++++++++++++++-------------- sql/wsrep_server_service.cc | 11 +++++++---- wsrep-lib | 2 +- 4 files changed, 41 insertions(+), 36 deletions(-) diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 93ea3bc539f..c0f48cca9cd 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -316,29 +316,31 @@ wsp::node_status local_status; */ Wsrep_schema 
*wsrep_schema= 0; -static void wsrep_log_cb(wsrep::log::level level, const char *msg) +static void wsrep_log_cb(wsrep::log::level level, + const char*, const char *msg) { /* Silence all wsrep related logging from lib and provider if wsrep is not enabled. */ - if (WSREP_ON) - { - switch (level) { - case wsrep::log::info: - sql_print_information("WSREP: %s", msg); - break; - case wsrep::log::warning: - sql_print_warning("WSREP: %s", msg); - break; - case wsrep::log::error: - sql_print_error("WSREP: %s", msg); + if (!WSREP_ON) return; + + switch (level) { + case wsrep::log::info: + WSREP_INFO("%s", msg); + break; + case wsrep::log::warning: + WSREP_WARN("%s", msg); + break; + case wsrep::log::error: + WSREP_ERROR("%s", msg); + break; + case wsrep::log::debug: + WSREP_DEBUG("%s", msg); + break; + case wsrep::log::unknown: + WSREP_UNKNOWN("%s", msg); break; - case wsrep::log::debug: - if (wsrep_debug) sql_print_information ("[Debug] WSREP: %s", msg); - default: - break; - } } } diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h index b0050a2ebae..29b1c4cf1f4 100644 --- a/sql/wsrep_mysqld.h +++ b/sql/wsrep_mysqld.h @@ -251,30 +251,30 @@ void WSREP_LOG(void (*fun)(const char* fmt, ...), const char* fmt, ...); #define WSREP_INFO(...) WSREP_LOG(sql_print_information, ##__VA_ARGS__) #define WSREP_WARN(...) WSREP_LOG(sql_print_warning, ##__VA_ARGS__) #define WSREP_ERROR(...) WSREP_LOG(sql_print_error, ##__VA_ARGS__) +#define WSREP_UNKNOWN(fmt, ...) WSREP_ERROR("UNKNOWN: " fmt, ##__VA_ARGS__) #define WSREP_LOG_CONFLICT_THD(thd, role) \ - WSREP_LOG(sql_print_information, \ - "%s: \n " \ - " THD: %lu, mode: %s, state: %s, conflict: %s, seqno: %lld\n " \ - " SQL: %s", \ - role, \ - thd_get_thread_id(thd), \ - wsrep_thd_client_mode_str(thd), \ - wsrep_thd_client_state_str(thd), \ - wsrep_thd_transaction_state_str(thd), \ - wsrep_thd_trx_seqno(thd), \ - wsrep_thd_query(thd) \ + WSREP_INFO("%s: \n " \ + " THD: %lu, mode: %s, state: %s, conflict: %s, seqno: %lld\n " \ + " SQL: %s", \ + role, \ + thd_get_thread_id(thd), \ + wsrep_thd_client_mode_str(thd), \ + wsrep_thd_client_state_str(thd), \ + wsrep_thd_transaction_state_str(thd), \ + wsrep_thd_trx_seqno(thd), \ + wsrep_thd_query(thd) \ ); #define WSREP_LOG_CONFLICT(bf_thd, victim_thd, bf_abort) \ if (wsrep_debug || wsrep_log_conflicts) \ { \ - WSREP_LOG(sql_print_information, "cluster conflict due to %s for threads:", \ - (bf_abort) ? "high priority abort" : "certification failure" \ + WSREP_INFO("cluster conflict due to %s for threads:", \ + (bf_abort) ? 
"high priority abort" : "certification failure" \ ); \ if (bf_thd) WSREP_LOG_CONFLICT_THD(bf_thd, "Winning thread"); \ if (victim_thd) WSREP_LOG_CONFLICT_THD(victim_thd, "Victim thread"); \ - WSREP_LOG(sql_print_information, "context: %s:%d", __FILE__, __LINE__); \ + WSREP_INFO("context: %s:%d", __FILE__, __LINE__); \ } #define WSREP_PROVIDER_EXISTS \ diff --git a/sql/wsrep_server_service.cc b/sql/wsrep_server_service.cc index da021d4a7eb..cd432ab3eae 100644 --- a/sql/wsrep_server_service.cc +++ b/sql/wsrep_server_service.cc @@ -162,16 +162,19 @@ void Wsrep_server_service::log_message(enum wsrep::log::level level, switch (level) { case wsrep::log::debug: - sql_print_information("debug: %s", message); + WSREP_DEBUG("%s", message); break; case wsrep::log::info: - sql_print_information("%s", message); + WSREP_INFO("%s", message); break; case wsrep::log::warning: - sql_print_warning("%s", message); + WSREP_WARN("%s", message); break; case wsrep::log::error: - sql_print_error("%s", message); + WSREP_ERROR("%s", message); + break; + case wsrep::log::unknown: + WSREP_UNKNOWN("%s", message); break; } } diff --git a/wsrep-lib b/wsrep-lib index dcf3ce91cdb..515ac816f98 160000 --- a/wsrep-lib +++ b/wsrep-lib @@ -1 +1 @@ -Subproject commit dcf3ce91cdb9d254ae04ecc6a2f91f46b171280d +Subproject commit 515ac816f98329c0227d0060cc9339c889810834 -- cgit v1.2.1 From df1eefb2ad138846269d40372678af805589700a Mon Sep 17 00:00:00 2001 From: Alice Sherepa Date: Thu, 7 Jan 2021 17:53:04 +0100 Subject: MDEV-16272 rpl.rpl_semisync_ali_issues failed in buildbot, SHOW variable was done instead of waiting for the value of that variable --- mysql-test/suite/rpl/t/rpl_semisync_ali_issues.test | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/mysql-test/suite/rpl/t/rpl_semisync_ali_issues.test b/mysql-test/suite/rpl/t/rpl_semisync_ali_issues.test index 52cd9e31753..f67c6e2ac0a 100644 --- a/mysql-test/suite/rpl/t/rpl_semisync_ali_issues.test +++ b/mysql-test/suite/rpl/t/rpl_semisync_ali_issues.test @@ -31,8 +31,12 @@ echo [ enable semi-sync on slave ]; stop slave; set global rpl_semi_sync_slave_enabled = 1; start slave; +let $status_var= rpl_semi_sync_slave_status; +let $status_var_value= ON; +source include/wait_for_status_var.inc; show status like 'rpl_semi_sync_slave%'; + connection master; CREATE TABLE t1(a INT) ENGINE=InnoDB; sync_slave_with_master; @@ -190,6 +194,12 @@ connection con1; INSERT INTO t1 values (2); sync_slave_with_master; connection con1; +let $status_var= Rpl_semi_sync_master_clients; +let $status_var_value= 1; +source include/wait_for_status_var.inc; +let $status_var= Rpl_semi_sync_master_status; +let $status_var_value= ON; +source include/wait_for_status_var.inc; show status like 'Rpl_semi_sync_master_clients'; show status like 'Rpl_semi_sync_master_status'; @@ -259,7 +269,12 @@ START SLAVE IO_THREAD; --echo ######################################################### connection con1; SET GLOBAL rpl_semi_sync_master_enabled = 0; + +let $status_var= Rpl_semi_sync_master_clients; +let $status_var_value= 1; +source include/wait_for_status_var.inc; show status like 'Rpl_semi_sync_master_clients'; + INSERT INTO t1 VALUES (1); SET GLOBAL rpl_semi_sync_master_enabled = 1; INSERT INTO t1 VALUES (2); -- cgit v1.2.1 From e25623e78a3efde05e30070dc7362f8dc0d8c459 Mon Sep 17 00:00:00 2001 From: Nikita Malyavin Date: Tue, 29 Dec 2020 13:38:16 +1000 Subject: MDEV-17556 Assertion `bitmap_is_set_all(&table->s->all_set)' failed The assertion failed in handler::ha_reset upon SELECT under READ UNCOMMITTED 
from table with index on virtual column. This was the debug-only failure, though the problem is mush wider: * MY_BITMAP is a structure containing my_bitmap_map, the latter is a raw bitmap. * read_set, write_set and vcol_set of TABLE are the pointers to MY_BITMAP * The rest of MY_BITMAPs are stored in TABLE and TABLE_SHARE * The pointers to the stored MY_BITMAPs, like orig_read_set etc, and sometimes all_set and tmp_set, are assigned to the pointers. * Sometimes tmp_use_all_columns is used to substitute the raw bitmap directly with all_set.bitmap * Sometimes even bitmaps are directly modified, like in TABLE::update_virtual_field(): bitmap_clear_all(&tmp_set) is called. The last three bullets in the list, when used together (which is mostly always) make the program flow cumbersome and impossible to follow, notwithstanding the errors they cause, like this MDEV-17556, where tmp_set pointer was assigned to read_set, write_set and vcol_set, then its bitmap was substituted with all_set.bitmap by dbug_tmp_use_all_columns() call, and then bitmap_clear_all(&tmp_set) was applied to all this. To untangle this knot, the rule should be applied: * Never substitute bitmaps! This patch is about this. orig_*, all_set bitmaps are never substituted already. This patch changes the following function prototypes: * tmp_use_all_columns, dbug_tmp_use_all_columns to accept MY_BITMAP** and to return MY_BITMAP * instead of my_bitmap_map* * tmp_restore_column_map, dbug_tmp_restore_column_maps to accept MY_BITMAP* instead of my_bitmap_map* These functions now will substitute read_set/write_set/vcol_set directly, and won't touch underlying bitmaps. --- plugin/feedback/sender_thread.cc | 2 +- sql/field.cc | 4 +-- sql/ha_partition.cc | 6 ++-- sql/item.cc | 4 +-- sql/item_cmpfunc.cc | 6 ++-- sql/key.cc | 9 +++-- sql/opt_range.cc | 33 +++++++++-------- sql/partition_info.cc | 4 +-- sql/protocol.cc | 6 ++-- sql/sql_handler.cc | 5 ++- sql/sql_select.cc | 4 +-- sql/sql_select.h | 18 +++++----- sql/sql_show.cc | 5 ++- sql/sql_statistics.cc | 5 ++- sql/table.h | 38 ++++++++++---------- storage/archive/ha_archive.cc | 4 +-- storage/cassandra/ha_cassandra.cc | 42 +++++++++++----------- storage/connect/ha_connect.cc | 19 +++++----- storage/csv/ha_tina.cc | 9 +++-- storage/federated/ha_federated.cc | 19 +++++----- storage/federatedx/ha_federatedx.cc | 19 +++++----- storage/innobase/handler/ha_innodb.cc | 8 ++--- storage/innobase/handler/handler0alter.cc | 4 +-- storage/mroonga/lib/mrn_debug_column_access.cpp | 4 +-- storage/mroonga/lib/mrn_debug_column_access.hpp | 2 +- storage/oqgraph/ha_oqgraph.cc | 10 +++--- storage/perfschema/pfs_engine_table.cc | 23 +++++------- storage/rocksdb/ha_rocksdb.cc | 11 +++--- storage/rocksdb/rdb_datadic.cc | 6 ++-- storage/sequence/sequence.cc | 4 +-- storage/sphinx/ha_sphinx.cc | 4 +-- storage/spider/ha_spider.cc | 12 +++---- storage/spider/spd_db_conn.cc | 48 ++++++++++++------------- storage/spider/spd_db_mysql.cc | 17 +++++---- storage/tokudb/ha_tokudb.cc | 16 ++++----- 35 files changed, 206 insertions(+), 224 deletions(-) diff --git a/plugin/feedback/sender_thread.cc b/plugin/feedback/sender_thread.cc index 3210c1c9e41..55a7306e7c3 100644 --- a/plugin/feedback/sender_thread.cc +++ b/plugin/feedback/sender_thread.cc @@ -47,7 +47,7 @@ static int table_to_string(TABLE *table, String *result) res= table->file->ha_rnd_init(1); - dbug_tmp_use_all_columns(table, table->read_set); + dbug_tmp_use_all_columns(table, &table->read_set); while(!res && !table->file->ha_rnd_next(table->record[0])) { diff --git 
a/sql/field.cc b/sql/field.cc index 9ea1eac9db9..652228beceb 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -11166,7 +11166,7 @@ key_map Field::get_possible_keys() bool Field::validate_value_in_record_with_warn(THD *thd, const uchar *record) { - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); bool rc; if ((rc= validate_value_in_record(thd, record))) { @@ -11178,7 +11178,7 @@ bool Field::validate_value_in_record_with_warn(THD *thd, const uchar *record) ER_THD(thd, ER_INVALID_DEFAULT_VALUE_FOR_FIELD), ErrConvString(&tmp).ptr(), field_name); } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); return rc; } diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index daac3f6a86d..523e76e511e 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -4124,7 +4124,7 @@ int ha_partition::write_row(uchar * buf) int error; longlong func_value; bool have_auto_increment= table->next_number_field && buf == table->record[0]; - my_bitmap_map *old_map; + MY_BITMAP *old_map; THD *thd= ha_thd(); sql_mode_t saved_sql_mode= thd->variables.sql_mode; bool saved_auto_inc_field_not_null= table->auto_increment_field_not_null; @@ -4173,9 +4173,9 @@ int ha_partition::write_row(uchar * buf) } } - old_map= dbug_tmp_use_all_columns(table, table->read_set); + old_map= dbug_tmp_use_all_columns(table, &table->read_set); error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value); - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); if (unlikely(error)) { m_part_info->err_value= func_value; diff --git a/sql/item.cc b/sql/item.cc index 994d45a9dc3..a2753caf496 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1486,7 +1486,7 @@ int Item::save_in_field_no_warnings(Field *field, bool no_conversions) TABLE *table= field->table; THD *thd= table->in_use; enum_check_fields tmp= thd->count_cuted_fields; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); sql_mode_t sql_mode= thd->variables.sql_mode; thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE); thd->variables.sql_mode|= MODE_INVALID_DATES; @@ -1495,7 +1495,7 @@ int Item::save_in_field_no_warnings(Field *field, bool no_conversions) res= save_in_field(field, no_conversions); thd->count_cuted_fields= tmp; - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); thd->variables.sql_mode= sql_mode; return res; } diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index bfd7f3dbd1b..d16c7413f0a 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -428,13 +428,13 @@ static bool convert_const_to_int(THD *thd, Item_field *field_item, TABLE *table= field->table; sql_mode_t orig_sql_mode= thd->variables.sql_mode; enum_check_fields orig_count_cuted_fields= thd->count_cuted_fields; - my_bitmap_map *old_maps[2] = { NULL, NULL }; + MY_BITMAP *old_maps[2] = { NULL, NULL }; ulonglong UNINIT_VAR(orig_field_val); /* original field value if valid */ /* table->read_set may not be set if we come here from a CREATE TABLE */ if (table && table->read_set) dbug_tmp_use_all_columns(table, old_maps, - table->read_set, table->write_set); + &table->read_set, &table->write_set); /* For comparison purposes allow invalid dates like 2000-01-32 */ thd->variables.sql_mode= (orig_sql_mode & 
~MODE_NO_ZERO_DATE) | MODE_INVALID_DATES; @@ -478,7 +478,7 @@ static bool convert_const_to_int(THD *thd, Item_field *field_item, thd->variables.sql_mode= orig_sql_mode; thd->count_cuted_fields= orig_count_cuted_fields; if (table && table->read_set) - dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_maps); + dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_maps); } return result; } diff --git a/sql/key.cc b/sql/key.cc index 18806efc18f..689b1e9c886 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -244,14 +244,13 @@ void key_restore(uchar *to_record, const uchar *from_key, KEY *key_info, else if (key_part->key_part_flag & HA_VAR_LENGTH_PART) { Field *field= key_part->field; - my_bitmap_map *old_map; my_ptrdiff_t ptrdiff= to_record - field->table->record[0]; field->move_field_offset(ptrdiff); key_length-= HA_KEY_BLOB_LENGTH; length= MY_MIN(key_length, key_part->length); - old_map= dbug_tmp_use_all_columns(field->table, field->table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(field->table, &field->table->write_set); field->set_key_image(from_key, length); - dbug_tmp_restore_column_map(field->table->write_set, old_map); + dbug_tmp_restore_column_map(&field->table->write_set, old_map); from_key+= HA_KEY_BLOB_LENGTH; field->move_field_offset(-ptrdiff); } @@ -419,7 +418,7 @@ void field_unpack(String *to, Field *field, const uchar *rec, uint max_length, void key_unpack(String *to, TABLE *table, KEY *key) { - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); DBUG_ENTER("key_unpack"); to->length(0); @@ -441,7 +440,7 @@ void key_unpack(String *to, TABLE *table, KEY *key) field_unpack(to, key_part->field, table->record[0], key_part->length, MY_TEST(key_part->key_part_flag & HA_PART_KEY_SEG)); } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); DBUG_VOID_RETURN; } diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 1b4f20bc39c..7785c768fbc 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -3248,8 +3248,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond) void store_key_image_to_rec(Field *field, uchar *ptr, uint len) { - /* Do the same as print_key() does */ - my_bitmap_map *old_map; + /* Do the same as print_key() does */ if (field->real_maybe_null()) { @@ -3261,10 +3260,10 @@ void store_key_image_to_rec(Field *field, uchar *ptr, uint len) field->set_notnull(); ptr++; } - old_map= dbug_tmp_use_all_columns(field->table, - field->table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(field->table, + &field->table->write_set); field->set_key_image(ptr, len); - dbug_tmp_restore_column_map(field->table->write_set, old_map); + dbug_tmp_restore_column_map(&field->table->write_set, old_map); } #ifdef WITH_PARTITION_STORAGE_ENGINE @@ -3479,7 +3478,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond) PART_PRUNE_PARAM prune_param; MEM_ROOT alloc; RANGE_OPT_PARAM *range_par= &prune_param.range_param; - my_bitmap_map *old_sets[2]; + MY_BITMAP *old_sets[2]; prune_param.part_info= part_info; init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0, @@ -3495,7 +3494,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond) } dbug_tmp_use_all_columns(table, old_sets, - table->read_set, table->write_set); + &table->read_set, &table->write_set); range_par->thd= thd; range_par->table= table; /* range_par->cond doesn't need 
initialization */ @@ -3592,7 +3591,7 @@ all_used: retval= FALSE; // some partitions are used mark_all_partitions_as_used(prune_param.part_info); end: - dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets); + dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets); thd->no_errors=0; thd->mem_root= range_par->old_root; free_root(&alloc,MYF(0)); // Return memory & allocator @@ -14816,8 +14815,8 @@ static void print_sel_arg_key(Field *field, const uchar *key, String *out) { TABLE *table= field->table; - my_bitmap_map *old_sets[2]; - dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set); + MY_BITMAP *old_sets[2]; + dbug_tmp_use_all_columns(table, old_sets, &table->read_set, &table->write_set); if (field->real_maybe_null()) { @@ -14837,7 +14836,7 @@ print_sel_arg_key(Field *field, const uchar *key, String *out) field->val_str(out); end: - dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets); + dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets); } @@ -14932,9 +14931,9 @@ print_key(KEY_PART *key_part, const uchar *key, uint used_length) const uchar *key_end= key+used_length; uint store_length; TABLE *table= key_part->field->table; - my_bitmap_map *old_sets[2]; + MY_BITMAP *old_sets[2]; - dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set); + dbug_tmp_use_all_columns(table, old_sets, &table->read_set, &table->write_set); for (; key < key_end; key+=store_length, key_part++) { @@ -14961,7 +14960,7 @@ print_key(KEY_PART *key_part, const uchar *key, uint used_length) if (key+store_length < key_end) fputc('/',DBUG_FILE); } - dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets); + dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets); } @@ -14969,16 +14968,16 @@ static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg) { char buf[MAX_KEY/8+1]; TABLE *table; - my_bitmap_map *old_sets[2]; + MY_BITMAP *old_sets[2]; DBUG_ENTER("print_quick"); if (!quick) DBUG_VOID_RETURN; DBUG_LOCK_FILE; table= quick->head; - dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set); + dbug_tmp_use_all_columns(table, old_sets, &table->read_set, &table->write_set); quick->dbug_dump(0, TRUE); - dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets); + dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets); fprintf(DBUG_FILE,"other_keys: 0x%s:\n", needed_reg->print(buf)); diff --git a/sql/partition_info.cc b/sql/partition_info.cc index ef817fffe1e..503b523a66c 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -1642,13 +1642,13 @@ void partition_info::print_no_partition_found(TABLE *table_arg, myf errflag) buf_ptr= (char*)"from column_list"; else { - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table_arg, table_arg->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table_arg, &table_arg->read_set); if (part_expr->null_value) buf_ptr= (char*)"NULL"; else longlong10_to_str(err_value, buf, part_expr->unsigned_flag ? 
10 : -10); - dbug_tmp_restore_column_map(table_arg->read_set, old_map); + dbug_tmp_restore_column_map(&table_arg->read_set, old_map); } my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, errflag, buf_ptr); } diff --git a/sql/protocol.cc b/sql/protocol.cc index de6d1b96f76..1bffcfb3bdb 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -1250,15 +1250,15 @@ bool Protocol_text::store(Field *field) CHARSET_INFO *tocs= this->thd->variables.character_set_results; #ifndef DBUG_OFF TABLE *table= field->table; - my_bitmap_map *old_map= 0; + MY_BITMAP *old_map= 0; if (table->file) - old_map= dbug_tmp_use_all_columns(table, table->read_set); + old_map= dbug_tmp_use_all_columns(table, &table->read_set); #endif field->val_str(&str); #ifndef DBUG_OFF if (old_map) - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); #endif return store_string_aux(str.ptr(), str.length(), str.charset(), tocs); diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index a811cee998f..7c2122b6a0a 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -600,7 +600,6 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler, } for (keypart_map= key_len=0 ; (item=it_ke++) ; key_part++) { - my_bitmap_map *old_map; /* note that 'item' can be changed by fix_fields() call */ if ((!item->fixed && item->fix_fields(thd, it_ke.ref())) || @@ -613,9 +612,9 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler, } if (!in_prepare) { - old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); (void) item->save_in_field(key_part->field, 1); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); } key_len+= key_part->store_length; keypart_map= (keypart_map << 1) | 1; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index d5e5a79eba2..5a2474cabee 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -22463,7 +22463,7 @@ cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref) { enum enum_check_fields save_count_cuted_fields= thd->count_cuted_fields; thd->count_cuted_fields= CHECK_FIELD_IGNORE; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); bool result= 0; for (store_key **copy=ref->key_copy ; *copy ; copy++) @@ -22475,7 +22475,7 @@ cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref) } } thd->count_cuted_fields= save_count_cuted_fields; - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return result; } diff --git a/sql/sql_select.h b/sql/sql_select.h index 4584460ca3f..f41c0df5ad8 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -1842,8 +1842,8 @@ class store_key_field: public store_key enum store_key_result copy_inner() { TABLE *table= copy_field.to_field->table; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, - table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, + &table->write_set); /* It looks like the next statement is needed only for a simplified @@ -1854,7 +1854,7 @@ class store_key_field: public store_key bzero(copy_field.to_ptr,copy_field.to_length); copy_field.do_copy(©_field); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); null_key= to_field->is_null(); return err != 0 ? 
STORE_KEY_FATAL : STORE_KEY_OK; } @@ -1889,8 +1889,8 @@ public: enum store_key_result copy_inner() { TABLE *table= to_field->table; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, - table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, + &table->write_set); int res= FALSE; /* @@ -1911,7 +1911,7 @@ public: */ if (!res && table->in_use->is_error()) res= 1; /* STORE_KEY_FATAL */ - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); null_key= to_field->is_null() || item->null_value; return ((err != 0 || res < 0 || res > 2) ? STORE_KEY_FATAL : (store_key_result) res); @@ -1947,8 +1947,8 @@ protected: { inited=1; TABLE *table= to_field->table; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, - table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, + &table->write_set); if ((res= item->save_in_field(to_field, 1))) { if (!err) @@ -1960,7 +1960,7 @@ protected: */ if (!err && to_field->table->in_use->is_error()) err= 1; /* STORE_KEY_FATAL */ - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); } null_key= to_field->is_null() || item->null_value; return (err > 2 ? STORE_KEY_FATAL : (store_key_result) err); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 3babaabb37d..49659c239bc 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -1925,7 +1925,6 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, bool check_options= !(sql_mode & MODE_IGNORE_BAD_TABLE_OPTIONS) && !create_info_arg; handlerton *hton; - my_bitmap_map *old_map; int error= 0; DBUG_ENTER("show_create_table"); DBUG_PRINT("enter",("table: %s", table->s->table_name.str)); @@ -1987,7 +1986,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, We have to restore the read_set if we are called from insert in case of row based replication. 
*/ - old_map= tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= tmp_use_all_columns(table, &table->read_set); for (ptr=table->field ; (field= *ptr); ptr++) { @@ -2352,7 +2351,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, } } #endif - tmp_restore_column_map(table->read_set, old_map); + tmp_restore_column_map(&table->read_set, old_map); DBUG_RETURN(error); } diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index d22921ba1e2..ab8edbf584b 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -1044,9 +1044,8 @@ public: { char buff[MAX_FIELD_WIDTH]; String val(buff, sizeof(buff), &my_charset_bin); - my_bitmap_map *old_map; - old_map= dbug_tmp_use_all_columns(stat_table, stat_table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(stat_table, &stat_table->read_set); for (uint i= COLUMN_STAT_MIN_VALUE; i <= COLUMN_STAT_HISTOGRAM; i++) { @@ -1105,7 +1104,7 @@ public: } } } - dbug_tmp_restore_column_map(stat_table->read_set, old_map); + dbug_tmp_restore_column_map(&stat_table->read_set, old_map); } diff --git a/sql/table.h b/sql/table.h index 57706655d9b..a5d412c2c08 100644 --- a/sql/table.h +++ b/sql/table.h @@ -2770,25 +2770,25 @@ typedef struct st_open_table_list{ } OPEN_TABLE_LIST; -static inline my_bitmap_map *tmp_use_all_columns(TABLE *table, - MY_BITMAP *bitmap) +static inline MY_BITMAP *tmp_use_all_columns(TABLE *table, + MY_BITMAP **bitmap) { - my_bitmap_map *old= bitmap->bitmap; - bitmap->bitmap= table->s->all_set.bitmap; + MY_BITMAP *old= *bitmap; + *bitmap= &table->s->all_set; return old; } -static inline void tmp_restore_column_map(MY_BITMAP *bitmap, - my_bitmap_map *old) +static inline void tmp_restore_column_map(MY_BITMAP **bitmap, + MY_BITMAP *old) { - bitmap->bitmap= old; + *bitmap= old; } /* The following is only needed for debugging */ -static inline my_bitmap_map *dbug_tmp_use_all_columns(TABLE *table, - MY_BITMAP *bitmap) +static inline MY_BITMAP *dbug_tmp_use_all_columns(TABLE *table, + MY_BITMAP **bitmap) { #ifndef DBUG_OFF return tmp_use_all_columns(table, bitmap); @@ -2797,8 +2797,8 @@ static inline my_bitmap_map *dbug_tmp_use_all_columns(TABLE *table, #endif } -static inline void dbug_tmp_restore_column_map(MY_BITMAP *bitmap, - my_bitmap_map *old) +static inline void dbug_tmp_restore_column_map(MY_BITMAP **bitmap, + MY_BITMAP *old) { #ifndef DBUG_OFF tmp_restore_column_map(bitmap, old); @@ -2811,22 +2811,22 @@ static inline void dbug_tmp_restore_column_map(MY_BITMAP *bitmap, Provide for the possiblity of the read set being the same as the write set */ static inline void dbug_tmp_use_all_columns(TABLE *table, - my_bitmap_map **save, - MY_BITMAP *read_set, - MY_BITMAP *write_set) + MY_BITMAP **save, + MY_BITMAP **read_set, + MY_BITMAP **write_set) { #ifndef DBUG_OFF - save[0]= read_set->bitmap; - save[1]= write_set->bitmap; + save[0]= *read_set; + save[1]= *write_set; (void) tmp_use_all_columns(table, read_set); (void) tmp_use_all_columns(table, write_set); #endif } -static inline void dbug_tmp_restore_column_maps(MY_BITMAP *read_set, - MY_BITMAP *write_set, - my_bitmap_map **old) +static inline void dbug_tmp_restore_column_maps(MY_BITMAP **read_set, + MY_BITMAP **write_set, + MY_BITMAP **old) { #ifndef DBUG_OFF tmp_restore_column_map(read_set, old[0]); diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index bd04295daa6..b951185e554 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -1546,7 +1546,7 @@ int ha_archive::optimize(THD* thd, 
HA_CHECK_OPT* check_opt) share->rows_recorded= 0; stats.auto_increment_value= 1; share->archive_write.auto_increment= 0; - my_bitmap_map *org_bitmap= tmp_use_all_columns(table, table->read_set); + MY_BITMAP *org_bitmap= tmp_use_all_columns(table, &table->read_set); while (!(rc= get_row(&archive, table->record[0]))) { @@ -1567,7 +1567,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) } } - tmp_restore_column_map(table->read_set, org_bitmap); + tmp_restore_column_map(&table->read_set, org_bitmap); share->rows_recorded= (ha_rows)writer.rows; } diff --git a/storage/cassandra/ha_cassandra.cc b/storage/cassandra/ha_cassandra.cc index cf30aa6b5dc..514948baf1c 100644 --- a/storage/cassandra/ha_cassandra.cc +++ b/storage/cassandra/ha_cassandra.cc @@ -1641,18 +1641,18 @@ int ha_cassandra::index_read_map(uchar *buf, const uchar *key, char *cass_key; int cass_key_len; - my_bitmap_map *old_map; + MY_BITMAP *old_map; - old_map= dbug_tmp_use_all_columns(table, table->read_set); + old_map= dbug_tmp_use_all_columns(table, &table->read_set); if (rowkey_converter->mariadb_to_cassandra(&cass_key, &cass_key_len)) { /* We get here when making lookups like uuid_column='not-an-uuid' */ - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); bool found; if (se->get_slice(cass_key, cass_key_len, &found)) @@ -1726,8 +1726,8 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk) cassandra_to_mariadb() calls will use field->store(...) methods, which require that the column is in the table->write_set */ - my_bitmap_map *old_map; - old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map; + old_map= dbug_tmp_use_all_columns(table, &table->write_set); /* Start with all fields being NULL */ for (field= table->field + 1; *field; field++) @@ -1848,7 +1848,7 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk) } err: - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return res; } @@ -1933,7 +1933,7 @@ void ha_cassandra::free_dynamic_row(DYNAMIC_COLUMN_VALUE **vals, int ha_cassandra::write_row(uchar *buf) { - my_bitmap_map *old_map; + MY_BITMAP *old_map; int ires; DBUG_ENTER("ha_cassandra::write_row"); @@ -1943,7 +1943,7 @@ int ha_cassandra::write_row(uchar *buf) if (!doing_insert_batch) se->clear_insert_buffer(); - old_map= dbug_tmp_use_all_columns(table, table->read_set); + old_map= dbug_tmp_use_all_columns(table, &table->read_set); insert_lineno++; @@ -1954,7 +1954,7 @@ int ha_cassandra::write_row(uchar *buf) { my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0), rowkey_converter->field->field_name, insert_lineno); - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } se->start_row_insert(cass_key, cass_key_len); @@ -1977,7 +1977,7 @@ int ha_cassandra::write_row(uchar *buf) free_dynamic_row(&vals, &names); if (rc) { - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); DBUG_RETURN(rc); } } @@ -1988,7 +1988,7 @@ int ha_cassandra::write_row(uchar *buf) { my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0), field_converters[i]->field->field_name, insert_lineno); - dbug_tmp_restore_column_map(table->read_set, old_map); + 
dbug_tmp_restore_column_map(&table->read_set, old_map); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } se->add_insert_column(field_converters[i]->field->field_name, 0, @@ -1996,7 +1996,7 @@ int ha_cassandra::write_row(uchar *buf) } } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); bool res; @@ -2263,8 +2263,8 @@ bool ha_cassandra::mrr_start_read() { uint key_len; - my_bitmap_map *old_map; - old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map; + old_map= dbug_tmp_use_all_columns(table, &table->read_set); se->new_lookup_keys(); @@ -2288,7 +2288,7 @@ bool ha_cassandra::mrr_start_read() break; } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); return se->multiget_slice(); } @@ -2366,7 +2366,7 @@ int ha_cassandra::update_row(const uchar *old_data, uchar *new_data) LEX_STRING *oldnames, *names; uint oldcount, count; String oldvalcol, valcol; - my_bitmap_map *old_map; + MY_BITMAP *old_map; int res; DBUG_ENTER("ha_cassandra::update_row"); /* Currently, it is guaranteed that new_data == table->record[0] */ @@ -2374,7 +2374,7 @@ int ha_cassandra::update_row(const uchar *old_data, uchar *new_data) /* For now, just rewrite the full record */ se->clear_insert_buffer(); - old_map= dbug_tmp_use_all_columns(table, table->read_set); + old_map= dbug_tmp_use_all_columns(table, &table->read_set); char *old_key; int old_key_len; @@ -2387,7 +2387,7 @@ int ha_cassandra::update_row(const uchar *old_data, uchar *new_data) { my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0), rowkey_converter->field->field_name, insert_lineno); - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } @@ -2450,7 +2450,7 @@ int ha_cassandra::update_row(const uchar *old_data, uchar *new_data) { my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0), field_converters[i]->field->field_name, insert_lineno); - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } se->add_insert_column(field_converters[i]->field->field_name, 0, @@ -2477,7 +2477,7 @@ int ha_cassandra::update_row(const uchar *old_data, uchar *new_data) } } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); res= se->do_insert(); diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 2920d86716d..c8a8c0317c7 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -2159,7 +2159,6 @@ int ha_connect::MakeRecord(char *buf) int rc= 0; Field* *field; Field *fp; - my_bitmap_map *org_bitmap; CHARSET_INFO *charset= tdbp->data_charset(); //MY_BITMAP readmap; MY_BITMAP *map; @@ -2174,7 +2173,7 @@ int ha_connect::MakeRecord(char *buf) *table->def_read_set.bitmap, *table->def_write_set.bitmap); // Avoid asserts in field::store() for columns that are not updated - org_bitmap= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->write_set); // This is for variable_length rows memset(buf, 0, table->s->null_bytes); @@ -2201,7 +2200,7 @@ int ha_connect::MakeRecord(char *buf) continue; htrc("Column %s not found\n", fp->field_name); - dbug_tmp_restore_column_map(table->write_set, org_bitmap); + dbug_tmp_restore_column_map(&table->write_set, org_bitmap); DBUG_RETURN(HA_ERR_WRONG_IN_RECORD); } // endif 
colp @@ -2284,7 +2283,7 @@ int ha_connect::MakeRecord(char *buf) memcpy(buf, table->record[0], table->s->stored_rec_length); // This is copied from ha_tina and is necessary to avoid asserts - dbug_tmp_restore_column_map(table->write_set, org_bitmap); + dbug_tmp_restore_column_map(&table->write_set, org_bitmap); DBUG_RETURN(rc); } // end of MakeRecord @@ -2304,7 +2303,7 @@ int ha_connect::ScanRecord(PGLOBAL g, const uchar *) //PTDBASE tp= (PTDBASE)tdbp; String attribute(attr_buffer, sizeof(attr_buffer), table->s->table_charset); - my_bitmap_map *bmap= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *bmap= dbug_tmp_use_all_columns(table, &table->read_set); const CHARSET_INFO *charset= tdbp->data_charset(); String data_charset_value(data_buffer, sizeof(data_buffer), charset); @@ -2426,7 +2425,7 @@ int ha_connect::ScanRecord(PGLOBAL g, const uchar *) } // endfor field err: - dbug_tmp_restore_column_map(table->read_set, bmap); + dbug_tmp_restore_column_map(&table->read_set, bmap); return rc; } // end of ScanRecord @@ -2474,7 +2473,7 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, OPVAL op; Field *fp; const key_range *ranges[2]; - my_bitmap_map *old_map; + MY_BITMAP *old_map; KEY *kfp; KEY_PART_INFO *kpart; @@ -2491,7 +2490,7 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, both= ranges[0] && ranges[1]; kfp= &table->key_info[active_index]; - old_map= dbug_tmp_use_all_columns(table, table->write_set); + old_map= dbug_tmp_use_all_columns(table, &table->write_set); for (i= 0; i <= 1; i++) { if (ranges[i] == NULL) @@ -2586,11 +2585,11 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, if ((oom= qry->IsTruncated())) strcpy(g->Message, "Out of memory"); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return oom; err: - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return true; } // end of MakeKeyWhere diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index 4f192af64dd..59710ca74c2 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -528,7 +528,7 @@ int ha_tina::encode_quote(uchar *buf) String attribute(attribute_buffer, sizeof(attribute_buffer), &my_charset_bin); bool ietf_quotes= table_share->option_struct->ietf_quotes; - my_bitmap_map *org_bitmap= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set); buffer.length(0); for (Field **field=table->field ; *field ; field++) @@ -606,7 +606,7 @@ int ha_tina::encode_quote(uchar *buf) //buffer.replace(buffer.length(), 0, "\n", 1); - dbug_tmp_restore_column_map(table->read_set, org_bitmap); + dbug_tmp_restore_column_map(&table->read_set, org_bitmap); return (buffer.length()); } @@ -659,7 +659,6 @@ int ha_tina::find_current_row(uchar *buf) { my_off_t end_offset, curr_offset= current_position; int eoln_len; - my_bitmap_map *org_bitmap; int error; bool read_all; bool ietf_quotes= table_share->option_struct->ietf_quotes; @@ -679,7 +678,7 @@ int ha_tina::find_current_row(uchar *buf) /* We must read all columns in case a table is opened for update */ read_all= !bitmap_is_clear_all(table->write_set); /* Avoid asserts in ::store() for columns that are not going to be updated */ - org_bitmap= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->write_set); error= HA_ERR_CRASHED_ON_USAGE; 
memset(buf, 0, table->s->null_bytes); @@ -857,7 +856,7 @@ int ha_tina::find_current_row(uchar *buf) error= 0; err: - dbug_tmp_restore_column_map(table->write_set, org_bitmap); + dbug_tmp_restore_column_map(&table->write_set, org_bitmap); DBUG_RETURN(error); } diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index 2e8d5b12e81..adf8c67ad36 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -936,7 +936,7 @@ uint ha_federated::convert_row_to_internal_format(uchar *record, { ulong *lengths; Field **field; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); DBUG_ENTER("ha_federated::convert_row_to_internal_format"); lengths= mysql_fetch_lengths(result); @@ -965,7 +965,7 @@ uint ha_federated::convert_row_to_internal_format(uchar *record, } (*field)->move_field_offset(-old_ptr); } - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(0); } @@ -1293,14 +1293,13 @@ bool ha_federated::create_where_from_key(String *to, char tmpbuff[FEDERATED_QUERY_BUFFER_SIZE]; String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info); const key_range *ranges[2]= { start_key, end_key }; - my_bitmap_map *old_map; DBUG_ENTER("ha_federated::create_where_from_key"); tmp.length(0); if (start_key == NULL && end_key == NULL) DBUG_RETURN(1); - old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); for (uint i= 0; i <= 1; i++) { bool needs_quotes; @@ -1477,7 +1476,7 @@ prepare_for_next_key_part: tmp.c_ptr_quick())); } } - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); if (both_not_null) if (tmp.append(STRING_WITH_LEN(") "))) @@ -1492,7 +1491,7 @@ prepare_for_next_key_part: DBUG_RETURN(0); err: - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(1); } @@ -1841,7 +1840,7 @@ int ha_federated::write_row(uchar *buf) String insert_field_value_string(insert_field_value_buffer, sizeof(insert_field_value_buffer), &my_charset_bin); - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); DBUG_ENTER("ha_federated::write_row"); values_string.length(0); @@ -1895,7 +1894,7 @@ int ha_federated::write_row(uchar *buf) values_string.append(STRING_WITH_LEN(", ")); } } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); /* if there were no fields, we don't want to add a closing paren @@ -2203,7 +2202,7 @@ int ha_federated::update_row(const uchar *old_data, uchar *new_data) else { /* otherwise = */ - my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= tmp_use_all_columns(table, &table->read_set); bool needs_quote= (*field)->str_needs_quotes(); (*field)->val_str(&field_value); if (needs_quote) @@ -2212,7 +2211,7 @@ int ha_federated::update_row(const uchar *old_data, uchar *new_data) if (needs_quote) update_string.append(value_quote_char); field_value.length(0); - tmp_restore_column_map(table->read_set, old_map); + tmp_restore_column_map(&table->read_set, old_map); } update_string.append(STRING_WITH_LEN(", ")); } diff --git a/storage/federatedx/ha_federatedx.cc 
b/storage/federatedx/ha_federatedx.cc index 5d2ddc1fb3b..65947c730e0 100644 --- a/storage/federatedx/ha_federatedx.cc +++ b/storage/federatedx/ha_federatedx.cc @@ -859,7 +859,7 @@ uint ha_federatedx::convert_row_to_internal_format(uchar *record, ulong *lengths; Field **field; int column= 0; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); DBUG_ENTER("ha_federatedx::convert_row_to_internal_format"); lengths= io->fetch_lengths(result); @@ -885,7 +885,7 @@ uint ha_federatedx::convert_row_to_internal_format(uchar *record, } (*field)->move_field_offset(-old_ptr); } - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(0); } @@ -1213,14 +1213,13 @@ bool ha_federatedx::create_where_from_key(String *to, char tmpbuff[FEDERATEDX_QUERY_BUFFER_SIZE]; String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info); const key_range *ranges[2]= { start_key, end_key }; - my_bitmap_map *old_map; DBUG_ENTER("ha_federatedx::create_where_from_key"); tmp.length(0); if (start_key == NULL && end_key == NULL) DBUG_RETURN(1); - old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); for (uint i= 0; i <= 1; i++) { bool needs_quotes; @@ -1396,7 +1395,7 @@ prepare_for_next_key_part: tmp.c_ptr_quick())); } } - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); if (both_not_null) if (tmp.append(STRING_WITH_LEN(") "))) @@ -1411,7 +1410,7 @@ prepare_for_next_key_part: DBUG_RETURN(0); err: - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(1); } @@ -1978,7 +1977,7 @@ int ha_federatedx::write_row(uchar *buf) String insert_field_value_string(insert_field_value_buffer, sizeof(insert_field_value_buffer), &my_charset_bin); - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); DBUG_ENTER("ha_federatedx::write_row"); values_string.length(0); @@ -2032,7 +2031,7 @@ int ha_federatedx::write_row(uchar *buf) values_string.append(STRING_WITH_LEN(", ")); } } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); /* if there were no fields, we don't want to add a closing paren @@ -2354,7 +2353,7 @@ int ha_federatedx::update_row(const uchar *old_data, uchar *new_data) else { /* otherwise = */ - my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= tmp_use_all_columns(table, &table->read_set); bool needs_quote= (*field)->str_needs_quotes(); (*field)->val_str(&field_value); if (needs_quote) @@ -2363,7 +2362,7 @@ int ha_federatedx::update_row(const uchar *old_data, uchar *new_data) if (needs_quote) update_string.append(value_quote_char); field_value.length(0); - tmp_restore_column_map(table->read_set, old_map); + tmp_restore_column_map(&table->read_set, old_map); } update_string.append(STRING_WITH_LEN(", ")); } diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index d7ccbd7f883..7cc4401d0ff 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -21994,11 +21994,11 @@ innobase_get_computed_value( field = dtuple_get_nth_v_field(row, col->v_pos); - my_bitmap_map* old_write_set = 
dbug_tmp_use_all_columns(mysql_table, mysql_table->write_set); - my_bitmap_map* old_read_set = dbug_tmp_use_all_columns(mysql_table, mysql_table->read_set); + MY_BITMAP *old_write_set = dbug_tmp_use_all_columns(mysql_table, &mysql_table->write_set); + MY_BITMAP *old_read_set = dbug_tmp_use_all_columns(mysql_table, &mysql_table->read_set); ret = mysql_table->update_virtual_field(mysql_table->field[col->m_col.ind]); - dbug_tmp_restore_column_map(mysql_table->read_set, old_read_set); - dbug_tmp_restore_column_map(mysql_table->write_set, old_write_set); + dbug_tmp_restore_column_map(&mysql_table->read_set, old_read_set); + dbug_tmp_restore_column_map(&mysql_table->write_set, old_write_set); if (ret != 0) { DBUG_RETURN(NULL); diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index f729948b2bf..0bd11d74b09 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -1944,9 +1944,9 @@ innobase_row_to_mysql( } } if (table->vfield) { - my_bitmap_map* old_vcol_set = tmp_use_all_columns(table, table->vcol_set); + MY_BITMAP *old_vcol_set = tmp_use_all_columns(table, &table->vcol_set); table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_READ); - tmp_restore_column_map(table->vcol_set, old_vcol_set); + tmp_restore_column_map(&table->vcol_set, old_vcol_set); } } diff --git a/storage/mroonga/lib/mrn_debug_column_access.cpp b/storage/mroonga/lib/mrn_debug_column_access.cpp index 5b89baba485..ed535f150ab 100644 --- a/storage/mroonga/lib/mrn_debug_column_access.cpp +++ b/storage/mroonga/lib/mrn_debug_column_access.cpp @@ -24,13 +24,13 @@ namespace mrn { : table_(table), bitmap_(bitmap) { #ifndef DBUG_OFF - map_ = dbug_tmp_use_all_columns(table_, bitmap_); + map_ = dbug_tmp_use_all_columns(table_, &bitmap_); #endif } DebugColumnAccess::~DebugColumnAccess() { #ifndef DBUG_OFF - dbug_tmp_restore_column_map(bitmap_, map_); + dbug_tmp_restore_column_map(&bitmap_, map_); #endif } } diff --git a/storage/mroonga/lib/mrn_debug_column_access.hpp b/storage/mroonga/lib/mrn_debug_column_access.hpp index be8054efb9e..309db6e8460 100644 --- a/storage/mroonga/lib/mrn_debug_column_access.hpp +++ b/storage/mroonga/lib/mrn_debug_column_access.hpp @@ -27,7 +27,7 @@ namespace mrn { TABLE *table_; MY_BITMAP *bitmap_; #ifndef DBUG_OFF - my_bitmap_map *map_; + MY_BITMAP *map_; #endif public: DebugColumnAccess(TABLE *table, MY_BITMAP *bitmap); diff --git a/storage/oqgraph/ha_oqgraph.cc b/storage/oqgraph/ha_oqgraph.cc index 350a4c3b5ff..c75beb36297 100644 --- a/storage/oqgraph/ha_oqgraph.cc +++ b/storage/oqgraph/ha_oqgraph.cc @@ -907,7 +907,7 @@ int ha_oqgraph::index_read_idx(byte * buf, uint index, const byte * key, bmove_align(buf, table->s->default_values, table->s->reclength); key_restore(buf, (byte*) key, key_info, key_len); - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); my_ptrdiff_t ptrdiff= buf - table->record[0]; if (ptrdiff) @@ -936,7 +936,7 @@ int ha_oqgraph::index_read_idx(byte * buf, uint index, const byte * key, field[1]->move_field_offset(-ptrdiff); field[2]->move_field_offset(-ptrdiff); } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); return error_code(oqgraph::NO_MORE_DATA); } } @@ -961,7 +961,7 @@ int ha_oqgraph::index_read_idx(byte * buf, uint index, const byte * key, field[1]->move_field_offset(-ptrdiff); field[2]->move_field_offset(-ptrdiff); } - 
dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); // Keep the latch around so we can use it in the query result later - // See fill_record(). @@ -994,7 +994,7 @@ int ha_oqgraph::fill_record(byte *record, const open_query::row &row) bmove_align(record, table->s->default_values, table->s->reclength); - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); my_ptrdiff_t ptrdiff= record - table->record[0]; if (ptrdiff) @@ -1070,7 +1070,7 @@ int ha_oqgraph::fill_record(byte *record, const open_query::row &row) field[4]->move_field_offset(-ptrdiff); field[5]->move_field_offset(-ptrdiff); } - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return 0; } diff --git a/storage/perfschema/pfs_engine_table.cc b/storage/perfschema/pfs_engine_table.cc index 08c0fe8ecfe..7fa217d538b 100644 --- a/storage/perfschema/pfs_engine_table.cc +++ b/storage/perfschema/pfs_engine_table.cc @@ -188,17 +188,15 @@ ha_rows PFS_engine_table_share::get_row_count(void) const int PFS_engine_table_share::write_row(TABLE *table, unsigned char *buf, Field **fields) const { - my_bitmap_map *org_bitmap; - if (m_write_row == NULL) { return HA_ERR_WRONG_COMMAND; } /* We internally read from Fields to support the write interface */ - org_bitmap= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set); int result= m_write_row(table, buf, fields); - dbug_tmp_restore_column_map(table->read_set, org_bitmap); + dbug_tmp_restore_column_map(&table->read_set, org_bitmap); return result; } @@ -256,7 +254,6 @@ int PFS_engine_table::read_row(TABLE *table, unsigned char *buf, Field **fields) { - my_bitmap_map *org_bitmap; Field *f; Field **fields_reset; @@ -264,7 +261,7 @@ int PFS_engine_table::read_row(TABLE *table, bool read_all= !bitmap_is_clear_all(table->write_set); /* We internally write to Fields to support the read interface */ - org_bitmap= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->write_set); /* Some callers of the storage engine interface do not honor the @@ -276,7 +273,7 @@ int PFS_engine_table::read_row(TABLE *table, f->reset(); int result= read_row_values(table, buf, fields, read_all); - dbug_tmp_restore_column_map(table->write_set, org_bitmap); + dbug_tmp_restore_column_map(&table->write_set, org_bitmap); return result; } @@ -294,12 +291,10 @@ int PFS_engine_table::update_row(TABLE *table, unsigned char *new_buf, Field **fields) { - my_bitmap_map *org_bitmap; - /* We internally read from Fields to support the write interface */ - org_bitmap= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set); int result= update_row_values(table, old_buf, new_buf, fields); - dbug_tmp_restore_column_map(table->read_set, org_bitmap); + dbug_tmp_restore_column_map(&table->read_set, org_bitmap); return result; } @@ -308,12 +303,10 @@ int PFS_engine_table::delete_row(TABLE *table, const unsigned char *buf, Field **fields) { - my_bitmap_map *org_bitmap; - /* We internally read from Fields to support the delete interface */ - org_bitmap= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set); int result= delete_row_values(table, buf, fields); - 
dbug_tmp_restore_column_map(table->read_set, org_bitmap); + dbug_tmp_restore_column_map(&table->read_set, org_bitmap); return result; } diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 5cbe4c162a8..8d2080187ef 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -6085,8 +6085,7 @@ ulonglong ha_rocksdb::load_auto_incr_value_from_index() { Field *field = table->key_info[table->s->next_number_index].key_part[0].field; ulonglong max_val = rdb_get_int_col_max_value(field); - my_bitmap_map *const old_map = - dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *const old_map = dbug_tmp_use_all_columns(table, &table->read_set); last_val = field->val_int(); if (last_val != max_val) { last_val++; @@ -6101,7 +6100,7 @@ ulonglong ha_rocksdb::load_auto_incr_value_from_index() { } } #endif - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); } m_keyread_only = save_keyread_only; @@ -6138,15 +6137,15 @@ void ha_rocksdb::update_auto_incr_val_from_field() { field = table->key_info[table->s->next_number_index].key_part[0].field; max_val = rdb_get_int_col_max_value(field); - my_bitmap_map *const old_map = - dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *const old_map = + dbug_tmp_use_all_columns(table, &table->read_set); new_val = field->val_int(); // don't increment if we would wrap around if (new_val != max_val) { new_val++; } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); // Only update if positive value was set for auto_incr column. if (new_val <= max_val) { diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index fee5d24eb66..31bc40b1df9 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -1488,12 +1488,12 @@ void Rdb_key_def::pack_with_make_sort_key( DBUG_ASSERT(*dst != nullptr); const int max_len = fpi->m_max_image_len; - my_bitmap_map *old_map; + MY_BITMAP*old_map; old_map= dbug_tmp_use_all_columns(field->table, - field->table->read_set); + &field->table->read_set); field->sort_string(*dst, max_len); - dbug_tmp_restore_column_map(field->table->read_set, old_map); + dbug_tmp_restore_column_map(&field->table->read_set, old_map); *dst += max_len; } diff --git a/storage/sequence/sequence.cc b/storage/sequence/sequence.cc index 948f030b479..50f4ad0eeb4 100644 --- a/storage/sequence/sequence.cc +++ b/storage/sequence/sequence.cc @@ -115,13 +115,13 @@ THR_LOCK_DATA **ha_seq::store_lock(THD *thd, THR_LOCK_DATA **to, void ha_seq::set(unsigned char *buf) { - my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map = dbug_tmp_use_all_columns(table, &table->write_set); my_ptrdiff_t offset = (my_ptrdiff_t) (buf - table->record[0]); Field *field = table->field[0]; field->move_field_offset(offset); field->store(cur, true); field->move_field_offset(-offset); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); } int ha_seq::rnd_init(bool scan) diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc index 67bf0744c78..3347f306cf7 100644 --- a/storage/sphinx/ha_sphinx.cc +++ b/storage/sphinx/ha_sphinx.cc @@ -3047,7 +3047,7 @@ int ha_sphinx::get_rec ( byte * buf, const byte *, uint ) } #if MYSQL_VERSION_ID>50100 - my_bitmap_map * org_bitmap = dbug_tmp_use_all_columns ( table, table->write_set ); + MY_BITMAP * org_bitmap = 
dbug_tmp_use_all_columns ( table, &table->write_set ); #endif Field ** field = table->field; @@ -3193,7 +3193,7 @@ int ha_sphinx::get_rec ( byte * buf, const byte *, uint ) m_iCurrentPos++; #if MYSQL_VERSION_ID > 50100 - dbug_tmp_restore_column_map ( table->write_set, org_bitmap ); + dbug_tmp_restore_column_map ( &table->write_set, org_bitmap ); #endif SPH_RET(0); diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc index 5e7b5a1e8ce..6dbc34706da 100644 --- a/storage/spider/ha_spider.cc +++ b/storage/spider/ha_spider.cc @@ -9737,12 +9737,12 @@ int ha_spider::write_row( if (!table->auto_increment_field_not_null) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif table->next_number_field->store((longlong) 0, TRUE); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif force_auto_increment = FALSE; table->file->insert_id_for_cur_row = 0; @@ -9750,13 +9750,13 @@ int ha_spider::write_row( } else if (auto_increment_mode == 2) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif table->next_number_field->store((longlong) 0, TRUE); table->auto_increment_field_not_null = FALSE; #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif force_auto_increment = FALSE; table->file->insert_id_for_cur_row = 0; diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc index f77603d0d3a..8d4e9acda07 100644 --- a/storage/spider/spd_db_conn.cc +++ b/storage/spider/spd_db_conn.cc @@ -1624,7 +1624,7 @@ int spider_db_append_key_where_internal( DBUG_PRINT("info", ("spider end_key_part_map=%lu", end_key_part_map)); #ifndef DBUG_OFF - my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *tmp_map = dbug_tmp_use_all_columns(table, &table->read_set); #endif if (sql_kind == SPIDER_SQL_KIND_HANDLER) @@ -2427,7 +2427,7 @@ end: if (sql_kind == SPIDER_SQL_KIND_SQL) dbton_hdl->set_order_pos(sql_type); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif DBUG_RETURN(0); } @@ -2928,15 +2928,15 @@ int spider_db_fetch_table( bitmap_is_set(table->write_set, (*field)->field_index) )) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", (*field)->field_name)); if ((error_num = spider_db_fetch_row(share, *field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } row->next(); @@ -3099,15 +3099,15 @@ int spider_db_fetch_key( bitmap_is_set(table->write_set, field->field_index) )) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", field->field_name)); if ((error_num = spider_db_fetch_row(share, field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF - 
dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } row->next(); @@ -3213,14 +3213,14 @@ int spider_db_fetch_minimum_columns( bitmap_is_set(table->write_set, (*field)->field_index) )) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", (*field)->field_name)); if ((error_num = spider_db_fetch_row(share, *field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } row->next(); @@ -5128,15 +5128,15 @@ int spider_db_seek_tmp_table( bitmap_is_set(table->write_set, (*field)->field_index) )) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", (*field)->field_name)); if ((error_num = spider_db_fetch_row(spider->share, *field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } row->next(); @@ -5215,15 +5215,15 @@ int spider_db_seek_tmp_key( bitmap_is_set(table->write_set, field->field_index) )) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", field->field_name)); if ((error_num = spider_db_fetch_row(spider->share, field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } row->next(); @@ -5305,8 +5305,8 @@ int spider_db_seek_tmp_minimum_columns( bitmap_is_set(table->write_set, (*field)->field_index))); */ #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", (*field)->field_name)); if ((error_num = @@ -5314,7 +5314,7 @@ int spider_db_seek_tmp_minimum_columns( DBUG_RETURN(error_num); row->next(); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } else if (bitmap_is_set(table->read_set, (*field)->field_index)) @@ -8680,8 +8680,8 @@ int spider_db_udf_fetch_table( DBUG_RETURN(HA_ERR_END_OF_FILE); #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif for ( roop_count = 0, @@ -8694,7 +8694,7 @@ int spider_db_udf_fetch_table( spider_db_udf_fetch_row(trx, *field, row))) { #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif DBUG_RETURN(error_num); } @@ -8704,7 +8704,7 @@ int spider_db_udf_fetch_table( for (; roop_count < set_off; roop_count++, field++) (*field)->set_default(); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif table->status = 0; DBUG_RETURN(0); 
diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc index 9ed29d015b3..1a24d80a95f 100644 --- a/storage/spider/spd_db_mysql.cc +++ b/storage/spider/spd_db_mysql.cc @@ -6528,8 +6528,7 @@ int spider_mysql_handler::append_update_set( mysql_share->append_column_name(str, (*fields)->field_index); str->q_append(SPIDER_SQL_EQUAL_STR, SPIDER_SQL_EQUAL_LEN); #ifndef DBUG_OFF - my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table, - table->read_set); + MY_BITMAP *tmp_map = dbug_tmp_use_all_columns(table, &table->read_set); #endif if ( spider_db_mysql_utility. @@ -6538,12 +6537,12 @@ int spider_mysql_handler::append_update_set( str->reserve(SPIDER_SQL_COMMA_LEN) ) { #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif DBUG_RETURN(HA_ERR_OUT_OF_MEM); } #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif } str->q_append(SPIDER_SQL_COMMA_STR, SPIDER_SQL_COMMA_LEN); @@ -9172,8 +9171,8 @@ int spider_mysql_handler::append_insert_values( bitmap_is_set(table->read_set, (*field)->field_index) ) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->read_set); #endif add_value = TRUE; DBUG_PRINT("info",("spider is_null()=%s", @@ -9195,7 +9194,7 @@ int spider_mysql_handler::append_insert_values( if (str->reserve(SPIDER_SQL_NULL_LEN + SPIDER_SQL_COMMA_LEN)) { #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif str->length(0); DBUG_RETURN(HA_ERR_OUT_OF_MEM); @@ -9209,7 +9208,7 @@ int spider_mysql_handler::append_insert_values( str->reserve(SPIDER_SQL_COMMA_LEN) ) { #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif str->length(0); DBUG_RETURN(HA_ERR_OUT_OF_MEM); @@ -9217,7 +9216,7 @@ int spider_mysql_handler::append_insert_values( } str->q_append(SPIDER_SQL_COMMA_STR, SPIDER_SQL_COMMA_LEN); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif } } diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc index 39bc286a617..12d3a5a060a 100644 --- a/storage/tokudb/ha_tokudb.cc +++ b/storage/tokudb/ha_tokudb.cc @@ -2313,7 +2313,7 @@ int ha_tokudb::pack_row_in_buff( int r = ENOSYS; memset((void *) row, 0, sizeof(*row)); - my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map = dbug_tmp_use_all_columns(table, &table->write_set); // Copy null bytes memcpy(row_buff, record, table_share->null_bytes); @@ -2362,7 +2362,7 @@ int ha_tokudb::pack_row_in_buff( row->size = (size_t) (var_field_data_ptr - row_buff); r = 0; - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return r; } @@ -2758,7 +2758,7 @@ DBT* ha_tokudb::create_dbt_key_from_key( { uint32_t size = 0; uchar* tmp_buff = buff; - my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map = dbug_tmp_use_all_columns(table, &table->write_set); key->data = buff; @@ -2797,7 +2797,7 @@ DBT* ha_tokudb::create_dbt_key_from_key( key->size = size; DBUG_DUMP("key", (uchar *) key->data, key->size); - dbug_tmp_restore_column_map(table->write_set, old_map); + 
dbug_tmp_restore_column_map(&table->write_set, old_map); return key; } @@ -2890,7 +2890,7 @@ DBT* ha_tokudb::pack_key( KEY* key_info = &table->key_info[keynr]; KEY_PART_INFO* key_part = key_info->key_part; KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts; - my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP* old_map = dbug_tmp_use_all_columns(table, &table->write_set); memset((void *) key, 0, sizeof(*key)); key->data = buff; @@ -2927,7 +2927,7 @@ DBT* ha_tokudb::pack_key( key->size = (buff - (uchar *) key->data); DBUG_DUMP("key", (uchar *) key->data, key->size); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(key); } @@ -2955,7 +2955,7 @@ DBT* ha_tokudb::pack_ext_key( KEY* key_info = &table->key_info[keynr]; KEY_PART_INFO* key_part = key_info->key_part; KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts; - my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP* old_map = dbug_tmp_use_all_columns(table, &table->write_set); memset((void *) key, 0, sizeof(*key)); key->data = buff; @@ -3034,7 +3034,7 @@ DBT* ha_tokudb::pack_ext_key( key->size = (buff - (uchar *) key->data); DBUG_DUMP("key", (uchar *) key->data, key->size); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(key); } #endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS -- cgit v1.2.1 From cd1e5d65c6d3aab7f77860803d2d0f29bf307b4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 8 Jan 2021 08:10:18 +0200 Subject: MDEV-19838 fixup: clang -Wunused-const-variable --- sql/sql_prepare.cc | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 3144b36ff77..dc2fb414de9 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2002, 2015, Oracle and/or its affiliates. - Copyright (c) 2008, 2019, MariaDB + Copyright (c) 2008, 2021, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -117,15 +117,14 @@ When one supplies long data for a placeholder: #include #else #include +/* Constants defining bits in parameter type flags. Flags are read from high byte of short value */ +static const uint PARAMETER_FLAG_UNSIGNED= 128U << 8; #endif #include "lock.h" // MYSQL_OPEN_FORCE_SHARED_MDL #include "sql_handler.h" #include "transaction.h" // trans_rollback_implicit #include "wsrep_mysqld.h" -/* Constants defining bits in parameter type flags. Flags are read from high byte of short value */ -static const uint PARAMETER_FLAG_UNSIGNED = 128U << 8; - /** A result class used to send cursor rows using the binary protocol. 
*/ -- cgit v1.2.1 From 26d913a743d609bcd82339bad196b74daf1d2fa6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Fri, 8 Jan 2021 08:44:18 +0200 Subject: Update wsrep-lib --- sql/wsrep_client_service.cc | 2 +- sql/wsrep_condition_variable.h | 2 +- wsrep-lib | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/sql/wsrep_client_service.cc b/sql/wsrep_client_service.cc index 934a9701b41..245fc1487ca 100644 --- a/sql/wsrep_client_service.cc +++ b/sql/wsrep_client_service.cc @@ -74,7 +74,7 @@ bool Wsrep_client_service::interrupted( Locking order is: 1) LOCK_thd_data 2) LOCK_thd_kill */ - mysql_mutex_assert_owner(static_cast(lock.mutex().native())); + mysql_mutex_assert_owner(static_cast(lock.mutex()->native())); mysql_mutex_lock(&m_thd->LOCK_thd_kill); bool ret= (m_thd->killed != NOT_KILLED); if (ret) diff --git a/sql/wsrep_condition_variable.h b/sql/wsrep_condition_variable.h index 4412154e67b..6ad53a3086c 100644 --- a/sql/wsrep_condition_variable.h +++ b/sql/wsrep_condition_variable.h @@ -44,7 +44,7 @@ public: void wait(wsrep::unique_lock& lock) { - mysql_mutex_t* mutex= static_cast(lock.mutex().native()); + mysql_mutex_t* mutex= static_cast(lock.mutex()->native()); mysql_cond_wait(&m_cond, mutex); } private: diff --git a/wsrep-lib b/wsrep-lib index 515ac816f98..a93955ddeef 160000 --- a/wsrep-lib +++ b/wsrep-lib @@ -1 +1 @@ -Subproject commit 515ac816f98329c0227d0060cc9339c889810834 +Subproject commit a93955ddeef5989505cbb3a9f8bb124341462569 -- cgit v1.2.1 From 61a362c9493df63dc588fcb71409537ae56ab9c8 Mon Sep 17 00:00:00 2001 From: Nikita Malyavin Date: Fri, 8 Jan 2021 22:09:26 +1000 Subject: fixup MDEV-17556: fix mroonga --- storage/mroonga/ha_mroonga.cpp | 28 ++++++++++++------------- storage/mroonga/lib/mrn_debug_column_access.cpp | 6 +++--- storage/mroonga/lib/mrn_debug_column_access.hpp | 4 ++-- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/storage/mroonga/ha_mroonga.cpp b/storage/mroonga/ha_mroonga.cpp index 06ff71c1b5a..1025b70156f 100644 --- a/storage/mroonga/ha_mroonga.cpp +++ b/storage/mroonga/ha_mroonga.cpp @@ -5910,7 +5910,7 @@ int ha_mroonga::wrapper_write_row_index(uchar *buf) DBUG_RETURN(0); } - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; for (i = 0; i < n_keys; i++) { @@ -5985,7 +5985,7 @@ int ha_mroonga::storage_write_row(uchar *buf) DBUG_RETURN(error); } - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); for (i = 0; i < n_columns; i++) { Field *field = table->field[i]; @@ -6266,7 +6266,7 @@ int ha_mroonga::storage_write_row_multiple_column_indexes(uchar *buf, int error = 0; - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; for (i = 0; i < n_keys; i++) { @@ -6558,7 +6558,7 @@ int ha_mroonga::wrapper_update_row_index(const uchar *old_data, uchar *new_data) DBUG_RETURN(0); } - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; for (i = 0; i < n_keys; i++) { @@ -6678,7 +6678,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, uchar *new_data) grn_obj new_value; GRN_VOID_INIT(&new_value); { - mrn::DebugColumnAccess debug_column_access(table, 
table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); generic_store_bulk(field, &new_value); } grn_obj casted_value; @@ -6707,7 +6707,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, uchar *new_data) storage_store_fields_for_prep_update(old_data, new_data, record_id); { mrn::Lock lock(&(share->record_mutex), have_unique_index()); - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); if ((error = storage_prepare_delete_row_unique_indexes(old_data, record_id))) { DBUG_RETURN(error); @@ -6732,7 +6732,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, uchar *new_data) #endif if (bitmap_is_set(table->write_set, field->field_index)) { - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); DBUG_PRINT("info", ("mroonga: update column %d(%d)",i,field->field_index)); if (field->is_null()) continue; @@ -6809,7 +6809,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, uchar *new_data) if (table->found_next_number_field && !table->s->next_number_keypart && new_data == table->record[0]) { - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); Field_num *field = (Field_num *) table->found_next_number_field; if (field->unsigned_flag || field->val_int() > 0) { MRN_LONG_TERM_SHARE *long_term_share = share->long_term_share; @@ -6865,7 +6865,7 @@ int ha_mroonga::storage_update_row_index(const uchar *old_data, uchar *new_data) my_ptrdiff_t ptr_diff = PTR_BYTE_DIFF(old_data, table->record[0]); - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; mrn_change_encoding(ctx, NULL); @@ -7081,7 +7081,7 @@ int ha_mroonga::wrapper_delete_row_index(const uchar *buf) DBUG_RETURN(0); } - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; for (i = 0; i < n_keys; i++) { @@ -7232,7 +7232,7 @@ int ha_mroonga::storage_delete_row_index(const uchar *buf) GRN_TEXT_INIT(&key, 0); GRN_TEXT_INIT(&encoded_key, 0); - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; mrn_change_encoding(ctx, NULL); @@ -11420,7 +11420,7 @@ void ha_mroonga::storage_store_fields(uchar *buf, grn_id record_id) } } - mrn::DebugColumnAccess debug_column_access(table, table->write_set); + mrn::DebugColumnAccess debug_column_access(table, &table->write_set); DBUG_PRINT("info", ("mroonga: store column %d(%d)",i,field->field_index)); field->move_field_offset(ptr_diff); if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) { @@ -11485,7 +11485,7 @@ void ha_mroonga::storage_store_fields_for_prep_update(const uchar *old_data, ) #endif ) { - mrn::DebugColumnAccess debug_column_access(table, table->write_set); + mrn::DebugColumnAccess debug_column_access(table, &table->write_set); DBUG_PRINT("info", ("mroonga: store column %d(%d)",i,field->field_index)); grn_obj value; GRN_OBJ_INIT(&value, GRN_BULK, 0, grn_obj_get_range(ctx, grn_columns[i])); @@ -11521,7 +11521,7 @@ void ha_mroonga::storage_store_fields_by_index(uchar *buf) if (KEY_N_KEY_PARTS(key_info) == 1) { my_ptrdiff_t 
ptr_diff = PTR_BYTE_DIFF(buf, table->record[0]); Field *field = key_info->key_part->field; - mrn::DebugColumnAccess debug_column_access(table, table->write_set); + mrn::DebugColumnAccess debug_column_access(table, &table->write_set); field->move_field_offset(ptr_diff); storage_store_field(field, (const char *)key, key_length); field->move_field_offset(-ptr_diff); diff --git a/storage/mroonga/lib/mrn_debug_column_access.cpp b/storage/mroonga/lib/mrn_debug_column_access.cpp index ed535f150ab..4c88d7b3e64 100644 --- a/storage/mroonga/lib/mrn_debug_column_access.cpp +++ b/storage/mroonga/lib/mrn_debug_column_access.cpp @@ -20,17 +20,17 @@ #include "mrn_debug_column_access.hpp" namespace mrn { - DebugColumnAccess::DebugColumnAccess(TABLE *table, MY_BITMAP *bitmap) + DebugColumnAccess::DebugColumnAccess(TABLE *table, MY_BITMAP **bitmap) : table_(table), bitmap_(bitmap) { #ifndef DBUG_OFF - map_ = dbug_tmp_use_all_columns(table_, &bitmap_); + map_ = dbug_tmp_use_all_columns(table_, bitmap_); #endif } DebugColumnAccess::~DebugColumnAccess() { #ifndef DBUG_OFF - dbug_tmp_restore_column_map(&bitmap_, map_); + dbug_tmp_restore_column_map(bitmap_, map_); #endif } } diff --git a/storage/mroonga/lib/mrn_debug_column_access.hpp b/storage/mroonga/lib/mrn_debug_column_access.hpp index 309db6e8460..9f85b831def 100644 --- a/storage/mroonga/lib/mrn_debug_column_access.hpp +++ b/storage/mroonga/lib/mrn_debug_column_access.hpp @@ -25,12 +25,12 @@ namespace mrn { class DebugColumnAccess { TABLE *table_; - MY_BITMAP *bitmap_; + MY_BITMAP **bitmap_; #ifndef DBUG_OFF MY_BITMAP *map_; #endif public: - DebugColumnAccess(TABLE *table, MY_BITMAP *bitmap); + DebugColumnAccess(TABLE *table, MY_BITMAP **bitmap); ~DebugColumnAccess(); }; } -- cgit v1.2.1 From 18254c18d9c321835fbac1866be9ecbf4a8fe9c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 8 Jan 2021 16:14:26 +0200 Subject: Cleanup: Remove unused symbol QUE_THR_PROCEDURE_WAIT --- storage/innobase/include/que0que.h | 3 +-- storage/innobase/que/que0que.cc | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/storage/innobase/include/que0que.h b/storage/innobase/include/que0que.h index 262be4d30a9..2798a4d40fb 100644 --- a/storage/innobase/include/que0que.h +++ b/storage/innobase/include/que0que.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -303,7 +303,6 @@ que_fork_scheduler_round_robin( /** Query thread states */ enum que_thr_state_t { QUE_THR_RUNNING, - QUE_THR_PROCEDURE_WAIT, /** in selects this means that the thread is at the end of its result set (or start, in case of a scroll cursor); in other statements, this means the thread has done its task */ diff --git a/storage/innobase/que/que0que.cc b/storage/innobase/que/que0que.cc index 3ad948af4d2..0b630bb1c8c 100644 --- a/storage/innobase/que/que0que.cc +++ b/storage/innobase/que/que0que.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2018, 2020 MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -349,7 +349,6 @@ que_fork_start_command( case QUE_THR_RUNNING: case QUE_THR_LOCK_WAIT: - case QUE_THR_PROCEDURE_WAIT: ut_error; } } -- cgit v1.2.1 From 775fccea0c24b529bb2ec6888e34e7fae2ba25dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Thu, 17 Dec 2020 14:20:23 +0200 Subject: MDEV-23536 : Race condition between KILL and transaction commit A race condition may occur between the execution of transaction commit, and an execution of a KILL statement that would attempt to abort that transaction. MDEV-17092 worked around this race condition by modifying InnoDB code. After that issue was closed, Sergey Vojtovich pointed out that this race condition would better be fixed above the storage engine layer: If you look carefully into the above, you can conclude that thd->free_connection() can be called concurrently with KILL/thd->awake(). Which is the bug. And it is partially fixed in THD::~THD(), that is destructor waits for KILL completion: Fix: Add necessary mutex operations to THD::free_connection() and move WSREP specific code also there. This ensures that no one is using THD while we do free_connection(). These mutexes will also ensures that there can't be concurrent KILL/THD::awake(). innobase_kill_query We can now remove usage of trx_sys_mutex introduced on MDEV-17092. trx_t::free() Poison trx->state and trx->mysql_thd This patch is validated with an RQG run similar to the one that reproduced MDEV-17092. --- sql/sql_class.cc | 38 ++++++++++++++++++++++++++++------- sql/sql_class.h | 2 +- storage/innobase/handler/ha_innodb.cc | 35 ++++++-------------------------- storage/innobase/lock/lock0lock.cc | 1 + storage/innobase/trx/trx0trx.cc | 4 ++-- 5 files changed, 41 insertions(+), 39 deletions(-) diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 2595717572a..2321991d99f 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1357,12 +1357,15 @@ void THD::change_user(void) /* Do operations that may take a long time */ -void THD::cleanup(void) +void THD::cleanup(bool have_mutex) { DBUG_ENTER("THD::cleanup"); DBUG_ASSERT(cleanup_done == 0); - set_killed(KILL_CONNECTION); + if (have_mutex) + set_killed_no_mutex(KILL_CONNECTION,0,0); + else + set_killed(KILL_CONNECTION); #ifdef ENABLE_WHEN_BINLOG_WILL_BE_ABLE_TO_PREPARE if (transaction.xid_state.xa_state == XA_PREPARED) { @@ -1437,6 +1440,28 @@ void THD::cleanup(void) void THD::free_connection() { DBUG_ASSERT(free_connection_done == 0); + /* Check that we have already called thd->unlink() */ + DBUG_ASSERT(prev == 0 && next == 0); + + /* + Other threads may have a lock on THD::LOCK_thd_data or + THD::LOCK_thd_kill to ensure that this THD is not deleted + while they access it. The following mutex_lock ensures + that no one else is using this THD and it's now safe to + continue. + + For example consider KILL-statement execution on + sql_parse.cc kill_one_thread() that will use + THD::LOCK_thd_data to protect victim thread during + THD::awake(). 
+ */ + mysql_mutex_lock(&LOCK_thd_data); + mysql_mutex_lock(&LOCK_thd_kill); + +#ifdef WITH_WSREP + delete wsrep_rgi; + wsrep_rgi= 0; +#endif /* WITH_WSREP */ my_free(db); db= NULL; #ifndef EMBEDDED_LIBRARY @@ -1445,8 +1470,8 @@ void THD::free_connection() net.vio= 0; net_end(&net); #endif - if (!cleanup_done) - cleanup(); + if (!cleanup_done) + cleanup(true); // We have locked THD::LOCK_thd_kill ha_close_connection(this); plugin_thdvar_cleanup(this); mysql_audit_free_thd(this); @@ -1457,6 +1482,8 @@ void THD::free_connection() #if defined(ENABLED_PROFILING) profiling.restart(); // Reset profiling #endif + mysql_mutex_unlock(&LOCK_thd_kill); + mysql_mutex_unlock(&LOCK_thd_data); } /* @@ -1512,9 +1539,6 @@ THD::~THD() mysql_mutex_lock(&LOCK_thd_data); mysql_mutex_unlock(&LOCK_thd_data); -#ifdef WITH_WSREP - delete wsrep_rgi; -#endif if (!free_connection_done) free_connection(); diff --git a/sql/sql_class.h b/sql/sql_class.h index b68e3553a2d..a97f64c13b3 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3194,7 +3194,7 @@ public: void update_all_stats(); void update_stats(void); void change_user(void); - void cleanup(void); + void cleanup(bool have_mutex=false); void cleanup_after_query(); void free_connection(); void reset_for_reuse(); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 7cc4401d0ff..c79b3423b63 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -5176,38 +5176,15 @@ static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels) if (trx_t* trx= thd_to_trx(thd)) { + ut_ad(trx->mysql_thd == thd); lock_mutex_enter(); - trx_sys_mutex_enter(); - trx_mutex_enter(trx); - /* It is possible that innobase_close_connection() is concurrently - being executed on our victim. In that case, trx->mysql_thd would - be reset before invoking trx_t::free(). Even if the trx object is later - reused for another client connection or a background transaction, - its trx->mysql_thd will differ from our thd. - - If trx never performed any changes, nothing is really protecting - the trx_t::free() call or the changes of trx_t::state when the - transaction is being rolled back and trx_commit_low() is being - executed. - - The function trx_allocate_for_mysql() acquires - trx_sys_t::mutex, but trx_allocate_for_background() will not. - Luckily, background transactions cannot be read-only, because - for read-only transactions, trx_start_low() will avoid acquiring - any of the trx_sys_t::mutex, lock_sys_t::mutex, trx_t::mutex before - assigning trx_t::state. - - At this point, trx may have been reallocated for another client - connection, or for a background operation. In that case, either - trx_t::state or trx_t::mysql_thd should not match our expectations. 
*/ - bool cancel= trx->mysql_thd == thd && trx->state == TRX_STATE_ACTIVE && - !trx->lock.was_chosen_as_deadlock_victim; - trx_sys_mutex_exit(); - if (!cancel); - else if (lock_t *lock= trx->lock.wait_lock) + if (lock_t *lock= trx->lock.wait_lock) + { + trx_mutex_enter(trx); lock_cancel_waiting_and_release(lock); + trx_mutex_exit(trx); + } lock_mutex_exit(); - trx_mutex_exit(trx); } DBUG_VOID_RETURN; diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index ec37f9029f7..c95506abc39 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -6453,6 +6453,7 @@ lock_cancel_waiting_and_release( ut_ad(lock_mutex_own()); ut_ad(trx_mutex_own(lock->trx)); + ut_ad(lock->trx->state == TRX_STATE_ACTIVE); lock->trx->lock.cancel = true; diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index 245758cecd5..60e534e0f43 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -393,7 +393,7 @@ inline void trx_t::free() /* do not poison mutex */ MEM_NOACCESS(&id, sizeof id); MEM_NOACCESS(&no, sizeof no); - /* state is accessed by innobase_kill_connection() */ + MEM_NOACCESS(&state, sizeof state); MEM_NOACCESS(&is_recovered, sizeof is_recovered); #ifdef WITH_WSREP MEM_NOACCESS(&wsrep, sizeof wsrep); @@ -419,7 +419,7 @@ inline void trx_t::free() MEM_NOACCESS(&start_time_micro, sizeof start_time_micro); MEM_NOACCESS(&commit_lsn, sizeof commit_lsn); MEM_NOACCESS(&table_id, sizeof table_id); - /* mysql_thd is accessed by innobase_kill_connection() */ + MEM_NOACCESS(&mysql_thd, sizeof mysql_thd); MEM_NOACCESS(&mysql_log_file_name, sizeof mysql_log_file_name); MEM_NOACCESS(&mysql_log_offset, sizeof mysql_log_offset); MEM_NOACCESS(&n_mysql_tables_in_use, sizeof n_mysql_tables_in_use); -- cgit v1.2.1 From a131b976b205af4023a5d43569fadf44dceade70 Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Fri, 8 Jan 2021 15:05:07 +0100 Subject: Fix MTR test galera_as_slave_replay MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Galera cluster must be in node 1 and 2, and acts as slave for node 3 - Sync point commit_monitor_enter_sync was renamed to commit_monitor_master_enter_sync in 4.x Reviewed-by: Jan Lindström --- .../suite/galera/r/galera_as_slave_replay.result | 26 ++++++++++--------- .../suite/galera/t/galera_as_slave_replay.test | 30 +++++++++++----------- 2 files changed, 29 insertions(+), 27 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_as_slave_replay.result b/mysql-test/suite/galera/r/galera_as_slave_replay.result index 760617be5f7..3c2cea19179 100644 --- a/mysql-test/suite/galera/r/galera_as_slave_replay.result +++ b/mysql-test/suite/galera/r/galera_as_slave_replay.result @@ -1,10 +1,13 @@ connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2; connection node_2a; +connection node_2; connection node_1; +connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3; +connection node_3; RESET MASTER; connection node_2a; START SLAVE; -connection node_1; +connection node_3; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb; INSERT INTO t1 VALUES (1, 'a'); INSERT INTO t1 VALUES (3, 'a'); @@ -18,15 +21,14 @@ f1 f2 UPDATE t1 SET f2 = 'c' WHERE f1 > 1; connection node_2a; SET SESSION wsrep_sync_wait = 0; -connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3; -connection node_3; +connection node_1; SET SESSION wsrep_sync_wait = 0; connection node_2a; -SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync'; +SET 
GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_master_enter_sync'; SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb"; -connection node_3; -INSERT INTO test.t1 VALUES (2, 'b'); connection node_1; +INSERT INTO test.t1 VALUES (2, 'b'); +connection node_3; COMMIT; connection node_2a; SET SESSION wsrep_on = 0; @@ -35,8 +37,8 @@ SET GLOBAL debug_dbug = ""; SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb"; connection node_2a; SET GLOBAL wsrep_provider_options = 'dbug='; -SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync'; -connection node_1; +SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_master_enter_sync'; +connection node_3; SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a'; COUNT(*) = 1 1 @@ -61,7 +63,7 @@ SET DEBUG_SYNC = "RESET"; # # test phase with real abort # -connection node_1; +connection node_3; set binlog_format=ROW; insert into t1 values (4, 'd'); SET AUTOCOMMIT=ON; @@ -70,9 +72,9 @@ UPDATE t1 SET f2 = 'd' WHERE f1 = 3; connection node_2a; SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync'; SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb"; -connection node_3; -UPDATE test.t1 SET f2 = 'e' WHERE f1 = 3; connection node_1; +UPDATE test.t1 SET f2 = 'e' WHERE f1 = 3; +connection node_3; COMMIT; connection node_2a; SET GLOBAL debug_dbug = ""; @@ -90,6 +92,6 @@ set session wsrep_sync_wait=0; STOP SLAVE; RESET SLAVE; DROP TABLE t1; -connection node_1; +connection node_3; DROP TABLE t1; RESET MASTER; diff --git a/mysql-test/suite/galera/t/galera_as_slave_replay.test b/mysql-test/suite/galera/t/galera_as_slave_replay.test index 93f95349e6d..47f70bda721 100644 --- a/mysql-test/suite/galera/t/galera_as_slave_replay.test +++ b/mysql-test/suite/galera/t/galera_as_slave_replay.test @@ -18,9 +18,10 @@ #--source suite/galera/include/galera_have_debug_sync.inc # -# node 1 is native MariaDB server operating as async replication master +# node 3 is native MariaDB server operating as async replication master # ---connection node_1 +--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3 +--connection node_3 RESET MASTER; --connection node_2a @@ -31,14 +32,14 @@ RESET MASTER; # -# nodes 2 and 3 form a galera cluster, node 2 operates as slave for native MariaDB naster in node 1 +# nodes 1 and 2 form a galera cluster, node 2 operates as slave for native MariaDB naster in node 3 # --disable_query_log ---eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_1; +--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_3; --enable_query_log START SLAVE; ---connection node_1 +--connection node_3 CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb; INSERT INTO t1 VALUES (1, 'a'); INSERT INTO t1 VALUES (3, 'a'); @@ -63,15 +64,14 @@ SET SESSION wsrep_sync_wait = 0; --source include/wait_condition.inc # wait for create table and inserts to be replicated in cluster ---connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3 ---connection node_3 +--connection node_1 SET SESSION wsrep_sync_wait = 0; --let $wait_condition = SELECT COUNT(*) = 2 FROM test.t1; --source include/wait_condition.inc --connection node_2a # Block the future commit of async replication ---let $galera_sync_point = commit_monitor_enter_sync +--let $galera_sync_point = commit_monitor_master_enter_sync --source include/galera_set_sync_point.inc # block also the applier before applying begins @@ -81,13 +81,13 @@ SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb"; # now inject a conflicting insert from node 3, it 
will replicate with # earlier seqno (than async transaction) and pause before applying in node 2 # ---connection node_3 +--connection node_1 INSERT INTO test.t1 VALUES (2, 'b'); # # send the update from master, this will succeed here, beceuase of async replication. # async replication will apply this in node 2 and pause before commit phase, ---connection node_1 +--connection node_3 --error 0 COMMIT; @@ -108,7 +108,7 @@ SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb"; --source include/galera_clear_sync_point.inc --source include/galera_signal_sync_point.inc ---connection node_1 +--connection node_3 SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a'; SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c'; @@ -139,7 +139,7 @@ SET DEBUG_SYNC = "RESET"; --echo # test phase with real abort --echo # ---connection node_1 +--connection node_3 set binlog_format=ROW; @@ -163,11 +163,11 @@ UPDATE t1 SET f2 = 'd' WHERE f1 = 3; SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb"; # Inject a conflicting update from node 3 ---connection node_3 +--connection node_1 UPDATE test.t1 SET f2 = 'e' WHERE f1 = 3; # send the update from master ---connection node_1 +--connection node_3 --error 0 COMMIT; @@ -195,6 +195,6 @@ RESET SLAVE; DROP TABLE t1; ---connection node_1 +--connection node_3 DROP TABLE t1; RESET MASTER; -- cgit v1.2.1 From 3b548d3bbf888e7c9e9853cf826e528b5195d8bd Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sat, 9 Jan 2021 18:34:28 +0100 Subject: MDEV-24554 Do not use verisign server for authenticode timestamping --- cmake/install_macros.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/install_macros.cmake b/cmake/install_macros.cmake index c0802399e5d..c1fb46a64a7 100644 --- a/cmake/install_macros.cmake +++ b/cmake/install_macros.cmake @@ -182,7 +182,7 @@ IF(WIN32) MARK_AS_ADVANCED(SIGNCODE) IF(SIGNCODE) SET(SIGNTOOL_PARAMETERS - /a /t http://timestamp.verisign.com/scripts/timstamp.dll + /a /t http://timestamp.globalsign.com/scripts/timstamp.dll CACHE STRING "parameters for signtool (list)") FIND_PROGRAM(SIGNTOOL_EXECUTABLE signtool PATHS "$ENV{ProgramFiles}/Microsoft SDKs/Windows/v7.0A/bin" -- cgit v1.2.1
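The MDEV-23536 fix above rests on one pattern: connection teardown (THD::free_connection()) and the KILL path (THD::awake()) must serialize on the same two per-THD mutexes, taken in the documented order 1) LOCK_thd_data, 2) LOCK_thd_kill, and cleanup() must not re-acquire a mutex its caller already holds. The following is a minimal standalone C++ sketch of that locking pattern only; FakeTHD, its members and the kill values are simplified stand-ins introduced for illustration, not MariaDB's actual THD API.

// Illustrative sketch only: FakeTHD and std::mutex stand in for MariaDB's THD,
// mysql_mutex_t and kill flags; this is not the server's real implementation.
#include <iostream>
#include <mutex>
#include <thread>

enum killed_state { NOT_KILLED = 0, KILL_CONNECTION = 1 };

struct FakeTHD {
  std::mutex LOCK_thd_data;          // stand-in for THD::LOCK_thd_data
  std::mutex LOCK_thd_kill;          // stand-in for THD::LOCK_thd_kill
  killed_state killed = NOT_KILLED;
  bool cleanup_done = false;

  // Analogue of THD::cleanup(bool have_mutex): if the caller already holds
  // LOCK_thd_kill, set the kill flag directly instead of locking again.
  void cleanup(bool have_mutex) {
    if (have_mutex) {
      killed = KILL_CONNECTION;                        // set_killed_no_mutex()
    } else {
      std::lock_guard<std::mutex> kill_guard(LOCK_thd_kill);
      killed = KILL_CONNECTION;                        // set_killed()
    }
    cleanup_done = true;
  }

  // Analogue of THD::free_connection(): take both mutexes, in the documented
  // order, before tearing the connection down, so a concurrent KILL cannot be
  // inspecting this THD while it is being freed.
  void free_connection() {
    std::lock_guard<std::mutex> data_guard(LOCK_thd_data);  // 1) LOCK_thd_data
    std::lock_guard<std::mutex> kill_guard(LOCK_thd_kill);  // 2) LOCK_thd_kill
    if (!cleanup_done)
      cleanup(/*have_mutex=*/true);    // LOCK_thd_kill is already held here
    // ... per-connection resources would be released here ...
  }

  // Analogue of the KILL path (THD::awake()): same mutexes, same order, so it
  // runs either entirely before free_connection() or entirely after it.
  void awake(killed_state level) {
    std::lock_guard<std::mutex> data_guard(LOCK_thd_data);
    std::lock_guard<std::mutex> kill_guard(LOCK_thd_kill);
    if (killed < level)
      killed = level;
  }
};

int main() {
  FakeTHD thd;
  std::thread killer([&thd] { thd.awake(KILL_CONNECTION); });
  thd.free_connection();
  killer.join();
  std::cout << "killed=" << thd.killed
            << " cleanup_done=" << thd.cleanup_done << "\n";
}

Because both paths acquire the mutexes in the same order there is no deadlock, and once free_connection() holds both it can assume no KILL is concurrently using the THD, which is the invariant the commit message describes.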