From c04318ed243fd85eeb7256399516a9189e12a0b0 Mon Sep 17 00:00:00 2001
From: Mattias Jonsson
Date: Fri, 5 Nov 2010 12:01:10 +0100
Subject: Bug#57778: failed primary key add to partitioned innodb table inconsistent and crashes

It was possible to issue an ALTER TABLE ADD PRIMARY KEY on a partitioned
InnoDB table that failed and crashed the server. The problem was that the
PK was successfully created on at least one partition, and creation then
failed on a subsequent partition due to a duplicate key violation. Since
the partitions that had already added the PK were not reverted, not all
partitions were consistent with the table definition, which caused the
crash.

The solution was to add a revert step to ha_partition::add_index() that,
on failure, drops the index again from the partitions where it had already
been added.

mysql-test/r/partition.result:
  updated result
mysql-test/t/partition.test:
  Added test
sql/ha_partition.cc:
  Only allow ADD/DROP flags in pairs, so that they can be reverted on
  failures.
  If add_index() fails for a partition, revert (drop the index) for the
  previous partitions.
sql/handler.h:
  Added some extra info in a comment.
---
 sql/ha_partition.cc | 62 +++++++++++++++++++++++++++++++++++++++++++++++++----
 sql/handler.h       |  2 ++
 2 files changed, 60 insertions(+), 4 deletions(-)

(limited to 'sql')

diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 3d8d5ae9eb8..70cf8480ba1 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -6375,9 +6375,42 @@ bool ha_partition::get_error_message(int error, String *buf)
 */
 uint ha_partition::alter_table_flags(uint flags)
 {
+  uint flags_to_return, flags_to_check;
   DBUG_ENTER("ha_partition::alter_table_flags");
-  DBUG_RETURN(ht->alter_table_flags(flags) |
-              m_file[0]->alter_table_flags(flags));
+
+  flags_to_return= ht->alter_table_flags(flags);
+  flags_to_return|= m_file[0]->alter_table_flags(flags);
+
+  /*
+    If one partition fails we must be able to revert the change for the other,
+    already altered, partitions. So both ADD and DROP can only be supported in
+    pairs.
+ */ + flags_to_check= HA_ONLINE_ADD_INDEX_NO_WRITES; + flags_to_check|= HA_ONLINE_DROP_INDEX_NO_WRITES; + if ((flags_to_return & flags_to_check) != flags_to_check) + flags_to_return&= ~flags_to_check; + flags_to_check= HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES; + flags_to_check|= HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES; + if ((flags_to_return & flags_to_check) != flags_to_check) + flags_to_return&= ~flags_to_check; + flags_to_check= HA_ONLINE_ADD_PK_INDEX_NO_WRITES; + flags_to_check|= HA_ONLINE_DROP_PK_INDEX_NO_WRITES; + if ((flags_to_return & flags_to_check) != flags_to_check) + flags_to_return&= ~flags_to_check; + flags_to_check= HA_ONLINE_ADD_INDEX; + flags_to_check|= HA_ONLINE_DROP_INDEX; + if ((flags_to_return & flags_to_check) != flags_to_check) + flags_to_return&= ~flags_to_check; + flags_to_check= HA_ONLINE_ADD_UNIQUE_INDEX; + flags_to_check|= HA_ONLINE_DROP_UNIQUE_INDEX; + if ((flags_to_return & flags_to_check) != flags_to_check) + flags_to_return&= ~flags_to_check; + flags_to_check= HA_ONLINE_ADD_PK_INDEX; + flags_to_check|= HA_ONLINE_DROP_PK_INDEX; + if ((flags_to_return & flags_to_check) != flags_to_check) + flags_to_return&= ~flags_to_check; + DBUG_RETURN(flags_to_return); } @@ -6412,6 +6445,7 @@ int ha_partition::add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys) handler **file; int ret= 0; + DBUG_ENTER("ha_partition::add_index"); /* There has already been a check in fix_partition_func in mysql_alter_table before this call, which checks for unique/primary key violations of the @@ -6419,8 +6453,28 @@ int ha_partition::add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys) */ for (file= m_file; *file; file++) if ((ret= (*file)->add_index(table_arg, key_info, num_of_keys))) - break; - return ret; + goto err; + DBUG_RETURN(ret); +err: + if (file > m_file) + { + uint *key_numbers= (uint*) ha_thd()->alloc(sizeof(uint) * num_of_keys); + uint old_num_of_keys= table_arg->s->keys; + uint i; + /* The newly created keys have the last id's */ + for (i= 0; i < num_of_keys; i++) + key_numbers[i]= i + old_num_of_keys; + if (!table_arg->key_info) + table_arg->key_info= key_info; + while (--file >= m_file) + { + (void) (*file)->prepare_drop_index(table_arg, key_numbers, num_of_keys); + (void) (*file)->final_drop_index(table_arg); + } + if (table_arg->key_info == key_info) + table_arg->key_info= NULL; + } + DBUG_RETURN(ret); } diff --git a/sql/handler.h b/sql/handler.h index 325df003215..0e03ea17dde 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -174,6 +174,8 @@ /* These bits are set if different kinds of indexes can be created off-line without re-create of the table (but with a table lock). + Partitioning needs both ADD and DROP to be supported by its underlying + handlers, due to error handling, see bug#57778. */ #define HA_ONLINE_ADD_INDEX_NO_WRITES (1L << 0) /*add index w/lock*/ #define HA_ONLINE_DROP_INDEX_NO_WRITES (1L << 1) /*drop index w/lock*/ -- cgit v1.2.1 From e0a8c25438dac90ec697f5edaa712d9681acf96b Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Thu, 11 Nov 2010 11:34:55 +0100 Subject: Bug#57890: Assertion failed: next_insert_id == 0 with on duplicate key update There was a missed corner case in the partitioning handler, which caused the next_insert_id to be changed in the second level handlers (i.e the hander of a partition), which caused this debug assertion. The solution was to always ensure that only the partitioning level generates auto_increment values, since if it was done within a partition, it may fail to match the partition function. 
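As a rough illustration of the approach, here is a standalone sketch (not the
server code; the struct names and the flag value are stand-ins for THD, TABLE
and MODE_NO_AUTO_VALUE_ON_ZERO): the write path saves the session state,
blocks auto_increment generation in the nested partition handler, and always
restores the state afterwards.

  /* Standalone sketch of the guard pattern; all names below are stand-ins. */
  #include <iostream>

  static const unsigned long NO_AUTO_VALUE_ON_ZERO= 1UL << 0;  /* stand-in value */

  struct Session { unsigned long sql_mode= 0; };   /* stands in for THD */
  struct Table                                     /* stands in for TABLE */
  {
    bool auto_increment_field_not_null= false;
    long long next_number_value= 0;  /* stands in for next_number_field->val_int() */
  };

  /* Stands in for the nested, per-partition handler::ha_write_row() call. */
  static int partition_write_row(const Session &s, const Table &t)
  {
    /* With the mode flag set, a zero value must be stored as-is, not replaced. */
    bool would_generate= (t.next_number_value == 0) &&
                         !(s.sql_mode & NO_AUTO_VALUE_ON_ZERO);
    return would_generate ? 1 : 0;   /* non-zero: partition picked its own value */
  }

  static int partitioned_write_row(Session &s, Table &t)
  {
    unsigned long saved_sql_mode= s.sql_mode;
    bool saved_not_null= t.auto_increment_field_not_null;

    if (t.next_number_value == 0)    /* e.g. after SET INSERT_ID= 0 */
    {
      t.auto_increment_field_not_null= true;
      s.sql_mode|= NO_AUTO_VALUE_ON_ZERO;
    }
    int error= partition_write_row(s, t);

    s.sql_mode= saved_sql_mode;      /* always restore the session state */
    t.auto_increment_field_not_null= saved_not_null;
    return error;
  }

  int main()
  {
    Session s;
    Table t;
    std::cout << partitioned_write_row(s, t) << "\n";  /* prints 0 */
  }

Relying on the NO_AUTO_VALUE_ON_ZERO semantics means a zero in the
auto_increment column is kept as-is by the partition's handler instead of
being replaced by a newly generated value, so the stored value stays
consistent with the partition already chosen for the row.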
mysql-test/suite/parts/inc/partition_auto_increment.inc:
  Added tests
mysql-test/suite/parts/r/partition_auto_increment_blackhole.result:
  updated results
mysql-test/suite/parts/r/partition_auto_increment_innodb.result:
  updated results
mysql-test/suite/parts/r/partition_auto_increment_memory.result:
  updated results
mysql-test/suite/parts/r/partition_auto_increment_myisam.result:
  updated results
sql/ha_partition.cc:
  In ::write_row the auto_inc value is generated through
  handler::update_auto_increment (which calls ::get_auto_increment() if
  needed). If:
  * INSERT_ID was set to 0
  * it was updated to 0 by 'INSERT ... ON DUPLICATE KEY UPDATE' and the row
    changed partitions
  then ::write_row would try to generate an auto_increment value, which will
  trigger the assert. The solution is to prevent this by, in
  ha_partition::write_row, setting auto_inc_field_not_null and adding
  MODE_NO_AUTO_VALUE_ON_ZERO, and in ha_partition::update_row (when changing
  partition), temporarily setting table->next_number_field to NULL while
  calling the partition's ::write_row().
---
 sql/ha_partition.cc | 37 +++++++++++++++++++++++++++++++++++--
 1 file changed, 35 insertions(+), 2 deletions(-)

(limited to 'sql')

diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 24c0f3e71f2..607f4ce2143 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -3050,7 +3050,9 @@ int ha_partition::write_row(uchar * buf)
   my_bitmap_map *old_map;
   HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
   THD *thd= ha_thd();
-  timestamp_auto_set_type orig_timestamp_type= table->timestamp_field_type;
+  timestamp_auto_set_type saved_timestamp_type= table->timestamp_field_type;
+  ulong saved_sql_mode= thd->variables.sql_mode;
+  bool saved_auto_inc_field_not_null= table->auto_increment_field_not_null;
 #ifdef NOT_NEEDED
   uchar *rec0= m_rec0;
 #endif
@@ -3086,6 +3088,22 @@ int ha_partition::write_row(uchar * buf)
     */
     if (error)
       goto exit;
+
+    /*
+      Don't allow generation of auto_increment value the partitions handler.
+      If a partitions handler would change the value, then it might not
+      match the partition any longer.
+      This can occur if 'SET INSERT_ID = 0; INSERT (NULL)',
+      So allow this by adding 'MODE_NO_AUTO_VALUE_ON_ZERO' to sql_mode.
+      The partitions handler::next_insert_id must always be 0. Otherwise
+      we need to forward release_auto_increment, or reset it for all
+      partitions.
+    */
+    if (table->next_number_field->val_int() == 0)
+    {
+      table->auto_increment_field_not_null= TRUE;
+      thd->variables.sql_mode|= MODE_NO_AUTO_VALUE_ON_ZERO;
+    }
   }
 
   old_map= dbug_tmp_use_all_columns(table, table->read_set);
@@ -3119,7 +3137,9 @@ int ha_partition::write_row(uchar * buf)
     set_auto_increment_if_higher(table->next_number_field);
   reenable_binlog(thd);
 exit:
-  table->timestamp_field_type= orig_timestamp_type;
+  thd->variables.sql_mode= saved_sql_mode;
+  table->auto_increment_field_not_null= saved_auto_inc_field_not_null;
+  table->timestamp_field_type= saved_timestamp_type;
   DBUG_RETURN(error);
 }
 
@@ -3186,11 +3206,24 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
   }
   else
   {
+    Field *saved_next_number_field= table->next_number_field;
+    /*
+      Don't allow generation of auto_increment value for update.
+      table->next_number_field is never set on UPDATE.
+      But is set for INSERT ... ON DUPLICATE KEY UPDATE,
+      and since update_row() does not generate or update an auto_inc value,
+      we cannot have next_number_field set when moving a row
+      to another partition with write_row(), since that could
+      generate/update the auto_inc value.
+ This gives the same behavior for partitioned vs non partitioned tables. + */ + table->next_number_field= NULL; DBUG_PRINT("info", ("Update from partition %d to partition %d", old_part_id, new_part_id)); tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */ error= m_file[new_part_id]->ha_write_row(new_data); reenable_binlog(thd); + table->next_number_field= saved_next_number_field; if (error) goto exit; -- cgit v1.2.1 From 41dc34d60b7e924ace2c6a044189aaa345cf2820 Mon Sep 17 00:00:00 2001 From: Marc Alff Date: Fri, 12 Nov 2010 07:23:26 +0100 Subject: Bug#58052 Binary log IO not being accounted for properly Before this fix, file io for the binary log file was not accounted properly, and showed no io at all. This bug was due to the following issues: 1) file io for the binlog was instrumented: - sometime as "wait/io/file/sql/binlog" - sometime as "wait/io/file/sql/MYSQL_LOG" leading to inconsistent event_names. 2) the binlog file itself was using an IO_CACHE, but the IO_CACHE implementation in mysys/mf_iocache.c was not instrumented to make performance schema calls to record file io. 3) The "wait/io/file/sql/MYSQL_LOG" instrumentation was used for several log files, such as: - the binary log - the slow log - the query log which caused file io in these different log files to be accounted against the same instrument. The instrumentation needs to have a finer grain and report io in different event_names, because each file really serves a different purpose. With this fix: - the IO_CACHE implementation is now instrumented - the "wait/io/file/sql/MYSQL_LOG" instrument has been removed - binlog io is now always instrumented with "wait/io/file/sql/binlog" - the slow log is instrumented with a new name, "wait/io/file/sql/slow_log" - the query log is instrumented with a new name, "wait/io/file/sql/query_log" --- sql/log.cc | 23 ++++++++++++++++++----- sql/log.h | 26 +++++++++++++++++++++----- sql/mysqld.cc | 6 ++++-- sql/mysqld.h | 3 ++- 4 files changed, 45 insertions(+), 13 deletions(-) (limited to 'sql') diff --git a/sql/log.cc b/sql/log.cc index ae0cb813742..bfc5018b556 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2177,7 +2177,11 @@ bool MYSQL_LOG::init_and_set_log_file_name(const char *log_name, 1 error */ -bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, +bool MYSQL_LOG::open( +#ifdef HAVE_PSI_INTERFACE + PSI_file_key log_file_key, +#endif + const char *log_name, enum_log_type log_type_arg, const char *new_name, enum cache_type io_cache_type_arg) { char buff[FN_REFLEN]; @@ -2205,7 +2209,12 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, db[0]= 0; - if ((file= mysql_file_open(key_file_MYSQL_LOG, +#ifdef HAVE_PSI_INTERFACE + /* Keep the key for reopen */ + m_log_file_key= log_file_key; +#endif + + if ((file= mysql_file_open(log_file_key, log_file_name, open_flags, MYF(MY_WME | ME_WAITTANG))) < 0 || init_io_cache(&log_file, file, IO_SIZE, io_cache_type, @@ -2389,7 +2398,11 @@ void MYSQL_QUERY_LOG::reopen_file() Note that at this point, log_state != LOG_CLOSED (important for is_open()). 
*/ - open(save_name, log_type, 0, io_cache_type); + open( +#ifdef HAVE_PSI_INTERFACE + m_log_file_key, +#endif + save_name, log_type, 0, io_cache_type); my_free(save_name); mysql_mutex_unlock(&LOCK_log); @@ -2855,8 +2868,8 @@ bool MYSQL_BIN_LOG::open(const char *log_name, write_error= 0; /* open the main log file */ - if (MYSQL_LOG::open(log_name, log_type_arg, new_name, - io_cache_type_arg)) + if (MYSQL_LOG::open(key_file_binlog, + log_name, log_type_arg, new_name, io_cache_type_arg)) { #ifdef HAVE_REPLICATION close_purge_index_file(); diff --git a/sql/log.h b/sql/log.h index 89b3594cd1e..d824d3afa26 100644 --- a/sql/log.h +++ b/sql/log.h @@ -196,7 +196,11 @@ public: MYSQL_LOG(); void init_pthread_objects(); void cleanup(); - bool open(const char *log_name, + bool open( +#ifdef HAVE_PSI_INTERFACE + PSI_file_key log_file_key, +#endif + const char *log_name, enum_log_type log_type, const char *new_name, enum cache_type io_cache_type_arg); @@ -223,6 +227,10 @@ public: volatile enum_log_state log_state; enum cache_type io_cache_type; friend class Log_event; +#ifdef HAVE_PSI_INTERFACE + /** Instrumentation key to use for file io in @c log_file */ + PSI_file_key m_log_file_key; +#endif }; class MYSQL_QUERY_LOG: public MYSQL_LOG @@ -241,14 +249,22 @@ public: bool open_slow_log(const char *log_name) { char buf[FN_REFLEN]; - return open(generate_name(log_name, "-slow.log", 0, buf), LOG_NORMAL, 0, - WRITE_CACHE); + return open( +#ifdef HAVE_PSI_INTERFACE + key_file_slow_log, +#endif + generate_name(log_name, "-slow.log", 0, buf), + LOG_NORMAL, 0, WRITE_CACHE); } bool open_query_log(const char *log_name) { char buf[FN_REFLEN]; - return open(generate_name(log_name, ".log", 0, buf), LOG_NORMAL, 0, - WRITE_CACHE); + return open( +#ifdef HAVE_PSI_INTERFACE + key_file_query_log, +#endif + generate_name(log_name, ".log", 0, buf), + LOG_NORMAL, 0, WRITE_CACHE); } private: diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 9f5f3d46e67..04e61635f22 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -7838,9 +7838,10 @@ PSI_file_key key_file_binlog, key_file_binlog_index, key_file_casetest, key_file_dbopt, key_file_des_key_file, key_file_ERRMSG, key_select_to_file, key_file_fileparser, key_file_frm, key_file_global_ddl_log, key_file_load, key_file_loadfile, key_file_log_event_data, key_file_log_event_info, - key_file_master_info, key_file_misc, key_file_MYSQL_LOG, key_file_partition, + key_file_master_info, key_file_misc, key_file_partition, key_file_pid, key_file_relay_log_info, key_file_send_file, key_file_tclog, key_file_trg, key_file_trn, key_file_init; +PSI_file_key key_file_query_log, key_file_slow_log; static PSI_file_info all_server_files[]= { @@ -7863,11 +7864,12 @@ static PSI_file_info all_server_files[]= { &key_file_log_event_info, "log_event_info", 0}, { &key_file_master_info, "master_info", 0}, { &key_file_misc, "misc", 0}, - { &key_file_MYSQL_LOG, "MYSQL_LOG", 0}, { &key_file_partition, "partition", 0}, { &key_file_pid, "pid", 0}, + { &key_file_query_log, "query_log", 0}, { &key_file_relay_log_info, "relay_log_info", 0}, { &key_file_send_file, "send_file", 0}, + { &key_file_slow_log, "slow_log", 0}, { &key_file_tclog, "tclog", 0}, { &key_file_trg, "trigger_name", 0}, { &key_file_trn, "trigger", 0}, diff --git a/sql/mysqld.h b/sql/mysqld.h index dc9f94c0d03..113bc3aa983 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -270,9 +270,10 @@ extern PSI_file_key key_file_binlog, key_file_binlog_index, key_file_casetest, key_file_dbopt, key_file_des_key_file, key_file_ERRMSG, key_select_to_file, 
   key_file_fileparser, key_file_frm, key_file_global_ddl_log, key_file_load,
   key_file_loadfile, key_file_log_event_data, key_file_log_event_info,
-  key_file_master_info, key_file_misc, key_file_MYSQL_LOG, key_file_partition,
+  key_file_master_info, key_file_misc, key_file_partition,
   key_file_pid, key_file_relay_log_info, key_file_send_file, key_file_tclog,
   key_file_trg, key_file_trn, key_file_init;
+extern PSI_file_key key_file_query_log, key_file_slow_log;
 
 void init_server_psi_keys();
 #endif /* HAVE_PSI_INTERFACE */
-- cgit v1.2.1


From 3fa437cf4061c20f2995e859b89a6898d3b646b4 Mon Sep 17 00:00:00 2001
From: Alexander Nozdrin
Date: Sat, 13 Nov 2010 18:05:02 +0300
Subject: Fix for Bug#56934 (mysql_stmt_fetch() incorrectly fills MYSQL_TIME structure buffer).

This is a follow-up for WL#4435.

The bug actually existed not only for the MYSQL_TYPE_DATETIME type. The
problem was that Item_param::set_value() was written under the assumption
that it works with expressions, i.e. with basic data types.

There are two different quick fixes here:

a) Change Item_param::make_field() -- remove setting of
Send_field::length, Send_field::charsetnr, Send_field::flags and
Send_field::type.

That would lead to marshalling all data using basic types to the client
(MYSQL_TYPE_LONGLONG, MYSQL_TYPE_DOUBLE, MYSQL_TYPE_STRING and
MYSQL_TYPE_NEWDECIMAL). In particular, that means DATETIME would be sent
as MYSQL_TYPE_STRING, TINYINT as MYSQL_TYPE_LONGLONG, etc.

That could be OK for the client, because the client library does the
reverse conversion automatically (the client program would see DATETIME as
a MYSQL_TIME object). However, there is a problem with metadata -- the
metadata would be wrong (misleading): it would say that DATETIME is
marshalled as MYSQL_TYPE_DATETIME, not as MYSQL_TYPE_STRING.

b) Set Item_param::param_type properly to the actual underlying field
type. That would lead to double conversion inside the server: for example,
a MYSQL_TIME object would be converted into a STRING object (in
Item_param::set_value()), and then converted back to a MYSQL_TIME object
(in Item_param::send()). The data however would be marshalled more
properly, and the metadata would also be correct.

This patch implements b).

There is also a possibility to avoid the double conversion, either by
cloning the data field or by storing a reference to it and using it at
Item::send() time. That requires more work and might be done later.
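To make the client-side effect concrete, here is a minimal, hypothetical
fragment (invented table and column names, error handling omitted) showing
how a prepared-statement client ends up with a MYSQL_TIME structure for a
DATETIME column; this is why the type reported in the result metadata
matters:

  #include <mysql.h>
  #include <cstdio>
  #include <cstring>

  void fetch_datetime(MYSQL *con)
  {
    MYSQL_STMT *stmt= mysql_stmt_init(con);
    const char *query= "SELECT dt_col FROM t1";  /* hypothetical table/column */
    mysql_stmt_prepare(stmt, query, strlen(query));
    mysql_stmt_execute(stmt);

    MYSQL_TIME tm;
    MYSQL_BIND bind;
    memset(&bind, 0, sizeof(bind));
    bind.buffer_type= MYSQL_TYPE_DATETIME;       /* what the metadata should say */
    bind.buffer= (char*) &tm;
    mysql_stmt_bind_result(stmt, &bind);

    while (mysql_stmt_fetch(stmt) == 0)          /* fills the MYSQL_TIME buffer */
      printf("%04u-%02u-%02u %02u:%02u:%02u\n",
             tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
    mysql_stmt_close(stmt);
  }

With approach b) the metadata and the marshalled data agree, so a bind like
the one above is filled as expected.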
--- sql/item.cc | 9 ++------- sql/sp_rcontext.h | 1 + sql/sql_prepare.cc | 2 +- 3 files changed, 4 insertions(+), 8 deletions(-) (limited to 'sql') diff --git a/sql/item.cc b/sql/item.cc index 3594bf45798..71e495fd259 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -226,8 +226,6 @@ bool Item::val_bool() */ String *Item::val_str_ascii(String *str) { - DBUG_ASSERT(fixed == 1); - if (!(collation.collation->state & MY_CS_NONASCII)) return val_str(str); @@ -3459,19 +3457,16 @@ Item_param::set_value(THD *thd, sp_rcontext *ctx, Item **it) str_value.charset()); collation.set(str_value.charset(), DERIVATION_COERCIBLE); decimals= 0; - param_type= MYSQL_TYPE_STRING; break; } case REAL_RESULT: set_double(arg->val_real()); - param_type= MYSQL_TYPE_DOUBLE; break; case INT_RESULT: set_int(arg->val_int(), arg->max_length); - param_type= MYSQL_TYPE_LONG; break; case DECIMAL_RESULT: @@ -3483,8 +3478,6 @@ Item_param::set_value(THD *thd, sp_rcontext *ctx, Item **it) return TRUE; set_decimal(dv); - param_type= MYSQL_TYPE_NEWDECIMAL; - break; } @@ -3516,6 +3509,7 @@ void Item_param::set_out_param_info(Send_field *info) { m_out_param_info= info; + param_type= m_out_param_info->type; } @@ -3561,6 +3555,7 @@ void Item_param::make_field(Send_field *field) field->org_table_name= m_out_param_info->org_table_name; field->col_name= m_out_param_info->col_name; field->org_col_name= m_out_param_info->org_col_name; + field->length= m_out_param_info->length; field->charsetnr= m_out_param_info->charsetnr; field->flags= m_out_param_info->flags; diff --git a/sql/sp_rcontext.h b/sql/sp_rcontext.h index 1af758ed0af..ec8d82063e4 100644 --- a/sql/sp_rcontext.h +++ b/sql/sp_rcontext.h @@ -220,6 +220,7 @@ private: during execution. */ bool m_return_value_set; + /** TRUE if the context is created for a sub-statement. */ diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index fcbf2c48896..6a84d050d7f 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1185,7 +1185,7 @@ static bool insert_params_from_vars_with_log(Prepared_statement *stmt, uint32 length= 0; THD *thd= stmt->thd; - DBUG_ENTER("insert_params_from_vars"); + DBUG_ENTER("insert_params_from_vars_with_log"); if (query->copy(stmt->query(), stmt->query_length(), default_charset_info)) DBUG_RETURN(1); -- cgit v1.2.1 From f0c2b9c5c60973ba89eff82a6c7fb9f54e1beee2 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Sat, 13 Nov 2010 23:16:52 +0100 Subject: add missing COMPONENT to all CMake INSTALL commands --- sql/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 0860adde652..2c782380baf 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -283,7 +283,7 @@ IF(WIN32 AND MYSQLD_EXECUTABLE) COMPONENT DataFiles PATTERN "initdb.dep" EXCLUDE PATTERN "bootstrap.sql" EXCLUDE) ELSE() # Not windows or cross compiling, just install an empty directory - INSTALL(FILES ${DUMMY_FILE} DESTINATION data/mysql) + INSTALL(FILES ${DUMMY_FILE} DESTINATION data/mysql COMPONENT DataFiles) ENDIF() ENDIF() -- cgit v1.2.1 From 1945734c2de6853a2921e3560fe7c956ca286e0e Mon Sep 17 00:00:00 2001 From: Jorgen Loland Date: Mon, 15 Nov 2010 16:18:04 +0100 Subject: Bug#54812: assert in Diagnostics_area::set_ok_status during EXPLAIN Before the patch, send_eof() of some subclasses of select_result (e.g., select_send::send_eof()) could handle being called after an error had occured while others could not. The methods that were not well-behaved would trigger an ASSERT on debug builds. 
Release builds were not affected. Consider the following query as an example for how the ASSERT could be triggered: A user without execute privilege on f() does SELECT MAX(key1) INTO @dummy FROM t1 WHERE f() < 1; resulting in "ERROR 42000: execute command denied to user..." The server would end the query by calling send_eof(). The fact that the error had occured would make the ASSERT trigger. select_dumpvar::send_eof() was the offending method in the bug report, but the problem also applied to other subclasses of select_result. This patch uniforms send_eof() of all subclasses of select_result to handle being called after an error has occured. mysql-test/r/not_embedded_server.result: Added test for BUG#54812 mysql-test/t/not_embedded_server.test: Added test for BUG#54812 sql/sql_class.cc: send_eof() of all subclasses of select_result can now handle being called after an error has occured. sql/sql_insert.cc: send_eof() of all subclasses of select_result can now handle being called after an error has occured. Also fix call to abort() in select_create::send_eof(), which was supposed to abort the result set, not terminate the server. This call to abort() should have been changed when the function was renamed from abort_result_set() but was forgotten. New test case added by BUG#54812 covered this line and terminated server. sql/sql_prepare.cc: send_eof() of all subclasses of select_result can now handle being called after an error has occured. sql/sql_update.cc: send_eof() of all subclasses of select_result can now handle being called after an error has occured. --- sql/sql_class.cc | 12 ++++++++++-- sql/sql_insert.cc | 7 +++++-- sql/sql_prepare.cc | 9 ++++++++- sql/sql_update.cc | 4 +++- 4 files changed, 26 insertions(+), 6 deletions(-) (limited to 'sql') diff --git a/sql/sql_class.cc b/sql/sql_class.cc index daf9217a5a1..fc4ab1bd27b 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1842,8 +1842,9 @@ void select_to_file::send_error(uint errcode,const char *err) bool select_to_file::send_eof() { int error= test(end_io_cache(&cache)); - if (mysql_file_close(file, MYF(MY_WME))) - error= 1; + if (mysql_file_close(file, MYF(MY_WME)) || thd->is_error()) + error= true; + if (!error) { ::my_ok(thd,row_count); @@ -2884,6 +2885,13 @@ bool select_dumpvar::send_eof() if (! row_count) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_SP_FETCH_NO_DATA, ER(ER_SP_FETCH_NO_DATA)); + /* + Don't send EOF if we're in error condition (which implies we've already + sent or are sending an error) + */ + if (thd->is_error()) + return true; + ::my_ok(thd,row_count); return 0; } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 1f8da3fab5c..a81c9ae15a0 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -3506,6 +3506,9 @@ bool select_insert::send_eof() error= (thd->locked_tables_mode <= LTM_LOCK_TABLES ? table->file->ha_end_bulk_insert() : 0); + if (!error && thd->is_error()) + error= thd->stmt_da->sql_errno(); + table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); @@ -4049,7 +4052,7 @@ bool select_create::send_eof() { bool tmp=select_insert::send_eof(); if (tmp) - abort(); + abort_result_set(); else { /* @@ -4081,7 +4084,7 @@ void select_create::abort_result_set() DBUG_ENTER("select_create::abort_result_set"); /* - In select_insert::abort() we roll back the statement, including + In select_insert::abort_result_set() we roll back the statement, including truncating the transaction cache of the binary log. 
To do this, we pretend that the statement is transactional, even though it might be the case that it was not. diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 6a84d050d7f..851782c623f 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -2898,8 +2898,15 @@ bool Select_fetch_protocol_binary::send_result_set_metadata(List &list, ui bool Select_fetch_protocol_binary::send_eof() { + /* + Don't send EOF if we're in error condition (which implies we've already + sent or are sending an error) + */ + if (thd->is_error()) + return true; + ::my_eof(thd); - return FALSE; + return false; } diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 96b1ac67b49..8cc8d511fd6 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -2064,7 +2064,9 @@ bool multi_update::send_eof() Does updates for the last n - 1 tables, returns 0 if ok; error takes into account killed status gained in do_updates() */ - int local_error = (table_count) ? do_updates() : 0; + int local_error= thd->is_error(); + if (!local_error) + local_error = (table_count) ? do_updates() : 0; /* if local_error is not set ON until after do_updates() then later carried out killing should not affect binlogging. -- cgit v1.2.1
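The guard that the last patch (Bug#54812) adds to the various send_eof()
implementations can be restated as a standalone sketch (not server code; the
struct below merely stands in for the THD diagnostics area): check whether an
error has already been recorded before reporting success.

  #include <iostream>

  struct Diagnostics                  /* stands in for the THD diagnostics area */
  {
    bool error_reported= false;
    bool ok_sent= false;
    void set_error() { error_reported= true; }
    void set_ok() { ok_sent= true; }  /* asserts !error_reported in the server */
    bool is_error() const { return error_reported; }
  };

  /* Stands in for select_dumpvar::send_eof() and the other result sinks. */
  static bool send_eof(Diagnostics &da)
  {
    if (da.is_error())
      return true;                    /* already failed: do not send OK on top */
    da.set_ok();
    return false;
  }

  int main()
  {
    Diagnostics da;
    da.set_error();                   /* e.g. "execute command denied to user" */
    std::cout << (send_eof(da) ? "eof suppressed after error" : "ok sent") << "\n";
  }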