path: root/sql
author    Sergei Golubchik <serg@mariadb.org>  2014-10-15 12:59:13 +0200
committer Sergei Golubchik <serg@mariadb.org>  2014-10-15 12:59:13 +0200
commit    f62c12b405ba7ec80b8e2490856b83c6f5899211 (patch)
tree      010605c7f145da6ea6ac14b39abc4cf700d619b1 /sql
parent    f947f73b2b6d2bd246b81a9038224d2a85777520 (diff)
parent    f1afc003eefe0aafd3e070c7453d9e029d8445a8 (diff)
download  mariadb-git-f62c12b405ba7ec80b8e2490856b83c6f5899211.tar.gz
Merge 10.0.14 into 10.1
Diffstat (limited to 'sql')
-rw-r--r--  sql/CMakeLists.txt | 21
-rw-r--r--  sql/create_options.cc | 17
-rw-r--r--  sql/create_options.h | 2
-rw-r--r--  sql/event_parse_data.cc | 2
-rw-r--r--  sql/field.h | 23
-rw-r--r--  sql/filesort.cc | 5
-rw-r--r--  sql/ha_partition.cc | 5
-rw-r--r--  sql/handler.cc | 31
-rw-r--r--  sql/item.cc | 358
-rw-r--r--  sql/item.h | 428
-rw-r--r--  sql/item_cmpfunc.h | 18
-rw-r--r--  sql/item_create.cc | 50
-rw-r--r--  sql/item_create.h | 9
-rw-r--r--  sql/item_func.cc | 101
-rw-r--r--  sql/item_func.h | 17
-rw-r--r--  sql/item_geofunc.cc | 8
-rw-r--r--  sql/item_geofunc.h | 12
-rw-r--r--  sql/item_strfunc.cc | 252
-rw-r--r--  sql/item_strfunc.h | 30
-rw-r--r--  sql/item_subselect.cc | 5
-rw-r--r--  sql/item_sum.cc | 14
-rw-r--r--  sql/item_timefunc.cc | 2
-rw-r--r--  sql/item_timefunc.h | 10
-rw-r--r--  sql/item_xmlfunc.cc | 39
-rw-r--r--  sql/log.cc | 74
-rw-r--r--  sql/log_event.cc | 211
-rw-r--r--  sql/log_event.h | 11
-rw-r--r--  sql/log_event_old.cc | 22
-rw-r--r--  sql/log_slow.h | 2
-rw-r--r--  sql/mf_iocache.cc | 2
-rw-r--r--  sql/mysqld.cc | 152
-rw-r--r--  sql/mysqld.h | 17
-rw-r--r--  sql/net_serv.cc | 43
-rw-r--r--  sql/opt_range.cc | 279
-rw-r--r--  sql/opt_range.h | 15
-rw-r--r--  sql/records.cc | 3
-rw-r--r--  sql/replication.h | 14
-rw-r--r--  sql/rpl_gtid.cc | 18
-rw-r--r--  sql/rpl_handler.cc | 79
-rw-r--r--  sql/rpl_handler.h | 5
-rw-r--r--  sql/rpl_mi.cc | 13
-rw-r--r--  sql/rpl_mi.h | 6
-rw-r--r--  sql/rpl_parallel.cc | 644
-rw-r--r--  sql/rpl_parallel.h | 32
-rw-r--r--  sql/rpl_record.cc | 1
-rw-r--r--  sql/rpl_record_old.cc | 4
-rw-r--r--  sql/rpl_reporting.cc | 4
-rw-r--r--  sql/rpl_reporting.h | 5
-rw-r--r--  sql/rpl_rli.cc | 178
-rw-r--r--  sql/rpl_rli.h | 71
-rw-r--r--  sql/rpl_utility.cc | 13
-rw-r--r--  sql/rpl_utility.h | 6
-rw-r--r--  sql/scheduler.cc | 4
-rw-r--r--  sql/scheduler.h | 6
-rw-r--r--  sql/set_var.h | 4
-rw-r--r--  sql/share/errmsg-utf8.txt | 29
-rw-r--r--  sql/slave.cc | 313
-rw-r--r--  sql/slave.h | 13
-rw-r--r--  sql/sp_head.cc | 20
-rw-r--r--  sql/sp_head.h | 32
-rw-r--r--  sql/sql_acl.cc | 22
-rw-r--r--  sql/sql_admin.cc | 2
-rw-r--r--  sql/sql_base.cc | 119
-rw-r--r--  sql/sql_base.h | 3
-rw-r--r--  sql/sql_cache.cc | 4
-rw-r--r--  sql/sql_class.cc | 401
-rw-r--r--  sql/sql_class.h | 30
-rw-r--r--  sql/sql_delete.cc | 3
-rw-r--r--  sql/sql_derived.cc | 8
-rw-r--r--  sql/sql_explain.cc | 20
-rw-r--r--  sql/sql_get_diagnostics.cc | 6
-rw-r--r--  sql/sql_help.cc | 2
-rw-r--r--  sql/sql_insert.cc | 28
-rw-r--r--  sql/sql_join_cache.cc | 2
-rw-r--r--  sql/sql_lex.cc | 19
-rw-r--r--  sql/sql_lex.h | 6
-rw-r--r--  sql/sql_parse.cc | 26
-rw-r--r--  sql/sql_partition.cc | 43
-rw-r--r--  sql/sql_plugin.cc | 18
-rw-r--r--  sql/sql_reload.cc | 57
-rw-r--r--  sql/sql_repl.cc | 10
-rw-r--r--  sql/sql_select.cc | 166
-rw-r--r--  sql/sql_select.h | 4
-rw-r--r--  sql/sql_show.cc | 299
-rw-r--r--  sql/sql_show.h | 8
-rw-r--r--  sql/sql_statistics.cc | 16
-rw-r--r--  sql/sql_statistics.h | 6
-rw-r--r--  sql/sql_string.cc | 15
-rw-r--r--  sql/sql_string.h | 23
-rw-r--r--  sql/sql_table.cc | 108
-rw-r--r--  sql/sql_table.h | 3
-rw-r--r--  sql/sql_test.cc | 5
-rw-r--r--  sql/sql_union.cc | 3
-rw-r--r--  sql/sql_update.cc | 8
-rw-r--r--  sql/sql_view.cc | 7
-rw-r--r--  sql/sql_yacc.yy | 124
-rw-r--r--  sql/sys_vars.cc | 70
-rw-r--r--  sql/sys_vars.h | 33
-rw-r--r--  sql/table.cc | 74
-rw-r--r--  sql/table.h | 2
-rw-r--r--  sql/table_cache.cc | 2
-rw-r--r--  sql/unireg.cc | 8
-rw-r--r--  sql/unireg.h | 2
-rw-r--r--  sql/wsrep_mysqld.cc | 2
104 files changed, 3761 insertions, 1860 deletions
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 32499662a7c..1c41ff481aa 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -51,7 +51,8 @@ ${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h
SET_SOURCE_FILES_PROPERTIES(${GEN_SOURCES} PROPERTIES GENERATED 1)
-ADD_DEFINITIONS(-DMYSQL_SERVER -DHAVE_EVENT_SCHEDULER -DHAVE_POOL_OF_THREADS)
+ADD_DEFINITIONS(-DMYSQL_SERVER -DHAVE_EVENT_SCHEDULER)
+
IF(SSL_DEFINES)
ADD_DEFINITIONS(${SSL_DEFINES})
ENDIF()
@@ -119,10 +120,16 @@ SET (SQL_SOURCE
${MYSYS_LIBWRAP_SOURCE}
)
-IF(WIN32)
- SET(SQL_SOURCE ${SQL_SOURCE} threadpool_win.cc)
-ELSE()
- SET(SQL_SOURCE ${SQL_SOURCE} threadpool_unix.cc)
+IF (CMAKE_SYSTEM_NAME MATCHES "Linux" OR
+ CMAKE_SYSTEM_NAME MATCHES "Windows" OR
+ CMAKE_SYSTEM_NAME MATCHES "SunOS" OR
+ HAVE_KQUEUE)
+ ADD_DEFINITIONS(-DHAVE_POOL_OF_THREADS)
+ IF(WIN32)
+ SET(SQL_SOURCE ${SQL_SOURCE} threadpool_win.cc)
+ ELSE()
+ SET(SQL_SOURCE ${SQL_SOURCE} threadpool_unix.cc)
+ ENDIF()
ENDIF()
MYSQL_ADD_PLUGIN(partition ha_partition.cc STORAGE_ENGINE DEFAULT STATIC_ONLY
@@ -241,7 +248,9 @@ RUN_BISON(
)
# Gen_lex_hash
-ADD_EXECUTABLE(gen_lex_hash gen_lex_hash.cc)
+IF(NOT CMAKE_CROSSCOMPILING)
+ ADD_EXECUTABLE(gen_lex_hash gen_lex_hash.cc)
+ENDIF()
ADD_CUSTOM_COMMAND(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h
diff --git a/sql/create_options.cc b/sql/create_options.cc
index efae87e7533..5800003ed49 100644
--- a/sql/create_options.cc
+++ b/sql/create_options.cc
@@ -775,3 +775,20 @@ engine_option_value *merge_engine_table_options(engine_option_value *first,
&first, &end);
DBUG_RETURN(first);
}
+
+bool is_engine_option_known(engine_option_value *opt,
+ ha_create_table_option *rules)
+{
+ if (!rules)
+ return false;
+
+ for (; rules->name; rules++)
+ {
+ if (!my_strnncoll(system_charset_info,
+ (uchar*)rules->name, rules->name_length,
+ (uchar*)opt->name.str, opt->name.length))
+ return true;
+ }
+ return false;
+}
+
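The new helper only answers "is this option name declared by the engine?". A hedged sketch of a possible caller follows; warn_unknown_options() and its THD plumbing are illustrative inventions, only is_engine_option_known() comes from this patch (the sketch also assumes engine_option_value lists are linked through their next pointer, as elsewhere in create_options.cc):

    /* Hypothetical caller: warn about option names the engine does not declare. */
    static void warn_unknown_options(THD *thd, engine_option_value *opts,
                                     ha_create_table_option *rules)
    {
      for (engine_option_value *opt= opts; opt; opt= opt->next)
      {
        if (!is_engine_option_known(opt, rules))
          push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
                              ER_UNKNOWN_ERROR,
                              "Unknown option '%s'", opt->name.str);
      }
    }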
diff --git a/sql/create_options.h b/sql/create_options.h
index d6b48822c49..eb21f291ff4 100644
--- a/sql/create_options.h
+++ b/sql/create_options.h
@@ -99,4 +99,6 @@ uchar *engine_table_options_frm_image(uchar *buff,
bool engine_options_differ(void *old_struct, void *new_struct,
ha_create_table_option *rules);
+bool is_engine_option_known(engine_option_value *opt,
+ ha_create_table_option *rules);
#endif
diff --git a/sql/event_parse_data.cc b/sql/event_parse_data.cc
index 44d89887c3b..4e2d5d3f5bf 100644
--- a/sql/event_parse_data.cc
+++ b/sql/event_parse_data.cc
@@ -564,7 +564,7 @@ Event_parse_data::init_definer(THD *thd)
void Event_parse_data::check_originator_id(THD *thd)
{
/* Disable replicated events on slave. */
- if (IF_WSREP(WSREP(thd) && thd->wsrep_applier, 0) ||
+ if ((WSREP(thd) && IF_WSREP(thd->wsrep_applier, 0)) ||
(thd->system_thread == SYSTEM_THREAD_SLAVE_SQL) ||
(thd->system_thread == SYSTEM_THREAD_SLAVE_IO))
{
diff --git a/sql/field.h b/sql/field.h
index fed6084fda2..f1679379fe8 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -183,6 +183,29 @@ inline bool is_temporal_type(enum_field_types type)
return mysql_type_to_time_type(type) != MYSQL_TIMESTAMP_ERROR;
}
+
+/**
+ Tests if field type is temporal and has time part,
+ i.e. represents TIME, DATETIME or TIMESTAMP types in SQL.
+
+ @param type Field type, as returned by field->type().
+ @retval true If field type is temporal type with time part.
+ @retval false If field type is not temporal type with time part.
+*/
+inline bool is_temporal_type_with_time(enum_field_types type)
+{
+ switch (type)
+ {
+ case MYSQL_TYPE_TIME:
+ case MYSQL_TYPE_DATETIME:
+ case MYSQL_TYPE_TIMESTAMP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
/*
Virtual_column_info is the class to contain additional
characteristics that is specific for a virtual/computed
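As a quick reference, the classification the new is_temporal_type_with_time() helper performs (an illustrative sketch, not code from the patch; DATE and YEAR carry no time-of-day part):

    // Sketch only: which field types have a time component.
    DBUG_ASSERT(is_temporal_type_with_time(MYSQL_TYPE_TIME));
    DBUG_ASSERT(is_temporal_type_with_time(MYSQL_TYPE_DATETIME));
    DBUG_ASSERT(is_temporal_type_with_time(MYSQL_TYPE_TIMESTAMP));
    DBUG_ASSERT(!is_temporal_type_with_time(MYSQL_TYPE_DATE));
    DBUG_ASSERT(!is_temporal_type_with_time(MYSQL_TYPE_YEAR));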
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 5ca6be2a2f4..23cfd6a1817 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -225,6 +225,8 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
table, num_rows, memory_available))
{
DBUG_PRINT("info", ("filesort PQ is applicable"));
+ thd->query_plan_flags|= QPLAN_FILESORT_PRIORITY_QUEUE;
+ status_var_increment(thd->status_var.filesort_pq_sorts_);
const size_t compare_length= param.sort_length;
if (pq.init(param.max_rows,
true, // max_at_top
@@ -719,6 +721,9 @@ static ha_rows find_all_keys(Sort_param *param, SQL_SELECT *select,
/* Temporary set for register_used_fields and register_field_in_read_map */
sort_form->read_set= &sort_form->tmp_set;
register_used_fields(param);
+ if (quick_select)
+ select->quick->add_used_key_part_to_set(sort_form->read_set);
+
Item *sort_cond= !select ?
0 : !select->pre_idx_push_select_cond ?
select->cond : select->pre_idx_push_select_cond;
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index da7f3aeff89..a63ec65c020 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -8586,8 +8586,7 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong first_value_part, max_first_value;
handler **file= m_file;
first_value_part= max_first_value= *first_value;
- /* Must lock and find highest value among all partitions. */
- lock_auto_increment();
+ /* Must find highest value among all partitions. */
do
{
/* Only nb_desired_values = 1 makes sense */
@@ -8598,7 +8597,6 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment,
*first_value= first_value_part;
/* log that the error was between table/partition handler */
sql_print_error("Partition failed to reserve auto_increment value");
- unlock_auto_increment();
DBUG_VOID_RETURN;
}
DBUG_PRINT("info", ("first_value_part: %lu", (ulong) first_value_part));
@@ -8606,7 +8604,6 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment,
} while (*(++file));
*first_value= max_first_value;
*nb_reserved_values= 1;
- unlock_auto_increment();
}
else
{
diff --git a/sql/handler.cc b/sql/handler.cc
index a24f18f4863..2251b2498e2 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -609,7 +609,19 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
savepoint_alloc_size+= tmp;
hton2plugin[hton->slot]=plugin;
if (hton->prepare)
+ {
total_ha_2pc++;
+ if (tc_log && tc_log != get_tc_log_implementation())
+ {
+ total_ha_2pc--;
+ hton->prepare= 0;
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_UNKNOWN_ERROR,
+ "Cannot enable tc-log at run-time. "
+ "XA features of %s are disabled",
+ plugin->name.str);
+ }
+ }
break;
}
/* fall through */
@@ -3231,15 +3243,10 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
if (error)
{
if (error == HA_ERR_END_OF_FILE || error == HA_ERR_KEY_NOT_FOUND)
- {
- /* No entry found, start with 1. */
- nr= 1;
- }
+ /* No entry found, that's fine */;
else
- {
- DBUG_ASSERT(0);
- nr= ULONGLONG_MAX;
- }
+ print_error(error, MYF(0));
+ nr= 1;
}
else
nr= ((ulonglong) table->next_number_field->
@@ -4736,11 +4743,13 @@ int ha_init_key_cache(const char *name, KEY_CACHE *key_cache, void *unused
uint division_limit= (uint)key_cache->param_division_limit;
uint age_threshold= (uint)key_cache->param_age_threshold;
uint partitions= (uint)key_cache->param_partitions;
+ uint changed_blocks_hash_size= (uint)key_cache->changed_blocks_hash_size;
mysql_mutex_unlock(&LOCK_global_system_variables);
DBUG_RETURN(!init_key_cache(key_cache,
tmp_block_size,
tmp_buff_size,
division_limit, age_threshold,
+ changed_blocks_hash_size,
partitions));
}
DBUG_RETURN(0);
@@ -4761,10 +4770,12 @@ int ha_resize_key_cache(KEY_CACHE *key_cache)
long tmp_block_size= (long) key_cache->param_block_size;
uint division_limit= (uint)key_cache->param_division_limit;
uint age_threshold= (uint)key_cache->param_age_threshold;
+ uint changed_blocks_hash_size= (uint)key_cache->changed_blocks_hash_size;
mysql_mutex_unlock(&LOCK_global_system_variables);
DBUG_RETURN(!resize_key_cache(key_cache, tmp_block_size,
tmp_buff_size,
- division_limit, age_threshold));
+ division_limit, age_threshold,
+ changed_blocks_hash_size));
}
DBUG_RETURN(0);
}
@@ -4804,10 +4815,12 @@ int ha_repartition_key_cache(KEY_CACHE *key_cache)
uint division_limit= (uint)key_cache->param_division_limit;
uint age_threshold= (uint)key_cache->param_age_threshold;
uint partitions= (uint)key_cache->param_partitions;
+ uint changed_blocks_hash_size= (uint)key_cache->changed_blocks_hash_size;
mysql_mutex_unlock(&LOCK_global_system_variables);
DBUG_RETURN(!repartition_key_cache(key_cache, tmp_block_size,
tmp_buff_size,
division_limit, age_threshold,
+ changed_blocks_hash_size,
partitions));
}
DBUG_RETURN(0);
diff --git a/sql/item.cc b/sql/item.cc
index 0fefa54d849..8fc87149bc9 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -686,7 +686,7 @@ void Item::cleanup()
{
DBUG_ENTER("Item::cleanup");
DBUG_PRINT("enter", ("this: %p", this));
- fixed=0;
+ fixed= 0;
marker= 0;
join_tab_idx= MAX_TABLES;
if (orig_name)
@@ -1073,10 +1073,15 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs)
name_length= 0;
return;
}
- if (cs->ctype)
- {
- const char *str_start= str;
+ const char *str_start= str;
+ if (!cs->ctype || cs->mbminlen > 1)
+ {
+ str+= cs->cset->scan(cs, str, str + length, MY_SEQ_SPACES);
+ length-= str - str_start;
+ }
+ else
+ {
/*
This will probably need a better implementation in the future:
a function in CHARSET_INFO structure.
@@ -1086,21 +1091,21 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs)
length--;
str++;
}
- if (str != str_start && !is_autogenerated_name)
- {
- char buff[SAFE_NAME_LEN];
- strmake(buff, str_start,
- MY_MIN(sizeof(buff)-1, length + (int) (str-str_start)));
-
- if (length == 0)
- push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
- ER_NAME_BECOMES_EMPTY, ER(ER_NAME_BECOMES_EMPTY),
- buff);
- else
- push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
- ER_REMOVED_SPACES, ER(ER_REMOVED_SPACES),
- buff);
- }
+ }
+ if (str != str_start && !is_autogenerated_name)
+ {
+ char buff[SAFE_NAME_LEN];
+ strmake(buff, str_start,
+ MY_MIN(sizeof(buff)-1, length + (int) (str-str_start)));
+
+ if (length == 0)
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_NAME_BECOMES_EMPTY, ER(ER_NAME_BECOMES_EMPTY),
+ buff);
+ else
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_REMOVED_SPACES, ER(ER_REMOVED_SPACES),
+ buff);
}
if (!my_charset_same(cs, system_charset_info))
{
@@ -1166,6 +1171,8 @@ bool Item::eq(const Item *item, bool binary_cmp) const
Item *Item::safe_charset_converter(CHARSET_INFO *tocs)
{
+ if (!needs_charset_converter(tocs))
+ return this;
Item_func_conv_charset *conv= new Item_func_conv_charset(this, tocs, 1);
return conv->safe ? conv : NULL;
}
@@ -1192,123 +1199,55 @@ Item *Item_num::safe_charset_converter(CHARSET_INFO *tocs)
if (!(tocs->state & MY_CS_NONASCII))
return this;
- Item_string *conv;
- uint conv_errors;
- char buf[64], buf2[64];
- String tmp(buf, sizeof(buf), &my_charset_bin);
- String cstr(buf2, sizeof(buf2), &my_charset_bin);
- String *ostr= val_str(&tmp);
- char *ptr;
- cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors);
- if (conv_errors || !(conv= new Item_string(cstr.ptr(), cstr.length(),
- cstr.charset(),
- collation.derivation)))
- {
- /*
- Safe conversion is not possible (or EOM).
- We could not convert a string into the requested character set
- without data loss. The target charset does not cover all the
- characters from the string. Operation cannot be done correctly.
- */
- return NULL;
- }
- if (!(ptr= current_thd->strmake(cstr.ptr(), cstr.length())))
- return NULL;
- conv->str_value.set(ptr, cstr.length(), cstr.charset());
- /* Ensure that no one is going to change the result string */
- conv->str_value.mark_as_const();
- conv->fix_char_length(max_char_length());
- return conv;
-}
-
-
-Item *Item_static_float_func::safe_charset_converter(CHARSET_INFO *tocs)
-{
- Item_string *conv;
- char buf[64];
- String *s, tmp(buf, sizeof(buf), &my_charset_bin);
- s= val_str(&tmp);
- if ((conv= new Item_static_string_func(func_name, s->ptr(), s->length(),
- s->charset())))
- {
- conv->str_value.copy();
- conv->str_value.mark_as_const();
- }
+ Item *conv;
+ if ((conv= const_charset_converter(tocs, true)))
+ conv->fix_char_length(max_char_length());
return conv;
}
-Item *Item_string::safe_charset_converter(CHARSET_INFO *tocs)
-{
- return charset_converter(tocs, true);
-}
-
-
/**
- Convert a string item into the requested character set.
+ Create character set converter for constant items
+ using Item_null, Item_string or Item_static_string_func.
@param tocs Character set to convert the string to.
@param lossless Whether data loss is acceptable.
+ @param func_name Function name, or NULL.
- @return A new item representing the converted string.
+ @return this, if conversion is not needed,
+ NULL, if safe conversion is not possible, or
+ a new item representing the converted constant.
*/
-Item *Item_string::charset_converter(CHARSET_INFO *tocs, bool lossless)
+Item *Item::const_charset_converter(CHARSET_INFO *tocs,
+ bool lossless,
+ const char *func_name)
{
- Item_string *conv;
- uint conv_errors;
- char *ptr;
- String tmp, cstr, *ostr= val_str(&tmp);
- cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors);
- conv_errors= lossless && conv_errors;
- if (conv_errors || !(conv= new Item_string(cstr.ptr(), cstr.length(),
- cstr.charset(),
- collation.derivation)))
- {
- /*
- Safe conversion is not possible (or EOM).
- We could not convert a string into the requested character set
- without data loss. The target charset does not cover all the
- characters from the string. Operation cannot be done correctly.
- */
- return NULL;
- }
- if (!(ptr= current_thd->strmake(cstr.ptr(), cstr.length())))
- return NULL;
- conv->str_value.set(ptr, cstr.length(), cstr.charset());
- /* Ensure that no one is going to change the result string */
- conv->str_value.mark_as_const();
- return conv;
-}
+ DBUG_ASSERT(const_item());
+ DBUG_ASSERT(fixed);
+ StringBuffer<64>tmp;
+ String *s= val_str(&tmp);
+ if (!s)
+ return new Item_null((char *) func_name, tocs);
-Item *Item_param::safe_charset_converter(CHARSET_INFO *tocs)
-{
- if (const_item())
+ if (!needs_charset_converter(s->length(), tocs))
{
- uint cnv_errors;
- String *ostr= val_str(&cnvstr);
- cnvitem->str_value.copy(ostr->ptr(), ostr->length(),
- ostr->charset(), tocs, &cnv_errors);
- if (cnv_errors)
- return NULL;
- cnvitem->str_value.mark_as_const();
- cnvitem->max_length= cnvitem->str_value.numchars() * tocs->mbmaxlen;
- return cnvitem;
+ if (collation.collation == &my_charset_bin && tocs != &my_charset_bin &&
+ !this->check_well_formed_result(s, true))
+ return NULL;
+ return this;
}
- return Item::safe_charset_converter(tocs);
-}
-
-Item *Item_static_string_func::safe_charset_converter(CHARSET_INFO *tocs)
-{
- Item_string *conv;
uint conv_errors;
- String tmp, cstr, *ostr= val_str(&tmp);
- cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors);
- if (conv_errors ||
- !(conv= new Item_static_string_func(func_name,
- cstr.ptr(), cstr.length(),
- cstr.charset(),
- collation.derivation)))
+ Item_string *conv= func_name ?
+ new Item_static_string_func(func_name,
+ s, tocs, &conv_errors,
+ collation.derivation,
+ collation.repertoire) :
+ new Item_string(s, tocs, &conv_errors,
+ collation.derivation,
+ collation.repertoire);
+
+ if (!conv || (conv_errors && lossless))
{
/*
Safe conversion is not possible (or EOM).
@@ -1318,23 +1257,28 @@ Item *Item_static_string_func::safe_charset_converter(CHARSET_INFO *tocs)
*/
return NULL;
}
- conv->str_value.copy();
- /* Ensure that no one is going to change the result string */
- conv->str_value.mark_as_const();
+ if (s->charset() == &my_charset_bin && tocs != &my_charset_bin &&
+ !conv->check_well_formed_result(true))
+ return NULL;
return conv;
}
-bool Item_string::eq(const Item *item, bool binary_cmp) const
+Item *Item_param::safe_charset_converter(CHARSET_INFO *tocs)
{
- if (type() == item->type() && item->basic_const_item())
- {
- if (binary_cmp)
- return !stringcmp(&str_value, &item->str_value);
- return (collation.collation == item->collation.collation &&
- !sortcmp(&str_value, &item->str_value, collation.collation));
- }
- return 0;
+ /*
+ Return "this" if in prepare. result_type may change at execition time,
+ to it's possible that the converter will not be needed at all:
+
+ PREPARE stmt FROM 'SELECT * FROM t1 WHERE field = ?';
+ SET @arg= 1;
+ EXECUTE stmt USING @arg;
+
+ In the above example result_type is STRING_RESULT at prepare time,
+ and INT_RESULT at execution time.
+ */
+ return !const_item() || state == NULL_VALUE ?
+ this : const_charset_converter(tocs, true);
}
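To make the new contract concrete: safe_charset_converter() / const_charset_converter() now distinguish three outcomes, which callers such as agg_item_set_converter() (next hunk) must handle. A minimal, hypothetical caller sketch, where target_cs is an illustrative name:

    Item *conv= item->safe_charset_converter(target_cs);
    if (conv == item)
      ; /* no conversion needed, keep using the item as-is */
    else if (conv == NULL)
      ; /* lossless conversion impossible: report an error or fall back
           to a runtime Item_func_conv_charset */
    else
      item= conv; /* a new constant item already in target_cs */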
@@ -2123,7 +2067,7 @@ bool agg_item_collations(DTCollation &c, const char *fname,
bool unknown_cs= 0;
c.set(av[0]->collation);
- for (i= 1, arg= &av[item_sep]; i < count; i++, arg++)
+ for (i= 1, arg= &av[item_sep]; i < count; i++, arg+= item_sep)
{
if (c.aggregate((*arg)->collation, flags))
{
@@ -2202,33 +2146,10 @@ bool agg_item_set_converter(DTCollation &coll, const char *fname,
for (i= 0, arg= args; i < nargs; i++, arg+= item_sep)
{
- Item* conv;
- uint32 dummy_offset;
- if (!String::needs_conversion(1, (*arg)->collation.collation,
- coll.collation,
- &dummy_offset))
- continue;
-
- /*
- No needs to add converter if an "arg" is NUMERIC or DATETIME
- value (which is pure ASCII) and at the same time target DTCollation
- is ASCII-compatible. For example, no needs to rewrite:
- SELECT * FROM t1 WHERE datetime_field = '2010-01-01';
- to
- SELECT * FROM t1 WHERE CONVERT(datetime_field USING cs) = '2010-01-01';
-
- TODO: avoid conversion of any values with
- repertoire ASCII and 7bit-ASCII-compatible,
- not only numeric/datetime origin.
- */
- if ((*arg)->collation.derivation == DERIVATION_NUMERIC &&
- (*arg)->collation.repertoire == MY_REPERTOIRE_ASCII &&
- !((*arg)->collation.collation->state & MY_CS_NONASCII) &&
- !(coll.collation->state & MY_CS_NONASCII))
+ Item* conv= (*arg)->safe_charset_converter(coll.collation);
+ if (conv == *arg)
continue;
-
- if (!(conv= (*arg)->safe_charset_converter(coll.collation)) &&
- ((*arg)->collation.repertoire == MY_REPERTOIRE_ASCII))
+ if (!conv && ((*arg)->collation.repertoire == MY_REPERTOIRE_ASCII))
conv= new Item_func_conv_charset(*arg, coll.collation, 1);
if (!conv)
@@ -3014,7 +2935,7 @@ String *Item_float::val_str(String *str)
{
// following assert is redundant, because fixed=1 assigned in constructor
DBUG_ASSERT(fixed == 1);
- str->set_real(value,decimals,&my_charset_bin);
+ str->set_real(value, decimals, &my_charset_numeric);
return str;
}
@@ -3173,10 +3094,6 @@ my_decimal *Item_string::val_decimal(my_decimal *decimal_value)
}
-bool Item_null::eq(const Item *item, bool binary_cmp) const
-{ return item->type() == type(); }
-
-
double Item_null::val_real()
{
// following assert is redundant, because fixed=1 assigned in constructor
@@ -3245,8 +3162,6 @@ Item_param::Item_param(uint pos_in_query_arg) :
value is set.
*/
maybe_null= 1;
- cnvitem= new Item_string("", 0, &my_charset_bin, DERIVATION_COERCIBLE);
- cnvstr.set(cnvbuf, sizeof(cnvbuf), &my_charset_bin);
}
@@ -3806,18 +3721,14 @@ bool Item_param::convert_str_value(THD *thd)
str_value.set_charset(value.cs_info.final_character_set_of_str_value);
/* Here str_value is guaranteed to be in final_character_set_of_str_value */
- max_length= str_value.numchars() * str_value.charset()->mbmaxlen;
-
- /* For the strings converted to numeric form within some functions */
- decimals= NOT_FIXED_DEC;
/*
str_value_ptr is returned from val_str(). It must be not alloced
to prevent it's modification by val_str() invoker.
*/
str_value_ptr.set(str_value.ptr(), str_value.length(),
str_value.charset());
- /* Synchronize item charset with value charset */
- collation.set(str_value.charset(), DERIVATION_COERCIBLE);
+ /* Synchronize item charset and length with value charset */
+ fix_charset_and_length_from_str_value(DERIVATION_COERCIBLE);
}
return rc;
}
@@ -3847,7 +3758,8 @@ Item_param::clone_item()
case STRING_VALUE:
case LONG_DATA_VALUE:
return new Item_string(name, str_value.c_ptr_quick(), str_value.length(),
- str_value.charset());
+ str_value.charset(),
+ collation.derivation, collation.repertoire);
case TIME_VALUE:
break;
case NO_VALUE:
@@ -3859,30 +3771,21 @@ Item_param::clone_item()
bool
-Item_param::eq(const Item *arg, bool binary_cmp) const
+Item_param::eq(const Item *item, bool binary_cmp) const
{
- Item *item;
- if (!basic_const_item() || !arg->basic_const_item() || arg->type() != type())
+ if (!basic_const_item())
return FALSE;
- /*
- We need to cast off const to call val_int(). This should be OK for
- a basic constant.
- */
- item= (Item*) arg;
switch (state) {
case NULL_VALUE:
- return TRUE;
+ return null_eq(item);
case INT_VALUE:
- return value.integer == item->val_int() &&
- unsigned_flag == item->unsigned_flag;
+ return int_eq(value.integer, item);
case REAL_VALUE:
- return value.real == item->val_real();
+ return real_eq(value.real, item);
case STRING_VALUE:
case LONG_DATA_VALUE:
- if (binary_cmp)
- return !stringcmp(&str_value, &item->str_value);
- return !sortcmp(&str_value, &item->str_value, collation.collation);
+ return str_eq(&str_value, item, binary_cmp);
default:
break;
}
@@ -5380,13 +5283,6 @@ bool Item_field::vcol_in_partition_func_processor(uchar *int_arg)
}
-Item *Item_field::safe_charset_converter(CHARSET_INFO *tocs)
-{
- no_const_subst= 1;
- return Item::safe_charset_converter(tocs);
-}
-
-
void Item_field::cleanup()
{
DBUG_ENTER("Item_field::cleanup");
@@ -5692,10 +5588,7 @@ String *Item::check_well_formed_result(String *str, bool send_error)
{
/* Check whether we got a well-formed string */
CHARSET_INFO *cs= str->charset();
- int well_formed_error;
- uint wlen= cs->cset->well_formed_len(cs,
- str->ptr(), str->ptr() + str->length(),
- str->length(), &well_formed_error);
+ uint wlen= str->well_formed_length();
if (wlen < str->length())
{
THD *thd= current_thd;
@@ -6183,24 +6076,6 @@ int Item_decimal::save_in_field(Field *field, bool no_conversions)
}
-bool Item_int::eq(const Item *arg, bool binary_cmp) const
-{
- /* No need to check for null value as basic constant can't be NULL */
- if (arg->basic_const_item() && arg->type() == type())
- {
- /*
- We need to cast off const to call val_int(). This should be OK for
- a basic constant.
- */
- Item *item= (Item*) arg;
- return (item->val_int() == value &&
- ((longlong) value >= 0 ||
- (item->unsigned_flag == unsigned_flag)));
- }
- return FALSE;
-}
-
-
Item *Item_int_with_ref::clone_item()
{
DBUG_ASSERT(ref->const_item());
@@ -6318,27 +6193,6 @@ void Item_float::print(String *str, enum_query_type query_type)
}
-/*
- hex item
- In string context this is a binary string.
- In number context this is a longlong value.
-*/
-
-bool Item_float::eq(const Item *arg, bool binary_cmp) const
-{
- if (arg->basic_const_item() && arg->type() == type())
- {
- /*
- We need to cast off const to call val_int(). This should be OK for
- a basic constant.
- */
- Item *item= (Item*) arg;
- return item->val_real() == value;
- }
- return FALSE;
-}
-
-
inline uint char_val(char X)
{
return (uint) (X >= '0' && X <= '9' ? X-'0' :
@@ -6394,8 +6248,6 @@ int Item_hex_hybrid::save_in_field(Field *field, bool no_conversions)
ulonglong nr;
uint32 length= str_value.length();
- if (!length)
- return 1;
if (length > 8)
{
@@ -6435,32 +6287,6 @@ void Item_hex_string::print(String *str, enum_query_type query_type)
}
-bool Item_hex_constant::eq(const Item *arg, bool binary_cmp) const
-{
- if (arg->basic_const_item() && arg->type() == type() &&
- arg->cast_to_int_type() == cast_to_int_type())
- {
- if (binary_cmp)
- return !stringcmp(&str_value, &arg->str_value);
- return !sortcmp(&str_value, &arg->str_value, collation.collation);
- }
- return FALSE;
-}
-
-
-Item *Item_hex_constant::safe_charset_converter(CHARSET_INFO *tocs)
-{
- Item_string *conv;
- String tmp, *str= val_str(&tmp);
-
- if (!(conv= new Item_string(str->ptr(), str->length(), tocs)))
- return NULL;
- conv->str_value.copy();
- conv->str_value.mark_as_const();
- return conv;
-}
-
-
/*
bin item.
In string context this is a binary string.
diff --git a/sql/item.h b/sql/item.h
index 59c30737a10..ff0c786ab94 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -679,11 +679,20 @@ public:
/* Reuse size, only used by SP local variable assignment, otherwize 0 */
uint rsize;
+protected:
/*
str_values's main purpose is to be used to cache the value in
save_in_field
*/
String str_value;
+
+public:
+ /*
+ Cache val_str() into its own buffer, e.g. to evaluate constant
+ expressions with subqueries in the ORDER/GROUP clauses.
+ */
+ String *val_str() { return val_str(&str_value); }
+
char * name; /* Name from select */
/* Original item name (if it was renamed)*/
char * orig_name;
@@ -1099,9 +1108,47 @@ public:
virtual cond_result eq_cmp_result() const { return COND_OK; }
inline uint float_length(uint decimals_par) const
{ return decimals != NOT_FIXED_DEC ? (DBL_DIG+2+decimals_par) : DBL_DIG+8;}
+ /* Returns total number of decimal digits */
virtual uint decimal_precision() const;
+ /* Returns the number of integer part digits only */
inline int decimal_int_part() const
{ return my_decimal_int_part(decimal_precision(), decimals); }
+ /*
+ Returns the number of fractional digits only.
+ NOT_FIXED_DEC is replaced by the maximum possible number
+ of fractional digits, taking into account the data type.
+ */
+ uint decimal_scale() const
+ {
+ return decimals < NOT_FIXED_DEC ? decimals :
+ is_temporal_type_with_time(field_type()) ?
+ TIME_SECOND_PART_DIGITS :
+ MY_MIN(max_length, DECIMAL_MAX_SCALE);
+ }
+ /*
+ Returns how many digits a divisor adds into a division result.
+ This is important when the integer part of the divisor can be 0.
+ In this example:
+ SELECT 1 / 0.000001; -> 1000000.0000
+ the divisor adds 5 digits into the result precision.
+
+ Currently this method only replaces NOT_FIXED_DEC with
+ TIME_SECOND_PART_DIGITS for temporal data types.
+ This method can be made virtual, to create more efficient (smaller)
+ data types for division results.
+ For example, in
+ SELECT 1/1.000001;
+ the divisor could contribute no additional precision to the result,
+ and the same could hold for any other item known to return
+ a result with a non-zero integer part.
+ */
+ uint divisor_precision_increment() const
+ {
+ return decimals < NOT_FIXED_DEC ? decimals :
+ is_temporal_type_with_time(field_type()) ?
+ TIME_SECOND_PART_DIGITS :
+ decimals;
+ }
/**
TIME or DATETIME precision of the item: 0..6
*/
@@ -1258,7 +1305,6 @@ public:
virtual bool intro_version(uchar *int_arg) { return 0; }
virtual bool remove_dependence_processor(uchar * arg) { return 0; }
- virtual bool remove_fixed(uchar * arg) { fixed= 0; return 0; }
virtual bool cleanup_processor(uchar *arg);
virtual bool collect_item_field_processor(uchar * arg) { return 0; }
virtual bool add_field_to_set_processor(uchar * arg) { return 0; }
@@ -1490,6 +1536,48 @@ public:
virtual Item *expr_cache_insert_transformer(uchar *thd_arg) { return this; }
virtual bool expr_cache_is_needed(THD *) { return FALSE; }
virtual Item *safe_charset_converter(CHARSET_INFO *tocs);
+ bool needs_charset_converter(uint32 length, CHARSET_INFO *tocs)
+ {
+ /*
+ This will return "true" if conversion happens:
+ - between two non-binary different character sets
+ - from "binary" to "unsafe" character set
+ (those that can have non-well-formed string)
+ - from "binary" to UCS2-alike character set with mbminlen>1,
+ when prefix left-padding is needed for an incomplete character:
+ binary 0xFF -> ucs2 0x00FF)
+ */
+ if (!String::needs_conversion_on_storage(length,
+ collation.collation, tocs))
+ return false;
+ /*
+ No need to add a converter if an "arg" is a NUMERIC or DATETIME
+ value (which is pure ASCII) and at the same time the target DTCollation
+ is ASCII-compatible. For example, there is no need to rewrite:
+ SELECT * FROM t1 WHERE datetime_field = '2010-01-01';
+ to
+ SELECT * FROM t1 WHERE CONVERT(datetime_field USING cs) = '2010-01-01';
+
+ TODO: avoid conversion of any values with
+ repertoire ASCII and 7bit-ASCII-compatible,
+ not only numeric/datetime origin.
+ */
+ if (collation.derivation == DERIVATION_NUMERIC &&
+ collation.repertoire == MY_REPERTOIRE_ASCII &&
+ !(collation.collation->state & MY_CS_NONASCII) &&
+ !(tocs->state & MY_CS_NONASCII))
+ return false;
+ return true;
+ }
+ bool needs_charset_converter(CHARSET_INFO *tocs)
+ {
+ // Pass 1 as length to force conversion if tocs->mbminlen>1.
+ return needs_charset_converter(1, tocs);
+ }
+ Item *const_charset_converter(CHARSET_INFO *tocs, bool lossless,
+ const char *func_name);
+ Item *const_charset_converter(CHARSET_INFO *tocs, bool lossless)
+ { return const_charset_converter(tocs, lossless, NULL); }
void delete_self()
{
cleanup();
@@ -1649,12 +1737,102 @@ public:
};
class sp_head;
+class Item_string;
-class Item_basic_constant :public Item
+
+/**
+ A common class for Item_basic_constant and Item_param
+*/
+class Item_basic_value :public Item
+{
+ bool is_basic_value(const Item *item, Type type_arg) const
+ {
+ return item->basic_const_item() && item->type() == type_arg;
+ }
+ bool is_basic_value(Type type_arg) const
+ {
+ return basic_const_item() && type() == type_arg;
+ }
+ bool str_eq(const String *value,
+ const String *other, CHARSET_INFO *cs, bool binary_cmp) const
+ {
+ return binary_cmp ?
+ value->bin_eq(other) :
+ collation.collation == cs && value->eq(other, collation.collation);
+ }
+
+protected:
+ // Value metadata, e.g. to make string processing easier
+ class Metadata: private MY_STRING_METADATA
+ {
+ public:
+ Metadata(const String *str)
+ {
+ my_string_metadata_get(this, str->charset(), str->ptr(), str->length());
+ }
+ Metadata(const String *str, uint repertoire)
+ {
+ MY_STRING_METADATA::repertoire= repertoire;
+ MY_STRING_METADATA::char_length= str->numchars();
+ }
+ uint repertoire() const { return MY_STRING_METADATA::repertoire; }
+ size_t char_length() const { return MY_STRING_METADATA::char_length; }
+ };
+ void fix_charset_and_length_from_str_value(Derivation dv, Metadata metadata)
+ {
+ /*
+ We have to have a different max_length than 'length' here to
+ ensure that we get the right length if we do use the item
+ to create a new table. In this case max_length must be the maximum
+ number of chars for a string of this type, because in Create_field
+ we divide max_length by mbmaxlen.
+ */
+ collation.set(str_value.charset(), dv, metadata.repertoire());
+ fix_char_length(metadata.char_length());
+ decimals= NOT_FIXED_DEC;
+ }
+ void fix_charset_and_length_from_str_value(Derivation dv)
+ {
+ fix_charset_and_length_from_str_value(dv, Metadata(&str_value));
+ }
+ Item_basic_value(): Item() {}
+ /*
+ In the xxx_eq() methods below we need to cast off "const" to
+ call val_xxx(). This is OK for Item_basic_constant and Item_param.
+ */
+ bool null_eq(const Item *item) const
+ {
+ DBUG_ASSERT(is_basic_value(NULL_ITEM));
+ return item->type() == NULL_ITEM;
+ }
+ bool str_eq(const String *value, const Item *item, bool binary_cmp) const
+ {
+ DBUG_ASSERT(is_basic_value(STRING_ITEM));
+ return is_basic_value(item, STRING_ITEM) &&
+ str_eq(value, ((Item_basic_value*)item)->val_str(NULL),
+ item->collation.collation, binary_cmp);
+ }
+ bool real_eq(double value, const Item *item) const
+ {
+ DBUG_ASSERT(is_basic_value(REAL_ITEM));
+ return is_basic_value(item, REAL_ITEM) &&
+ value == ((Item_basic_value*)item)->val_real();
+ }
+ bool int_eq(longlong value, const Item *item) const
+ {
+ DBUG_ASSERT(is_basic_value(INT_ITEM));
+ return is_basic_value(item, INT_ITEM) &&
+ value == ((Item_basic_value*)item)->val_int() &&
+ (value >= 0 || item->unsigned_flag == unsigned_flag);
+ }
+};
+
+
+class Item_basic_constant :public Item_basic_value
{
table_map used_table_map;
public:
- Item_basic_constant(): Item(), used_table_map(0) {};
+ Item_basic_constant(): Item_basic_value(), used_table_map(0) {};
void set_used_tables(table_map map) { used_table_map= map; }
table_map used_tables() const { return used_table_map; }
/* to prevent drop fixed flag (no need parent cleanup call) */
@@ -2195,7 +2373,6 @@ public:
Item *replace_equal_field(uchar *arg);
inline uint32 max_disp_length() { return field->max_display_length(); }
Item_field *field_for_view_update() { return this; }
- Item *safe_charset_converter(CHARSET_INFO *tocs);
int fix_outer_field(THD *thd, Field **field, Item **reference);
virtual Item *update_value_transformer(uchar *select_arg);
virtual void print(String *str, enum_query_type query_type);
@@ -2219,16 +2396,16 @@ public:
class Item_null :public Item_basic_constant
{
public:
- Item_null(char *name_par=0)
+ Item_null(char *name_par=0, CHARSET_INFO *cs= &my_charset_bin)
{
maybe_null= null_value= TRUE;
max_length= 0;
name= name_par ? name_par : (char*) "NULL";
fixed= 1;
- collation.set(&my_charset_bin, DERIVATION_IGNORABLE);
+ collation.set(cs, DERIVATION_IGNORABLE);
}
enum Type type() const { return NULL_ITEM; }
- bool eq(const Item *item, bool binary_cmp) const;
+ bool eq(const Item *item, bool binary_cmp) const { return null_eq(item); }
double val_real();
longlong val_int();
String *val_str(String *str);
@@ -2271,14 +2448,10 @@ public:
/* Item represents one placeholder ('?') of prepared statement */
-class Item_param :public Item,
+class Item_param :public Item_basic_value,
private Settable_routine_parameter,
public Rewritable_query_parameter
{
- char cnvbuf[MAX_FIELD_WIDTH];
- String cnvstr;
- Item *cnvitem;
-
public:
enum enum_item_param_state
{
@@ -2457,7 +2630,8 @@ public:
Item_num *neg() { value= -value; return this; }
uint decimal_precision() const
{ return (uint) (max_length - MY_TEST(value < 0)); }
- bool eq(const Item *, bool binary_cmp) const;
+ bool eq(const Item *item, bool binary_cmp) const
+ { return int_eq(value, item); }
bool check_partition_func_processor(uchar *bool_arg) { return FALSE;}
bool check_vcol_func_processor(uchar *arg) { return FALSE;}
};
@@ -2578,7 +2752,8 @@ public:
{ return new Item_float(name, value, decimals, max_length); }
Item_num *neg() { value= -value; return this; }
virtual void print(String *str, enum_query_type query_type);
- bool eq(const Item *, bool binary_cmp) const;
+ bool eq(const Item *item, bool binary_cmp) const
+ { return real_eq(value, item); }
};
@@ -2596,70 +2771,98 @@ public:
str->append(func_name);
}
- Item *safe_charset_converter(CHARSET_INFO *tocs);
+ Item *safe_charset_converter(CHARSET_INFO *tocs)
+ {
+ return const_charset_converter(tocs, true, func_name);
+ }
};
class Item_string :public Item_basic_constant
{
-public:
- Item_string(const char *str,uint length,
- CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE,
- uint repertoire= MY_REPERTOIRE_UNICODE30)
- : m_cs_specified(FALSE)
+ bool m_cs_specified;
+protected:
+ /**
+ Set the value of m_cs_specified attribute.
+
+ m_cs_specified attribute shows whether character-set-introducer was
+ explicitly specified in the original query for this text literal or
+ not. The attribute makes sense (is used) only for views.
+
+ This operation is to be called from the parser during parsing an input
+ query.
+ */
+ inline void set_cs_specified(bool cs_specified)
{
- str_value.set_or_copy_aligned(str, length, cs);
- collation.set(cs, dv, repertoire);
- /*
- We have to have a different max_length than 'length' here to
- ensure that we get the right length if we do use the item
- to create a new table. In this case max_length must be the maximum
- number of chars for a string of this type because we in Create_field::
- divide the max_length with mbmaxlen).
- */
- max_length= str_value.numchars()*cs->mbmaxlen;
- set_name(str, length, cs);
- decimals=NOT_FIXED_DEC;
+ m_cs_specified= cs_specified;
+ }
+ void fix_from_value(Derivation dv, const Metadata metadata)
+ {
+ fix_charset_and_length_from_str_value(dv, metadata);
// it is constant => can be used without fix_fields (and frequently used)
fixed= 1;
}
+ void fix_and_set_name_from_value(Derivation dv, const Metadata metadata)
+ {
+ fix_from_value(dv, metadata);
+ set_name(str_value.ptr(), str_value.length(), str_value.charset());
+ }
+protected:
/* Just create an item and do not fill string representation */
Item_string(CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE)
: m_cs_specified(FALSE)
{
collation.set(cs, dv);
max_length= 0;
- set_name(NULL, 0, cs);
+ set_name(NULL, 0, system_charset_info);
decimals= NOT_FIXED_DEC;
fixed= 1;
}
- Item_string(const char *name_par, const char *str, uint length,
- CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE,
- uint repertoire= MY_REPERTOIRE_UNICODE30)
+public:
+ // Constructors with the item name set from its value
+ Item_string(const char *str, uint length, CHARSET_INFO *cs,
+ Derivation dv, uint repertoire)
: m_cs_specified(FALSE)
{
str_value.set_or_copy_aligned(str, length, cs);
- collation.set(cs, dv, repertoire);
- max_length= str_value.numchars()*cs->mbmaxlen;
- set_name(name_par, 0, cs);
- decimals=NOT_FIXED_DEC;
- // it is constant => can be used without fix_fields (and frequently used)
- fixed= 1;
+ fix_and_set_name_from_value(dv, Metadata(&str_value, repertoire));
}
- /*
- This is used in stored procedures to avoid memory leaks and
- does a deep copy of its argument.
- */
- void set_str_with_copy(const char *str_arg, uint length_arg)
+ Item_string(const char *str, uint length,
+ CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE)
+ : m_cs_specified(FALSE)
{
- str_value.copy(str_arg, length_arg, collation.collation);
- max_length= str_value.numchars() * collation.collation->mbmaxlen;
+ str_value.set_or_copy_aligned(str, length, cs);
+ fix_and_set_name_from_value(dv, Metadata(&str_value));
}
- void set_repertoire_from_value()
+ Item_string(const String *str, CHARSET_INFO *tocs, uint *conv_errors,
+ Derivation dv, uint repertoire)
+ :m_cs_specified(false)
{
- collation.repertoire= my_string_repertoire(str_value.charset(),
- str_value.ptr(),
- str_value.length());
+ if (str_value.copy(str, tocs, conv_errors))
+ str_value.set("", 0, tocs); // EOM ?
+ str_value.mark_as_const();
+ fix_and_set_name_from_value(dv, Metadata(&str_value, repertoire));
+ }
+ // Constructors with an externally provided item name
+ Item_string(const char *name_par, const char *str, uint length,
+ CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE)
+ :m_cs_specified(false)
+ {
+ str_value.set_or_copy_aligned(str, length, cs);
+ fix_from_value(dv, Metadata(&str_value));
+ set_name(name_par, 0, system_charset_info);
+ }
+ Item_string(const char *name_par, const char *str, uint length,
+ CHARSET_INFO *cs, Derivation dv, uint repertoire)
+ :m_cs_specified(false)
+ {
+ str_value.set_or_copy_aligned(str, length, cs);
+ fix_from_value(dv, Metadata(&str_value, repertoire));
+ set_name(name_par, 0, system_charset_info);
+ }
+ void print_value(String *to) const
+ {
+ str_value.print(to);
}
enum Type type() const { return STRING_ITEM; }
double val_real();
@@ -2674,14 +2877,19 @@ public:
enum Item_result result_type () const { return STRING_RESULT; }
enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; }
bool basic_const_item() const { return 1; }
- bool eq(const Item *item, bool binary_cmp) const;
+ bool eq(const Item *item, bool binary_cmp) const
+ {
+ return str_eq(&str_value, item, binary_cmp);
+ }
Item *clone_item()
{
return new Item_string(name, str_value.ptr(),
- str_value.length(), collation.collation);
+ str_value.length(), collation.collation);
+ }
+ Item *safe_charset_converter(CHARSET_INFO *tocs)
+ {
+ return const_charset_converter(tocs, true);
}
- Item *safe_charset_converter(CHARSET_INFO *tocs);
- Item *charset_converter(CHARSET_INFO *tocs, bool lossless);
inline void append(char *str, uint length)
{
str_value.append(str, length);
@@ -2715,23 +2923,79 @@ public:
return m_cs_specified;
}
- /**
- Set the value of m_cs_specified attribute.
+ String *check_well_formed_result(bool send_error)
+ { return Item::check_well_formed_result(&str_value, send_error); }
- m_cs_specified attribute shows whether character-set-introducer was
- explicitly specified in the original query for this text literal or
- not. The attribute makes sense (is used) only for views.
+ enum_field_types odbc_temporal_literal_type(const LEX_STRING *type_str) const
+ {
+ /*
+ If string is a reasonably short pure ASCII string literal,
+ try to parse known ODBC style date, time or timestamp literals,
+ e.g:
+ SELECT {d'2001-01-01'};
+ SELECT {t'10:20:30'};
+ SELECT {ts'2001-01-01 10:20:30'};
+ */
+ if (collation.repertoire == MY_REPERTOIRE_ASCII &&
+ str_value.length() < MAX_DATE_STRING_REP_LENGTH * 4)
+ {
+ if (type_str->length == 1)
+ {
+ if (type_str->str[0] == 'd') /* {d'2001-01-01'} */
+ return MYSQL_TYPE_DATE;
+ else if (type_str->str[0] == 't') /* {t'10:20:30'} */
+ return MYSQL_TYPE_TIME;
+ }
+ else if (type_str->length == 2) /* {ts'2001-01-01 10:20:30'} */
+ {
+ if (type_str->str[0] == 't' && type_str->str[1] == 's')
+ return MYSQL_TYPE_DATETIME;
+ }
+ }
+ return MYSQL_TYPE_STRING; // Not a temporal literal
+ }
+};
- This operation is to be called from the parser during parsing an input
- query.
- */
- inline void set_cs_specified(bool cs_specified)
+
+class Item_string_with_introducer :public Item_string
+{
+public:
+ Item_string_with_introducer(const char *str, uint length, CHARSET_INFO *cs)
+ :Item_string(str, length, cs)
{
- m_cs_specified= cs_specified;
+ set_cs_specified(true);
}
+ Item_string_with_introducer(const String *str, CHARSET_INFO *tocs)
+ :Item_string(str->ptr(), str->length(), tocs)
+ {
+ set_cs_specified(true);
+ }
+};
-private:
- bool m_cs_specified;
+
+class Item_string_sys :public Item_string
+{
+public:
+ Item_string_sys(const char *str, uint length)
+ :Item_string(str, length, system_charset_info)
+ { }
+ Item_string_sys(const char *str)
+ :Item_string(str, strlen(str), system_charset_info)
+ { }
+};
+
+
+class Item_string_ascii :public Item_string
+{
+public:
+ Item_string_ascii(const char *str, uint length)
+ :Item_string(str, length, &my_charset_latin1,
+ DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII)
+ { }
+ Item_string_ascii(const char *str)
+ :Item_string(str, strlen(str), &my_charset_latin1,
+ DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII)
+ { }
};
@@ -2751,7 +3015,17 @@ public:
Derivation dv= DERIVATION_COERCIBLE)
:Item_string(NullS, str, length, cs, dv), func_name(name_par)
{}
- Item *safe_charset_converter(CHARSET_INFO *tocs);
+ Item_static_string_func(const char *name_par,
+ const String *str,
+ CHARSET_INFO *tocs, uint *conv_errors,
+ Derivation dv, uint repertoire)
+ :Item_string(str, tocs, conv_errors, dv, repertoire),
+ func_name(name_par)
+ {}
+ Item *safe_charset_converter(CHARSET_INFO *tocs)
+ {
+ return const_charset_converter(tocs, true, func_name);
+ }
virtual inline void print(String *str, enum_query_type query_type)
{
@@ -2854,11 +3128,19 @@ public:
enum Type type() const { return VARBIN_ITEM; }
enum Item_result result_type () const { return STRING_RESULT; }
enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; }
- virtual Item *safe_charset_converter(CHARSET_INFO *tocs);
+ virtual Item *safe_charset_converter(CHARSET_INFO *tocs)
+ {
+ return const_charset_converter(tocs, true);
+ }
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
bool check_vcol_func_processor(uchar *arg) { return FALSE;}
bool basic_const_item() const { return 1; }
- bool eq(const Item *item, bool binary_cmp) const;
+ bool eq(const Item *item, bool binary_cmp) const
+ {
+ return item->basic_const_item() && item->type() == type() &&
+ item->cast_to_int_type() == cast_to_int_type() &&
+ str_value.bin_eq(&((Item_hex_constant*)item)->str_value);
+ }
String *val_str(String*) { DBUG_ASSERT(fixed == 1); return &str_value; }
};
@@ -3654,7 +3936,7 @@ public:
{
ref= &outer_ref;
set_properties();
- fixed= 0;
+ fixed= 0; /* reset flag set in set_properties() */
}
Item_outer_ref(Name_resolution_context *context_arg, Item **item,
const char *table_name_arg, const char *field_name_arg,
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index cd53ee731f7..f09cb76b75c 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -883,6 +883,18 @@ class in_string :public in_vector
{
char buff[STRING_BUFFER_USUAL_SIZE];
String tmp;
+ class Item_string_for_in_vector: public Item_string
+ {
+ public:
+ Item_string_for_in_vector(CHARSET_INFO *cs):
+ Item_string(cs)
+ { }
+ void set_value(const String *str)
+ {
+ str_value= *str;
+ collation.set(str->charset());
+ }
+ };
public:
in_string(uint elements,qsort2_cmp cmp_func, CHARSET_INFO *cs);
~in_string();
@@ -890,13 +902,13 @@ public:
uchar *get_value(Item *item);
Item* create_item()
{
- return new Item_string(collation);
+ return new Item_string_for_in_vector(collation);
}
void value_to_item(uint pos, Item *item)
{
String *str=((String*) base)+pos;
- Item_string *to= (Item_string*)item;
- to->str_value= *str;
+ Item_string_for_in_vector *to= (Item_string_for_in_vector*) item;
+ to->set_value(str);
}
Item_result result_type() { return STRING_RESULT; }
};
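The wrapper exists because Item::str_value became protected in the item.h hunk above, so value_to_item() can no longer assign to it directly; set_value() restores that access and, unlike the old direct assignment, also adopts the stored String's charset. A small illustrative sketch (in_vec and some_string are hypothetical names):

    Item_string_for_in_vector *cached=
      (Item_string_for_in_vector*) in_vec->create_item();
    cached->set_value(some_string);   // copies *some_string into str_value
    // ... and the item now compares with the String's own charset:
    DBUG_ASSERT(cached->collation.collation == some_string->charset());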
diff --git a/sql/item_create.cc b/sql/item_create.cc
index 193c7deb207..fa8249c3321 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -1264,6 +1264,21 @@ protected:
};
+#if defined(HAVE_SPATIAL) && !defined(DBUG_OFF)
+class Create_func_gis_debug : public Create_func_arg1
+{
+ public:
+ virtual Item *create_1_arg(THD *thd, Item *arg1);
+
+ static Create_func_gis_debug s_singleton;
+
+ protected:
+ Create_func_gis_debug() {}
+ virtual ~Create_func_gis_debug() {}
+};
+#endif
+
+
#ifdef HAVE_SPATIAL
class Create_func_glength : public Create_func_arg1
{
@@ -4159,6 +4174,17 @@ Create_func_get_lock::create_2_arg(THD *thd, Item *arg1, Item *arg2)
}
+#if defined(HAVE_SPATIAL) && !defined(DBUG_OFF)
+Create_func_gis_debug Create_func_gis_debug::s_singleton;
+
+Item*
+Create_func_gis_debug::create_1_arg(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_gis_debug(arg1);
+}
+#endif
+
+
#ifdef HAVE_SPATIAL
Create_func_glength Create_func_glength::s_singleton;
@@ -5209,26 +5235,7 @@ Create_func_space Create_func_space::s_singleton;
Item*
Create_func_space::create_1_arg(THD *thd, Item *arg1)
{
- /**
- TODO: Fix Bug#23637
- The parsed item tree should not depend on
- <code>thd->variables.collation_connection</code>.
- */
- CHARSET_INFO *cs= thd->variables.collation_connection;
- Item *sp;
-
- if (cs->mbminlen > 1)
- {
- uint dummy_errors;
- sp= new (thd->mem_root) Item_string("", 0, cs, DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII);
- sp->str_value.copy(" ", 1, &my_charset_latin1, cs, &dummy_errors);
- }
- else
- {
- sp= new (thd->mem_root) Item_string(" ", 1, cs, DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII);
- }
-
- return new (thd->mem_root) Item_func_repeat(sp, arg1);
+ return new (thd->mem_root) Item_func_space(arg1);
}
@@ -5854,6 +5861,9 @@ static Native_func_registry func_array[] =
{ { C_STRING_WITH_LEN("ST_GEOMETRYTYPE") }, GEOM_BUILDER(Create_func_geometry_type)},
{ { C_STRING_WITH_LEN("ST_GEOMFROMTEXT") }, GEOM_BUILDER(Create_func_geometry_from_text)},
{ { C_STRING_WITH_LEN("ST_GEOMFROMWKB") }, GEOM_BUILDER(Create_func_geometry_from_wkb)},
+#ifndef DBUG_OFF
+ { { C_STRING_WITH_LEN("ST_GIS_DEBUG") }, GEOM_BUILDER(Create_func_gis_debug)},
+#endif
{ { C_STRING_WITH_LEN("ST_EQUALS") }, GEOM_BUILDER(Create_func_equals)},
{ { C_STRING_WITH_LEN("ST_INTERIORRINGN") }, GEOM_BUILDER(Create_func_interiorringn)},
{ { C_STRING_WITH_LEN("ST_INTERSECTS") }, GEOM_BUILDER(Create_func_intersects)},
diff --git a/sql/item_create.h b/sql/item_create.h
index 5f1a8c6006d..05fe48f656a 100644
--- a/sql/item_create.h
+++ b/sql/item_create.h
@@ -173,6 +173,15 @@ Item *create_temporal_literal(THD *thd,
CHARSET_INFO *cs,
enum_field_types type,
bool send_error);
+inline
+Item *create_temporal_literal(THD *thd, const String *str,
+ enum_field_types type,
+ bool send_error)
+{
+ return create_temporal_literal(thd,
+ str->ptr(), str->length(), str->charset(),
+ type, send_error);
+}
int item_create_init();
void item_create_cleanup();
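The new inline overload merely unpacks a String for the existing entry point; a hypothetical call site (with a String *s already in hand) shrinks from the explicit form to the short one:

    Item *lit;
    lit= create_temporal_literal(thd, s->ptr(), s->length(), s->charset(),
                                 MYSQL_TYPE_DATE, true);         // before
    lit= create_temporal_literal(thd, s, MYSQL_TYPE_DATE, true); // after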
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 2b89aa04295..4ec0466bda8 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -593,7 +593,7 @@ my_decimal *Item_real_func::val_decimal(my_decimal *decimal_value)
}
-void Item_func::fix_num_length_and_dec()
+void Item_udf_func::fix_num_length_and_dec()
{
uint fl_length= 0;
decimals=0;
@@ -611,11 +611,6 @@ void Item_func::fix_num_length_and_dec()
}
-void Item_func_numhybrid::fix_num_length_and_dec()
-{}
-
-
-
/**
Count max_length and decimals for temporal functions.
@@ -803,9 +798,9 @@ bool Item_func_connection_id::fix_fields(THD *thd, Item **ref)
function of two arguments.
*/
-void Item_num_op::find_num_type(void)
+void Item_num_op::fix_length_and_dec(void)
{
- DBUG_ENTER("Item_num_op::find_num_type");
+ DBUG_ENTER("Item_num_op::fix_length_and_dec");
DBUG_PRINT("info", ("name %s", func_name()));
DBUG_ASSERT(arg_count == 2);
Item_result r0= args[0]->cast_to_int_type();
@@ -849,22 +844,26 @@ void Item_num_op::find_num_type(void)
type depends only on the first argument)
*/
-void Item_func_num1::find_num_type()
+void Item_func_num1::fix_length_and_dec()
{
- DBUG_ENTER("Item_func_num1::find_num_type");
+ DBUG_ENTER("Item_func_num1::fix_length_and_dec");
DBUG_PRINT("info", ("name %s", func_name()));
switch (cached_result_type= args[0]->cast_to_int_type()) {
case INT_RESULT:
+ max_length= args[0]->max_length;
unsigned_flag= args[0]->unsigned_flag;
break;
case STRING_RESULT:
case REAL_RESULT:
cached_result_type= REAL_RESULT;
+ decimals= args[0]->decimals; // Preserve NOT_FIXED_DEC
max_length= float_length(decimals);
break;
case TIME_RESULT:
cached_result_type= DECIMAL_RESULT;
case DECIMAL_RESULT:
+ decimals= args[0]->decimal_scale(); // Do not preserve NOT_FIXED_DEC
+ max_length= args[0]->max_length;
break;
case ROW_RESULT:
case IMPOSSIBLE_RESULT:
@@ -879,20 +878,6 @@ void Item_func_num1::find_num_type()
}
-void Item_func_num1::fix_num_length_and_dec()
-{
- decimals= args[0]->decimals;
- max_length= args[0]->max_length;
-}
-
-
-void Item_func_numhybrid::fix_length_and_dec()
-{
- fix_num_length_and_dec();
- find_num_type();
-}
-
-
String *Item_func_hybrid_result_type::val_str(String *str)
{
DBUG_ASSERT(fixed == 1);
@@ -1537,11 +1522,14 @@ my_decimal *Item_func_plus::decimal_op(my_decimal *decimal_value)
*/
void Item_func_additive_op::result_precision()
{
- decimals= MY_MAX(args[0]->decimals, args[1]->decimals);
- int arg1_int= args[0]->decimal_precision() - args[0]->decimals;
- int arg2_int= args[1]->decimal_precision() - args[1]->decimals;
+ decimals= MY_MAX(args[0]->decimal_scale(), args[1]->decimal_scale());
+ int arg1_int= args[0]->decimal_precision() - args[0]->decimal_scale();
+ int arg2_int= args[1]->decimal_precision() - args[1]->decimal_scale();
int precision= MY_MAX(arg1_int, arg2_int) + 1 + decimals;
+ DBUG_ASSERT(arg1_int >= 0);
+ DBUG_ASSERT(arg2_int >= 0);
+
/* Integer operations keep unsigned_flag if one of arguments is unsigned */
if (result_type() == INT_RESULT)
unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag;
@@ -1778,7 +1766,8 @@ void Item_func_mul::result_precision()
unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag;
else
unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag;
- decimals= MY_MIN(args[0]->decimals + args[1]->decimals, DECIMAL_MAX_SCALE);
+ decimals= MY_MIN(args[0]->decimal_scale() + args[1]->decimal_scale(),
+ DECIMAL_MAX_SCALE);
uint est_prec = args[0]->decimal_precision() + args[1]->decimal_precision();
uint precision= MY_MIN(est_prec, DECIMAL_MAX_PRECISION);
max_length= my_decimal_precision_to_length_no_truncation(precision, decimals,
@@ -1832,8 +1821,20 @@ my_decimal *Item_func_div::decimal_op(my_decimal *decimal_value)
void Item_func_div::result_precision()
{
+ /*
+ We need to add args[1]->divisor_precision_increment(),
+ to properly handle the cases like this:
+ SELECT 5.05 / 0.014; -> 360.714286
+ i.e. when the divisor has a zero integer part
+ and non-zero digits appear only after the decimal point.
+ Precision in this example is calculated as
+ args[0]->decimal_precision() + // 3
+ args[1]->divisor_precision_increment() + // 3
+ prec_increment // 4
+ which gives 10 decimal digits.
+ */
uint precision=MY_MIN(args[0]->decimal_precision() +
- args[1]->decimals + prec_increment,
+ args[1]->divisor_precision_increment() + prec_increment,
DECIMAL_MAX_PRECISION);
/* Integer operations keep unsigned_flag if one of arguments is unsigned */
@@ -1841,7 +1842,7 @@ void Item_func_div::result_precision()
unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag;
else
unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag;
- decimals= MY_MIN(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
+ decimals= MY_MIN(args[0]->decimal_scale() + prec_increment, DECIMAL_MAX_SCALE);
max_length= my_decimal_precision_to_length_no_truncation(precision, decimals,
unsigned_flag);
}
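For illustration, the arithmetic behind the example quoted in the comment above, assuming the default div_precision_increment of 4 (a worked example, not additional code from the patch):

    /*
      SELECT 5.05 / 0.014:
        args[0]->decimal_precision()           = 3  (5.05: 3 digits, scale 2)
        args[1]->divisor_precision_increment() = 3  (0.014 has scale 3)
        prec_increment                         = 4
        precision = MY_MIN(3 + 3 + 4, DECIMAL_MAX_PRECISION) = 10
        decimals  = MY_MIN(2 + 4, DECIMAL_MAX_SCALE)         = 6
      which matches the quoted result 360.714286.
    */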
@@ -2047,7 +2048,7 @@ my_decimal *Item_func_mod::decimal_op(my_decimal *decimal_value)
void Item_func_mod::result_precision()
{
- decimals= MY_MAX(args[0]->decimals, args[1]->decimals);
+ decimals= MY_MAX(args[0]->decimal_scale(), args[1]->decimal_scale());
max_length= MY_MAX(args[0]->max_length, args[1]->max_length);
}
@@ -2103,18 +2104,12 @@ my_decimal *Item_func_neg::decimal_op(my_decimal *decimal_value)
}
-void Item_func_neg::fix_num_length_and_dec()
-{
- decimals= args[0]->decimals;
- /* 1 add because sign can appear */
- max_length= args[0]->max_length + 1;
-}
-
-
void Item_func_neg::fix_length_and_dec()
{
DBUG_ENTER("Item_func_neg::fix_length_and_dec");
Item_func_num1::fix_length_and_dec();
+ /* Add 1 because a sign can appear */
+ max_length= args[0]->max_length + 1;
/*
If this is in integer context keep the context as integer if possible
@@ -2421,8 +2416,12 @@ void Item_func_integer::fix_length_and_dec()
decimals=0;
}
-void Item_func_int_val::fix_num_length_and_dec()
+
+void Item_func_int_val::fix_length_and_dec()
{
+ DBUG_ENTER("Item_func_int_val::fix_length_and_dec");
+ DBUG_PRINT("info", ("name %s", func_name()));
+
ulonglong tmp_max_length= (ulonglong ) args[0]->max_length -
(args[0]->decimals ? args[0]->decimals + 1 : 0) + 2;
max_length= tmp_max_length > (ulonglong) 4294967295U ?
@@ -2430,13 +2429,7 @@ void Item_func_int_val::fix_num_length_and_dec()
uint tmp= float_length(decimals);
set_if_smaller(max_length,tmp);
decimals= 0;
-}
-
-void Item_func_int_val::find_num_type()
-{
- DBUG_ENTER("Item_func_int_val::find_num_type");
- DBUG_PRINT("info", ("name %s", func_name()));
switch (cached_result_type= args[0]->cast_to_int_type())
{
case STRING_RESULT:
@@ -2973,7 +2966,7 @@ bool Item_func_min_max::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
{
ltime->time_type= MYSQL_TIMESTAMP_TIME;
ltime->hour+= (ltime->month * 32 + ltime->day) * 24;
- ltime->month= ltime->day= 0;
+ ltime->year= ltime->month= ltime->day= 0;
if (adjust_time_range_with_warn(ltime,
std::min<uint>(decimals, TIME_SECOND_PART_DIGITS)))
return (null_value= true);
@@ -3907,12 +3900,6 @@ String *Item_func_udf_decimal::val_str(String *str)
}
-void Item_func_udf_decimal::fix_length_and_dec()
-{
- fix_num_length_and_dec();
-}
-
-
/* Default max_length is max argument length */
void Item_func_udf_str::fix_length_and_dec()
@@ -3987,9 +3974,13 @@ longlong Item_master_pos_wait::val_int()
else
connection_name= thd->variables.default_master_connection;
- if (!(mi= master_info_index->get_master_info(&connection_name,
- Sql_condition::WARN_LEVEL_WARN)))
+ mysql_mutex_lock(&LOCK_active_mi);
+ mi= master_info_index->get_master_info(&connection_name,
+ Sql_condition::WARN_LEVEL_WARN);
+ mysql_mutex_unlock(&LOCK_active_mi);
+ if (!mi)
goto err;
+
if ((event_count = mi->rli.wait_for_pos(thd, log_name, pos, timeout)) == -2)
{
null_value = 1;
diff --git a/sql/item_func.h b/sql/item_func.h
index 1696898812d..18265f672dd 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -154,7 +154,6 @@ public:
virtual void print(String *str, enum_query_type query_type);
void print_op(String *str, enum_query_type query_type);
void print_args(String *str, uint from, enum_query_type query_type);
- virtual void fix_num_length_and_dec();
void count_only_length(Item **item, uint nitems);
void count_real_length();
void count_decimal_length();
@@ -541,9 +540,6 @@ public:
Item_func_numhybrid(List<Item> &list)
:Item_func_hybrid_result_type(list)
{ }
- void fix_length_and_dec();
- void fix_num_length_and_dec();
- virtual void find_num_type()= 0; /* To be called from fix_length_and_dec */
String *str_op(String *str) { DBUG_ASSERT(0); return 0; }
bool date_op(MYSQL_TIME *ltime, uint fuzzydate) { DBUG_ASSERT(0); return true; }
};
@@ -555,9 +551,7 @@ class Item_func_num1: public Item_func_numhybrid
public:
Item_func_num1(Item *a) :Item_func_numhybrid(a) {}
Item_func_num1(Item *a, Item *b) :Item_func_numhybrid(a, b) {}
-
- void fix_num_length_and_dec();
- void find_num_type();
+ void fix_length_and_dec();
};
@@ -573,7 +567,7 @@ class Item_num_op :public Item_func_numhybrid
print_op(str, query_type);
}
- void find_num_type();
+ void fix_length_and_dec();
};
@@ -795,7 +789,6 @@ public:
const char *func_name() const { return "-"; }
enum Functype functype() const { return NEG_FUNC; }
void fix_length_and_dec();
- void fix_num_length_and_dec();
uint decimal_precision() const { return args[0]->decimal_precision(); }
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
@@ -962,8 +955,7 @@ class Item_func_int_val :public Item_func_num1
{
public:
Item_func_int_val(Item *a) :Item_func_num1(a) {}
- void fix_num_length_and_dec();
- void find_num_type();
+ void fix_length_and_dec();
};
@@ -1376,6 +1368,7 @@ public:
fixed= 1;
return res;
}
+ void fix_num_length_and_dec();
void update_used_tables()
{
/*
@@ -1489,7 +1482,7 @@ public:
my_decimal *val_decimal(my_decimal *);
String *val_str(String *str);
enum Item_result result_type () const { return DECIMAL_RESULT; }
- void fix_length_and_dec();
+ void fix_length_and_dec() { fix_num_length_and_dec(); }
};
diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc
index 1deda83907c..d9200b3e8d3 100644
--- a/sql/item_geofunc.cc
+++ b/sql/item_geofunc.cc
@@ -1739,4 +1739,12 @@ mem_error:
}
+#ifndef DBUG_OFF
+longlong Item_func_gis_debug::val_int()
+{
+ /* For now this is just a stub. TODO: implement the internal GIS debugging */
+ return 0;
+}
+#endif
+
#endif /*HAVE_SPATIAL*/
diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h
index 2d715dc8765..6d52661e5c9 100644
--- a/sql/item_geofunc.h
+++ b/sql/item_geofunc.h
@@ -496,6 +496,18 @@ public:
const char *func_name() const { return "st_distance"; }
};
+
+#ifndef DBUG_OFF
+class Item_func_gis_debug: public Item_int_func
+{
+ public:
+ Item_func_gis_debug(Item *a) :Item_int_func(a) { null_value= false; }
+ const char *func_name() const { return "st_gis_debug"; }
+ longlong val_int();
+};
+#endif
+
+
#define GEOM_NEW(thd, obj_constructor) new (thd->mem_root) obj_constructor
#else /*HAVE_SPATIAL*/
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index fa6ba706718..9a3c7589db2 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -179,16 +179,27 @@ String *Item_func_md5::val_str_ascii(String *str)
}
+/*
+ The MD5()/SHA() functions treat their argument as case sensitive.
+ Thus we set binary collation on it so different instances of MD5() will be
+ compared properly.
+*/
+static CHARSET_INFO *get_checksum_charset(const char *csname)
+{
+ CHARSET_INFO *cs= get_charset_by_csname(csname, MY_CS_BINSORT, MYF(0));
+ if (!cs)
+ {
+ // Charset has no binary collation: use my_charset_bin.
+ cs= &my_charset_bin;
+ }
+ return cs;
+}
+
+
void Item_func_md5::fix_length_and_dec()
{
- /*
- The MD5() function treats its parameter as being a case sensitive. Thus
- we set binary collation on it so different instances of MD5() will be
- compared properly.
- */
- args[0]->collation.set(
- get_charset_by_csname(args[0]->collation.collation->csname,
- MY_CS_BINSORT,MYF(0)), DERIVATION_COERCIBLE);
+ CHARSET_INFO *cs= get_checksum_charset(args[0]->collation.collation->csname);
+ args[0]->collation.set(cs, DERIVATION_COERCIBLE);
fix_length_and_charset(32, default_charset());
}
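The fallback added in get_checksum_charset() (use my_charset_bin when the
argument's character set has no binary collation) can be pictured with a
standalone sketch; the lookup table and names below are hypothetical stand-ins,
not the server's charset API.

  // Sketch only: stand-ins for get_charset_by_csname(..., MY_CS_BINSORT, ...)
  // and my_charset_bin.
  #include <cstdio>
  #include <cstring>

  struct charset_stub { const char *csname; const char *bin_collation; };

  static const charset_stub known[]= { { "latin1", "latin1_bin" },
                                       { "utf8",   "utf8_bin"   } };
  static const charset_stub binary_stub= { "binary", "binary" };

  static const charset_stub *checksum_charset(const char *csname)
  {
    for (const charset_stub &cs : known)
      if (!std::strcmp(cs.csname, csname))
        return &cs;                  // binary collation of the same character set
    return &binary_stub;             // none found: fall back to plain binary
  }

  int main()
  {
    std::printf("%s\n", checksum_charset("utf8")->bin_collation);     // utf8_bin
    std::printf("%s\n", checksum_charset("unknown")->bin_collation);  // binary
    return 0;
  }

Either way the argument ends up with a byte-for-byte comparison, which matches
the fact that MD5('a') and MD5('A') produce different hashes.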
@@ -218,14 +229,8 @@ String *Item_func_sha::val_str_ascii(String *str)
void Item_func_sha::fix_length_and_dec()
{
- /*
- The SHA() function treats its parameter as being a case sensitive. Thus
- we set binary collation on it so different instances of MD5() will be
- compared properly.
- */
- args[0]->collation.set(
- get_charset_by_csname(args[0]->collation.collation->csname,
- MY_CS_BINSORT,MYF(0)), DERIVATION_COERCIBLE);
+ CHARSET_INFO *cs= get_checksum_charset(args[0]->collation.collation->csname);
+ args[0]->collation.set(cs, DERIVATION_COERCIBLE);
// size of hex representation of hash
fix_length_and_charset(SHA1_HASH_SIZE * 2, default_charset());
}
@@ -348,18 +353,9 @@ void Item_func_sha2::fix_length_and_dec()
ER(ER_WRONG_PARAMETERS_TO_NATIVE_FCT), "sha2");
}
- /*
- The SHA2() function treats its parameter as being a case sensitive.
- Thus we set binary collation on it so different instances of SHA2()
- will be compared properly.
- */
+ CHARSET_INFO *cs= get_checksum_charset(args[0]->collation.collation->csname);
+ args[0]->collation.set(cs, DERIVATION_COERCIBLE);
- args[0]->collation.set(
- get_charset_by_csname(
- args[0]->collation.collation->csname,
- MY_CS_BINSORT,
- MYF(0)),
- DERIVATION_COERCIBLE);
#else
push_warning_printf(current_thd,
Sql_condition::WARN_LEVEL_WARN,
@@ -513,39 +509,42 @@ void Item_func_from_base64::fix_length_and_dec()
String *Item_func_from_base64::val_str(String *str)
{
String *res= args[0]->val_str_ascii(str);
- bool too_long= false;
int length;
const char *end_ptr;
- if (!res ||
- res->length() > (uint) base64_decode_max_arg_length() ||
- (too_long=
- ((uint) (length= base64_needed_decoded_length((int) res->length())) >
- current_thd->variables.max_allowed_packet)) ||
- tmp_value.alloc((uint) length) ||
- (length= base64_decode(res->ptr(), (int) res->length(),
+ if (!res)
+ goto err;
+
+ if (res->length() > (uint) base64_decode_max_arg_length() ||
+ ((uint) (length= base64_needed_decoded_length((int) res->length())) >
+ current_thd->variables.max_allowed_packet))
+ {
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WARN_ALLOWED_PACKET_OVERFLOWED,
+ ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(),
+ current_thd->variables.max_allowed_packet);
+ goto err;
+ }
+
+ if (tmp_value.alloc((uint) length))
+ goto err;
+
+ if ((length= base64_decode(res->ptr(), (int) res->length(),
(char *) tmp_value.ptr(), &end_ptr, 0)) < 0 ||
end_ptr < res->ptr() + res->length())
{
- null_value= 1; // NULL input, too long input, OOM, or badly formed input
- if (too_long)
- {
- push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
- ER_WARN_ALLOWED_PACKET_OVERFLOWED,
- ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(),
- current_thd->variables.max_allowed_packet);
- }
- else if (res && length < 0)
- {
- push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
- ER_BAD_BASE64_DATA, ER(ER_BAD_BASE64_DATA),
- end_ptr - res->ptr());
- }
- return 0;
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_BAD_BASE64_DATA, ER(ER_BAD_BASE64_DATA),
+ end_ptr - res->ptr());
+ goto err;
}
+
tmp_value.length((uint) length);
null_value= 0;
return &tmp_value;
+err:
+ null_value= 1; // NULL input, too long input, OOM, or badly formed input
+ return 0;
}
///////////////////////////////////////////////////////////////////////////////
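The rewritten FROM_BASE64() above now rejects oversized input before allocating
the decode buffer. A rough standalone sketch of that size check, assuming only
that four base64 characters decode to at most three bytes (the real
base64_needed_decoded_length() may round slightly differently):

  // Sketch only: upper bound on the decoded size, checked against max_allowed_packet.
  #include <cstdio>

  static long long needed_decoded_length(long long encoded_length)
  {
    return (encoded_length + 3) / 4 * 3;   // at most 3 bytes per 4 input characters
  }

  int main()
  {
    long long max_allowed_packet= 16LL * 1024 * 1024;  // 16 MiB, illustrative
    long long encoded= 30LL * 1024 * 1024;             // 30 MiB of base64 text
    long long needed= needed_decoded_length(encoded);
    std::printf("needed=%lld too_big=%d\n", needed,
                (int) (needed > max_allowed_packet));  // too_big=1 -> warn, return NULL
    return 0;
  }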
@@ -598,7 +597,7 @@ String *Item_func_decode_histogram::val_str(String *str)
val= p[i] / ((double)((1 << 8) - 1));
break;
case DOUBLE_PREC_HB:
- val= ((uint16 *)(p + i))[0] / ((double)((1 << 16) - 1));
+ val= uint2korr(p + i) / ((double)((1 << 16) - 1));
i++;
break;
default:
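The switch from a raw uint16 cast to uint2korr() above matters because the
histogram lives in a plain string buffer with no alignment guarantee, and the
stored value is in a fixed (little-endian) byte order. A sketch of the
uint2korr() contract (the real macro is part of the server's portability
headers):

  // Sketch only: read two bytes as a little-endian value, regardless of host
  // alignment and endianness.
  #include <cstdint>
  #include <cstdio>

  static inline uint16_t uint2korr_sketch(const unsigned char *p)
  {
    return (uint16_t) (p[0] | (p[1] << 8));
  }

  int main()
  {
    unsigned char hist[]= { 0x00, 0x34, 0x12 };   // a bucket value starting at offset 1
    double val= uint2korr_sketch(hist + 1) / (double) ((1 << 16) - 1);
    std::printf("%f\n", val);                     // 0x1234 / 65535 ~= 0.071107
    return 0;
  }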
@@ -1962,7 +1961,7 @@ String *Item_func_ltrim::val_str(String *str)
if ((remove_length= remove_str->length()) == 0 ||
remove_length > res->length())
- return res;
+ return non_trimmed_value(res);
ptr= (char*) res->ptr();
end= ptr+res->length();
@@ -1981,9 +1980,8 @@ String *Item_func_ltrim::val_str(String *str)
end+=remove_length;
}
if (ptr == res->ptr())
- return res;
- tmp_value.set(*res,(uint) (ptr - res->ptr()),(uint) (end-ptr));
- return &tmp_value;
+ return non_trimmed_value(res);
+ return trimmed_value(res, (uint32) (ptr - res->ptr()), (uint32) (end - ptr));
}
@@ -2009,7 +2007,7 @@ String *Item_func_rtrim::val_str(String *str)
if ((remove_length= remove_str->length()) == 0 ||
remove_length > res->length())
- return res;
+ return non_trimmed_value(res);
ptr= (char*) res->ptr();
end= ptr+res->length();
@@ -2021,11 +2019,11 @@ String *Item_func_rtrim::val_str(String *str)
{
char chr=(*remove_str)[0];
#ifdef USE_MB
- if (use_mb(res->charset()))
+ if (use_mb(collation.collation))
{
while (ptr < end)
{
- if ((l=my_ismbchar(res->charset(), ptr,end))) ptr+=l,p=ptr;
+ if ((l= my_ismbchar(collation.collation, ptr, end))) ptr+= l, p=ptr;
else ++ptr;
}
ptr=p;
@@ -2038,12 +2036,12 @@ String *Item_func_rtrim::val_str(String *str)
{
const char *r_ptr=remove_str->ptr();
#ifdef USE_MB
- if (use_mb(res->charset()))
+ if (use_mb(collation.collation))
{
loop:
while (ptr + remove_length < end)
{
- if ((l=my_ismbchar(res->charset(), ptr,end))) ptr+=l;
+ if ((l= my_ismbchar(collation.collation, ptr, end))) ptr+= l;
else ++ptr;
}
if (ptr + remove_length == end && !memcmp(ptr,r_ptr,remove_length))
@@ -2062,9 +2060,8 @@ String *Item_func_rtrim::val_str(String *str)
}
}
if (end == res->ptr()+res->length())
- return res;
- tmp_value.set(*res,0,(uint) (end-res->ptr()));
- return &tmp_value;
+ return non_trimmed_value(res);
+ return trimmed_value(res, 0, (uint32) (end - res->ptr()));
}
@@ -2091,37 +2088,22 @@ String *Item_func_trim::val_str(String *str)
if ((remove_length= remove_str->length()) == 0 ||
remove_length > res->length())
- return res;
+ return non_trimmed_value(res);
ptr= (char*) res->ptr();
end= ptr+res->length();
r_ptr= remove_str->ptr();
+ while (ptr+remove_length <= end && !memcmp(ptr,r_ptr,remove_length))
+ ptr+=remove_length;
#ifdef USE_MB
- if (use_mb(res->charset()))
+ if (use_mb(collation.collation))
{
- while (ptr + remove_length <= end)
- {
- uint num_bytes= 0;
- while (num_bytes < remove_length)
- {
- uint len;
- if ((len= my_ismbchar(res->charset(), ptr + num_bytes, end)))
- num_bytes+= len;
- else
- ++num_bytes;
- }
- if (num_bytes != remove_length)
- break;
- if (memcmp(ptr, r_ptr, remove_length))
- break;
- ptr+= remove_length;
- }
char *p=ptr;
register uint32 l;
loop:
while (ptr + remove_length < end)
{
- if ((l= my_ismbchar(res->charset(), ptr,end)))
+ if ((l= my_ismbchar(collation.collation, ptr, end)))
ptr+= l;
else
++ptr;
@@ -2137,16 +2119,13 @@ String *Item_func_trim::val_str(String *str)
else
#endif /* USE_MB */
{
- while (ptr+remove_length <= end && !memcmp(ptr,r_ptr,remove_length))
- ptr+=remove_length;
while (ptr + remove_length <= end &&
!memcmp(end-remove_length,r_ptr,remove_length))
end-=remove_length;
}
if (ptr == res->ptr() && end == ptr+res->length())
- return res;
- tmp_value.set(*res,(uint) (ptr - res->ptr()),(uint) (end-ptr));
- return &tmp_value;
+ return non_trimmed_value(res);
+ return trimmed_value(res, (uint32) (ptr - res->ptr()), (uint32) (end - ptr));
}
void Item_func_trim::fix_length_and_dec()
@@ -2348,32 +2327,6 @@ void Item_func_decode::crypto_transform(String *res)
}
-Item *Item_func_sysconst::safe_charset_converter(CHARSET_INFO *tocs)
-{
- Item_string *conv;
- uint conv_errors;
- String tmp, cstr, *ostr= val_str(&tmp);
- if (null_value)
- {
- Item *null_item= new Item_null((char *) fully_qualified_func_name());
- null_item->collation.set (tocs);
- return null_item;
- }
- cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors);
- if (conv_errors ||
- !(conv= new Item_static_string_func(fully_qualified_func_name(),
- cstr.ptr(), cstr.length(),
- cstr.charset(),
- collation.derivation)))
- {
- return NULL;
- }
- conv->str_value.copy();
- conv->str_value.mark_as_const();
- return conv;
-}
-
-
String *Item_func_database::val_str(String *str)
{
DBUG_ASSERT(fixed == 1);
@@ -3045,6 +2998,75 @@ err:
}
+void Item_func_space::fix_length_and_dec()
+{
+ collation.set(default_charset(), DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII);
+ if (args[0]->const_item())
+ {
+ /* must be longlong to avoid truncation */
+ longlong count= args[0]->val_int();
+ if (args[0]->null_value)
+ goto end;
+ /*
+ Assumes that the maximum length of a String is < INT_MAX32.
+ Set here so that the rest of the code sees an out-of-bound value as such.
+ */
+ if (count > INT_MAX32)
+ count= INT_MAX32;
+ fix_char_length_ulonglong(count);
+ return;
+ }
+
+end:
+ max_length= MAX_BLOB_WIDTH;
+ maybe_null= 1;
+}
+
+
+String *Item_func_space::val_str(String *str)
+{
+ uint tot_length;
+ longlong count= args[0]->val_int();
+ const CHARSET_INFO *cs= collation.collation;
+
+ if (args[0]->null_value)
+ goto err; // the count argument is NULL
+ null_value= 0;
+
+ if (count <= 0 && (count == 0 || !args[0]->unsigned_flag))
+ return make_empty_result();
+ /*
+ Assumes that the maximum length of a String is < INT_MAX32.
+ Bounds check on count: if the cap is hit, the max_allowed_packet check below will fail and we return NULL.
+ */
+ if ((ulonglong) count > INT_MAX32)
+ count= INT_MAX32;
+
+ // Safe length check
+ tot_length= (uint) count * cs->mbminlen;
+ if (tot_length > current_thd->variables.max_allowed_packet)
+ {
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WARN_ALLOWED_PACKET_OVERFLOWED,
+ ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
+ func_name(),
+ current_thd->variables.max_allowed_packet);
+ goto err;
+ }
+
+ if (str->alloc(tot_length))
+ goto err;
+ str->length(tot_length);
+ str->set_charset(cs);
+ cs->cset->fill(cs, (char*) str->ptr(), tot_length, ' ');
+ return str;
+
+err:
+ null_value= 1;
+ return 0;
+}
+
+
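A compressed standalone sketch of the SPACE() guards above; it ignores the
unsigned-argument corner case and the warning machinery, and all names are
illustrative rather than server API.

  // Sketch only: cap the count, then refuse anything longer than max_allowed_packet.
  #include <cstdint>
  #include <cstdio>
  #include <string>

  static bool space_value(long long count, uint32_t mbminlen,
                          uint32_t max_allowed_packet, std::string *out)
  {
    if (count <= 0)                              // zero or negative -> empty string
    {
      out->clear();
      return true;
    }
    if ((unsigned long long) count > INT32_MAX)
      count= INT32_MAX;                          // same cap as the code above
    unsigned long long tot_length= (unsigned long long) count * mbminlen;
    if (tot_length > max_allowed_packet)
      return false;                              // the server warns and returns NULL here
    out->assign((size_t) tot_length, ' ');
    return true;
  }

  int main()
  {
    std::string s;
    std::printf("%d\n", (int) space_value(5, 1, 1024, &s));          // 1: five spaces
    std::printf("%d\n", (int) space_value(1LL << 40, 1, 1024, &s));  // 0: too long
    return 0;
  }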
void Item_func_binlog_gtid_pos::fix_length_and_dec()
{
collation.set(system_charset_info);
@@ -3443,7 +3465,7 @@ void Item_func_set_collation::print(String *str, enum_query_type query_type)
str->append(STRING_WITH_LEN(" collate "));
DBUG_ASSERT(args[1]->basic_const_item() &&
args[1]->type() == Item::STRING_ITEM);
- args[1]->str_value.print(str);
+ ((Item_string *)args[1])->print_value(str);
str->append(')');
}
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index f3d5c064423..8377a20e0a4 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -349,6 +349,21 @@ class Item_func_trim :public Item_str_func
protected:
String tmp_value;
String remove;
+ String *trimmed_value(String *res, uint32 offset, uint32 length)
+ {
+ tmp_value.set(*res, offset, length);
+ /*
+ Make sure to return correct charset and collation:
+ TRIM(0x000000 FROM _ucs2 0x0061)
+ should set charset to "binary" rather than to "ucs2".
+ */
+ tmp_value.set_charset(collation.collation);
+ return &tmp_value;
+ }
+ String *non_trimmed_value(String *res)
+ {
+ return trimmed_value(res, 0, res->length());
+ }
public:
Item_func_trim(Item *a,Item *b) :Item_str_func(a,b) {}
Item_func_trim(Item *a) :Item_str_func(a) {}
@@ -527,7 +542,10 @@ class Item_func_sysconst :public Item_str_func
public:
Item_func_sysconst()
{ collation.set(system_charset_info,DERIVATION_SYSCONST); }
- Item *safe_charset_converter(CHARSET_INFO *tocs);
+ Item *safe_charset_converter(CHARSET_INFO *tocs)
+ {
+ return const_charset_converter(tocs, true, fully_qualified_func_name());
+ }
/*
Used to create correct Item name in new converted item in
safe_charset_converter, return string representation of this function
@@ -701,6 +719,16 @@ public:
};
+class Item_func_space :public Item_str_func
+{
+public:
+ Item_func_space(Item *arg1):Item_str_func(arg1) {}
+ String *val_str(String *);
+ void fix_length_and_dec();
+ const char *func_name() const { return "space"; }
+};
+
+
class Item_func_binlog_gtid_pos :public Item_str_func
{
String tmp_value;
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 38bb3121ed8..7db7b014d28 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -3654,8 +3654,9 @@ int subselect_single_select_engine::exec()
pushed down into the subquery. Those optimizations are ref[_or_null]
accesses. Change them to be full table scans.
*/
- for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_CONST_TABLES); tab;
- tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS))
+ JOIN_TAB *tab;
+ for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
+ tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS))
{
if (tab && tab->keyuse)
{
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 62db351150b..2dadf8b8835 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -3197,19 +3197,13 @@ Item_func_group_concat(Name_resolution_context *context_arg,
/*
We need to allocate:
args - arg_count_field+arg_count_order
- (for possible order items in temporare tables)
+ (for possible order items in temporary tables)
order - arg_count_order
*/
- if (!(args= (Item**) sql_alloc(sizeof(Item*) * arg_count +
+ if (!(args= (Item**) sql_alloc(sizeof(Item*) * arg_count * 2 +
sizeof(ORDER*)*arg_count_order)))
return;
- if (!(orig_args= (Item **) sql_alloc(sizeof(Item *) * arg_count)))
- {
- args= NULL;
- return;
- }
-
order= (ORDER**)(args + arg_count);
/* fill args items of show and sort */
@@ -3230,6 +3224,9 @@ Item_func_group_concat(Name_resolution_context *context_arg,
order_item->item= arg_ptr++;
}
}
+
+ /* orig_args is only used for print() */
+ orig_args= (Item**) (order + arg_count_order);
memcpy(orig_args, args, sizeof(Item*) * arg_count);
}
@@ -3313,6 +3310,7 @@ void Item_func_group_concat::cleanup()
}
DBUG_ASSERT(tree == 0);
}
+
DBUG_VOID_RETURN;
}
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 5fddad56028..4a8bb4cc77d 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -1816,7 +1816,7 @@ void Item_func_date_format::fix_length_and_dec()
if (arg1->type() == STRING_ITEM)
{ // Optimize the normal case
fixed_length=1;
- max_length= format_length(&arg1->str_value) *
+ max_length= format_length(arg1->val_str(NULL)) *
collation.collation->mbmaxlen;
}
else
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index 29badddad8e..cb8b59501a4 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -413,16 +413,15 @@ protected:
public:
Item_func_seconds_hybrid() :Item_func_numhybrid() {}
Item_func_seconds_hybrid(Item *a) :Item_func_numhybrid(a) {}
- void fix_num_length_and_dec()
+ void fix_length_and_dec()
{
if (arg_count)
decimals= args[0]->temporal_precision(arg0_expected_type());
set_if_smaller(decimals, TIME_SECOND_PART_DIGITS);
max_length=17 + (decimals ? decimals + 1 : 0);
maybe_null= true;
+ cached_result_type= decimals ? DECIMAL_RESULT : INT_RESULT;
}
- void find_num_type()
- { cached_result_type= decimals ? DECIMAL_RESULT : INT_RESULT; }
double real_op() { DBUG_ASSERT(0); return 0; }
String *str_op(String *str) { DBUG_ASSERT(0); return 0; }
bool date_op(MYSQL_TIME *ltime, uint fuzzydate) { DBUG_ASSERT(0); return true; }
@@ -470,11 +469,6 @@ protected:
public:
Item_func_time_to_sec(Item *item) :Item_func_seconds_hybrid(item) {}
const char *func_name() const { return "time_to_sec"; }
- void fix_num_length_and_dec()
- {
- maybe_null= true;
- Item_func_seconds_hybrid::fix_num_length_and_dec();
- }
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
bool check_valid_arguments_processor(uchar *int_arg)
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index 759b929ff82..932f4245c27 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -532,6 +532,32 @@ public:
};
+/**
+ A string whose value may be changed during execution.
+*/
+class Item_string_xml_non_const: public Item_string
+{
+public:
+ Item_string_xml_non_const(const char *str, uint length, CHARSET_INFO *cs)
+ :Item_string(str, length, cs)
+ { }
+ bool const_item() const { return false ; }
+ bool basic_const_item() const { return false; }
+ void set_value(const char *str, uint length, CHARSET_INFO *cs)
+ {
+ str_value.set(str, length, cs);
+ }
+ Item *safe_charset_converter(CHARSET_INFO *tocs)
+ {
+ /*
+ Item_string::safe_charset_converter() does not accept non-constants.
+ Note, conversion is not really needed here anyway.
+ */
+ return this;
+ }
+};
+
+
class Item_nodeset_to_const_comparator :public Item_bool_func
{
String *pxml;
@@ -550,7 +576,8 @@ public:
longlong val_int()
{
Item_func *comp= (Item_func*)args[1];
- Item_string *fake= (Item_string*)(comp->arguments()[0]);
+ Item_string_xml_non_const *fake=
+ (Item_string_xml_non_const*)(comp->arguments()[0]);
String *res= args[0]->val_nodeset(&tmp_nodeset);
MY_XPATH_FLT *fltbeg= (MY_XPATH_FLT*) res->ptr();
MY_XPATH_FLT *fltend= (MY_XPATH_FLT*) (res->ptr() + res->length());
@@ -568,8 +595,8 @@ public:
if ((node->parent == flt->num) &&
(node->type == MY_XML_NODE_TEXT))
{
- fake->str_value.set(node->beg, node->end - node->beg,
- collation.collation);
+ fake->set_value(node->beg, node->end - node->beg,
+ collation.collation);
if (args[1]->val_int())
return 1;
}
@@ -956,14 +983,12 @@ static Item *create_comparator(MY_XPATH *xpath,
{
/*
Compare a node set to a scalar value.
- We just create a fake Item_string() argument,
+ We just create a fake Item_string_xml_non_const() argument,
which will be filled in with the particular value
in a loop through all of the nodes in the node set.
*/
- Item_string *fake= new Item_string("", 0, xpath->cs);
- /* Don't cache fake because its value will be changed during comparison.*/
- fake->set_used_tables(RAND_TABLE_BIT);
+ Item_string *fake= new Item_string_xml_non_const("", 0, xpath->cs);
Item_nodeset_func *nodeset;
Item *scalar, *comp;
if (a->type() == Item::XPATH_NODESET)
diff --git a/sql/log.cc b/sql/log.cc
index dcdf2bcc74d..4e50f57e656 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -2396,7 +2396,7 @@ static int find_uniq_filename(char *name)
file_info= dir_info->dir_entry;
for (i= dir_info->number_of_files ; i-- ; file_info++)
{
- if (memcmp(file_info->name, start, length) == 0 &&
+ if (strncmp(file_info->name, start, length) == 0 &&
test_if_number(file_info->name+length, &number,0))
{
set_if_bigger(max_found,(ulong) number);
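The memcmp -> strncmp change above is about directory entries shorter than the
binlog base name: strncmp() stops at the terminating NUL, so such entries simply
fail to match instead of being compared past their end. A tiny sketch (the file
names are illustrative):

  // Sketch only: a short directory entry vs. the binlog name prefix.
  #include <cstdio>
  #include <cstring>

  int main()
  {
    const char *prefix= "mysql-bin.";   // base name plus '.'
    const char *entry=  "mysql-bi";     // entry shorter than the prefix
    size_t length= std::strlen(prefix);
    // strncmp() reaches entry's NUL before `length` bytes and reports a mismatch.
    std::printf("match=%d\n", (int) (std::strncmp(entry, prefix, length) == 0));  // match=0
    return 0;
  }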
@@ -2673,11 +2673,13 @@ int MYSQL_LOG::generate_new_name(char *new_name, const char *log_name)
{
if (!fn_ext(log_name)[0])
{
- if (find_uniq_filename(new_name))
+ if (DBUG_EVALUATE_IF("binlog_inject_new_name_error", TRUE, FALSE) ||
+ find_uniq_filename(new_name))
{
- my_printf_error(ER_NO_UNIQUE_LOGFILE, ER(ER_NO_UNIQUE_LOGFILE),
- MYF(ME_FATALERROR), log_name);
- sql_print_error(ER(ER_NO_UNIQUE_LOGFILE), log_name);
+ if (current_thd)
+ my_printf_error(ER_NO_UNIQUE_LOGFILE, ER(ER_NO_UNIQUE_LOGFILE),
+ MYF(ME_FATALERROR), log_name);
+ sql_print_error(ER_DEFAULT(ER_NO_UNIQUE_LOGFILE), log_name);
return 1;
}
}
@@ -2930,7 +2932,8 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
my_b_printf(&log_file,
"# Full_scan: %s Full_join: %s "
"Tmp_table: %s Tmp_table_on_disk: %s\n"
- "# Filesort: %s Filesort_on_disk: %s Merge_passes: %lu\n",
+ "# Filesort: %s Filesort_on_disk: %s Merge_passes: %lu "
+ "Priority_queue: %s\n",
((thd->query_plan_flags & QPLAN_FULL_SCAN) ? "Yes" : "No"),
((thd->query_plan_flags & QPLAN_FULL_JOIN) ? "Yes" : "No"),
((thd->query_plan_flags & QPLAN_TMP_TABLE) ? "Yes" : "No"),
@@ -2938,7 +2941,10 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
((thd->query_plan_flags & QPLAN_FILESORT) ? "Yes" : "No"),
((thd->query_plan_flags & QPLAN_FILESORT_DISK) ?
"Yes" : "No"),
- thd->query_plan_fsort_passes) == (size_t) -1)
+ thd->query_plan_fsort_passes,
+ ((thd->query_plan_flags & QPLAN_FILESORT_PRIORITY_QUEUE) ?
+ "Yes" : "No")
+ ) == (size_t) -1)
tmp_errno= errno;
if (thd->variables.log_slow_verbosity & LOG_SLOW_VERBOSITY_EXPLAIN &&
thd->lex->explain)
@@ -4121,6 +4127,7 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included)
{
int error;
char *to_purge_if_included= NULL;
+ inuse_relaylog *ir;
DBUG_ENTER("purge_first_log");
DBUG_ASSERT(is_open());
@@ -4128,7 +4135,31 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included)
DBUG_ASSERT(!strcmp(rli->linfo.log_file_name,rli->event_relay_log_name));
mysql_mutex_lock(&LOCK_index);
- to_purge_if_included= my_strdup(rli->group_relay_log_name, MYF(0));
+
+ ir= rli->inuse_relaylog_list;
+ while (ir)
+ {
+ inuse_relaylog *next= ir->next;
+ if (!ir->completed || ir->dequeued_count < ir->queued_count)
+ {
+ included= false;
+ break;
+ }
+ if (!included && !strcmp(ir->name, rli->group_relay_log_name))
+ break;
+ if (!next)
+ {
+ rli->last_inuse_relaylog= NULL;
+ included= 1;
+ to_purge_if_included= my_strdup(ir->name, MYF(0));
+ }
+ my_atomic_rwlock_destroy(&ir->inuse_relaylog_atomic_lock);
+ my_free(ir);
+ ir= next;
+ }
+ rli->inuse_relaylog_list= ir;
+ if (ir)
+ to_purge_if_included= my_strdup(ir->name, MYF(0));
/*
Read the next log file name from the index file and pass it back to
@@ -6845,7 +6876,7 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry)
/* Interrupted by kill. */
DEBUG_SYNC(orig_entry->thd, "group_commit_waiting_for_prior_killed");
wfc->wakeup_error= orig_entry->thd->killed_errno();
- if (wfc->wakeup_error)
+ if (!wfc->wakeup_error)
wfc->wakeup_error= ER_QUERY_INTERRUPTED;
my_message(wfc->wakeup_error, ER(wfc->wakeup_error), MYF(0));
DBUG_RETURN(-1);
@@ -6856,12 +6887,6 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry)
else
mysql_mutex_unlock(&wfc->LOCK_wait_commit);
}
- if (wfc && wfc->wakeup_error)
- {
- my_error(ER_PRIOR_COMMIT_FAILED, MYF(0));
- DBUG_RETURN(-1);
- }
-
/*
If the transaction we were waiting for has already put us into the group
commit queue (and possibly already done the entire binlog commit for us),
@@ -6870,6 +6895,12 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry)
if (orig_entry->queued_by_other)
DBUG_RETURN(0);
+ if (wfc && wfc->wakeup_error)
+ {
+ my_error(ER_PRIOR_COMMIT_FAILED, MYF(0));
+ DBUG_RETURN(-1);
+ }
+
/* Now enqueue ourselves in the group commit queue. */
DEBUG_SYNC(orig_entry->thd, "commit_before_enqueue");
orig_entry->thd->clear_wakeup_ready();
@@ -7473,6 +7504,13 @@ MYSQL_BIN_LOG::write_transaction_or_stmt(group_commit_entry *entry,
}
}
+ DBUG_EXECUTE_IF("inject_error_writing_xid",
+ {
+ entry->error_cache= NULL;
+ entry->commit_errno= 28;
+ DBUG_RETURN(ER_ERROR_ON_WRITE);
+ });
+
if (entry->end_event->write(&log_file))
{
entry->error_cache= NULL;
@@ -9076,6 +9114,8 @@ binlog_background_thread(void *arg __attribute__((unused)))
thd->thread_id= thread_id++;
mysql_mutex_unlock(&LOCK_thread_count);
thd->store_globals();
+ thd->security_ctx->skip_grants();
+ thd->set_command(COM_DAEMON);
/*
Load the slave replication GTID state from the mysql.gtid_slave_pos
@@ -9379,7 +9419,7 @@ int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name,
file= -1;
}
- if (0 == strcmp(linfo->log_file_name, last_log_name))
+ if (!strcmp(linfo->log_file_name, last_log_name))
break; // No more files to do
if ((file= open_binlog(&log, linfo->log_file_name, &errmsg)) < 0)
{
@@ -9636,7 +9676,7 @@ set_binlog_snapshot_file(const char *src)
Copy out current values of status variables, for SHOW STATUS or
information_schema.global_status.
- This is called only under LOCK_status, so we can fill in a static array.
+ This is called only under LOCK_show_status, so we can fill in a static array.
*/
void
TC_LOG_BINLOG::set_status_variables(THD *thd)
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 600a98916a9..76170778bad 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -190,6 +190,28 @@ static const char *HA_ERR(int i)
return "No Error!";
}
+
+/*
+ Return true if an error caught during event execution is a temporary error
+ that will cause automatic retry of the event group during parallel
+ replication, false otherwise.
+
+ In parallel replication, conflicting transactions can occasionally cause
+ deadlocks; such errors are handled automatically by rolling back and
+ re-trying the transactions, so they should not pollute the error log.
+*/
+static bool
+is_parallel_retry_error(rpl_group_info *rgi, int err)
+{
+ if (!rgi->is_parallel_exec)
+ return false;
+ if (rgi->killed_for_retry &&
+ (err == ER_QUERY_INTERRUPTED || err == ER_CONNECTION_KILLED))
+ return true;
+ return has_temporary_error(rgi->thd);
+}
+
+
/**
Error reporting facility for Rows_log_event::do_apply_event
@@ -204,7 +226,7 @@ static const char *HA_ERR(int i)
*/
static void inline slave_rows_error_report(enum loglevel level, int ha_error,
- Relay_log_info const *rli, THD *thd,
+ rpl_group_info *rgi, THD *thd,
TABLE *table, const char * type,
const char *log_name, ulong pos)
{
@@ -214,8 +236,19 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error,
uint len;
Diagnostics_area::Sql_condition_iterator it=
thd->get_stmt_da()->sql_conditions();
+ Relay_log_info const *rli= rgi->rli;
const Sql_condition *err;
buff[0]= 0;
+ int errcode= thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0;
+
+ /*
+ In parallel replication, deadlocks or other temporary errors can happen
+ occasionally in normal operation, they will be handled correctly and
+ automatically by re-trying the transactions. So do not pollute the error
+ log with messages about them.
+ */
+ if (is_parallel_retry_error(rgi, errcode))
+ return;
for (err= it++, slider= buff; err && slider < buff_end - 1;
slider += len, err= it++)
@@ -226,7 +259,7 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error,
}
if (ha_error != 0)
- rli->report(level, thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0,
+ rli->report(level, errcode, rgi->gtid_info(),
"Could not execute %s event on table %s.%s;"
"%s handler error %s; "
"the event's master log %s, end_log_pos %lu",
@@ -234,7 +267,7 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error,
buff, handler_error == NULL ? "<unknown>" : handler_error,
log_name, pos);
else
- rli->report(level, thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0,
+ rli->report(level, errcode, rgi->gtid_info(),
"Could not execute %s event on table %s.%s;"
"%s the event's master log %s, end_log_pos %lu",
type, table->s->db.str, table->s->table_name.str,
@@ -974,8 +1007,9 @@ Log_event::do_shall_skip(rpl_group_info *rgi)
Relay_log_info *rli= rgi->rli;
DBUG_PRINT("info", ("ev->server_id: %lu, ::server_id: %lu,"
" rli->replicate_same_server_id: %d,"
- " rli->slave_skip_counter: %lu",
- (ulong) server_id, (ulong) global_system_variables.server_id,
+ " rli->slave_skip_counter: %llu",
+ (ulong) server_id,
+ (ulong) global_system_variables.server_id,
rli->replicate_same_server_id,
rli->slave_skip_counter));
if ((server_id == global_system_variables.server_id &&
@@ -4199,25 +4233,31 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi,
Record any GTID in the same transaction, so slave state is
transactionally consistent.
*/
- if (current_stmt_is_commit && (sub_id= rgi->gtid_sub_id))
+ if (current_stmt_is_commit)
{
- /* Clear the GTID from the RLI so we don't accidentally reuse it. */
- rgi->gtid_sub_id= 0;
-
- gtid= rgi->current_gtid;
thd->variables.option_bits&= ~OPTION_GTID_BEGIN;
- if (rpl_global_gtid_slave_state.record_gtid(thd, &gtid, sub_id, true, false))
+ if (rgi->gtid_pending)
{
- rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE,
- "Error during COMMIT: failed to update GTID state in "
- "%s.%s: %d: %s",
- "mysql", rpl_gtid_slave_state_table_name.str,
- thd->get_stmt_da()->sql_errno(),
- thd->get_stmt_da()->message());
- trans_rollback(thd);
- sub_id= 0;
- thd->is_slave_error= 1;
- goto end;
+ sub_id= rgi->gtid_sub_id;
+ rgi->gtid_pending= false;
+
+ gtid= rgi->current_gtid;
+ if (rpl_global_gtid_slave_state.record_gtid(thd, &gtid, sub_id, true, false))
+ {
+ int errcode= thd->get_stmt_da()->sql_errno();
+ if (!is_parallel_retry_error(rgi, errcode))
+ rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE,
+ rgi->gtid_info(),
+ "Error during COMMIT: failed to update GTID state in "
+ "%s.%s: %d: %s",
+ "mysql", rpl_gtid_slave_state_table_name.str,
+ errcode,
+ thd->get_stmt_da()->message());
+ trans_rollback(thd);
+ sub_id= 0;
+ thd->is_slave_error= 1;
+ goto end;
+ }
}
}
@@ -4270,7 +4310,7 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi,
clear_all_errors(thd, const_cast<Relay_log_info*>(rli)); /* Can ignore query */
else
{
- rli->report(ERROR_LEVEL, expected_error,
+ rli->report(ERROR_LEVEL, expected_error, rgi->gtid_info(),
"\
Query partially completed on the master (error on master: %d) \
and was aborted. There is a chance that your master is inconsistent at this \
@@ -4326,7 +4366,7 @@ compare_errors:
!ignored_error_code(actual_error) &&
!ignored_error_code(expected_error))
{
- rli->report(ERROR_LEVEL, 0,
+ rli->report(ERROR_LEVEL, 0, rgi->gtid_info(),
"\
Query caused different errors on master and slave. \
Error on master: message (format)='%s' error code=%d ; \
@@ -4349,18 +4389,21 @@ Default database: '%s'. Query: '%s'",
{
DBUG_PRINT("info",("error ignored"));
clear_all_errors(thd, const_cast<Relay_log_info*>(rli));
- thd->reset_killed();
+ if (actual_error == ER_QUERY_INTERRUPTED ||
+ actual_error == ER_CONNECTION_KILLED)
+ thd->reset_killed();
}
/*
Other cases: mostly we expected no error and get one.
*/
else if (thd->is_slave_error || thd->is_fatal_error)
{
- rli->report(ERROR_LEVEL, actual_error,
- "Error '%s' on query. Default database: '%s'. Query: '%s'",
- (actual_error ? thd->get_stmt_da()->message() :
- "unexpected success or fatal error"),
- print_slave_db_safe(thd->db), query_arg);
+ if (!is_parallel_retry_error(rgi, actual_error))
+ rli->report(ERROR_LEVEL, actual_error, rgi->gtid_info(),
+ "Error '%s' on query. Default database: '%s'. Query: '%s'",
+ (actual_error ? thd->get_stmt_da()->message() :
+ "unexpected success or fatal error"),
+ print_slave_db_safe(thd->db), query_arg);
thd->is_slave_error= 1;
}
@@ -5028,7 +5071,7 @@ int Format_description_log_event::do_apply_event(rpl_group_info *rgi)
if (!is_artificial_event() && created && thd->transaction.all.ha_list)
{
/* This is not an error (XA is safe), just information */
- rli->report(INFORMATION_LEVEL, 0,
+ rli->report(INFORMATION_LEVEL, 0, NULL,
"Rolling back unfinished transaction (no COMMIT "
"or ROLLBACK in relay log). A probable cause is that "
"the master died while writing the transaction to "
@@ -5969,7 +6012,7 @@ error:
sql_errno=ER_UNKNOWN_ERROR;
err=ER(sql_errno);
}
- rli->report(ERROR_LEVEL, sql_errno,"\
+ rli->report(ERROR_LEVEL, sql_errno, rgi->gtid_info(), "\
Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'",
err, (char*)table_name, print_slave_db_safe(remember_db));
free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
@@ -5986,7 +6029,7 @@ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'",
(char*)table_name,
print_slave_db_safe(remember_db));
- rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(),
ER(ER_SLAVE_FATAL_ERROR), buf);
DBUG_RETURN(1);
}
@@ -6476,12 +6519,10 @@ Gtid_log_event::do_apply_event(rpl_group_info *rgi)
thd->variables.server_id= this->server_id;
thd->variables.gtid_domain_id= this->domain_id;
thd->variables.gtid_seq_no= this->seq_no;
+ mysql_reset_thd_for_next_command(thd);
if (opt_gtid_strict_mode && opt_bin_log && opt_log_slave_updates)
{
- /* Need to reset prior "ok" status to give an error. */
- thd->clear_error();
- thd->get_stmt_da()->reset_diagnostics_area();
if (mysql_bin_log.check_strict_gtid_sequence(this->domain_id,
this->server_id, this->seq_no))
return 1;
@@ -7259,28 +7300,41 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi)
bool res;
int err;
rpl_gtid gtid;
- uint64 sub_id;
+ uint64 sub_id= 0;
Relay_log_info const *rli= rgi->rli;
/*
+ XID_EVENT works like a COMMIT statement. And it also updates the
+ mysql.gtid_slave_pos table with the GTID of the current transaction.
+
+ Therefore, it acts much like a normal SQL statement, so we need to do
+ mysql_reset_thd_for_next_command() as if starting a new statement.
+ */
+ mysql_reset_thd_for_next_command(thd);
+ /*
Record any GTID in the same transaction, so slave state is transactionally
consistent.
*/
- if ((sub_id= rgi->gtid_sub_id))
+ if (rgi->gtid_pending)
{
- /* Clear the GTID from the RLI so we don't accidentally reuse it. */
- rgi->gtid_sub_id= 0;
+ sub_id= rgi->gtid_sub_id;
+ rgi->gtid_pending= false;
gtid= rgi->current_gtid;
err= rpl_global_gtid_slave_state.record_gtid(thd, &gtid, sub_id, true, false);
if (err)
{
- rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE,
- "Error during XID COMMIT: failed to update GTID state in "
- "%s.%s: %d: %s",
- "mysql", rpl_gtid_slave_state_table_name.str,
- thd->get_stmt_da()->sql_errno(),
- thd->get_stmt_da()->message());
+ int ec= thd->get_stmt_da()->sql_errno();
+ /*
+ Do not report an error if this is really a kill due to a deadlock.
+ In this case, the transaction will be re-tried instead.
+ */
+ if (!is_parallel_retry_error(rgi, ec))
+ rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, rgi->gtid_info(),
+ "Error during XID COMMIT: failed to update GTID state in "
+ "%s.%s: %d: %s",
+ "mysql", rpl_gtid_slave_state_table_name.str, ec,
+ thd->get_stmt_da()->message());
trans_rollback(thd);
thd->is_slave_error= 1;
return err;
@@ -8311,7 +8365,7 @@ int Create_file_log_event::do_apply_event(rpl_group_info *rgi)
init_io_cache(&file, fd, IO_SIZE, WRITE_CACHE, (my_off_t)0, 0,
MYF(MY_WME|MY_NABP)))
{
- rli->report(ERROR_LEVEL, my_errno,
+ rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
"Error in Create_file event: could not open file '%s'",
fname_buf);
goto err;
@@ -8323,7 +8377,7 @@ int Create_file_log_event::do_apply_event(rpl_group_info *rgi)
if (write_base(&file))
{
strmov(ext, ".info"); // to have it right in the error message
- rli->report(ERROR_LEVEL, my_errno,
+ rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
"Error in Create_file event: could not write to file '%s'",
fname_buf);
goto err;
@@ -8339,14 +8393,14 @@ int Create_file_log_event::do_apply_event(rpl_group_info *rgi)
O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
MYF(MY_WME))) < 0)
{
- rli->report(ERROR_LEVEL, my_errno,
+ rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
"Error in Create_file event: could not open file '%s'",
fname_buf);
goto err;
}
if (mysql_file_write(fd, (uchar*) block, block_len, MYF(MY_WME+MY_NABP)))
{
- rli->report(ERROR_LEVEL, my_errno,
+ rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
"Error in Create_file event: write to '%s' failed",
fname_buf);
goto err;
@@ -8495,7 +8549,7 @@ int Append_block_log_event::do_apply_event(rpl_group_info *rgi)
O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
MYF(MY_WME))) < 0)
{
- rli->report(ERROR_LEVEL, my_errno,
+ rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
"Error in %s event: could not create file '%s'",
get_type_str(), fname);
goto err;
@@ -8506,7 +8560,7 @@ int Append_block_log_event::do_apply_event(rpl_group_info *rgi)
O_WRONLY | O_APPEND | O_BINARY | O_NOFOLLOW,
MYF(MY_WME))) < 0)
{
- rli->report(ERROR_LEVEL, my_errno,
+ rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
"Error in %s event: could not open file '%s'",
get_type_str(), fname);
goto err;
@@ -8519,7 +8573,7 @@ int Append_block_log_event::do_apply_event(rpl_group_info *rgi)
if (mysql_file_write(fd, (uchar*) block, block_len, MYF(MY_WME+MY_NABP)))
{
- rli->report(ERROR_LEVEL, my_errno,
+ rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
"Error in %s event: write to '%s' failed",
get_type_str(), fname);
goto err;
@@ -8736,7 +8790,7 @@ int Execute_load_log_event::do_apply_event(rpl_group_info *rgi)
init_io_cache(&file, fd, IO_SIZE, READ_CACHE, (my_off_t)0, 0,
MYF(MY_WME|MY_NABP)))
{
- rli->report(ERROR_LEVEL, my_errno,
+ rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
"Error in Exec_load event: could not open file '%s'",
fname);
goto err;
@@ -8748,7 +8802,7 @@ int Execute_load_log_event::do_apply_event(rpl_group_info *rgi)
opt_slave_sql_verify_checksum)) ||
lev->get_type_code() != NEW_LOAD_EVENT)
{
- rli->report(ERROR_LEVEL, 0, "Error in Exec_load event: "
+ rli->report(ERROR_LEVEL, 0, rgi->gtid_info(), "Error in Exec_load event: "
"file '%s' appears corrupted", fname);
goto err;
}
@@ -8774,7 +8828,7 @@ int Execute_load_log_event::do_apply_event(rpl_group_info *rgi)
char *tmp= my_strdup(rli->last_error().message, MYF(MY_WME));
if (tmp)
{
- rli->report(ERROR_LEVEL, rli->last_error().number,
+ rli->report(ERROR_LEVEL, rli->last_error().number, rgi->gtid_info(),
"%s. Failed executing load from '%s'", tmp, fname);
my_free(tmp);
}
@@ -8946,9 +9000,9 @@ void Execute_load_query_log_event::print(FILE* file,
if (local_fname)
{
my_b_write(&cache, (uchar*) query, fn_pos_start);
- my_b_write_string(&cache, " LOCAL INFILE \'");
- my_b_printf(&cache, "%s", local_fname);
- my_b_write_string(&cache, "\'");
+ my_b_write_string(&cache, " LOCAL INFILE ");
+ pretty_print_str(&cache, local_fname, strlen(local_fname));
+
if (dup_handling == LOAD_DUP_REPLACE)
my_b_write_string(&cache, " REPLACE");
my_b_write_string(&cache, " INTO");
@@ -9007,7 +9061,7 @@ Execute_load_query_log_event::do_apply_event(rpl_group_info *rgi)
/* Replace filename and LOCAL keyword in query before executing it */
if (buf == NULL)
{
- rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(),
ER(ER_SLAVE_FATAL_ERROR), "Not enough memory");
return 1;
}
@@ -9625,7 +9679,8 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
(long long)wsrep_thd_trx_seqno(thd));
}
#endif
- if (thd->is_slave_error || thd->is_fatal_error)
+ if ((thd->is_slave_error || thd->is_fatal_error) &&
+ !is_parallel_retry_error(rgi, actual_error))
{
/*
Error reporting borrowed from Query_log_event with many excessive
@@ -9633,7 +9688,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
We should not honour --slave-skip-errors at this point as we are
having severe errors which should not be skipped.
*/
- rli->report(ERROR_LEVEL, actual_error,
+ rli->report(ERROR_LEVEL, actual_error, rgi->gtid_info(),
"Error executing row event: '%s'",
(actual_error ? thd->get_stmt_da()->message() :
"unexpected success or fatal error"));
@@ -9674,8 +9729,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
{
DBUG_ASSERT(ptr->m_tabledef_valid);
TABLE *conv_table;
- if (!ptr->m_tabledef.compatible_with(thd, const_cast<Relay_log_info*>(rli),
- ptr->table, &conv_table))
+ if (!ptr->m_tabledef.compatible_with(thd, rgi, ptr->table, &conv_table))
{
DBUG_PRINT("debug", ("Table: %s.%s is not compatible with master",
ptr->table->s->db.str,
@@ -9831,7 +9885,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
if (idempotent_error || ignored_error)
{
if (global_system_variables.log_warnings)
- slave_rows_error_report(WARNING_LEVEL, error, rli, thd, table,
+ slave_rows_error_report(WARNING_LEVEL, error, rgi, thd, table,
get_type_str(),
RPL_LOG_NAME, (ulong) log_pos);
clear_all_errors(thd, const_cast<Relay_log_info*>(rli));
@@ -9887,7 +9941,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
{
if (global_system_variables.log_warnings)
- slave_rows_error_report(WARNING_LEVEL, error, rli, thd, table,
+ slave_rows_error_report(WARNING_LEVEL, error, rgi, thd, table,
get_type_str(),
RPL_LOG_NAME, (ulong) log_pos);
clear_all_errors(thd, const_cast<Relay_log_info*>(rli));
@@ -9898,7 +9952,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
if (error)
{
- slave_rows_error_report(ERROR_LEVEL, error, rli, thd, table,
+ slave_rows_error_report(ERROR_LEVEL, error, rgi, thd, table,
get_type_str(),
RPL_LOG_NAME, (ulong) log_pos);
/*
@@ -9920,7 +9974,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
if (get_flags(STMT_END_F) && (error= rows_event_stmt_cleanup(rgi, thd)))
slave_rows_error_report(ERROR_LEVEL,
thd->is_error() ? 0 : error,
- rli, thd, table,
+ rgi, thd, table,
get_type_str(),
RPL_LOG_NAME, (ulong) log_pos);
DBUG_RETURN(error);
@@ -10899,7 +10953,7 @@ int Table_map_log_event::do_apply_event(rpl_group_info *rgi)
table_list->table_id);
if (thd->slave_thread)
- rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(),
ER(ER_SLAVE_FATAL_ERROR), buf);
else
/*
@@ -12407,6 +12461,8 @@ Incident_log_event::Incident_log_event(const char *buf, uint event_len,
DBUG_PRINT("info",("event_len: %u; common_header_len: %d; post_header_len: %d",
event_len, common_header_len, post_header_len));
+ m_message.str= NULL;
+ m_message.length= 0;
int incident_number= uint2korr(buf + common_header_len);
if (incident_number >= INCIDENT_COUNT ||
incident_number <= INCIDENT_NONE)
@@ -12423,7 +12479,13 @@ Incident_log_event::Incident_log_event(const char *buf, uint event_len,
uint8 len= 0; // Assignment to keep compiler happy
const char *str= NULL; // Assignment to keep compiler happy
read_str(&ptr, str_end, &str, &len);
- m_message.str= const_cast<char*>(str);
+ if (!(m_message.str= (char*) my_malloc(len+1, MYF(MY_WME))))
+ {
+ /* Mark this event invalid */
+ m_incident= INCIDENT_NONE;
+ DBUG_VOID_RETURN;
+ }
+ strmake(m_message.str, str, len);
m_message.length= len;
DBUG_PRINT("info", ("m_incident: %d", m_incident));
DBUG_VOID_RETURN;
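The my_malloc(len + 1) / strmake() pair above copies the incident message out of
the event buffer instead of pointing into it. strmake() copies at most len bytes
and always NUL-terminates, hence the extra byte; a sketch of that contract (not
the server's implementation):

  // Sketch only: copy at most `len` bytes and NUL-terminate the destination.
  #include <cstdio>
  #include <cstring>

  static char *strmake_sketch(char *dst, const char *src, size_t len)
  {
    size_t n= std::strlen(src);
    if (n > len)
      n= len;
    std::memcpy(dst, src, n);
    dst[n]= '\0';
    return dst + n;                 // points at the closing NUL
  }

  int main()
  {
    char buf[8];
    strmake_sketch(buf, "incident message text", sizeof(buf) - 1);
    std::printf("%s\n", buf);       // "inciden"
    return 0;
  }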
@@ -12432,6 +12494,8 @@ Incident_log_event::Incident_log_event(const char *buf, uint event_len,
Incident_log_event::~Incident_log_event()
{
+ if (m_message.str)
+ my_free(m_message.str);
}
@@ -12527,7 +12591,14 @@ Incident_log_event::do_apply_event(rpl_group_info *rgi)
{
Relay_log_info const *rli= rgi->rli;
DBUG_ENTER("Incident_log_event::do_apply_event");
- rli->report(ERROR_LEVEL, ER_SLAVE_INCIDENT,
+
+ if (ignored_error_code(ER_SLAVE_INCIDENT))
+ {
+ DBUG_PRINT("info", ("Ignoring Incident"));
+ DBUG_RETURN(0);
+ }
+
+ rli->report(ERROR_LEVEL, ER_SLAVE_INCIDENT, NULL,
ER(ER_SLAVE_INCIDENT),
description(),
m_message.length > 0 ? m_message.str : "<none>");
diff --git a/sql/log_event.h b/sql/log_event.h
index 212215d97b6..16329ab925b 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -4690,7 +4690,16 @@ public:
{
DBUG_ENTER("Incident_log_event::Incident_log_event");
DBUG_PRINT("enter", ("m_incident: %d", m_incident));
- m_message= msg;
+ m_message.str= NULL;
+ m_message.length= 0;
+ if (!(m_message.str= (char*) my_malloc(msg.length+1, MYF(MY_WME))))
+ {
+ /* Mark this event invalid */
+ m_incident= INCIDENT_NONE;
+ DBUG_VOID_RETURN;
+ }
+ strmake(m_message.str, msg.str, msg.length);
+ m_message.length= msg.length;
set_direct_logging();
+ /* Replicate the incident regardless of @@skip_replication. */
flags&= ~LOG_EVENT_SKIP_REPLICATION_F;
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index eaa882518f5..88617e2263f 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -108,7 +108,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi)
Error reporting borrowed from Query_log_event with many excessive
simplifications (we don't honour --slave-skip-errors)
*/
- rli->report(ERROR_LEVEL, actual_error,
+ rli->report(ERROR_LEVEL, actual_error, NULL,
"Error '%s' on opening tables",
(actual_error ? ev_thd->get_stmt_da()->message() :
"unexpected success or fatal error"));
@@ -133,8 +133,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi)
{
DBUG_ASSERT(ptr->m_tabledef_valid);
TABLE *conv_table;
- if (!ptr->m_tabledef.compatible_with(thd, const_cast<Relay_log_info*>(rli),
- ptr->table, &conv_table))
+ if (!ptr->m_tabledef.compatible_with(thd, rgi, ptr->table, &conv_table))
{
ev_thd->is_slave_error= 1;
rgi->slave_close_thread_tables(ev_thd);
@@ -234,7 +233,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi)
break;
default:
- rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(),
+ rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), NULL,
"Error in %s event: row application failed. %s",
ev->get_type_str(),
ev_thd->is_error() ? ev_thd->get_stmt_da()->message() : "");
@@ -251,7 +250,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi)
if (error)
{ /* error has occured during the transaction */
- rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(),
+ rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), NULL,
"Error in %s event: error during transaction execution "
"on table %s.%s. %s",
ev->get_type_str(), table->s->db.str,
@@ -1402,7 +1401,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
simplifications (we don't honour --slave-skip-errors)
*/
uint actual_error= thd->net.last_errno;
- rli->report(ERROR_LEVEL, actual_error,
+ rli->report(ERROR_LEVEL, actual_error, NULL,
"Error '%s' in %s event: when locking tables",
(actual_error ? thd->net.last_error :
"unexpected success or fatal error"),
@@ -1411,7 +1410,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
}
else
{
- rli->report(ERROR_LEVEL, error,
+ rli->report(ERROR_LEVEL, error, NULL,
"Error in %s event: when locking tables",
get_type_str());
}
@@ -1433,8 +1432,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
ptr= static_cast<RPL_TABLE_LIST*>(ptr->next_global), i++)
{
TABLE *conv_table;
- if (ptr->m_tabledef.compatible_with(thd, const_cast<Relay_log_info*>(rli),
- ptr->table, &conv_table))
+ if (ptr->m_tabledef.compatible_with(thd, rgi, ptr->table, &conv_table))
{
thd->is_slave_error= 1;
rgi->slave_close_thread_tables(thd);
@@ -1558,7 +1556,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
break;
default:
- rli->report(ERROR_LEVEL, thd->net.last_errno,
+ rli->report(ERROR_LEVEL, thd->net.last_errno, NULL,
"Error in %s event: row application failed. %s",
get_type_str(),
thd->net.last_error ? thd->net.last_error : "");
@@ -1596,7 +1594,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
if (error)
{ /* error has occured during the transaction */
- rli->report(ERROR_LEVEL, thd->net.last_errno,
+ rli->report(ERROR_LEVEL, thd->net.last_errno, NULL,
"Error in %s event: error during transaction execution "
"on table %s.%s. %s",
get_type_str(), table->s->db.str,
@@ -1679,7 +1677,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
*/
DBUG_ASSERT(! thd->transaction_rollback_request);
if ((error= (binlog_error ? trans_rollback_stmt(thd) : trans_commit_stmt(thd))))
- rli->report(ERROR_LEVEL, error,
+ rli->report(ERROR_LEVEL, error, NULL,
"Error in %s event: commit of row events failed, "
"table `%s`.`%s`",
get_type_str(), m_table->s->db.str,
diff --git a/sql/log_slow.h b/sql/log_slow.h
index e8faf79a047..2ae07da97c3 100644
--- a/sql/log_slow.h
+++ b/sql/log_slow.h
@@ -31,6 +31,8 @@
#define QPLAN_QC_NO 1 << 6
#define QPLAN_TMP_DISK 1 << 7
#define QPLAN_TMP_TABLE 1 << 8
+#define QPLAN_FILESORT_PRIORITY_QUEUE 1 << 9
+
/* ... */
#define QPLAN_MAX ((ulong) 1) << 31 /* reserved as placeholder */
diff --git a/sql/mf_iocache.cc b/sql/mf_iocache.cc
index d8848c1ee35..3ed9261f630 100644
--- a/sql/mf_iocache.cc
+++ b/sql/mf_iocache.cc
@@ -57,7 +57,7 @@ int _my_b_net_read(register IO_CACHE *info, uchar *Buffer,
if (!info->end_of_file)
DBUG_RETURN(1); /* because my_b_get (no _) takes 1 byte at a time */
- read_length=my_net_read(net);
+ read_length= my_net_read_packet(net, 0);
if (read_length == packet_error)
{
info->error= -1;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 94503d507fe..fa4f92b26dd 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -375,6 +375,7 @@ static bool binlog_format_used= false;
LEX_STRING opt_init_connect, opt_init_slave;
mysql_cond_t COND_thread_cache;
static mysql_cond_t COND_flush_thread_cache;
+mysql_cond_t COND_slave_init;
static DYNAMIC_ARRAY all_options;
/* Global variables */
@@ -521,6 +522,7 @@ ulong binlog_stmt_cache_use= 0, binlog_stmt_cache_disk_use= 0;
ulong max_connections, max_connect_errors;
ulong extra_max_connections;
ulong slave_retried_transactions;
+ulong feature_files_opened_with_delayed_keys;
ulonglong denied_connections;
my_decimal decimal_zero;
@@ -706,12 +708,12 @@ pthread_key(MEM_ROOT**,THR_MALLOC);
pthread_key(THD*, THR_THD);
mysql_mutex_t LOCK_thread_count, LOCK_thread_cache;
mysql_mutex_t
- LOCK_status, LOCK_error_log, LOCK_short_uuid_generator,
+ LOCK_status, LOCK_show_status, LOCK_error_log, LOCK_short_uuid_generator,
LOCK_delayed_insert, LOCK_delayed_status, LOCK_delayed_create,
LOCK_crypt,
LOCK_global_system_variables,
LOCK_user_conn, LOCK_slave_list, LOCK_active_mi,
- LOCK_connection_count, LOCK_error_messages;
+ LOCK_connection_count, LOCK_error_messages, LOCK_slave_init;
mysql_mutex_t LOCK_stats, LOCK_global_user_client_stats,
LOCK_global_table_stats, LOCK_global_index_stats;
@@ -863,7 +865,8 @@ PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
key_LOCK_gdl, key_LOCK_global_system_variables,
key_LOCK_manager,
key_LOCK_prepared_stmt_count,
- key_LOCK_rpl_status, key_LOCK_server_started, key_LOCK_status,
+ key_LOCK_rpl_status, key_LOCK_server_started,
+ key_LOCK_status, key_LOCK_show_status,
key_LOCK_system_variables_hash, key_LOCK_thd_data,
key_LOCK_user_conn, key_LOCK_uuid_short_generator, key_LOG_LOCK_log,
key_master_info_data_lock, key_master_info_run_lock,
@@ -885,7 +888,8 @@ PSI_mutex_key key_LOCK_stats,
key_LOCK_wakeup_ready, key_LOCK_wait_commit;
PSI_mutex_key key_LOCK_gtid_waiting;
-PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered;
+PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered,
+ key_LOCK_slave_init;
PSI_mutex_key key_TABLE_SHARE_LOCK_share;
static PSI_mutex_info all_server_mutexes[]=
@@ -922,6 +926,7 @@ static PSI_mutex_info all_server_mutexes[]=
{ &key_LOCK_rpl_status, "LOCK_rpl_status", PSI_FLAG_GLOBAL},
{ &key_LOCK_server_started, "LOCK_server_started", PSI_FLAG_GLOBAL},
{ &key_LOCK_status, "LOCK_status", PSI_FLAG_GLOBAL},
+ { &key_LOCK_show_status, "LOCK_show_status", PSI_FLAG_GLOBAL},
{ &key_LOCK_system_variables_hash, "LOCK_system_variables_hash", PSI_FLAG_GLOBAL},
{ &key_LOCK_stats, "LOCK_stats", PSI_FLAG_GLOBAL},
{ &key_LOCK_global_user_client_stats, "LOCK_global_user_client_stats", PSI_FLAG_GLOBAL},
@@ -948,6 +953,7 @@ static PSI_mutex_info all_server_mutexes[]=
{ &key_LOCK_error_messages, "LOCK_error_messages", PSI_FLAG_GLOBAL},
{ &key_LOCK_prepare_ordered, "LOCK_prepare_ordered", PSI_FLAG_GLOBAL},
{ &key_LOCK_commit_ordered, "LOCK_commit_ordered", PSI_FLAG_GLOBAL},
+ { &key_LOCK_slave_init, "LOCK_slave_init", PSI_FLAG_GLOBAL},
{ &key_LOG_INFO_lock, "LOG_INFO::lock", 0},
{ &key_LOCK_thread_count, "LOCK_thread_count", PSI_FLAG_GLOBAL},
{ &key_LOCK_thread_cache, "LOCK_thread_cache", PSI_FLAG_GLOBAL},
@@ -1002,7 +1008,7 @@ PSI_cond_key key_TC_LOG_MMAP_COND_queue_busy;
PSI_cond_key key_COND_rpl_thread_queue, key_COND_rpl_thread,
key_COND_rpl_thread_pool,
key_COND_parallel_entry, key_COND_group_commit_orderer,
- key_COND_prepare_ordered;
+ key_COND_prepare_ordered, key_COND_slave_init;
PSI_cond_key key_COND_wait_gtid, key_COND_gtid_ignore_duplicates;
static PSI_cond_info all_server_conds[]=
@@ -1051,6 +1057,7 @@ static PSI_cond_info all_server_conds[]=
{ &key_COND_parallel_entry, "COND_parallel_entry", 0},
{ &key_COND_group_commit_orderer, "COND_group_commit_orderer", 0},
{ &key_COND_prepare_ordered, "COND_prepare_ordered", 0},
+ { &key_COND_slave_init, "COND_slave_init", 0},
{ &key_COND_wait_gtid, "COND_wait_gtid", 0},
{ &key_COND_gtid_ignore_duplicates, "COND_gtid_ignore_duplicates", 0}
};
@@ -1116,65 +1123,60 @@ void net_before_header_psi(struct st_net *net, void *user_data, size_t /* unused
thd= static_cast<THD*> (user_data);
DBUG_ASSERT(thd != NULL);
- if (thd->m_server_idle)
- {
- /*
- The server is IDLE, waiting for the next command.
- Technically, it is a wait on a socket, which may take a long time,
- because the call is blocking.
- Disable the socket instrumentation, to avoid recording a SOCKET event.
- Instead, start explicitly an IDLE event.
- */
- MYSQL_SOCKET_SET_STATE(net->vio->mysql_socket, PSI_SOCKET_STATE_IDLE);
- MYSQL_START_IDLE_WAIT(thd->m_idle_psi, &thd->m_idle_state);
- }
+ /*
+ We only come here when the server is IDLE, waiting for the next command.
+ Technically, it is a wait on a socket, which may take a long time,
+ because the call is blocking.
+ Disable the socket instrumentation, to avoid recording a SOCKET event.
+ Instead, explicitly start an IDLE event.
+ */
+ MYSQL_SOCKET_SET_STATE(net->vio->mysql_socket, PSI_SOCKET_STATE_IDLE);
+ MYSQL_START_IDLE_WAIT(thd->m_idle_psi, &thd->m_idle_state);
}
-void net_after_header_psi(struct st_net *net, void *user_data, size_t /* unused: count */, my_bool rc)
+void net_after_header_psi(struct st_net *net, void *user_data,
+ size_t /* unused: count */, my_bool rc)
{
THD *thd;
thd= static_cast<THD*> (user_data);
DBUG_ASSERT(thd != NULL);
- if (thd->m_server_idle)
- {
- /*
- The server just got data for a network packet header,
- from the network layer.
- The IDLE event is now complete, since we now have a message to process.
- We need to:
- - start a new STATEMENT event
- - start a new STAGE event, within this statement,
- - start recording SOCKET WAITS events, within this stage.
- The proper order is critical to get events numbered correctly,
- and nested in the proper parent.
- */
- MYSQL_END_IDLE_WAIT(thd->m_idle_psi);
-
- if (! rc)
- {
- thd->m_statement_psi= MYSQL_START_STATEMENT(&thd->m_statement_state,
- stmt_info_new_packet.m_key,
- thd->db, thd->db_length,
- thd->charset());
+ /*
+ The server just got data for a network packet header,
+ from the network layer.
+ The IDLE event is now complete, since we now have a message to process.
+ We need to:
+ - start a new STATEMENT event
+ - start a new STAGE event, within this statement,
+ - start recording SOCKET WAITS events, within this stage.
+ The proper order is critical to get events numbered correctly,
+ and nested in the proper parent.
+ */
+ MYSQL_END_IDLE_WAIT(thd->m_idle_psi);
- THD_STAGE_INFO(thd, stage_init);
- }
+ if (! rc)
+ {
+ thd->m_statement_psi= MYSQL_START_STATEMENT(&thd->m_statement_state,
+ stmt_info_new_packet.m_key,
+ thd->db, thd->db_length,
+ thd->charset());
- /*
- TODO: consider recording a SOCKET event for the bytes just read,
- by also passing count here.
- */
- MYSQL_SOCKET_SET_STATE(net->vio->mysql_socket, PSI_SOCKET_STATE_ACTIVE);
+ THD_STAGE_INFO(thd, stage_init);
}
+
+ /*
+ TODO: consider recording a SOCKET event for the bytes just read,
+ by also passing count here.
+ */
+ MYSQL_SOCKET_SET_STATE(net->vio->mysql_socket, PSI_SOCKET_STATE_ACTIVE);
}
+
void init_net_server_extension(THD *thd)
{
/* Start with a clean state for connection events. */
thd->m_idle_psi= NULL;
thd->m_statement_psi= NULL;
- thd->m_server_idle= false;
/* Hook up the NET_SERVER callback in the net layer. */
thd->m_net_server_extension.m_user_data= thd;
thd->m_net_server_extension.m_before_header= net_before_header_psi;
@@ -2201,6 +2203,7 @@ static void clean_up_mutexes()
mysql_mutex_destroy(&LOCK_thread_count);
mysql_mutex_destroy(&LOCK_thread_cache);
mysql_mutex_destroy(&LOCK_status);
+ mysql_mutex_destroy(&LOCK_show_status);
mysql_mutex_destroy(&LOCK_delayed_insert);
mysql_mutex_destroy(&LOCK_delayed_status);
mysql_mutex_destroy(&LOCK_delayed_create);
@@ -2238,6 +2241,8 @@ static void clean_up_mutexes()
mysql_mutex_destroy(&LOCK_prepare_ordered);
mysql_cond_destroy(&COND_prepare_ordered);
mysql_mutex_destroy(&LOCK_commit_ordered);
+ mysql_mutex_destroy(&LOCK_slave_init);
+ mysql_cond_destroy(&COND_slave_init);
DBUG_VOID_RETURN;
}
@@ -2440,6 +2445,7 @@ static MYSQL_SOCKET activate_tcp_port(uint port)
int error;
int arg;
char port_buf[NI_MAXSERV];
+ const char *real_bind_addr_str;
MYSQL_SOCKET ip_sock= MYSQL_INVALID_SOCKET;
DBUG_ENTER("activate_tcp_port");
DBUG_PRINT("general",("IP Socket is %d",port));
@@ -2448,16 +2454,36 @@ static MYSQL_SOCKET activate_tcp_port(uint port)
hints.ai_flags= AI_PASSIVE;
hints.ai_socktype= SOCK_STREAM;
hints.ai_family= AF_UNSPEC;
+
+ if (my_bind_addr_str && strcmp(my_bind_addr_str, "*") == 0)
+ real_bind_addr_str= NULL; // Windows doesn't seem to support * here
+ else
+ real_bind_addr_str= my_bind_addr_str;
my_snprintf(port_buf, NI_MAXSERV, "%d", port);
- error= getaddrinfo(my_bind_addr_str, port_buf, &hints, &ai);
+ error= getaddrinfo(real_bind_addr_str, port_buf, &hints, &ai);
if (error != 0)
{
DBUG_PRINT("error",("Got error: %d from getaddrinfo()", error));
- sql_perror(ER_DEFAULT(ER_IPSOCK_ERROR)); /* purecov: tested */
+
+ sql_print_error("%s: %s", ER_DEFAULT(ER_IPSOCK_ERROR), gai_strerror(error));
unireg_abort(1); /* purecov: tested */
}
+ /*
+ special case: for wildcard addresses prefer ipv6 over ipv4,
+ because we later switch off IPV6_V6ONLY, so ipv6 wildcard
+ addresses will work for ipv4 too
+ */
+ if (!real_bind_addr_str && ai->ai_family == AF_INET && ai->ai_next
+ && ai->ai_next->ai_family == AF_INET6)
+ {
+ a= ai;
+ ai= ai->ai_next;
+ a->ai_next= ai->ai_next;
+ ai->ai_next= a;
+ }
+
for (a= ai; a != NULL; a= a->ai_next)
{
ip_sock= mysql_socket_socket(key_socket_tcpip, a->ai_family,
@@ -4463,6 +4489,7 @@ static int init_thread_environment()
mysql_mutex_init(key_LOCK_thread_count, &LOCK_thread_count, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_thread_cache, &LOCK_thread_cache, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_status, &LOCK_status, MY_MUTEX_INIT_FAST);
+ mysql_mutex_init(key_LOCK_show_status, &LOCK_show_status, MY_MUTEX_INIT_SLOW);
mysql_mutex_init(key_LOCK_delayed_insert,
&LOCK_delayed_insert, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_delayed_status,
@@ -4497,6 +4524,9 @@ static int init_thread_environment()
mysql_cond_init(key_COND_prepare_ordered, &COND_prepare_ordered, NULL);
mysql_mutex_init(key_LOCK_commit_ordered, &LOCK_commit_ordered,
MY_MUTEX_INIT_SLOW);
+ mysql_mutex_init(key_LOCK_slave_init, &LOCK_slave_init,
+ MY_MUTEX_INIT_SLOW);
+ mysql_cond_init(key_COND_slave_init, &COND_slave_init, NULL);
#ifdef HAVE_OPENSSL
mysql_mutex_init(key_LOCK_des_key_file,
@@ -4985,6 +5015,8 @@ a file name for --log-bin-index option", opt_binlog_index_name);
if (ha_init_errors())
DBUG_RETURN(1);
+ tc_log= 0; // ha_initialize_handlerton() needs that
+
if (plugin_init(&remaining_argc, remaining_argv,
(opt_noacl ? PLUGIN_INIT_SKIP_PLUGIN_TABLE : 0) |
(opt_abort ? PLUGIN_INIT_SKIP_INITIALIZATION : 0)))
@@ -5111,12 +5143,6 @@ a file name for --log-bin-index option", opt_binlog_index_name);
tc_log= get_tc_log_implementation();
- WSREP_DEBUG("Initial TC log open: %s",
- (tc_log == &mysql_bin_log) ? "binlog" :
- (tc_log == &tc_log_mmap) ? "mmap" :
- (tc_log == &tc_log_dummy) ? "dummy" : "unknown"
- );
-
if (tc_log->open(opt_bin_log ? opt_bin_logname : opt_tc_log_file))
{
sql_print_error("Can't init tc log");
@@ -6452,7 +6478,8 @@ void handle_connections_sockets()
(void) mysql_socket_close(new_sock);
/*
The connection was refused by TCP wrappers.
- There are no details (by client IP) available to update the host_cache.
+ There are no details (by client IP) available to update the
+ host_cache.
*/
statistic_increment(connection_errors_tcpwrap, &LOCK_status);
continue;
@@ -6961,7 +6988,7 @@ struct my_option my_long_options[]=
{"autocommit", 0, "Set default value for autocommit (0 or 1)",
&opt_autocommit, &opt_autocommit, 0,
GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, NULL},
- {"bind-address", OPT_BIND_ADDRESS, "IP address to bind to.",
+ {"bind-address", 0, "IP address to bind to.",
&my_bind_addr_str, &my_bind_addr_str, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"binlog-do-db", OPT_BINLOG_DO_DB,
@@ -7457,7 +7484,6 @@ static int show_slave_running(THD *thd, SHOW_VAR *var, char *buff)
var->type= SHOW_MY_BOOL;
var->value= buff;
- mysql_mutex_unlock(&LOCK_status);
mysql_mutex_lock(&LOCK_active_mi);
if (master_info_index)
{
@@ -7469,7 +7495,6 @@ static int show_slave_running(THD *thd, SHOW_VAR *var, char *buff)
mi->rli.slave_running);
}
mysql_mutex_unlock(&LOCK_active_mi);
- mysql_mutex_lock(&LOCK_status);
if (mi)
*((my_bool *)buff)= tmp;
else
@@ -7486,7 +7511,6 @@ static int show_slave_received_heartbeats(THD *thd, SHOW_VAR *var, char *buff)
var->type= SHOW_LONGLONG;
var->value= buff;
- mysql_mutex_unlock(&LOCK_status);
mysql_mutex_lock(&LOCK_active_mi);
if (master_info_index)
{
@@ -7497,7 +7521,6 @@ static int show_slave_received_heartbeats(THD *thd, SHOW_VAR *var, char *buff)
tmp= mi->received_heartbeats;
}
mysql_mutex_unlock(&LOCK_active_mi);
- mysql_mutex_lock(&LOCK_status);
if (mi)
*((longlong *)buff)= tmp;
else
@@ -7514,7 +7537,6 @@ static int show_heartbeat_period(THD *thd, SHOW_VAR *var, char *buff)
var->type= SHOW_CHAR;
var->value= buff;
- mysql_mutex_unlock(&LOCK_status);
mysql_mutex_lock(&LOCK_active_mi);
if (master_info_index)
{
@@ -7525,7 +7547,6 @@ static int show_heartbeat_period(THD *thd, SHOW_VAR *var, char *buff)
tmp= mi->heartbeat_period;
}
mysql_mutex_unlock(&LOCK_active_mi);
- mysql_mutex_lock(&LOCK_status);
if (mi)
sprintf(buff, "%.3f", tmp);
else
@@ -8058,6 +8079,7 @@ SHOW_VAR status_vars[]= {
{"Empty_queries", (char*) offsetof(STATUS_VAR, empty_queries), SHOW_LONG_STATUS},
{"Executed_events", (char*) &executed_events, SHOW_LONG_NOFLUSH },
{"Executed_triggers", (char*) offsetof(STATUS_VAR, executed_triggers), SHOW_LONG_STATUS},
+ {"Feature_delay_key_write", (char*) &feature_files_opened_with_delayed_keys, SHOW_LONG },
{"Feature_dynamic_columns", (char*) offsetof(STATUS_VAR, feature_dynamic_columns), SHOW_LONG_STATUS},
{"Feature_fulltext", (char*) offsetof(STATUS_VAR, feature_fulltext), SHOW_LONG_STATUS},
{"Feature_gis", (char*) offsetof(STATUS_VAR, feature_gis), SHOW_LONG_STATUS},
@@ -8141,6 +8163,7 @@ SHOW_VAR status_vars[]= {
{"Slow_launch_threads", (char*) &slow_launch_threads, SHOW_LONG},
{"Slow_queries", (char*) offsetof(STATUS_VAR, long_query_count), SHOW_LONG_STATUS},
{"Sort_merge_passes", (char*) offsetof(STATUS_VAR, filesort_merge_passes_), SHOW_LONG_STATUS},
+ {"Sort_priority_queue_sorts",(char*) offsetof(STATUS_VAR, filesort_pq_sorts_), SHOW_LONG_STATUS},
{"Sort_range", (char*) offsetof(STATUS_VAR, filesort_range_count_), SHOW_LONG_STATUS},
{"Sort_rows", (char*) offsetof(STATUS_VAR, filesort_rows_), SHOW_LONG_STATUS},
{"Sort_scan", (char*) offsetof(STATUS_VAR, filesort_scan_count_), SHOW_LONG_STATUS},
@@ -8949,6 +8972,7 @@ mysql_getopt_value(const char *name, uint length,
case OPT_KEY_CACHE_DIVISION_LIMIT:
case OPT_KEY_CACHE_AGE_THRESHOLD:
case OPT_KEY_CACHE_PARTITIONS:
+ case OPT_KEY_CACHE_CHANGED_BLOCKS_HASH_SIZE:
{
KEY_CACHE *key_cache;
if (!(key_cache= get_or_create_key_cache(name, length)))
@@ -8968,6 +8992,8 @@ mysql_getopt_value(const char *name, uint length,
return &key_cache->param_age_threshold;
case OPT_KEY_CACHE_PARTITIONS:
return (uchar**) &key_cache->param_partitions;
+ case OPT_KEY_CACHE_CHANGED_BLOCKS_HASH_SIZE:
+ return (uchar**) &key_cache->changed_blocks_hash_size;
}
}
case OPT_REPLICATE_DO_DB:
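
The wildcard bind-address handling added to activate_tcp_port() can be exercised outside the server. Below is a minimal standalone sketch, assuming only POSIX getaddrinfo(); the port number and the main() wrapper are illustrative and not part of the patch. It maps "*" to a NULL node name and applies the same reordering so that an IPv6 wildcard entry (with IPV6_V6ONLY later switched off) is tried first and covers IPv4 clients as well.

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netdb.h>
    #include <cstdio>
    #include <cstring>

    int main()
    {
      const char *my_bind_addr_str= "*";
      /* "*" is not understood by getaddrinfo() everywhere; use NULL instead. */
      const char *real_bind_addr_str=
        (my_bind_addr_str && strcmp(my_bind_addr_str, "*") == 0) ?
        NULL : my_bind_addr_str;

      struct addrinfo hints, *ai, *a;
      memset(&hints, 0, sizeof(hints));
      hints.ai_flags= AI_PASSIVE;
      hints.ai_socktype= SOCK_STREAM;
      hints.ai_family= AF_UNSPEC;

      if (getaddrinfo(real_bind_addr_str, "3306", &hints, &ai) != 0)
        return 1;

      /*
        For wildcard addresses prefer IPv6 over IPv4: once IPV6_V6ONLY is
        disabled, the IPv6 wildcard socket accepts IPv4 connections too.
      */
      if (!real_bind_addr_str && ai->ai_family == AF_INET && ai->ai_next &&
          ai->ai_next->ai_family == AF_INET6)
      {
        a= ai;
        ai= ai->ai_next;
        a->ai_next= ai->ai_next;
        ai->ai_next= a;
      }

      for (a= ai; a != NULL; a= a->ai_next)
        printf("address family %d\n", a->ai_family);

      freeaddrinfo(ai);
      return 0;
    }
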
diff --git a/sql/mysqld.h b/sql/mysqld.h
index d68831a0d4f..37ef449874c 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -254,7 +254,8 @@ extern PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
key_LOCK_gdl, key_LOCK_global_system_variables,
key_LOCK_logger, key_LOCK_manager,
key_LOCK_prepared_stmt_count,
- key_LOCK_rpl_status, key_LOCK_server_started, key_LOCK_status,
+ key_LOCK_rpl_status, key_LOCK_server_started,
+ key_LOCK_status, key_LOCK_show_status,
key_LOCK_thd_data,
key_LOCK_user_conn, key_LOG_LOCK_log,
key_master_info_data_lock, key_master_info_run_lock,
@@ -513,12 +514,13 @@ extern MYSQL_PLUGIN_IMPORT key_map key_map_full; /* Should be threaded
Server mutex locks and condition variables.
*/
extern mysql_mutex_t
- LOCK_item_func_sleep, LOCK_status,
+ LOCK_item_func_sleep, LOCK_status, LOCK_show_status,
LOCK_error_log, LOCK_delayed_insert, LOCK_short_uuid_generator,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
LOCK_slave_list, LOCK_active_mi, LOCK_manager,
LOCK_global_system_variables, LOCK_user_conn,
- LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count;
+ LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count,
+ LOCK_slave_init;
extern MYSQL_PLUGIN_IMPORT mysql_mutex_t LOCK_thread_count;
#ifdef HAVE_OPENSSL
extern mysql_mutex_t LOCK_des_key_file;
@@ -529,6 +531,7 @@ extern mysql_rwlock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave;
extern mysql_rwlock_t LOCK_system_variables_hash;
extern mysql_cond_t COND_thread_count;
extern mysql_cond_t COND_manager;
+extern mysql_cond_t COND_slave_init;
extern int32 thread_running;
extern int32 thread_count;
extern my_atomic_rwlock_t thread_running_lock, thread_count_lock;
@@ -548,7 +551,6 @@ extern MYSQL_PLUGIN_IMPORT pthread_key(THD*, THR_THD);
enum options_mysqld
{
OPT_to_set_the_start_number=256,
- OPT_BIND_ADDRESS,
OPT_BINLOG_DO_DB,
OPT_BINLOG_FORMAT,
OPT_BINLOG_IGNORE_DB,
@@ -556,7 +558,6 @@ enum options_mysqld
OPT_BOOTSTRAP,
OPT_CONSOLE,
OPT_DEBUG_SYNC_TIMEOUT,
- OPT_DELAY_KEY_WRITE_ALL,
OPT_DEPRECATED_OPTION,
OPT_IGNORE_DB_DIRECTORY,
OPT_ISAM_LOG,
@@ -565,6 +566,7 @@ enum options_mysqld
OPT_KEY_CACHE_BLOCK_SIZE,
OPT_KEY_CACHE_DIVISION_LIMIT,
OPT_KEY_CACHE_PARTITIONS,
+ OPT_KEY_CACHE_CHANGED_BLOCKS_HASH_SIZE,
OPT_LOG_BASENAME,
OPT_LOG_ERROR,
OPT_LOWER_CASE_TABLE_NAMES,
@@ -572,7 +574,6 @@ enum options_mysqld
OPT_PLUGIN_LOAD,
OPT_PLUGIN_LOAD_ADD,
OPT_PFS_INSTRUMENT,
- OPT_POOL_OF_THREADS,
OPT_REPLICATE_DO_DB,
OPT_REPLICATE_DO_TABLE,
OPT_REPLICATE_IGNORE_DB,
@@ -583,10 +584,7 @@ enum options_mysqld
OPT_SAFE,
OPT_SERVER_ID,
OPT_SKIP_HOST_CACHE,
- OPT_SKIP_LOCK,
OPT_SKIP_RESOLVE,
- OPT_SKIP_STACK_TRACE,
- OPT_SKIP_SYMLINKS,
OPT_SSL_CA,
OPT_SSL_CAPATH,
OPT_SSL_CERT,
@@ -594,7 +592,6 @@ enum options_mysqld
OPT_SSL_CRL,
OPT_SSL_CRLPATH,
OPT_SSL_KEY,
- OPT_UPDATE_LOG,
OPT_WANT_CORE,
OPT_MYSQL_COMPATIBILITY,
OPT_MYSQL_TO_BE_IMPLEMENTED,
diff --git a/sql/net_serv.cc b/sql/net_serv.cc
index 546542fa207..eb34fcc2d77 100644
--- a/sql/net_serv.cc
+++ b/sql/net_serv.cc
@@ -824,7 +824,8 @@ static my_bool my_net_skip_rest(NET *net, uint32 remain, thr_alarm_t *alarmed,
*/
static ulong
-my_real_read(NET *net, size_t *complen)
+my_real_read(NET *net, size_t *complen,
+ my_bool header __attribute__((unused)))
{
uchar *pos;
size_t length;
@@ -839,14 +840,16 @@ my_real_read(NET *net, size_t *complen)
NET_HEADER_SIZE);
#ifdef MYSQL_SERVER
size_t count= remain;
- struct st_net_server *server_extension;
- server_extension= static_cast<st_net_server*> (net->extension);
- if (server_extension != NULL)
+ struct st_net_server *server_extension= 0;
+
+ if (header)
{
- void *user_data= server_extension->m_user_data;
- DBUG_ASSERT(server_extension->m_before_header != NULL);
- DBUG_ASSERT(server_extension->m_after_header != NULL);
- server_extension->m_before_header(net, user_data, count);
+ server_extension= static_cast<st_net_server*> (net->extension);
+ if (server_extension != NULL)
+ {
+ void *user_data= server_extension->m_user_data;
+ server_extension->m_before_header(net, user_data, count);
+ }
}
#endif
@@ -1042,6 +1045,16 @@ end:
}
+/* Old interface. See my_net_read_packet() for function description */
+
+#undef my_net_read
+
+ulong my_net_read(NET *net)
+{
+ return my_net_read_packet(net, 0);
+}
+
+
/**
Read a packet from the client/server and return it without the internal
package header.
@@ -1053,13 +1066,17 @@ end:
If the packet was compressed, it is uncompressed and the length of the
uncompressed packet is returned.
+ read_from_server is set when the server is reading a new command
+ from the client.
+
@return
The function returns the length of the found packet or packet_error.
net->read_pos points to the read data.
*/
+
ulong
-my_net_read(NET *net)
+my_net_read_packet(NET *net, my_bool read_from_server)
{
size_t len, complen;
@@ -1069,7 +1086,7 @@ my_net_read(NET *net)
if (!net->compress)
{
#endif
- len = my_real_read(net,&complen);
+ len = my_real_read(net,&complen, read_from_server);
if (len == MAX_PACKET_LENGTH)
{
/* First packet of a multi-packet. Concatenate the packets */
@@ -1079,7 +1096,7 @@ my_net_read(NET *net)
{
net->where_b += len;
total_length += len;
- len = my_real_read(net,&complen);
+ len = my_real_read(net,&complen, 0);
} while (len == MAX_PACKET_LENGTH);
if (len != packet_error)
len+= total_length;
@@ -1171,11 +1188,13 @@ my_net_read(NET *net)
}
net->where_b=buf_length;
- if ((packet_len = my_real_read(net,&complen)) == packet_error)
+ if ((packet_len = my_real_read(net,&complen, read_from_server))
+ == packet_error)
{
MYSQL_NET_READ_DONE(1, 0);
return packet_error;
}
+ read_from_server= 0;
if (my_uncompress(net->buff + net->where_b, packet_len,
&complen))
{
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 0dac6e56c1b..1cea800fbbc 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -1,5 +1,5 @@
-/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
- Copyright (c) 2008, 2013, Monty Program Ab.
+/* Copyright (c) 2000, 2014, Oracle and/or its affiliates.
+ Copyright (c) 2008, 2014, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -359,31 +359,54 @@ public:
elements(1),use_count(1),left(0),right(0),
next_key_part(0), color(BLACK), type(type_arg)
{}
- inline bool is_same(SEL_ARG *arg)
+ /**
+ Returns true if this range predicate is equal to 'arg'. Use all_same()
+ to check for equality of all the predicates on this keypart.
+ */
+ inline bool is_same(const SEL_ARG *arg) const
{
if (type != arg->type || part != arg->part)
- return 0;
+ return false;
if (type != KEY_RANGE)
- return 1;
+ return true;
return cmp_min_to_min(arg) == 0 && cmp_max_to_max(arg) == 0;
}
+ /**
+ Returns true if all the predicates in this keypart tree are equal
+ to the predicates in 'arg'.
+ */
+ bool all_same(const SEL_ARG *arg) const
+ {
+ if (type != arg->type || part != arg->part)
+ return false;
+ if (type != KEY_RANGE)
+ return true;
+ if (arg == this)
+ return true;
+ const SEL_ARG *cmp_arg= arg->first();
+ const SEL_ARG *cur_arg= first();
+ for (; cur_arg && cmp_arg && cur_arg->is_same(cmp_arg);
+ cur_arg= cur_arg->next, cmp_arg= cmp_arg->next) ;
+ if (cur_arg || cmp_arg)
+ return false;
+ return true;
+ }
inline void merge_flags(SEL_ARG *arg) { maybe_flag|=arg->maybe_flag; }
inline void maybe_smaller() { maybe_flag=1; }
/* Return true iff it's a single-point null interval */
inline bool is_null_interval() { return maybe_null && max_value[0] == 1; }
- inline int cmp_min_to_min(SEL_ARG* arg)
+ inline int cmp_min_to_min(const SEL_ARG* arg) const
{
return sel_cmp(field,min_value, arg->min_value, min_flag, arg->min_flag);
}
- inline int cmp_min_to_max(SEL_ARG* arg)
+ inline int cmp_min_to_max(const SEL_ARG* arg) const
{
return sel_cmp(field,min_value, arg->max_value, min_flag, arg->max_flag);
}
- inline int cmp_max_to_max(SEL_ARG* arg)
+ inline int cmp_max_to_max(const SEL_ARG* arg) const
{
return sel_cmp(field,max_value, arg->max_value, max_flag, arg->max_flag);
}
- inline int cmp_max_to_min(SEL_ARG* arg)
+ inline int cmp_max_to_min(const SEL_ARG* arg) const
{
return sel_cmp(field,max_value, arg->min_value, max_flag, arg->min_flag);
}
@@ -563,6 +586,7 @@ public:
void test_use_count(SEL_ARG *root);
#endif
SEL_ARG *first();
+ const SEL_ARG *first() const;
SEL_ARG *last();
void make_root();
inline bool simple_key()
@@ -652,6 +676,18 @@ public:
SEL_ARG *clone_tree(RANGE_OPT_PARAM *param);
};
+/**
+ Helper function to compare two SEL_ARG's.
+*/
+static bool all_same(const SEL_ARG *sa1, const SEL_ARG *sa2)
+{
+ if (sa1 == NULL && sa2 == NULL)
+ return true;
+ if ((sa1 != NULL && sa2 == NULL) || (sa1 == NULL && sa2 != NULL))
+ return false;
+ return sa1->all_same(sa2);
+}
+
class SEL_IMERGE;
#define CLONE_KEY1_MAYBE 1
@@ -2501,6 +2537,13 @@ SEL_ARG *SEL_ARG::clone(RANGE_OPT_PARAM *param, SEL_ARG *new_parent,
return tmp;
}
+/**
+ This gives the first SEL_ARG in the interval list, and the minimal element
+ in the red-black tree.
+
+ @return
+ SEL_ARG first SEL_ARG in the interval list
+*/
SEL_ARG *SEL_ARG::first()
{
SEL_ARG *next_arg=this;
@@ -2511,6 +2554,11 @@ SEL_ARG *SEL_ARG::first()
return next_arg;
}
+const SEL_ARG *SEL_ARG::first() const
+{
+ return const_cast<SEL_ARG*>(this)->first();
+}
+
SEL_ARG *SEL_ARG::last()
{
SEL_ARG *next_arg=this;
@@ -11124,6 +11172,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
uint part;
bool create_err= FALSE;
Cost_estimate cost;
+ uint max_used_key_len;
old_root= thd->mem_root;
/* The following call may change thd->mem_root */
@@ -11150,12 +11199,13 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
range->min_length= range->max_length= ref->key_length;
range->min_keypart_map= range->max_keypart_map=
make_prev_keypart_map(ref->key_parts);
- range->flag= (ref->key_length == key_info->key_length ? EQ_RANGE : 0);
+ range->flag= EQ_RANGE;
if (!(quick->key_parts=key_part=(KEY_PART *)
alloc_root(&quick->alloc,sizeof(KEY_PART)*ref->key_parts)))
goto err;
-
+
+ max_used_key_len=0;
for (part=0 ; part < ref->key_parts ;part++,key_part++)
{
key_part->part=part;
@@ -11164,7 +11214,12 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
key_part->store_length= key_info->key_part[part].store_length;
key_part->null_bit= key_info->key_part[part].null_bit;
key_part->flag= (uint8) key_info->key_part[part].key_part_flag;
+
+ max_used_key_len+= key_info->key_part[part].store_length;
}
+
+ quick->max_used_key_length= max_used_key_len;
+
if (insert_dynamic(&quick->ranges,(uchar*)&range))
goto err;
@@ -12407,6 +12462,66 @@ void QUICK_ROR_UNION_SELECT::add_keys_and_lengths(String *key_names,
}
+void QUICK_RANGE_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set)
+{
+ uint key_len;
+ KEY_PART *part= key_parts;
+ for (key_len=0; key_len < max_used_key_length;
+ key_len += (part++)->store_length)
+ {
+ bitmap_set_bit(col_set, part->field->field_index);
+ }
+}
+
+
+void QUICK_GROUP_MIN_MAX_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set)
+{
+ uint key_len;
+ KEY_PART_INFO *part= index_info->key_part;
+ for (key_len=0; key_len < max_used_key_length;
+ key_len += (part++)->store_length)
+ {
+ bitmap_set_bit(col_set, part->field->field_index);
+ }
+}
+
+
+void QUICK_ROR_INTERSECT_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set)
+{
+ List_iterator_fast<QUICK_SELECT_WITH_RECORD> it(quick_selects);
+ QUICK_SELECT_WITH_RECORD *quick;
+ while ((quick= it++))
+ {
+ quick->quick->add_used_key_part_to_set(col_set);
+ }
+}
+
+
+void QUICK_INDEX_SORT_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set)
+{
+ QUICK_RANGE_SELECT *quick;
+ List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
+ while ((quick= it++))
+ {
+ quick->add_used_key_part_to_set(col_set);
+ }
+ if (pk_quick_select)
+ pk_quick_select->add_used_key_part_to_set(col_set);
+}
+
+
+void QUICK_ROR_UNION_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set)
+{
+ QUICK_SELECT_I *quick;
+ List_iterator_fast<QUICK_SELECT_I> it(quick_selects);
+
+ while ((quick= it++))
+ {
+ quick->add_used_key_part_to_set(col_set);
+ }
+}
+
+
/*******************************************************************************
* Implementation of QUICK_GROUP_MIN_MAX_SELECT
*******************************************************************************/
@@ -12414,6 +12529,8 @@ void QUICK_ROR_UNION_SELECT::add_keys_and_lengths(String *key_names,
static inline uint get_field_keypart(KEY *index, Field *field);
static inline SEL_ARG * get_index_range_tree(uint index, SEL_TREE* range_tree,
PARAM *param, uint *param_idx);
+static bool get_sel_arg_for_keypart(Field *field, SEL_ARG *index_range_tree,
+ SEL_ARG **cur_range);
static bool get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree,
KEY_PART_INFO *first_non_group_part,
KEY_PART_INFO *min_max_arg_part,
@@ -12479,6 +12596,16 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
never stored after a unique key lookup in the clustered index and
further index_next/prev calls cannot be used. So loose index scan
optimization can not be used in this case.
+ SA7. If Q has both AGG_FUNC(DISTINCT ...) and MIN/MAX() functions, then this
+ access method is not used.
+ For such queries, MIN/MAX() aggregation has to be done at
+ nested_loops_join (end_send_group), but with the current design MIN/MAX()
+ is always computed as part of the loose index scan. Because of this
+ mismatch the MIN() and MAX() values would be set incorrectly. For these
+ queries to work we need a new interface for loose index scan, one that
+ only fetches records with the min and max values and lets
+ end_send_group do the aggregation. Until then, do not use
+ loose index scan.
GA1. If Q has a GROUP BY clause, then GA is a prefix of I. That is, if
G_i = A_j => i = j.
GA2. If Q has a DISTINCT clause, then there is a permutation of SA that
@@ -12510,6 +12637,8 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
above tests. By transitivity then it also follows that each WA_i
participates in the index I (if this was already tested for GA, NGA
and C).
+ WA2. If there is a predicate on C, then it must be in conjunction
+ with the predicates on all earlier keyparts in I.
C) Overall query form:
SELECT EXPR([A_1,...,A_k], [B_1,...,B_m], [MIN(C)], [MAX(C)])
@@ -12644,6 +12773,13 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
DBUG_RETURN(NULL);
}
}
+
+ /* Check (SA7). */
+ if (is_agg_distinct && (have_max || have_min))
+ {
+ DBUG_RETURN(NULL);
+ }
+
/* Check (SA5). */
if (join->select_distinct)
{
@@ -12933,6 +13069,25 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
}
}
+ /**
+ Test WA2: If there are conditions on a column C participating in
+ MIN/MAX, those conditions must be in conjunction with the predicates
+ on all earlier keyparts. Otherwise, loose index scan cannot be used.
+ */
+ if (tree && min_max_arg_item)
+ {
+ uint dummy;
+ SEL_ARG *index_range_tree= get_index_range_tree(cur_index, tree, param,
+ &dummy);
+ SEL_ARG *cur_range= NULL;
+ if (get_sel_arg_for_keypart(min_max_arg_part->field,
+ index_range_tree, &cur_range) ||
+ (cur_range && cur_range->type != SEL_ARG::KEY_RANGE))
+ {
+ goto next_index;
+ }
+ }
+
/* If we got to this point, cur_index_info passes the test. */
key_infix_parts= cur_key_infix_len ? (uint)
(first_non_infix_part - first_non_group_part) : 0;
@@ -13250,73 +13405,75 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
/*
- Get SEL_ARG tree, if any, for the keypart covering non grouping
- attribute (NGA) field 'nga_field'.
+ Get the SEL_ARG tree, if any, for the keypart covering 'field'. The
+ returned tree must be a unique conjunction with ALL predicates on earlier
+ keyparts of 'keypart_tree'.
+
+ E.g., if 'keypart_tree' is for a composite index (kp1,kp2) and kp2
+ covers 'field', all these conditions satisfy the requirement:
- This function enforces the NGA3 test: If 'keypart_tree' contains a
- condition for 'nga_field', there can only be one range. In the
- opposite case, this function returns with error and 'cur_range'
- should not be used.
+ 1. "(kp1=2 OR kp1=3) AND kp2=10" => returns "kp2=10"
+ 2. "(kp1=2 AND kp2=10) OR (kp1=3 AND kp2=10)" => returns "kp2=10"
+ 3. "(kp1=2 AND (kp2=10 OR kp2=11)) OR (kp1=3 AND (kp2=10 OR kp2=11))"
+ => returns "kp2=10 OR kp2=11"
- Note that the NGA1 and NGA2 requirements, like whether or not the
- range predicate for 'nga_field' is equality, is not tested by this
- function.
+ whereas these do not:
+ 1. "(kp1=2 AND kp2=10) OR kp1=3"
+ 2. "(kp1=2 AND kp2=10) OR (kp1=3 AND kp2=11)"
+ 3. "(kp1=2 AND kp2=10) OR (kp1=3 AND (kp2=10 OR kp2=11))"
- @param[in] nga_field The NGA field we want the SEL_ARG tree for
+ This function effectively tests requirement WA2. In combination with
+ a test that the returned tree has no more than one range, it is also
+ a test of NGA3.
+
+ @param[in] field The field we want the SEL_ARG tree for
@param[in] keypart_tree Root node of the SEL_ARG* tree for the index
@param[out] cur_range The SEL_ARG tree, if any, for the keypart
covering field 'keypart_field'
- @retval true 'keypart_tree' contained a predicate for 'nga_field' but
- multiple ranges exists. 'cur_range' should not be used.
+ @retval true 'keypart_tree' contained a predicate for 'field' that
+ is not in conjunction with all predicates on earlier keyparts
@retval false otherwise
*/
static bool
-get_sel_arg_for_keypart(Field *nga_field,
+get_sel_arg_for_keypart(Field *field,
SEL_ARG *keypart_tree,
SEL_ARG **cur_range)
{
- if(keypart_tree == NULL)
+ if (keypart_tree == NULL)
return false;
- if(keypart_tree->field->eq(nga_field))
+ if (keypart_tree->field->eq(field))
{
- /*
- Enforce NGA3: If a condition for nga_field has been found, only
- a single range is allowed.
- */
- if (keypart_tree->prev || keypart_tree->next)
- return true; // There are multiple ranges
-
*cur_range= keypart_tree;
return false;
}
- SEL_ARG *found_tree= NULL;
+ SEL_ARG *tree_first_range= NULL;
SEL_ARG *first_kp= keypart_tree->first();
- for (SEL_ARG *cur_kp= first_kp; cur_kp && !found_tree;
- cur_kp= cur_kp->next)
+ for (SEL_ARG *cur_kp= first_kp; cur_kp; cur_kp= cur_kp->next)
{
+ SEL_ARG *curr_tree= NULL;
if (cur_kp->next_key_part)
{
- if (get_sel_arg_for_keypart(nga_field,
+ if (get_sel_arg_for_keypart(field,
cur_kp->next_key_part,
- &found_tree))
+ &curr_tree))
return true;
-
}
/*
- Enforce NGA3: If a condition for nga_field has been found,only
- a single range is allowed.
- */
- if (found_tree && first_kp->next)
- return true; // There are multiple ranges
+ Check if the SEL_ARG tree for 'field' is identical for all ranges in
+ 'keypart_tree'.
+ */
+ if (cur_kp == first_kp)
+ tree_first_range= curr_tree;
+ else if (!all_same(tree_first_range, curr_tree))
+ return true;
}
- *cur_range= found_tree;
+ *cur_range= tree_first_range;
return false;
}
-
/*
Extract a sequence of constants from a conjunction of equality predicates.
@@ -13339,7 +13496,8 @@ get_sel_arg_for_keypart(Field *nga_field,
(const_ci = NG_i).. In addition, there can only be one range when there is
such a gap.
Thus all the NGF_i attributes must fill the 'gap' between the last group-by
- attribute and the MIN/MAX attribute in the index (if present). If these
+ attribute and the MIN/MAX attribute in the index (if present). Also ensure
+ that there is only a single range on NGF_i (NGA3). If these
conditions hold, copy each constant from its corresponding predicate into
key_infix, in the order its NG_i attribute appears in the index, and update
key_infix_len with the total length of the key parts in key_infix.
@@ -13348,7 +13506,6 @@ get_sel_arg_for_keypart(Field *nga_field,
TRUE if the index passes the test
FALSE o/w
*/
-
static bool
get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree,
KEY_PART_INFO *first_non_group_part,
@@ -13368,32 +13525,42 @@ get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree,
{
cur_range= NULL;
/*
- Find the range tree for the current keypart. We assume that
- index_range_tree points to the first keypart in the index.
+ Check NGA3:
+ 1. get_sel_arg_for_keypart gets the range tree for the 'field' and also
+ checks for a unique conjunction of this tree with all the predicates
+ on the earlier keyparts in the index.
+ 2. Check for multiple ranges on the found keypart tree.
+
+ We assume that index_range_tree points to the leftmost keypart in
+ the index.
*/
- if(get_sel_arg_for_keypart(cur_part->field, index_range_tree, &cur_range))
+ if (get_sel_arg_for_keypart(cur_part->field, index_range_tree,
+ &cur_range))
+ return false;
+
+ if (cur_range && cur_range->elements > 1)
return false;
if (!cur_range || cur_range->type != SEL_ARG::KEY_RANGE)
{
if (min_max_arg_part)
- return FALSE; /* The current keypart has no range predicates at all. */
+ return false; /* The current keypart has no range predicates at all. */
else
{
*first_non_infix_part= cur_part;
- return TRUE;
+ return true;
}
}
if ((cur_range->min_flag & NO_MIN_RANGE) ||
(cur_range->max_flag & NO_MAX_RANGE) ||
(cur_range->min_flag & NEAR_MIN) || (cur_range->max_flag & NEAR_MAX))
- return FALSE;
+ return false;
uint field_length= cur_part->store_length;
if (cur_range->maybe_null &&
cur_range->min_value[0] && cur_range->max_value[0])
- {
+ {
/*
cur_range specifies 'IS NULL'. In this case the argument points
to a "null value" (is_null_string) that may not always be long
@@ -13412,7 +13579,7 @@ get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree,
*key_infix_len+= field_length;
}
else
- return FALSE;
+ return false;
}
if (!min_max_arg_part && (cur_part == last_part))
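
The WA2/NGA3 checks above reduce to walking two range lists in lock-step and requiring them to be identical, with "no predicate on either side" also counting as equal. Here is a simplified, self-contained model of that walk; Range and its fields are invented stand-ins for SEL_ARG, not the real structure:

    #include <cstddef>
    #include <cstdio>

    struct Range                          /* stand-in for one SEL_ARG interval */
    {
      int min_val, max_val;
      Range *next;
      bool is_same(const Range *other) const
      { return min_val == other->min_val && max_val == other->max_val; }
    };

    static bool ranges_all_same(const Range *a, const Range *b)
    {
      if (a == NULL && b == NULL)
        return true;                      /* no predicate on either side */
      if (a == NULL || b == NULL)
        return false;
      while (a && b && a->is_same(b))
      {
        a= a->next;
        b= b->next;
      }
      return a == NULL && b == NULL;      /* both lists fully consumed */
    }

    int main()
    {
      /* "(kp1=2 AND (kp2=10 OR kp2=11)) OR (kp1=3 AND (kp2=10 OR kp2=11))" */
      Range r2= {11, 11, NULL}, r1= {10, 10, &r2};
      Range s2= {11, 11, NULL}, s1= {10, 10, &s2};
      printf("usable: %d\n", ranges_all_same(&r1, &s1));   /* 1 */

      /* "(kp1=2 AND kp2=10) OR (kp1=3 AND kp2=11)" */
      Range t1= {10, 10, NULL};
      Range u1= {11, 11, NULL};
      printf("usable: %d\n", ranges_all_same(&t1, &u1));   /* 0 */
      return 0;
    }
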
diff --git a/sql/opt_range.h b/sql/opt_range.h
index f602408ea82..a5488d6124d 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -389,6 +389,13 @@ public:
Returns a QUICK_SELECT with reverse order of to the index.
*/
virtual QUICK_SELECT_I *make_reverse(uint used_key_parts_arg) { return NULL; }
+
+ /*
+ Add the key columns used by the quick select into the table's read set.
+
+ This is used by an optimization in filesort.
+ */
+ virtual void add_used_key_part_to_set(MY_BITMAP *col_set)=0;
};
@@ -479,6 +486,9 @@ public:
#endif
virtual void replace_handler(handler *new_file) { file= new_file; }
QUICK_SELECT_I *make_reverse(uint used_key_parts_arg);
+
+ virtual void add_used_key_part_to_set(MY_BITMAP *col_set);
+
private:
/* Default copy ctor used by QUICK_SELECT_DESC */
friend class TRP_ROR_INTERSECT;
@@ -640,6 +650,8 @@ public:
virtual int read_keys_and_merge()= 0;
/* used to get rows collected in Unique */
READ_RECORD read_record;
+
+ virtual void add_used_key_part_to_set(MY_BITMAP *col_set);
};
@@ -714,6 +726,7 @@ public:
void add_keys_and_lengths(String *key_names, String *used_lengths);
Explain_quick_select *get_explain(MEM_ROOT *alloc);
bool is_keys_used(const MY_BITMAP *fields);
+ void add_used_key_part_to_set(MY_BITMAP *col_set);
#ifndef DBUG_OFF
void dbug_dump(int indent, bool verbose);
#endif
@@ -793,6 +806,7 @@ public:
void add_keys_and_lengths(String *key_names, String *used_lengths);
Explain_quick_select *get_explain(MEM_ROOT *alloc);
bool is_keys_used(const MY_BITMAP *fields);
+ void add_used_key_part_to_set(MY_BITMAP *col_set);
#ifndef DBUG_OFF
void dbug_dump(int indent, bool verbose);
#endif
@@ -935,6 +949,7 @@ public:
bool unique_key_range() { return false; }
int get_type() { return QS_TYPE_GROUP_MIN_MAX; }
void add_keys_and_lengths(String *key_names, String *used_lengths);
+ void add_used_key_part_to_set(MY_BITMAP *col_set);
#ifndef DBUG_OFF
void dbug_dump(int indent, bool verbose);
#endif
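
The add_used_key_part_to_set() methods declared above all follow one pattern: leaf quick selects mark the columns of the key parts they actually read, composite quick selects forward to their children, and the filesort optimization can then narrow the read set to those columns. A hedged sketch of the same shape, with std::bitset standing in for MY_BITMAP and invented class names:

    #include <bitset>
    #include <vector>
    #include <cstdio>

    struct QuickSketch
    {
      virtual ~QuickSketch() {}
      virtual void add_used_key_part_to_set(std::bitset<64> *col_set)= 0;
    };

    /* Leaf: one index, a few key part columns. */
    struct RangeSketch : QuickSketch
    {
      std::vector<int> key_part_fields;
      void add_used_key_part_to_set(std::bitset<64> *col_set)
      {
        for (size_t i= 0; i < key_part_fields.size(); i++)
          col_set->set(key_part_fields[i]);
      }
    };

    /* Composite: a union/intersection just forwards to its children. */
    struct UnionSketch : QuickSketch
    {
      std::vector<QuickSketch*> children;
      void add_used_key_part_to_set(std::bitset<64> *col_set)
      {
        for (size_t i= 0; i < children.size(); i++)
          children[i]->add_used_key_part_to_set(col_set);
      }
    };

    int main()
    {
      RangeSketch a, b;
      a.key_part_fields.push_back(1);
      b.key_part_fields.push_back(3);
      UnionSketch u;
      u.children.push_back(&a);
      u.children.push_back(&b);
      std::bitset<64> read_set;
      u.add_used_key_part_to_set(&read_set);
      printf("columns marked: %zu\n", read_set.count());   /* 2 */
      return 0;
    }
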
diff --git a/sql/records.cc b/sql/records.cc
index 0c6ecca9a58..242bf8dc3b2 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -287,8 +287,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
thd->variables.read_buff_size);
}
/* Condition pushdown to storage engine */
- if ((table->file->ha_table_flags() &
- HA_CAN_TABLE_CONDITION_PUSHDOWN) &&
+ if ((table->file->ha_table_flags() & HA_CAN_TABLE_CONDITION_PUSHDOWN) &&
select && select->cond &&
(select->cond->used_tables() & table->map) &&
!table->file->pushed_cond)
diff --git a/sql/replication.h b/sql/replication.h
index 510e56a3085..9f9cc9eadfc 100644
--- a/sql/replication.h
+++ b/sql/replication.h
@@ -16,6 +16,20 @@
#ifndef REPLICATION_H
#define REPLICATION_H
+/***************************************************************************
+ NOTE: plugin locking.
+ This API was created specifically for the semisync plugin and its locking
+ logic is also matches semisync plugin usage pattern. In particular, a plugin
+ is locked on Binlog_transmit_observer::transmit_start and is unlocked after
+ Binlog_transmit_observer::transmit_stop. All other master observable events
+ happen between these two and don't lock the plugin at all. This works well
+ for the semisync_master plugin.
+
+ Also a plugin is locked on Binlog_relay_IO_observer::thread_start
+ and unlocked after Binlog_relay_IO_observer::thread_stop. This works well for
+ the semisync_slave plugin.
+***************************************************************************/
+
#include <mysql.h>
typedef struct st_mysql MYSQL;
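
The locking contract described in the note maps onto a simple lifecycle: the plugin is pinned once when a transmission (or relay-IO) session starts and released once when it stops, and the per-event hooks in between rely on that pin instead of locking again. A toy illustration of that lifecycle follows; the class and its output are invented, not server code:

    #include <cstdio>

    struct ObserverSession
    {
      bool plugin_pinned;

      void transmit_start()
      {
        plugin_pinned= true;              /* do_lock == true in the delegate */
        printf("plugin locked for the whole session\n");
      }
      void before_send_event()
      {
        /* do_lock == false: the pin taken at transmit_start() still holds */
        if (!plugin_pinned)
          printf("bug: event hook ran without a pinned plugin\n");
      }
      void transmit_stop()
      {
        plugin_pinned= false;             /* plugin may be unloaded after this */
        printf("plugin unlocked\n");
      }
    };

    int main()
    {
      ObserverSession s= { false };
      s.transmit_start();
      for (int i= 0; i < 3; i++)
        s.before_send_event();
      s.transmit_stop();
      return 0;
    }
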
diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc
index 105bdad6f97..c8d5e2a2db0 100644
--- a/sql/rpl_gtid.cc
+++ b/sql/rpl_gtid.cc
@@ -65,16 +65,16 @@ rpl_slave_state::update_state_hash(uint64 sub_id, rpl_gtid *gtid,
int
rpl_slave_state::record_and_update_gtid(THD *thd, rpl_group_info *rgi)
{
- uint64 sub_id;
DBUG_ENTER("rpl_slave_state::record_and_update_gtid");
/*
Update the GTID position, if we have it and did not already update
it in a GTID transaction.
*/
- if ((sub_id= rgi->gtid_sub_id))
+ if (rgi->gtid_pending)
{
- rgi->gtid_sub_id= 0;
+ uint64 sub_id= rgi->gtid_sub_id;
+ rgi->gtid_pending= false;
if (rgi->gtid_ignore_duplicate_state!=rpl_group_info::GTID_DUPLICATE_IGNORE)
{
if (record_gtid(thd, &rgi->current_gtid, sub_id, false, false))
@@ -120,7 +120,7 @@ rpl_slave_state::check_duplicate_gtid(rpl_gtid *gtid, rpl_group_info *rgi)
uint32 seq_no= gtid->seq_no;
rpl_slave_state::element *elem;
int res;
- bool did_enter_cond;
+ bool did_enter_cond= false;
PSI_stage_info old_stage;
THD *thd;
Relay_log_info *rli= rgi->rli;
@@ -138,7 +138,6 @@ rpl_slave_state::check_duplicate_gtid(rpl_gtid *gtid, rpl_group_info *rgi)
each lock release and re-take.
*/
- did_enter_cond= false;
for (;;)
{
if (elem->highest_seq_no >= seq_no)
@@ -667,7 +666,7 @@ end:
if (table_opened)
{
- if (err)
+ if (err || (err= ha_commit_trans(thd, FALSE)))
{
/*
If error, we need to put any remaining elist back into the HASH so we
@@ -681,13 +680,8 @@ end:
}
ha_rollback_trans(thd, FALSE);
- close_thread_tables(thd);
- }
- else
- {
- ha_commit_trans(thd, FALSE);
- close_thread_tables(thd);
}
+ close_thread_tables(thd);
if (in_transaction)
thd->mdl_context.release_statement_locks();
else
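
The rpl_gtid.cc hunk above folds a failed ha_commit_trans() into the same rollback path as an earlier error and leaves a single close_thread_tables() call for both outcomes. A minimal standalone illustration of that control flow, with stub functions standing in for the server calls:

    #include <cstdio>

    static int  do_commit(bool fail)  { return fail ? 1 : 0; }
    static void do_rollback()         { printf("rolled back\n"); }
    static void close_tables()        { printf("tables closed\n"); }

    static void finish(int err, bool commit_fails)
    {
      /* A failed commit joins the existing error path. */
      if (err || (err= do_commit(commit_fails)))
        do_rollback();
      close_tables();                   /* single exit point for both outcomes */
    }

    int main()
    {
      finish(0, false);   /* clean commit */
      finish(0, true);    /* commit error -> rollback */
      finish(1, false);   /* earlier error -> rollback, commit never attempted */
      return 0;
    }
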
diff --git a/sql/rpl_handler.cc b/sql/rpl_handler.cc
index a706fcd37ee..34d3df23435 100644
--- a/sql/rpl_handler.cc
+++ b/sql/rpl_handler.cc
@@ -170,40 +170,16 @@ void delegates_destroy()
/*
This macro is used by almost all the Delegate methods to iterate
over all the observers, running a given callback function of the
- delegate .
-
- Add observer plugins to the thd->lex list, after each statement, all
- plugins add to thd->lex will be automatically unlocked.
+ delegate.
*/
-#define FOREACH_OBSERVER(r, f, thd, args) \
+#define FOREACH_OBSERVER(r, f, do_lock, args) \
param.server_id= thd->variables.server_id; \
- /*
- Use a struct to make sure that they are allocated adjacent, check
- delete_dynamic().
- */ \
- struct { \
- DYNAMIC_ARRAY plugins; \
- /* preallocate 8 slots */ \
- plugin_ref plugins_buffer[8]; \
- } s; \
- DYNAMIC_ARRAY *plugins= &s.plugins; \
- plugin_ref *plugins_buffer= s.plugins_buffer; \
- init_dynamic_array2(plugins, sizeof(plugin_ref), \
- plugins_buffer, 8, 8, MYF(0)); \
read_lock(); \
Observer_info_iterator iter= observer_info_iter(); \
Observer_info *info= iter++; \
for (; info; info= iter++) \
{ \
- plugin_ref plugin= \
- my_plugin_lock(0, info->plugin); \
- if (!plugin) \
- { \
- /* plugin is not intialized or deleted, this is not an error */ \
- r= 0; \
- break; \
- } \
- insert_dynamic(plugins, (uchar *)&plugin); \
+ if (do_lock) plugin_lock(thd, plugin_int_to_ref(info->plugin_int)); \
if (((Observer *)info->observer)->f \
&& ((Observer *)info->observer)->f args) \
{ \
@@ -213,17 +189,7 @@ void delegates_destroy()
break; \
} \
} \
- unlock(); \
- /*
- Unlock plugins should be done after we released the Delegate lock
- to avoid possible deadlock when this is the last user of the
- plugin, and when we unlock the plugin, it will try to
- deinitialize the plugin, which will try to lock the Delegate in
- order to remove the observers.
- */ \
- plugin_unlock_list(0, (plugin_ref*)plugins->buffer, \
- plugins->elements); \
- delete_dynamic(plugins)
+ unlock();
int Trans_delegate::after_commit(THD *thd, bool all)
@@ -240,7 +206,7 @@ int Trans_delegate::after_commit(THD *thd, bool all)
param.log_pos= log_info ? log_info->log_pos : 0;
int ret= 0;
- FOREACH_OBSERVER(ret, after_commit, thd, (&param));
+ FOREACH_OBSERVER(ret, after_commit, false, (&param));
/*
This is the end of a real transaction or autocommit statement, we
@@ -268,7 +234,7 @@ int Trans_delegate::after_rollback(THD *thd, bool all)
param.log_pos= log_info ? log_info->log_pos : 0;
int ret= 0;
- FOREACH_OBSERVER(ret, after_rollback, thd, (&param));
+ FOREACH_OBSERVER(ret, after_rollback, false, (&param));
/*
This is the end of a real transaction or autocommit statement, we
@@ -307,7 +273,7 @@ int Binlog_storage_delegate::after_flush(THD *thd,
log_info->log_pos = log_pos;
int ret= 0;
- FOREACH_OBSERVER(ret, after_flush, thd,
+ FOREACH_OBSERVER(ret, after_flush, false,
(&param, log_info->log_file, log_info->log_pos, flags));
return ret;
}
@@ -321,7 +287,7 @@ int Binlog_transmit_delegate::transmit_start(THD *thd, ushort flags,
param.flags= flags;
int ret= 0;
- FOREACH_OBSERVER(ret, transmit_start, thd, (&param, log_file, log_pos));
+ FOREACH_OBSERVER(ret, transmit_start, true, (&param, log_file, log_pos));
return ret;
}
@@ -331,7 +297,7 @@ int Binlog_transmit_delegate::transmit_stop(THD *thd, ushort flags)
param.flags= flags;
int ret= 0;
- FOREACH_OBSERVER(ret, transmit_stop, thd, (&param));
+ FOREACH_OBSERVER(ret, transmit_stop, false, (&param));
return ret;
}
@@ -356,13 +322,6 @@ int Binlog_transmit_delegate::reserve_header(THD *thd, ushort flags,
Observer_info *info= iter++;
for (; info; info= iter++)
{
- plugin_ref plugin=
- my_plugin_lock(thd, info->plugin);
- if (!plugin)
- {
- ret= 1;
- break;
- }
hlen= 0;
if (((Observer *)info->observer)->reserve_header
&& ((Observer *)info->observer)->reserve_header(&param,
@@ -371,10 +330,8 @@ int Binlog_transmit_delegate::reserve_header(THD *thd, ushort flags,
&hlen))
{
ret= 1;
- plugin_unlock(thd, plugin);
break;
}
- plugin_unlock(thd, plugin);
if (hlen == 0)
continue;
if (hlen > RESERVE_HEADER_SIZE || packet->append((char *)header, hlen))
@@ -396,7 +353,7 @@ int Binlog_transmit_delegate::before_send_event(THD *thd, ushort flags,
param.flags= flags;
int ret= 0;
- FOREACH_OBSERVER(ret, before_send_event, thd,
+ FOREACH_OBSERVER(ret, before_send_event, false,
(&param, (uchar *)packet->c_ptr(),
packet->length(),
log_file+dirname_length(log_file), log_pos));
@@ -410,7 +367,7 @@ int Binlog_transmit_delegate::after_send_event(THD *thd, ushort flags,
param.flags= flags;
int ret= 0;
- FOREACH_OBSERVER(ret, after_send_event, thd,
+ FOREACH_OBSERVER(ret, after_send_event, false,
(&param, packet->c_ptr(), packet->length()));
return ret;
}
@@ -422,7 +379,7 @@ int Binlog_transmit_delegate::after_reset_master(THD *thd, ushort flags)
param.flags= flags;
int ret= 0;
- FOREACH_OBSERVER(ret, after_reset_master, thd, (&param));
+ FOREACH_OBSERVER(ret, after_reset_master, false, (&param));
return ret;
}
@@ -443,7 +400,7 @@ int Binlog_relay_IO_delegate::thread_start(THD *thd, Master_info *mi)
init_param(&param, mi);
int ret= 0;
- FOREACH_OBSERVER(ret, thread_start, thd, (&param));
+ FOREACH_OBSERVER(ret, thread_start, true, (&param));
return ret;
}
@@ -455,7 +412,7 @@ int Binlog_relay_IO_delegate::thread_stop(THD *thd, Master_info *mi)
init_param(&param, mi);
int ret= 0;
- FOREACH_OBSERVER(ret, thread_stop, thd, (&param));
+ FOREACH_OBSERVER(ret, thread_stop, false, (&param));
return ret;
}
@@ -467,7 +424,7 @@ int Binlog_relay_IO_delegate::before_request_transmit(THD *thd,
init_param(&param, mi);
int ret= 0;
- FOREACH_OBSERVER(ret, before_request_transmit, thd, (&param, (uint32)flags));
+ FOREACH_OBSERVER(ret, before_request_transmit, false, (&param, (uint32)flags));
return ret;
}
@@ -480,7 +437,7 @@ int Binlog_relay_IO_delegate::after_read_event(THD *thd, Master_info *mi,
init_param(&param, mi);
int ret= 0;
- FOREACH_OBSERVER(ret, after_read_event, thd,
+ FOREACH_OBSERVER(ret, after_read_event, false,
(&param, packet, len, event_buf, event_len));
return ret;
}
@@ -498,7 +455,7 @@ int Binlog_relay_IO_delegate::after_queue_event(THD *thd, Master_info *mi,
flags |= BINLOG_STORAGE_IS_SYNCED;
int ret= 0;
- FOREACH_OBSERVER(ret, after_queue_event, thd,
+ FOREACH_OBSERVER(ret, after_queue_event, false,
(&param, event_buf, event_len, flags));
return ret;
}
@@ -510,7 +467,7 @@ int Binlog_relay_IO_delegate::after_reset_slave(THD *thd, Master_info *mi)
init_param(&param, mi);
int ret= 0;
- FOREACH_OBSERVER(ret, after_reset_slave, thd, (&param));
+ FOREACH_OBSERVER(ret, after_reset_slave, false, (&param));
return ret;
}
#endif /* HAVE_REPLICATION */
diff --git a/sql/rpl_handler.h b/sql/rpl_handler.h
index e028fb49808..e262ebdbd6b 100644
--- a/sql/rpl_handler.h
+++ b/sql/rpl_handler.h
@@ -26,13 +26,10 @@ class Observer_info {
public:
void *observer;
st_plugin_int *plugin_int;
- plugin_ref plugin;
Observer_info(void *ob, st_plugin_int *p)
:observer(ob), plugin_int(p)
- {
- plugin= plugin_int_to_ref(plugin_int);
- }
+ { }
};
class Delegate {
diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc
index 977dec96982..055dd09ac5c 100644
--- a/sql/rpl_mi.cc
+++ b/sql/rpl_mi.cc
@@ -38,6 +38,7 @@ Master_info::Master_info(LEX_STRING *connection_name_arg,
connect_retry(DEFAULT_CONNECT_RETRY), inited(0), abort_slave(0),
slave_running(0), slave_run_id(0), sync_counter(0),
heartbeat_period(0), received_heartbeats(0), master_id(0),
+ prev_master_id(0),
using_gtid(USE_GTID_NO), events_queued_since_last_gtid(0),
gtid_reconnect_event_skip_count(0), gtid_event_seen(false)
{
@@ -890,6 +891,9 @@ bool Master_info_index::init_all_master_info()
File index_file_nr;
DBUG_ENTER("init_all_master_info");
+ mysql_mutex_assert_owner(&LOCK_active_mi);
+ DBUG_ASSERT(master_info_index);
+
if ((index_file_nr= my_open(index_file_name,
O_RDWR | O_CREAT | O_BINARY ,
MYF(MY_WME | ME_NOREFRESH))) < 0 ||
@@ -1089,6 +1093,10 @@ Master_info_index::get_master_info(const LEX_STRING *connection_name,
("connection_name: '%.*s'", (int) connection_name->length,
connection_name->str));
+ mysql_mutex_assert_owner(&LOCK_active_mi);
+ if (!this) // master_info_index is set to NULL on server shutdown
+ return NULL;
+
/* Make name lower case for comparison */
res= strmake(buff, connection_name->str, connection_name->length);
my_casedn_str(system_charset_info, buff);
@@ -1116,6 +1124,9 @@ bool Master_info_index::check_duplicate_master_info(LEX_STRING *name_arg,
Master_info *mi;
DBUG_ENTER("check_duplicate_master_info");
+ mysql_mutex_assert_owner(&LOCK_active_mi);
+ DBUG_ASSERT(master_info_index);
+
/* Get full host and port name */
if ((mi= master_info_index->get_master_info(name_arg,
Sql_condition::WARN_LEVEL_NOTE)))
@@ -1238,6 +1249,8 @@ bool Master_info_index::give_error_if_slave_running()
{
DBUG_ENTER("warn_if_slave_running");
mysql_mutex_assert_owner(&LOCK_active_mi);
+ if (!this) // master_info_index is set to NULL on server shutdown
+ return TRUE;
for (uint i= 0; i< master_info_hash.records; ++i)
{
diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h
index f20c2e21a5f..7dbe6d9b534 100644
--- a/sql/rpl_mi.h
+++ b/sql/rpl_mi.h
@@ -136,6 +136,12 @@ class Master_info : public Slave_reporting_capability
DYNAMIC_ARRAY ignore_server_ids;
ulong master_id;
/*
+ At reconnect and until the first rotate event is seen, prev_master_id is
+ the value of master_id during the previous connection, used to detect
+ a silent change of master server during reconnects.
+ */
+ ulong prev_master_id;
+ /*
Which kind of GTID position (if any) is used when connecting to master.
Note that you can not change the numeric values of these, they are used
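
prev_master_id exists to catch the case where the slave reconnects and, without any explicit notification, ends up talking to a different server. The sketch below shows how such a check could look; the struct and method names are invented, and in the server the comparison happens when the first events after reconnect reveal the new master_id:

    #include <cstdio>

    struct MasterInfoSketch
    {
      unsigned long master_id;
      unsigned long prev_master_id;

      void on_reconnect()
      {
        prev_master_id= master_id;        /* remember who we talked to before */
        master_id= 0;                     /* unknown until the master tells us */
      }
      bool first_event_from_master(unsigned long id_seen)
      {
        master_id= id_seen;
        /* A non-zero, different previous id means the master changed silently. */
        return prev_master_id != 0 && prev_master_id != master_id;
      }
    };

    int main()
    {
      MasterInfoSketch mi= { 1, 0 };
      mi.on_reconnect();
      if (mi.first_event_from_master(2))
        printf("master changed silently during reconnect\n");
      return 0;
    }
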
diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc
index 90ee2360eb7..9b91206ca75 100644
--- a/sql/rpl_parallel.cc
+++ b/sql/rpl_parallel.cc
@@ -4,18 +4,8 @@
#include "rpl_mi.h"
#include "debug_sync.h"
-
/*
Code for optional parallel execution of replicated events on the slave.
-
- ToDo list:
-
- - Retry of failed transactions is not yet implemented for the parallel case.
-
- - All the waits (eg. in struct wait_for_commit and in
- rpl_parallel_thread_pool::get_thread()) need to be killable. And on kill,
- everything needs to be correctly rolled back and stopped in all threads,
- to ensure a consistent slave replication state.
*/
struct rpl_parallel_thread_pool global_rpl_thread_pool;
@@ -31,20 +21,22 @@ rpt_handle_event(rpl_parallel_thread::queued_event *qev,
rpl_group_info *rgi= qev->rgi;
Relay_log_info *rli= rgi->rli;
THD *thd= rgi->thd;
+ Log_event *ev;
+
+ DBUG_ASSERT(qev->typ == rpl_parallel_thread::queued_event::QUEUED_EVENT);
+ ev= qev->ev;
- thd->rgi_slave= rgi;
thd->system_thread_info.rpl_sql_info->rpl_filter = rli->mi->rpl_filter;
+ ev->thd= thd;
- /* ToDo: Access to thd, and what about rli, split out a parallel part? */
- mysql_mutex_lock(&rli->data_lock);
- qev->ev->thd= thd;
strcpy(rgi->event_relay_log_name_buf, qev->event_relay_log_name);
rgi->event_relay_log_name= rgi->event_relay_log_name_buf;
rgi->event_relay_log_pos= qev->event_relay_log_pos;
rgi->future_event_relay_log_pos= qev->future_event_relay_log_pos;
strcpy(rgi->future_event_master_log_name, qev->future_event_master_log_name);
- err= apply_event_and_update_pos(qev->ev, thd, rgi, rpt);
- thd->rgi_slave= NULL;
+ mysql_mutex_lock(&rli->data_lock);
+ /* Mutex will be released in apply_event_and_update_pos(). */
+ err= apply_event_and_update_pos(ev, thd, rgi, rpt);
thread_safe_increment64(&rli->executed_entries,
&slave_executed_entries_lock);
@@ -58,6 +50,8 @@ handle_queued_pos_update(THD *thd, rpl_parallel_thread::queued_event *qev)
{
int cmp;
Relay_log_info *rli;
+ rpl_parallel_entry *e;
+
/*
Events that are not part of an event group, such as Format Description,
Stop, GTID List and such, are executed directly in the driver SQL thread,
@@ -68,6 +62,13 @@ handle_queued_pos_update(THD *thd, rpl_parallel_thread::queued_event *qev)
if ((thd->variables.option_bits & OPTION_BEGIN) &&
opt_using_transactions)
return;
+
+ /* Do not update position if an earlier event group caused an error abort. */
+ DBUG_ASSERT(qev->typ == rpl_parallel_thread::queued_event::QUEUED_POS_UPDATE);
+ e= qev->entry_for_queued;
+ if (e->stop_on_error_sub_id < (uint64)ULONGLONG_MAX || e->force_abort)
+ return;
+
rli= qev->rgi->rli;
mysql_mutex_lock(&rli->data_lock);
cmp= strcmp(rli->group_relay_log_name, qev->event_relay_log_name);
@@ -165,6 +166,7 @@ finish_event_group(THD *thd, uint64 sub_id, rpl_parallel_entry *entry,
mysql_mutex_unlock(&entry->LOCK_parallel_entry);
thd->clear_error();
+ thd->reset_killed();
thd->get_stmt_da()->reset_diagnostics_area();
wfc->wakeup_subsequent_commits(rgi->worker_error);
}
@@ -197,6 +199,290 @@ unlock_or_exit_cond(THD *thd, mysql_mutex_t *lock, bool *did_enter_cond,
}
+static void
+register_wait_for_prior_event_group_commit(rpl_group_info *rgi,
+ rpl_parallel_entry *entry)
+{
+ mysql_mutex_assert_owner(&entry->LOCK_parallel_entry);
+ if (rgi->wait_commit_sub_id > entry->last_committed_sub_id)
+ {
+ /*
+ Register that the commit of this event group must wait for the
+ commit of the previous event group to complete before it may
+ complete itself, so that we preserve commit order.
+ */
+ wait_for_commit *waitee=
+ &rgi->wait_commit_group_info->commit_orderer;
+ rgi->commit_orderer.register_wait_for_prior_commit(waitee);
+ }
+}
+
+
+#ifndef DBUG_OFF
+static int
+dbug_simulate_tmp_error(rpl_group_info *rgi, THD *thd)
+{
+ if (rgi->current_gtid.domain_id == 0 && rgi->current_gtid.seq_no == 100 &&
+ rgi->retry_event_count == 4)
+ {
+ thd->clear_error();
+ thd->get_stmt_da()->reset_diagnostics_area();
+ my_error(ER_LOCK_DEADLOCK, MYF(0));
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+
+/*
+ If we detect a deadlock due to e.g. storage engine locks that conflict with
+ the fixed commit order, then the later transaction will be killed
+ asynchronously to allow the former to complete its commit.
+
+ In this case, we convert the 'killed' error into a deadlock error, and retry
+ the later transaction. */
+static void
+convert_kill_to_deadlock_error(rpl_group_info *rgi)
+{
+ THD *thd= rgi->thd;
+ int err_code;
+
+ if (!thd->get_stmt_da()->is_error())
+ return;
+ err_code= thd->get_stmt_da()->sql_errno();
+ if ((err_code == ER_QUERY_INTERRUPTED || err_code == ER_CONNECTION_KILLED) &&
+ rgi->killed_for_retry)
+ {
+ thd->clear_error();
+ my_error(ER_LOCK_DEADLOCK, MYF(0));
+ rgi->killed_for_retry= false;
+ thd->reset_killed();
+ }
+}
+
+
+static bool
+is_group_ending(Log_event *ev, Log_event_type event_type)
+{
+ return event_type == XID_EVENT ||
+ (event_type == QUERY_EVENT &&
+ (((Query_log_event *)ev)->is_commit() ||
+ ((Query_log_event *)ev)->is_rollback()));
+}
+
+
+static int
+retry_event_group(rpl_group_info *rgi, rpl_parallel_thread *rpt,
+ rpl_parallel_thread::queued_event *orig_qev)
+{
+ IO_CACHE rlog;
+ LOG_INFO linfo;
+ File fd= (File)-1;
+ const char *errmsg= NULL;
+ inuse_relaylog *ir= rgi->relay_log;
+ uint64 event_count;
+ uint64 events_to_execute= rgi->retry_event_count;
+ Relay_log_info *rli= rgi->rli;
+ int err;
+ ulonglong cur_offset, old_offset;
+ char log_name[FN_REFLEN];
+ THD *thd= rgi->thd;
+ rpl_parallel_entry *entry= rgi->parallel_entry;
+ ulong retries= 0;
+
+do_retry:
+ event_count= 0;
+ err= 0;
+
+ /*
+ If we already started committing before getting the deadlock (or other
+ error) that caused us to need to retry, we have already signalled
+ subsequent transactions that we have started committing. This is
+ potentially a problem, as now we will rollback, and if subsequent
+ transactions would start to execute now, they could see an unexpected
+ state of the database and get e.g. a key-not-found or duplicate-key error.
+
+ However, to get a deadlock in the first place, there must have been
+ another earlier transaction that is waiting for us. Thus that other
+ transaction has _not_ yet started to commit, and any subsequent
+ transactions will still be waiting at this point.
+
+ So here, we decrement back the count of transactions that started
+ committing (if we already incremented it), undoing the effect of an
+ earlier mark_start_commit(). Then later, when the retry succeeds and we
+ commit again, we can do a new mark_start_commit() and eventually wake up
+ subsequent transactions at the proper time.
+
+ We need to do the unmark before the rollback, to be sure that the
+ transaction we deadlocked with will not signal that it started to commit
+ until after the unmark.
+ */
+ rgi->unmark_start_commit();
+
+ /*
+ We might get the deadlock error that causes the retry during commit, while
+ sitting in wait_for_prior_commit(). If this happens, we will have a
+ pending error in the wait_for_commit object. So clear this by
+ unregistering (and later re-registering) the wait.
+ */
+ if (thd->wait_for_commit_ptr)
+ thd->wait_for_commit_ptr->unregister_wait_for_prior_commit();
+ rgi->cleanup_context(thd, 1);
+
+ /*
+ If we retry due to a deadlock kill that occurred during the commit step, we
+ might have already made (but not committed) an update to the table
+ mysql.gtid_slave_pos, and cleared the gtid_pending flag. Now we have
+ rolled back any such update, so we must set the gtid_pending flag back to
+ true so that we will do a new update when/if we succeed with the retry.
+ */
+ rgi->gtid_pending= true;
+
+ mysql_mutex_lock(&rli->data_lock);
+ ++rli->retried_trans;
+ statistic_increment(slave_retried_transactions, LOCK_status);
+ mysql_mutex_unlock(&rli->data_lock);
+
+ mysql_mutex_lock(&entry->LOCK_parallel_entry);
+ register_wait_for_prior_event_group_commit(rgi, entry);
+ mysql_mutex_unlock(&entry->LOCK_parallel_entry);
+
+ strmake_buf(log_name, ir->name);
+ if ((fd= open_binlog(&rlog, log_name, &errmsg)) < 0)
+ {
+ err= 1;
+ goto err;
+ }
+ cur_offset= rgi->retry_start_offset;
+ my_b_seek(&rlog, cur_offset);
+
+ do
+ {
+ Log_event_type event_type;
+ Log_event *ev;
+ rpl_parallel_thread::queued_event *qev;
+
+ /* The loop is here so we can try again with the next relay log file on EOF. */
+ for (;;)
+ {
+ old_offset= cur_offset;
+ ev= Log_event::read_log_event(&rlog, 0,
+ rli->relay_log.description_event_for_exec /* ToDo: this needs fixing */,
+ opt_slave_sql_verify_checksum);
+ cur_offset= my_b_tell(&rlog);
+
+ if (ev)
+ break;
+ if (rlog.error < 0)
+ {
+ errmsg= "slave SQL thread aborted because of I/O error";
+ err= 1;
+ goto err;
+ }
+ if (rlog.error > 0)
+ {
+ sql_print_error("Slave SQL thread: I/O error reading "
+ "event(errno: %d cur_log->error: %d)",
+ my_errno, rlog.error);
+ errmsg= "Aborting slave SQL thread because of partial event read";
+ err= 1;
+ goto err;
+ }
+ /* EOF. Move to the next relay log. */
+ end_io_cache(&rlog);
+ mysql_file_close(fd, MYF(MY_WME));
+ fd= (File)-1;
+
+ /* Find the next relay log file. */
+ if ((err= rli->relay_log.find_log_pos(&linfo, log_name, 1)) ||
+ (err= rli->relay_log.find_next_log(&linfo, 1)))
+ {
+ char buff[22];
+ sql_print_error("next log error: %d offset: %s log: %s",
+ err,
+ llstr(linfo.index_file_offset, buff),
+ log_name);
+ goto err;
+ }
+ strmake_buf(log_name, linfo.log_file_name);
+
+ if ((fd= open_binlog(&rlog, log_name, &errmsg)) < 0)
+ {
+ err= 1;
+ goto err;
+ }
+ /* Loop to try again on the new log file. */
+ }
+
+ event_type= ev->get_type_code();
+ if (!Log_event::is_group_event(event_type))
+ {
+ delete ev;
+ continue;
+ }
+ ev->thd= thd;
+
+ mysql_mutex_lock(&rpt->LOCK_rpl_thread);
+ qev= rpt->retry_get_qev(ev, orig_qev, log_name, cur_offset,
+ cur_offset - old_offset);
+ mysql_mutex_unlock(&rpt->LOCK_rpl_thread);
+ if (!qev)
+ {
+ delete ev;
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ err= 1;
+ goto err;
+ }
+ if (is_group_ending(ev, event_type))
+ rgi->mark_start_commit();
+
+ err= rpt_handle_event(qev, rpt);
+ ++event_count;
+ mysql_mutex_lock(&rpt->LOCK_rpl_thread);
+ rpt->free_qev(qev);
+ mysql_mutex_unlock(&rpt->LOCK_rpl_thread);
+
+ delete_or_keep_event_post_apply(rgi, event_type, ev);
+ DBUG_EXECUTE_IF("rpl_parallel_simulate_double_temp_err_gtid_0_x_100",
+ if (retries == 0) err= dbug_simulate_tmp_error(rgi, thd););
+ DBUG_EXECUTE_IF("rpl_parallel_simulate_infinite_temp_err_gtid_0_x_100",
+ err= dbug_simulate_tmp_error(rgi, thd););
+ if (err)
+ {
+ convert_kill_to_deadlock_error(rgi);
+ if (has_temporary_error(thd))
+ {
+ ++retries;
+ if (retries < slave_trans_retries)
+ {
+ end_io_cache(&rlog);
+ mysql_file_close(fd, MYF(MY_WME));
+ fd= (File)-1;
+ goto do_retry;
+ }
+ sql_print_error("Slave worker thread retried transaction %lu time(s) "
+ "in vain, giving up. Consider raising the value of "
+ "the slave_transaction_retries variable.",
+ slave_trans_retries);
+ }
+ goto err;
+ }
+ } while (event_count < events_to_execute);
+
+err:
+
+ if (fd >= 0)
+ {
+ end_io_cache(&rlog);
+ mysql_file_close(fd, MYF(MY_WME));
+ }
+ if (errmsg)
+ sql_print_error("Error reading relay log event: %s", errmsg);
+ return err;
+}
+
+
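The retry path above has a generic shape: undo the partial work, re-open the relay log at the saved start offset, re-apply the events one by one, and on another temporary error start the whole group over, giving up once the retry limit is reached. A minimal, self-contained C++ sketch of that control flow follows; the types and names (EventStream, apply_next) are illustrative stand-ins, not the commit's actual API.

    #include <cstdio>

    // Hypothetical stand-in for the stream of events belonging to one group.
    struct EventStream {
      int pos;
      int fail_until;            // simulate temporary errors on early attempts
      bool apply_next(int attempt)
      {
        // Pretend the first `fail_until` attempts hit a temporary error.
        return attempt >= fail_until;
      }
    };

    static int retry_group(EventStream *s, int events_to_execute, int max_retries)
    {
      int retries= 0;

    do_retry:
      // Roll back / reset any partial state, then re-read from the start offset.
      s->pos= 0;
      for (int i= 0; i < events_to_execute; i++)
      {
        if (!s->apply_next(retries))
        {
          if (++retries < max_retries)
            goto do_retry;                 // temporary error: start the group over
          std::fprintf(stderr, "giving up after %d retries\n", retries);
          return 1;                        // permanent failure
        }
        s->pos++;
      }
      return 0;                            // whole group re-applied successfully
    }

    int main()
    {
      EventStream s= { 0, 2 };             // fail the first two attempts
      return retry_group(&s, 5, 10);
    }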
pthread_handler_t
handle_rpl_parallel_thread(void *arg)
{
@@ -215,6 +501,8 @@ handle_rpl_parallel_thread(void *arg)
rpl_sql_thread_info sql_info(NULL);
size_t total_event_size;
int err;
+ inuse_relaylog *last_ir;
+ uint64 accumulated_ir_count;
struct rpl_parallel_thread *rpt= (struct rpl_parallel_thread *)arg;
@@ -244,39 +532,6 @@ handle_rpl_parallel_thread(void *arg)
thd->set_time();
thd->variables.lock_wait_timeout= LONG_TIMEOUT;
thd->system_thread_info.rpl_sql_info= &sql_info;
- /*
- For now, we need to run the replication parallel worker threads in
- READ COMMITTED. This is needed because gap locks are not symmetric.
- For example, a gap lock from a DELETE blocks an insert intention lock,
- but not vice versa. So an INSERT followed by DELETE can group commit
- on the master, but if we are unlucky with thread scheduling we can
- then deadlock on the slave because the INSERT ends up waiting for a
- gap lock from the DELETE (and the DELETE in turn waits for the INSERT
- in wait_for_prior_commit()). See also MDEV-5914.
-
- It should be mostly safe to run in READ COMMITTED in the slave anyway.
- The commit order is already fixed from on the master, so we do not
- risk logging into the binlog in an incorrect order between worker
- threads (one that would cause different results if executed on a
- lower-level slave that uses this slave as a master). The only
- potential problem is with transactions run in a different master
- connection (using multi-source replication), or run directly on the
- slave by an application; when using READ COMMITTED we are not
- guaranteed serialisability of binlogged statements.
-
- In practice, this is unlikely to be an issue. In GTID mode, such
- parallel transactions from multi-source or application must in any
- case use a different replication domain, in which case binlog order
- by definition must be independent between the different domain. Even
- in non-GTID mode, normally one will assume that the external
- transactions are not conflicting with those applied by the slave, so
- that isolation level should make no difference. It would be rather
- strange if the result of applying query events from one master would
- depend on the timing and nature of other queries executed from
- different multi-source connections or done directly on the slave by
- an application. Still, something to be aware of.
- */
- thd->variables.tx_isolation= ISO_READ_COMMITTED;
mysql_mutex_lock(&rpt->LOCK_rpl_thread);
rpt->thd= thd;
@@ -323,7 +578,7 @@ handle_rpl_parallel_thread(void *arg)
bool end_of_group, group_ending;
total_event_size+= events->event_size;
- if (!events->ev)
+ if (events->typ == rpl_parallel_thread::queued_event::QUEUED_POS_UPDATE)
{
handle_queued_pos_update(thd, events);
events->next= qevs_to_free;
@@ -331,8 +586,33 @@ handle_rpl_parallel_thread(void *arg)
events= next;
continue;
}
+ else if (events->typ ==
+ rpl_parallel_thread::queued_event::QUEUED_MASTER_RESTART)
+ {
+ if (in_event_group)
+ {
+ /*
+ Master restarted (crashed) in the middle of an event group.
+ So we need to roll back and discard that event group.
+ */
+ group_rgi->cleanup_context(thd, 1);
+ in_event_group= false;
+ finish_event_group(thd, group_rgi->gtid_sub_id,
+ events->entry_for_queued, group_rgi);
+
+ group_rgi->next= rgis_to_free;
+ rgis_to_free= group_rgi;
+ thd->rgi_slave= group_rgi= NULL;
+ }
+
+ events->next= qevs_to_free;
+ qevs_to_free= events;
+ events= next;
+ continue;
+ }
+ DBUG_ASSERT(events->typ==rpl_parallel_thread::queued_event::QUEUED_EVENT);
- group_rgi= rgi;
+ thd->rgi_slave= group_rgi= rgi;
gco= rgi->gco;
/* Handle a new event group, which will be initiated by a GTID event. */
if ((event_type= events->ev->get_type_code()) == GTID_EVENT)
@@ -341,7 +621,6 @@ handle_rpl_parallel_thread(void *arg)
PSI_stage_info old_stage;
uint64 wait_count;
- thd->tx_isolation= (enum_tx_isolation)thd->variables.tx_isolation;
in_event_group= true;
/*
If the standalone flag is set, then this event group consists of a
@@ -352,9 +631,7 @@ handle_rpl_parallel_thread(void *arg)
(0 != (static_cast<Gtid_log_event *>(events->ev)->flags2 &
Gtid_log_event::FL_STANDALONE));
- /* Save this, as it gets cleared when the event group commits. */
event_gtid_sub_id= rgi->gtid_sub_id;
-
rgi->thd= thd;
/*
@@ -388,7 +665,7 @@ handle_rpl_parallel_thread(void *arg)
{
DEBUG_SYNC(thd, "rpl_parallel_start_waiting_for_prior_killed");
thd->send_kill_message();
- slave_output_error_info(rgi->rli, thd);
+ slave_output_error_info(rgi, thd);
signal_error_to_sql_driver_thread(thd, rgi, 1);
/*
Even though we were killed, we need to continue waiting for the
@@ -430,17 +707,9 @@ handle_rpl_parallel_thread(void *arg)
if (unlikely(entry->stop_on_error_sub_id <= rgi->wait_commit_sub_id))
skip_event_group= true;
- else if (rgi->wait_commit_sub_id > entry->last_committed_sub_id)
- {
- /*
- Register that the commit of this event group must wait for the
- commit of the previous event group to complete before it may
- complete itself, so that we preserve commit order.
- */
- wait_for_commit *waitee=
- &rgi->wait_commit_group_info->commit_orderer;
- rgi->commit_orderer.register_wait_for_prior_commit(waitee);
- }
+ else
+ register_wait_for_prior_event_group_commit(rgi, entry);
+
unlock_or_exit_cond(thd, &entry->LOCK_parallel_entry,
&did_enter_cond, &old_stage);
@@ -467,7 +736,7 @@ handle_rpl_parallel_thread(void *arg)
if (res < 0)
{
/* Error. */
- slave_output_error_info(rgi->rli, thd);
+ slave_output_error_info(rgi, thd);
signal_error_to_sql_driver_thread(thd, rgi, 1);
}
else if (!res)
@@ -482,11 +751,8 @@ handle_rpl_parallel_thread(void *arg)
}
}
- group_ending= event_type == XID_EVENT ||
- (event_type == QUERY_EVENT &&
- (((Query_log_event *)events->ev)->is_commit() ||
- ((Query_log_event *)events->ev)->is_rollback()));
- if (group_ending)
+ group_ending= is_group_ending(events->ev, event_type);
+ if (group_ending && likely(!rgi->worker_error))
{
DEBUG_SYNC(thd, "rpl_parallel_before_mark_start_commit");
rgi->mark_start_commit();
@@ -498,24 +764,42 @@ handle_rpl_parallel_thread(void *arg)
processing between the event groups as a simple way to ensure that
everything is stopped and cleaned up correctly.
*/
- if (!rgi->worker_error && !skip_event_group)
+ if (likely(!rgi->worker_error) && !skip_event_group)
+ {
+ ++rgi->retry_event_count;
err= rpt_handle_event(events, rpt);
+ delete_or_keep_event_post_apply(rgi, event_type, events->ev);
+ DBUG_EXECUTE_IF("rpl_parallel_simulate_temp_err_gtid_0_x_100",
+ err= dbug_simulate_tmp_error(rgi, thd););
+ if (err)
+ {
+ convert_kill_to_deadlock_error(rgi);
+ if (has_temporary_error(thd) && slave_trans_retries > 0)
+ err= retry_event_group(rgi, rpt, events);
+ }
+ }
else
+ {
+ delete events->ev;
err= thd->wait_for_prior_commit();
+ }
end_of_group=
in_event_group &&
((group_standalone && !Log_event::is_part_of_group(event_type)) ||
group_ending);
- delete_or_keep_event_post_apply(rgi, event_type, events->ev);
events->next= qevs_to_free;
qevs_to_free= events;
- if (unlikely(err) && !rgi->worker_error)
+ if (unlikely(err))
{
- slave_output_error_info(rgi->rli, thd);
- signal_error_to_sql_driver_thread(thd, rgi, err);
+ if (!rgi->worker_error)
+ {
+ slave_output_error_info(rgi, thd);
+ signal_error_to_sql_driver_thread(thd, rgi, err);
+ }
+ thd->reset_killed();
}
if (end_of_group)
{
@@ -523,7 +807,7 @@ handle_rpl_parallel_thread(void *arg)
finish_event_group(thd, event_gtid_sub_id, entry, rgi);
rgi->next= rgis_to_free;
rgis_to_free= rgi;
- group_rgi= rgi= NULL;
+ thd->rgi_slave= group_rgi= rgi= NULL;
skip_event_group= false;
DEBUG_SYNC(thd, "rpl_parallel_end_of_group");
}
@@ -548,12 +832,34 @@ handle_rpl_parallel_thread(void *arg)
rpt->free_rgi(rgis_to_free);
rgis_to_free= next;
}
+ last_ir= NULL;
+ accumulated_ir_count= 0;
while (qevs_to_free)
{
rpl_parallel_thread::queued_event *next= qevs_to_free->next;
+ inuse_relaylog *ir= qevs_to_free->ir;
+ /* Batch up refcount updates to reduce the use of synchronised operations. */
+ if (last_ir != ir)
+ {
+ if (last_ir)
+ {
+ my_atomic_rwlock_wrlock(&last_ir->inuse_relaylog_atomic_lock);
+ my_atomic_add64(&last_ir->dequeued_count, accumulated_ir_count);
+ my_atomic_rwlock_wrunlock(&last_ir->inuse_relaylog_atomic_lock);
+ accumulated_ir_count= 0;
+ }
+ last_ir= ir;
+ }
+ ++accumulated_ir_count;
rpt->free_qev(qevs_to_free);
qevs_to_free= next;
}
+ if (last_ir)
+ {
+ my_atomic_rwlock_wrlock(&last_ir->inuse_relaylog_atomic_lock);
+ my_atomic_add64(&last_ir->dequeued_count, accumulated_ir_count);
+ my_atomic_rwlock_wrunlock(&last_ir->inuse_relaylog_atomic_lock);
+ }
if ((events= rpt->event_queue) != NULL)
{
@@ -584,7 +890,7 @@ handle_rpl_parallel_thread(void *arg)
in_event_group= false;
mysql_mutex_lock(&rpt->LOCK_rpl_thread);
rpt->free_rgi(group_rgi);
- group_rgi= NULL;
+ thd->rgi_slave= group_rgi= NULL;
skip_event_group= false;
}
if (!in_event_group)
@@ -802,8 +1108,7 @@ err:
rpl_parallel_thread::queued_event *
-rpl_parallel_thread::get_qev(Log_event *ev, ulonglong event_size,
- Relay_log_info *rli)
+rpl_parallel_thread::get_qev_common(Log_event *ev, ulonglong event_size)
{
queued_event *qev;
mysql_mutex_assert_owner(&LOCK_rpl_thread);
@@ -814,9 +1119,21 @@ rpl_parallel_thread::get_qev(Log_event *ev, ulonglong event_size,
my_error(ER_OUTOFMEMORY, MYF(0), (int)sizeof(*qev));
return NULL;
}
+ qev->typ= rpl_parallel_thread::queued_event::QUEUED_EVENT;
qev->ev= ev;
qev->event_size= event_size;
qev->next= NULL;
+ return qev;
+}
+
+
+rpl_parallel_thread::queued_event *
+rpl_parallel_thread::get_qev(Log_event *ev, ulonglong event_size,
+ Relay_log_info *rli)
+{
+ queued_event *qev= get_qev_common(ev, event_size);
+ if (!qev)
+ return NULL;
strcpy(qev->event_relay_log_name, rli->event_relay_log_name);
qev->event_relay_log_pos= rli->event_relay_log_pos;
qev->future_event_relay_log_pos= rli->future_event_relay_log_pos;
@@ -825,6 +1142,24 @@ rpl_parallel_thread::get_qev(Log_event *ev, ulonglong event_size,
}
+rpl_parallel_thread::queued_event *
+rpl_parallel_thread::retry_get_qev(Log_event *ev, queued_event *orig_qev,
+ const char *relay_log_name,
+ ulonglong event_pos, ulonglong event_size)
+{
+ queued_event *qev= get_qev_common(ev, event_size);
+ if (!qev)
+ return NULL;
+ qev->rgi= orig_qev->rgi;
+ strcpy(qev->event_relay_log_name, relay_log_name);
+ qev->event_relay_log_pos= event_pos;
+ qev->future_event_relay_log_pos= event_pos+event_size;
+ strcpy(qev->future_event_master_log_name,
+ orig_qev->future_event_master_log_name);
+ return qev;
+}
+
+
void
rpl_parallel_thread::free_qev(rpl_parallel_thread::queued_event *qev)
{
@@ -836,7 +1171,7 @@ rpl_parallel_thread::free_qev(rpl_parallel_thread::queued_event *qev)
rpl_group_info*
rpl_parallel_thread::get_rgi(Relay_log_info *rli, Gtid_log_event *gtid_ev,
- rpl_parallel_entry *e)
+ rpl_parallel_entry *e, ulonglong event_size)
{
rpl_group_info *rgi;
mysql_mutex_assert_owner(&LOCK_rpl_thread);
@@ -864,6 +1199,10 @@ rpl_parallel_thread::get_rgi(Relay_log_info *rli, Gtid_log_event *gtid_ev,
return NULL;
}
rgi->parallel_entry= e;
+ rgi->relay_log= rli->last_inuse_relaylog;
+ rgi->retry_start_offset= rli->future_event_relay_log_pos-event_size;
+ rgi->retry_event_count= 0;
+ rgi->killed_for_retry= false;
return rgi;
}
@@ -1018,10 +1357,11 @@ rpl_parallel_thread_pool::release_thread(rpl_parallel_thread *rpt)
if it is still available. Otherwise a new worker thread is allocated.
*/
rpl_parallel_thread *
-rpl_parallel_entry::choose_thread(Relay_log_info *rli, bool *did_enter_cond,
+rpl_parallel_entry::choose_thread(rpl_group_info *rgi, bool *did_enter_cond,
PSI_stage_info *old_stage, bool reuse)
{
uint32 idx;
+ Relay_log_info *rli= rgi->rli;
rpl_parallel_thread *thr;
idx= rpl_thread_idx;
@@ -1066,7 +1406,7 @@ rpl_parallel_entry::choose_thread(Relay_log_info *rli, bool *did_enter_cond,
debug_sync_set_action(rli->sql_driver_thd,
STRING_WITH_LEN("now SIGNAL wait_queue_killed"));
};);
- slave_output_error_info(rli, rli->sql_driver_thd);
+ slave_output_error_info(rgi, rli->sql_driver_thd);
return NULL;
}
else
@@ -1300,6 +1640,91 @@ rpl_parallel::workers_idle()
}
+int
+rpl_parallel_entry::queue_master_restart(rpl_group_info *rgi,
+ Format_description_log_event *fdev)
+{
+ uint32 idx;
+ rpl_parallel_thread *thr;
+ rpl_parallel_thread::queued_event *qev;
+ Relay_log_info *rli= rgi->rli;
+
+ /*
+ We only need to queue the server restart if we still have a thread working
+ on a (potentially partial) event group.
+
+ If the last thread we queued for has finished, then it cannot have any
+ partial event group that needs aborting.
+
+ Thus there is no need for the full complexity of choose_thread(). We only
+ need to check if we have a current worker thread, and queue for it if so.
+ */
+ idx= rpl_thread_idx;
+ thr= rpl_threads[idx];
+ if (!thr)
+ return 0;
+ mysql_mutex_lock(&thr->LOCK_rpl_thread);
+ if (thr->current_owner != &rpl_threads[idx])
+ {
+ /* No active worker thread, so no need to queue the master restart. */
+ mysql_mutex_unlock(&thr->LOCK_rpl_thread);
+ return 0;
+ }
+
+ if (!(qev= thr->get_qev(fdev, 0, rli)))
+ {
+ mysql_mutex_unlock(&thr->LOCK_rpl_thread);
+ return 1;
+ }
+
+ qev->rgi= rgi;
+ qev->typ= rpl_parallel_thread::queued_event::QUEUED_MASTER_RESTART;
+ qev->entry_for_queued= this;
+ qev->ir= rli->last_inuse_relaylog;
+ ++qev->ir->queued_count;
+ thr->enqueue(qev);
+ mysql_mutex_unlock(&thr->LOCK_rpl_thread);
+ return 0;
+}
+
+
+int
+rpl_parallel::wait_for_workers_idle(THD *thd)
+{
+ uint32 i, max_i;
+
+ /*
+ The domain_hash is only accessed by the SQL driver thread, so it is safe
+ to iterate over without a lock.
+ */
+ max_i= domain_hash.records;
+ for (i= 0; i < max_i; ++i)
+ {
+ bool active;
+ wait_for_commit my_orderer;
+ struct rpl_parallel_entry *e;
+
+ e= (struct rpl_parallel_entry *)my_hash_element(&domain_hash, i);
+ mysql_mutex_lock(&e->LOCK_parallel_entry);
+ if ((active= (e->current_sub_id > e->last_committed_sub_id)))
+ {
+ wait_for_commit *waitee= &e->current_group_info->commit_orderer;
+ my_orderer.register_wait_for_prior_commit(waitee);
+ thd->wait_for_commit_ptr= &my_orderer;
+ }
+ mysql_mutex_unlock(&e->LOCK_parallel_entry);
+ if (active)
+ {
+ int err= my_orderer.wait_for_prior_commit(thd);
+ thd->wait_for_commit_ptr= NULL;
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+
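wait_for_workers_idle() above walks every replication domain and, for each one that still has an uncommitted event group, registers a waiter and blocks until that group commits. Below is a condition-variable sketch of the same idea; Entry and the sub-id fields are invented stand-ins for rpl_parallel_entry and the wait_for_commit machinery, not the server's API.

    #include <condition_variable>
    #include <mutex>
    #include <vector>

    // Hypothetical per-domain state; sub-ids increase monotonically.
    struct Entry {
      std::mutex lock;
      std::condition_variable committed;
      unsigned long long current_sub_id= 0;        // last queued event group
      unsigned long long last_committed_sub_id= 0; // last fully committed group
    };

    // Block until every domain has committed everything queued so far.
    static void wait_for_workers_idle(std::vector<Entry*> &domains)
    {
      for (Entry *e : domains)
      {
        std::unique_lock<std::mutex> lk(e->lock);
        unsigned long long target= e->current_sub_id;
        e->committed.wait(lk, [&] { return e->last_committed_sub_id >= target; });
      }
    }

    // A worker calls this when it finishes committing an event group.
    static void mark_committed(Entry *e, unsigned long long sub_id)
    {
      std::lock_guard<std::mutex> lk(e->lock);
      if (sub_id > e->last_committed_sub_id)
        e->last_committed_sub_id= sub_id;
      e->committed.notify_all();
    }

    int main()
    {
      Entry e;
      std::vector<Entry*> domains{ &e };
      e.current_sub_id= 1;             // one event group queued ...
      mark_committed(&e, 1);           // ... and already committed by a worker
      wait_for_workers_idle(domains);  // returns immediately
      return 0;
    }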
/*
This is used when we get an error during processing in do_event();
We will not queue any event to the thread, but we still need to wake it up
@@ -1367,6 +1792,33 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev,
/* ToDo: what to do with this lock?!? */
mysql_mutex_unlock(&rli->data_lock);
+ if (typ == FORMAT_DESCRIPTION_EVENT)
+ {
+ Format_description_log_event *fdev=
+ static_cast<Format_description_log_event *>(ev);
+ if (fdev->created)
+ {
+ /*
+ This format description event marks a new binlog after a master server
+ restart. We are going to close all temporary tables to clean up any
+ possible left-overs after a prior master crash.
+
+ Thus we need to wait for all prior events to execute to completion,
+ in case they need access to any of the temporary tables.
+
+ We also need to notify the worker thread running the prior incomplete
+ event group (if any), as such an event group signifies an incompletely
+ written group cut short by a master crash, and it must be rolled back.
+ */
+ if (current->queue_master_restart(serial_rgi, fdev) ||
+ wait_for_workers_idle(rli->sql_driver_thd))
+ {
+ delete ev;
+ return 1;
+ }
+ }
+ }
+
/*
Stop queueing additional event groups once the SQL thread is requested to
stop.
@@ -1390,15 +1842,9 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev,
if (typ == GTID_EVENT)
{
- uint32 domain_id;
- if (likely(typ == GTID_EVENT))
- {
- Gtid_log_event *gtid_ev= static_cast<Gtid_log_event *>(ev);
- domain_id= (rli->mi->using_gtid == Master_info::USE_GTID_NO ?
- 0 : gtid_ev->domain_id);
- }
- else
- domain_id= 0;
+ Gtid_log_event *gtid_ev= static_cast<Gtid_log_event *>(ev);
+ uint32 domain_id= (rli->mi->using_gtid == Master_info::USE_GTID_NO ?
+ 0 : gtid_ev->domain_id);
if (!(e= find(domain_id)))
{
my_error(ER_OUT_OF_RESOURCES, MYF(MY_WME));
@@ -1417,7 +1863,8 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev,
instead re-use a thread that we queued for previously.
*/
cur_thread=
- e->choose_thread(rli, &did_enter_cond, &old_stage, typ != GTID_EVENT);
+ e->choose_thread(serial_rgi, &did_enter_cond, &old_stage,
+ typ != GTID_EVENT);
if (!cur_thread)
{
/* This means we were killed. The error is already signalled. */
@@ -1437,7 +1884,7 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev,
{
Gtid_log_event *gtid_ev= static_cast<Gtid_log_event *>(ev);
- if (!(rgi= cur_thread->get_rgi(rli, gtid_ev, e)))
+ if (!(rgi= cur_thread->get_rgi(rli, gtid_ev, e, event_size)))
{
cur_thread->free_qev(qev);
abandon_worker_thread(rli->sql_driver_thd, cur_thread,
@@ -1527,7 +1974,7 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev,
return 1;
}
/*
- Queue an empty event, so that the position will be updated in a
+ Queue a position update, so that the position will be updated in a
reasonable way relative to other events:
- If the currently executing events are queued serially for a single
@@ -1538,7 +1985,8 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev,
least the position will not be updated until one of them has reached
the current point.
*/
- qev->ev= NULL;
+ qev->typ= rpl_parallel_thread::queued_event::QUEUED_POS_UPDATE;
+ qev->entry_for_queued= e;
}
else
{
@@ -1549,6 +1997,8 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev,
Queue the event for processing.
*/
rli->event_relay_log_pos= rli->future_event_relay_log_pos;
+ qev->ir= rli->last_inuse_relaylog;
+ ++qev->ir->queued_count;
cur_thread->enqueue(qev);
unlock_or_exit_cond(rli->sql_driver_thd, &cur_thread->LOCK_rpl_thread,
&did_enter_cond, &old_stage);
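The cleanup loop in handle_rpl_parallel_thread() batches the per-relay-log reference-count updates: it touches the shared dequeued_count once per run of consecutive events from the same relay log instead of once per event. A small sketch of that batching pattern using std::atomic; RelayLog and QueuedEvent are illustrative types, not the server's.

    #include <atomic>
    #include <cstdio>
    #include <vector>

    struct RelayLog { std::atomic<long long> dequeued_count{0}; };
    struct QueuedEvent { RelayLog *log; };

    // Flush one atomic add per run of events that share the same relay log.
    static void free_events(const std::vector<QueuedEvent> &events)
    {
      RelayLog *last= nullptr;
      long long accumulated= 0;
      for (const QueuedEvent &ev : events)
      {
        if (ev.log != last)
        {
          if (last)
            last->dequeued_count.fetch_add(accumulated);
          last= ev.log;
          accumulated= 0;
        }
        ++accumulated;
        // ... free the event itself here ...
      }
      if (last)
        last->dequeued_count.fetch_add(accumulated);  // flush the final batch
    }

    int main()
    {
      RelayLog a, b;
      std::vector<QueuedEvent> evs= { {&a}, {&a}, {&a}, {&b}, {&b} };
      free_events(evs);
      std::printf("a=%lld b=%lld\n", a.dequeued_count.load(),
                  b.dequeued_count.load());   // prints a=3 b=2
      return 0;
    }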
diff --git a/sql/rpl_parallel.h b/sql/rpl_parallel.h
index c4bb407e5eb..b114ee4ebcb 100644
--- a/sql/rpl_parallel.h
+++ b/sql/rpl_parallel.h
@@ -9,6 +9,7 @@ struct rpl_parallel_entry;
struct rpl_parallel_thread_pool;
class Relay_log_info;
+struct inuse_relaylog;
/*
@@ -71,8 +72,22 @@ struct rpl_parallel_thread {
rpl_parallel_entry *current_entry;
struct queued_event {
queued_event *next;
- Log_event *ev;
+ /*
+ queued_event can hold either an event to be executed, or just a binlog
+ position to be updated without any associated event.
+ */
+ enum queued_event_t {
+ QUEUED_EVENT,
+ QUEUED_POS_UPDATE,
+ QUEUED_MASTER_RESTART
+ } typ;
+ union {
+ Log_event *ev; /* QUEUED_EVENT */
+ rpl_parallel_entry *entry_for_queued; /* QUEUED_POS_UPDATE and
+ QUEUED_MASTER_RESTART */
+ };
rpl_group_info *rgi;
+ inuse_relaylog *ir;
ulonglong future_event_relay_log_pos;
char event_relay_log_name[FN_REFLEN];
char future_event_master_log_name[FN_REFLEN];
@@ -106,11 +121,15 @@ struct rpl_parallel_thread {
queued_size-= dequeue_size;
}
+ queued_event *get_qev_common(Log_event *ev, ulonglong event_size);
queued_event *get_qev(Log_event *ev, ulonglong event_size,
Relay_log_info *rli);
+ queued_event *retry_get_qev(Log_event *ev, queued_event *orig_qev,
+ const char *relay_log_name,
+ ulonglong event_pos, ulonglong event_size);
void free_qev(queued_event *qev);
rpl_group_info *get_rgi(Relay_log_info *rli, Gtid_log_event *gtid_ev,
- rpl_parallel_entry *e);
+ rpl_parallel_entry *e, ulonglong event_size);
void free_rgi(rpl_group_info *rgi);
group_commit_orderer *get_gco(uint64 wait_count, group_commit_orderer *prev);
void free_gco(group_commit_orderer *gco);
@@ -176,7 +195,7 @@ struct rpl_parallel_entry {
Event groups commit in order, so the rpl_group_info for an event group
will be alive (at least) as long as
- rpl_grou_info::gtid_sub_id > last_committed_sub_id. This can be used to
+ rpl_group_info::gtid_sub_id > last_committed_sub_id. This can be used to
safely refer back to previous event groups if they are still executing,
and ignore them if they completed, without requiring explicit
synchronisation between the threads.
@@ -208,10 +227,10 @@ struct rpl_parallel_entry {
/* The group_commit_orderer object for the events currently being queued. */
group_commit_orderer *current_gco;
- rpl_parallel_thread * choose_thread(Relay_log_info *rli, bool *did_enter_cond,
+ rpl_parallel_thread * choose_thread(rpl_group_info *rgi, bool *did_enter_cond,
PSI_stage_info *old_stage, bool reuse);
- group_commit_orderer *get_gco();
- void free_gco(group_commit_orderer *gco);
+ int queue_master_restart(rpl_group_info *rgi,
+ Format_description_log_event *fdev);
};
struct rpl_parallel {
HASH domain_hash;
@@ -225,6 +244,7 @@ struct rpl_parallel {
void wait_for_done(THD *thd, Relay_log_info *rli);
void stop_during_until();
bool workers_idle();
+ int wait_for_workers_idle(THD *thd);
int do_event(rpl_group_info *serial_rgi, Log_event *ev, ulonglong event_size);
};
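The reworked queued_event in rpl_parallel.h is a small tagged union: the typ discriminant says whether the entry carries a real Log_event, a bare position update, or a master-restart marker, and the anonymous union holds the payload for whichever case applies. A minimal sketch of the same layout, with placeholder payload types standing in for Log_event and rpl_parallel_entry:

    #include <cstdio>

    struct LogEvent { int code; };           // stand-in for Log_event
    struct ParallelEntry { int domain_id; };  // stand-in for rpl_parallel_entry

    struct QueuedItem {
      enum Type { QUEUED_EVENT, QUEUED_POS_UPDATE, QUEUED_MASTER_RESTART } typ;
      union {
        LogEvent *ev;                 // valid when typ == QUEUED_EVENT
        ParallelEntry *entry;         // valid for the other two cases
      };
    };

    static void handle(const QueuedItem &q)
    {
      switch (q.typ)
      {
      case QueuedItem::QUEUED_EVENT:
        std::printf("apply event %d\n", q.ev->code);
        break;
      case QueuedItem::QUEUED_POS_UPDATE:
        std::printf("update position for domain %d\n", q.entry->domain_id);
        break;
      case QueuedItem::QUEUED_MASTER_RESTART:
        std::printf("roll back partial group in domain %d\n", q.entry->domain_id);
        break;
      }
    }

    int main()
    {
      LogEvent ev{ 42 };
      ParallelEntry e{ 0 };
      QueuedItem a; a.typ= QueuedItem::QUEUED_EVENT;      a.ev= &ev;
      QueuedItem b; b.typ= QueuedItem::QUEUED_POS_UPDATE; b.entry= &e;
      handle(a);
      handle(b);
      return 0;
    }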
diff --git a/sql/rpl_record.cc b/sql/rpl_record.cc
index b1cca04d947..5d1ef671159 100644
--- a/sql/rpl_record.cc
+++ b/sql/rpl_record.cc
@@ -332,6 +332,7 @@ unpack_row(rpl_group_info *rgi,
}
rgi->rli->report(ERROR_LEVEL, ER_SLAVE_CORRUPT_EVENT,
+ rgi->gtid_info(),
"Could not read field '%s' of table '%s.%s'",
f->field_name, table->s->db.str,
table->s->table_name.str);
diff --git a/sql/rpl_record_old.cc b/sql/rpl_record_old.cc
index 5afa529a63c..8b43b268c17 100644
--- a/sql/rpl_record_old.cc
+++ b/sql/rpl_record_old.cc
@@ -141,7 +141,7 @@ unpack_row_old(rpl_group_info *rgi,
f->move_field_offset(-offset);
if (!ptr)
{
- rgi->rli->report(ERROR_LEVEL, ER_SLAVE_CORRUPT_EVENT,
+ rgi->rli->report(ERROR_LEVEL, ER_SLAVE_CORRUPT_EVENT, NULL,
"Could not read field `%s` of table `%s`.`%s`",
f->field_name, table->s->db.str,
table->s->table_name.str);
@@ -183,7 +183,7 @@ unpack_row_old(rpl_group_info *rgi,
if (event_type == WRITE_ROWS_EVENT &&
((*field_ptr)->flags & mask) == mask)
{
- rgi->rli->report(ERROR_LEVEL, ER_NO_DEFAULT_FOR_FIELD,
+ rgi->rli->report(ERROR_LEVEL, ER_NO_DEFAULT_FOR_FIELD, NULL,
"Field `%s` of table `%s`.`%s` "
"has no default value and cannot be NULL",
(*field_ptr)->field_name, table->s->db.str,
diff --git a/sql/rpl_reporting.cc b/sql/rpl_reporting.cc
index 96fe6242ac3..eb362941f3e 100644
--- a/sql/rpl_reporting.cc
+++ b/sql/rpl_reporting.cc
@@ -28,6 +28,7 @@ Slave_reporting_capability::Slave_reporting_capability(char const *thread_name)
void
Slave_reporting_capability::report(loglevel level, int err_code,
+ const char *extra_info,
const char *msg, ...) const
{
void (*report_function)(const char *, ...);
@@ -67,9 +68,10 @@ Slave_reporting_capability::report(loglevel level, int err_code,
va_end(args);
/* If the msg string ends with '.', do not add a ',' it would be ugly */
- report_function("Slave %s: %s%s Internal MariaDB error code: %d",
+ report_function("Slave %s: %s%s %s%sInternal MariaDB error code: %d",
m_thread_name, pbuff,
(pbuff[0] && *(strend(pbuff)-1) == '.') ? "" : ",",
+ (extra_info ? extra_info : ""), (extra_info ? ", " : ""),
err_code);
}
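The change to Slave_reporting_capability::report() threads an optional extra_info string (in practice the GTID of the failing event group) through to the final message and splices it in only when present. A stripped-down sketch of that variadic pattern using vsnprintf; the free function and its arguments here are invented for illustration, not the class's real interface.

    #include <cstdarg>
    #include <cstdio>

    // Format the caller's message, then append the optional extra info and the
    // error code, mirroring the two-step formatting done in report().
    static void report(int err_code, const char *extra_info, const char *fmt, ...)
    {
      char pbuff[1024];
      va_list args;
      va_start(args, fmt);
      std::vsnprintf(pbuff, sizeof(pbuff), fmt, args);
      va_end(args);

      std::fprintf(stderr, "Slave SQL: %s, %s%sInternal error code: %d\n",
                   pbuff,
                   extra_info ? extra_info : "",
                   extra_info ? ", " : "",
                   err_code);
    }

    int main()
    {
      report(1594, "Gtid 0-1-100", "Could not read field '%s' of table '%s'",
             "id", "test.t1");
      report(1594, nullptr, "Relay log read failure");   // no GTID available
      return 0;
    }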
diff --git a/sql/rpl_reporting.h b/sql/rpl_reporting.h
index 2b5e0527b9b..d90b7ad6650 100644
--- a/sql/rpl_reporting.h
+++ b/sql/rpl_reporting.h
@@ -52,8 +52,9 @@ public:
code, but can contain more information), in
printf() format.
*/
- void report(loglevel level, int err_code, const char *msg, ...) const
- ATTRIBUTE_FORMAT(printf, 4, 5);
+ void report(loglevel level, int err_code, const char *extra_info,
+ const char *msg, ...) const
+ ATTRIBUTE_FORMAT(printf, 5, 6);
/**
Clear errors. They will not show up under <code>SHOW SLAVE
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index a162d1d79f8..754b877f654 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -52,6 +52,7 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery)
info_fd(-1), cur_log_fd(-1), relay_log(&sync_relaylog_period),
sync_counter(0), is_relay_log_recovery(is_slave_recovery),
save_temporary_tables(0), mi(0),
+ inuse_relaylog_list(0), last_inuse_relaylog(0),
cur_log_old_open_count(0), group_relay_log_pos(0),
event_relay_log_pos(0),
#if HAVE_valgrind
@@ -98,8 +99,18 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery)
Relay_log_info::~Relay_log_info()
{
+ inuse_relaylog *cur;
DBUG_ENTER("Relay_log_info::~Relay_log_info");
+ cur= inuse_relaylog_list;
+ while (cur)
+ {
+ DBUG_ASSERT(cur->queued_count == cur->dequeued_count);
+ inuse_relaylog *next= cur->next;
+ my_atomic_rwlock_destroy(&cur->inuse_relaylog_atomic_lock);
+ my_free(cur);
+ cur= next;
+ }
mysql_mutex_destroy(&run_lock);
mysql_mutex_destroy(&data_lock);
mysql_mutex_destroy(&log_space_lock);
@@ -305,20 +316,80 @@ Failed to open the existing relay log info file '%s' (errno %d)",
}
rli->info_fd = info_fd;
- int relay_log_pos, master_log_pos;
+ int relay_log_pos, master_log_pos, lines;
+ char *first_non_digit;
+ /*
+ In MySQL 5.6, there is a MASTER_DELAY option to CHANGE MASTER. This is
+ not yet merged into MariaDB (as of 10.0.13). However, we detect the
+ presence of the new option in relay-log.info, as a placeholder for
+ a possible later merge of the feature, and to maintain file format
+ compatibility with MySQL 5.6+.
+ */
+ int dummy_sql_delay;
+
+ /*
+ Starting from MySQL 5.6.x, relay-log.info has a new format.
+ Now, its first line contains the number of lines in the file.
+ By reading this number we can determine which version our relay-log.info
+ comes from. We can't simply count the lines in the file, since
+ versions before 5.6.x could generate files with more lines than
+ needed. If the first line doesn't contain a number, or if it
+ contains a number less than LINES_IN_RELAY_LOG_INFO_WITH_DELAY,
+ then the file is treated as coming from a pre-5.6.x version.
+ There is no ambiguity when reading an old relay-log.info: before
+ 5.6.x, the first line contained the relay log's name, which is
+ either empty or has an extension (contains a '.'), so it can't be
+ confused with an integer.
+
+ So we just read the first line and try to figure out which
+ version this is.
+ */
+
+ /*
+ The first line is temporarily stored in rli->group_relay_log_name; if
+ it is a line count and not a relay log name (new format), it will be
+ overwritten by the second line later.
+ */
if (init_strvar_from_file(rli->group_relay_log_name,
sizeof(rli->group_relay_log_name),
+ &rli->info_file, ""))
+ {
+ msg="Error reading slave log configuration";
+ goto err;
+ }
+
+ lines= strtoul(rli->group_relay_log_name, &first_non_digit, 10);
+
+ if (rli->group_relay_log_name[0] != '\0' &&
+ *first_non_digit == '\0' &&
+ lines >= LINES_IN_RELAY_LOG_INFO_WITH_DELAY)
+ {
+ DBUG_PRINT("info", ("relay_log_info file is in new format."));
+ /* Seems to be new format => read relay log name from next line */
+ if (init_strvar_from_file(rli->group_relay_log_name,
+ sizeof(rli->group_relay_log_name),
+ &rli->info_file, ""))
+ {
+ msg="Error reading slave log configuration";
+ goto err;
+ }
+ }
+ else
+ DBUG_PRINT("info", ("relay_log_info file is in old format."));
+
+ if (init_intvar_from_file(&relay_log_pos,
+ &rli->info_file, BIN_LOG_HEADER_SIZE) ||
+ init_strvar_from_file(rli->group_master_log_name,
+ sizeof(rli->group_master_log_name),
&rli->info_file, "") ||
- init_intvar_from_file(&relay_log_pos,
- &rli->info_file, BIN_LOG_HEADER_SIZE) ||
- init_strvar_from_file(rli->group_master_log_name,
- sizeof(rli->group_master_log_name),
- &rli->info_file, "") ||
- init_intvar_from_file(&master_log_pos, &rli->info_file, 0))
+ init_intvar_from_file(&master_log_pos, &rli->info_file, 0) ||
+ (lines >= LINES_IN_RELAY_LOG_INFO_WITH_DELAY &&
+ init_intvar_from_file(&dummy_sql_delay, &rli->info_file, 0)))
{
msg="Error reading slave log configuration";
goto err;
}
+
strmake_buf(rli->event_relay_log_name,rli->group_relay_log_name);
rli->group_relay_log_pos= rli->event_relay_log_pos= relay_log_pos;
rli->group_master_log_pos= master_log_pos;
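The format probe above boils down to: read the first line, and if it parses completely as an integer of at least LINES_IN_RELAY_LOG_INFO_WITH_DELAY, treat the file as the newer MySQL 5.6-style layout and read the relay log name from the next line instead. A small strtoul-based sketch of that check; the helper name is invented, while the constant and the file layout are as described in the comments above.

    #include <cstdlib>

    enum { LINES_IN_RELAY_LOG_INFO_WITH_DELAY= 5 };

    // Return true if `first_line` is a pure integer >= the 5.6 line count,
    // i.e. the file uses the newer format with a leading line counter.
    static bool is_new_format(const char *first_line)
    {
      char *first_non_digit= nullptr;
      unsigned long lines= std::strtoul(first_line, &first_non_digit, 10);
      return first_line[0] != '\0' &&
             *first_non_digit == '\0' &&
             lines >= LINES_IN_RELAY_LOG_INFO_WITH_DELAY;
    }

    int main()
    {
      // Old format: first line is the relay log name (empty or contains a '.').
      bool old_fmt= is_new_format("./relay-bin.000003");
      // New format: first line is the number of lines in the file.
      bool new_fmt= is_new_format("7");
      return (old_fmt == false && new_fmt == true) ? 0 : 1;
    }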
@@ -1024,7 +1095,6 @@ int purge_relay_logs(Relay_log_info* rli, THD *thd, bool just_reset,
DBUG_ASSERT(rli->slave_running == 0);
DBUG_ASSERT(rli->mi->slave_running == 0);
- rli->slave_skip_counter=0;
mysql_mutex_lock(&rli->data_lock);
/*
@@ -1243,7 +1313,7 @@ void Relay_log_info::stmt_done(my_off_t event_master_log_pos,
inc_group_relay_log_pos(event_master_log_pos, rgi);
if (rpl_global_gtid_slave_state.record_and_update_gtid(thd, rgi))
{
- report(WARNING_LEVEL, ER_CANNOT_UPDATE_GTID_STATE,
+ report(WARNING_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, rgi->gtid_info(),
"Failed to update GTID state in %s.%s, slave state may become "
"inconsistent: %d: %s",
"mysql", rpl_gtid_slave_state_table_name.str,
@@ -1279,6 +1349,33 @@ void Relay_log_info::stmt_done(my_off_t event_master_log_pos,
DBUG_VOID_RETURN;
}
+
+int
+Relay_log_info::alloc_inuse_relaylog(const char *name)
+{
+ inuse_relaylog *ir;
+
+ if (!(ir= (inuse_relaylog *)my_malloc(sizeof(*ir), MYF(MY_WME|MY_ZEROFILL))))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), (int)sizeof(*ir));
+ return 1;
+ }
+ strmake_buf(ir->name, name);
+
+ if (!inuse_relaylog_list)
+ inuse_relaylog_list= ir;
+ else
+ {
+ last_inuse_relaylog->completed= true;
+ last_inuse_relaylog->next= ir;
+ }
+ last_inuse_relaylog= ir;
+ my_atomic_rwlock_init(&ir->inuse_relaylog_atomic_lock);
+
+ return 0;
+}
+
+
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
int
rpl_load_gtid_slave_state(THD *thd)
@@ -1465,6 +1562,9 @@ rpl_group_info::reinit(Relay_log_info *rli)
tables_to_lock_count= 0;
trans_retries= 0;
last_event_start_time= 0;
+ gtid_sub_id= 0;
+ commit_id= 0;
+ gtid_pending= false;
worker_error= 0;
row_stmt_start_timestamp= 0;
long_find_row_note_printed= false;
@@ -1474,7 +1574,7 @@ rpl_group_info::reinit(Relay_log_info *rli)
}
rpl_group_info::rpl_group_info(Relay_log_info *rli)
- : thd(0), gtid_sub_id(0), wait_commit_sub_id(0),
+ : thd(0), wait_commit_sub_id(0),
wait_commit_group_info(0), parallel_entry(0),
deferred_events(NULL), m_annotate_event(0), is_parallel_exec(false)
{
@@ -1505,9 +1605,11 @@ event_group_new_gtid(rpl_group_info *rgi, Gtid_log_event *gev)
return 1;
}
rgi->gtid_sub_id= sub_id;
- rgi->current_gtid.server_id= gev->server_id;
rgi->current_gtid.domain_id= gev->domain_id;
+ rgi->current_gtid.server_id= gev->server_id;
rgi->current_gtid.seq_no= gev->seq_no;
+ rgi->commit_id= gev->commit_id;
+ rgi->gtid_pending= true;
return 0;
}
@@ -1563,7 +1665,7 @@ delete_or_keep_event_post_apply(rpl_group_info *rgi,
void rpl_group_info::cleanup_context(THD *thd, bool error)
{
- DBUG_ENTER("Relay_log_info::cleanup_context");
+ DBUG_ENTER("rpl_group_info::cleanup_context");
DBUG_PRINT("enter", ("error: %d", (int) error));
DBUG_ASSERT(this->thd == thd);
@@ -1629,7 +1731,7 @@ void rpl_group_info::cleanup_context(THD *thd, bool error)
void rpl_group_info::clear_tables_to_lock()
{
- DBUG_ENTER("Relay_log_info::clear_tables_to_lock()");
+ DBUG_ENTER("rpl_group_info::clear_tables_to_lock()");
#ifndef DBUG_OFF
/**
When replicating in RBR and MyISAM Merge tables are involved
@@ -1676,7 +1778,7 @@ void rpl_group_info::clear_tables_to_lock()
void rpl_group_info::slave_close_thread_tables(THD *thd)
{
- DBUG_ENTER("Relay_log_info::slave_close_thread_tables(THD *thd)");
+ DBUG_ENTER("rpl_group_info::slave_close_thread_tables(THD *thd)");
thd->get_stmt_da()->set_overwrite_status(true);
thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
thd->get_stmt_da()->set_overwrite_status(false);
@@ -1745,6 +1847,54 @@ rpl_group_info::mark_start_commit()
}
+/*
+ Format the current GTID as a string suitable for printing in error messages.
+
+ The string is stored in a buffer inside rpl_group_info, so it remains valid
+ until the next call to gtid_info() or until the rpl_group_info is destroyed.
+
+ If no GTID is available, then NULL is returned.
+*/
+char *
+rpl_group_info::gtid_info()
+{
+ if (!gtid_sub_id || !current_gtid.seq_no)
+ return NULL;
+ my_snprintf(gtid_info_buf, sizeof(gtid_info_buf), "Gtid %u-%u-%llu",
+ current_gtid.domain_id, current_gtid.server_id,
+ current_gtid.seq_no);
+ return gtid_info_buf;
+}
+
+
+/*
+ Undo the effect of a prior mark_start_commit().
+
+ This is only used for retrying a transaction in parallel replication, after
+ we have encountered a deadlock or other temporary error.
+
+ When we get such a deadlock, it means that the current group of transactions
+ did not yet all start committing (else they would not have deadlocked). So
+ we will not yet have woken up anything in the next group, our rgi->gco is
+ still live, and we can simply decrement the counter (to be incremented again
+ later, when the retry succeeds and reaches the commit step).
+*/
+void
+rpl_group_info::unmark_start_commit()
+{
+ rpl_parallel_entry *e;
+
+ if (!did_mark_start_commit)
+ return;
+
+ e= this->parallel_entry;
+ mysql_mutex_lock(&e->LOCK_parallel_entry);
+ --e->count_committing_event_groups;
+ mysql_mutex_unlock(&e->LOCK_parallel_entry);
+ did_mark_start_commit= false;
+}
+
+
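unmark_start_commit() above is the inverse of mark_start_commit(): both adjust the shared count of event groups that have reached the commit phase, protected by the entry's mutex and guarded by a local did_mark flag so the decrement only happens when a matching increment was done. A compact sketch of that pairing with simplified types and no wake-up signalling; it is not the server's implementation.

    #include <mutex>

    struct Entry {
      std::mutex lock;
      unsigned long count_committing_event_groups= 0;
    };

    struct GroupInfo {
      Entry *entry;
      bool did_mark_start_commit;

      void mark_start_commit()
      {
        if (did_mark_start_commit)
          return;
        std::lock_guard<std::mutex> g(entry->lock);
        ++entry->count_committing_event_groups;   // wake-up logic omitted here
        did_mark_start_commit= true;
      }

      // Called before retrying after a deadlock: undo the earlier increment so
      // later transactions are not woken up against a rolled-back state.
      void unmark_start_commit()
      {
        if (!did_mark_start_commit)
          return;
        std::lock_guard<std::mutex> g(entry->lock);
        --entry->count_committing_event_groups;
        did_mark_start_commit= false;
      }
    };

    int main()
    {
      Entry e;
      GroupInfo rgi{ &e, false };
      rgi.mark_start_commit();
      rgi.unmark_start_commit();       // retry path: counter back to 0
      return e.count_committing_event_groups == 0 ? 0 : 1;
    }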
rpl_sql_thread_info::rpl_sql_thread_info(Rpl_filter *filter)
: rpl_filter(filter)
{
diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h
index 137571ab820..3a8d87030ad 100644
--- a/sql/rpl_rli.h
+++ b/sql/rpl_rli.h
@@ -28,6 +28,12 @@ struct RPL_TABLE_LIST;
class Master_info;
class Rpl_filter;
+
+enum {
+ LINES_IN_RELAY_LOG_INFO_WITH_DELAY= 5
+};
+
+
/****************************************************************************
Replication SQL Thread
@@ -55,6 +61,7 @@ class Rpl_filter;
*****************************************************************************/
struct rpl_group_info;
+struct inuse_relaylog;
class Relay_log_info : public Slave_reporting_capability
{
@@ -158,6 +165,13 @@ public:
Master_info *mi;
/*
+ List of active relay log files.
+ (There can be more than one in the case of parallel replication).
+ */
+ inuse_relaylog *inuse_relaylog_list;
+ inuse_relaylog *last_inuse_relaylog;
+
+ /*
Needed to deal properly with cur_log getting closed and re-opened with
a different log under our feet
*/
@@ -237,10 +251,11 @@ public:
errors, and have been manually applied by DBA already.
Must be ulong as it's referred to from set_var.cc
*/
- volatile ulong slave_skip_counter;
+ volatile ulonglong slave_skip_counter;
+ ulonglong max_relay_log_size;
+
volatile ulong abort_pos_wait; /* Incremented on change master */
volatile ulong slave_run_id; /* Incremented on slave start */
- ulong max_relay_log_size;
mysql_mutex_t log_space_lock;
mysql_cond_t log_space_cond;
/*
@@ -392,6 +407,7 @@ public:
void stmt_done(my_off_t event_log_pos,
time_t event_creation_time, THD *thd,
rpl_group_info *rgi);
+ int alloc_inuse_relaylog(const char *name);
/**
Is the replication inside a group?
@@ -458,6 +474,41 @@ private:
/*
+ In parallel replication, if we need to re-try a transaction due to a
+ deadlock or other temporary error, we may need to go back and re-read events
+ out of an earlier relay log.
+
+ This structure keeps track of the relay logs that are potentially in use.
+ Each rpl_group_info has a pointer to one of these, corresponding to the
+ relay log containing its first GTID event.
+
+ A pair of reference counts keeps track of how long a relay log is potentially
+ in use. When the `completed' flag is set, all events have been read out of
+ the relay log, but the log might still be needed for retry in worker
+ threads. As worker threads complete an event group, they atomically
+ increment `dequeued_count' by the number of events that were queued. Thus,
+ when `completed' is set and dequeued_count equals queued_count, the relay
+ log file is finally done with and can be purged.
+
+ By separating the queued and dequeued counts, only the dequeued_count needs
+ multi-thread synchronisation; the completed flag and queued_count fields
+ are only accessed by the SQL driver thread and need no synchronisation.
+*/
+struct inuse_relaylog {
+ inuse_relaylog *next;
+ /* Number of events in this relay log queued for worker threads. */
+ int64 queued_count;
+ /* Number of events completed by worker threads. */
+ volatile int64 dequeued_count;
+ /* Set when all events have been read from a relaylog. */
+ bool completed;
+ char name[FN_REFLEN];
+ /* Lock used to protect inuse_relaylog::dequeued_count */
+ my_atomic_rwlock_t inuse_relaylog_atomic_lock;
+};
+
+
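The queued_count/dequeued_count pair plus the completed flag gives a simple purge condition for each relay log file. Below is a sketch of that lifecycle, assuming a single enqueue-side counter owned by the SQL driver thread and one atomic dequeue-side counter shared by workers; the types are illustrative only.

    #include <atomic>
    #include <cassert>

    struct InuseRelaylog {
      long long queued_count= 0;                // written only by the SQL driver thread
      std::atomic<long long> dequeued_count{0}; // incremented by worker threads
      bool completed= false;                    // set once all events were read out
    };

    // SQL driver thread: account for an event handed to a worker.
    static void on_enqueue(InuseRelaylog &ir) { ++ir.queued_count; }

    // Worker thread: account for events it has finished with.
    static void on_dequeue(InuseRelaylog &ir, long long n)
    { ir.dequeued_count.fetch_add(n); }

    // The relay log file can be purged only when reading finished and every
    // queued event has been processed (possibly after retries).
    static bool can_purge(const InuseRelaylog &ir)
    { return ir.completed && ir.dequeued_count.load() == ir.queued_count; }

    int main()
    {
      InuseRelaylog ir;
      on_enqueue(ir);
      on_enqueue(ir);
      ir.completed= true;
      assert(!can_purge(ir));   // two events still outstanding
      on_dequeue(ir, 2);
      assert(can_purge(ir));    // all dequeued: file can go
      return 0;
    }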
+/*
This is data for various state needed to be kept for the processing of
one event group (transaction) during replication.
@@ -483,6 +534,7 @@ struct rpl_group_info
*/
uint64 gtid_sub_id;
rpl_gtid current_gtid;
+ uint64 commit_id;
/*
This is used to keep transaction commit order.
We will signal this when we commit, and can register it to wait for the
@@ -560,6 +612,8 @@ struct rpl_group_info
*/
char future_event_master_log_name[FN_REFLEN];
bool is_parallel_exec;
+ /* When gtid_pending is true, we have not yet done record_gtid(). */
+ bool gtid_pending;
int worker_error;
/*
Set true when we signalled that we reach the commit phase. Used to avoid
@@ -587,6 +641,17 @@ struct rpl_group_info
*/
time_t row_stmt_start_timestamp;
bool long_find_row_note_printed;
+ /* Needs room for "Gtid D-S-N\x00". */
+ char gtid_info_buf[5+10+1+10+1+20+1];
+
+ /*
+ Information to be able to re-try an event group in case of a deadlock or
+ other temporary error.
+ */
+ inuse_relaylog *relay_log;
+ uint64 retry_start_offset;
+ uint64 retry_event_count;
+ bool killed_for_retry;
rpl_group_info(Relay_log_info *rli_);
~rpl_group_info();
@@ -675,6 +740,8 @@ struct rpl_group_info
void slave_close_thread_tables(THD *);
void mark_start_commit_no_lock();
void mark_start_commit();
+ char *gtid_info();
+ void unmark_start_commit();
time_t get_row_stmt_start_timestamp()
{
diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc
index 05227a29775..25dff72090c 100644
--- a/sql/rpl_utility.cc
+++ b/sql/rpl_utility.cc
@@ -826,7 +826,7 @@ can_convert_field_to(Field *field,
@retval false Master table is not compatible with slave table.
*/
bool
-table_def::compatible_with(THD *thd, Relay_log_info *rli,
+table_def::compatible_with(THD *thd, rpl_group_info *rgi,
TABLE *table, TABLE **conv_table_var)
const
{
@@ -834,6 +834,7 @@ table_def::compatible_with(THD *thd, Relay_log_info *rli,
We only check the initial columns for the tables.
*/
uint const cols_to_check= MY_MIN(table->s->fields, size());
+ Relay_log_info *rli= rgi->rli;
TABLE *tmp_table= NULL;
for (uint col= 0 ; col < cols_to_check ; ++col)
@@ -857,7 +858,7 @@ table_def::compatible_with(THD *thd, Relay_log_info *rli,
This will create the full table with all fields. This is
necessary to get the correct field lengths for the record.
*/
- tmp_table= create_conversion_table(thd, rli, table);
+ tmp_table= create_conversion_table(thd, rgi, table);
if (tmp_table == NULL)
return false;
/*
@@ -885,7 +886,7 @@ table_def::compatible_with(THD *thd, Relay_log_info *rli,
String target_type(target_buf, sizeof(target_buf), &my_charset_latin1);
show_sql_type(type(col), field_metadata(col), &source_type, field->charset());
field->sql_type(target_type);
- rli->report(ERROR_LEVEL, ER_SLAVE_CONVERSION_FAILED,
+ rli->report(ERROR_LEVEL, ER_SLAVE_CONVERSION_FAILED, rgi->gtid_info(),
ER(ER_SLAVE_CONVERSION_FAILED),
col, db_name, tbl_name,
source_type.c_ptr_safe(), target_type.c_ptr_safe());
@@ -927,12 +928,14 @@ table_def::compatible_with(THD *thd, Relay_log_info *rli,
conversion table.
*/
-TABLE *table_def::create_conversion_table(THD *thd, Relay_log_info *rli, TABLE *target_table) const
+TABLE *table_def::create_conversion_table(THD *thd, rpl_group_info *rgi,
+ TABLE *target_table) const
{
DBUG_ENTER("table_def::create_conversion_table");
List<Create_field> field_list;
TABLE *conv_table= NULL;
+ Relay_log_info *rli= rgi->rli;
/*
At slave, columns may differ. So we should create
MY_MIN(columns@master, columns@slave) columns in the
@@ -1014,7 +1017,7 @@ TABLE *table_def::create_conversion_table(THD *thd, Relay_log_info *rli, TABLE *
err:
if (conv_table == NULL)
- rli->report(ERROR_LEVEL, ER_SLAVE_CANT_CREATE_CONVERSION,
+ rli->report(ERROR_LEVEL, ER_SLAVE_CANT_CREATE_CONVERSION, rgi->gtid_info(),
ER(ER_SLAVE_CANT_CREATE_CONVERSION),
target_table->s->db.str,
target_table->s->table_name.str);
diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h
index 7568a2d786c..ed0ce16363b 100644
--- a/sql/rpl_utility.h
+++ b/sql/rpl_utility.h
@@ -30,6 +30,7 @@
class Relay_log_info;
class Log_event;
+struct rpl_group_info;
/**
A table definition from the master.
@@ -187,7 +188,7 @@ public:
@retval 0 if the table definition is compatible with @c table
*/
#ifndef MYSQL_CLIENT
- bool compatible_with(THD *thd, Relay_log_info *rli, TABLE *table,
+ bool compatible_with(THD *thd, rpl_group_info *rgi, TABLE *table,
TABLE **conv_table_var) const;
/**
@@ -212,7 +213,8 @@ public:
@return A pointer to a temporary table with memory allocated in the
thread's memroot, NULL if the table could not be created
*/
- TABLE *create_conversion_table(THD *thd, Relay_log_info *rli, TABLE *target_table) const;
+ TABLE *create_conversion_table(THD *thd, rpl_group_info *rgi,
+ TABLE *target_table) const;
#endif
diff --git a/sql/scheduler.cc b/sql/scheduler.cc
index ecf49e633ab..a9b253e478a 100644
--- a/sql/scheduler.cc
+++ b/sql/scheduler.cc
@@ -1,5 +1,5 @@
-/* Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
- Copyright (c) 2012, 2013, Monty Program Ab
+/* Copyright (c) 2007, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2012, 2014, SkySQL Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/sql/scheduler.h b/sql/scheduler.h
index 06c17c7b114..f7aff377eac 100644
--- a/sql/scheduler.h
+++ b/sql/scheduler.h
@@ -99,15 +99,13 @@ public:
void *data; /* scheduler-specific data structure */
};
-#undef HAVE_POOL_OF_THREADS
-#if !defined(EMBEDDED_LIBRARY) && !defined(_AIX)
-#define HAVE_POOL_OF_THREADS 1
+#ifdef HAVE_POOL_OF_THREADS
void pool_of_threads_scheduler(scheduler_functions* func,
ulong *arg_max_connections,
uint *arg_connection_count);
#else
#define pool_of_threads_scheduler(A,B,C) \
one_thread_per_connection_scheduler(A, B, C)
-#endif
+#endif /*HAVE_POOL_OF_THREADS*/
#endif /* SCHEDULER_INCLUDED */
diff --git a/sql/set_var.h b/sql/set_var.h
index e48f394c316..fe2a0d8e953 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -284,9 +284,7 @@ public:
if (value_arg && value_arg->type() == Item::FIELD_ITEM)
{
Item_field *item= (Item_field*) value_arg;
- if (!(value=new Item_string(item->field_name,
- (uint) strlen(item->field_name),
- system_charset_info))) // names are utf8
+ if (!(value=new Item_string_sys(item->field_name))) // names are utf8
value=value_arg; /* Give error message later */
}
else
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index 233bb835bd8..199a822d022 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -3962,7 +3962,7 @@ ER_NEW_ABORTING_CONNECTION 08S01
spa "Abortada conexión %ld para db: '%-.192s' usuario: '%-.48s' servidor: '%-.64s' (%-.64s)"
swe "Avbröt länken för tråd %ld till db '%-.192s', användare '%-.48s', host '%-.64s' (%-.64s)"
ukr "Перервано з'єднання %ld до бази данних: '%-.192s' користувач: '%-.48s' хост: '%-.64s' (%-.64s)"
-ER_unused_2
+ER_UNUSED_10
eng "You should never see it"
ER_FLUSH_MASTER_BINLOG_CLOSED
eng "Binlog closed, cannot RESET MASTER"
@@ -5879,10 +5879,9 @@ ER_EVENT_NEITHER_M_EXPR_NOR_M_AT
ger "Kein DATETIME-Ausdruck angegeben"
ER_UNUSED_2
- eng ""
-
+ eng "You should never see it"
ER_UNUSED_3
- eng ""
+ eng "You should never see it"
ER_EVENT_CANNOT_DELETE
eng "Failed to delete the event from mysql.event"
ger "Löschen des Events aus mysql.event fehlgeschlagen"
@@ -5910,7 +5909,7 @@ ER_CANT_LOCK_LOG_TABLE
eng "You can't use locks with log tables."
ger "Log-Tabellen können nicht gesperrt werden."
ER_UNUSED_4
- eng ""
+ eng "You should never see it"
ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE
eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MariaDB %d, now running %d. Please use mysql_upgrade to fix this error."
ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MariaDB %d, jetzt unter %d. Bitte benutzen Sie mysql_upgrade, um den Fehler zu beheben"
@@ -6089,8 +6088,8 @@ ER_TRG_CANT_OPEN_TABLE
ER_CANT_CREATE_SROUTINE
eng "Cannot create stored routine `%-.64s`. Check warnings"
ger "Kann gespeicherte Routine `%-.64s` nicht erzeugen. Beachten Sie die Warnungen"
-ER_UNUSED
- eng ""
+ER_UNUSED_11
+ eng "You should never see it"
ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT
eng "The BINLOG statement of type `%s` was not preceded by a format description BINLOG statement."
ger "Der BINLOG-Anweisung vom Typ `%s` ging keine BINLOG-Anweisung zur Formatbeschreibung voran."
@@ -6457,7 +6456,7 @@ ER_BINLOG_UNSAFE_INSERT_TWO_KEYS
ER_TABLE_IN_FK_CHECK
eng "Table is being used in foreign key check."
-ER_unused_1
+ER_UNUSED_1
eng "You should never see it"
ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST
@@ -6527,7 +6526,7 @@ ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET
swe "Hittade en rad som inte passar i någon given partition"
ER_UNUSED_5
- eng ""
+ eng "You should never see it"
ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE
eng "Failure while changing the type of replication repository: %s."
@@ -6993,11 +6992,11 @@ ER_UNKNOWN_OPTION
ER_BAD_OPTION_VALUE
eng "Incorrect value '%-.64s' for option '%-.64s'"
ER_UNUSED_6
- eng ""
+ eng "You should never see it"
ER_UNUSED_7
- eng ""
+ eng "You should never see it"
ER_UNUSED_8
- eng ""
+ eng "You should never see it"
ER_DATA_OVERFLOW 22003
eng "Got overflow when converting '%-.128s' to %-.32s. Value truncated."
ER_DATA_TRUNCATED 22003
@@ -7022,8 +7021,8 @@ ER_VIEW_ORDERBY_IGNORED
eng "View '%-.192s'.'%-.192s' ORDER BY clause ignored because there is other ORDER BY clause already."
ER_CONNECTION_KILLED 70100
eng "Connection was killed"
-ER_UNSED
- eng "Internal error: '%-.192s'"
+ER_UNUSED_12
+ eng "You should never see it"
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
eng "Cannot modify @@session.skip_replication inside a transaction"
ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION
@@ -7108,5 +7107,7 @@ ER_IT_IS_A_VIEW 42S02
eng "'%-.192s' is a view"
ER_SLAVE_SKIP_NOT_IN_GTID
eng "When using GTID, @@sql_slave_skip_counter can not be used. Instead, setting @@gtid_slave_pos explicitly can be used to skip to after a given GTID position."
+ER_TABLE_DEFINITION_TOO_BIG
+ eng "The definition for table %`s is too big"
ER_STATEMENT_TIMEOUT 70100
eng "Query execution was interrupted (max_statement_time exceeded)"
diff --git a/sql/slave.cc b/sql/slave.cc
index ca29410cd1d..6e70f090247 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -302,7 +302,10 @@ handle_slave_init(void *arg __attribute__((unused)))
mysql_mutex_lock(&LOCK_thread_count);
thd->thread_id= thread_id++;
mysql_mutex_unlock(&LOCK_thread_count);
+ thd->system_thread = SYSTEM_THREAD_SLAVE_INIT;
thd->store_globals();
+ thd->security_ctx->skip_grants();
+ thd->set_command(COM_DAEMON);
thd_proc_info(thd, "Loading slave GTID position from table");
if (rpl_load_gtid_slave_state(thd))
@@ -317,15 +320,22 @@ handle_slave_init(void *arg __attribute__((unused)))
mysql_mutex_unlock(&LOCK_thread_count);
my_thread_end();
- mysql_mutex_lock(&LOCK_thread_count);
+ mysql_mutex_lock(&LOCK_slave_init);
slave_init_thread_running= false;
- mysql_cond_broadcast(&COND_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
+ mysql_cond_broadcast(&COND_slave_init);
+ mysql_mutex_unlock(&LOCK_slave_init);
return 0;
}
+/*
+ Start the slave init thread.
+
+ This thread is used to load the GTID state from mysql.gtid_slave_pos at
+ server start; reading from a table requires a valid THD, which is not
+ otherwise available during server init.
+*/
static int
run_slave_init_thread()
{
@@ -339,10 +349,10 @@ run_slave_init_thread()
return 1;
}
- mysql_mutex_lock(&LOCK_thread_count);
+ mysql_mutex_lock(&LOCK_slave_init);
while (slave_init_thread_running)
- mysql_cond_wait(&COND_thread_count, &LOCK_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
+ mysql_cond_wait(&COND_slave_init, &LOCK_slave_init);
+ mysql_mutex_unlock(&LOCK_slave_init);
return 0;
}
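run_slave_init_thread() now waits on a dedicated LOCK_slave_init/COND_slave_init pair instead of reusing LOCK_thread_count, so the startup handshake cannot be confused with unrelated thread-count signalling. The standard shape of that handshake, sketched here with std::thread and std::condition_variable rather than the server's mysys primitives:

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    static std::mutex lock_slave_init;
    static std::condition_variable cond_slave_init;
    static bool slave_init_thread_running= true;

    static void handle_slave_init()
    {
      // ... load state that needs a fully set-up thread context ...
      std::lock_guard<std::mutex> g(lock_slave_init);
      slave_init_thread_running= false;
      cond_slave_init.notify_all();     // wake only the waiter interested in us
    }

    static int run_slave_init_thread()
    {
      std::thread t(handle_slave_init);
      {
        std::unique_lock<std::mutex> lk(lock_slave_init);
        cond_slave_init.wait(lk, [] { return !slave_init_thread_running; });
      }
      t.join();
      return 0;
    }

    int main() { return run_slave_init_thread(); }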
@@ -1090,21 +1100,21 @@ static bool sql_slave_killed(rpl_group_info *rgi)
if (ret == 0)
{
- rli->report(WARNING_LEVEL, 0,
+ rli->report(WARNING_LEVEL, 0, rgi->gtid_info(),
"Request to stop slave SQL Thread received while "
"applying a group that has non-transactional "
"changes; waiting for completion of the group ... ");
}
else
{
- rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(),
ER(ER_SLAVE_FATAL_ERROR), msg_stopped);
}
}
else
{
ret= TRUE;
- rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(),
ER(ER_SLAVE_FATAL_ERROR),
msg_stopped);
}
@@ -1522,7 +1532,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
goto slave_killed_err;
else if (is_network_error(mysql_errno(mysql)))
{
- mi->report(WARNING_LEVEL, mysql_errno(mysql),
+ mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL,
"Get master clock failed with error: %s", mysql_error(mysql));
goto network_err;
}
@@ -1587,7 +1597,7 @@ not always make sense; please check the manual before using it).";
goto slave_killed_err;
else if (is_network_error(mysql_errno(mysql)))
{
- mi->report(WARNING_LEVEL, mysql_errno(mysql),
+ mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL,
"Get master SERVER_ID failed with error: %s", mysql_error(mysql));
goto network_err;
}
@@ -1600,7 +1610,7 @@ when it try to get the value of SERVER_ID variable from master.";
}
else if (!master_row && master_res)
{
- mi->report(WARNING_LEVEL, ER_UNKNOWN_SYSTEM_VARIABLE,
+ mi->report(WARNING_LEVEL, ER_UNKNOWN_SYSTEM_VARIABLE, NULL,
"Unknown system variable 'SERVER_ID' on master, \
maybe it is a *VERY OLD MASTER*.");
}
@@ -1660,7 +1670,7 @@ be equal for the Statement-format replication to work";
goto slave_killed_err;
else if (is_network_error(mysql_errno(mysql)))
{
- mi->report(WARNING_LEVEL, mysql_errno(mysql),
+ mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL,
"Get master COLLATION_SERVER failed with error: %s", mysql_error(mysql));
goto network_err;
}
@@ -1674,7 +1684,7 @@ when it try to get the value of COLLATION_SERVER global variable from master.";
goto err;
}
else
- mi->report(WARNING_LEVEL, ER_UNKNOWN_SYSTEM_VARIABLE,
+ mi->report(WARNING_LEVEL, ER_UNKNOWN_SYSTEM_VARIABLE, NULL,
"Unknown system variable 'COLLATION_SERVER' on master, \
maybe it is a *VERY OLD MASTER*. *NOTE*: slave may experience \
inconsistency if replicated data deals with collation.");
@@ -1723,7 +1733,7 @@ be equal for the Statement-format replication to work";
goto slave_killed_err;
else if (is_network_error(err_code= mysql_errno(mysql)))
{
- mi->report(ERROR_LEVEL, err_code,
+ mi->report(ERROR_LEVEL, err_code, NULL,
"Get master TIME_ZONE failed with error: %s",
mysql_error(mysql));
goto network_err;
@@ -1731,7 +1741,7 @@ be equal for the Statement-format replication to work";
else if (err_code == ER_UNKNOWN_SYSTEM_VARIABLE)
{
/* We use ERROR_LEVEL to get the error logged to file */
- mi->report(ERROR_LEVEL, err_code,
+ mi->report(ERROR_LEVEL, err_code, NULL,
"MySQL master doesn't have a TIME_ZONE variable. Note that"
"if your timezone is not same between master and slave, your "
@@ -1763,15 +1773,35 @@ when it try to get the value of TIME_ZONE global variable from master.";
llstr((ulonglong) (mi->heartbeat_period*1000000000UL), llbuf);
sprintf(query, query_format, llbuf);
- if (mysql_real_query(mysql, query, strlen(query))
- && !check_io_slave_killed(mi, NULL))
+ DBUG_EXECUTE_IF("simulate_slave_heartbeat_network_error",
+ { static ulong dbug_count= 0;
+ if (++dbug_count < 3)
+ goto heartbeat_network_error;
+ });
+ if (mysql_real_query(mysql, query, strlen(query)))
{
- errmsg= "The slave I/O thread stops because SET @master_heartbeat_period "
- "on master failed.";
- err_code= ER_SLAVE_FATAL_ERROR;
- sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql));
- mysql_free_result(mysql_store_result(mysql));
- goto err;
+ if (check_io_slave_killed(mi, NULL))
+ goto slave_killed_err;
+
+ if (is_network_error(mysql_errno(mysql)))
+ {
+ IF_DBUG(heartbeat_network_error: , )
+ mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL,
+ "SET @master_heartbeat_period to master failed with error: %s",
+ mysql_error(mysql));
+ mysql_free_result(mysql_store_result(mysql));
+ goto network_err;
+ }
+ else
+ {
+ /* Fatal error */
+ errmsg= "The slave I/O thread stops because a fatal error is encountered "
+ "when it tries to SET @master_heartbeat_period on master.";
+ err_code= ER_SLAVE_FATAL_ERROR;
+ sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql));
+ mysql_free_result(mysql_store_result(mysql));
+ goto err;
+ }
}
mysql_free_result(mysql_store_result(mysql));
}
@@ -1808,7 +1838,7 @@ when it try to get the value of TIME_ZONE global variable from master.";
if (global_system_variables.log_warnings > 1)
{
// this is tolerable as OM -> NS is supported
- mi->report(WARNING_LEVEL, mysql_errno(mysql),
+ mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL,
"Notifying master by %s failed with "
"error: %s", query, mysql_error(mysql));
}
@@ -1817,7 +1847,7 @@ when it try to get the value of TIME_ZONE global variable from master.";
{
if (is_network_error(mysql_errno(mysql)))
{
- mi->report(WARNING_LEVEL, mysql_errno(mysql),
+ mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL,
"Notifying master by %s failed with "
"error: %s", query, mysql_error(mysql));
mysql_free_result(mysql_store_result(mysql));
@@ -1853,7 +1883,7 @@ when it try to get the value of TIME_ZONE global variable from master.";
goto slave_killed_err;
else if (is_network_error(mysql_errno(mysql)))
{
- mi->report(WARNING_LEVEL, mysql_errno(mysql),
+ mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL,
"Get master BINLOG_CHECKSUM failed with error: %s", mysql_error(mysql));
goto network_err;
}
@@ -1890,7 +1920,7 @@ past_checksum:
err_code= mysql_errno(mysql);
if (is_network_error(err_code))
{
- mi->report(ERROR_LEVEL, err_code,
+ mi->report(ERROR_LEVEL, err_code, NULL,
"Setting master-side filtering of @@skip_replication failed "
"with error: %s", mysql_error(mysql));
goto network_err;
@@ -1934,7 +1964,7 @@ past_checksum:
err_code= mysql_errno(mysql);
if (is_network_error(err_code))
{
- mi->report(ERROR_LEVEL, err_code,
+ mi->report(ERROR_LEVEL, err_code, NULL,
"Setting @mariadb_slave_capability failed with error: %s",
mysql_error(mysql));
goto network_err;
@@ -2000,7 +2030,7 @@ after_set_capability:
err_code= mysql_errno(mysql);
if (is_network_error(err_code))
{
- mi->report(ERROR_LEVEL, err_code,
+ mi->report(ERROR_LEVEL, err_code, NULL,
"Setting @slave_connect_state failed with error: %s",
mysql_error(mysql));
goto network_err;
@@ -2033,7 +2063,7 @@ after_set_capability:
err_code= mysql_errno(mysql);
if (is_network_error(err_code))
{
- mi->report(ERROR_LEVEL, err_code,
+ mi->report(ERROR_LEVEL, err_code, NULL,
"Setting @slave_gtid_strict_mode failed with error: %s",
mysql_error(mysql));
goto network_err;
@@ -2066,7 +2096,7 @@ after_set_capability:
err_code= mysql_errno(mysql);
if (is_network_error(err_code))
{
- mi->report(ERROR_LEVEL, err_code,
+ mi->report(ERROR_LEVEL, err_code, NULL,
"Setting @slave_gtid_ignore_duplicates failed with "
"error: %s", mysql_error(mysql));
goto network_err;
@@ -2102,7 +2132,7 @@ after_set_capability:
err_code= mysql_errno(mysql);
if (is_network_error(err_code))
{
- mi->report(ERROR_LEVEL, err_code,
+ mi->report(ERROR_LEVEL, err_code, NULL,
"Setting @slave_until_gtid failed with error: %s",
mysql_error(mysql));
goto network_err;
@@ -2150,7 +2180,7 @@ after_set_capability:
goto slave_killed_err;
else if (is_network_error(mysql_errno(mysql)))
{
- mi->report(WARNING_LEVEL, mysql_errno(mysql),
+ mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL,
"Get master GTID position failed with error: %s", mysql_error(mysql));
goto network_err;
}
@@ -2180,7 +2210,7 @@ err:
if (master_res)
mysql_free_result(master_res);
DBUG_ASSERT(err_code != 0);
- mi->report(ERROR_LEVEL, err_code, "%s", err_buff);
+ mi->report(ERROR_LEVEL, err_code, NULL, "%s", err_buff);
DBUG_RETURN(1);
}
@@ -2201,6 +2231,7 @@ slave_killed_err:
static bool wait_for_relay_log_space(Relay_log_info* rli)
{
bool slave_killed=0;
+ bool ignore_log_space_limit;
Master_info* mi = rli->mi;
PSI_stage_info old_stage;
THD* thd = mi->io_thd;
@@ -2216,6 +2247,11 @@ static bool wait_for_relay_log_space(Relay_log_info* rli)
!rli->ignore_log_space_limit)
mysql_cond_wait(&rli->log_space_cond, &rli->log_space_lock);
+ ignore_log_space_limit= rli->ignore_log_space_limit;
+ rli->ignore_log_space_limit= 0;
+
+ thd->EXIT_COND(&old_stage);
+
/*
Makes the IO thread read only one event at a time
until the SQL thread is able to purge the relay
@@ -2239,7 +2275,8 @@ static bool wait_for_relay_log_space(Relay_log_info* rli)
thread sleeps waiting for events.
*/
- if (rli->ignore_log_space_limit)
+
+ if (ignore_log_space_limit)
{
#ifndef DBUG_OFF
{
@@ -2261,11 +2298,8 @@ static bool wait_for_relay_log_space(Relay_log_info* rli)
mysql_mutex_unlock(&mi->data_lock);
rli->sql_force_rotate_relay= false;
}
-
- rli->ignore_log_space_limit= false;
}
- thd->EXIT_COND(&old_stage);
DBUG_RETURN(slave_killed);
}
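
In wait_for_relay_log_space() above, ignore_log_space_limit is now copied into a local variable and cleared while the mutex protecting the condition wait is still held, and EXIT_COND() is moved up so the shared flag is no longer read after the wait state has been released. A small standalone sketch of that copy-under-lock pattern, using std::condition_variable rather than the server's THD/EXIT_COND machinery:

    #include <condition_variable>
    #include <mutex>
    #include <thread>
    #include <cstdio>

    static std::mutex              space_mutex;
    static std::condition_variable space_cond;
    static bool ignore_limit = false;   // set by the "SQL thread"
    static bool space_freed  = false;

    static void io_thread_wait()
    {
      bool local_ignore;
      {
        std::unique_lock<std::mutex> lk(space_mutex);
        space_cond.wait(lk, [] { return space_freed || ignore_limit; });
        // Copy and reset the flag while still holding the mutex ...
        local_ignore = ignore_limit;
        ignore_limit = false;
      }                                 // ... then drop the lock (EXIT_COND)
      if (local_ignore)
        std::printf("reading one event at a time until space is purged\n");
    }

    int main()
    {
      std::thread io(io_thread_wait);
      {
        std::lock_guard<std::mutex> lk(space_mutex);
        ignore_limit = true;            // SQL thread asks the IO thread to go on
      }
      space_cond.notify_one();
      io.join();
      return 0;
    }

Reading the copied value outside the lock is safe because only the copy, not the shared flag, is used afterwards.
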
@@ -2302,7 +2336,7 @@ static void write_ignored_events_info_to_relay_log(THD *thd, Master_info *mi)
Rotate_log_event::DUP_NAME);
rli->ign_master_log_name_end[0]= 0;
if (unlikely(!(bool)rev))
- mi->report(ERROR_LEVEL, ER_SLAVE_CREATE_EVENT_FAILURE,
+ mi->report(ERROR_LEVEL, ER_SLAVE_CREATE_EVENT_FAILURE, NULL,
ER(ER_SLAVE_CREATE_EVENT_FAILURE),
"Rotate_event (out of memory?),"
" SHOW SLAVE STATUS may be inaccurate");
@@ -2313,7 +2347,7 @@ static void write_ignored_events_info_to_relay_log(THD *thd, Master_info *mi)
Gtid_list_log_event::FLAG_IGN_GTIDS);
rli->ign_gtids.reset();
if (unlikely(!(bool)glev))
- mi->report(ERROR_LEVEL, ER_SLAVE_CREATE_EVENT_FAILURE,
+ mi->report(ERROR_LEVEL, ER_SLAVE_CREATE_EVENT_FAILURE, NULL,
ER(ER_SLAVE_CREATE_EVENT_FAILURE),
"Gtid_list_event (out of memory?),"
" gtid_slave_pos may be inaccurate");
@@ -2326,7 +2360,7 @@ static void write_ignored_events_info_to_relay_log(THD *thd, Master_info *mi)
DBUG_PRINT("info",("writing a Rotate event to track down ignored events"));
rev->server_id= 0; // don't be ignored by slave SQL thread
if (unlikely(rli->relay_log.append(rev)))
- mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE,
+ mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL,
ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE),
"failed to write a Rotate event"
" to the relay log, SHOW SLAVE STATUS may be"
@@ -2339,7 +2373,7 @@ static void write_ignored_events_info_to_relay_log(THD *thd, Master_info *mi)
glev->server_id= 0; // don't be ignored by slave SQL thread
glev->set_artificial_event(); // Don't mess up Exec_Master_Log_Pos
if (unlikely(rli->relay_log.append(glev)))
- mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE,
+ mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL,
ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE),
"failed to write a Gtid_list event to the relay log, "
"gtid_slave_pos may be inaccurate");
@@ -2424,7 +2458,7 @@ int register_slave_on_master(MYSQL* mysql, Master_info *mi,
char buf[256];
my_snprintf(buf, sizeof(buf), "%s (Errno: %d)", mysql_error(mysql),
mysql_errno(mysql));
- mi->report(ERROR_LEVEL, ER_SLAVE_MASTER_COM_FAILURE,
+ mi->report(ERROR_LEVEL, ER_SLAVE_MASTER_COM_FAILURE, NULL,
ER(ER_SLAVE_MASTER_COM_FAILURE), "COM_REGISTER_SLAVE", buf);
}
DBUG_RETURN(1);
@@ -2829,7 +2863,8 @@ bool show_all_master_info(THD* thd)
if (send_show_master_info_header(thd, 1, gtid_pos.length()))
DBUG_RETURN(TRUE);
- if (!(elements= master_info_index->master_info_hash.records))
+ if (!master_info_index ||
+ !(elements= master_info_index->master_info_hash.records))
goto end;
/*
@@ -3093,7 +3128,8 @@ static ulong read_event(MYSQL* mysql, Master_info *mi, bool* suppress_warnings)
Some errors are temporary in nature, such as
ER_LOCK_DEADLOCK and ER_LOCK_WAIT_TIMEOUT.
*/
-static int has_temporary_error(THD *thd)
+int
+has_temporary_error(THD *thd)
{
DBUG_ENTER("has_temporary_error");
@@ -3274,7 +3310,7 @@ int apply_event_and_update_pos(Log_event* ev, THD* thd,
if (error)
{
char buf[22];
- rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR,
+ rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR, rgi->gtid_info(),
"It was not possible to update the positions"
" of the relay log information: the slave may"
" be in an inconsistent state."
@@ -3290,7 +3326,7 @@ int apply_event_and_update_pos(Log_event* ev, THD* thd,
Make sure we do not erroneously update gtid_slave_pos with a lingering
GTID from this failed event group (MDEV-4906).
*/
- rgi->gtid_sub_id= 0;
+ rgi->gtid_pending= false;
}
DBUG_RETURN(exec_res ? 1 : 0);
@@ -3501,9 +3537,6 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
if (opt_gtid_ignore_duplicates)
{
- serial_rgi->current_gtid.domain_id= gev->domain_id;
- serial_rgi->current_gtid.server_id= gev->server_id;
- serial_rgi->current_gtid.seq_no= gev->seq_no;
int res= rpl_global_gtid_slave_state.check_duplicate_gtid
(&serial_rgi->current_gtid, serial_rgi);
if (res < 0)
@@ -3616,7 +3649,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
DBUG_RETURN(exec_res);
}
mysql_mutex_unlock(&rli->data_lock);
- rli->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_READ_FAILURE,
+ rli->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_READ_FAILURE, NULL,
ER(ER_SLAVE_RELAY_LOG_READ_FAILURE), "\
Could not parse relay log event entry. The possible reasons are: the master's \
binary log is corrupted (you can check this by running 'mysqlbinlog' on the \
@@ -3711,7 +3744,7 @@ static int try_to_reconnect(THD *thd, MYSQL *mysql, Master_info *mi,
*/
if (messages[SLAVE_RECON_MSG_COMMAND][0])
{
- mi->report(WARNING_LEVEL, ER_SLAVE_MASTER_COM_FAILURE,
+ mi->report(WARNING_LEVEL, ER_SLAVE_MASTER_COM_FAILURE, NULL,
ER(ER_SLAVE_MASTER_COM_FAILURE),
messages[SLAVE_RECON_MSG_COMMAND], buf);
}
@@ -3801,7 +3834,7 @@ pthread_handler_t handle_slave_io(void *arg)
/* Load the set of seen GTIDs, if we did not already. */
if (rpl_load_gtid_slave_state(thd))
{
- mi->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(),
+ mi->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), NULL,
"Unable to load replication GTID slave state from mysql.%s: %s",
rpl_gtid_slave_state_table_name.str,
thd->get_stmt_da()->message());
@@ -3817,14 +3850,14 @@ pthread_handler_t handle_slave_io(void *arg)
if (RUN_HOOK(binlog_relay_io, thread_start, (thd, mi)))
{
- mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL,
ER(ER_SLAVE_FATAL_ERROR), "Failed to run 'thread_start' hook");
goto err;
}
if (!(mi->mysql = mysql = mysql_init(NULL)))
{
- mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL,
ER(ER_SLAVE_FATAL_ERROR), "error in mysql_init()");
goto err;
}
@@ -4006,18 +4039,18 @@ Log entry on master is longer than slave_max_allowed_packet (%lu) on \
slave. If the entry is correct, restart the server with a higher value of \
slave_max_allowed_packet",
slave_max_allowed_packet);
- mi->report(ERROR_LEVEL, ER_NET_PACKET_TOO_LARGE,
+ mi->report(ERROR_LEVEL, ER_NET_PACKET_TOO_LARGE, NULL,
"%s", "Got a packet bigger than 'slave_max_allowed_packet' bytes");
goto err;
case ER_MASTER_FATAL_ERROR_READING_BINLOG:
- mi->report(ERROR_LEVEL, ER_MASTER_FATAL_ERROR_READING_BINLOG,
+ mi->report(ERROR_LEVEL, ER_MASTER_FATAL_ERROR_READING_BINLOG, NULL,
ER(ER_MASTER_FATAL_ERROR_READING_BINLOG),
mysql_error_number, mysql_error(mysql));
goto err;
case ER_OUT_OF_RESOURCES:
sql_print_error("\
Stopping slave I/O thread due to out-of-memory error from master");
- mi->report(ERROR_LEVEL, ER_OUT_OF_RESOURCES,
+ mi->report(ERROR_LEVEL, ER_OUT_OF_RESOURCES, NULL,
"%s", ER(ER_OUT_OF_RESOURCES));
goto err;
}
@@ -4034,7 +4067,7 @@ Stopping slave I/O thread due to out-of-memory error from master");
(thd, mi,(const char*)mysql->net.read_pos + 1,
event_len, &event_buf, &event_len)))
{
- mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL,
ER(ER_SLAVE_FATAL_ERROR),
"Failed to run 'after_read_event' hook");
goto err;
@@ -4045,7 +4078,7 @@ Stopping slave I/O thread due to out-of-memory error from master");
bool synced= 0;
if (queue_event(mi, event_buf, event_len))
{
- mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE,
+ mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL,
ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE),
"could not queue event from master");
goto err;
@@ -4054,7 +4087,7 @@ Stopping slave I/O thread due to out-of-memory error from master");
if (RUN_HOOK(binlog_relay_io, after_queue_event,
(thd, mi, event_buf, event_len, synced)))
{
- mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL,
ER(ER_SLAVE_FATAL_ERROR),
"Failed to run 'after_queue_event' hook");
goto err;
@@ -4151,9 +4184,10 @@ err_during_init:
// TODO: make rpl_status part of Master_info
change_rpl_status(RPL_ACTIVE_SLAVE,RPL_IDLE_SLAVE);
mysql_mutex_lock(&LOCK_thread_count);
+ thd->unlink();
+ mysql_mutex_unlock(&LOCK_thread_count);
THD_CHECK_SENTRY(thd);
delete thd;
- mysql_mutex_unlock(&LOCK_thread_count);
mi->abort_slave= 0;
mi->slave_running= MYSQL_SLAVE_NOT_RUN;
mi->io_thd= 0;
@@ -4242,13 +4276,14 @@ end:
void
-slave_output_error_info(Relay_log_info *rli, THD *thd)
+slave_output_error_info(rpl_group_info *rgi, THD *thd)
{
/*
retrieve as much info as possible from the thd and, error
codes and warnings and print this to the error log as to
allow the user to locate the error
*/
+ Relay_log_info *rli= rgi->rli;
uint32 const last_errno= rli->last_error().number;
char llbuff[22];
@@ -4265,7 +4300,8 @@ slave_output_error_info(Relay_log_info *rli, THD *thd)
This function is reporting an error which was not reported
while executing exec_relay_log_event().
*/
- rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), "%s", errmsg);
+ rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(),
+ rgi->gtid_info(), "%s", errmsg);
}
else if (last_errno != thd->get_stmt_da()->sql_errno())
{
@@ -4344,6 +4380,7 @@ pthread_handler_t handle_slave_sql(void *arg)
char saved_master_log_name[FN_REFLEN];
my_off_t UNINIT_VAR(saved_log_pos);
my_off_t UNINIT_VAR(saved_master_log_pos);
+ String saved_skip_gtid_pos;
my_off_t saved_skip= 0;
Master_info *mi= ((Master_info*)arg);
Relay_log_info* rli = &mi->rli;
@@ -4394,7 +4431,7 @@ pthread_handler_t handle_slave_sql(void *arg)
will be stuck if we fail here
*/
mysql_cond_broadcast(&rli->start_cond);
- rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL,
"Failed during slave thread initialization");
goto err_during_init;
}
@@ -4446,16 +4483,20 @@ pthread_handler_t handle_slave_sql(void *arg)
mysql_mutex_unlock(&rli->log_space_lock);
serial_rgi->gtid_sub_id= 0;
+ serial_rgi->gtid_pending= false;
if (init_relay_log_pos(rli,
rli->group_relay_log_name,
rli->group_relay_log_pos,
1 /*need data lock*/, &errmsg,
1 /*look for a description_event*/))
{
- rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL,
"Error initializing relay log position: %s", errmsg);
goto err;
}
+ if (rli->alloc_inuse_relaylog(rli->group_relay_log_name))
+ goto err;
+
strcpy(rli->future_event_master_log_name, rli->group_master_log_name);
THD_CHECK_SENTRY(thd);
#ifndef DBUG_OFF
@@ -4510,7 +4551,7 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME,
if (check_temp_dir(rli->slave_patternload_file))
{
- rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(),
+ rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), NULL,
"Unable to use slave's temporary directory %s - %s",
slave_load_tmpdir, thd->get_stmt_da()->message());
goto err;
@@ -4519,7 +4560,7 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME,
/* Load the set of seen GTIDs, if we did not already. */
if (rpl_load_gtid_slave_state(thd))
{
- rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(),
+ rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), NULL,
"Unable to load replication GTID slave state from mysql.%s: %s",
rpl_gtid_slave_state_table_name.str,
thd->get_stmt_da()->message());
@@ -4538,7 +4579,7 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME,
execute_init_command(thd, &opt_init_slave, &LOCK_sys_init_slave);
if (thd->is_slave_error)
{
- rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(),
+ rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), NULL,
"Slave SQL thread aborted. Can't execute init_slave query");
goto err;
}
@@ -4555,6 +4596,12 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME,
strmake_buf(saved_master_log_name, rli->group_master_log_name);
saved_log_pos= rli->group_relay_log_pos;
saved_master_log_pos= rli->group_master_log_pos;
+ if (mi->using_gtid != Master_info::USE_GTID_NO)
+ {
+ saved_skip_gtid_pos.append(STRING_WITH_LEN(", GTID '"));
+ rpl_append_gtid_state(&saved_skip_gtid_pos, false);
+ saved_skip_gtid_pos.append(STRING_WITH_LEN("'; "));
+ }
saved_skip= rli->slave_skip_counter;
}
if ((rli->until_condition == Relay_log_info::UNTIL_MASTER_POS ||
@@ -4578,16 +4625,27 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME,
if (saved_skip && rli->slave_skip_counter == 0)
{
+ String tmp;
+ if (mi->using_gtid != Master_info::USE_GTID_NO)
+ {
+ tmp.append(STRING_WITH_LEN(", GTID '"));
+ rpl_append_gtid_state(&tmp, false);
+ tmp.append(STRING_WITH_LEN("'; "));
+ }
+
sql_print_information("'SQL_SLAVE_SKIP_COUNTER=%ld' executed at "
"relay_log_file='%s', relay_log_pos='%ld', master_log_name='%s', "
- "master_log_pos='%ld' and new position at "
+ "master_log_pos='%ld'%s and new position at "
"relay_log_file='%s', relay_log_pos='%ld', master_log_name='%s', "
- "master_log_pos='%ld' ",
+ "master_log_pos='%ld'%s ",
(ulong) saved_skip, saved_log_name, (ulong) saved_log_pos,
saved_master_log_name, (ulong) saved_master_log_pos,
+ saved_skip_gtid_pos.c_ptr_safe(),
rli->group_relay_log_name, (ulong) rli->group_relay_log_pos,
- rli->group_master_log_name, (ulong) rli->group_master_log_pos);
+ rli->group_master_log_name, (ulong) rli->group_master_log_pos,
+ tmp.c_ptr_safe());
saved_skip= 0;
+ saved_skip_gtid_pos.free();
}
if (exec_relay_log_event(thd, rli, serial_rgi))
@@ -4596,7 +4654,7 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME,
// do not scare the user if SQL thread was simply killed or stopped
if (!sql_slave_killed(serial_rgi))
{
- slave_output_error_info(rli, thd);
+ slave_output_error_info(serial_rgi, thd);
if (WSREP_ON && rli->last_error().number == ER_UNKNOWN_COM_ERROR)
{
wsrep_node_dropped= TRUE;
@@ -4791,7 +4849,7 @@ static int process_io_create_file(Master_info* mi, Create_file_log_event* cev)
xev.log_pos = cev->log_pos;
if (unlikely(mi->rli.relay_log.append(&xev)))
{
- mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE,
+ mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL,
ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE),
"error writing Exec_load event to relay log");
goto err;
@@ -4805,7 +4863,7 @@ static int process_io_create_file(Master_info* mi, Create_file_log_event* cev)
cev->block_len = num_bytes;
if (unlikely(mi->rli.relay_log.append(cev)))
{
- mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE,
+ mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL,
ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE),
"error writing Create_file event to relay log");
goto err;
@@ -4820,7 +4878,7 @@ static int process_io_create_file(Master_info* mi, Create_file_log_event* cev)
aev.log_pos = cev->log_pos;
if (unlikely(mi->rli.relay_log.append(&aev)))
{
- mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE,
+ mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL,
ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE),
"error writing Append_block event to relay log");
goto err;
@@ -4927,7 +4985,7 @@ static int queue_binlog_ver_1_event(Master_info *mi, const char *buf,
{
if (unlikely(!(tmp_buf=(char*)my_malloc(event_len+1,MYF(MY_WME)))))
{
- mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL,
ER(ER_SLAVE_FATAL_ERROR), "Memory allocation failed");
DBUG_RETURN(1);
}
@@ -5225,6 +5283,86 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
event_len - BINLOG_CHECKSUM_LEN : event_len,
mi->rli.relay_log.description_event_for_queue);
+ if (unlikely(mi->gtid_reconnect_event_skip_count) &&
+ unlikely(!mi->gtid_event_seen) &&
+ rev.is_artificial_event() &&
+ (mi->prev_master_id != mi->master_id ||
+ strcmp(rev.new_log_ident, mi->master_log_name) != 0))
+ {
+ /*
+ Artificial Rotate_log_event is the first event we receive at the start
+ of each master binlog file. It gives the name of the new binlog file.
+
+ Normally, we already have this name from the real rotate event at the
+ end of the previous binlog file (unless we are making a new connection
+ using GTID). But if the master server restarted/crashed, there is no
+ rotate event at the end of the prior binlog file, so the name is new.
+
+ We use this fact to handle a special case of master crashing. If the
+ master crashed while writing the binlog, it might end with a partial
+ event group lacking the COMMIT/XID event, which must be rolled
+ back. If the slave IO thread happens to get a disconnect in the middle
+ of exactly this event group, it will try to reconnect at the same GTID
+ and skip already fetched events. However, that GTID did not commit on
+ the master before the crash, so it does not really exist, and the
+ master will connect the slave at the next following GTID starting in
+ the next binlog. This could confuse the slave and make it mix the
+ start of one event group with the end of another.
+
+ But we detect this case here, by noticing the change of binlog name,
+ which reveals the missing rotate event at the end of the previous
+ binlog file. In this case, we reset the counters so that we do not
+ skip the next event group, and queue an artificial Format Description
+ event. The previously fetched incomplete event group will then be
+ rolled back when the Format Description event is executed by the SQL
+ thread.
+
+ A similar case is when the reconnect somehow connects to a different
+ master server (for example due to a network proxy or IP address
+ takeover). We detect this case by noticing a change of server_id and
+ likewise roll back the partially received event group.
+ */
+ Format_description_log_event fdle(4);
+
+ if (mi->prev_master_id != mi->master_id)
+ sql_print_warning("The server_id of master server changed in the "
+ "middle of GTID %u-%u-%llu. Assuming a change of "
+ "master server, so rolling back the previously "
+ "received partial transaction. Expected: %lu, "
+ "received: %lu", mi->last_queued_gtid.domain_id,
+ mi->last_queued_gtid.server_id,
+ mi->last_queued_gtid.seq_no,
+ mi->prev_master_id, mi->master_id);
+ else if (strcmp(rev.new_log_ident, mi->master_log_name) != 0)
+ sql_print_warning("Unexpected change of master binlog file name in the "
+ "middle of GTID %u-%u-%llu, assuming that master has "
+ "crashed and rolling back the transaction. Expected: "
+ "'%s', received: '%s'",
+ mi->last_queued_gtid.domain_id,
+ mi->last_queued_gtid.server_id,
+ mi->last_queued_gtid.seq_no,
+ mi->master_log_name, rev.new_log_ident);
+
+ mysql_mutex_lock(log_lock);
+ if (likely(!fdle.write(rli->relay_log.get_log_file()) &&
+ !rli->relay_log.flush_and_sync(NULL)))
+ {
+ rli->relay_log.harvest_bytes_written(&rli->log_space_total);
+ }
+ else
+ {
+ error= ER_SLAVE_RELAY_LOG_WRITE_FAILURE;
+ mysql_mutex_unlock(log_lock);
+ goto err;
+ }
+ rli->relay_log.signal_update();
+ mysql_mutex_unlock(log_lock);
+
+ mi->gtid_reconnect_event_skip_count= 0;
+ mi->events_queued_since_last_gtid= 0;
+ }
+ mi->prev_master_id= mi->master_id;
+
if (unlikely(process_io_rotate(mi, &rev)))
{
error= ER_SLAVE_RELAY_LOG_WRITE_FAILURE;
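
The comment block above describes how a GTID-based reconnect detects that the master restarted (or that we reconnected to a different master): while still skipping already-fetched events, an artificial Rotate event that carries an unexpected binlog file name or a different server_id means the half-fetched event group never committed and must be abandoned. A standalone sketch of just that decision; the structure and field names are illustrative and do not match the real Master_info members:

    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Illustrative state kept while skipping already-fetched events after a
    // GTID-based reconnect.
    struct ReconnectState
    {
      uint64_t    events_left_to_skip;  // > 0 while skipping
      bool        gtid_event_seen;
      uint32_t    prev_master_id;
      std::string expected_binlog_file;
    };

    // Returns true if the partially fetched event group must be rolled back.
    static bool must_abandon_skipped_group(const ReconnectState &st,
                                           uint32_t rotate_server_id,
                                           const std::string &rotate_file)
    {
      if (st.events_left_to_skip == 0 || st.gtid_event_seen)
        return false;                        // not in the skip phase
      // A new server_id or an unexpected binlog file name means the events we
      // were skipping never existed on this master: roll the group back.
      return rotate_server_id != st.prev_master_id ||
             rotate_file != st.expected_binlog_file;
    }

    int main()
    {
      ReconnectState st{5, false, 1, "master-bin.000042"};
      std::printf("%d\n", must_abandon_skipped_group(st, 1, "master-bin.000042"));
      std::printf("%d\n", must_abandon_skipped_group(st, 1, "master-bin.000043"));
      std::printf("%d\n", must_abandon_skipped_group(st, 2, "master-bin.000042"));
      return 0;
    }

In the server code the same condition additionally queues a Format_description event so that the SQL thread rolls the partial group back.
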
@@ -5710,7 +5848,7 @@ err:
mysql_mutex_unlock(&mi->data_lock);
DBUG_PRINT("info", ("error: %d", error));
if (error)
- mi->report(ERROR_LEVEL, error, ER(error),
+ mi->report(ERROR_LEVEL, error, NULL, ER(error),
(error == ER_SLAVE_RELAY_LOG_WRITE_FAILURE)?
"could not queue event from master" :
error_msg.ptr());
@@ -5817,7 +5955,7 @@ static int connect_to_master(THD* thd, MYSQL* mysql, Master_info* mi,
#ifndef DBUG_OFF
mi->events_till_disconnect = disconnect_slave_event_count;
#endif
- ulong client_flag= CLIENT_REMEMBER_OPTIONS;
+ ulong client_flag= 0;
if (opt_slave_compressed_protocol)
client_flag=CLIENT_COMPRESS; /* We will use compression */
@@ -5855,7 +5993,7 @@ static int connect_to_master(THD* thd, MYSQL* mysql, Master_info* mi,
/* we disallow empty users */
if (mi->user == NULL || mi->user[0] == 0)
{
- mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL,
ER(ER_SLAVE_FATAL_ERROR),
"Invalid (empty) username when attempting to "
"connect to the master server. Connection attempt "
@@ -5872,7 +6010,7 @@ static int connect_to_master(THD* thd, MYSQL* mysql, Master_info* mi,
{
last_errno=mysql_errno(mysql);
suppress_warnings= 0;
- mi->report(ERROR_LEVEL, last_errno,
+ mi->report(ERROR_LEVEL, last_errno, NULL,
"error %s to master '%s@%s:%d'"
" - retry-time: %d retries: %lu message: %s",
(reconnect ? "reconnecting" : "connecting"),
@@ -6404,6 +6542,7 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)
DBUG_ASSERT(rli->cur_log_fd >= 0);
mysql_file_close(rli->cur_log_fd, MYF(MY_WME));
rli->cur_log_fd = -1;
+ rli->last_inuse_relaylog->completed= true;
if (relay_log_purge)
{
@@ -6532,6 +6671,12 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)
mysql_mutex_unlock(log_lock);
goto err;
}
+ if (rli->alloc_inuse_relaylog(rli->linfo.log_file_name))
+ {
+ if (!hot_log)
+ mysql_mutex_unlock(log_lock);
+ goto err;
+ }
if (!hot_log)
mysql_mutex_unlock(log_lock);
continue;
@@ -6547,6 +6692,8 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)
if ((rli->cur_log_fd=open_binlog(cur_log,rli->linfo.log_file_name,
&errmsg)) <0)
goto err;
+ if (rli->alloc_inuse_relaylog(rli->linfo.log_file_name))
+ goto err;
}
else
{
@@ -6685,7 +6832,7 @@ bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report,
" so slave stops; check error log on slave"
" for more info", MYF(0), bug_id);
// a verbose message for the error log
- rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR,
+ rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR, NULL,
"According to the master's version ('%s'),"
" it is probable that master suffers from this bug:"
" http://bugs.mysql.com/bug.php?id=%u"
diff --git a/sql/slave.h b/sql/slave.h
index aa3976f6e6c..e65b4a589a1 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -229,35 +229,30 @@ int purge_relay_logs(Relay_log_info* rli, THD *thd, bool just_reset,
void set_slave_thread_options(THD* thd);
void set_slave_thread_default_charset(THD *thd, rpl_group_info *rgi);
int rotate_relay_log(Master_info* mi);
+int has_temporary_error(THD *thd);
int apply_event_and_update_pos(Log_event* ev, THD* thd,
struct rpl_group_info *rgi,
rpl_parallel_thread *rpt);
pthread_handler_t handle_slave_io(void *arg);
-void slave_output_error_info(Relay_log_info *rli, THD *thd);
+void slave_output_error_info(rpl_group_info *rgi, THD *thd);
pthread_handler_t handle_slave_sql(void *arg);
bool net_request_file(NET* net, const char* fname);
extern bool volatile abort_loop;
-extern Master_info main_mi, *active_mi; /* active_mi for multi-master */
+extern Master_info *active_mi; /* active_mi for multi-master */
extern Master_info *default_master_info; /* To replace active_mi */
extern Master_info_index *master_info_index;
extern LEX_STRING default_master_connection_name;
-extern LIST master_list;
extern my_bool replicate_same_server_id;
extern int disconnect_slave_event_count, abort_slave_event_count ;
/* the master variables are defaults read from my.cnf or command line */
-extern uint master_port, master_connect_retry, report_port;
-extern char * master_user, *master_password, *master_host;
+extern uint report_port;
extern char *master_info_file, *report_user;
extern char *report_host, *report_password;
-extern my_bool master_ssl;
-extern char *master_ssl_ca, *master_ssl_capath, *master_ssl_cert;
-extern char *master_ssl_cipher, *master_ssl_key;
-
extern I_List<THD> threads;
#else
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index f8320e830a5..296135c93e7 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -43,6 +43,7 @@
#include "sql_base.h" // close_thread_tables
#include "transaction.h" // trans_commit_stmt
#include "sql_audit.h"
+#include "debug_sync.h"
/*
Sufficient max length of printed destinations and frame offsets (all uints).
@@ -1123,6 +1124,8 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
Item_change_list old_change_list;
String old_packet;
uint old_server_status;
+ const uint status_backup_mask= SERVER_STATUS_CURSOR_EXISTS |
+ SERVER_STATUS_LAST_ROW_SENT;
Reprepare_observer *save_reprepare_observer= thd->m_reprepare_observer;
Object_creation_ctx *saved_creation_ctx;
Diagnostics_area *da= thd->get_stmt_da();
@@ -1257,7 +1260,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
It is probably safe to use same thd->convert_buff everywhere.
*/
old_packet.swap(thd->packet);
- old_server_status= thd->server_status;
+ old_server_status= thd->server_status & status_backup_mask;
/*
Switch to per-instruction arena here. We can do it since we cleanup
@@ -1275,6 +1278,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
/* Discard the initial part of executing routines. */
thd->profiling.discard_current_query();
#endif
+ DEBUG_SYNC(thd, "sp_head_execute_before_loop");
do
{
sp_instr *i;
@@ -1379,7 +1383,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
thd->spcont->pop_all_cursors(); // To avoid memory leaks after an error
/* Restore all saved */
- thd->server_status= old_server_status;
+ thd->server_status= (thd->server_status & ~status_backup_mask) | old_server_status;
old_packet.swap(thd->packet);
DBUG_ASSERT(thd->change_list.is_empty());
old_change_list.move_elements_to(&thd->change_list);
@@ -1852,9 +1856,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
as one select and not resetting THD::user_var_events before
each invocation.
*/
- mysql_mutex_lock(&LOCK_thread_count);
- q= global_query_id;
- mysql_mutex_unlock(&LOCK_thread_count);
+ q= get_query_id();
mysql_bin_log.start_union_events(thd, q + 1);
binlog_save_options= thd->variables.option_bits;
thd->variables.option_bits&= ~OPTION_BIN_LOG;
@@ -2290,6 +2292,11 @@ sp_head::restore_lex(THD *thd)
*/
if (sp_update_sp_used_routines(&m_sroutines, &sublex->sroutines))
DBUG_RETURN(TRUE);
+
+ /* If this substatement is an update query, then mark MODIFIES_DATA */
+ if (is_update_query(sublex->sql_command))
+ m_flags|= MODIFIES_DATA;
+
/*
Merge tables used by this statement (but not by its functions or
procedures) to multiset of tables used by this routine.
@@ -3109,7 +3116,10 @@ sp_instr_stmt::execute(THD *thd, uint *nextp)
thd->query_name_consts= 0;
if (!thd->is_error())
+ {
+ res= 0;
thd->get_stmt_da()->reset_diagnostics_area();
+ }
}
DBUG_RETURN(res || thd->is_error());
}
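
The sp_head::execute() change above stops restoring the whole server_status word around a stored routine; only SERVER_STATUS_CURSOR_EXISTS and SERVER_STATUS_LAST_ROW_SENT are backed up, so bits the routine legitimately changes (for example transaction state) are no longer clobbered on exit. A minimal sketch of the save-and-restore-under-mask pattern, with made-up flag values:

    #include <cstdint>
    #include <cstdio>

    // Made-up status bits, for illustration only.
    static const uint32_t STATUS_CURSOR_EXISTS = 1u << 0;
    static const uint32_t STATUS_LAST_ROW_SENT = 1u << 1;
    static const uint32_t STATUS_IN_TRANS      = 1u << 2;

    int main()
    {
      const uint32_t backup_mask = STATUS_CURSOR_EXISTS | STATUS_LAST_ROW_SENT;

      uint32_t server_status = STATUS_LAST_ROW_SENT;    // before the routine
      uint32_t saved = server_status & backup_mask;     // back up masked bits only

      // The routine body may flip any bits, including ones we do not back up.
      server_status |= STATUS_IN_TRANS;
      server_status &= ~STATUS_LAST_ROW_SENT;

      // Restore only the masked bits, keep everything else as the routine left it.
      server_status = (server_status & ~backup_mask) | saved;

      std::printf("IN_TRANS kept: %d, LAST_ROW_SENT restored: %d\n",
                  (server_status & STATUS_IN_TRANS) != 0,
                  (server_status & STATUS_LAST_ROW_SENT) != 0);
      return 0;
    }
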
diff --git a/sql/sp_head.h b/sql/sp_head.h
index cc598186d08..dbdb957aa79 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -161,7 +161,21 @@ public:
LOG_SLOW_STATEMENTS= 256, // Used by events
LOG_GENERAL_LOG= 512, // Used by events
HAS_SQLCOM_RESET= 1024,
- HAS_SQLCOM_FLUSH= 2048
+ HAS_SQLCOM_FLUSH= 2048,
+
+ /**
+ Marks routines that directly (i.e. not by calling other routines)
+ change tables. Note that this flag is set automatically based on
+ the type of statements used in the stored routine and is different
+ from the routine characteristic provided by the user in the form of
+ CONTAINS SQL, READS SQL DATA, MODIFIES SQL DATA clauses. The latter
+ are accepted by the parser but pretty much ignored after that.
+ We don't rely on them:
+ a) for compatibility reasons.
+ b) because in the CONTAINS SQL case they don't provide enough
+ information anyway.
+ */
+ MODIFIES_DATA= 4096
};
stored_procedure_type m_type;
@@ -332,11 +346,17 @@ public:
int
add_instr(sp_instr *instr);
- inline uint
- instructions()
- {
- return m_instr.elements;
- }
+ /**
+ Returns true if any substatement in the routine directly
+ (not through another routine) modifies data/changes table.
+
+ @sa Comment for MODIFIES_DATA flag.
+ */
+ bool modifies_data() const
+ { return m_flags & MODIFIES_DATA; }
+
+ inline uint instructions()
+ { return m_instr.elements; }
inline sp_instr *
last_instruction()
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 3d3c0bc835a..c6f23b3f1a3 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -297,7 +297,7 @@ public:
bool eq(const char *user2, const char *host2) { return !cmp(user2, host2); }
- bool wild_eq(const char *user2, const char *host2, const char *ip2 = 0)
+ bool wild_eq(const char *user2, const char *host2, const char *ip2)
{
if (strcmp(safe_str(user.str), safe_str(user2)))
return false;
@@ -1886,8 +1886,8 @@ bool acl_getroot(Security_context *sctx, char *user, char *host,
DBUG_RETURN(res);
}
-int check_user_can_set_role(const char *host, const char *user,
- const char *rolename, ulonglong *access)
+static int check_user_can_set_role(const char *user, const char *host,
+ const char *ip, const char *rolename, ulonglong *access)
{
ACL_ROLE *role;
ACL_USER_BASE *acl_user_base;
@@ -1930,7 +1930,7 @@ int check_user_can_set_role(const char *host, const char *user,
continue;
acl_user= (ACL_USER *)acl_user_base;
- if (acl_user->wild_eq(user, host))
+ if (acl_user->wild_eq(user, host, ip))
{
is_granted= TRUE;
break;
@@ -1958,9 +1958,8 @@ end:
int acl_check_setrole(THD *thd, char *rolename, ulonglong *access)
{
/* Yes! priv_user@host. Don't ask why - that's what check_access() does. */
- return check_user_can_set_role(thd->security_ctx->host,
- thd->security_ctx->priv_user,
- rolename, access);
+ return check_user_can_set_role(thd->security_ctx->priv_user,
+ thd->security_ctx->host, thd->security_ctx->ip, rolename, access);
}
@@ -2776,7 +2775,7 @@ int acl_set_default_role(THD *thd, const char *host, const char *user,
rolename= thd->security_ctx->priv_role;
}
- if (check_user_can_set_role(host, user, rolename, NULL))
+ if (check_user_can_set_role(user, host, host, rolename, NULL))
DBUG_RETURN(result);
if (!strcasecmp(rolename, "NONE"))
@@ -7665,7 +7664,7 @@ bool mysql_show_grants(THD *thd, LEX_USER *lex_user)
}
DBUG_ASSERT(rolename || username);
- Item_string *field=new Item_string("",0,&my_charset_latin1);
+ Item_string *field=new Item_string_ascii("", 0);
List<Item> field_list;
field->name=buff;
field->max_length=1024;
@@ -8944,6 +8943,7 @@ static int handle_grant_struct(enum enum_acl_lists struct_no, bool drop,
acl_user->user.str= strdup_root(&acl_memroot, user_to->user.str);
acl_user->user.length= user_to->user.length;
acl_user->host.hostname= strdup_root(&acl_memroot, user_to->host.str);
+ acl_user->hostname_length= user_to->host.length;
break;
case DB_ACL:
@@ -12561,7 +12561,7 @@ maria_declare_plugin(mysql_password)
NULL, /* status variables */
NULL, /* system variables */
"1.0", /* String version */
- MariaDB_PLUGIN_MATURITY_BETA /* Maturity */
+ MariaDB_PLUGIN_MATURITY_STABLE /* Maturity */
},
{
MYSQL_AUTHENTICATION_PLUGIN, /* type constant */
@@ -12576,7 +12576,7 @@ maria_declare_plugin(mysql_password)
NULL, /* status variables */
NULL, /* system variables */
"1.0", /* String version */
- MariaDB_PLUGIN_MATURITY_BETA /* Maturity */
+ MariaDB_PLUGIN_MATURITY_STABLE /* Maturity */
}
maria_declare_plugin_end;
diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc
index 0b610718cd0..aefa88feb43 100644
--- a/sql/sql_admin.cc
+++ b/sql/sql_admin.cc
@@ -914,7 +914,7 @@ send_result_message:
protocol->store(operator_name, system_charset_info);
if (result_code) // either mysql_recreate_table or analyze failed
{
- DBUG_ASSERT(thd->is_error() || thd->killed);
+ DBUG_ASSERT(thd->is_error());
if (thd->is_error())
{
const char *err_msg= thd->get_stmt_da()->message();
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index e51eb1c1a11..0bbcca5e778 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -2085,7 +2085,10 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
DBUG_RETURN(TRUE);
if (!(flags & MYSQL_OPEN_IGNORE_KILLED) && thd->killed)
+ {
+ thd->send_kill_message();
DBUG_RETURN(TRUE);
+ }
/*
Check if we're trying to take a write lock in a read only transaction.
@@ -2923,6 +2926,7 @@ Locked_tables_list::reopen_tables(THD *thd)
size_t reopen_count= 0;
MYSQL_LOCK *lock;
MYSQL_LOCK *merged_lock;
+ DBUG_ENTER("Locked_tables_list::reopen_tables");
for (TABLE_LIST *table_list= m_locked_tables;
table_list; table_list= table_list->next_global)
@@ -2934,7 +2938,7 @@ Locked_tables_list::reopen_tables(THD *thd)
if (open_table(thd, table_list, thd->mem_root, &ot_ctx))
{
unlink_all_closed_tables(thd, 0, reopen_count);
- return TRUE;
+ DBUG_RETURN(TRUE);
}
table_list->table->pos_in_locked_tables= table_list;
/* See also the comment on lock type in init_locked_tables(). */
@@ -2966,11 +2970,11 @@ Locked_tables_list::reopen_tables(THD *thd)
unlink_all_closed_tables(thd, lock, reopen_count);
if (! thd->killed)
my_error(ER_LOCK_DEADLOCK, MYF(0));
- return TRUE;
+ DBUG_RETURN(TRUE);
}
thd->lock= merged_lock;
}
- return FALSE;
+ DBUG_RETURN(FALSE);
}
/**
@@ -3515,9 +3519,12 @@ Open_table_context::recover_from_failed_open()
/*
Return a appropriate read lock type given a table object.
- @param thd Thread context
- @param prelocking_ctx Prelocking context.
- @param table_list Table list element for table to be locked.
+ @param thd Thread context
+ @param prelocking_ctx Prelocking context.
+ @param table_list Table list element for table to be locked.
+ @param routine_modifies_data
+ True if a routine invoked by the statement
+ modifies data.
@remark Due to a statement-based replication limitation, statements such as
INSERT INTO .. SELECT FROM .. and CREATE TABLE .. SELECT FROM need
@@ -3530,9 +3537,13 @@ Open_table_context::recover_from_failed_open()
This also applies to SELECT/SET/DO statements which use stored
functions. Calls to such functions are going to be logged as a
whole and thus should be serialized against concurrent changes
- to tables used by those functions. This can be avoided if functions
- only read data but doing so requires more complex analysis than it
- is done now.
+ to tables used by those functions. This is avoided when functions
+ do not modify data but only read it, since in this case nothing is
+ written to the binary log. The argument routine_modifies_data
+ conveys exactly this. So effectively, if the statement is not an
+ update query and routine_modifies_data is false, then
+ prelocking_placeholder is not taken into account.
+
Furthermore, this does not apply to I_S and log tables as it's
always unsafe to replicate such tables under statement-based
replication as the table on the slave might contain other data
@@ -3547,7 +3558,8 @@ Open_table_context::recover_from_failed_open()
thr_lock_type read_lock_type_for_table(THD *thd,
Query_tables_list *prelocking_ctx,
- TABLE_LIST *table_list)
+ TABLE_LIST *table_list,
+ bool routine_modifies_data)
{
/*
In cases when this function is called for a sub-statement executed in
@@ -3561,7 +3573,7 @@ thr_lock_type read_lock_type_for_table(THD *thd,
(table_list->table->s->table_category == TABLE_CATEGORY_LOG) ||
(table_list->table->s->table_category == TABLE_CATEGORY_PERFORMANCE) ||
!(is_update_query(prelocking_ctx->sql_command) ||
- table_list->prelocking_placeholder ||
+ (routine_modifies_data && table_list->prelocking_placeholder) ||
(thd->locked_tables_mode > LTM_LOCK_TABLES)))
return TL_READ;
else
@@ -3574,19 +3586,21 @@ thr_lock_type read_lock_type_for_table(THD *thd,
and, if prelocking strategy prescribes so, extend the prelocking set
with tables and routines used by it.
- @param[in] thd Thread context.
- @param[in] prelocking_ctx Prelocking context.
- @param[in] rt Element of prelocking set to be processed.
- @param[in] prelocking_strategy Strategy which specifies how the
- prelocking set should be extended when
- one of its elements is processed.
- @param[in] has_prelocking_list Indicates that prelocking set/list for
- this statement has already been built.
- @param[in] ot_ctx Context of open_table used to recover from
- locking failures.
- @param[out] need_prelocking Set to TRUE if it was detected that this
- statement will require prelocked mode for
- its execution, not touched otherwise.
+ @param[in] thd Thread context.
+ @param[in] prelocking_ctx Prelocking context.
+ @param[in] rt Element of prelocking set to be processed.
+ @param[in] prelocking_strategy Strategy which specifies how the
+ prelocking set should be extended when
+ one of its elements is processed.
+ @param[in] has_prelocking_list Indicates that prelocking set/list for
+ this statement has already been built.
+ @param[in] ot_ctx Context of open_table used to recover from
+ locking failures.
+ @param[out] need_prelocking Set to TRUE if it was detected that this
+ statement will require prelocked mode for
+ its execution, not touched otherwise.
+ @param[out] routine_modifies_data Set to TRUE if it was detected that this
+ routine does modify table data.
@retval FALSE Success.
@retval TRUE Failure (Conflicting metadata lock, OOM, other errors).
@@ -3598,11 +3612,13 @@ open_and_process_routine(THD *thd, Query_tables_list *prelocking_ctx,
Prelocking_strategy *prelocking_strategy,
bool has_prelocking_list,
Open_table_context *ot_ctx,
- bool *need_prelocking)
+ bool *need_prelocking, bool *routine_modifies_data)
{
MDL_key::enum_mdl_namespace mdl_type= rt->mdl_request.key.mdl_namespace();
DBUG_ENTER("open_and_process_routine");
+ *routine_modifies_data= false;
+
switch (mdl_type)
{
case MDL_key::FUNCTION:
@@ -3655,10 +3671,13 @@ open_and_process_routine(THD *thd, Query_tables_list *prelocking_ctx,
DBUG_RETURN(TRUE);
/* 'sp' is NULL when there is no such routine. */
- if (sp && !has_prelocking_list)
+ if (sp)
{
- prelocking_strategy->handle_routine(thd, prelocking_ctx, rt, sp,
- need_prelocking);
+ *routine_modifies_data= sp->modifies_data();
+
+ if (!has_prelocking_list)
+ prelocking_strategy->handle_routine(thd, prelocking_ctx, rt, sp,
+ need_prelocking);
}
}
else
@@ -4003,16 +4022,7 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
goto end;
}
- if (tables->lock_type != TL_UNLOCK && ! thd->locked_tables_mode)
- {
- if (tables->lock_type == TL_WRITE_DEFAULT)
- tables->table->reginfo.lock_type= thd->update_lock_default;
- else if (tables->lock_type == TL_READ_DEFAULT)
- tables->table->reginfo.lock_type=
- read_lock_type_for_table(thd, lex, tables);
- else
- tables->table->reginfo.lock_type= tables->lock_type;
- }
+ /* Copy grant information from TABLE_LIST instance to TABLE one. */
tables->table->grant= tables->grant;
/* Check and update metadata version of a base table. */
@@ -4351,6 +4361,7 @@ bool open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags,
Open_table_context ot_ctx(thd, flags);
bool error= FALSE;
MEM_ROOT new_frm_mem;
+ bool some_routine_modifies_data= FALSE;
bool has_prelocking_list;
DBUG_ENTER("open_tables");
@@ -4523,11 +4534,16 @@ restart:
sroutine_to_open= &rt->next, rt= rt->next)
{
bool need_prelocking= false;
+ bool routine_modifies_data;
TABLE_LIST **save_query_tables_last= thd->lex->query_tables_last;
error= open_and_process_routine(thd, thd->lex, rt, prelocking_strategy,
has_prelocking_list, &ot_ctx,
- &need_prelocking);
+ &need_prelocking,
+ &routine_modifies_data);
+
+ // Remember if any of the stored functions modifies data.
+ some_routine_modifies_data|= routine_modifies_data;
if (need_prelocking && ! thd->lex->requires_prelocking())
thd->lex->mark_as_requiring_prelocking(save_query_tables_last);
@@ -4568,6 +4584,10 @@ restart:
the children are detached. Attaching and detaching are always done,
even under LOCK TABLES.
+ We also convert all TL_WRITE_DEFAULT and TL_READ_DEFAULT locks to
+ appropriate "real" lock types to be used for locking and to be passed
+ to storage engine.
+
And start wsrep TOI if needed.
*/
for (tables= *start; tables; tables= tables->next_global)
@@ -4595,6 +4615,19 @@ restart:
goto error;
}
}
+
+ /* Set appropriate TABLE::lock_type. */
+ if (tbl && tables->lock_type != TL_UNLOCK && !thd->locked_tables_mode)
+ {
+ if (tables->lock_type == TL_WRITE_DEFAULT)
+ tbl->reginfo.lock_type= thd->update_lock_default;
+ else if (tables->lock_type == TL_READ_DEFAULT)
+ tbl->reginfo.lock_type=
+ read_lock_type_for_table(thd, thd->lex, tables,
+ some_routine_modifies_data);
+ else
+ tbl->reginfo.lock_type= tables->lock_type;
+ }
}
error:
@@ -4858,11 +4891,15 @@ static bool check_lock_and_start_stmt(THD *thd,
engine is important as, for example, InnoDB uses it to determine
what kind of row locks should be acquired when executing statement
in prelocked mode or under LOCK TABLES with @@innodb_table_locks = 0.
+
+ Last argument routine_modifies_data for read_lock_type_for_table()
+ is ignored, as prelocking placeholder will never be set here.
*/
+ DBUG_ASSERT(table_list->prelocking_placeholder == false);
if (table_list->lock_type == TL_WRITE_DEFAULT)
lock_type= thd->update_lock_default;
else if (table_list->lock_type == TL_READ_DEFAULT)
- lock_type= read_lock_type_for_table(thd, prelocking_ctx, table_list);
+ lock_type= read_lock_type_for_table(thd, prelocking_ctx, table_list, true);
else
lock_type= table_list->lock_type;
@@ -5283,6 +5320,7 @@ bool lock_tables(THD *thd, TABLE_LIST *tables, uint count,
thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_AUTOINC_NOT_FIRST);
}
+#ifdef NOT_USED_IN_MARIADB
/*
INSERT...ON DUPLICATE KEY UPDATE on a table with more than one unique keys
can be unsafe.
@@ -5308,6 +5346,7 @@ bool lock_tables(THD *thd, TABLE_LIST *tables, uint count,
thd->lex->duplicates == DUP_UPDATE)
thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_INSERT_TWO_KEYS);
}
+#endif
/* We have to emulate LOCK TABLES if the statement needs prelocking. */
if (thd->lex->requires_prelocking())
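
The open_tables() and read_lock_type_for_table() changes above let a read-only statement that calls only non-modifying stored routines take TL_READ instead of TL_READ_NO_INSERT, since nothing from it will reach the binary log. A condensed sketch of the decision with simplified inputs; the real function also considers binlog format, table category and LOCK TABLES mode:

    #include <cstdio>

    enum LockType { TL_READ, TL_READ_NO_INSERT };

    // Simplified stand-in for read_lock_type_for_table().
    static LockType pick_read_lock(bool is_update_statement,
                                   bool prelocking_placeholder,
                                   bool routine_modifies_data)
    {
      if (is_update_statement ||
          (prelocking_placeholder && routine_modifies_data))
        return TL_READ_NO_INSERT;   // statement may end up in the binlog
      return TL_READ;               // pure read, concurrent inserts are fine
    }

    int main()
    {
      // SELECT func() where func() only reads data: the weaker lock is enough.
      std::printf("%d\n", pick_read_lock(false, true, false) == TL_READ);
      // SELECT func() where func() modifies data: keep the stronger lock.
      std::printf("%d\n", pick_read_lock(false, true, true) == TL_READ_NO_INSERT);
      return 0;
    }
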
diff --git a/sql/sql_base.h b/sql/sql_base.h
index 6f8e9c1c03b..e39ec16028b 100644
--- a/sql/sql_base.h
+++ b/sql/sql_base.h
@@ -137,7 +137,8 @@ TABLE *find_write_locked_table(TABLE *list, const char *db,
const char *table_name);
thr_lock_type read_lock_type_for_table(THD *thd,
Query_tables_list *prelocking_ctx,
- TABLE_LIST *table_list);
+ TABLE_LIST *table_list,
+ bool routine_modifies_data);
my_bool mysql_rm_tmp_tables(void);
bool rm_temporary_table(handlerton *base, const char *path);
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index cf68ba36997..6001517b0c7 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -1711,7 +1711,7 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length)
DBUG_ENTER("Query_cache::send_result_to_client");
/*
- Testing 'query_cache_size' without a lock here is safe: the thing
+ Testing without a lock here is safe: the thing
we may lose is that the query won't be served from cache, but we
save on mutex locking in the case when query cache is disabled.
@@ -1731,8 +1731,6 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length)
goto err;
}
- DBUG_ASSERT(query_cache_size != 0); // otherwise cache would be disabled
-
thd->query_cache_is_applicable= 1;
sql= org_sql; sql_end= sql + query_length;
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 430bfbf760d..9d675490b28 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2013, Oracle and/or its affiliates.
- Copyright (c) 2008, 2013, Monty Program Ab.
+ Copyright (c) 2008, 2014, SkySQL Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -895,7 +895,6 @@ THD::THD(bool is_wsrep_applier)
accessed_rows_and_keys(0),
m_statement_psi(NULL),
m_idle_psi(NULL),
- m_server_idle(false),
thread_id(0),
global_disable_checkpoint(0),
failed_com_change_user(0),
@@ -4346,6 +4345,220 @@ extern "C" int thd_rpl_is_parallel(const MYSQL_THD thd)
return thd->rgi_slave && thd->rgi_slave->is_parallel_exec;
}
+/*
+ This function can optionally be called to check if thd_report_wait_for()
+ needs to be called for waits done by a given transaction.
+
+ If this function returns false for a given thd, there is no need to do any
+ calls to thd_report_wait_for() on that thd.
+
+ This call is optional; it is safe to call thd_report_wait_for() in any case.
+ This call can be used to save some redundant calls to thd_report_wait_for()
+ if desired. (This is unlikely to matter much unless there are _lots_ of
+ waits to report, as the overhead of thd_report_wait_for() is small).
+*/
+extern "C" int
+thd_need_wait_for(const MYSQL_THD thd)
+{
+ rpl_group_info *rgi;
+
+ if (!thd)
+ return false;
+ rgi= thd->rgi_slave;
+ if (!rgi)
+ return false;
+ return rgi->is_parallel_exec;
+}
+
+/*
+ Used by InnoDB/XtraDB to report that one transaction THD is about to go to
+ wait for a transactional lock held by another transactions OTHER_THD.
+
+ This is used for parallel replication, where transactions are required to
+ commit in the same order on the slave as they did on the master. If the
+ transactions on the slave encounters lock conflicts on the slave that did
+ not exist on the master, this can cause deadlocks.
+
+ Normally, such conflicts will not occur, because the same conflict would
+ have prevented the two transactions from committing in parallel on the
+ master, thus preventing them from running in parallel on the slave in the
+ first place. However, it is possible in case when the optimizer chooses a
+ different plan on the slave than on the master (eg. table scan instead of
+ index scan).
+
+ InnoDB/XtraDB reports lock waits using this call. If a lock wait causes a
+ deadlock with the pre-determined commit order, we kill the later transaction,
+ and later re-try it, to resolve the deadlock.
+
+ This call need only receive reports about waits for locks that will remain
+ until the holding transaction commits. InnoDB/XtraDB auto-increment locks
+ are released earlier, and so need not be reported. (Such false positives are
+ not harmful, but could lead to unnecessary kill and retry, so best avoided).
+*/
+extern "C" void
+thd_report_wait_for(const MYSQL_THD thd, MYSQL_THD other_thd)
+{
+ rpl_group_info *rgi;
+ rpl_group_info *other_rgi;
+
+ if (!thd || !other_thd)
+ return;
+ rgi= thd->rgi_slave;
+ other_rgi= other_thd->rgi_slave;
+ if (!rgi || !other_rgi)
+ return;
+ if (!rgi->is_parallel_exec)
+ return;
+ if (rgi->rli != other_rgi->rli)
+ return;
+ if (!rgi->gtid_sub_id || !other_rgi->gtid_sub_id)
+ return;
+ if (rgi->current_gtid.domain_id != other_rgi->current_gtid.domain_id)
+ return;
+ if (rgi->gtid_sub_id > other_rgi->gtid_sub_id)
+ return;
+ /*
+ This transaction is about to wait for another transaction that the
+ replication binlog order requires to commit after this one. That would
+ cause a deadlock.
+
+ So send a kill to the other transaction, with a temporary error; this will
+ cause replication to roll back (and later re-try) the other transaction,
+ releasing the lock for this transaction so replication can proceed.
+ */
+ other_rgi->killed_for_retry= true;
+ mysql_mutex_lock(&other_thd->LOCK_thd_data);
+ other_thd->awake(KILL_CONNECTION);
+ mysql_mutex_unlock(&other_thd->LOCK_thd_data);
+}
+
+/*
+ This function is called from InnoDB/XtraDB to check if the commit order of
+ two transactions has already been decided by the upper layer. This happens
+ in parallel replication, where the commit order is forced to be the same on
+ the slave as it was originally on the master.
+
+ If this function returns false, it means that such commit order will be
+ enforced. This allows the storage engine to optionally omit gap lock waits
+ or similar measures that would otherwise be needed to ensure that
+ transactions are serialised in a way that yields a commit order which is
+ correct for statement-based binlogging.
+
+ Since transactions are only run in parallel on the slave if they ran without
+ lock conflicts on the master, normally no lock conflicts on the slave happen
+ during parallel replication. However, there are a couple of corner cases
+ where it can happen, like these secondary-index operations:
+
+ T1: INSERT INTO t1 VALUES (7, NULL);
+ T2: DELETE FROM t1 WHERE b <= 3;
+
+ T1: UPDATE t1 SET secondary=NULL WHERE primary=1
+ T2: DELETE FROM t1 WHERE secondary <= 3
+
+ The DELETE takes a gap lock that can block the INSERT/UPDATE, but the row
+ locks set by INSERT/UPDATE do not block the DELETE. Thus, the execution
+ order of the transactions determines whether a lock conflict occurs or
+ not, so a lock conflict can occur on the slave where it did not on the
+ master.
+
+ If this function returns true, normal locking should be done as required by
+ the binlogging and transaction isolation level in effect. But if it returns
+ false, the correct order will be enforced anyway, and InnoDB/XtraDB can
+ avoid taking the gap lock, preventing the lock conflict.
+
+ Calling this function is just an optimisation to avoid unnecessary
+ deadlocks. If it was not used, a gap lock would be set that could eventually
+ cause a deadlock; the deadlock would be caught by thd_report_wait_for() and
+ the transaction T2 killed and rolled back (and later re-tried).
+*/
+extern "C" int
+thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd)
+{
+ rpl_group_info *rgi, *other_rgi;
+
+ DBUG_EXECUTE_IF("disable_thd_need_ordering_with", return 1;);
+ if (!thd || !other_thd)
+ return 1;
+ rgi= thd->rgi_slave;
+ other_rgi= other_thd->rgi_slave;
+ if (!rgi || !other_rgi)
+ return 1;
+ if (!rgi->is_parallel_exec)
+ return 1;
+ if (rgi->rli != other_rgi->rli)
+ return 1;
+ if (rgi->current_gtid.domain_id != other_rgi->current_gtid.domain_id)
+ return 1;
+ if (!rgi->commit_id || rgi->commit_id != other_rgi->commit_id)
+ return 1;
+ /*
+ Otherwise, these two threads are doing parallel replication within the same
+ replication domain. Their commit order is already fixed, so we do not need
+ gap locks or similar to otherwise enforce ordering (and in fact such locks
+ could lead to unnecessary deadlocks and transaction retry).
+ */
+ return 0;
+}
+
+
+/*
+ If the storage engine detects a deadlock, and needs to choose a victim
+ transaction to roll back, it can call this function to ask the upper
+ server layer which of the two transactions it prefers to be
+ aborted and rolled back.
+
+ In parallel replication, if two transactions are running in parallel and
+ one is fixed to commit before the other, then the one that commits later
+ will be preferred as the victim - choosing the early transaction as a victim
+ will not resolve the deadlock anyway, as the later transaction still needs
+ to wait for the earlier one to commit.
+
+ Otherwise, a transaction that uses only transactional tables, and can thus
+ be safely rolled back, will be preferred as a deadlock victim over a
+ transaction that also modified non-transactional (e.g. MyISAM) tables.
+
+ The return value is -1 if the first transaction is preferred as a deadlock
+ victim, 1 if the second transaction is preferred, or 0 for no preference (in
+ which case the storage engine can make the choice as it prefers).
+*/
+extern "C" int
+thd_deadlock_victim_preference(const MYSQL_THD thd1, const MYSQL_THD thd2)
+{
+ rpl_group_info *rgi1, *rgi2;
+ bool nontrans1, nontrans2;
+
+ if (!thd1 || !thd2)
+ return 0;
+
+ /*
+ If the transactions are participating in the same replication domain in
+ parallel replication, then request to select the one that will commit
+ later (in the fixed commit order from the master) as the deadlock victim.
+ */
+ rgi1= thd1->rgi_slave;
+ rgi2= thd2->rgi_slave;
+ if (rgi1 && rgi2 &&
+ rgi1->is_parallel_exec &&
+ rgi1->rli == rgi2->rli &&
+ rgi1->current_gtid.domain_id == rgi2->current_gtid.domain_id)
+ return rgi1->gtid_sub_id < rgi2->gtid_sub_id ? 1 : -1;
+
+ /*
+ If one transaction has modified non-transactional tables (so that it
+ cannot be safely rolled back), and the other has not, then prefer to
+ select the purely transactional one as the victim.
+ */
+ nontrans1= thd1->transaction.all.modified_non_trans_table;
+ nontrans2= thd2->transaction.all.modified_non_trans_table;
+ if (nontrans1 && !nontrans2)
+ return 1;
+ else if (!nontrans1 && nontrans2)
+ return -1;
+
+ /* No preferences, let the storage engine decide. */
+ return 0;
+}
+
+
extern "C" int thd_non_transactional_update(const MYSQL_THD thd)
{
return(thd->transaction.all.modified_non_trans_table);
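
Taken together, the hooks added above implement one rule for parallel replication: within a replication domain the transaction that must commit later is the one that waits, gets killed on a conflicting wait, and is preferred as the deadlock victim. A standalone sketch of the victim-preference part (the sub_id ordering plus the non-transactional tie-breaker); the types and field names are illustrative, not the server's:

    #include <cstdint>
    #include <cstdio>

    // Illustrative per-transaction replication info.
    struct Txn
    {
      bool     parallel_slave;     // executing in parallel replication?
      uint32_t domain_id;          // replication domain
      uint64_t sub_id;             // lower value == must commit earlier
      bool     modified_non_trans; // touched non-transactional tables?
    };

    // -1: prefer killing t1, 1: prefer killing t2, 0: no preference.
    static int deadlock_victim_preference(const Txn &t1, const Txn &t2)
    {
      if (t1.parallel_slave && t2.parallel_slave &&
          t1.domain_id == t2.domain_id)
        return t1.sub_id < t2.sub_id ? 1 : -1;   // kill the later committer

      if (t1.modified_non_trans && !t2.modified_non_trans)
        return 1;                                // t2 is safe to roll back
      if (!t1.modified_non_trans && t2.modified_non_trans)
        return -1;
      return 0;
    }

    int main()
    {
      Txn early{true, 0, 10, false}, late{true, 0, 11, false};
      std::printf("%d\n", deadlock_victim_preference(early, late));  // 1
      Txn trans{false, 0, 0, false}, mixed{false, 0, 0, true};
      std::printf("%d\n", deadlock_victim_preference(trans, mixed)); // -1
      return 0;
    }

thd_report_wait_for() and thd_need_ordering_with() above apply similar domain checks before deciding to kill the other transaction or to skip gap locks.
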
@@ -4370,9 +4583,18 @@ extern "C" bool thd_binlog_filter_ok(const MYSQL_THD thd)
return binlog_filter->db_ok(thd->db);
}
+/*
+ This is similar to sqlcom_can_generate_row_events, with the exception
+ that we only return 1 if we are going to generate row events in a
+ transaction.
+ CREATE OR REPLACE is always safe to do as this will run in its own
+ transaction.
+*/
+
extern "C" bool thd_sqlcom_can_generate_row_events(const MYSQL_THD thd)
{
- return sqlcom_can_generate_row_events(thd);
+ return (sqlcom_can_generate_row_events(thd) && thd->lex->sql_command !=
+ SQLCOM_CREATE_TABLE);
}
@@ -5888,23 +6110,35 @@ show_query_type(THD::enum_binlog_query_type qtype)
Constants required for the limit unsafe warnings suppression
*/
//seconds after which the limit unsafe warnings suppression will be activated
-#define LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT 50
+#define LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT 5*60
//number of limit unsafe warnings after which the suppression will be activated
-#define LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT 50
+#define LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT 10
-static ulonglong limit_unsafe_suppression_start_time= 0;
-static bool unsafe_warning_suppression_is_activated= false;
-static int limit_unsafe_warning_count= 0;
+static ulonglong unsafe_suppression_start_time= 0;
+static bool unsafe_warning_suppression_active[LEX::BINLOG_STMT_UNSAFE_COUNT];
+static ulong unsafe_warnings_count[LEX::BINLOG_STMT_UNSAFE_COUNT];
+static ulong total_unsafe_warnings_count;
/**
Auxiliary function to reset the limit unsafety warning suppression.
+ This is done without mutex protection, but this should be good
+ enough as it doesn't matter if we lose a couple of suppressed
+ messages or if this is called multiple times.
*/
-static void reset_binlog_unsafe_suppression()
+
+static void reset_binlog_unsafe_suppression(ulonglong now)
{
+ uint i;
DBUG_ENTER("reset_binlog_unsafe_suppression");
- unsafe_warning_suppression_is_activated= false;
- limit_unsafe_warning_count= 0;
- limit_unsafe_suppression_start_time= my_interval_timer()/10000000;
+
+ unsafe_suppression_start_time= now;
+ total_unsafe_warnings_count= 0;
+
+ for (i= 0 ; i < LEX::BINLOG_STMT_UNSAFE_COUNT ; i++)
+ {
+ unsafe_warnings_count[i]= 0;
+ unsafe_warning_suppression_active[i]= 0;
+ }
DBUG_VOID_RETURN;
}
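
The rewritten suppression code above replaces the single LIMIT-specific counter with one counter and one "active" flag per unsafe-warning type, sharing a single start time, and wipes everything once the timeout has elapsed. A minimal sketch of that bookkeeping; thread safety and the real warning-type list are deliberately left out:

    #include <cstdio>

    static const int      WARNING_TYPES   = 4;      // stand-in for the real count
    static const unsigned THRESHOLD_COUNT = 10;
    static const unsigned TIMEOUT_SECONDS = 5 * 60;

    static unsigned long long suppression_start = 0;
    static unsigned long      counts[WARNING_TYPES];
    static bool               suppressed[WARNING_TYPES];

    static void reset_suppression(unsigned long long now)
    {
      suppression_start = now;
      for (int i = 0; i < WARNING_TYPES; i++)
      {
        counts[i] = 0;
        suppressed[i] = false;
      }
    }

    // Returns true if the warning of the given type should be suppressed.
    static bool should_suppress(int type, unsigned long long now)
    {
      unsigned long count = ++counts[type];
      if (suppression_start == 0)
      {
        reset_suppression(now);                 // first warning ever seen
        return false;
      }
      if (count < THRESHOLD_COUNT)
        return false;
      if (!suppressed[type])
      {
        if (now - suppression_start <= TIMEOUT_SECONDS)
        {
          suppressed[type] = true;              // flood detected, start suppressing
          return false;                         // still log this one with a notice
        }
        reset_suppression(now);                 // quiet period passed, start over
        return false;
      }
      if (now - suppression_start > TIMEOUT_SECONDS)
      {
        reset_suppression(now);                 // timeout reached, lift suppression
        return false;
      }
      return true;
    }

    int main()
    {
      unsigned long long t = 1;
      for (int i = 0; i < 15; i++)
        std::printf("warning %d suppressed: %d\n", i, should_suppress(0, t));
      return 0;
    }
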
@@ -5922,95 +6156,94 @@ static void print_unsafe_warning_to_log(int unsafe_type, char* buf,
}
/**
- Auxiliary function to check if the warning for limit unsafety should be
- thrown or suppressed. Details of the implementation can be found in the
- comments inline.
+ Auxiliary function to check if the warning for unsafe repliction statements
+ should be thrown or suppressed.
+
+ Logic is:
+ - If we get more than LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT errors
+ of one type, that type of errors will be suppressed for
+ LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT.
+ - When the time limit has been reached, all suppression is reset.
+
+ This means that if one gets many different types of errors, some of them
+ may be reset in less than LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT seconds.
+ However, at least one error type is disabled for this time.
+
SYNOPSIS:
@params
- buf - buffer to hold the warning message text
unsafe_type - The type of unsafety.
- query - The actual query statement.
- TODO: Remove this function and implement a general service for all warnings
- that would prevent flooding the error log.
+ RETURN:
+ 0 Ok to log
+ 1 Message suppressed
*/
-static void do_unsafe_limit_checkout(char* buf, int unsafe_type, char* query)
+
+static bool protect_against_unsafe_warning_flood(int unsafe_type)
{
- ulonglong now= 0;
- DBUG_ENTER("do_unsafe_limit_checkout");
- DBUG_ASSERT(unsafe_type == LEX::BINLOG_STMT_UNSAFE_LIMIT);
- limit_unsafe_warning_count++;
+ ulong count;
+ ulonglong now= my_interval_timer()/1000000000ULL;
+ DBUG_ENTER("protect_against_unsafe_warning_flood");
+
+ count= ++unsafe_warnings_count[unsafe_type];
+ total_unsafe_warnings_count++;
+
/*
INITIALIZING:
If this is the first time this function is called with log warnings
enabled, the monitoring of unsafe warnings should start.
*/
- if (limit_unsafe_suppression_start_time == 0)
+ if (unsafe_suppression_start_time == 0)
{
- limit_unsafe_suppression_start_time= my_interval_timer()/10000000;
- print_unsafe_warning_to_log(unsafe_type, buf, query);
+ reset_binlog_unsafe_suppression(now);
+ DBUG_RETURN(0);
}
- else
+
+ /*
+ The following is true if we got too many errors or if the error was
+ already suppressed
+ */
+ if (count >= LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT)
{
- if (!unsafe_warning_suppression_is_activated)
- print_unsafe_warning_to_log(unsafe_type, buf, query);
+ ulonglong diff_time= (now - unsafe_suppression_start_time);
- if (limit_unsafe_warning_count >=
- LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT)
+ if (!unsafe_warning_suppression_active[unsafe_type])
{
- now= my_interval_timer()/10000000;
- if (!unsafe_warning_suppression_is_activated)
+ /*
+ ACTIVATION:
+ We got LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT warnings in
+ less than LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT seconds, so we
+ activate the suppression.
+ */
+ if (diff_time <= LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT)
{
- /*
- ACTIVATION:
- We got LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT warnings in
- less than LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT we activate the
- suppression.
- */
- if ((now-limit_unsafe_suppression_start_time) <=
- LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT)
- {
- unsafe_warning_suppression_is_activated= true;
- DBUG_PRINT("info",("A warning flood has been detected and the limit \
-unsafety warning suppression has been activated."));
- }
- else
- {
- /*
- there is no flooding till now, therefore we restart the monitoring
- */
- limit_unsafe_suppression_start_time= my_interval_timer()/10000000;
- limit_unsafe_warning_count= 0;
- }
+ unsafe_warning_suppression_active[unsafe_type]= 1;
+ sql_print_information("Suppressing warnings of type '%s' for up to %d seconds because of flooding",
+ ER(LEX::binlog_stmt_unsafe_errcode[unsafe_type]),
+ LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT);
}
else
{
/*
- Print the suppression note and the unsafe warning.
- */
- sql_print_information("The following warning was suppressed %d times \
-during the last %d seconds in the error log",
- limit_unsafe_warning_count,
- (int)
- (now-limit_unsafe_suppression_start_time));
- print_unsafe_warning_to_log(unsafe_type, buf, query);
- /*
- DEACTIVATION: We got LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT
- warnings in more than LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT, the
- suppression should be deactivated.
+ There has been no flooding so far, therefore we restart the monitoring
*/
- if ((now - limit_unsafe_suppression_start_time) >
- LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT)
- {
- reset_binlog_unsafe_suppression();
- DBUG_PRINT("info",("The limit unsafety warning supression has been \
-deactivated"));
- }
+ reset_binlog_unsafe_suppression(now);
+ }
+ }
+ else
+ {
+ /* This type of warning was suppressed */
+ if (diff_time > LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT)
+ {
+ ulong save_count= total_unsafe_warnings_count;
+ /* Print a suppression note and remove the suppression */
+ reset_binlog_unsafe_suppression(now);
+ sql_print_information("Suppressed %lu unsafe warnings during "
+ "the last %d seconds",
+ save_count, (int) diff_time);
}
- limit_unsafe_warning_count= 0;
}
}
- DBUG_VOID_RETURN;
+ DBUG_RETURN(unsafe_warning_suppression_active[unsafe_type]);
}
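The replacement function above is essentially a per-type rate limiter: count warnings per type, activate suppression for a type once its count crosses the threshold inside the time window, and lift everything when the window expires. A self-contained sketch of that scheme, with illustrative constants and without the server's DBUG/mutex machinery:

#include <cstdio>

// Illustrative constants; the real values live in the server source.
static const unsigned long THRESHOLD = 10;          // warnings of one type
static const unsigned long long TIMEOUT = 5 * 60;   // seconds
static const int TYPE_COUNT = 4;                    // hypothetical number of warning types

static unsigned long long start_time = 0;
static unsigned long counts[TYPE_COUNT];
static bool suppressed[TYPE_COUNT];

static void reset_suppression(unsigned long long now)
{
  start_time = now;
  for (int i = 0; i < TYPE_COUNT; i++)
  {
    counts[i] = 0;
    suppressed[i] = false;
  }
}

// Returns true when a warning of 'type' should be suppressed.
static bool should_suppress(int type, unsigned long long now)
{
  unsigned long count = ++counts[type];
  if (start_time == 0)                  // first call: start monitoring
  {
    reset_suppression(now);
    return false;
  }
  if (count >= THRESHOLD)
  {
    unsigned long long diff = now - start_time;
    if (!suppressed[type])
    {
      if (diff <= TIMEOUT)              // flood detected: activate suppression
        suppressed[type] = true;
      else                              // no flood in the window: restart monitoring
        reset_suppression(now);
    }
    else if (diff > TIMEOUT)            // window over: lift all suppression
      reset_suppression(now);
  }
  return suppressed[type];
}

int main()
{
  for (int i = 0; i < 12; i++)
    printf("warning %2d suppressed: %d\n", i, (int) should_suppress(0, 100 + i));
  return 0;
}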
/**
@@ -6022,6 +6255,7 @@ deactivated"));
void THD::issue_unsafe_warnings()
{
char buf[MYSQL_ERRMSG_SIZE * 2];
+ uint32 unsafe_type_flags;
DBUG_ENTER("issue_unsafe_warnings");
/*
Ensure that binlog_unsafe_warning_flags is big enough to hold all
@@ -6029,8 +6263,10 @@ void THD::issue_unsafe_warnings()
*/
DBUG_ASSERT(LEX::BINLOG_STMT_UNSAFE_COUNT <=
sizeof(binlog_unsafe_warning_flags) * CHAR_BIT);
+
+ if (!(unsafe_type_flags= binlog_unsafe_warning_flags))
+ DBUG_VOID_RETURN; // Nothing to do
- uint32 unsafe_type_flags= binlog_unsafe_warning_flags;
/*
For each unsafe_type, check if the statement is unsafe in this way
and issue a warning.
@@ -6045,13 +6281,9 @@ void THD::issue_unsafe_warnings()
ER_BINLOG_UNSAFE_STATEMENT,
ER(ER_BINLOG_UNSAFE_STATEMENT),
ER(LEX::binlog_stmt_unsafe_errcode[unsafe_type]));
- if (global_system_variables.log_warnings)
- {
- if (unsafe_type == LEX::BINLOG_STMT_UNSAFE_LIMIT)
- do_unsafe_limit_checkout( buf, unsafe_type, query());
- else //cases other than LIMIT unsafety
- print_unsafe_warning_to_log(unsafe_type, buf, query());
- }
+ if (global_system_variables.log_warnings > 0 &&
+ !protect_against_unsafe_warning_flood(unsafe_type))
+ print_unsafe_warning_to_log(unsafe_type, buf, query());
}
}
DBUG_VOID_RETURN;
@@ -6531,6 +6763,7 @@ wait_for_commit::unregister_wait_for_prior_commit2()
this->waitee= NULL;
}
}
+ wakeup_error= 0;
mysql_mutex_unlock(&LOCK_wait_commit);
}
diff --git a/sql/sql_class.h b/sql/sql_class.h
index d7bbfc3799d..e515b99b1a5 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -524,6 +524,14 @@ typedef struct system_variables
ulonglong sortbuff_size;
ulonglong group_concat_max_len;
ulonglong default_regex_flags;
+
+ /**
+ Placeholders to store Multi-source variables in sys_var.cc during
+ update and show of variables.
+ */
+ ulonglong slave_skip_counter;
+ ulonglong max_relay_log_size;
+
ha_rows select_limit;
ha_rows max_join_size;
ha_rows expensive_subquery_limit;
@@ -589,12 +597,6 @@ typedef struct system_variables
*/
uint32 gtid_domain_id;
uint64 gtid_seq_no;
- /**
- Place holders to store Multi-source variables in sys_var.cc during
- update and show of variables.
- */
- ulong slave_skip_counter;
- ulong max_relay_log_size;
/**
Default transaction access mode. READ ONLY (true) or READ WRITE (false).
@@ -714,6 +716,7 @@ typedef struct system_status_var
ulong filesort_range_count_;
ulong filesort_rows_;
ulong filesort_scan_count_;
+ ulong filesort_pq_sorts_;
/* Prepared statements and binary protocol */
ulong com_stmt_prepare;
ulong com_stmt_reprepare;
@@ -768,6 +771,13 @@ typedef struct system_status_var
#define last_system_status_var questions
#define last_cleared_system_status_var memory_used
+/*
+ Global status variables
+*/
+
+extern ulong feature_files_opened_with_delayed_keys;
+
+
void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var);
void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
@@ -1371,7 +1381,8 @@ enum enum_thread_type
SYSTEM_THREAD_SLAVE_SQL= 4,
SYSTEM_THREAD_EVENT_SCHEDULER= 8,
SYSTEM_THREAD_EVENT_WORKER= 16,
- SYSTEM_THREAD_BINLOG_BACKGROUND= 32
+ SYSTEM_THREAD_BINLOG_BACKGROUND= 32,
+ SYSTEM_THREAD_SLAVE_INIT= 64
};
inline char const *
@@ -1386,6 +1397,7 @@ show_system_thread(enum_thread_type thread)
RETURN_NAME_AS_STRING(SYSTEM_THREAD_SLAVE_SQL);
RETURN_NAME_AS_STRING(SYSTEM_THREAD_EVENT_SCHEDULER);
RETURN_NAME_AS_STRING(SYSTEM_THREAD_EVENT_WORKER);
+ RETURN_NAME_AS_STRING(SYSTEM_THREAD_SLAVE_INIT);
default:
sprintf(buf, "<UNKNOWN SYSTEM THREAD: %d>", thread);
return buf;
@@ -1753,6 +1765,8 @@ struct wait_for_commit
{
if (waitee)
unregister_wait_for_prior_commit2();
+ else
+ wakeup_error= 0;
}
/*
Remove a waiter from the list in the waitee. Used to unregister a wait.
@@ -2510,8 +2524,6 @@ public:
/** Idle instrumentation state. */
PSI_idle_locker_state m_idle_state;
#endif /* HAVE_PSI_IDLE_INTERFACE */
- /** True if the server code is IDLE for this connection. */
- bool m_server_idle;
/*
Id of current query. Statement can be reused to execute several queries
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 418c1db9b21..da2f7b156fe 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -937,7 +937,8 @@ multi_delete::initialize_tables(JOIN *join)
walk= delete_tables;
- for (JOIN_TAB *tab= first_linear_tab(join, WITH_CONST_TABLES);
+ for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_BUSH_ROOTS,
+ WITH_CONST_TABLES);
tab;
tab= next_linear_tab(join, tab, WITHOUT_BUSH_ROOTS))
{
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index a910ed6290f..8e8dbfc71d4 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -465,6 +465,8 @@ bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived)
}
}
+ if (!derived->merged_for_insert)
+ dt_select->first_cond_optimization= FALSE; // consider it optimized
exit_merge:
if (arena)
thd->restore_active_arena(arena, &backup);
@@ -614,6 +616,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
SELECT_LEX_UNIT *unit= derived->get_unit();
DBUG_ENTER("mysql_derived_prepare");
bool res= FALSE;
+ DBUG_PRINT("enter", ("unit 0x%lx", (ulong) unit));
// Skip already prepared views/DT
if (!unit || unit->prepared ||
@@ -623,9 +626,6 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
thd->lex->sql_command == SQLCOM_DELETE_MULTI))))
DBUG_RETURN(FALSE);
- Query_arena *arena, backup;
- arena= thd->activate_stmt_arena_if_needed(&backup);
-
SELECT_LEX *first_select= unit->first_select();
/* prevent name resolving out of derived table */
@@ -743,8 +743,6 @@ exit:
if (derived->outer_join)
table->maybe_null= 1;
}
- if (arena)
- thd->restore_active_arena(arena, &backup);
DBUG_RETURN(res);
}
diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc
index 53ac095d1d0..576fd48016e 100644
--- a/sql/sql_explain.cc
+++ b/sql/sql_explain.cc
@@ -203,15 +203,13 @@ bool Explain_query::print_explain_str(THD *thd, String *out_str, bool is_analyz
static void push_str(List<Item> *item_list, const char *str)
{
- item_list->push_back(new Item_string(str,
- strlen(str), system_charset_info));
+ item_list->push_back(new Item_string_sys(str));
}
static void push_string(List<Item> *item_list, String *str)
{
- item_list->push_back(new Item_string(str->ptr(), str->length(),
- system_charset_info));
+ item_list->push_back(new Item_string_sys(str->ptr(), str->length()));
}
@@ -263,8 +261,7 @@ int Explain_union::print_explain(Explain_query *query,
len+= lastop;
table_name_buffer[len - 1]= '>'; // change ',' to '>'
}
- const CHARSET_INFO *cs= system_charset_info;
- item_list.push_back(new Item_string(table_name_buffer, len, cs));
+ item_list.push_back(new Item_string_sys(table_name_buffer, len));
}
/* `partitions` column */
@@ -311,8 +308,7 @@ int Explain_union::print_explain(Explain_query *query,
{
extra_buf.append(STRING_WITH_LEN("Using filesort"));
}
- const CHARSET_INFO *cs= system_charset_info;
- item_list.push_back(new Item_string(extra_buf.ptr(), extra_buf.length(), cs));
+ item_list.push_back(new Item_string_sys(extra_buf.ptr(), extra_buf.length()));
//output->unit.offset_limit_cnt= 0;
if (output->send_data(item_list))
@@ -370,12 +366,10 @@ int Explain_select::print_explain(Explain_query *query,
if (message)
{
List<Item> item_list;
- const CHARSET_INFO *cs= system_charset_info;
Item *item_null= new Item_null();
item_list.push_back(new Item_int((int32) select_id));
- item_list.push_back(new Item_string(select_type,
- strlen(select_type), cs));
+ item_list.push_back(new Item_string_sys(select_type));
for (uint i=0 ; i < 7; i++)
item_list.push_back(item_null);
if (explain_flags & DESCRIBE_PARTITIONS)
@@ -392,7 +386,7 @@ int Explain_select::print_explain(Explain_query *query,
item_list.push_back(item_null);
}
- item_list.push_back(new Item_string(message,strlen(message),cs));
+ item_list.push_back(new Item_string_sys(message));
if (output->send_data(item_list))
return 1;
@@ -622,7 +616,7 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
extra_buf.append(STRING_WITH_LEN("Using filesort"));
}
- item_list.push_back(new Item_string(extra_buf.ptr(), extra_buf.length(), cs));
+ item_list.push_back(new Item_string_sys(extra_buf.ptr(), extra_buf.length()));
if (output->send_data(item_list))
return 1;
diff --git a/sql/sql_get_diagnostics.cc b/sql/sql_get_diagnostics.cc
index be1e3589cc6..8b0d86aa7d1 100644
--- a/sql/sql_get_diagnostics.cc
+++ b/sql/sql_get_diagnostics.cc
@@ -267,9 +267,11 @@ Condition_information_item::make_utf8_string_item(THD *thd, const String *str)
CHARSET_INFO *to_cs= &my_charset_utf8_general_ci;
/* If a charset was not set, assume that no conversion is needed. */
CHARSET_INFO *from_cs= str->charset() ? str->charset() : to_cs;
- Item_string *item= new Item_string(str->ptr(), str->length(), from_cs);
+ String tmp(str->ptr(), str->length(), from_cs);
/* If necessary, convert the string (ignoring errors), then copy it over. */
- return item ? item->charset_converter(to_cs, false) : NULL;
+ uint conv_errors;
+ return new Item_string(&tmp, to_cs, &conv_errors,
+ DERIVATION_COERCIBLE, MY_REPERTOIRE_UNICODE30);
}
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index 844810af0f4..8f458ea0b9f 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -626,7 +626,7 @@ SQL_SELECT *prepare_select_for_name(THD *thd, const char *mask, uint mlen,
{
Item *cond= new Item_func_like(new Item_field(pfname),
new Item_string(mask,mlen,pfname->charset()),
- new Item_string("\\",1,&my_charset_latin1),
+ new Item_string_ascii("\\"),
FALSE);
if (thd->is_fatal_error)
return 0; // OOM
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 2b68f7766ac..71a1983878f 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2013, Oracle and/or its affiliates.
- Copyright (c) 2009, 2013, Monty Program Ab.
+ Copyright (c) 2010, 2014, SkySQL Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -4148,15 +4148,14 @@ select_create::binlog_show_create_table(TABLE **tables, uint count)
{
/*
Note 1: In RBR mode, we generate a CREATE TABLE statement for the
- created table by calling store_create_info() (behaves as SHOW
- CREATE TABLE). In the event of an error, nothing should be
- written to the binary log, even if the table is non-transactional;
- therefore we pretend that the generated CREATE TABLE statement is
- for a transactional table. The event will then be put in the
- transaction cache, and any subsequent events (e.g., table-map
- events and binrow events) will also be put there. We can then use
- ha_autocommit_or_rollback() to either throw away the entire
- kaboodle of events, or write them to the binary log.
+ created table by calling show_create_table(). In the event of an error,
+ nothing should be written to the binary log, even if the table is
+ non-transactional; therefore we pretend that the generated CREATE TABLE
+ statement is for a transactional table. The event will then be put in the
+ transaction cache, and any subsequent events (e.g., table-map events and
+ binrow events) will also be put there. We can then use
+ ha_autocommit_or_rollback() to either throw away the entire kaboodle of
+ events, or write them to the binary log.
We write the CREATE TABLE statement here and not in prepare()
since there potentially are sub-selects or accesses to information
@@ -4175,12 +4174,9 @@ select_create::binlog_show_create_table(TABLE **tables, uint count)
tmp_table_list.table = *tables;
query.length(0); // Have to zero it since constructor doesn't
- result= store_create_info(thd, &tmp_table_list, &query, create_info,
- /* show_database */ TRUE,
- MY_TEST(create_info->org_options &
- HA_LEX_CREATE_REPLACE) ||
- create_info->table_was_deleted);
- DBUG_ASSERT(result == 0); /* store_create_info() always return 0 */
+ result= show_create_table(thd, &tmp_table_list, &query, create_info,
+ WITH_DB_NAME);
+ DBUG_ASSERT(result == 0); /* show_create_table() always returns 0 */
if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
{
diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc
index 7f97b70952e..cec0755c9b0 100644
--- a/sql/sql_join_cache.cc
+++ b/sql/sql_join_cache.cc
@@ -2094,7 +2094,7 @@ enum_nested_loop_state JOIN_CACHE::join_records(bool skip_last)
goto finish;
if (outer_join_first_inner)
{
- if (next_cache)
+ if (next_cache && join_tab != join_tab->last_inner)
{
/*
Ensure that all matches for outer records from join buffer are to be
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 45045ff54a0..772aacdaacd 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
+/* Copyright (c) 2000, 2014, Oracle and/or its affiliates.
Copyright (c) 2009, 2014, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
@@ -30,7 +30,7 @@
#include "sp.h"
#include "sql_select.h"
-static int lex_one_token(void *arg, THD *thd);
+static int lex_one_token(YYSTYPE *yylval, THD *thd);
/*
We are using pointer to this variable for distinguishing between assignment
@@ -969,15 +969,17 @@ bool consume_comment(Lex_input_stream *lip, int remaining_recursions_permitted)
/*
MYSQLlex remember the following states from the following MYSQLlex()
+ @param yylval [out] semantic value of the token being parsed (yylval)
+ @param thd THD
+
- MY_LEX_EOQ Found end of query
- MY_LEX_OPERATOR_OR_IDENT Last state was an ident, text or number
(which can't be followed by a signed number)
*/
-int MYSQLlex(void *arg, THD *thd)
+int MYSQLlex(YYSTYPE *yylval, THD *thd)
{
Lex_input_stream *lip= & thd->m_parser_state->m_lip;
- YYSTYPE *yylval=(YYSTYPE*) arg;
int token;
if (lip->lookahead_token >= 0)
@@ -994,7 +996,7 @@ int MYSQLlex(void *arg, THD *thd)
return token;
}
- token= lex_one_token(arg, thd);
+ token= lex_one_token(yylval, thd);
switch(token) {
case WITH:
@@ -1005,7 +1007,7 @@ int MYSQLlex(void *arg, THD *thd)
to transform the grammar into a LALR(1) grammar,
which sql_yacc.yy can process.
*/
- token= lex_one_token(arg, thd);
+ token= lex_one_token(yylval, thd);
switch(token) {
case CUBE_SYM:
lip->m_digest_psi= MYSQL_ADD_TOKEN(lip->m_digest_psi, WITH_CUBE_SYM,
@@ -1034,7 +1036,7 @@ int MYSQLlex(void *arg, THD *thd)
return token;
}
-int lex_one_token(void *arg, THD *thd)
+static int lex_one_token(YYSTYPE *yylval, THD *thd)
{
reg1 uchar c;
bool comment_closed;
@@ -1043,7 +1045,6 @@ int lex_one_token(void *arg, THD *thd)
enum my_lex_states state;
Lex_input_stream *lip= & thd->m_parser_state->m_lip;
LEX *lex= thd->lex;
- YYSTYPE *yylval=(YYSTYPE*) arg;
CHARSET_INFO *const cs= thd->charset();
const uchar *const state_map= cs->state_map;
const uchar *const ident_map= cs->ident_map;
@@ -3302,7 +3303,7 @@ static void fix_prepare_info_in_table_list(THD *thd, TABLE_LIST *tbl)
{
for (; tbl; tbl= tbl->next_local)
{
- if (tbl->on_expr)
+ if (tbl->on_expr && !tbl->prep_on_expr)
{
thd->check_and_register_item_tree(&tbl->prep_on_expr, &tbl->on_expr);
tbl->on_expr= tbl->on_expr->copy_andor_structure(thd);
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 5e9c7b9dc6a..0a1232e81d1 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -1,5 +1,5 @@
-/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
- Copyright (c) 2010, 2013, Monty Program Ab.
+/* Copyright (c) 2000, 2014, Oracle and/or its affiliates.
+ Copyright (c) 2010, 2014, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -2897,7 +2897,7 @@ extern void lex_start(THD *thd);
extern void lex_end(LEX *lex);
void end_lex_with_single_table(THD *thd, TABLE *table, LEX *old_lex);
int init_lex_with_single_table(THD *thd, TABLE *table, LEX *lex);
-extern int MYSQLlex(void *arg, THD *thd);
+extern int MYSQLlex(union YYSTYPE *yylval, THD *thd);
extern void trim_whitespace(CHARSET_INFO *cs, LEX_STRING *str);
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index d8906b2d578..4bc32df549d 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
- Copyright (c) 2008, 2014, Monty Program Ab
+ Copyright (c) 2008, 2014, SkySQL Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -302,7 +302,7 @@ void init_update_queries(void)
sql_command_flags[SQLCOM_CREATE_TABLE]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_AUTO_COMMIT_TRANS | CF_REPORT_PROGRESS |
CF_CAN_GENERATE_ROW_EVENTS;
- sql_command_flags[SQLCOM_CREATE_INDEX]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
+ sql_command_flags[SQLCOM_CREATE_INDEX]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS | CF_REPORT_PROGRESS;
sql_command_flags[SQLCOM_ALTER_TABLE]= CF_CHANGES_DATA | CF_WRITE_LOGS_COMMAND |
CF_AUTO_COMMIT_TRANS | CF_REPORT_PROGRESS |
CF_INSERTS_DATA;
@@ -981,9 +981,7 @@ bool do_command(THD *thd)
*/
DEBUG_SYNC(thd, "before_do_command_net_read");
- thd->m_server_idle= TRUE;
- packet_length= my_net_read(net);
- thd->m_server_idle= FALSE;
+ packet_length= my_net_read_packet(net, 1);
#ifdef WITH_WSREP
if (WSREP(thd)) {
mysql_mutex_lock(&thd->LOCK_wsrep_thd);
@@ -2946,6 +2944,9 @@ mysql_execute_command(THD *thd)
goto error;
mysql_mutex_lock(&LOCK_active_mi);
+ if (!master_info_index)
+ goto error;
+
mi= master_info_index->get_master_info(&lex_mi->connection_name,
Sql_condition::WARN_LEVEL_NOTE);
@@ -3195,7 +3196,11 @@ mysql_execute_command(THD *thd)
goto end_with_restore_list;
}
+ /* Temporarily copy the statement flags to thd for lock_table_names() */
+ uint save_thd_create_info_options= thd->lex->create_info.options;
+ thd->lex->create_info.options|= create_info.options;
res= open_and_lock_tables(thd, lex->query_tables, TRUE, 0);
+ thd->lex->create_info.options= save_thd_create_info_options;
if (res)
{
/* Got error or warning. Set res to 1 if error */
@@ -3407,7 +3412,7 @@ end_with_restore_list:
case SQLCOM_SLAVE_ALL_START:
{
mysql_mutex_lock(&LOCK_active_mi);
- if (!master_info_index->start_all_slaves(thd))
+ if (master_info_index && !master_info_index->start_all_slaves(thd))
my_ok(thd);
mysql_mutex_unlock(&LOCK_active_mi);
break;
@@ -3423,7 +3428,7 @@ end_with_restore_list:
goto error;
}
mysql_mutex_lock(&LOCK_active_mi);
- if (!master_info_index->stop_all_slaves(thd))
+ if (master_info_index && !master_info_index->stop_all_slaves(thd))
my_ok(thd);
mysql_mutex_unlock(&LOCK_active_mi);
break;
@@ -4693,11 +4698,12 @@ end_with_restore_list:
case SQLCOM_SHOW_GRANTS:
{
LEX_USER *grant_user= lex->grant_user;
+ Security_context *sctx= thd->security_ctx;
if (!grant_user)
goto error;
- if (grant_user->user.str &&
- !strcmp(thd->security_ctx->priv_user, grant_user->user.str))
+ if (grant_user->user.str && !strcmp(sctx->priv_user, grant_user->user.str) &&
+ grant_user->host.str && !strcmp(sctx->priv_host, grant_user->host.str))
grant_user->user= current_user;
if (grant_user->user.str == current_user.str ||
@@ -7916,7 +7922,7 @@ static uint kill_threads_for_user(THD *thd, LEX_USER *user,
I_List_iterator<THD> it(threads);
while ((tmp=it++))
{
- if (tmp->get_command() == COM_DAEMON)
+ if (!tmp->security_ctx->user)
continue;
/*
Check that hostname (if given) and user name matches.
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 9bc8147c75f..2b9ba2d00c4 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -1,5 +1,5 @@
-/* Copyright (c) 2005, 2013, Oracle and/or its affiliates.
- Copyright (c) 2009, 2013, Monty Program Ab.
+/* Copyright (c) 2005, 2014, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2014, SkySQL Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -3167,19 +3167,28 @@ uint32 get_partition_id_cols_list_for_endpoint(partition_info *part_info,
uint num_columns= part_info->part_field_list.elements;
uint list_index;
uint min_list_index= 0;
+ int cmp;
+ /* Notice that max_list_index = last_index + 1 here! */
uint max_list_index= part_info->num_list_values;
DBUG_ENTER("get_partition_id_cols_list_for_endpoint");
/* Find the matching partition (including taking endpoint into account). */
do
{
- /* Midpoint, adjusted down, so it can never be > last index. */
+ /* Midpoint, adjusted down, so it can never be >= max_list_index. */
list_index= (max_list_index + min_list_index) >> 1;
- if (cmp_rec_and_tuple_prune(list_col_array + list_index*num_columns,
- nparts, left_endpoint, include_endpoint) > 0)
+ cmp= cmp_rec_and_tuple_prune(list_col_array + list_index*num_columns,
+ nparts, left_endpoint, include_endpoint);
+ if (cmp > 0)
+ {
min_list_index= list_index + 1;
+ }
else
+ {
max_list_index= list_index;
+ if (cmp == 0)
+ break;
+ }
} while (max_list_index > min_list_index);
list_index= max_list_index;
@@ -3196,12 +3205,10 @@ uint32 get_partition_id_cols_list_for_endpoint(partition_info *part_info,
nparts, left_endpoint,
include_endpoint)));
- if (!left_endpoint)
- {
- /* Set the end after this list tuple if not already after the last. */
- if (list_index < part_info->num_parts)
- list_index++;
- }
+ /* Include the right endpoint if not already past the end of the array. */
+ if (!left_endpoint && include_endpoint && cmp == 0 &&
+ list_index < part_info->num_list_values)
+ list_index++;
DBUG_RETURN(list_index);
}
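The reworked loop above is a binary search whose equality handling depends on which endpoint of a range is being located and whether that endpoint is inclusive. A standalone sketch over a plain sorted int array (function and variable names are hypothetical, not the server's):

#include <cstdio>

// Find the index in the sorted array 'vals' (length n) that should serve as
// the range endpoint for 'key'.  Mirrors the shape of the search above:
// cmp > 0 moves the lower bound up, otherwise the upper bound down, stopping
// early on an exact match.  Purely illustrative.
static unsigned find_endpoint_index(const int *vals, unsigned n, int key,
                                    bool left_endpoint, bool include_endpoint)
{
  unsigned lo = 0, hi = n;               // hi is "last index + 1"
  int cmp = -1;
  while (hi > lo)
  {
    unsigned mid = (lo + hi) / 2;        // adjusted down, never >= hi
    cmp = (vals[mid] < key) ? 1 : (vals[mid] > key ? -1 : 0);
    // An exact match counts as "greater" when the left endpoint is excluded.
    if (cmp == 0 && left_endpoint && !include_endpoint)
      cmp = 1;
    if (cmp > 0)
      lo = mid + 1;
    else
    {
      hi = mid;
      if (cmp == 0)
        break;
    }
  }
  unsigned idx = hi;
  // Include the right endpoint if we stopped on an exact match.
  if (!left_endpoint && include_endpoint && cmp == 0 && idx < n)
    idx++;
  return idx;
}

int main()
{
  int vals[] = {10, 20, 30, 40};
  printf("%u %u\n",
         find_endpoint_index(vals, 4, 20, true, true),    // -> 1
         find_endpoint_index(vals, 4, 20, false, true));  // -> 2
  return 0;
}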
@@ -7573,15 +7580,13 @@ static int cmp_rec_and_tuple_prune(part_column_list_val *val,
field= val->part_info->part_field_array + n_vals_in_rec;
if (!(*field))
{
- /*
- Full match, if right endpoint and not including the endpoint,
- (rec < part) return lesser.
- */
- if (!is_left_endpoint && !include_endpoint)
- return -4;
+ /* Full match. Only equal if including endpoint. */
+ if (include_endpoint)
+ return 0;
- /* Otherwise they are equal! */
- return 0;
+ if (is_left_endpoint)
+ return +4; /* Start of range, part_tuple < rec, return higher. */
+ return -4; /* End of range, rec < part_tuple, return lesser. */
}
/*
The prefix is equal and there are more partition columns to compare.
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 9ae3d792744..4da4fcf21b0 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -1025,7 +1025,7 @@ static st_plugin_int *plugin_insert_or_reuse(struct st_plugin_int *plugin)
static bool plugin_add(MEM_ROOT *tmp_root,
const LEX_STRING *name, LEX_STRING *dl, int report)
{
- struct st_plugin_int tmp;
+ struct st_plugin_int tmp, *maybe_dupe;
struct st_maria_plugin *plugin;
uint oks= 0, errs= 0, dupes= 0;
DBUG_ENTER("plugin_add");
@@ -1055,8 +1055,14 @@ static bool plugin_add(MEM_ROOT *tmp_root,
(const uchar *)tmp.name.str, tmp.name.length))
continue; // plugin name doesn't match
- if (!name->str && plugin_find_internal(&tmp.name, MYSQL_ANY_PLUGIN))
+ if (!name->str &&
+ (maybe_dupe= plugin_find_internal(&tmp.name, MYSQL_ANY_PLUGIN)))
{
+ if (plugin->name != maybe_dupe->plugin->name)
+ {
+ report_error(report, ER_UDF_EXISTS, plugin->name);
+ DBUG_RETURN(TRUE);
+ }
dupes++;
continue; // already installed
}
@@ -1572,7 +1578,7 @@ int plugin_init(int *argc, char **argv, int flags)
if (plugin_initialize(&tmp_root, plugin_ptr, argc, argv, !is_myisam &&
(flags & PLUGIN_INIT_SKIP_INITIALIZATION)))
{
- if (mandatory)
+ if (plugin_ptr->load_option == PLUGIN_FORCE)
goto err_unlock;
plugin_ptr->state= PLUGIN_IS_DISABLED;
}
@@ -3313,7 +3319,7 @@ bool sys_var_pluginvar::session_update(THD *thd, set_var *var)
mysql_mutex_unlock(&LOCK_global_system_variables);
plugin_var->update(thd, plugin_var, tgt, src);
-
+
return false;
}
@@ -3731,7 +3737,7 @@ static int construct_options(MEM_ROOT *mem_root, struct st_plugin_int *tmp,
if (opt->flags & PLUGIN_VAR_NOCMDOPT)
continue;
- optname= (char*) memdup_root(mem_root, v->key + 1,
+ optname= (char*) memdup_root(mem_root, v->key + 1,
(optnamelen= v->name_len) + 1);
}
@@ -3993,7 +3999,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
}
DBUG_RETURN(0);
-
+
err:
if (opts)
my_cleanup_options(opts);
diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc
index 24e5d053145..1754ffce220 100644
--- a/sql/sql_reload.cc
+++ b/sql/sql_reload.cc
@@ -175,18 +175,21 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
*/
tmp_write_to_binlog= 0;
mysql_mutex_lock(&LOCK_active_mi);
- if (!(mi= (master_info_index->
- get_master_info(&connection_name,
- Sql_condition::WARN_LEVEL_ERROR))))
+ if (master_info_index)
{
- result= 1;
- }
- else
- {
- mysql_mutex_lock(&mi->data_lock);
- if (rotate_relay_log(mi))
- *write_to_binlog= -1;
- mysql_mutex_unlock(&mi->data_lock);
+ if (!(mi= (master_info_index->
+ get_master_info(&connection_name,
+ Sql_condition::WARN_LEVEL_ERROR))))
+ {
+ result= 1;
+ }
+ else
+ {
+ mysql_mutex_lock(&mi->data_lock);
+ if (rotate_relay_log(mi))
+ *write_to_binlog= -1;
+ mysql_mutex_unlock(&mi->data_lock);
+ }
}
mysql_mutex_unlock(&LOCK_active_mi);
#endif
@@ -356,22 +359,24 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
Master_info *mi;
tmp_write_to_binlog= 0;
mysql_mutex_lock(&LOCK_active_mi);
-
- if (!(mi= (master_info_index->
- get_master_info(&lex_mi->connection_name,
- Sql_condition::WARN_LEVEL_ERROR))))
+ if (master_info_index)
{
- result= 1;
- }
- else if (reset_slave(thd, mi))
- {
- /* NOTE: my_error() has been already called by reset_slave(). */
- result= 1;
- }
- else if (mi->connection_name.length && thd->lex->reset_slave_info.all)
- {
- /* If not default connection and 'all' is used */
- master_info_index->remove_master_info(&mi->connection_name);
+ if (!(mi= (master_info_index->
+ get_master_info(&lex_mi->connection_name,
+ Sql_condition::WARN_LEVEL_ERROR))))
+ {
+ result= 1;
+ }
+ else if (reset_slave(thd, mi))
+ {
+ /* NOTE: my_error() has been already called by reset_slave(). */
+ result= 1;
+ }
+ else if (mi->connection_name.length && thd->lex->reset_slave_info.all)
+ {
+ /* If not default connection and 'all' is used */
+ master_info_index->remove_master_info(&mi->connection_name);
+ }
}
mysql_mutex_unlock(&LOCK_active_mi);
}
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index e91b3b0a2ed..d9c88983797 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
- Copyright (c) 2008, 2014, Monty Program Ab
+ Copyright (c) 2008, 2014, SkySQL Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -3074,6 +3074,7 @@ int reset_slave(THD *thd, Master_info* mi)
mi->clear_error();
mi->rli.clear_error();
mi->rli.clear_until_condition();
+ mi->rli.slave_skip_counter= 0;
// close master_info_file, relay_log_info_file, set mi->inited=rli->inited=0
end_master_info(mi);
@@ -3224,6 +3225,9 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
LEX_MASTER_INFO* lex_mi= &thd->lex->mi;
DBUG_ENTER("change_master");
+ mysql_mutex_assert_owner(&LOCK_active_mi);
+ DBUG_ASSERT(master_info_index);
+
*master_info_added= false;
/*
We need to check if there is an empty master_host. Otherwise
@@ -3521,6 +3525,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
/* Clear the errors, for a clean start */
mi->rli.clear_error();
mi->rli.clear_until_condition();
+ mi->rli.slave_skip_counter= 0;
sql_print_information("'CHANGE MASTER TO executed'. "
"Previous state master_host='%s', master_port='%u', master_log_file='%s', "
@@ -3622,7 +3627,8 @@ bool mysql_show_binlog_events(THD* thd)
else /* showing relay log contents */
{
mysql_mutex_lock(&LOCK_active_mi);
- if (!(mi= master_info_index->
+ if (!master_info_index ||
+ !(mi= master_info_index->
get_master_info(&thd->variables.default_master_connection,
Sql_condition::WARN_LEVEL_ERROR)))
{
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 436a2f6ce5d..ff93aa6d103 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1444,7 +1444,8 @@ TODO: make view to decide if it is possible to write to WHERE directly or make S
Perform the optimization on fields evaluation mentioned above
for all on expressions.
*/
- for (JOIN_TAB *tab= first_linear_tab(this, WITHOUT_CONST_TABLES); tab;
+ JOIN_TAB *tab;
+ for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab;
tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
{
if (*tab->on_expr_ref)
@@ -1467,7 +1468,7 @@ TODO: make view to decide if it is possible to write to WHERE directly or make S
Perform the optimization on fields evaluation mentioned above
for all used ref items.
*/
- for (JOIN_TAB *tab= first_linear_tab(this, WITHOUT_CONST_TABLES); tab;
+ for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab;
tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
{
uint key_copy_index=0;
@@ -2107,7 +2108,8 @@ bool JOIN::setup_subquery_caches()
if (conds)
conds= conds->transform(&Item::expr_cache_insert_transformer,
(uchar*) thd);
- for (JOIN_TAB *tab= first_linear_tab(this, WITHOUT_CONST_TABLES);
+ JOIN_TAB *tab;
+ for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
{
if (tab->select_cond)
@@ -2269,7 +2271,8 @@ JOIN::reinit()
/* need to reset ref access state (see join_read_key) */
if (join_tab)
{
- for (JOIN_TAB *tab= first_linear_tab(this, WITH_CONST_TABLES); tab;
+ JOIN_TAB *tab;
+ for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITH_CONST_TABLES); tab;
tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
{
tab->ref.key_err= TRUE;
@@ -2531,7 +2534,7 @@ void JOIN::exec_inner()
Item *cur_const_item;
while ((cur_const_item= const_item_it++))
{
- cur_const_item->val_str(&cur_const_item->str_value);
+ cur_const_item->val_str(); // This caches the val_str() result in Item::str_value
if (thd->is_error())
{
error= thd->is_error();
@@ -3137,8 +3140,9 @@ JOIN::destroy()
{
if (join_tab != tmp_join->join_tab)
{
- for (JOIN_TAB *tab= first_linear_tab(this, WITH_CONST_TABLES); tab;
- tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
+ JOIN_TAB *tab;
+ for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITH_CONST_TABLES);
+ tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
{
tab->cleanup();
}
@@ -8211,14 +8215,24 @@ JOIN_TAB *next_top_level_tab(JOIN *join, JOIN_TAB *tab)
}
-JOIN_TAB *first_linear_tab(JOIN *join, enum enum_with_const_tables const_tbls)
+JOIN_TAB *first_linear_tab(JOIN *join,
+ enum enum_with_bush_roots include_bush_roots,
+ enum enum_with_const_tables const_tbls)
{
JOIN_TAB *first= join->join_tab;
if (const_tbls == WITHOUT_CONST_TABLES)
first+= join->const_tables;
- if (first < join->join_tab + join->top_join_tab_count)
- return first;
- return NULL; /* All tables were const tables */
+
+ if (first >= join->join_tab + join->top_join_tab_count)
+ return NULL; /* All are const tables */
+
+ if (first->bush_children && include_bush_roots == WITHOUT_BUSH_ROOTS)
+ {
+ /* This JOIN_TAB is an SJM nest; start from the first table in the nest */
+ return first->bush_children->start;
+ }
+
+ return first;
}
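first_linear_tab() now decides two things up front: whether to skip the const tables and whether to step into a semi-join-materialization nest instead of returning its root. A toy model of that selection, with a hypothetical Tab struct standing in for JOIN_TAB:

#include <cstddef>
#include <cstdio>

// 'Tab' is an illustrative stand-in for JOIN_TAB; a non-null 'nest_start'
// marks a bush (SJM) root whose children start at that pointer.
struct Tab
{
  const char *name;
  Tab *nest_start;   // first table inside the nest, or NULL
};

enum with_bush_roots { WITH_BUSH_ROOTS, WITHOUT_BUSH_ROOTS };
enum with_const_tables { WITH_CONST_TABLES, WITHOUT_CONST_TABLES };

static Tab *first_linear(Tab *tabs, size_t n_tabs, size_t n_const,
                         with_bush_roots bush, with_const_tables consts)
{
  Tab *first = tabs;
  if (consts == WITHOUT_CONST_TABLES)
    first += n_const;                    // skip the const tables
  if (first >= tabs + n_tabs)
    return NULL;                         // all tables were const tables
  if (first->nest_start && bush == WITHOUT_BUSH_ROOTS)
    return first->nest_start;            // step into the nest, not its root
  return first;
}

int main()
{
  Tab inner[] = { {"inner1", NULL} };
  Tab tabs[]  = { {"const1", NULL}, {"sjm_root", inner}, {"t2", NULL} };
  Tab *t = first_linear(tabs, 3, 1, WITHOUT_BUSH_ROOTS, WITHOUT_CONST_TABLES);
  printf("%s\n", t ? t->name : "none");  // prints "inner1"
  return 0;
}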
@@ -9084,9 +9098,10 @@ inline void add_cond_and_fix(THD *thd, Item **e1, Item *e2)
static void add_not_null_conds(JOIN *join)
{
+ JOIN_TAB *tab;
DBUG_ENTER("add_not_null_conds");
- for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_CONST_TABLES);
+ for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
tab;
tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS))
{
@@ -9257,7 +9272,7 @@ make_outerjoin_info(JOIN *join)
tab->table->pos_in_table_list being set.
*/
JOIN_TAB *tab;
- for (tab= first_linear_tab(join, WITHOUT_CONST_TABLES);
+ for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
tab;
tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS))
{
@@ -9269,7 +9284,7 @@ make_outerjoin_info(JOIN *join)
}
}
- for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_CONST_TABLES); tab;
+ for (JOIN_TAB *tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab;
tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS))
{
TABLE *table= tab->table;
@@ -9901,6 +9916,25 @@ uint get_next_field_for_derived_key(uchar *arg)
}
+static
+uint get_next_field_for_derived_key_simple(uchar *arg)
+{
+ KEYUSE *keyuse= *(KEYUSE **) arg;
+ if (!keyuse)
+ return (uint) (-1);
+ TABLE *table= keyuse->table;
+ uint key= keyuse->key;
+ uint fldno= keyuse->keypart;
+ for ( ;
+ keyuse->table == table && keyuse->key == key && keyuse->keypart == fldno;
+ keyuse++)
+ ;
+ if (keyuse->key != key)
+ keyuse= 0;
+ *((KEYUSE **) arg)= keyuse;
+ return fldno;
+}
+
static
bool generate_derived_keys_for_table(KEYUSE *keyuse, uint count, uint keys)
{
@@ -9931,12 +9965,28 @@ bool generate_derived_keys_for_table(KEYUSE *keyuse, uint count, uint keys)
}
else
{
- if (table->add_tmp_key(table->s->keys, parts,
- get_next_field_for_derived_key,
- (uchar *) &first_keyuse,
- FALSE))
- return TRUE;
- table->reginfo.join_tab->keys.set_bit(table->s->keys);
+ KEYUSE *save_first_keyuse= first_keyuse;
+ if (table->check_tmp_key(table->s->keys, parts,
+ get_next_field_for_derived_key_simple,
+ (uchar *) &first_keyuse))
+
+ {
+ first_keyuse= save_first_keyuse;
+ if (table->add_tmp_key(table->s->keys, parts,
+ get_next_field_for_derived_key,
+ (uchar *) &first_keyuse,
+ FALSE))
+ return TRUE;
+ table->reginfo.join_tab->keys.set_bit(table->s->keys);
+ }
+ else
+ {
+ /* Mark keyuses for this key to be excluded */
+ for (KEYUSE *curr=save_first_keyuse; curr < first_keyuse; curr++)
+ {
+ curr->key= MAX_KEY;
+ }
+ }
first_keyuse= keyuse;
key_count++;
parts= 0;
@@ -10023,7 +10073,7 @@ bool generate_derived_keys(DYNAMIC_ARRAY *keyuse_array)
void JOIN::drop_unused_derived_keys()
{
JOIN_TAB *tab;
- for (tab= first_linear_tab(this, WITHOUT_CONST_TABLES);
+ for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
tab;
tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
{
@@ -10711,7 +10761,7 @@ void check_join_cache_usage_for_tables(JOIN *join, ulonglong options,
JOIN_TAB *tab;
JOIN_TAB *prev_tab;
- for (tab= first_linear_tab(join, WITHOUT_CONST_TABLES);
+ for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
tab;
tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS))
{
@@ -10719,7 +10769,7 @@ void check_join_cache_usage_for_tables(JOIN *join, ulonglong options,
}
uint idx= join->const_tables;
- for (tab= first_linear_tab(join, WITHOUT_CONST_TABLES);
+ for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
tab;
tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS))
{
@@ -10893,7 +10943,8 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
tab->partial_join_cardinality= 1;
JOIN_TAB *prev_tab= NULL;
- for (tab= first_linear_tab(join, WITHOUT_CONST_TABLES), i= join->const_tables;
+ i= join->const_tables;
+ for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
tab;
prev_tab=tab, tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS))
{
@@ -10918,7 +10969,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
check_join_cache_usage_for_tables(join, options, no_jbuf_after);
JOIN_TAB *first_tab;
- for (tab= first_tab= first_linear_tab(join, WITHOUT_CONST_TABLES);
+ for (tab= first_tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
tab;
tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS))
{
@@ -11612,7 +11663,8 @@ void JOIN::cleanup(bool full)
}
if (full)
{
- JOIN_TAB *sort_tab= first_linear_tab(this, WITHOUT_CONST_TABLES);
+ JOIN_TAB *sort_tab= first_linear_tab(this, WITH_BUSH_ROOTS,
+ WITHOUT_CONST_TABLES);
if (pre_sort_join_tab)
{
if (sort_tab && sort_tab->select == pre_sort_join_tab->select)
@@ -11659,7 +11711,7 @@ void JOIN::cleanup(bool full)
}
else
{
- for (tab= first_linear_tab(this, WITH_CONST_TABLES); tab;
+ for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITH_CONST_TABLES); tab;
tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
{
if (tab->table)
@@ -11821,7 +11873,9 @@ only_eq_ref_tables(JOIN *join,ORDER *order,table_map tables)
static void update_depend_map(JOIN *join)
{
- for (JOIN_TAB *join_tab= first_linear_tab(join, WITH_CONST_TABLES); join_tab;
+ JOIN_TAB *join_tab;
+ for (join_tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITH_CONST_TABLES);
+ join_tab;
join_tab= next_linear_tab(join, join_tab, WITH_BUSH_ROOTS))
{
TABLE_REF *ref= &join_tab->ref;
@@ -21121,7 +21175,7 @@ cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref)
ref_pointer_array and all_fields are updated.
- @param[in] thd Pointer to current thread structure
+ @param[in] thd Pointer to current thread structure
@param[in,out] ref_pointer_array All select, group and order by fields
@param[in] tables List of tables to search in (usually
FROM clause)
@@ -21163,11 +21217,11 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
order_item->full_name(), thd->where);
return TRUE;
}
- order->item= ref_pointer_array + count - 1;
+ thd->change_item_tree((Item**)&order->item, (Item*)(ref_pointer_array + count - 1));
order->in_field_list= 1;
order->counter= count;
order->counter_used= 1;
- return FALSE;
+ return FALSE;
}
/* Lookup the current GROUP/ORDER field in the SELECT clause. */
select_item= find_item_in_list(order_item, fields, &counter,
@@ -21235,7 +21289,8 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
warning so the user knows that the field from the FROM clause
overshadows the column reference from the SELECT list.
*/
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_NON_UNIQ_ERROR,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_NON_UNIQ_ERROR,
ER(ER_NON_UNIQ_ERROR),
((Item_ident*) order_item)->field_name,
current_thd->where);
@@ -23005,13 +23060,11 @@ int print_explain_message_line(select_result_sink *result,
ha_rows *rows,
const char *message)
{
- const CHARSET_INFO *cs= system_charset_info;
Item *item_null= new Item_null();
List<Item> item_list;
item_list.push_back(new Item_int((int32) select_number));
- item_list.push_back(new Item_string(select_type,
- strlen(select_type), cs));
+ item_list.push_back(new Item_string_sys(select_type));
/* `table` */
item_list.push_back(item_null);
@@ -23046,7 +23099,7 @@ int print_explain_message_line(select_result_sink *result,
/* `Extra` */
if (message)
- item_list.push_back(new Item_string(message,strlen(message),cs));
+ item_list.push_back(new Item_string_sys(message));
else
item_list.push_back(item_null);
@@ -23107,45 +23160,39 @@ int print_explain_row(select_result_sink *result,
double r_filtered,
const char *extra)
{
- const CHARSET_INFO *cs= system_charset_info;
Item *item_null= new Item_null();
List<Item> item_list;
Item *item;
item_list.push_back(new Item_int((int32) select_number));
- item_list.push_back(new Item_string(select_type,
- strlen(select_type), cs));
- item_list.push_back(new Item_string(table_name,
- strlen(table_name), cs));
+ item_list.push_back(new Item_string_sys(select_type));
+ item_list.push_back(new Item_string_sys(table_name));
if (options & DESCRIBE_PARTITIONS)
{
if (partitions)
{
- item_list.push_back(new Item_string(partitions,
- strlen(partitions), cs));
+ item_list.push_back(new Item_string_sys(partitions));
}
else
item_list.push_back(item_null);
}
const char *jtype_str= join_type_str[jtype];
- item_list.push_back(new Item_string(jtype_str,
- strlen(jtype_str), cs));
+ item_list.push_back(new Item_string_sys(jtype_str));
- item= possible_keys? new Item_string(possible_keys, strlen(possible_keys),
- cs) : item_null;
+ item= possible_keys? new Item_string_sys(possible_keys) : item_null;
item_list.push_back(item);
/* 'index */
- item= index ? new Item_string(index, strlen(index), cs) : item_null;
+ item= index ? new Item_string_sys(index) : item_null;
item_list.push_back(item);
/* 'key_len */
- item= key_len ? new Item_string(key_len, strlen(key_len), cs) : item_null;
+ item= key_len ? new Item_string_sys(key_len) : item_null;
item_list.push_back(item);
/* 'ref' */
- item= ref ? new Item_string(ref, strlen(ref), cs) : item_null;
+ item= ref ? new Item_string_sys(ref) : item_null;
item_list.push_back(item);
/* 'rows' */
@@ -23180,7 +23227,7 @@ int print_explain_row(select_result_sink *result,
/* 'Extra' */
if (extra)
- item_list.push_back(new Item_string(extra, strlen(extra), cs));
+ item_list.push_back(new Item_string_sys(extra));
else
item_list.push_back(item_null);
@@ -23193,7 +23240,6 @@ int print_explain_row(select_result_sink *result,
int print_fake_select_lex_join(select_result_sink *result, bool on_the_fly,
SELECT_LEX *select_lex, uint8 explain_flags)
{
- const CHARSET_INFO *cs= system_charset_info;
Item *item_null= new Item_null();
List<Item> item_list;
if (on_the_fly)
@@ -23210,9 +23256,7 @@ int print_fake_select_lex_join(select_result_sink *result, bool on_the_fly,
/* id */
item_list.push_back(new Item_null);
/* select_type */
- item_list.push_back(new Item_string(select_lex->type,
- strlen(select_lex->type),
- cs));
+ item_list.push_back(new Item_string_sys(select_lex->type));
/* table */
{
SELECT_LEX *sl= select_lex->master_unit()->first_select();
@@ -23234,15 +23278,14 @@ int print_fake_select_lex_join(select_result_sink *result, bool on_the_fly,
len+= lastop;
table_name_buffer[len - 1]= '>'; // change ',' to '>'
}
- item_list.push_back(new Item_string(table_name_buffer, len, cs));
+ item_list.push_back(new Item_string_sys(table_name_buffer, len));
}
/* partitions */
if (explain_flags & DESCRIBE_PARTITIONS)
item_list.push_back(item_null);
/* type */
- item_list.push_back(new Item_string(join_type_str[JT_ALL],
- strlen(join_type_str[JT_ALL]),
- cs));
+ item_list.push_back(new Item_string_sys(join_type_str[JT_ALL]));
+
/* possible_keys */
item_list.push_back(item_null);
/* key*/
@@ -23258,10 +23301,9 @@ int print_fake_select_lex_join(select_result_sink *result, bool on_the_fly,
item_list.push_back(item_null);
/* extra */
if (select_lex->master_unit()->global_parameters->order_list.first)
- item_list.push_back(new Item_string("Using filesort",
- 14, cs));
+ item_list.push_back(new Item_string_sys("Using filesort", 14));
else
- item_list.push_back(new Item_string("", 0, cs));
+ item_list.push_back(new Item_string_sys("", 0));
if (result->send_data(item_list))
return 1;
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 63fd6a6d99f..ee953a351f9 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -1499,7 +1499,9 @@ private:
enum enum_with_bush_roots { WITH_BUSH_ROOTS, WITHOUT_BUSH_ROOTS};
enum enum_with_const_tables { WITH_CONST_TABLES, WITHOUT_CONST_TABLES};
-JOIN_TAB *first_linear_tab(JOIN *join, enum enum_with_const_tables const_tbls);
+JOIN_TAB *first_linear_tab(JOIN *join,
+ enum enum_with_bush_roots include_bush_roots,
+ enum enum_with_const_tables const_tbls);
JOIN_TAB *next_linear_tab(JOIN* join, JOIN_TAB* tab,
enum enum_with_bush_roots include_bush_roots);
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index fcca91c456b..d1c88e35b7a 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -116,6 +116,8 @@ static void get_cs_converted_string_value(THD *thd,
bool use_hex);
#endif
+static int show_create_view(THD *thd, TABLE_LIST *table, String *buff);
+
static void append_algorithm(TABLE_LIST *table, String *buff);
bool get_lookup_field_values(THD *, COND *, TABLE_LIST *, LOOKUP_FIELD_VALUES *);
@@ -1025,9 +1027,8 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list)
buffer.set_charset(table_list->view_creation_ctx->get_client_cs());
if ((table_list->view ?
- view_store_create_info(thd, table_list, &buffer) :
- store_create_info(thd, table_list, &buffer, NULL,
- FALSE /* show_database */, FALSE)))
+ show_create_view(thd, table_list, &buffer) :
+ show_create_table(thd, table_list, &buffer, NULL, WITHOUT_DB_NAME)))
goto exit;
if (table_list->view)
@@ -1283,9 +1284,22 @@ append_identifier(THD *thd, String *packet, const char *name, uint length)
it's a keyword
*/
+ /*
+ Special code for swe7. It encodes the letter "E WITH ACUTE" at
+ position 0x60, where the backtick normally resides.
+ In swe7 we cannot append 0x60 using system_charset_info,
+ because it cannot be converted to swe7 and would be replaced with
+ a question mark '?'. Use &my_charset_bin to avoid this:
+ it prevents conversion and appends the backtick as is.
+ */
+ CHARSET_INFO *quote_charset= q == 0x60 &&
+ (packet->charset()->state & MY_CS_NONASCII) &&
+ packet->charset()->mbmaxlen == 1 ?
+ &my_charset_bin : system_charset_info;
+
(void) packet->reserve(length*2 + 2);
quote_char= (char) q;
- if (packet->append(&quote_char, 1, system_charset_info))
+ if (packet->append(&quote_char, 1, quote_charset))
return true;
for (name_end= name+length ; name < name_end ; name+= length)
@@ -1302,12 +1316,12 @@ append_identifier(THD *thd, String *packet, const char *name, uint length)
if (!length)
length= 1;
if (length == 1 && chr == (uchar) quote_char &&
- packet->append(&quote_char, 1, system_charset_info))
+ packet->append(&quote_char, 1, quote_charset))
return true;
if (packet->append(name, length, system_charset_info))
return true;
}
- return packet->append(&quote_char, 1, system_charset_info);
+ return packet->append(&quote_char, 1, quote_charset);
}
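The swe7 special case above reduces to a small predicate: if the quote character is 0x60 and the target charset is a single-byte, non-ASCII one, append the quote using the binary charset so it survives conversion. A sketch with simplified stand-ins for CHARSET_INFO:

#include <cstdio>

// Simplified stand-ins for the charset properties consulted above.
struct Charset
{
  const char *name;
  bool non_ascii;      // analogue of the MY_CS_NONASCII state flag
  unsigned mbmaxlen;
};

static const Charset my_charset_bin = { "binary", false, 1 };
static const Charset system_charset = { "utf8",   false, 3 };

// Pick the charset used to append the quote character: for single-byte
// charsets that remap 0x60 (like swe7), fall back to binary so the backtick
// is appended verbatim instead of being converted to '?'.
static const Charset *quote_charset(char quote, const Charset *packet_cs)
{
  if (quote == 0x60 && packet_cs->non_ascii && packet_cs->mbmaxlen == 1)
    return &my_charset_bin;
  return &system_charset;
}

int main()
{
  Charset swe7   = { "swe7",   true,  1 };
  Charset latin1 = { "latin1", false, 1 };
  printf("%s %s\n",
         quote_charset('`', &swe7)->name,     // binary
         quote_charset('`', &latin1)->name);  // utf8
  return 0;
}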
@@ -1481,13 +1495,34 @@ static bool get_field_default_value(THD *thd, Field *field, String *def_value,
@param thd thread handler
@param packet string to append
@param opt list of options
+ @param check_options wrap unknown options in comments
+ @param rules list of known options
*/
static void append_create_options(THD *thd, String *packet,
- engine_option_value *opt)
+ engine_option_value *opt,
+ bool check_options,
+ ha_create_table_option *rules)
{
+ bool in_comment= false;
for(; opt; opt= opt->next)
{
+ if (check_options)
+ {
+ if (is_engine_option_known(opt, rules))
+ {
+ if (in_comment)
+ packet->append(STRING_WITH_LEN(" */"));
+ in_comment= false;
+ }
+ else
+ {
+ if (!in_comment)
+ packet->append(STRING_WITH_LEN(" /*"));
+ in_comment= true;
+ }
+ }
+
DBUG_ASSERT(opt->value.str);
packet->append(' ');
append_identifier(thd, packet, opt->name.str, opt->name.length);
@@ -1497,13 +1532,15 @@ static void append_create_options(THD *thd, String *packet,
else
packet->append(opt->value.str, opt->value.length);
}
+ if (in_comment)
+ packet->append(STRING_WITH_LEN(" */"));
}
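The check_options logic above prints unknown options inside a single /* ... */ run so the generated CREATE TABLE still parses. A simplified illustration using std::string in place of the server's String/packet machinery (struct and function names are made up for the sketch):

#include <string>
#include <vector>
#include <cstdio>

// Toy version of the option printer above: known options are emitted as-is,
// unknown ones are wrapped in a single /* ... */ comment run.  'Opt' and the
// 'known' flag are illustrative stand-ins for engine_option_value and the
// is_engine_option_known() check.
struct Opt { std::string name, value; bool known; };

static std::string append_options(const std::vector<Opt> &opts,
                                  bool check_options)
{
  std::string out;
  bool in_comment = false;
  for (const Opt &opt : opts)
  {
    if (check_options)
    {
      if (opt.known && in_comment)
      {
        out += " */";                     // close the comment run
        in_comment = false;
      }
      else if (!opt.known && !in_comment)
      {
        out += " /*";                     // open a comment run
        in_comment = true;
      }
    }
    out += " " + opt.name + "=" + opt.value;
  }
  if (in_comment)
    out += " */";                         // close a trailing comment run
  return out;
}

int main()
{
  std::vector<Opt> opts = { {"PAGE_CHECKSUM", "1", true},
                            {"FOO", "bar", false} };
  printf("%s\n", append_options(opts, true).c_str());
  // ->  PAGE_CHECKSUM=1 /* FOO=bar */
  return 0;
}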
/*
Build a CREATE TABLE statement for a table.
SYNOPSIS
- store_create_info()
+ show_create_table()
thd The thread
table_list A list containing one table to write statement
for.
@@ -1513,8 +1550,7 @@ static void append_create_options(THD *thd, String *packet,
to tailor the format of the statement. Can be
NULL, in which case only SQL_MODE is considered
when building the statement.
- show_database Add database name to table name
- create_or_replace Use CREATE OR REPLACE syntax
+ with_db_name Add database name to table name
NOTE
Currently always return 0, but might return error code in the
@@ -1524,9 +1560,9 @@ static void append_create_options(THD *thd, String *packet,
0 OK
*/
-int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
- HA_CREATE_INFO *create_info_arg, bool show_database,
- bool create_or_replace)
+int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
+ HA_CREATE_INFO *create_info_arg,
+ enum_with_db_name with_db_name)
{
List<Item> field_list;
char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], def_value_buf[MAX_FIELD_WIDTH];
@@ -1540,27 +1576,35 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
handler *file= table->file;
TABLE_SHARE *share= table->s;
HA_CREATE_INFO create_info;
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- bool show_table_options= FALSE;
-#endif /* WITH_PARTITION_STORAGE_ENGINE */
- bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL |
- MODE_ORACLE |
- MODE_MSSQL |
- MODE_DB2 |
- MODE_MAXDB |
- MODE_ANSI)) != 0;
- bool limited_mysql_mode= (thd->variables.sql_mode & (MODE_NO_FIELD_OPTIONS |
- MODE_MYSQL323 |
- MODE_MYSQL40)) != 0;
+ sql_mode_t sql_mode= thd->variables.sql_mode;
+ bool foreign_db_mode= sql_mode & (MODE_POSTGRESQL | MODE_ORACLE |
+ MODE_MSSQL | MODE_DB2 |
+ MODE_MAXDB | MODE_ANSI);
+ bool limited_mysql_mode= sql_mode & (MODE_NO_FIELD_OPTIONS | MODE_MYSQL323 |
+ MODE_MYSQL40);
+ bool show_table_options= !(sql_mode & MODE_NO_TABLE_OPTIONS) &&
+ !foreign_db_mode;
+ bool check_options= !(sql_mode & MODE_IGNORE_BAD_TABLE_OPTIONS) &&
+ !create_info_arg;
+ handlerton *hton;
my_bitmap_map *old_map;
int error= 0;
- DBUG_ENTER("store_create_info");
+ DBUG_ENTER("show_create_table");
DBUG_PRINT("enter",("table: %s", table->s->table_name.str));
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (table->part_info)
+ hton= table->part_info->default_engine_type;
+ else
+#endif
+ hton= file->ht;
+
restore_record(table, s->default_values); // Get empty record
packet->append(STRING_WITH_LEN("CREATE "));
- if (create_or_replace)
+ if (create_info_arg &&
+ (create_info_arg->org_options & HA_LEX_CREATE_REPLACE ||
+ create_info_arg->table_was_deleted))
packet->append(STRING_WITH_LEN("OR REPLACE "));
if (share->tmp_table)
packet->append(STRING_WITH_LEN("TEMPORARY "));
@@ -1587,7 +1631,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
avoid having to update gazillions of tests and result files, but
it also saves a few bytes of the binary log.
*/
- if (show_database)
+ if (with_db_name == WITH_DB_NAME)
{
const LEX_STRING *const db=
table_list->schema_table ? &INFORMATION_SCHEMA_NAME : &table->s->db;
@@ -1626,8 +1670,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
field->sql_type(type);
packet->append(type.ptr(), type.length(), system_charset_info);
- if (field->has_charset() &&
- !(thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)))
+ if (field->has_charset() && !(sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)))
{
if (field->charset() != share->table_charset)
{
@@ -1684,7 +1727,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
if (field->unireg_check == Field::NEXT_NUMBER &&
- !(thd->variables.sql_mode & MODE_NO_FIELD_OPTIONS))
+ !(sql_mode & MODE_NO_FIELD_OPTIONS))
packet->append(STRING_WITH_LEN(" AUTO_INCREMENT"));
if (field->comment.length)
@@ -1692,7 +1735,8 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
packet->append(STRING_WITH_LEN(" COMMENT "));
append_unescaped(packet, field->comment.str, field->comment.length);
}
- append_create_options(thd, packet, field->option_list);
+ append_create_options(thd, packet, field->option_list, check_options,
+ hton->field_options);
}
key_info= table->key_info;
@@ -1759,7 +1803,8 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
append_identifier(thd, packet, parser_name->str, parser_name->length);
packet->append(STRING_WITH_LEN(" */ "));
}
- append_create_options(thd, packet, key_info->option_list);
+ append_create_options(thd, packet, key_info->option_list, check_options,
+ hton->index_options);
}
/*
@@ -1774,12 +1819,8 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
}
packet->append(STRING_WITH_LEN("\n)"));
- if (!(thd->variables.sql_mode & MODE_NO_TABLE_OPTIONS) && !foreign_db_mode)
+ if (show_table_options)
{
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- show_table_options= TRUE;
-#endif /* WITH_PARTITION_STORAGE_ENGINE */
-
/*
IF check_create_info
THEN add ENGINE only if it was used when creating the table
@@ -1787,19 +1828,11 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
if (!create_info_arg ||
(create_info_arg->used_fields & HA_CREATE_USED_ENGINE))
{
- if (thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))
+ if (sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))
packet->append(STRING_WITH_LEN(" TYPE="));
else
packet->append(STRING_WITH_LEN(" ENGINE="));
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- if (table->part_info)
- packet->append(ha_resolve_storage_engine_name(
- table->part_info->default_engine_type));
- else
- packet->append(file->table_type());
-#else
- packet->append(file->table_type());
-#endif
+ packet->append(hton_name(hton));
}
/*
@@ -1821,9 +1854,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
packet->append(buff, (uint) (end - buff));
}
- if (share->table_charset &&
- !(thd->variables.sql_mode & MODE_MYSQL323) &&
- !(thd->variables.sql_mode & MODE_MYSQL40))
+ if (share->table_charset && !(sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)))
{
/*
IF check_create_info
@@ -1924,7 +1955,8 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
packet->append(STRING_WITH_LEN(" CONNECTION="));
append_unescaped(packet, share->connect_string.str, share->connect_string.length);
}
- append_create_options(thd, packet, share->option_list);
+ append_create_options(thd, packet, share->option_list, check_options,
+ hton->table_options);
append_directory(thd, packet, "DATA", create_info.data_file_name);
append_directory(thd, packet, "INDEX", create_info.index_file_name);
}
@@ -2076,8 +2108,7 @@ void append_definer(THD *thd, String *buffer, const LEX_STRING *definer_user,
}
-int
-view_store_create_info(THD *thd, TABLE_LIST *table, String *buff)
+static int show_create_view(THD *thd, TABLE_LIST *table, String *buff)
{
my_bool compact_view_name= TRUE;
my_bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL |
@@ -2222,77 +2253,77 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_VOID_RETURN;
+ if (thd->killed)
+ DBUG_VOID_RETURN;
+
mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
- if (!thd->killed)
+ I_List_iterator<THD> it(threads);
+ THD *tmp;
+ while ((tmp=it++))
{
- I_List_iterator<THD> it(threads);
- THD *tmp;
- while ((tmp=it++))
+ Security_context *tmp_sctx= tmp->security_ctx;
+ struct st_my_thread_var *mysys_var;
+ if ((tmp->vio_ok() || tmp->system_thread) &&
+ (!user || (tmp_sctx->user && !strcmp(tmp_sctx->user, user))))
{
- Security_context *tmp_sctx= tmp->security_ctx;
- struct st_my_thread_var *mysys_var;
- if ((tmp->vio_ok() || tmp->system_thread) &&
- (!user || (tmp_sctx->user && !strcmp(tmp_sctx->user, user))))
+ thread_info *thd_info= new thread_info;
+
+ thd_info->thread_id=tmp->thread_id;
+ thd_info->user= thd->strdup(tmp_sctx->user ? tmp_sctx->user :
+ (tmp->system_thread ?
+ "system user" : "unauthenticated user"));
+ if (tmp->peer_port && (tmp_sctx->host || tmp_sctx->ip) &&
+ thd->security_ctx->host_or_ip[0])
{
- thread_info *thd_info= new thread_info;
-
- thd_info->thread_id=tmp->thread_id;
- thd_info->user= thd->strdup(tmp_sctx->user ? tmp_sctx->user :
- (tmp->system_thread ?
- "system user" : "unauthenticated user"));
- if (tmp->peer_port && (tmp_sctx->host || tmp_sctx->ip) &&
- thd->security_ctx->host_or_ip[0])
- {
- if ((thd_info->host= (char*) thd->alloc(LIST_PROCESS_HOST_LEN+1)))
- my_snprintf((char *) thd_info->host, LIST_PROCESS_HOST_LEN,
- "%s:%u", tmp_sctx->host_or_ip, tmp->peer_port);
- }
- else
- thd_info->host= thd->strdup(tmp_sctx->host_or_ip[0] ?
- tmp_sctx->host_or_ip :
- tmp_sctx->host ? tmp_sctx->host : "");
- thd_info->command=(int) tmp->get_command();
- mysql_mutex_lock(&tmp->LOCK_thd_data);
- if ((thd_info->db= tmp->db)) // Safe test
- thd_info->db= thd->strdup(thd_info->db);
- if ((mysys_var= tmp->mysys_var))
- mysql_mutex_lock(&mysys_var->mutex);
- thd_info->proc_info= (char*) (tmp->killed >= KILL_QUERY ?
- "Killed" : 0);
- thd_info->state_info= thread_state_info(tmp);
- if (mysys_var)
- mysql_mutex_unlock(&mysys_var->mutex);
-
- /* Lock THD mutex that protects its data when looking at it. */
- if (tmp->query())
- {
- uint length= MY_MIN(max_query_length, tmp->query_length());
- char *q= thd->strmake(tmp->query(),length);
- /* Safety: in case strmake failed, we set length to 0. */
- thd_info->query_string=
- CSET_STRING(q, q ? length : 0, tmp->query_charset());
- }
+ if ((thd_info->host= (char*) thd->alloc(LIST_PROCESS_HOST_LEN+1)))
+ my_snprintf((char *) thd_info->host, LIST_PROCESS_HOST_LEN,
+ "%s:%u", tmp_sctx->host_or_ip, tmp->peer_port);
+ }
+ else
+ thd_info->host= thd->strdup(tmp_sctx->host_or_ip[0] ?
+ tmp_sctx->host_or_ip :
+ tmp_sctx->host ? tmp_sctx->host : "");
+ thd_info->command=(int) tmp->get_command();
+ mysql_mutex_lock(&tmp->LOCK_thd_data);
+ if ((thd_info->db= tmp->db)) // Safe test
+ thd_info->db= thd->strdup(thd_info->db);
+ if ((mysys_var= tmp->mysys_var))
+ mysql_mutex_lock(&mysys_var->mutex);
+ thd_info->proc_info= (char*) (tmp->killed >= KILL_QUERY ?
+ "Killed" : 0);
+ thd_info->state_info= thread_state_info(tmp);
+ if (mysys_var)
+ mysql_mutex_unlock(&mysys_var->mutex);
- /*
- Progress report. We need to do this under a lock to ensure that all
- is from the same stage.
- */
- if (tmp->progress.max_counter)
- {
- uint max_stage= MY_MAX(tmp->progress.max_stage, 1);
- thd_info->progress= (((tmp->progress.stage / (double) max_stage) +
- ((tmp->progress.counter /
- (double) tmp->progress.max_counter) /
- (double) max_stage)) *
- 100.0);
- set_if_smaller(thd_info->progress, 100);
- }
- else
- thd_info->progress= 0.0;
- thd_info->start_time= tmp->start_time;
- mysql_mutex_unlock(&tmp->LOCK_thd_data);
- thread_infos.append(thd_info);
+ /* Lock THD mutex that protects its data when looking at it. */
+ if (tmp->query())
+ {
+ uint length= MY_MIN(max_query_length, tmp->query_length());
+ char *q= thd->strmake(tmp->query(),length);
+ /* Safety: in case strmake failed, we set length to 0. */
+ thd_info->query_string=
+ CSET_STRING(q, q ? length : 0, tmp->query_charset());
}
+
+ /*
+ Progress report. We need to do this under a lock to ensure that all
+ is from the same stage.
+ */
+ if (tmp->progress.max_counter)
+ {
+ uint max_stage= MY_MAX(tmp->progress.max_stage, 1);
+ thd_info->progress= (((tmp->progress.stage / (double) max_stage) +
+ ((tmp->progress.counter /
+ (double) tmp->progress.max_counter) /
+ (double) max_stage)) *
+ 100.0);
+ set_if_smaller(thd_info->progress, 100);
+ }
+ else
+ thd_info->progress= 0.0;
+ thd_info->start_time= tmp->start_time;
+ mysql_mutex_unlock(&tmp->LOCK_thd_data);
+ thread_infos.append(thd_info);
}
}
mysql_mutex_unlock(&LOCK_thread_count);
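
The progress value pushed into thd_info above combines the number of completed stages with the fraction of the current stage, scaled to a 0..100 percentage. A minimal standalone sketch of that arithmetic (field names are hypothetical; the real values come from tmp->progress while LOCK_thd_data is held):

#include <algorithm>
#include <cstdio>

// Sketch of the formula used above: completed stages plus the fraction of
// the current stage, scaled to a 0..100 percentage and capped at 100.
static double compute_progress(unsigned stage, unsigned max_stage,
                               unsigned long long counter,
                               unsigned long long max_counter)
{
  if (max_counter == 0)                      // no progress info for this query
    return 0.0;
  unsigned stages= std::max(max_stage, 1u);  // mirrors MY_MAX(max_stage, 1)
  double p= ((stage / (double) stages) +
             (counter / (double) max_counter) / (double) stages) * 100.0;
  return std::min(p, 100.0);                 // mirrors set_if_smaller(..., 100)
}

int main()
{
  // Stage 2 of 4, half-way through the current stage -> 62.5
  std::printf("%.1f\n", compute_progress(2, 4, 50, 100));
  return 0;
}
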
@@ -2778,7 +2809,7 @@ int add_status_vars(SHOW_VAR *list)
{
int res= 0;
if (status_vars_inited)
- mysql_mutex_lock(&LOCK_status);
+ mysql_mutex_lock(&LOCK_show_status);
if (!all_status_vars.buffer && // array is not allocated yet - do it now
my_init_dynamic_array(&all_status_vars, sizeof(SHOW_VAR), 200, 20, MYF(0)))
{
@@ -2793,7 +2824,7 @@ int add_status_vars(SHOW_VAR *list)
sort_dynamic(&all_status_vars, show_var_cmp);
err:
if (status_vars_inited)
- mysql_mutex_unlock(&LOCK_status);
+ mysql_mutex_unlock(&LOCK_show_status);
return res;
}
@@ -2855,7 +2886,7 @@ void remove_status_vars(SHOW_VAR *list)
{
if (status_vars_inited)
{
- mysql_mutex_lock(&LOCK_status);
+ mysql_mutex_lock(&LOCK_show_status);
SHOW_VAR *all= dynamic_element(&all_status_vars, 0, SHOW_VAR *);
for (; list->name; list++)
@@ -2876,7 +2907,7 @@ void remove_status_vars(SHOW_VAR *list)
}
}
shrink_var_array(&all_status_vars);
- mysql_mutex_unlock(&LOCK_status);
+ mysql_mutex_unlock(&LOCK_show_status);
}
else
{
@@ -4769,7 +4800,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
str.qs_append(STRING_WITH_LEN(" transactional="));
str.qs_append(ha_choice_values[(uint) share->transactional]);
}
- append_create_options(thd, &str, share->option_list);
+ append_create_options(thd, &str, share->option_list, false, 0);
if (str.length())
table->field[19]->store(str.ptr()+1, str.length()-1, cs);
@@ -6903,7 +6934,7 @@ int fill_variables(THD *thd, TABLE_LIST *tables, COND *cond)
bool upper_case_names= lex->sql_command != SQLCOM_SHOW_VARIABLES;
bool sorted_vars= lex->sql_command == SQLCOM_SHOW_VARIABLES;
- if (lex->option_type == OPT_GLOBAL ||
+ if ((sorted_vars && lex->option_type == OPT_GLOBAL) ||
schema_table_idx == SCH_GLOBAL_VARIABLES)
scope= OPT_GLOBAL;
@@ -6954,14 +6985,20 @@ int fill_status(THD *thd, TABLE_LIST *tables, COND *cond)
if (partial_cond)
partial_cond->val_int();
- mysql_mutex_lock(&LOCK_status);
if (scope == OPT_GLOBAL)
+ {
+ /* We only hold LOCK_status for summary status vars */
+ mysql_mutex_lock(&LOCK_status);
calc_sum_of_all_status(&tmp);
+ mysql_mutex_unlock(&LOCK_status);
+ }
+
+ mysql_mutex_lock(&LOCK_show_status);
res= show_status_array(thd, wild,
(SHOW_VAR *)all_status_vars.buffer,
scope, tmp1, "", tables->table,
upper_case_names, partial_cond);
- mysql_mutex_unlock(&LOCK_status);
+ mysql_mutex_unlock(&LOCK_show_status);
DBUG_RETURN(res);
}
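
The hunks above and below split the old single status lock in two: LOCK_status is now held only while summing the per-thread counters, while the registry of status variables (add/remove and SHOW STATUS iteration) moves under LOCK_show_status. A minimal sketch of that pattern, with standard mutexes standing in for mysql_mutex_t and assumed member names:

#include <mutex>
#include <vector>

// Sketch of splitting one hot lock into two narrower ones.
struct StatusRegistry
{
  std::mutex registry_lock;      // plays the role of LOCK_show_status
  std::mutex counters_lock;      // plays the role of LOCK_status
  std::vector<const char*> vars; // registered variable names
  long long global_sum= 0;       // summary counters

  void add_vars(const char *name)
  {
    std::lock_guard<std::mutex> g(registry_lock);  // registry changes only
    vars.push_back(name);
  }

  long long sum_of_all_status()
  {
    std::lock_guard<std::mutex> g(counters_lock);  // counters only
    return global_sum;
  }
};
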
@@ -7675,7 +7712,8 @@ bool optimize_schema_tables_reads(JOIN *join)
bool result= 0;
DBUG_ENTER("optimize_schema_tables_reads");
- for (JOIN_TAB *tab= first_linear_tab(join, WITH_CONST_TABLES);
+ JOIN_TAB *tab;
+ for (tab= first_linear_tab(join, WITHOUT_BUSH_ROOTS, WITH_CONST_TABLES);
tab;
tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS))
{
@@ -7743,8 +7781,9 @@ bool get_schema_tables_result(JOIN *join,
Warnings_only_error_handler err_handler;
thd->push_internal_handler(&err_handler);
old_proc_info= thd_proc_info(thd, "Filling schema table");
-
- for (JOIN_TAB *tab= first_linear_tab(join, WITH_CONST_TABLES);
+
+ JOIN_TAB *tab;
+ for (tab= first_linear_tab(join, WITHOUT_BUSH_ROOTS, WITH_CONST_TABLES);
tab;
tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS))
{
diff --git a/sql/sql_show.h b/sql/sql_show.h
index a759c8d94f5..bad2b41c52c 100644
--- a/sql/sql_show.h
+++ b/sql/sql_show.h
@@ -74,10 +74,10 @@ typedef struct system_status_var STATUS_VAR;
#define IS_FILES_STATUS 36
#define IS_FILES_EXTRA 37
-int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
- HA_CREATE_INFO *create_info_arg, bool show_database,
- bool create_or_replace);
-int view_store_create_info(THD *thd, TABLE_LIST *table, String *buff);
+typedef enum { WITHOUT_DB_NAME, WITH_DB_NAME } enum_with_db_name;
+int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
+ HA_CREATE_INFO *create_info_arg,
+ enum_with_db_name with_db_name);
int copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table);
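
The new enum_with_db_name replaces the old bool show_database flag, so call sites read show_create_table(..., WITH_DB_NAME) instead of an opaque TRUE/FALSE. A tiny sketch of the idiom (names hypothetical):

#include <string>

// Hypothetical illustration of the bool-to-enum refactoring above.
enum enum_with_db_name { WITHOUT_DB_NAME, WITH_DB_NAME };

static std::string format_table_name(const std::string &db,
                                     const std::string &table,
                                     enum_with_db_name with_db_name)
{
  // The named constant documents intent at the call site.
  return with_db_name == WITH_DB_NAME ? db + "." + table : table;
}

// format_table_name("test", "t1", WITH_DB_NAME)    -> "test.t1"
// format_table_name("test", "t1", WITHOUT_DB_NAME) -> "t1"
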
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index 67e7a9c304b..9acd3d98322 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -184,7 +184,7 @@ private:
public:
inline void init(THD *thd, Field * table_field);
- inline void add(ha_rows rowno);
+ inline bool add(ha_rows rowno);
inline void finish(ha_rows rows);
inline void cleanup();
};
@@ -1550,6 +1550,7 @@ public:
uint key_parts= table->actual_n_key_parts(key_info);
empty= TRUE;
prefixes= 0;
+ LINT_INIT(calc_state);
is_single_comp_pk= FALSE;
uint pk= table->s->primary_key;
@@ -2218,9 +2219,10 @@ void Column_statistics_collected::init(THD *thd, Field *table_field)
*/
inline
-void Column_statistics_collected::add(ha_rows rowno)
+bool Column_statistics_collected::add(ha_rows rowno)
{
+ bool err= 0;
if (column->is_null())
nulls++;
else
@@ -2231,8 +2233,9 @@ void Column_statistics_collected::add(ha_rows rowno)
if (max_value && column->update_max(max_value, rowno == nulls))
set_not_null(COLUMN_STAT_MAX_VALUE);
if (count_distinct)
- count_distinct->add();
+ err= count_distinct->add();
}
+ return err;
}
@@ -2486,8 +2489,11 @@ int collect_statistics_for_table(THD *thd, TABLE *table)
table_field= *field_ptr;
if (!bitmap_is_set(table->read_set, table_field->field_index))
continue;
- table_field->collected_stats->add(rows);
+ if ((rc= table_field->collected_stats->add(rows)))
+ break;
}
+ if (rc)
+ break;
rows++;
}
file->ha_rnd_end();
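
collect_statistics_for_table() now checks the return value of Column_statistics_collected::add() and stops the scan on the first failure instead of silently continuing. A compact sketch of that control flow, with a hypothetical accumulator type standing in for the collected-statistics object:

#include <vector>

struct ColumnStats
{
  // Stand-in for Column_statistics_collected::add(); returns true on error.
  bool add(unsigned long long /*rowno*/) { return false; }
};

// Scan rows, feed every selected column, and bail out on the first error,
// mirroring the double break pattern in the hunk above.
static int collect_stats(std::vector<ColumnStats> &columns,
                         unsigned long long n_rows)
{
  int rc= 0;
  for (unsigned long long row= 0; row < n_rows && !rc; row++)
  {
    for (ColumnStats &col : columns)
    {
      if ((rc= col.add(row)))   // propagate the failure
        break;                  // inner break ...
    }
  }                             // ... the loop condition stops the outer scan
  return rc;
}
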
@@ -2517,7 +2523,7 @@ int collect_statistics_for_table(THD *thd, TABLE *table)
else
table_field->collected_stats->cleanup();
}
-bitmap_clear_all(table->write_set);
+ bitmap_clear_all(table->write_set);
if (!rc)
{
diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h
index 331e3559203..c399951b828 100644
--- a/sql/sql_statistics.h
+++ b/sql/sql_statistics.h
@@ -147,7 +147,7 @@ private:
case SINGLE_PREC_HB:
return (uint) (((uint8 *) values)[i]);
case DOUBLE_PREC_HB:
- return (uint) (((uint16 *) values)[i]);
+ return (uint) uint2korr(values + i * 2);
}
return 0;
}
@@ -214,7 +214,7 @@ public:
((uint8 *) values)[i]= (uint8) (val * prec_factor());
return;
case DOUBLE_PREC_HB:
- ((uint16 *) values)[i]= (uint16) (val * prec_factor());
+ int2store(values + i * 2, val * prec_factor());
return;
}
}
@@ -226,7 +226,7 @@ public:
((uint8 *) values)[i]= ((uint8 *) values)[i-1];
return;
case DOUBLE_PREC_HB:
- ((uint16 *) values)[i]= ((uint16 *) values)[i-1];
+ int2store(values + i * 2, uint2korr(values + i * 2 - 2));
return;
}
}
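
The DOUBLE_PREC_HB histogram buckets are now read and written with uint2korr()/int2store() instead of casting the byte array to uint16*, which avoids unaligned accesses and keeps the stored byte order platform-independent. A simplified, portable sketch of those two helpers (the real macros live in the server headers):

#include <cstdint>

// Simplified stand-ins for int2store()/uint2korr(): always little-endian,
// always byte-wise, so they are safe on any alignment and any host order.
static inline void int2store_sketch(unsigned char *pos, uint16_t val)
{
  pos[0]= (unsigned char) (val & 0xFF);
  pos[1]= (unsigned char) (val >> 8);
}

static inline uint16_t uint2korr_sketch(const unsigned char *pos)
{
  return (uint16_t) (pos[0] | (pos[1] << 8));
}

// Usage mirroring the histogram code: bucket i lives at values + i * 2.
static uint16_t get_bucket(const unsigned char *values, unsigned i)
{ return uint2korr_sketch(values + i * 2); }

static void set_bucket(unsigned char *values, unsigned i, uint16_t v)
{ int2store_sketch(values + i * 2, v); }
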
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index f8348cfb30e..a7bfa6c1455 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -580,7 +580,7 @@ bool String::append_with_prefill(const char *s,uint32 arg_length,
return FALSE;
}
-uint32 String::numchars()
+uint32 String::numchars() const
{
return str_charset->cset->numchars(str_charset, Ptr, Ptr+str_length);
}
@@ -1022,8 +1022,15 @@ well_formed_copy_nchars(CHARSET_INFO *to_cs,
wc= '?';
}
else
- break; // Not enough characters
-
+ {
+ if ((uchar *) from >= from_end)
+ break; // End of line
+ // Incomplete byte sequence
+ if (!*well_formed_error_pos)
+ *well_formed_error_pos= from;
+ from++;
+ wc= '?';
+ }
outp:
if ((cnvres= (*wc_mb)(to_cs, wc, (uchar*) to, to_end)) > 0)
to+= cnvres;
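
The new branch above distinguishes real end-of-input from a truncated or invalid multi-byte sequence: instead of stopping the copy, it records the first error position, skips one byte and emits '?'. A simplified sketch of that recovery loop, with a trivial ASCII-only decoder standing in for the charset's mb_wc handler:

#include <cstddef>
#include <string>

// Stand-in for mb_wc(): returns bytes consumed on success, 0 at end of
// input, -1 for an invalid byte. ASCII only, to keep the sketch tiny.
static int decode_one(const unsigned char *from, const unsigned char *end,
                      unsigned long *wc)
{
  if (from >= end)
    return 0;
  if (*from < 0x80)            // valid single-byte character
  {
    *wc= *from;
    return 1;
  }
  return -1;                   // anything else: treat as a bad byte
}

// Copy characters, replacing bad or truncated sequences with '?' and
// remembering where the first problem was -- the shape of the hunk above.
static std::string lenient_copy(const unsigned char *from,
                                const unsigned char *from_end,
                                const unsigned char **first_error)
{
  std::string out;
  *first_error= nullptr;
  for (;;)
  {
    unsigned long wc;
    int res= decode_one(from, from_end, &wc);
    if (res == 0)
      break;                   // end of input
    if (res < 0)
    {
      if (!*first_error)
        *first_error= from;    // remember the first offending byte
      from++;                  // skip one byte and keep going
      wc= '?';
    }
    else
      from+= res;
    out+= (char) wc;
  }
  return out;
}
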
@@ -1074,7 +1081,7 @@ bool String::append_for_single_quote(const char *st, uint len)
return 0;
}
-void String::print(String *str)
+void String::print(String *str) const
{
str->append_for_single_quote(Ptr, str_length);
}
diff --git a/sql/sql_string.h b/sql/sql_string.h
index 0b7e949392d..f6f0344e2f1 100644
--- a/sql/sql_string.h
+++ b/sql/sql_string.h
@@ -353,6 +353,10 @@ public:
bool set_or_copy_aligned(const char *s, uint32 arg_length, CHARSET_INFO *cs);
bool copy(const char*s,uint32 arg_length, CHARSET_INFO *csfrom,
CHARSET_INFO *csto, uint *errors);
+ bool copy(const String *str, CHARSET_INFO *tocs, uint *errors)
+ {
+ return copy(str->ptr(), str->length(), str->charset(), tocs, errors);
+ }
void move(String &s)
{
free();
@@ -409,7 +413,7 @@ public:
friend int stringcmp(const String *a,const String *b);
friend String *copy_if_not_alloced(String *a,String *b,uint32 arg_length);
friend class Field;
- uint32 numchars();
+ uint32 numchars() const;
int charpos(longlong i,uint32 offset=0);
int reserve(uint32 space_needed)
@@ -500,7 +504,7 @@ public:
str_length+= arg_length;
return FALSE;
}
- void print(String *print);
+ void print(String *print) const;
bool append_for_single_quote(const char *st, uint len);
bool append_for_single_quote(const String *s)
@@ -519,6 +523,12 @@ public:
{
return (s->alloced && Ptr >= s->Ptr && Ptr < s->Ptr + s->str_length);
}
+ uint well_formed_length() const
+ {
+ int dummy_error;
+ return charset()->cset->well_formed_len(charset(), ptr(), ptr() + length(),
+ length(), &dummy_error);
+ }
bool is_ascii() const
{
if (length() == 0)
@@ -532,6 +542,15 @@ public:
}
return TRUE;
}
+ bool bin_eq(const String *other) const
+ {
+ return length() == other->length() &&
+ !memcmp(ptr(), other->ptr(), length());
+ }
+ bool eq(const String *other, CHARSET_INFO *cs) const
+ {
+ return !sortcmp(this, other, cs);
+ }
};
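
String::bin_eq() compares length plus raw bytes, while the new eq() delegates to sortcmp() and therefore honours the collation (case folding, padding rules, and so on). A small sketch of the distinction, using ASCII case folding as a stand-in for a real collation:

#include <cctype>
#include <cstring>
#include <string>

// Byte-for-byte equality: same length and identical memory.
static bool bin_eq_sketch(const std::string &a, const std::string &b)
{
  return a.size() == b.size() && memcmp(a.data(), b.data(), a.size()) == 0;
}

// Collation-aware equality, here approximated by ASCII case folding
// (a real collation also handles accents, contractions, trailing spaces).
static bool ci_eq_sketch(const std::string &a, const std::string &b)
{
  if (a.size() != b.size())
    return false;
  for (size_t i= 0; i < a.size(); i++)
    if (tolower((unsigned char) a[i]) != tolower((unsigned char) b[i]))
      return false;
  return true;
}

// bin_eq_sketch("abc", "ABC") -> false, ci_eq_sketch("abc", "ABC") -> true
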
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 68c25438f0c..b991215d30a 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2014, Oracle and/or its affiliates.
- Copyright (c) 2010, 2014, Monty Program Ab.
+ Copyright (c) 2010, 2014, SkySQL Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -75,6 +75,7 @@ static bool prepare_blob_field(THD *thd, Create_field *sql_field);
static bool check_engine(THD *, const char *, const char *, HA_CREATE_INFO *);
static int mysql_prepare_create_table(THD *, HA_CREATE_INFO *, Alter_info *,
uint *, handler *, KEY **, uint *, int);
+static uint blob_length_by_type(enum_field_types type);
/**
@brief Helper function for explain_filename
@@ -3791,7 +3792,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
CHARSET_INFO *ft_key_charset=0; // for FULLTEXT
for (uint column_nr=0 ; (column=cols++) ; column_nr++)
{
- uint length;
Key_part_spec *dup_column;
it.rewind();
@@ -3869,7 +3869,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
}
if (f_is_geom(sql_field->pack_flag) && sql_field->geom_type ==
Field::GEOM_POINT)
- column->length= 25;
+ column->length= MAX_LEN_GEOM_POINT_FIELD;
if (!column->length)
{
my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name.str);
@@ -3935,30 +3935,31 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
key_part_info->fieldnr= field;
key_part_info->offset= (uint16) sql_field->offset;
key_part_info->key_type=sql_field->pack_flag;
- length= sql_field->key_length;
+ uint key_part_length= sql_field->key_length;
if (column->length)
{
if (f_is_blob(sql_field->pack_flag))
{
- if ((length=column->length) > max_key_length ||
- length > file->max_key_part_length())
+ key_part_length= MY_MIN(column->length,
+ blob_length_by_type(sql_field->sql_type)
+ * sql_field->charset->mbmaxlen);
+ if (key_part_length > max_key_length ||
+ key_part_length > file->max_key_part_length())
{
- length=MY_MIN(max_key_length, file->max_key_part_length());
+ key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
if (key->type == Key::MULTIPLE)
{
/* not a critical problem */
- char warn_buff[MYSQL_ERRMSG_SIZE];
- my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_KEY),
- length);
- push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_TOO_LONG_KEY, warn_buff);
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_TOO_LONG_KEY, ER(ER_TOO_LONG_KEY),
+ key_part_length);
/* Align key length to multibyte char boundary */
- length-= length % sql_field->charset->mbmaxlen;
+ key_part_length-= key_part_length % sql_field->charset->mbmaxlen;
}
else
{
- my_error(ER_TOO_LONG_KEY,MYF(0),length);
+ my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length);
DBUG_RETURN(TRUE);
}
}
@@ -3966,9 +3967,9 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
// Catch invalid use of partial keys
else if (!f_is_geom(sql_field->pack_flag) &&
// is the key partial?
- column->length != length &&
+ column->length != key_part_length &&
// is prefix length bigger than field length?
- (column->length > length ||
+ (column->length > key_part_length ||
// can the field have a partial key?
!Field::type_can_have_key_part (sql_field->sql_type) ||
// a packed field can't be used in a partial key
@@ -3977,44 +3978,43 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
((file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS) &&
// and is this a 'unique' key?
(key_info->flags & HA_NOSAME))))
- {
+ {
my_message(ER_WRONG_SUB_KEY, ER(ER_WRONG_SUB_KEY), MYF(0));
DBUG_RETURN(TRUE);
}
else if (!(file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS))
- length=column->length;
+ key_part_length= column->length;
}
- else if (length == 0 && (sql_field->flags & NOT_NULL_FLAG))
+ else if (key_part_length == 0 && (sql_field->flags & NOT_NULL_FLAG))
{
my_error(ER_WRONG_KEY_COLUMN, MYF(0), file->table_type(),
column->field_name.str);
DBUG_RETURN(TRUE);
}
- if (length > file->max_key_part_length() && key->type != Key::FULLTEXT)
+ if (key_part_length > file->max_key_part_length() &&
+ key->type != Key::FULLTEXT)
{
- length= file->max_key_part_length();
+ key_part_length= file->max_key_part_length();
if (key->type == Key::MULTIPLE)
{
/* not a critical problem */
- char warn_buff[MYSQL_ERRMSG_SIZE];
- my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_KEY),
- length);
- push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_TOO_LONG_KEY, warn_buff);
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_TOO_LONG_KEY, ER(ER_TOO_LONG_KEY),
+ key_part_length);
/* Align key length to multibyte char boundary */
- length-= length % sql_field->charset->mbmaxlen;
+ key_part_length-= key_part_length % sql_field->charset->mbmaxlen;
}
else
{
- my_error(ER_TOO_LONG_KEY,MYF(0),length);
+ my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length);
DBUG_RETURN(TRUE);
}
}
- key_part_info->length=(uint16) length;
+ key_part_info->length= (uint16) key_part_length;
/* Use packed keys for long strings on the first column */
if (!((*db_options) & HA_OPTION_NO_PACK_KEYS) &&
!((create_info->table_options & HA_OPTION_NO_PACK_KEYS)) &&
- (length >= KEY_DEFAULT_PACK_LENGTH &&
+ (key_part_length >= KEY_DEFAULT_PACK_LENGTH &&
(sql_field->sql_type == MYSQL_TYPE_STRING ||
sql_field->sql_type == MYSQL_TYPE_VARCHAR ||
sql_field->pack_flag & FIELDFLAG_BLOB)))
@@ -4026,10 +4026,10 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
key_info->flags|= HA_PACK_KEY;
}
/* Check if the key segment is partial, set the key flag accordingly */
- if (length != sql_field->key_length)
+ if (key_part_length != sql_field->key_length)
key_info->flags|= HA_KEY_HAS_PART_KEY_SEG;
- key_length+=length;
+ key_length+= key_part_length;
key_part_info++;
/* Create the key name based on the first column (if not given) */
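
The renamed key_part_length is first capped against the blob type's maximum and the engine's max_key_part_length(), then trimmed back to a whole number of characters for multi-byte charsets before the ER_TOO_LONG_KEY warning is raised. A compact sketch of that clamping, with hypothetical limits:

#include <algorithm>
#include <cstdio>

// Clamp a requested key prefix (in bytes) to the engine limit and align it
// down to a whole number of characters, as the hunks above do.
static unsigned clamp_key_prefix(unsigned requested_bytes,
                                 unsigned engine_max_bytes,
                                 unsigned mbmaxlen)       // bytes per char
{
  unsigned len= std::min(requested_bytes, engine_max_bytes);
  len-= len % mbmaxlen;          // never split a multi-byte character
  return len;
}

int main()
{
  // utf8 (3 bytes/char), engine limit 1000 bytes, user asked for 1024:
  // -> 999 bytes, i.e. 333 whole characters.
  std::printf("%u\n", clamp_key_prefix(1024, 1000, 3));
  return 0;
}
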
@@ -4351,9 +4351,6 @@ handler *mysql_create_frm_image(THD *thd,
DBUG_RETURN(NULL);
}
- if (check_engine(thd, db, table_name, create_info))
- DBUG_RETURN(NULL);
-
set_table_default_charset(thd, create_info, (char*) db);
db_options= create_info->table_options;
@@ -4759,6 +4756,9 @@ int create_table_impl(THD *thd,
THD_STAGE_INFO(thd, stage_creating_table);
+ if (check_engine(thd, orig_db, orig_table_name, create_info))
+ goto err;
+
if (create_table_mode == C_ASSISTED_DISCOVERY)
{
/* check that it's used correctly */
@@ -4950,7 +4950,7 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table,
const char *db= create_table->db;
const char *table_name= create_table->table_name;
bool is_trans= FALSE;
- bool result= 0;
+ bool result;
int create_table_mode;
TABLE_LIST *pos_in_locked_tables= 0;
MDL_ticket *mdl_ticket= 0;
@@ -4958,8 +4958,16 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table,
DBUG_ASSERT(create_table == thd->lex->query_tables);
+ /* Copy temporarily the statement flags to thd for lock_table_names() */
+ uint save_thd_create_info_options= thd->lex->create_info.options;
+ thd->lex->create_info.options|= create_info->options;
+
/* Open or obtain an exclusive metadata lock on table being created */
- if (open_and_lock_tables(thd, create_table, FALSE, 0))
+ result= open_and_lock_tables(thd, create_table, FALSE, 0);
+
+ thd->lex->create_info.options= save_thd_create_info_options;
+
+ if (result)
{
/* is_error() may be 0 if table existed and we generated a warning */
DBUG_RETURN(thd->is_error());
@@ -5000,7 +5008,10 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table,
*/
thd->locked_tables_list.add_back_last_deleted_lock(pos_in_locked_tables);
if (thd->locked_tables_list.reopen_tables(thd))
+ {
thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0);
+ result= 1;
+ }
else
{
TABLE *table= pos_in_locked_tables->table;
@@ -5260,8 +5271,16 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
Thus by holding both these locks we ensure that our statement is
properly isolated from all concurrent operations which matter.
*/
- if (open_tables(thd, &thd->lex->query_tables, &not_used, 0))
+
+ /* Copy temporarily the statement flags to thd for lock_table_names() */
+ uint save_thd_create_info_options= thd->lex->create_info.options;
+ thd->lex->create_info.options|= create_info->options;
+ res= open_tables(thd, &thd->lex->query_tables, &not_used, 0);
+ thd->lex->create_info.options= save_thd_create_info_options;
+
+ if (res)
{
+ /* is_error() may be 0 if table existed and we generated a warning */
res= thd->is_error();
goto err;
}
@@ -5344,7 +5363,10 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
*/
thd->locked_tables_list.add_back_last_deleted_lock(pos_in_locked_tables);
if (thd->locked_tables_list.reopen_tables(thd))
+ {
thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0);
+ res= 1; // We got an error
+ }
else
{
/*
@@ -5419,7 +5441,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
table->open_strategy= TABLE_LIST::OPEN_NORMAL;
/*
- In order for store_create_info() to work we need to open
+ In order for show_create_table() to work we need to open
destination table if it is not already open (i.e. if it
has not existed before). We don't need acquire metadata
lock in order to do this as we already hold exclusive
@@ -5443,13 +5465,9 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
if (!table->view)
{
int result __attribute__((unused))=
- store_create_info(thd, table, &query,
- create_info, FALSE /* show_database */,
- MY_TEST(create_info->org_options &
- HA_LEX_CREATE_REPLACE) ||
- create_info->table_was_deleted);
+ show_create_table(thd, table, &query, create_info, WITHOUT_DB_NAME);
- DBUG_ASSERT(result == 0); // store_create_info() always return 0
+ DBUG_ASSERT(result == 0); // show_create_table() always returns 0

do_logging= FALSE;
if (write_bin_log(thd, TRUE, query.ptr(), query.length()))
{
diff --git a/sql/sql_table.h b/sql/sql_table.h
index 6a7fddb96ab..c3e903aa505 100644
--- a/sql/sql_table.h
+++ b/sql/sql_table.h
@@ -117,6 +117,9 @@ enum enum_explain_filename_mode
EXPLAIN_PARTITIONS_AS_COMMENT
};
+/* Maximum length of GEOM_POINT Field */
+#define MAX_LEN_GEOM_POINT_FIELD 25
+
/* depends on errmsg.txt Database `db`, Table `t` ... */
#define EXPLAIN_FILENAME_MAX_EXTRA_LENGTH 63
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index ae16a281277..60e9b2cc54c 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -468,8 +468,7 @@ static void display_table_locks(void)
DYNAMIC_ARRAY saved_table_locks;
(void) my_init_dynamic_array(&saved_table_locks,sizeof(TABLE_LOCK_INFO),
- tc_records() + 20, 50,
- MYF(MY_THREAD_SPECIFIC));
+ tc_records() + 20, 50, MYF(0));
mysql_mutex_lock(&THR_LOCK_lock);
for (list= thr_lock_thread_list; list; list= list_rest(list))
{
@@ -576,7 +575,6 @@ void mysql_print_status()
/* Print key cache status */
puts("\nKey caches:");
process_key_caches(print_key_cache_status, 0);
- mysql_mutex_lock(&LOCK_status);
printf("\nhandler status:\n\
read_key: %10lu\n\
read_next: %10lu\n\
@@ -592,7 +590,6 @@ update: %10lu\n",
tmp.ha_write_count,
tmp.ha_delete_count,
tmp.ha_update_count);
- mysql_mutex_unlock(&LOCK_status);
printf("\nTable status:\n\
Opened tables: %10lu\n\
Open tables: %10lu\n\
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 9d068e464f5..fe8bb7a6620 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -1,4 +1,5 @@
-/* Copyright (c) 2000, 2011, Oracle and/or its affiliates.
+/* Copyright (c) 2000, 2014, Oracle and/or its affiliates.
+ Copyright (c) 2010, 2014, SkySQL Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index aa290c91569..fa5b6968795 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -1446,11 +1446,15 @@ int mysql_multi_update_prepare(THD *thd)
another table instance used by this statement which is going to
be write-locked (for example, trigger to be invoked might try
to update this table).
+ The last argument, routine_modifies_data, of read_lock_type_for_table()
+ is ignored, as the prelocking placeholder will never be set here.
*/
+ DBUG_ASSERT(tl->prelocking_placeholder == false);
+ thr_lock_type lock_type= read_lock_type_for_table(thd, lex, tl, true);
if (using_lock_tables)
- tl->lock_type= read_lock_type_for_table(thd, lex, tl);
+ tl->lock_type= lock_type;
else
- tl->set_lock_type(thd, read_lock_type_for_table(thd, lex, tl));
+ tl->set_lock_type(thd, lock_type);
tl->updating= 0;
}
}
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index a18193c6eb6..07169f299d7 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -400,9 +400,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
TABLE_LIST *tables= lex->query_tables;
TABLE_LIST *tbl;
SELECT_LEX *select_lex= &lex->select_lex;
-#ifndef NO_EMBEDDED_ACCESS_CHECKS
SELECT_LEX *sl;
-#endif
SELECT_LEX_UNIT *unit= &lex->unit;
bool res= FALSE;
DBUG_ENTER("mysql_create_view");
@@ -547,7 +545,8 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
}
/* Check if the auto generated column names are conforming. */
- make_valid_column_names(select_lex->item_list);
+ for (sl= select_lex; sl; sl= sl->next_select())
+ make_valid_column_names(sl->item_list);
if (check_duplicate_names(select_lex->item_list, 1))
{
@@ -624,7 +623,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
if (!res)
tdc_remove_table(thd, TDC_RT_REMOVE_ALL, view->db, view->table_name, false);
- if (mysql_bin_log.is_open())
+ if (!res && mysql_bin_log.is_open())
{
String buff;
const LEX_STRING command[3]=
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index e7fcdfbe596..41852e36b9b 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -77,7 +77,7 @@ int yylex(void *yylval, void *yythd);
ulong val= *(F); \
if (my_yyoverflow((B), (D), &val)) \
{ \
- yyerror(current_thd, (char*) (A)); \
+ yyerror(thd, (char*) (A)); \
return 2; \
} \
else \
@@ -1606,7 +1606,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%type <lex_str>
IDENT IDENT_QUOTED TEXT_STRING DECIMAL_NUM FLOAT_NUM NUM LONG_NUM
- HEX_NUM HEX_STRING hex_num_or_string
+ HEX_NUM HEX_STRING
LEX_HOSTNAME ULONGLONG_NUM field_ident select_alias ident ident_or_text
IDENT_sys TEXT_STRING_sys TEXT_STRING_literal
NCHAR_STRING opt_component key_cache_name
@@ -1625,7 +1625,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
wild_and_where
%type <string>
- text_string opt_gconcat_separator
+ text_string hex_or_bin_String opt_gconcat_separator
%type <num>
type type_with_opt_collate int_type real_type order_dir lock_option
@@ -6278,7 +6278,8 @@ spatial_type:
| GEOMETRYCOLLECTION { $$= Field::GEOM_GEOMETRYCOLLECTION; }
| POINT_SYM
{
- Lex->length= (char*)"25";
+ Lex->length= const_cast<char*>(STRINGIFY_ARG
+ (MAX_LEN_GEOM_POINT_FIELD));
$$= Field::GEOM_POINT;
}
| MULTIPOINT { $$= Field::GEOM_MULTIPOINT; }
@@ -6498,11 +6499,6 @@ now_or_signed_literal:
{ $$=$1; }
;
-hex_num_or_string:
- HEX_NUM {}
- | HEX_STRING {}
- ;
-
charset:
CHAR_SYM SET {}
| CHARSET {}
@@ -9183,7 +9179,6 @@ simple_expr:
}
| '{' ident expr '}'
{
- Item_string *item;
$$= NULL;
/*
If "expr" is reasonably short pure ASCII string literal,
@@ -9193,31 +9188,13 @@ simple_expr:
SELECT {t'10:20:30'};
SELECT {ts'2001-01-01 10:20:30'};
*/
- if ($3->type() == Item::STRING_ITEM &&
- (item= (Item_string *) $3) &&
- item->collation.repertoire == MY_REPERTOIRE_ASCII &&
- item->str_value.length() < MAX_DATE_STRING_REP_LENGTH * 4)
- {
- enum_field_types type= MYSQL_TYPE_STRING;
- LEX_STRING *ls= &$2;
- if (ls->length == 1)
- {
- if (ls->str[0] == 'd') /* {d'2001-01-01'} */
- type= MYSQL_TYPE_DATE;
- else if (ls->str[0] == 't') /* {t'10:20:30'} */
- type= MYSQL_TYPE_TIME;
- }
- else if (ls->length == 2) /* {ts'2001-01-01 10:20:30'} */
- {
- if (ls->str[0] == 't' && ls->str[1] == 's')
- type= MYSQL_TYPE_DATETIME;
- }
+ if ($3->type() == Item::STRING_ITEM)
+ {
+ Item_string *item= (Item_string *) $3;
+ enum_field_types type= item->odbc_temporal_literal_type(&$2);
if (type != MYSQL_TYPE_STRING)
{
- $$= create_temporal_literal(thd,
- item->str_value.ptr(),
- item->str_value.length(),
- item->str_value.charset(),
+ $$= create_temporal_literal(thd, item->val_str(NULL),
type, false);
}
}
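
The grammar now delegates detection of the ODBC escapes {d '...'}, {t '...'} and {ts '...'} to Item_string::odbc_temporal_literal_type(); the inline logic that was removed mapped the introducer identifier to a field type (the real method also requires a short, pure-ASCII literal). A sketch of that mapping with simplified, standalone names:

#include <string>

enum temporal_kind { KIND_STRING, KIND_DATE, KIND_TIME, KIND_DATETIME };

// {d 'YYYY-MM-DD'} -> DATE, {t 'hh:mm:ss'} -> TIME,
// {ts 'YYYY-MM-DD hh:mm:ss'} -> DATETIME, anything else stays a string.
static temporal_kind odbc_introducer_kind(const std::string &ident)
{
  if (ident == "d")
    return KIND_DATE;
  if (ident == "t")
    return KIND_TIME;
  if (ident == "ts")
    return KIND_DATETIME;
  return KIND_STRING;
}

// odbc_introducer_kind("ts") == KIND_DATETIME, so the parser builds a
// temporal literal instead of a plain string item.
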
@@ -11136,8 +11113,8 @@ opt_escape:
{
Lex->escape_used= FALSE;
$$= ((thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES) ?
- new (thd->mem_root) Item_string("", 0, &my_charset_latin1) :
- new (thd->mem_root) Item_string("\\", 1, &my_charset_latin1));
+ new (thd->mem_root) Item_string_ascii("", 0) :
+ new (thd->mem_root) Item_string_ascii("\\", 1));
if ($$ == NULL)
MYSQL_YYABORT;
}
@@ -13253,14 +13230,10 @@ text_literal:
}
| UNDERSCORE_CHARSET TEXT_STRING
{
- Item_string *str= new (thd->mem_root) Item_string($2.str,
+ $$= new (thd->mem_root) Item_string_with_introducer($2.str,
$2.length, $1);
- if (str == NULL)
+ if ($$ == NULL)
MYSQL_YYABORT;
- str->set_repertoire_from_value();
- str->set_cs_specified(TRUE);
-
- $$= str;
}
| text_literal TEXT_STRING_literal
{
@@ -13289,7 +13262,12 @@ text_string:
if ($$ == NULL)
MYSQL_YYABORT;
}
- | HEX_NUM
+ | hex_or_bin_String { $$= $1; }
+ ;
+
+
+hex_or_bin_String:
+ HEX_NUM
{
Item *tmp= new (thd->mem_root) Item_hex_hybrid($1.str, $1.length);
if (tmp == NULL)
@@ -13396,60 +13374,12 @@ literal:
if ($$ == NULL)
MYSQL_YYABORT;
}
- | UNDERSCORE_CHARSET hex_num_or_string
- {
- Item *tmp= new (thd->mem_root) Item_hex_string($2.str, $2.length);
- if (tmp == NULL)
- MYSQL_YYABORT;
- /*
- it is OK only emulate fix_fieds, because we need only
- value of constant
- */
- tmp->quick_fix_field();
- String *str= tmp->val_str((String*) 0);
-
- Item_string *item_str;
- item_str= new (thd->mem_root)
- Item_string(NULL, /* name will be set in select_item */
- str ? str->ptr() : "",
- str ? str->length() : 0,
- $1);
- if (!item_str ||
- !item_str->check_well_formed_result(&item_str->str_value, TRUE))
- {
- MYSQL_YYABORT;
- }
-
- item_str->set_repertoire_from_value();
- item_str->set_cs_specified(TRUE);
-
- $$= item_str;
- }
- | UNDERSCORE_CHARSET BIN_NUM
+ | UNDERSCORE_CHARSET hex_or_bin_String
{
- Item *tmp= new (thd->mem_root) Item_bin_string($2.str, $2.length);
- if (tmp == NULL)
+ Item_string_with_introducer *item_str;
+ item_str= new (thd->mem_root) Item_string_with_introducer($2, $1);
+ if (!item_str || !item_str->check_well_formed_result(true))
MYSQL_YYABORT;
- /*
- it is OK only emulate fix_fieds, because we need only
- value of constant
- */
- tmp->quick_fix_field();
- String *str= tmp->val_str((String*) 0);
-
- Item_string *item_str;
- item_str= new (thd->mem_root)
- Item_string(NULL, /* name will be set in select_item */
- str ? str->ptr() : "",
- str ? str->length() : 0,
- $1);
- if (!item_str ||
- !item_str->check_well_formed_result(&item_str->str_value, TRUE))
- {
- MYSQL_YYABORT;
- }
-
- item_str->set_cs_specified(TRUE);
$$= item_str;
}
@@ -14873,19 +14803,19 @@ set_expr_or_default:
| DEFAULT { $$=0; }
| ON
{
- $$=new (thd->mem_root) Item_string("ON", 2, system_charset_info);
+ $$=new (thd->mem_root) Item_string_sys("ON", 2);
if ($$ == NULL)
MYSQL_YYABORT;
}
| ALL
{
- $$=new (thd->mem_root) Item_string("ALL", 3, system_charset_info);
+ $$=new (thd->mem_root) Item_string_sys("ALL", 3);
if ($$ == NULL)
MYSQL_YYABORT;
}
| BINARY
{
- $$=new (thd->mem_root) Item_string("binary", 6, system_charset_info);
+ $$=new (thd->mem_root) Item_string_sys("binary", 6);
if ($$ == NULL)
MYSQL_YYABORT;
}
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index c8b589e0fd6..6252e89b199 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002, 2013, Oracle and/or its affiliates.
+/* Copyright (c) 2002, 2014, Oracle and/or its affiliates.
Copyright (c) 2012, 2014, SkySQL Ab.
This program is free software; you can redistribute it and/or modify
@@ -1074,6 +1074,17 @@ static Sys_var_keycache Sys_key_cache_age_threshold(
BLOCK_SIZE(100), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
ON_UPDATE(change_keycache_param));
+static Sys_var_keycache Sys_key_cache_file_hash_size(
+ "key_cache_file_hash_size",
+ "Number of hash buckets for open and changed files. If you have a lot of MyISAM "
+ "files open you should increase this for faster flush of changes. A good "
+ "value is probably 1/10 of number of possible open MyISAM files.",
+ KEYCACHE_VAR(changed_blocks_hash_size),
+ CMD_LINE(REQUIRED_ARG, OPT_KEY_CACHE_CHANGED_BLOCKS_HASH_SIZE),
+ VALID_RANGE(128, 16384), DEFAULT(512),
+ BLOCK_SIZE(1), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
+ ON_UPDATE(resize_keycache));
+
static Sys_var_mybool Sys_large_files_support(
"large_files_support",
"Whether mysqld was compiled with options for large file support",
@@ -3231,9 +3242,10 @@ static Sys_var_ulonglong Sys_tmp_table_size(
static Sys_var_mybool Sys_timed_mutexes(
"timed_mutexes",
- "Specify whether to time mutexes (only InnoDB mutexes are currently "
- "supported)",
- GLOBAL_VAR(timed_mutexes), CMD_LINE(OPT_ARG), DEFAULT(0));
+ "Specify whether to time mutexes. Deprecated, has no effect.",
+ GLOBAL_VAR(timed_mutexes), CMD_LINE(OPT_ARG), DEFAULT(0),
+ NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL), ON_UPDATE(NULL),
+ DEPRECATED(""));
static char *server_version_ptr;
static Sys_var_charptr Sys_version(
@@ -4241,11 +4253,11 @@ static Sys_var_uint Sys_slave_net_timeout(
Return 0 + warning if it doesn't exist
*/
-uint Sys_var_multi_source_ulong::
-get_master_info_uint_value(THD *thd, ptrdiff_t offset)
+ulonglong Sys_var_multi_source_ulonglong::
+get_master_info_ulonglong_value(THD *thd, ptrdiff_t offset)
{
Master_info *mi;
- uint res= 0; // Default value
+ ulonglong res= 0; // Default value
mysql_mutex_unlock(&LOCK_global_system_variables);
mysql_mutex_lock(&LOCK_active_mi);
mi= master_info_index->
@@ -4254,7 +4266,7 @@ get_master_info_uint_value(THD *thd, ptrdiff_t offset)
if (mi)
{
mysql_mutex_lock(&mi->rli.data_lock);
- res= *((uint*) (((uchar*) mi) + master_info_offset));
+ res= *((ulonglong*) (((uchar*) mi) + master_info_offset));
mysql_mutex_unlock(&mi->rli.data_lock);
}
mysql_mutex_unlock(&LOCK_active_mi);
@@ -4266,7 +4278,7 @@ get_master_info_uint_value(THD *thd, ptrdiff_t offset)
bool update_multi_source_variable(sys_var *self_var, THD *thd,
enum_var_type type)
{
- Sys_var_multi_source_ulong *self= (Sys_var_multi_source_ulong*) self_var;
+ Sys_var_multi_source_ulonglong *self= (Sys_var_multi_source_ulonglong*) self_var;
bool result= true;
Master_info *mi;
@@ -4292,11 +4304,6 @@ bool update_multi_source_variable(sys_var *self_var, THD *thd,
static bool update_slave_skip_counter(sys_var *self, THD *thd, Master_info *mi)
{
- if (mi->using_gtid != Master_info::USE_GTID_NO)
- {
- my_error(ER_SLAVE_SKIP_NOT_IN_GTID, MYF(0));
- return true;
- }
if (mi->rli.slave_running)
{
my_error(ER_SLAVE_MUST_STOP, MYF(0), mi->connection_name.length,
@@ -4308,16 +4315,12 @@ static bool update_slave_skip_counter(sys_var *self, THD *thd, Master_info *mi)
return false;
}
-
-static Sys_var_multi_source_ulong
-Sys_slave_skip_counter("sql_slave_skip_counter",
- "Skip the next N events from the master log",
- SESSION_VAR(slave_skip_counter),
- NO_CMD_LINE,
- my_offsetof(Master_info, rli.slave_skip_counter),
- VALID_RANGE(0, UINT_MAX), DEFAULT(0), BLOCK_SIZE(1),
- ON_UPDATE(update_slave_skip_counter));
-
+static Sys_var_multi_source_ulonglong Sys_slave_skip_counter(
+ "sql_slave_skip_counter", "Skip the next N events from the master log",
+ SESSION_VAR(slave_skip_counter), NO_CMD_LINE,
+ MASTER_INFO_VAR(rli.slave_skip_counter),
+ VALID_RANGE(0, UINT_MAX), DEFAULT(0), BLOCK_SIZE(1),
+ ON_UPDATE(update_slave_skip_counter));
static bool update_max_relay_log_size(sys_var *self, THD *thd, Master_info *mi)
{
@@ -4326,17 +4329,14 @@ static bool update_max_relay_log_size(sys_var *self, THD *thd, Master_info *mi)
return false;
}
-static Sys_var_multi_source_ulong
-Sys_max_relay_log_size( "max_relay_log_size",
- "relay log will be rotated automatically when the "
- "size exceeds this value. If 0 at startup, it's "
- "set to max_binlog_size",
- SESSION_VAR(max_relay_log_size),
- CMD_LINE(REQUIRED_ARG),
- my_offsetof(Master_info, rli.max_relay_log_size),
- VALID_RANGE(0, 1024L*1024*1024), DEFAULT(0),
- BLOCK_SIZE(IO_SIZE),
- ON_UPDATE(update_max_relay_log_size));
+static Sys_var_multi_source_ulonglong Sys_max_relay_log_size(
+ "max_relay_log_size",
+ "relay log will be rotated automatically when the size exceeds this "
+ "value. If 0 at startup, it's set to max_binlog_size",
+ SESSION_VAR(max_relay_log_size), CMD_LINE(REQUIRED_ARG),
+ MASTER_INFO_VAR(rli.max_relay_log_size),
+ VALID_RANGE(0, 1024L*1024*1024), DEFAULT(0), BLOCK_SIZE(IO_SIZE),
+ ON_UPDATE(update_max_relay_log_size));
static Sys_var_charptr Sys_slave_skip_errors(
"slave_skip_errors", "Tells the slave thread to continue "
diff --git a/sql/sys_vars.h b/sql/sys_vars.h
index fa997416cbd..da93b765d0f 100644
--- a/sql/sys_vars.h
+++ b/sql/sys_vars.h
@@ -1985,7 +1985,8 @@ public:
like sql_slave_skip_counter are GLOBAL.
*/
-class Sys_var_multi_source_ulong;
+#define MASTER_INFO_VAR(X) my_offsetof(Master_info, X), sizeof(((Master_info *)0x10)->X)
+class Sys_var_multi_source_ulonglong;
class Master_info;
typedef bool (*on_multi_source_update_function)(sys_var *self, THD *thd,
@@ -1994,31 +1995,27 @@ bool update_multi_source_variable(sys_var *self,
THD *thd, enum_var_type type);
-class Sys_var_multi_source_ulong :public Sys_var_ulong
+class Sys_var_multi_source_ulonglong :public Sys_var_ulonglong
{
ptrdiff_t master_info_offset;
on_multi_source_update_function update_multi_source_variable_func;
public:
- Sys_var_multi_source_ulong(const char *name_arg,
+ Sys_var_multi_source_ulonglong(const char *name_arg,
const char *comment, int flag_args,
ptrdiff_t off, size_t size,
CMD_LINE getopt,
ptrdiff_t master_info_offset_arg,
- uint min_val, uint max_val, uint def_val,
- uint block_size,
+ size_t master_info_arg_size,
+ ulonglong min_val, ulonglong max_val,
+ ulonglong def_val, uint block_size,
on_multi_source_update_function on_update_func)
- :Sys_var_ulong(name_arg, comment, flag_args, off, size,
- getopt, min_val, max_val, def_val, block_size,
- 0, VARIABLE_NOT_IN_BINLOG, 0, update_multi_source_variable),
+ :Sys_var_ulonglong(name_arg, comment, flag_args, off, size,
+ getopt, min_val, max_val, def_val, block_size,
+ 0, VARIABLE_NOT_IN_BINLOG, 0, update_multi_source_variable),
master_info_offset(master_info_offset_arg),
update_multi_source_variable_func(on_update_func)
{
- }
- bool session_update(THD *thd, set_var *var)
- {
- session_var(thd, uint)= (uint) (var->save_result.ulonglong_value);
- /* Value should be moved to multi_master in on_update_func */
- return false;
+ SYSVAR_ASSERT(master_info_arg_size == size);
}
bool global_update(THD *thd, set_var *var)
{
@@ -2031,9 +2028,9 @@ public:
}
uchar *session_value_ptr(THD *thd, const LEX_STRING *base)
{
- uint *tmp, res;
- tmp= (uint*) (((uchar*)&(thd->variables)) + offset);
- res= get_master_info_uint_value(thd, master_info_offset);
+ ulonglong *tmp, res;
+ tmp= (ulonglong*) (((uchar*)&(thd->variables)) + offset);
+ res= get_master_info_ulonglong_value(thd, master_info_offset);
*tmp= res;
return (uchar*) tmp;
}
@@ -2041,7 +2038,7 @@ public:
{
return session_value_ptr(thd, base);
}
- uint get_master_info_uint_value(THD *thd, ptrdiff_t offset);
+ ulonglong get_master_info_ulonglong_value(THD *thd, ptrdiff_t offset);
bool update_variable(THD *thd, Master_info *mi)
{
return update_multi_source_variable_func(this, thd, mi);
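
MASTER_INFO_VAR expands to the (offset, sizeof) pair of a Master_info member, and the new constructor asserts that the member has the same width as the system variable (ulonglong) via SYSVAR_ASSERT. A sketch of the offset-plus-size idiom on a plain struct, with illustrative names only:

#include <cassert>
#include <cstddef>
#include <cstring>

struct MasterInfoSketch            // stand-in for Master_info
{
  char name[16];
  unsigned long long skip_counter; // the per-connection value we expose
};

// Offset of a member plus its size, like the MASTER_INFO_VAR macro.
#define MEMBER_VAR(T, m) offsetof(T, m), sizeof(((T *) 0)->m)

// Read a ulonglong member through its byte offset, checking the size first.
static unsigned long long read_ull(const MasterInfoSketch *mi,
                                   size_t off, size_t size)
{
  assert(size == sizeof(unsigned long long));  // same idea as SYSVAR_ASSERT
  unsigned long long res;
  memcpy(&res, (const char *) mi + off, sizeof(res));
  return res;
}

// read_ull(&mi, MEMBER_VAR(MasterInfoSketch, skip_counter))
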
diff --git a/sql/table.cc b/sql/table.cc
index 4f642cadaa2..6ac45445136 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1774,13 +1774,25 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
key_part= keyinfo->key_part;
for (i=0 ; i < keyinfo->user_defined_key_parts ;i++)
{
- uint fieldnr= key_part[i].fieldnr;
- if (!fieldnr ||
- share->field[fieldnr-1]->null_ptr ||
- share->field[fieldnr-1]->key_length() !=
- key_part[i].length)
+ DBUG_ASSERT(key_part[i].fieldnr > 0);
+ // Table field corresponding to the i'th key part.
+ Field *table_field= share->field[key_part[i].fieldnr - 1];
+
+ /*
+ If the key column is a NOT NULL BLOB, it will definitely
+ have a key prefix. If the key part prefix size equals the
+ BLOB column's maximum size, the key can still be promoted
+ to a primary key.
+ */
+ if (!table_field->real_maybe_null() &&
+ table_field->type() == MYSQL_TYPE_BLOB &&
+ table_field->field_length == key_part[i].length)
+ continue;
+
+ if (table_field->real_maybe_null() ||
+ table_field->key_length() != key_part[i].length)
{
- primary_key=MAX_KEY; // Can't be used
+ primary_key= MAX_KEY; // Can't be used
break;
}
}
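
The rewritten loop keeps a candidate primary key when a key part covers a NOT NULL BLOB column for its full declared length, and still rejects nullable columns or genuine prefixes. A condensed sketch of that per-key-part decision, with a hypothetical field descriptor:

// Hypothetical per-column descriptor for the sketch below.
struct FieldInfo
{
  bool     maybe_null;     // column allows NULLs
  bool     is_blob;        // MYSQL_TYPE_BLOB
  unsigned field_length;   // declared maximum length (for blobs)
  unsigned key_length;     // length a full-column key part would have
};

// Can this key part still back a primary key, per the logic above?
static bool key_part_usable_for_pk(const FieldInfo &f, unsigned part_length)
{
  // NOT NULL BLOB indexed over its whole declared length: acceptable.
  if (!f.maybe_null && f.is_blob && f.field_length == part_length)
    return true;
  // Otherwise: no NULLs allowed, and the part must cover the full column.
  return !f.maybe_null && f.key_length == part_length;
}
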
@@ -4210,7 +4222,8 @@ bool TABLE_LIST::create_field_translation(THD *thd)
while ((item= it++))
{
- transl[field_count].name= item->name;
+ DBUG_ASSERT(item->name && item->name[0]);
+ transl[field_count].name= thd->strdup(item->name);
transl[field_count++].item= item;
}
field_translation= transl;
@@ -6104,6 +6117,52 @@ void TABLE::create_key_part_by_field(KEY *keyinfo,
/**
@brief
+ Check validity of a possible key for the derived table
+
+ @param key the number of the key
+ @param key_parts number of components of the key
+ @param next_field_no the call-back function that returns the number of
+ the field used as the next component of the key
+ @param arg the argument for the above function
+
+ @details
+ The function checks whether a possible key satisfies the constraints
+ imposed on the keys of any temporary table.
+
+ @return TRUE if the key is valid
+ @return FALSE otherwise
+*/
+
+bool TABLE::check_tmp_key(uint key, uint key_parts,
+ uint (*next_field_no) (uchar *), uchar *arg)
+{
+ Field **reg_field;
+ uint i;
+ uint key_len= 0;
+
+ for (i= 0; i < key_parts; i++)
+ {
+ uint fld_idx= next_field_no(arg);
+ reg_field= field + fld_idx;
+ uint fld_store_len= (uint16) (*reg_field)->key_length();
+ if ((*reg_field)->real_maybe_null())
+ fld_store_len+= HA_KEY_NULL_LENGTH;
+ if ((*reg_field)->type() == MYSQL_TYPE_BLOB ||
+ (*reg_field)->real_type() == MYSQL_TYPE_VARCHAR ||
+ (*reg_field)->type() == MYSQL_TYPE_GEOMETRY)
+ fld_store_len+= HA_KEY_BLOB_LENGTH;
+ key_len+= fld_store_len;
+ }
+ /*
+ We use MI_MAX_KEY_LENGTH (myisam's default) below because it is
+ smaller than MAX_KEY_LENGTH (heap's default) and it's unknown whether
+ myisam or heap will be used for the temporary table.
+ */
+ return key_len <= MI_MAX_KEY_LENGTH;
+}
+
+/**
+ @brief
Add one key to a temporary table
@param key the number of the key
@@ -6133,6 +6192,7 @@ bool TABLE::add_tmp_key(uint key, uint key_parts,
KEY* keyinfo;
Field **reg_field;
uint i;
+
bool key_start= TRUE;
KEY_PART_INFO* key_part_info=
(KEY_PART_INFO*) alloc_root(&mem_root, sizeof(KEY_PART_INFO)*key_parts);
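
check_tmp_key() above adds up the storage each key part needs (the field's key_length(), plus a NULL byte for nullable columns, plus a length prefix for BLOB/VARCHAR/GEOMETRY) and accepts the key only if the total fits MI_MAX_KEY_LENGTH. A standalone sketch of that estimate, with hypothetical constants mirroring HA_KEY_NULL_LENGTH, HA_KEY_BLOB_LENGTH and the MyISAM limit:

#include <vector>

// Hypothetical stand-ins for the storage-engine constants used above.
static const unsigned KEY_NULL_BYTES  = 1;    // like HA_KEY_NULL_LENGTH
static const unsigned KEY_BLOB_BYTES  = 2;    // like HA_KEY_BLOB_LENGTH
static const unsigned MAX_TMP_KEY_LEN = 1000; // like MI_MAX_KEY_LENGTH

struct KeyPartField
{
  unsigned key_length;     // bytes of the value itself
  bool     maybe_null;     // needs a NULL indicator byte
  bool     var_length;     // BLOB / VARCHAR / GEOMETRY: needs a length prefix
};

// Would a temporary-table key over these fields fit the engine limit?
static bool tmp_key_fits(const std::vector<KeyPartField> &parts)
{
  unsigned total= 0;
  for (const KeyPartField &p : parts)
  {
    unsigned part_len= p.key_length;
    if (p.maybe_null)
      part_len+= KEY_NULL_BYTES;
    if (p.var_length)
      part_len+= KEY_BLOB_BYTES;
    total+= part_len;
  }
  return total <= MAX_TMP_KEY_LEN;
}
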
diff --git a/sql/table.h b/sql/table.h
index eca35d6c52c..69462539a20 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -1336,6 +1336,8 @@ public:
{ return !db_stat || m_needs_reopen; }
bool alloc_keys(uint key_count);
+ bool check_tmp_key(uint key, uint key_parts,
+ uint (*next_field_no) (uchar *), uchar *arg);
bool add_tmp_key(uint key, uint key_parts,
uint (*next_field_no) (uchar *), uchar *arg,
bool unique);
diff --git a/sql/table_cache.cc b/sql/table_cache.cc
index 8b768240b4f..097f37d26d8 100644
--- a/sql/table_cache.cc
+++ b/sql/table_cache.cc
@@ -267,7 +267,7 @@ void tc_add_table(THD *thd, TABLE *table)
TABLE_SHARE *purge_share= 0;
TABLE_SHARE *share;
TABLE *entry;
- ulonglong purge_time;
+ ulonglong UNINIT_VAR(purge_time);
TDC_iterator tdc_it;
tdc_it.init();
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 3eb7a8ce5eb..e02420d9468 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -211,7 +211,13 @@ LEX_CUSTRING build_frm_image(THD *thd, const char *table,
filepos= frm.length;
frm.length+= FRM_FORMINFO_SIZE; // forminfo
frm.length+= packed_fields_length(create_fields);
-
+
+ if (frm.length > FRM_MAX_SIZE)
+ {
+ my_error(ER_TABLE_DEFINITION_TOO_BIG, MYF(0), table);
+ DBUG_RETURN(frm);
+ }
+
frm_ptr= (uchar*) my_malloc(frm.length, MYF(MY_WME | MY_ZEROFILL |
MY_THREAD_SPECIFIC));
if (!frm_ptr)
diff --git a/sql/unireg.h b/sql/unireg.h
index 9b40b7b0779..5f133da674f 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -203,7 +203,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const char *table,
#define FRM_HEADER_SIZE 64
#define FRM_FORMINFO_SIZE 288
-#define FRM_MAX_SIZE (256*1024)
+#define FRM_MAX_SIZE (512*1024)
static inline bool is_binary_frm_header(uchar *head)
{
diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc
index 64b7d6c8721..81b1e182282 100644
--- a/sql/wsrep_mysqld.cc
+++ b/sql/wsrep_mysqld.cc
@@ -2391,7 +2391,7 @@ bool wsrep_create_like_table(THD* thd, TABLE_LIST* table,
String query(buf, sizeof(buf), system_charset_info);
query.length(0); // Have to zero it since constructor doesn't
- (void) store_create_info(thd, &tbl, &query, NULL, TRUE, FALSE);
+ (void) show_create_table(thd, &tbl, &query, NULL, WITH_DB_NAME);
WSREP_DEBUG("TMP TABLE: %s", query.ptr());
thd->wsrep_TOI_pre_query= query.ptr();