Diffstat (limited to 'sql')
-rw-r--r--  sql/CMakeLists.txt | 1
-rw-r--r--  sql/event_db_repository.cc | 4
-rw-r--r--  sql/events.cc | 7
-rw-r--r--  sql/item_create.cc | 24
-rw-r--r--  sql/item_func.cc | 6
-rw-r--r--  sql/item_strfunc.cc | 108
-rw-r--r--  sql/item_strfunc.h | 49
-rw-r--r--  sql/item_timefunc.cc | 10
-rw-r--r--  sql/lex.h | 1
-rw-r--r--  sql/lex_charset.cc | 113
-rw-r--r--  sql/lex_charset.h | 162
-rw-r--r--  sql/log.cc | 18
-rw-r--r--  sql/log_event_server.cc | 34
-rw-r--r--  sql/mysqld.cc | 18
-rw-r--r--  sql/mysqld.h | 2
-rw-r--r--  sql/opt_split.cc | 2
-rw-r--r--  sql/opt_subselect.cc | 17
-rw-r--r--  sql/opt_table_elimination.cc | 270
-rw-r--r--  sql/privilege.h | 2
-rw-r--r--  sql/protocol.cc | 8
-rw-r--r--  sql/rpl_mi.cc | 7
-rw-r--r--  sql/rpl_mi.h | 15
-rw-r--r--  sql/share/errmsg-utf8.txt | 6
-rw-r--r--  sql/slave.cc | 11
-rw-r--r--  sql/sp.cc | 16
-rw-r--r--  sql/sp_head.cc | 12
-rw-r--r--  sql/sql_class.cc | 40
-rw-r--r--  sql/sql_class.h | 56
-rw-r--r--  sql/sql_delete.cc | 2
-rw-r--r--  sql/sql_i_s.h | 16
-rw-r--r--  sql/sql_insert.cc | 9
-rw-r--r--  sql/sql_lex.cc | 15
-rw-r--r--  sql/sql_lex.h | 7
-rw-r--r--  sql/sql_parse.cc | 6
-rw-r--r--  sql/sql_repl.cc | 84
-rw-r--r--  sql/sql_select.cc | 656
-rw-r--r--  sql/sql_select.h | 31
-rw-r--r--  sql/sql_show.cc | 96
-rw-r--r--  sql/sql_table.cc | 12
-rw-r--r--  sql/sql_time.cc | 4
-rw-r--r--  sql/sql_type.cc | 2
-rw-r--r--  sql/sql_yacc.yy | 13
-rw-r--r--  sql/sys_vars.cc | 55
-rw-r--r--  sql/sys_vars.inl | 2
-rw-r--r--  sql/table.h | 1
-rw-r--r--  sql/temporary_tables.cc | 8
-rw-r--r--  sql/wsrep_allowlist_service.cc | 56
-rw-r--r--  sql/wsrep_allowlist_service.h | 29
-rw-r--r--  sql/wsrep_mysqld.cc | 57
-rw-r--r--  sql/wsrep_mysqld.h | 2
-rw-r--r--  sql/wsrep_schema.cc | 227
-rw-r--r--  sql/wsrep_schema.h | 22
-rw-r--r--  sql/wsrep_server_state.cc | 17
-rw-r--r--  sql/wsrep_server_state.h | 9
-rw-r--r--  sql/wsrep_types.h | 2
55 files changed, 2079 insertions(+), 380 deletions(-)
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 6ed70e2814e..bbd4407dc74 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -23,6 +23,7 @@ IF(WITH_WSREP AND NOT EMBEDDED_LIBRARY)
wsrep_storage_service.cc
wsrep_server_state.cc
wsrep_status.cc
+ wsrep_allowlist_service.cc
wsrep_utils.cc
wsrep_xid.cc
wsrep_check_opts.cc
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc
index cf27e9b7326..9e31ccb3338 100644
--- a/sql/event_db_repository.cc
+++ b/sql/event_db_repository.cc
@@ -150,12 +150,12 @@ const TABLE_FIELD_TYPE event_table_fields[ET_FIELD_COUNT] =
},
{
{ STRING_WITH_LEN("collation_connection") },
- { STRING_WITH_LEN("char(32)") },
+ { STRING_WITH_LEN("char(") },
{ STRING_WITH_LEN("utf8mb3") }
},
{
{ STRING_WITH_LEN("db_collation") },
- { STRING_WITH_LEN("char(32)") },
+ { STRING_WITH_LEN("char(") },
{ STRING_WITH_LEN("utf8mb3") }
},
{
diff --git a/sql/events.cc b/sql/events.cc
index f06068e84e8..6ecdf975178 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -721,15 +721,16 @@ send_show_create_event(THD *thd, Event_timed *et, Protocol *protocol)
field_list.push_back(new (mem_root)
Item_empty_string(thd, "character_set_client",
- MY_CS_NAME_SIZE), mem_root);
+ MY_CS_CHARACTER_SET_NAME_SIZE),
+ mem_root);
field_list.push_back(new (mem_root)
Item_empty_string(thd, "collation_connection",
- MY_CS_NAME_SIZE), mem_root);
+ MY_CS_COLLATION_NAME_SIZE), mem_root);
field_list.push_back(new (mem_root)
Item_empty_string(thd, "Database Collation",
- MY_CS_NAME_SIZE), mem_root);
+ MY_CS_COLLATION_NAME_SIZE), mem_root);
if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
diff --git a/sql/item_create.cc b/sql/item_create.cc
index 7caa0123faf..53acc7946be 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -1873,6 +1873,19 @@ protected:
};
+class Create_func_random_bytes : public Create_func_arg1
+{
+public:
+ virtual Item *create_1_arg(THD *thd, Item *arg1);
+
+ static Create_func_random_bytes s_singleton;
+
+protected:
+ Create_func_random_bytes() {}
+ virtual ~Create_func_random_bytes() {}
+};
+
+
class Create_func_release_all_locks : public Create_func_arg0
{
public:
@@ -4985,6 +4998,16 @@ Create_func_rand::create_native(THD *thd, const LEX_CSTRING *name,
}
+Create_func_random_bytes Create_func_random_bytes::s_singleton;
+
+Item *Create_func_random_bytes::create_1_arg(THD *thd, Item *arg1)
+{
+ thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION);
+ thd->lex->uncacheable(UNCACHEABLE_RAND);
+ return new (thd->mem_root) Item_func_random_bytes(thd, arg1);
+}
+
+
Create_func_release_all_locks Create_func_release_all_locks::s_singleton;
Item*
@@ -5804,6 +5827,7 @@ Native_func_registry func_array[] =
{ { STRING_WITH_LEN("POW") }, BUILDER(Create_func_pow)},
{ { STRING_WITH_LEN("POWER") }, BUILDER(Create_func_pow)},
{ { STRING_WITH_LEN("QUOTE") }, BUILDER(Create_func_quote)},
+ { { STRING_WITH_LEN("RANDOM_BYTES")}, BUILDER(Create_func_random_bytes)},
{ { STRING_WITH_LEN("REGEXP_INSTR") }, BUILDER(Create_func_regexp_instr)},
{ { STRING_WITH_LEN("REGEXP_REPLACE") }, BUILDER(Create_func_regexp_replace)},
{ { STRING_WITH_LEN("REGEXP_SUBSTR") }, BUILDER(Create_func_regexp_substr)},
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 7d4b2e01efd..a07595cbbd8 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -778,7 +778,7 @@ bool Item_func_connection_id::fix_fields(THD *thd, Item **ref)
{
if (Item_int_func::fix_fields(thd, ref))
return TRUE;
- thd->thread_specific_used= TRUE;
+ thd->used|= THD::THREAD_SPECIFIC_USED;
value= thd->variables.pseudo_thread_id;
return FALSE;
}
@@ -2821,9 +2821,9 @@ bool Item_func_rand::fix_fields(THD *thd,Item **ref)
Once events are forwarded rather than recreated,
the following can be skipped if inside the slave thread
*/
- if (!thd->rand_used)
+ if (!(thd->used & THD::RAND_USED))
{
- thd->rand_used= 1;
+ thd->used|= THD::RAND_USED;
thd->rand_saved_seed1= thd->rand.seed1;
thd->rand_saved_seed2= thd->rand.seed2;
}
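
The two hunks above are part of a wider cleanup in this patch: the per-feature THD booleans (rand_used, time_zone_used, thread_specific_used, query_start_sec_part_used) are folded into one THD::used bitmask. A minimal standalone sketch of the pattern follows; Session and the bit values are hypothetical stand-ins, the real flags live in class THD in sql_class.h.

    #include <cstdint>

    // Hypothetical stand-in for THD with a flag bitmask.
    struct Session
    {
      typedef uint32_t used_t;
      static const used_t RAND_USED                 = 1u << 0;
      static const used_t TIME_ZONE_USED            = 1u << 1;
      static const used_t THREAD_SPECIFIC_USED      = 1u << 2;
      static const used_t QUERY_START_SEC_PART_USED = 1u << 3;
      used_t used= 0;
    };

    int main()
    {
      Session thd;
      thd.used|= Session::TIME_ZONE_USED;                    // was: thd->time_zone_used= 1
      bool rand_seen= (thd.used & Session::RAND_USED) != 0;  // was: if (thd->rand_used)

      // Save and restore a single bit, as the log.cc hunks further down do
      // around writes to the general/slow log tables:
      Session::used_t saved= thd.used & Session::TIME_ZONE_USED;
      /* ... work that may set or clear TIME_ZONE_USED ... */
      thd.used= (thd.used & ~Session::TIME_ZONE_USED) | saved;
      return rand_seen ? 1 : 0;
    }
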
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index cfec9292aac..4d69280cb98 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -733,6 +733,17 @@ bool Item_func_concat::fix_length_and_dec(THD *thd)
Encryption result is longer than original by formula:
@code new_length= org_length + (8-(org_length % 8))+1 @endcode
*/
+bool Item_func_des_encrypt::fix_length_and_dec(THD *thd)
+{
+ set_maybe_null();
+ /* 9 = MAX ((8- (arg_len % 8)) + 1) */
+ max_length = args[0]->max_length + 9;
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_WARN_DEPRECATED_SYNTAX,
+ ER_THD(thd, ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT),
+ func_name_cstring().str);
+ return FALSE;
+}
+
String *Item_func_des_encrypt::val_str(String *str)
{
@@ -833,6 +844,20 @@ error:
}
+bool Item_func_des_decrypt::fix_length_and_dec(THD *thd)
+{
+ set_maybe_null();
+ /* 9 = MAX ((8- (arg_len % 8)) + 1) */
+ max_length= args[0]->max_length;
+ if (max_length >= 9U)
+ max_length-= 9U;
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_WARN_DEPRECATED_SYNTAX,
+ ER_THD(thd, ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT),
+ func_name_cstring().str);
+ return FALSE;
+}
+
+
String *Item_func_des_decrypt::val_str(String *str)
{
DBUG_ASSERT(fixed());
@@ -1449,6 +1474,70 @@ String *Item_func_sformat::val_str(String *res)
return null_value ? NULL : res;
}
+#include"my_global.h"
+#include <openssl/rand.h>
+#include <openssl/err.h>
+
+bool Item_func_random_bytes::fix_length_and_dec(THD *thd)
+{
+ used_tables_cache|= RAND_TABLE_BIT;
+ if (args[0]->can_eval_in_optimize())
+ {
+ int32 v= (int32) args[0]->val_int();
+ max_length= MY_MAX(0, MY_MIN(v, MAX_RANDOM_BYTES));
+ return false;
+ }
+ max_length= MAX_RANDOM_BYTES;
+ return false;
+}
+
+
+void Item_func_random_bytes::update_used_tables()
+{
+ Item_str_func::update_used_tables();
+ used_tables_cache|= RAND_TABLE_BIT;
+}
+
+
+String *Item_func_random_bytes::val_str(String *str)
+{
+ longlong count= args[0]->val_int();
+
+ if (args[0]->null_value)
+ goto err;
+ null_value= 0;
+
+ if (count < 0 || count > MAX_RANDOM_BYTES)
+ goto err;
+
+ if (count == 0)
+ return make_empty_result(str);
+
+ if (str->alloc((uint) count))
+ goto err;
+
+ str->length(count);
+ str->set_charset(&my_charset_bin);
+ if (my_random_bytes((unsigned char *) str->ptr(), (int32) count))
+ {
+ ulong ssl_err;
+ while ((ssl_err= ERR_get_error()))
+ {
+ char buf[256];
+ ERR_error_string_n(ssl_err, buf, sizeof(buf));
+ sql_print_warning("SSL error: %s", buf);
+ }
+ goto err;
+ }
+
+ return str;
+
+err:
+ null_value= 1;
+ return 0;
+}
+
+
/*********************************************************************/
bool Item_func_regexp_replace::fix_length_and_dec(THD *thd)
{
@@ -3728,14 +3817,10 @@ bool Item_func_set_collation::fix_length_and_dec(THD *thd)
{
if (agg_arg_charsets_for_string_result(collation, args, 1))
return true;
- if (!my_charset_same(collation.collation, m_set_collation))
- {
- my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0),
- m_set_collation->coll_name.str,
- collation.collation->cs_name.str);
- return TRUE;
- }
- collation.set(m_set_collation, DERIVATION_EXPLICIT,
+ Lex_exact_charset_opt_extended_collate cl(collation.collation, true);
+ if (cl.merge_collation_override(m_set_collation))
+ return true;
+ collation.set(cl.collation().charset_info(), DERIVATION_EXPLICIT,
args[0]->collation.repertoire);
max_length= args[0]->max_length;
return FALSE;
@@ -3753,7 +3838,7 @@ void Item_func_set_collation::print(String *str, enum_query_type query_type)
{
args[0]->print_parenthesised(str, query_type, precedence());
str->append(STRING_WITH_LEN(" collate "));
- str->append(m_set_collation->coll_name);
+ str->append(m_set_collation.collation_name_for_show());
}
String *Item_func_charset::val_str(String *str)
@@ -3785,7 +3870,7 @@ bool Item_func_weight_string::fix_length_and_dec(THD *thd)
{
CHARSET_INFO *cs= args[0]->collation.collation;
collation.set(&my_charset_bin, args[0]->collation.derivation);
- weigth_flags= my_strxfrm_flag_normalize(weigth_flags, cs->levels_for_order);
+ weigth_flags= my_strxfrm_flag_normalize(cs, weigth_flags);
/*
Use result_length if it was given explicitly in constructor,
otherwise calculate max_length using argument's max_length
@@ -3795,7 +3880,8 @@ bool Item_func_weight_string::fix_length_and_dec(THD *thd)
{
size_t char_length;
char_length= ((cs->state & MY_CS_STRNXFRM_BAD_NWEIGHTS) || !nweights) ?
- args[0]->max_char_length() : nweights * cs->levels_for_order;
+ args[0]->max_char_length() : nweights *
+ my_count_bits_uint32(cs->levels_for_order);
max_length= (uint32) cs->strnxfrmlen(char_length * cs->mbmaxlen);
}
set_maybe_null();
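
For reference, a self-contained sketch of the control flow Item_func_random_bytes::val_str() follows: clamp the requested length to MAX_RANDOM_BYTES, fill the buffer from the SSL RNG, and drain OpenSSL's error queue on failure. It calls RAND_bytes() directly and logs to stderr, whereas the patch goes through my_random_bytes() and sql_print_warning(); compile with -lcrypto.

    #include <cstdio>
    #include <vector>
    #include <openssl/rand.h>
    #include <openssl/err.h>

    static const long MAX_RANDOM_BYTES= 1024;     // same cap as in item_strfunc.h

    // Returns false where the SQL function would return NULL.
    static bool random_bytes(long count, std::vector<unsigned char> *out)
    {
      if (count < 0 || count > MAX_RANDOM_BYTES)
        return false;
      out->resize((size_t) count);
      if (count == 0)
        return true;                              // RANDOM_BYTES(0) -> ''
      if (RAND_bytes(out->data(), (int) count) != 1)
      {
        unsigned long err;
        while ((err= ERR_get_error()))            // drain the whole error queue
        {
          char buf[256];
          ERR_error_string_n(err, buf, sizeof(buf));
          std::fprintf(stderr, "SSL error: %s\n", buf);
        }
        return false;
      }
      return true;
    }
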
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index 68b6f954b0a..2938895efa7 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -384,6 +384,32 @@ public:
{ return get_item_copy<Item_func_concat_ws>(thd, this); }
};
+
+class Item_func_random_bytes : public Item_str_func
+{
+public:
+ Item_func_random_bytes(THD *thd, Item *arg1) : Item_str_func(thd, arg1) {}
+ bool fix_length_and_dec(THD *thd) override;
+ void update_used_tables() override;
+ String *val_str(String *) override;
+ LEX_CSTRING func_name_cstring() const override
+ {
+ static LEX_CSTRING name= {STRING_WITH_LEN("random_bytes")};
+ return name;
+ }
+ bool check_vcol_func_processor(void *arg) override
+ {
+ return mark_unsupported_function(func_name(), "()", arg,
+ VCOL_NON_DETERMINISTIC | VCOL_NOT_VIRTUAL);
+ }
+ Item *get_copy(THD *thd) override
+ {
+ return get_item_copy<Item_func_random_bytes>(thd, this);
+ }
+ static const int MAX_RANDOM_BYTES= 1024;
+};
+
+
class Item_func_reverse :public Item_str_func
{
String tmp_value;
@@ -897,13 +923,7 @@ public:
Item_func_des_encrypt(THD *thd, Item *a, Item *b)
:Item_str_binary_checksum_func(thd, a, b) {}
String *val_str(String *) override;
- bool fix_length_and_dec(THD *thd) override
- {
- set_maybe_null();
- /* 9 = MAX ((8- (arg_len % 8)) + 1) */
- max_length = args[0]->max_length + 9;
- return FALSE;
- }
+ bool fix_length_and_dec(THD *thd) override;
LEX_CSTRING func_name_cstring() const override
{
static LEX_CSTRING name= {STRING_WITH_LEN("des_encrypt") };
@@ -922,15 +942,7 @@ public:
Item_func_des_decrypt(THD *thd, Item *a, Item *b)
:Item_str_binary_checksum_func(thd, a, b) {}
String *val_str(String *) override;
- bool fix_length_and_dec(THD *thd) override
- {
- set_maybe_null();
- /* 9 = MAX ((8- (arg_len % 8)) + 1) */
- max_length= args[0]->max_length;
- if (max_length >= 9U)
- max_length-= 9U;
- return FALSE;
- }
+ bool fix_length_and_dec(THD *thd) override;
LEX_CSTRING func_name_cstring() const override
{
static LEX_CSTRING name= {STRING_WITH_LEN("des_decrypt") };
@@ -1825,9 +1837,10 @@ public:
class Item_func_set_collation :public Item_str_func
{
- CHARSET_INFO *m_set_collation;
+ Lex_extended_collation_st m_set_collation;
public:
- Item_func_set_collation(THD *thd, Item *a, CHARSET_INFO *set_collation):
+ Item_func_set_collation(THD *thd, Item *a,
+ const Lex_extended_collation_st &set_collation):
Item_str_func(thd, a), m_set_collation(set_collation) {}
String *val_str(String *) override;
bool fix_length_and_dec(THD *thd) override;
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 8399e5dc0ac..26adc4eddaa 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -1509,7 +1509,7 @@ bool Item_func_from_days::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzz
void Item_func_curdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
{
thd->variables.time_zone->gmt_sec_to_TIME(now_time, thd->query_start());
- thd->time_zone_used= 1;
+ thd->used |= THD::TIME_ZONE_USED;
}
@@ -1601,7 +1601,7 @@ void Item_func_curtime_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
now_time->year= now_time->month= now_time->day= 0;
now_time->time_type= MYSQL_TIMESTAMP_TIME;
set_sec_part(thd->query_start_sec_part(), now_time, this);
- thd->time_zone_used= 1;
+ thd->used|= THD::TIME_ZONE_USED;
}
@@ -1667,7 +1667,7 @@ void Item_func_now_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
{
thd->variables.time_zone->gmt_sec_to_TIME(now_time, thd->query_start());
set_sec_part(thd->query_start_sec_part(), now_time, this);
- thd->time_zone_used= 1;
+ thd->used|= THD::TIME_ZONE_USED;
}
@@ -1710,7 +1710,7 @@ void Item_func_sysdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
my_hrtime_t now= my_hrtime();
thd->variables.time_zone->gmt_sec_to_TIME(now_time, hrtime_to_my_time(now));
set_sec_part(hrtime_sec_part(now), now_time, this);
- thd->time_zone_used= 1;
+ thd->used|= THD::TIME_ZONE_USED;
}
@@ -2732,7 +2732,7 @@ null_date:
bool Item_func_from_unixtime::fix_length_and_dec(THD *thd)
{
- thd->time_zone_used= 1;
+ thd->used|= THD::TIME_ZONE_USED;
tz= thd->variables.time_zone;
Type_std_attributes::set(
Type_temporal_attributes_not_fixed_dec(MAX_DATETIME_WIDTH,
diff --git a/sql/lex.h b/sql/lex.h
index 4ce88ccc2ee..18519d1f8d1 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -385,6 +385,7 @@ SYMBOL symbols[] = {
{ "MASTER_SSL_VERIFY_SERVER_CERT", SYM(MASTER_SSL_VERIFY_SERVER_CERT_SYM)},
{ "MASTER_USER", SYM(MASTER_USER_SYM)},
{ "MASTER_USE_GTID", SYM(MASTER_USE_GTID_SYM)},
+ { "MASTER_DEMOTE_TO_SLAVE", SYM(MASTER_DEMOTE_TO_SLAVE_SYM)},
{ "MASTER_HEARTBEAT_PERIOD", SYM(MASTER_HEARTBEAT_PERIOD_SYM)},
{ "MATCH", SYM(MATCH)},
{ "MAX_CONNECTIONS_PER_HOUR", SYM(MAX_CONNECTIONS_PER_HOUR)},
diff --git a/sql/lex_charset.cc b/sql/lex_charset.cc
index b83d175307f..cfb74a0bf04 100644
--- a/sql/lex_charset.cc
+++ b/sql/lex_charset.cc
@@ -45,7 +45,7 @@ raise_ER_CONFLICTING_DECLARATIONS(const char *clause1,
const char *name2,
bool reverse_order)
{
- char def[MY_CS_NAME_SIZE * 2];
+ char def[MY_CS_CHARACTER_SET_NAME_SIZE * 2];
my_snprintf(def, sizeof(def), "%s (%s)", name1, name1_part2);
raise_ER_CONFLICTING_DECLARATIONS(clause1, def,
clause2, name2,
@@ -127,19 +127,43 @@ Lex_exact_collation::
raise_if_conflicts_with_context_collation(const Lex_context_collation &cl,
bool reverse_order) const
{
- if (cl.is_contextually_typed_collate_default() &&
- !(m_ci->state & MY_CS_PRIMARY))
+ if (cl.is_contextually_typed_collate_default())
{
- raise_ER_CONFLICTING_DECLARATIONS("COLLATE ", m_ci->coll_name.str,
- "COLLATE ", "DEFAULT", reverse_order);
- return true;
+ if (!(m_ci->state & MY_CS_PRIMARY))
+ {
+ raise_ER_CONFLICTING_DECLARATIONS("COLLATE ", m_ci->coll_name.str,
+ "COLLATE ", "DEFAULT", reverse_order);
+ return true;
+ }
+ return false;
}
- if (cl.is_contextually_typed_binary_style() &&
- !(m_ci->state & MY_CS_BINSORT))
+ if (cl.is_contextually_typed_binary_style())
{
- raise_ER_CONFLICTING_DECLARATIONS("COLLATE ", m_ci->coll_name.str,
- "", "BINARY", reverse_order);
+ if (!(m_ci->state & MY_CS_BINSORT))
+ {
+ raise_ER_CONFLICTING_DECLARATIONS("COLLATE ", m_ci->coll_name.str,
+ "", "BINARY", reverse_order);
+ return true;
+ }
+ return false;
+ }
+
+ DBUG_ASSERT(!strncmp(cl.charset_info()->coll_name.str,
+ STRING_WITH_LEN("utf8mb4_uca1400_")));
+
+ Charset_loader_server loader;
+ CHARSET_INFO *ci= loader.get_exact_collation_by_context_name(
+ m_ci,
+ cl.collation_name_context_suffix().str,
+ MYF(0));
+ if (m_ci != ci)
+ {
+ raise_ER_CONFLICTING_DECLARATIONS("COLLATE ",
+ m_ci->coll_name.str,
+ "COLLATE ",
+ cl.collation_name_for_show().str,
+ reverse_order);
return true;
}
return false;
@@ -153,9 +177,16 @@ Lex_context_collation::raise_if_not_equal(const Lex_context_collation &cl) const
Only equal context collations are possible here so far:
- Column grammar only supports BINARY, but does not support COLLATE DEFAULT
- DB/Table grammar only support COLLATE DEFAULT
- But we'll have different collations here - uca140 is coming soon.
*/
- DBUG_ASSERT(m_ci == cl.m_ci);
+ if (m_ci != cl.m_ci)
+ {
+ my_error(ER_CONFLICTING_DECLARATIONS, MYF(0),
+ is_contextually_typed_binary_style() ? "" : "COLLATE ",
+ collation_name_for_show().str,
+ cl.is_contextually_typed_binary_style() ? "" : "COLLATE ",
+ cl.collation_name_for_show().str);
+ return true;
+ }
return false;
}
@@ -193,12 +224,16 @@ bool Lex_exact_charset_opt_extended_collate::
return false;
}
- /*
- A non-binary and non-default contextually typed collation.
- We don't have such yet - the parser cannot produce this.
- But we have "uca1400_as_ci" coming soon.
- */
- DBUG_ASSERT(0);
+ DBUG_ASSERT(!strncmp(cl.charset_info()->coll_name.str,
+ STRING_WITH_LEN("utf8mb4_uca1400_")));
+
+ CHARSET_INFO *ci= Charset_loader_server().
+ get_exact_collation_by_context_name_or_error(m_ci,
+ cl.charset_info()->coll_name.str + 8, MYF(0));
+ if (!ci)
+ return true;
+ m_ci= ci;
+ m_with_collate= true;
return false;
}
@@ -244,7 +279,7 @@ bool Lex_extended_collation_st::
CONTEXT + EXACT
CHAR(10) COLLATE DEFAULT .. COLLATE latin1_swedish_ci
CHAR(10) BINARY .. COLLATE latin1_bin
- CHAR(10) COLLATE uca1400_as_ci .. COLLATE latin1_bin - coming soon
+ CHAR(10) COLLATE uca1400_as_ci .. COLLATE latin1_bin
*/
if (rhs.raise_if_conflicts_with_context_collation(
Lex_context_collation(m_ci), true))
@@ -320,6 +355,38 @@ bool Lex_extended_collation_st::merge(const Lex_extended_collation_st &rhs)
}
+LEX_CSTRING Lex_context_collation::collation_name_for_show() const
+{
+ if (is_contextually_typed_collate_default())
+ return LEX_CSTRING({STRING_WITH_LEN("DEFAULT")});
+ if (is_contextually_typed_binary_style())
+ return LEX_CSTRING({STRING_WITH_LEN("BINARY")});
+ return collation_name_context_suffix();
+}
+
+
+bool Lex_extended_collation_st::set_by_name(const char *name, myf my_flags)
+{
+ Charset_loader_server loader;
+ CHARSET_INFO *cs;
+
+ if (!strncasecmp(name, STRING_WITH_LEN("uca1400_")))
+ {
+ if (!(cs= loader.get_context_collation_or_error(name, my_flags)))
+ return true;
+
+ *this= Lex_extended_collation(Lex_context_collation(cs));
+ return false;
+ }
+
+ if (!(cs= loader.get_exact_collation_or_error(name, my_flags)))
+ return true;
+
+ *this= Lex_extended_collation(Lex_exact_collation(cs));
+ return false;
+}
+
+
/** find a collation with binary comparison rules
*/
CHARSET_INFO *Lex_exact_charset_opt_extended_collate::find_bin_collation() const
@@ -499,6 +566,14 @@ bool Lex_exact_charset_opt_extended_collate::
// CHARACTER SET latin1 [COLLATE latin1_bin] .. COLLATE latin1_bin
if (m_with_collate)
return Lex_exact_collation(m_ci).raise_if_not_equal(cl);
+ return merge_exact_collation_override(cl);
+}
+
+
+bool Lex_exact_charset_opt_extended_collate::
+ merge_exact_collation_override(const Lex_exact_collation &cl)
+{
+ // CHARACTER SET latin1 [COLLATE latin1_bin] .. COLLATE latin1_bin
if (raise_if_not_applicable(cl))
return true;
*this= Lex_exact_charset_opt_extended_collate(cl);
diff --git a/sql/lex_charset.h b/sql/lex_charset.h
index d8d4422b34f..2bbeff8a4a6 100644
--- a/sql/lex_charset.h
+++ b/sql/lex_charset.h
@@ -18,6 +18,126 @@
/*
An extension for Charset_loader_mysys,
+ with server error and warning support.
+*/
+class Charset_loader_server: public Charset_loader_mysys
+{
+public:
+ using Charset_loader_mysys::Charset_loader_mysys;
+ void raise_unknown_collation_error(const char *name) const;
+ void raise_not_applicable_error(const char *cs, const char *cl) const;
+
+ /*
+ Find an exact collation by name.
+ Raise an error on failure.
+
+ @param cs - the character set
+ @param collation_name - the collation name, e.g. "utf8_bin"
+ @param my_flags - my flags, e.g. MYF(WME)
+ @returns - a NULL pointer in case of failure, or
+ a CHARSET_INFO pointer on success.
+ */
+
+ CHARSET_INFO *
+ get_exact_collation_or_error(const char *name, myf my_flags= MYF(0))
+ {
+ CHARSET_INFO *ci= get_exact_collation(name, my_flags);
+ if (!ci)
+ raise_unknown_collation_error(name);
+ return ci;
+ }
+
+ /*
+ Find an exact collation by a character set and a
+ contextually typed collation name.
+ Raise an error in case of a failure.
+
+ @param cs - the character set
+ @param context_cl_name - the context name, e.g. "uca1400_cs_ci"
+ @param my_flags - my flags, e.g. MYF(WME)
+ @returns - a NULL pointer in case of failure, or
+ a CHARSET_INFO pointer on success.
+ */
+ CHARSET_INFO *
+ get_exact_collation_by_context_name_or_error(CHARSET_INFO *cs,
+ const char *name,
+ myf my_flags= MYF(0))
+ {
+ CHARSET_INFO *ci= get_exact_collation_by_context_name(cs, name, my_flags);
+ if (!ci)
+ raise_not_applicable_error(cs->cs_name.str, name);
+ return ci;
+ }
+
+ /*
+ Find an abstract context collation by name.
+ Raise an error on failure.
+ The returned pointer needs to be resolved to a character set name.
+ It should not be passed directly to the character set routines.
+
+ @param cs - the character set
+ @param context_cl_name - the context name, e.g. "uca1400_cs_ci"
+ @param my_flags - my flags, e.g. MYF(WME)
+ @returns - a NULL pointer in case of failure, or
+ a CHARSET_INFO pointer on success.
+ */
+
+ CHARSET_INFO *
+ get_context_collation_or_error(const char *collation_name,
+ myf my_flags= MYF(0))
+ {
+ CHARSET_INFO *ci= get_context_collation(collation_name, my_flags);
+ if (!ci)
+ raise_unknown_collation_error(collation_name);
+ return ci;
+ }
+
+ /*
+ Find an exact binary collation in the given character set.
+ Raise an error on failure.
+
+ @param cs - the character set
+ @param my_flags - my flags, e.g. MYF(WME)
+ @returns - a NULL pointer in case of failure, or
+ a CHARSET_INFO pointer on success.
+ */
+
+ CHARSET_INFO *
+ get_bin_collation_or_error(CHARSET_INFO *cs,
+ myf my_flags= MYF(0))
+ {
+ const char *cs_name= cs->cs_name.str;
+ if (!(cs= get_bin_collation(cs, my_flags)))
+ {
+ char tmp[65];
+ strxnmov(tmp, sizeof(tmp)-1, cs_name, "_bin", NULL);
+ raise_unknown_collation_error(tmp);
+ }
+ return cs;
+ }
+
+ /*
+ Find an exact default collation in the given character set.
+ This routine does not fail.
+ Any character set must have a default collation.
+
+ @param cs - the character set
+ @param my_flags - my flags, e.g. MYF(WME)
+ @returns - a CHARSET_INFO pointer
+ */
+
+ CHARSET_INFO *get_default_collation(CHARSET_INFO *cs,
+ myf my_flags= MYF(0))
+ {
+ return Charset_loader_mysys::get_default_collation(cs, my_flags);
+ }
+};
+
+
+/////////////////////////////////////////////////////////////////////
+
+/*
An exact character set, e.g:
CHARACTER SET latin1
*/
@@ -96,6 +216,15 @@ public:
return m_ci == &my_collation_contextually_typed_binary;
}
bool raise_if_not_equal(const Lex_context_collation &cl) const;
+ /*
+ Skip the character set prefix, return the suffix.
+ utf8mb4_uca1400_as_ci -> uca1400_as_ci
+ */
+ LEX_CSTRING collation_name_context_suffix() const
+ {
+ return m_ci->get_collation_name(MY_COLLATION_NAME_MODE_CONTEXT);
+ }
+ LEX_CSTRING collation_name_for_show() const;
};
@@ -148,11 +277,23 @@ public:
}
CHARSET_INFO *charset_info() const { return m_ci; }
Type type() const { return m_type; }
+ LEX_CSTRING collation_name_for_show() const
+ {
+ switch (m_type) {
+ case TYPE_CONTEXTUALLY_TYPED:
+ return Lex_context_collation(m_ci).collation_name_for_show();
+ case TYPE_EXACT:
+ return m_ci->coll_name;
+ }
+ DBUG_ASSERT(0);
+ return m_ci->coll_name;
+ }
void set_collate_default()
{
m_ci= &my_collation_contextually_typed_default;
m_type= TYPE_CONTEXTUALLY_TYPED;
}
+ bool set_by_name(const char *name, myf my_flags); // e.g. MY_UTF8_IS_UTF8MB3
bool raise_if_conflicts_with_context_collation(const Lex_context_collation &)
const;
bool merge_exact_charset(const Lex_exact_charset &rhs);
@@ -172,6 +313,10 @@ public:
{
init(rhs.charset_info(), TYPE_EXACT);
}
+ Lex_extended_collation(const Lex_context_collation &rhs)
+ {
+ init(rhs.charset_info(), TYPE_CONTEXTUALLY_TYPED);
+ }
};
@@ -221,6 +366,19 @@ public:
DBUG_ASSERT(0);
return false;
}
+ bool merge_collation_override(const Lex_extended_collation_st &cl)
+ {
+ switch (cl.type()) {
+ case Lex_extended_collation_st::TYPE_EXACT:
+ return merge_exact_collation_override(
+ Lex_exact_collation(cl.charset_info()));
+ case Lex_extended_collation_st::TYPE_CONTEXTUALLY_TYPED:
+ return merge_context_collation_override(
+ Lex_context_collation(cl.charset_info()));
+ }
+ DBUG_ASSERT(0);
+ return false;
+ }
/*
Add a context collation:
CHARACTER SET cs [COLLATE cl] ... COLLATE DEFAULT
@@ -232,6 +390,7 @@ public:
CHARACTER SET cs [COLLATE cl] ... COLLATE latin1_bin
*/
bool merge_exact_collation(const Lex_exact_collation &cl);
+ bool merge_exact_collation_override(const Lex_exact_collation &cl);
Lex_exact_collation collation() const
{
return Lex_exact_collation(m_ci);
@@ -410,6 +569,7 @@ public:
case TYPE_COLLATE_EXACT:
return merge_exact_collation(Lex_exact_collation(cl.charset_info()));
case TYPE_COLLATE_CONTEXTUALLY_TYPED:
+ return merge_context_collation(Lex_context_collation(cl.charset_info()));
case TYPE_CHARACTER_SET:
case TYPE_CHARACTER_SET_COLLATE_EXACT:
break;
@@ -426,7 +586,6 @@ public:
bool merge_column_collate_clause_and_collate_clause(
const Lex_exact_charset_extended_collation_attrs_st &cl)
{
- DBUG_ASSERT(m_type != TYPE_COLLATE_CONTEXTUALLY_TYPED);
DBUG_ASSERT(m_type != TYPE_CHARACTER_SET);
switch (cl.type()) {
case TYPE_EMPTY:
@@ -434,6 +593,7 @@ public:
case TYPE_COLLATE_EXACT:
return merge_exact_collation(Lex_exact_collation(cl.charset_info()));
case TYPE_COLLATE_CONTEXTUALLY_TYPED:
+ return merge_context_collation(Lex_context_collation(cl.charset_info()));
case TYPE_CHARACTER_SET:
case TYPE_CHARACTER_SET_COLLATE_EXACT:
break;
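
The "contextually typed" collations handled above are names such as uca1400_as_ci that only become a real collation once a character set is known. The sketch below (illustration only, standard C++; exact_collation_name is a hypothetical helper) shows the naming convention the code relies on, including the "+ 8" in lex_charset.cc that skips the 8-character "utf8mb4_" prefix; the real resolution is done by Charset_loader_server::get_exact_collation_by_context_name_or_error().

    #include <cassert>
    #include <cstring>
    #include <string>

    // Compose an exact collation name from a character set name and a
    // contextually typed suffix, e.g. ("utf8mb4", "uca1400_as_ci")
    // -> "utf8mb4_uca1400_as_ci".
    static std::string exact_collation_name(const std::string &cs,
                                            const std::string &context_suffix)
    {
      return cs + "_" + context_suffix;
    }

    int main()
    {
      const char *stored= "utf8mb4_uca1400_as_ci";        // internal coll_name
      assert(!strncmp(stored, "utf8mb4_uca1400_", 16));   // same check as the patch
      const char *context= stored + 8;                    // -> "uca1400_as_ci"
      assert(exact_collation_name("utf8mb4", context) == stored);
      return 0;
    }
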
diff --git a/sql/log.cc b/sql/log.cc
index 4161b522438..b032bcf4963 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -834,14 +834,13 @@ bool Log_to_csv_event_handler::
uint field_index;
Silence_log_table_errors error_handler;
Open_tables_backup open_tables_backup;
- bool save_time_zone_used;
+ THD::used_t save_time_zone_used= thd->used & THD::TIME_ZONE_USED;
DBUG_ENTER("log_general");
/*
CSV uses TIME_to_timestamp() internally if table needs to be repaired
- which will set thd->time_zone_used
+ which will set TIME_ZONE_USED
*/
- save_time_zone_used= thd->time_zone_used;
table_list.init_one_table(&MYSQL_SCHEMA_NAME, &GENERAL_LOG_NAME, 0,
TL_WRITE_CONCURRENT_INSERT);
@@ -942,7 +941,7 @@ err:
if (need_close)
close_log_table(thd, &open_tables_backup);
- thd->time_zone_used= save_time_zone_used;
+ thd->used= (thd->used & ~THD::TIME_ZONE_USED) | save_time_zone_used;
DBUG_RETURN(result);
}
@@ -989,7 +988,7 @@ bool Log_to_csv_event_handler::
Silence_log_table_errors error_handler;
Open_tables_backup open_tables_backup;
CHARSET_INFO *client_cs= thd->variables.character_set_client;
- bool save_time_zone_used;
+ THD::used_t save_time_zone_used= thd->used & THD::TIME_ZONE_USED;
ulong query_time= (ulong) MY_MIN(query_utime/1000000, TIME_MAX_VALUE_SECONDS);
ulong lock_time= (ulong) MY_MIN(lock_utime/1000000, TIME_MAX_VALUE_SECONDS);
ulong query_time_micro= (ulong) (query_utime % 1000000);
@@ -997,11 +996,6 @@ bool Log_to_csv_event_handler::
DBUG_ENTER("Log_to_csv_event_handler::log_slow");
thd->push_internal_handler(& error_handler);
- /*
- CSV uses TIME_to_timestamp() internally if table needs to be repaired
- which will set thd->time_zone_used
- */
- save_time_zone_used= thd->time_zone_used;
table_list.init_one_table(&MYSQL_SCHEMA_NAME, &SLOW_LOG_NAME, 0,
TL_WRITE_CONCURRENT_INSERT);
@@ -1129,7 +1123,7 @@ err:
}
if (need_close)
close_log_table(thd, &open_tables_backup);
- thd->time_zone_used= save_time_zone_used;
+ thd->used= (thd->used & ~THD::TIME_ZONE_USED) | save_time_zone_used;
DBUG_RETURN(result);
}
@@ -6896,7 +6890,7 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info, my_bool *with_annotate)
if (write_event(&e, cache_data, file))
goto err;
}
- if (thd->rand_used)
+ if (thd->used & THD::RAND_USED)
{
Rand_log_event e(thd,thd->rand_saved_seed1,thd->rand_saved_seed2,
using_trans, direct);
diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc
index 78fb1a97409..98325fe7498 100644
--- a/sql/log_event_server.cc
+++ b/sql/log_event_server.cc
@@ -200,7 +200,7 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error,
err->get_sql_errno());
}
- if (ha_error != 0)
+ if (ha_error != 0 && !thd->killed)
rli->report(level, errcode, rgi->gtid_info(),
"Could not execute %s event on table %s.%s;"
"%s handler error %s; "
@@ -1289,7 +1289,7 @@ bool Query_log_event::write()
}
}
- if (thd && thd->query_start_sec_part_used)
+ if (thd && (thd->used & THD::QUERY_START_SEC_PART_USED))
{
*start++= Q_HRNOW;
get_time();
@@ -1415,8 +1415,8 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
bool direct, bool suppress_use, int errcode)
:Log_event(thd_arg,
- (thd_arg->thread_specific_used ? LOG_EVENT_THREAD_SPECIFIC_F :
- 0) |
+ ((thd_arg->used & THD::THREAD_SPECIFIC_USED)
+ ? LOG_EVENT_THREAD_SPECIFIC_F : 0) |
(suppress_use ? LOG_EVENT_SUPPRESS_USE_F : 0),
using_trans),
data_buf(0), query(query_arg), catalog(thd_arg->catalog),
@@ -1502,7 +1502,7 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
int2store(charset, thd_arg->variables.character_set_client->number);
int2store(charset+2, thd_arg->variables.collation_connection->number);
int2store(charset+4, thd_arg->variables.collation_server->number);
- if (thd_arg->time_zone_used)
+ if (thd_arg->used & THD::TIME_ZONE_USED)
{
/*
Note that our event becomes dependent on the Time_zone object
@@ -2953,7 +2953,8 @@ Load_log_event::Load_log_event(THD *thd_arg, const sql_exchange *ex,
enum enum_duplicates handle_dup,
bool ignore, bool using_trans)
:Log_event(thd_arg,
- thd_arg->thread_specific_used ? LOG_EVENT_THREAD_SPECIFIC_F : 0,
+ (thd_arg->used & THD::THREAD_SPECIFIC_USED)
+ ? LOG_EVENT_THREAD_SPECIFIC_F : 0,
using_trans),
thread_id(thd_arg->thread_id),
slave_proxy_id((ulong)thd_arg->variables.pseudo_thread_id),
@@ -4594,7 +4595,9 @@ void User_var_log_event::pack_info(Protocol* protocol)
case STRING_RESULT:
{
/* 15 is for 'COLLATE' and other chars */
- char buf_mem[FN_REFLEN + 512 + 1 + 2*MY_CS_NAME_SIZE+15];
+ char buf_mem[FN_REFLEN + 512 + 1 + 15 +
+ MY_CS_CHARACTER_SET_NAME_SIZE +
+ MY_CS_COLLATION_NAME_SIZE];
String buf(buf_mem, sizeof(buf_mem), system_charset_info);
CHARSET_INFO *cs;
buf.length(0);
@@ -4614,7 +4617,7 @@ void User_var_log_event::pack_info(Protocol* protocol)
return;
old_len= buf.length();
if (buf.reserve(old_len + val_len * 2 + 3 + sizeof(" COLLATE ") +
- MY_CS_NAME_SIZE))
+ MY_CS_COLLATION_NAME_SIZE))
return;
beg= const_cast<char *>(buf.ptr()) + old_len;
end= str_to_hex(beg, val, val_len);
@@ -5733,6 +5736,13 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
DBUG_ASSERT(rgi->thd == thd);
/*
Whereas a Query_log_event can rely on the normal command execution logic to
set/reset the slave thread's timer, a Rows_log_event update needs to set
the timer itself
+ */
+ thd->set_query_timer();
+
+ /*
If there is no locks taken, this is the first binrow event seen
after the table map events. We should then lock all the tables
used in the transaction and proceed with execution of the actual
@@ -6124,6 +6134,12 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
if (likely(error == 0) && !transactional_table)
thd->transaction->all.modified_non_trans_table=
thd->transaction->stmt.modified_non_trans_table= TRUE;
+ if (likely(error == 0))
+ {
+ error= thd->killed_errno();
+ if (error && !thd->is_error())
+ my_error(error, MYF(0));
+ }
} // row processing loop
while (error == 0 && (m_curr_row != m_rows_end));
@@ -6193,11 +6209,13 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
free_root(thd->mem_root, MYF(MY_KEEP_PREALLOC));
}
+ thd->reset_query_timer();
DBUG_RETURN(error);
err:
restore_empty_query_table_list(thd->lex);
rgi->slave_close_thread_tables(thd);
+ thd->reset_query_timer();
DBUG_RETURN(error);
}
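
The do_apply_event() hunks arm the per-statement timer (backed by the new slave_max_statement_time variable) on entry and disarm it on both the normal and the error exit. The patch does this with explicit calls; the scope guard below, with a hypothetical Session stand-in for THD, is just a compact way to express the same invariant.

    #include <cstdio>

    // Hypothetical stand-in for THD's set_query_timer()/reset_query_timer().
    struct Session
    {
      void set_query_timer()   { std::puts("timer armed (slave_max_statement_time)"); }
      void reset_query_timer() { std::puts("timer disarmed"); }
    };

    // Guard expressing the invariant maintained by hand in the patch:
    // every exit path out of do_apply_event() disarms the timer.
    struct Query_timer_guard
    {
      Session *thd;
      explicit Query_timer_guard(Session *t) : thd(t) { thd->set_query_timer(); }
      ~Query_timer_guard()                            { thd->reset_query_timer(); }
    };

    int apply_rows_event(Session *thd, bool fail)
    {
      Query_timer_guard guard(thd);   // armed here ...
      if (fail)
        return 1;                     // ... disarmed on the error path
      return 0;                       // ... and on the normal path
    }

    int main() { Session s; return apply_rows_event(&s, false); }
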
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 498d32bac7a..48580138673 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -461,6 +461,8 @@ ulonglong binlog_cache_size=0;
ulonglong binlog_file_cache_size=0;
ulonglong max_binlog_cache_size=0;
ulong slave_max_allowed_packet= 0;
+double slave_max_statement_time_double;
+ulonglong slave_max_statement_time;
ulonglong binlog_stmt_cache_size=0;
ulonglong max_binlog_stmt_cache_size=0;
ulonglong test_flags;
@@ -6365,13 +6367,6 @@ struct my_option my_long_options[]=
{"help", '?', "Display this help and exit.",
&opt_help, &opt_help, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
0, 0},
- {"allow-suspicious-udfs", 0,
- "Allows use of UDFs consisting of only one symbol xxx() "
- "without corresponding xxx_init() or xxx_deinit(). That also means "
- "that one can load any function from any library, for example exit() "
- "from libc.so",
- &opt_allow_suspicious_udfs, &opt_allow_suspicious_udfs,
- 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax. This mode "
"will also set transaction isolation level 'serializable'.", 0, 0, 0,
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -6696,12 +6691,6 @@ struct my_option my_long_options[]=
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"silent-startup", OPT_SILENT, "Don't print [Note] to the error log during startup.",
&opt_silent_startup, &opt_silent_startup, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
-#ifndef DISABLE_GRANT_OPTIONS
- {"skip-grant-tables", 0,
- "Start without grant tables. This gives all users FULL ACCESS to all tables.",
- &opt_noacl, &opt_noacl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0,
- 0},
-#endif
{"skip-host-cache", OPT_SKIP_HOST_CACHE, "Don't cache host names.", 0, 0, 0,
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"skip-slave-start", 0,
@@ -7396,6 +7385,7 @@ SHOW_VAR status_vars[]= {
{"Handler_update", (char*) offsetof(STATUS_VAR, ha_update_count), SHOW_LONG_STATUS},
{"Handler_write", (char*) offsetof(STATUS_VAR, ha_write_count), SHOW_LONG_STATUS},
{"Key", (char*) &show_default_keycache, SHOW_FUNC},
+ {"optimizer_join_prefixes_check_calls", (char*) offsetof(STATUS_VAR, optimizer_join_prefixes_check_calls), SHOW_LONG_STATUS},
{"Last_query_cost", (char*) offsetof(STATUS_VAR, last_query_cost), SHOW_DOUBLE_STATUS},
#ifndef DBUG_OFF
{"malloc_calls", (char*) &malloc_calls, SHOW_LONG},
@@ -8751,6 +8741,8 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
max_relay_log_size_var->option.def_value=
max_binlog_size_var->option.def_value;
}
+ slave_max_statement_time=
+ double2ulonglong(slave_max_statement_time_double * 1e6);
}
#endif
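
The get_options() hunk converts the user-facing option, seconds given as a double, into the whole-microsecond value used internally. A minimal version of that arithmetic (the server's double2ulonglong() additionally saturates out-of-range values):

    #include <cstdint>

    // e.g. --slave-max-statement-time=1.5  ->  1500000 microseconds
    static uint64_t seconds_to_microseconds(double seconds)
    {
      return (uint64_t) (seconds * 1e6);
    }
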
diff --git a/sql/mysqld.h b/sql/mysqld.h
index b9549054d77..d36e7c0a014 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -244,6 +244,8 @@ extern ulonglong binlog_cache_size, binlog_stmt_cache_size, binlog_file_cache_si
extern ulonglong max_binlog_cache_size, max_binlog_stmt_cache_size;
extern ulong max_binlog_size;
extern ulong slave_max_allowed_packet;
+extern ulonglong slave_max_statement_time;
+extern double slave_max_statement_time_double;
extern ulong opt_binlog_rows_event_max_size;
extern ulong binlog_row_metadata;
extern ulong thread_cache_size;
diff --git a/sql/opt_split.cc b/sql/opt_split.cc
index 85e2df2dbb3..86ed442814c 100644
--- a/sql/opt_split.cc
+++ b/sql/opt_split.cc
@@ -769,7 +769,7 @@ void JOIN::add_keyuses_for_splitting()
added_keyuse->validity_ref= &keyuse_ext->validity_var;
}
- if (sort_and_filter_keyuse(thd, &keyuse, true))
+ if (sort_and_filter_keyuse(this, &keyuse, true))
goto err;
optimize_keyuse(this, &keyuse);
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index a7e4f982aa2..da871e099dd 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -2895,7 +2895,7 @@ void optimize_semi_joins(JOIN *join, table_map remaining_tables, uint idx,
pos[-1].inner_tables_handled_with_other_sjs;
}
- pos->prefix_cost.convert_from_cost(*current_read_time);
+ pos->prefix_cost= *current_read_time;
pos->prefix_record_count= *current_record_count;
{
@@ -3017,7 +3017,7 @@ void optimize_semi_joins(JOIN *join, table_map remaining_tables, uint idx,
update_sj_state(join, new_join_tab, idx, remaining_tables);
- pos->prefix_cost.convert_from_cost(*current_read_time);
+ pos->prefix_cost= *current_read_time;
pos->prefix_record_count= *current_record_count;
pos->dups_producing_tables= dups_producing_tables;
}
@@ -3108,15 +3108,15 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
else
{
/* This is SJ-Materialization with lookups */
- Cost_estimate prefix_cost;
+ double prefix_cost;
signed int first_tab= (int)idx - mat_info->tables;
- double prefix_rec_count;
+ double prefix_rec_count, mat_read_time;
Json_writer_object trace(join->thd);
trace.add("strategy", "SJ-Materialization");
if (first_tab < (int)join->const_tables)
{
- prefix_cost.reset();
+ prefix_cost= 0;
prefix_rec_count= 1.0;
}
else
@@ -3125,9 +3125,8 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
prefix_rec_count= join->positions[first_tab].prefix_record_count;
}
- double mat_read_time= prefix_cost.total_cost();
mat_read_time=
- COST_ADD(mat_read_time,
+ COST_ADD(prefix_cost,
COST_ADD(mat_info->materialization_cost.total_cost(),
COST_MULT(prefix_rec_count,
mat_info->lookup_cost.total_cost())));
@@ -3172,7 +3171,7 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
}
else
{
- prefix_cost= join->positions[first_tab - 1].prefix_cost.total_cost();
+ prefix_cost= join->positions[first_tab - 1].prefix_cost;
prefix_rec_count= join->positions[first_tab - 1].prefix_record_count;
}
@@ -3536,7 +3535,7 @@ bool Duplicate_weedout_picker::check_qep(JOIN *join,
}
else
{
- dups_cost= join->positions[first_tab - 1].prefix_cost.total_cost();
+ dups_cost= join->positions[first_tab - 1].prefix_cost;
prefix_rec_count= join->positions[first_tab - 1].prefix_record_count;
temptable_rec_size= 8; /* This is not true but we'll make it so */
}
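
After this change POSITION::prefix_cost is a plain double rather than a Cost_estimate, so the SJ-Materialization lookup strategy can accumulate its cost directly. A sketch of that formula with simplified cost_add/cost_mult helpers; the server's real COST_ADD/COST_MULT macros may differ, e.g. in how they guard against overflow.

    #include <algorithm>
    #include <cfloat>

    static double cost_add(double a, double b)  { return std::min(a + b, DBL_MAX); }
    static double cost_mult(double a, double b) { return std::min(a * b, DBL_MAX); }

    // mat_read_time = prefix_cost + materialization_cost
    //                 + prefix_rec_count * lookup_cost
    double sj_mat_lookup_cost(double prefix_cost, double prefix_rec_count,
                              double materialization_cost, double lookup_cost)
    {
      return cost_add(prefix_cost,
                      cost_add(materialization_cost,
                               cost_mult(prefix_rec_count, lookup_cost)));
    }
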
diff --git a/sql/opt_table_elimination.cc b/sql/opt_table_elimination.cc
index 8c4720bdec4..b8e046995e9 100644
--- a/sql/opt_table_elimination.cc
+++ b/sql/opt_table_elimination.cc
@@ -134,6 +134,11 @@
- Nodes representing unique keys. Unique key has
= incoming edges from key component value modules
= outgoing edge to key's table module
+ - Nodes representing unique pseudo-keys for derived tables.
+ Unique pseudo-keys are composed as a result of GROUP BY expressions.
+ Like normal unique keys, they have:
+ = incoming edges from key component value modules
+ = outgoing edge to key's table module
- Inner side of outer join module. Outer join module has
= incoming edges from table value modules
= No outgoing edges. Once we reach it, we know we can eliminate the
@@ -205,6 +210,7 @@ class Dep_module;
class Dep_module_expr;
class Dep_module_goal;
class Dep_module_key;
+ class Dep_module_pseudo_key;
class Dep_analysis_context;
@@ -278,6 +284,8 @@ private:
Dep_module_key *key_dep;
/* Otherwise, this and advance */
uint equality_no;
+ /* Or this one and advance */
+ Dep_module_pseudo_key *pseudo_key_dep;
};
friend class Dep_analysis_context;
friend class Field_dependency_recorder;
@@ -302,12 +310,20 @@ class Dep_value_table : public Dep_value
{
public:
Dep_value_table(TABLE *table_arg) :
- table(table_arg), fields(NULL), keys(NULL)
+ table(table_arg), fields(NULL), keys(NULL), pseudo_key(NULL)
{}
TABLE *table; /* Table this object is representing */
/* Ordered list of fields that belong to this table */
Dep_value_field *fields;
- Dep_module_key *keys; /* Ordered list of Unique keys in this table */
+
+ /* Ordered list of Unique keys in this table */
+ Dep_module_key *keys;
+
+ /*
+ Possible unique pseudo-key applicable for this table
+ (only none or a single one is possible)
+ */
+ Dep_module_pseudo_key *pseudo_key;
/* Iteration over unbound modules that are our dependencies */
Iterator init_unbound_modules_iter(char *buf);
@@ -443,9 +459,63 @@ private:
const size_t Dep_module_key::iterator_size=
ALIGN_SIZE(sizeof(Dep_module_key::Value_iter));
-const size_t Dep_module::iterator_size=
- MY_MAX(Dep_module_expr::iterator_size, Dep_module_key::iterator_size);
+/*
+ A unique pseudo-key module for a derived table.
+ For example, a derived table
+ "SELECT a, count(*) from t1 GROUP BY a"
+ has unique values in its first field "a" due to GROUP BY expression
+ so this can be considered as a unique key for this derived table
+*/
+
+class Dep_module_pseudo_key : public Dep_module
+{
+public:
+ Dep_module_pseudo_key(Dep_value_table *table_arg,
+ MY_BITMAP *exposed_fields,
+ uint exposed_fields_num)
+ : table(table_arg), exposed_fields_map(exposed_fields)
+ {
+ unbound_args= exposed_fields_num;
+ }
+
+ Dep_value_table *table;
+
+ Iterator init_unbound_values_iter(char *buf) override;
+
+ Dep_value *get_next_unbound_value(Dep_analysis_context *dac,
+ Iterator iter) override;
+
+ bool covers_field(int field_index);
+
+ static const size_t iterator_size;
+
+private:
+ /*
+ Bitmap of field numbers in the derived table's SELECT list
+ which are included in the GROUP BY expression.
+ For example, unique pseudo-key for SQL
+ "SELECT count(*), b, a FROM t1 GROUP BY a, b"
+ will include two elements: {2} and {1}, since "a" and "b" are on the
+ GROUP BY list and also are present on the SELECT list with numbers 2 and 1
+ (numeration starts from 0).
+ */
+ MY_BITMAP *exposed_fields_map;
+
+ class Value_iter
+ {
+ public:
+ Dep_value_table *table;
+ };
+};
+
+const size_t Dep_module_pseudo_key::iterator_size=
+ ALIGN_SIZE(sizeof(Dep_module_pseudo_key::Value_iter));
+
+const size_t Dep_module::iterator_size=
+ MY_MAX(Dep_module_expr::iterator_size,
+ MY_MAX(Dep_module_key::iterator_size,
+ Dep_module_pseudo_key::iterator_size));
/*
A module that represents outer join that we're trying to eliminate. If we
@@ -507,13 +577,18 @@ public:
to see if expression equality_mods[expr_no] depends on the given field.
*/
MY_BITMAP expr_deps;
-
- Dep_value_table *create_table_value(TABLE *table);
+
+ Dep_value_table *create_table_value(TABLE_LIST *table_list);
Dep_value_field *get_field_value(Field *field);
#ifndef DBUG_OFF
void dbug_print_deps();
#endif
+
+private:
+ void create_unique_pseudo_key_if_needed(TABLE_LIST *table_list,
+ Dep_value_table *tbl_dep);
+ int find_field_in_list(List<Item> &fields_list, Item *field);
};
@@ -851,7 +926,7 @@ bool check_func_dependency(JOIN *join,
/* Create Dep_value_table objects for all tables we're trying to eliminate */
if (oj_tbl)
{
- if (!dac.create_table_value(oj_tbl->table))
+ if (!dac.create_table_value(oj_tbl))
return FALSE; /* purecov: inspected */
}
else
@@ -861,7 +936,7 @@ bool check_func_dependency(JOIN *join,
{
if (tbl->table && (tbl->table->map & dep_tables))
{
- if (!dac.create_table_value(tbl->table))
+ if (!dac.create_table_value(tbl))
return FALSE; /* purecov: inspected */
}
}
@@ -1577,33 +1652,156 @@ void add_module_expr(Dep_analysis_context *ctx, Dep_module_expr **eq_mod,
DESCRIPTION
Create a Dep_value_table object for the given table. Also create
Dep_module_key objects for all unique keys in the table.
+ Create a unique pseudo-key if this table is derived and has
+ a GROUP BY expression.
RETURN
Created table value object
NULL if out of memory
*/
-Dep_value_table *Dep_analysis_context::create_table_value(TABLE *table)
+Dep_value_table *
+Dep_analysis_context::create_table_value(TABLE_LIST *table_list)
{
Dep_value_table *tbl_dep;
- if (!(tbl_dep= new Dep_value_table(table)))
+ if (!(tbl_dep= new Dep_value_table(table_list->table)))
return NULL; /* purecov: inspected */
Dep_module_key **key_list= &(tbl_dep->keys);
/* Add dependencies for unique keys */
- for (uint i=0; i < table->s->keys; i++)
+ for (uint i= 0; i < table_list->table->s->keys; i++)
{
- KEY *key= table->key_info + i;
+ KEY *key= table_list->table->key_info + i;
if (key->flags & HA_NOSAME)
{
Dep_module_key *key_dep;
- if (!(key_dep= new Dep_module_key(tbl_dep, i, key->user_defined_key_parts)))
+ if (!(key_dep= new Dep_module_key(tbl_dep, i,
+ key->user_defined_key_parts)))
return NULL;
*key_list= key_dep;
key_list= &(key_dep->next_table_key);
}
}
- return table_deps[table->tablenr]= tbl_dep;
+
+ create_unique_pseudo_key_if_needed(table_list, tbl_dep);
+ return table_deps[table_list->table->tablenr]= tbl_dep;
+}
+
+
+/*
+ @brief
+ Check if we can create a unique pseudo-key for the passed table.
+ If we can, create a dependency for it
+
+ @detail
+ Currently, pseudo-key is created for the list of GROUP BY columns.
+
+ TODO: also it can be created if the query uses
+ - SELECT DISTINCT
+ - UNION DISTINCT (not UNION ALL)
+*/
+
+void Dep_analysis_context::create_unique_pseudo_key_if_needed(
+ TABLE_LIST *table_list, Dep_value_table *tbl_dep)
+{
+ auto select_unit= table_list->get_unit();
+ SELECT_LEX *first_select= nullptr;
+ if (select_unit)
+ {
+ first_select= select_unit->first_select();
+
+ /*
+ Exclude UNION (ALL) queries from consideration by checking
+ next_select() == nullptr
+ */
+ if (unlikely(select_unit->first_select()->next_select()))
+ first_select= nullptr;
+ }
+
+ /*
+ GROUP BY expression is considered as a unique pseudo-key
+ for the derived table. Add this pseudo key as a dependency.
+
+ first_select->join is NULL for degenerate derived tables
+ which are known to have just one row and so were already materialized
+ by the optimizer, check this here
+ */
+ if (first_select && first_select->join &&
+ first_select->group_list.elements > 0)
+ {
+ auto max_possible_elements= first_select->join->fields_list.elements;
+ void *buf;
+ MY_BITMAP *exposed_fields= (MY_BITMAP*)
+ current_thd->alloc(sizeof(MY_BITMAP));
+ if (!(buf= current_thd->alloc(bitmap_buffer_size(max_possible_elements))) ||
+ my_bitmap_init(exposed_fields, (my_bitmap_map*)buf,
+ max_possible_elements))
+ // Memory allocation failed
+ return;
+ bitmap_clear_all(exposed_fields);
+ uint exposed_fields_count= 0;
+
+ bool valid= true;
+ for (auto cur_group= first_select->group_list.first;
+ cur_group;
+ cur_group= cur_group->next)
+ {
+ auto elem= *(cur_group->item);
+ /*
+ Make sure GROUP BY elements contain only fields
+ and no functions or other expressions
+ */
+ if (elem->type() != Item::FIELD_ITEM)
+ {
+ valid= false;
+ break;
+ }
+ auto field_no= find_field_in_list(first_select->join->fields_list, elem);
+ if (field_no == -1)
+ {
+ /*
+ This GROUP BY element is not present in the select list. This is a
+ case like this:
+ (SELECT a FROM t1 GROUP by a,b) as TBL
+ Here, the combination of (a,b) is unique, but the select doesn't
+ include "b". "a" alone is not unique, so TBL doesn't have a unique
+ pseudo-key.
+ */
+ valid= false;
+ break;
+ }
+ bitmap_set_bit(exposed_fields, field_no);
+ exposed_fields_count++;
+ }
+ if (valid)
+ {
+ Dep_module_pseudo_key *pseudo_key;
+ pseudo_key= new Dep_module_pseudo_key(tbl_dep, exposed_fields,
+ exposed_fields_count);
+ tbl_dep->pseudo_key= pseudo_key;
+ }
+ }
+}
+
+
+/*
+ Iterate the list of fields and look for the given field.
+ Returns the index of the field if it is found on the list
+ and -1 otherwise
+*/
+
+int Dep_analysis_context::find_field_in_list(List<Item> &fields_list,
+ Item *field)
+{
+ List_iterator<Item> it(fields_list);
+ int field_idx= 0;
+ while (auto next_field= it++)
+ {
+ if (next_field->eq(field, false))
+ return field_idx;
+ field_idx++;
+ }
+ return -1; /*not found*/
}
@@ -1746,11 +1944,39 @@ Dep_value* Dep_module_key::get_next_unbound_value(Dep_analysis_context *dac,
}
+char *Dep_module_pseudo_key::init_unbound_values_iter(char *buf)
+{
+ Value_iter *iter= ALIGN_PTR(my_ptrdiff_t(buf), Value_iter);
+ iter->table= table;
+ return (char *) iter;
+}
+
+Dep_value *
+Dep_module_pseudo_key::get_next_unbound_value(Dep_analysis_context *dac,
+ Dep_module::Iterator iter)
+{
+ Dep_value *res= ((Value_iter *) iter)->table;
+ ((Value_iter *) iter)->table= NULL;
+ return res;
+}
+
+
+/*
+ Check if column number field_no is covered by the pseudo-key.
+*/
+
+bool Dep_module_pseudo_key::covers_field(int field_no)
+{
+ return bitmap_is_set(exposed_fields_map, field_no) > 0;
+}
+
+
Dep_value::Iterator Dep_value_field::init_unbound_modules_iter(char *buf)
{
Module_iter *iter= ALIGN_PTR(my_ptrdiff_t(buf), Module_iter);
iter->key_dep= table->keys;
iter->equality_no= 0;
+ iter->pseudo_key_dep= table->pseudo_key;
return (char*)iter;
}
@@ -1758,7 +1984,8 @@ Dep_value::Iterator Dep_value_field::init_unbound_modules_iter(char *buf)
void
Dep_value_field::make_unbound_modules_iter_skip_keys(Dep_value::Iterator iter)
{
- ((Module_iter*)iter)->key_dep= NULL;
+ ((Module_iter*) iter)->key_dep= NULL;
+ ((Module_iter*) iter)->pseudo_key_dep= NULL;
}
@@ -1786,6 +2013,16 @@ Dep_module* Dep_value_field::get_next_unbound_module(Dep_analysis_context *dac,
}
else
di->key_dep= NULL;
+
+ Dep_module_pseudo_key *pseudo_key_dep= di->pseudo_key_dep;
+ if (pseudo_key_dep && !pseudo_key_dep->is_applicable() &&
+ pseudo_key_dep->covers_field(field->field_index))
+ {
+ di->pseudo_key_dep= NULL;
+ return pseudo_key_dep;
+ }
+ else
+ di->pseudo_key_dep= NULL;
/*
Then walk through [multi]equalities and find those that
@@ -1819,7 +2056,7 @@ static void mark_as_eliminated(JOIN *join, TABLE_LIST *tbl,
TABLE *table;
/*
NOTE: there are TABLE_LIST object that have
- tbl->table!= NULL && tbl->nested_join!=NULL and
+ tbl->table!= NULL && tbl->nested_join!=NULL and
tbl->table == tbl->nested_join->join_list->element(..)->table
*/
if (tbl->nested_join)
@@ -1848,7 +2085,6 @@ static void mark_as_eliminated(JOIN *join, TABLE_LIST *tbl,
tbl->on_expr->walk(&Item::mark_as_eliminated_processor, FALSE, NULL);
}
-
#ifndef DBUG_OFF
/* purecov: begin inspected */
void Dep_analysis_context::dbug_print_deps()
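
The essence of create_unique_pseudo_key_if_needed() above: a derived table gets a unique pseudo-key only when every GROUP BY element is a plain column that also appears in its SELECT list, and a bitmap records which SELECT columns the key covers. A standalone restatement of that check, using std::bitset and column names in place of MY_BITMAP and Item pointers:

    #include <bitset>
    #include <string>
    #include <vector>

    static bool group_by_forms_pseudo_key(const std::vector<std::string> &select_list,
                                          const std::vector<std::string> &group_by,
                                          std::bitset<64> *exposed_fields)
    {
      exposed_fields->reset();
      for (const std::string &g : group_by)
      {
        size_t pos= 0;
        while (pos < select_list.size() && select_list[pos] != g)
          pos++;
        if (pos == select_list.size())
          return false;             // GROUP BY column not in SELECT list: no key
        exposed_fields->set(pos);   // remember which SELECT column is covered
      }
      return true;
    }

    int main()
    {
      std::bitset<64> map;
      // (SELECT count(*), b, a FROM t1 GROUP BY a, b) -> key over columns {2,1}
      bool ok1= group_by_forms_pseudo_key({"count(*)", "b", "a"}, {"a", "b"}, &map);
      // (SELECT a FROM t1 GROUP BY a, b) -> "b" missing, no pseudo-key
      bool ok2= group_by_forms_pseudo_key({"a"}, {"a", "b"}, &map);
      return (ok1 && !ok2) ? 0 : 1;
    }
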
diff --git a/sql/privilege.h b/sql/privilege.h
index 9e505691736..f6980aeb6b1 100644
--- a/sql/privilege.h
+++ b/sql/privilege.h
@@ -570,6 +570,8 @@ constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_EXEC_MODE=
REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_MAX_ALLOWED_PACKET=
REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_MAX_STATEMENT_TIME=
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_NET_TIMEOUT=
REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_PARALLEL_MAX_QUEUED=
diff --git a/sql/protocol.cc b/sql/protocol.cc
index 33d865a3f9f..eee6236ffe7 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -876,13 +876,17 @@ bool Protocol_text::store_field_metadata(const THD * thd,
if (charset_for_protocol == &my_charset_bin || thd_charset == NULL)
{
/* No conversion */
- int2store(pos, charset_for_protocol->number);
+ uint id= charset_for_protocol->get_id(MY_COLLATION_ID_TYPE_COMPAT_100800);
+ DBUG_ASSERT(id <= UINT_MAX16);
+ int2store(pos, (uint16) id);
int4store(pos + 2, field.length);
}
else
{
/* With conversion */
- int2store(pos, thd_charset->number);
+ uint id= thd_charset->get_id(MY_COLLATION_ID_TYPE_COMPAT_100800);
+ DBUG_ASSERT(id <= UINT_MAX16);
+ int2store(pos, (uint16) id);
uint32 field_length= field.max_octet_length(charset_for_protocol,
thd_charset);
int4store(pos + 2, field_length);
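
store_field_metadata() now sends a collation id remapped for pre-10.8 clients, asserting it still fits the protocol's 2-byte slot before the 4-byte column length. The packing itself looks like this sketch; int2store/int4store below are simplified little-endian stand-ins for the server's byte-order macros.

    #include <cassert>
    #include <cstdint>

    static void int2store(unsigned char *p, uint16_t v)
    {
      p[0]= (unsigned char) (v & 0xFF);
      p[1]= (unsigned char) (v >> 8);
    }

    static void int4store(unsigned char *p, uint32_t v)
    {
      for (int i= 0; i < 4; i++)
        p[i]= (unsigned char) (v >> (8 * i));
    }

    // Pack the 2-byte collation id followed by the 4-byte column length.
    static void store_charset_and_length(unsigned char *pos, unsigned collation_id,
                                         uint32_t field_length)
    {
      assert(collation_id <= UINT16_MAX);   // ids above 0xFFFF must be remapped
      int2store(pos, (uint16_t) collation_id);
      int4store(pos + 2, field_length);
    }
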
diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc
index b9aea39e547..ab2523d960b 100644
--- a/sql/rpl_mi.cc
+++ b/sql/rpl_mi.cc
@@ -39,7 +39,7 @@ Master_info::Master_info(LEX_CSTRING *connection_name_arg,
clock_diff_with_master(0),
sync_counter(0), heartbeat_period(0), received_heartbeats(0),
master_id(0), prev_master_id(0),
- using_gtid(USE_GTID_NO), events_queued_since_last_gtid(0),
+ using_gtid(USE_GTID_SLAVE_POS), events_queued_since_last_gtid(0),
gtid_reconnect_event_skip_count(0), gtid_event_seen(false),
in_start_all_slaves(0), in_stop_all_slaves(0), in_flush_all_relay_logs(0),
users(0), killed(0),
@@ -210,7 +210,10 @@ void init_master_log_pos(Master_info* mi)
mi->master_log_name[0] = 0;
mi->master_log_pos = BIN_LOG_HEADER_SIZE; // skip magic number
- mi->using_gtid= Master_info::USE_GTID_NO;
+ if (mi->master_supports_gtid)
+ {
+ mi->using_gtid= Master_info::USE_GTID_SLAVE_POS;
+ }
mi->gtid_current_pos.reset();
mi->events_queued_since_last_gtid= 0;
mi->gtid_reconnect_event_skip_count= 0;
diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h
index ecfecabd6c9..6058b7fb34c 100644
--- a/sql/rpl_mi.h
+++ b/sql/rpl_mi.h
@@ -375,6 +375,21 @@ class Master_info : public Slave_reporting_capability
at time its alter info struct is about to be appened to the list.
*/
bool is_shutdown= false;
+
+ /*
+ A replica will default to Slave_Pos for using Using_Gtid; however, we
+ first need to test if the master supports GTIDs. If not, fall back to 'No'.
+ Cache the value so future RESET SLAVE commands don't revert to Slave_Pos.
+ */
+ bool master_supports_gtid= true;
+
+ /*
+ When TRUE, transition this server from being an active master to a slave.
+ This updates the replication state to account for any transactions which
+ were committed into the binary log. In particular, it merges
+ gtid_binlog_pos into gtid_slave_pos.
+ */
+ bool is_demotion= false;
};
struct start_alter_thd_args
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index 4a357722064..1338d85865f 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -10074,3 +10074,9 @@ ER_INCONSISTENT_SLAVE_TEMP_TABLE
eng "Replicated query '%s' table `%s.%s` can not be temporary"
ER_VERS_HIST_PART_FAILED
eng "Versioned table %`s.%`s: adding HISTORY partition(s) failed"
+WARN_OPTION_CHANGING
+ eng "%s is implicitly changing the value of '%s' from '%s' to '%s'"
+ER_CM_OPTION_MISSING_REQUIREMENT
+ eng "CHANGE MASTER TO option '%s=%s' is missing requirement %s"
+ER_SLAVE_STATEMENT_TIMEOUT 70100
+ eng "Slave log event execution was interrupted (slave_max_statement_time exceeded)"
diff --git a/sql/slave.cc b/sql/slave.cc
index 5fdecb3e1b6..e39efcc6bbc 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -1775,6 +1775,9 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
}
else
{
+ DBUG_EXECUTE_IF("mock_mariadb_primary_v5_in_get_master_version",
+ version= 5;);
+
/*
Note the following switch will bug when we have MySQL branch 30 ;)
*/
@@ -2358,6 +2361,14 @@ past_checksum:
after_set_capability:
#endif
+ if (!(mi->master_supports_gtid= version >= 10))
+ {
+ sql_print_information(
+ "Slave I/O thread: Falling back to Using_Gtid=No because "
+ "master does not support GTIDs");
+ mi->using_gtid= Master_info::USE_GTID_NO;
+ }
+
if (mi->using_gtid != Master_info::USE_GTID_NO)
{
/* Request dump to start from slave replication GTID state. */
diff --git a/sql/sp.cc b/sql/sp.cc
index 74743347816..6cfe1045b0c 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -217,12 +217,12 @@ TABLE_FIELD_TYPE proc_table_fields[MYSQL_PROC_FIELD_COUNT] =
},
{
{ STRING_WITH_LEN("collation_connection") },
- { STRING_WITH_LEN("char(32)") },
+ { STRING_WITH_LEN("char(") },
{ STRING_WITH_LEN("utf8mb3") }
},
{
{ STRING_WITH_LEN("db_collation") },
- { STRING_WITH_LEN("char(32)") },
+ { STRING_WITH_LEN("char(") },
{ STRING_WITH_LEN("utf8mb3") }
},
{
@@ -687,8 +687,7 @@ bool AUTHID::read_from_mysql_proc_row(THD *thd, TABLE *table)
*/
int
-Sp_handler::db_find_routine(THD *thd,
- const Database_qualified_name *name,
+Sp_handler::db_find_routine(THD *thd, const Database_qualified_name *name,
sp_head **sphp) const
{
TABLE *table;
@@ -697,7 +696,7 @@ Sp_handler::db_find_routine(THD *thd,
longlong created;
longlong modified;
Sp_chistics chistics;
- bool saved_time_zone_used= thd->time_zone_used;
+ THD::used_t saved_time_zone_used= thd->used & THD::TIME_ZONE_USED;
bool trans_commited= 0;
sql_mode_t sql_mode;
Stored_program_creation_ctx *creation_ctx;
@@ -763,15 +762,14 @@ Sp_handler::db_find_routine(THD *thd,
thd->commit_whole_transaction_and_close_tables();
new_trans.restore_old_transaction();
- ret= db_load_routine(thd, name, sphp,
- sql_mode, params, returns, body, chistics, definer,
- created, modified, NULL, creation_ctx);
+ ret= db_load_routine(thd, name, sphp, sql_mode, params, returns, body,
+ chistics, definer, created, modified, NULL, creation_ctx);
done:
/*
Restore the time zone flag as the timezone usage in proc table
does not affect replication.
*/
- thd->time_zone_used= saved_time_zone_used;
+ thd->used= (thd->used & ~THD::TIME_ZONE_USED) | saved_time_zone_used;
if (!trans_commited)
{
if (table)
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 7e5ea9e6a96..bb1ebb04e26 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -3080,17 +3080,17 @@ sp_head::show_create_routine_get_fields(THD *thd, const Sp_handler *sph,
fields->push_back(new (mem_root)
Item_empty_string(thd, "character_set_client",
- MY_CS_NAME_SIZE),
+ MY_CS_CHARACTER_SET_NAME_SIZE),
mem_root);
fields->push_back(new (mem_root)
Item_empty_string(thd, "collation_connection",
- MY_CS_NAME_SIZE),
+ MY_CS_COLLATION_NAME_SIZE),
mem_root);
fields->push_back(new (mem_root)
Item_empty_string(thd, "Database Collation",
- MY_CS_NAME_SIZE),
+ MY_CS_COLLATION_NAME_SIZE),
mem_root);
}
@@ -3156,17 +3156,17 @@ sp_head::show_create_routine(THD *thd, const Sp_handler *sph)
fields.push_back(new (mem_root)
Item_empty_string(thd, "character_set_client",
- MY_CS_NAME_SIZE),
+ MY_CS_CHARACTER_SET_NAME_SIZE),
thd->mem_root);
fields.push_back(new (mem_root)
Item_empty_string(thd, "collation_connection",
- MY_CS_NAME_SIZE),
+ MY_CS_COLLATION_NAME_SIZE),
thd->mem_root);
fields.push_back(new (mem_root)
Item_empty_string(thd, "Database Collation",
- MY_CS_NAME_SIZE),
+ MY_CS_CHARACTER_SET_NAME_SIZE),
thd->mem_root);
if (protocol->send_result_set_metadata(&fields,
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 57da25b5800..c3fc62f5f5d 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -637,7 +637,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
protocol_text(this), protocol_binary(this), initial_status_var(0),
m_current_stage_key(0), m_psi(0),
in_sub_stmt(0), log_all_errors(0),
- binlog_unsafe_warning_flags(0),
+ binlog_unsafe_warning_flags(0), used(0),
current_stmt_binlog_format(BINLOG_FORMAT_MIXED),
bulk_param(0),
table_map_for_update(0),
@@ -657,8 +657,6 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
is_fatal_error(0),
transaction_rollback_request(0),
is_fatal_sub_stmt_error(false),
- rand_used(0),
- time_zone_used(0),
in_lock_tables(0),
bootstrap(0),
derived_tables_processing(FALSE),
@@ -771,11 +769,10 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
security_ctx= &main_security_ctx;
no_errors= 0;
password= 0;
- query_start_sec_part_used= 0;
count_cuted_fields= CHECK_FIELD_IGNORE;
killed= NOT_KILLED;
killed_err= 0;
- is_slave_error= thread_specific_used= FALSE;
+ is_slave_error= FALSE;
my_hash_clear(&handler_tables_hash);
my_hash_clear(&ull_hash);
tmp_table=0;
@@ -2099,7 +2096,8 @@ int THD::killed_errno()
DBUG_RETURN(ER_QUERY_INTERRUPTED);
case KILL_TIMEOUT:
case KILL_TIMEOUT_HARD:
- DBUG_RETURN(ER_STATEMENT_TIMEOUT);
+ DBUG_RETURN(slave_thread ?
+ ER_SLAVE_STATEMENT_TIMEOUT : ER_STATEMENT_TIMEOUT);
case KILL_SERVER:
case KILL_SERVER_HARD:
DBUG_RETURN(ER_SERVER_SHUTDOWN);
@@ -2235,13 +2233,13 @@ void THD::cleanup_after_query()
thd_progress_end(this);
/*
- Reset rand_used so that detection of calls to rand() will save random
+ Reset RAND_USED so that detection of calls to rand() will save random
seeds if needed by the slave.
- Do not reset rand_used if inside a stored function or trigger because
+ Do not reset RAND_USED if inside a stored function or trigger because
only the call to these operations is logged. Thus only the calling
statement needs to detect rand() calls made by its substatements. These
- substatements must not set rand_used to 0 because it would remove the
+ substatements must not set RAND_USED to 0 because it would remove the
detection of rand() by the calling statement.
*/
if (!in_sub_stmt) /* stored functions and triggers are a special case */
@@ -2249,7 +2247,7 @@ void THD::cleanup_after_query()
/* Forget those values, for next binlogger: */
stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
auto_inc_intervals_in_cur_stmt_for_binlog.empty();
- rand_used= 0;
+ used&= ~THD::RAND_USED;
#ifndef EMBEDDED_LIBRARY
/*
Clean possible unused INSERT_ID events by current statement.
@@ -7588,7 +7586,7 @@ MYSQL_TIME THD::query_start_TIME()
MYSQL_TIME res;
variables.time_zone->gmt_sec_to_TIME(&res, query_start());
res.second_part= query_start_sec_part();
- time_zone_used= 1;
+ used|= TIME_ZONE_USED;
return res;
}
@@ -8327,7 +8325,7 @@ Query_arena_stmt::~Query_arena_stmt()
bool THD::timestamp_to_TIME(MYSQL_TIME *ltime, my_time_t ts,
ulong sec_part, date_mode_t fuzzydate)
{
- time_zone_used= 1;
+ used|= TIME_ZONE_USED;
if (ts == 0 && sec_part == 0)
{
if (fuzzydate & TIME_NO_ZERO_DATE)
@@ -8370,3 +8368,21 @@ THD::charset_collation_context_alter_table(const TABLE_SHARE *s)
return Charset_collation_context(get_default_db_collation(this, s->db.str),
s->table_charset);
}
+
+
+void Charset_loader_server::raise_unknown_collation_error(const char *name) const
+{
+ ErrConvString err(name, &my_charset_utf8mb4_general_ci);
+ my_error(ER_UNKNOWN_COLLATION, MYF(0), err.ptr());
+ if (error[0])
+ push_warning_printf(current_thd,
+ Sql_condition::WARN_LEVEL_WARN,
+ ER_UNKNOWN_COLLATION, "%s", error);
+}
+
+
+void Charset_loader_server::raise_not_applicable_error(const char *cs,
+ const char *cl) const
+{
+ my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), cl, cs);
+}
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 24120e5d77f..0ccd695788f 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -726,6 +726,7 @@ typedef struct system_variables
ulong net_retry_count;
ulong net_wait_timeout;
ulong net_write_timeout;
+ ulong optimizer_extra_pruning_depth;
ulong optimizer_prune_level;
ulong optimizer_search_depth;
ulong optimizer_selectivity_sampling_limit;
@@ -945,6 +946,7 @@ typedef struct system_status_var
ulong filesort_rows_;
ulong filesort_scan_count_;
ulong filesort_pq_sorts_;
+ ulong optimizer_join_prefixes_check_calls;
/* Features used */
ulong feature_custom_aggregate_functions; /* +1 when custom aggregate
@@ -1063,33 +1065,6 @@ static inline void update_global_memory_status(int64 size)
my_atomic_add64_explicit(ptr, size, MY_MEMORY_ORDER_RELAXED);
}
-/**
- Get collation by name, send error to client on failure.
- @param name Collation name
- @param name_cs Character set of the name string
- @return
- @retval NULL on error
- @retval Pointter to CHARSET_INFO with the given name on success
-*/
-static inline CHARSET_INFO *
-mysqld_collation_get_by_name(const char *name, myf utf8_flag,
- CHARSET_INFO *name_cs= system_charset_info)
-{
- CHARSET_INFO *cs;
- MY_CHARSET_LOADER loader;
- my_charset_loader_init_mysys(&loader);
-
- if (!(cs= my_collation_get_by_name(&loader, name, MYF(utf8_flag))))
- {
- ErrConvString err(name, name_cs);
- my_error(ER_UNKNOWN_COLLATION, MYF(0), err.ptr());
- if (loader.error[0])
- push_warning_printf(current_thd,
- Sql_condition::WARN_LEVEL_WARN,
- ER_UNKNOWN_COLLATION, "%s", loader.error);
- }
- return cs;
-}
static inline bool is_supported_parser_charset(CHARSET_INFO *cs)
{
@@ -2925,6 +2900,12 @@ public:
*/
uint32 binlog_unsafe_warning_flags;
+ typedef uint used_t;
+ enum { RAND_USED=1, TIME_ZONE_USED=2, QUERY_START_SEC_PART_USED=4,
+ THREAD_SPECIFIC_USED=8 };
+
+ used_t used;
+
#ifndef MYSQL_CLIENT
binlog_cache_mngr * binlog_setup_trx_data();
/*
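For orientation, a minimal self-contained sketch of the new flag handling; Demo_thd is a hypothetical stand-in, only the flag names and the save/restore pattern come from the patch:

#include <cstdint>

struct Demo_thd                                  // hypothetical stand-in for THD
{
  typedef uint32_t used_t;
  enum { RAND_USED= 1, TIME_ZONE_USED= 2, QUERY_START_SEC_PART_USED= 4,
         THREAD_SPECIFIC_USED= 8 };
  used_t used= 0;
};

int main()
{
  Demo_thd thd;
  thd.used|= Demo_thd::TIME_ZONE_USED;           // was: thd->time_zone_used= 1
  Demo_thd::used_t saved= thd.used & Demo_thd::TIME_ZONE_USED; // save one flag
  thd.used= 0;                                   // as in reset_for_next_command()
  thd.used= (thd.used & ~Demo_thd::TIME_ZONE_USED) | saved;    // restore, as in sp.cc
  return thd.used == Demo_thd::TIME_ZONE_USED ? 0 : 1;
}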
@@ -3609,15 +3590,11 @@ public:
Reset to FALSE when we leave the sub-statement mode.
*/
bool is_fatal_sub_stmt_error;
- bool rand_used, time_zone_used;
- bool query_start_sec_part_used;
/* for IS NULL => = last_insert_id() fix in remove_eq_conds() */
bool substitute_null_with_insert_id;
bool in_lock_tables;
bool bootstrap, cleanup_done, free_connection_done;
- /** is set if some thread specific value(s) used in a statement. */
- bool thread_specific_used;
/**
is set if a statement accesses a temporary table created through
CREATE TEMPORARY TABLE.
@@ -3927,7 +3904,7 @@ public:
ulong sec_part, date_mode_t fuzzydate);
inline my_time_t query_start() { return start_time; }
inline ulong query_start_sec_part()
- { query_start_sec_part_used=1; return start_time_sec_part; }
+ { used|= QUERY_START_SEC_PART_USED; return start_time_sec_part; }
MYSQL_TIME query_start_TIME();
time_round_mode_t temporal_round_mode() const
{
@@ -5451,23 +5428,28 @@ public:
{
#ifndef EMBEDDED_LIBRARY
/*
+ Slave vs user threads have timeouts configured via different variables,
+ so pick the appropriate one to use.
+ */
+ ulonglong timeout_val=
+ slave_thread ? slave_max_statement_time : variables.max_statement_time;
+
+ /*
Don't start a query timer if
- If timeouts are not set
- if we are in a stored procedure or sub statement
- - If this is a slave thread
- If we already have set a timeout (happens when running prepared
statements that calls mysql_execute_command())
*/
- if (!variables.max_statement_time || spcont || in_sub_stmt ||
- slave_thread || query_timer.expired == 0)
+ if (!timeout_val || spcont || in_sub_stmt || query_timer.expired == 0)
return;
- thr_timer_settime(&query_timer, variables.max_statement_time);
+ thr_timer_settime(&query_timer, timeout_val);
#endif
}
void reset_query_timer()
{
#ifndef EMBEDDED_LIBRARY
- if (spcont || in_sub_stmt || slave_thread)
+ if (spcont || in_sub_stmt)
return;
if (!query_timer.expired)
thr_timer_end(&query_timer);
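A note on the timer change above (not part of the patch):

/*
  Replication worker threads now arm the statement timer too: they use the
  global slave_max_statement_time added by this patch instead of the session
  max_statement_time, and when the timer fires the event is killed with the
  new ER_SLAVE_STATEMENT_TIMEOUT (see killed_errno() and errmsg-utf8.txt
  earlier in this diff).
*/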
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index d4384f49112..1f0945b1a2d 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -1640,7 +1640,7 @@ bool multi_delete::send_eof()
thd->clear_error();
else
errcode= query_error_code(thd, killed_status == NOT_KILLED);
- thd->thread_specific_used= TRUE;
+ thd->used|= THD::THREAD_SPECIFIC_USED;
StatementBinlog stmt_binlog(thd, thd->binlog_need_stmt_format(transactional_tables));
if (unlikely(thd->binlog_query(THD::ROW_QUERY_TYPE,
thd->query(), thd->query_length(),
diff --git a/sql/sql_i_s.h b/sql/sql_i_s.h
index a3614d889c9..263031ae2c9 100644
--- a/sql/sql_i_s.h
+++ b/sql/sql_i_s.h
@@ -18,7 +18,7 @@
#include "sql_const.h" // MAX_FIELD_VARCHARLENGTH
#include "sql_basic_types.h" // enum_nullability
-#include "sql_string.h" // strlen, MY_CS_NAME_SIZE
+#include "sql_string.h" // strlen, MY_CS_CHARACTER_SET_NAME_SIZE
#include "lex_string.h" // LEX_CSTRING
#include "mysql_com.h" // enum_field_types
#include "my_time.h" // TIME_SECOND_PART_DIGITS
@@ -162,6 +162,11 @@ class Yes_or_empty: public Varchar
{
public:
Yes_or_empty(): Varchar(3) { }
+ static LEX_CSTRING value(bool val)
+ {
+ return val ? Lex_cstring(STRING_WITH_LEN("Yes")) :
+ Lex_cstring();
+ }
};
@@ -196,7 +201,14 @@ public:
class CSName: public Varchar
{
public:
- CSName(): Varchar(MY_CS_NAME_SIZE) { }
+ CSName(): Varchar(MY_CS_CHARACTER_SET_NAME_SIZE) { }
+};
+
+
+class CLName: public Varchar
+{
+public:
+ CLName(): Varchar(MY_CS_COLLATION_NAME_SIZE) { }
};
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index a7fc7afb19f..adcc46637d9 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -2280,7 +2280,8 @@ public:
ulong start_time_sec_part;
sql_mode_t sql_mode;
bool auto_increment_field_not_null;
- bool ignore, log_query, query_start_sec_part_used;
+ bool ignore, log_query;
+ THD::used_t query_start_sec_part_used;
bool stmt_depends_on_first_successful_insert_id_in_prev_stmt;
ulonglong first_successful_insert_id_in_prev_stmt;
ulonglong forced_insert_id;
@@ -2938,7 +2939,7 @@ int write_delayed(THD *thd, TABLE *table, enum_duplicates duplic,
row->start_time= thd->start_time;
row->start_time_sec_part= thd->start_time_sec_part;
- row->query_start_sec_part_used= thd->query_start_sec_part_used;
+ row->query_start_sec_part_used= thd->used & THD::QUERY_START_SEC_PART_USED;
/*
those are for the binlog: LAST_INSERT_ID() has been evaluated at this
time, so record does not need it, but statement-based binlogging of the
@@ -2955,7 +2956,7 @@ int write_delayed(THD *thd, TABLE *table, enum_duplicates duplic,
So we can get time_zone object from thread which handling delayed statement.
See the comment of my_tz_find() for detail.
*/
- if (thd->time_zone_used)
+ if (thd->used & THD::TIME_ZONE_USED)
{
row->time_zone = thd->variables.time_zone;
}
@@ -3569,7 +3570,7 @@ bool Delayed_insert::handle_inserts(void)
thd.start_time=row->start_time;
thd.start_time_sec_part=row->start_time_sec_part;
- thd.query_start_sec_part_used=row->query_start_sec_part_used;
+ thd.used= row->query_start_sec_part_used;
/*
To get the exact auto_inc interval to store in the binlog we must not
use values from the previous interval (of the previous rows).
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 68c1fbf5447..5e9d9a8eb17 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -3389,7 +3389,7 @@ bool st_select_lex::test_limit()
-st_select_lex* st_select_lex_unit::outer_select()
+st_select_lex* st_select_lex_unit::outer_select() const
{
return (st_select_lex*) master;
}
@@ -11909,15 +11909,24 @@ bool SELECT_LEX_UNIT::explainable() const
EXPLAIN/ANALYZE unit, when:
(1) if it's a subquery - it's not part of eliminated WHERE/ON clause.
(2) if it's a CTE - it's not hanging (needed for execution)
- (3) if it's a derived - it's not merged
+ (3) if it's a derived - it's not merged or eliminated
if it's not 1/2/3 - it's some weird internal thing, ignore it
*/
+
return item ?
!item->eliminated : // (1)
with_element ?
derived && derived->derived_result &&
!with_element->is_hanging_recursive(): // (2)
derived ?
- derived->is_materialized_derived() : // (3)
+ derived->is_materialized_derived() && // (3)
+ !is_derived_eliminated() :
false;
}
+
+bool SELECT_LEX_UNIT::is_derived_eliminated() const
+{
+ if (!derived)
+ return false;
+ return derived->table->map & outer_select()->join->eliminated_tables;
+}
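An illustration of the new check, with a hypothetical query (not from the patch):

/*
  For something like
    SELECT t1.a
    FROM t1 LEFT JOIN (SELECT b, MAX(c) m FROM t2 GROUP BY b) dt ON dt.b= t1.a
  table elimination can remove the materialized derived table dt, since no
  column of dt is used outside the ON clause and GROUP BY b makes dt.b unique;
  dt's bit then appears in the outer join's eliminated_tables,
  is_derived_eliminated() returns true, and the unit is no longer shown by
  EXPLAIN/ANALYZE.
*/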
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index de4b219ffc0..14629052c2e 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -499,6 +499,7 @@ struct LEX_MASTER_INFO
uint port, connect_retry;
float heartbeat_period;
int sql_delay;
+ bool is_demotion_opt;
/*
Enum is used for making it possible to detect if the user
changed variable or if it should be left at old value
@@ -541,6 +542,7 @@ struct LEX_MASTER_INFO
gtid_pos_str= null_clex_str;
use_gtid_opt= LEX_GTID_UNCHANGED;
sql_delay= -1;
+ is_demotion_opt= 0;
}
};
@@ -971,7 +973,7 @@ public:
};
void init_query();
- st_select_lex* outer_select();
+ st_select_lex* outer_select() const;
const st_select_lex* first_select() const
{
return reinterpret_cast<const st_select_lex*>(slave);
@@ -1039,6 +1041,9 @@ public:
bool set_lock_to_the_last_select(Lex_select_lock l);
friend class st_select_lex;
+
+private:
+ bool is_derived_eliminated() const;
};
typedef class st_select_lex_unit SELECT_LEX_UNIT;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index c1d0fb06573..bc6f4445bb9 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -7559,8 +7559,8 @@ void THD::reset_for_next_command(bool do_clear_error)
}
#endif /* WITH_WSREP */
- query_start_sec_part_used= 0;
- is_fatal_error= time_zone_used= 0;
+ used= 0;
+ is_fatal_error= 0;
variables.option_bits&= ~OPTION_BINLOG_THIS_STMT;
/*
@@ -7579,14 +7579,12 @@ void THD::reset_for_next_command(bool do_clear_error)
transaction->all.reset();
}
DBUG_ASSERT(security_ctx== &main_security_ctx);
- thread_specific_used= FALSE;
if (opt_bin_log)
reset_dynamic(&user_var_events);
DBUG_ASSERT(user_var_events_alloc == &main_mem_root);
enable_slow_log= true;
get_stmt_da()->reset_for_next_command();
- rand_used= 0;
m_sent_row_count= m_examined_row_count= 0;
accessed_rows_and_keys= 0;
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index e990b94b43c..d338dc302e6 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -3424,6 +3424,16 @@ int reset_slave(THD *thd, Master_info* mi)
goto err;
}
+ if (mi->using_gtid != Master_info::USE_GTID_SLAVE_POS &&
+ mi->master_supports_gtid)
+ {
+ push_warning_printf(
+ thd, Sql_condition::WARN_LEVEL_NOTE, WARN_OPTION_CHANGING,
+ ER_THD(thd, WARN_OPTION_CHANGING), "RESET SLAVE", "Using_Gtid",
+ mi->using_gtid_astext(mi->using_gtid),
+ mi->using_gtid_astext(Master_info::USE_GTID_SLAVE_POS));
+ }
+
/* Clear master's log coordinates and associated information */
mi->clear_in_memory_info(thd->lex->reset_slave_info.all);
@@ -3816,11 +3826,51 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
if (lex_mi->use_gtid_opt == LEX_MASTER_INFO::LEX_GTID_SLAVE_POS)
mi->using_gtid= Master_info::USE_GTID_SLAVE_POS;
else if (lex_mi->use_gtid_opt == LEX_MASTER_INFO::LEX_GTID_CURRENT_POS)
+ {
mi->using_gtid= Master_info::USE_GTID_CURRENT_POS;
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT,
+ ER_THD(thd, ER_WARN_DEPRECATED_SYNTAX),
+ "master_use_gtid=current_pos", "master_demote_to_slave=1");
+ }
else if (lex_mi->use_gtid_opt == LEX_MASTER_INFO::LEX_GTID_NO ||
lex_mi->log_file_name || lex_mi->pos ||
lex_mi->relay_log_name || lex_mi->relay_log_pos)
+ {
+ if (lex_mi->use_gtid_opt != LEX_MASTER_INFO::LEX_GTID_NO)
+ {
+ push_warning_printf(
+ thd, Sql_condition::WARN_LEVEL_NOTE, WARN_OPTION_CHANGING,
+ ER_THD(thd, WARN_OPTION_CHANGING), "CHANGE MASTER TO", "Using_Gtid",
+ mi->using_gtid_astext(mi->using_gtid),
+ mi->using_gtid_astext(Master_info::USE_GTID_NO));
+ }
mi->using_gtid= Master_info::USE_GTID_NO;
+ }
+
+ /*
+ Warn about ignored options if there are GTID/log coordinate option
+ conflicts
+ */
+ if (mi->using_gtid != Master_info::USE_GTID_NO)
+ {
+ if (lex_mi->log_file_name)
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ WARN_OPTION_IGNORED,
+ ER_THD(thd, WARN_OPTION_IGNORED), "MASTER_LOG_FILE");
+ if (lex_mi->pos)
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ WARN_OPTION_IGNORED,
+ ER_THD(thd, WARN_OPTION_IGNORED), "MASTER_LOG_POS");
+ if (lex_mi->relay_log_name)
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ WARN_OPTION_IGNORED,
+ ER_THD(thd, WARN_OPTION_IGNORED), "RELAY_LOG_FILE");
+ if (lex_mi->relay_log_pos)
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ WARN_OPTION_IGNORED,
+ ER_THD(thd, WARN_OPTION_IGNORED), "RELAY_LOG_POS");
+ }
do_ids= ((lex_mi->repl_do_domain_ids_opt ==
LEX_MASTER_INFO::LEX_MI_ENABLE) ?
@@ -3874,6 +3924,40 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
}
/*
+ MASTER_DEMOTE_TO_SLAVE is set. Merge gtid_binlog_pos into gtid_slave_pos.
+ */
+ if (lex_mi->is_demotion_opt)
+ {
+ String new_gtid_state;
+
+ if (mi->using_gtid != Master_info::USE_GTID_SLAVE_POS)
+ {
+ my_error(ER_CM_OPTION_MISSING_REQUIREMENT, MYF(0),
+ "MASTER_DEMOTE_TO_SLAVE", "TRUE", "Using_Gtid=Slave_Pos");
+ ret= TRUE;
+ goto err;
+ }
+
+ if (!mysql_bin_log.is_open())
+ {
+ my_error(ER_NO_BINARY_LOGGING, MYF(0));
+ ret= TRUE;
+ goto err;
+ }
+
+ if ((ret= rpl_append_gtid_state(&new_gtid_state, true)))
+ goto err;
+
+ if (rpl_global_gtid_slave_state->load(
+ thd, new_gtid_state.ptr(), new_gtid_state.length(), true, true))
+ {
+ my_error(ER_FAILED_GTID_STATE_INIT, MYF(0));
+ ret= TRUE;
+ goto err;
+ }
+ }
+
+ /*
Relay log's IO_CACHE may not be inited, if rli->inited==0 (server was never
a slave before).
*/
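A usage sketch for the demotion path above; the option spelling is taken from the deprecation warning and error texts in this patch:

/*
  On an old primary that is being turned into a replica:
    CHANGE MASTER TO master_use_gtid= slave_pos, master_demote_to_slave= 1;
  merges gtid_binlog_pos into gtid_slave_pos as implemented above.  The option
  requires Using_Gtid=Slave_Pos (otherwise ER_CM_OPTION_MISSING_REQUIREMENT)
  and an open binary log (otherwise ER_NO_BINARY_LOGGING).
*/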
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 9cd3b3baf03..669a1de8d60 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -109,21 +109,22 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select,
const key_map *keys,ha_rows limit);
static void optimize_straight_join(JOIN *join, table_map join_tables);
static bool greedy_search(JOIN *join, table_map remaining_tables,
- uint depth, uint prune_level,
- uint use_cond_selectivity);
+ uint depth, uint use_cond_selectivity);
+
enum enum_best_search {
SEARCH_ABORT= -2,
SEARCH_ERROR= -1,
SEARCH_OK= 0,
SEARCH_FOUND_EDGE=1
};
+
static enum_best_search
best_extension_by_limited_search(JOIN *join,
table_map remaining_tables,
uint idx, double record_count,
double read_time, uint depth,
- uint prune_level,
- uint use_cond_selectivity);
+ uint use_cond_selectivity,
+ table_map *processed_eq_ref_tables);
static uint determine_search_depth(JOIN* join);
C_MODE_START
static int join_tab_cmp(const void *dummy, const void* ptr1, const void* ptr2);
@@ -492,6 +493,7 @@ void JOIN::init(THD *thd_arg, List<Item> &fields_arg,
original_join_tab= 0;
explain= NULL;
tmp_table_keep_current_rowid= 0;
+ allowed_top_level_tables= 0;
all_fields= fields_arg;
if (&fields_list != &fields_arg) /* Avoid valgrind-warning */
@@ -2186,6 +2188,9 @@ JOIN::optimize_inner()
thd->restore_active_arena(arena, &backup);
}
+ if (!allowed_top_level_tables)
+ calc_allowed_top_level_tables(select_lex);
+
if (optimize_constant_subqueries())
DBUG_RETURN(1);
@@ -5250,6 +5255,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
int error= 0;
TABLE *UNINIT_VAR(table); /* inited in all loops */
uint i,table_count,const_count,key;
+ uint sort_space;
table_map found_const_table_map, all_table_map;
key_map const_ref, eq_part;
bool has_expensive_keyparts;
@@ -5267,6 +5273,13 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
table_count=join->table_count;
/*
+    best_extension_by_limited_search() needs sort space for 2 POSITION
+    objects per remaining table, which gives us
+    2*(T + (T-1) + (T-2) + ... + 1) = 2*T*(T+1)/2 = T*T + T POSITION objects
+ */
+ join->sort_space= sort_space= (table_count*table_count + table_count);
+
+ /*
best_positions is ok to allocate with alloc() as we copy things to it with
memcpy()
*/
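A quick numeric check of the sizing comment above (illustrative only, not part of the patch):

// For T= 4 remaining tables: 2*(4+3+2+1) = 20 = 4*4 + 4 = T*T + T,
// matching the amount reserved for join->sort_positions below.
static_assert(2*(4+3+2+1) == 4*4 + 4, "sort_space sizing check, T=4");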
@@ -5277,6 +5290,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
&stat_vector, sizeof(JOIN_TAB*)* (table_count +1),
&table_vector, sizeof(TABLE*)*(table_count*2),
&join->positions, sizeof(POSITION)*(table_count + 1),
+ &join->sort_positions, sizeof(POSITION)*(sort_space),
&join->best_positions,
sizeof(POSITION)*(table_count + 1),
NullS))
@@ -5288,6 +5302,8 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
/* Initialize POSITION objects */
for (i=0 ; i <= table_count ; i++)
(void) new ((char*) (join->positions + i)) POSITION;
+  for (i=0 ; i < sort_space ; i++)
+ (void) new ((char*) (join->sort_positions + i)) POSITION;
join->best_ref= stat_vector;
@@ -5454,6 +5470,17 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
}
}
+ {
+ for (JOIN_TAB *s= stat ; s < stat_end ; s++)
+ {
+ TABLE_LIST *tl= s->table->pos_in_table_list;
+ if (tl->embedding && tl->embedding->sj_subq_pred)
+ {
+ s->embedded_dependent= tl->embedding->original_subq_pred_used_tables;
+ }
+ }
+ }
+
if (thd->trace_started())
trace_table_dependencies(thd, stat, join->table_count);
@@ -5471,7 +5498,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
join->unit->item->get_IN_subquery()->test_strategy(SUBS_IN_TO_EXISTS));
if (keyuse_array->elements &&
- sort_and_filter_keyuse(thd, keyuse_array,
+ sort_and_filter_keyuse(join, keyuse_array,
skip_unprefixed_keyparts))
goto error;
DBUG_EXECUTE("opt", print_keyuse_array(keyuse_array););
@@ -7269,6 +7296,32 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
DBUG_RETURN(FALSE);
}
+/*
+  Check if the key could be used with eq_ref
+
+  The assumption is that all previous key parts were used
+*/
+
+static void remember_if_eq_ref_key(JOIN *join, KEYUSE *use)
+{
+ DBUG_ASSERT(use->keypart != FT_KEYPART && use->key != MAX_KEY);
+ TABLE *table= use->table;
+ KEY *key= table->key_info+use->key;
+ ulong key_flags= table->actual_key_flags(key);
+
+ /*
+    Check if this is a possible eq_ref key.
+    This may include keys that do not have HA_NULL_PART_KEY
+    set, but this is ok as best_access_path() will resolve this.
+ */
+ if ((key_flags & (HA_NOSAME | HA_EXT_NOSAME)))
+ {
+ uint key_parts= table->actual_n_key_parts(key);
+ if (use->keypart+1 == key_parts)
+ join->eq_ref_tables|= table->map;
+ }
+}
+
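A worked example of what ends up in join->eq_ref_tables, using a hypothetical schema:

/*
  With  CREATE TABLE t1 (a INT, b INT, c INT, PRIMARY KEY(a,b))  and a query
  containing  t1.a= t0.x AND t1.b= t0.y,  the KEYUSE entries for t1's primary
  key cover keyparts 0 and 1 without gaps, so when that key "ends" in
  sort_and_filter_keyuse() below, remember_if_eq_ref_key() sees
  use->keypart+1 == actual_n_key_parts() on a HA_NOSAME key and sets t1's bit
  in join->eq_ref_tables.  With only  t1.a= t0.x  the last keypart is missing
  and the bit stays clear.
*/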
/**
Sort the array of possible keys and remove the following key parts:
@@ -7279,14 +7332,19 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
(e.g. if there is a key(a,b,c) but only b < 5 (or a=2 and c < 3) is
used in the query, we drop the partial key parts from consideration).
Special treatment for ft-keys.
+  Update join->eq_ref_tables with a bitmap of all tables that can possibly
+  have an EQ_REF key.
*/
-bool sort_and_filter_keyuse(THD *thd, DYNAMIC_ARRAY *keyuse,
+bool sort_and_filter_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse,
bool skip_unprefixed_keyparts)
{
+ THD *thd= join->thd;
KEYUSE key_end, *prev, *save_pos, *use;
uint found_eq_constant, i;
+ bool found_unprefixed_key_part= 0;
+ join->eq_ref_tables= 0;
DBUG_ASSERT(keyuse->elements);
my_qsort(keyuse->buffer, keyuse->elements, sizeof(KEYUSE),
@@ -7314,18 +7372,45 @@ bool sort_and_filter_keyuse(THD *thd, DYNAMIC_ARRAY *keyuse,
{
if (use->key == prev->key && use->table == prev->table)
{
- if ((prev->keypart+1 < use->keypart && skip_unprefixed_keyparts) ||
- (prev->keypart == use->keypart && found_eq_constant))
- continue; /* remove */
+ if (prev->keypart == use->keypart && found_eq_constant)
+ continue;
+ if (prev->keypart+1 < use->keypart)
+ {
+ found_unprefixed_key_part= 1;
+ if (skip_unprefixed_keyparts)
+ continue; /* remove */
+ }
+ }
+ else
+ {
+ /* Key changed, check if previous key was a primary/unique key lookup */
+ if (prev != &key_end && !found_unprefixed_key_part)
+ remember_if_eq_ref_key(join, prev);
+ found_unprefixed_key_part= 0;
+ if (use->keypart != 0)
+ {
+ found_unprefixed_key_part= 1;
+ if (skip_unprefixed_keyparts)
+ continue; /* remove - first found key part must be 0 */
+ }
}
- else if (use->keypart != 0 && skip_unprefixed_keyparts)
- continue; /* remove - first found must be 0 */
}
-
+ else /* FT_KEY_PART */
+ {
+ if (prev != &key_end && !found_unprefixed_key_part)
+ remember_if_eq_ref_key(join, prev);
+ found_unprefixed_key_part= 1; // This key cannot be EQ_REF
+ }
prev= use;
found_eq_constant= !use->used_tables;
use->table->reginfo.join_tab->checked_keys.set_bit(use->key);
}
+ else
+ {
+ if (prev != &key_end && !found_unprefixed_key_part)
+ remember_if_eq_ref_key(join, prev);
+ prev= &key_end;
+ }
/*
Old gcc used a memcpy(), which is undefined if save_pos==use:
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19410
@@ -7339,6 +7424,8 @@ bool sort_and_filter_keyuse(THD *thd, DYNAMIC_ARRAY *keyuse,
use->table->reginfo.join_tab->keyuse= save_pos;
save_pos++;
}
+ if (prev != &key_end && !found_unprefixed_key_part)
+ remember_if_eq_ref_key(join, prev);
i= (uint) (save_pos-(KEYUSE*) keyuse->buffer);
(void) set_dynamic(keyuse,(uchar*) &key_end,i);
keyuse->elements= i;
@@ -7787,6 +7874,8 @@ best_access_path(JOIN *join,
Json_writer_object trace_wrapper(thd, "best_access_path");
+ trace_wrapper.add_table_name(s);
+
bitmap_clear_all(eq_join_set);
loose_scan_opt.init(join, s, remaining_tables);
@@ -7952,7 +8041,8 @@ best_access_path(JOIN *join,
(!(key_flags & HA_NULL_PART_KEY) || // (2)
all_key_parts == notnull_part)) // (3)
{
-
+ /* Check that eq_ref_tables are correctly updated */
+ DBUG_ASSERT(join->eq_ref_tables & table->map);
/* TODO: Adjust cost for covering and clustering key */
type= JT_EQ_REF;
trace_access_idx.add("access_type", join_type_str[type])
@@ -8339,10 +8429,13 @@ best_access_path(JOIN *join,
*/
if (s->key_start_dependent)
key_dependent= s->key_dependent;
+      /* Add dependency for subqueries */
+ key_dependent|= s->embedded_dependent;
}
/* Check that s->key_dependent contains all used_tables found in s->keyuse */
key_dependent&= ~PSEUDO_TABLE_BITS;
- DBUG_ASSERT((key_dependent & s->key_dependent) == key_dependent);
+ DBUG_ASSERT((key_dependent & (s->key_dependent | s->embedded_dependent)) ==
+ key_dependent);
/*
If there is no key to access the table, but there is an equi-join
@@ -8754,7 +8847,6 @@ bool
choose_plan(JOIN *join, table_map join_tables)
{
uint search_depth= join->thd->variables.optimizer_search_depth;
- uint prune_level= join->thd->variables.optimizer_prune_level;
uint use_cond_selectivity=
join->thd->variables.optimizer_use_condition_selectivity;
bool straight_join= MY_TEST(join->select_options & SELECT_STRAIGHT_JOIN);
@@ -8762,6 +8854,9 @@ choose_plan(JOIN *join, table_map join_tables)
DBUG_ENTER("choose_plan");
join->cur_embedding_map= 0;
+ join->extra_heuristic_pruning= false;
+ join->prune_level= join->thd->variables.optimizer_prune_level;
+
reset_nj_counters(join, join->join_list);
qsort2_cmp jtab_sort_func;
@@ -8818,8 +8913,14 @@ choose_plan(JOIN *join, table_map join_tables)
if (search_depth == 0)
/* Automatically determine a reasonable value for 'search_depth' */
search_depth= determine_search_depth(join);
- if (greedy_search(join, join_tables, search_depth, prune_level,
- use_cond_selectivity))
+
+ if (join->prune_level >= 1 &&
+ search_depth >= thd->variables.optimizer_extra_pruning_depth)
+ {
+ join->extra_heuristic_pruning= true;
+ }
+
+ if (greedy_search(join, join_tables, search_depth, use_cond_selectivity))
DBUG_RETURN(TRUE);
}
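A note on the new knob used above; the system variable itself is added in sys_vars.cc (see the diffstat):

/*
  Extra heuristic pruning is applied only when optimizer_prune_level >= 1 and
  the effective search depth is at least optimizer_extra_pruning_depth, so a
  larger value restricts the extra pruning to queries joining more tables,
  while optimizer_prune_level=0 keeps the search exhaustive as before.
*/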
@@ -8931,14 +9032,9 @@ join_tab_cmp(const void *dummy, const void* ptr1, const void* ptr2)
if ((cmp= compare_embedding_subqueries(jt1, jt2)) != 0)
return cmp;
/*
- After that,
- take care about ordering imposed by LEFT JOIN constraints,
- possible [eq]ref accesses, and numbers of matching records in the table.
+    After that, do ordering according to the number of
+    records in the table.
*/
- if (jt1->dependent & jt2->table->map)
- return 1;
- if (jt2->dependent & jt1->table->map)
- return -1;
if (jt1->found_records > jt2->found_records)
return 1;
if (jt1->found_records < jt2->found_records)
@@ -8969,10 +9065,15 @@ join_tab_cmp_straight(const void *dummy, const void* ptr1, const void* ptr2)
if ((cmp= compare_embedding_subqueries(jt1, jt2)) != 0)
return cmp;
+ /*
+    We have to check dependencies here for straight_join because we don't
+    reorder tables later, as best_extension_by_limited_search() does for
+    other plans.
+ */
if (jt1->dependent & jt2->table->map)
return 1;
if (jt2->dependent & jt1->table->map)
return -1;
+
return jt1 > jt2 ? 1 : (jt1 < jt2 ? -1 : 0);
}
@@ -8994,11 +9095,6 @@ join_tab_cmp_embedded_first(const void *emb, const void* ptr1, const void* ptr2
if (jt1->emb_sj_nest != emb_nest && jt2->emb_sj_nest == emb_nest)
return 1;
- if (jt1->dependent & jt2->table->map)
- return 1;
- if (jt2->dependent & jt1->table->map)
- return -1;
-
if (jt1->found_records > jt2->found_records)
return 1;
if (jt1->found_records < jt2->found_records)
@@ -9071,9 +9167,9 @@ determine_search_depth(JOIN *join)
access method. The final optimal plan is stored in the array
'join->best_positions', and the corresponding cost in 'join->best_read'.
- @param join pointer to the structure providing all context info for
- the query
- @param join_tables set of the tables in the query
+ @param join pointer to the structure providing all context info
+ for the query
+ @param remaining_tables set of the tables in the query
@note
This function can be applied to:
@@ -9102,10 +9198,7 @@ optimize_straight_join(JOIN *join, table_map remaining_tables)
POSITION *position= join->positions + idx;
Json_writer_object trace_one_table(thd);
if (unlikely(thd->trace_started()))
- {
trace_plan_prefix(join, idx, remaining_tables);
- trace_one_table.add_table_name(s);
- }
/* Find the best access method from 's' to the current partial plan */
best_access_path(join, s, remaining_tables, join->positions, idx,
disable_jbuf, record_count,
@@ -9139,7 +9232,7 @@ optimize_straight_join(JOIN *join, table_map remaining_tables)
memcpy((uchar*) join->best_positions, (uchar*) join->positions,
sizeof(POSITION)*idx);
join->join_record_count= record_count;
- join->best_read= read_time - 0.001;
+ join->best_read= read_time - COST_EPS;
}
@@ -9215,8 +9308,6 @@ optimize_straight_join(JOIN *join, table_map remaining_tables)
for the query
@param remaining_tables set of tables not included into the partial plan yet
@param search_depth controlls the exhaustiveness of the search
- @param prune_level the pruning heuristics that should be applied during
- search
@param use_cond_selectivity specifies how the selectivity of the conditions
pushed to a table should be taken into account
@@ -9230,7 +9321,6 @@ static bool
greedy_search(JOIN *join,
table_map remaining_tables,
uint search_depth,
- uint prune_level,
uint use_cond_selectivity)
{
double record_count= 1.0;
@@ -9238,6 +9328,7 @@ greedy_search(JOIN *join,
uint idx= join->const_tables; // index into 'join->best_ref'
uint best_idx;
uint size_remain; // cardinality of remaining_tables
+ table_map usable_tables, eq_ref_tables;
POSITION best_pos;
JOIN_TAB *best_table; // the next plan node to be added to the curr QEP
// ==join->tables or # tables in the sj-mat nest we're optimizing
@@ -9245,21 +9336,26 @@ greedy_search(JOIN *join,
DBUG_ENTER("greedy_search");
/* number of tables that remain to be optimized */
- n_tables= size_remain= my_count_bits(remaining_tables &
- (join->emb_sjm_nest?
- (join->emb_sjm_nest->sj_inner_tables &
- ~join->const_table_map)
- :
- ~(table_map)0));
+ usable_tables= (join->emb_sjm_nest ?
+ (join->emb_sjm_nest->sj_inner_tables &
+ ~join->const_table_map & remaining_tables):
+ remaining_tables);
+ n_tables= size_remain= my_count_bits(usable_tables);
+ join->next_sort_position= join->sort_positions;
do {
- /* Find the extension of the current QEP with the lowest cost */
+ /*
+      Find the extension of the current QEP with the lowest cost.
+      We are using remaining_tables instead of usable_tables here as,
+      in case of an emb_sjm_nest, we want to be able to check if
+      an embedded table depends on an outer table.
+ */
join->best_read= DBL_MAX;
if ((int) best_extension_by_limited_search(join, remaining_tables, idx,
record_count,
read_time, search_depth,
- prune_level,
- use_cond_selectivity) <
+ use_cond_selectivity,
+ &eq_ref_tables) <
(int) SEARCH_OK)
DBUG_RETURN(TRUE);
/*
@@ -9316,13 +9412,13 @@ greedy_search(JOIN *join,
while (pos && best_table != pos)
pos= join->best_ref[++best_idx];
DBUG_ASSERT((pos != NULL)); // should always find 'best_table'
+
/*
- move 'best_table' at the first free position in the array of joins,
- keeping the sorted table order intact
+    Move 'best_table' to the first free position in the array of joins.
+    We don't need to keep the array sorted as
+    best_extension_by_limited_search() will sort them.
*/
- memmove(join->best_ref + idx + 1, join->best_ref + idx,
- sizeof(JOIN_TAB*) * (best_idx - idx));
- join->best_ref[idx]= best_table;
+ swap_variables(JOIN_TAB*, join->best_ref[idx], join->best_ref[best_idx]);
/* compute the cost of the new plan extended with 'best_table' */
record_count= COST_MULT(record_count, join->positions[idx].records_read);
@@ -9927,6 +10023,129 @@ check_if_edge_table(POSITION *pos,
}
+struct SORT_POSITION
+{
+ JOIN_TAB **join_tab;
+ POSITION *position;
+};
+
+
+/*
+  Sort SORT_POSITION entries according to the expected number of rows found.
+  If the number of combinations is the same, sort according to join_tab
+  order (same table order as used in the original SQL query).
+*/
+
+static int
+sort_positions(SORT_POSITION *a, SORT_POSITION *b)
+{
+ int cmp;
+ if ((cmp= compare_embedding_subqueries(*a->join_tab, *b->join_tab)) != 0)
+ return cmp;
+
+ if (a->position->records_read > b->position->records_read)
+ return 1;
+ if (a->position->records_read < b->position->records_read)
+ return -1;
+ return CMP_NUM(*a->join_tab, *b->join_tab);
+}
+
+
+/*
+ Call best_access_path() for a set of tables and collect results
+
+ @param join JOIN object
+ @param trace_one_table Current optimizer_trace
+  @param pos Pointer to remaining tables
+ @param allowed_tables bitmap of allowed tables. On return set to
+ the collected tables.
+  @param store_position Points to where to store the next found SORT_POSITION.
+                        Will be updated to the next free position.
+ @param stop_on_eq_ref Stop searching for more tables if we found an EQ_REF
+ table.
+
+ @return
+ 0 Normal
+ 1 Eq_ref table found (only if stop_on_eq_ref is used)
+
+  join->next_sort_position will be updated to the next free position.
+*/
+
+static bool
+get_costs_for_tables(JOIN *join, table_map remaining_tables, uint idx,
+ double record_count,
+ Json_writer_object *trace_one_table,
+ JOIN_TAB **pos, SORT_POSITION **store_position,
+ table_map *allowed_tables,
+ bool stop_on_eq_ref)
+{
+ THD *thd= join->thd;
+ POSITION *sort_position= join->next_sort_position;
+ SORT_POSITION *sort_end= *store_position;
+ JOIN_TAB *s;
+ table_map found_tables= 0;
+ bool found_eq_ref= 0;
+ bool disable_jbuf= join->thd->variables.join_cache_level == 0;
+  DBUG_ENTER("get_costs_for_tables");
+
+ s= *pos;
+ do
+ {
+ table_map real_table_bit= s->table->map;
+ if ((*allowed_tables & real_table_bit) &&
+ !(remaining_tables & s->dependent))
+ {
+#ifdef DBUG_ASSERT_EXISTS
+ DBUG_ASSERT(!check_interleaving_with_nj(s));
+ restore_prev_nj_state(s); // Revert effect of check_... call
+#endif
+ sort_end->join_tab= pos;
+ sort_end->position= sort_position;
+
+ Json_writer_object wrapper(thd);
+ /* Find the best access method from 's' to the current partial plan */
+ best_access_path(join, s, remaining_tables, join->positions, idx,
+ disable_jbuf, record_count,
+ sort_position, sort_position + 1);
+ found_tables|= s->table->map;
+ sort_end++;
+ sort_position+= 2;
+ if (unlikely(stop_on_eq_ref) && sort_position[-2].type == JT_EQ_REF)
+ {
+        /* Found an eq_ref table. Use this one, ignoring the other tables */
+ found_eq_ref= 1;
+ if (found_tables == s->table->map)
+ break; // First table
+
+ /* Store the found eq_ref table first in store_position */
+ sort_position-= 2;
+ *allowed_tables= s->table->map;
+ (*store_position)->join_tab= pos;
+ (*store_position)->position= sort_position;
+ (*store_position)++;
+ join->next_sort_position[0]= sort_position[0];
+ join->next_sort_position[1]= sort_position[1];
+ join->next_sort_position+= 2;
+ DBUG_RETURN(1);
+ }
+ }
+ else
+ {
+ /* Verify that 'allowed_current_tables' was calculated correctly */
+ DBUG_ASSERT((remaining_tables & s->dependent) ||
+ !(remaining_tables & real_table_bit) ||
+ !(*allowed_tables & real_table_bit) ||
+ check_interleaving_with_nj(s));
+ }
+ } while ((s= *++pos));
+
+ *allowed_tables= found_tables;
+ *store_position= sort_end;
+ join->next_sort_position= sort_position;
+ DBUG_RETURN(found_eq_ref);
+}
+
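For orientation before the large hunk below (a summary, not part of the patch):

/*
  best_extension_by_limited_search() now works in two phases per depth:
  get_costs_for_tables() runs best_access_path() for each allowed candidate
  into SORT_POSITION entries (scratch space join->sort_positions, sized by
  sort_space in make_join_statistics()), the entries are sorted by expected
  rows via sort_positions(), and only then are candidates expanded
  recursively.
*/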
/**
Find a good, possibly optimal, query execution plan (QEP) by a possibly
exhaustive search.
@@ -10014,8 +10233,7 @@ check_if_edge_table(POSITION *pos,
When 'best_extension_by_limited_search' is called for the first time,
'join->best_read' must be set to the largest possible value (e.g. DBL_MAX).
The actual implementation provides a way to optionally use pruning
- heuristic (controlled by the parameter 'prune_level') to reduce the search
- space by skipping some partial plans.
+ heuristic to reduce the search space by skipping some partial plans.
@note
The parameter 'search_depth' provides control over the recursion
@@ -10034,8 +10252,6 @@ check_if_edge_table(POSITION *pos,
@param search_depth maximum depth of the recursion and thus size of the
found optimal plan
(0 < search_depth <= join->tables+1).
- @param prune_level pruning heuristics that should be applied during
- optimization
(values: 0 = EXHAUSTIVE, 1 = PRUNE_BY_TIME_OR_ROWS)
@param use_cond_selectivity specifies how the selectivity of the conditions
pushed to a table should be taken into account
@@ -10058,22 +10274,27 @@ best_extension_by_limited_search(JOIN *join,
double record_count,
double read_time,
uint search_depth,
- uint prune_level,
- uint use_cond_selectivity)
+ uint use_cond_selectivity,
+ table_map *processed_eq_ref_tables)
{
THD *thd= join->thd;
/*
'join' is a partial plan with lower cost than the best plan so far,
so continue expanding it further with the tables in 'remaining_tables'.
*/
- JOIN_TAB *s, **pos;
+ JOIN_TAB *s;
double best_record_count= DBL_MAX;
double best_read_time= DBL_MAX;
- bool disable_jbuf= join->thd->variables.join_cache_level == 0;
enum_best_search best_res;
+ uint tables_left= join->table_count - idx, found_tables;
+ uint accepted_tables __attribute__((unused));
+ table_map found_eq_ref_tables= 0, used_eq_ref_table= 0;
+ table_map allowed_tables, allowed_current_tables;
+ SORT_POSITION *sort= (SORT_POSITION*) alloca(sizeof(SORT_POSITION)*tables_left);
+ SORT_POSITION *sort_end;
DBUG_ENTER("best_extension_by_limited_search");
- DBUG_EXECUTE_IF("show_explain_probe_best_ext_lim_search",
+ DBUG_EXECUTE_IF("show_explain_probe_best_ext_lim_search",
if (dbug_user_var_equals_int(thd,
"show_explain_probe_select_id",
join->select_lex->select_number))
@@ -10085,30 +10306,98 @@ best_extension_by_limited_search(JOIN *join,
DBUG_EXECUTE("opt", print_plan(join, idx, record_count, read_time, read_time,
"part_plan"););
+ status_var_increment(thd->status_var.optimizer_join_prefixes_check_calls);
- /*
- If we are searching for the execution plan of a materialized semi-join nest
- then allowed_tables contains bits only for the tables from this nest.
- */
- table_map allowed_tables= ~(table_map)0;
if (join->emb_sjm_nest)
- allowed_tables= join->emb_sjm_nest->sj_inner_tables & ~join->const_table_map;
+ {
+ /*
+ If we are searching for the execution plan of a materialized semi-join nest
+ then allowed_tables contains bits only for the tables from this nest.
+ */
+ allowed_tables= (join->emb_sjm_nest->sj_inner_tables & remaining_tables);
+ allowed_current_tables= join->get_allowed_nj_tables(idx) & remaining_tables;
+ }
+ else
+ {
+ /*
+      allowed_tables is used to check if there are tables left that can
+      improve a key search and to see if there are more tables to add in the
+      next iteration.
- for (pos= join->best_ref + idx ; (s= *pos) ; pos++)
+ allowed_current_tables tells us which tables we can add to the current
+ plan at this stage.
+ */
+ allowed_tables= remaining_tables;
+ allowed_current_tables= join->get_allowed_nj_tables(idx) & remaining_tables;
+ }
+ DBUG_ASSERT(allowed_tables & remaining_tables);
+
+ sort_end= sort;
{
- table_map real_table_bit= s->table->map;
- DBUG_ASSERT(remaining_tables & real_table_bit);
+ Json_writer_object trace_one_table(thd);
+ JOIN_TAB **best_ref= join->best_ref + idx;
+ if (unlikely(thd->trace_started()))
+ trace_plan_prefix(join, idx, remaining_tables);
- swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
+ Json_writer_array arr(thd, "get_costs_for_tables");
+
+ if (idx > join->const_tables && join->prune_level >= 2 &&
+ join->positions[idx-1].type == JT_EQ_REF &&
+ (join->eq_ref_tables & allowed_current_tables))
+ {
+      /*
+        Previous table was an EQ_REF table; only add other possible EQ_REF
+        tables to the chain and stop after the first one is found.
+ */
+ table_map table_map= join->eq_ref_tables & allowed_current_tables;
+ if (get_costs_for_tables(join, remaining_tables, idx, record_count,
+ &trace_one_table, best_ref, &sort_end,
+ &table_map, 1))
+ used_eq_ref_table= (*sort->join_tab)->table->map;
+ else
+ {
+ /* We didn't find another EQ_REF table, add remaining tables */
+ if ((table_map= allowed_current_tables & ~table_map))
+ get_costs_for_tables(join, remaining_tables, idx, record_count,
+ &trace_one_table, best_ref, &sort_end, &table_map,
+ 0);
+ }
+ }
+ else
+ {
+ table_map table_map= allowed_current_tables;
+ get_costs_for_tables(join, remaining_tables, idx, record_count,
+ &trace_one_table, best_ref, &sort_end, &table_map,
+ 0);
+ }
+ found_tables= (uint) (sort_end - sort);
+ DBUG_ASSERT(found_tables > 0);
+
+ /*
+ Sort tables in ascending order of generated row combinations
+ */
+ if (found_tables > 1)
+ my_qsort(sort, found_tables, sizeof(SORT_POSITION),
+ (qsort_cmp) sort_positions);
+ }
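An illustration of the prune_level >= 2 shortcut above (hypothetical tables, not from the patch):

/*
  If the current join prefix ends with a table accessed via EQ_REF and some
  remaining table in join->eq_ref_tables can also be joined via EQ_REF on
  already-bound columns, get_costs_for_tables() stops at the first such table
  and only that candidate is expanded at this depth (used_eq_ref_table); the
  full set of allowed tables is costed only when no further EQ_REF table is
  found.
*/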
+ DBUG_ASSERT(join->next_sort_position <=
+ join->sort_positions + join->sort_space);
- if ((allowed_tables & real_table_bit) &&
- !(remaining_tables & s->dependent) &&
+ accepted_tables= 0;
+ double min_rec_count= DBL_MAX;
+ double min_rec_count_read_time= DBL_MAX;
+
+ double min_cost= DBL_MAX;
+ double min_cost_record_count= DBL_MAX;
+
+ for (SORT_POSITION *pos= sort ; pos < sort_end ; pos++)
+ {
+ s= *pos->join_tab;
+ if (!(found_eq_ref_tables & s->table->map) &&
!check_interleaving_with_nj(s))
{
+ table_map real_table_bit= s->table->map;
double current_record_count, current_read_time;
double partial_join_cardinality;
- POSITION *position= join->positions + idx;
- POSITION loose_scan_pos;
+ POSITION *position= join->positions + idx, *loose_scan_pos;
Json_writer_object trace_one_table(thd);
if (unlikely(thd->trace_started()))
@@ -10117,9 +10406,9 @@ best_extension_by_limited_search(JOIN *join,
trace_one_table.add_table_name(s);
}
- /* Find the best access method from 's' to the current partial plan */
- best_access_path(join, s, remaining_tables, join->positions, idx,
- disable_jbuf, record_count, position, &loose_scan_pos);
+ accepted_tables++;
+ *position= *pos->position; // Get stored result
+ loose_scan_pos= pos->position+1;
/* Compute the cost of the new plan extended with 's' */
current_record_count= COST_MULT(record_count, position->records_read);
@@ -10138,7 +10427,7 @@ best_extension_by_limited_search(JOIN *join,
trace_one_table.add("cost_for_plan", current_read_time);
}
optimize_semi_joins(join, remaining_tables, idx, &current_record_count,
- &current_read_time, &loose_scan_pos);
+ &current_read_time, loose_scan_pos);
/* Expand only partial plans with lower cost than the best QEP so far */
if (current_read_time >= join->best_read)
@@ -10148,7 +10437,11 @@ best_extension_by_limited_search(JOIN *join,
read_time,
current_read_time,
"prune_by_cost"););
- trace_one_table.add("pruned_by_cost", true);
+ trace_one_table
+ .add("pruned_by_cost", true)
+ .add("current_cost", current_read_time)
+ .add("best_cost", join->best_read + COST_EPS);
+
restore_prev_nj_state(s);
restore_prev_sj_state(remaining_tables, s, idx);
continue;
@@ -10158,8 +10451,31 @@ best_extension_by_limited_search(JOIN *join,
Prune some less promising partial plans. This heuristic may miss
the optimal QEPs, thus it results in a non-exhaustive search.
*/
- if (prune_level == 1)
+ if (join->prune_level >= 1)
{
+      // Track the extensions with the lowest record count and the lowest cost.
+ bool min_rec_hit= false;
+ bool min_cost_hit= false;
+
+ if (join->extra_heuristic_pruning &&
+ (!(position->key_dependent & allowed_tables) ||
+ position->records_read < 2.0))
+ {
+ if (current_record_count < min_rec_count)
+ {
+ min_rec_count= current_record_count;
+ min_rec_count_read_time= current_read_time;
+ min_rec_hit= true;
+ }
+
+ if (current_read_time < min_cost)
+ {
+ min_cost_record_count= current_record_count;
+ min_cost= current_read_time;
+ min_cost_hit= true;
+ }
+ }
+
if (best_record_count > current_record_count ||
best_read_time > current_read_time ||
(idx == join->const_tables && // 's' is the first table in the QEP
@@ -10184,6 +10500,13 @@ best_extension_by_limited_search(JOIN *join,
}
else
{
+ /*
+ Typically, we get here if:
+ best_record_count < current_record_count &&
+ best_read_time < current_read_time
+          That is, both record_count and read_time are worse than the best
+          ones seen so far. This plan doesn't look promising, prune it away.
+ */
DBUG_EXECUTE("opt", print_plan(join, idx+1,
current_record_count,
read_time,
@@ -10194,6 +10517,25 @@ best_extension_by_limited_search(JOIN *join,
restore_prev_sj_state(remaining_tables, s, idx);
continue;
}
+
+ const char* prune_reason= NULL;
+ if (!min_rec_hit &&
+ current_record_count >= min_rec_count &&
+ current_read_time >= min_rec_count_read_time)
+ prune_reason= "min_record_count";
+
+ if (!min_cost_hit &&
+ current_record_count >= min_cost_record_count &&
+ current_read_time >= min_cost)
+ prune_reason= "min_read_time";
+
+ if (prune_reason)
+ {
+ trace_one_table.add("pruned_by_heuristic", prune_reason);
+ restore_prev_nj_state(s);
+ restore_prev_sj_state(remaining_tables, s, idx);
+ continue;
+ }
}
double pushdown_cond_selectivity= 1.0;
@@ -10216,11 +10558,13 @@ best_extension_by_limited_search(JOIN *join,
}
}
- if ((search_depth > 1) && (remaining_tables & ~real_table_bit) &
- allowed_tables)
+ if ((search_depth > 1) &&
+ ((remaining_tables & ~real_table_bit) & allowed_tables))
{
/* Recursively expand the current partial plan */
Json_writer_array trace_rest(thd, "rest_of_plan");
+
+ swap_variables(JOIN_TAB*, join->best_ref[idx], *pos->join_tab);
best_res=
best_extension_by_limited_search(join,
remaining_tables &
@@ -10229,8 +10573,10 @@ best_extension_by_limited_search(JOIN *join,
partial_join_cardinality,
current_read_time,
search_depth - 1,
- prune_level,
- use_cond_selectivity);
+ use_cond_selectivity,
+ &found_eq_ref_tables);
+ swap_variables(JOIN_TAB*, join->best_ref[idx], *pos->join_tab);
+
if ((int) best_res < (int) SEARCH_OK)
goto end; // Return best_res
if (best_res == SEARCH_FOUND_EDGE &&
@@ -10262,7 +10608,7 @@ best_extension_by_limited_search(JOIN *join,
memcpy((uchar*) join->best_positions, (uchar*) join->positions,
sizeof(POSITION) * (idx + 1));
join->join_record_count= partial_join_cardinality;
- join->best_read= current_read_time - 0.001;
+ join->best_read= current_read_time - COST_EPS;
}
DBUG_EXECUTE("opt", print_plan(join, idx+1,
current_record_count,
@@ -10276,27 +10622,21 @@ best_extension_by_limited_search(JOIN *join,
restore_prev_sj_state(remaining_tables, s, idx);
if (best_res == SEARCH_FOUND_EDGE)
{
- trace_one_table.add("pruned_by_hanging_leaf", true);
+ if (pos+1 < sort_end) // If not last table
+ trace_one_table.add("pruned_by_hanging_leaf", true);
goto end;
}
}
}
+ DBUG_ASSERT(accepted_tables > 0);
best_res= SEARCH_OK;
end:
- /* Restore original table order */
- if (!*pos)
- pos--; // Revert last pos++ in for loop
- if (pos != join->best_ref + idx)
- {
- JOIN_TAB *tmp= join->best_ref[idx];
- uint elements= (uint) (pos - (join->best_ref + idx));
-
- memmove((void*) (join->best_ref + idx),
- (void*) (join->best_ref + idx + 1),
- elements * sizeof(JOIN_TAB*));
- *pos= tmp;
- }
+ join->next_sort_position-= found_tables*2;
+ if (used_eq_ref_table)
+ *processed_eq_ref_tables|= used_eq_ref_table | found_eq_ref_tables;
+ else
+ *processed_eq_ref_tables= 0;
DBUG_RETURN(best_res);
}
@@ -17591,6 +17931,116 @@ static void restore_prev_nj_state(JOIN_TAB *last)
}
+/*
+ Compute allowed_top_level_tables - a bitmap of tables one can put into the
+ join order if the last table in the join prefix is not inside any outer
+ join nest.
+
+ NESTED_JOIN::direct_children_map - a bitmap of tables ... if the last
+ table in the join prefix is inside the join nest.
+
+ Note: it looks like a sensible way to do this is a top-down descent on
+ JOIN::join_list, but apparently that list is missing I_S tables.
+ e.g. for SHOW TABLES WHERE col IN (SELECT ...) it will just have a
+ semi-join nest.
+*/
+
+void JOIN::calc_allowed_top_level_tables(SELECT_LEX *lex)
+{
+ TABLE_LIST *tl;
+ List_iterator<TABLE_LIST> ti(lex->leaf_tables);
+ DBUG_ENTER("JOIN::calc_allowed_top_level_tables");
+ DBUG_ASSERT(allowed_top_level_tables == 0); // Should only be called once
+
+ while ((tl= ti++))
+ {
+ table_map map;
+ TABLE_LIST *embedding= tl->embedding;
+
+ if (tl->table)
+ map= tl->table->map;
+ else
+ {
+ DBUG_ASSERT(tl->jtbm_subselect);
+ map= table_map(1) << tl->jtbm_table_no;
+ }
+
+ if (!(embedding= tl->embedding))
+ {
+ allowed_top_level_tables |= map;
+ continue;
+ }
+
+ // Walk out of any semi-join nests
+ while (embedding && !embedding->on_expr)
+ {
+ // semi-join nest or an INSERT-INTO view...
+ embedding->nested_join->direct_children_map |= map;
+ embedding= embedding->embedding;
+ }
+
+ // Ok we are in the parent nested outer join nest.
+ if (!embedding)
+ {
+ allowed_top_level_tables |= map;
+ continue;
+ }
+ embedding->nested_join->direct_children_map |= map;
+
+ // Walk to grand-parent join nest.
+ embedding= embedding->embedding;
+
+ // Walk out of any semi-join nests
+ while (embedding && !embedding->on_expr)
+ {
+ DBUG_ASSERT(embedding->sj_on_expr);
+ embedding->nested_join->direct_children_map |= map;
+ embedding= embedding->embedding;
+ }
+
+ if (embedding)
+ {
+ DBUG_ASSERT(embedding->on_expr); // Impossible, see above
+ embedding->nested_join->direct_children_map |= map;
+ }
+ else
+ allowed_top_level_tables |= map;
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Get the tables that one is allowed to have as the next table in the
+ current plan
+*/
+
+table_map JOIN::get_allowed_nj_tables(uint idx)
+{
+ TABLE_LIST *last_emb;
+ if (idx > const_tables &&
+ (last_emb= positions[idx-1].table->table->pos_in_table_list->embedding))
+ {
+ for (;last_emb && last_emb != emb_sjm_nest;
+ last_emb= last_emb->embedding)
+ {
+ if (!last_emb->sj_on_expr)
+ {
+ NESTED_JOIN *nest= last_emb->nested_join;
+ if (!nest->is_fully_covered())
+ {
+ // Return tables that are direct members of this join nest
+ return nest->direct_children_map;
+ }
+ }
+ }
+ }
+ // Return bitmap of tables not in any join nest
+ if (emb_sjm_nest)
+ return emb_sjm_nest->nested_join->direct_children_map;
+ return allowed_top_level_tables;
+}
+
/*
Change access methods not to use join buffering and adjust costs accordingly
@@ -17633,7 +18083,7 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
if (first_tab > join->const_tables)
{
- cost= join->positions[first_tab - 1].prefix_cost.total_cost();
+ cost= join->positions[first_tab - 1].prefix_cost;
rec_count= join->positions[first_tab - 1].prefix_record_count;
}
else
@@ -28905,7 +29355,7 @@ JOIN::reoptimize(Item *added_where, table_map join_tables,
/* added_keyuse contents is copied, and it is no longer needed. */
delete_dynamic(&added_keyuse);
- if (sort_and_filter_keyuse(thd, &keyuse, true))
+ if (sort_and_filter_keyuse(this, &keyuse, true))
return REOPT_ERROR;
optimize_keyuse(this, &keyuse);
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 6426223622d..2e37a4bdced 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -359,6 +359,13 @@ typedef struct st_join_table {
table_map dependent,key_dependent;
/*
+    This is set for embedded subqueries. It contains the table map of
+ the outer expression, like 'A' in the following expression:
+ WHERE A in (SELECT ....)
+ */
+ table_map embedded_dependent;
+
+ /*
1 - use quick select
2 - use "Range checked for each record"
*/
@@ -945,6 +952,9 @@ public:
double prefix_record_count;
+ /* Cost for the join prefix */
+ double prefix_cost;
+
/*
NULL - 'index' or 'range' or 'index_merge' or 'ALL' access is used.
Other - [eq_]ref[_or_null] access is used. Pointer to {t.keypart1 = expr}
@@ -976,9 +986,6 @@ public:
LooseScan_picker loosescan_picker;
Sj_materialization_picker sjmat_picker;
- /* Cumulative cost and record count for the join prefix */
- Cost_estimate prefix_cost;
-
/*
Current optimization state: Semi-join strategy to be used for this
and preceding join tables.
@@ -1254,6 +1261,10 @@ public:
table_map outer_join;
/* Bitmap of tables used in the select list items */
table_map select_list_used_tables;
+  /* Tables that have a possibility to use EQ_REF */
+ table_map eq_ref_tables;
+
+ table_map allowed_top_level_tables;
ha_rows send_records,found_records,join_examined_rows, accepted_rows;
/*
@@ -1286,9 +1297,12 @@ public:
/* Finally picked QEP. This is result of join optimization */
POSITION *best_positions;
+ POSITION *sort_positions; /* Temporary space used by greedy_search */
+ POSITION *next_sort_position; /* Next free space in sort_positions */
Pushdown_query *pushdown_query;
JOIN_TAB *original_join_tab;
+ uint sort_space;
/******* Join optimization state members start *******/
/*
@@ -1315,6 +1329,13 @@ public:
*/
table_map cur_sj_inner_tables;
+ /* A copy of thd->variables.optimizer_prune_level */
+ uint prune_level;
+ /*
+ If true, do extra heuristic pruning (enabled based on
+ optimizer_extra_pruning_depth)
+ */
+ bool extra_heuristic_pruning;
#ifndef DBUG_OFF
void dbug_verify_sj_inner_tables(uint n_positions) const;
int dbug_join_tab_array_size;
@@ -1756,6 +1777,8 @@ public:
bool transform_in_predicates_into_in_subq(THD *thd);
bool optimize_upper_rownum_func();
+ void calc_allowed_top_level_tables(SELECT_LEX *lex);
+ table_map get_allowed_nj_tables(uint idx);
private:
/**
@@ -2425,7 +2448,7 @@ void fix_list_after_tbl_changes(SELECT_LEX *new_parent, List<TABLE_LIST> *tlist)
double get_tmp_table_lookup_cost(THD *thd, double row_count, uint row_size);
double get_tmp_table_write_cost(THD *thd, double row_count, uint row_size);
void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array);
-bool sort_and_filter_keyuse(THD *thd, DYNAMIC_ARRAY *keyuse,
+bool sort_and_filter_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse,
bool skip_unprefixed_keyparts);
struct st_cond_statistic
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 2320c148b3a..af4d4409115 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -1263,11 +1263,11 @@ mysqld_show_create_get_fields(THD *thd, TABLE_LIST *table_list,
mem_root);
field_list->push_back(new (mem_root)
Item_empty_string(thd, "character_set_client",
- MY_CS_NAME_SIZE),
+ MY_CS_CHARACTER_SET_NAME_SIZE),
mem_root);
field_list->push_back(new (mem_root)
Item_empty_string(thd, "collation_connection",
- MY_CS_NAME_SIZE),
+ MY_CS_COLLATION_NAME_SIZE),
mem_root);
}
else
@@ -6355,16 +6355,39 @@ int fill_schema_collation(THD *thd, TABLE_LIST *tables, COND *cond)
if (!(wild && wild[0] &&
wild_case_compare(scs, tmp_cl->coll_name.str, wild)))
{
- const char *tmp_buff;
+ LEX_CSTRING context_collation_name=
+ tmp_cl->get_collation_name(MY_COLLATION_NAME_MODE_CONTEXT);
+ LEX_CSTRING full_collation_name=
+ tmp_cl->get_collation_name(MY_COLLATION_NAME_MODE_FULL);
+ bool is_context= cmp(context_collation_name, full_collation_name);
+ /*
+ Some collations are applicable to multiple character sets.
+ Display them only once, with the short name (without the
+ character set prefix).
+ */
+ if (is_context &&
+ cmp(tmp_cl->cs_name, Lex_cstring(STRING_WITH_LEN("utf8mb4"))))
+ continue;
restore_record(table, s->default_values);
- table->field[0]->store(tmp_cl->coll_name.str, tmp_cl->coll_name.length,
- scs);
- table->field[1]->store(&tmp_cl->cs_name, scs);
- table->field[2]->store((longlong) tmp_cl->number, TRUE);
- tmp_buff= (tmp_cl->state & MY_CS_PRIMARY) ? "Yes" : "";
- table->field[3]->store(tmp_buff, strlen(tmp_buff), scs);
- tmp_buff= (tmp_cl->state & MY_CS_COMPILED)? "Yes" : "";
- table->field[4]->store(tmp_buff, strlen(tmp_buff), scs);
+ table->field[0]->store(context_collation_name, scs);
+ if (is_context)
+ {
+ table->field[1]->set_null(); // CHARACTER_SET_NAME
+ table->field[2]->set_null(); // ID
+ table->field[3]->set_null(); // IS_DEFAULT
+ }
+ else
+ {
+ table->field[1]->set_notnull(); // CHARACTER_SET_NAME
+ table->field[1]->store(tmp_cl->cs_name, scs);
+ table->field[2]->set_notnull(); // ID
+ table->field[2]->store((longlong) tmp_cl->number, TRUE);
+ table->field[3]->set_notnull(); // IS_DEFAULT
+ table->field[3]->store(
+ Show::Yes_or_empty::value(tmp_cl->default_flag()), scs);
+ }
+ table->field[4]->store(
+ Show::Yes_or_empty::value(tmp_cl->compiled_flag()), scs);
table->field[5]->store((longlong) tmp_cl->strxfrm_multiply, TRUE);
if (schema_table_store_record(thd, table))
return 1;
@@ -6399,8 +6422,16 @@ int fill_schema_coll_charset_app(THD *thd, TABLE_LIST *tables, COND *cond)
!my_charset_same(tmp_cs,tmp_cl))
continue;
restore_record(table, s->default_values);
- table->field[0]->store(&tmp_cl->coll_name, scs);
+ LEX_CSTRING context_collation_name=
+ tmp_cl->get_collation_name(MY_COLLATION_NAME_MODE_CONTEXT);
+ LEX_CSTRING full_collation_name=
+ tmp_cl->get_collation_name(MY_COLLATION_NAME_MODE_FULL);
+ table->field[0]->store(context_collation_name, scs);
table->field[1]->store(&tmp_cl->cs_name, scs);
+ table->field[2]->store(full_collation_name, scs);
+ table->field[3]->store(tmp_cl->number);
+ table->field[4]->store(
+ Show::Yes_or_empty::value(tmp_cl->default_flag()), scs);
if (schema_table_store_record(thd, table))
return 1;
}
@@ -9075,7 +9106,7 @@ ST_FIELD_INFO schema_fields_info[]=
Column("CATALOG_NAME", Catalog(), NOT_NULL),
Column("SCHEMA_NAME", Name(), NOT_NULL, "Database"),
Column("DEFAULT_CHARACTER_SET_NAME", CSName(), NOT_NULL),
- Column("DEFAULT_COLLATION_NAME", CSName(), NOT_NULL),
+ Column("DEFAULT_COLLATION_NAME", CLName(), NOT_NULL),
Column("SQL_PATH", Varchar(FN_REFLEN), NULLABLE),
Column("SCHEMA_COMMENT", Varchar(DATABASE_COMMENT_MAXLEN), NOT_NULL),
CEnd()
@@ -9104,7 +9135,7 @@ ST_FIELD_INFO tables_fields_info[]=
Column("CREATE_TIME", Datetime(0), NULLABLE, "Create_time",OPEN_FULL_TABLE),
Column("UPDATE_TIME", Datetime(0), NULLABLE, "Update_time",OPEN_FULL_TABLE),
Column("CHECK_TIME", Datetime(0), NULLABLE, "Check_time", OPEN_FULL_TABLE),
- Column("TABLE_COLLATION", CSName(), NULLABLE, "Collation", OPEN_FRM_ONLY),
+ Column("TABLE_COLLATION", CLName(), NULLABLE, "Collation", OPEN_FRM_ONLY),
Column("CHECKSUM", ULonglong(), NULLABLE, "Checksum", OPEN_FULL_TABLE),
Column("CREATE_OPTIONS", Varchar(2048),NULLABLE, "Create_options",
OPEN_FULL_TABLE),
@@ -9134,7 +9165,7 @@ ST_FIELD_INFO columns_fields_info[]=
Column("NUMERIC_SCALE", ULonglong(), NULLABLE, OPEN_FRM_ONLY),
Column("DATETIME_PRECISION", ULonglong(), NULLABLE, OPEN_FRM_ONLY),
Column("CHARACTER_SET_NAME", CSName(), NULLABLE, OPEN_FRM_ONLY),
- Column("COLLATION_NAME", CSName(), NULLABLE, "Collation", OPEN_FRM_ONLY),
+ Column("COLLATION_NAME", CLName(), NULLABLE, "Collation", OPEN_FRM_ONLY),
Column("COLUMN_TYPE", Longtext(65535), NOT_NULL, "Type", OPEN_FRM_ONLY),
Column("COLUMN_KEY", Varchar(3), NOT_NULL, "Key", OPEN_FRM_ONLY),
Column("EXTRA", Varchar(80), NOT_NULL, "Extra", OPEN_FRM_ONLY),
@@ -9151,7 +9182,7 @@ ST_FIELD_INFO columns_fields_info[]=
ST_FIELD_INFO charsets_fields_info[]=
{
Column("CHARACTER_SET_NAME", CSName(), NOT_NULL, "Charset"),
- Column("DEFAULT_COLLATE_NAME", CSName(), NOT_NULL, "Default collation"),
+ Column("DEFAULT_COLLATE_NAME", CLName(), NOT_NULL, "Default collation"),
Column("DESCRIPTION", Varchar(60), NOT_NULL, "Description"),
Column("MAXLEN", SLonglong(3), NOT_NULL, "Maxlen"),
CEnd()
@@ -9160,10 +9191,10 @@ ST_FIELD_INFO charsets_fields_info[]=
ST_FIELD_INFO collation_fields_info[]=
{
- Column("COLLATION_NAME", CSName(), NOT_NULL, "Collation"),
- Column("CHARACTER_SET_NAME", CSName(), NOT_NULL, "Charset"),
- Column("ID", SLonglong(MY_INT32_NUM_DECIMAL_DIGITS), NOT_NULL, "Id"),
- Column("IS_DEFAULT", Yes_or_empty(), NOT_NULL, "Default"),
+ Column("COLLATION_NAME", CLName(), NOT_NULL, "Collation"),
+ Column("CHARACTER_SET_NAME", CSName(), NULLABLE, "Charset"),
+ Column("ID", SLonglong(MY_INT32_NUM_DECIMAL_DIGITS), NULLABLE, "Id"),
+ Column("IS_DEFAULT", Yes_or_empty(), NULLABLE, "Default"),
Column("IS_COMPILED", Yes_or_empty(), NOT_NULL, "Compiled"),
Column("SORTLEN", SLonglong(3), NOT_NULL, "Sortlen"),
CEnd()
@@ -9236,8 +9267,8 @@ ST_FIELD_INFO events_fields_info[]=
Column("EVENT_COMMENT", Name(), NOT_NULL),
Column("ORIGINATOR", SLonglong(10),NOT_NULL,"Originator"),
Column("CHARACTER_SET_CLIENT", CSName(), NOT_NULL, "character_set_client"),
- Column("COLLATION_CONNECTION", CSName(), NOT_NULL, "collation_connection"),
- Column("DATABASE_COLLATION", CSName(), NOT_NULL, "Database Collation"),
+ Column("COLLATION_CONNECTION", CLName(), NOT_NULL, "collation_connection"),
+ Column("DATABASE_COLLATION", CLName(), NOT_NULL, "Database Collation"),
CEnd()
};
@@ -9245,8 +9276,11 @@ ST_FIELD_INFO events_fields_info[]=
ST_FIELD_INFO coll_charset_app_fields_info[]=
{
- Column("COLLATION_NAME", CSName(), NOT_NULL),
+ Column("COLLATION_NAME", CLName(), NOT_NULL),
Column("CHARACTER_SET_NAME", CSName(), NOT_NULL),
+ Column("FULL_COLLATION_NAME",CLName(), NOT_NULL),
+ Column("ID", SLonglong(MY_INT32_NUM_DECIMAL_DIGITS), NOT_NULL),
+ Column("IS_DEFAULT", Yes_or_empty(), NOT_NULL),
CEnd()
};
@@ -9282,8 +9316,8 @@ ST_FIELD_INFO proc_fields_info[]=
Column("ROUTINE_COMMENT", Longtext(65535), NOT_NULL, "Comment"),
Column("DEFINER", Definer(), NOT_NULL, "Definer"),
Column("CHARACTER_SET_CLIENT", CSName(), NOT_NULL, "character_set_client"),
- Column("COLLATION_CONNECTION", CSName(), NOT_NULL, "collation_connection"),
- Column("DATABASE_COLLATION", CSName(), NOT_NULL, "Database Collation"),
+ Column("COLLATION_CONNECTION", CLName(), NOT_NULL, "collation_connection"),
+ Column("DATABASE_COLLATION", CLName(), NOT_NULL, "Database Collation"),
CEnd()
};
@@ -9323,7 +9357,7 @@ ST_FIELD_INFO view_fields_info[]=
Column("DEFINER", Definer(), NOT_NULL, OPEN_FRM_ONLY),
Column("SECURITY_TYPE", Varchar(7), NOT_NULL, OPEN_FRM_ONLY),
Column("CHARACTER_SET_CLIENT", CSName(), NOT_NULL, OPEN_FRM_ONLY),
- Column("COLLATION_CONNECTION", CSName(), NOT_NULL, OPEN_FRM_ONLY),
+ Column("COLLATION_CONNECTION", CLName(), NOT_NULL, OPEN_FRM_ONLY),
Column("ALGORITHM", Varchar(10),NOT_NULL, OPEN_FRM_ONLY),
CEnd()
};
@@ -9450,9 +9484,9 @@ ST_FIELD_INFO triggers_fields_info[]=
Column("DEFINER", Definer(), NOT_NULL, "Definer", OPEN_FRM_ONLY),
Column("CHARACTER_SET_CLIENT", CSName(), NOT_NULL, "character_set_client",
OPEN_FRM_ONLY),
- Column("COLLATION_CONNECTION", CSName(), NOT_NULL, "collation_connection",
+ Column("COLLATION_CONNECTION", CLName(), NOT_NULL, "collation_connection",
OPEN_FRM_ONLY),
- Column("DATABASE_COLLATION", CSName(), NOT_NULL, "Database Collation",
+ Column("DATABASE_COLLATION", CLName(), NOT_NULL, "Database Collation",
OPEN_FRM_ONLY),
CEnd()
};
@@ -10018,17 +10052,17 @@ static bool show_create_trigger_impl(THD *thd, Trigger *trigger)
fields.push_back(new (mem_root)
Item_empty_string(thd, "character_set_client",
- MY_CS_NAME_SIZE),
+ MY_CS_CHARACTER_SET_NAME_SIZE),
mem_root);
fields.push_back(new (mem_root)
Item_empty_string(thd, "collation_connection",
- MY_CS_NAME_SIZE),
+ MY_CS_COLLATION_NAME_SIZE),
mem_root);
fields.push_back(new (mem_root)
Item_empty_string(thd, "Database Collation",
- MY_CS_NAME_SIZE),
+ MY_CS_COLLATION_NAME_SIZE),
mem_root);
static const Datetime zero_datetime(Datetime::zero());
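
The fill_schema_collation() change above compares a collation's context name with its fully qualified name and NULLs out the charset-specific columns when they differ. A simplified, self-contained sketch of that decision (not server code; the names and the collation id are placeholders):

    // Standalone illustration (not server code) of the new NULL-ing logic:
    // when the context name differs from the full name, the collation is
    // applicable to several character sets, so CHARACTER_SET_NAME, ID and
    // IS_DEFAULT are reported as NULL.
    #include <iostream>
    #include <string>

    struct CollationRow { std::string name, charset, id; };

    static CollationRow make_row(const std::string &context_name,
                                 const std::string &full_name,
                                 const std::string &charset, int id)
    {
      bool is_context= (context_name != full_name);
      if (is_context)
        return { context_name, "NULL", "NULL" };
      return { context_name, charset, std::to_string(id) };
    }

    int main()
    {
      // The collation id below is a made-up placeholder.
      CollationRow r= make_row("uca1400_ai_ci", "utf8mb4_uca1400_ai_ci",
                               "utf8mb4", 1234);
      std::cout << r.name << '\t' << r.charset << '\t' << r.id << '\n';
      return 0;
    }
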
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 00b60877826..25c5fc7d8bb 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1817,7 +1817,7 @@ report_error:
table->table ? table->table->s : NULL));
}
DEBUG_SYNC(thd, "rm_table_no_locks_before_binlog");
- thd->thread_specific_used= TRUE;
+ thd->used|= THD::THREAD_SPECIFIC_USED;
error= 0;
err:
@@ -4628,7 +4628,7 @@ int create_table_impl(THD *thd,
if (is_trans != NULL)
*is_trans= table->file->has_transactions();
- thd->thread_specific_used= TRUE;
+ thd->used|= THD::THREAD_SPECIFIC_USED;
create_info->table= table; // Store pointer to table
}
@@ -4819,11 +4819,9 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table,
thd->abort_on_warning= thd->is_strict_mode();
if (mysql_create_table_no_lock(thd, &ddl_log_state_create, &ddl_log_state_rm,
- &create_table->db,
- &create_table->table_name, create_info,
- alter_info,
- &is_trans, create_table_mode,
- create_table) > 0)
+ &create_table->db, &create_table->table_name,
+ create_info, alter_info, &is_trans,
+ create_table_mode, create_table) > 0)
{
result= 1;
goto err;
diff --git a/sql/sql_time.cc b/sql/sql_time.cc
index 12e4460ed25..753f78ccc22 100644
--- a/sql/sql_time.cc
+++ b/sql/sql_time.cc
@@ -508,7 +508,7 @@ bool int_to_datetime_with_warn(THD *thd, const Longlong_hybrid &nr,
my_time_t TIME_to_timestamp(THD *thd, const MYSQL_TIME *t, uint *error_code)
{
- thd->time_zone_used= 1;
+ thd->used|= THD::TIME_ZONE_USED;
return thd->variables.time_zone->TIME_to_gmt_sec(t, error_code);
}
@@ -1274,7 +1274,7 @@ mix_date_and_time(MYSQL_TIME *to, const MYSQL_TIME *from)
void set_current_date(THD *thd, MYSQL_TIME *to)
{
thd->variables.time_zone->gmt_sec_to_TIME(to, thd->query_start());
- thd->time_zone_used= 1;
+ thd->used|= THD::TIME_ZONE_USED;
datetime_to_date(to);
}
diff --git a/sql/sql_type.cc b/sql/sql_type.cc
index 1c433043db7..3759c0ba02f 100644
--- a/sql/sql_type.cc
+++ b/sql/sql_type.cc
@@ -1064,7 +1064,7 @@ Datetime::Datetime(THD *thd, const timeval &tv)
{
thd->variables.time_zone->gmt_sec_to_TIME(this, tv.tv_sec);
second_part= tv.tv_usec;
- thd->time_zone_used= 1;
+ thd->used|= THD::TIME_ZONE_USED;
DBUG_ASSERT(is_valid_value_slow());
}
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 03e038573be..a36f1cab62f 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -936,6 +936,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token <kwd> MASTER_USER_SYM
%token <kwd> MASTER_USE_GTID_SYM
%token <kwd> MASTER_HEARTBEAT_PERIOD_SYM
+%token <kwd> MASTER_DEMOTE_TO_SLAVE_SYM
%token <kwd> MAX_CONNECTIONS_PER_HOUR
%token <kwd> MAX_QUERIES_PER_HOUR
%token <kwd> MAX_ROWS
@@ -2292,6 +2293,10 @@ master_file_def:
my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "MASTER_use_gtid"));
Lex->mi.use_gtid_opt= LEX_MASTER_INFO::LEX_GTID_NO;
}
+ | MASTER_DEMOTE_TO_SLAVE_SYM '=' bool
+ {
+ Lex->mi.is_demotion_opt= (bool) $3;
+ }
;
optional_connection_name:
@@ -6485,11 +6490,8 @@ old_or_new_charset_name_or_default:
collation_name:
ident_or_text
{
- CHARSET_INFO *cs;
- if (unlikely(!(cs= mysqld_collation_get_by_name($1.str,
- thd->get_utf8_flag()))))
+ if ($$.set_by_name($1.str, thd->get_utf8_flag()))
MYSQL_YYABORT;
- $$= Lex_extended_collation(Lex_exact_collation(cs));
}
;
@@ -9755,8 +9757,7 @@ string_factor_expr:
| string_factor_expr COLLATE_SYM collation_name
{
if (unlikely(!($$= new (thd->mem_root)
- Item_func_set_collation(thd, $1,
- $3.charset_info()))))
+ Item_func_set_collation(thd, $1, $3))))
MYSQL_YYABORT;
}
;
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 2c938f88733..f82727c4349 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -728,7 +728,7 @@ static Sys_var_bit Sys_explicit_defaults_for_timestamp(
"as NULL with DEFAULT NULL attribute, Without this option, "
"TIMESTAMP columns are NOT NULL and have implicit DEFAULT clauses.",
SESSION_VAR(option_bits), CMD_LINE(OPT_ARG),
- OPTION_EXPLICIT_DEF_TIMESTAMP, DEFAULT(FALSE), NO_MUTEX_GUARD, IN_BINLOG);
+ OPTION_EXPLICIT_DEF_TIMESTAMP, DEFAULT(TRUE), NO_MUTEX_GUARD, IN_BINLOG);
static Sys_var_ulonglong Sys_bulk_insert_buff_size(
"bulk_insert_buffer_size", "Size of tree cache used in bulk "
@@ -2456,6 +2456,27 @@ Sys_gtid_ignore_duplicates(
DEFAULT(FALSE), NO_MUTEX_GUARD,
NOT_IN_BINLOG, ON_CHECK(check_gtid_ignore_duplicates),
ON_UPDATE(fix_gtid_ignore_duplicates));
+
+static bool
+update_slave_max_statement_time(sys_var *self, THD *thd, enum_var_type type)
+{
+ slave_max_statement_time=
+ double2ulonglong(slave_max_statement_time_double * 1e6);
+
+ return false;
+}
+
+static Sys_var_on_access_global<
+ Sys_var_double, PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_MAX_STATEMENT_TIME>
+ Sys_slave_max_statement_time(
+ "slave_max_statement_time",
+ "A query that has taken more than slave_max_statement_time seconds to "
+ "run on the slave will be aborted. The argument will be treated as a "
+ "decimal value with microsecond precision. A value of 0 (default) "
+ "means no timeout",
+ GLOBAL_VAR(slave_max_statement_time_double), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, LONG_TIMEOUT), DEFAULT(0), NO_MUTEX_GUARD,
+ NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(update_slave_max_statement_time));
#endif
@@ -2683,6 +2704,20 @@ static Sys_var_mybool Sys_old_mode(
SESSION_VAR(old_mode), CMD_LINE(OPT_ARG), DEFAULT(FALSE), 0, NOT_IN_BINLOG, ON_CHECK(0),
ON_UPDATE(set_old_mode), DEPRECATED("'@@old_mode'"));
+static Sys_var_mybool Sys_opt_allow_suspicious_udfs(
+ "allow_suspicious_udfs",
+ "Allows use of user-defined functions (UDFs) consisting of only one symbol xxx() without corresponding xxx_init() or xxx_deinit(). That also means that one can load any function from any library, for example exit() from libc.so",
+ READ_ONLY GLOBAL_VAR(opt_allow_suspicious_udfs),
+ CMD_LINE(OPT_ARG), DEFAULT(FALSE));
+
+#ifndef DISABLE_GRANT_OPTIONS
+static Sys_var_mybool Sys_skip_grant_tables(
+ "skip_grant_tables",
+ "Start without grant tables. This gives all users FULL ACCESS to all tables.",
+ READ_ONLY GLOBAL_VAR(opt_noacl),
+ CMD_LINE(OPT_ARG), DEFAULT(FALSE));
+#endif
+
static const char *alter_algorithm_modes[]= {"DEFAULT", "COPY", "INPLACE",
"NOCOPY", "INSTANT", NULL};
@@ -2726,9 +2761,10 @@ static Sys_var_ulong Sys_optimizer_prune_level(
"Controls the heuristic(s) applied during query optimization to prune "
"less-promising partial plans from the optimizer search space. "
"Meaning: 0 - do not apply any heuristic, thus perform exhaustive "
- "search; 1 - prune plans based on number of retrieved rows",
+ "search: 1 - prune plans based on cost and number of retrieved rows "
+ "eq_ref: 2 - prune also if we find an eq_ref chain",
SESSION_VAR(optimizer_prune_level), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, 1), DEFAULT(1), BLOCK_SIZE(1));
+ VALID_RANGE(0, 2), DEFAULT(2), BLOCK_SIZE(1));
static Sys_var_ulong Sys_optimizer_selectivity_sampling_limit(
"optimizer_selectivity_sampling_limit",
@@ -2770,6 +2806,13 @@ static Sys_var_ulong Sys_optimizer_search_depth(
SESSION_VAR(optimizer_search_depth), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, MAX_TABLES+1), DEFAULT(MAX_TABLES+1), BLOCK_SIZE(1));
+static Sys_var_ulong Sys_optimizer_extra_pruning_depth(
+ "optimizer_extra_pruning_depth",
+ "If the optimizer needs to enumerate join prefix of this size or "
+ "larger, then it will try agressively prune away the search space.",
+ SESSION_VAR(optimizer_extra_pruning_depth), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, MAX_TABLES+1), DEFAULT(8), BLOCK_SIZE(1));
+
/* this is used in the sigsegv handler */
export const char *optimizer_switch_names[]=
{
@@ -6214,6 +6257,12 @@ static Sys_var_charptr Sys_wsrep_patch_version(
READ_ONLY GLOBAL_VAR(wsrep_patch_version_ptr), CMD_LINE_HELP_ONLY,
DEFAULT(WSREP_PATCH_VERSION));
+
+static Sys_var_charptr Sys_wsrep_allowlist(
+ "wsrep_allowlist", "Allowed IP addresses split by comma delimiter",
+ READ_ONLY GLOBAL_VAR(wsrep_allowlist), CMD_LINE(REQUIRED_ARG),
+ DEFAULT(""));
+
#endif /* WITH_WSREP */
static bool fix_host_cache_size(sys_var *, THD *, enum_var_type)
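
The new slave_max_statement_time variable above is stored as a double in seconds and converted to microseconds in update_slave_max_statement_time(). A tiny standalone sketch of that conversion (the helper name is invented; the server uses double2ulonglong()):

    // Sketch of the conversion in update_slave_max_statement_time(); the
    // helper name is invented, the server uses double2ulonglong().
    #include <cstdint>
    #include <iostream>

    static uint64_t seconds_to_microseconds(double seconds)
    {
      return static_cast<uint64_t>(seconds * 1e6);
    }

    int main()
    {
      std::cout << seconds_to_microseconds(0.5)  << '\n'   // 500000
                << seconds_to_microseconds(30.0) << '\n';  // 30000000
      return 0;
    }
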
diff --git a/sql/sys_vars.inl b/sql/sys_vars.inl
index 97e3a28b67e..b1d7bc31255 100644
--- a/sql/sys_vars.inl
+++ b/sql/sys_vars.inl
@@ -2241,7 +2241,7 @@ public:
timezone). If it's the global value which was used we can't replicate
(binlog code stores session value only).
*/
- thd->time_zone_used= 1;
+ thd->used|= THD::TIME_ZONE_USED;
return valptr(thd, session_var(thd, Time_zone *));
}
const uchar *global_value_ptr(THD *thd, const LEX_CSTRING *base) const
diff --git a/sql/table.h b/sql/table.h
index ca8ff1c1848..ff9f93becbf 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -3154,6 +3154,7 @@ typedef struct st_nested_join
table_map sj_depends_on;
/* Outer non-trivially correlated tables */
table_map sj_corr_tables;
+ table_map direct_children_map;
List<Item_ptr> sj_outer_expr_list;
/**
True if this join nest node is completely covered by the query execution
diff --git a/sql/temporary_tables.cc b/sql/temporary_tables.cc
index 5aacd0e6e99..b43a38e7fa2 100644
--- a/sql/temporary_tables.cc
+++ b/sql/temporary_tables.cc
@@ -427,7 +427,7 @@ bool THD::open_temporary_table(TABLE_LIST *tl)
#endif
table->query_id= query_id;
- thread_specific_used= true;
+ used|= THREAD_SPECIFIC_USED;
/* It is neither a derived table nor non-updatable view. */
tl->updatable= true;
@@ -1355,7 +1355,7 @@ bool THD::log_events_and_free_tmp_shares()
{
if (IS_USER_TABLE(share))
{
- bool save_thread_specific_used= thread_specific_used;
+ used_t save_thread_specific_used= used & THREAD_SPECIFIC_USED;
my_thread_id save_pseudo_thread_id= variables.pseudo_thread_id;
char db_buf[FN_REFLEN];
String db(db_buf, sizeof(db_buf), system_charset_info);
@@ -1405,7 +1405,7 @@ bool THD::log_events_and_free_tmp_shares()
clear_error();
CHARSET_INFO *cs_save= variables.character_set_client;
variables.character_set_client= system_charset_info;
- thread_specific_used= true;
+ used|= THREAD_SPECIFIC_USED;
Query_log_event qinfo(this, s_query.ptr(),
s_query.length() - 1 /* to remove trailing ',' */,
@@ -1438,7 +1438,7 @@ bool THD::log_events_and_free_tmp_shares()
get_stmt_da()->set_overwrite_status(false);
}
variables.pseudo_thread_id= save_pseudo_thread_id;
- thread_specific_used= save_thread_specific_used;
+ used = (used & ~THREAD_SPECIFIC_USED) | save_thread_specific_used;
}
else
{
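
Several hunks above (sql_table.cc, sql_time.cc, sql_type.cc, sys_vars.inl, temporary_tables.cc) replace individual THD booleans such as time_zone_used and thread_specific_used with bits in a single THD::used word. A minimal sketch of the bit-flag idiom, using a hypothetical stand-in class rather than the real THD:

    // Sketch of the flag-bitmask idiom with a hypothetical stand-in for THD.
    #include <cstdint>
    #include <cassert>

    typedef uint32_t used_t;

    struct Thd_like
    {
      enum : used_t { TIME_ZONE_USED= 1U << 0, THREAD_SPECIFIC_USED= 1U << 1 };
      used_t used= 0;
    };

    int main()
    {
      Thd_like thd;
      thd.used|= Thd_like::TIME_ZONE_USED;                 // set one flag

      // Save and restore a single bit, as log_events_and_free_tmp_shares()
      // does for THREAD_SPECIFIC_USED:
      used_t save= thd.used & Thd_like::THREAD_SPECIFIC_USED;
      thd.used|= Thd_like::THREAD_SPECIFIC_USED;
      thd.used= (thd.used & ~Thd_like::THREAD_SPECIFIC_USED) | save;

      assert(!(thd.used & Thd_like::THREAD_SPECIFIC_USED));
      assert(thd.used & Thd_like::TIME_ZONE_USED);
      return 0;
    }
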
diff --git a/sql/wsrep_allowlist_service.cc b/sql/wsrep_allowlist_service.cc
new file mode 100644
index 00000000000..23ade8b32b9
--- /dev/null
+++ b/sql/wsrep_allowlist_service.cc
@@ -0,0 +1,56 @@
+/* Copyright 2021-2022 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include "wsrep_allowlist_service.h"
+
+#include "my_global.h"
+#include "wsrep_mysqld.h"
+#include "wsrep_priv.h"
+#include "wsrep_schema.h"
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+class Wsrep_allowlist_service : public wsrep::allowlist_service
+{
+public:
+ bool allowlist_cb(wsrep::allowlist_service::allowlist_key key,
+ const wsrep::const_buffer& value) WSREP_NOEXCEPT override;
+};
+
+bool Wsrep_allowlist_service::allowlist_cb (
+ wsrep::allowlist_service::allowlist_key key,
+ const wsrep::const_buffer& value)
+ WSREP_NOEXCEPT
+{
+ std::string string_value(value.data());
+ bool res= wsrep_schema->allowlist_check(key, string_value);
+ return res;
+}
+
+std::unique_ptr<wsrep::allowlist_service> entrypoint;
+
+wsrep::allowlist_service* wsrep_allowlist_service_init()
+{
+ entrypoint = std::unique_ptr<wsrep::allowlist_service>(new Wsrep_allowlist_service);
+ return entrypoint.get();
+}
+
+void wsrep_allowlist_service_deinit()
+{
+ entrypoint.reset();
+}
+
diff --git a/sql/wsrep_allowlist_service.h b/sql/wsrep_allowlist_service.h
new file mode 100644
index 00000000000..2d96139b5c6
--- /dev/null
+++ b/sql/wsrep_allowlist_service.h
@@ -0,0 +1,29 @@
+/* Copyright 2021 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+/*
+ Interface to the wsrep provider allowlist service.
+ */
+
+#ifndef WSREP_PROVIDER_ALLOWLIST_H
+#define WSREP_PROVIDER_ALLOWLIST_H
+
+#include "wsrep/allowlist_service.hpp"
+
+wsrep::allowlist_service* wsrep_allowlist_service_init();
+
+void wsrep_allowlist_service_deinit();
+
+#endif /* WSREP_PROVIDER_ALLOWLIST_H */
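
wsrep_allowlist_service.cc above owns the service object through a file-local unique_ptr: init() creates it and hands the provider a raw pointer, deinit() destroys it. A simplified, self-contained sketch of that ownership pattern (the types and names below are stand-ins, not the wsrep API):

    // Ownership pattern sketch; the types below are stand-ins, not the
    // wsrep API.
    #include <iostream>
    #include <memory>
    #include <string>

    struct Service                        // stand-in for wsrep::allowlist_service
    {
      virtual ~Service() {}
      virtual bool check(const std::string &ip)= 0;
    };

    struct My_service : Service
    {
      bool check(const std::string &ip) override { return !ip.empty(); }
    };

    static std::unique_ptr<Service> entrypoint;

    Service *service_init()               // cf. wsrep_allowlist_service_init()
    {
      entrypoint.reset(new My_service);
      return entrypoint.get();            // provider keeps only a raw pointer
    }

    void service_deinit()                 // cf. wsrep_allowlist_service_deinit()
    {
      entrypoint.reset();
    }

    int main()
    {
      Service *s= service_init();
      std::cout << s->check("10.0.0.1") << '\n';
      service_deinit();
      return 0;
    }
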
diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc
index c65536ac064..d9670dccfab 100644
--- a/sql/wsrep_mysqld.cc
+++ b/sql/wsrep_mysqld.cc
@@ -84,6 +84,7 @@ const char *wsrep_data_home_dir;
const char *wsrep_dbug_option;
const char *wsrep_notify_cmd;
const char *wsrep_status_file;
+const char *wsrep_allowlist;
ulong wsrep_debug; // Debug level logging
my_bool wsrep_convert_LOCK_to_trx; // Convert locking sessions to trx
@@ -454,6 +455,17 @@ void wsrep_init_schema()
WSREP_ERROR("Failed to init wsrep schema");
unireg_abort(1);
}
+ // If we are bootstrapping a new cluster we should clear the
+ // allowlist table and populate it from the wsrep_allowlist variable
+ if (wsrep_new_cluster)
+ {
+ wsrep_schema->clear_allowlist();
+ std::vector<std::string> ip_allowlist;
+ if (wsrep_split_allowlist(ip_allowlist))
+ {
+ wsrep_schema->store_allowlist(ip_allowlist);
+ }
+ }
}
}
@@ -880,10 +892,14 @@ int wsrep_init()
if (!wsrep_data_home_dir || strlen(wsrep_data_home_dir) == 0)
wsrep_data_home_dir= mysql_real_data_home;
- if (Wsrep_server_state::instance().load_provider(wsrep_provider,
- wsrep_provider_options))
+ Wsrep_server_state::init_provider_services();
+ if (Wsrep_server_state::instance().load_provider(
+ wsrep_provider,
+ wsrep_provider_options,
+ Wsrep_server_state::instance().provider_services()))
{
WSREP_ERROR("Failed to load provider");
+ Wsrep_server_state::deinit_provider_services();
return 1;
}
@@ -897,6 +913,7 @@ int wsrep_init()
"supports streaming replication.",
wsrep_provider, global_system_variables.wsrep_trx_fragment_size);
Wsrep_server_state::instance().unload_provider();
+ Wsrep_server_state::deinit_provider_services();
return 1;
}
@@ -1012,6 +1029,8 @@ void wsrep_deinit(bool free_options)
WSREP_DEBUG("wsrep_deinit");
Wsrep_server_state::instance().unload_provider();
+ Wsrep_server_state::deinit_provider_services();
+
provider_name[0]= '\0';
provider_version[0]= '\0';
provider_vendor[0]= '\0';
@@ -1161,8 +1180,9 @@ bool wsrep_start_replication(const char *wsrep_cluster_address)
// --wsrep-new-cluster flag is not used, checking wsrep_cluster_address
// it should match gcomm:// only to be considered as bootstrap node.
// This logic is used in galera.
- if (!wsrep_new_cluster && (strlen(wsrep_cluster_address) == 8) &&
- !strncmp(wsrep_cluster_address, "gcomm://", 8))
+ if (!wsrep_new_cluster &&
+ (strlen(wsrep_cluster_address) == 8) &&
+ !strncmp(wsrep_cluster_address, "gcomm://", 8))
{
wsrep_new_cluster= true;
}
@@ -1793,6 +1813,34 @@ bool wsrep_reload_ssl()
}
}
+bool wsrep_split_allowlist(std::vector<std::string>& allowlist)
+{
+ if (!wsrep_allowlist || 0 == strlen(wsrep_allowlist))
+ {
+ return false;
+ }
+ std::istringstream ss{wsrep_allowlist};
+ std::string token;
+ while (std::getline(ss, token, ','))
+ {
+ if (!token.empty())
+ {
+ struct sockaddr_in sa_4;
+ struct sockaddr_in6 sa_6;
+ if ((inet_pton(AF_INET, token.c_str(), &(sa_4.sin_addr)) != 0) ||
+ (inet_pton(AF_INET6, token.c_str(), &(sa_6.sin6_addr)) != 0))
+ {
+ allowlist.push_back(token);
+ }
+ else
+ {
+ WSREP_WARN("Invalid IP address %s provided in `wsrep_allowlist` variable", token.c_str());
+ }
+ }
+ }
+ return allowlist.size();
+}
+
/*!
* @param db Database string
* @param table Table string
@@ -3328,7 +3376,6 @@ void wsrep_wait_appliers_close(THD *thd)
is also applier, we are still running...
*/
}
-
int wsrep_must_ignore_error(THD* thd)
{
const int error= thd->get_stmt_da()->sql_errno();
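
wsrep_split_allowlist() above splits the comma-separated wsrep_allowlist value and keeps only tokens that inet_pton() accepts as IPv4 or IPv6 addresses. A standalone approximation of the same parsing (POSIX sockets headers assumed; warning output simplified):

    // Standalone approximation of wsrep_split_allowlist(); requires POSIX
    // <arpa/inet.h>.
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    static std::vector<std::string> split_allowlist(const std::string &csv)
    {
      std::vector<std::string> out;
      std::istringstream ss(csv);
      std::string token;
      while (std::getline(ss, token, ','))
      {
        if (token.empty())
          continue;
        sockaddr_in sa4;
        sockaddr_in6 sa6;
        if (inet_pton(AF_INET, token.c_str(), &sa4.sin_addr) == 1 ||
            inet_pton(AF_INET6, token.c_str(), &sa6.sin6_addr) == 1)
          out.push_back(token);            // keep only valid IPv4/IPv6 tokens
        else
          std::cerr << "Invalid IP address " << token << " ignored\n";
      }
      return out;
    }

    int main()
    {
      for (const std::string &ip : split_allowlist("10.0.0.1,not-an-ip,::1"))
        std::cout << ip << '\n';           // prints 10.0.0.1 and ::1
      return 0;
    }
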
diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h
index a57f4639116..81d38df5fb5 100644
--- a/sql/wsrep_mysqld.h
+++ b/sql/wsrep_mysqld.h
@@ -68,6 +68,7 @@ extern ulong wsrep_max_ws_size;
extern ulong wsrep_max_ws_rows;
extern const char* wsrep_notify_cmd;
extern const char* wsrep_status_file;
+extern const char* wsrep_allowlist;
extern my_bool wsrep_certify_nonPK;
extern long int wsrep_protocol_version;
extern my_bool wsrep_desync;
@@ -220,6 +221,7 @@ extern int wsrep_check_opts();
extern void wsrep_prepend_PATH (const char* path);
extern bool wsrep_append_fk_parent_table(THD* thd, TABLE_LIST* table, wsrep::key_array* keys);
extern bool wsrep_reload_ssl();
+extern bool wsrep_split_allowlist(std::vector<std::string>& allowlist);
/* Other global variables */
extern wsrep_seqno_t wsrep_locked_seqno;
diff --git a/sql/wsrep_schema.cc b/sql/wsrep_schema.cc
index 1cfdae2890b..5595e0a090c 100644
--- a/sql/wsrep_schema.cc
+++ b/sql/wsrep_schema.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2015-2021 Codership Oy <info@codership.com>
+/* Copyright (C) 2015-2022 Codership Oy <info@codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -39,6 +39,7 @@
#define WSREP_STREAMING_TABLE "wsrep_streaming_log"
#define WSREP_CLUSTER_TABLE "wsrep_cluster"
#define WSREP_MEMBERS_TABLE "wsrep_cluster_members"
+#define WSREP_ALLOWLIST_TABLE "wsrep_allowlist"
const char* wsrep_sr_table_name_full= WSREP_SCHEMA "/" WSREP_STREAMING_TABLE;
@@ -46,6 +47,7 @@ static const std::string wsrep_schema_str= WSREP_SCHEMA;
static const std::string sr_table_str= WSREP_STREAMING_TABLE;
static const std::string cluster_table_str= WSREP_CLUSTER_TABLE;
static const std::string members_table_str= WSREP_MEMBERS_TABLE;
+static const std::string allowlist_table_str= WSREP_ALLOWLIST_TABLE;
static const std::string create_cluster_table_str=
"CREATE TABLE IF NOT EXISTS " + wsrep_schema_str + "." + cluster_table_str +
@@ -91,6 +93,13 @@ static const std::string create_frag_table_str=
"PRIMARY KEY (node_uuid, trx_id, seqno)"
") ENGINE=InnoDB STATS_PERSISTENT=0";
+static const std::string create_allowlist_table_str=
+ "CREATE TABLE IF NOT EXISTS " + wsrep_schema_str + "." + allowlist_table_str +
+ "("
+ "ip CHAR(64) NOT NULL,"
+ "PRIMARY KEY (ip)"
+ ") ENGINE=InnoDB STATS_PERSISTENT=0";
+
static const std::string delete_from_cluster_table=
"DELETE FROM " + wsrep_schema_str + "." + cluster_table_str;
@@ -440,11 +449,18 @@ static int insert(TABLE* table) {
}
if ((error= table->file->ha_write_row(table->record[0]))) {
- WSREP_ERROR("Error writing into %s.%s: %d",
- table->s->db.str,
- table->s->table_name.str,
- error);
- ret= 1;
+ if (error == HA_ERR_FOUND_DUPP_KEY) {
+ WSREP_WARN("Duplicate key found when writing into %s.%s",
+ table->s->db.str,
+ table->s->table_name.str);
+ ret= HA_ERR_FOUND_DUPP_KEY;
+ } else {
+ WSREP_ERROR("Error writing into %s.%s: %d",
+ table->s->db.str,
+ table->s->table_name.str,
+ error);
+ ret= 1;
+ }
}
DBUG_RETURN(ret);
@@ -685,6 +701,8 @@ static void wsrep_init_thd_for_schema(THD *thd)
wsrep_store_threadvars(thd);
}
+static bool wsrep_schema_ready= false;
+
int Wsrep_schema::init()
{
DBUG_ENTER("Wsrep_schema::init()");
@@ -720,12 +738,16 @@ int Wsrep_schema::init()
alter_members_table.size()) ||
Wsrep_schema_impl::execute_SQL(thd,
alter_frag_table.c_str(),
- alter_frag_table.size()))
+ alter_frag_table.size()) ||
+ Wsrep_schema_impl::execute_SQL(thd,
+ create_allowlist_table_str.c_str(),
+ create_allowlist_table_str.size()))
{
ret= 1;
}
else
{
+ wsrep_schema_ready= true;
ret= 0;
}
@@ -1496,3 +1518,194 @@ int Wsrep_schema::recover_sr_transactions(THD *orig_thd)
out:
DBUG_RETURN(ret);
}
+
+void Wsrep_schema::clear_allowlist()
+{
+ THD* thd= new THD(next_thread_id());
+ if (!thd)
+ {
+ WSREP_ERROR("Unable to get thd");
+ return;
+ }
+
+ thd->thread_stack= (char*)&thd;
+ wsrep_init_thd_for_schema(thd);
+ TABLE* allowlist_table= 0;
+ int error= 0;
+
+ Wsrep_schema_impl::init_stmt(thd);
+
+ if (Wsrep_schema_impl::open_for_write(thd, allowlist_table_str.c_str(),
+ &allowlist_table) ||
+ Wsrep_schema_impl::init_for_scan(allowlist_table))
+ {
+ WSREP_ERROR("Failed to open mysql.wsrep_allowlist table");
+ goto out;
+ }
+
+ while (0 == error)
+ {
+ if ((error= Wsrep_schema_impl::next_record(allowlist_table)) == 0)
+ {
+ Wsrep_schema_impl::delete_row(allowlist_table);
+ }
+ else if (error == HA_ERR_END_OF_FILE)
+ {
+ continue;
+ }
+ else
+ {
+ WSREP_ERROR("Allowlist table scan returned error %d", error);
+ }
+ }
+
+ Wsrep_schema_impl::end_scan(allowlist_table);
+ Wsrep_schema_impl::finish_stmt(thd);
+out:
+ delete thd;
+}
+
+void Wsrep_schema::store_allowlist(std::vector<std::string>& ip_allowlist)
+{
+ THD* thd= new THD(next_thread_id());
+ if (!thd)
+ {
+ WSREP_ERROR("Unable to get thd");
+ return;
+ }
+
+ thd->thread_stack= (char*)&thd;
+ wsrep_init_thd_for_schema(thd);
+ TABLE* allowlist_table= 0;
+ int error;
+ Wsrep_schema_impl::init_stmt(thd);
+ if (Wsrep_schema_impl::open_for_write(thd, allowlist_table_str.c_str(),
+ &allowlist_table))
+ {
+ WSREP_ERROR("Failed to open mysql.wsrep_allowlist table");
+ goto out;
+ }
+ for (size_t i= 0; i < ip_allowlist.size(); ++i)
+ {
+ Wsrep_schema_impl::store(allowlist_table, 0, ip_allowlist[i]);
+ if ((error= Wsrep_schema_impl::insert(allowlist_table)))
+ {
+ if (error == HA_ERR_FOUND_DUPP_KEY)
+ {
+ WSREP_WARN("Duplicate entry (%s) found in `wsrep_allowlist` list", ip_allowlist[i].c_str());
+ }
+ else
+ {
+ WSREP_ERROR("Failed to write mysql.wsrep_allowlist table: %d", error);
+ goto out;
+ }
+ }
+ }
+ Wsrep_schema_impl::finish_stmt(thd);
+out:
+ delete thd;
+}
+
+typedef struct Allowlist_check_arg
+{
+ Allowlist_check_arg(const std::string& value)
+ : value(value)
+ , response(false)
+ {
+ }
+ std::string value;
+ bool response;
+} Allowlist_check_arg;
+
+static void *allowlist_check_thread(void *param)
+{
+ Allowlist_check_arg *arg= (Allowlist_check_arg *) param;
+
+ my_thread_init();
+ THD thd(0);
+ thd.thread_stack= (char *) &thd;
+ wsrep_init_thd_for_schema(&thd);
+
+ int error;
+ TABLE *allowlist_table= 0;
+ bool match_found_or_empty= false;
+ bool table_have_rows= false;
+ char row[64]= {
+ 0,
+ };
+
+ /*
+ * Read allowlist table
+ */
+ Wsrep_schema_impl::init_stmt(&thd);
+ if (Wsrep_schema_impl::open_for_read(&thd, allowlist_table_str.c_str(),
+ &allowlist_table) ||
+ Wsrep_schema_impl::init_for_scan(allowlist_table))
+ {
+ goto out;
+ }
+ while (true)
+ {
+ if ((error= Wsrep_schema_impl::next_record(allowlist_table)) == 0)
+ {
+ if (Wsrep_schema_impl::scan(allowlist_table, 0, row, sizeof(row)))
+ {
+ goto out;
+ }
+ table_have_rows= true;
+ if (!arg->value.compare(row))
+ {
+ match_found_or_empty= true;
+ break;
+ }
+ }
+ else if (error == HA_ERR_END_OF_FILE)
+ {
+ if (!table_have_rows)
+ {
+ WSREP_DEBUG("allowlist table empty, allowing all connections.");
+ // If the table is empty, all connections are allowed
+ match_found_or_empty= true;
+ }
+ break;
+ }
+ else
+ {
+ goto out;
+ }
+ }
+ if (Wsrep_schema_impl::end_scan(allowlist_table))
+ {
+ goto out;
+ }
+ Wsrep_schema_impl::finish_stmt(&thd);
+ (void) trans_commit(&thd);
+out:
+ my_thread_end();
+ arg->response = match_found_or_empty;
+ return 0;
+}
+
+bool Wsrep_schema::allowlist_check(Wsrep_allowlist_key key,
+ const std::string &value)
+{
+ // The wsrep schema is not initialized yet at this point; allow by default
+ if (wsrep_schema_ready == false)
+ {
+ return true;
+ }
+ pthread_t allowlist_check_thd;
+ int ret;
+ Allowlist_check_arg arg(value);
+ ret= mysql_thread_create(0, /* Not instrumented */
+ &allowlist_check_thd, NULL,
+ allowlist_check_thread, &arg);
+ if (ret)
+ {
+ WSREP_ERROR("allowlist_check(): mysql_thread_create() failed: %d (%s)",
+ ret, strerror(ret));
+ return false;
+ }
+ pthread_join(allowlist_check_thd, NULL);
+ return arg.response;
+}
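
Wsrep_schema::allowlist_check() above runs the table scan in a separate thread with its own THD, passing an argument struct and joining before reading the result. A reduced sketch of just that thread-and-join pattern, with the table scan replaced by a dummy comparison:

    // Reduced sketch of the thread-and-join pattern; the real worker scans
    // mysql.wsrep_allowlist, this stand-in just compares against one address.
    #include <pthread.h>
    #include <iostream>
    #include <string>

    struct Check_arg
    {
      std::string value;
      bool response= false;
    };

    static void *check_thread(void *param)
    {
      Check_arg *arg= static_cast<Check_arg *>(param);
      arg->response= (arg->value == "10.0.0.1");   // dummy check
      return nullptr;
    }

    int main()
    {
      Check_arg arg;
      arg.value= "10.0.0.1";
      pthread_t tid;
      if (pthread_create(&tid, nullptr, check_thread, &arg) != 0)
        return 1;                                  // creation failed
      pthread_join(tid, nullptr);
      std::cout << (arg.response ? "allowed" : "denied") << '\n';
      return 0;
    }
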
diff --git a/sql/wsrep_schema.h b/sql/wsrep_schema.h
index 36e23998d19..05522e77089 100644
--- a/sql/wsrep_schema.h
+++ b/sql/wsrep_schema.h
@@ -133,6 +133,28 @@ class Wsrep_schema
*/
int recover_sr_transactions(THD* orig_thd);
+
+ /**
+ On bootstrap, delete all rows from the allowlist table before it is repopulated from the `wsrep_allowlist` variable
+ */
+ void clear_allowlist();
+
+ /**
+ On bootstrap, store the IP addresses from the `wsrep_allowlist` variable into the allowlist table
+ */
+ void store_allowlist(std::vector<std::string>& ip_allowlist);
+
+ /**
+ Scan the allowlist table for an accepted connection. The connection is
+ allowed if the IP is found in the table or if the table is empty.
+
+ @param key Which allowlist column to compare
+ @param val Value to be checked against the allowlist
+
+ @return True if the value is found or the table is empty, false otherwise
+ */
+ bool allowlist_check(Wsrep_allowlist_key key, const std::string& val);
+
private:
/* Non-copyable */
Wsrep_schema(const Wsrep_schema&);
diff --git a/sql/wsrep_server_state.cc b/sql/wsrep_server_state.cc
index 973850871b1..8e5f20c8b96 100644
--- a/sql/wsrep_server_state.cc
+++ b/sql/wsrep_server_state.cc
@@ -16,6 +16,7 @@
#include "my_global.h"
#include "wsrep_api.h"
#include "wsrep_server_state.h"
+#include "wsrep_allowlist_service.h"
#include "wsrep_binlog.h" /* init/deinit group commit */
mysql_mutex_t LOCK_wsrep_server_state;
@@ -26,6 +27,8 @@ PSI_mutex_key key_LOCK_wsrep_server_state;
PSI_cond_key key_COND_wsrep_server_state;
#endif
+wsrep::provider::services Wsrep_server_state::m_provider_services;
+
Wsrep_server_state::Wsrep_server_state(const std::string& name,
const std::string& incoming_address,
const std::string& address,
@@ -74,7 +77,6 @@ void Wsrep_server_state::init_once(const std::string& name,
void Wsrep_server_state::destroy()
{
-
if (m_instance)
{
delete m_instance;
@@ -83,3 +85,16 @@ void Wsrep_server_state::destroy()
mysql_cond_destroy(&COND_wsrep_server_state);
}
}
+
+void Wsrep_server_state::init_provider_services()
+{
+ m_provider_services.allowlist_service= wsrep_allowlist_service_init();
+}
+
+void Wsrep_server_state::deinit_provider_services()
+{
+ if (m_provider_services.allowlist_service)
+ wsrep_allowlist_service_deinit();
+ m_provider_services= wsrep::provider::services();
+}
+
diff --git a/sql/wsrep_server_state.h b/sql/wsrep_server_state.h
index 1ef937300f6..8759f7a9d84 100644
--- a/sql/wsrep_server_state.h
+++ b/sql/wsrep_server_state.h
@@ -55,6 +55,14 @@ public:
{
return (get_provider().capabilities() & capability);
}
+
+ static void init_provider_services();
+ static void deinit_provider_services();
+
+ static const wsrep::provider::services& provider_services()
+ {
+ return m_provider_services;
+ }
private:
Wsrep_server_state(const std::string& name,
@@ -67,6 +75,7 @@ private:
Wsrep_mutex m_mutex;
Wsrep_condition_variable m_cond;
Wsrep_server_service m_service;
+ static wsrep::provider::services m_provider_services;
static Wsrep_server_state* m_instance;
};
diff --git a/sql/wsrep_types.h b/sql/wsrep_types.h
index 9da00e305a7..cd53ab95d0c 100644
--- a/sql/wsrep_types.h
+++ b/sql/wsrep_types.h
@@ -21,9 +21,11 @@
#include "wsrep/seqno.hpp"
#include "wsrep/view.hpp"
+#include "wsrep/allowlist_service.hpp"
typedef wsrep::id Wsrep_id;
typedef wsrep::seqno Wsrep_seqno;
typedef wsrep::view Wsrep_view;
+typedef enum wsrep::allowlist_service::allowlist_key Wsrep_allowlist_key;
#endif /* WSREP_TYPES_H */