author     Sergei Golubchik <serg@mariadb.org>   2015-08-05 09:45:36 +0200
committer  Sergei Golubchik <serg@mariadb.org>   2015-08-05 09:45:36 +0200
commit     50ef00663322aeb36b18048193f0d47c9d388ae3 (patch)
tree       5194ab20feb3d18a61b4394484752ed3048270cd /sql
parent     928edb5a9139502abf6e1c1b1552234b39d38a78 (diff)
parent     d6d54584ee813c6592cecb01eab3d896ed1475a8 (diff)
download   mariadb-git-50ef00663322aeb36b18048193f0d47c9d388ae3.tar.gz
Merge branch '10.0' into bb-10.0-serg
Diffstat (limited to 'sql')
-rw-r--r--  sql/handler.cc       20
-rw-r--r--  sql/log_event.cc      2
-rw-r--r--  sql/rpl_parallel.cc  39
-rw-r--r--  sql/sql_cursor.cc    37
-rw-r--r--  sql/sql_parse.cc      2
-rw-r--r--  sql/sql_update.cc    21
6 files changed, 114 insertions, 7 deletions
diff --git a/sql/handler.cc b/sql/handler.cc
index 1f8daf3927b..868a1475a7f 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -1582,6 +1582,26 @@ int ha_rollback_trans(THD *thd, bool all)
DBUG_ASSERT(thd->transaction.stmt.ha_list == NULL ||
trans == &thd->transaction.stmt);
+#ifdef HAVE_REPLICATION
+ if (is_real_trans)
+ {
+ /*
+ In parallel replication, if we need to roll back during commit, we must
+ first inform following transactions that we are going to abort our commit
+ attempt. Otherwise those following transactions can run too early, and
+ possibly cause replication to fail. See comments in retry_event_group().
+
+ There were several bugs with this in the past that were very hard to
+ track down (MDEV-7458, MDEV-8302). So we add here an assertion for
+ rollback without signalling following transactions. And in release
+ builds, we explicitly do the signalling before rolling back.
+ */
+ DBUG_ASSERT(!(thd->rgi_slave && thd->rgi_slave->did_mark_start_commit));
+ if (thd->rgi_slave && thd->rgi_slave->did_mark_start_commit)
+ thd->rgi_slave->unmark_start_commit();
+ }
+#endif
+
if (thd->in_sub_stmt)
{
DBUG_ASSERT(0);
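
The comment added above relies on the mark_start_commit()/unmark_start_commit() protocol of parallel replication. As a rough illustration only (the demo_* names below are hypothetical stand-ins, not MariaDB's rpl_group_info or group_commit_orderer), the idea is that each transaction in a group signals once when it reaches the commit phase, and a transaction that has to roll back after signalling must withdraw that signal first so that following transactions keep waiting:

#include <atomic>
#include <cassert>

/* Simplified stand-in for a group of replicated transactions. */
struct demo_gco {
  std::atomic<int> started_commit{0};   /* members that signalled "commit started" */
};

struct demo_rgi {
  demo_gco *gco;
  bool did_mark_start_commit;

  explicit demo_rgi(demo_gco *g) : gco(g), did_mark_start_commit(false) {}

  void mark_start_commit() {
    gco->started_commit.fetch_add(1, std::memory_order_release);
    did_mark_start_commit= true;
  }

  /* Must be called before rolling back, otherwise waiters may run too early. */
  void unmark_start_commit() {
    assert(did_mark_start_commit);
    gco->started_commit.fetch_sub(1, std::memory_order_release);
    did_mark_start_commit= false;
  }
};

/* Mirrors the guard added to ha_rollback_trans() above. */
void demo_rollback(demo_rgi *rgi) {
  if (rgi && rgi->did_mark_start_commit)
    rgi->unmark_start_commit();
  /* ... the actual storage-engine rollback would happen here ... */
}

int main() {
  demo_gco gco;
  demo_rgi rgi(&gco);
  rgi.mark_start_commit();
  demo_rollback(&rgi);
  return gco.started_commit.load();   /* 0: following transactions keep waiting */
}

In the patch itself the debug assertion catches the case, and release builds silently undo the signal before the rollback proceeds.
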
diff --git a/sql/log_event.cc b/sql/log_event.cc
index a75e2137f69..fef0e70b164 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -4277,7 +4277,6 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi,
"mysql", rpl_gtid_slave_state_table_name.str,
errcode,
thd->get_stmt_da()->message());
- trans_rollback(thd);
sub_id= 0;
thd->is_slave_error= 1;
goto end;
@@ -7367,7 +7366,6 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi)
"%s.%s: %d: %s",
"mysql", rpl_gtid_slave_state_table_name.str, ec,
thd->get_stmt_da()->message());
- trans_rollback(thd);
thd->is_slave_error= 1;
return err;
}
diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc
index 305e8053032..600d2ab41aa 100644
--- a/sql/rpl_parallel.cc
+++ b/sql/rpl_parallel.cc
@@ -226,6 +226,11 @@ static void
signal_error_to_sql_driver_thread(THD *thd, rpl_group_info *rgi, int err)
{
rgi->worker_error= err;
+ /*
+ In case we get an error during commit, inform following transactions that
+ we aborted our commit.
+ */
+ rgi->unmark_start_commit();
rgi->cleanup_context(thd, true);
rgi->rli->abort_slave= true;
rgi->rli->stop_for_until= false;
@@ -370,6 +375,7 @@ do_retry:
transaction we deadlocked with will not signal that it started to commit
until after the unmark.
*/
+ DBUG_EXECUTE_IF("inject_mdev8302", { my_sleep(20000);});
rgi->unmark_start_commit();
DEBUG_SYNC(thd, "rpl_parallel_retry_after_unmark");
@@ -884,9 +890,24 @@ handle_rpl_parallel_thread(void *arg)
group_ending= is_group_ending(qev->ev, event_type);
if (group_ending && likely(!rgi->worker_error))
{
- DEBUG_SYNC(thd, "rpl_parallel_before_mark_start_commit");
- rgi->mark_start_commit();
- DEBUG_SYNC(thd, "rpl_parallel_after_mark_start_commit");
+ /*
+ Do an extra check for (deadlock) kill here. This helps prevent a
+ lingering deadlock kill that occurred during normal DML processing from
+ propagating past the mark_start_commit(). If we detect a deadlock only
+ after mark_start_commit(), we have to unmark, which has at least a
+ theoretical possibility of leaving a window where it looks like all
+ transactions in a GCO have started committing, while in fact one
+ will need to roll back and retry. This is not supposed to be possible
+ (since there is a deadlock, at least one transaction should be
+ blocked from reaching commit), but this seems a fragile assurance,
+ and there were historically a number of subtle bugs in this area.
+ */
+ if (!thd->killed)
+ {
+ DEBUG_SYNC(thd, "rpl_parallel_before_mark_start_commit");
+ rgi->mark_start_commit();
+ DEBUG_SYNC(thd, "rpl_parallel_after_mark_start_commit");
+ }
}
/*
@@ -911,7 +932,17 @@ handle_rpl_parallel_thread(void *arg)
});
if (!err)
#endif
- err= rpt_handle_event(qev, rpt);
+ {
+ if (thd->check_killed())
+ {
+ thd->clear_error();
+ thd->get_stmt_da()->reset_diagnostics_area();
+ thd->send_kill_message();
+ err= 1;
+ }
+ else
+ err= rpt_handle_event(qev, rpt);
+ }
delete_or_keep_event_post_apply(rgi, event_type, qev->ev);
DBUG_EXECUTE_IF("rpl_parallel_simulate_temp_err_gtid_0_x_100",
err= dbug_simulate_tmp_error(rgi, thd););
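
The two changes above in handle_rpl_parallel_thread() follow the same pattern: check for a pending (deadlock) kill at a safe point and fail early, instead of letting the kill surface in the middle of commit. A minimal sketch of that pattern, with hypothetical demo_* stand-ins rather than the server's THD and event-application APIs:

#include <string>

struct demo_thd {
  bool killed;
  demo_thd() : killed(false) {}
  bool check_killed() const { return killed; }
};

static int demo_apply_event(demo_thd *, const std::string &) { return 0; }

/*
  Turn a pending (deadlock) kill into an error *before* the next event is
  applied, so the whole event group is rolled back and retried instead of
  being left half-applied.
*/
static int demo_handle_event(demo_thd *thd, const std::string &ev)
{
  if (thd->check_killed())
    return 1;                        /* caller treats this as a temporary error */
  return demo_apply_event(thd, ev);
}

int main()
{
  demo_thd thd;
  thd.killed= true;                  /* e.g. chosen as a deadlock victim */
  return demo_handle_event(&thd, "UPDATE t1 SET a=a+1");
}
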
diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc
index c09f3269d7a..d8684086e68 100644
--- a/sql/sql_cursor.cc
+++ b/sql/sql_cursor.cc
@@ -54,6 +54,8 @@ public:
virtual void fetch(ulong num_rows);
virtual void close();
virtual ~Materialized_cursor();
+
+ void on_table_fill_finished();
};
@@ -74,6 +76,18 @@ public:
Select_materialize(select_result *result_arg)
:result(result_arg), materialized_cursor(0) {}
virtual bool send_result_set_metadata(List<Item> &list, uint flags);
+ bool send_eof()
+ {
+ if (materialized_cursor)
+ materialized_cursor->on_table_fill_finished();
+ return false;
+ }
+
+ void abort_result_set()
+ {
+ if (materialized_cursor)
+ materialized_cursor->on_table_fill_finished();
+ }
};
@@ -388,6 +402,29 @@ Materialized_cursor::~Materialized_cursor()
}
+/*
+ @brief
+ Perform actions that are to be done when cursor materialization has
+ finished.
+
+ @detail
+ This function is called when "OPEN $cursor" has finished filling the
+ temporary table with rows that the cursor will return.
+
+ The temporary table has table->field->orig_table pointing at the tables
+ that are used in the cursor definition query. Pointers to these tables
+ will not be valid after the query finishes. So, we do what is done for
+ regular tables: have orig_table point at the table that the fields
+ belong to.
+*/
+
+void Materialized_cursor::on_table_fill_finished()
+{
+ uint fields= table->s->fields;
+ for (uint i= 0; i < fields; i++)
+ table->field[i]->orig_table= table->field[i]->table;
+}
+
/***************************************************************************
Select_materialize
****************************************************************************/
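
A stand-alone illustration of the dangling-pointer problem described in the comment above may help; the demo_* types below are hypothetical simplifications, not the server's TABLE and Field classes:

#include <cassert>
#include <vector>

struct demo_table;
struct demo_field {
  demo_table *table;        /* the (temporary) table the field belongs to */
  demo_table *orig_table;   /* where the value originally came from       */
};
struct demo_table {
  std::vector<demo_field> fields;
};

/*
  Same idea as Materialized_cursor::on_table_fill_finished(): once the
  source tables are about to be closed, point orig_table back at the
  field's own table so later FETCHes never follow a dangling pointer.
*/
static void demo_on_table_fill_finished(demo_table *tmp)
{
  for (size_t i= 0; i < tmp->fields.size(); i++)
    tmp->fields[i].orig_table= tmp->fields[i].table;
}

int main()
{
  demo_table source, tmp;
  demo_field f= { &tmp, &source };   /* column filled from "source" */
  tmp.fields.push_back(f);
  demo_on_table_fill_finished(&tmp);
  assert(tmp.fields[0].orig_table == &tmp);
  return 0;
}
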
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index e88f3b898e6..1374a93e6b9 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -7751,6 +7751,8 @@ bool multi_update_precheck(THD *thd, TABLE_LIST *tables)
*/
for (table= tables; table; table= table->next_local)
{
+ if (table->is_jtbm())
+ continue;
if (table->derived)
table->grant.privilege= SELECT_ACL;
else if ((check_access(thd, UPDATE_ACL, table->db,
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index f616549097b..03d8e0205ff 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -1161,7 +1161,7 @@ bool unsafe_key_update(List<TABLE_LIST> leaves, table_map tables_for_update)
while ((tl= it++))
{
- if (tl->table->map & tables_for_update)
+ if (!tl->is_jtbm() && (tl->table->map & tables_for_update))
{
TABLE *table1= tl->table;
bool primkey_clustered= (table1->file->primary_key_is_clustered() &&
@@ -1178,6 +1178,8 @@ bool unsafe_key_update(List<TABLE_LIST> leaves, table_map tables_for_update)
it2.rewind();
while ((tl2= it2++))
{
+ if (tl2->is_jtbm())
+ continue;
/*
Look at "next" tables only since all previous tables have
already been checked
@@ -1409,6 +1411,9 @@ int mysql_multi_update_prepare(THD *thd)
{
TABLE *table= tl->table;
+ if (tl->is_jtbm())
+ continue;
+
/* if table will be updated then check that it is unique */
if (table->map & tables_for_update)
{
@@ -1457,6 +1462,8 @@ int mysql_multi_update_prepare(THD *thd)
for (tl= table_list; tl; tl= tl->next_local)
{
bool not_used= false;
+ if (tl->is_jtbm())
+ continue;
if (multi_update_check_table_access(thd, tl, tables_for_update, &not_used))
DBUG_RETURN(TRUE);
}
@@ -1464,6 +1471,8 @@ int mysql_multi_update_prepare(THD *thd)
/* check single table update for view compound from several tables */
for (tl= table_list; tl; tl= tl->next_local)
{
+ if (tl->is_jtbm())
+ continue;
if (tl->is_merged_derived())
{
TABLE_LIST *for_update= 0;
@@ -1493,6 +1502,8 @@ int mysql_multi_update_prepare(THD *thd)
ti.rewind();
while ((tl= ti++))
{
+ if (tl->is_jtbm())
+ continue;
TABLE *table= tl->table;
TABLE_LIST *tlist;
if (!(tlist= tl->top_table())->derived)
@@ -1635,6 +1646,9 @@ int multi_update::prepare(List<Item> &not_used_values,
*/
while ((table_ref= ti++))
{
+ if (table_ref->is_jtbm())
+ continue;
+
TABLE *table= table_ref->table;
if (tables_to_update & table->map)
{
@@ -1654,6 +1668,9 @@ int multi_update::prepare(List<Item> &not_used_values,
ti.rewind();
while ((table_ref= ti++))
{
+ if (table_ref->is_jtbm())
+ continue;
+
TABLE *table= table_ref->table;
if (tables_to_update & table->map)
{
@@ -1684,6 +1701,8 @@ int multi_update::prepare(List<Item> &not_used_values,
while ((table_ref= ti++))
{
/* TODO: add support of view of join support */
+ if (table_ref->is_jtbm())
+ continue;
TABLE *table=table_ref->table;
leaf_table_count++;
if (tables_to_update & table->map)
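
All of the sql_update.cc hunks, and the earlier sql_parse.cc hunk in multi_update_precheck(), apply the same rule: JTBM pseudo-tables injected for semi-join materialization are not real updatable tables, so every loop over the table list in multi-table UPDATE skips them. A minimal sketch of that loop shape, using a hypothetical demo_table_list stand-in rather than the real TABLE_LIST:

#include <vector>

/*
  Simplified stand-in for TABLE_LIST; is_jtbm() marks a semi-join
  materialization pseudo-table, not a real base table or view.
*/
struct demo_table_list {
  bool jtbm;
  bool marked_for_update;
  demo_table_list() : jtbm(false), marked_for_update(false) {}
  bool is_jtbm() const { return jtbm; }
};

/*
  Loop pattern used throughout this patch: pseudo-tables are skipped
  before any privilege check or update bookkeeping is done.
*/
static void demo_mark_updatable(std::vector<demo_table_list> &tables)
{
  for (size_t i= 0; i < tables.size(); i++)
  {
    if (tables[i].is_jtbm())
      continue;                      /* no grants, no update map for these */
    tables[i].marked_for_update= true;
  }
}

int main()
{
  std::vector<demo_table_list> tables(3);
  tables[1].jtbm= true;              /* e.g. a materialization table for a subquery */
  demo_mark_updatable(tables);
  return tables[1].marked_for_update ? 1 : 0;   /* 0: pseudo-table left untouched */
}
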