Diffstat (limited to 'storage')
-rw-r--r--  storage/innobase/trx/trx0purge.cc | 5
-rw-r--r--  storage/maria/ha_maria.cc         | 80
-rw-r--r--  storage/maria/ma_delete_all.c     | 52
-rw-r--r--  storage/maria/ma_recovery.c       | 18
4 files changed, 105 insertions, 50 deletions
diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc
index 7d52894051d..22782cc0d1e 100644
--- a/storage/innobase/trx/trx0purge.cc
+++ b/storage/innobase/trx/trx0purge.cc
@@ -766,11 +766,12 @@ not_free:
auto block= reinterpret_cast<buf_block_t*>(bpage);
if (!bpage->lock.x_lock_try())
{
+ rescan:
/* Let buf_pool_t::release_freed_page() proceed. */
mysql_mutex_unlock(&buf_pool.flush_list_mutex);
- std::this_thread::yield();
+ mysql_mutex_lock(&buf_pool.mutex);
mysql_mutex_lock(&buf_pool.flush_list_mutex);
- rescan:
+ mysql_mutex_unlock(&buf_pool.mutex);
bpage= UT_LIST_GET_LAST(buf_pool.flush_list);
continue;
}
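
The hunk above replaces the plain yield-and-retry with a short acquisition of buf_pool.mutex before flush_list_mutex is re-taken, so that buf_pool_t::release_freed_page() can make progress, after which the scan restarts from the tail of the flush list. Below is a minimal stand-alone sketch of that retry pattern; std::mutex, std::list and all names are simplified stand-ins, not the InnoDB buffer-pool types.

#include <list>
#include <mutex>

struct Page { std::mutex latch; };

std::mutex pool_mutex;        /* simplified stand-in for buf_pool.mutex            */
std::mutex flush_list_mutex;  /* simplified stand-in for buf_pool.flush_list_mutex */
std::list<Page*> flush_list;  /* simplified stand-in for buf_pool.flush_list       */

void scan_flush_list()
{
  std::unique_lock<std::mutex> fl(flush_list_mutex);
  for (auto it= flush_list.rbegin(); it != flush_list.rend(); )
  {
    Page *page= *it;
    if (!page->latch.try_lock())
    {
      /* Let the latch holder make progress; it may be blocked on the list mutex. */
      fl.unlock();
      pool_mutex.lock();
      fl.lock();                    /* re-acquire in pool -> flush-list order */
      pool_mutex.unlock();
      it= flush_list.rbegin();      /* the list may have changed: rescan from the tail */
      continue;
    }
    /* ... work on the page ... */
    page->latch.unlock();
    ++it;
  }
}

In the sketch, pool_mutex is held only across the re-acquisition of flush_list_mutex, mirroring the lock/unlock pairing in the hunk.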
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index abe7834f36d..feff9c50232 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -872,7 +872,7 @@ extern "C" {
int _ma_killed_ptr(HA_CHECK *param)
{
- if (likely(thd_killed((THD*)param->thd)) == 0)
+ if (!param->thd || likely(thd_killed((THD*)param->thd)) == 0)
return 0;
my_errno= HA_ERR_ABORTED_BY_USER;
return 1;
@@ -901,9 +901,10 @@ int _ma_killed_ptr(HA_CHECK *param)
void _ma_report_progress(HA_CHECK *param, ulonglong progress,
ulonglong max_progress)
{
- thd_progress_report((THD*)param->thd,
- progress + max_progress * param->stage,
- max_progress * param->max_stage);
+ if (param->thd)
+ thd_progress_report((THD*)param->thd,
+ progress + max_progress * param->stage,
+ max_progress * param->max_stage);
}
@@ -2263,7 +2264,6 @@ void ha_maria::start_bulk_insert(ha_rows rows, uint flags)
{
bulk_insert_single_undo= BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR;
write_log_record_for_bulk_insert(file);
- _ma_tmp_disable_logging_for_table(file, TRUE);
/*
Pages currently in the page cache have type PAGECACHE_LSN_PAGE, we
are not allowed to overwrite them with PAGECACHE_PLAIN_PAGE, so
@@ -2271,8 +2271,12 @@ void ha_maria::start_bulk_insert(ha_rows rows, uint flags)
forced an UNDO which will for sure empty the table if we crash. The
upcoming unique-key insertions however need a proper index, so we
cannot leave the corrupted on-disk index file, thus we truncate it.
+
+ The following call will log the truncate and update the lsn for the table
+ to ensure that all redos before this will be ignored.
*/
maria_delete_all_rows(file);
+ _ma_tmp_disable_logging_for_table(file, TRUE);
}
}
else if (!file->bulk_insert &&
@@ -2303,23 +2307,58 @@ void ha_maria::start_bulk_insert(ha_rows rows, uint flags)
int ha_maria::end_bulk_insert()
{
- int first_error, error;
- my_bool abort= file->s->deleting;
+ int first_error, first_errno= 0, error;
+ my_bool abort= file->s->deleting, empty_table= 0;
+ uint enable_index_mode= HA_KEY_SWITCH_NONUNIQ_SAVE;
DBUG_ENTER("ha_maria::end_bulk_insert");
if ((first_error= maria_end_bulk_insert(file, abort)))
+ {
+ first_errno= my_errno;
abort= 1;
+ }
if ((error= maria_extra(file, HA_EXTRA_NO_CACHE, 0)))
{
- first_error= first_error ? first_error : error;
+ if (!first_error)
+ {
+ first_error= error;
+ first_errno= my_errno;
+ }
abort= 1;
}
- if (!abort && can_enable_indexes)
- if ((error= enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE)))
- first_error= first_error ? first_error : error;
+ if (bulk_insert_single_undo != BULK_INSERT_NONE)
+ {
+ if (log_not_redoable_operation("BULK_INSERT"))
+ {
+ /* Got lock timeout. Revert to an empty table and return an error. */
+ if (!first_error)
+ {
+ first_error= 1;
+ first_errno= my_errno;
+ }
+ enable_index_mode= HA_KEY_SWITCH_ALL;
+ empty_table= 1;
+ /*
+ Ignore all changed pages, required by _ma_reenable_logging_for_table()
+ */
+ _ma_flush_table_files(file, MARIA_FLUSH_DATA|MARIA_FLUSH_INDEX,
+ FLUSH_IGNORE_CHANGED, FLUSH_IGNORE_CHANGED);
+ }
+ }
+ if (!abort && can_enable_indexes)
+ {
+ if ((error= enable_indexes(enable_index_mode)))
+ {
+ if (!first_error)
+ {
+ first_error= 1;
+ first_errno= my_errno;
+ }
+ }
+ }
if (bulk_insert_single_undo != BULK_INSERT_NONE)
{
/*
@@ -2328,12 +2367,23 @@ int ha_maria::end_bulk_insert()
*/
if ((error= _ma_reenable_logging_for_table(file,
bulk_insert_single_undo ==
- BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR)))
- first_error= first_error ? first_error : error;
- bulk_insert_single_undo= BULK_INSERT_NONE; // Safety
- log_not_redoable_operation("BULK_INSERT");
+ BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR)) &&
+ !empty_table)
+ {
+ if (!first_error)
+ {
+ first_error= 1;
+ first_errno= my_errno;
+ }
+ }
+ bulk_insert_single_undo= BULK_INSERT_NONE; // Safety if called again
}
+ if (empty_table)
+ maria_delete_all_rows(file);
+
can_enable_indexes= 0;
+ if (first_error)
+ my_errno= first_errno;
DBUG_RETURN(first_error);
}
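
The rewritten end_bulk_insert() above applies one bookkeeping rule throughout: every cleanup step still runs, but only the first failure and its my_errno are recorded, a failed log_not_redoable_operation() additionally forces a full index rebuild and an empty table, and the saved errno is restored just before returning. A small self-contained sketch of that first-error/first-errno pattern follows; the step functions are hypothetical stand-ins for the handler calls, and plain errno stands in for my_errno.

#include <cerrno>

/* Hypothetical steps standing in for the handler calls; each returns
   non-zero on failure and sets errno (my_errno in the real code). */
static int flush_caches()     { return 0; }
static int enable_indexes()   { errno= EBUSY; return 1; }
static int reenable_logging() { errno= EIO; return 1; }

int end_bulk()
{
  int first_error= 0, first_errno= 0, error;

  if ((error= flush_caches()) && !first_error)
  { first_error= error; first_errno= errno; }

  if ((error= enable_indexes()) && !first_error)
  { first_error= error; first_errno= errno; }   /* first failure: EBUSY is kept */

  if ((error= reenable_logging()) && !first_error)
  { first_error= error; first_errno= errno; }   /* later failure: errno not recorded */

  if (first_error)
    errno= first_errno;   /* restore the first failure's cause before returning */
  return first_error;
}

With this, DBUG_RETURN(first_error) in the patch reports the outcome of the earliest failing step rather than whatever the last call happened to set.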
diff --git a/storage/maria/ma_delete_all.c b/storage/maria/ma_delete_all.c
index e06974fed66..f355d0da3e8 100644
--- a/storage/maria/ma_delete_all.c
+++ b/storage/maria/ma_delete_all.c
@@ -64,6 +64,7 @@ int maria_delete_all_rows(MARIA_HA *info)
*/
LEX_CUSTRING log_array[TRANSLOG_INTERNAL_PARTS + 1];
uchar log_data[FILEID_STORE_SIZE];
+ my_bool error;
log_array[TRANSLOG_INTERNAL_PARTS + 0].str= log_data;
log_array[TRANSLOG_INTERNAL_PARTS + 0].length= sizeof(log_data);
if (unlikely(translog_write_record(&lsn, LOGREC_REDO_DELETE_ALL,
@@ -78,6 +79,32 @@ int maria_delete_all_rows(MARIA_HA *info)
*/
if (_ma_mark_file_changed(share))
goto err;
+
+ /*
+ Because LOGREC_REDO_DELETE_ALL does not operate on pages, it has the
+ following problem:
+ delete_all; inserts (redo_insert); all pages get flushed; checkpoint:
+ the dirty pages list will be empty. In recovery, delete_all is executed,
+ but redo_insert are skipped (dirty pages list is empty).
+ To avoid this, we need to set skip_redo_lsn now, and thus need to sync
+ files.
+ Also fixes the problem of:
+ bulk insert; insert; delete_all; crash:
+ "bulk insert" is skipped (no REDOs), so if "insert" would not be skipped
+ (if we didn't update skip_redo_lsn below) then "insert" would be tried
+ and fail, saying that it sees that the first page has to be created
+ though the inserted row has rownr>0.
+
+ We use lsn-1 below to ensure that the above redo record will still be executed.
+ */
+ error= _ma_state_info_write(share,
+ MA_STATE_INFO_WRITE_DONT_MOVE_OFFSET |
+ MA_STATE_INFO_WRITE_LOCK) ||
+ _ma_update_state_lsns(share, lsn-1, info->trn->trid, FALSE, FALSE) ||
+ _ma_sync_table_files(info);
+ info->trn->rec_lsn= LSN_IMPOSSIBLE;
+ if (error)
+ goto err;
}
else
{
@@ -113,28 +140,9 @@ int maria_delete_all_rows(MARIA_HA *info)
if (log_record)
{
- /*
- Because LOGREC_REDO_DELETE_ALL does not operate on pages, it has the
- following problem:
- delete_all; inserts (redo_insert); all pages get flushed; checkpoint:
- the dirty pages list will be empty. In recovery, delete_all is executed,
- but redo_insert are skipped (dirty pages list is empty).
- To avoid this, we need to set skip_redo_lsn now, and thus need to sync
- files.
- Also fixes the problem of:
- bulk insert; insert; delete_all; crash:
- "bulk insert" is skipped (no REDOs), so if "insert" would not be skipped
- (if we didn't update skip_redo_lsn below) then "insert" would be tried
- and fail, saying that it sees that the first page has to be created
- though the inserted row has rownr>0.
- */
- my_bool error= _ma_state_info_write(share,
- MA_STATE_INFO_WRITE_DONT_MOVE_OFFSET |
- MA_STATE_INFO_WRITE_LOCK) ||
- _ma_update_state_lsns(share, lsn, info->trn->trid, FALSE, FALSE) ||
- _ma_sync_table_files(info);
- info->trn->rec_lsn= LSN_IMPOSSIBLE;
- if (error)
+ /* Update lsn to signal that the above redo does not have to be executed anymore */
+ if (_ma_update_state_lsns(share, lsn, info->trn->trid, FALSE, FALSE) ||
+ _ma_sync_table_files(info))
goto err;
}
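
The patch above writes the table state with lsn-1 before the rows are removed and only advances it to lsn in the second hunk, after the delete has completed, so the LOGREC_REDO_DELETE_ALL record written at lsn remains replayable if the server crashes in between, while every older redo is still filtered out. A tiny sketch of the replay decision this relies on, under the assumed rule that recovery skips a redo record whose LSN is not newer than the table's recorded horizon (all names are illustrative only):

#include <cstdint>

typedef std::uint64_t LSN;

/* Illustrative stand-in for the per-table state LSN (skip_redo_lsn). */
struct TableState { LSN skip_redo_lsn; };

/* Assumed recovery rule: replay a redo record only if it is newer
   than the LSN recorded in the table state. */
static bool should_replay(const TableState &state, LSN rec_lsn)
{
  return rec_lsn > state.skip_redo_lsn;
}

int main()
{
  LSN lsn= 1000;                      /* LSN of LOGREC_REDO_DELETE_ALL       */
  TableState before= { lsn - 1 };     /* state written before deleting rows  */
  TableState after=  { lsn };         /* state written after the delete      */

  bool replayed_after_crash= should_replay(before, lsn);   /* true  */
  bool replayed_when_done=   should_replay(after, lsn);    /* false */
  return (replayed_after_crash && !replayed_when_done) ? 0 : 1;
}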
diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c
index ef8bf3b169b..006c8bef672 100644
--- a/storage/maria/ma_recovery.c
+++ b/storage/maria/ma_recovery.c
@@ -1175,16 +1175,6 @@ prototype_redo_exec_hook(REDO_REPAIR_TABLE)
/* We try to get the table first, so that we get the table in the trace log */
info= get_MARIA_HA_from_REDO_record(rec);
- if (skip_DDLs)
- {
- /*
- REPAIR is not exactly a DDL, but it manipulates files without logging
- insertions into them.
- */
- tprint(tracef, "we skip DDLs\n");
- DBUG_RETURN(0);
- }
-
if (!info)
{
/* no such table, don't need to warn */
@@ -1196,6 +1186,13 @@ prototype_redo_exec_hook(REDO_REPAIR_TABLE)
tprint(tracef, "we skip repairing crashed table\n");
DBUG_RETURN(0);
}
+
+ if (rec->lsn <= info->s->state.is_of_horizon)
+ {
+ DBUG_PRINT("info", ("Table is up to date, skipping redo"));
+ DBUG_RETURN(0);
+ }
+
/*
Otherwise, the mapping is newer than the table, and our record is newer
than the mapping, so we can repair.
@@ -1560,7 +1557,6 @@ prototype_redo_exec_hook(REDO_INSERT_ROW_HEAD)
uchar *buff= NULL;
MARIA_HA *info= get_MARIA_HA_from_REDO_record(rec);
if (info == NULL || maria_is_crashed(info))
-
{
/*
Table was skipped at open time (because later dropped/renamed, not