Diffstat (limited to 'storage/innobase/sync/sync0sync.c')
-rw-r--r--  storage/innobase/sync/sync0sync.c | 47
1 file changed, 26 insertions(+), 21 deletions(-)
diff --git a/storage/innobase/sync/sync0sync.c b/storage/innobase/sync/sync0sync.c
index 251a392a02c..8ea57b8655c 100644
--- a/storage/innobase/sync/sync0sync.c
+++ b/storage/innobase/sync/sync0sync.c
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2011, Innobase Oy. All Rights Reserved.
+Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
@@ -690,7 +690,7 @@ mutex_set_debug_info(
ut_ad(mutex);
ut_ad(file_name);
- sync_thread_add_level(mutex, mutex->level, FALSE);
+ sync_thread_add_level(mutex, mutex->level);
mutex->file_name = file_name;
mutex->line = line;
@@ -1133,9 +1133,8 @@ void
sync_thread_add_level(
/*==================*/
void* latch, /*!< in: pointer to a mutex or an rw-lock */
- ulint level, /*!< in: level in the latching order; if
+ ulint level) /*!< in: level in the latching order; if
SYNC_LEVEL_VARYING, nothing is done */
- ibool relock) /*!< in: TRUE if re-entering an x-lock */
{
ulint i;
sync_level_t* slot;
@@ -1186,10 +1185,6 @@ sync_thread_add_level(
array = thread_slot->levels;
- if (relock) {
- goto levels_ok;
- }
-
/* NOTE that there is a problem with _NODE and _LEAF levels: if the
B-tree height changes, then a leaf can change to an internal node
or the other way around. We do not know at present if this can cause
@@ -1214,7 +1209,6 @@ sync_thread_add_level(
case SYNC_WORK_QUEUE:
case SYNC_LOG:
case SYNC_LOG_FLUSH_ORDER:
- case SYNC_THR_LOCAL:
case SYNC_ANY_LATCH:
case SYNC_FILE_FORMAT_TAG:
case SYNC_DOUBLEWRITE:
@@ -1232,6 +1226,7 @@ sync_thread_add_level(
case SYNC_DICT_HEADER:
case SYNC_TRX_I_S_RWLOCK:
case SYNC_TRX_I_S_LAST_READ:
+ case SYNC_IBUF_MUTEX:
if (!sync_thread_levels_g(array, level, TRUE)) {
fprintf(stderr,
"InnoDB: sync_thread_levels_g(array, %lu)"
@@ -1317,22 +1312,33 @@ sync_thread_add_level(
|| sync_thread_levels_g(array, SYNC_TREE_NODE - 1, TRUE));
break;
case SYNC_TREE_NODE_NEW:
- ut_a(sync_thread_levels_contain(array, SYNC_FSP_PAGE)
- || sync_thread_levels_contain(array, SYNC_IBUF_MUTEX));
+ ut_a(sync_thread_levels_contain(array, SYNC_FSP_PAGE));
break;
case SYNC_INDEX_TREE:
- if (sync_thread_levels_contain(array, SYNC_IBUF_MUTEX)
- && sync_thread_levels_contain(array, SYNC_FSP)) {
- ut_a(sync_thread_levels_g(array, SYNC_FSP_PAGE - 1,
- TRUE));
+ ut_a(sync_thread_levels_g(array, SYNC_TREE_NODE - 1, TRUE));
+ break;
+ case SYNC_IBUF_TREE_NODE:
+ ut_a(sync_thread_levels_contain(array, SYNC_IBUF_INDEX_TREE)
+ || sync_thread_levels_g(array, SYNC_IBUF_TREE_NODE - 1,
+ TRUE));
+ break;
+ case SYNC_IBUF_TREE_NODE_NEW:
+ /* ibuf_add_free_page() allocates new pages for the
+ change buffer while only holding the tablespace
+ x-latch. These pre-allocated new pages may only be
+ taken in use while holding ibuf_mutex, in
+ btr_page_alloc_for_ibuf(). */
+ ut_a(sync_thread_levels_contain(array, SYNC_IBUF_MUTEX)
+ || sync_thread_levels_contain(array, SYNC_FSP));
+ break;
+ case SYNC_IBUF_INDEX_TREE:
+ if (sync_thread_levels_contain(array, SYNC_FSP)) {
+ ut_a(sync_thread_levels_g(array, level - 1, TRUE));
} else {
- ut_a(sync_thread_levels_g(array, SYNC_TREE_NODE - 1,
- TRUE));
+ ut_a(sync_thread_levels_g(
+ array, SYNC_IBUF_TREE_NODE - 1, TRUE));
}
break;
- case SYNC_IBUF_MUTEX:
- ut_a(sync_thread_levels_g(array, SYNC_FSP_PAGE - 1, TRUE));
- break;
case SYNC_IBUF_PESS_INSERT_MUTEX:
ut_a(sync_thread_levels_g(array, SYNC_FSP - 1, TRUE));
ut_a(!sync_thread_levels_contain(array, SYNC_IBUF_MUTEX));
@@ -1355,7 +1361,6 @@ sync_thread_add_level(
ut_error;
}
-levels_ok:
if (array->next_free == ULINT_UNDEFINED) {
ut_a(array->n_elems < array->max_elems);