author      unknown <marko@hundin.mysql.fi>    2004-03-12 17:14:51 +0200
committer   unknown <marko@hundin.mysql.fi>    2004-03-12 17:14:51 +0200
commit      e1cb1ca6fe25380edae06ded5e22e106761dcb46 (patch)
tree        6a59a674bf740a7b4046a20e847ad2578a573d69 /innobase/ibuf
parent      9a817bea5a369306dbb2113fbc599e63d5bb7d17 (diff)
download    mariadb-git-e1cb1ca6fe25380edae06ded5e22e106761dcb46.tar.gz
Allow UNIV_SYNC_DEBUG to be disabled while UNIV_DEBUG is enabled
Diffstat (limited to 'innobase/ibuf')
-rw-r--r--    innobase/ibuf/ibuf0ibuf.c    32
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/innobase/ibuf/ibuf0ibuf.c b/innobase/ibuf/ibuf0ibuf.c
index 7c31ca7c320..d851e7c4d01 100644
--- a/innobase/ibuf/ibuf0ibuf.c
+++ b/innobase/ibuf/ibuf0ibuf.c
@@ -49,7 +49,7 @@ The solution is the following: We put into each tablespace an insert buffer
 of its own. Let all the tree and page latches connected with the insert buffer
 be later in the latching order than the fsp latch and fsp page latches.
 Insert buffer pages must be such that the insert buffer is never invoked
-when these pages area accessed as this would result in a recursion violating
+when these pages are accessed as this would result in a recursion violating
 the latching order. We let a special i/o-handler thread take care of i/o to
 the insert buffer pages and the ibuf bitmap pages, as well as the fsp bitmap
 pages and the first inode page, which contains the inode of the ibuf tree: let
@@ -239,7 +239,9 @@ ibuf_header_page_get(
 
         page = buf_page_get(space, FSP_IBUF_HEADER_PAGE_NO, RW_X_LATCH, mtr);
 
+#ifdef UNIV_SYNC_DEBUG
         buf_page_dbg_add_level(page, SYNC_IBUF_HEADER);
+#endif /* UNIV_SYNC_DEBUG */
 
         return(page);
 }
@@ -263,7 +265,9 @@ ibuf_tree_root_get(
 
         page = buf_page_get(space, FSP_IBUF_TREE_ROOT_PAGE_NO, RW_X_LATCH, mtr);
 
+#ifdef UNIV_SYNC_DEBUG
         buf_page_dbg_add_level(page, SYNC_TREE_NODE);
+#endif /* UNIV_SYNC_DEBUG */
 
         return(page);
 }
@@ -375,7 +379,9 @@ ibuf_data_sizes_update(
 {
         ulint   old_size;
 
+#ifdef UNIV_SYNC_DEBUG
         ut_ad(mutex_own(&ibuf_mutex));
+#endif /* UNIV_SYNC_DEBUG */
 
         old_size = data->size;
 
@@ -455,7 +461,9 @@ ibuf_data_init_for_space(
 
         root = buf_page_get(space, FSP_IBUF_TREE_ROOT_PAGE_NO, RW_X_LATCH, &mtr);
 
+#ifdef UNIV_SYNC_DEBUG
         buf_page_dbg_add_level(root, SYNC_TREE_NODE);
+#endif /* UNIV_SYNC_DEBUG */
 
         data->size = 0;
         data->n_inserts = 0;
@@ -679,7 +687,9 @@ ibuf_bitmap_get_map_page(
 
         page = buf_page_get(space, ibuf_bitmap_page_no_calc(page_no), RW_X_LATCH, mtr);
 
+#ifdef UNIV_SYNC_DEBUG
         buf_page_dbg_add_level(page, SYNC_IBUF_BITMAP);
+#endif /* UNIV_SYNC_DEBUG */
 
         return(page);
 }
@@ -1198,7 +1208,9 @@ ibuf_data_enough_free_for_insert(
                         /* out: TRUE if enough free pages in list */
         ibuf_data_t*    data)   /* in: ibuf data for the space */
 {
+#ifdef UNIV_SYNC_DEBUG
         ut_ad(mutex_own(&ibuf_mutex));
+#endif /* UNIV_SYNC_DEBUG */
 
         /* We want a big margin of free pages, because a B-tree can sometimes
         grow in size also if records are deleted from it, as the node pointers
@@ -1224,7 +1236,9 @@ ibuf_data_too_much_free(
                         /* out: TRUE if enough free pages in list */
         ibuf_data_t*    data)   /* in: ibuf data for the space */
 {
+#ifdef UNIV_SYNC_DEBUG
         ut_ad(mutex_own(&ibuf_mutex));
+#endif /* UNIV_SYNC_DEBUG */
 
         if (data->free_list_len >= 3 + data->size / 2 + 3 * data->height) {
 
@@ -1282,7 +1296,9 @@ ibuf_add_free_page(
 
         page = buf_page_get(space, page_no, RW_X_LATCH, &mtr);
 
+#ifdef UNIV_SYNC_DEBUG
         buf_page_dbg_add_level(page, SYNC_TREE_NODE_NEW);
+#endif /* UNIV_SYNC_DEBUG */
 
         ibuf_enter();
 
@@ -1402,7 +1418,9 @@ ibuf_remove_free_page(
 
         page = buf_page_get(space, page_no, RW_X_LATCH, &mtr);
 
+#ifdef UNIV_SYNC_DEBUG
         buf_page_dbg_add_level(page, SYNC_TREE_NODE);
+#endif /* UNIV_SYNC_DEBUG */
 
         /* Remove the page from the free list and update the ibuf size data */
 
@@ -1443,8 +1461,9 @@ ibuf_free_excess_pages(
 {
         ibuf_data_t*    ibuf_data;
         ulint           i;
-
+#ifdef UNIV_SYNC_DEBUG
         ut_ad(rw_lock_own(fil_space_get_latch(space), RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
         ut_ad(rw_lock_get_x_lock_count(fil_space_get_latch(space)) == 1);
         ut_ad(!ibuf_inside());
 
@@ -1909,7 +1928,9 @@ ibuf_get_volume_buffered(
 
         prev_page = buf_page_get(space, prev_page_no,
                                                 RW_X_LATCH, mtr);
+#ifdef UNIV_SYNC_DEBUG
         buf_page_dbg_add_level(prev_page, SYNC_TREE_NODE);
+#endif /* UNIV_SYNC_DEBUG */
 
         rec = page_get_supremum_rec(prev_page);
         rec = page_rec_get_prev(rec);
@@ -1968,7 +1989,9 @@ count_later:
 
         next_page = buf_page_get(space, next_page_no,
                                                 RW_X_LATCH, mtr);
+#ifdef UNIV_SYNC_DEBUG
         buf_page_dbg_add_level(next_page, SYNC_TREE_NODE);
+#endif /* UNIV_SYNC_DEBUG */
 
         rec = page_get_infimum_rec(next_page);
         rec = page_rec_get_next(rec);
@@ -2592,8 +2615,9 @@ loop:
                                         IB__FILE__, __LINE__,
                                         &mtr);
                 ut_a(success);
-
+#ifdef UNIV_SYNC_DEBUG
                 buf_page_dbg_add_level(page, SYNC_TREE_NODE);
+#endif /* UNIV_SYNC_DEBUG */
         }
 
         /* Position pcur in the insert buffer at the first entry for this
@@ -2744,7 +2768,9 @@ ibuf_validate_low(void)
         ibuf_data_t*    data;
         ulint           sum_sizes;
 
+#ifdef UNIV_SYNC_DEBUG
         ut_ad(mutex_own(&ibuf_mutex));
+#endif /* UNIV_SYNC_DEBUG */
 
         sum_sizes = 0;
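The whole patch applies one uniform pattern: every call or assertion that only exists when latching-order checking is compiled in (buf_page_dbg_add_level() and the mutex_own()/rw_lock_own() checks) is wrapped in #ifdef UNIV_SYNC_DEBUG, while plain ut_ad() assertions stay governed by UNIV_DEBUG alone, so a build can define UNIV_DEBUG without also defining UNIV_SYNC_DEBUG. The stand-alone sketch below only illustrates that conditional-compilation idea; apart from the UNIV_DEBUG and UNIV_SYNC_DEBUG macro names, the identifiers (UT_AD, PAGE_DBG_ADD_LEVEL, page_dbg_add_level) are simplified stand-ins, not the real InnoDB definitions.

#include <assert.h>
#include <stdio.h>

/* ut_ad()-style assertion: compiled in whenever UNIV_DEBUG is defined. */
#ifdef UNIV_DEBUG
# define UT_AD(expr)    assert(expr)
#else
# define UT_AD(expr)    ((void) 0)
#endif

/* Latching-order bookkeeping in the style of buf_page_dbg_add_level():
only compiled in when UNIV_SYNC_DEBUG is defined as well. */
#ifdef UNIV_SYNC_DEBUG
static void
page_dbg_add_level(const char* page_name, int level)
{
        printf("registered latch level %d for %s\n", level, page_name);
}
# define PAGE_DBG_ADD_LEVEL(page, level)    page_dbg_add_level(page, level)
#else
# define PAGE_DBG_ADD_LEVEL(page, level)    ((void) 0)
#endif

int
main(void)
{
        const char*     page = "ibuf header page";

        UT_AD(page != NULL);            /* active under UNIV_DEBUG */
        PAGE_DBG_ADD_LEVEL(page, 10);   /* active only under UNIV_SYNC_DEBUG */

        /* Compiling with -DUNIV_DEBUG but without -DUNIV_SYNC_DEBUG now
        works: the sync-debug call expands to nothing instead of referring
        to machinery that is only built under UNIV_SYNC_DEBUG. */
        return(0);
}

In the patched file the call sites read the same way: each buf_page_dbg_add_level() or latch-ownership assertion sits between #ifdef UNIV_SYNC_DEBUG and #endif, so it disappears entirely from builds that enable only UNIV_DEBUG.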