Diffstat (limited to 'innobase')
-rw-r--r--  innobase/buf/buf0buf.c        2
-rw-r--r--  innobase/dict/dict0dict.c     2
-rw-r--r--  innobase/include/buf0buf.h   13
-rw-r--r--  innobase/include/srv0srv.h    2
-rw-r--r--  innobase/include/trx0undo.h   1
-rw-r--r--  innobase/srv/srv0srv.c        2
-rw-r--r--  innobase/trx/trx0trx.c        8
-rw-r--r--  innobase/trx/trx0undo.c      29
8 files changed, 43 insertions, 16 deletions
diff --git a/innobase/buf/buf0buf.c b/innobase/buf/buf0buf.c
index 9df48495355..6cfcb0fc724 100644
--- a/innobase/buf/buf0buf.c
+++ b/innobase/buf/buf0buf.c
@@ -2260,6 +2260,7 @@ buf_print(void)
ut_a(buf_validate());
}
+#ifdef UNIV_DEBUG
/*************************************************************************
Returns the number of latched pages in the buffer pool. */
@@ -2290,6 +2291,7 @@ buf_get_latched_pages_number(void)
mutex_exit(&(buf_pool->mutex));
return fixed_pages_number;
}
+#endif /* UNIV_DEBUG */
/*************************************************************************
Returns the number of pending buf pool ios. */
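Note on the hunk above: buf_get_latched_pages_number() has to walk every block in the pool while holding buf_pool->mutex, which is presumably why it is now compiled only into UNIV_DEBUG builds. A rough standalone sketch of such a scan follows; block_t and its fields (fix_count, io_fix) are hypothetical stand-ins for the real buf_block_t layout, not the actual implementation.

/* Sketch only, not the real buf_get_latched_pages_number(): count
pages that are buffer-fixed or I/O-fixed by scanning the whole pool. */
typedef struct {
	unsigned long	fix_count;	/* number of buffer-fixes */
	unsigned long	io_fix;		/* nonzero while I/O is pending */
} block_t;

unsigned long
latched_pages_sketch(const block_t* blocks, unsigned long n_blocks)
{
	unsigned long	i;
	unsigned long	fixed = 0;

	for (i = 0; i < n_blocks; i++) {
		if (blocks[i].fix_count != 0 || blocks[i].io_fix != 0) {
			fixed++;
		}
	}

	return(fixed);
}

Because the scan is linear in the pool size and runs under the pool mutex, exposing it unconditionally would make every SHOW STATUS noticeably more expensive on a large buffer pool; guarding it with UNIV_DEBUG keeps that cost out of production builds.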
diff --git a/innobase/dict/dict0dict.c b/innobase/dict/dict0dict.c
index 96c822857df..b8d9f362b06 100644
--- a/innobase/dict/dict0dict.c
+++ b/innobase/dict/dict0dict.c
@@ -3554,7 +3554,7 @@ loop:
ptr = dict_accept(ptr, "FOREIGN", &success);
- if (!success) {
+ if (!success || !ib_isspace(*ptr)) {
goto loop;
}
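Note on the dict0dict.c hunk: accepting the literal "FOREIGN" is no longer sufficient; the character after it must also be whitespace, so an identifier that merely starts with FOREIGN is not taken as the start of a FOREIGN KEY clause. A minimal sketch of the same check, using a hypothetical accept_keyword() helper in place of the real dict_accept()/ib_isspace() pair:

#include <ctype.h>
#include <string.h>

/* Hypothetical helper: succeed only if the buffer starts with the
keyword AND the keyword is followed by whitespace, mirroring the
"!success || !ib_isspace(*ptr)" test in the patch. */
static int
accept_keyword(const char** ptr, const char* kw)
{
	size_t	len = strlen(kw);

	if (strncmp(*ptr, kw, len) != 0
	    || !isspace((unsigned char) (*ptr)[len])) {

		return(0);	/* e.g. "FOREIGNERS" is not "FOREIGN" */
	}

	*ptr += len;
	return(1);
}

With only the old success test, the parser would advance past the matched prefix and continue as if a FOREIGN KEY clause followed; the extra whitespace check sends it back to the loop instead.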
diff --git a/innobase/include/buf0buf.h b/innobase/include/buf0buf.h
index 11e5bb39e63..f802ffa6510 100644
--- a/innobase/include/buf0buf.h
+++ b/innobase/include/buf0buf.h
@@ -495,7 +495,15 @@ Prints info of the buffer pool data structure. */
void
buf_print(void);
/*============*/
+
+/*************************************************************************
+Returns the number of latched pages in the buffer pool. */
+
+ulint
+buf_get_latched_pages_number(void);
+/*==============================*/
#endif /* UNIV_DEBUG */
+
/************************************************************************
Prints a page to stderr. */
@@ -503,12 +511,7 @@ void
buf_page_print(
/*===========*/
byte* read_buf); /* in: a database page */
-/*************************************************************************
-Returns the number of latched pages in the buffer pool. */
-ulint
-buf_get_latched_pages_number(void);
-/*==============================*/
/*************************************************************************
Returns the number of pending buf pool ios. */
diff --git a/innobase/include/srv0srv.h b/innobase/include/srv0srv.h
index f379efa98eb..97e9136040d 100644
--- a/innobase/include/srv0srv.h
+++ b/innobase/include/srv0srv.h
@@ -531,7 +531,9 @@ struct export_var_struct{
ulint innodb_buffer_pool_pages_dirty;
ulint innodb_buffer_pool_pages_misc;
ulint innodb_buffer_pool_pages_free;
+#ifdef UNIV_DEBUG
ulint innodb_buffer_pool_pages_latched;
+#endif /* UNIV_DEBUG */
ulint innodb_buffer_pool_read_requests;
ulint innodb_buffer_pool_reads;
ulint innodb_buffer_pool_wait_free;
diff --git a/innobase/include/trx0undo.h b/innobase/include/trx0undo.h
index bd7337e4f90..4f1847aa88c 100644
--- a/innobase/include/trx0undo.h
+++ b/innobase/include/trx0undo.h
@@ -237,6 +237,7 @@ trx_undo_set_state_at_finish(
/*=========================*/
/* out: undo log segment header page,
x-latched */
+ trx_rseg_t* rseg, /* in: rollback segment memory object */
trx_t* trx, /* in: transaction */
trx_undo_t* undo, /* in: undo log memory copy */
mtr_t* mtr); /* in: mtr */
diff --git a/innobase/srv/srv0srv.c b/innobase/srv/srv0srv.c
index 1227824ef80..431138400b6 100644
--- a/innobase/srv/srv0srv.c
+++ b/innobase/srv/srv0srv.c
@@ -1803,7 +1803,9 @@ srv_export_innodb_status(void)
export_vars.innodb_buffer_pool_pages_data= UT_LIST_GET_LEN(buf_pool->LRU);
export_vars.innodb_buffer_pool_pages_dirty= UT_LIST_GET_LEN(buf_pool->flush_list);
export_vars.innodb_buffer_pool_pages_free= UT_LIST_GET_LEN(buf_pool->free);
+#ifdef UNIV_DEBUG
export_vars.innodb_buffer_pool_pages_latched= buf_get_latched_pages_number();
+#endif /* UNIV_DEBUG */
export_vars.innodb_buffer_pool_pages_total= buf_pool->curr_size;
export_vars.innodb_buffer_pool_pages_misc= buf_pool->max_size -
UT_LIST_GET_LEN(buf_pool->LRU) - UT_LIST_GET_LEN(buf_pool->free);
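Two reading aids for this hunk: innodb_buffer_pool_pages_latched now exists only in UNIV_DEBUG builds (matching the guards added in buf0buf.c and srv0srv.h), and the "misc" figure is simply whatever part of the pool sits on neither the LRU list nor the free list. An illustration of the latter with made-up numbers, not values from the real buf_pool:

#include <stdio.h>

int
main(void)
{
	/* Illustration only: a 512 MB pool with 16 KB pages has 32768
	frames.  If 30000 frames are on the LRU list and 2000 on the
	free list, the remainder is what gets exported as
	innodb_buffer_pool_pages_misc. */
	unsigned long	pool_frames = 32768;
	unsigned long	lru_len = 30000;
	unsigned long	free_len = 2000;

	printf("misc pages = %lu\n", pool_frames - lru_len - free_len);

	return(0);	/* prints 768 */
}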
diff --git a/innobase/trx/trx0trx.c b/innobase/trx/trx0trx.c
index d865c709bf6..70fd73f2488 100644
--- a/innobase/trx/trx0trx.c
+++ b/innobase/trx/trx0trx.c
@@ -761,8 +761,8 @@ trx_commit_off_kernel(
mutex_enter(&(rseg->mutex));
if (trx->insert_undo != NULL) {
- trx_undo_set_state_at_finish(trx, trx->insert_undo,
- &mtr);
+ trx_undo_set_state_at_finish(
+ rseg, trx, trx->insert_undo, &mtr);
}
undo = trx->update_undo;
@@ -777,8 +777,8 @@ trx_commit_off_kernel(
because only a single OS thread is allowed to do the
transaction commit for this transaction. */
- update_hdr_page = trx_undo_set_state_at_finish(trx,
- undo, &mtr);
+ update_hdr_page = trx_undo_set_state_at_finish(
+ rseg, trx, undo, &mtr);
/* We have to do the cleanup for the update log while
holding the rseg mutex because update log headers
diff --git a/innobase/trx/trx0undo.c b/innobase/trx/trx0undo.c
index 7441dd3f152..251cd355897 100644
--- a/innobase/trx/trx0undo.c
+++ b/innobase/trx/trx0undo.c
@@ -1724,6 +1724,7 @@ trx_undo_set_state_at_finish(
/*=========================*/
/* out: undo log segment header page,
x-latched */
+ trx_rseg_t* rseg, /* in: rollback segment memory object */
trx_t* trx __attribute__((unused)), /* in: transaction */
trx_undo_t* undo, /* in: undo log memory copy */
mtr_t* mtr) /* in: mtr */
@@ -1732,8 +1733,10 @@ trx_undo_set_state_at_finish(
trx_upagef_t* page_hdr;
page_t* undo_page;
ulint state;
-
- ut_ad(trx && undo && mtr);
+
+ ut_ad(trx);
+ ut_ad(undo);
+ ut_ad(mtr);
if (undo->id >= TRX_RSEG_N_SLOTS) {
fprintf(stderr, "InnoDB: Error: undo->id is %lu\n",
@@ -1747,9 +1750,23 @@ trx_undo_set_state_at_finish(
seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
- if (undo->size == 1 && mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE)
- < TRX_UNDO_PAGE_REUSE_LIMIT) {
- state = TRX_UNDO_CACHED;
+ if (undo->size == 1
+ && mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE)
+ < TRX_UNDO_PAGE_REUSE_LIMIT) {
+
+ /* This is a heuristic to avoid the problem of all UNDO
+ slots ending up in one of the UNDO lists. Previously if
+ the server crashed with all the slots in one of the lists,
+ transactions that required the slots of a different type
+ would fail for lack of slots. */
+
+ if (UT_LIST_GET_LEN(rseg->update_undo_list) < 500
+ && UT_LIST_GET_LEN(rseg->insert_undo_list) < 500) {
+
+ state = TRX_UNDO_CACHED;
+ } else {
+ state = TRX_UNDO_TO_FREE;
+ }
} else if (undo->type == TRX_UNDO_INSERT) {
@@ -1759,7 +1776,7 @@ trx_undo_set_state_at_finish(
}
undo->state = state;
-
+
mlog_write_ulint(seg_hdr + TRX_UNDO_STATE, state, MLOG_2BYTES, mtr);
return(undo_page);
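Summing up the new logic in trx_undo_set_state_at_finish(): a finished single-page undo log whose free offset is still under TRX_UNDO_PAGE_REUSE_LIMIT is cached only while both rseg->insert_undo_list and rseg->update_undo_list are shorter than 500 entries; otherwise it is marked TRX_UNDO_TO_FREE, so undo logs of one type cannot hoard all of the rollback segment's slots across a crash. A standalone sketch of that decision follows; UNDO_CACHE_CAP and choose_finish_state() are names made up for the sketch, with plain parameters standing in for the real rseg lists and page checks.

#define UNDO_CACHE_CAP	500	/* cap taken from the patch above */

enum undo_state { UNDO_CACHED, UNDO_TO_FREE, UNDO_TO_PURGE };

/* reusable_page: undo->size == 1 and the page-free offset is below
TRX_UNDO_PAGE_REUSE_LIMIT; the two list lengths stand in for
UT_LIST_GET_LEN(rseg->insert_undo_list) and ...->update_undo_list. */
static enum undo_state
choose_finish_state(int reusable_page, int is_insert_undo,
		    unsigned long insert_list_len,
		    unsigned long update_list_len)
{
	if (reusable_page) {
		if (insert_list_len < UNDO_CACHE_CAP
		    && update_list_len < UNDO_CACHE_CAP) {

			return(UNDO_CACHED);
		}

		return(UNDO_TO_FREE);
	}

	return(is_insert_undo ? UNDO_TO_FREE : UNDO_TO_PURGE);
}

Passing rseg into trx_undo_set_state_at_finish() is what makes this possible: the call sites in trx_commit_off_kernel() already hold rseg->mutex, so the list lengths can be read there without any additional locking.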