author     unknown <monty@narttu.mysql.fi>  2003-05-19 16:35:49 +0300
committer  unknown <monty@narttu.mysql.fi>  2003-05-19 16:35:49 +0300
commit     daac922bc306847581b9acee4bcf0a31707d72e7 (patch)
tree       6025913cf3d482ba0783bf3420f7341c10cd574a /innobase
parent     7c189b0dcf26ad8e408b8eaa7d69dbbe913ba421 (diff)
parent     68aa31f268660db07b634f021716ecb872e19679 (diff)
Merge with 4.0.13
BitKeeper/etc/ignore: auto-union BitKeeper/etc/logging_ok: auto-union BUILD/SETUP.sh: Auto merged BitKeeper/deleted/.del-libmysql.def~29fc6d70335f1c4c: Auto merged Makefile.am: Auto merged acinclude.m4: Auto merged BitKeeper/triggers/post-commit: Auto merged Build-tools/Do-compile: Auto merged VC++Files/libmysql/libmysql.dsp: Auto merged VC++Files/mysql.dsw: Auto merged client/mysql.cc: Auto merged client/mysqlbinlog.cc: Auto merged client/mysqldump.c: Auto merged include/config-win.h: Auto merged include/my_base.h: Auto merged include/my_global.h: Auto merged include/my_pthread.h: Auto merged include/my_sys.h: Auto merged include/violite.h: Auto merged innobase/buf/buf0flu.c: Auto merged innobase/buf/buf0lru.c: Auto merged innobase/include/buf0buf.h: Auto merged innobase/include/buf0lru.h: Auto merged innobase/include/row0mysql.h: Auto merged innobase/include/srv0srv.h: Auto merged innobase/lock/lock0lock.c: Auto merged innobase/log/log0log.c: Auto merged innobase/log/log0recv.c: Auto merged innobase/os/os0file.c: Auto merged innobase/row/row0mysql.c: Auto merged innobase/row/row0sel.c: Auto merged innobase/srv/srv0srv.c: Auto merged innobase/srv/srv0start.c: Auto merged innobase/trx/trx0sys.c: Auto merged innobase/trx/trx0trx.c: Auto merged innobase/ut/ut0ut.c: Auto merged myisam/ft_boolean_search.c: Auto merged myisam/mi_check.c: Auto merged myisam/mi_key.c: Auto merged myisam/mi_open.c: Auto merged myisam/mi_range.c: Auto merged myisam/mi_search.c: Auto merged myisam/sort.c: Auto merged mysql-test/r/delete.result: Auto merged mysql-test/r/fulltext.result: Auto merged mysql-test/r/innodb_handler.result: Auto merged mysql-test/r/join.result: Auto merged mysql-test/r/join_outer.result: Auto merged mysql-test/r/key.result: Auto merged mysql-test/r/multi_update.result: Auto merged mysql-test/r/myisam.result: Auto merged mysql-test/r/query_cache.result: Auto merged mysql-test/r/select.result: Auto merged mysql-test/r/variables.result: Auto merged mysql-test/t/alter_table.test: Auto merged mysql-test/t/ctype_latin1_de.test: Auto merged mysql-test/t/delete.test: Auto merged mysql-test/t/fulltext.test: Auto merged mysql-test/t/innodb_handler.test: Auto merged mysql-test/t/join.test: Auto merged mysql-test/t/join_outer.test: Auto merged mysql-test/t/key.test: Auto merged mysql-test/t/multi_update.test: Auto merged mysql-test/t/myisam.test: Auto merged mysql-test/t/query_cache.test: Auto merged mysql-test/t/repair.test: Auto merged mysql-test/t/select_safe.test: Auto merged mysql-test/t/type_decimal.test: Auto merged mysql-test/t/variables.test: Auto merged mysys/default.c: Auto merged mysys/my_pthread.c: Auto merged scripts/mysql_fix_privilege_tables.sh: Auto merged scripts/mysqld_safe.sh: Auto merged sql/ha_heap.h: Auto merged sql/ha_innodb.h: Auto merged sql/ha_myisam.cc: Auto merged sql/handler.cc: Auto merged sql/handler.h: Auto merged sql/init.cc: Auto merged sql/item.cc: Auto merged sql/item_create.cc: Auto merged sql/item_strfunc.cc: Auto merged sql/item_sum.cc: Auto merged sql/mini_client.cc: Auto merged sql/net_serv.cc: Auto merged sql/opt_range.cc: Auto merged sql/records.cc: Auto merged sql/slave.cc: Auto merged sql/sql_acl.h: Auto merged sql/sql_analyse.cc: Auto merged sql/sql_class.cc: Auto merged sql/sql_class.h: Auto merged sql/sql_list.h: Auto merged sql/sql_load.cc: Auto merged sql/share/czech/errmsg.txt: Auto merged sql/share/danish/errmsg.txt: Auto merged sql/share/dutch/errmsg.txt: Auto merged sql/share/english/errmsg.txt: Auto merged sql/share/estonian/errmsg.txt: Auto 
merged sql/share/french/errmsg.txt: Auto merged sql/share/greek/errmsg.txt: Auto merged sql/share/hungarian/errmsg.txt: Auto merged sql/share/italian/errmsg.txt: Auto merged sql/share/japanese/errmsg.txt: Auto merged sql/share/korean/errmsg.txt: Auto merged sql/share/norwegian-ny/errmsg.txt: Auto merged sql/share/norwegian/errmsg.txt: Auto merged sql/share/portuguese/errmsg.txt: Auto merged sql/share/romanian/errmsg.txt: Auto merged sql/share/russian/errmsg.txt: Auto merged sql/share/slovak/errmsg.txt: Auto merged sql/share/spanish/errmsg.txt: Auto merged sql/share/swedish/errmsg.txt: Auto merged sql/share/ukrainian/errmsg.txt: Auto merged sql/unireg.h: Auto merged sql-bench/crash-me.sh: Auto merged sql-bench/test-transactions.sh: Auto merged strings/ctype-tis620.c: Auto merged tests/grant.res: Auto merged sql/log_event.cc: Merge with 4.0.13 Cleaned up comment syntax
Diffstat (limited to 'innobase')
-rw-r--r--  innobase/btr/Makefile.am | 2
-rw-r--r--  innobase/buf/Makefile.am | 2
-rw-r--r--  innobase/buf/buf0buf.c | 36
-rw-r--r--  innobase/buf/buf0flu.c | 53
-rw-r--r--  innobase/buf/buf0lru.c | 58
-rw-r--r--  innobase/com/Makefile.am | 2
-rw-r--r--  innobase/data/Makefile.am | 2
-rw-r--r--  innobase/dict/Makefile.am | 2
-rw-r--r--  innobase/dict/dict0crea.c | 8
-rw-r--r--  innobase/dict/dict0dict.c | 679
-rw-r--r--  innobase/dict/dict0load.c | 39
-rw-r--r--  innobase/dyn/Makefile.am | 2
-rw-r--r--  innobase/eval/Makefile.am | 2
-rw-r--r--  innobase/fil/Makefile.am | 2
-rw-r--r--  innobase/fsp/Makefile.am | 2
-rw-r--r--  innobase/fut/Makefile.am | 2
-rw-r--r--  innobase/ha/Makefile.am | 2
-rw-r--r--  innobase/ibuf/Makefile.am | 2
-rw-r--r--  innobase/ibuf/ibuf0ibuf.c | 13
-rw-r--r--  innobase/include/Makefile.i | 2
-rw-r--r--  innobase/include/buf0buf.h | 7
-rw-r--r--  innobase/include/buf0lru.h | 25
-rw-r--r--  innobase/include/db0err.h | 2
-rw-r--r--  innobase/include/dict0dict.h | 36
-rw-r--r--  innobase/include/log0log.h | 65
-rw-r--r--  innobase/include/row0ins.h | 1
-rw-r--r--  innobase/include/row0mysql.h | 10
-rw-r--r--  innobase/include/srv0srv.h | 3
-rw-r--r--  innobase/include/trx0trx.h | 18
-rw-r--r--  innobase/include/univ.i | 9
-rw-r--r--  innobase/include/ut0dbg.h | 5
-rw-r--r--  innobase/lock/Makefile.am | 2
-rw-r--r--  innobase/lock/lock0lock.c | 42
-rw-r--r--  innobase/log/Makefile.am | 2
-rw-r--r--  innobase/log/log0log.c | 158
-rw-r--r--  innobase/log/log0recv.c | 11
-rw-r--r--  innobase/mach/Makefile.am | 2
-rw-r--r--  innobase/mem/Makefile.am | 2
-rw-r--r--  innobase/mtr/Makefile.am | 2
-rw-r--r--  innobase/odbc/Makefile.am | 2
-rw-r--r--  innobase/os/Makefile.am | 2
-rw-r--r--  innobase/os/os0file.c | 49
-rw-r--r--  innobase/os/os0sync.c | 18
-rw-r--r--  innobase/page/Makefile.am | 2
-rw-r--r--  innobase/pars/Makefile.am | 2
-rw-r--r--  innobase/que/Makefile.am | 2
-rw-r--r--  innobase/read/Makefile.am | 2
-rw-r--r--  innobase/rem/Makefile.am | 2
-rw-r--r--  innobase/row/Makefile.am | 2
-rw-r--r--  innobase/row/row0ins.c | 232
-rw-r--r--  innobase/row/row0mysql.c | 137
-rw-r--r--  innobase/row/row0sel.c | 98
-rw-r--r--  innobase/row/row0upd.c | 2
-rw-r--r--  innobase/srv/Makefile.am | 2
-rw-r--r--  innobase/srv/srv0srv.c | 194
-rw-r--r--  innobase/srv/srv0start.c | 42
-rw-r--r--  innobase/sync/Makefile.am | 2
-rw-r--r--  innobase/thr/Makefile.am | 2
-rw-r--r--  innobase/trx/Makefile.am | 2
-rw-r--r--  innobase/trx/trx0sys.c | 13
-rw-r--r--  innobase/trx/trx0trx.c | 52
-rw-r--r--  innobase/usr/Makefile.am | 2
-rw-r--r--  innobase/ut/Makefile.am | 2
-rw-r--r--  innobase/ut/ut0ut.c | 2
64 files changed, 1652 insertions, 529 deletions
diff --git a/innobase/btr/Makefile.am b/innobase/btr/Makefile.am
index 6e3dd4fb007..ed61facb695 100644
--- a/innobase/btr/Makefile.am
+++ b/innobase/btr/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libbtr.a
+noinst_LIBRARIES = libbtr.a
libbtr_a_SOURCES = btr0btr.c btr0cur.c btr0pcur.c btr0sea.c
diff --git a/innobase/buf/Makefile.am b/innobase/buf/Makefile.am
index b1463c2220e..3f56c8b02d7 100644
--- a/innobase/buf/Makefile.am
+++ b/innobase/buf/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libbuf.a
+noinst_LIBRARIES = libbuf.a
libbuf_a_SOURCES = buf0buf.c buf0flu.c buf0lru.c buf0rea.c
diff --git a/innobase/buf/buf0buf.c b/innobase/buf/buf0buf.c
index 14d538a14bc..1cdddaf6cb4 100644
--- a/innobase/buf/buf0buf.c
+++ b/innobase/buf/buf0buf.c
@@ -597,8 +597,9 @@ buf_pool_init(
/* Wipe contents of frame to eliminate a Purify
warning */
+#ifdef HAVE_purify
memset(block->frame, '\0', UNIV_PAGE_SIZE);
-
+#endif
if (srv_use_awe) {
/* Add to the list of blocks mapped to
frames */
@@ -1837,7 +1838,7 @@ buf_pool_invalidate(void)
freed = TRUE;
while (freed) {
- freed = buf_LRU_search_and_free_block(0);
+ freed = buf_LRU_search_and_free_block(100);
}
mutex_enter(&(buf_pool->mutex));
@@ -2057,6 +2058,29 @@ buf_get_n_pending_ios(void)
}
/*************************************************************************
+Returns the ratio in percents of modified pages in the buffer pool /
+database pages in the buffer pool. */
+
+ulint
+buf_get_modified_ratio_pct(void)
+/*============================*/
+{
+ ulint ratio;
+
+ mutex_enter(&(buf_pool->mutex));
+
+ ratio = (100 * UT_LIST_GET_LEN(buf_pool->flush_list))
+ / (1 + UT_LIST_GET_LEN(buf_pool->LRU)
+ + UT_LIST_GET_LEN(buf_pool->free));
+
+ /* 1 + is there to avoid division by zero */
+
+ mutex_exit(&(buf_pool->mutex));
+
+ return(ratio);
+}
+
+/*************************************************************************
Prints info of the buffer i/o. */
void
@@ -2109,8 +2133,10 @@ buf_print_io(
buf += sprintf(buf,
"Pending writes: LRU %lu, flush list %lu, single page %lu\n",
- buf_pool->n_flush[BUF_FLUSH_LRU],
- buf_pool->n_flush[BUF_FLUSH_LIST],
+ buf_pool->n_flush[BUF_FLUSH_LRU]
+ + buf_pool->init_flush[BUF_FLUSH_LRU],
+ buf_pool->n_flush[BUF_FLUSH_LIST]
+ + buf_pool->init_flush[BUF_FLUSH_LIST],
buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]);
current_time = time(NULL);
@@ -2144,7 +2170,7 @@ buf_print_io(
/ (buf_pool->n_page_gets - buf_pool->n_page_gets_old)));
} else {
buf += sprintf(buf,
- "No buffer pool activity since the last printout\n");
+ "No buffer pool page gets since the last printout\n");
}
buf_pool->n_page_gets_old = buf_pool->n_page_gets;
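The buf_get_modified_ratio_pct() function added above returns the percentage of modified (dirty) pages relative to all database pages in the buffer pool; the + 1 in the denominator protects against division by zero when the pool holds no pages. A minimal standalone sketch of that arithmetic follows; it is illustrative only, with plain parameters standing in for UT_LIST_GET_LEN() on buf_pool->flush_list, buf_pool->LRU and buf_pool->free, and is not InnoDB code.

#include <stdio.h>

/* flush_list_len, lru_len and free_len stand in for the lengths of the
flush list, the LRU list and the free list of the buffer pool */
static unsigned long
modified_ratio_pct(unsigned long flush_list_len, unsigned long lru_len,
        unsigned long free_len)
{
        /* + 1 avoids division by zero when the pool holds no pages */
        return (100 * flush_list_len) / (1 + lru_len + free_len);
}

int
main(void)
{
        /* 300 dirty pages against 1000 LRU + 200 free pages prints 24 */
        printf("%lu\n", modified_ratio_pct(300, 1000, 200));

        return 0;
}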
diff --git a/innobase/buf/buf0flu.c b/innobase/buf/buf0flu.c
index 02587487a92..47ac9c6b041 100644
--- a/innobase/buf/buf0flu.c
+++ b/innobase/buf/buf0flu.c
@@ -107,7 +107,7 @@ buf_flush_ready_for_replace(
BUF_BLOCK_FILE_PAGE and in the LRU list */
{
ut_ad(mutex_own(&(buf_pool->mutex)));
- ut_ad(block->state == BUF_BLOCK_FILE_PAGE);
+ ut_a(block->state == BUF_BLOCK_FILE_PAGE);
if ((ut_dulint_cmp(block->oldest_modification, ut_dulint_zero) > 0)
|| (block->buf_fix_count != 0)
@@ -227,7 +227,9 @@ buf_flush_buffered_writes(void)
}
for (i = 0; i < trx_doublewrite->first_free; i++) {
+
block = trx_doublewrite->buf_block_arr[i];
+ ut_a(block->state == BUF_BLOCK_FILE_PAGE);
if (block->check_index_page_at_flush
&& !page_simple_validate(block->frame)) {
@@ -236,10 +238,12 @@ buf_flush_buffered_writes(void)
ut_print_timestamp(stderr);
fprintf(stderr,
- " InnoDB: Apparent corruption of an index page\n"
+ " InnoDB: Apparent corruption of an index page n:o %lu in space %lu\n"
"InnoDB: to be written to data file. We intentionally crash server\n"
"InnoDB: to prevent corrupt data from ending up in data\n"
- "InnoDB: files.\n");
+ "InnoDB: files.\n",
+ block->offset, block->space);
+
ut_a(0);
}
}
@@ -394,7 +398,7 @@ buf_flush_write_block_low(
"Warning: cannot force log to disk in the log debug version!\n");
#else
/* Force the log to the disk before writing the modified block */
- log_flush_up_to(block->newest_modification, LOG_WAIT_ALL_GROUPS);
+ log_write_up_to(block->newest_modification, LOG_WAIT_ALL_GROUPS, TRUE);
#endif
buf_flush_init_for_writing(block->frame, block->newest_modification,
block->space, block->offset);
@@ -432,6 +436,8 @@ buf_flush_try_page(
block = buf_page_hash_get(space, offset);
+ ut_a(block->state == BUF_BLOCK_FILE_PAGE);
+
if (flush_type == BUF_FLUSH_LIST
&& block && buf_flush_ready_for_flush(block, flush_type)) {
@@ -567,7 +573,8 @@ buf_flush_try_page(
rw_lock_s_lock_gen(&(block->lock), BUF_IO_WRITE);
if (buf_debug_prints) {
- printf("Flushing single page space %lu, page no %lu \n",
+ printf(
+ "Flushing single page space %lu, page no %lu \n",
block->space, block->offset);
}
@@ -608,15 +615,7 @@ buf_flush_try_neighbors(
low = offset;
high = offset + 1;
- } else if (flush_type == BUF_FLUSH_LIST) {
- /* Since semaphore waits require us to flush the
- doublewrite buffer to disk, it is best that the
- search area is just the page itself, to minimize
- chances for semaphore waits */
-
- low = offset;
- high = offset + 1;
- }
+ }
/* printf("Flush area: low %lu high %lu\n", low, high); */
@@ -633,13 +632,20 @@ buf_flush_try_neighbors(
if (block && flush_type == BUF_FLUSH_LRU && i != offset
&& !block->old) {
- /* We avoid flushing 'non-old' blocks in an LRU flush,
- because the flushed blocks are soon freed */
+ /* We avoid flushing 'non-old' blocks in an LRU flush,
+ because the flushed blocks are soon freed */
- continue;
+ continue;
}
- if (block && buf_flush_ready_for_flush(block, flush_type)) {
+ if (block && buf_flush_ready_for_flush(block, flush_type)
+ && (i == offset || block->buf_fix_count == 0)) {
+ /* We only try to flush those neighbors != offset
+ where the buf fix count is zero, as we then know that
+ we probably can latch the page without a semaphore
+ wait. Semaphore waits are expensive because we must
+ flush the doublewrite buffer before we start
+ waiting. */
mutex_exit(&(buf_pool->mutex));
@@ -758,7 +764,6 @@ buf_flush_batch(
page_count +=
buf_flush_try_neighbors(space, offset,
flush_type);
-
/* printf(
"Flush type %lu, page no %lu, neighb %lu\n",
flush_type, offset,
@@ -884,11 +889,19 @@ buf_flush_free_margin(void)
/*=======================*/
{
ulint n_to_flush;
+ ulint n_flushed;
n_to_flush = buf_flush_LRU_recommendation();
if (n_to_flush > 0) {
- buf_flush_batch(BUF_FLUSH_LRU, n_to_flush, ut_dulint_zero);
+ n_flushed = buf_flush_batch(BUF_FLUSH_LRU, n_to_flush,
+ ut_dulint_zero);
+ if (n_flushed == ULINT_UNDEFINED) {
+ /* There was an LRU type flush batch already running;
+ let us wait for it to end */
+
+ buf_flush_wait_batch_end(BUF_FLUSH_LRU);
+ }
}
}
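The buf_flush_try_neighbors() change above adds a second condition before a neighbor page is flushed: any page other than the one at 'offset' must have buf_fix_count == 0, so the write is unlikely to block on the page latch and trigger the expensive doublewrite-buffer flush mentioned in the new comment. A small illustrative predicate with the same shape (not the InnoDB function; the parameters are stand-ins for the block fields):

#include <stdio.h>

/* The page at 'offset' is always a candidate; a neighbor only when
nobody currently has the block buffer-fixed. */
static int
neighbor_flushable(unsigned long page_no, unsigned long offset,
        unsigned long buf_fix_count, int ready_for_flush)
{
        return ready_for_flush
                && (page_no == offset || buf_fix_count == 0);
}

int
main(void)
{
        printf("%d\n", neighbor_flushable(5, 5, 3, 1)); /* 1: the page itself */
        printf("%d\n", neighbor_flushable(6, 5, 3, 1)); /* 0: fixed neighbor skipped */
        printf("%d\n", neighbor_flushable(6, 5, 0, 1)); /* 1: unfixed neighbor flushed */

        return 0;
}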
diff --git a/innobase/buf/buf0lru.c b/innobase/buf/buf0lru.c
index 051aa0191f6..40f49f1fddc 100644
--- a/innobase/buf/buf0lru.c
+++ b/innobase/buf/buf0lru.c
@@ -104,12 +104,15 @@ ibool
buf_LRU_search_and_free_block(
/*==========================*/
/* out: TRUE if freed */
- ulint n_iterations __attribute__((unused))) /* in: how many times
- this has been called repeatedly without
- result: a high value means that we should
- search farther */
+ ulint n_iterations) /* in: how many times this has been called
+ repeatedly without result: a high value means
+ that we should search farther; if value is
+ k < 10, then we only search k/10 * [number
+ of pages in the buffer pool] from the end
+ of the LRU list */
{
buf_block_t* block;
+ ulint distance = 0;
ibool freed;
mutex_enter(&(buf_pool->mutex));
@@ -152,6 +155,18 @@ buf_LRU_search_and_free_block(
}
block = UT_LIST_GET_PREV(LRU, block);
+ distance++;
+
+ if (!freed && n_iterations <= 10
+ && distance > 100 + (n_iterations * buf_pool->curr_size)
+ / 10) {
+
+ buf_pool->LRU_flush_ended = 0;
+
+ mutex_exit(&(buf_pool->mutex));
+
+ return(FALSE);
+ }
}
if (buf_pool->LRU_flush_ended > 0) {
@@ -186,7 +201,7 @@ buf_LRU_try_free_flushed_blocks(void)
mutex_exit(&(buf_pool->mutex));
- buf_LRU_search_and_free_block(0);
+ buf_LRU_search_and_free_block(1);
mutex_enter(&(buf_pool->mutex));
}
@@ -208,7 +223,7 @@ buf_LRU_get_free_block(void)
{
buf_block_t* block = NULL;
ibool freed;
- ulint n_iterations = 0;
+ ulint n_iterations = 1;
ibool mon_value_was = 0; /* remove bug */
ibool started_monitor = FALSE;
loop:
@@ -236,9 +251,12 @@ loop:
fprintf(stderr,
" InnoDB: WARNING: over 4 / 5 of the buffer pool is occupied by\n"
"InnoDB: lock heaps or the adaptive hash index! Check that your\n"
-"InnoDB: transactions do not set too many row locks. Starting InnoDB\n"
-"InnoDB: Monitor to print diagnostics, including lock heap and hash index\n"
-"InnoDB: sizes.\n");
+"InnoDB: transactions do not set too many row locks.\n"
+"InnoDB: Your buffer pool size is %lu MB. Maybe you should make\n"
+"InnoDB: the buffer pool bigger?\n"
+"InnoDB: Starting the InnoDB Monitor to print diagnostics, including\n"
+"InnoDB: lock heap and hash index sizes.\n",
+ buf_pool->curr_size / (1024 * 1024 / UNIV_PAGE_SIZE));
srv_print_innodb_monitor = TRUE;
@@ -251,14 +269,6 @@ loop:
srv_print_innodb_monitor = FALSE;
}
-
- if (buf_pool->LRU_flush_ended > 0) {
- mutex_exit(&(buf_pool->mutex));
-
- buf_LRU_try_free_flushed_blocks();
-
- mutex_enter(&(buf_pool->mutex));
- }
/* If there is a block in the free list, take it */
if (UT_LIST_GET_LEN(buf_pool->free) > 0) {
@@ -340,6 +350,20 @@ loop:
os_aio_simulated_wake_handler_threads();
+ mutex_enter(&(buf_pool->mutex));
+
+ if (buf_pool->LRU_flush_ended > 0) {
+ /* We have written pages in an LRU flush. To make the insert
+ buffer more efficient, we try to move these pages to the free
+ list. */
+
+ mutex_exit(&(buf_pool->mutex));
+
+ buf_LRU_try_free_flushed_blocks();
+ } else {
+ mutex_exit(&(buf_pool->mutex));
+ }
+
if (n_iterations > 10) {
os_thread_sleep(500000);
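buf_LRU_search_and_free_block() above now honours its n_iterations argument: while n_iterations <= 10, the scan gives up once it has walked more than 100 + n_iterations * curr_size / 10 blocks from the tail of the LRU list, so the first attempts stay cheap and repeated attempts search progressively farther (callers now start from n_iterations = 1 rather than 0). A hedged standalone sketch of just that cutoff rule, with pool_size standing in for buf_pool->curr_size:

#include <stdio.h>

/* Returns 1 if the LRU scan should give up at this distance from the
tail of the list; this mirrors only the cutoff added above. */
static int
lru_scan_should_stop(unsigned long n_iterations, unsigned long distance,
        unsigned long pool_size)
{
        return n_iterations <= 10
                && distance > 100 + (n_iterations * pool_size) / 10;
}

int
main(void)
{
        /* With a 1000-page pool the first call scans at most about 200
        blocks, the tenth call can cover the whole list, and from the
        eleventh call on there is no cap at all. */
        printf("%d\n", lru_scan_should_stop(1, 201, 1000));   /* 1: stop */
        printf("%d\n", lru_scan_should_stop(1, 150, 1000));   /* 0: keep searching */
        printf("%d\n", lru_scan_should_stop(11, 5000, 1000)); /* 0: no cap */

        return 0;
}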
diff --git a/innobase/com/Makefile.am b/innobase/com/Makefile.am
index 27ae396bc6e..a3d2f8a76c6 100644
--- a/innobase/com/Makefile.am
+++ b/innobase/com/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libcom.a
+noinst_LIBRARIES = libcom.a
libcom_a_SOURCES = com0com.c com0shm.c
diff --git a/innobase/data/Makefile.am b/innobase/data/Makefile.am
index 0e502708e85..eeb6f129de0 100644
--- a/innobase/data/Makefile.am
+++ b/innobase/data/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libdata.a
+noinst_LIBRARIES = libdata.a
libdata_a_SOURCES = data0data.c data0type.c
diff --git a/innobase/dict/Makefile.am b/innobase/dict/Makefile.am
index 693048b6784..0034d2f8f1e 100644
--- a/innobase/dict/Makefile.am
+++ b/innobase/dict/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libdict.a
+noinst_LIBRARIES = libdict.a
libdict_a_SOURCES = dict0boot.c dict0crea.c dict0dict.c dict0load.c\
dict0mem.c
diff --git a/innobase/dict/dict0crea.c b/innobase/dict/dict0crea.c
index b0f84e5663a..3619ac02f4d 100644
--- a/innobase/dict/dict0crea.c
+++ b/innobase/dict/dict0crea.c
@@ -1173,6 +1173,7 @@ dict_create_add_foreigns_to_dictionary(
if (NULL == dict_table_get_low((char *) "SYS_FOREIGN")) {
fprintf(stderr,
"InnoDB: table SYS_FOREIGN not found from internal data dictionary\n");
+
return(DB_ERROR);
}
@@ -1259,6 +1260,13 @@ loop:
"InnoDB: at http://www.innodb.com/ibman.html\n");
}
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf),
+" Internal error in foreign key constraint creation for table %.500s.\n"
+"See the MySQL .err log in the datadir for more information.\n", table->name);
+ mutex_exit(&dict_foreign_err_mutex);
+
return(error);
}
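The dict0crea.c hunk above is the first use of the error-reporting pattern this merge applies to foreign key problems throughout the dictionary code: take dict_foreign_err_mutex, write a timestamp into the shared dict_foreign_err_buf, append a printf-style message, and release the mutex (the buffer and the mutex themselves are created in dict_init() in dict0dict.c below, and the real code also asserts that DICT_FOREIGN_ERR_BUF_LEN is not exceeded). A simplified standalone sketch of the same pattern using plain pthreads; every name in it is illustrative, not the InnoDB API:

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define ERR_BUF_LEN 10000       /* cf. DICT_FOREIGN_ERR_BUF_LEN */

static char             err_buf[ERR_BUF_LEN];
static pthread_mutex_t  err_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Records the latest foreign key error in a shared, mutex-protected text
buffer; simplified in that it overwrites the previous message and
truncates on overflow instead of asserting. */
static void
report_foreign_error(const char* table_name, const char* reason)
{
        time_t  now = time(NULL);

        pthread_mutex_lock(&err_mutex);

        strftime(err_buf, sizeof(err_buf), "%y%m%d %H:%M:%S",
                localtime(&now));
        snprintf(err_buf + strlen(err_buf),
                sizeof(err_buf) - strlen(err_buf),
                " Error in foreign key constraint of table %.500s:\n%s\n",
                table_name, reason);

        pthread_mutex_unlock(&err_mutex);
}

int
main(void)
{
        report_foreign_error("test/child",
                "referenced table test/parent not found");

        /* single-threaded here, so reading without the mutex is fine */
        fputs(err_buf, stdout);

        return 0;
}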
diff --git a/innobase/dict/dict0dict.c b/innobase/dict/dict0dict.c
index 9be10fe70d8..c11a5f76d94 100644
--- a/innobase/dict/dict0dict.c
+++ b/innobase/dict/dict0dict.c
@@ -185,6 +185,14 @@ dict_foreign_free(
/*==============*/
dict_foreign_t* foreign); /* in, own: foreign key struct */
+/* Buffers for storing detailed information about the latest foreign key
+and unique key errors */
+char* dict_foreign_err_buf = NULL;
+char* dict_unique_err_buf = NULL;
+mutex_t dict_foreign_err_mutex; /* mutex protecting the foreign
+ and unique error buffers */
+
+
/************************************************************************
Checks if the database name in two table names is the same. */
static
@@ -573,6 +581,13 @@ dict_init(void)
rw_lock_create(&dict_operation_lock);
rw_lock_set_level(&dict_operation_lock, SYNC_DICT_OPERATION);
+
+ dict_foreign_err_buf = mem_alloc(DICT_FOREIGN_ERR_BUF_LEN);
+ dict_foreign_err_buf[0] = '\0';
+ dict_unique_err_buf = mem_alloc(DICT_FOREIGN_ERR_BUF_LEN);
+ dict_unique_err_buf[0] = '\0';
+ mutex_create(&dict_foreign_err_mutex);
+ mutex_set_level(&dict_foreign_err_mutex, SYNC_ANY_LATCH);
}
/**************************************************************************
@@ -1818,6 +1833,7 @@ dict_foreign_add_to_cache(
dict_foreign_t* for_in_cache = NULL;
dict_index_t* index;
ibool added_to_referenced_list = FALSE;
+ char* buf = dict_foreign_err_buf;
ut_ad(mutex_own(&(dict_sys->mutex)));
@@ -1850,9 +1866,29 @@ dict_foreign_add_to_cache(
for_in_cache->foreign_index);
if (index == NULL) {
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf),
+" Error in foreign key constraint of table %.500s:\n"
+"there is no index in referenced table which would contain\n"
+"the columns as the first columns, or the data types in the\n"
+"referenced table do not match to the ones in table. Constraint:\n",
+ for_in_cache->foreign_table_name);
+ dict_print_info_on_foreign_key_in_create_format(
+ for_in_cache, buf + strlen(buf));
+ if (for_in_cache->foreign_index) {
+ sprintf(buf + strlen(buf),
+"\nThe index in the foreign key in table is %.500s\n"
+"See http://www.innodb.com/ibman.html about correct foreign key definition.\n",
+ for_in_cache->foreign_index->name);
+ }
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+ mutex_exit(&dict_foreign_err_mutex);
+
if (for_in_cache == foreign) {
mem_heap_free(foreign->heap);
}
+
return(DB_CANNOT_ADD_CONSTRAINT);
}
@@ -1871,6 +1907,25 @@ dict_foreign_add_to_cache(
for_in_cache->referenced_index);
if (index == NULL) {
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf),
+" Error in foreign key constraint of table %.500s:\n"
+"there is no index in the table which would contain\n"
+"the columns as the first columns, or the data types in the\n"
+"table do not match to the ones in the referenced table. Constraint:\n",
+ for_in_cache->foreign_table_name);
+ dict_print_info_on_foreign_key_in_create_format(
+ for_in_cache, buf + strlen(buf));
+ if (for_in_cache->foreign_index) {
+ sprintf(buf + strlen(buf),
+"\nIndex of the foreign key in the referenced table is %.500s\n"
+"See http://www.innodb.com/ibman.html about correct foreign key definition.\n",
+ for_in_cache->referenced_index->name);
+ }
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+ mutex_exit(&dict_foreign_err_mutex);
+
if (for_in_cache == foreign) {
if (added_to_referenced_list) {
UT_LIST_REMOVE(referenced_list,
@@ -2038,7 +2093,7 @@ dict_scan_col(
if (*ptr == '`') {
ptr++;
}
-
+
return(ptr);
}
@@ -2141,18 +2196,21 @@ dict_scan_table_name(
}
/*************************************************************************
-Skips one 'word', like an id. For the lexical definition of 'word', see the
-code below. */
+Scans an id. For the lexical definition of an 'id', see the code below.
+Strips backquotes from around the id. */
static
char*
-dict_skip_word(
-/*===========*/
+dict_scan_id(
+/*=========*/
/* out: scanned to */
char* ptr, /* in: scanned to */
- ibool* success)/* out: TRUE if success, FALSE if just spaces left in
- string */
+ char** start, /* out: start of the id; NULL if no id was
+ scannable */
+ ulint* len) /* out: length of the id */
{
- *success = FALSE;
+ ibool scanned_backquote = FALSE;
+
+ *start = NULL;
while (isspace(*ptr)) {
ptr++;
@@ -2164,24 +2222,61 @@ dict_skip_word(
}
if (*ptr == '`') {
+ scanned_backquote = TRUE;
ptr++;
}
- while (!isspace(*ptr) && *ptr != ',' && *ptr != '(' && *ptr != '`'
- && *ptr != '\0') {
+ *start = ptr;
+
+ while (!isspace(*ptr) && *ptr != ',' && *ptr != '(' && *ptr != ')'
+ && *ptr != '\0' && *ptr != '`') {
ptr++;
}
- *success = TRUE;
+ *len = (ulint) (ptr - *start);
+
+ if (scanned_backquote) {
+ if (*ptr == '`') {
+ ptr++;
+ } else {
+ /* Syntax error */
+ *start = NULL;
+ }
+ }
return(ptr);
}
/*************************************************************************
+Skips one id. */
+static
+char*
+dict_skip_word(
+/*===========*/
+ /* out: scanned to */
+ char* ptr, /* in: scanned to */
+ ibool* success)/* out: TRUE if success, FALSE if just spaces left in
+ string or a syntax error */
+{
+ char* start;
+ ulint len;
+
+ *success = FALSE;
+
+ ptr = dict_scan_id(ptr, &start, &len);
+
+ if (start) {
+ *success = TRUE;
+ }
+
+ return(ptr);
+}
+
+#ifdef currentlynotused
+/*************************************************************************
Returns the number of opening brackets '(' subtracted by the number
of closing brackets ')' between string and ptr. */
-#ifdef NOT_USED
static
int
dict_bracket_count(
@@ -2206,16 +2301,116 @@ dict_bracket_count(
return(count);
}
#endif
+
+/*************************************************************************
+Removes MySQL comments from an SQL string. A comment is either
+(a) '#' to the end of the line,
+(b) '--<space>' to the end of the line, or
+(c) '<slash><asterisk>' till the next '<asterisk><slash>' (like the familiar
+C comment syntax). */
+static
+char*
+dict_strip_comments(
+/*================*/
+ /* out, own: SQL string stripped from
+ comments; the caller must free this
+ with mem_free()! */
+ char* sql_string) /* in: SQL string */
+{
+ char* str;
+ char* sptr;
+ char* ptr;
+
+ str = mem_alloc(strlen(sql_string) + 1);
+
+ sptr = sql_string;
+ ptr = str;
+
+ for (;;) {
+ if (*sptr == '\0') {
+ *ptr = '\0';
+
+ return(str);
+ }
+
+ if (*sptr == '#'
+ || (strlen(sptr) >= 3 && 0 == memcmp("-- ", sptr, 3))) {
+ for (;;) {
+ /* In Unix a newline is 0x0D while in Windows
+ it is 0x0A followed by 0x0D */
+
+ if (*sptr == (char)0x0A
+ || *sptr == (char)0x0D
+ || *sptr == '\0') {
+
+ break;
+ }
+
+ sptr++;
+ }
+ }
+
+ if (strlen(sptr) >= 2 && *sptr == '/' && *(sptr + 1) == '*') {
+ for (;;) {
+ if (strlen(sptr) >= 2
+ && *sptr == '*' && *(sptr + 1) == '/') {
+
+ sptr += 2;
+
+ break;
+ }
+
+ if (*sptr == '\0') {
+
+ break;
+ }
+
+ sptr++;
+ }
+ }
+
+ *ptr = *sptr;
+
+ ptr++;
+ sptr++;
+ }
+}
+
+/*************************************************************************
+Reports a simple foreign key create clause syntax error. */
+static
+void
+dict_foreign_report_syntax_err(
+/*===========================*/
+ char* name, /* in: table name */
+ char* start_of_latest_foreign,/* in: start of the foreign key clause
+ in the SQL string */
+ char* ptr) /* in: place of the syntax error */
+{
+ char* buf = dict_foreign_err_buf;
+
+ mutex_enter(&dict_foreign_err_mutex);
+
+ ut_sprintf_timestamp(buf);
+
+ sprintf(buf + strlen(buf),
+" Error in foreign key constraint of table %.500s,\n%.500s.\n"
+"Syntax error close to:\n%.500s\n", name, start_of_latest_foreign, ptr);
+
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+ mutex_exit(&dict_foreign_err_mutex);
+}
+
/*************************************************************************
Scans a table create SQL string and adds to the data dictionary the foreign
key constraints declared in the string. This function should be called after
the indexes for a table have been created. Each foreign key constraint must
be accompanied with indexes in both participating tables. The indexes are
allowed to contain more fields than mentioned in the constraint. */
-
+static
ulint
-dict_create_foreign_constraints(
-/*============================*/
+dict_create_foreign_constraints_low(
+/*================================*/
/* out: error code or DB_SUCCESS */
trx_t* trx, /* in: transaction */
char* sql_string, /* in: table create or ALTER TABLE
@@ -2231,7 +2426,9 @@ dict_create_foreign_constraints(
dict_table_t* referenced_table;
dict_index_t* index;
dict_foreign_t* foreign;
- char* ptr = sql_string;
+ char* ptr = sql_string;
+ char* start_of_latest_foreign = sql_string;
+ char* buf = dict_foreign_err_buf;
ibool success;
ulint error;
ulint i;
@@ -2249,6 +2446,15 @@ dict_create_foreign_constraints(
table = dict_table_get_low(name);
if (table == NULL) {
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf),
+" Error in foreign key constraint of table %.500s.\n"
+"Cannot find the table from the internal data dictionary of InnoDB.\n"
+"Create table statement:\n%.2000\n", name, sql_string);
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+ mutex_exit(&dict_foreign_err_mutex);
+
return(DB_ERROR);
}
loop:
@@ -2264,6 +2470,8 @@ loop:
return(error);
}
+ start_of_latest_foreign = ptr;
+
ptr = dict_accept(ptr, (char *) "FOREIGN", &success);
if (!isspace(*ptr)) {
@@ -2284,13 +2492,19 @@ loop:
ptr = dict_skip_word(ptr, &success);
if (!success) {
+ dict_foreign_report_syntax_err(name,
+ start_of_latest_foreign, ptr);
+
return(DB_CANNOT_ADD_CONSTRAINT);
}
ptr = dict_accept(ptr, (char *) "(", &success);
if (!success) {
- return(DB_CANNOT_ADD_CONSTRAINT);
+ /* We do not flag a syntax error here because in an
+ ALTER TABLE we may also have DROP FOREIGN KEY abc */
+
+ goto loop;
}
}
@@ -2301,6 +2515,15 @@ col_loop1:
ptr = dict_scan_col(ptr, &success, table, columns + i,
column_names + i, column_name_lens + i);
if (!success) {
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf),
+" Error in foreign key constraint of table %.500s,\n%.500s.\n"
+"Cannot resolve column name close to:\n%.500s\n", name,
+ start_of_latest_foreign, ptr);
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+ mutex_exit(&dict_foreign_err_mutex);
+
return(DB_CANNOT_ADD_CONSTRAINT);
}
@@ -2315,6 +2538,8 @@ col_loop1:
ptr = dict_accept(ptr, (char *) ")", &success);
if (!success) {
+ dict_foreign_report_syntax_err(name, start_of_latest_foreign,
+ ptr);
return(DB_CANNOT_ADD_CONSTRAINT);
}
@@ -2324,12 +2549,24 @@ col_loop1:
index = dict_foreign_find_index(table, column_names, i, NULL);
if (!index) {
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf),
+" Error in foreign key constraint of table %.500s:\n"
+"There is no index in the table %.500s where the columns appear\n"
+"as the first columns. Constraint:\n%.500s\n"
+"See http://www.innodb.com/ibman.html for correct foreign key definition.\n",
+ name, name, start_of_latest_foreign);
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+ mutex_exit(&dict_foreign_err_mutex);
+
return(DB_CANNOT_ADD_CONSTRAINT);
}
-
ptr = dict_accept(ptr, (char *) "REFERENCES", &success);
if (!success || !isspace(*ptr)) {
+ dict_foreign_report_syntax_err(name, start_of_latest_foreign,
+ ptr);
return(DB_CANNOT_ADD_CONSTRAINT);
}
@@ -2359,6 +2596,15 @@ col_loop1:
if (!success || (!referenced_table && trx->check_foreigns)) {
dict_foreign_free(foreign);
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf),
+" Error in foreign key constraint of table %.500s,\n%.500s.\n"
+"Cannot resolve table name close to:\n"
+"%.500s\n", name, start_of_latest_foreign, ptr);
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+ mutex_exit(&dict_foreign_err_mutex);
+
return(DB_CANNOT_ADD_CONSTRAINT);
}
@@ -2366,7 +2612,8 @@ col_loop1:
if (!success) {
dict_foreign_free(foreign);
-
+ dict_foreign_report_syntax_err(name, start_of_latest_foreign,
+ ptr);
return(DB_CANNOT_ADD_CONSTRAINT);
}
@@ -2381,6 +2628,15 @@ col_loop2:
if (!success) {
dict_foreign_free(foreign);
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf),
+" Error in foreign key constraint of table %.500s,\n%.500s\n"
+"Cannot resolve column name close to:\n"
+"%.500s\n", name, start_of_latest_foreign, ptr);
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+ mutex_exit(&dict_foreign_err_mutex);
+
return(DB_CANNOT_ADD_CONSTRAINT);
}
@@ -2395,6 +2651,8 @@ col_loop2:
if (!success || foreign->n_fields != i) {
dict_foreign_free(foreign);
+ dict_foreign_report_syntax_err(name, start_of_latest_foreign,
+ ptr);
return(DB_CANNOT_ADD_CONSTRAINT);
}
@@ -2417,9 +2675,10 @@ scan_on_conditions:
ptr = dict_accept(ptr, "UPDATE", &success);
if (!success) {
-
dict_foreign_free(foreign);
+ dict_foreign_report_syntax_err(name,
+ start_of_latest_foreign, ptr);
return(DB_CANNOT_ADD_CONSTRAINT);
}
@@ -2455,6 +2714,8 @@ scan_on_conditions:
if (!success) {
dict_foreign_free(foreign);
+ dict_foreign_report_syntax_err(name,
+ start_of_latest_foreign, ptr);
return(DB_CANNOT_ADD_CONSTRAINT);
}
@@ -2472,7 +2733,8 @@ scan_on_conditions:
if (!success) {
dict_foreign_free(foreign);
-
+ dict_foreign_report_syntax_err(name, start_of_latest_foreign,
+ ptr);
return(DB_CANNOT_ADD_CONSTRAINT);
}
@@ -2480,7 +2742,8 @@ scan_on_conditions:
if (!success) {
dict_foreign_free(foreign);
-
+ dict_foreign_report_syntax_err(name, start_of_latest_foreign,
+ ptr);
return(DB_CANNOT_ADD_CONSTRAINT);
}
@@ -2494,6 +2757,15 @@ scan_on_conditions:
dict_foreign_free(foreign);
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf),
+" Error in foreign key constraint of table %.500s,\n%.500s.\n"
+"You have defined a SET NULL condition though some of the\n"
+"columns is defined as NOT NULL.\n", name, start_of_latest_foreign);
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+ mutex_exit(&dict_foreign_err_mutex);
+
return(DB_CANNOT_ADD_CONSTRAINT);
}
}
@@ -2512,6 +2784,15 @@ try_find_index:
dict_foreign_free(foreign);
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf),
+" Error in foreign key constraint of table %.500s,\n%.500s.\n"
+"You have twice an ON DELETE clause or twice an ON UPDATE clause.\n",
+ name, start_of_latest_foreign);
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+ mutex_exit(&dict_foreign_err_mutex);
+
return(DB_CANNOT_ADD_CONSTRAINT);
}
@@ -2525,6 +2806,18 @@ try_find_index:
foreign->foreign_index);
if (!index) {
dict_foreign_free(foreign);
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf),
+" Error in foreign key constraint of table %.500s:\n"
+"Cannot find an index in the referenced table where the\n"
+"referenced columns appear as the first columns, or column types\n"
+"in the table and the referenced table do not match for constraint:\n%.500s\n"
+"See http://www.innodb.com/ibman.html for correct foreign key definition.\n",
+ name, start_of_latest_foreign);
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+ mutex_exit(&dict_foreign_err_mutex);
+
return(DB_CANNOT_ADD_CONSTRAINT);
}
} else {
@@ -2565,6 +2858,165 @@ try_find_index:
goto loop;
}
+/*************************************************************************
+Scans a table create SQL string and adds to the data dictionary the foreign
+key constraints declared in the string. This function should be called after
+the indexes for a table have been created. Each foreign key constraint must
+be accompanied with indexes in both participating tables. The indexes are
+allowed to contain more fields than mentioned in the constraint. */
+
+ulint
+dict_create_foreign_constraints(
+/*============================*/
+ /* out: error code or DB_SUCCESS */
+ trx_t* trx, /* in: transaction */
+ char* sql_string, /* in: table create or ALTER TABLE
+ statement where foreign keys are declared like:
+ FOREIGN KEY (a, b) REFERENCES table2(c, d),
+ table2 can be written also with the database
+ name before it: test.table2; the default
+ database id the database of parameter name */
+ char* name) /* in: table full name in the normalized form
+ database_name/table_name */
+{
+ char* str;
+ ulint err;
+
+ str = dict_strip_comments(sql_string);
+
+ err = dict_create_foreign_constraints_low(trx, str, name);
+
+ mem_free(str);
+
+ return(err);
+}
+
+/**************************************************************************
+Parses the CONSTRAINT id's to be dropped in an ALTER TABLE statement. */
+
+ulint
+dict_foreign_parse_drop_constraints(
+/*================================*/
+ /* out: DB_SUCCESS or
+ DB_CANNOT_DROP_CONSTRAINT if
+ syntax error or the constraint
+ id does not match */
+ mem_heap_t* heap, /* in: heap from which we can
+ allocate memory */
+ trx_t* trx, /* in: transaction */
+ dict_table_t* table, /* in: table */
+ ulint* n, /* out: number of constraints
+ to drop */
+ char*** constraints_to_drop) /* out: id's of the
+ constraints to drop */
+{
+ dict_foreign_t* foreign;
+ ibool success;
+ char* str;
+ char* ptr;
+ char* buf = dict_foreign_err_buf;
+ char* start;
+ char* id;
+ ulint len;
+
+ *n = 0;
+
+ *constraints_to_drop = mem_heap_alloc(heap, 1000 * sizeof(char*));
+
+ str = dict_strip_comments(*(trx->mysql_query_str));
+ ptr = str;
+
+ ut_ad(mutex_own(&(dict_sys->mutex)));
+loop:
+ ptr = dict_scan_to(ptr, (char *) "DROP");
+
+ if (*ptr == '\0') {
+ ut_a(*n < 1000);
+
+ mem_free(str);
+
+ return(DB_SUCCESS);
+ }
+
+ ptr = dict_accept(ptr, (char *) "DROP", &success);
+
+ if (!isspace(*ptr)) {
+
+ goto loop;
+ }
+
+ ptr = dict_accept(ptr, (char *) "FOREIGN", &success);
+
+ if (!success) {
+
+ goto loop;
+ }
+
+ ptr = dict_accept(ptr, (char *) "KEY", &success);
+
+ if (!success) {
+
+ goto syntax_error;
+ }
+
+ ptr = dict_scan_id(ptr, &start, &len);
+
+ if (start == NULL) {
+
+ goto syntax_error;
+ }
+
+ id = mem_heap_alloc(heap, len + 1);
+ ut_memcpy(id, start, len);
+ id[len] = '\0';
+ (*constraints_to_drop)[*n] = id;
+ (*n)++;
+
+ /* Look for the given constraint id */
+
+ foreign = UT_LIST_GET_FIRST(table->foreign_list);
+
+ while (foreign != NULL) {
+ if (0 == ut_strcmp(foreign->id, id)) {
+
+ /* Found */
+ break;
+ }
+
+ foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
+ }
+
+ if (foreign == NULL) {
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf),
+" Error in dropping of a foreign key constraint of table %.500s,\n"
+"just before:\n%s\n in SQL command\n%s\nCannot find a constraint with the\n"
+"given id %s.\n", table->name, ptr, str, id);
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+ mutex_exit(&dict_foreign_err_mutex);
+
+ mem_free(str);
+
+ return(DB_CANNOT_DROP_CONSTRAINT);
+ }
+
+ goto loop;
+
+syntax_error:
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf),
+" Syntax error in dropping of a foreign key constraint of table %.500s,\n"
+"close to:\n%s\n in SQL command\n%s\n", table->name, ptr, str);
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+ mutex_exit(&dict_foreign_err_mutex);
+
+ mem_free(str);
+
+ return(DB_CANNOT_DROP_CONSTRAINT);
+}
+
/*==================== END OF FOREIGN KEY PROCESSING ====================*/
/**************************************************************************
@@ -3286,7 +3738,6 @@ dict_index_print_low(
n_vals = index->stat_n_diff_key_vals[1];
}
-
printf(
" INDEX: name %s, table name %s, id %lu %lu, fields %lu/%lu, type %lu\n",
index->name, index->table_name,
@@ -3328,6 +3779,99 @@ dict_field_print_low(
}
/**************************************************************************
+Sprintfs to a string info on a foreign key of a table in a format suitable
+for CREATE TABLE. */
+
+char*
+dict_print_info_on_foreign_key_in_create_format(
+/*============================================*/
+ /* out: how far in buf we printed */
+ dict_foreign_t* foreign,/* in: foreign key constraint */
+ char* buf) /* in: buffer of at least 5000 bytes */
+{
+ char* buf2 = buf;
+ ulint i;
+
+ buf2 += sprintf(buf2, ",\n CONSTRAINT `%s` FOREIGN KEY (",
+ foreign->id);
+ for (i = 0; i < foreign->n_fields; i++) {
+ if ((ulint)(buf2 - buf) >= 4000) {
+
+ goto no_space;
+ }
+ buf2 += sprintf(buf2, "`%.250s`",
+ foreign->foreign_col_names[i]);
+
+ if (i + 1 < foreign->n_fields) {
+ buf2 += sprintf(buf2, ", ");
+ }
+ }
+
+ if (dict_tables_have_same_db(foreign->foreign_table_name,
+ foreign->referenced_table_name)) {
+ /* Do not print the database name of the referenced
+ table */
+ buf2 += sprintf(buf2, ") REFERENCES `%.500s` (",
+ dict_remove_db_name(
+ foreign->referenced_table_name));
+ } else {
+ buf2 += sprintf(buf2, ") REFERENCES `%.500s` (",
+ foreign->referenced_table_name);
+ /* Change the '/' in the table name to '.' */
+
+ for (i = ut_strlen(buf); i > 0; i--) {
+ if (buf[i] == '/') {
+
+ buf[i] = '.';
+
+ break;
+ }
+ }
+ }
+
+ for (i = 0; i < foreign->n_fields; i++) {
+ if ((ulint)(buf2 - buf) >= 4000) {
+
+ goto no_space;
+ }
+ buf2 += sprintf(buf2, "`%.250s`",
+ foreign->referenced_col_names[i]);
+ if (i + 1 < foreign->n_fields) {
+ buf2 += sprintf(buf2, ", ");
+ }
+ }
+
+ buf2 += sprintf(buf2, ")");
+
+ if (foreign->type & DICT_FOREIGN_ON_DELETE_CASCADE) {
+ buf2 += sprintf(buf2, " ON DELETE CASCADE");
+ }
+
+ if (foreign->type & DICT_FOREIGN_ON_DELETE_SET_NULL) {
+ buf2 += sprintf(buf2, " ON DELETE SET NULL");
+ }
+
+ if (foreign->type & DICT_FOREIGN_ON_DELETE_NO_ACTION) {
+ buf2 += sprintf(buf2, " ON DELETE NO ACTION");
+ }
+
+ if (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE) {
+ buf2 += sprintf(buf2, " ON UPDATE CASCADE");
+ }
+
+ if (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL) {
+ buf2 += sprintf(buf2, " ON UPDATE SET NULL");
+ }
+
+ if (foreign->type & DICT_FOREIGN_ON_UPDATE_NO_ACTION) {
+ buf2 += sprintf(buf2, " ON UPDATE NO ACTION");
+ }
+
+no_space:
+ return(buf2);
+}
+
+/**************************************************************************
Sprintfs to a string info on foreign keys of a table in a format suitable
for CREATE TABLE. */
static
@@ -3336,13 +3880,12 @@ dict_print_info_on_foreign_keys_in_create_format(
/*=============================================*/
char* buf, /* in: auxiliary buffer */
char* str, /* in/out: pointer to a string */
- ulint len, /* in: str has to be a buffer at least
- len + 5000 bytes */
+ ulint len, /* in: buf has to be a buffer of at least
+ len + 5000 bytes; str must have at least
+ len + 1 bytes */
dict_table_t* table) /* in: table */
{
-
dict_foreign_t* foreign;
- ulint i;
char* buf2;
buf2 = buf;
@@ -3358,78 +3901,12 @@ dict_print_info_on_foreign_keys_in_create_format(
}
while (foreign != NULL) {
- buf2 += sprintf(buf2, ",\n FOREIGN KEY (");
-
- for (i = 0; i < foreign->n_fields; i++) {
- if ((ulint)(buf2 - buf) >= len) {
- goto no_space;
- }
- buf2 += sprintf(buf2, "`%s`",
- foreign->foreign_col_names[i]);
-
- if (i + 1 < foreign->n_fields) {
- buf2 += sprintf(buf2, ", ");
- }
- }
-
- if (dict_tables_have_same_db(table->name,
- foreign->referenced_table_name)) {
- /* Do not print the database name of the referenced
- table */
- buf2 += sprintf(buf2, ") REFERENCES `%s` (",
- dict_remove_db_name(
- foreign->referenced_table_name));
- } else {
- buf2 += sprintf(buf2, ") REFERENCES `%s` (",
- foreign->referenced_table_name);
- /* Change the '/' in the table name to '.' */
-
- for (i = ut_strlen(buf); i > 0; i--) {
- if (buf[i] == '/') {
+ if ((ulint)(buf2 - buf) >= len) {
+ goto no_space;
+ }
- buf[i] = '.';
-
- break;
- }
- }
- }
-
- for (i = 0; i < foreign->n_fields; i++) {
- if ((ulint)(buf2 - buf) >= len) {
- goto no_space;
- }
- buf2 += sprintf(buf2, "`%s`",
- foreign->referenced_col_names[i]);
- if (i + 1 < foreign->n_fields) {
- buf2 += sprintf(buf2, ", ");
- }
- }
-
- buf2 += sprintf(buf2, ")");
-
- if (foreign->type & DICT_FOREIGN_ON_DELETE_CASCADE) {
- buf2 += sprintf(buf2, " ON DELETE CASCADE");
- }
-
- if (foreign->type & DICT_FOREIGN_ON_DELETE_SET_NULL) {
- buf2 += sprintf(buf2, " ON DELETE SET NULL");
- }
-
- if (foreign->type & DICT_FOREIGN_ON_DELETE_NO_ACTION) {
- buf2 += sprintf(buf2, " ON DELETE NO ACTION");
- }
-
- if (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE) {
- buf2 += sprintf(buf2, " ON UPDATE CASCADE");
- }
-
- if (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL) {
- buf2 += sprintf(buf2, " ON UPDATE SET NULL");
- }
-
- if (foreign->type & DICT_FOREIGN_ON_UPDATE_NO_ACTION) {
- buf2 += sprintf(buf2, " ON UPDATE NO ACTION");
- }
+ buf2 = dict_print_info_on_foreign_key_in_create_format(
+ foreign, buf2);
foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
}
@@ -3490,7 +3967,7 @@ dict_print_info_on_foreign_keys(
goto no_space;
}
- buf2 += sprintf(buf2, "%s",
+ buf2 += sprintf(buf2, "%.500s",
foreign->foreign_col_names[i]);
if (i + 1 < foreign->n_fields) {
@@ -3498,14 +3975,14 @@ dict_print_info_on_foreign_keys(
}
}
- buf2 += sprintf(buf2, ") REFER %s(",
+ buf2 += sprintf(buf2, ") REFER %.500s(",
foreign->referenced_table_name);
for (i = 0; i < foreign->n_fields; i++) {
if ((ulint)(buf2 - buf) >= len) {
goto no_space;
}
- buf2 += sprintf(buf2, "%s",
+ buf2 += sprintf(buf2, "%.500s",
foreign->referenced_col_names[i]);
if (i + 1 < foreign->n_fields) {
buf2 += sprintf(buf2, " ");
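Among the dict0dict.c additions above, dict_strip_comments() removes the three MySQL comment forms ('#' to end of line, '-- ' to end of line, and C-style block comments) before dict_create_foreign_constraints_low() scans the statement, so constraints are no longer parsed out of commented-out text. A simplified standalone stand-in handling the same three forms is sketched below; it uses malloc() rather than mem_alloc() and makes no claim to byte-for-byte compatibility with the function above.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char*
strip_sql_comments(const char* sql)
{
        char*   out = malloc(strlen(sql) + 1);
        char*   p = out;

        while (*sql) {
                if (*sql == '#' || strncmp(sql, "-- ", 3) == 0) {
                        /* line comment: skip up to the end of the line */
                        while (*sql && *sql != '\n' && *sql != '\r') {
                                sql++;
                        }
                } else if (strncmp(sql, "/*", 2) == 0) {
                        /* block comment: skip past the closing delimiter */
                        const char*     end = strstr(sql + 2, "*/");

                        sql = end ? end + 2 : sql + strlen(sql);
                } else {
                        *p++ = *sql++;
                }
        }

        *p = '\0';

        return out;
}

int
main(void)
{
        char*   s = strip_sql_comments(
                "FOREIGN KEY (a) REFERENCES p(id) -- ignored\n"
                "/* also ignored */ ON DELETE CASCADE # ignored too");

        puts(s);        /* comment bodies are gone, the rest survives */
        free(s);

        return 0;
}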
diff --git a/innobase/dict/dict0load.c b/innobase/dict/dict0load.c
index 1070a8f5426..999eb55bb20 100644
--- a/innobase/dict/dict0load.c
+++ b/innobase/dict/dict0load.c
@@ -456,7 +456,7 @@ dict_load_indexes(
ut_ad(len == 8);
id = mach_read_from_8(field);
- ut_a(0 == ut_strcmp((void*) "NAME",
+ ut_a(0 == ut_strcmp((char*) "NAME",
dict_field_get_col(
dict_index_get_nth_field(
dict_table_get_first_index(sys_indexes), 4))->name));
@@ -515,7 +515,7 @@ dict_load_indexes(
&& ((type & DICT_CLUSTERED)
|| ((table == dict_sys->sys_tables)
&& (name_len == ut_strlen("ID_IND"))
- && (0 == ut_memcmp(name_buf, (void*) "ID_IND",
+ && (0 == ut_memcmp(name_buf, (char*) "ID_IND",
name_len))))) {
/* The index was created in memory already in
@@ -566,6 +566,7 @@ dict_load_table(
char* buf;
ulint space;
ulint n_cols;
+ ulint err;
mtr_t mtr;
ut_ad(mutex_own(&(dict_sys->mutex)));
@@ -674,8 +675,25 @@ dict_load_table(
dict_load_indexes(table, heap);
- ut_a(DB_SUCCESS == dict_load_foreigns(table->name));
+ err = dict_load_foreigns(table->name);
+/*
+ if (err != DB_SUCCESS) {
+
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_print_timestamp(stderr);
+
+ fprintf(stderr,
+" InnoDB: Error: could not make a foreign key definition to match\n"
+"InnoDB: the foreign key table or the referenced table!\n"
+"InnoDB: The data dictionary of InnoDB is corrupt. You may need to drop\n"
+"InnoDB: and recreate the foreign key table or the referenced table.\n"
+"InnoDB: Send a detailed bug report to mysql@lists.mysql.com\n"
+"InnoDB: Latest foreign key error printout:\n%s\n", dict_foreign_err_buf);
+
+ mutex_exit(&dict_foreign_err_mutex);
+ }
+*/
mem_heap_free(heap);
return(table);
@@ -978,8 +996,8 @@ dict_load_foreign(
field = rec_get_nth_field(rec, 4, &len);
- foreign->referenced_table_name = mem_heap_alloc(foreign->heap, 1 + len);
-
+ foreign->referenced_table_name = mem_heap_alloc(foreign->heap,
+ 1 + len);
ut_memcpy(foreign->referenced_table_name, field, len);
foreign->referenced_table_name[len] = '\0';
@@ -988,10 +1006,19 @@ dict_load_foreign(
dict_load_foreign_cols(id, foreign);
+ /* If the foreign table is not yet in the dictionary cache, we
+ have to load it so that we are able to make type comparisons
+ in the next function call. */
+
+ dict_table_get_low(foreign->foreign_table_name);
+
/* Note that there may already be a foreign constraint object in
the dictionary cache for this constraint: then the following
call only sets the pointers in it to point to the appropriate table
- and index objects and frees the newly created object foreign. */
+ and index objects and frees the newly created object foreign.
+ Adding to the cache should always succeed since we are not creating
+ a new foreign key constraint but loading one from the data
+ dictionary. */
err = dict_foreign_add_to_cache(foreign);
diff --git a/innobase/dyn/Makefile.am b/innobase/dyn/Makefile.am
index 79c0000868c..ec33a3c18a9 100644
--- a/innobase/dyn/Makefile.am
+++ b/innobase/dyn/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libdyn.a
+noinst_LIBRARIES = libdyn.a
libdyn_a_SOURCES = dyn0dyn.c
diff --git a/innobase/eval/Makefile.am b/innobase/eval/Makefile.am
index 5dd0eab4c9b..aebffb91be3 100644
--- a/innobase/eval/Makefile.am
+++ b/innobase/eval/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libeval.a
+noinst_LIBRARIES = libeval.a
libeval_a_SOURCES = eval0eval.c eval0proc.c
diff --git a/innobase/fil/Makefile.am b/innobase/fil/Makefile.am
index a9473fdb762..dc0baff7d1a 100644
--- a/innobase/fil/Makefile.am
+++ b/innobase/fil/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libfil.a
+noinst_LIBRARIES = libfil.a
libfil_a_SOURCES = fil0fil.c
diff --git a/innobase/fsp/Makefile.am b/innobase/fsp/Makefile.am
index b3e9ab44d9b..edf06bda0d6 100644
--- a/innobase/fsp/Makefile.am
+++ b/innobase/fsp/Makefile.am
@@ -18,7 +18,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libfsp.a
+noinst_LIBRARIES = libfsp.a
libfsp_a_SOURCES = fsp0fsp.c
diff --git a/innobase/fut/Makefile.am b/innobase/fut/Makefile.am
index a4b1e30e03c..839fdb1580e 100644
--- a/innobase/fut/Makefile.am
+++ b/innobase/fut/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libfut.a
+noinst_LIBRARIES = libfut.a
libfut_a_SOURCES = fut0fut.c fut0lst.c
diff --git a/innobase/ha/Makefile.am b/innobase/ha/Makefile.am
index ce846d37622..121bafe167d 100644
--- a/innobase/ha/Makefile.am
+++ b/innobase/ha/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libha.a
+noinst_LIBRARIES = libha.a
libha_a_SOURCES = ha0ha.c hash0hash.c
diff --git a/innobase/ibuf/Makefile.am b/innobase/ibuf/Makefile.am
index 1c1d196c40c..fb813d38ee5 100644
--- a/innobase/ibuf/Makefile.am
+++ b/innobase/ibuf/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libibuf.a
+noinst_LIBRARIES = libibuf.a
libibuf_a_SOURCES = ibuf0ibuf.c
diff --git a/innobase/ibuf/ibuf0ibuf.c b/innobase/ibuf/ibuf0ibuf.c
index 5cd066afc27..187afa17047 100644
--- a/innobase/ibuf/ibuf0ibuf.c
+++ b/innobase/ibuf/ibuf0ibuf.c
@@ -2391,7 +2391,7 @@ ibuf_delete_rec(
ut_ad(ibuf_inside());
- success = btr_cur_optimistic_delete(btr_pcur_get_btr_cur(pcur), mtr);
+ success = btr_cur_optimistic_delete(btr_pcur_get_btr_cur(pcur), mtr);
if (success) {
#ifdef UNIV_IBUF_DEBUG
@@ -2401,7 +2401,7 @@ ibuf_delete_rec(
return(FALSE);
}
- /* We have to resort to a pessimistic delete from ibuf */
+ /* We have to resort to a pessimistic delete from ibuf */
btr_pcur_store_position(pcur, mtr);
btr_pcur_commit_specify_mtr(pcur, mtr);
@@ -2420,17 +2420,22 @@ ibuf_delete_rec(
fprintf(stderr, "InnoDB: ibuf cursor restoration fails!\n");
fprintf(stderr, "InnoDB: ibuf record inserted to page %lu\n",
page_no);
+ fflush(stderr);
+
rec_print(btr_pcur_get_rec(pcur));
rec_print(pcur->old_rec);
dtuple_print(search_tuple);
rec_print(page_rec_get_next(btr_pcur_get_rec(pcur)));
+ fflush(stdout);
mtr_commit(mtr);
fprintf(stderr, "InnoDB: Validating insert buffer tree:\n");
- ut_a(btr_validate_tree(ibuf_data->index->tree));
- fprintf(stderr, "InnoDB: Ibuf tree ok\n");
+ ut_a(btr_validate_tree(ibuf_data->index->tree));
+
+ fprintf(stderr, "InnoDB: ibuf tree ok\n");
+ fflush(stderr);
}
ut_a(success);
diff --git a/innobase/include/Makefile.i b/innobase/include/Makefile.i
index 985ec525950..f3e3fbe989e 100644
--- a/innobase/include/Makefile.i
+++ b/innobase/include/Makefile.i
@@ -1,7 +1,5 @@
# Makefile included in Makefile.am in every subdirectory
-libsdir = ../libs
-
INCLUDES = -I$(srcdir)/../include -I$(srcdir)/../../include -I../../include
# Don't update the files from bitkeeper
diff --git a/innobase/include/buf0buf.h b/innobase/include/buf0buf.h
index 81eeb7fced8..b613d60ebf7 100644
--- a/innobase/include/buf0buf.h
+++ b/innobase/include/buf0buf.h
@@ -496,6 +496,13 @@ buf_print_io(
/*=========*/
char* buf, /* in/out: buffer where to print */
char* buf_end);/* in: buffer end */
+/*************************************************************************
+Returns the ratio in percents of modified pages in the buffer pool /
+database pages in the buffer pool. */
+
+ulint
+buf_get_modified_ratio_pct(void);
+/*============================*/
/**************************************************************************
Refreshes the statistics used to print per-second averages. */
diff --git a/innobase/include/buf0lru.h b/innobase/include/buf0lru.h
index 6a3c948507d..5c995b259bf 100644
--- a/innobase/include/buf0lru.h
+++ b/innobase/include/buf0lru.h
@@ -46,6 +46,20 @@ buf_LRU_get_recent_limit(void);
/*==========================*/
/* out: the limit; zero if could not determine it */
/**********************************************************************
+Look for a replaceable block from the end of the LRU list and put it to
+the free list if found. */
+
+ibool
+buf_LRU_search_and_free_block(
+/*==========================*/
+ /* out: TRUE if freed */
+ ulint n_iterations); /* in: how many times this has been called
+ repeatedly without result: a high value means
+ that we should search farther; if value is
+ k < 10, then we only search k/10 * number
+ of pages in the buffer pool from the end
+ of the LRU list */
+/**********************************************************************
Returns a free block from the buf_pool. The block is taken off the
free list. If it is empty, blocks are moved from the end of the
LRU list to the free list. */
@@ -88,17 +102,6 @@ void
buf_LRU_make_block_old(
/*===================*/
buf_block_t* block); /* in: control block */
-/**********************************************************************
-Look for a replaceable block from the end of the LRU list and put it to
-the free list if found. */
-
-ibool
-buf_LRU_search_and_free_block(
-/*==========================*/
- /* out: TRUE if freed */
- ulint n_iterations); /* in: how many times this has been called
- repeatedly without result: a high value
- means that we should search farther */
/**************************************************************************
Validates the LRU list. */
diff --git a/innobase/include/db0err.h b/innobase/include/db0err.h
index c67c09bad27..ab7d0caa35c 100644
--- a/innobase/include/db0err.h
+++ b/innobase/include/db0err.h
@@ -44,6 +44,8 @@ Created 5/24/1996 Heikki Tuuri
#define DB_CORRUPTION 39 /* data structure corruption noticed */
#define DB_COL_APPEARS_TWICE_IN_INDEX 40 /* InnoDB cannot handle an index
where same column appears twice */
+#define DB_CANNOT_DROP_CONSTRAINT 40 /* dropping a foreign key constraint
+ from a table failed */
/* The following are partial failure codes */
#define DB_FAIL 1000
diff --git a/innobase/include/dict0dict.h b/innobase/include/dict0dict.h
index b5e6e04a1de..97486a7c2f6 100644
--- a/innobase/include/dict0dict.h
+++ b/innobase/include/dict0dict.h
@@ -219,6 +219,24 @@ dict_create_foreign_constraints(
char* name); /* in: table full name in the normalized form
database_name/table_name */
/**************************************************************************
+Parses the CONSTRAINT id's to be dropped in an ALTER TABLE statement. */
+
+ulint
+dict_foreign_parse_drop_constraints(
+/*================================*/
+ /* out: DB_SUCCESS or
+ DB_CANNOT_DROP_CONSTRAINT if
+ syntax error or the constraint
+ id does not match */
+ mem_heap_t* heap, /* in: heap from which we can
+ allocate memory */
+ trx_t* trx, /* in: transaction */
+ dict_table_t* table, /* in: table */
+ ulint* n, /* out: number of constraints
+ to drop */
+ char*** constraints_to_drop); /* out: id's of the
+ constraints to drop */
+/**************************************************************************
Returns a table object and memoryfixes it. NOTE! This is a high-level
function to be used mainly from outside the 'dict' directory. Inside this
directory dict_table_get_low is usually the appropriate function. */
@@ -333,6 +351,16 @@ dict_print_info_on_foreign_keys(
char* str, /* in/out: pointer to a string */
ulint len, /* in: space in str available for info */
dict_table_t* table); /* in: table */
+/**************************************************************************
+Sprintfs to a string info on a foreign key of a table in a format suitable
+for CREATE TABLE. */
+
+char*
+dict_print_info_on_foreign_key_in_create_format(
+/*============================================*/
+ /* out: how far in buf we printed */
+ dict_foreign_t* foreign,/* in: foreign key constraint */
+ char* buf); /* in: buffer of at least 5000 bytes */
/************************************************************************
Gets the first index on the table (the clustered index). */
UNIV_INLINE
@@ -808,6 +836,14 @@ void
dict_mutex_exit_for_mysql(void);
/*===========================*/
+/* The following len must be at least 10000 bytes! */
+#define DICT_FOREIGN_ERR_BUF_LEN 10000
+
+/* Buffers for storing detailed information about the latest foreign key
+and unique key errors */
+extern char* dict_foreign_err_buf;
+extern char* dict_unique_err_buf;
+extern mutex_t dict_foreign_err_mutex; /* mutex protecting the buffers */
extern dict_sys_t* dict_sys; /* the dictionary system */
extern rw_lock_t dict_operation_lock;
diff --git a/innobase/include/log0log.h b/innobase/include/log0log.h
index f200371de9d..4e1404b15fe 100644
--- a/innobase/include/log0log.h
+++ b/innobase/include/log0log.h
@@ -20,7 +20,7 @@ typedef struct log_group_struct log_group_t;
extern ibool log_do_write;
extern ibool log_debug_writes;
-/* Wait modes for log_flush_up_to */
+/* Wait modes for log_write_up_to */
#define LOG_NO_WAIT 91
#define LOG_WAIT_ONE_GROUP 92
#define LOG_WAIT_ALL_GROUPS 93
@@ -157,26 +157,21 @@ log_io_complete(
/*============*/
log_group_t* group); /* in: log group */
/**********************************************************
-Flushes the log files to the disk, using, for example, the Unix fsync.
-This function does the flush even if the user has set
-srv_flush_log_at_trx_commit = FALSE. */
-
-void
-log_flush_to_disk(void);
-/*===================*/
-/**********************************************************
This function is called, e.g., when a transaction wants to commit. It checks
-that the log has been flushed to disk up to the last log entry written by the
-transaction. If there is a flush running, it waits and checks if the flush
-flushed enough. If not, starts a new flush. */
+that the log has been written to the log file up to the last log entry written
+by the transaction. If there is a flush running, it waits and checks if the
+flush flushed enough. If not, starts a new flush. */
void
-log_flush_up_to(
+log_write_up_to(
/*============*/
dulint lsn, /* in: log sequence number up to which the log should
- be flushed, ut_dulint_max if not specified */
- ulint wait); /* in: LOG_NO_WAIT, LOG_WAIT_ONE_GROUP,
+ be written, ut_dulint_max if not specified */
+ ulint wait, /* in: LOG_NO_WAIT, LOG_WAIT_ONE_GROUP,
or LOG_WAIT_ALL_GROUPS */
+ ibool flush_to_disk);
+ /* in: TRUE if we want the written log also to be
+ flushed to disk */
/********************************************************************
Advances the smallest lsn for which there are unflushed dirty blocks in the
buffer pool and also may make a new checkpoint. NOTE: this function may only
@@ -741,27 +736,37 @@ struct log_struct{
be advanced, it is enough that the
write i/o has been completed for all
log groups */
- dulint flush_lsn; /* end lsn for the current flush */
- ulint flush_end_offset;/* the data in buffer has been flushed
+ dulint write_lsn; /* end lsn for the current running
+ write */
+ ulint write_end_offset;/* the data in buffer has been written
up to this offset when the current
- flush ends: this field will then
+ write ends: this field will then
be copied to buf_next_to_write */
- ulint n_pending_writes;/* number of currently pending flush
- writes */
+ dulint current_flush_lsn;/* end lsn for the current running
+ write + flush operation */
+ dulint flushed_to_disk_lsn;
+ /* how far we have written the log
+ AND flushed to disk */
+ ulint n_pending_writes;/* number of currently pending flushes
+ or writes */
+ /* NOTE on the 'flush' in names of the fields below: starting from
+ 4.0.14, we separate the write of the log file and the actual fsync()
+ or other method to flush it to disk. The names below should really
+ be 'flush_or_write'! */
os_event_t no_flush_event; /* this event is in the reset state
- when a flush is running; a thread
- should wait for this without owning
- the log mutex, but NOTE that to set or
- reset this event, the thread MUST own
- the log mutex! */
+ when a flush or a write is running;
+ a thread should wait for this without
+ owning the log mutex, but NOTE that
+ to set or reset this event, the
+ thread MUST own the log mutex! */
ibool one_flushed; /* during a flush, this is first FALSE
and becomes TRUE when one log group
- has been flushed */
+ has been written or flushed */
os_event_t one_flushed_event;/* this event is reset when the
- flush has not yet completed for any
- log group; e.g., this means that a
- transaction has been committed when
- this is set; a thread should wait
+ flush or write has not yet completed
+ for any log group; e.g., this means
+ that a transaction has been committed
+ when this is set; a thread should wait
for this without owning the log mutex,
but NOTE that to set or reset this
event, the thread MUST own the log
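
The new flush_to_disk argument lets a caller choose between getting the log
written to the OS file cache and getting it forced to disk with fsync(). A
minimal sketch of a commit-time caller (the helper and its second parameter are
illustrative and simplified, not code from this patch; only log_write_up_to()
and LOG_WAIT_ONE_GROUP come from the header above):

static void
example_flush_log_at_commit(dulint commit_lsn, ulint flush_log_at_trx_commit)
{
	if (flush_log_at_trx_commit == 0) {
		/* Leave the work to the once-per-second write + flush
		performed by the master thread */

		return;
	}

	if (flush_log_at_trx_commit == 2) {
		/* Write up to the commit lsn but do not force the write
		to disk; the file is fsync'ed only about once per second */

		log_write_up_to(commit_lsn, LOG_WAIT_ONE_GROUP, FALSE);

		return;
	}

	/* Full durability: write AND flush up to the commit lsn */

	log_write_up_to(commit_lsn, LOG_WAIT_ONE_GROUP, TRUE);
}
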
diff --git a/innobase/include/row0ins.h b/innobase/include/row0ins.h
index cc3b9fa7e9a..a5b4b74e7fc 100644
--- a/innobase/include/row0ins.h
+++ b/innobase/include/row0ins.h
@@ -35,7 +35,6 @@ row_ins_check_foreign_constraint(
dictionary cache if they exist at all */
dict_table_t* table, /* in: if check_ref is TRUE, then the foreign
table, else the referenced table */
- dict_index_t* index, /* in: index in table */
dtuple_t* entry, /* in: index entry for index */
que_thr_t* thr); /* in: query thread */
/*************************************************************************
diff --git a/innobase/include/row0mysql.h b/innobase/include/row0mysql.h
index 972fabc74cf..1964f53dabb 100644
--- a/innobase/include/row0mysql.h
+++ b/innobase/include/row0mysql.h
@@ -427,13 +427,21 @@ struct row_prebuilt_struct {
index where the ordering column is
the row id: in this case this flag
is set to TRUE */
- dict_index_t* index; /* current index for a search, if any */
+ dict_index_t* index; /* current index for a search, if
+ any */
ulint read_just_key; /* set to 1 when MySQL calls
ha_innobase::extra with the
argument HA_EXTRA_KEYREAD; it is enough
to read just columns defined in
the index (i.e., no read of the
clustered index record necessary) */
+ ibool used_in_HANDLER;/* TRUE if we have been using this
+ handle in a MySQL HANDLER low level
+ index cursor command: then we must
+ store the pcur position even in a
+ unique search from a clustered index,
+ because HANDLER allows NEXT and PREV
+ in such a situation */
ulint template_type; /* ROW_MYSQL_WHOLE_ROW,
ROW_MYSQL_REC_FIELDS,
ROW_MYSQL_DUMMY_TEMPLATE, or
diff --git a/innobase/include/srv0srv.h b/innobase/include/srv0srv.h
index bc0960ae023..121e9c44a24 100644
--- a/innobase/include/srv0srv.h
+++ b/innobase/include/srv0srv.h
@@ -75,6 +75,9 @@ extern ulint srv_lock_wait_timeout;
extern char* srv_file_flush_method_str;
extern ulint srv_unix_file_flush_method;
extern ulint srv_win_file_flush_method;
+
+extern ulint srv_max_buf_pool_modified_pct;
+
extern ulint srv_force_recovery;
extern ulint srv_thread_concurrency;
diff --git a/innobase/include/trx0trx.h b/innobase/include/trx0trx.h
index 34f820f03e7..39229923375 100644
--- a/innobase/include/trx0trx.h
+++ b/innobase/include/trx0trx.h
@@ -157,6 +157,15 @@ trx_commit_for_mysql(
/* out: 0 or error number */
trx_t* trx); /* in: trx handle */
/**************************************************************************
+If required, flushes the log to disk if we called trx_commit_for_mysql()
+with trx->flush_log_later == TRUE. */
+
+ulint
+trx_commit_complete_for_mysql(
+/*==========================*/
+ /* out: 0 or error number */
+ trx_t* trx); /* in: trx handle */
+/**************************************************************************
Marks the latest SQL statement ended. */
void
@@ -343,6 +352,11 @@ struct trx_struct{
dulint no; /* transaction serialization number ==
max trx id when the transaction is
moved to COMMITTED_IN_MEMORY state */
+ ibool flush_log_later;/* when we commit the transaction
+ in MySQL's binlog write, we will
+ flush the log to disk later in
+ a separate call */
+ dulint commit_lsn; /* lsn at the time of the commit */
ibool dict_operation; /* TRUE if the trx is used to create
a table, create an index, or drop a
table */
@@ -418,10 +432,6 @@ struct trx_struct{
lock_t* auto_inc_lock; /* possible auto-inc lock reserved by
the transaction; note that it is also
in the lock list trx_locks */
- ibool ignore_duplicates_in_insert;
- /* in an insert roll back only insert
- of the latest row in case
- of a duplicate key error */
UT_LIST_NODE_T(trx_t)
trx_list; /* list of transactions */
UT_LIST_NODE_T(trx_t)
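
Together, flush_log_later, commit_lsn and trx_commit_complete_for_mysql() allow
the commit to be split around the MySQL binlog write, so that one log flush can
serve a whole group of committing transactions. A sketch of the intended
calling sequence (the wrapper below is illustrative; the real caller is on the
MySQL side, not in this directory):

static void
example_commit_around_binlog_write(trx_t* trx)
{
	trx->flush_log_later = TRUE;

	trx_commit_for_mysql(trx);	/* commits and records
					trx->commit_lsn, but defers the
					flush of the log to disk */

	/* ... MySQL writes and syncs its binlog here ... */

	trx->flush_log_later = FALSE;

	trx_commit_complete_for_mysql(trx);
					/* if required, flushes the log
					up to trx->commit_lsn */
}
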
diff --git a/innobase/include/univ.i b/innobase/include/univ.i
index bf606efcf64..e29f3ec92e1 100644
--- a/innobase/include/univ.i
+++ b/innobase/include/univ.i
@@ -100,6 +100,15 @@ memory is read outside the allocated blocks. */
#define YYDEBUG 1
+#ifdef HAVE_purify
+/* The following sets all new allocated memory to zero before use:
+this can be used to eliminate unnecessary Purify warnings, but note that
+it also masks many bugs Purify could detect. For detailed Purify analysis it
+is best to remove the define below and look through the warnings one
+by one. */
+#define UNIV_SET_MEM_TO_ZERO
+#endif
+
/*
#define UNIV_SQL_DEBUG
#define UNIV_LOG_DEBUG
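
The effect of UNIV_SET_MEM_TO_ZERO can be shown with a standalone allocator
wrapper (hypothetical code, not the actual InnoDB allocation routines):

#include <stdlib.h>
#include <string.h>

void*
example_alloc(size_t n)
{
	void*	p = malloc(n);

#ifdef UNIV_SET_MEM_TO_ZERO
	if (p != NULL) {
		/* Zero-fill new memory in Purify builds: this silences
		"uninitialized read" warnings but can also hide bugs */

		memset(p, '\0', n);
	}
#endif

	return(p);
}
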
diff --git a/innobase/include/ut0dbg.h b/innobase/include/ut0dbg.h
index 3407483696c..e99dc8c09d6 100644
--- a/innobase/include/ut0dbg.h
+++ b/innobase/include/ut0dbg.h
@@ -20,7 +20,6 @@ extern ibool ut_dbg_stop_threads;
extern ulint* ut_dbg_null_ptr;
-
#define ut_a(EXPR)\
{\
ulint dbg_i;\
@@ -31,8 +30,10 @@ extern ulint* ut_dbg_null_ptr;
" InnoDB: Assertion failure in thread %lu in file %s line %lu\n",\
os_thread_pf(os_thread_get_curr_id()), IB__FILE__,\
(ulint)__LINE__);\
+ fprintf(stderr,\
+ "InnoDB: Failing assertion: " #EXPR);\
fprintf(stderr,\
- "InnoDB: We intentionally generate a memory trap.\n");\
+ "\nInnoDB: We intentionally generate a memory trap.\n");\
fprintf(stderr,\
"InnoDB: Send a detailed bug report to mysql@lists.mysql.com\n");\
ut_dbg_stop_threads = TRUE;\
diff --git a/innobase/lock/Makefile.am b/innobase/lock/Makefile.am
index f9e1b227f3c..549eb2604e3 100644
--- a/innobase/lock/Makefile.am
+++ b/innobase/lock/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = liblock.a
+noinst_LIBRARIES = liblock.a
liblock_a_SOURCES = lock0lock.c
diff --git a/innobase/lock/lock0lock.c b/innobase/lock/lock0lock.c
index d4329c4873c..74dc4aea515 100644
--- a/innobase/lock/lock0lock.c
+++ b/innobase/lock/lock0lock.c
@@ -3092,8 +3092,7 @@ lock_deadlock_recursive(
err_buf += strlen(err_buf);
err_buf += sprintf(err_buf,
- " LATEST DETECTED DEADLOCK:\n"
- "*** (1) TRANSACTION:\n");
+ "\n*** (1) TRANSACTION:\n");
trx_print(err_buf, wait_lock->trx);
err_buf += strlen(err_buf);
@@ -3935,24 +3934,15 @@ lock_print_info(
return;
}
-
- buf += sprintf(buf, "Trx id counter %lu %lu\n",
- ut_dulint_get_high(trx_sys->max_trx_id),
- ut_dulint_get_low(trx_sys->max_trx_id));
-
- buf += sprintf(buf,
- "Purge done for trx's n:o < %lu %lu undo n:o < %lu %lu\n",
- ut_dulint_get_high(purge_sys->purge_trx_no),
- ut_dulint_get_low(purge_sys->purge_trx_no),
- ut_dulint_get_high(purge_sys->purge_undo_no),
- ut_dulint_get_low(purge_sys->purge_undo_no));
lock_mutex_enter_kernel();
- buf += sprintf(buf,
- "Total number of lock structs in row lock hash table %lu\n",
- lock_get_n_rec_locks());
if (lock_deadlock_found) {
+
+ buf += sprintf(buf,
+"------------------------\n"
+"LATEST DETECTED DEADLOCK\n"
+"------------------------\n");
if ((ulint)(buf_end - buf)
< 100 + strlen(lock_latest_err_buf)) {
@@ -3973,6 +3963,26 @@ lock_print_info(
return;
}
+ buf += sprintf(buf,
+"------------\n"
+"TRANSACTIONS\n"
+"------------\n");
+
+ buf += sprintf(buf, "Trx id counter %lu %lu\n",
+ ut_dulint_get_high(trx_sys->max_trx_id),
+ ut_dulint_get_low(trx_sys->max_trx_id));
+
+ buf += sprintf(buf,
+ "Purge done for trx's n:o < %lu %lu undo n:o < %lu %lu\n",
+ ut_dulint_get_high(purge_sys->purge_trx_no),
+ ut_dulint_get_low(purge_sys->purge_trx_no),
+ ut_dulint_get_high(purge_sys->purge_undo_no),
+ ut_dulint_get_low(purge_sys->purge_undo_no));
+
+ buf += sprintf(buf,
+ "Total number of lock structs in row lock hash table %lu\n",
+ lock_get_n_rec_locks());
+
buf += sprintf(buf, "LIST OF TRANSACTIONS FOR EACH SESSION:\n");
/* First print info on non-active transactions */
diff --git a/innobase/log/Makefile.am b/innobase/log/Makefile.am
index 3910a25ab1a..2dbaf93e6d9 100644
--- a/innobase/log/Makefile.am
+++ b/innobase/log/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = liblog.a
+noinst_LIBRARIES = liblog.a
liblog_a_SOURCES = log0log.c log0recv.c
diff --git a/innobase/log/log0log.c b/innobase/log/log0log.c
index bdfce783a43..e717d897e41 100644
--- a/innobase/log/log0log.c
+++ b/innobase/log/log0log.c
@@ -178,7 +178,7 @@ loop:
/* Not enough free space, do a synchronous flush of the log
buffer */
- log_flush_up_to(ut_dulint_max, LOG_WAIT_ALL_GROUPS);
+ log_write_up_to(ut_dulint_max, LOG_WAIT_ALL_GROUPS, TRUE);
count++;
@@ -675,7 +675,9 @@ log_init(void)
log_sys->buf_next_to_write = 0;
- log_sys->flush_lsn = ut_dulint_zero;
+ log_sys->write_lsn = ut_dulint_zero;
+ log_sys->current_flush_lsn = ut_dulint_zero;
+ log_sys->flushed_to_disk_lsn = ut_dulint_zero;
log_sys->written_to_some_lsn = log_sys->lsn;
log_sys->written_to_all_lsn = log_sys->lsn;
@@ -867,7 +869,7 @@ log_group_check_flush_completion(
printf("Log flushed first to group %lu\n", group->id);
}
- log_sys->written_to_some_lsn = log_sys->flush_lsn;
+ log_sys->written_to_some_lsn = log_sys->write_lsn;
log_sys->one_flushed = TRUE;
return(LOG_UNLOCK_NONE_FLUSHED_LOCK);
@@ -896,15 +898,15 @@ log_sys_check_flush_completion(void)
if (log_sys->n_pending_writes == 0) {
- log_sys->written_to_all_lsn = log_sys->flush_lsn;
- log_sys->buf_next_to_write = log_sys->flush_end_offset;
+ log_sys->written_to_all_lsn = log_sys->write_lsn;
+ log_sys->buf_next_to_write = log_sys->write_end_offset;
- if (log_sys->flush_end_offset > log_sys->max_buf_free / 2) {
+ if (log_sys->write_end_offset > log_sys->max_buf_free / 2) {
/* Move the log buffer content to the start of the
buffer */
move_start = ut_calc_align_down(
- log_sys->flush_end_offset,
+ log_sys->write_end_offset,
OS_FILE_LOG_BLOCK_SIZE);
move_end = ut_calc_align(log_sys->buf_free,
OS_FILE_LOG_BLOCK_SIZE);
@@ -982,57 +984,6 @@ log_io_complete(
}
/**********************************************************
-Flushes the log files to the disk, using, for example, the Unix fsync.
-This function does the flush even if the user has set
-srv_flush_log_at_trx_commit = FALSE. */
-
-void
-log_flush_to_disk(void)
-/*===================*/
-{
- log_group_t* group;
-loop:
- mutex_enter(&(log_sys->mutex));
-
- if (log_sys->n_pending_writes > 0) {
- /* A log file write is running */
-
- mutex_exit(&(log_sys->mutex));
-
- /* Wait for the log file write to complete and try again */
-
- os_event_wait(log_sys->no_flush_event);
-
- goto loop;
- }
-
- group = UT_LIST_GET_FIRST(log_sys->log_groups);
-
- log_sys->n_pending_writes++;
- group->n_pending_writes++;
-
- os_event_reset(log_sys->no_flush_event);
- os_event_reset(log_sys->one_flushed_event);
-
- mutex_exit(&(log_sys->mutex));
-
- fil_flush(group->space_id);
-
- mutex_enter(&(log_sys->mutex));
-
- ut_a(group->n_pending_writes == 1);
- ut_a(log_sys->n_pending_writes == 1);
-
- group->n_pending_writes--;
- log_sys->n_pending_writes--;
-
- os_event_set(log_sys->no_flush_event);
- os_event_set(log_sys->one_flushed_event);
-
- mutex_exit(&(log_sys->mutex));
-}
-
-/**********************************************************
Writes a log file header to a log file space. */
static
void
@@ -1205,12 +1156,15 @@ by the transaction. If there is a flush running, it waits and checks if the
flush flushed enough. If not, starts a new flush. */
void
-log_flush_up_to(
+log_write_up_to(
/*============*/
dulint lsn, /* in: log sequence number up to which the log should
be written, ut_dulint_max if not specified */
- ulint wait) /* in: LOG_NO_WAIT, LOG_WAIT_ONE_GROUP,
+ ulint wait, /* in: LOG_NO_WAIT, LOG_WAIT_ONE_GROUP,
or LOG_WAIT_ALL_GROUPS */
+ ibool flush_to_disk)
+ /* in: TRUE if we want the written log also to be
+ flushed to disk */
{
log_group_t* group;
ulint start_offset;
@@ -1239,9 +1193,18 @@ loop:
mutex_enter(&(log_sys->mutex));
- if ((ut_dulint_cmp(log_sys->written_to_all_lsn, lsn) >= 0)
- || ((ut_dulint_cmp(log_sys->written_to_some_lsn, lsn) >= 0)
- && (wait != LOG_WAIT_ALL_GROUPS))) {
+ if (flush_to_disk
+ && ut_dulint_cmp(log_sys->flushed_to_disk_lsn, lsn) >= 0) {
+
+ mutex_exit(&(log_sys->mutex));
+
+ return;
+ }
+
+ if (!flush_to_disk
+ && (ut_dulint_cmp(log_sys->written_to_all_lsn, lsn) >= 0
+ || (ut_dulint_cmp(log_sys->written_to_some_lsn, lsn) >= 0
+ && wait != LOG_WAIT_ALL_GROUPS))) {
mutex_exit(&(log_sys->mutex));
@@ -1249,10 +1212,19 @@ loop:
}
if (log_sys->n_pending_writes > 0) {
- /* A flush is running */
+ /* A write (+ possibly flush to disk) is running */
+
+ if (flush_to_disk
+ && ut_dulint_cmp(log_sys->current_flush_lsn, lsn) >= 0) {
+ /* The write + flush will write enough: wait for it to
+ complete */
+
+ goto do_waits;
+ }
- if (ut_dulint_cmp(log_sys->flush_lsn, lsn) >= 0) {
- /* The flush will flush enough: wait for it to
+ if (!flush_to_disk
+ && ut_dulint_cmp(log_sys->write_lsn, lsn) >= 0) {
+ /* The write will write enough: wait for it to
complete */
goto do_waits;
@@ -1260,16 +1232,17 @@ loop:
mutex_exit(&(log_sys->mutex));
- /* Wait for the flush to complete and try to start a new
- flush */
+ /* Wait for the write to complete and try to start a new
+ write */
os_event_wait(log_sys->no_flush_event);
goto loop;
}
- if (log_sys->buf_free == log_sys->buf_next_to_write) {
- /* Nothing to flush */
+ if (!flush_to_disk
+ && log_sys->buf_free == log_sys->buf_next_to_write) {
+ /* Nothing to write and no flush to disk requested */
mutex_exit(&(log_sys->mutex));
@@ -1277,7 +1250,7 @@ loop:
}
if (log_debug_writes) {
- printf("Flushing log from %lu %lu up to lsn %lu %lu\n",
+ printf("Writing log from %lu %lu up to lsn %lu %lu\n",
ut_dulint_get_high(log_sys->written_to_all_lsn),
ut_dulint_get_low(log_sys->written_to_all_lsn),
ut_dulint_get_high(log_sys->lsn),
@@ -1301,7 +1274,12 @@ loop:
ut_ad(area_end - area_start > 0);
- log_sys->flush_lsn = log_sys->lsn;
+ log_sys->write_lsn = log_sys->lsn;
+
+ if (flush_to_disk) {
+ log_sys->current_flush_lsn = log_sys->lsn;
+ }
+
log_sys->one_flushed = FALSE;
log_block_set_flush_bit(log_sys->buf + area_start, TRUE);
@@ -1318,10 +1296,12 @@ loop:
OS_FILE_LOG_BLOCK_SIZE);
log_sys->buf_free += OS_FILE_LOG_BLOCK_SIZE;
- log_sys->flush_end_offset = log_sys->buf_free;
+ log_sys->write_end_offset = log_sys->buf_free;
group = UT_LIST_GET_FIRST(log_sys->log_groups);
+ /* Do the write to the log files */
+
while (group) {
log_group_write_buf(LOG_FLUSH, group,
log_sys->buf + area_start,
@@ -1330,20 +1310,25 @@ loop:
OS_FILE_LOG_BLOCK_SIZE),
start_offset - area_start);
- log_group_set_fields(group, log_sys->flush_lsn);
+ log_group_set_fields(group, log_sys->write_lsn);
group = UT_LIST_GET_NEXT(log_groups, group);
}
mutex_exit(&(log_sys->mutex));
- if (srv_unix_file_flush_method != SRV_UNIX_O_DSYNC
- && srv_unix_file_flush_method != SRV_UNIX_NOSYNC
- && srv_flush_log_at_trx_commit != 2) {
+ if (srv_unix_file_flush_method == SRV_UNIX_O_DSYNC) {
+ /* O_DSYNC means the OS did not buffer the log file at all:
+ so we have also flushed to disk what we have written */
+
+ log_sys->flushed_to_disk_lsn = log_sys->write_lsn;
+
+ } else if (flush_to_disk) {
group = UT_LIST_GET_FIRST(log_sys->log_groups);
fil_flush(group->space_id);
+ log_sys->flushed_to_disk_lsn = log_sys->write_lsn;
}
mutex_enter(&(log_sys->mutex));
@@ -1403,7 +1388,7 @@ log_flush_margin(void)
mutex_exit(&(log->mutex));
if (do_flush) {
- log_flush_up_to(ut_dulint_max, LOG_NO_WAIT);
+ log_write_up_to(ut_dulint_max, LOG_NO_WAIT, FALSE);
}
}
@@ -1555,7 +1540,8 @@ log_group_checkpoint(
buf = group->checkpoint_buf;
mach_write_to_8(buf + LOG_CHECKPOINT_NO, log_sys->next_checkpoint_no);
- mach_write_to_8(buf + LOG_CHECKPOINT_LSN, log_sys->next_checkpoint_lsn);
+ mach_write_to_8(buf + LOG_CHECKPOINT_LSN,
+ log_sys->next_checkpoint_lsn);
mach_write_to_4(buf + LOG_CHECKPOINT_OFFSET,
log_group_calc_lsn_offset(
@@ -1664,8 +1650,10 @@ log_reset_first_header_and_checkpoint(
lsn = ut_dulint_add(start, LOG_BLOCK_HDR_SIZE);
/* Write the label of ibbackup --restore */
- sprintf((char*) hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP, "ibbackup ");
- ut_sprintf_timestamp((char*) hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP
+ sprintf((char*) hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP,
+ "ibbackup ");
+ ut_sprintf_timestamp(
+ (char*) hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP
+ strlen("ibbackup "));
buf = hdr_buf + LOG_CHECKPOINT_1;
@@ -1773,7 +1761,7 @@ log_checkpoint(
write-ahead-logging algorithm ensures that the log has been flushed
up to oldest_lsn. */
- log_flush_up_to(oldest_lsn, LOG_WAIT_ALL_GROUPS);
+ log_write_up_to(oldest_lsn, LOG_WAIT_ALL_GROUPS, TRUE);
mutex_enter(&(log_sys->mutex));
@@ -2466,7 +2454,7 @@ loop:
mutex_exit(&(log_sys->mutex));
- log_flush_up_to(limit_lsn, LOG_WAIT_ALL_GROUPS);
+ log_write_up_to(limit_lsn, LOG_WAIT_ALL_GROUPS, TRUE);
calc_new_limit = FALSE;
@@ -3104,8 +3092,8 @@ log_print(
"Last checkpoint at %lu %lu\n",
ut_dulint_get_high(log_sys->lsn),
ut_dulint_get_low(log_sys->lsn),
- ut_dulint_get_high(log_sys->written_to_some_lsn),
- ut_dulint_get_low(log_sys->written_to_some_lsn),
+ ut_dulint_get_high(log_sys->flushed_to_disk_lsn),
+ ut_dulint_get_low(log_sys->flushed_to_disk_lsn),
ut_dulint_get_high(log_sys->last_checkpoint_lsn),
ut_dulint_get_low(log_sys->last_checkpoint_lsn));
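
Because written_to_all_lsn and flushed_to_disk_lsn now advance independently, a
caller that needs durability must compare against the latter, exactly as the
early-return check at the top of log_write_up_to() does. A small illustrative
helper (not part of the patch):

static ibool
example_log_is_durable_up_to(dulint lsn)
{
	ibool	durable;

	mutex_enter(&(log_sys->mutex));

	/* The log is durable up to lsn only if it has been both written
	and flushed to disk at least that far */

	durable = (ut_dulint_cmp(log_sys->flushed_to_disk_lsn, lsn) >= 0);

	mutex_exit(&(log_sys->mutex));

	return(durable);
}
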
diff --git a/innobase/log/log0recv.c b/innobase/log/log0recv.c
index 3945b47933d..4efe4e7b23d 100644
--- a/innobase/log/log0recv.c
+++ b/innobase/log/log0recv.c
@@ -1833,7 +1833,12 @@ recv_report_corrupt_log(
"InnoDB: WARNING: the log file may have been corrupt and it\n"
"InnoDB: is possible that the log scan did not proceed\n"
"InnoDB: far enough in recovery! Please run CHECK TABLE\n"
- "InnoDB: on your InnoDB tables to check that they are ok!\n");
+ "InnoDB: on your InnoDB tables to check that they are ok!\n"
+ "InnoDB: If mysqld crashes after this recovery, look at\n"
+ "InnoDB: section 6.1 of http://www.innodb.com/ibman.html\n"
+ "InnoDB: about forcing recovery.\n");
+
+ fflush(stderr);
}
/***********************************************************
@@ -2470,7 +2475,7 @@ recv_recovery_from_checkpoint_start(
log_hdr_buf, max_cp_group);
if (0 == ut_memcmp(log_hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP,
- "ibbackup", ut_strlen("ibbackup"))) {
+ (byte*)"ibbackup", ut_strlen((char*)"ibbackup"))) {
/* This log file was created by ibbackup --restore: print
a note to the user about it */
@@ -2481,7 +2486,7 @@ recv_recovery_from_checkpoint_start(
/* Wipe over the label now */
ut_memcpy(log_hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP,
- " ", 4);
+ (char*)" ", 4);
/* Write to the log file to wipe over the label */
fil_io(OS_FILE_WRITE | OS_FILE_LOG, TRUE,
max_cp_group->space_id,
diff --git a/innobase/mach/Makefile.am b/innobase/mach/Makefile.am
index 8195831e92e..ce827c8033f 100644
--- a/innobase/mach/Makefile.am
+++ b/innobase/mach/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libmach.a
+noinst_LIBRARIES = libmach.a
libmach_a_SOURCES = mach0data.c
diff --git a/innobase/mem/Makefile.am b/innobase/mem/Makefile.am
index 84f642e4469..10b7771b580 100644
--- a/innobase/mem/Makefile.am
+++ b/innobase/mem/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libmem.a
+noinst_LIBRARIES = libmem.a
libmem_a_SOURCES = mem0mem.c mem0pool.c
diff --git a/innobase/mtr/Makefile.am b/innobase/mtr/Makefile.am
index 972dcaca80e..1e93a34ce23 100644
--- a/innobase/mtr/Makefile.am
+++ b/innobase/mtr/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libmtr.a
+noinst_LIBRARIES = libmtr.a
libmtr_a_SOURCES = mtr0mtr.c mtr0log.c
diff --git a/innobase/odbc/Makefile.am b/innobase/odbc/Makefile.am
index d1a47bd8c18..f4282ba3907 100644
--- a/innobase/odbc/Makefile.am
+++ b/innobase/odbc/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libodbc.a
+noinst_LIBRARIES = libodbc.a
libodbc_a_SOURCES = odbc0odbc.c
diff --git a/innobase/os/Makefile.am b/innobase/os/Makefile.am
index b06670bc703..132ce07c83b 100644
--- a/innobase/os/Makefile.am
+++ b/innobase/os/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libos.a
+noinst_LIBRARIES = libos.a
libos_a_SOURCES = os0proc.c os0shm.c os0sync.c os0thread.c os0file.c
diff --git a/innobase/os/os0file.c b/innobase/os/os0file.c
index 5ffcabf6fe6..53224eb59c5 100644
--- a/innobase/os/os0file.c
+++ b/innobase/os/os0file.c
@@ -214,9 +214,14 @@ os_file_get_last_error(void)
"InnoDB: the directory. It may also be you have created a subdirectory\n"
"InnoDB: of the same name as a data file.\n");
} else {
- fprintf(stderr,
- "InnoDB: Look from section 13.2 at http://www.innodb.com/ibman.html\n"
- "InnoDB: what the error number means.\n");
+ if (strerror((int)err) != NULL) {
+ fprintf(stderr,
+ "InnoDB: Error number %lu means '%s'.\n", err, strerror((int)err));
+ }
+
+ fprintf(stderr,
+ "InnoDB: See also section 13.2 at http://www.innodb.com/ibman.html\n"
+ "InnoDB: about operating system error numbers.\n");
}
}
@@ -252,9 +257,14 @@ os_file_get_last_error(void)
"InnoDB: The error means mysqld does not have the access rights to\n"
"InnoDB: the directory.\n");
} else {
- fprintf(stderr,
- "InnoDB: Look from section 13.2 at http://www.innodb.com/ibman.html\n"
- "InnoDB: what the error number means or use the perror program of MySQL.\n");
+ if (strerror((int)err) != NULL) {
+ fprintf(stderr,
+ "InnoDB: Error number %lu means '%s'.\n", err, strerror((int)err));
+ }
+
+ fprintf(stderr,
+ "InnoDB: See also section 13.2 at http://www.innodb.com/ibman.html\n"
+ "InnoDB: about operating system error numbers.\n");
}
}
@@ -511,10 +521,11 @@ try_again:
}
#endif
#ifdef UNIV_NON_BUFFERED_IO
- if (type == OS_LOG_FILE && srv_flush_log_at_trx_commit == 2) {
+ if (type == OS_LOG_FILE) {
/* Do not use unbuffered i/o to log files because
- value 2 denotes that we do not flush the log at every
- commit, but only once per second */
+ to allow group commit to work when MySQL binlogging
+ is used, we must separate the log file write and the
+ log file flush to disk. */
} else {
if (srv_win_file_flush_method ==
SRV_WIN_IO_UNBUFFERED) {
@@ -741,7 +752,12 @@ os_file_set_size(
offset = 0;
low = (ib_longlong)size + (((ib_longlong)size_high) << 32);
+
+ if (low >= (ib_longlong)(100 * 1024 * 1024)) {
+ fprintf(stderr, "InnoDB: Progress in MB:");
+ }
+
while (offset < low) {
if (low - offset < UNIV_PAGE_SIZE * 512) {
n_bytes = (ulint)(low - offset);
@@ -757,9 +773,24 @@ os_file_set_size(
ut_free(buf2);
goto error_handling;
}
+
+ /* Print a progress message for each 100 MB written */
+ if ((offset + n_bytes) / (ib_longlong)(100 * 1024 * 1024)
+ != offset / (ib_longlong)(100 * 1024 * 1024)) {
+
+ fprintf(stderr, " %lu00",
+ (ulint)((offset + n_bytes)
+ / (ib_longlong)(100 * 1024 * 1024)));
+ }
+
offset += n_bytes;
}
+ if (low >= (ib_longlong)(100 * 1024 * 1024)) {
+
+ fprintf(stderr, "\n");
+ }
+
ut_free(buf2);
ret = os_file_flush(file);
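
The progress messages above are emitted only when a written chunk crosses a
100 MB boundary. A standalone sketch of that arithmetic (the file and chunk
sizes are made up for the example):

#include <stdio.h>

int
main(void)
{
	const long long	mb100	= 100 * 1024 * 1024;
	long long	low	= 250 * 1024 * 1024;	/* pretend 250 MB file */
	long long	n_bytes	= 16 * 1024 * 1024;	/* pretend 16 MB chunks */
	long long	offset	= 0;

	fprintf(stderr, "InnoDB: Progress in MB:");

	while (offset < low) {
		if (low - offset < n_bytes) {
			n_bytes = low - offset;
		}

		/* Print only when this chunk crosses a 100 MB boundary:
		the sizes above produce " 100 200" */

		if ((offset + n_bytes) / mb100 != offset / mb100) {
			fprintf(stderr, " %lu00",
				(unsigned long)((offset + n_bytes) / mb100));
		}

		offset += n_bytes;
	}

	fprintf(stderr, "\n");

	return(0);
}
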
diff --git a/innobase/os/os0sync.c b/innobase/os/os0sync.c
index a9127e6310a..407b280f805 100644
--- a/innobase/os/os0sync.c
+++ b/innobase/os/os0sync.c
@@ -68,9 +68,10 @@ os_event_create(
os_fast_mutex_init(&(event->os_mutex));
#if defined(UNIV_HOTBACKUP) && defined(UNIV_HPUX10)
- pthread_cond_init(&(event->cond_var), pthread_condattr_default);
+ ut_a(0 == pthread_cond_init(&(event->cond_var),
+ pthread_condattr_default));
#else
- pthread_cond_init(&(event->cond_var), NULL);
+ ut_a(0 == pthread_cond_init(&(event->cond_var), NULL));
#endif
event->is_set = FALSE;
@@ -130,7 +131,7 @@ os_event_set(
/* Do nothing */
} else {
event->is_set = TRUE;
- pthread_cond_broadcast(&(event->cond_var));
+ ut_a(0 == pthread_cond_broadcast(&(event->cond_var)));
}
os_fast_mutex_unlock(&(event->os_mutex));
@@ -182,7 +183,7 @@ os_event_free(
ut_a(event);
os_fast_mutex_free(&(event->os_mutex));
- pthread_cond_destroy(&(event->cond_var));
+ ut_a(0 == pthread_cond_destroy(&(event->cond_var)));
ut_free(event);
#endif
@@ -446,9 +447,9 @@ os_fast_mutex_init(
InitializeCriticalSection((LPCRITICAL_SECTION) fast_mutex);
#else
#if defined(UNIV_HOTBACKUP) && defined(UNIV_HPUX10)
- pthread_mutex_init(fast_mutex, pthread_mutexattr_default);
+ ut_a(0 == pthread_mutex_init(fast_mutex, pthread_mutexattr_default));
#else
- pthread_mutex_init(fast_mutex, MY_MUTEX_INIT_FAST);
+ ut_a(0 == pthread_mutex_init(fast_mutex, MY_MUTEX_INIT_FAST));
#endif
#endif
}
@@ -495,10 +496,7 @@ os_fast_mutex_free(
ut_a(fast_mutex);
DeleteCriticalSection((LPCRITICAL_SECTION) fast_mutex);
-#elif defined(__NETWARE__) || defined(SAFE_MUTEX_DETECT_DESTROY)
- pthread_mutex_destroy(fast_mutex);
#else
- UT_NOT_USED(fast_mutex);
-
+ ut_a(0 == pthread_mutex_destroy(fast_mutex));
#endif
}
diff --git a/innobase/page/Makefile.am b/innobase/page/Makefile.am
index 85fe585a633..2e260787438 100644
--- a/innobase/page/Makefile.am
+++ b/innobase/page/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libpage.a
+noinst_LIBRARIES = libpage.a
libpage_a_SOURCES = page0page.c page0cur.c
diff --git a/innobase/pars/Makefile.am b/innobase/pars/Makefile.am
index e5611f9dfc6..2356f330486 100644
--- a/innobase/pars/Makefile.am
+++ b/innobase/pars/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libpars.a
+noinst_LIBRARIES = libpars.a
noinst_HEADERS = pars0grm.h
diff --git a/innobase/que/Makefile.am b/innobase/que/Makefile.am
index b74d4dbf6a0..d9c046b4f4c 100644
--- a/innobase/que/Makefile.am
+++ b/innobase/que/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libque.a
+noinst_LIBRARIES = libque.a
libque_a_SOURCES = que0que.c
diff --git a/innobase/read/Makefile.am b/innobase/read/Makefile.am
index 16224f4f7f4..7edf2a5a2e1 100644
--- a/innobase/read/Makefile.am
+++ b/innobase/read/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libread.a
+noinst_LIBRARIES = libread.a
libread_a_SOURCES = read0read.c
diff --git a/innobase/rem/Makefile.am b/innobase/rem/Makefile.am
index ef0cde9bd7a..e2b2fdaf669 100644
--- a/innobase/rem/Makefile.am
+++ b/innobase/rem/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = librem.a
+noinst_LIBRARIES = librem.a
librem_a_SOURCES = rem0rec.c rem0cmp.c
diff --git a/innobase/row/Makefile.am b/innobase/row/Makefile.am
index e4fcbe8f715..bd09f9a237d 100644
--- a/innobase/row/Makefile.am
+++ b/innobase/row/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = librow.a
+noinst_LIBRARIES = librow.a
librow_a_SOURCES = row0ins.c row0mysql.c row0purge.c row0row.c row0sel.c\
row0uins.c row0umod.c row0undo.c row0upd.c row0vers.c
diff --git a/innobase/row/row0ins.c b/innobase/row/row0ins.c
index 990ef99b2a4..3af9e1b752b 100644
--- a/innobase/row/row0ins.c
+++ b/innobase/row/row0ins.c
@@ -323,7 +323,7 @@ row_ins_clust_index_entry_by_modify(
/*************************************************************************
Returns TRUE if in a cascaded update/delete an ancestor node of node
-updates table. */
+updates (not DELETE, but UPDATE) table. */
static
ibool
row_ins_cascade_ancestor_updates_table(
@@ -341,7 +341,7 @@ row_ins_cascade_ancestor_updates_table(
upd_node = parent;
- if (upd_node->table == table) {
+ if (upd_node->table == table && upd_node->is_delete == FALSE) {
return(TRUE);
}
@@ -438,6 +438,111 @@ row_ins_cascade_calc_update_vec(
}
/*************************************************************************
+Reports a foreign key error associated with an update or a delete of a
+parent table index entry. */
+static
+void
+row_ins_foreign_report_err(
+/*=======================*/
+ char* errstr, /* in: error string from the viewpoint
+ of the parent table */
+ que_thr_t* thr, /* in: query thread whose run_node
+ is an update node */
+ dict_foreign_t* foreign, /* in: foreign key constraint */
+ rec_t* rec, /* in: a matching index record in the
+ child table */
+ dtuple_t* entry) /* in: index entry in the parent
+ table */
+{
+ char* buf = dict_foreign_err_buf;
+
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf), " Transaction:\n");
+ trx_print(buf + strlen(buf), thr_get_trx(thr));
+
+ sprintf(buf + strlen(buf),
+"Foreign key constraint fails for table %.500s:\n",
+ foreign->foreign_table_name);
+ dict_print_info_on_foreign_key_in_create_format(
+ foreign, buf + strlen(buf));
+ sprintf(buf + strlen(buf), "\n%s", errstr);
+ sprintf(buf + strlen(buf),
+" in parent table, in index %.500s tuple:\n",
+ foreign->referenced_index->name);
+ if (entry) {
+ dtuple_sprintf(buf + strlen(buf), 1000, entry);
+ }
+ sprintf(buf + strlen(buf),
+"\nBut in child table %.500s, in index %.500s, there is a record:\n",
+ foreign->foreign_table_name, foreign->foreign_index->name);
+ if (rec) {
+ rec_sprintf(buf + strlen(buf), 1000, rec);
+ }
+ sprintf(buf + strlen(buf), "\n");
+
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+
+ mutex_exit(&dict_foreign_err_mutex);
+}
+
+/*************************************************************************
+Reports a foreign key error to dict_foreign_err_buf when we are trying
+to add an index entry to a child table. Note that the adding may be the result
+of an update, too. */
+static
+void
+row_ins_foreign_report_add_err(
+/*===========================*/
+ que_thr_t* thr, /* in: query thread whose run_node
+ is an insert node */
+ dict_foreign_t* foreign, /* in: foreign key constraint */
+ rec_t* rec, /* in: a record in the parent table:
+ it does not match entry because we
+ have an error! */
+ dtuple_t* entry) /* in: index entry to insert in the
+ child table */
+{
+ char* buf = dict_foreign_err_buf;
+
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf), " Transaction:\n");
+ trx_print(buf + strlen(buf), thr_get_trx(thr));
+ sprintf(buf + strlen(buf),
+"Foreign key constraint fails for table %.500s:\n",
+ foreign->foreign_table_name);
+ dict_print_info_on_foreign_key_in_create_format(
+ foreign, buf + strlen(buf));
+ sprintf(buf + strlen(buf),
+"\nTrying to add in child table, in index %.500s tuple:\n",
+ foreign->foreign_index->name);
+ if (entry) {
+ dtuple_sprintf(buf + strlen(buf), 1000, entry);
+ }
+ sprintf(buf + strlen(buf),
+"\nBut in parent table %.500s, in index %.500s,\n"
+"the closest match we can find is record:\n",
+ foreign->referenced_table_name,
+ foreign->referenced_index->name);
+ if (rec && page_rec_is_supremum(rec)) {
+ /* If the cursor ended on a supremum record, it is better
+ to report the previous record in the error message, so that
+ the user gets a more descriptive error message. */
+ rec = page_rec_get_prev(rec);
+ }
+
+ if (rec) {
+ rec_sprintf(buf + strlen(buf), 1000, rec);
+ }
+ sprintf(buf + strlen(buf), "\n");
+
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+
+ mutex_exit(&dict_foreign_err_mutex);
+}
+
+/*************************************************************************
Perform referential actions or checks when a parent row is deleted or updated
and the constraint had an ON DELETE or ON UPDATE condition which was not
RESTRICT. */
@@ -453,6 +558,8 @@ row_ins_foreign_check_on_constraint(
type is != 0 */
btr_pcur_t* pcur, /* in: cursor placed on a matching
index record in the child table */
+ dtuple_t* entry, /* in: index entry in the parent
+ table */
mtr_t* mtr) /* in: mtr holding the latch of pcur
page */
{
@@ -506,6 +613,10 @@ row_ins_foreign_check_on_constraint(
return(DB_SUCCESS);
}
+ row_ins_foreign_report_err((char*)"Trying to delete",
+ thr, foreign,
+ btr_pcur_get_rec(pcur), entry);
+
return(DB_ROW_IS_REFERENCED);
}
@@ -523,6 +634,10 @@ row_ins_foreign_check_on_constraint(
return(DB_SUCCESS);
}
+ row_ins_foreign_report_err((char*)"Trying to update",
+ thr, foreign,
+ btr_pcur_get_rec(pcur), entry);
+
return(DB_ROW_IS_REFERENCED);
}
@@ -563,14 +678,13 @@ row_ins_foreign_check_on_constraint(
}
}
- /* We do not allow cyclic cascaded updating of the same
- table. Check that we are not updating the same table which
- is already being modified in this cascade chain. We have to
- check this because the modification of the indexes of a
- 'parent' table may still be incomplete, and we must avoid
- seeing the indexes of the parent table in an inconsistent
- state! In this way we also prevent possible infinite
- update loops caused by cyclic cascaded updates. */
+ /* We do not allow cyclic cascaded updating (DELETE is allowed,
+ but not UPDATE) of the same table, as this can lead to an infinite
+ cycle. Check that we are not updating the same table which is
+ already being modified in this cascade chain. We have to check
+ this also because the modification of the indexes of a 'parent'
+ table may still be incomplete, and we must avoid seeing the indexes
+ of the parent table in an inconsistent state! */
if (!cascade->is_delete
&& row_ins_cascade_ancestor_updates_table(cascade, table)) {
@@ -580,6 +694,10 @@ row_ins_foreign_check_on_constraint(
err = DB_ROW_IS_REFERENCED;
+ row_ins_foreign_report_err(
+(char*)"Trying an update, possibly causing a cyclic cascaded update\n"
+"in the child table,", thr, foreign, btr_pcur_get_rec(pcur), entry);
+
goto nonstandard_exit_func;
}
@@ -809,11 +927,10 @@ row_ins_check_foreign_constraint(
dictionary cache if they exist at all */
dict_table_t* table, /* in: if check_ref is TRUE, then the foreign
table, else the referenced table */
- dict_index_t* index __attribute__((unused)),/* in: index in table */
dtuple_t* entry, /* in: index entry for index */
que_thr_t* thr) /* in: query thread */
{
- upd_node_t* upd_node;
+ upd_node_t* upd_node;
dict_table_t* check_table;
dict_index_t* check_index;
ulint n_fields_cmp;
@@ -824,6 +941,7 @@ row_ins_check_foreign_constraint(
int cmp;
ulint err;
ulint i;
+ char* buf = dict_foreign_err_buf;
mtr_t mtr;
run_again:
@@ -884,6 +1002,25 @@ run_again:
if (check_table == NULL) {
if (check_ref) {
+ mutex_enter(&dict_foreign_err_mutex);
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf), " Transaction:\n");
+ trx_print(buf + strlen(buf), thr_get_trx(thr));
+ sprintf(buf + strlen(buf),
+"Foreign key constraint fails for table %.500s:\n",
+ foreign->foreign_table_name);
+ dict_print_info_on_foreign_key_in_create_format(
+ foreign, buf + strlen(buf));
+ sprintf(buf + strlen(buf),
+"\nTrying to add to index %.500s tuple:\n", foreign->foreign_index->name);
+ dtuple_sprintf(buf + strlen(buf), 1000, entry);
+ sprintf(buf + strlen(buf),
+"\nBut the parent table %.500s does not currently exist!\n",
+ foreign->referenced_table_name);
+
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+ mutex_exit(&dict_foreign_err_mutex);
+
return(DB_NO_REFERENCED_ROW);
}
@@ -949,7 +1086,8 @@ run_again:
if (cmp == 0) {
if (rec_get_deleted_flag(rec)) {
- err = row_ins_set_shared_rec_lock(LOCK_ORDINARY,
+ err = row_ins_set_shared_rec_lock(
+ LOCK_ORDINARY,
rec, check_index, thr);
if (err != DB_SUCCESS) {
@@ -989,13 +1127,17 @@ run_again:
err =
row_ins_foreign_check_on_constraint(
- thr, foreign, &pcur, &mtr);
-
+ thr, foreign, &pcur, entry,
+ &mtr);
if (err != DB_SUCCESS) {
break;
}
} else {
+ row_ins_foreign_report_err(
+ (char*)"Trying to delete or update",
+ thr, foreign, rec, entry);
+
err = DB_ROW_IS_REFERENCED;
break;
}
@@ -1012,6 +1154,8 @@ run_again:
if (check_ref) {
err = DB_NO_REFERENCED_ROW;
+ row_ins_foreign_report_add_err(
+ thr, foreign, rec, entry);
} else {
err = DB_SUCCESS;
}
@@ -1025,6 +1169,9 @@ next_rec:
if (!moved) {
if (check_ref) {
+ rec = btr_pcur_get_rec(&pcur);
+ row_ins_foreign_report_add_err(
+ thr, foreign, rec, entry);
err = DB_NO_REFERENCED_ROW;
} else {
err = DB_SUCCESS;
@@ -1100,7 +1247,7 @@ row_ins_check_foreign_constraints(
}
err = row_ins_check_foreign_constraint(TRUE, foreign,
- table, index, entry, thr);
+ table, entry, thr);
if (got_s_lock) {
row_mysql_unfreeze_data_dictionary(trx);
}
@@ -1116,6 +1263,48 @@ row_ins_check_foreign_constraints(
return(DB_SUCCESS);
}
+/*************************************************************************
+Reports a UNIQUE key error to dict_unique_err_buf so that SHOW INNODB
+STATUS can print it. */
+static
+void
+row_ins_unique_report_err(
+/*======================*/
+ que_thr_t* thr, /* in: query thread */
+ rec_t* rec, /* in: a record in the index */
+ dtuple_t* entry, /* in: index entry to insert in the index */
+ dict_index_t* index) /* in: index */
+{
+ char* buf = dict_unique_err_buf;
+
+ /* The foreign err mutex protects also dict_unique_err_buf */
+
+ mutex_enter(&dict_foreign_err_mutex);
+
+ ut_sprintf_timestamp(buf);
+ sprintf(buf + strlen(buf), " Transaction:\n");
+ trx_print(buf + strlen(buf), thr_get_trx(thr));
+
+ sprintf(buf + strlen(buf),
+"Unique key constraint fails for table %.500s.\n", index->table_name);
+ sprintf(buf + strlen(buf),
+"Trying to add in index %.500s (%lu fields unique) tuple:\n", index->name,
+ dict_index_get_n_unique(index));
+
+ dtuple_sprintf(buf + strlen(buf), 1000, entry);
+
+ sprintf(buf + strlen(buf),
+"\nBut there is already a record:\n");
+
+ rec_sprintf(buf + strlen(buf), 1000, rec);
+
+ sprintf(buf + strlen(buf), "\n");
+
+ ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);
+
+ mutex_exit(&dict_foreign_err_mutex);
+}
+
/*******************************************************************
Checks if a unique key violation to rec would occur at the index entry
insert. */
@@ -1246,10 +1435,8 @@ row_ins_scan_sec_index_for_duplicate(
if (cmp == 0) {
if (row_ins_dupl_error_with_rec(rec, entry, index)) {
- /* printf("Duplicate key in index %s\n",
- index->name);
- dtuple_print(entry); */
-
+ row_ins_unique_report_err(thr, rec, entry,
+ index);
err = DB_DUPLICATE_KEY;
thr_get_trx(thr)->error_info = index;
@@ -1344,7 +1531,8 @@ row_ins_duplicate_error_in_clust(
if (row_ins_dupl_error_with_rec(rec, entry,
cursor->index)) {
trx->error_info = cursor->index;
-
+ row_ins_unique_report_err(thr, rec, entry,
+ cursor->index);
return(DB_DUPLICATE_KEY);
}
}
@@ -1368,6 +1556,8 @@ row_ins_duplicate_error_in_clust(
cursor->index)) {
trx->error_info = cursor->index;
+ row_ins_unique_report_err(thr, rec, entry,
+ cursor->index);
return(DB_DUPLICATE_KEY);
}
}
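
All three report helpers added above follow the same discipline, which any
future writer to these buffers should copy: take dict_foreign_err_mutex,
restart the buffer with a timestamp, append with sprintf(buf + strlen(buf),
...), and assert the DICT_FOREIGN_ERR_BUF_LEN bound before releasing the mutex.
A condensed skeleton (illustrative only, not a fourth helper in the patch):

static void
example_report_skeleton(que_thr_t* thr, dict_foreign_t* foreign)
{
	char*	buf = dict_foreign_err_buf;

	mutex_enter(&dict_foreign_err_mutex);

	ut_sprintf_timestamp(buf);		/* overwrites from the start */
	sprintf(buf + strlen(buf), " Transaction:\n");
	trx_print(buf + strlen(buf), thr_get_trx(thr));
	dict_print_info_on_foreign_key_in_create_format(
				foreign, buf + strlen(buf));

	ut_a(strlen(buf) < DICT_FOREIGN_ERR_BUF_LEN);

	mutex_exit(&dict_foreign_err_mutex);
}
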
diff --git a/innobase/row/row0mysql.c b/innobase/row/row0mysql.c
index 1bb33551da8..6d1f6f6e40e 100644
--- a/innobase/row/row0mysql.c
+++ b/innobase/row/row0mysql.c
@@ -6,7 +6,7 @@ Contains also create table and other data dictionary operations.
Created 9/17/2000 Heikki Tuuri
*******************************************************/
-
+
#include "row0mysql.h"
#ifdef UNIV_NONINL
@@ -289,6 +289,17 @@ handle_new_error:
"InnoDB: my.cnf and restart the database.\n");
exit(1);
+ } else if (err == DB_CORRUPTION) {
+
+ fprintf(stderr,
+ "InnoDB: We detected index corruption in an InnoDB type table.\n"
+ "InnoDB: You have to dump + drop + reimport the table or, in\n"
+ "InnoDB: a case of widespread corruption, dump all InnoDB\n"
+ "InnoDB: tables and recreate the whole InnoDB tablespace.\n"
+ "InnoDB: If the mysqld server crashes after the startup or when\n"
+ "InnoDB: you dump the tables, look at section 6.1 of\n"
+ "InnoDB: http://www.innodb.com/ibman.html for help.\n");
+
} else {
fprintf(stderr, "InnoDB: unknown error code %lu\n", err);
ut_a(0);
@@ -337,6 +348,9 @@ row_create_prebuilt(
prebuilt->mysql_has_locked = FALSE;
prebuilt->index = NULL;
+
+ prebuilt->used_in_HANDLER = FALSE;
+
prebuilt->n_template = 0;
prebuilt->mysql_template = NULL;
@@ -1169,7 +1183,7 @@ row_mysql_recover_tmp_table(
return(DB_ERROR);
}
- if (0 == ut_memcmp(ptr, "/rsql", 5)) {
+ if (0 == ut_memcmp(ptr, (char*)"/rsql", 5)) {
ptr++;
*ptr = '#';
@@ -1293,10 +1307,10 @@ row_create_table_for_mysql(
}
trx->op_info = (char *) "creating table";
-
- if (0 == ut_strcmp(table->name, "mysql/host")
- || 0 == ut_strcmp(table->name, "mysql/user")
- || 0 == ut_strcmp(table->name, "mysql/db")) {
+
+ if (0 == ut_strcmp(table->name, (char*)"mysql/host")
+ || 0 == ut_strcmp(table->name, (char*)"mysql/user")
+ || 0 == ut_strcmp(table->name, (char*)"mysql/db")) {
fprintf(stderr,
"InnoDB: Error: trying to create a MySQL system table %s of type InnoDB.\n"
@@ -1316,7 +1330,7 @@ row_create_table_for_mysql(
if (namelen >= keywordlen
&& 0 == ut_memcmp(table->name + namelen - keywordlen,
- "_recover_innodb_tmp_table", keywordlen)) {
+ (char*)"_recover_innodb_tmp_table", keywordlen)) {
/* MySQL prevents accessing of tables whose name begins
with #sql, that is temporary tables. If mysqld crashes in
@@ -1384,7 +1398,7 @@ row_create_table_for_mysql(
if (namelen >= keywordlen
&& 0 == ut_memcmp(table->name + namelen - keywordlen,
- "innodb_mem_validate", keywordlen)) {
+ (char*)"innodb_mem_validate", keywordlen)) {
/* We define here a debugging feature intended for
developers */
@@ -1494,7 +1508,7 @@ row_create_index_for_mysql(
if (namelen >= keywordlen
&& 0 == ut_memcmp(
index->table_name + namelen - keywordlen,
- "_recover_innodb_tmp_table", keywordlen)) {
+ (char*)"_recover_innodb_tmp_table", keywordlen)) {
return(DB_SUCCESS);
}
@@ -1599,7 +1613,7 @@ row_table_add_foreign_constraints(
if (namelen >= keywordlen
&& 0 == ut_memcmp(
name + namelen - keywordlen,
- "_recover_innodb_tmp_table", keywordlen)) {
+ (char*)"_recover_innodb_tmp_table", keywordlen)) {
return(DB_SUCCESS);
}
@@ -1663,7 +1677,7 @@ row_drop_table_for_mysql_in_background(
the InnoDB data dictionary get out-of-sync if the user runs
with innodb_flush_log_at_trx_commit = 0 */
- log_flush_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP);
+ log_write_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP, TRUE);
trx_commit_for_mysql(trx);
@@ -1821,7 +1835,6 @@ row_drop_table_for_mysql(
ulint len;
ulint namelen;
ulint keywordlen;
- ulint rounds = 0;
ibool locked_dictionary = FALSE;
char buf[10000];
@@ -2168,7 +2181,7 @@ row_is_mysql_tmp_table_name(
ulint i;
for (i = 0; i <= ut_strlen(name) - 5; i++) {
- if (ut_memcmp(name + i, "/#sql", 5) == 0) {
+ if (ut_memcmp(name + i, (char*)"/#sql", 5) == 0) {
return(TRUE);
}
@@ -2190,12 +2203,16 @@ row_rename_table_for_mysql(
{
dict_table_t* table;
que_thr_t* thr;
- que_t* graph;
+ que_t* graph = NULL;
ulint err;
char* str1;
char* str2;
char* str3;
+ mem_heap_t* heap = NULL;
+ char** constraints_to_drop = NULL;
+ ulint n_constraints_to_drop = 0;
ulint len;
+ ulint i;
char buf[10000];
ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
@@ -2213,10 +2230,10 @@ row_rename_table_for_mysql(
trx_commit_for_mysql(trx);
return(DB_ERROR);
}
-
- if (0 == ut_strcmp(new_name, "mysql/host")
- || 0 == ut_strcmp(new_name, "mysql/user")
- || 0 == ut_strcmp(new_name, "mysql/db")) {
+
+ if (0 == ut_strcmp(new_name, (char*)"mysql/host")
+ || 0 == ut_strcmp(new_name, (char*)"mysql/user")
+ || 0 == ut_strcmp(new_name, (char*)"mysql/db")) {
fprintf(stderr,
"InnoDB: Error: trying to create a MySQL system table %s of type InnoDB.\n"
@@ -2230,6 +2247,19 @@ row_rename_table_for_mysql(
trx->op_info = (char *) "renaming table";
trx_start_if_not_started(trx);
+ /* Serialize data dictionary operations with dictionary mutex:
+ no deadlocks can occur then in these operations */
+
+ row_mysql_lock_data_dictionary(trx);
+
+ table = dict_table_get_low(old_name);
+
+ if (!table) {
+ err = DB_TABLE_NOT_FOUND;
+
+ goto funct_exit;
+ }
+
str1 = (char *)
"PROCEDURE RENAME_TABLE_PROC () IS\n"
"new_table_name CHAR;\n"
@@ -2242,14 +2272,43 @@ row_rename_table_for_mysql(
if (row_is_mysql_tmp_table_name(new_name)) {
- /* We want to preserve the original foreign key
- constraint definitions despite the name change */
+ /* MySQL is doing an ALTER TABLE command and it renames the
+ original table to a temporary table name. We want to preserve
+ the original foreign key constraint definitions despite the
+ name change. An exception is those constraints for which
+ the ALTER TABLE contained DROP FOREIGN KEY <foreign key id>.*/
- str3 = (char*)
- "';\n"
- "UPDATE SYS_TABLES SET NAME = new_table_name\n"
- "WHERE NAME = old_table_name;\n"
- "END;\n";
+ heap = mem_heap_create(100);
+
+ err = dict_foreign_parse_drop_constraints(heap, trx,
+ table,
+ &n_constraints_to_drop,
+ &constraints_to_drop);
+ if (err != DB_SUCCESS) {
+
+ goto funct_exit;
+ }
+
+ str3 = mem_heap_alloc(heap,
+ 1000 + 500 * n_constraints_to_drop);
+ *str3 = '\0';
+ sprintf(str3,
+ "';\n"
+ "UPDATE SYS_TABLES SET NAME = new_table_name\n"
+ "WHERE NAME = old_table_name;\n");
+
+ for (i = 0; i < n_constraints_to_drop; i++) {
+ sprintf(str3 + strlen(str3),
+ "DELETE FROM SYS_FOREIGN_COLS WHERE ID = '%s';\n"
+ "DELETE FROM SYS_FOREIGN WHERE ID = '%s';\n",
+ constraints_to_drop[i],
+ constraints_to_drop[i]);
+ }
+
+ sprintf(str3 + strlen(str3),
+ "END;\n");
+
+ ut_a(strlen(str3) < 1000 + 500 * n_constraints_to_drop);
} else {
str3 = (char*)
"';\n"
@@ -2280,13 +2339,6 @@ row_rename_table_for_mysql(
ut_memcpy(buf + len, str3, ut_strlen(str3) + 1);
- /* Serialize data dictionary operations with dictionary mutex:
- no deadlocks can occur then in these operations */
-
- row_mysql_lock_data_dictionary(trx);
-
- table = dict_table_get_low(old_name);
-
graph = pars_sql(buf);
ut_a(graph);
@@ -2296,12 +2348,6 @@ row_rename_table_for_mysql(
graph->fork_type = QUE_FORK_MYSQL_INTERFACE;
- if (!table) {
- err = DB_TABLE_NOT_FOUND;
-
- goto funct_exit;
- }
-
ut_a(thr = que_fork_start_command(graph, SESS_COMM_EXECUTE, 0));
que_run_threads(thr);
@@ -2342,6 +2388,13 @@ row_rename_table_for_mysql(
if (row_is_mysql_tmp_table_name(old_name)) {
+ /* MySQL is doing an ALTER TABLE command and it
+ renames the created temporary table to the name
+ of the original table. In the ALTER TABLE we may have
+ created some FOREIGN KEY constraints for the temporary
+ table. But we want to load also the foreign key
+ constraint definitions for the original table name. */
+
err = dict_load_foreigns(new_name);
if (err != DB_SUCCESS) {
@@ -2367,7 +2420,13 @@ row_rename_table_for_mysql(
funct_exit:
row_mysql_unlock_data_dictionary(trx);
- que_graph_free(graph);
+ if (graph) {
+ que_graph_free(graph);
+ }
+
+ if (heap) {
+ mem_heap_free(heap);
+ }
trx_commit_for_mysql(trx);
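
For a concrete picture of the SQL that the loop above generates: with a single
hypothetical constraint id 'test/fk_emp_1' to drop, the tail of the procedure
text handed to pars_sql() would read approximately (the id is made up for the
example):

	UPDATE SYS_TABLES SET NAME = new_table_name
	WHERE NAME = old_table_name;
	DELETE FROM SYS_FOREIGN_COLS WHERE ID = 'test/fk_emp_1';
	DELETE FROM SYS_FOREIGN WHERE ID = 'test/fk_emp_1';
	END;

The ut_a() check of 1000 + 500 * n_constraints_to_drop bytes is what keeps the
fixed-format sprintf() calls within the buffer allocated from the heap.
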
diff --git a/innobase/row/row0sel.c b/innobase/row/row0sel.c
index fb508e7b1da..97a69f76eaa 100644
--- a/innobase/row/row0sel.c
+++ b/innobase/row/row0sel.c
@@ -2620,6 +2620,24 @@ row_search_for_mysql(
printf("N tables locked %lu\n", trx->mysql_n_tables_locked);
*/
/*-------------------------------------------------------------*/
+ /* PHASE 0: Release a possible s-latch we are holding on the
+ adaptive hash index latch if there is someone waiting behind */
+
+ if (trx->has_search_latch
+ && btr_search_latch.writer != RW_LOCK_NOT_LOCKED) {
+
+ /* There is an x-latch request on the adaptive hash index:
+ release the s-latch to reduce starvation and wait for
+ BTR_SEA_TIMEOUT rounds before trying to keep it again over
+ calls from MySQL */
+
+ rw_lock_s_unlock(&btr_search_latch);
+ trx->has_search_latch = FALSE;
+
+ trx->search_latch_timeout = BTR_SEA_TIMEOUT;
+ }
+
+ /*-------------------------------------------------------------*/
/* PHASE 1: Try to pop the row from the prefetch cache */
if (direction == 0) {
@@ -2685,16 +2703,31 @@ row_search_for_mysql(
mode = pcur->search_mode;
}
+ if ((direction == ROW_SEL_NEXT || direction == ROW_SEL_PREV)
+ && pcur->old_stored != BTR_PCUR_OLD_STORED) {
+
+ /* MySQL sometimes seems to do fetch next or fetch prev even
+ if the search condition is unique; this can, for example,
+ happen with the HANDLER commands; we do not always store the
+ pcur position in this case, so we cannot restore cursor
+ position, and must return immediately */
+
+ /* printf("%s record not found 1\n", index->name); */
+
+ trx->op_info = (char *) "";
+ return(DB_RECORD_NOT_FOUND);
+ }
+
mtr_start(&mtr);
/* In a search where at most one record in the index may match, we
- can use a LOCK_REC_NOT_GAP type record lock when locking a non-delete
+ can use a LOCK_REC_NOT_GAP type record lock when locking a non-delete-
marked matching record.
- Note that in a unique secondary index there may be different delete
+ Note that in a unique secondary index there may be different delete-
marked versions of a record where only the primary key values differ:
thus in a secondary index we must use next-key locks when locking
- delete marked records. */
+ delete-marked records. */
if (match_mode == ROW_SEL_EXACT
&& index->type & DICT_UNIQUE
@@ -2715,25 +2748,9 @@ row_search_for_mysql(
if (unique_search
&& index->type & DICT_CLUSTERED
&& !prebuilt->templ_contains_blob
+ && !prebuilt->used_in_HANDLER
&& (prebuilt->mysql_row_len < UNIV_PAGE_SIZE / 8)) {
- if (direction == ROW_SEL_NEXT) {
- /* MySQL sometimes seems to do fetch next even
- if the search condition is unique; we do not store
- pcur position in this case, so we cannot
- restore cursor position, and must return
- immediately */
-
- mtr_commit(&mtr);
-
- /* printf("%s record not found 1\n", index->name); */
-
- trx->op_info = (char *) "";
- return(DB_RECORD_NOT_FOUND);
- }
-
- ut_a(direction == 0); /* We cannot do fetch prev, as we have
- not stored the cursor position */
mode = PAGE_CUR_GE;
unique_search_from_clust_index = TRUE;
@@ -2754,23 +2771,7 @@ row_search_for_mysql(
NOT prepared for inserts interleaved with the SELECT,
and if we try that, we can deadlock on the adaptive
hash index semaphore! */
-
- if (btr_search_latch.writer != RW_LOCK_NOT_LOCKED) {
- /* There is an x-latch request: release
- a possible s-latch to reduce starvation
- and wait for BTR_SEA_TIMEOUT rounds before
- trying to keep it again over calls from
- MySQL */
-
- if (trx->has_search_latch) {
- rw_lock_s_unlock(&btr_search_latch);
- trx->has_search_latch = FALSE;
- }
- trx->search_latch_timeout = BTR_SEA_TIMEOUT;
-
- goto no_shortcut;
- }
#ifndef UNIV_SEARCH_DEBUG
if (!trx->has_search_latch) {
rw_lock_s_lock(&btr_search_latch);
@@ -2806,6 +2807,10 @@ row_search_for_mysql(
}
trx->op_info = (char *) "";
+
+ /* NOTE that we do NOT store the cursor
+ position */
+
return(DB_SUCCESS);
} else if (shortcut == SEL_EXHAUSTED) {
@@ -2825,6 +2830,10 @@ row_search_for_mysql(
}
trx->op_info = (char *) "";
+
+ /* NOTE that we do NOT store the cursor
+ position */
+
return(DB_RECORD_NOT_FOUND);
}
@@ -2833,7 +2842,6 @@ row_search_for_mysql(
}
}
-no_shortcut:
/*-------------------------------------------------------------*/
/* PHASE 3: Open or restore index cursor position */
@@ -3206,6 +3214,7 @@ rec_loop:
&& prebuilt->select_lock_type == LOCK_NONE
&& !prebuilt->templ_contains_blob
&& !prebuilt->clust_index_was_generated
+ && !prebuilt->used_in_HANDLER
&& prebuilt->template_type
!= ROW_MYSQL_DUMMY_TEMPLATE) {
@@ -3214,7 +3223,9 @@ rec_loop:
update, that is why we require ...lock_type == LOCK_NONE.
Since we keep space in prebuilt only for the BLOBs of
a single row, we cannot cache rows in the case there
- are BLOBs in the fields to be fetched. */
+ are BLOBs in the fields to be fetched. In HANDLER we do
+ not cache rows because there the cursor is a scrollable
+ cursor. */
row_sel_push_cache_row_for_mysql(prebuilt, rec);
@@ -3243,11 +3254,16 @@ rec_loop:
}
}
got_row:
- /* TODO: should we in every case store the cursor position, even
- if this is just a join, for example? */
+ /* We have an optimization to save CPU time: if this is a consistent
+ read on a unique condition on the clustered index, then we do not
+ store the pcur position, because any fetch next or prev will anyway
+ return 'end of file'. An exception is the MySQL HANDLER command
+ where the user can move the cursor with PREV or NEXT even after
+ a unique search. */
if (!unique_search_from_clust_index
- || prebuilt->select_lock_type == LOCK_X) {
+ || prebuilt->select_lock_type == LOCK_X
+ || prebuilt->used_in_HANDLER) {
/* Inside an update always store the cursor position */
diff --git a/innobase/row/row0upd.c b/innobase/row/row0upd.c
index 64569bf3f96..5fce1c1861b 100644
--- a/innobase/row/row0upd.c
+++ b/innobase/row/row0upd.c
@@ -218,7 +218,7 @@ row_upd_check_references_constraints(
being dropped while the check is running. */
err = row_ins_check_foreign_constraint(FALSE, foreign,
- table, index, entry, thr);
+ table, entry, thr);
if (foreign->foreign_table) {
mutex_enter(&(dict_sys->mutex));
diff --git a/innobase/srv/Makefile.am b/innobase/srv/Makefile.am
index b4bdeb7c03b..752683b82b8 100644
--- a/innobase/srv/Makefile.am
+++ b/innobase/srv/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libsrv.a
+noinst_LIBRARIES = libsrv.a
libsrv_a_SOURCES = srv0srv.c srv0que.c srv0start.c
diff --git a/innobase/srv/srv0srv.c b/innobase/srv/srv0srv.c
index af11516e9f0..7cd831fe239 100644
--- a/innobase/srv/srv0srv.c
+++ b/innobase/srv/srv0srv.c
@@ -162,6 +162,13 @@ char* srv_file_flush_method_str = NULL;
ulint srv_unix_file_flush_method = SRV_UNIX_FDATASYNC;
ulint srv_win_file_flush_method = SRV_WIN_IO_UNBUFFERED;
+/* The InnoDB main thread tries to keep the ratio of modified pages
+in the buffer pool to all database pages in the buffer pool smaller than
+the following number. But it is not guaranteed that the value stays below
+that during a time of heavy update/insert activity. */
+
+ulint srv_max_buf_pool_modified_pct = 90;
+
/* If the following is != 0 we do not allow inserts etc. This protects
the user from forgetting the innodb_force_recovery keyword to my.cnf */
@@ -1881,11 +1888,11 @@ retry:
/* Go to wait for the event; when a thread leaves InnoDB it will
release this thread */
- trx->op_info = "waiting in InnoDB queue";
+ trx->op_info = (char*)"waiting in InnoDB queue";
os_event_wait(slot->event);
- trx->op_info = "";
+ trx->op_info = (char*)"";
os_fast_mutex_lock(&srv_conc_mutex);
@@ -2368,12 +2375,35 @@ srv_sprintf_innodb_monitor(
buf = buf + strlen(buf);
ut_a(buf < buf_end + 1500);
- buf += sprintf(buf, "------------\n"
- "TRANSACTIONS\n"
- "------------\n");
+ if (*dict_foreign_err_buf != '\0') {
+ buf += sprintf(buf,
+ "------------------------\n"
+ "LATEST FOREIGN KEY ERROR\n"
+ "------------------------\n");
+
+ if (buf_end - buf > 6000) {
+ buf+= sprintf(buf, "%.4000s", dict_foreign_err_buf);
+ }
+ }
+
+ ut_a(buf < buf_end + 1500);
+
+ if (*dict_unique_err_buf != '\0') {
+ buf += sprintf(buf,
+"---------------------------------------------------------------\n"
+"LATEST UNIQUE KEY ERROR (is masked in REPLACE or INSERT IGNORE)\n"
+"---------------------------------------------------------------\n");
+
+ if (buf_end - buf > 6000) {
+ buf+= sprintf(buf, "%.4000s", dict_unique_err_buf);
+ }
+ }
+
+ ut_a(buf < buf_end + 1500);
+
lock_print_info(buf, buf_end);
buf = buf + strlen(buf);
-
+
buf += sprintf(buf, "--------\n"
"FILE I/O\n"
"--------\n");
@@ -2777,6 +2807,7 @@ srv_master_thread(
ulint n_ios_old;
ulint n_ios_very_old;
ulint n_pend_ios;
+ ibool skip_sleep = FALSE;
ulint i;
UT_NOT_USED(arg);
@@ -2794,24 +2825,42 @@ srv_master_thread(
os_event_set(srv_sys->operational);
loop:
+ /*****************************************************************/
+ /* ---- When there is database activity by users, we cycle in this
+ loop */
+
srv_main_thread_op_info = (char*) "reserving kernel mutex";
n_ios_very_old = log_sys->n_log_ios + buf_pool->n_pages_read
+ buf_pool->n_pages_written;
mutex_enter(&kernel_mutex);
+ /* Store the user activity counter at the start of this loop */
old_activity_count = srv_activity_count;
mutex_exit(&kernel_mutex);
- /* We run purge and a batch of ibuf_contract every 10 seconds, even
- if the server were active: */
+ if (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND) {
+
+ goto suspend_thread;
+ }
+
+ /* ---- We run the following loop approximately once per second
+ when there is database activity */
+
+ skip_sleep = FALSE;
for (i = 0; i < 10; i++) {
n_ios_old = log_sys->n_log_ios + buf_pool->n_pages_read
+ buf_pool->n_pages_written;
srv_main_thread_op_info = (char*)"sleeping";
- os_thread_sleep(1000000);
+
+ if (!skip_sleep) {
+
+ os_thread_sleep(1000000);
+ }
+
+ skip_sleep = FALSE;
/* ALTER TABLE in MySQL requires on Unix that the table handler
can drop tables lazily after there no longer are SELECT
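A condensed view of the control flow this hunk sets up: the master thread cycles roughly once per second, but skips the next sleep whenever the buf_get_modified_ratio_pct() branch added further down in this loop had to run a large flush. The wrapper below is hypothetical; the calls are the ones used in the patch:

/* Hypothetical condensation of the once-per-second section of
srv_master_thread(); only the sleep/skip_sleep skeleton is kept. */
static void
per_second_loop_sketch(void)
{
	ibool	skip_sleep = FALSE;
	ulint	i;

	for (i = 0; i < 10; i++) {

		if (!skip_sleep) {
			os_thread_sleep(1000000);	/* about one second */
		}

		skip_sleep = FALSE;

		/* ... per-second work: log write, possible insert
		buffer merge ... */

		if (buf_get_modified_ratio_pct()
				> srv_max_buf_pool_modified_pct) {

			buf_flush_batch(BUF_FLUSH_LIST, 100, ut_dulint_max);

			/* The flush may have taken more than a second;
			start the next iteration without sleeping. */
			skip_sleep = TRUE;
		}
	}
}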
@@ -2824,9 +2873,9 @@ loop:
srv_main_thread_op_info = (char*)"";
- if (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND) {
+ if (srv_fast_shutdown && srv_shutdown_state > 0) {
- goto suspend_thread;
+ goto background_loop;
}
/* We flush the log once in a second even if no commit
@@ -2834,10 +2883,9 @@ loop:
at transaction commit */
srv_main_thread_op_info = (char*)"flushing log";
- log_flush_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP);
- log_flush_to_disk();
+ log_write_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP, TRUE);
- /* If there were less than 10 i/os during the
+ /* If there were less than 5 i/os during the
one second sleep, we assume that there is free
disk i/o capacity available, and it makes sense to
do an insert buffer merge. */
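The thresholds in this and the following hunk encode one heuristic: if few i/os completed while the thread slept and few are pending, the disks are assumed idle enough for background work (5 i/os over one second for the insert buffer merge here, 200 over ten seconds for the page flush later on). A sketch of that test as a hypothetical predicate, not taken from the patch:

/* Hypothetical helper: TRUE when the recent i/o rate suggests spare
disk capacity. 'threshold' would be 5 for the one-second check and
200 for the ten-second check. */
static ibool
disk_looks_idle(
	ulint	n_pend_ios,	/* currently pending i/os */
	ulint	n_ios_now,	/* i/o counter now */
	ulint	n_ios_before,	/* i/o counter at the start of the period */
	ulint	threshold)	/* allowed i/os during the period */
{
	return(n_pend_ios < 3 && (n_ios_now - n_ios_before < threshold));
}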
@@ -2846,35 +2894,45 @@ loop:
+ log_sys->n_pending_writes;
n_ios = log_sys->n_log_ios + buf_pool->n_pages_read
+ buf_pool->n_pages_written;
- if (n_pend_ios < 3 && (n_ios - n_ios_old < 10)) {
+ if (n_pend_ios < 3 && (n_ios - n_ios_old < 5)) {
srv_main_thread_op_info =
(char*)"doing insert buffer merge";
ibuf_contract_for_n_pages(TRUE, 5);
srv_main_thread_op_info =
(char*)"flushing log";
- log_flush_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP);
- log_flush_to_disk();
+ log_write_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP,
+ TRUE);
}
-
- if (srv_fast_shutdown && srv_shutdown_state > 0) {
- goto background_loop;
+ if (buf_get_modified_ratio_pct() >
+ srv_max_buf_pool_modified_pct) {
+
+ /* Try to keep the number of modified pages in the
+ buffer pool under the limit requested by the user */
+
+ n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 100,
+ ut_dulint_max);
+
+ /* If we had to do the flush, it may have taken
+ even more than 1 second, and also, there may be more
+ to flush. Do not sleep 1 second during the next
+ iteration of this loop. */
+
+ skip_sleep = TRUE;
}
if (srv_activity_count == old_activity_count) {
- if (srv_print_thread_releases) {
- printf("Master thread wakes up!\n");
- }
+ /* There is no user activity at the moment, go to
+ the background loop */
goto background_loop;
}
}
- if (srv_print_thread_releases) {
- printf("Master thread wakes up!\n");
- }
+ /* ---- We perform the following code approximately once per
+ 10 seconds when there is database activity */
#ifdef MEM_PERIODIC_CHECK
/* Check magic numbers of every allocated mem block once in 10
@@ -2883,7 +2941,7 @@ loop:
#endif
/* If there were less than 200 i/os during the 10 second period,
we assume that there is free disk i/o capacity available, and it
- makes sense to do a buffer pool flush. */
+ makes sense to flush 100 pages. */
n_pend_ios = buf_get_n_pending_ios() + log_sys->n_pending_writes;
n_ios = log_sys->n_log_ios + buf_pool->n_pages_read
@@ -2891,11 +2949,10 @@ loop:
if (n_pend_ios < 3 && (n_ios - n_ios_very_old < 200)) {
srv_main_thread_op_info = (char*) "flushing buffer pool pages";
- buf_flush_batch(BUF_FLUSH_LIST, 50, ut_dulint_max);
+ buf_flush_batch(BUF_FLUSH_LIST, 100, ut_dulint_max);
srv_main_thread_op_info = (char*) "flushing log";
- log_flush_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP);
- log_flush_to_disk();
+ log_write_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP, TRUE);
}
/* We run a batch of insert buffer merge every 10 seconds,
@@ -2905,8 +2962,7 @@ loop:
ibuf_contract_for_n_pages(TRUE, 5);
srv_main_thread_op_info = (char*)"flushing log";
- log_flush_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP);
- log_flush_to_disk();
+ log_write_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP, TRUE);
/* We run a full purge every 10 seconds, even if the server
were active */
@@ -2930,27 +2986,32 @@ loop:
if (difftime(current_time, last_flush_time) > 1) {
srv_main_thread_op_info = (char*) "flushing log";
- log_flush_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP);
- log_flush_to_disk();
+ log_write_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP,
+ TRUE);
last_flush_time = current_time;
}
}
+
+ srv_main_thread_op_info = (char*)"flushing buffer pool pages";
-background_loop:
- /* In this loop we run background operations when the server
- is quiet and we also come here about once in 10 seconds */
+ /* Flush a few of the oldest pages to make a new checkpoint younger */
- srv_main_thread_op_info = (char*)"doing background drop tables";
+ if (buf_get_modified_ratio_pct() > 70) {
- n_tables_to_drop = row_drop_tables_for_mysql_in_background();
+ /* If there are lots of modified pages in the buffer pool
+ (> 70 %), we assume we can afford reserving the disk(s) for
+ the time it takes to flush 100 pages */
- srv_main_thread_op_info = (char*)"";
-
- srv_main_thread_op_info = (char*)"flushing buffer pool pages";
-
- /* Flush a few oldest pages to make the checkpoint younger */
+ n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 100,
+ ut_dulint_max);
+ } else {
+ /* Otherwise, we flush only a small number of pages so that
+ we do not unnecessarily take disk i/o capacity away from
+ other work */
- n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 10, ut_dulint_max);
+ n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 10,
+ ut_dulint_max);
+ }
srv_main_thread_op_info = (char*)"making checkpoint";
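The branch above trades checkpoint age against disk bandwidth: with many dirty pages (over 70%) a 100-page batch is considered affordable, otherwise only 10 pages are flushed so other work keeps its i/o share. Reduced to a stand-alone sketch; the wrapper is hypothetical, the calls are those used above:

/* Hypothetical wrapper around the ten-second flush decision. */
static ulint
flush_for_checkpoint_age(void)
{
	if (buf_get_modified_ratio_pct() > 70) {

		return(buf_flush_batch(BUF_FLUSH_LIST, 100, ut_dulint_max));
	}

	return(buf_flush_batch(BUF_FLUSH_LIST, 10, ut_dulint_max));
}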
@@ -2961,16 +3022,31 @@ background_loop:
srv_main_thread_op_info = (char*)"reserving kernel mutex";
mutex_enter(&kernel_mutex);
+
+ /* ---- When there is database activity, we jump from here back to
+ the start of loop */
+
if (srv_activity_count != old_activity_count) {
mutex_exit(&kernel_mutex);
goto loop;
}
- old_activity_count = srv_activity_count;
+
mutex_exit(&kernel_mutex);
+ /* If the database is quiet, we enter the background loop */
+
+ /*****************************************************************/
+background_loop:
+ /* ---- In this loop we run background operations when the server
+ is quiet from user activity */
+
/* The server has been quiet for a while: start running background
operations */
+ srv_main_thread_op_info = (char*)"doing background drop tables";
+
+ n_tables_to_drop = row_drop_tables_for_mysql_in_background();
+
srv_main_thread_op_info = (char*)"purging";
if (srv_fast_shutdown && srv_shutdown_state > 0) {
@@ -3005,6 +3081,7 @@ background_loop:
}
mutex_exit(&kernel_mutex);
+flush_loop:
srv_main_thread_op_info = (char*)"flushing buffer pool pages";
n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 100, ut_dulint_max);
@@ -3017,13 +3094,22 @@ background_loop:
}
mutex_exit(&kernel_mutex);
- srv_main_thread_op_info = (char*) "waiting for buffer pool flush to end";
+ srv_main_thread_op_info =
+ (char*) "waiting for buffer pool flush to end";
buf_flush_wait_batch_end(BUF_FLUSH_LIST);
srv_main_thread_op_info = (char*)"making checkpoint";
log_checkpoint(TRUE, FALSE);
+ if (buf_get_modified_ratio_pct() > srv_max_buf_pool_modified_pct) {
+
+ /* Try to keep the number of modified pages in the
+ buffer pool under the limit requested by the user */
+
+ goto flush_loop;
+ }
+
srv_main_thread_op_info = (char*)"reserving kernel mutex";
mutex_enter(&kernel_mutex);
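The new flush_loop label turns the background flush into a loop: flush a 100-page batch, wait for it to end, take a checkpoint, and repeat while the dirty-page ratio is still above the user's limit. A compact sketch of the same shape; the wrapper is hypothetical and leaves out the shutdown and activity checks the real loop also makes:

/* Hypothetical condensation of the flush_loop added above. */
static void
flush_until_under_limit(void)
{
	do {
		buf_flush_batch(BUF_FLUSH_LIST, 100, ut_dulint_max);
		buf_flush_wait_batch_end(BUF_FLUSH_LIST);

		log_checkpoint(TRUE, FALSE);

	} while (buf_get_modified_ratio_pct()
				> srv_max_buf_pool_modified_pct);
}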
@@ -3038,15 +3124,24 @@ background_loop:
log_archive_do(FALSE, &n_bytes_archived);
+ /* Keep looping in the background loop if there is still work to do */
+
if (srv_fast_shutdown && srv_shutdown_state > 0) {
if (n_tables_to_drop + n_pages_flushed
+ n_bytes_archived != 0) {
+ /* If we are doing a fast shutdown (= the default)
+ we do not do purge or insert buffer merge. But we
+ flush the buffer pool completely to disk. */
+
goto background_loop;
}
} else if (n_tables_to_drop +
- n_pages_purged + n_bytes_merged + n_pages_flushed
+ n_pages_purged + n_bytes_merged + n_pages_flushed
+ n_bytes_archived != 0) {
+ /* In a 'slow' shutdown we run purge and the insert buffer
+ merge to completion */
+
goto background_loop;
}
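In other words: during a fast shutdown the thread keeps cycling in the background loop only while table drops, page flushes or log archiving still produced work; during a slow shutdown the purged pages and merged insert buffer records are counted too, so purge and the merge run to completion. The same decision as a hypothetical predicate, not taken from the patch:

/* Hypothetical helper: TRUE if the background loop should run again. */
static ibool
background_work_remains(
	ibool	fast_shutdown_in_progress,
	ulint	n_tables_to_drop,
	ulint	n_pages_flushed,
	ulint	n_bytes_archived,
	ulint	n_pages_purged,
	ulint	n_bytes_merged)
{
	if (fast_shutdown_in_progress) {

		return(n_tables_to_drop + n_pages_flushed
					+ n_bytes_archived != 0);
	}

	return(n_tables_to_drop + n_pages_purged + n_bytes_merged
			+ n_pages_flushed + n_bytes_archived != 0);
}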
@@ -3078,6 +3173,9 @@ suspend_thread:
os_event_wait(event);
+ /* When there is user activity, InnoDB will set the event and the main
+ thread goes back to loop: */
+
goto loop;
#ifndef __WIN__
diff --git a/innobase/srv/srv0start.c b/innobase/srv/srv0start.c
index 33d4a30e227..536aa5b67e4 100644
--- a/innobase/srv/srv0start.c
+++ b/innobase/srv/srv0start.c
@@ -161,13 +161,13 @@ srv_parse_data_file_paths_and_sizes(
}
if (strlen(str) >= ut_strlen(":autoextend")
- && 0 == ut_memcmp(str, ":autoextend",
+ && 0 == ut_memcmp(str, (char*)":autoextend",
ut_strlen(":autoextend"))) {
str += ut_strlen(":autoextend");
if (strlen(str) >= ut_strlen(":max:")
- && 0 == ut_memcmp(str, ":max:",
+ && 0 == ut_memcmp(str, (char*)":max:",
ut_strlen(":max:"))) {
str += ut_strlen(":max:");
@@ -265,7 +265,7 @@ srv_parse_data_file_paths_and_sizes(
(*data_file_sizes)[i] = size;
if (strlen(str) >= ut_strlen(":autoextend")
- && 0 == ut_memcmp(str, ":autoextend",
+ && 0 == ut_memcmp(str, (char*)":autoextend",
ut_strlen(":autoextend"))) {
*is_auto_extending = TRUE;
@@ -273,7 +273,7 @@ srv_parse_data_file_paths_and_sizes(
str += ut_strlen(":autoextend");
if (strlen(str) >= ut_strlen(":max:")
- && 0 == ut_memcmp(str, ":max:",
+ && 0 == ut_memcmp(str, (char*)":max:",
ut_strlen(":max:"))) {
str += ut_strlen(":max:");
@@ -864,6 +864,7 @@ open_or_create_data_files(
return(DB_SUCCESS);
}
+#ifdef notdefined
/*********************************************************************
This thread is used to measure contention of latches. */
static
@@ -935,6 +936,7 @@ test_measure_cont(
return(0);
}
+#endif
/********************************************************************
Starts InnoDB and creates a new database if database files
@@ -1053,20 +1055,24 @@ innobase_start_or_create_for_mysql(void)
srv_win_file_flush_method = SRV_WIN_IO_UNBUFFERED;
#ifndef __WIN__
- } else if (0 == ut_strcmp(srv_file_flush_method_str, "fdatasync")) {
+ } else if (0 == ut_strcmp(srv_file_flush_method_str,
+ (char*)"fdatasync")) {
srv_unix_file_flush_method = SRV_UNIX_FDATASYNC;
- } else if (0 == ut_strcmp(srv_file_flush_method_str, "O_DSYNC")) {
+ } else if (0 == ut_strcmp(srv_file_flush_method_str,
+ (char*)"O_DSYNC")) {
srv_unix_file_flush_method = SRV_UNIX_O_DSYNC;
} else if (0 == ut_strcmp(srv_file_flush_method_str,
- "littlesync")) {
+ (char*)"littlesync")) {
srv_unix_file_flush_method = SRV_UNIX_LITTLESYNC;
- } else if (0 == ut_strcmp(srv_file_flush_method_str, "nosync")) {
+ } else if (0 == ut_strcmp(srv_file_flush_method_str,
+ (char*)"nosync")) {
srv_unix_file_flush_method = SRV_UNIX_NOSYNC;
#else
- } else if (0 == ut_strcmp(srv_file_flush_method_str, "normal")) {
+ } else if (0 == ut_strcmp(srv_file_flush_method_str,
+ (char*)"normal")) {
srv_win_file_flush_method = SRV_WIN_IO_NORMAL;
os_aio_use_native_aio = FALSE;
@@ -1196,7 +1202,14 @@ innobase_start_or_create_for_mysql(void)
&max_flushed_lsn, &max_arch_log_no,
&sum_of_new_sizes);
if (err != DB_SUCCESS) {
- fprintf(stderr, "InnoDB: Could not open data files\n");
+ fprintf(stderr,
+"InnoDB: Could not open or create data files.\n"
+"InnoDB: If you tried to add new data files, and it failed here,\n"
+"InnoDB: you should now edit innodb_data_file_path in my.cnf back\n"
+"InnoDB: to what it was, and remove the new ibdata files InnoDB created\n"
+"InnoDB: in this failed attempt. InnoDB only wrote those files full of\n"
+"InnoDB: zeros, but did not yet use them in any way. But be careful: do not\n"
+"InnoDB: remove old data files which contain your precious data!\n");
return((int) err);
}
@@ -1207,7 +1220,10 @@ innobase_start_or_create_for_mysql(void)
and restore them from the doublewrite buffer if
possible */
- trx_sys_doublewrite_restore_corrupt_pages();
+ if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
+
+ trx_sys_doublewrite_restore_corrupt_pages();
+ }
}
srv_normalize_path_for_win(srv_arch_dir);
@@ -1478,7 +1494,9 @@ innobase_start_or_create_for_mysql(void)
fprintf(stderr,
"InnoDB: !!! innodb_force_recovery is set to %lu !!!\n",
srv_force_recovery);
- }
+ }
+
+ fflush(stderr);
return((int) DB_SUCCESS);
}
diff --git a/innobase/sync/Makefile.am b/innobase/sync/Makefile.am
index 7504525bf84..4acd4516e35 100644
--- a/innobase/sync/Makefile.am
+++ b/innobase/sync/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libsync.a
+noinst_LIBRARIES = libsync.a
libsync_a_SOURCES = sync0arr.c sync0ipm.c sync0rw.c sync0sync.c
diff --git a/innobase/thr/Makefile.am b/innobase/thr/Makefile.am
index 5f42138e734..62c39492c07 100644
--- a/innobase/thr/Makefile.am
+++ b/innobase/thr/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libthr.a
+noinst_LIBRARIES = libthr.a
libthr_a_SOURCES = thr0loc.c
diff --git a/innobase/trx/Makefile.am b/innobase/trx/Makefile.am
index 63b2c52da33..9e2b3c398e3 100644
--- a/innobase/trx/Makefile.am
+++ b/innobase/trx/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libtrx.a
+noinst_LIBRARIES = libtrx.a
libtrx_a_SOURCES = trx0purge.c trx0rec.c trx0roll.c trx0rseg.c\
trx0sys.c trx0trx.c trx0undo.c
diff --git a/innobase/trx/trx0sys.c b/innobase/trx/trx0sys.c
index 1ae9f00ae1f..b9e4a9fea4b 100644
--- a/innobase/trx/trx0sys.c
+++ b/innobase/trx/trx0sys.c
@@ -340,7 +340,6 @@ trx_sys_doublewrite_restore_corrupt_pages(void)
/* It is an unwritten doublewrite buffer page:
do nothing */
-
} else {
/* Read in the actual page from the data files */
@@ -357,9 +356,19 @@ trx_sys_doublewrite_restore_corrupt_pages(void)
"InnoDB: Trying to recover it from the doublewrite buffer.\n");
if (buf_page_is_corrupted(page)) {
+ fprintf(stderr,
+ "InnoDB: Dump of the page:\n");
+ buf_page_print(read_buf);
+ fprintf(stderr,
+ "InnoDB: Dump of corresponding page in doublewrite buffer:\n");
+ buf_page_print(page);
+
fprintf(stderr,
"InnoDB: Also the page in the doublewrite buffer is corrupt.\n"
- "InnoDB: Cannot continue operation.\n");
+ "InnoDB: Cannot continue operation.\n"
+ "InnoDB: You can try to recover the database with the my.cnf\n"
+ "InnoDB: option:\n"
+ "InnoDB: set-variable=innodb_force_recovery=6\n");
exit(1);
}
diff --git a/innobase/trx/trx0trx.c b/innobase/trx/trx0trx.c
index 5ac49397c90..5753b5b338e 100644
--- a/innobase/trx/trx0trx.c
+++ b/innobase/trx/trx0trx.c
@@ -89,6 +89,8 @@ trx_create(
trx->check_foreigns = TRUE;
trx->check_unique_secondary = TRUE;
+ trx->flush_log_later = FALSE;
+
trx->dict_operation = FALSE;
trx->mysql_thd = NULL;
@@ -102,8 +104,6 @@ trx_create(
trx->mysql_master_log_file_name = "";
trx->mysql_master_log_pos = 0;
- trx->ignore_duplicates_in_insert = FALSE;
-
mutex_create(&(trx->undo_mutex));
mutex_set_level(&(trx->undo_mutex), SYNC_TRX_UNDO);
@@ -782,13 +782,26 @@ trx_commit_off_kernel(
/*-------------------------------------*/
- /* Most MySQL users run with srv_flush_.. set to FALSE: */
+ /* Most MySQL users run with srv_flush_.. set to 0: */
- if (srv_flush_log_at_trx_commit) {
-
- log_flush_up_to(lsn, LOG_WAIT_ONE_GROUP);
+ if (srv_flush_log_at_trx_commit != 0) {
+ if (srv_unix_file_flush_method != SRV_UNIX_NOSYNC
+ && srv_flush_log_at_trx_commit != 2
+ && !trx->flush_log_later) {
+
+ /* Write the log to the log files AND flush
+ them to disk */
+
+ log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, TRUE);
+ } else {
+ /* Write the log but do not flush it to disk */
+
+ log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, FALSE);
+ }
}
+ trx->commit_lsn = lsn;
+
/*-------------------------------------*/
mutex_enter(&kernel_mutex);
@@ -1470,6 +1483,33 @@ trx_commit_for_mysql(
}
/**************************************************************************
+Flushes the log to disk if required, i.e. if trx_commit_for_mysql() was
+called with trx->flush_log_later == TRUE. */
+
+ulint
+trx_commit_complete_for_mysql(
+/*==========================*/
+ /* out: 0 or error number */
+ trx_t* trx) /* in: trx handle */
+{
+ ut_a(trx);
+
+ if (srv_flush_log_at_trx_commit == 1
+ && srv_unix_file_flush_method != SRV_UNIX_NOSYNC) {
+
+ trx->op_info = (char *) "flushing log";
+
+ /* Flush the log files to disk */
+
+ log_write_up_to(trx->commit_lsn, LOG_WAIT_ONE_GROUP, TRUE);
+
+ trx->op_info = (char *) "";
+ }
+
+ return(0);
+}
+
+/**************************************************************************
Marks the latest SQL statement ended. */
void
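Taken together, the two hunks above split a commit into an optional second phase: a caller may set trx->flush_log_later before committing, in which case the commit only writes the log, and the fsync happens later in trx_commit_complete_for_mysql(). A hypothetical caller sketch; the surrounding handler code is not part of this patch:

/* Hypothetical sketch of a caller that defers the commit flush, for
example to group several statements under one log flush. */
void
commit_with_deferred_flush(trx_t* trx)
{
	trx->flush_log_later = TRUE;

	trx_commit_for_mysql(trx);	/* writes the log, does not fsync */

	trx->flush_log_later = FALSE;

	/* ... other work that may be grouped with this commit ... */

	trx_commit_complete_for_mysql(trx);	/* fsyncs up to commit_lsn */
}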
diff --git a/innobase/usr/Makefile.am b/innobase/usr/Makefile.am
index a71d0d41ac0..bdcc832a76e 100644
--- a/innobase/usr/Makefile.am
+++ b/innobase/usr/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libusr.a
+noinst_LIBRARIES = libusr.a
libusr_a_SOURCES = usr0sess.c
diff --git a/innobase/ut/Makefile.am b/innobase/ut/Makefile.am
index de3cf41b767..2fdbb99e0f3 100644
--- a/innobase/ut/Makefile.am
+++ b/innobase/ut/Makefile.am
@@ -17,7 +17,7 @@
include ../include/Makefile.i
-libs_LIBRARIES = libut.a
+noinst_LIBRARIES = libut.a
libut_a_SOURCES = ut0byte.c ut0dbg.c ut0mem.c ut0rnd.c ut0ut.c
diff --git a/innobase/ut/ut0ut.c b/innobase/ut/ut0ut.c
index 31a137b5fac..c503cda54b9 100644
--- a/innobase/ut/ut0ut.c
+++ b/innobase/ut/ut0ut.c
@@ -206,7 +206,7 @@ ut_get_year_month_day(
cal_tm_ptr = localtime(&tm);
- *year = (ulint)cal_tm_ptr->tm_year;
+ *year = (ulint)cal_tm_ptr->tm_year + 1900;
*month = (ulint)cal_tm_ptr->tm_mon + 1;
*day = (ulint)cal_tm_ptr->tm_mday;
#endif
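The fix above reflects how struct tm encodes dates: tm_year counts years since 1900 and tm_mon runs from 0 to 11, which is why the year needs + 1900 and the month + 1. A tiny stand-alone illustration:

#include <stdio.h>
#include <time.h>

/* Prints the current date, applying the same adjustments as the
corrected ut_get_year_month_day(). */
int
main(void)
{
	time_t		now = time(NULL);
	struct tm*	cal = localtime(&now);

	printf("%04d-%02d-%02d\n",
	       cal->tm_year + 1900, cal->tm_mon + 1, cal->tm_mday);

	return(0);
}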