author     Jan Lindström <jan.lindstrom@mariadb.com>  2019-02-01 11:02:03 +0200
committer  Jan Lindström <jan.lindstrom@mariadb.com>  2019-02-01 11:02:03 +0200
commit     d6feda205af1f05fa7143501674a2d8c28fda561 (patch)
tree       b42321e8decad353fc8b03bdbe423027a50472df
parent     c2caca02ac39454e18db8de563e7e7c8eaf8b1c7 (diff)
parent     368eda060f5922929eb4741e97b37a205591bdf3 (diff)
download   mariadb-git-d6feda205af1f05fa7143501674a2d8c28fda561.tar.gz
Merge tag 'mariadb-10.0.38' into 10.0-galera
-rw-r--r--  CMakeLists.txt | 10
-rw-r--r--  client/CMakeLists.txt | 2
-rw-r--r--  client/mysqltest.cc | 51
-rw-r--r--  cmake/build_configurations/mysql_release.cmake | 1
-rw-r--r--  cmake/ssl.cmake | 11
-rw-r--r--  cmake/zlib.cmake | 5
-rw-r--r--  config.h.cmake | 6
-rw-r--r--  include/my_global.h | 2
-rw-r--r--  include/my_valgrind.h | 4
-rw-r--r--  include/mysql.h | 2
-rw-r--r--  include/mysql.h.pp | 2
-rw-r--r--  include/mysql/service_kill_statement.h | 4
-rw-r--r--  libmysqld/examples/CMakeLists.txt | 2
-rw-r--r--  mysql-test/disabled.def | 1
-rwxr-xr-x  mysql-test/lib/v1/mysql-test-run.pl | 2
-rwxr-xr-x  mysql-test/mysql-test-run.pl | 2
-rw-r--r--  mysql-test/r/auto_increment_ranges_innodb.result | 14
-rw-r--r--  mysql-test/r/bigint.result | 11
-rw-r--r--  mysql-test/r/func_group_innodb.result | 30
-rw-r--r--  mysql-test/r/huge_frm-6224.result | 2
-rw-r--r--  mysql-test/r/innodb_ext_key.result | 95
-rw-r--r--  mysql-test/r/mysql.result | 26
-rw-r--r--  mysql-test/r/mysqldump.result | 6
-rw-r--r--  mysql-test/r/partition.result | 100
-rw-r--r--  mysql-test/r/partition_innodb.result | 26
-rw-r--r--  mysql-test/r/range_innodb.result | 42
-rw-r--r--  mysql-test/r/read_only.result | 15
-rw-r--r--  mysql-test/r/row-checksum-old.result | 16
-rw-r--r--  mysql-test/r/row-checksum.result | 16
-rw-r--r--  mysql-test/r/stat_tables.result | 19
-rw-r--r--  mysql-test/r/stat_tables_innodb.result | 19
-rw-r--r--  mysql-test/r/subselect2.result | 22
-rw-r--r--  mysql-test/r/subselect_exists2in.result | 4
-rw-r--r--  mysql-test/r/subselect_mat.result | 16
-rw-r--r--  mysql-test/r/union.result | 38
-rw-r--r--  mysql-test/r/view.result | 4
-rw-r--r--  mysql-test/suite/engines/iuds/r/insert_number.result | 100
-rw-r--r--  mysql-test/suite/engines/iuds/r/update_delete_number.result | 17
-rw-r--r--  mysql-test/suite/innodb/r/alter_candidate_key.result | 107
-rw-r--r--  mysql-test/suite/innodb/r/foreign_key.result | 33
-rw-r--r--  mysql-test/suite/innodb/r/innodb-alter.result | 159
-rw-r--r--  mysql-test/suite/innodb/r/innodb-index.result | 33
-rw-r--r--  mysql-test/suite/innodb/r/innodb-table-online.result | 11
-rw-r--r--  mysql-test/suite/innodb/r/innodb-virtual-columns.result | 15
-rw-r--r--  mysql-test/suite/innodb/r/innodb_28867993.result | 9
-rw-r--r--  mysql-test/suite/innodb/t/alter_candidate_key.test | 72
-rw-r--r--  mysql-test/suite/innodb/t/foreign_key.test | 31
-rw-r--r--  mysql-test/suite/innodb/t/innodb-alter.test | 114
-rw-r--r--  mysql-test/suite/innodb/t/innodb-index.test | 30
-rw-r--r--  mysql-test/suite/innodb/t/innodb-table-online.test | 4
-rw-r--r--  mysql-test/suite/innodb/t/innodb-virtual-columns.test | 11
-rw-r--r--  mysql-test/suite/innodb/t/innodb_28867993.test | 12
-rw-r--r--  mysql-test/suite/perfschema/r/dml_setup_instruments.result | 4
-rw-r--r--  mysql-test/suite/perfschema/t/dml_setup_instruments.test | 5
-rw-r--r--  mysql-test/suite/roles/flush_roles-17898.result | 13
-rw-r--r--  mysql-test/suite/roles/flush_roles-17898.test | 11
-rw-r--r--  mysql-test/suite/rpl/r/rpl_idempotency.result | 12
-rw-r--r--  mysql-test/suite/rpl/r/rpl_row_big_table_id_32bit.result | 38
-rw-r--r--  mysql-test/suite/rpl/r/rpl_row_big_table_id_64bit.result | 38
-rw-r--r--  mysql-test/suite/rpl/t/rpl_idempotency.test | 21
-rw-r--r--  mysql-test/suite/rpl/t/rpl_row_big_table_id.inc | 56
-rw-r--r--  mysql-test/suite/rpl/t/rpl_row_big_table_id_32bit.test | 11
-rw-r--r--  mysql-test/suite/rpl/t/rpl_row_big_table_id_64bit.test | 11
-rw-r--r--  mysql-test/suite/sys_vars/r/table_definition_cache_basic.result | 16
-rw-r--r--  mysql-test/suite/sys_vars/t/table_definition_cache_basic.test | 6
-rw-r--r--  mysql-test/t/auto_increment_ranges_innodb.test | 13
-rw-r--r--  mysql-test/t/bigint.test | 9
-rw-r--r--  mysql-test/t/func_group_innodb.test | 26
-rw-r--r--  mysql-test/t/huge_frm-6224.test | 11
-rw-r--r--  mysql-test/t/innodb_ext_key.test | 106
-rw-r--r--  mysql-test/t/mysql.test | 22
-rw-r--r--  mysql-test/t/mysqldump.test | 2
-rw-r--r--  mysql-test/t/partition.test | 61
-rw-r--r--  mysql-test/t/partition_innodb.test | 30
-rw-r--r--  mysql-test/t/range_innodb.test | 42
-rw-r--r--  mysql-test/t/read_only.test | 21
-rw-r--r--  mysql-test/t/row-checksum.test | 17
-rw-r--r--  mysql-test/t/stat_tables.test | 17
-rw-r--r--  mysql-test/t/subselect2.test | 20
-rw-r--r--  mysql-test/t/subselect_mat.test | 13
-rw-r--r--  mysql-test/t/union.test | 35
-rw-r--r--  mysql-test/unstable-tests | 119
-rw-r--r--  mysys/mf_iocache.c | 4
-rw-r--r--  mysys/my_file.c | 7
-rw-r--r--  mysys/my_pread.c | 26
-rw-r--r--  mysys/my_read.c | 48
-rw-r--r--  mysys/safemalloc.c | 2
-rw-r--r--  res | 22
-rw-r--r--  scripts/mysql_install_db.sh | 19
-rw-r--r--  scripts/mytop.sh | 19
-rw-r--r--  sql-common/client.c | 30
-rw-r--r--  sql/CMakeLists.txt | 2
-rw-r--r--  sql/handler.h | 12
-rw-r--r--  sql/item.h | 4
-rw-r--r--  sql/item_cmpfunc.cc | 190
-rw-r--r--  sql/item_cmpfunc.h | 5
-rw-r--r--  sql/log.cc | 6
-rw-r--r--  sql/log_event.cc | 2
-rw-r--r--  sql/mysql_install_db.cc | 72
-rw-r--r--  sql/opt_range.cc | 16
-rw-r--r--  sql/partition_info.cc | 54
-rw-r--r--  sql/partition_info.h | 1
-rw-r--r--  sql/sql_acl.cc | 115
-rw-r--r--  sql/sql_array.h | 15
-rw-r--r--  sql/sql_const.h | 12
-rw-r--r--  sql/sql_lex.cc | 2
-rw-r--r--  sql/sql_parse.cc | 7
-rw-r--r--  sql/sql_repl.cc | 6
-rw-r--r--  sql/sql_select.cc | 4
-rw-r--r--  sql/sql_statistics.cc | 43
-rw-r--r--  sql/sql_statistics.h | 1
-rw-r--r--  sql/sql_table.cc | 80
-rw-r--r--  sql/sql_type_int.h | 28
-rw-r--r--  sql/sql_update.cc | 2
-rw-r--r--  sql/sql_yacc.yy | 23
-rw-r--r--  sql/sys_vars.cc | 8
-rw-r--r--  sql/table.cc | 48
-rw-r--r--  sql/table.h | 2
-rw-r--r--  sql/table_cache.cc | 5
-rw-r--r--  sql/unireg.h | 2
-rw-r--r--  storage/connect/global.h | 6
-rw-r--r--  storage/connect/ha_connect.cc | 4
-rw-r--r--  storage/connect/jsonudf.cpp | 12
-rw-r--r--  storage/connect/mysql-test/connect/r/jdbc_oracle.result | 18
-rw-r--r--  storage/connect/mysql-test/connect/r/jdbc_postgresql.result | 10
-rw-r--r--  storage/connect/mysql-test/connect/r/part_table.result | 4
-rw-r--r--  storage/connect/mysql-test/connect/t/part_table.test | 2
-rw-r--r--  storage/connect/plugutil.cpp | 28
-rw-r--r--  storage/connect/reldef.cpp | 11
-rw-r--r--  storage/connect/tabfmt.h | 2
-rw-r--r--  storage/connect/tabjson.cpp | 52
-rw-r--r--  storage/connect/tabjson.h | 8
-rw-r--r--  storage/connect/tabodbc.cpp | 317
-rw-r--r--  storage/connect/tabxml.cpp | 292
-rw-r--r--  storage/connect/tabxml.h | 6
-rw-r--r--  storage/connect/user_connect.cc | 4
-rw-r--r--  storage/innobase/buf/buf0buf.cc | 172
-rw-r--r--  storage/innobase/dict/dict0dict.cc | 7
-rw-r--r--  storage/innobase/dict/dict0mem.cc | 4
-rw-r--r--  storage/innobase/fil/fil0fil.cc | 27
-rw-r--r--  storage/innobase/fts/fts0fts.cc | 20
-rw-r--r--  storage/innobase/handler/ha_innodb.cc | 103
-rw-r--r--  storage/innobase/handler/ha_innodb.h | 2
-rw-r--r--  storage/innobase/handler/handler0alter.cc | 174
-rw-r--r--  storage/innobase/include/buf0buf.h | 6
-rw-r--r--  storage/innobase/include/dict0mem.h | 4
-rw-r--r--  storage/innobase/include/fil0fil.h | 20
-rw-r--r--  storage/innobase/include/os0file.h | 6
-rw-r--r--  storage/innobase/include/page0page.h | 15
-rw-r--r--  storage/innobase/include/page0zip.h | 15
-rw-r--r--  storage/innobase/include/univ.i | 13
-rw-r--r--  storage/innobase/os/os0proc.cc | 3
-rw-r--r--  storage/innobase/page/page0page.cc | 42
-rw-r--r--  storage/innobase/page/page0zip.cc | 95
-rw-r--r--  storage/innobase/row/row0ftsort.cc | 7
-rw-r--r--  storage/innobase/row/row0merge.cc | 5
-rw-r--r--  storage/innobase/row/row0mysql.cc | 6
-rw-r--r--  storage/innobase/row/row0sel.cc | 4
-rw-r--r--  storage/innobase/srv/srv0start.cc | 4
-rw-r--r--  storage/tokudb/PerconaFT/COPYING.APACHEv2 | 174
-rw-r--r--  storage/tokudb/PerconaFT/README.md | 5
-rw-r--r--  storage/tokudb/PerconaFT/ft/txn/txn_manager.h | 4
-rw-r--r--  storage/tokudb/PerconaFT/locktree/concurrent_tree.cc | 14
-rw-r--r--  storage/tokudb/PerconaFT/locktree/concurrent_tree.h | 14
-rw-r--r--  storage/tokudb/PerconaFT/locktree/keyrange.cc | 13
-rw-r--r--  storage/tokudb/PerconaFT/locktree/keyrange.h | 13
-rw-r--r--  storage/tokudb/PerconaFT/locktree/lock_request.cc | 13
-rw-r--r--  storage/tokudb/PerconaFT/locktree/lock_request.h | 13
-rw-r--r--  storage/tokudb/PerconaFT/locktree/locktree.cc | 13
-rw-r--r--  storage/tokudb/PerconaFT/locktree/locktree.h | 13
-rw-r--r--  storage/tokudb/PerconaFT/locktree/manager.cc | 13
-rw-r--r--  storage/tokudb/PerconaFT/locktree/range_buffer.cc | 13
-rw-r--r--  storage/tokudb/PerconaFT/locktree/range_buffer.h | 13
-rw-r--r--  storage/tokudb/PerconaFT/locktree/treenode.cc | 13
-rw-r--r--  storage/tokudb/PerconaFT/locktree/treenode.h | 13
-rw-r--r--  storage/tokudb/PerconaFT/locktree/txnid_set.cc | 13
-rw-r--r--  storage/tokudb/PerconaFT/locktree/txnid_set.h | 13
-rw-r--r--  storage/tokudb/PerconaFT/locktree/wfg.cc | 13
-rw-r--r--  storage/tokudb/PerconaFT/locktree/wfg.h | 13
-rw-r--r--  storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc | 12
-rw-r--r--  storage/tokudb/PerconaFT/portability/toku_instr_mysql.h | 11
-rw-r--r--  storage/tokudb/PerconaFT/portability/toku_pthread.h | 78
-rw-r--r--  storage/tokudb/PerconaFT/util/growable_array.h | 13
-rw-r--r--  storage/tokudb/PerconaFT/util/omt.cc | 2261
-rw-r--r--  storage/tokudb/PerconaFT/util/omt.h | 13
-rw-r--r--  storage/tokudb/ha_tokudb.cc | 10
-rw-r--r--  storage/tokudb/hatoku_hton.cc | 4
-rw-r--r--  storage/tokudb/hatoku_hton.h | 1
-rw-r--r-- [-rwxr-xr-x]  storage/tokudb/mysql-test/tokudb/t/change_column_bin.py | 0
-rw-r--r-- [-rwxr-xr-x]  storage/tokudb/mysql-test/tokudb/t/change_column_bin_rename.py | 0
-rw-r--r-- [-rwxr-xr-x]  storage/tokudb/mysql-test/tokudb/t/change_column_char.py | 0
-rw-r--r-- [-rwxr-xr-x]  storage/tokudb/mysql-test/tokudb/t/change_column_char_binary.py | 0
-rw-r--r-- [-rwxr-xr-x]  storage/tokudb/mysql-test/tokudb/t/change_column_char_charbinary.py | 0
-rw-r--r-- [-rwxr-xr-x]  storage/tokudb/mysql-test/tokudb/t/change_column_char_rename.py | 0
-rw-r--r-- [-rwxr-xr-x]  storage/tokudb/mysql-test/tokudb/t/change_column_int.py | 0
-rw-r--r-- [-rwxr-xr-x]  storage/tokudb/mysql-test/tokudb/t/change_column_int_key.py | 0
-rw-r--r-- [-rwxr-xr-x]  storage/tokudb/mysql-test/tokudb/t/change_column_int_not_supported.py | 0
-rw-r--r-- [-rwxr-xr-x]  storage/tokudb/mysql-test/tokudb/t/change_column_int_rename.py | 0
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/r/PS-4979.result | 2
-rw-r--r--  storage/tokudb/mysql-test/tokudb_bugs/t/PS-4979.test | 13
-rw-r--r--  storage/tokudb/tokudb_background.cc | 4
-rw-r--r--  storage/tokudb/tokudb_sysvars.cc | 14
-rw-r--r--  storage/tokudb/tokudb_sysvars.h | 4
-rw-r--r--  storage/xtradb/buf/buf0buf.cc | 174
-rw-r--r--  storage/xtradb/dict/dict0dict.cc | 7
-rw-r--r--  storage/xtradb/dict/dict0mem.cc | 4
-rw-r--r--  storage/xtradb/fil/fil0fil.cc | 36
-rw-r--r--  storage/xtradb/fts/fts0fts.cc | 20
-rw-r--r--  storage/xtradb/fts/fts0pars.cc | 4
-rw-r--r--  storage/xtradb/fts/fts0pars.y | 4
-rw-r--r--  storage/xtradb/handler/ha_innodb.cc | 103
-rw-r--r--  storage/xtradb/handler/ha_innodb.h | 8
-rw-r--r--  storage/xtradb/handler/handler0alter.cc | 141
-rw-r--r--  storage/xtradb/include/buf0buf.h | 6
-rw-r--r--  storage/xtradb/include/data0type.ic | 1
-rw-r--r--  storage/xtradb/include/dict0mem.h | 7
-rw-r--r--  storage/xtradb/include/fil0fil.h | 20
-rw-r--r--  storage/xtradb/include/page0page.h | 18
-rw-r--r--  storage/xtradb/include/univ.i | 15
-rw-r--r--  storage/xtradb/log/log0online.cc | 41
-rw-r--r--  storage/xtradb/os/os0proc.cc | 3
-rw-r--r--  storage/xtradb/page/page0page.cc | 46
-rw-r--r--  storage/xtradb/page/page0zip.cc | 87
-rw-r--r--  storage/xtradb/row/row0ftsort.cc | 7
-rw-r--r--  storage/xtradb/row/row0import.cc | 9
-rw-r--r--  storage/xtradb/row/row0mysql.cc | 15
-rw-r--r--  storage/xtradb/row/row0sel.cc | 5
-rw-r--r--  storage/xtradb/srv/srv0start.cc | 3
-rw-r--r--  support-files/mysql.server.sh | 6
-rw-r--r--  unittest/mysys/lf-t.c | 5
-rw-r--r--  unittest/mysys/my_atomic-t.c | 17
-rw-r--r--  unittest/mysys/thr_template.c | 31
-rw-r--r--  unittest/mysys/waiting_threads-t.c | 4
-rw-r--r--  win/packaging/heidisql.cmake | 2
234 files changed, 5459 insertions, 2902 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d35ebb1ae00..98f271bb713 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -265,9 +265,15 @@ IF(HAVE_GGDB3)
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -ggdb3")
ENDIF(HAVE_GGDB3)
-OPTION(ENABLED_LOCAL_INFILE
- "If we should should enable LOAD DATA LOCAL by default" ${IF_WIN})
+SET(ENABLED_LOCAL_INFILE "AUTO" CACHE STRING "If we should should enable LOAD DATA LOCAL by default (OFF/ON/AUTO)")
MARK_AS_ADVANCED(ENABLED_LOCAL_INFILE)
+IF (ENABLED_LOCAL_INFILE MATCHES "^(0|FALSE)$")
+ SET(ENABLED_LOCAL_INFILE OFF)
+ELSEIF(ENABLED_LOCAL_INFILE MATCHES "^(1|TRUE)$")
+ SET(ENABLED_LOCAL_INFILE ON)
+ELSEIF (NOT ENABLED_LOCAL_INFILE MATCHES "^(ON|OFF|AUTO)$")
+ MESSAGE(FATAL_ERROR "ENABLED_LOCAL_INFILE must be one of OFF, ON, AUTO")
+ENDIF()
OPTION(WITH_FAST_MUTEXES "Compile with fast mutexes" OFF)
MARK_AS_ADVANCED(WITH_FAST_MUTEXES)
diff --git a/client/CMakeLists.txt b/client/CMakeLists.txt
index c75abd4956d..c760a9dbf14 100644
--- a/client/CMakeLists.txt
+++ b/client/CMakeLists.txt
@@ -41,7 +41,7 @@ ENDIF(UNIX)
MYSQL_ADD_EXECUTABLE(mysqltest mysqltest.cc COMPONENT Test)
SET_SOURCE_FILES_PROPERTIES(mysqltest.cc PROPERTIES COMPILE_FLAGS "-DTHREADS")
-TARGET_LINK_LIBRARIES(mysqltest mysqlclient pcre pcreposix)
+TARGET_LINK_LIBRARIES(mysqltest mysqlclient pcreposix pcre)
SET_TARGET_PROPERTIES(mysqltest PROPERTIES ENABLE_EXPORTS TRUE)
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index bcb75214e79..7ec2a62dcf7 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -20,7 +20,7 @@
Tool used for executing a .test file
See the "MySQL Test framework manual" for more information
- http://dev.mysql.com/doc/mysqltest/en/index.html
+ https://mariadb.com/kb/en/library/mysqltest/
Please keep the test framework tools identical in all versions!
@@ -6105,7 +6105,6 @@ void do_connect(struct st_command *command)
#endif
if (opt_compress || con_compress)
mysql_options(con_slot->mysql, MYSQL_OPT_COMPRESS, NullS);
- mysql_options(con_slot->mysql, MYSQL_OPT_LOCAL_INFILE, 0);
mysql_options(con_slot->mysql, MYSQL_SET_CHARSET_NAME,
charset_info->csname);
if (opt_charsets_dir)
@@ -6205,6 +6204,11 @@ void do_connect(struct st_command *command)
if (con_slot == next_con)
next_con++; /* if we used the next_con slot, advance the pointer */
}
+ else // Failed to connect. Free the memory.
+ {
+ mysql_close(con_slot->mysql);
+ con_slot->mysql= NULL;
+ }
dynstr_free(&ds_connection_name);
dynstr_free(&ds_host);
@@ -6577,8 +6581,6 @@ static inline bool is_escape_char(char c, char in_string)
SYNOPSIS
read_line
- buf buffer for the read line
- size size of the buffer i.e max size to read
DESCRIPTION
This function actually reads several lines and adds them to the
@@ -6596,10 +6598,15 @@ static inline bool is_escape_char(char c, char in_string)
*/
-int read_line(char *buf, int size)
+static char *read_command_buf= NULL;
+static size_t read_command_buflen= 0;
+static const size_t max_multibyte_length= 6;
+
+int read_line()
{
char c, last_quote=0, last_char= 0;
- char *p= buf, *buf_end= buf + size - 1;
+ char *p= read_command_buf;
+ char *buf_end= read_command_buf + read_command_buflen - max_multibyte_length;
int skip_char= 0;
my_bool have_slash= FALSE;
@@ -6607,10 +6614,21 @@ int read_line(char *buf, int size)
R_COMMENT, R_LINE_START} state= R_LINE_START;
DBUG_ENTER("read_line");
+ *p= 0;
start_lineno= cur_file->lineno;
DBUG_PRINT("info", ("Starting to read at lineno: %d", start_lineno));
- for (; p < buf_end ;)
+ while (1)
{
+ if (p >= buf_end)
+ {
+ my_ptrdiff_t off= p - read_command_buf;
+ read_command_buf= (char*)my_realloc(read_command_buf,
+ read_command_buflen*2, MYF(MY_FAE));
+ p= read_command_buf + off;
+ read_command_buflen*= 2;
+ buf_end= read_command_buf + read_command_buflen - max_multibyte_length;
+ }
+
skip_char= 0;
c= my_getc(cur_file->file);
if (feof(cur_file->file))
@@ -6646,7 +6664,7 @@ int read_line(char *buf, int size)
cur_file->lineno++;
/* Convert cr/lf to lf */
- if (p != buf && *(p-1) == '\r')
+ if (p != read_command_buf && *(p-1) == '\r')
p--;
}
@@ -6661,9 +6679,9 @@ int read_line(char *buf, int size)
}
else if ((c == '{' &&
(!my_strnncoll_simple(charset_info, (const uchar*) "while", 5,
- (uchar*) buf, MY_MIN(5, p - buf), 0) ||
+ (uchar*) read_command_buf, MY_MIN(5, p - read_command_buf), 0) ||
!my_strnncoll_simple(charset_info, (const uchar*) "if", 2,
- (uchar*) buf, MY_MIN(2, p - buf), 0))))
+ (uchar*) read_command_buf, MY_MIN(2, p - read_command_buf), 0))))
{
/* Only if and while commands can be terminated by { */
*p++= c;
@@ -6797,8 +6815,6 @@ int read_line(char *buf, int size)
*p++= c;
}
}
- die("The input buffer is too small for this query.x\n" \
- "check your query or increase MAX_QUERY and recompile");
DBUG_RETURN(0);
}
@@ -6943,12 +6959,8 @@ bool is_delimiter(const char* p)
terminated by new line '\n' regardless how many "delimiter" it contain.
*/
-#define MAX_QUERY (256*1024*2) /* 256K -- a test in sp-big is >128K */
-static char read_command_buf[MAX_QUERY];
-
int read_command(struct st_command** command_ptr)
{
- char *p= read_command_buf;
struct st_command* command;
DBUG_ENTER("read_command");
@@ -6964,8 +6976,7 @@ int read_command(struct st_command** command_ptr)
die("Out of memory");
command->type= Q_UNKNOWN;
- read_command_buf[0]= 0;
- if (read_line(read_command_buf, sizeof(read_command_buf)))
+ if (read_line())
{
check_eol_junk(read_command_buf);
DBUG_RETURN(1);
@@ -6974,6 +6985,7 @@ int read_command(struct st_command** command_ptr)
if (opt_result_format_version == 1)
convert_to_format_v1(read_command_buf);
+ char *p= read_command_buf;
DBUG_PRINT("info", ("query: '%s'", read_command_buf));
if (*p == '#')
{
@@ -9126,6 +9138,8 @@ int main(int argc, char **argv)
init_win_path_patterns();
#endif
+ read_command_buf= (char*)my_malloc(read_command_buflen= 65536, MYF(MY_FAE));
+
init_dynamic_string(&ds_res, "", 2048, 2048);
init_alloc_root(&require_file_root, 1024, 1024, MYF(0));
@@ -9196,7 +9210,6 @@ int main(int argc, char **argv)
(void *) &opt_connect_timeout);
if (opt_compress)
mysql_options(con->mysql,MYSQL_OPT_COMPRESS,NullS);
- mysql_options(con->mysql, MYSQL_OPT_LOCAL_INFILE, 0);
mysql_options(con->mysql, MYSQL_SET_CHARSET_NAME,
charset_info->csname);
if (opt_charsets_dir)
diff --git a/cmake/build_configurations/mysql_release.cmake b/cmake/build_configurations/mysql_release.cmake
index 78314342426..b82b87b6237 100644
--- a/cmake/build_configurations/mysql_release.cmake
+++ b/cmake/build_configurations/mysql_release.cmake
@@ -96,7 +96,6 @@ IF(FEATURE_SET)
ENDFOREACH()
ENDIF()
-OPTION(ENABLED_LOCAL_INFILE "" ON)
IF(RPM)
SET(WITH_SSL system CACHE STRING "")
SET(WITH_ZLIB system CACHE STRING "")
diff --git a/cmake/ssl.cmake b/cmake/ssl.cmake
index c76e73927c0..6985932d165 100644
--- a/cmake/ssl.cmake
+++ b/cmake/ssl.cmake
@@ -174,15 +174,24 @@ MACRO (MYSQL_CHECK_SSL)
OPENSSL_MAJOR_VERSION "${OPENSSL_VERSION_NUMBER}"
)
INCLUDE(CheckSymbolExists)
+ INCLUDE(CheckCSourceCompiles)
SET(CMAKE_REQUIRED_INCLUDES ${OPENSSL_INCLUDE_DIR})
CHECK_SYMBOL_EXISTS(SHA512_DIGEST_LENGTH "openssl/sha.h"
HAVE_SHA512_DIGEST_LENGTH)
+ CHECK_C_SOURCE_COMPILES("
+ #include <openssl/dh.h>
+ int main()
+ {
+ DH dh;
+ return sizeof(dh.version);
+ }" OLD_OPENSSL_API)
+
SET(OPENSSL_FOUND TRUE)
ELSE()
SET(OPENSSL_FOUND FALSE)
ENDIF()
- IF(OPENSSL_FOUND AND OPENSSL_MAJOR_VERSION STRLESS "101" AND
+ IF(OPENSSL_FOUND AND OLD_OPENSSL_API AND
HAVE_SHA512_DIGEST_LENGTH)
MESSAGE(STATUS "OPENSSL_INCLUDE_DIR = ${OPENSSL_INCLUDE_DIR}")
MESSAGE(STATUS "OPENSSL_LIBRARIES = ${OPENSSL_LIBRARIES}")
diff --git a/cmake/zlib.cmake b/cmake/zlib.cmake
index 4b7faacc466..e269c473f36 100644
--- a/cmake/zlib.cmake
+++ b/cmake/zlib.cmake
@@ -34,11 +34,6 @@ ENDMACRO()
MACRO (MYSQL_CHECK_ZLIB_WITH_COMPRESS)
- # For NDBCLUSTER: Use bundled zlib by default
- IF (NOT WITH_ZLIB)
- SET(WITH_ZLIB "bundled" CACHE STRING "By default use bundled zlib on this platform")
- ENDIF()
-
IF(WITH_ZLIB STREQUAL "bundled")
MYSQL_USE_BUNDLED_ZLIB()
ELSE()
diff --git a/config.h.cmake b/config.h.cmake
index e4efdaaef12..00122d1fa70 100644
--- a/config.h.cmake
+++ b/config.h.cmake
@@ -537,7 +537,11 @@
/*
MySQL features
*/
-#cmakedefine ENABLED_LOCAL_INFILE 1
+#define LOCAL_INFILE_MODE_OFF 0
+#define LOCAL_INFILE_MODE_ON 1
+#define LOCAL_INFILE_MODE_AUTO 2
+#define ENABLED_LOCAL_INFILE LOCAL_INFILE_MODE_@ENABLED_LOCAL_INFILE@
+
#cmakedefine ENABLED_PROFILING 1
#cmakedefine EXTRA_DEBUG 1
#cmakedefine BACKUP_TEST 1
diff --git a/include/my_global.h b/include/my_global.h
index 057c613d64c..8acbb9a4346 100644
--- a/include/my_global.h
+++ b/include/my_global.h
@@ -1090,7 +1090,7 @@ typedef ulong myf; /* Type of MyFlags in my_funcs */
static inline char *dlerror(void)
{
static char win_errormsg[2048];
- FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM,
+ FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM,
0, GetLastError(), 0, win_errormsg, 2048, NULL);
return win_errormsg;
}
diff --git a/include/my_valgrind.h b/include/my_valgrind.h
index 5d08a271d4a..a85e610f049 100644
--- a/include/my_valgrind.h
+++ b/include/my_valgrind.h
@@ -42,7 +42,7 @@ https://github.com/google/sanitizers/wiki/AddressSanitizerManualPoisoning */
# define MEM_CHECK_ADDRESSABLE(a,len) ((void) 0)
# define MEM_CHECK_DEFINED(a,len) ((void) 0)
#else
-# define MEM_UNDEFINED(a,len) ((void) 0)
+# define MEM_UNDEFINED(a,len) ((void) (a), (void) (len))
# define MEM_NOACCESS(a,len) ((void) 0)
# define MEM_CHECK_ADDRESSABLE(a,len) ((void) 0)
# define MEM_CHECK_DEFINED(a,len) ((void) 0)
@@ -51,7 +51,7 @@ https://github.com/google/sanitizers/wiki/AddressSanitizerManualPoisoning */
#ifndef DBUG_OFF
#define TRASH_FILL(A,B,C) do { const size_t trash_tmp= (B); MEM_UNDEFINED(A, trash_tmp); memset(A, C, trash_tmp); } while (0)
#else
-#define TRASH_FILL(A,B,C) do { const size_t trash_tmp __attribute__((unused))= (B); MEM_UNDEFINED(A,trash_tmp); } while (0)
+#define TRASH_FILL(A,B,C) do { MEM_UNDEFINED((A), (B)); } while (0)
#endif
#define TRASH_ALLOC(A,B) do { TRASH_FILL(A,B,0xA5); MEM_UNDEFINED(A,B); } while(0)
#define TRASH_FREE(A,B) do { TRASH_FILL(A,B,0x8F); MEM_NOACCESS(A,B); } while(0)
diff --git a/include/mysql.h b/include/mysql.h
index f088ad668a1..fc814787c22 100644
--- a/include/mysql.h
+++ b/include/mysql.h
@@ -283,7 +283,7 @@ typedef struct st_mysql
/* session-wide random string */
char scramble[SCRAMBLE_LENGTH+1];
- my_bool unused1;
+ my_bool auto_local_infile;
void *unused2, *unused3, *unused4, *unused5;
LIST *stmts; /* list of all statements */
diff --git a/include/mysql.h.pp b/include/mysql.h.pp
index a593f526c6e..f891ad6f2c9 100644
--- a/include/mysql.h.pp
+++ b/include/mysql.h.pp
@@ -354,7 +354,7 @@ typedef struct st_mysql
my_bool free_me;
my_bool reconnect;
char scramble[20 +1];
- my_bool unused1;
+ my_bool auto_local_infile;
void *unused2, *unused3, *unused4, *unused5;
LIST *stmts;
const struct st_mysql_methods *methods;
diff --git a/include/mysql/service_kill_statement.h b/include/mysql/service_kill_statement.h
index 995b21f0a9f..bfb222301eb 100644
--- a/include/mysql/service_kill_statement.h
+++ b/include/mysql/service_kill_statement.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, Monty Program Ab.
+/* Copyright (c) 2013, 2018, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -27,7 +27,7 @@
time-consuming loops, and gracefully abort the operation if it is
non-zero.
- thd_is_killed(thd)
+ thd_killed(thd)
@return 0 - no KILL statement was issued, continue normally
@return 1 - there was a KILL statement, abort the execution.
diff --git a/libmysqld/examples/CMakeLists.txt b/libmysqld/examples/CMakeLists.txt
index d47638ad2f9..1eb07a2adf8 100644
--- a/libmysqld/examples/CMakeLists.txt
+++ b/libmysqld/examples/CMakeLists.txt
@@ -34,7 +34,7 @@ ENDIF(UNIX)
MYSQL_ADD_EXECUTABLE(mysqltest_embedded ../../client/mysqltest.cc
COMPONENT Test)
-TARGET_LINK_LIBRARIES(mysqltest_embedded mysqlserver pcre pcreposix)
+TARGET_LINK_LIBRARIES(mysqltest_embedded mysqlserver pcreposix pcre)
IF(CMAKE_GENERATOR MATCHES "Xcode")
# It does not seem possible to tell Xcode the resulting target might need
diff --git a/mysql-test/disabled.def b/mysql-test/disabled.def
index 17bc921ab66..9292081b45e 100644
--- a/mysql-test/disabled.def
+++ b/mysql-test/disabled.def
@@ -19,3 +19,4 @@ ssl_crl : broken upstream
ssl_crl_clrpath : broken upstream
file_contents : MDEV-6526 these files are not installed anymore
lowercase_fs_on : lower_case_table_names=0 is not an error until 10.1
+partition_open_files_limit : open_files_limit check broken by MDEV-18360
diff --git a/mysql-test/lib/v1/mysql-test-run.pl b/mysql-test/lib/v1/mysql-test-run.pl
index baeb141f18c..973497c90df 100755
--- a/mysql-test/lib/v1/mysql-test-run.pl
+++ b/mysql-test/lib/v1/mysql-test-run.pl
@@ -23,7 +23,7 @@
# Tool used for executing a suite of .test file
#
# See the "MySQL Test framework manual" for more information
-# http://dev.mysql.com/doc/mysqltest/en/index.html
+# https://mariadb.com/kb/en/library/mysqltest/
#
# Please keep the test framework tools identical in all versions!
#
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 0ea628f75e0..3ceeb1fab78 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -25,7 +25,7 @@
# Tool used for executing a suite of .test files
#
# See the "MySQL Test framework manual" for more information
-# http://dev.mysql.com/doc/mysqltest/en/index.html
+# https://mariadb.com/kb/en/library/mysqltest/
#
#
##############################################################################
diff --git a/mysql-test/r/auto_increment_ranges_innodb.result b/mysql-test/r/auto_increment_ranges_innodb.result
index fb936ddfd2b..961f8b870ec 100644
--- a/mysql-test/r/auto_increment_ranges_innodb.result
+++ b/mysql-test/r/auto_increment_ranges_innodb.result
@@ -264,3 +264,17 @@ delete from t1 where a=32767;
insert into t1 values(NULL);
ERROR 22003: Out of range value for column 'a' at row 1
drop table t1;
+create table t1 (pk int auto_increment primary key, f varchar(20));
+insert t1 (f) values ('a'), ('b'), ('c'), ('d');
+select null, f into outfile 'load.data' from t1 limit 1;
+load data infile 'load.data' into table t1;
+insert t1 (f) values ('<===');
+select * from t1;
+pk f
+1 a
+2 b
+3 c
+4 d
+5 a
+6 <===
+drop table t1;
diff --git a/mysql-test/r/bigint.result b/mysql-test/r/bigint.result
index b06ec5805a0..760b9c7b205 100644
--- a/mysql-test/r/bigint.result
+++ b/mysql-test/r/bigint.result
@@ -508,3 +508,14 @@ DROP TABLE t1;
SELECT 100 BETWEEN 1 AND 9223372036854775808;
100 BETWEEN 1 AND 9223372036854775808
1
+#
+# MDEV-17724 Wrong result for BETWEEN 0 AND 18446744073709551615
+#
+CREATE TABLE t1 (c1 bigint(20) unsigned NOT NULL);
+INSERT INTO t1 VALUES (0),(101),(255);
+SELECT * FROM t1 WHERE c1 BETWEEN 0 AND 18446744073709551615 ORDER BY c1;
+c1
+0
+101
+255
+DROP TABLE t1;
diff --git a/mysql-test/r/func_group_innodb.result b/mysql-test/r/func_group_innodb.result
index 52d5922df95..17b3c1e797e 100644
--- a/mysql-test/r/func_group_innodb.result
+++ b/mysql-test/r/func_group_innodb.result
@@ -246,4 +246,34 @@ EXPLAIN SELECT MIN(c) FROM t1 GROUP BY b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL b 263 NULL 3 Using index for group-by
DROP TABLE t1;
+#
+# MDEV-17589: Stack-buffer-overflow with indexed varchar (utf8) field
+#
+set @save_innodb_file_format= @@innodb_file_format;
+set @save_innodb_large_prefix= @@innodb_large_prefix;
+set global innodb_file_format = BARRACUDA;
+set global innodb_large_prefix = ON;
+CREATE TABLE t1 (v1 varchar(1020), v2 varchar(2), v3 varchar(2),
+KEY k1 (v3,v2,v1)) ENGINE=InnoDB CHARACTER SET=utf8 ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES ('king', 'qu','qu'), ('bad','go','go');
+explain
+SELECT MIN(t1.v1) FROM t1 where t1.v2='qu' and t1.v3='qu';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+SELECT MIN(t1.v1) FROM t1 where t1.v2='qu' and t1.v3='qu';
+MIN(t1.v1)
+king
+drop table t1;
+CREATE TABLE t1 (v1 varchar(1024) CHARACTER SET utf8, KEY v1 (v1)) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES ('king'), ('bad');
+explain
+SELECT MIN(x.v1) FROM (SELECT t1.* FROM t1 WHERE t1.v1 >= 'p') x;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No matching min/max row
+SELECT MIN(x.v1) FROM (SELECT t1.* FROM t1 WHERE t1.v1 >= 'p') x;
+MIN(x.v1)
+NULL
+drop table t1;
+set global innodb_file_format = @save_innodb_file_format;
+set global innodb_large_prefix = @save_innodb_large_prefix;
End of 5.5 tests
diff --git a/mysql-test/r/huge_frm-6224.result b/mysql-test/r/huge_frm-6224.result
index 3772317c04d..0d6dd968295 100644
--- a/mysql-test/r/huge_frm-6224.result
+++ b/mysql-test/r/huge_frm-6224.result
@@ -1 +1,3 @@
+set global max_allowed_packet=1024*1024*10;
ERROR HY000: The definition for table `t1` is too big
+set global max_allowed_packet=default;
diff --git a/mysql-test/r/innodb_ext_key.result b/mysql-test/r/innodb_ext_key.result
index 2b3b98eb26a..600269ba433 100644
--- a/mysql-test/r/innodb_ext_key.result
+++ b/mysql-test/r/innodb_ext_key.result
@@ -1068,5 +1068,100 @@ a
1
drop table t1, t2;
set optimizer_switch=@save_optimizer_switch;
+#
+# MDEV-10360: Extended keys: index properties depend on index order
+#
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (
+index_id bigint(20) unsigned NOT NULL,
+index_class varchar(265) COLLATE latin1_general_ci DEFAULT NULL ,
+index_object_id int(10) unsigned NOT NULL DEFAULT '0' ,
+index_date_updated int(10) unsigned DEFAULT NULL ,
+PRIMARY KEY (index_id),
+KEY object (index_class(181),index_object_id),
+KEY index_date_updated (index_date_updated)
+) engine=innodb;
+create table t2 (
+index_id bigint(20) unsigned NOT NULL,
+index_class varchar(265) COLLATE latin1_general_ci DEFAULT NULL ,
+index_object_id int(10) unsigned NOT NULL DEFAULT '0' ,
+index_date_updated int(10) unsigned DEFAULT NULL ,
+PRIMARY KEY (index_id),
+KEY index_date_updated (index_date_updated),
+KEY object (index_class(181),index_object_id)
+) engine=innodb;
+insert into t1 select
+@a:=A.a + 10*B.a + 100*C.a,
+concat('val-', @a),
+123456,
+A.a + 10*B.a
+from
+t0 A, t0 B, t0 C;
+insert into t2 select * from t1;
+# This must have the same query plan as the query below it:
+# type=range, key=index_date_updated, key_len=13
+explain
+select * from t1 force index(index_date_updated)
+where index_date_updated= 10 and index_id < 800;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range index_date_updated index_date_updated 13 NULL # Using index condition
+# This used to work from the start:
+explain
+select * from t2 force index(index_date_updated)
+where index_date_updated= 10 and index_id < 800;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range index_date_updated index_date_updated 13 NULL # Using index condition
+drop table t0,t1,t2;
+#
+# MDEV-11196: Error:Run-Time Check Failure #2 - Stack around the variable 'key_buff'
+# was corrupted, server crashes in opt_sum_query
+set @save_innodb_file_format= @@innodb_file_format;
+set @save_innodb_large_prefix= @@innodb_large_prefix;
+set global innodb_file_format = BARRACUDA;
+set global innodb_large_prefix = ON;
+CREATE TABLE t1 (
+pk INT,
+f1 VARCHAR(3),
+f2 VARCHAR(1024),
+PRIMARY KEY (pk),
+KEY(f2)
+) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
+INSERT INTO t1 VALUES (1,'foo','abc'),(2,'bar','def');
+SELECT MAX(t2.pk) FROM t1 t2 INNER JOIN t1 t3 ON t2.f1 = t3.f1 WHERE t2.pk <= 4;
+MAX(t2.pk)
+2
+drop table t1;
+CREATE TABLE t1 (
+pk1 INT,
+pk2 INT,
+f1 VARCHAR(3),
+f2 VARCHAR(1021),
+PRIMARY KEY (pk1,pk2),
+KEY(f2)
+) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
+INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def');
+explain
+select * from t1 force index(f2) where pk1 <= 5 and pk2 <=5 and f2 = 'abc' and f1 <= '3';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range f2 f2 3070 NULL 1 Using index condition; Using where
+drop table t1;
+CREATE TABLE t1 (
+f2 INT,
+pk2 INT,
+f1 VARCHAR(3),
+pk1 VARCHAR(1000),
+PRIMARY KEY (pk1,pk2),
+KEY k1(pk1,f2)
+) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
+INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def');
+explain
+select * from t1 force index(k1) where f2 <= 5 and pk2 <=5 and pk1 = 'abc' and f1 <= '3';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range k1 k1 3011 NULL 1 Using index condition; Using where
+drop table t1;
+set optimizer_switch=@save_ext_key_optimizer_switch;
+set global innodb_file_format = @save_innodb_file_format;
+set global innodb_large_prefix = @save_innodb_large_prefix;
set optimizer_switch=@save_ext_key_optimizer_switch;
SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/r/mysql.result b/mysql-test/r/mysql.result
index 8a24128daa2..ffa5d020153 100644
--- a/mysql-test/r/mysql.result
+++ b/mysql-test/r/mysql.result
@@ -587,3 +587,29 @@ a
2
drop table "a1\""b1";
set sql_mode=default;
+create table t1 (a text);
+select count(*) from t1;
+count(*)
+41
+truncate table t1;
+select count(*) from t1;
+count(*)
+41
+truncate table t1;
+select count(*) from t1;
+count(*)
+0
+truncate table t1;
+select count(*) from t1;
+count(*)
+0
+truncate table t1;
+select count(*) from t1;
+count(*)
+41
+truncate table t1;
+select count(*) from t1;
+count(*)
+0
+truncate table t1;
+drop table t1;
diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result
index efcfec4ee7a..39e78362a92 100644
--- a/mysql-test/r/mysqldump.result
+++ b/mysql-test/r/mysqldump.result
@@ -4317,12 +4317,12 @@ second ee1 root@localhost UTC ONE TIME 2035-12-31 20:01:23 NULL NULL NULL NULL E
show create event ee1;
Event sql_mode time_zone Create Event character_set_client collation_connection Database Collation
ee1 UTC CREATE DEFINER=`root`@`localhost` EVENT `ee1` ON SCHEDULE AT '2035-12-31 20:01:23' ON COMPLETION NOT PRESERVE ENABLE DO set @a=5 latin1 latin1_swedish_ci latin1_swedish_ci
-create event ee2 on schedule at '2018-12-31 21:01:23' do set @a=5;
+create event ee2 on schedule at '2030-12-31 21:01:22' do set @a=5;
create event ee3 on schedule at '2030-12-31 22:01:23' do set @a=5;
show events;
Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation
second ee1 root@localhost UTC ONE TIME 2035-12-31 20:01:23 NULL NULL NULL NULL ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
-second ee2 root@localhost UTC ONE TIME 2018-12-31 21:01:23 NULL NULL NULL NULL ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
+second ee2 root@localhost UTC ONE TIME 2030-12-31 21:01:22 NULL NULL NULL NULL ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
second ee3 root@localhost UTC ONE TIME 2030-12-31 22:01:23 NULL NULL NULL NULL ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
drop database second;
create database third;
@@ -4330,7 +4330,7 @@ use third;
show events;
Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation
third ee1 root@localhost UTC ONE TIME 2035-12-31 20:01:23 NULL NULL NULL NULL ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
-third ee2 root@localhost UTC ONE TIME 2018-12-31 21:01:23 NULL NULL NULL NULL ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
+third ee2 root@localhost UTC ONE TIME 2030-12-31 21:01:22 NULL NULL NULL NULL ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
third ee3 root@localhost UTC ONE TIME 2030-12-31 22:01:23 NULL NULL NULL NULL ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
drop database third;
set time_zone = 'SYSTEM';
diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result
index c6669176b3d..6732782c5f7 100644
--- a/mysql-test/r/partition.result
+++ b/mysql-test/r/partition.result
@@ -2645,3 +2645,103 @@ Warnings:
Note 1517 Duplicate partition name p2
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
+#
+# MDEV-17032: Estimates are higher for partitions of a table with @@use_stat_tables= PREFERABLY
+#
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1(a int);
+insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
+create table t2 (
+part_key int,
+a int,
+b int
+) partition by list(part_key) (
+partition p0 values in (0),
+partition p1 values in (1),
+partition p2 values in (2),
+partition p3 values in (3),
+partition p4 values in (4)
+);
+insert into t2
+select mod(a,5), a/100, mod(a,5) from t1;
+set @save_use_stat_tables= @@use_stat_tables;
+set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+#
+# Tests using stats provided by the storage engine
+#
+explain extended select * from t2 where part_key=1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 200 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t2`.`part_key` AS `part_key`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where (`test`.`t2`.`part_key` = 1)
+explain partitions select * from t2 where part_key=1;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 p1 ALL NULL NULL NULL NULL 200 Using where
+explain extended select * from t2 where part_key in (1,2);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 400 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t2`.`part_key` AS `part_key`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where (`test`.`t2`.`part_key` in (1,2))
+explain partitions select * from t2 where part_key in (1,2);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 p1,p2 ALL NULL NULL NULL NULL 400 Using where
+explain extended select * from t2 where b=5;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 1000 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t2`.`part_key` AS `part_key`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where (`test`.`t2`.`b` = 5)
+explain partitions select * from t2 where b=5;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 p0,p1,p2,p3,p4 ALL NULL NULL NULL NULL 1000 Using where
+explain extended select * from t2 partition(p0) where b=1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 200 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t2`.`part_key` AS `part_key`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` PARTITION (`p0`) where (`test`.`t2`.`b` = 1)
+set @save_histogram_size=@@histogram_size;
+set @@histogram_size=100;
+set @@use_stat_tables= PREFERABLY;
+set @@optimizer_use_condition_selectivity=4;
+analyze table t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+#
+# Tests using EITS
+#
+# filtered should be 100
+explain extended select * from t2 where part_key=1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 200 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t2`.`part_key` AS `part_key`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where (`test`.`t2`.`part_key` = 1)
+explain partitions select * from t2 where part_key=1;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 p1 ALL NULL NULL NULL NULL 200 Using where
+# filtered should be 100
+explain extended select * from t2 where part_key in (1,2);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 400 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t2`.`part_key` AS `part_key`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where (`test`.`t2`.`part_key` in (1,2))
+explain partitions select * from t2 where part_key in (1,2);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 p1,p2 ALL NULL NULL NULL NULL 400 Using where
+explain extended select * from t2 where b=5;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 1000 19.80 Using where
+Warnings:
+Note 1003 select `test`.`t2`.`part_key` AS `part_key`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where (`test`.`t2`.`b` = 5)
+explain partitions select * from t2 where b=5;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 p0,p1,p2,p3,p4 ALL NULL NULL NULL NULL 1000 Using where
+explain extended select * from t2 partition(p0) where b=1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 200 19.80 Using where
+Warnings:
+Note 1003 select `test`.`t2`.`part_key` AS `part_key`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` PARTITION (`p0`) where (`test`.`t2`.`b` = 1)
+set @@use_stat_tables= @save_use_stat_tables;
+set @@optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set @@histogram_size= @save_histogram_size;
+drop table t0,t1,t2;
diff --git a/mysql-test/r/partition_innodb.result b/mysql-test/r/partition_innodb.result
index f863ec5522a..b1405be3f12 100644
--- a/mysql-test/r/partition_innodb.result
+++ b/mysql-test/r/partition_innodb.result
@@ -890,3 +890,29 @@ ERROR HY000: Table definition has changed, please retry transaction
SELECT b FROM t1 WHERE b = 0;
ERROR HY000: Table definition has changed, please retry transaction
DROP TABLE t1;
+#
+# MDEV-11167: InnoDB: Warning: using a partial-field key prefix
+# in search, results in assertion failure or "Can't find record" error
+#
+CREATE TABLE t1 (a INT) ENGINE=InnoDB;
+CREATE TABLE t2 (b INT, c INT, KEY(b)) ENGINE=InnoDB PARTITION BY HASH(c) PARTITIONS 2;
+CREATE ALGORITHM = MERGE VIEW v AS SELECT a, b FROM t1 STRAIGHT_JOIN t2 WHERE b = 'foo' WITH CHECK OPTION;
+INSERT INTO t1 VALUES (1),(2);
+INSERT IGNORE INTO t2 VALUES (2,2),('three',3),(4,4);
+Warnings:
+Warning 1366 Incorrect integer value: 'three' for column 'b' at row 2
+UPDATE v SET a = NULL;
+Warnings:
+Warning 1292 Truncated incorrect DOUBLE value: 'foo'
+DROP view v;
+DROP TABLE t1, t2;
+SET @save_isp=@@innodb_stats_persistent;
+SET GLOBAL innodb_stats_persistent= ON;
+CREATE TABLE t (f1 INT, f2 INT, KEY(f2)) ENGINE=InnoDB PARTITION BY HASH (f1) PARTITIONS 2;
+INSERT IGNORE INTO t VALUES (NULL,0),(NULL,0),(0,21),(4,0),(1,8),(5,66);
+CREATE ALGORITHM=MERGE VIEW v AS SELECT t1.* FROM t t1 JOIN t t2 WHERE t1.f1 < t2.f2 WITH LOCAL CHECK OPTION;
+UPDATE v SET f2 = NULL;
+ERROR HY000: CHECK OPTION failed 'test.v'
+SET GLOBAL innodb_stats_persistent= @save_isp;
+DROP view v;
+DROP TABLE t;
diff --git a/mysql-test/r/range_innodb.result b/mysql-test/r/range_innodb.result
index 794e6c7b3cc..6572b248911 100644
--- a/mysql-test/r/range_innodb.result
+++ b/mysql-test/r/range_innodb.result
@@ -37,3 +37,45 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t0 ALL NULL NULL NULL NULL 10
1 SIMPLE t2 range a,b b 5 NULL 201 Using where; Using join buffer (flat, BNL join)
drop table t0,t1,t2;
+CREATE TABLE t1 (
+pk INT PRIMARY KEY, f1 INT, f2 CHAR(1), f3 CHAR(1),
+KEY(f1), KEY(f2)
+) ENGINE=InnoDB;
+INSERT INTO t1 VALUES
+(1,4,'v',NULL),(2,6,'v',NULL),(3,7,'c',NULL),(4,1,'e',NULL),(5,0,'x',NULL),
+(6,7,'i',NULL),(7,7,'e',NULL),(8,1,'p',NULL),(9,7,'s',NULL),(10,1,'j',NULL),
+(11,5,'z',NULL),(12,2,'c',NULL),(13,0,'a',NULL),(14,1,'q',NULL),(15,8,'y',NULL),
+(16,1,'m',NULL),(17,1,'r',NULL),(18,9,'v',NULL),(19,1,'n',NULL);
+CREATE TABLE t2 (f4 INT, f5 CHAR(1)) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (4,'q'),(NULL,'j');
+SELECT * FROM t1 AS t1_1, t1 AS t1_2, t2
+WHERE f5 = t1_2.f2 AND ( t1_1.f1 = 103 AND t1_1.f2 = 'o' OR t1_1.pk < f4 );
+pk f1 f2 f3 pk f1 f2 f3 f4 f5
+1 4 v NULL 14 1 q NULL 4 q
+2 6 v NULL 14 1 q NULL 4 q
+3 7 c NULL 14 1 q NULL 4 q
+drop table t1,t2;
+#
+# MDEV-14440: Server crash in in handler::ha_external_lock or Assertion `inited==RND'
+# failed in handler::ha_rnd_end upon SELECT from partitioned table
+#
+set @optimizer_switch_save= @@optimizer_switch;
+set optimizer_switch='index_merge_sort_intersection=off';
+create table t0 (a int)engine=innodb;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (
+a int, b int, c int,
+key(a),key(b),key(c)
+)engine=innodb;
+insert into t1
+select A.a+10*B.a, A.a+10*B.a, A.a+10*B.a+100*C.a
+from t0 A, t0 B, t0 C, t0 D where D.a<5;
+set @@global.debug_dbug="+d,ha_index_init_fail";
+explain select * from t1 where a=10 and b=10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index_merge a,b a,b 5,5 NULL 1 Using intersect(a,b); Using where
+select * from t1 where a=10 and b=10;
+ERROR HY000: Table definition has changed, please retry transaction
+DROP TABLE t0,t1;
+set @@global.debug_dbug="-d";
+set @@optimizer_switch= @optimizer_switch_save;
diff --git a/mysql-test/r/read_only.result b/mysql-test/r/read_only.result
index ee35549eb78..da4926f1a9e 100644
--- a/mysql-test/r/read_only.result
+++ b/mysql-test/r/read_only.result
@@ -163,11 +163,23 @@ flush privileges;
drop database mysqltest_db1;
set global read_only= @start_read_only;
#
+# MDEV-16987 - ALTER DATABASE possible in read-only mode
+#
+GRANT ALTER ON test1.* TO user1@localhost;
+CREATE DATABASE test1;
+SET GLOBAL read_only=1;
+ALTER DATABASE test1 CHARACTER SET utf8;
+ERROR HY000: The MariaDB server is running with the --read-only option so it cannot execute this statement
+SET GLOBAL read_only=0;
+DROP DATABASE test1;
+DROP USER user1@localhost;
+USE test;
+# End of 5.5 tests
+#
# WL#5968 Implement START TRANSACTION READ (WRITE|ONLY);
#
#
# Test interaction with read_only system variable.
-DROP TABLE IF EXISTS t1;
CREATE TABLE t1(a INT);
INSERT INTO t1 VALUES (1), (2);
CREATE USER user1;
@@ -199,3 +211,4 @@ COMMIT;
DROP USER user1;
SET GLOBAL read_only= 0;
DROP TABLE t1;
+# End of 10.0 tests
diff --git a/mysql-test/r/row-checksum-old.result b/mysql-test/r/row-checksum-old.result
index ef523463860..920c5dbe838 100644
--- a/mysql-test/r/row-checksum-old.result
+++ b/mysql-test/r/row-checksum-old.result
@@ -85,3 +85,19 @@ checksum table t1 extended;
Table Checksum
test.t1 4108368782
drop table t1;
+#
+# MDEV-17085: CHECKSUM TABLE EXTENDED does not work correctly
+#
+CREATE TABLE t1 ( c1 int NOT NULL, c2 int NOT NULL, c4 varchar(20), c5 varchar(20), c6 varchar(20), c7 varchar(20), c8 varchar(20), c9 varchar(20), c10 varchar(20), c11 varchar(20), c12 varchar(20), c13 varchar(20), c14 varchar(20), c15 varchar(20), c16 varchar(20), c19 int NOT NULL, c20 int NOT NULL, c21 varchar(20), c22 VARCHAR(20), c23 varchar(20));
+insert into t1 values (5,0,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0,0,"dog",NULL,NULL);
+# Important is that checksum is different from following
+CHECKSUM TABLE t1 EXTENDED;
+Table Checksum
+test.t1 2514025256
+UPDATE t1 SET c21='cat' WHERE c1=5;
+# Important is that checksum is different from above
+CHECKSUM TABLE t1 EXTENDED;
+Table Checksum
+test.t1 2326430205
+drop table t1;
+# End of 5.5 tests
diff --git a/mysql-test/r/row-checksum.result b/mysql-test/r/row-checksum.result
index fb8a1260a1d..0f8311b703a 100644
--- a/mysql-test/r/row-checksum.result
+++ b/mysql-test/r/row-checksum.result
@@ -85,3 +85,19 @@ checksum table t1 extended;
Table Checksum
test.t1 3885665021
drop table t1;
+#
+# MDEV-17085: CHECKSUM TABLE EXTENDED does not work correctly
+#
+CREATE TABLE t1 ( c1 int NOT NULL, c2 int NOT NULL, c4 varchar(20), c5 varchar(20), c6 varchar(20), c7 varchar(20), c8 varchar(20), c9 varchar(20), c10 varchar(20), c11 varchar(20), c12 varchar(20), c13 varchar(20), c14 varchar(20), c15 varchar(20), c16 varchar(20), c19 int NOT NULL, c20 int NOT NULL, c21 varchar(20), c22 VARCHAR(20), c23 varchar(20));
+insert into t1 values (5,0,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0,0,"dog",NULL,NULL);
+# Important is that checksum is different from following
+CHECKSUM TABLE t1 EXTENDED;
+Table Checksum
+test.t1 2514025256
+UPDATE t1 SET c21='cat' WHERE c1=5;
+# Important is that checksum is different from above
+CHECKSUM TABLE t1 EXTENDED;
+Table Checksum
+test.t1 2326430205
+drop table t1;
+# End of 5.5 tests
diff --git a/mysql-test/r/stat_tables.result b/mysql-test/r/stat_tables.result
index cd78d44462e..6266526a0d1 100644
--- a/mysql-test/r/stat_tables.result
+++ b/mysql-test/r/stat_tables.result
@@ -590,3 +590,22 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE user ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
set use_stat_tables=@save_use_stat_tables;
+#
+# MDEV-17734: AddressSanitizer: use-after-poison in create_key_parts_for_pseudo_indexes
+#
+set @@use_stat_tables= PREFERABLY;
+set @save_optimizer_use_condition_selectivity= @@optimizer_use_condition_selectivity;
+set @@optimizer_use_condition_selectivity=4;
+set @save_use_stat_tables= @@use_stat_tables;
+create table t1 (a int, b int);
+insert into t1(a,b) values (1,2),(1,3),(1,4),(1,5),(2,6),(2,7),(3,8),(3,9),(3,9),(4,10);
+analyze table t1 persistent for columns (a) indexes ();
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+select * from t1 where a=1 and b=3;
+a b
+1 3
+set @@optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set use_stat_tables=@save_use_stat_tables;
+drop table t1;
diff --git a/mysql-test/r/stat_tables_innodb.result b/mysql-test/r/stat_tables_innodb.result
index 02a07fa8bbb..cc5354bf2d7 100644
--- a/mysql-test/r/stat_tables_innodb.result
+++ b/mysql-test/r/stat_tables_innodb.result
@@ -617,5 +617,24 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE user ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
set use_stat_tables=@save_use_stat_tables;
+#
+# MDEV-17734: AddressSanitizer: use-after-poison in create_key_parts_for_pseudo_indexes
+#
+set @@use_stat_tables= PREFERABLY;
+set @save_optimizer_use_condition_selectivity= @@optimizer_use_condition_selectivity;
+set @@optimizer_use_condition_selectivity=4;
+set @save_use_stat_tables= @@use_stat_tables;
+create table t1 (a int, b int);
+insert into t1(a,b) values (1,2),(1,3),(1,4),(1,5),(2,6),(2,7),(3,8),(3,9),(3,9),(4,10);
+analyze table t1 persistent for columns (a) indexes ();
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+select * from t1 where a=1 and b=3;
+a b
+1 3
+set @@optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set use_stat_tables=@save_use_stat_tables;
+drop table t1;
set optimizer_switch=@save_optimizer_switch_for_stat_tables_test;
SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/r/subselect2.result b/mysql-test/r/subselect2.result
index 401d333ccfb..420ce6d537e 100644
--- a/mysql-test/r/subselect2.result
+++ b/mysql-test/r/subselect2.result
@@ -394,3 +394,25 @@ select null in (select a from t1 where a < out3.a union select a from t2 where
(select a from t3) +1 < out3.a+1) from t3 out3;
ERROR 21000: Subquery returns more than 1 row
drop table t1, t2, t3;
+CREATE TABLE t1(
+q11 int, q12 int, q13 int, q14 int, q15 int, q16 int, q17 int, q18 int, q19 int,
+q21 int, q22 int, q23 int, q24 int, q25 int, q26 int, q27 int, q28 int, q29 int,
+f1 int
+);
+CREATE TABLE t2(f2 int, f21 int, f3 timestamp, f4 int, f5 int, f6 int);
+INSERT INTO t1 (f1) VALUES (1),(1),(2),(2);
+INSERT INTO t2 VALUES (1,1,"2004-02-29 11:11:11",0,0,0), (2,2,"2004-02-29 11:11:11",0,0,0);
+SELECT f1,
+(SELECT t.f21 from t2 t where max(
+q11+q12+q13+q14+q15+q16+q17+q18+q19+
+q21+q22+q23+q24+q25+q26+q27+q28+q29) = t.f2 UNION
+SELECT t.f3 FROM t2 AS t WHERE t1.f1=t.f2 AND t.f3=MAX(t1.f1) UNION
+SELECT 1 LIMIT 1) AS test
+FROM t1 GROUP BY f1;
+f1 test
+1 1
+2 1
+Warnings:
+Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Incorrect datetime value: '2'
+DROP TABLE t1,t2;
diff --git a/mysql-test/r/subselect_exists2in.result b/mysql-test/r/subselect_exists2in.result
index d47e446fe8f..b6b2f5b476f 100644
--- a/mysql-test/r/subselect_exists2in.result
+++ b/mysql-test/r/subselect_exists2in.result
@@ -330,7 +330,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
3 MATERIALIZED t3 ALL NULL NULL NULL NULL 2 100.00 Using where
Warnings:
Note 1276 Field or reference 'test.t2.b' of SELECT #3 was resolved in SELECT #2
-Note 1003 select (select 1 from dual where (not(((1 is not null) and <in_optimizer>(1,1 in ( <materialize> (select `test`.`t3`.`c` from `test`.`t3` where (`test`.`t3`.`c` is not null) ), <primary_index_lookup>(1 in <temporary table> on distinct_key where ((1 = `<subquery3>`.`c`))))))))) AS `( SELECT b FROM t2 WHERE NOT EXISTS ( SELECT c FROM t3 WHERE c = b ) )` from `test`.`t1`
+Note 1003 select (select 1 from dual where (not(((1 is not null) and <in_optimizer>(1,1 in (<primary_index_lookup>(1 in <temporary table> on distinct_key where ((1 = `<subquery3>`.`c`))))))))) AS `( SELECT b FROM t2 WHERE NOT EXISTS ( SELECT c FROM t3 WHERE c = b ) )` from `test`.`t1`
SELECT ( SELECT b FROM t2 WHERE NOT EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1;
( SELECT b FROM t2 WHERE NOT EXISTS ( SELECT c FROM t3 WHERE c = b ) )
1
@@ -344,7 +344,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
3 MATERIALIZED t3 ALL NULL NULL NULL NULL 2 100.00 Using where
Warnings:
Note 1276 Field or reference 'test.t2.b' of SELECT #3 was resolved in SELECT #2
-Note 1003 select (select 1 from dual where (not(((1 is not null) and <in_optimizer>(1,1 in ( <materialize> (select `test`.`t3`.`c` from `test`.`t3` where (`test`.`t3`.`c` is not null) ), <primary_index_lookup>(1 in <temporary table> on distinct_key where ((1 = `<subquery3>`.`c`))))))))) AS `( SELECT b FROM t2 WHERE NOT EXISTS ( SELECT c FROM t3 WHERE c = b ) )` from `test`.`t1`
+Note 1003 select (select 1 from dual where (not(((1 is not null) and <in_optimizer>(1,1 in (<primary_index_lookup>(1 in <temporary table> on distinct_key where ((1 = `<subquery3>`.`c`))))))))) AS `( SELECT b FROM t2 WHERE NOT EXISTS ( SELECT c FROM t3 WHERE c = b ) )` from `test`.`t1`
SELECT ( SELECT b FROM t2 WHERE NOT EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1;
( SELECT b FROM t2 WHERE NOT EXISTS ( SELECT c FROM t3 WHERE c = b ) )
1
diff --git a/mysql-test/r/subselect_mat.result b/mysql-test/r/subselect_mat.result
index aa0ac73abd2..7907b86135e 100644
--- a/mysql-test/r/subselect_mat.result
+++ b/mysql-test/r/subselect_mat.result
@@ -2822,3 +2822,19 @@ id select_type table type possible_keys key key_len ref rows Extra
SELECT * FROM t2 WHERE f IN ( SELECT LEFT('foo',0) FROM t1 ORDER BY 1 );
f
DROP TABLE t1, t2;
+#
+# MDEV-18255: Server crashes in Bitmap<64u>::intersect
+#
+create table t1 (v1 varchar(1)) engine=myisam ;
+create table t2 (v1 varchar(1)) engine=myisam ;
+explain
+select 1 from t1 where exists
+(select 1 from t1 where t1.v1 in (select t2.v1 from t2 having t2.v1 < 'j')) ;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL no matching row in const table
+3 MATERIALIZED NULL NULL NULL NULL NULL NULL NULL no matching row in const table
+select 1 from t1 where exists
+(select 1 from t1 where t1.v1 in (select t2.v1 from t2 having t2.v1 < 'j')) ;
+1
+drop table t1,t2;
diff --git a/mysql-test/r/union.result b/mysql-test/r/union.result
index 5ea0f975a91..9b7a361fdc5 100644
--- a/mysql-test/r/union.result
+++ b/mysql-test/r/union.result
@@ -2049,3 +2049,41 @@ a
1000003.0
1.0
End of 5.5 tests
+#
+# MDEV-13784: query causes seg fault
+#
+CREATE TABLE t1 (`bug_id` int NOT NULL PRIMARY KEY, `product_id` int NOT NULL);
+INSERT INTO t1 VALUES (45199,1184);
+CREATE TABLE t2 (`product_id` int NOT NULL,`userid` int NOT NULL, PRIMARY KEY (`product_id`,`userid`));
+INSERT INTO t2 VALUES (1184,103),(1184,624),(1184,1577),(1184,1582);
+CREATE TABLE t3 (`id` int NOT NULL PRIMARY KEY,`name` varchar(64));
+CREATE TABLE t4 ( `userid` int NOT NULL PRIMARY KEY, `login_name` varchar(255));
+INSERT INTO t4 VALUES (103,'foo'),(624,'foo'),(1577,'foo'),(1582,'foo');
+CREATE TABLE t5 (`id` int NOT NULL PRIMARY KEY, `name` varchar(64));
+explain select
+(
+select login_name from t4 where userId = (
+select userid from t2 where product_id = t1.product_id
+union
+select userid from t2 where product_id = (
+select id from t5 where name = (select name from t3 where id = t1.product_id)) limit 1 )
+) as x from t1 where (t1.bug_id=45199);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
+2 SUBQUERY t4 eq_ref PRIMARY PRIMARY 4 func 1 Using where
+3 SUBQUERY t2 ref PRIMARY PRIMARY 4 const 3 Using index
+4 UNION t2 ref PRIMARY PRIMARY 4 func 1 Using where; Using index
+5 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+6 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
+select
+(
+select login_name from t4 where userId = (
+select userid from t2 where product_id = t1.product_id
+union
+select userid from t2 where product_id = (
+select id from t5 where name = (select name from t3 where id = t1.product_id)) limit 1 )
+) as x from t1 where (t1.bug_id=45199);
+x
+foo
+drop table t1, t2, t3, t4, t5;
diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result
index 4e3146052e9..3088704e911 100644
--- a/mysql-test/r/view.result
+++ b/mysql-test/r/view.result
@@ -4633,7 +4633,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DEPENDENT SUBQUERY t4 ALL NULL NULL NULL NULL 2 100.00 Using where
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
-Note 1003 select `test`.`t1`.`a` AS `a`,10 AS `a` from `test`.`t1` where (not(<expr_cache><10,`test`.`t1`.`a`>(<in_optimizer>(10,<exists>(select NULL from `test`.`t4` where ((`test`.`t4`.`a` >= `test`.`t1`.`a`) and trigcond(((<cache>(10) = NULL) or <cache>(isnull(NULL))))) having trigcond(<is_not_null_test>(NULL)))))))
+Note 1003 select `test`.`t1`.`a` AS `a`,10 AS `a` from `test`.`t1` where (not(<expr_cache><10,`test`.`t1`.`a`>(<in_optimizer>(10,<exists>(select NULL from `test`.`t4` where ((`test`.`t4`.`a` >= `test`.`t1`.`a`) and trigcond(((<cache>(10) = NULL) or 1))) having trigcond(<is_not_null_test>(NULL)))))))
SELECT * FROM t1, t2
WHERE t2.a NOT IN (SELECT t3.b FROM t3 RIGHT JOIN t4 ON (t4.a = t3.a)
WHERE t4.a >= t1.a);
@@ -4649,7 +4649,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DEPENDENT SUBQUERY t4 ALL NULL NULL NULL NULL 2 100.00 Using where
Warnings:
Note 1276 Field or reference 'v1.a' of SELECT #2 was resolved in SELECT #1
-Note 1003 select `test`.`t1`.`a` AS `a`,10 AS `a` from `test`.`t1` where (not(<expr_cache><10,`test`.`t1`.`a`>(<in_optimizer>(10,<exists>(select NULL from `test`.`t4` where ((`test`.`t4`.`a` >= `test`.`t1`.`a`) and trigcond(((<cache>(10) = NULL) or <cache>(isnull(NULL))))) having trigcond(<is_not_null_test>(NULL)))))))
+Note 1003 select `test`.`t1`.`a` AS `a`,10 AS `a` from `test`.`t1` where (not(<expr_cache><10,`test`.`t1`.`a`>(<in_optimizer>(10,<exists>(select NULL from `test`.`t4` where ((`test`.`t4`.`a` >= `test`.`t1`.`a`) and trigcond(((<cache>(10) = NULL) or 1))) having trigcond(<is_not_null_test>(NULL)))))))
SELECT * FROM v1, t2
WHERE t2.a NOT IN (SELECT t3.b FROM t3 RIGHT JOIN t4 ON (t4.a = t3.a)
WHERE t4.a >= v1.a);
diff --git a/mysql-test/suite/engines/iuds/r/insert_number.result b/mysql-test/suite/engines/iuds/r/insert_number.result
index ab56b82807c..9f937b33364 100644
--- a/mysql-test/suite/engines/iuds/r/insert_number.result
+++ b/mysql-test/suite/engines/iuds/r/insert_number.result
@@ -31565,8 +31565,33 @@ c1 c2 c3 c4 c5 c6 c7
0 -9223372036854775808 1 2 3 4 5
SELECT * FROM t2 WHERE c1 BETWEEN 0 AND 18446744073709551615 ORDER BY c1,c6;
c1 c2 c3 c4 c5 c6 c7
+0 NULL 5 6 NULL 0 NULL
+0 -9223372036854775808 1 2 3 4 5
+0 0 17 18 19 20 21
+0 124 22 23 24 25 26
+0 124 27 28 29 30 31
+0 -9223372036854775808 31 32 33 34 35
+0 0 32 32 34 35 36
+101 0 37 38 39 40 41
+101 -102 103 104 105 106 107
+102 -109 110 111 112 113 114
+103 -109 110 111 112 113 114
+105 NULL 102 103 104 105 106
+108 -109 110 111 112 101 114
+108 -109 110 111 112 102 114
+108 -109 110 111 112 113 114
+115 -116 117 118 119 120 121
+122 -123 124 125 126 127 128
+255 -2147483648 6 7 8 9 10
+65535 -8388608 11 12 13 14 15
+16777215 -32768 16 17 18 19 20
+4294967295 -128 21 22 23 24 25
+18446744073709551615 9223372036854775807 26 27 28 29 30
+18446744073709551615 9223372036854775807 36 37 38 39 40
SELECT * FROM t2 WHERE c1 BETWEEN 0 AND 18446744073709551615 ORDER BY c1,c6 LIMIT 2;
c1 c2 c3 c4 c5 c6 c7
+0 NULL 5 6 NULL 0 NULL
+0 -9223372036854775808 1 2 3 4 5
SELECT * FROM t2 WHERE c1 IN (0,18446744073709551615) ORDER BY c1,c6;
c1 c2 c3 c4 c5 c6 c7
0 NULL 5 6 NULL 0 NULL
@@ -31726,8 +31751,33 @@ c1 c2 c3 c4 c5 c6 c7
0 -9223372036854775808 31 32 33 34 35
SELECT * FROM t2 WHERE c1 BETWEEN 0 AND 18446744073709551615 ORDER BY c1,c6 DESC;
c1 c2 c3 c4 c5 c6 c7
+0 0 32 32 34 35 36
+0 -9223372036854775808 31 32 33 34 35
+0 124 27 28 29 30 31
+0 124 22 23 24 25 26
+0 0 17 18 19 20 21
+0 -9223372036854775808 1 2 3 4 5
+0 NULL 5 6 NULL 0 NULL
+101 -102 103 104 105 106 107
+101 0 37 38 39 40 41
+102 -109 110 111 112 113 114
+103 -109 110 111 112 113 114
+105 NULL 102 103 104 105 106
+108 -109 110 111 112 113 114
+108 -109 110 111 112 102 114
+108 -109 110 111 112 101 114
+115 -116 117 118 119 120 121
+122 -123 124 125 126 127 128
+255 -2147483648 6 7 8 9 10
+65535 -8388608 11 12 13 14 15
+16777215 -32768 16 17 18 19 20
+4294967295 -128 21 22 23 24 25
+18446744073709551615 9223372036854775807 36 37 38 39 40
+18446744073709551615 9223372036854775807 26 27 28 29 30
SELECT * FROM t2 WHERE c1 BETWEEN 0 AND 18446744073709551615 ORDER BY c1,c6 DESC LIMIT 2;
c1 c2 c3 c4 c5 c6 c7
+0 0 32 32 34 35 36
+0 -9223372036854775808 31 32 33 34 35
SELECT * FROM t2 WHERE c1 IN (0,18446744073709551615) ORDER BY c1,c6 DESC;
c1 c2 c3 c4 c5 c6 c7
0 0 32 32 34 35 36
@@ -31993,8 +32043,33 @@ c1 c2 c3 c4 c5 c6 c7
18446744073709551615 9223372036854775807 36 37 38 39 40
SELECT * FROM t2 WHERE c1 BETWEEN 0 AND 18446744073709551615 ORDER BY c1,c6;
c1 c2 c3 c4 c5 c6 c7
+0 NULL 5 6 NULL 0 NULL
+0 -9223372036854775808 1 2 3 4 5
+0 0 17 18 19 20 21
+0 124 22 23 24 25 26
+0 124 27 28 29 30 31
+0 -9223372036854775808 31 32 33 34 35
+0 0 32 32 34 35 36
+101 0 37 38 39 40 41
+101 -102 103 104 105 106 107
+102 -109 110 111 112 113 114
+103 -109 110 111 112 113 114
+105 NULL 102 103 104 105 106
+108 -109 110 111 112 101 114
+108 -109 110 111 112 102 114
+108 -109 110 111 112 113 114
+115 -116 117 118 119 120 121
+122 -123 124 125 126 127 128
+255 -2147483648 6 7 8 9 10
+65535 -8388608 11 12 13 14 15
+16777215 -32768 16 17 18 19 20
+4294967295 -128 21 22 23 24 25
+18446744073709551615 9223372036854775807 26 27 28 29 30
+18446744073709551615 9223372036854775807 36 37 38 39 40
SELECT * FROM t2 WHERE c1 BETWEEN 0 AND 18446744073709551615 ORDER BY c1,c6 LIMIT 2;
c1 c2 c3 c4 c5 c6 c7
+0 NULL 5 6 NULL 0 NULL
+0 -9223372036854775808 1 2 3 4 5
SELECT * FROM t2 WHERE c1 IN (0,18446744073709551615) ORDER BY c1,c6;
c1 c2 c3 c4 c5 c6 c7
0 NULL 5 6 NULL 0 NULL
@@ -32154,8 +32229,33 @@ c1 c2 c3 c4 c5 c6 c7
18446744073709551615 9223372036854775807 26 27 28 29 30
SELECT * FROM t2 WHERE c1 BETWEEN 0 AND 18446744073709551615 ORDER BY c1,c6 DESC;
c1 c2 c3 c4 c5 c6 c7
+0 0 32 32 34 35 36
+0 -9223372036854775808 31 32 33 34 35
+0 124 27 28 29 30 31
+0 124 22 23 24 25 26
+0 0 17 18 19 20 21
+0 -9223372036854775808 1 2 3 4 5
+0 NULL 5 6 NULL 0 NULL
+101 -102 103 104 105 106 107
+101 0 37 38 39 40 41
+102 -109 110 111 112 113 114
+103 -109 110 111 112 113 114
+105 NULL 102 103 104 105 106
+108 -109 110 111 112 113 114
+108 -109 110 111 112 102 114
+108 -109 110 111 112 101 114
+115 -116 117 118 119 120 121
+122 -123 124 125 126 127 128
+255 -2147483648 6 7 8 9 10
+65535 -8388608 11 12 13 14 15
+16777215 -32768 16 17 18 19 20
+4294967295 -128 21 22 23 24 25
+18446744073709551615 9223372036854775807 36 37 38 39 40
+18446744073709551615 9223372036854775807 26 27 28 29 30
SELECT * FROM t2 WHERE c1 BETWEEN 0 AND 18446744073709551615 ORDER BY c1,c6 DESC LIMIT 2;
c1 c2 c3 c4 c5 c6 c7
+0 0 32 32 34 35 36
+0 -9223372036854775808 31 32 33 34 35
SELECT * FROM t2 WHERE c1 IN (0,18446744073709551615) ORDER BY c1,c6 DESC;
c1 c2 c3 c4 c5 c6 c7
0 0 32 32 34 35 36
diff --git a/mysql-test/suite/engines/iuds/r/update_delete_number.result b/mysql-test/suite/engines/iuds/r/update_delete_number.result
index 15de16ed714..b89dea75d96 100644
--- a/mysql-test/suite/engines/iuds/r/update_delete_number.result
+++ b/mysql-test/suite/engines/iuds/r/update_delete_number.result
@@ -3768,8 +3768,25 @@ c1 c2 c3
-12 18446744073709551615 12
SELECT * FROM t2 WHERE c2 BETWEEN 0 AND 18446744073709551615 ORDER BY c2,c1;
c1 c2 c3
+-4 4 4
+-9 9 9
+0 255 13
+-9223372036854775808 18446744073709551615 12
+-12 18446744073709551615 12
+-11 18446744073709551615 11
+-8 18446744073709551615 8
+-7 18446744073709551615 7
+-6 18446744073709551615 6
+-5 18446744073709551615 5
+-3 18446744073709551615 3
+-2 18446744073709551615 2
+-1 18446744073709551615 1
+50 18446744073709551615 10
+9223372036854775807 18446744073709551615 14
SELECT * FROM t2 WHERE c2 BETWEEN 0 AND 18446744073709551615 ORDER BY c2,c1 DESC LIMIT 2;
c1 c2 c3
+-4 4 4
+-9 9 9
SELECT * FROM t2 WHERE c2 IN(0,18446744073709551615) ORDER BY c2,c1 DESC;
c1 c2 c3
9223372036854775807 18446744073709551615 14
diff --git a/mysql-test/suite/innodb/r/alter_candidate_key.result b/mysql-test/suite/innodb/r/alter_candidate_key.result
new file mode 100644
index 00000000000..b0b8e390c7e
--- /dev/null
+++ b/mysql-test/suite/innodb/r/alter_candidate_key.result
@@ -0,0 +1,107 @@
+CREATE TABLE t1 (f1 INT NOT NULL, f2 INT NOT NULL,
+UNIQUE KEY uidx2(f1,f2),
+UNIQUE KEY uidx1(f2)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES(1, 1);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) NOT NULL,
+ `f2` int(11) NOT NULL,
+ UNIQUE KEY `uidx2` (`f1`,`f2`),
+ UNIQUE KEY `uidx1` (`f2`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SET DEBUG_SYNC = 'innodb_inplace_alter_table_enter
+ SIGNAL conc_dml WAIT_FOR go_ahead';
+ALTER TABLE t1 CHANGE COLUMN f1 f11 INT, ALGORITHM=INPLACE;
+SET DEBUG_SYNC = 'now WAIT_FOR conc_dml';
+DELETE FROM t1;
+SET DEBUG_SYNC = 'now SIGNAL go_ahead';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f11` int(11) DEFAULT NULL,
+ `f2` int(11) NOT NULL,
+ UNIQUE KEY `uidx1` (`f2`),
+ UNIQUE KEY `uidx2` (`f11`,`f2`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1;
+CREATE TABLE t1(f1 INT, f2 INT,
+PRIMARY KEY(f1, f2),
+UNIQUE INDEX uidx2 (f1, f2),
+UNIQUE INDEX uidx1 (f2))ENGINE=InnoDB;
+ALTER TABLE t1 DROP PRIMARY KEY;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) NOT NULL DEFAULT '0',
+ `f2` int(11) NOT NULL DEFAULT '0',
+ UNIQUE KEY `uidx2` (`f1`,`f2`),
+ UNIQUE KEY `uidx1` (`f2`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SET DEBUG_SYNC = 'innodb_inplace_alter_table_enter
+ SIGNAL conc_dml WAIT_FOR go_ahead';
+ALTER TABLE t1 CHANGE COLUMN f1 f11 INT, ALGORITHM=INPLACE;
+SET DEBUG_SYNC = 'now WAIT_FOR conc_dml';
+INSERT INTO t1 VALUES(1, 1), (1, 1);
+ERROR 23000: Duplicate entry '1-1' for key 'uidx2'
+SET DEBUG_SYNC = 'now SIGNAL go_ahead';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f11` int(11) DEFAULT NULL,
+ `f2` int(11) NOT NULL DEFAULT '0',
+ UNIQUE KEY `uidx1` (`f2`),
+ UNIQUE KEY `uidx2` (`f11`,`f2`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1;
+SET SQL_MODE= strict_trans_tables;
+CREATE TABLE t1(a INT UNIQUE) ENGINE=InnoDB;
+SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL dml WAIT_FOR dml_done';
+ALTER TABLE t1 MODIFY COLUMN a INT NOT NULL;
+SET DEBUG_SYNC='now WAIT_FOR dml';
+BEGIN;
+INSERT INTO t1 SET a=NULL;
+ROLLBACK;
+set DEBUG_SYNC='now SIGNAL dml_done';
+ERROR 22004: Invalid use of NULL value
+DROP TABLE t1;
+SET DEBUG_SYNC="RESET";
+SET SQL_MODE=DEFAULT;
+CREATE TABLE t1(f1 INT NOT NULL, f2 INT NOT NULL, PRIMARY KEY(f1, f2),
+UNIQUE KEY(f2))ENGINE=InnoDB;
+ALTER TABLE t1 DROP PRIMARY KEY;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) NOT NULL,
+ `f2` int(11) NOT NULL,
+ UNIQUE KEY `f2` (`f2`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1(f1 INT NOT NULL, f2 INT NOT NULL,
+UNIQUE KEY(f2), UNIQUE KEY(f2))ENGINE=InnoDB;
+Warnings:
+Note 1831 Duplicate index `f2_2`. This is deprecated and will be disallowed in a future release.
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) NOT NULL,
+ `f2` int(11) NOT NULL,
+ UNIQUE KEY `f2` (`f2`),
+ UNIQUE KEY `f2_2` (`f2`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 DROP INDEX f2, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) NOT NULL,
+ `f2` int(11) NOT NULL,
+ UNIQUE KEY `f2_2` (`f2`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/foreign_key.result b/mysql-test/suite/innodb/r/foreign_key.result
index b6462000b46..4e253261f2e 100644
--- a/mysql-test/suite/innodb/r/foreign_key.result
+++ b/mysql-test/suite/innodb/r/foreign_key.result
@@ -49,3 +49,36 @@ INSERT INTO t3 SET a=1;
kill query @id;
ERROR 70100: Query execution was interrupted
DROP TABLE t3,t1;
+#
+# MDEV-18222 InnoDB: Failing assertion: heap->magic_n == MEM_BLOCK_MAGIC_N
+# or ASAN heap-use-after-free in dict_foreign_remove_from_cache upon CHANGE COLUMN
+#
+CREATE TABLE t1 (a INT, UNIQUE(a), KEY(a)) ENGINE=InnoDB;
+ALTER TABLE t1 ADD FOREIGN KEY (a) REFERENCES t1 (a);
+SET SESSION FOREIGN_KEY_CHECKS = OFF;
+ALTER TABLE t1 CHANGE COLUMN a a TIME NOT NULL;
+ALTER TABLE t1 ADD pk INT NOT NULL AUTO_INCREMENT PRIMARY KEY;
+ALTER TABLE t1 CHANGE COLUMN a b TIME;
+SET SESSION FOREIGN_KEY_CHECKS = ON;
+DROP TABLE t1;
+#
+# MDEV-18256 InnoDB: Failing assertion: heap->magic_n == MEM_BLOCK_MAGIC_N
+# upon DROP FOREIGN KEY
+#
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+CREATE TABLE t2 (b INT PRIMARY KEY, FOREIGN KEY fk1 (b) REFERENCES t1 (a))
+ENGINE=InnoDB;
+ALTER TABLE t2 DROP FOREIGN KEY fk1, DROP FOREIGN KEY fk1;
+DROP TABLE t2, t1;
+CREATE TABLE t1 (f VARCHAR(256)) ENGINE=InnoDB;
+SET SESSION FOREIGN_KEY_CHECKS = OFF;
+ALTER TABLE t1 ADD FOREIGN KEY (f) REFERENCES non_existing_table (x);
+SET SESSION FOREIGN_KEY_CHECKS = ON;
+ALTER TABLE t1 ADD FULLTEXT INDEX ft1 (f);
+Warnings:
+Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID
+Warning 1088 failed to load FOREIGN KEY constraints
+ALTER TABLE t1 ADD FULLTEXT INDEX ft2 (f);
+Warnings:
+Warning 1088 failed to load FOREIGN KEY constraints
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/innodb-alter.result b/mysql-test/suite/innodb/r/innodb-alter.result
index bd82cc8a764..57638a84517 100644
--- a/mysql-test/suite/innodb/r/innodb-alter.result
+++ b/mysql-test/suite/innodb/r/innodb-alter.result
@@ -714,6 +714,7 @@ t2 CREATE TABLE `t2` (
CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`c2`) REFERENCES `t1` (`c1`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
ALTER TABLE t1 CHANGE COLUMN c1 C1 INT;
+ALTER TABLE t2 CHANGE COLUMN c2 C2 INT;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -723,24 +724,168 @@ t1 CREATE TABLE `t1` (
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
- `c2` int(11) NOT NULL,
- KEY `c2` (`c2`),
+ `C2` int(11) DEFAULT NULL,
+ KEY `c2` (`C2`),
CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`c2`) REFERENCES `t1` (`c1`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 CHANGE COLUMN C1 c5 INT;
+ALTER TABLE t2 CHANGE COLUMN C2 c6 INT;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `C1` int(11) NOT NULL DEFAULT '0',
- PRIMARY KEY (`C1`)
+ `c5` int(11) NOT NULL DEFAULT '0',
+ PRIMARY KEY (`c5`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
- `c2` int(11) NOT NULL,
- KEY `c2` (`c2`),
- CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`c2`) REFERENCES `t1` (`c1`)
+ `c6` int(11) DEFAULT NULL,
+ KEY `c2` (`c6`),
+ CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`c6`) REFERENCES `t1` (`c5`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT C.NAME FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS C INNER JOIN
+INFORMATION_SCHEMA.INNODB_SYS_TABLES T ON C.TABLE_ID=T.TABLE_ID
+WHERE T.NAME='test/t1';
+NAME
+c5
+SELECT F.NAME FROM INFORMATION_SCHEMA.INNODB_SYS_FIELDS F INNER JOIN
+INFORMATION_SCHEMA.INNODB_SYS_INDEXES I ON F.INDEX_ID=I.INDEX_ID INNER JOIN
+INFORMATION_SCHEMA.INNODB_SYS_TABLES T ON I.TABLE_ID=T.TABLE_ID
+WHERE T.NAME='test/t1' AND I.NAME='PRIMARY';
+NAME
+c5
+SELECT C.REF_COL_NAME, C.FOR_COL_NAME FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS C INNER JOIN
+INFORMATION_SCHEMA.INNODB_SYS_FOREIGN F ON C.ID=F.ID
+WHERE F.FOR_NAME='test/t2';
+REF_COL_NAME FOR_COL_NAME
+c5 c6
+DROP TABLE t2, t1;
+# virtual columns case too
+CREATE TABLE t1 (a INT, b INT GENERATED ALWAYS AS (a) VIRTUAL) ENGINE = InnoDB;
+ALTER TABLE t1 CHANGE COLUMN a A INT;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `A` int(11) DEFAULT NULL,
+ `b` int(11) AS (a) VIRTUAL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT C.NAME FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS C INNER JOIN
+INFORMATION_SCHEMA.INNODB_SYS_TABLES T ON C.TABLE_ID=T.TABLE_ID
+WHERE T.NAME='test/t1';
+NAME
+a
+DROP TABLE t1;
+# different FOREIGN KEY cases
+CREATE TABLE t1 (
+a INT UNIQUE KEY,
+b INT UNIQUE KEY,
+c INT UNIQUE KEY,
+d INT UNIQUE KEY
+) ENGINE=INNODB;
+CREATE TABLE t2 (
+aa INT,
+bb INT,
+cc INT,
+dd INT
+) ENGINE=INNODB;
+INSERT INTO t1 VALUES (1, 1, 1, 1);
+INSERT INTO t2 VALUES (1, 1, 1, 1);
+ALTER TABLE t1 CHANGE a A INT, ALGORITHM=INPLACE;
+ALTER TABLE t1 CHANGE c C INT, ALGORITHM=INPLACE;
+ALTER TABLE t2 CHANGE cc CC INT, ALGORITHM=INPLACE;
+ALTER TABLE t2 CHANGE dd DD INT, ALGORITHM=INPLACE;
+SET foreign_key_checks=0;
+ALTER TABLE t2
+ADD FOREIGN KEY(aa) REFERENCES t1(a),
+ADD FOREIGN KEY(bb) REFERENCES t1(b),
+ADD FOREIGN KEY(cc) REFERENCES t1(c),
+ADD FOREIGN KEY(dd) REFERENCES t1(d),
+ALGORITHM=INPLACE;
+ALTER TABLE t1 CHANGE b B INT, ALGORITHM=INPLACE;
+ALTER TABLE t2 CHANGE aa AA INT, ALGORITHM=INPLACE;
+ALTER TABLE t1 CHANGE d D INT, ALGORITHM=INPLACE;
+ALTER TABLE t2 CHANGE bb BB INT, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `A` int(11) DEFAULT NULL,
+ `B` int(11) DEFAULT NULL,
+ `C` int(11) DEFAULT NULL,
+ `D` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`A`),
+ UNIQUE KEY `b` (`B`),
+ UNIQUE KEY `c` (`C`),
+ UNIQUE KEY `d` (`D`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `AA` int(11) DEFAULT NULL,
+ `BB` int(11) DEFAULT NULL,
+ `CC` int(11) DEFAULT NULL,
+ `DD` int(11) DEFAULT NULL,
+ KEY `aa` (`AA`),
+ KEY `bb` (`BB`),
+ KEY `CC` (`CC`),
+ KEY `DD` (`DD`),
+ CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`aa`) REFERENCES `t1` (`a`),
+ CONSTRAINT `t2_ibfk_2` FOREIGN KEY (`bb`) REFERENCES `t1` (`b`),
+ CONSTRAINT `t2_ibfk_3` FOREIGN KEY (`cc`) REFERENCES `t1` (`c`),
+ CONSTRAINT `t2_ibfk_4` FOREIGN KEY (`dd`) REFERENCES `t1` (`d`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DELETE FROM t1 WHERE a=1;
+ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`aa`) REFERENCES `t1` (`a`))
+DELETE FROM t1 WHERE A=1;
+ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`aa`) REFERENCES `t1` (`a`))
+DELETE FROM t1 WHERE b=1;
+ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`aa`) REFERENCES `t1` (`a`))
+DELETE FROM t1 WHERE B=1;
+ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`aa`) REFERENCES `t1` (`a`))
+DELETE FROM t1 WHERE c=1;
+ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`aa`) REFERENCES `t1` (`a`))
+DELETE FROM t1 WHERE C=1;
+ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`aa`) REFERENCES `t1` (`a`))
+DELETE FROM t1 WHERE d=1;
+ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`aa`) REFERENCES `t1` (`a`))
+DELETE FROM t1 WHERE D=1;
+ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`aa`) REFERENCES `t1` (`a`))
DROP TABLE t2, t1;
+# virtual columns case too
+CREATE TABLE t1 (a INT, b INT GENERATED ALWAYS AS (a) VIRTUAL) ENGINE = InnoDB;
+ALTER TABLE t1 CHANGE COLUMN a A INT;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `A` int(11) DEFAULT NULL,
+ `b` int(11) AS (a) VIRTUAL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT C.NAME FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS C INNER JOIN
+INFORMATION_SCHEMA.INNODB_SYS_TABLES T ON C.TABLE_ID=T.TABLE_ID
+WHERE T.NAME='test/t1';
+NAME
+a
+DROP TABLE t1;
+# and an MDEV-18041 regression related to index prefixes
+create table `test` (
+`test_old` varchar(255) NOT NULL,
+`other` varchar(255) NOT NULL,
+PRIMARY KEY (`test_old`,`other`),
+UNIQUE KEY uk (`test_old`(100), `other`)
+) ENGINE=InnoDB;
+select name, pos from information_schema.innodb_SYS_FIELDS where name in ('test_old', 'other', 'test_new');
+name pos
+test_old 0
+other 1
+test_old 0
+other 1
+alter table `test` CHANGE COLUMN `test_old` `test_new` varchar(255) NOT NULL;
+select name, pos from information_schema.innodb_SYS_FIELDS where name in ('test_old', 'other', 'test_new');
+name pos
+test_new 0
+other 1
+test_new 0
+other 1
+drop table `test`;
#
# BUG 20029625 - HANDLE_FATAL_SIGNAL (SIG=11) IN
# DICT_MEM_TABLE_COL_RENAME_LOW
diff --git a/mysql-test/suite/innodb/r/innodb-index.result b/mysql-test/suite/innodb/r/innodb-index.result
index 3d5a0f840c1..681b07249b6 100644
--- a/mysql-test/suite/innodb/r/innodb-index.result
+++ b/mysql-test/suite/innodb/r/innodb-index.result
@@ -1180,3 +1180,36 @@ t2c CREATE TABLE `t2c` (
KEY `t2a` (`a`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
DROP TABLE t1,t2,t2c,t2i;
+SET @save_format = @@GLOBAL.innodb_file_format;
+SET @save_prefix = @@GLOBAL.innodb_large_prefix;
+SET GLOBAL innodb_file_format=barracuda;
+SET GLOBAL innodb_large_prefix=ON;
+CREATE TABLE t1 (c VARCHAR(1024),
+c1 CHAR(255) NOT NULL,c2 CHAR(255) NOT NULL,c3 CHAR(255) NOT NULL,
+c4 CHAR(255) NOT NULL,c5 CHAR(255) NOT NULL,c6 CHAR(255) NOT NULL,
+c7 CHAR(255) NOT NULL,c8 CHAR(255) NOT NULL,c9 CHAR(255) NOT NULL,
+ca CHAR(255) NOT NULL,cb CHAR(255) NOT NULL,cc CHAR(255) NOT NULL,
+cd CHAR(255) NOT NULL,ce CHAR(255) NOT NULL,cf CHAR(255) NOT NULL,
+d0 CHAR(255) NOT NULL,d1 CHAR(255) NOT NULL,d2 CHAR(255) NOT NULL,
+d3 CHAR(255) NOT NULL,d4 CHAR(255) NOT NULL,d5 CHAR(255) NOT NULL,
+d6 CHAR(255) NOT NULL,d7 CHAR(255) NOT NULL,d8 CHAR(255) NOT NULL,
+d9 CHAR(255) NOT NULL,da CHAR(255) NOT NULL,db CHAR(255) NOT NULL,
+dc CHAR(255) NOT NULL,dd CHAR(255) NOT NULL,de CHAR(255) NOT NULL,
+UNIQUE KEY(c))
+ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES
+(repeat('a',999),'','','','','','','','','','','','','','','','','','','','','','','','','','','','','',''),
+(CONCAT(repeat('a',999),'b'),'','','','','','','','','','','','','','','','','','','','','','','','','','','','','','');
+ALTER TABLE t1 ROW_FORMAT=REDUNDANT, algorithm=inplace;
+ERROR HY000: Index column size too large. The maximum column size is 767 bytes.
+ALTER TABLE t1 ROW_FORMAT=REDUNDANT, algorithm=copy;
+ERROR HY000: Index column size too large. The maximum column size is 767 bytes.
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+2
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1;
+SET GLOBAL innodb_file_format=@save_format;
+SET GLOBAL innodb_large_prefix=@save_prefix;
diff --git a/mysql-test/suite/innodb/r/innodb-table-online.result b/mysql-test/suite/innodb/r/innodb-table-online.result
index eb55ba57e36..0a8d1c2fefb 100644
--- a/mysql-test/suite/innodb/r/innodb-table-online.result
+++ b/mysql-test/suite/innodb/r/innodb-table-online.result
@@ -85,17 +85,6 @@ t1 CREATE TABLE `t1` (
UNIQUE KEY `c2` (`c2`),
UNIQUE KEY `c2_2` (`c2`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT
-ALTER TABLE t1 DROP INDEX c2, ALGORITHM = INPLACE;
-ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Dropping a primary key is not allowed without also adding a new primary key. Try ALGORITHM=COPY.
-SHOW CREATE TABLE t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `c1` int(11) NOT NULL,
- `c2` int(11) NOT NULL,
- `c3` text NOT NULL,
- UNIQUE KEY `c2` (`c2`),
- UNIQUE KEY `c2_2` (`c2`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT
ALTER TABLE t1 DROP INDEX c2, ADD PRIMARY KEY(c1);
# session default
SET DEBUG_SYNC = 'now WAIT_FOR scanned';
diff --git a/mysql-test/suite/innodb/r/innodb-virtual-columns.result b/mysql-test/suite/innodb/r/innodb-virtual-columns.result
index 558bb23de0a..900fca8309e 100644
--- a/mysql-test/suite/innodb/r/innodb-virtual-columns.result
+++ b/mysql-test/suite/innodb/r/innodb-virtual-columns.result
@@ -320,3 +320,18 @@ term uw_id plan wdraw_rsn admit_term
1035 2 CSM ACAD 1009
drop table grad_degree;
drop table gso_grad_supr;
+CREATE TABLE t1 (a INT, b CHAR(12), c INT AS (a) VIRTUAL, FULLTEXT KEY(b)) ENGINE=InnoDB;
+INSERT INTO t1 (a,b) VALUES (1,'foo');
+SELECT * FROM t1;
+a b c
+1 foo 1
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(12), c INT AS (a) VIRTUAL) ENGINE=InnoDB;
+INSERT INTO t1 (a,b) VALUES (1,'foo');
+ALTER TABLE t1 ADD FULLTEXT KEY(b);
+Warnings:
+Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID
+SELECT * FROM t1;
+a b c
+1 foo 1
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/innodb_28867993.result b/mysql-test/suite/innodb/r/innodb_28867993.result
new file mode 100644
index 00000000000..acc6734eaee
--- /dev/null
+++ b/mysql-test/suite/innodb/r/innodb_28867993.result
@@ -0,0 +1,9 @@
+create table t1 (a int) engine=innodb;
+insert t1 values (1),(2);
+create database ib_logfile2;
+select * from t1;
+a
+1
+2
+drop table t1;
+drop database ib_logfile2;
diff --git a/mysql-test/suite/innodb/t/alter_candidate_key.test b/mysql-test/suite/innodb/t/alter_candidate_key.test
new file mode 100644
index 00000000000..7429cd89a1a
--- /dev/null
+++ b/mysql-test/suite/innodb/t/alter_candidate_key.test
@@ -0,0 +1,72 @@
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+
+CREATE TABLE t1 (f1 INT NOT NULL, f2 INT NOT NULL,
+ UNIQUE KEY uidx2(f1,f2),
+ UNIQUE KEY uidx1(f2)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES(1, 1);
+SHOW CREATE TABLE t1;
+SET DEBUG_SYNC = 'innodb_inplace_alter_table_enter
+ SIGNAL conc_dml WAIT_FOR go_ahead';
+--send ALTER TABLE t1 CHANGE COLUMN f1 f11 INT, ALGORITHM=INPLACE
+connect (con1,localhost,root,,);
+SET DEBUG_SYNC = 'now WAIT_FOR conc_dml';
+DELETE FROM t1;
+SET DEBUG_SYNC = 'now SIGNAL go_ahead';
+connection default;
+reap;
+SHOW CREATE TABLE t1;
+CHECK TABLE t1;
+DROP TABLE t1;
+
+CREATE TABLE t1(f1 INT, f2 INT,
+ PRIMARY KEY(f1, f2),
+ UNIQUE INDEX uidx2 (f1, f2),
+ UNIQUE INDEX uidx1 (f2))ENGINE=InnoDB;
+ALTER TABLE t1 DROP PRIMARY KEY;
+SHOW CREATE TABLE t1;
+SET DEBUG_SYNC = 'innodb_inplace_alter_table_enter
+ SIGNAL conc_dml WAIT_FOR go_ahead';
+--send ALTER TABLE t1 CHANGE COLUMN f1 f11 INT, ALGORITHM=INPLACE
+connection con1;
+SET DEBUG_SYNC = 'now WAIT_FOR conc_dml';
+--error ER_DUP_ENTRY
+INSERT INTO t1 VALUES(1, 1), (1, 1);
+SET DEBUG_SYNC = 'now SIGNAL go_ahead';
+connection default;
+reap;
+SHOW CREATE TABLE t1;
+CHECK TABLE t1;
+DROP TABLE t1;
+
+SET SQL_MODE= strict_trans_tables;
+CREATE TABLE t1(a INT UNIQUE) ENGINE=InnoDB;
+SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL dml WAIT_FOR dml_done';
+--send ALTER TABLE t1 MODIFY COLUMN a INT NOT NULL
+connection con1;
+SET DEBUG_SYNC='now WAIT_FOR dml';
+BEGIN;
+INSERT INTO t1 SET a=NULL;
+ROLLBACK;
+set DEBUG_SYNC='now SIGNAL dml_done';
+connection default;
+--error ER_INVALID_USE_OF_NULL
+reap;
+DROP TABLE t1;
+disconnect con1;
+SET DEBUG_SYNC="RESET";
+SET SQL_MODE=DEFAULT;
+
+CREATE TABLE t1(f1 INT NOT NULL, f2 INT NOT NULL, PRIMARY KEY(f1, f2),
+ UNIQUE KEY(f2))ENGINE=InnoDB;
+ALTER TABLE t1 DROP PRIMARY KEY;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+CREATE TABLE t1(f1 INT NOT NULL, f2 INT NOT NULL,
+ UNIQUE KEY(f2), UNIQUE KEY(f2))ENGINE=InnoDB;
+SHOW CREATE TABLE t1;
+ALTER TABLE t1 DROP INDEX f2, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/t/foreign_key.test b/mysql-test/suite/innodb/t/foreign_key.test
index c1a92697dab..b4e2ee1bbe7 100644
--- a/mysql-test/suite/innodb/t/foreign_key.test
+++ b/mysql-test/suite/innodb/t/foreign_key.test
@@ -73,3 +73,34 @@ reap;
disconnect fk;
DROP TABLE t3,t1;
+
+--echo #
+--echo # MDEV-18222 InnoDB: Failing assertion: heap->magic_n == MEM_BLOCK_MAGIC_N
+--echo # or ASAN heap-use-after-free in dict_foreign_remove_from_cache upon CHANGE COLUMN
+--echo #
+CREATE TABLE t1 (a INT, UNIQUE(a), KEY(a)) ENGINE=InnoDB;
+ALTER TABLE t1 ADD FOREIGN KEY (a) REFERENCES t1 (a);
+SET SESSION FOREIGN_KEY_CHECKS = OFF;
+ALTER TABLE t1 CHANGE COLUMN a a TIME NOT NULL;
+ALTER TABLE t1 ADD pk INT NOT NULL AUTO_INCREMENT PRIMARY KEY;
+ALTER TABLE t1 CHANGE COLUMN a b TIME;
+SET SESSION FOREIGN_KEY_CHECKS = ON;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-18256 InnoDB: Failing assertion: heap->magic_n == MEM_BLOCK_MAGIC_N
+--echo # upon DROP FOREIGN KEY
+--echo #
+CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
+CREATE TABLE t2 (b INT PRIMARY KEY, FOREIGN KEY fk1 (b) REFERENCES t1 (a))
+ENGINE=InnoDB;
+ALTER TABLE t2 DROP FOREIGN KEY fk1, DROP FOREIGN KEY fk1;
+DROP TABLE t2, t1;
+
+CREATE TABLE t1 (f VARCHAR(256)) ENGINE=InnoDB;
+SET SESSION FOREIGN_KEY_CHECKS = OFF;
+ALTER TABLE t1 ADD FOREIGN KEY (f) REFERENCES non_existing_table (x);
+SET SESSION FOREIGN_KEY_CHECKS = ON;
+ALTER TABLE t1 ADD FULLTEXT INDEX ft1 (f);
+ALTER TABLE t1 ADD FULLTEXT INDEX ft2 (f);
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/t/innodb-alter.test b/mysql-test/suite/innodb/t/innodb-alter.test
index 0d3cc6f1733..a47573626aa 100644
--- a/mysql-test/suite/innodb/t/innodb-alter.test
+++ b/mysql-test/suite/innodb/t/innodb-alter.test
@@ -419,15 +419,123 @@ CREATE TABLE t2(c2 INT NOT NULL, FOREIGN KEY(c2) REFERENCES t1(c1))ENGINE=INNODB
SHOW CREATE TABLE t1;
SHOW CREATE TABLE t2;
ALTER TABLE t1 CHANGE COLUMN c1 C1 INT;
+ALTER TABLE t2 CHANGE COLUMN c2 C2 INT;
SHOW CREATE TABLE t1;
SHOW CREATE TABLE t2;
-# FIXME: MDEV-13671 InnoDB should use case-insensitive column name comparisons
-# like the rest of the server
-#ALTER TABLE t1 CHANGE COLUMN C1 c5 INT;
+ALTER TABLE t1 CHANGE COLUMN C1 c5 INT;
+ALTER TABLE t2 CHANGE COLUMN C2 c6 INT;
SHOW CREATE TABLE t1;
SHOW CREATE TABLE t2;
+
+SELECT C.NAME FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS C INNER JOIN
+ INFORMATION_SCHEMA.INNODB_SYS_TABLES T ON C.TABLE_ID=T.TABLE_ID
+ WHERE T.NAME='test/t1';
+
+SELECT F.NAME FROM INFORMATION_SCHEMA.INNODB_SYS_FIELDS F INNER JOIN
+ INFORMATION_SCHEMA.INNODB_SYS_INDEXES I ON F.INDEX_ID=I.INDEX_ID INNER JOIN
+ INFORMATION_SCHEMA.INNODB_SYS_TABLES T ON I.TABLE_ID=T.TABLE_ID
+ WHERE T.NAME='test/t1' AND I.NAME='PRIMARY';
+
+SELECT C.REF_COL_NAME, C.FOR_COL_NAME FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS C INNER JOIN
+ INFORMATION_SCHEMA.INNODB_SYS_FOREIGN F ON C.ID=F.ID
+ WHERE F.FOR_NAME='test/t2';
+
+DROP TABLE t2, t1;
+--echo # virtual columns case too
+CREATE TABLE t1 (a INT, b INT GENERATED ALWAYS AS (a) VIRTUAL) ENGINE = InnoDB;
+ALTER TABLE t1 CHANGE COLUMN a A INT;
+SHOW CREATE TABLE t1;
+SELECT C.NAME FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS C INNER JOIN
+ INFORMATION_SCHEMA.INNODB_SYS_TABLES T ON C.TABLE_ID=T.TABLE_ID
+ WHERE T.NAME='test/t1';
+DROP TABLE t1;
+
+
+--echo # different FOREIGN KEY cases
+CREATE TABLE t1 (
+ a INT UNIQUE KEY,
+ b INT UNIQUE KEY,
+ c INT UNIQUE KEY,
+ d INT UNIQUE KEY
+) ENGINE=INNODB;
+CREATE TABLE t2 (
+ aa INT,
+ bb INT,
+ cc INT,
+ dd INT
+) ENGINE=INNODB;
+
+INSERT INTO t1 VALUES (1, 1, 1, 1);
+INSERT INTO t2 VALUES (1, 1, 1, 1);
+
+ALTER TABLE t1 CHANGE a A INT, ALGORITHM=INPLACE;
+ALTER TABLE t1 CHANGE c C INT, ALGORITHM=INPLACE;
+ALTER TABLE t2 CHANGE cc CC INT, ALGORITHM=INPLACE;
+ALTER TABLE t2 CHANGE dd DD INT, ALGORITHM=INPLACE;
+
+SET foreign_key_checks=0;
+ALTER TABLE t2
+ ADD FOREIGN KEY(aa) REFERENCES t1(a),
+ ADD FOREIGN KEY(bb) REFERENCES t1(b),
+ ADD FOREIGN KEY(cc) REFERENCES t1(c),
+ ADD FOREIGN KEY(dd) REFERENCES t1(d),
+ ALGORITHM=INPLACE;
+
+ALTER TABLE t1 CHANGE b B INT, ALGORITHM=INPLACE;
+ALTER TABLE t2 CHANGE aa AA INT, ALGORITHM=INPLACE;
+
+--source include/restart_mysqld.inc
+
+ALTER TABLE t1 CHANGE d D INT, ALGORITHM=INPLACE;
+ALTER TABLE t2 CHANGE bb BB INT, ALGORITHM=INPLACE;
+SHOW CREATE TABLE t1;
+SHOW CREATE TABLE t2;
+
+--error ER_ROW_IS_REFERENCED_2
+DELETE FROM t1 WHERE a=1;
+--error ER_ROW_IS_REFERENCED_2
+DELETE FROM t1 WHERE A=1;
+
+--error ER_ROW_IS_REFERENCED_2
+DELETE FROM t1 WHERE b=1;
+--error ER_ROW_IS_REFERENCED_2
+DELETE FROM t1 WHERE B=1;
+
+--error ER_ROW_IS_REFERENCED_2
+DELETE FROM t1 WHERE c=1;
+--error ER_ROW_IS_REFERENCED_2
+DELETE FROM t1 WHERE C=1;
+
+--error ER_ROW_IS_REFERENCED_2
+DELETE FROM t1 WHERE d=1;
+--error ER_ROW_IS_REFERENCED_2
+DELETE FROM t1 WHERE D=1;
+
DROP TABLE t2, t1;
+--echo # virtual columns case too
+CREATE TABLE t1 (a INT, b INT GENERATED ALWAYS AS (a) VIRTUAL) ENGINE = InnoDB;
+ALTER TABLE t1 CHANGE COLUMN a A INT;
+SHOW CREATE TABLE t1;
+SELECT C.NAME FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS C INNER JOIN
+ INFORMATION_SCHEMA.INNODB_SYS_TABLES T ON C.TABLE_ID=T.TABLE_ID
+ WHERE T.NAME='test/t1';
+DROP TABLE t1;
+
+--echo # and an MDEV-18041 regression related to index prefixes
+create table `test` (
+ `test_old` varchar(255) NOT NULL,
+ `other` varchar(255) NOT NULL,
+ PRIMARY KEY (`test_old`,`other`),
+ UNIQUE KEY uk (`test_old`(100), `other`)
+) ENGINE=InnoDB;
+
+select name, pos from information_schema.innodb_SYS_FIELDS where name in ('test_old', 'other', 'test_new');
+alter table `test` CHANGE COLUMN `test_old` `test_new` varchar(255) NOT NULL;
+select name, pos from information_schema.innodb_SYS_FIELDS where name in ('test_old', 'other', 'test_new');
+drop table `test`;
+
+
--echo #
--echo # BUG 20029625 - HANDLE_FATAL_SIGNAL (SIG=11) IN
--echo # DICT_MEM_TABLE_COL_RENAME_LOW
diff --git a/mysql-test/suite/innodb/t/innodb-index.test b/mysql-test/suite/innodb/t/innodb-index.test
index 8598647de66..d28930de815 100644
--- a/mysql-test/suite/innodb/t/innodb-index.test
+++ b/mysql-test/suite/innodb/t/innodb-index.test
@@ -563,3 +563,33 @@ DROP TABLE t1,t2,t2c,t2i;
eval SET GLOBAL innodb_file_format=$innodb_file_format_orig;
eval SET GLOBAL innodb_file_format_max=$innodb_file_format_max_orig;
--enable_query_log
+
+SET @save_format = @@GLOBAL.innodb_file_format;
+SET @save_prefix = @@GLOBAL.innodb_large_prefix;
+SET GLOBAL innodb_file_format=barracuda;
+SET GLOBAL innodb_large_prefix=ON;
+CREATE TABLE t1 (c VARCHAR(1024),
+c1 CHAR(255) NOT NULL,c2 CHAR(255) NOT NULL,c3 CHAR(255) NOT NULL,
+c4 CHAR(255) NOT NULL,c5 CHAR(255) NOT NULL,c6 CHAR(255) NOT NULL,
+c7 CHAR(255) NOT NULL,c8 CHAR(255) NOT NULL,c9 CHAR(255) NOT NULL,
+ca CHAR(255) NOT NULL,cb CHAR(255) NOT NULL,cc CHAR(255) NOT NULL,
+cd CHAR(255) NOT NULL,ce CHAR(255) NOT NULL,cf CHAR(255) NOT NULL,
+d0 CHAR(255) NOT NULL,d1 CHAR(255) NOT NULL,d2 CHAR(255) NOT NULL,
+d3 CHAR(255) NOT NULL,d4 CHAR(255) NOT NULL,d5 CHAR(255) NOT NULL,
+d6 CHAR(255) NOT NULL,d7 CHAR(255) NOT NULL,d8 CHAR(255) NOT NULL,
+d9 CHAR(255) NOT NULL,da CHAR(255) NOT NULL,db CHAR(255) NOT NULL,
+dc CHAR(255) NOT NULL,dd CHAR(255) NOT NULL,de CHAR(255) NOT NULL,
+UNIQUE KEY(c))
+ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES
+(repeat('a',999),'','','','','','','','','','','','','','','','','','','','','','','','','','','','','',''),
+(CONCAT(repeat('a',999),'b'),'','','','','','','','','','','','','','','','','','','','','','','','','','','','','','');
+--error ER_INDEX_COLUMN_TOO_LONG
+ALTER TABLE t1 ROW_FORMAT=REDUNDANT, algorithm=inplace;
+--error ER_INDEX_COLUMN_TOO_LONG
+ALTER TABLE t1 ROW_FORMAT=REDUNDANT, algorithm=copy;
+SELECT COUNT(*) FROM t1;
+CHECK TABLE t1;
+DROP TABLE t1;
+SET GLOBAL innodb_file_format=@save_format;
+SET GLOBAL innodb_large_prefix=@save_prefix;
diff --git a/mysql-test/suite/innodb/t/innodb-table-online.test b/mysql-test/suite/innodb/t/innodb-table-online.test
index 4e9f2f13344..b0711412a52 100644
--- a/mysql-test/suite/innodb/t/innodb-table-online.test
+++ b/mysql-test/suite/innodb/t/innodb-table-online.test
@@ -101,10 +101,6 @@ ALTER TABLE t1 ADD UNIQUE INDEX(c2),
LOCK = EXCLUSIVE, ALGORITHM = INPLACE;
SHOW CREATE TABLE t1;
-# We do not support plain DROP_PK_INDEX without ADD_PK_INDEX.
---error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
-ALTER TABLE t1 DROP INDEX c2, ALGORITHM = INPLACE;
-SHOW CREATE TABLE t1;
# Now the previous DEBUG_SYNC should kick in.
--send
ALTER TABLE t1 DROP INDEX c2, ADD PRIMARY KEY(c1);
diff --git a/mysql-test/suite/innodb/t/innodb-virtual-columns.test b/mysql-test/suite/innodb/t/innodb-virtual-columns.test
index 368c6fc8cb1..99f550eb667 100644
--- a/mysql-test/suite/innodb/t/innodb-virtual-columns.test
+++ b/mysql-test/suite/innodb/t/innodb-virtual-columns.test
@@ -300,3 +300,14 @@ select * from gso_grad_supr;
drop table grad_degree;
drop table gso_grad_supr;
+
+CREATE TABLE t1 (a INT, b CHAR(12), c INT AS (a) VIRTUAL, FULLTEXT KEY(b)) ENGINE=InnoDB;
+INSERT INTO t1 (a,b) VALUES (1,'foo');
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a INT, b CHAR(12), c INT AS (a) VIRTUAL) ENGINE=InnoDB;
+INSERT INTO t1 (a,b) VALUES (1,'foo');
+ALTER TABLE t1 ADD FULLTEXT KEY(b);
+SELECT * FROM t1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/t/innodb_28867993.test b/mysql-test/suite/innodb/t/innodb_28867993.test
new file mode 100644
index 00000000000..61e9578df7b
--- /dev/null
+++ b/mysql-test/suite/innodb/t/innodb_28867993.test
@@ -0,0 +1,12 @@
+#
+# Bug#28867993: POSSIBLE ISSUE WITH MYSQL SERVER RESTART
+#
+
+source include/have_innodb.inc;
+create table t1 (a int) engine=innodb;
+insert t1 values (1),(2);
+create database ib_logfile2;
+source include/restart_mysqld.inc;
+select * from t1;
+drop table t1;
+drop database ib_logfile2;
diff --git a/mysql-test/suite/perfschema/r/dml_setup_instruments.result b/mysql-test/suite/perfschema/r/dml_setup_instruments.result
index b264a075eba..8d561c48828 100644
--- a/mysql-test/suite/perfschema/r/dml_setup_instruments.result
+++ b/mysql-test/suite/perfschema/r/dml_setup_instruments.result
@@ -16,7 +16,9 @@ wait/synch/mutex/sql/LOCK_binlog_state YES YES
wait/synch/mutex/sql/LOCK_commit_ordered YES YES
select * from performance_schema.setup_instruments
where name like 'Wait/Synch/Rwlock/sql/%'
- and name not in ('wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock')
+ and name not in (
+'wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock',
+'wait/synch/rwlock/sql/LOCK_named_pipe_full_access_group')
order by name limit 10;
NAME ENABLED TIMED
wait/synch/rwlock/sql/LOCK_dboptions YES YES
diff --git a/mysql-test/suite/perfschema/t/dml_setup_instruments.test b/mysql-test/suite/perfschema/t/dml_setup_instruments.test
index 6b4fe89a1cf..098d6bd031d 100644
--- a/mysql-test/suite/perfschema/t/dml_setup_instruments.test
+++ b/mysql-test/suite/perfschema/t/dml_setup_instruments.test
@@ -22,10 +22,13 @@ select * from performance_schema.setup_instruments
order by name limit 10;
# CRYPTO_dynlock_value::lock is dependent on the build (SSL)
+# LOCK_named_pipe_full_access_group is dependent on the build (Windows)
select * from performance_schema.setup_instruments
where name like 'Wait/Synch/Rwlock/sql/%'
- and name not in ('wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock')
+ and name not in (
+ 'wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock',
+ 'wait/synch/rwlock/sql/LOCK_named_pipe_full_access_group')
order by name limit 10;
# COND_handler_count is dependent on the build (Windows only)
diff --git a/mysql-test/suite/roles/flush_roles-17898.result b/mysql-test/suite/roles/flush_roles-17898.result
new file mode 100644
index 00000000000..c09fa166dc0
--- /dev/null
+++ b/mysql-test/suite/roles/flush_roles-17898.result
@@ -0,0 +1,13 @@
+use mysql;
+insert db (db,user,select_priv) values ('foo','dwr_foo','Y'), ('bar','dwr_bar','Y');
+insert roles_mapping (user,role) values ('dwr_qux_dev','dwr_foo'),('dwr_qux_dev','dwr_bar');
+insert user (user,show_db_priv,is_role) values ('dwr_foo','N','Y'), ('dwr_bar','N','Y'), ('dwr_qux_dev','Y','Y');
+Warnings:
+Warning 1364 Field 'ssl_cipher' doesn't have a default value
+Warning 1364 Field 'x509_issuer' doesn't have a default value
+Warning 1364 Field 'x509_subject' doesn't have a default value
+Warning 1364 Field 'authentication_string' doesn't have a default value
+flush privileges;
+drop role dwr_foo;
+drop role dwr_bar;
+drop role dwr_qux_dev;
diff --git a/mysql-test/suite/roles/flush_roles-17898.test b/mysql-test/suite/roles/flush_roles-17898.test
new file mode 100644
index 00000000000..e94efc44dd0
--- /dev/null
+++ b/mysql-test/suite/roles/flush_roles-17898.test
@@ -0,0 +1,11 @@
+#
+# MDEV-17898 FLUSH PRIVILEGES crashes server with segfault
+#
+use mysql;
+insert db (db,user,select_priv) values ('foo','dwr_foo','Y'), ('bar','dwr_bar','Y');
+insert roles_mapping (user,role) values ('dwr_qux_dev','dwr_foo'),('dwr_qux_dev','dwr_bar');
+insert user (user,show_db_priv,is_role) values ('dwr_foo','N','Y'), ('dwr_bar','N','Y'), ('dwr_qux_dev','Y','Y');
+flush privileges;
+drop role dwr_foo;
+drop role dwr_bar;
+drop role dwr_qux_dev;
diff --git a/mysql-test/suite/rpl/r/rpl_idempotency.result b/mysql-test/suite/rpl/r/rpl_idempotency.result
index 38b955d7697..03482e6fefb 100644
--- a/mysql-test/suite/rpl/r/rpl_idempotency.result
+++ b/mysql-test/suite/rpl/r/rpl_idempotency.result
@@ -67,6 +67,18 @@ a
-3
1
include/check_slave_no_error.inc
+drop table t1, t2;
DROP TABLE t1, t2;
+include/check_slave_no_error.inc
+create database d;
+create database e;
+create database d;
+create database if not exists e;
+include/check_slave_no_error.inc
+drop database d;
+drop database e;
+drop database d;
+drop database if exists e;
+include/check_slave_no_error.inc
SET @@global.slave_exec_mode= @old_slave_exec_mode;
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_row_big_table_id_32bit.result b/mysql-test/suite/rpl/r/rpl_row_big_table_id_32bit.result
new file mode 100644
index 00000000000..f84f02cc416
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_row_big_table_id_32bit.result
@@ -0,0 +1,38 @@
+include/master-slave.inc
+[connection master]
+include/rpl_restart_server.inc [server_number=1]
+SET @@debug_dbug="d,simulate_big_table_id";
+CREATE TABLE t (a int);
+INSERT INTO t SET a= 0;
+ALTER TABLE t comment '';
+INSERT INTO t SET a= 1;
+ALTER TABLE t comment '';
+INSERT INTO t SET a= 2;
+ALTER TABLE t comment '';
+INSERT INTO t SET a= 3;
+show binlog events in <file> from <pos>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Gtid 1 # BEGIN GTID #-#-#
+master-bin.000002 # Table_map 1 # table_id: 4294967294 (test.t)
+master-bin.000002 # Write_rows_v1 1 # table_id: 4294967294 flags: STMT_END_F
+master-bin.000002 # Query 1 # COMMIT
+master-bin.000002 # Gtid 1 # GTID #-#-#
+master-bin.000002 # Query 1 # use `test`; ALTER TABLE t comment ''
+master-bin.000002 # Gtid 1 # BEGIN GTID #-#-#
+master-bin.000002 # Table_map 1 # table_id: 1 (test.t)
+master-bin.000002 # Write_rows_v1 1 # table_id: 1 flags: STMT_END_F
+master-bin.000002 # Query 1 # COMMIT
+master-bin.000002 # Gtid 1 # GTID #-#-#
+master-bin.000002 # Query 1 # use `test`; ALTER TABLE t comment ''
+master-bin.000002 # Gtid 1 # BEGIN GTID #-#-#
+master-bin.000002 # Table_map 1 # table_id: 2 (test.t)
+master-bin.000002 # Write_rows_v1 1 # table_id: 2 flags: STMT_END_F
+master-bin.000002 # Query 1 # COMMIT
+master-bin.000002 # Gtid 1 # GTID #-#-#
+master-bin.000002 # Query 1 # use `test`; ALTER TABLE t comment ''
+master-bin.000002 # Gtid 1 # BEGIN GTID #-#-#
+master-bin.000002 # Table_map 1 # table_id: 3 (test.t)
+master-bin.000002 # Write_rows_v1 1 # table_id: 3 flags: STMT_END_F
+master-bin.000002 # Query 1 # COMMIT
+DROP TABLE t;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_row_big_table_id_64bit.result b/mysql-test/suite/rpl/r/rpl_row_big_table_id_64bit.result
new file mode 100644
index 00000000000..d8ecadc61d1
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_row_big_table_id_64bit.result
@@ -0,0 +1,38 @@
+include/master-slave.inc
+[connection master]
+include/rpl_restart_server.inc [server_number=1]
+SET @@debug_dbug="d,simulate_big_table_id";
+CREATE TABLE t (a int);
+INSERT INTO t SET a= 0;
+ALTER TABLE t comment '';
+INSERT INTO t SET a= 1;
+ALTER TABLE t comment '';
+INSERT INTO t SET a= 2;
+ALTER TABLE t comment '';
+INSERT INTO t SET a= 3;
+show binlog events in <file> from <pos>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Gtid 1 # BEGIN GTID #-#-#
+master-bin.000002 # Table_map 1 # table_id: 4294967294 (test.t)
+master-bin.000002 # Write_rows_v1 1 # table_id: 4294967294 flags: STMT_END_F
+master-bin.000002 # Query 1 # COMMIT
+master-bin.000002 # Gtid 1 # GTID #-#-#
+master-bin.000002 # Query 1 # use `test`; ALTER TABLE t comment ''
+master-bin.000002 # Gtid 1 # BEGIN GTID #-#-#
+master-bin.000002 # Table_map 1 # table_id: 4294967295 (test.t)
+master-bin.000002 # Write_rows_v1 1 # table_id: 4294967295 flags: STMT_END_F
+master-bin.000002 # Query 1 # COMMIT
+master-bin.000002 # Gtid 1 # GTID #-#-#
+master-bin.000002 # Query 1 # use `test`; ALTER TABLE t comment ''
+master-bin.000002 # Gtid 1 # BEGIN GTID #-#-#
+master-bin.000002 # Table_map 1 # table_id: 4294967296 (test.t)
+master-bin.000002 # Write_rows_v1 1 # table_id: 4294967296 flags: STMT_END_F
+master-bin.000002 # Query 1 # COMMIT
+master-bin.000002 # Gtid 1 # GTID #-#-#
+master-bin.000002 # Query 1 # use `test`; ALTER TABLE t comment ''
+master-bin.000002 # Gtid 1 # BEGIN GTID #-#-#
+master-bin.000002 # Table_map 1 # table_id: 4294967297 (test.t)
+master-bin.000002 # Write_rows_v1 1 # table_id: 4294967297 flags: STMT_END_F
+master-bin.000002 # Query 1 # COMMIT
+DROP TABLE t;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_idempotency.test b/mysql-test/suite/rpl/t/rpl_idempotency.test
index 186c6260154..e801aac9b5e 100644
--- a/mysql-test/suite/rpl/t/rpl_idempotency.test
+++ b/mysql-test/suite/rpl/t/rpl_idempotency.test
@@ -75,9 +75,30 @@ SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
--source include/check_slave_no_error.inc
+connection slave;
+drop table t1, t2;
+
connection master;
DROP TABLE t1, t2;
sync_slave_with_master;
+--source include/check_slave_no_error.inc
+create database d;
+create database e;
+
+connection master;
+create database d;
+create database if not exists e;
+
+sync_slave_with_master;
+--source include/check_slave_no_error.inc
+drop database d;
+drop database e;
+
+connection master;
+drop database d;
+drop database if exists e;
+sync_slave_with_master;
+--source include/check_slave_no_error.inc
SET @@global.slave_exec_mode= @old_slave_exec_mode;
diff --git a/mysql-test/suite/rpl/t/rpl_row_big_table_id.inc b/mysql-test/suite/rpl/t/rpl_row_big_table_id.inc
new file mode 100644
index 00000000000..926b6e8b0ac
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_row_big_table_id.inc
@@ -0,0 +1,56 @@
+##################################################################
+# rpl_row_big_table_id
+#
+# MDEV-17803 Row-based event is not applied when
+# table map id is greater 32 bit int
+#
+# Verify that row-based events are applied when the table map id value is
+# close to and greater than 1 << 32.
+##################################################################
+--source include/have_debug.inc
+--source include/have_binlog_format_row.inc
+--source include/master-slave.inc
+
+--connection master
+# To reset last table id
+--let $rpl_server_number= 1
+--source include/rpl_restart_server.inc
+
+SET @@debug_dbug="d,simulate_big_table_id";
+CREATE TABLE t (a int);
+
+--let $binlog_file= query_get_value(SHOW MASTER STATUS, File, 1)
+--let $binlog_pos= query_get_value(SHOW MASTER STATUS, Position, 1)
+INSERT INTO t SET a= 0;
+ALTER TABLE t comment '';
+INSERT INTO t SET a= 1;
+ALTER TABLE t comment '';
+INSERT INTO t SET a= 2;
+ALTER TABLE t comment '';
+INSERT INTO t SET a= 3;
+
+# display simulated big table_id
+--let $_in_from=in '$binlog_file' from $binlog_pos
+--replace_result "$_in_from" "in <file> from <pos>"
+--replace_column 2 # 5 #
+--replace_regex /\/\* xid=.* \*\//\/* XID *\// /file_id=[0-9]+/file_id=#/ /GTID [0-9]+-[0-9]+-[0-9]+/GTID #-#-#/
+--eval show binlog events in '$binlog_file' from $binlog_pos
+
+
+--sync_slave_with_master
+
+if (`SELECT sum(a) != 6 FROM t`)
+{
+ --echo *** unexpected result; check slave applier ***
+ --die
+}
+
+
+# Cleanup
+
+--connection master
+DROP TABLE t;
+
+--sync_slave_with_master
+
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_row_big_table_id_32bit.test b/mysql-test/suite/rpl/t/rpl_row_big_table_id_32bit.test
new file mode 100644
index 00000000000..08e1827c4c6
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_row_big_table_id_32bit.test
@@ -0,0 +1,11 @@
+##################################################################
+# rpl_row_big_table_id
+#
+# MDEV-17803 Row-based event is not applied when
+# table map id is greater 32 bit int
+#
+# Verify that row-based events are applied when the table map id value is
+# close to and greater than 1 << 32.
+##################################################################
+--source include/have_32bit.inc
+--source rpl_row_big_table_id.inc
diff --git a/mysql-test/suite/rpl/t/rpl_row_big_table_id_64bit.test b/mysql-test/suite/rpl/t/rpl_row_big_table_id_64bit.test
new file mode 100644
index 00000000000..f9e021cdd2c
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_row_big_table_id_64bit.test
@@ -0,0 +1,11 @@
+##################################################################
+# rpl_row_big_table_id
+#
+# MDEV-17803 Row-based event is not applied when
+# table map id is greater 32 bit int
+#
+# Verify that row-based events are applied when the table map id value is
+# close to and greater than 1 << 32.
+##################################################################
+--source include/have_64bit.inc
+--source rpl_row_big_table_id.inc
diff --git a/mysql-test/suite/sys_vars/r/table_definition_cache_basic.result b/mysql-test/suite/sys_vars/r/table_definition_cache_basic.result
index f6befe51bc1..f7ce3f53bfc 100644
--- a/mysql-test/suite/sys_vars/r/table_definition_cache_basic.result
+++ b/mysql-test/suite/sys_vars/r/table_definition_cache_basic.result
@@ -28,14 +28,14 @@ Warning 1292 Truncated incorrect table_definition_cache value: '2'
SELECT @@global.table_definition_cache;
@@global.table_definition_cache
400
-SET @@global.table_definition_cache = 524287;
+SET @@global.table_definition_cache = 2097151;
SELECT @@global.table_definition_cache;
@@global.table_definition_cache
-524287
-SET @@global.table_definition_cache = 524288;
+2097151
+SET @@global.table_definition_cache = 2097152;
SELECT @@global.table_definition_cache;
@@global.table_definition_cache
-524288
+2097152
'#--------------------FN_DYNVARS_019_04-------------------------#'
SET @@global.table_definition_cache = 0;
Warnings:
@@ -49,18 +49,18 @@ Warning 1292 Truncated incorrect table_definition_cache value: '-1024'
SELECT @@global.table_definition_cache;
@@global.table_definition_cache
400
-SET @@global.table_definition_cache = 524289;
+SET @@global.table_definition_cache = 2097153;
Warnings:
-Warning 1292 Truncated incorrect table_definition_cache value: '524289'
+Warning 1292 Truncated incorrect table_definition_cache value: '2097153'
SELECT @@global.table_definition_cache;
@@global.table_definition_cache
-524288
+2097152
SET @@global.table_definition_cache = 42949672950;
Warnings:
Warning 1292 Truncated incorrect table_definition_cache value: '42949672950'
SELECT @@global.table_definition_cache;
@@global.table_definition_cache
-524288
+2097152
SET @@global.table_definition_cache = 21221204.10;
ERROR 42000: Incorrect argument type to variable 'table_definition_cache'
SET @@global.table_definition_cache = ON;
diff --git a/mysql-test/suite/sys_vars/t/table_definition_cache_basic.test b/mysql-test/suite/sys_vars/t/table_definition_cache_basic.test
index 69f29108645..183d1d0316e 100644
--- a/mysql-test/suite/sys_vars/t/table_definition_cache_basic.test
+++ b/mysql-test/suite/sys_vars/t/table_definition_cache_basic.test
@@ -64,9 +64,9 @@ SET @@global.table_definition_cache = 1;
SELECT @@global.table_definition_cache;
SET @@global.table_definition_cache = 2;
SELECT @@global.table_definition_cache;
-SET @@global.table_definition_cache = 524287;
+SET @@global.table_definition_cache = 2097151;
SELECT @@global.table_definition_cache;
-SET @@global.table_definition_cache = 524288;
+SET @@global.table_definition_cache = 2097152;
SELECT @@global.table_definition_cache;
@@ -79,7 +79,7 @@ SET @@global.table_definition_cache = 0;
SELECT @@global.table_definition_cache;
SET @@global.table_definition_cache = -1024;
SELECT @@global.table_definition_cache;
-SET @@global.table_definition_cache = 524289;
+SET @@global.table_definition_cache = 2097153;
SELECT @@global.table_definition_cache;
SET @@global.table_definition_cache = 42949672950;
SELECT @@global.table_definition_cache;
diff --git a/mysql-test/t/auto_increment_ranges_innodb.test b/mysql-test/t/auto_increment_ranges_innodb.test
index c2afee7ac66..016ca16bd91 100644
--- a/mysql-test/t/auto_increment_ranges_innodb.test
+++ b/mysql-test/t/auto_increment_ranges_innodb.test
@@ -5,3 +5,16 @@
--source include/have_innodb.inc
set default_storage_engine=innodb;
--source auto_increment_ranges.inc
+
+#
+# MDEV-17377 invalid gap in auto-increment values after LOAD DATA
+#
+create table t1 (pk int auto_increment primary key, f varchar(20));
+insert t1 (f) values ('a'), ('b'), ('c'), ('d');
+select null, f into outfile 'load.data' from t1 limit 1;
+load data infile 'load.data' into table t1;
+insert t1 (f) values ('<===');
+select * from t1;
+drop table t1;
+--let $datadir=`select @@datadir`
+--remove_file $datadir/test/load.data
diff --git a/mysql-test/t/bigint.test b/mysql-test/t/bigint.test
index fb18d60edd9..71fef813a8d 100644
--- a/mysql-test/t/bigint.test
+++ b/mysql-test/t/bigint.test
@@ -414,3 +414,12 @@ DROP TABLE t1;
--echo # MDEV-9372 select 100 between 1 and 9223372036854775808 returns false
--echo #
SELECT 100 BETWEEN 1 AND 9223372036854775808;
+
+--echo #
+--echo # MDEV-17724 Wrong result for BETWEEN 0 AND 18446744073709551615
+--echo #
+
+CREATE TABLE t1 (c1 bigint(20) unsigned NOT NULL);
+INSERT INTO t1 VALUES (0),(101),(255);
+SELECT * FROM t1 WHERE c1 BETWEEN 0 AND 18446744073709551615 ORDER BY c1;
+DROP TABLE t1;
diff --git a/mysql-test/t/func_group_innodb.test b/mysql-test/t/func_group_innodb.test
index c62d3d08496..a65d2326d0f 100644
--- a/mysql-test/t/func_group_innodb.test
+++ b/mysql-test/t/func_group_innodb.test
@@ -192,4 +192,30 @@ EXPLAIN SELECT MIN(c) FROM t1 GROUP BY b;
DROP TABLE t1;
+--echo #
+--echo # MDEV-17589: Stack-buffer-overflow with indexed varchar (utf8) field
+--echo #
+
+set @save_innodb_file_format= @@innodb_file_format;
+set @save_innodb_large_prefix= @@innodb_large_prefix;
+set global innodb_file_format = BARRACUDA;
+set global innodb_large_prefix = ON;
+
+CREATE TABLE t1 (v1 varchar(1020), v2 varchar(2), v3 varchar(2),
+ KEY k1 (v3,v2,v1)) ENGINE=InnoDB CHARACTER SET=utf8 ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES ('king', 'qu','qu'), ('bad','go','go');
+explain
+SELECT MIN(t1.v1) FROM t1 where t1.v2='qu' and t1.v3='qu';
+SELECT MIN(t1.v1) FROM t1 where t1.v2='qu' and t1.v3='qu';
+drop table t1;
+
+CREATE TABLE t1 (v1 varchar(1024) CHARACTER SET utf8, KEY v1 (v1)) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES ('king'), ('bad');
+explain
+SELECT MIN(x.v1) FROM (SELECT t1.* FROM t1 WHERE t1.v1 >= 'p') x;
+SELECT MIN(x.v1) FROM (SELECT t1.* FROM t1 WHERE t1.v1 >= 'p') x;
+drop table t1;
+set global innodb_file_format = @save_innodb_file_format;
+set global innodb_large_prefix = @save_innodb_large_prefix;
+
--echo End of 5.5 tests
diff --git a/mysql-test/t/huge_frm-6224.test b/mysql-test/t/huge_frm-6224.test
index 418722a7b51..322abd01738 100644
--- a/mysql-test/t/huge_frm-6224.test
+++ b/mysql-test/t/huge_frm-6224.test
@@ -4,17 +4,24 @@
# verify that huge frms are rejected during creation, not on opening
#
--source include/have_partition.inc
+set global max_allowed_packet=1024*1024*10;
+connect con1,localhost,root;
-let $n=5646;
+let $n=8164;
let $a=create table t1 (a int) engine=myisam partition by hash(a) partitions $n (;
dec $n;
while ($n)
{
- let $a=$a partition p01234567890123456789012345678901234567890123456789012345678$n,;
+ let $a=$a partition p01234567890123456789012345678901234567890123456789012345678$n COMMENT 'partition p01234567890123456789012345678901234567890123456789012345678$n',;
dec $n;
}
--disable_query_log
--error ER_TABLE_DEFINITION_TOO_BIG
eval $a partition foo);
+--enable_query_log
+
+connection default;
+disconnect con1;
+set global max_allowed_packet=default;
diff --git a/mysql-test/t/innodb_ext_key.test b/mysql-test/t/innodb_ext_key.test
index 9f3a89ff948..d53deb46348 100644
--- a/mysql-test/t/innodb_ext_key.test
+++ b/mysql-test/t/innodb_ext_key.test
@@ -693,5 +693,111 @@ drop table t1, t2;
set optimizer_switch=@save_optimizer_switch;
+--echo #
+--echo # MDEV-10360: Extended keys: index properties depend on index order
+--echo #
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1 (
+ index_id bigint(20) unsigned NOT NULL,
+ index_class varchar(265) COLLATE latin1_general_ci DEFAULT NULL ,
+ index_object_id int(10) unsigned NOT NULL DEFAULT '0' ,
+ index_date_updated int(10) unsigned DEFAULT NULL ,
+
+ PRIMARY KEY (index_id),
+ KEY object (index_class(181),index_object_id),
+ KEY index_date_updated (index_date_updated)
+) engine=innodb;
+
+create table t2 (
+ index_id bigint(20) unsigned NOT NULL,
+ index_class varchar(265) COLLATE latin1_general_ci DEFAULT NULL ,
+ index_object_id int(10) unsigned NOT NULL DEFAULT '0' ,
+ index_date_updated int(10) unsigned DEFAULT NULL ,
+
+ PRIMARY KEY (index_id),
+ KEY index_date_updated (index_date_updated),
+ KEY object (index_class(181),index_object_id)
+) engine=innodb;
+
+insert into t1 select
+ @a:=A.a + 10*B.a + 100*C.a,
+ concat('val-', @a),
+ 123456,
+ A.a + 10*B.a
+from
+ t0 A, t0 B, t0 C;
+
+insert into t2 select * from t1;
+
+--echo # This must have the same query plan as the query below it:
+--echo # type=range, key=index_date_updated, key_len=13
+--replace_column 9 #
+explain
+select * from t1 force index(index_date_updated)
+where index_date_updated= 10 and index_id < 800;
+
+--echo # This used to work from the start:
+--replace_column 9 #
+explain
+select * from t2 force index(index_date_updated)
+where index_date_updated= 10 and index_id < 800;
+
+drop table t0,t1,t2;
+
+
+--echo #
+--echo # MDEV-11196: Error:Run-Time Check Failure #2 - Stack around the variable 'key_buff'
+--echo # was corrupted, server crashes in opt_sum_query
+
+set @save_innodb_file_format= @@innodb_file_format;
+set @save_innodb_large_prefix= @@innodb_large_prefix;
+set global innodb_file_format = BARRACUDA;
+set global innodb_large_prefix = ON;
+
+CREATE TABLE t1 (
+ pk INT,
+ f1 VARCHAR(3),
+ f2 VARCHAR(1024),
+ PRIMARY KEY (pk),
+ KEY(f2)
+) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
+
+INSERT INTO t1 VALUES (1,'foo','abc'),(2,'bar','def');
+SELECT MAX(t2.pk) FROM t1 t2 INNER JOIN t1 t3 ON t2.f1 = t3.f1 WHERE t2.pk <= 4;
+drop table t1;
+
+CREATE TABLE t1 (
+ pk1 INT,
+ pk2 INT,
+ f1 VARCHAR(3),
+ f2 VARCHAR(1021),
+ PRIMARY KEY (pk1,pk2),
+ KEY(f2)
+) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
+
+INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def');
+explain
+select * from t1 force index(f2) where pk1 <= 5 and pk2 <=5 and f2 = 'abc' and f1 <= '3';
+drop table t1;
+
+CREATE TABLE t1 (
+f2 INT,
+pk2 INT,
+f1 VARCHAR(3),
+pk1 VARCHAR(1000),
+PRIMARY KEY (pk1,pk2),
+KEY k1(pk1,f2)
+) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
+INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def');
+explain
+select * from t1 force index(k1) where f2 <= 5 and pk2 <=5 and pk1 = 'abc' and f1 <= '3';
+drop table t1;
+
+set optimizer_switch=@save_ext_key_optimizer_switch;
+set global innodb_file_format = @save_innodb_file_format;
+set global innodb_large_prefix = @save_innodb_large_prefix;
+
set optimizer_switch=@save_ext_key_optimizer_switch;
SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/t/mysql.test b/mysql-test/t/mysql.test
index 60dc16bfac7..a8e0a0094e4 100644
--- a/mysql-test/t/mysql.test
+++ b/mysql-test/t/mysql.test
@@ -656,3 +656,25 @@ show create table "a1\""b1";
select * from "a1\""b1";
drop table "a1\""b1";
set sql_mode=default;
+
+#
+# mysql --local-infile
+#
+--let $ldli = load data local infile '$MYSQLTEST_VARDIR/tmp/bug.sql' into table test.t1;
+create table t1 (a text);
+--exec $MYSQL -e "$ldli"
+select count(*) from t1; truncate table t1;
+--exec $MYSQL --enable-local-infile -e "$ldli"
+select count(*) from t1; truncate table t1;
+--error 1
+--exec $MYSQL --disable-local-infile -e "$ldli"
+select count(*) from t1; truncate table t1;
+--error 1
+--exec $MYSQL -e "/*q*/$ldli"
+select count(*) from t1; truncate table t1;
+--exec $MYSQL --enable-local-infile -e "/*q*/$ldli"
+select count(*) from t1; truncate table t1;
+--error 1
+--exec $MYSQL --disable-local-infile -e "/*q*/$ldli"
+select count(*) from t1; truncate table t1;
+drop table t1;
diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test
index 784e65af3f5..7a410ccce15 100644
--- a/mysql-test/t/mysqldump.test
+++ b/mysql-test/t/mysqldump.test
@@ -1806,7 +1806,7 @@ show create event ee1;
## prove three works (with spaces and tabs on the end)
# start with one from the previous restore
-create event ee2 on schedule at '2018-12-31 21:01:23' do set @a=5;
+create event ee2 on schedule at '2030-12-31 21:01:22' do set @a=5;
create event ee3 on schedule at '2030-12-31 22:01:23' do set @a=5;
show events;
--exec $MYSQL_DUMP --events second > $MYSQLTEST_VARDIR/tmp/bug16853-2.sql
diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test
index 1c8cd0375d6..b6a5db2db7c 100644
--- a/mysql-test/t/partition.test
+++ b/mysql-test/t/partition.test
@@ -2897,3 +2897,64 @@ EXECUTE stmt;
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
+--echo #
+--echo # MDEV-17032: Estimates are higher for partitions of a table with @@use_stat_tables= PREFERABLY
+--echo #
+
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1(a int);
+insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
+
+
+create table t2 (
+ part_key int,
+ a int,
+ b int
+) partition by list(part_key) (
+ partition p0 values in (0),
+ partition p1 values in (1),
+ partition p2 values in (2),
+ partition p3 values in (3),
+ partition p4 values in (4)
+);
+insert into t2
+select mod(a,5), a/100, mod(a,5) from t1;
+
+set @save_use_stat_tables= @@use_stat_tables;
+set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+--echo #
+--echo # Tests using stats provided by the storage engine
+--echo #
+explain extended select * from t2 where part_key=1;
+explain partitions select * from t2 where part_key=1;
+explain extended select * from t2 where part_key in (1,2);
+explain partitions select * from t2 where part_key in (1,2);
+explain extended select * from t2 where b=5;
+explain partitions select * from t2 where b=5;
+explain extended select * from t2 partition(p0) where b=1;
+
+
+set @save_histogram_size=@@histogram_size;
+set @@histogram_size=100;
+set @@use_stat_tables= PREFERABLY;
+set @@optimizer_use_condition_selectivity=4;
+analyze table t2;
+--echo #
+--echo # Tests using EITS
+--echo #
+--echo # filtered should be 100
+explain extended select * from t2 where part_key=1;
+explain partitions select * from t2 where part_key=1;
+--echo # filtered should be 100
+explain extended select * from t2 where part_key in (1,2);
+explain partitions select * from t2 where part_key in (1,2);
+explain extended select * from t2 where b=5;
+explain partitions select * from t2 where b=5;
+explain extended select * from t2 partition(p0) where b=1;
+
+set @@use_stat_tables= @save_use_stat_tables;
+set @@optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set @@histogram_size= @save_histogram_size;
+drop table t0,t1,t2;
diff --git a/mysql-test/t/partition_innodb.test b/mysql-test/t/partition_innodb.test
index f6faa4cb0e6..5674a889023 100644
--- a/mysql-test/t/partition_innodb.test
+++ b/mysql-test/t/partition_innodb.test
@@ -996,3 +996,33 @@ SELECT b FROM t1 WHERE b = 0;
SELECT b FROM t1 WHERE b = 0;
--disconnect con1
DROP TABLE t1;
+
+--echo #
+--echo # MDEV-11167: InnoDB: Warning: using a partial-field key prefix
+--echo # in search, results in assertion failure or "Can't find record" error
+--echo #
+
+CREATE TABLE t1 (a INT) ENGINE=InnoDB;
+CREATE TABLE t2 (b INT, c INT, KEY(b)) ENGINE=InnoDB PARTITION BY HASH(c) PARTITIONS 2;
+CREATE ALGORITHM = MERGE VIEW v AS SELECT a, b FROM t1 STRAIGHT_JOIN t2 WHERE b = 'foo' WITH CHECK OPTION;
+
+INSERT INTO t1 VALUES (1),(2);
+INSERT IGNORE INTO t2 VALUES (2,2),('three',3),(4,4);
+UPDATE v SET a = NULL;
+
+DROP view v;
+DROP TABLE t1, t2;
+
+SET @save_isp=@@innodb_stats_persistent;
+SET GLOBAL innodb_stats_persistent= ON;
+
+CREATE TABLE t (f1 INT, f2 INT, KEY(f2)) ENGINE=InnoDB PARTITION BY HASH (f1) PARTITIONS 2;
+INSERT IGNORE INTO t VALUES (NULL,0),(NULL,0),(0,21),(4,0),(1,8),(5,66);
+CREATE ALGORITHM=MERGE VIEW v AS SELECT t1.* FROM t t1 JOIN t t2 WHERE t1.f1 < t2.f2 WITH LOCAL CHECK OPTION;
+--error ER_VIEW_CHECK_FAILED
+UPDATE v SET f2 = NULL;
+
+SET GLOBAL innodb_stats_persistent= @save_isp;
+DROP view v;
+DROP TABLE t;
+
diff --git a/mysql-test/t/range_innodb.test b/mysql-test/t/range_innodb.test
index f76794814ef..a17ef3f1146 100644
--- a/mysql-test/t/range_innodb.test
+++ b/mysql-test/t/range_innodb.test
@@ -3,6 +3,7 @@
--echo #
--source include/have_innodb.inc
+--source include/have_debug.inc
--disable_warnings
drop table if exists t0, t1, t2;
@@ -45,3 +46,44 @@ explain select * from t0 left join t2 on t2.a <t0.a and t2.b between 50 and 250;
drop table t0,t1,t2;
+CREATE TABLE t1 (
+ pk INT PRIMARY KEY, f1 INT, f2 CHAR(1), f3 CHAR(1),
+ KEY(f1), KEY(f2)
+) ENGINE=InnoDB;
+
+INSERT INTO t1 VALUES
+(1,4,'v',NULL),(2,6,'v',NULL),(3,7,'c',NULL),(4,1,'e',NULL),(5,0,'x',NULL),
+(6,7,'i',NULL),(7,7,'e',NULL),(8,1,'p',NULL),(9,7,'s',NULL),(10,1,'j',NULL),
+(11,5,'z',NULL),(12,2,'c',NULL),(13,0,'a',NULL),(14,1,'q',NULL),(15,8,'y',NULL),
+(16,1,'m',NULL),(17,1,'r',NULL),(18,9,'v',NULL),(19,1,'n',NULL);
+
+CREATE TABLE t2 (f4 INT, f5 CHAR(1)) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (4,'q'),(NULL,'j');
+
+SELECT * FROM t1 AS t1_1, t1 AS t1_2, t2
+WHERE f5 = t1_2.f2 AND ( t1_1.f1 = 103 AND t1_1.f2 = 'o' OR t1_1.pk < f4 );
+drop table t1,t2;
+
+--echo #
+--echo # MDEV-14440: Server crash in in handler::ha_external_lock or Assertion `inited==RND'
+--echo # failed in handler::ha_rnd_end upon SELECT from partitioned table
+--echo #
+
+set @optimizer_switch_save= @@optimizer_switch;
+set optimizer_switch='index_merge_sort_intersection=off';
+create table t0 (a int)engine=innodb;
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (
+ a int, b int, c int,
+ key(a),key(b),key(c)
+)engine=innodb;
+insert into t1
+select A.a+10*B.a, A.a+10*B.a, A.a+10*B.a+100*C.a
+from t0 A, t0 B, t0 C, t0 D where D.a<5;
+set @@global.debug_dbug="+d,ha_index_init_fail";
+explain select * from t1 where a=10 and b=10;
+--error ER_TABLE_DEF_CHANGED
+select * from t1 where a=10 and b=10;
+DROP TABLE t0,t1;
+set @@global.debug_dbug="-d";
+set @@optimizer_switch= @optimizer_switch_save;
diff --git a/mysql-test/t/read_only.test b/mysql-test/t/read_only.test
index 691c4104148..de5b548142a 100644
--- a/mysql-test/t/read_only.test
+++ b/mysql-test/t/read_only.test
@@ -307,6 +307,22 @@ flush privileges;
drop database mysqltest_db1;
set global read_only= @start_read_only;
+--echo #
+--echo # MDEV-16987 - ALTER DATABASE possible in read-only mode
+--echo #
+GRANT ALTER ON test1.* TO user1@localhost;
+CREATE DATABASE test1;
+SET GLOBAL read_only=1;
+change_user user1;
+--error ER_OPTION_PREVENTS_STATEMENT
+ALTER DATABASE test1 CHARACTER SET utf8;
+change_user root;
+SET GLOBAL read_only=0;
+DROP DATABASE test1;
+DROP USER user1@localhost;
+USE test;
+
+--echo # End of 5.5 tests
--echo #
--echo # WL#5968 Implement START TRANSACTION READ (WRITE|ONLY);
@@ -315,10 +331,6 @@ set global read_only= @start_read_only;
--echo #
--echo # Test interaction with read_only system variable.
---disable_warnings
-DROP TABLE IF EXISTS t1;
---enable_warnings
-
CREATE TABLE t1(a INT);
INSERT INTO t1 VALUES (1), (2);
@@ -369,3 +381,4 @@ DROP TABLE t1;
# Wait till all disconnects are completed
--source include/wait_until_count_sessions.inc
+--echo # End of 10.0 tests
diff --git a/mysql-test/t/row-checksum.test b/mysql-test/t/row-checksum.test
index 920a2384aa8..6b79827d066 100644
--- a/mysql-test/t/row-checksum.test
+++ b/mysql-test/t/row-checksum.test
@@ -60,3 +60,20 @@ checksum table t1;
checksum table t1 quick;
checksum table t1 extended;
drop table t1;
+
+--echo #
+--echo # MDEV-17085: CHECKSUM TABLE EXTENDED does not work correctly
+--echo #
+
+CREATE TABLE t1 ( c1 int NOT NULL, c2 int NOT NULL, c4 varchar(20), c5 varchar(20), c6 varchar(20), c7 varchar(20), c8 varchar(20), c9 varchar(20), c10 varchar(20), c11 varchar(20), c12 varchar(20), c13 varchar(20), c14 varchar(20), c15 varchar(20), c16 varchar(20), c19 int NOT NULL, c20 int NOT NULL, c21 varchar(20), c22 VARCHAR(20), c23 varchar(20));
+
+insert into t1 values (5,0,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,0,0,"dog",NULL,NULL);
+--echo # Important is that checksum is different from following
+CHECKSUM TABLE t1 EXTENDED;
+UPDATE t1 SET c21='cat' WHERE c1=5;
+--echo # Important is that checksum is different from above
+CHECKSUM TABLE t1 EXTENDED;
+
+drop table t1;
+
+--echo # End of 5.5 tests
diff --git a/mysql-test/t/stat_tables.test b/mysql-test/t/stat_tables.test
index a0b2a22b946..37106f4ec7a 100644
--- a/mysql-test/t/stat_tables.test
+++ b/mysql-test/t/stat_tables.test
@@ -368,3 +368,20 @@ explain
SELECT * FROM INFORMATION_SCHEMA.PROFILING, mysql.user;
set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
set use_stat_tables=@save_use_stat_tables;
+
+--echo #
+--echo # MDEV-17734: AddressSanitizer: use-after-poison in create_key_parts_for_pseudo_indexes
+--echo #
+
+set @@use_stat_tables= PREFERABLY;
+set @save_optimizer_use_condition_selectivity= @@optimizer_use_condition_selectivity;
+set @@optimizer_use_condition_selectivity=4;
+set @save_use_stat_tables= @@use_stat_tables;
+create table t1 (a int, b int);
+insert into t1(a,b) values (1,2),(1,3),(1,4),(1,5),(2,6),(2,7),(3,8),(3,9),(3,9),(4,10);
+
+analyze table t1 persistent for columns (a) indexes ();
+select * from t1 where a=1 and b=3;
+set @@optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+set use_stat_tables=@save_use_stat_tables;
+drop table t1;
diff --git a/mysql-test/t/subselect2.test b/mysql-test/t/subselect2.test
index ae210b865a2..73b0e77ade6 100644
--- a/mysql-test/t/subselect2.test
+++ b/mysql-test/t/subselect2.test
@@ -411,3 +411,23 @@ insert into t3 select a from t1;
select null in (select a from t1 where a < out3.a union select a from t2 where
(select a from t3) +1 < out3.a+1) from t3 out3;
drop table t1, t2, t3;
+
+#
+# Bug #28499924: INCORRECT BEHAVIOR WITH UNION IN SUBQUERY
+#
+CREATE TABLE t1(
+ q11 int, q12 int, q13 int, q14 int, q15 int, q16 int, q17 int, q18 int, q19 int,
+ q21 int, q22 int, q23 int, q24 int, q25 int, q26 int, q27 int, q28 int, q29 int,
+ f1 int
+);
+CREATE TABLE t2(f2 int, f21 int, f3 timestamp, f4 int, f5 int, f6 int);
+INSERT INTO t1 (f1) VALUES (1),(1),(2),(2);
+INSERT INTO t2 VALUES (1,1,"2004-02-29 11:11:11",0,0,0), (2,2,"2004-02-29 11:11:11",0,0,0);
+SELECT f1,
+ (SELECT t.f21 from t2 t where max(
+ q11+q12+q13+q14+q15+q16+q17+q18+q19+
+ q21+q22+q23+q24+q25+q26+q27+q28+q29) = t.f2 UNION
+ SELECT t.f3 FROM t2 AS t WHERE t1.f1=t.f2 AND t.f3=MAX(t1.f1) UNION
+ SELECT 1 LIMIT 1) AS test
+ FROM t1 GROUP BY f1;
+DROP TABLE t1,t2;
diff --git a/mysql-test/t/subselect_mat.test b/mysql-test/t/subselect_mat.test
index 5211f35b48b..66a6cc97acb 100644
--- a/mysql-test/t/subselect_mat.test
+++ b/mysql-test/t/subselect_mat.test
@@ -267,3 +267,16 @@ explain
SELECT * FROM t2 WHERE f IN ( SELECT LEFT('foo',0) FROM t1 ORDER BY 1 );
SELECT * FROM t2 WHERE f IN ( SELECT LEFT('foo',0) FROM t1 ORDER BY 1 );
DROP TABLE t1, t2;
+
+--echo #
+--echo # MDEV-18255: Server crashes in Bitmap<64u>::intersect
+--echo #
+create table t1 (v1 varchar(1)) engine=myisam ;
+create table t2 (v1 varchar(1)) engine=myisam ;
+
+explain
+select 1 from t1 where exists
+ (select 1 from t1 where t1.v1 in (select t2.v1 from t2 having t2.v1 < 'j')) ;
+select 1 from t1 where exists
+ (select 1 from t1 where t1.v1 in (select t2.v1 from t2 having t2.v1 < 'j')) ;
+drop table t1,t2;
diff --git a/mysql-test/t/union.test b/mysql-test/t/union.test
index 240115837c7..8ef8f7c4017 100644
--- a/mysql-test/t/union.test
+++ b/mysql-test/t/union.test
@@ -1437,3 +1437,38 @@ SET @advertAcctId = 1000003;
select @advertAcctId as a from dual union all select 1.0 from dual;
--echo End of 5.5 tests
+
+--echo #
+--echo # MDEV-13784: query causes seg fault
+--echo #
+
+CREATE TABLE t1 (`bug_id` int NOT NULL PRIMARY KEY, `product_id` int NOT NULL);
+INSERT INTO t1 VALUES (45199,1184);
+
+CREATE TABLE t2 (`product_id` int NOT NULL,`userid` int NOT NULL, PRIMARY KEY (`product_id`,`userid`));
+INSERT INTO t2 VALUES (1184,103),(1184,624),(1184,1577),(1184,1582);
+
+CREATE TABLE t3 (`id` int NOT NULL PRIMARY KEY,`name` varchar(64));
+
+
+CREATE TABLE t4 ( `userid` int NOT NULL PRIMARY KEY, `login_name` varchar(255));
+INSERT INTO t4 VALUES (103,'foo'),(624,'foo'),(1577,'foo'),(1582,'foo');
+CREATE TABLE t5 (`id` int NOT NULL PRIMARY KEY, `name` varchar(64));
+
+explain select
+(
+ select login_name from t4 where userId = (
+ select userid from t2 where product_id = t1.product_id
+ union
+ select userid from t2 where product_id = (
+ select id from t5 where name = (select name from t3 where id = t1.product_id)) limit 1 )
+) as x from t1 where (t1.bug_id=45199);
+select
+(
+ select login_name from t4 where userId = (
+ select userid from t2 where product_id = t1.product_id
+ union
+ select userid from t2 where product_id = (
+ select id from t5 where name = (select name from t3 where id = t1.product_id)) limit 1 )
+) as x from t1 where (t1.bug_id=45199);
+drop table t1, t2, t3, t4, t5;
diff --git a/mysql-test/unstable-tests b/mysql-test/unstable-tests
index 6725e2ae9ff..10f15f3ddc4 100644
--- a/mysql-test/unstable-tests
+++ b/mysql-test/unstable-tests
@@ -23,93 +23,72 @@
#
##############################################################################
-# Based on 10.0 6ced789186fabd7dce97b3d6d171ff9e5ddc5f48
+# Based on bb-10.0-release 1522ee2949ae304ad9092894896a6272dc08bb39
main.alter_table : Modified in 10.0.37
-main.assign_key_cache : Added in 10.0.36
-main.assign_key_cache_debug : Added in 10.0.36
-main.auto_increment : Modified in 10.0.36
-main.bootstrap : Modified in 10.0.36
-main.connect_debug : Added in 10.0.36
+main.auto_increment_ranges_innodb : Modified in 10.0.38
+main.bigint : Modified in 10.0.38
main.count_distinct2 : MDEV-11768 - timeout
main.create_delayed : MDEV-10605 - failed with timeout
main.create_or_replace : Modified in 10.0.37
-main.ctype_binary : Modified in 10.0.36
-main.ctype_eucjpms : Modified in 10.0.36
-main.ctype_euckr : Modified in 10.0.36
-main.ctype_gbk : Modified in 10.0.36
-main.ctype_latin1 : Modified in 10.0.36
+main.ctype_latin1 : Modified in 10.0.38
main.ctype_uca : Modified in 10.0.37
-main.ctype_ucs : Modified in 10.0.36
-main.ctype_ujis : Modified in 10.0.36
-main.ctype_utf16le : Modified in 10.0.36
-main.ctype_utf16 : Modified in 10.0.36
-main.ctype_utf32 : Modified in 10.0.36
-main.ctype_utf8mb4 : Modified in 10.0.36
-main.ctype_utf8 : Modified in 10.0.36
main.debug_sync : MDEV-10607 - internal error
-main.derived : Modified in 10.0.36
main.derived_opt : MDEV-11768 - timeout; modified in 10.0.37
+main.events_bugs : MDEV-12892 - Server crash
main.events_slowlog : MDEV-12821 - wrong result
main.func_concat : Modified in 10.0.37
+main.func_group_innodb : Modified in 10.0.38
main.func_isnull : Modified in 10.0.37
main.func_time : Modified in 10.0.37
main.gis : MDEV-13411 - wrong result on P8; modified in 10.0.37
main.grant : Modified in 10.0.37
-main.grant2 : Modified in 10.0.36
-main.grant_not_windows : Added in 10.0.36
main.group_min_max : Modified in 10.0.37
-main.having : Modified in 10.0.36
main.host_cache_size_functionality : MDEV-10606 - sporadic failure on shutdown
+main.huge_frm-6224 : Modified in 10.0.38
main.index_intersect_innodb : MDEV-10643 - failed with timeout
main.index_merge_innodb : MDEV-7142 - wrong result
+main.index_merge_myisam : Modified in 10.0.38
+main.innodb_ext_key : Modified in 10.0.38
main.innodb_mysql_lock : MDEV-7861 - sporadic lock detection failure
-main.insert_select : Modified in 10.0.36
main.join : Modified in 10.0.37
-main.join_cache : Modified in 10.0.36
-main.join_outer : Modified in 10.0.36
main.kill_processlist-6619 : MDEV-10793 - wrong result
-main.limit : Modified in 10.0.36
main.log_tables-big : MDEV-13408 - wrong result
main.lowercase_fs_off : Modified in 10.0.37
main.mdev-504 : MDEV-10607 - sporadic "can't connect"
main.mdev375 : MDEV-10607 - sporadic "can't connect"
main.merge : MDEV-10607 - sporadic "can't connect"
-main.myisam : Modified in 10.0.36
-main.mysql : Modified in 10.0.36
-main.mysql_cp932 : Modified in 10.0.36
-main.mysqldump : Modified in 10.0.36
+main.mysql : Modified in 10.0.38
+main.mysqldump : Modified in 10.0.38
main.mysqlhotcopy_myisam : MDEV-10995 - test hangs on debug build
-main.mysqlslap : Modified in 10.0.36
main.mysqltest : MDEV-9269 - fails on Alpha
main.mysql_client_test_nonblock : MDEV-15096 - exec failed
main.order_by_zerolength-4285 : Modified in 10.0.37
+main.partition : Modified in 10.0.38
main.partition_explicit_prune : Modified in 10.0.37
+main.partition_innodb : Modified in 10.0.38
main.ps : MDEV-11017 - sporadic wrong Prepared_stmt_count
-main.rename : Modified in 10.0.36
main.query_cache_debug : MDEV-15281 - resize or similar command in progress
+main.range_innodb : Modified in 10.0.38
+main.read_only : Modified in 10.0.38
+main.row-checksum : Modified in 10.0.38
main.selectivity : Modified in 10.0.37
main.show_explain : MDEV-10674 - wrong result
main.sp : Modified in 10.0.37
-main.sp-innodb : Modified in 10.0.36
main.sp_notembedded : MDEV-10607 - internal error
main.sp-security : MDEV-10607 - sporadic "can't connect"; modified in 10.0.37
-main.statistics : Modified in 10.0.36
-main.statistics_close : Added in 10.0.36
-main.stat_tables : Modified in 10.0.37
+main.stat_tables : Modified in 10.0.38
main.stat_tables_par_innodb : MDEV-14155 - wrong rounding
-main.subselect : Modified in 10.0.36
+main.subselect2 : Modified in 10.0.38
main.subselect_extra_no_semijoin : Modified in 10.0.37
main.subselect_innodb : MDEV-10614 - sporadic wrong results
-main.subselect_sj : Modified in 10.0.36
-main.subselect_sj_mat : Modified in 10.0.36
-main.subselect_sj2_mat : Modified in 10.0.36
-main.subselect4 : Modified in 10.0.36
+main.subselect_mat : Modified in 10.0.38
main.tc_heuristic_recover : MDEV-15200 - wrong error on mysqld_stub_cmd
main.type_datetime : Modified in 10.0.37
main.type_float : Modified in 10.0.37
+main.type_newdecimal : Modified in 10.0.38
main.type_year : Modified in 10.0.37
-main.union : Modified in 10.0.36
+main.union : Modified in 10.0.38
main.xa : MDEV-11769 - lock wait timeout
#----------------------------------------------------------------
@@ -124,11 +103,11 @@ archive-test_sql_discovery.discover : MDEV-16817 - Table marked as crashed
#----------------------------------------------------------------
binlog.binlog_commit_wait : MDEV-10150 - Error: too much time elapsed
-binlog.binlog_tmp_table_row : Added in 10.0.36
binlog.binlog_xa_recover : MDEV-8517 - Extra checkpoint
#----------------------------------------------------------------
+connect.part_table : Modified in 10.0.38
connect.zip : MDEV-13884 - wrong result
#----------------------------------------------------------------
@@ -137,16 +116,14 @@ engines/rr_trx.* : MDEV-10998 - tests not maintained
#----------------------------------------------------------------
-federated.assisted_discovery : Include file modified in 10.0.36
-federated.federatedx : MDEV-10617 - Wrong checksum, timeouts; include file modified in 10.0.36
+federated.federatedx : MDEV-10617 - Wrong checksum, timeouts
federated.federated_bug_35333 : MDEV-13410 - Wrong result
federated.federated_innodb : MDEV-10617, MDEV-10417 - Wrong checksum, timeouts, fails on Mips
-federated.federated_partition : MDEV-10417 - Fails on Mips; include file modified in 10.0.36
-federated.federated_transactions : MDEV-10617, MDEV-10417 - Wrong checksum, timeouts, fails on Mips; include file modified in 10.0.36
+federated.federated_partition : MDEV-10417 - Fails on Mips
+federated.federated_transactions : MDEV-10617, MDEV-10417 - Wrong checksum, timeouts, fails on Mips
#----------------------------------------------------------------
-funcs_1.is_engines_federated : Include file modified in 10.0.36
funcs_1.memory_views : MDEV-11773 - timeout
funcs_1.processlist_val_ps : MDEV-12175 - Wrong result
funcs_1.processlist_val_no_prot : MDEV-11223 - Wrong result
@@ -155,50 +132,43 @@ funcs_2/charset.* : MDEV-10999 - test not maintained
#----------------------------------------------------------------
-handler.ps : Added in 10.0.36
-
-#----------------------------------------------------------------
-
-heap.heap_auto_increment : Modified in 10.0.36
heap.heap_btree : Modified in 10.0.37
#----------------------------------------------------------------
+innodb.alter_candidate_key : Added in 10.0.38
innodb.alter_inplace_perfschema : Added in 10.0.37
-innodb.alter_partitioned_xa : Added in 10.0.36
innodb.binlog_consistent : MDEV-10618 - Server fails to start
innodb.foreign-keys : Modified in 10.0.37
-innodb.foreign_key : Added in 10.0.37
+innodb.foreign_key : Modified in 10.0.38
innodb.group_commit_crash : MDEV-11770 - checksum mismatch
innodb.group_commit_crash_no_optimize_thread : MDEV-11770 - checksum mismatch
-innodb.innodb-alter : Modified in 10.0.36
+innodb.innodb_28867993 : Added in 10.0.38
+innodb.innodb-alter : Modified in 10.0.38
innodb.innodb-alter-debug : Modified in 10.0.37
innodb.innodb-alter-table : MDEV-10619 - Testcase timeout
innodb.innodb_bug30423 : MDEV-7311 - Wrong number of rows in the plan
innodb.innodb_bug48024 : MDEV-14352 - Assertion failure
-innodb.innodb_bug54044 : Modified in 10.0.36
-innodb.innodb-mdev7046 : Modified in 10.0.36
+innodb.innodb-index : Modified in 10.0.38
innodb.innodb_monitor : MDEV-10939 - Testcase timeout
-innodb.innodb-wl5522 : Modified in 10.0.36
+innodb.innodb_simulate_comp_failures : MDEV-18417 - ASAN failures
+innodb.innodb-table-online : Modified in 10.0.38
+innodb.innodb-virtual-columns : Modified in 10.0.38
innodb.log_file_size : MDEV-15668 - Not found pattern
innodb.recovery_shutdown : MDEV-15671 - Warning: database page corruption
-innodb.rename_table : Added in 10.0.36
innodb.table_definition_cache_debug : MDEV-14206 - Unexpected warning
innodb.table_flags : Modified in 10.0.37
innodb.xa_recovery : MDEV-15279 - mysqld got exception
-innodb_fts.basic : Added in 10.0.36
innodb_fts.fts_kill_query : Modified in 10.0.37
innodb_fts.innodb-fts-fic : MDEV-14154 - Assertion failure
innodb_fts.innodb_fts_misc_debug : MDEV-14156 - Unexpected warning
-innodb_fts.sync_ddl : Added in 10.0.36
#----------------------------------------------------------------
-maria.alter : Modified in 10.0.36
maria.create : Added in 10.0.37
maria.fulltext2 : Added in 10.0.37
-maria.lock : Modified in 10.0.36
+maria.insert_select : MDEV-12757 - Timeout
maria.maria : MDEV-14430 - Wrong result; modified in 10.0.37
#----------------------------------------------------------------
@@ -221,16 +191,15 @@ multi_source.status_vars : MDEV-4632 - failed while waiting for Slave_received_h
#----------------------------------------------------------------
-parts.alter_data_directory_innodb : Added in 10.0.36
parts.partition_auto_increment_archive : MDEV-16491 - Table marked as crashed
parts.partition_auto_increment_maria : MDEV-14430 - wrong result
parts.partition_exch_qa_10 : MDEV-11765 - wrong result
-parts.truncate_locked : Added in 10.0.36
parts.update_and_cache : Added in 10.0.37
#----------------------------------------------------------------
perfschema.connect_attrs : MDEV-17283 - Wrong result
+perfschema.dml_setup_instruments : Modified in 10.0.38
perfschema.func_file_io : MDEV-5708 - fails for s390x
perfschema.func_mutex : MDEV-5708 - fails for s390x
perfschema.hostcache_ipv6_ssl : MDEV-10696 - crash on shutdown
@@ -242,17 +211,17 @@ perfschema_stress.* : MDEV-10996 - tests not maintained
#----------------------------------------------------------------
plugins.feedback_plugin_send : MDEV-7932 - ssl failed for url, MDEV-11118 - wrong result
-plugins.server_audit : MDEV-9562 - crashes on sol10-sparc; modified in 10.0.36
+plugins.server_audit : MDEV-9562 - crashes on sol10-sparc
plugins.thread_pool_server_audit : MDEV-9562 - crashes on sol10-sparc, MDEV-14295 - wrong result
#----------------------------------------------------------------
roles.create_and_grant_role : MDEV-11772 - wrong result
+roles.flush_roles-17898 : Added in 10.0.38
#----------------------------------------------------------------
rpl.last_insert_id : MDEV-10625 - warnings in error log
-rpl.rename : Added in 10.0.36
rpl.rpl_15919 : Added in 10.0.37
rpl.rpl_auto_increment : MDEV-10417 - Fails on Mips
rpl.rpl_auto_increment_bug45679 : MDEV-10417 - Fails on Mips
@@ -263,6 +232,7 @@ rpl.rpl_foreign_key_innodb : Modified in 10.0.37
rpl.rpl_gtid_crash : MDEV-9501 - Warning: failed registering on master
rpl.rpl_gtid_stop_start : MDEV-10629 - Crash on shutdown
rpl.rpl_gtid_until : MDEV-10625 - warnings in error log
+rpl.rpl_idempotency : Modified in 10.0.38
rpl.rpl_innodb_bug30888 : MDEV-10417 - Fails on Mips
rpl.rpl_insert : MDEV-9329 - Fails on Ubuntu/s390x
rpl.rpl_insert_delayed : MDEV-9329 - Fails on Ubuntu/s390x
@@ -270,14 +240,14 @@ rpl.rpl_insert_id_pk : MDEV-16567 - Assertion failure
rpl.rpl_invoked_features : MDEV-10417 - Fails on Mips
rpl.rpl_lcase_tblnames_rewrite_db : Added in 10.0.37
rpl.rpl_mdev6020 : MDEV-10417 - Timeouts, fails on Mips
-rpl.rpl_mixed_implicit_commit_binlog : Included file modified in 10.0.36
rpl.rpl_parallel : MDEV-10653 - Timeouts
rpl.rpl_parallel_mdev6589 : MDEV-12979 - Assertion failure
rpl.rpl_parallel_multilevel2 : MDEV-14723 - Timeout
rpl.rpl_parallel_temptable : MDEV-10356 - Crash in close_thread_tables
rpl.rpl_partition_innodb : MDEV-10417 - Fails on Mips
rpl.rpl_row_basic_11bugs : MDEV-12171 - Server failed to start
-rpl.rpl_row_implicit_commit_binlog : Included file modified in 10.0.36
+rpl.rpl_row_big_table_id_32bit : Added in 10.0.38
+rpl.rpl_row_big_table_id_64bit : Added in 10.0.38
rpl.rpl_row_index_choice : MDEV-13409 - Server crash
rpl.rpl_row_lcase_tblnames : Added in 10.0.37
rpl.rpl_row_sp001 : MDEV-9329 - Fails on Ubuntu/s390x
@@ -287,7 +257,7 @@ rpl.rpl_semi_sync_uninstall_plugin : MDEV-7140 - Wrong plugin status
rpl.rpl_show_slave_hosts : MDEV-12171 - Server failed to start
rpl.rpl_skip_replication : MDEV-9268 - Fails with timeout in sync_slave_with_master on Alpha
rpl.rpl_slave_grp_exec : MDEV-10514 - Unexpected deadlock
-rpl.rpl_stm_implicit_commit_binlog : Included file modified in 10.0.36
+rpl.rpl_start_stop_slave : MDEV-13567 - Timeout in sync
rpl.rpl_stm_lcase_tblnames : Added in 10.0.37
rpl.rpl_sync : MDEV-10633 - Database page corruption
rpl.rpl_temporary_error2 : MDEV-10634 - Wrong number of retries
@@ -324,6 +294,7 @@ stress.ddl_innodb : MDEV-10635 - Testcase timeout
sys_vars.autocommit_func2 : MDEV-9329 - Fails on Ubuntu/s390x
sys_vars.innodb_ft_result_cache_limit_32 : Added in 10.0.37
sys_vars.innodb_ft_result_cache_limit_64 : Added in 10.0.37
+sys_vars.table_definition_cache_basic : Modified in 10.0.38
sys_vars.thread_cache_size_func : MDEV-11775 - wrong result
#----------------------------------------------------------------
@@ -345,6 +316,7 @@ tokudb.savepoint-5 : MDEV-15280 - wrong result
tokudb_backup.* : MDEV-11001 - tests don't work
tokudb_bugs.PS-3773 : Added in 10.0.37
+tokudb_bugs.PS-4979 : Added in 10.0.38
tokudb_bugs.alter_table_comment_rebuild_data : Added in 10.0.37
tokudb_bugs.checkpoint_lock : MDEV-10637 - Wrong processlist output
tokudb_bugs.checkpoint_lock_3 : MDEV-10637 - Wrong processlist output
@@ -360,14 +332,13 @@ rpl-tokudb.* : MDEV-14354 - Tests fail with tcmalloc
#----------------------------------------------------------------
-unit.lf : MDEV-12897 - Unexpected return code
+unit.lf : MDEV-18416 - Object was probably modified after being freed
unit.ma_test_loghandler : MDEV-10638 - record read not ok
-unit.my_atomic : MDEV-15670 - Signal 11 thrown
#----------------------------------------------------------------
vcol.not_supported : MDEV-10639 - Testcase timeout
vcol.vcol_keys_innodb : MDEV-10639 - Testcase timeout
-vcol.vcol_misc : MDEV-16651 - Wrong error message; modified in 10.0.36
+vcol.vcol_misc : MDEV-16651 - Wrong error message
#----------------------------------------------------------------
diff --git a/mysys/mf_iocache.c b/mysys/mf_iocache.c
index f356aa91929..bb638c94d18 100644
--- a/mysys/mf_iocache.c
+++ b/mysys/mf_iocache.c
@@ -282,6 +282,10 @@ int init_io_cache(IO_CACHE *info, File file, size_t cachesize,
}
info->inited=info->aio_result.pending=0;
#endif
+ if (type == READ_CACHE || type == WRITE_CACHE || type == SEQ_READ_APPEND)
+ info->myflags|= MY_FULL_IO;
+ else
+ info->myflags&= ~MY_FULL_IO;
DBUG_RETURN(0);
} /* init_io_cache */
diff --git a/mysys/my_file.c b/mysys/my_file.c
index a23ab487d00..23226595b2e 100644
--- a/mysys/my_file.c
+++ b/mysys/my_file.c
@@ -52,10 +52,9 @@ static uint set_max_open_files(uint max_file_limit)
DBUG_PRINT("info", ("rlim_cur: %u rlim_max: %u",
(uint) rlimit.rlim_cur,
(uint) rlimit.rlim_max));
- if ((ulonglong) rlimit.rlim_cur == (ulonglong) RLIM_INFINITY)
- rlimit.rlim_cur = max_file_limit;
- if (rlimit.rlim_cur >= max_file_limit)
- DBUG_RETURN(rlimit.rlim_cur); /* purecov: inspected */
+ if ((ulonglong) rlimit.rlim_cur == (ulonglong) RLIM_INFINITY ||
+ rlimit.rlim_cur >= max_file_limit)
+ DBUG_RETURN(max_file_limit);
rlimit.rlim_cur= rlimit.rlim_max= max_file_limit;
if (setrlimit(RLIMIT_NOFILE, &rlimit))
max_file_limit= old_cur; /* Use original value */
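
The my_file.c change above makes set_max_open_files() report the caller's requested limit whenever the soft RLIMIT_NOFILE is unlimited or already at least that large, instead of echoing back a possibly huge rlim_cur. A minimal standalone sketch of that clamping logic, assuming POSIX getrlimit()/setrlimit(); the helper name and the requested value are illustrative, not MariaDB's:

    #include <sys/resource.h>
    #include <cstdio>

    // Sketch: ask the OS for up to `wanted` open files and report the value
    // the caller can rely on afterwards.
    static unsigned long request_open_files(unsigned long wanted)
    {
      struct rlimit rl;
      if (getrlimit(RLIMIT_NOFILE, &rl) != 0)
        return wanted;                        // cannot query: assume the request
      // Unlimited, or already large enough: never report more than was asked for.
      if (rl.rlim_cur == RLIM_INFINITY || rl.rlim_cur >= wanted)
        return wanted;
      unsigned long old_cur = (unsigned long) rl.rlim_cur;
      rl.rlim_cur = rl.rlim_max = wanted;     // try to raise soft and hard limit
      if (setrlimit(RLIMIT_NOFILE, &rl) != 0)
        return old_cur;                       // raise failed: keep the old soft limit
      return wanted;
    }

    int main()
    {
      std::printf("usable open files: %lu\n", request_open_files(1024));
      return 0;
    }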
diff --git a/mysys/my_pread.c b/mysys/my_pread.c
index 745cde9ec41..1770843d0ac 100644
--- a/mysys/my_pread.c
+++ b/mysys/my_pread.c
@@ -47,8 +47,7 @@
size_t my_pread(File Filedes, uchar *Buffer, size_t Count, my_off_t offset,
myf MyFlags)
{
- size_t readbytes;
- int error= 0;
+ size_t readbytes, save_count= 0;
DBUG_ENTER("my_pread");
@@ -66,11 +65,10 @@ size_t my_pread(File Filedes, uchar *Buffer, size_t Count, my_off_t offset,
#else
readbytes= pread(Filedes, Buffer, Count, offset);
#endif
- error = (readbytes != Count);
- if (error)
+ if (readbytes != Count)
{
- my_errno= errno ? errno : -1;
+ my_errno= errno;
if (errno == 0 || (readbytes != (size_t) -1 &&
(MyFlags & (MY_NABP | MY_FNABP))))
my_errno= HA_ERR_FILE_TOO_SHORT;
@@ -82,6 +80,18 @@ size_t my_pread(File Filedes, uchar *Buffer, size_t Count, my_off_t offset,
(int) readbytes));
continue; /* Interrupted */
}
+
+ /* Do a read retry if we didn't get enough data on first read */
+ if (readbytes != (size_t) -1 && readbytes != 0 &&
+ (MyFlags & MY_FULL_IO))
+ {
+ Buffer+= readbytes;
+ Count-= readbytes;
+ save_count+= readbytes;
+ offset+= readbytes;
+ continue;
+ }
+
if (MyFlags & (MY_WME | MY_FAE | MY_FNABP))
{
if (readbytes == (size_t) -1)
@@ -97,8 +107,10 @@ size_t my_pread(File Filedes, uchar *Buffer, size_t Count, my_off_t offset,
DBUG_RETURN(MY_FILE_ERROR); /* Return with error */
}
if (MyFlags & (MY_NABP | MY_FNABP))
- DBUG_RETURN(0); /* Read went ok; Return 0 */
- DBUG_RETURN(readbytes); /* purecov: inspected */
+ readbytes= 0; /* Read went ok; Return 0 */
+ else
+ readbytes+= save_count;
+ DBUG_RETURN(readbytes);
}
} /* my_pread */
diff --git a/mysys/my_read.c b/mysys/my_read.c
index 922da5a7e95..58ab9070c8c 100644
--- a/mysys/my_read.c
+++ b/mysys/my_read.c
@@ -35,17 +35,16 @@
size_t my_read(File Filedes, uchar *Buffer, size_t Count, myf MyFlags)
{
- size_t readbytes, save_count;
+ size_t readbytes, save_count= 0;
DBUG_ENTER("my_read");
DBUG_PRINT("my",("fd: %d Buffer: %p Count: %lu MyFlags: %lu",
Filedes, Buffer, (ulong) Count, MyFlags));
- save_count= Count;
if (!(MyFlags & (MY_WME | MY_FAE | MY_FNABP)))
MyFlags|= my_global_flags;
for (;;)
{
- errno= 0; /* Linux, Windows don't reset this on EOF/success */
+ errno= 0; /* Linux, Windows don't reset this on EOF/success */
#ifdef _WIN32
readbytes= my_win_read(Filedes, Buffer, Count);
#else
@@ -61,47 +60,52 @@ size_t my_read(File Filedes, uchar *Buffer, size_t Count, myf MyFlags)
if (readbytes != Count)
{
- my_errno= errno;
- if (errno == 0 || (readbytes != (size_t) -1 &&
- (MyFlags & (MY_NABP | MY_FNABP))))
- my_errno= HA_ERR_FILE_TOO_SHORT;
+ int got_errno= my_errno= errno;
DBUG_PRINT("warning",("Read only %d bytes off %lu from %d, errno: %d",
(int) readbytes, (ulong) Count, Filedes,
- my_errno));
+ got_errno));
+
+ if (got_errno == 0 || (readbytes != (size_t) -1 &&
+ (MyFlags & (MY_NABP | MY_FNABP))))
+ my_errno= HA_ERR_FILE_TOO_SHORT;
- if ((readbytes == 0 || (int) readbytes == -1) && errno == EINTR)
- {
+ if ((readbytes == 0 || (int) readbytes == -1) && got_errno == EINTR)
+ {
DBUG_PRINT("debug", ("my_read() was interrupted and returned %ld",
(long) readbytes));
continue; /* Interrupted */
}
+ /* Do a read retry if we didn't get enough data on first read */
+ if (readbytes != (size_t) -1 && readbytes != 0 &&
+ (MyFlags & MY_FULL_IO))
+ {
+ Buffer+= readbytes;
+ Count-= readbytes;
+ save_count+= readbytes;
+ continue;
+ }
+
if (MyFlags & (MY_WME | MY_FAE | MY_FNABP))
{
if (readbytes == (size_t) -1)
my_error(EE_READ,
MYF(ME_BELL | ME_WAITTANG | (MyFlags & (ME_JUST_INFO | ME_NOREFRESH))),
- my_filename(Filedes),my_errno);
+ my_filename(Filedes), got_errno);
else if (MyFlags & (MY_NABP | MY_FNABP))
my_error(EE_EOFERR,
MYF(ME_BELL | ME_WAITTANG | (MyFlags & (ME_JUST_INFO | ME_NOREFRESH))),
- my_filename(Filedes),my_errno);
+ my_filename(Filedes), got_errno);
}
if (readbytes == (size_t) -1 ||
((MyFlags & (MY_FNABP | MY_NABP)) && !(MyFlags & MY_FULL_IO)))
- DBUG_RETURN(MY_FILE_ERROR); /* Return with error */
- if (readbytes != (size_t) -1 && (MyFlags & MY_FULL_IO))
- {
- Buffer+= readbytes;
- Count-= readbytes;
- continue;
- }
+ DBUG_RETURN(MY_FILE_ERROR); /* Return with error */
}
if (MyFlags & (MY_NABP | MY_FNABP))
- readbytes= 0; /* Ok on read */
- else if (MyFlags & MY_FULL_IO)
- readbytes= save_count;
+ readbytes= 0; /* Ok on read */
+ else
+ readbytes+= save_count;
break;
}
DBUG_RETURN(readbytes);
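
Together with the mf_iocache.c hunk that turns MY_FULL_IO on for READ_CACHE, WRITE_CACHE and SEQ_READ_APPEND caches, the my_pread.c and my_read.c changes above add a retry path: a short but non-zero read advances the buffer, the remaining count and a running total, then reads again. A minimal sketch of that loop using plain POSIX read() instead of the mysys wrappers (the helper name is illustrative):

    #include <unistd.h>
    #include <cerrno>
    #include <cstddef>

    // Sketch: keep calling read() until `count` bytes have arrived, EOF is hit,
    // or a real error occurs.  Returns the number of bytes read, or -1 on error.
    static ssize_t read_fully(int fd, unsigned char *buf, size_t count)
    {
      size_t total = 0;
      while (count > 0)
      {
        ssize_t got = read(fd, buf, count);
        if (got < 0)
        {
          if (errno == EINTR)
            continue;                 // interrupted: retry the same read
          return -1;                  // real error
        }
        if (got == 0)
          break;                      // EOF: return the short total
        buf   += got;                 // partial read: advance and go again
        count -= (size_t) got;
        total += (size_t) got;
      }
      return (ssize_t) total;
    }

The mysys versions additionally return 0 instead of the byte count when MY_NABP/MY_FNABP are set and report HA_ERR_FILE_TOO_SHORT when the retries still end early; the sketch leaves those flag conventions out.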
diff --git a/mysys/safemalloc.c b/mysys/safemalloc.c
index 5d19647c989..08e43d0a80a 100644
--- a/mysys/safemalloc.c
+++ b/mysys/safemalloc.c
@@ -282,8 +282,8 @@ static void warn(const char *format,...)
va_list args;
DBUG_PRINT("error", ("%s", format));
va_start(args,format);
- fflush(stderr);
vfprintf(stderr, format, args);
+ fflush(stderr);
va_end(args);
#ifdef HAVE_BACKTRACE
diff --git a/res b/res
deleted file mode 100644
index e95690277cc..00000000000
--- a/res
+++ /dev/null
@@ -1,22 +0,0 @@
-diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
-index 6cbf6774dc7..7233fe6c745 100644
---- a/storage/innobase/handler/ha_innodb.cc
-+++ b/storage/innobase/handler/ha_innodb.cc
-@@ -108,6 +108,7 @@ MYSQL_PLUGIN_IMPORT extern char mysql_unpacked_real_data_home[];
- #endif /* UNIV_DEBUG */
- #include "fts0priv.h"
- #include "page0zip.h"
-+#include "dict0priv.h"
-
- #define thd_get_trx_isolation(X) ((enum_tx_isolation)thd_tx_isolation(X))
-
-@@ -8598,7 +8599,8 @@ ha_innobase::delete_row(
- wsrep_on(user_thd) &&
- !wsrep_thd_skip_append_keys(user_thd))
- {
-- if (wsrep_append_keys(user_thd, false, record, NULL)) {
-+ if (wsrep_append_keys(user_thd, WSREP_KEY_EXCLUSIVE, record,
-+ NULL)) {
- DBUG_PRINT("wsrep", ("delete fail"));
- error = (dberr_t)HA_ERR_INTERNAL_ERROR;
- goto wsrep_error;
diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh
index e4f2d419ea5..0c1dd7c724f 100644
--- a/scripts/mysql_install_db.sh
+++ b/scripts/mysql_install_db.sh
@@ -36,6 +36,9 @@ in_rpm=0
ip_only=0
cross_bootstrap=0
+dirname0=`dirname $0 2>/dev/null`
+dirname0=`dirname $dirname0 2>/dev/null`
+
usage()
{
cat <<EOF
@@ -213,11 +216,6 @@ cannot_find_file()
echo "If you don't want to do a full install, you can use the --srcdir"
echo "option to only install the mysql database and privilege tables"
echo
- echo "If you compiled from source, you need to either run 'make install' to"
- echo "copy the software into the correct location ready for operation."
- echo "If you don't want to do a full install, you can use the --srcdir"
- echo "option to only install the mysql database and privilege tables"
- echo
echo "If you are using a binary release, you must either be at the top"
echo "level of the extracted archive, or pass the --basedir option"
echo "pointing to that location."
@@ -258,6 +256,9 @@ then
cannot_find_file my_print_defaults $basedir/bin $basedir/extra
exit 1
fi
+elif test -n "$dirname0" -a -x "$dirname0/@bindir@/my_print_defaults"
+then
+ print_defaults="$dirname0/@bindir@/my_print_defaults"
else
print_defaults="@bindir@/my_print_defaults"
fi
@@ -309,6 +310,14 @@ then
cannot_find_file fill_help_tables.sql @pkgdata_locations@
exit 1
fi
+# relative from where the script was run for a relocatable install
+elif test -n "$dirname0" -a -x "$dirname0/@INSTALL_SBINDIR@/mysqld"
+then
+ basedir="$dirname0"
+ bindir="$basedir/@INSTALL_SBINDIR@"
+ resolveip="$bindir/resolveip"
+ mysqld="$basedir/@INSTALL_SBINDIR@/mysqld"
+ pkgdatadir="$basedir/@INSTALL_MYSQLSHAREDIR@"
else
basedir="@prefix@"
bindir="@bindir@"
diff --git a/scripts/mytop.sh b/scripts/mytop.sh
index 1cf1c5313b4..0619f996549 100644
--- a/scripts/mytop.sh
+++ b/scripts/mytop.sh
@@ -437,7 +437,7 @@ while (1)
if ($key eq 'C')
{
- if ( $HAS_COLOR )
+ if ( $HAS_COLOR )
{
$HAS_COLOR = 0;
}
@@ -817,11 +817,11 @@ sub GetData()
if ($config{header})
{
my @recs = "";
- if ( $db_release > 4 )
+ if ( $db_release > 4 )
{
@recs = Hashes("show global status");
- }
- else
+ }
+ else
{
@recs = Hashes("show status");
}
@@ -978,7 +978,7 @@ sub GetData()
# print("q_diff: $STATUS{Questions} - $OLD_STATUS{Questions} / $t_delta = $q_diff\n");
printf(" Sorts: %5.0f qps now: %4.0f Slow qps: %3.1f Threads: %4.0f (%4.0f/%4.0f) %02.0f/%02.0f/%02.0f/%02.0f\n",
- ( $STATUS{Sort_rows} - $OLD_STATUS{Sort_rows} ) / $t_delta,
+ ( $STATUS{Sort_rows} - $OLD_STATUS{Sort_rows} ) / $t_delta,
( $STATUS{Questions} - $OLD_STATUS{Questions} ) / $t_delta,
( # slow now (qps)
($STATUS{Slow_queries} ) ?
@@ -989,7 +989,7 @@ sub GetData()
$STATUS{Threads_running},
$STATUS{Threads_cached},
- (100 * ($STATUS{Com_select} - $OLD_STATUS{Com_select} +
+ (100 * ($STATUS{Com_select} - $OLD_STATUS{Com_select} +
($STATUS{Qcache_hits}||0) - ($OLD_STATUS{Qcache_hits}||0)
) ) / ($q_diff ),
(100 * ($STATUS{Com_insert} - $OLD_STATUS{Com_insert} +
@@ -1075,7 +1075,7 @@ sub GetData()
$t_delta,
($STATUS{Rows_tmp_read} - $OLD_STATUS{Rows_tmp_read}) /
$t_delta,
- ($STATUS{Handler_tmp_write}
+ ($STATUS{Handler_tmp_write}
-$OLD_STATUS{Handler_tmp_write})/$t_delta,
($STATUS{Handler_tmp_update} -
$OLD_STATUS{Handler_tmp_update})/$t_delta);
@@ -1119,6 +1119,7 @@ sub GetData()
}
}
print " Replication ";
+ print "Master:$data->{Master_Host} ";
print "IO:$data->{Slave_IO_Running} ";
print "SQL:$data->{Slave_SQL_Running} ";
print RESET() if ($HAS_COLOR);
@@ -1225,9 +1226,9 @@ sub GetData()
$thread->{State} ||= "";
$thread->{Progress} ||= 0;
- ## alter double hyphen comments so they don't break
+ ## alter double hyphen comments so they don't break
## the query when newlines are removed - http://freshmeat.net/users/jerjones
- $thread->{Info} =~ s~\s--(.*)$~ /* $1 */ ~mg;
+ $thread->{Info} =~ s~\s--(.*)$~ /* $1 */ ~mg;
## Normalize spaces -- mostly disabled for now. This can
## break EXPLAIN if you try to explain a mangled query. It
diff --git a/sql-common/client.c b/sql-common/client.c
index d78e6167809..4bebf9ec63e 100644
--- a/sql-common/client.c
+++ b/sql-common/client.c
@@ -110,6 +110,12 @@ my_bool net_flush(NET *net);
#include <my_context.h>
#include <mysql_async.h>
+typedef enum {
+ ALWAYS_ACCEPT, /* heuristics is disabled, use CLIENT_LOCAL_FILES */
+ WAIT_FOR_QUERY, /* heuristics is enabled, not sending files */
+ ACCEPT_FILE_REQUEST /* heuristics is enabled, ready to send a file */
+} auto_local_infile_state;
+
#define native_password_plugin_name "mysql_native_password"
#define old_password_plugin_name "mysql_old_password"
@@ -1632,8 +1638,10 @@ mysql_init(MYSQL *mysql)
--enable-local-infile
*/
-#if defined(ENABLED_LOCAL_INFILE) && !defined(MYSQL_SERVER)
+#if ENABLED_LOCAL_INFILE && !defined(MYSQL_SERVER)
mysql->options.client_flag|= CLIENT_LOCAL_FILES;
+ mysql->auto_local_infile= ENABLED_LOCAL_INFILE == LOCAL_INFILE_MODE_AUTO
+ ? WAIT_FOR_QUERY : ALWAYS_ACCEPT;
#endif
#ifdef HAVE_SMEM
@@ -3999,8 +4007,14 @@ static my_bool cli_read_query_result(MYSQL *mysql)
ulong field_count;
MYSQL_DATA *fields;
ulong length;
+#ifdef MYSQL_CLIENT
+ my_bool can_local_infile= mysql->auto_local_infile != WAIT_FOR_QUERY;
+#endif
DBUG_ENTER("cli_read_query_result");
+ if (mysql->auto_local_infile == ACCEPT_FILE_REQUEST)
+ mysql->auto_local_infile= WAIT_FOR_QUERY;
+
if ((length = cli_safe_read(mysql)) == packet_error)
DBUG_RETURN(1);
free_old_query(mysql); /* Free old result */
@@ -4037,7 +4051,8 @@ get_info:
{
int error;
- if (!(mysql->options.client_flag & CLIENT_LOCAL_FILES))
+ if (!(mysql->options.client_flag & CLIENT_LOCAL_FILES) ||
+ !can_local_infile)
{
set_mysql_error(mysql, CR_MALFORMED_PACKET, unknown_sqlstate);
DBUG_RETURN(1);
@@ -4075,6 +4090,13 @@ int STDCALL
mysql_send_query(MYSQL* mysql, const char* query, ulong length)
{
DBUG_ENTER("mysql_send_query");
+ if (mysql->options.client_flag & CLIENT_LOCAL_FILES &&
+ mysql->auto_local_infile == WAIT_FOR_QUERY &&
+ (*query == 'l' || *query == 'L'))
+ {
+ if (strncasecmp(query, STRING_WITH_LEN("load")) == 0)
+ mysql->auto_local_infile= ACCEPT_FILE_REQUEST;
+ }
DBUG_RETURN(simple_command(mysql, COM_QUERY, (uchar*) query, length, 1));
}
@@ -4288,10 +4310,12 @@ mysql_options(MYSQL *mysql,enum mysql_option option, const void *arg)
mysql->options.protocol=MYSQL_PROTOCOL_PIPE; /* Force named pipe */
break;
case MYSQL_OPT_LOCAL_INFILE: /* Allow LOAD DATA LOCAL ?*/
- if (!arg || MY_TEST(*(uint*) arg))
+ if (!arg || *(uint*) arg)
mysql->options.client_flag|= CLIENT_LOCAL_FILES;
else
mysql->options.client_flag&= ~CLIENT_LOCAL_FILES;
+ mysql->auto_local_infile= arg && *(uint*)arg == LOCAL_INFILE_MODE_AUTO
+ ? WAIT_FOR_QUERY : ALWAYS_ACCEPT;
break;
case MYSQL_INIT_COMMAND:
add_init_command(&mysql->options,arg);
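
The sql-common/client.c changes above implement the --local-infile heuristic exercised by the new mysql.test cases: a LOAD DATA LOCAL file request from the server is honoured only if the most recently sent query started with the word "load". A simplified sketch of that state machine, using a stand-in Client struct instead of MYSQL (the state names match the diff):

    #include <strings.h>   // strncasecmp (POSIX)

    // State names match the diff; the Client struct is a stand-in for MYSQL.
    enum auto_local_infile_state
    {
      ALWAYS_ACCEPT,        // heuristic off: CLIENT_LOCAL_FILES alone decides
      WAIT_FOR_QUERY,       // heuristic on: not currently willing to send a file
      ACCEPT_FILE_REQUEST   // heuristic on: last query was LOAD ..., one file may go
    };

    struct Client { auto_local_infile_state auto_local_infile; };

    // Called when a query is sent: arm the client only for LOAD statements.
    static void on_send_query(Client *c, const char *query)
    {
      if (c->auto_local_infile == WAIT_FOR_QUERY &&
          strncasecmp(query, "load", 4) == 0)
        c->auto_local_infile = ACCEPT_FILE_REQUEST;
    }

    // Called when the result is read: allow the file transfer at most once.
    static bool may_send_local_file(Client *c)
    {
      bool allowed = (c->auto_local_infile != WAIT_FOR_QUERY);
      if (c->auto_local_infile == ACCEPT_FILE_REQUEST)
        c->auto_local_infile = WAIT_FOR_QUERY;
      return allowed;
    }

A query hidden behind a leading comment ("/*q*/load data ...") therefore does not arm the heuristic, which is what the --error 1 cases in the mysql.test hunk check.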
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 8187484408f..7ea19e0a137 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -449,7 +449,7 @@ IF(WIN32)
COMPONENT Server
)
SET_TARGET_PROPERTIES(mysql_install_db PROPERTIES COMPILE_FLAGS -DINSTALL_PLUGINDIR=${INSTALL_PLUGINDIR})
- TARGET_LINK_LIBRARIES(mysql_install_db mysys)
+ TARGET_LINK_LIBRARIES(mysql_install_db mysys shlwapi)
ADD_LIBRARY(winservice STATIC winservice.c)
TARGET_LINK_LIBRARIES(winservice shell32)
diff --git a/sql/handler.h b/sql/handler.h
index 937a913eef0..f6b988b1dc3 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -384,6 +384,12 @@ enum enum_alter_inplace_result {
#define HA_KEY_NULL_LENGTH 1
#define HA_KEY_BLOB_LENGTH 2
+/* Maximum length of any index lookup key, in bytes */
+
+#define MAX_KEY_LENGTH (MAX_DATA_LENGTH_FOR_KEY \
+ +(MAX_REF_PARTS \
+ *(HA_KEY_NULL_LENGTH + HA_KEY_BLOB_LENGTH)))
+
#define HA_LEX_CREATE_TMP_TABLE 1
#define HA_LEX_CREATE_IF_NOT_EXISTS 2
#define HA_LEX_CREATE_TABLE_LIKE 4
@@ -3243,14 +3249,14 @@ public:
uint max_key_parts() const
{ return MY_MIN(MAX_REF_PARTS, max_supported_key_parts()); }
uint max_key_length() const
- { return MY_MIN(MAX_KEY_LENGTH, max_supported_key_length()); }
+ { return MY_MIN(MAX_DATA_LENGTH_FOR_KEY, max_supported_key_length()); }
uint max_key_part_length() const
- { return MY_MIN(MAX_KEY_LENGTH, max_supported_key_part_length()); }
+ { return MY_MIN(MAX_DATA_LENGTH_FOR_KEY, max_supported_key_part_length()); }
virtual uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
virtual uint max_supported_keys() const { return 0; }
virtual uint max_supported_key_parts() const { return MAX_REF_PARTS; }
- virtual uint max_supported_key_length() const { return MAX_KEY_LENGTH; }
+ virtual uint max_supported_key_length() const { return MAX_DATA_LENGTH_FOR_KEY; }
virtual uint max_supported_key_part_length() const { return 255; }
virtual uint min_record_length(uint options) const { return 1; }
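
The sql/handler.h hunk above separates the data capacity of a key (MAX_DATA_LENGTH_FOR_KEY) from the size of a search-key buffer, which also needs one NULL byte and two blob-length bytes per key part; the stack-buffer overflows covered by the new innodb_ext_key.test and func_group_innodb.test cases come from under-sizing such a buffer. A sketch of that sizing rule with illustrative constants only (the real values live in the server headers):

    #include <cstdio>

    // Illustrative constants only; the real limits come from the server headers.
    static const unsigned kMaxKeyDataLength = 3072;  // bytes of column data in a key
    static const unsigned kMaxKeyParts      = 32;    // maximum columns per key
    static const unsigned kNullByte         = 1;     // per-part NULL indicator
    static const unsigned kBlobLengthBytes  = 2;     // per-part length prefix

    // A search-key *buffer* must hold the key data plus the per-part bookkeeping
    // bytes; that is the distinction the new MAX_KEY_LENGTH definition makes explicit.
    static unsigned key_buffer_size()
    {
      return kMaxKeyDataLength + kMaxKeyParts * (kNullByte + kBlobLengthBytes);
    }

    int main()
    {
      std::printf("%u-byte buffer for %u bytes of key data\n",
                  key_buffer_size(), kMaxKeyDataLength);
      return 0;
    }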
diff --git a/sql/item.h b/sql/item.h
index 5bcdc45ce23..11efac08757 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -825,6 +825,10 @@ public:
If value is not null null_value flag will be reset to FALSE.
*/
virtual longlong val_int()=0;
+ Longlong_hybrid to_longlong_hybrid()
+ {
+ return Longlong_hybrid(val_int(), unsigned_flag);
+ }
/*
This is just a shortcut to avoid the cast. You should still use
unsigned_flag to check the sign of the item.
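
to_longlong_hybrid() above packages val_int() together with unsigned_flag so that BETWEEN can compare a mix of signed and unsigned 64-bit values directly instead of falling back to DECIMAL (the MDEV-17724 case in bigint.test). A minimal sketch of the comparison idea, not the actual Longlong_hybrid class:

    // Minimal sketch of the idea behind Longlong_hybrid: a 64-bit value plus a
    // flag saying whether it must be read as unsigned.  Not the MariaDB class.
    struct LonglongHybrid
    {
      long long value;
      bool      is_unsigned;

      // Returns <0, 0 or >0, honouring each operand's own signedness.
      int cmp(const LonglongHybrid &other) const
      {
        bool neg       = !is_unsigned && value < 0;
        bool other_neg = !other.is_unsigned && other.value < 0;
        if (neg != other_neg)
          return neg ? -1 : 1;            // only one side is negative
        if (neg)                          // both negative: plain signed compare
          return value < other.value ? -1 : value > other.value ? 1 : 0;
        // Both non-negative: compare the bit patterns as unsigned.
        unsigned long long a = (unsigned long long) value;
        unsigned long long b = (unsigned long long) other.value;
        return a < b ? -1 : a > b ? 1 : 0;
      }
    };

Item_func_between::val_int_cmp_int() below applies this kind of comparison to the value and both bounds, which is why "c1 BETWEEN 0 AND 18446744073709551615" no longer has to be evaluated as DECIMAL.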
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index bdef3c1a89f..a0c5798cb69 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -159,7 +159,10 @@ static int cmp_row_type(Item* item1, Item* item2)
0 otherwise
*/
-static int agg_cmp_type(Item_result *type, Item **items, uint nitems)
+static int agg_cmp_type(Item_result *type,
+ Item **items,
+ uint nitems,
+ bool int_uint_as_dec)
{
uint unsigned_count= items[0]->unsigned_flag;
type[0]= items[0]->cmp_type();
@@ -181,7 +184,9 @@ static int agg_cmp_type(Item_result *type, Item **items, uint nitems)
If all arguments are of INT type but have different unsigned_flag values,
switch to DECIMAL_RESULT.
*/
- if (type[0] == INT_RESULT && unsigned_count != nitems && unsigned_count != 0)
+ if (int_uint_as_dec &&
+ type[0] == INT_RESULT &&
+ unsigned_count != nitems && unsigned_count != 0)
type[0]= DECIMAL_RESULT;
return 0;
}
@@ -2359,7 +2364,7 @@ void Item_func_between::fix_length_and_dec()
*/
if (!args[0] || !args[1] || !args[2])
return;
- if ( agg_cmp_type(&cmp_type, args, 3))
+ if (agg_cmp_type(&cmp_type, args, 3, false))
return;
if (cmp_type == STRING_RESULT &&
agg_arg_charsets_for_comparison(cmp_collation, args, 3))
@@ -2393,6 +2398,97 @@ void Item_func_between::fix_length_and_dec()
}
+longlong Item_func_between::val_int_cmp_string()
+{
+ String *value,*a,*b;
+ value=args[0]->val_str(&value0);
+ if ((null_value=args[0]->null_value))
+ return 0;
+ a= args[1]->val_str(&value1);
+ b= args[2]->val_str(&value2);
+ if (!args[1]->null_value && !args[2]->null_value)
+ return (longlong) ((sortcmp(value,a,cmp_collation.collation) >= 0 &&
+ sortcmp(value,b,cmp_collation.collation) <= 0) !=
+ negated);
+ if (args[1]->null_value && args[2]->null_value)
+ null_value= true;
+ else if (args[1]->null_value)
+ {
+ // Set to not null if false range.
+ null_value= sortcmp(value,b,cmp_collation.collation) <= 0;
+ }
+ else
+ {
+ // Set to not null if false range.
+ null_value= sortcmp(value,a,cmp_collation.collation) >= 0;
+ }
+ return (longlong) (!null_value && negated);
+}
+
+
+longlong Item_func_between::val_int_cmp_int()
+{
+ Longlong_hybrid value= args[0]->to_longlong_hybrid();
+ if ((null_value= args[0]->null_value))
+ return 0; /* purecov: inspected */
+ Longlong_hybrid a= args[1]->to_longlong_hybrid();
+ Longlong_hybrid b= args[2]->to_longlong_hybrid();
+ if (!args[1]->null_value && !args[2]->null_value)
+ return (longlong) ((value.cmp(a) >= 0 && value.cmp(b) <= 0) != negated);
+ if (args[1]->null_value && args[2]->null_value)
+ null_value= true;
+ else if (args[1]->null_value)
+ null_value= value.cmp(b) <= 0; // not null if false range.
+ else
+ null_value= value.cmp(a) >= 0;
+ return (longlong) (!null_value && negated);
+}
+
+
+longlong Item_func_between::val_int_cmp_decimal()
+{
+ my_decimal dec_buf, *dec= args[0]->val_decimal(&dec_buf),
+ a_buf, *a_dec, b_buf, *b_dec;
+ if ((null_value=args[0]->null_value))
+ return 0; /* purecov: inspected */
+ a_dec= args[1]->val_decimal(&a_buf);
+ b_dec= args[2]->val_decimal(&b_buf);
+ if (!args[1]->null_value && !args[2]->null_value)
+ return (longlong) ((my_decimal_cmp(dec, a_dec) >= 0 &&
+ my_decimal_cmp(dec, b_dec) <= 0) != negated);
+ if (args[1]->null_value && args[2]->null_value)
+ null_value= true;
+ else if (args[1]->null_value)
+ null_value= (my_decimal_cmp(dec, b_dec) <= 0);
+ else
+ null_value= (my_decimal_cmp(dec, a_dec) >= 0);
+ return (longlong) (!null_value && negated);
+}
+
+
+longlong Item_func_between::val_int_cmp_real()
+{
+ double value= args[0]->val_real(),a,b;
+ if ((null_value=args[0]->null_value))
+ return 0; /* purecov: inspected */
+ a= args[1]->val_real();
+ b= args[2]->val_real();
+ if (!args[1]->null_value && !args[2]->null_value)
+ return (longlong) ((value >= a && value <= b) != negated);
+ if (args[1]->null_value && args[2]->null_value)
+ null_value= true;
+ else if (args[1]->null_value)
+ {
+ null_value= value <= b; // not null if false range.
+ }
+ else
+ {
+ null_value= value >= a;
+ }
+ return (longlong) (!null_value && negated);
+}
+
+
longlong Item_func_between::val_int()
{
DBUG_ASSERT(fixed == 1);
@@ -2434,94 +2530,14 @@ longlong Item_func_between::val_int()
null_value= value >= a;
break;
}
-
case STRING_RESULT:
- {
- String *value,*a,*b;
- value=args[0]->val_str(&value0);
- if ((null_value=args[0]->null_value))
- return 0;
- a=args[1]->val_str(&value1);
- b=args[2]->val_str(&value2);
- if (!args[1]->null_value && !args[2]->null_value)
- return (longlong) ((sortcmp(value,a,cmp_collation.collation) >= 0 &&
- sortcmp(value,b,cmp_collation.collation) <= 0) !=
- negated);
- if (args[1]->null_value && args[2]->null_value)
- null_value=1;
- else if (args[1]->null_value)
- {
- // Set to not null if false range.
- null_value= sortcmp(value,b,cmp_collation.collation) <= 0;
- }
- else
- {
- // Set to not null if false range.
- null_value= sortcmp(value,a,cmp_collation.collation) >= 0;
- }
- break;
- }
+ return val_int_cmp_string();
case INT_RESULT:
- {
- longlong value=args[0]->val_int(), a, b;
- if ((null_value=args[0]->null_value))
- return 0; /* purecov: inspected */
- a=args[1]->val_int();
- b=args[2]->val_int();
- if (!args[1]->null_value && !args[2]->null_value)
- return (longlong) ((value >= a && value <= b) != negated);
- if (args[1]->null_value && args[2]->null_value)
- null_value=1;
- else if (args[1]->null_value)
- {
- null_value= value <= b; // not null if false range.
- }
- else
- {
- null_value= value >= a;
- }
- break;
- }
+ return val_int_cmp_int();
case DECIMAL_RESULT:
- {
- my_decimal dec_buf, *dec= args[0]->val_decimal(&dec_buf),
- a_buf, *a_dec, b_buf, *b_dec;
- if ((null_value=args[0]->null_value))
- return 0; /* purecov: inspected */
- a_dec= args[1]->val_decimal(&a_buf);
- b_dec= args[2]->val_decimal(&b_buf);
- if (!args[1]->null_value && !args[2]->null_value)
- return (longlong) ((my_decimal_cmp(dec, a_dec) >= 0 &&
- my_decimal_cmp(dec, b_dec) <= 0) != negated);
- if (args[1]->null_value && args[2]->null_value)
- null_value=1;
- else if (args[1]->null_value)
- null_value= (my_decimal_cmp(dec, b_dec) <= 0);
- else
- null_value= (my_decimal_cmp(dec, a_dec) >= 0);
- break;
- }
+ return val_int_cmp_decimal();
case REAL_RESULT:
- {
- double value= args[0]->val_real(),a,b;
- if ((null_value=args[0]->null_value))
- return 0; /* purecov: inspected */
- a= args[1]->val_real();
- b= args[2]->val_real();
- if (!args[1]->null_value && !args[2]->null_value)
- return (longlong) ((value >= a && value <= b) != negated);
- if (args[1]->null_value && args[2]->null_value)
- null_value=1;
- else if (args[1]->null_value)
- {
- null_value= value <= b; // not null if false range.
- }
- else
- {
- null_value= value >= a;
- }
- break;
- }
+ return val_int_cmp_real();
case ROW_RESULT:
case IMPOSSIBLE_RESULT:
DBUG_ASSERT(0);
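
The refactor above splits Item_func_between::val_int() into per-type helpers, each keeping the same three-valued NULL handling: a NULL bound yields SQL NULL only when the result could still be TRUE, while a comparison that already fails against the known bound yields a definite FALSE. A sketch of that rule for the integer case, ignoring the negated (NOT BETWEEN) flag and using std::optional to stand in for SQL NULL:

    #include <optional>

    // Three-valued BETWEEN for the integer case (negation left out): NULL is
    // returned only when the unknown bound could still make the predicate true.
    static std::optional<bool> between(long long value,
                                       std::optional<long long> lo,
                                       std::optional<long long> hi)
    {
      if (lo && hi)
        return value >= *lo && value <= *hi;
      if (!lo && !hi)
        return std::nullopt;                           // both bounds unknown
      if (!lo)                                         // lower bound unknown
        return value <= *hi ? std::optional<bool>()    // might still be true: NULL
                            : std::optional<bool>(false);
      return value >= *lo ? std::optional<bool>()      // might still be true: NULL
                          : std::optional<bool>(false);
    }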
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index fdefcc86c64..eec3684c2c1 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -698,6 +698,11 @@ public:
bool eval_not_null_tables(uchar *opt_arg);
void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge);
bool count_sargable_conds(uchar *arg);
+
+ longlong val_int_cmp_string();
+ longlong val_int_cmp_int();
+ longlong val_int_cmp_real();
+ longlong val_int_cmp_decimal();
};
diff --git a/sql/log.cc b/sql/log.cc
index 4cc3165377c..dd553d3f186 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -1,5 +1,5 @@
-/* Copyright (c) 2000, 2016, Oracle and/or its affiliates.
- Copyright (c) 2009, 2017, MariaDB
+/* Copyright (c) 2000, 2018, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2019, MariaDB Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -3886,7 +3886,7 @@ int MYSQL_BIN_LOG::find_log_pos(LOG_INFO *linfo, const char *log_name,
// if the log entry matches, null string matching anything
if (!log_name ||
(log_name_len == fname_len-1 && full_fname[log_name_len] == '\n' &&
- !memcmp(full_fname, full_log_name, log_name_len)))
+ !strncmp(full_fname, full_log_name, log_name_len)))
{
DBUG_PRINT("info", ("Found log file entry"));
full_fname[fname_len-1]= 0; // remove last \n
diff --git a/sql/log_event.cc b/sql/log_event.cc
index da4e63f02e1..cac31dad33d 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -11203,7 +11203,7 @@ int Table_map_log_event::do_apply_event(rpl_group_info *rgi)
table_list->updating= 1;
table_list->required_type= FRMTYPE_TABLE;
- DBUG_PRINT("debug", ("table: %s is mapped to %u", table_list->table_name,
+ DBUG_PRINT("debug", ("table: %s is mapped to %llu", table_list->table_name,
table_list->table_id));
#ifdef RBR_TRIGGERS
table_list->master_had_triggers= ((m_flags & TM_BIT_HAS_TRIGGERS_F) ? 1 : 0);
diff --git a/sql/mysql_install_db.cc b/sql/mysql_install_db.cc
index f9343aab011..76da7e7a177 100644
--- a/sql/mysql_install_db.cc
+++ b/sql/mysql_install_db.cc
@@ -27,6 +27,8 @@
#include <shellapi.h>
#include <accctrl.h>
#include <aclapi.h>
+struct IUnknown;
+#include <shlwapi.h>
#define USAGETEXT \
"mysql_install_db.exe Ver 1.00 for Windows\n" \
@@ -549,20 +551,78 @@ static int create_db_instance()
DWORD cwd_len= MAX_PATH;
char cmdline[3*MAX_PATH];
FILE *in;
+ bool cleanup_datadir= true;
+ DWORD last_error;
verbose("Running bootstrap");
GetCurrentDirectory(cwd_len, cwd);
- CreateDirectory(opt_datadir, NULL); /*ignore error, it might already exist */
+
+ /* Create datadir and datadir/mysql, if they do not already exist. */
+
+ if (!CreateDirectory(opt_datadir, NULL) && (GetLastError() != ERROR_ALREADY_EXISTS))
+ {
+ last_error = GetLastError();
+ switch(last_error)
+ {
+ case ERROR_ACCESS_DENIED:
+ die("Can't create data directory '%s' (access denied)\n",
+ opt_datadir);
+ break;
+ case ERROR_PATH_NOT_FOUND:
+ die("Can't create data directory '%s' "
+ "(one or more intermediate directories do not exist)\n",
+ opt_datadir);
+ break;
+ default:
+ die("Can't create data directory '%s', last error %u\n",
+ opt_datadir, last_error);
+ break;
+ }
+ }
if (!SetCurrentDirectory(opt_datadir))
{
- die("Cannot set current directory to '%s'\n",opt_datadir);
- return -1;
+ last_error = GetLastError();
+ switch (last_error)
+ {
+ case ERROR_DIRECTORY:
+ die("Can't set current directory to '%s', the path is not a valid directory \n",
+ opt_datadir);
+ break;
+ default:
+ die("Can' set current directory to '%s', last error %u\n",
+ opt_datadir, last_error);
+ break;
+ }
+ }
+
+ if (PathIsDirectoryEmpty(opt_datadir))
+ {
+ cleanup_datadir= false;
}
- CreateDirectory("mysql",NULL);
- CreateDirectory("test", NULL);
+ if (!CreateDirectory("mysql",NULL))
+ {
+ last_error = GetLastError();
+ DWORD attributes;
+ switch(last_error)
+ {
+ case ERROR_ACCESS_DENIED:
+ die("Can't create subdirectory 'mysql' in '%s' (access denied)\n",opt_datadir);
+ break;
+ case ERROR_ALREADY_EXISTS:
+ attributes = GetFileAttributes("mysql");
+
+ if (attributes == INVALID_FILE_ATTRIBUTES)
+ die("GetFileAttributes() failed for existing file '%s\\mysql', last error %u",
+ opt_datadir, GetLastError());
+ else if (!(attributes & FILE_ATTRIBUTE_DIRECTORY))
+ die("File '%s\\mysql' exists, but it is not a directory", opt_datadir);
+
+ break;
+ }
+ }
/*
Set data directory permissions for both current user and
@@ -675,7 +735,7 @@ static int create_db_instance()
}
end:
- if (ret)
+ if (ret && cleanup_datadir)
{
SetCurrentDirectory(cwd);
clean_directory(opt_datadir);
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 3bcaa72e32f..ef40e0b6daa 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -2161,6 +2161,7 @@ failure:
head->column_bitmaps_set(save_read_set, save_write_set);
delete file;
file= save_file;
+ free_file= false;
DBUG_RETURN(1);
}
@@ -3326,10 +3327,9 @@ bool create_key_parts_for_pseudo_indexes(RANGE_OPT_PARAM *param,
for (field_ptr= table->field; *field_ptr; field_ptr++)
{
- Column_statistics* col_stats= (*field_ptr)->read_stats;
- if (bitmap_is_set(used_fields, (*field_ptr)->field_index)
- && col_stats && !col_stats->no_stat_values_provided()
- && !((*field_ptr)->type() == MYSQL_TYPE_GEOMETRY))
+ Field *field= *field_ptr;
+ if (bitmap_is_set(used_fields, field->field_index) &&
+ is_eits_usable(field))
parts++;
}
@@ -3347,10 +3347,10 @@ bool create_key_parts_for_pseudo_indexes(RANGE_OPT_PARAM *param,
uint max_key_len= 0;
for (field_ptr= table->field; *field_ptr; field_ptr++)
{
- if (bitmap_is_set(used_fields, (*field_ptr)->field_index))
+ Field *field= *field_ptr;
+ if (bitmap_is_set(used_fields, field->field_index))
{
- Field *field= *field_ptr;
- if (field->type() == MYSQL_TYPE_GEOMETRY)
+ if (!is_eits_usable(field))
continue;
uint16 store_length;
@@ -7141,6 +7141,8 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
if (ror_intersect_add(intersect, cpk_scan, TRUE) &&
(intersect->total_cost < min_cost))
intersect_best= intersect; //just set pointer here
+ else
+ cpk_scan= 0; // Don't use cpk_scan
}
else
cpk_scan= 0; // Don't use cpk_scan
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index 52bda560c1c..7106e741331 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -1990,12 +1990,12 @@ bool partition_info::check_partition_field_length()
for (i= 0; i < num_part_fields; i++)
store_length+= get_partition_field_store_length(part_field_array[i]);
- if (store_length > MAX_KEY_LENGTH)
+ if (store_length > MAX_DATA_LENGTH_FOR_KEY)
DBUG_RETURN(TRUE);
store_length= 0;
for (i= 0; i < num_subpart_fields; i++)
store_length+= get_partition_field_store_length(subpart_field_array[i]);
- if (store_length > MAX_KEY_LENGTH)
+ if (store_length > MAX_DATA_LENGTH_FOR_KEY)
DBUG_RETURN(TRUE);
DBUG_RETURN(FALSE);
}
@@ -2748,23 +2748,6 @@ end:
}
-bool partition_info::error_if_requires_values() const
-{
- switch (part_type) {
- case NOT_A_PARTITION:
- case HASH_PARTITION:
- break;
- case RANGE_PARTITION:
- my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), "RANGE", "LESS THAN");
- return true;
- case LIST_PARTITION:
- my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), "LIST", "IN");
- return true;
- }
- return false;
-}
-
-
/**
Fix partition data from parser.
@@ -3164,6 +3147,23 @@ void partition_info::print_debug(const char *str, uint *value)
DBUG_PRINT("info", ("parser: %s", str));
DBUG_VOID_RETURN;
}
+
+bool partition_info::field_in_partition_expr(Field *field) const
+{
+ uint i;
+ for (i= 0; i < num_part_fields; i++)
+ {
+ if (field->eq(part_field_array[i]))
+ return TRUE;
+ }
+ for (i= 0; i < num_subpart_fields; i++)
+ {
+ if (field->eq(subpart_field_array[i]))
+ return TRUE;
+ }
+ return FALSE;
+}
+
#else /* WITH_PARTITION_STORAGE_ENGINE */
/*
For builds without partitioning we need to define these functions
@@ -3215,3 +3215,19 @@ bool check_partition_dirs(partition_info *part_info)
}
#endif /* WITH_PARTITION_STORAGE_ENGINE */
+
+bool partition_info::error_if_requires_values() const
+{
+ switch (part_type) {
+ case NOT_A_PARTITION:
+ case HASH_PARTITION:
+ break;
+ case RANGE_PARTITION:
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), "RANGE", "LESS THAN");
+ return true;
+ case LIST_PARTITION:
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), "LIST", "IN");
+ return true;
+ }
+ return false;
+}
diff --git a/sql/partition_info.h b/sql/partition_info.h
index f250c5496bf..10b8954ace7 100644
--- a/sql/partition_info.h
+++ b/sql/partition_info.h
@@ -384,6 +384,7 @@ private:
bool is_full_part_expr_in_fields(List<Item> &fields);
public:
bool has_unique_name(partition_element *element);
+ bool field_in_partition_expr(Field *field) const;
};
uint32 get_next_partition_id_range(struct st_partition_iter* part_iter);
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index a47f94cfc7f..6a4ce9dc891 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -714,7 +714,9 @@ bool ROLE_GRANT_PAIR::init(MEM_ROOT *mem, char *username,
/* Flag to mark that on_node was already called for this role */
#define ROLE_OPENED (1L << 3)
-static DYNAMIC_ARRAY acl_hosts, acl_users, acl_dbs, acl_proxy_users;
+static DYNAMIC_ARRAY acl_hosts, acl_users, acl_proxy_users;
+static Dynamic_array<ACL_DB> acl_dbs(0U,50U);
+typedef Dynamic_array<ACL_DB>::CMP_FUNC acl_dbs_cmp;
static HASH acl_roles;
/*
An hash containing mappings user <--> role
@@ -1408,12 +1410,11 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
db.access|=REFERENCES_ACL | INDEX_ACL | ALTER_ACL;
}
#endif
- (void) push_dynamic(&acl_dbs,(uchar*) &db);
+ acl_dbs.push(db);
}
- my_qsort((uchar*) dynamic_element(&acl_dbs,0,ACL_DB*),acl_dbs.elements,
- sizeof(ACL_DB),(qsort_cmp) acl_compare);
end_read_record(&read_record_info);
- freeze_size(&acl_dbs);
+ acl_dbs.sort((acl_dbs_cmp)acl_compare);
+ acl_dbs.freeze();
if (tables[3].table)
{
@@ -1502,7 +1503,7 @@ void acl_free(bool end)
free_root(&acl_memroot,MYF(0));
delete_dynamic(&acl_hosts);
delete_dynamic_with_callback(&acl_users, (FREE_FUNC) free_acl_user);
- delete_dynamic(&acl_dbs);
+ acl_dbs.free_memory();
delete_dynamic(&acl_wild_hosts);
delete_dynamic(&acl_proxy_users);
my_hash_free(&acl_check_hosts);
@@ -1541,7 +1542,8 @@ void acl_free(bool end)
my_bool acl_reload(THD *thd)
{
TABLE_LIST tables[5];
- DYNAMIC_ARRAY old_acl_hosts, old_acl_users, old_acl_dbs, old_acl_proxy_users;
+ DYNAMIC_ARRAY old_acl_hosts, old_acl_users, old_acl_proxy_users;
+ Dynamic_array<ACL_DB> old_acl_dbs(0U,0U);
HASH old_acl_roles, old_acl_roles_mappings;
MEM_ROOT old_mem;
my_bool return_val= TRUE;
@@ -1595,7 +1597,7 @@ my_bool acl_reload(THD *thd)
old_acl_dbs= acl_dbs;
my_init_dynamic_array(&acl_hosts, sizeof(ACL_HOST), 20, 50, MYF(0));
my_init_dynamic_array(&acl_users, sizeof(ACL_USER), 50, 100, MYF(0));
- my_init_dynamic_array(&acl_dbs, sizeof(ACL_DB), 50, 100, MYF(0));
+ acl_dbs.init(50, 100);
my_init_dynamic_array(&acl_proxy_users, sizeof(ACL_PROXY_USER), 50, 100, MYF(0));
my_hash_init2(&acl_roles,50, &my_charset_utf8_bin,
0, 0, 0, (my_hash_get_key) acl_role_get_key, 0,
@@ -1616,6 +1618,7 @@ my_bool acl_reload(THD *thd)
acl_roles_mappings= old_acl_roles_mappings;
acl_proxy_users= old_acl_proxy_users;
acl_dbs= old_acl_dbs;
+ old_acl_dbs.init(0,0);
acl_memroot= old_mem;
init_check_host();
}
@@ -1626,7 +1629,6 @@ my_bool acl_reload(THD *thd)
delete_dynamic(&old_acl_hosts);
delete_dynamic_with_callback(&old_acl_users, (FREE_FUNC) free_acl_user);
delete_dynamic(&old_acl_proxy_users);
- delete_dynamic(&old_acl_dbs);
my_hash_free(&old_acl_roles_mappings);
}
mysql_mutex_unlock(&acl_cache->lock);
@@ -1809,9 +1811,9 @@ bool acl_getroot(Security_context *sctx, char *user, char *host,
if (acl_user)
{
res= 0;
- for (i=0 ; i < acl_dbs.elements ; i++)
+ for (i=0 ; i < acl_dbs.elements() ; i++)
{
- ACL_DB *acl_db= dynamic_element(&acl_dbs, i, ACL_DB*);
+ ACL_DB *acl_db= &acl_dbs.at(i);
if (!acl_db->user ||
(user && user[0] && !strcmp(user, acl_db->user)))
{
@@ -1840,9 +1842,9 @@ bool acl_getroot(Security_context *sctx, char *user, char *host,
if (acl_role)
{
res= 0;
- for (i=0 ; i < acl_dbs.elements ; i++)
+ for (i=0 ; i < acl_dbs.elements() ; i++)
{
- ACL_DB *acl_db= dynamic_element(&acl_dbs, i, ACL_DB*);
+ ACL_DB *acl_db= &acl_dbs.at(i);
if (!acl_db->user ||
(user && user[0] && !strcmp(user, acl_db->user)))
{
@@ -2136,9 +2138,9 @@ static bool acl_update_db(const char *user, const char *host, const char *db,
bool updated= false;
- for (uint i=0 ; i < acl_dbs.elements ; i++)
+ for (uint i=0 ; i < acl_dbs.elements() ; i++)
{
- ACL_DB *acl_db=dynamic_element(&acl_dbs,i,ACL_DB*);
+ ACL_DB *acl_db= &acl_dbs.at(i);
if ((!acl_db->user && !user[0]) ||
(acl_db->user &&
!strcmp(user,acl_db->user)))
@@ -2157,7 +2159,7 @@ static bool acl_update_db(const char *user, const char *host, const char *db,
acl_db->initial_access= acl_db->access;
}
else
- delete_dynamic_element(&acl_dbs,i);
+ acl_dbs.del(i);
updated= true;
}
}
@@ -2192,9 +2194,8 @@ static void acl_insert_db(const char *user, const char *host, const char *db,
acl_db.db=strdup_root(&acl_memroot,db);
acl_db.initial_access= acl_db.access= privileges;
acl_db.sort=get_sort(3,acl_db.host.hostname,acl_db.db,acl_db.user);
- (void) push_dynamic(&acl_dbs,(uchar*) &acl_db);
- my_qsort((uchar*) dynamic_element(&acl_dbs,0,ACL_DB*),acl_dbs.elements,
- sizeof(ACL_DB),(qsort_cmp) acl_compare);
+ acl_dbs.push(acl_db);
+ acl_dbs.sort((acl_dbs_cmp)acl_compare);
}
@@ -2240,9 +2241,9 @@ ulong acl_get(const char *host, const char *ip,
/*
Check if there are some access rights for database and user
*/
- for (i=0 ; i < acl_dbs.elements ; i++)
+ for (i=0 ; i < acl_dbs.elements() ; i++)
{
- ACL_DB *acl_db=dynamic_element(&acl_dbs,i,ACL_DB*);
+ ACL_DB *acl_db= &acl_dbs.at(i);
if (!acl_db->user || !strcmp(user,acl_db->user))
{
if (compare_hostname(&acl_db->host,host,ip))
@@ -4927,9 +4928,9 @@ static bool merge_role_global_privileges(ACL_ROLE *grantee)
return old != grantee->access;
}
-static int db_name_sort(ACL_DB * const *db1, ACL_DB * const *db2)
+static int db_name_sort(const int *db1, const int *db2)
{
- return strcmp((*db1)->db, (*db2)->db);
+ return strcmp(acl_dbs.at(*db1).db, acl_dbs.at(*db2).db);
}
/**
@@ -4945,14 +4946,14 @@ static int db_name_sort(ACL_DB * const *db1, ACL_DB * const *db2)
2 - ACL_DB was added
4 - ACL_DB was deleted
*/
-static int update_role_db(ACL_DB *merged, ACL_DB **first, ulong access, char *role)
+static int update_role_db(int merged, int first, ulong access, char *role)
{
- if (!first)
+ if (first < 0)
return 0;
DBUG_EXECUTE_IF("role_merge_stats", role_db_merges++;);
- if (merged == NULL)
+ if (merged < 0)
{
/*
there's no ACL_DB for this role (all db grants come from granted roles)
@@ -4967,11 +4968,11 @@ static int update_role_db(ACL_DB *merged, ACL_DB **first, ulong access, char *ro
acl_db.user= role;
acl_db.host.hostname= const_cast<char*>("");
acl_db.host.ip= acl_db.host.ip_mask= 0;
- acl_db.db= first[0]->db;
+ acl_db.db= acl_dbs.at(first).db;
acl_db.access= access;
acl_db.initial_access= 0;
acl_db.sort=get_sort(3, "", acl_db.db, role);
- push_dynamic(&acl_dbs,(uchar*) &acl_db);
+ acl_dbs.push(acl_db);
return 2;
}
else if (access == 0)
@@ -4987,13 +4988,13 @@ static int update_role_db(ACL_DB *merged, ACL_DB **first, ulong access, char *ro
2. it's O(N) operation, and we may need many of them
so we only mark elements deleted and will delete later.
*/
- merged->sort= 0; // lower than any valid ACL_DB sort value, will be sorted last
+ acl_dbs.at(merged).sort= 0; // lower than any valid ACL_DB sort value, will be sorted last
return 4;
}
- else if (merged->access != access)
+ else if (acl_dbs.at(merged).access != access)
{
/* this is easy */
- merged->access= access;
+ acl_dbs.at(merged).access= access;
return 1;
}
return 0;
@@ -5008,7 +5009,7 @@ static int update_role_db(ACL_DB *merged, ACL_DB **first, ulong access, char *ro
static bool merge_role_db_privileges(ACL_ROLE *grantee, const char *dbname,
role_hash_t *rhash)
{
- Dynamic_array<ACL_DB *> dbs;
+ Dynamic_array<int> dbs;
/*
Supposedly acl_dbs can be huge, but only a handful of db grants
@@ -5016,9 +5017,9 @@ static bool merge_role_db_privileges(ACL_ROLE *grantee, const char *dbname,
Collect these applicable db grants.
*/
- for (uint i=0 ; i < acl_dbs.elements ; i++)
+ for (uint i=0 ; i < acl_dbs.elements() ; i++)
{
- ACL_DB *db= dynamic_element(&acl_dbs,i,ACL_DB*);
+ ACL_DB *db= &acl_dbs.at(i);
if (db->host.hostname[0])
continue;
if (dbname && strcmp(db->db, dbname))
@@ -5026,7 +5027,7 @@ static bool merge_role_db_privileges(ACL_ROLE *grantee, const char *dbname,
ACL_ROLE *r= rhash->find(db->user, strlen(db->user));
if (!r)
continue;
- dbs.append(db);
+ dbs.append(i);
}
dbs.sort(db_name_sort);
@@ -5035,21 +5036,21 @@ static bool merge_role_db_privileges(ACL_ROLE *grantee, const char *dbname,
(that should be merged) are sorted together. The grantee's ACL_DB element
is not necessarily the first and may be not present at all.
*/
- ACL_DB **first= NULL, *UNINIT_VAR(merged);
+ int first= -1, merged= -1;
ulong UNINIT_VAR(access), update_flags= 0;
- for (ACL_DB **cur= dbs.front(); cur <= dbs.back(); cur++)
+ for (int *p= dbs.front(); p <= dbs.back(); p++)
{
- if (!first || (!dbname && strcmp(cur[0]->db, cur[-1]->db)))
+ if (first<0 || (!dbname && strcmp(acl_dbs.at(*p).db, acl_dbs.at(*p-1).db)))
{ // new db name series
update_flags|= update_role_db(merged, first, access, grantee->user.str);
- merged= NULL;
+ merged= -1;
access= 0;
- first= cur;
+ first= *p;
}
- if (strcmp(cur[0]->user, grantee->user.str) == 0)
- access|= (merged= cur[0])->initial_access;
+ if (strcmp(acl_dbs.at(*p).user, grantee->user.str) == 0)
+ access|= acl_dbs.at(merged= *p).initial_access;
else
- access|= cur[0]->access;
+ access|= acl_dbs.at(*p).access;
}
update_flags|= update_role_db(merged, first, access, grantee->user.str);
@@ -5062,14 +5063,12 @@ static bool merge_role_db_privileges(ACL_ROLE *grantee, const char *dbname,
*/
if (update_flags & (2|4))
{ // inserted or deleted, need to sort
- my_qsort((uchar*) dynamic_element(&acl_dbs,0,ACL_DB*),acl_dbs.elements,
- sizeof(ACL_DB),(qsort_cmp) acl_compare);
+ acl_dbs.sort((acl_dbs_cmp)acl_compare);
}
if (update_flags & 4)
{ // deleted, trim the end
- while (acl_dbs.elements &&
- dynamic_element(&acl_dbs, acl_dbs.elements-1, ACL_DB*)->sort == 0)
- acl_dbs.elements--;
+ while (acl_dbs.elements() && acl_dbs.back()->sort == 0)
+ acl_dbs.pop();
}
return update_flags;
}
@@ -7951,16 +7950,14 @@ static bool show_database_privileges(THD *thd, const char *username,
const char *hostname,
char *buff, size_t buffsize)
{
- ACL_DB *acl_db;
ulong want_access;
- uint counter;
Protocol *protocol= thd->protocol;
- for (counter=0 ; counter < acl_dbs.elements ; counter++)
+ for (uint i=0 ; i < acl_dbs.elements() ; i++)
{
const char *user, *host;
- acl_db=dynamic_element(&acl_dbs,counter,ACL_DB*);
+ ACL_DB *acl_db= &acl_dbs.at(i);
user= safe_str(acl_db->user);
host=acl_db->host.hostname;
@@ -8838,7 +8835,7 @@ static int handle_grant_struct(enum enum_acl_lists struct_no, bool drop,
elements= acl_users.elements;
break;
case DB_ACL:
- elements= acl_dbs.elements;
+ elements= acl_dbs.elements();
break;
case COLUMN_PRIVILEGES_HASH:
grant_name_hash= &column_priv_hash;
@@ -8882,7 +8879,7 @@ static int handle_grant_struct(enum enum_acl_lists struct_no, bool drop,
break;
case DB_ACL:
- acl_db= dynamic_element(&acl_dbs, idx, ACL_DB*);
+ acl_db= &acl_dbs.at(idx);
user= acl_db->user;
host= acl_db->host.hostname;
break;
@@ -8966,7 +8963,7 @@ static int handle_grant_struct(enum enum_acl_lists struct_no, bool drop,
break;
case DB_ACL:
- delete_dynamic_element(&acl_dbs, idx);
+ acl_dbs.del(idx);
break;
case COLUMN_PRIVILEGES_HASH:
@@ -9654,11 +9651,11 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
*/
do
{
- for (counter= 0, revoked= 0 ; counter < acl_dbs.elements ; )
+ for (counter= 0, revoked= 0 ; counter < acl_dbs.elements() ; )
{
const char *user,*host;
- acl_db=dynamic_element(&acl_dbs,counter,ACL_DB*);
+ acl_db=&acl_dbs.at(counter);
user= safe_str(acl_db->user);
host= safe_str(acl_db->host.hostname);
@@ -10491,11 +10488,11 @@ int fill_schema_schema_privileges(THD *thd, TABLE_LIST *tables, COND *cond)
DBUG_RETURN(0);
mysql_mutex_lock(&acl_cache->lock);
- for (counter=0 ; counter < acl_dbs.elements ; counter++)
+ for (counter=0 ; counter < acl_dbs.elements() ; counter++)
{
const char *user, *host, *is_grantable="YES";
- acl_db=dynamic_element(&acl_dbs,counter,ACL_DB*);
+ acl_db=&acl_dbs.at(counter);
user= safe_str(acl_db->user);
host= safe_str(acl_db->host.hostname);
diff --git a/sql/sql_array.h b/sql/sql_array.h
index 8202e94ce41..c18bec1df52 100644
--- a/sql/sql_array.h
+++ b/sql/sql_array.h
@@ -107,8 +107,7 @@ public:
void init(uint prealloc=16, uint increment=16)
{
- my_init_dynamic_array(&array, sizeof(Elem), prealloc, increment,
- MYF(0));
+ init_dynamic_array2(&array, sizeof(Elem), 0, prealloc, increment, MYF(0));
}
/**
@@ -201,6 +200,11 @@ public:
set_dynamic(&array, &el, idx);
}
+ void freeze()
+ {
+ freeze_size(&array);
+ }
+
bool resize(size_t new_size, Elem default_val)
{
size_t old_size= elements();
@@ -223,6 +227,11 @@ public:
delete_dynamic(&array);
}
+ void free_memory()
+ {
+ delete_dynamic(&array);
+ }
+
typedef int (*CMP_FUNC)(const Elem *el1, const Elem *el2);
void sort(CMP_FUNC cmp_func)
@@ -230,7 +239,7 @@ public:
my_qsort(array.buffer, array.elements, sizeof(Elem), (qsort_cmp)cmp_func);
}
- typedef int (*CMP_FUNC2)(const Elem *el1, const Elem *el2, void *);
+ typedef int (*CMP_FUNC2)(void *, const Elem *el1, const Elem *el2);
void sort(CMP_FUNC2 cmp_func, void *data)
{
my_qsort2(array.buffer, array.elements, sizeof(Elem), (qsort2_cmp)cmp_func, data);
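
The freeze() and free_memory() wrappers, together with push(), sort(), at(), del() and elements(), are what let sql_acl.cc above replace the raw DYNAMIC_ARRAY acl_dbs with Dynamic_array<ACL_DB>. A self-contained usage sketch of the same pattern; the element type and comparator below are made up for illustration:

    #include "sql_array.h"

    struct Rec { int key; };
    static int rec_cmp(const Rec *a, const Rec *b) { return a->key - b->key; }

    static void dynamic_array_demo()
    {
      Dynamic_array<Rec> recs(0U, 50U);   // same preallocation style as acl_dbs
      Rec r= { 42 };
      recs.push(r);                       // replaces push_dynamic()
      recs.sort(rec_cmp);                 // replaces my_qsort() over the raw buffer
      recs.freeze();                      // replaces freeze_size()
      for (uint i= 0; i < recs.elements(); i++)
        recs.at(i).key++;                 // replaces dynamic_element()
      recs.free_memory();                 // replaces delete_dynamic()
    }
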
diff --git a/sql/sql_const.h b/sql/sql_const.h
index c37d8dd68f7..c0b343c6ca4 100644
--- a/sql/sql_const.h
+++ b/sql/sql_const.h
@@ -33,7 +33,17 @@
#define MAX_SYS_VAR_LENGTH 32
#define MAX_KEY MAX_INDEXES /* Max used keys */
#define MAX_REF_PARTS 32 /* Max parts used as ref */
-#define MAX_KEY_LENGTH 3072 /* max possible key */
+
+/*
+ Maximum length of the data part of an index lookup key.
+
+ The "data part" is defined as the value itself, not including the
+ NULL-indicator bytes or varchar length bytes ("the Extras"). We need this
+ value because there was a bug where the length of the Extras was not counted.
+
+ You probably need MAX_KEY_LENGTH, not this constant.
+*/
+#define MAX_DATA_LENGTH_FOR_KEY 3072
#if SIZEOF_OFF_T > 4
#define MAX_REFLENGTH 8 /* Max length for record ref */
#else
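
The distinction the new constant draws, value bytes versus "the Extras", is easiest to see on a nullable VARCHAR key part. A small illustrative sketch of the accounting; the struct below is hypothetical (not the server's KEY_PART_INFO), and the 1-/2-byte extras are the usual NULL-indicator and length bytes:

    struct KeyPartSketch
    {
      unsigned data_length;   // value bytes only: what MAX_DATA_LENGTH_FOR_KEY limits
      bool     nullable;      // adds a 1-byte NULL indicator to the stored key
      bool     var_length;    // VARCHAR/BLOB adds 2 length bytes
    };

    static unsigned stored_key_part_length(const KeyPartSketch &kp)
    {
      unsigned extras= 0;
      if (kp.nullable)
        extras+= 1;           // NULL-indicator byte
      if (kp.var_length)
        extras+= 2;           // length bytes
      // Only kp.data_length is compared against MAX_DATA_LENGTH_FOR_KEY;
      // the extras sit on top of it in the stored key image.
      return kp.data_length + extras;
    }
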
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 21fbda29244..5df090cc3df 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -3562,6 +3562,8 @@ bool st_select_lex::optimize_unflattened_subqueries(bool const_only)
inner_join->select_options|= SELECT_DESCRIBE;
}
res= inner_join->optimize();
+ if (!inner_join->cleaned)
+ sl->update_used_tables();
sl->update_correlated_cache();
is_correlated_unit|= sl->is_correlated;
inner_join->select_options= save_options;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 7e3590bcc83..755c77ebd03 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1168,6 +1168,7 @@ static bool deny_updates_if_read_only_option(THD *thd, TABLE_LIST *all_tables)
DBUG_RETURN(FALSE);
if (lex->sql_command == SQLCOM_CREATE_DB ||
+ lex->sql_command == SQLCOM_ALTER_DB ||
lex->sql_command == SQLCOM_DROP_DB)
DBUG_RETURN(TRUE);
@@ -4204,6 +4205,9 @@ end_with_restore_list:
my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
break;
}
+ if (slave_ddl_exec_mode_options == SLAVE_EXEC_MODE_IDEMPOTENT &&
+ !(lex->create_info.options & HA_LEX_CREATE_IF_NOT_EXISTS))
+ create_info.options|= HA_LEX_CREATE_IF_NOT_EXISTS;
}
#endif
if (check_access(thd, CREATE_ACL, lex->name.str, NULL, NULL, 1, 0))
@@ -4236,6 +4240,9 @@ end_with_restore_list:
my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
break;
}
+ if (!thd->slave_expected_error &&
+ slave_ddl_exec_mode_options == SLAVE_EXEC_MODE_IDEMPOTENT)
+ lex->check_exists= 1;
}
#endif
if (check_access(thd, DROP_ACL, lex->name.str, NULL, NULL, 1, 0))
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index f1bbe58a8b8..9e03e54ea26 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -1,5 +1,5 @@
-/* Copyright (c) 2000, 2017, Oracle and/or its affiliates.
- Copyright (c) 2008, 2017, MariaDB Corporation
+/* Copyright (c) 2000, 2018, Oracle and/or its affiliates.
+ Copyright (c) 2008, 2019, MariaDB Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -481,7 +481,7 @@ bool log_in_use(const char* log_name)
if ((linfo = tmp->current_linfo))
{
mysql_mutex_lock(&linfo->lock);
- result = !memcmp(log_name, linfo->log_file_name, log_name_len);
+ result = !strncmp(log_name, linfo->log_file_name, log_name_len);
mysql_mutex_unlock(&linfo->lock);
if (result)
break;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index ca9a6a46fda..6fafbbb11df 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -18883,6 +18883,10 @@ test_if_quick_select(JOIN_TAB *tab)
delete tab->select->quick;
tab->select->quick=0;
+
+ if (tab->table->file->inited != handler::NONE)
+ tab->table->file->ha_index_or_rnd_end();
+
return tab->select->test_quick_select(tab->join->thd, tab->keys,
(table_map) 0, HA_POS_ERROR, 0,
FALSE);
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index cb75a5c2176..a2e6621055d 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -30,6 +30,7 @@
#include "opt_range.h"
#include "my_atomic.h"
#include "sql_show.h"
+#include "sql_partition.h"
/*
The system variable 'use_stat_tables' can take one of the
@@ -3589,6 +3590,22 @@ void set_statistics_for_table(THD *thd, TABLE *table)
(use_stat_table_mode <= COMPLEMENTARY ||
!table->stats_is_read || read_stats->cardinality_is_null) ?
table->file->stats.records : read_stats->cardinality;
+
+ /*
+ For a partitioned table, EITS statistics are based on data from all partitions.
+
+ On the other hand, Partition Pruning figures out which partitions will be
+ accessed and then computes the estimate of rows in used_partitions.
+
+ Use the estimate from Partition Pruning as it is typically more precise.
+ Ideally, EITS should provide per-partition statistics but this is not
+ implemented currently.
+ */
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (table->part_info)
+ table->used_stat_records= table->file->stats.records;
+#endif
+
KEY *key_info, *key_info_end;
for (key_info= table->key_info, key_info_end= key_info+table->s->keys;
key_info < key_info_end; key_info++)
@@ -3904,3 +3921,29 @@ bool is_stat_table(const char *db, const char *table)
}
return false;
}
+
+/*
+ Check whether we can use EITS statistics for a field or not
+
+ TRUE : Use EITS for the column
+ FALSE: Otherwise
+*/
+
+bool is_eits_usable(Field *field)
+{
+ /*
+ (1): checks if we have EITS statistics for a particular column
+ (2): Don't use EITS for GEOMETRY columns
+ (3): Disabling reading EITS statistics for columns involved in the
+ partition list of a table. We assume the selectivity for
+ such columns would be handled during partition pruning.
+ */
+ Column_statistics* col_stats= field->read_stats;
+ return col_stats && !col_stats->no_stat_values_provided() && //(1)
+ field->type() != MYSQL_TYPE_GEOMETRY && //(2)
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ (!field->table->part_info ||
+ !field->table->part_info->field_in_partition_expr(field)) && //(3)
+#endif
+ true;
+}
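
is_eits_usable() centralizes the checks that opt_range.cc previously open-coded; the create_key_parts_for_pseudo_indexes() hunk above now calls it when deciding which columns get pseudo-index key parts. A compressed sketch of that caller-side pattern, with the loop shape taken from the diff and surrounding details omitted:

    // Count the columns for which pseudo-index key parts will be built.
    static uint count_usable_eits_fields(TABLE *table, MY_BITMAP *used_fields)
    {
      uint parts= 0;
      for (Field **field_ptr= table->field; *field_ptr; field_ptr++)
      {
        Field *field= *field_ptr;
        if (bitmap_is_set(used_fields, field->field_index) &&
            is_eits_usable(field))   // stats present, not GEOMETRY, not a partitioning column
          parts++;
      }
      return parts;
    }
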
diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h
index f28d56e4a69..a891bef3164 100644
--- a/sql/sql_statistics.h
+++ b/sql/sql_statistics.h
@@ -109,6 +109,7 @@ double get_column_range_cardinality(Field *field,
key_range *max_endp,
uint range_flag);
bool is_stat_table(const char *db, const char *table);
+bool is_eits_usable(Field* field);
class Histogram
{
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index dc4c20f6698..47ff914b08d 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -5766,7 +5766,8 @@ static bool is_candidate_key(KEY *key)
KEY_PART_INFO *key_part;
KEY_PART_INFO *key_part_end= key->key_part + key->user_defined_key_parts;
- if (!(key->flags & HA_NOSAME) || (key->flags & HA_NULL_PART_KEY))
+ if (!(key->flags & HA_NOSAME) || (key->flags & HA_NULL_PART_KEY) ||
+ (key->flags & HA_KEY_HAS_PART_KEY_SEG))
return false;
for (key_part= key->key_part; key_part < key_part_end; key_part++)
@@ -6232,9 +6233,7 @@ static int compare_uint(const uint *s, const uint *t)
@retval false success
*/
-static bool fill_alter_inplace_info(THD *thd,
- TABLE *table,
- bool varchar,
+static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
Alter_inplace_info *ha_alter_info)
{
Field **f_ptr, *field;
@@ -6242,7 +6241,6 @@ static bool fill_alter_inplace_info(THD *thd,
Create_field *new_field;
KEY_PART_INFO *key_part, *new_part;
KEY_PART_INFO *end;
- uint candidate_key_count= 0;
Alter_info *alter_info= ha_alter_info->alter_info;
DBUG_ENTER("fill_alter_inplace_info");
@@ -6514,6 +6512,17 @@ static bool fill_alter_inplace_info(THD *thd,
KEY *new_key;
KEY *new_key_end=
ha_alter_info->key_info_buffer + ha_alter_info->key_count;
+ /*
+ Primary key index for the new table
+ */
+ const KEY* const new_pk= (ha_alter_info->key_count > 0 &&
+ (!my_strcasecmp(system_charset_info,
+ ha_alter_info->key_info_buffer->name,
+ primary_key_name) ||
+ is_candidate_key(ha_alter_info->key_info_buffer))) ?
+ ha_alter_info->key_info_buffer : NULL;
+ const KEY *const old_pk= table->s->primary_key == MAX_KEY ? NULL :
+ table->key_info + table->s->primary_key;
DBUG_PRINT("info", ("index count old: %d new: %d",
table->s->keys, ha_alter_info->key_count));
@@ -6588,6 +6597,16 @@ static bool fill_alter_inplace_info(THD *thd,
new_field->field->field_index != key_part->fieldnr - 1)
goto index_changed;
}
+
+ /*
+ Rebuild the index if either of the following conditions is satisfied:
+
+ (i) The old table has no primary key while the new one does, or vice versa
+ (ii) The primary key changed to another existing index
+ */
+ if ((new_key == new_pk) != (table_key == old_pk))
+ goto index_changed;
+
continue;
index_changed:
@@ -6639,22 +6658,6 @@ static bool fill_alter_inplace_info(THD *thd,
/* Now let us calculate flags for storage engine API. */
- /* Count all existing candidate keys. */
- for (table_key= table->key_info; table_key < table_key_end; table_key++)
- {
- /*
- Check if key is a candidate key, This key is either already primary key
- or could be promoted to primary key if the original primary key is
- dropped.
- In MySQL one is allowed to create primary key with partial fields (i.e.
- primary key which is not considered candidate). For simplicity we count
- such key as a candidate key here.
- */
- if (((uint) (table_key - table->key_info) == table->s->primary_key) ||
- is_candidate_key(table_key))
- candidate_key_count++;
- }
-
/* Figure out what kind of indexes we are dropping. */
KEY **dropped_key;
KEY **dropped_key_end= ha_alter_info->index_drop_buffer +
@@ -6667,21 +6670,10 @@ static bool fill_alter_inplace_info(THD *thd,
if (table_key->flags & HA_NOSAME)
{
- /*
- Unique key. Check for PRIMARY KEY. Also see comment about primary
- and candidate keys above.
- */
- if ((uint) (table_key - table->key_info) == table->s->primary_key)
- {
+ if (table_key == old_pk)
ha_alter_info->handler_flags|= Alter_inplace_info::DROP_PK_INDEX;
- candidate_key_count--;
- }
else
- {
ha_alter_info->handler_flags|= Alter_inplace_info::DROP_UNIQUE_INDEX;
- if (is_candidate_key(table_key))
- candidate_key_count--;
- }
}
else
ha_alter_info->handler_flags|= Alter_inplace_info::DROP_INDEX;
@@ -6694,23 +6686,10 @@ static bool fill_alter_inplace_info(THD *thd,
if (new_key->flags & HA_NOSAME)
{
- bool is_pk= !my_strcasecmp(system_charset_info, new_key->name, primary_key_name);
-
- if ((!(new_key->flags & HA_KEY_HAS_PART_KEY_SEG) &&
- !(new_key->flags & HA_NULL_PART_KEY)) ||
- is_pk)
- {
- /* Candidate key or primary key! */
- if (candidate_key_count == 0 || is_pk)
- ha_alter_info->handler_flags|= Alter_inplace_info::ADD_PK_INDEX;
- else
- ha_alter_info->handler_flags|= Alter_inplace_info::ADD_UNIQUE_INDEX;
- candidate_key_count++;
- }
+ if (new_key == new_pk)
+ ha_alter_info->handler_flags|= Alter_inplace_info::ADD_PK_INDEX;
else
- {
ha_alter_info->handler_flags|= Alter_inplace_info::ADD_UNIQUE_INDEX;
- }
}
else
ha_alter_info->handler_flags|= Alter_inplace_info::ADD_INDEX;
@@ -9996,7 +9975,10 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
{
/* calculating table's checksum */
ha_checksum crc= 0;
- uchar null_mask=256 - (1 << t->s->last_null_bit_pos);
+ DBUG_ASSERT(t->s->last_null_bit_pos < 8);
+ uchar null_mask= (t->s->last_null_bit_pos ?
+ (256 - (1 << t->s->last_null_bit_pos)):
+ 0);
t->use_all_columns();
diff --git a/sql/sql_type_int.h b/sql/sql_type_int.h
index 1eda5651df5..7433bd5249f 100644
--- a/sql/sql_type_int.h
+++ b/sql/sql_type_int.h
@@ -24,12 +24,25 @@ class Longlong_hybrid
protected:
longlong m_value;
bool m_unsigned;
+ int cmp_signed(const Longlong_hybrid& other) const
+ {
+ return m_value < other.m_value ? -1 : m_value == other.m_value ? 0 : 1;
+ }
+ int cmp_unsigned(const Longlong_hybrid& other) const
+ {
+ return (ulonglong) m_value < (ulonglong) other.m_value ? -1 :
+ m_value == other.m_value ? 0 : 1;
+ }
public:
Longlong_hybrid(longlong nr, bool unsigned_flag)
:m_value(nr), m_unsigned(unsigned_flag)
{ }
longlong value() const { return m_value; }
bool is_unsigned() const { return m_unsigned; }
+ bool is_unsigned_outside_of_signed_range() const
+ {
+ return m_unsigned && ((ulonglong) m_value) > (ulonglong) LONGLONG_MAX;
+ }
bool neg() const { return m_value < 0 && !m_unsigned; }
ulonglong abs() const
{
@@ -39,6 +52,21 @@ public:
return ((ulonglong) LONGLONG_MAX) + 1;
return m_value < 0 ? -m_value : m_value;
}
+ int cmp(const Longlong_hybrid& other) const
+ {
+ if (m_unsigned == other.m_unsigned)
+ return m_unsigned ? cmp_unsigned(other) : cmp_signed(other);
+ if (is_unsigned_outside_of_signed_range())
+ return 1;
+ if (other.is_unsigned_outside_of_signed_range())
+ return -1;
+ /*
+ The unsigned argument is in the range 0..LONGLONG_MAX.
+ The signed argument is in the range LONGLONG_MIN..LONGLONG_MAX.
+ Safe to compare as signed.
+ */
+ return cmp_signed(other);
+ }
};
#endif // SQL_TYPE_INT_INCLUDED
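
Longlong_hybrid::cmp() gives a total order over mixed signed/unsigned values without overflowing: anything unsigned and above LONGLONG_MAX sorts after every signed value, and everything else can safely be compared as signed. A short usage sketch, assuming sql_type_int.h and the usual LONGLONG_MAX definition are available:

    #include <cassert>

    static void longlong_hybrid_cmp_demo()
    {
      Longlong_hybrid huge((longlong) 0xFFFFFFFFFFFFFFFFULL, true);  // unsigned 2^64-1
      Longlong_hybrid minus_one(-1, false);
      Longlong_hybrid ten(10, true);

      assert(huge.cmp(minus_one) == 1);    // 2^64-1 sorts after any signed value
      assert(minus_one.cmp(huge) == -1);
      assert(ten.cmp(minus_one) == 1);     // both in signed range: plain signed compare
    }
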
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index b48f42056f0..95fb4ce3281 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -2317,7 +2317,7 @@ int multi_update::do_updates()
check_opt_it.rewind();
while(TABLE *tbl= check_opt_it++)
{
- if ((local_error= tbl->file->ha_rnd_init(1)))
+ if ((local_error= tbl->file->ha_rnd_init(0)))
{
err_table= tbl;
goto err;
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 919e061fc85..5dd0e7dbcda 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -13080,6 +13080,7 @@ load:
lex->field_list.empty();
lex->update_list.empty();
lex->value_list.empty();
+ lex->many_values.empty();
}
opt_load_data_charset
{ Lex->exchange->cs= $15; }
@@ -15781,19 +15782,21 @@ subselect_end:
lex->current_select = lex->current_select->return_after_parsing();
lex->nest_level--;
lex->current_select->n_child_sum_items += child->n_sum_items;
- /*
- A subselect can add fields to an outer select. Reserve space for
- them.
- */
- lex->current_select->select_n_where_fields+=
- child->select_n_where_fields;
/*
- Aggregate functions in having clause may add fields to an outer
- select. Count them also.
+ A subquery (and all the subsequent query blocks in a UNION) can
+ add columns to an outer query block. Reserve space for them.
+ Aggregate functions in having clause can also add fields to an
+ outer select.
*/
- lex->current_select->select_n_having_items+=
- child->select_n_having_items;
+ for (SELECT_LEX *temp= child->master_unit()->first_select();
+ temp != NULL; temp= temp->next_select())
+ {
+ lex->current_select->select_n_where_fields+=
+ temp->select_n_where_fields;
+ lex->current_select->select_n_having_items+=
+ temp->select_n_having_items;
+ }
}
;
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 75b1f809d73..558ff709918 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -3060,11 +3060,15 @@ static Sys_var_charptr Sys_system_time_zone(
NO_CMD_LINE,
IN_SYSTEM_CHARSET, DEFAULT(system_time_zone));
+/*
+ If one uses views with prepared statements, this should be bigger than
+ table_open_cache (we now allow a value 2 times bigger)
+*/
static Sys_var_ulong Sys_table_def_size(
"table_definition_cache",
"The number of cached table definitions",
GLOBAL_VAR(tdc_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(TABLE_DEF_CACHE_MIN, 512*1024),
+ VALID_RANGE(TABLE_DEF_CACHE_MIN, 2*1024*1024),
DEFAULT(TABLE_DEF_CACHE_DEFAULT), BLOCK_SIZE(1));
@@ -3076,7 +3080,7 @@ static bool fix_table_open_cache(sys_var *, THD *, enum_var_type)
return false;
}
-
+/* Check the table_definition_cache comment if making changes */
static Sys_var_ulong Sys_table_cache_size(
"table_open_cache", "The number of cached open tables",
GLOBAL_VAR(tc_size), CMD_LINE(REQUIRED_ARG),
diff --git a/sql/table.cc b/sql/table.cc
index 15c7617eccc..ca4d58b0941 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1693,6 +1693,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
keyinfo= share->key_info;
uint primary_key= my_strcasecmp(system_charset_info, share->keynames.type_names[0],
primary_key_name) ? MAX_KEY : 0;
+ KEY* key_first_info= NULL;
if (primary_key >= MAX_KEY && keyinfo->flags & HA_NOSAME)
{
@@ -1772,34 +1773,71 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
keyinfo->name_length+1);
}
+ if (!key)
+ key_first_info= keyinfo;
+
if (ext_key_parts > share->key_parts && key)
{
KEY_PART_INFO *new_key_part= (keyinfo-1)->key_part +
(keyinfo-1)->ext_key_parts;
+ uint add_keyparts_for_this_key= add_first_key_parts;
+ uint length_bytes= 0, len_null_byte= 0, ext_key_length= 0;
+ Field *field;
/*
Do not extend the key that contains a component
defined over the beginning of a field.
*/
for (i= 0; i < keyinfo->user_defined_key_parts; i++)
- {
+ {
uint fieldnr= keyinfo->key_part[i].fieldnr;
+ field= share->field[keyinfo->key_part[i].fieldnr-1];
+
+ if (field->null_ptr)
+ len_null_byte= HA_KEY_NULL_LENGTH;
+
+ if (field->type() == MYSQL_TYPE_BLOB ||
+ field->real_type() == MYSQL_TYPE_VARCHAR ||
+ field->type() == MYSQL_TYPE_GEOMETRY)
+ {
+ length_bytes= HA_KEY_BLOB_LENGTH;
+ }
+ ext_key_length+= keyinfo->key_part[i].length + len_null_byte
+ + length_bytes;
if (share->field[fieldnr-1]->key_length() !=
keyinfo->key_part[i].length)
{
- add_first_key_parts= 0;
+ add_keyparts_for_this_key= 0;
break;
}
}
- if (add_first_key_parts < keyinfo->ext_key_parts-keyinfo->user_defined_key_parts)
- {
+ if (add_keyparts_for_this_key)
+ {
+ for (i= 0; i < add_keyparts_for_this_key; i++)
+ {
+ uint pk_part_length= key_first_info->key_part[i].store_length;
+ if (keyinfo->ext_key_part_map & 1<<i)
+ {
+ if (ext_key_length + pk_part_length > MAX_DATA_LENGTH_FOR_KEY)
+ {
+ add_keyparts_for_this_key= i;
+ break;
+ }
+ ext_key_length+= pk_part_length;
+ }
+ }
+ }
+
+ if (add_keyparts_for_this_key < keyinfo->ext_key_parts -
+ keyinfo->user_defined_key_parts)
+ {
share->ext_key_parts-= keyinfo->ext_key_parts;
key_part_map ext_key_part_map= keyinfo->ext_key_part_map;
keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
keyinfo->ext_key_flags= keyinfo->flags;
keyinfo->ext_key_part_map= 0;
- for (i= 0; i < add_first_key_parts; i++)
+ for (i= 0; i < add_keyparts_for_this_key; i++)
{
if (ext_key_part_map & 1<<i)
{
diff --git a/sql/table.h b/sql/table.h
index 4a1552f8c0d..10c1d1bc68e 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -1816,7 +1816,7 @@ struct TABLE_LIST
/* Index names in a "... JOIN ... USE/IGNORE INDEX ..." clause. */
List<Index_hint> *index_hints;
TABLE *table; /* opened table */
- uint table_id; /* table id (from binlog) for opened table */
+ ulonglong table_id; /* table id (from binlog) for opened table */
/*
select_result for derived table to pass it from table creation to table
filling procedure
diff --git a/sql/table_cache.cc b/sql/table_cache.cc
index f13d7183a99..1154017d8d5 100644
--- a/sql/table_cache.cc
+++ b/sql/table_cache.cc
@@ -1206,6 +1206,9 @@ void tdc_assign_new_table_id(TABLE_SHARE *share)
DBUG_ASSERT(share);
DBUG_ASSERT(tdc_inited);
+ DBUG_EXECUTE_IF("simulate_big_table_id",
+ if (last_table_id < UINT_MAX32)
+ last_table_id= UINT_MAX32 - 1;);
/*
There is one reserved number that cannot be used. Remember to
change this when 6-byte global table id's are introduced.
@@ -1215,7 +1218,7 @@ void tdc_assign_new_table_id(TABLE_SHARE *share)
my_atomic_rwlock_wrlock(&LOCK_tdc_atomics);
tid= my_atomic_add64(&last_table_id, 1);
my_atomic_rwlock_wrunlock(&LOCK_tdc_atomics);
- } while (unlikely(tid == ~0UL));
+ } while (unlikely(tid == ~0UL || tid == 0));
share->table_map_id= tid;
DBUG_PRINT("info", ("table_id= %lu", share->table_map_id));
diff --git a/sql/unireg.h b/sql/unireg.h
index b13dd494c74..f32a2fdfe12 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -202,7 +202,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const char *table,
#define FRM_HEADER_SIZE 64
#define FRM_FORMINFO_SIZE 288
-#define FRM_MAX_SIZE (512*1024)
+#define FRM_MAX_SIZE (1024*1024)
static inline bool is_binary_frm_header(uchar *head)
{
diff --git a/storage/connect/global.h b/storage/connect/global.h
index 36e8a311124..dc1e149745f 100644
--- a/storage/connect/global.h
+++ b/storage/connect/global.h
@@ -219,11 +219,11 @@ DllExport LPCSTR PlugSetPath(LPSTR to, LPCSTR prefix, LPCSTR name, LPCSTR dir);
DllExport BOOL PlugIsAbsolutePath(LPCSTR path);
DllExport bool AllocSarea(PGLOBAL, uint);
DllExport void FreeSarea(PGLOBAL);
-DllExport BOOL PlugSubSet(PGLOBAL, void *, uint);
+DllExport BOOL PlugSubSet(void *, uint);
+DllExport void *PlugSubAlloc(PGLOBAL, void *, size_t);
DllExport char *PlugDup(PGLOBAL g, const char *str);
DllExport void *MakePtr(void *, OFFSET);
DllExport void htrc(char const *fmt, ...);
-//DllExport int GetTraceValue(void);
DllExport uint GetTraceValue(void);
#if defined(__cplusplus)
@@ -233,6 +233,6 @@ DllExport uint GetTraceValue(void);
/***********************************************************************/
/* Non exported routine declarations. */
/***********************************************************************/
-void *PlugSubAlloc(PGLOBAL, void *, size_t); // Does throw
+//void *PlugSubAlloc(PGLOBAL, void *, size_t); // Does throw
/*-------------------------- End of Global.H --------------------------*/
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index bf890724d5e..1e826f67573 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -4191,7 +4191,7 @@ int ha_connect::rnd_pos(uchar *buf, uchar *pos)
rc= rnd_next(buf);
} else {
PGLOBAL g = GetPlug((table) ? table->in_use : NULL, xp);
- strcpy(g->Message, "Not supported by this table type");
+// strcpy(g->Message, "Not supported by this table type");
my_message(ER_ILLEGAL_HA, g->Message, MYF(0));
rc= HA_ERR_INTERNAL_ERROR;
} // endif SetRecpos
@@ -7307,7 +7307,7 @@ maria_declare_plugin(connect)
PLUGIN_LICENSE_GPL,
connect_init_func, /* Plugin Init */
connect_done_func, /* Plugin Deinit */
- 0x0107, /* version number (1.05) */
+ 0x0106, /* version number (1.06) */
NULL, /* status variables */
connect_system_variables, /* system variables */
"1.06.0008", /* string version */
diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp
index 26455d572b6..d5a3a840173 100644
--- a/storage/connect/jsonudf.cpp
+++ b/storage/connect/jsonudf.cpp
@@ -3055,7 +3055,7 @@ my_bool json_array_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
PGLOBAL g = (PGLOBAL)initid->ptr;
- PlugSubSet(g, g->Sarea, g->Sarea_Size);
+ PlugSubSet(g->Sarea, g->Sarea_Size);
g->Activityp = (PACTIVITY)JsonNew(g, TYPE_JAR);
g->N = (int)n;
return false;
@@ -3098,7 +3098,7 @@ void json_array_grp_clear(UDF_INIT *initid, char*, char*)
{
PGLOBAL g = (PGLOBAL)initid->ptr;
- PlugSubSet(g, g->Sarea, g->Sarea_Size);
+ PlugSubSet(g->Sarea, g->Sarea_Size);
g->Activityp = (PACTIVITY)JsonNew(g, TYPE_JAR);
g->N = GetJsonGroupSize();
} // end of json_array_grp_clear
@@ -3132,7 +3132,7 @@ my_bool json_object_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
PGLOBAL g = (PGLOBAL)initid->ptr;
- PlugSubSet(g, g->Sarea, g->Sarea_Size);
+ PlugSubSet(g->Sarea, g->Sarea_Size);
g->Activityp = (PACTIVITY)JsonNew(g, TYPE_JOB);
g->N = (int)n;
return false;
@@ -3169,7 +3169,7 @@ void json_object_grp_clear(UDF_INIT *initid, char*, char*)
{
PGLOBAL g = (PGLOBAL)initid->ptr;
- PlugSubSet(g, g->Sarea, g->Sarea_Size);
+ PlugSubSet(g->Sarea, g->Sarea_Size);
g->Activityp = (PACTIVITY)JsonNew(g, TYPE_JOB);
g->N = GetJsonGroupSize();
} // end of json_object_grp_clear
@@ -4418,7 +4418,7 @@ char *json_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
} else if (initid->const_item)
g->N = 1;
- PlugSubSet(g, g->Sarea, g->Sarea_Size);
+ PlugSubSet(g->Sarea, g->Sarea_Size);
fn = MakePSZ(g, args, 0);
if (args->arg_count > 1) {
@@ -5662,7 +5662,7 @@ char *jbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (bsp && !bsp->Changed)
goto fin;
- PlugSubSet(g, g->Sarea, g->Sarea_Size);
+ PlugSubSet(g->Sarea, g->Sarea_Size);
g->Xchk = NULL;
fn = MakePSZ(g, args, 0);
pretty = (args->arg_count > 2 && args->args[2]) ? (int)*(longlong*)args->args[2] : 3;
diff --git a/storage/connect/mysql-test/connect/r/jdbc_oracle.result b/storage/connect/mysql-test/connect/r/jdbc_oracle.result
index 2e36891a037..ec314c5f072 100644
--- a/storage/connect/mysql-test/connect/r/jdbc_oracle.result
+++ b/storage/connect/mysql-test/connect/r/jdbc_oracle.result
@@ -8,12 +8,19 @@ SELECT * FROM t2 WHERE command = 'drop table employee';
command number message
drop table employee 0 Execute: java.sql.SQLSyntaxErrorException: ORA-00942: table or view does not exist
+Warnings:
+Warning 1105 Execute: java.sql.SQLSyntaxErrorException: ORA-00942: table or view does not exist
+
SELECT * FROM t2 WHERE command = 'create table employee (id int not null, name varchar(32), title char(16), salary number(8,2))';
command number message
create table employee (id int not null, name varchar(32), title char(16), salary number(8,2)) 0 Affected rows
+Warnings:
+Warning 1105 Affected rows
SELECT * FROM t2 WHERE command = "insert into employee values(4567,'Johnson', 'Engineer', 12560.50)";
command number message
insert into employee values(4567,'Johnson', 'Engineer', 12560.50) 1 Affected rows
+Warnings:
+Warning 1105 Affected rows
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables
CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
OPTION_LIST='User=system,Password=manager';
@@ -27,8 +34,8 @@ OPTION_LIST='User=system,Password=manager';
SELECT * FROM t1;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
NULL SYSTEM EMPLOYEE ID 3 NUMBER 38 0 0 10 0 NULL
-NULL SYSTEM EMPLOYEE NAME 12 VARCHAR2 32 0 0 10 1 NULL
-NULL SYSTEM EMPLOYEE TITLE 1 CHAR 16 0 0 10 1 NULL
+NULL SYSTEM EMPLOYEE NAME 12 VARCHAR2 32 0 NULL 10 1 NULL
+NULL SYSTEM EMPLOYEE TITLE 1 CHAR 16 0 NULL 10 1 NULL
NULL SYSTEM EMPLOYEE SALARY 3 NUMBER 8 0 2 10 1 NULL
DROP TABLE t1;
CREATE SERVER 'oracle' FOREIGN DATA WRAPPER 'oracle.jdbc.driver.OracleDriver' OPTIONS (
@@ -52,7 +59,7 @@ Note 1105 EMPLOYEE: 1 affected rows
SELECT * FROM t1;
ID NAME TITLE SALARY
4567 Trump Engineer 12560.50
-6214 Clinton Retired 0.00
+6214 Clinton Retired NULL
DELETE FROM t1 WHERE id = 6214;
Warnings:
Note 1105 EMPLOYEE: 1 affected rows
@@ -63,8 +70,7 @@ DROP TABLE t1;
SELECT * FROM t2 WHERE command = 'drop table employee';
command number message
drop table employee 0 Affected rows
+Warnings:
+Warning 1105 Affected rows
DROP TABLE t2;
DROP SERVER 'oracle';
-SET GLOBAL connect_jvm_path=NULL;
-SET GLOBAL connect_class_path=NULL;
-SET GLOBAL time_zone = SYSTEM;
diff --git a/storage/connect/mysql-test/connect/r/jdbc_postgresql.result b/storage/connect/mysql-test/connect/r/jdbc_postgresql.result
index 7969672dd66..bec1dc8725b 100644
--- a/storage/connect/mysql-test/connect/r/jdbc_postgresql.result
+++ b/storage/connect/mysql-test/connect/r/jdbc_postgresql.result
@@ -1,4 +1,4 @@
-SET GLOBAL connect_class_path='C:/MariaDB-10.2/MariaDB/storage/connect/mysql-test/connect/std_data/JavaWrappers.jar;C:/Jconnectors/postgresql-42.2.1.jar';
+SET GLOBAL connect_class_path='C:/MariaDB-10.0/MariaDB/storage/connect/mysql-test/connect/std_data/JavaWrappers.jar;C:/Jconnectors/postgresql-42.2.1.jar';
CREATE TABLE t2 (
command varchar(128) not null,
number int(5) not null flag=1,
@@ -9,12 +9,18 @@ OPTION_LIST='Execsrc=1';
SELECT * FROM t2 WHERE command='drop table employee';
command number message
drop table employee 0 Execute: org.postgresql.util.PSQLException: ERREUR: la table « employee » n'existe pas
+Warnings:
+Warning 1105 Execute: org.postgresql.util.PSQLException: ERREUR: la table « employee » n'existe pas
SELECT * FROM t2 WHERE command = 'create table employee (id int not null, name varchar(32), title char(16), salary decimal(8,2))';
command number message
create table employee (id int not null, name varchar(32), title char(16), salary decimal(8,2)) 0 Affected rows
+Warnings:
+Warning 1105 Affected rows
SELECT * FROM t2 WHERE command = "insert into employee values(4567,'Johnson', 'Engineer', 12560.50)";
command number message
insert into employee values(4567,'Johnson', 'Engineer', 12560.50) 1 Affected rows
+Warnings:
+Warning 1105 Affected rows
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables
CONNECTION='jdbc:postgresql://localhost/test?user=postgres&password=tinono'
OPTION_LIST='Tabtype=TABLE,Maxres=10';
@@ -63,4 +69,6 @@ DROP SERVER 'postgresql';
SELECT * FROM t2 WHERE command='drop table employee';
command number message
drop table employee 0 Affected rows
+Warnings:
+Warning 1105 Affected rows
DROP TABLE t2;
diff --git a/storage/connect/mysql-test/connect/r/part_table.result b/storage/connect/mysql-test/connect/r/part_table.result
index f3a556ae784..ee17a1d32b9 100644
--- a/storage/connect/mysql-test/connect/r/part_table.result
+++ b/storage/connect/mysql-test/connect/r/part_table.result
@@ -23,7 +23,7 @@ id msg
CREATE TABLE xt3 (
id INT KEY NOT NULL,
msg VARCHAR(32))
-ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=10;
+ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=6;
Warnings:
Warning 1105 No file name. Table will use xt3.csv
INSERT INTO xt3 VALUES(60,'sixty'),(81,'eighty one'),(72,'seventy two');
@@ -92,7 +92,7 @@ id msg
EXPLAIN PARTITIONS
SELECT * FROM t1 WHERE id = 81;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 3 ALL NULL NULL NULL NULL 4 Using where
+1 SIMPLE t1 3 ALL NULL NULL NULL NULL 6 Using where
DELETE FROM t1;
Warnings:
Note 1105 xt1: 4 affected rows
diff --git a/storage/connect/mysql-test/connect/t/part_table.test b/storage/connect/mysql-test/connect/t/part_table.test
index 5edd5766bd6..0fb2a11f0f9 100644
--- a/storage/connect/mysql-test/connect/t/part_table.test
+++ b/storage/connect/mysql-test/connect/t/part_table.test
@@ -22,7 +22,7 @@ SELECT * FROM xt2;
CREATE TABLE xt3 (
id INT KEY NOT NULL,
msg VARCHAR(32))
-ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=10;
+ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=6;
INSERT INTO xt3 VALUES(60,'sixty'),(81,'eighty one'),(72,'seventy two');
SELECT * FROM xt3;
diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp
index 887527e38ab..048f00be75f 100644
--- a/storage/connect/plugutil.cpp
+++ b/storage/connect/plugutil.cpp
@@ -514,27 +514,31 @@ void FreeSarea(PGLOBAL g)
/* Here there should be some verification done such as validity of */
/* the address and size not larger than memory size. */
/***********************************************************************/
-BOOL PlugSubSet(PGLOBAL g __attribute__((unused)), void *memp, uint size)
+BOOL PlugSubSet(void *memp, uint size)
{
PPOOLHEADER pph = (PPOOLHEADER)memp;
pph->To_Free = (OFFSET)sizeof(POOLHEADER);
pph->FreeBlk = size - pph->To_Free;
-
return FALSE;
} /* end of PlugSubSet */
/***********************************************************************/
+/*  Use it to export a function that does the throwing.                 */
+/***********************************************************************/
+void *DoThrow(int n)
+{
+ throw n;
+} /* end of DoThrow */
+
+/***********************************************************************/
/* Program for sub-allocating one item in a storage area. */
-/* Note: SubAlloc routines of OS/2 are no more used to increase the */
-/* code portability and avoid problems when a grammar compiled under */
-/* one version of OS/2 is used under another version. */
-/* The simple way things are done here is also based on the fact */
-/* that no freeing of suballocated blocks is permitted in Plug. */
+/* The simple way things are done here is based on the fact */
+/* that no freeing of suballocated blocks is permitted in CONNECT. */
/***********************************************************************/
void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size)
- {
- PPOOLHEADER pph; /* Points on area header. */
+{
+ PPOOLHEADER pph; /* Points on area header. */
if (!memp)
/*******************************************************************/
@@ -559,8 +563,8 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size)
if (trace(1))
htrc("PlugSubAlloc: %s\n", g->Message);
- throw 1234;
- } /* endif size OS32 code */
+ DoThrow(1234);
+ } /* endif size OS32 code */
/*********************************************************************/
/* Do the suballocation the simplest way. */
@@ -574,7 +578,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size)
memp, pph->To_Free, pph->FreeBlk);
return (memp);
- } /* end of PlugSubAlloc */
+} /* end of PlugSubAlloc */
/***********************************************************************/
/* Program for sub-allocating and copying a string in a storage area. */
diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp
index e4f169575f8..30d8063d1a6 100644
--- a/storage/connect/reldef.cpp
+++ b/storage/connect/reldef.cpp
@@ -522,8 +522,15 @@ PTABDEF OEMDEF::GetXdef(PGLOBAL g)
// Get the function returning an instance of the external DEF class
if (!(getdef = (XGETDEF)GetProcAddress((HINSTANCE)Hdll, getname))) {
- sprintf(g->Message, MSG(PROCADD_ERROR), GetLastError(), getname);
- FreeLibrary((HMODULE)Hdll);
+ char buf[256];
+ DWORD rc = GetLastError();
+
+ sprintf(g->Message, MSG(PROCADD_ERROR), rc, getname);
+ FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
+ (LPTSTR)buf, sizeof(buf), NULL);
+ strcat(strcat(g->Message, ": "), buf);
+ FreeLibrary((HMODULE)Hdll);
return NULL;
} // endif getdef
#else // !__WIN__
diff --git a/storage/connect/tabfmt.h b/storage/connect/tabfmt.h
index 396bba568ff..10f0757c60b 100644
--- a/storage/connect/tabfmt.h
+++ b/storage/connect/tabfmt.h
@@ -13,7 +13,7 @@ typedef class TDBFMT *PTDBFMT;
/***********************************************************************/
/* Functions used externally. */
/***********************************************************************/
-PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info);
+DllExport PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info);
/***********************************************************************/
/* CSV table. */
diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp
index 9e4f5ab987d..c0d36efcf42 100644
--- a/storage/connect/tabjson.cpp
+++ b/storage/connect/tabjson.cpp
@@ -1,6 +1,6 @@
/************* tabjson C++ Program Source Code File (.CPP) *************/
-/* PROGRAM NAME: tabjson Version 1.5 */
-/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */
+/* PROGRAM NAME: tabjson Version 1.6 */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2018 */
/* This program are the JSON class DB execution routines. */
/***********************************************************************/
@@ -173,6 +173,7 @@ JSONDISC::JSONDISC(PGLOBAL g, uint *lg)
int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
{
+ char filename[_MAX_PATH];
bool mgo = (GetTypeID(topt->type) == TAB_MONGO);
PCSZ level = GetStringTableOption(g, topt, "Level", NULL);
@@ -209,6 +210,12 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
return 0;
} // endif Fn
+ if (tdp->Fn) {
+ // Use the file name relative to the recorded datapath
+ PlugSetPath(filename, tdp->Fn, tdp->GetPath());
+ tdp->Fn = PlugDup(g, filename);
+ } // endif Fn
+
if (trace(1))
htrc("File %s objname=%s pretty=%d lvl=%d\n",
tdp->Fn, tdp->Objname, tdp->Pretty, lvl);
@@ -299,7 +306,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
memset(G, 0, sizeof(GLOBAL));
G->Sarea_Size = tdp->Lrecl * 10;
G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size);
- PlugSubSet(G, G->Sarea, G->Sarea_Size);
+ PlugSubSet(G->Sarea, G->Sarea_Size);
G->jump_level = 0;
tjnp->SetG(G);
@@ -342,7 +349,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
strncpy(colname, jpp->GetKey(), 64);
fmt[bf] = 0;
- if (Find(g, jpp->GetVal(), MY_MIN(lvl, 0)))
+ if (Find(g, jpp->GetVal(), colname, MY_MIN(lvl, 0)))
goto err;
} // endfor jpp
@@ -385,7 +392,7 @@ err:
return 0;
} // end of GetColumns
-bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, int j)
+bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
{
char *p, *pc = colname + strlen(colname);
int ars;
@@ -413,12 +420,14 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, int j)
job = (PJOB)jsp;
for (PJPR jrp = job->GetFirst(); jrp; jrp = jrp->GetNext()) {
- if (*jrp->GetKey() != '$') {
- strncat(strncat(fmt, sep, 128), jrp->GetKey(), 128);
- strncat(strncat(colname, "_", 64), jrp->GetKey(), 64);
+ PCSZ k = jrp->GetKey();
+
+ if (*k != '$') {
+ strncat(strncat(fmt, sep, 128), k, 128);
+ strncat(strncat(colname, "_", 64), k, 64);
} // endif Key
- if (Find(g, jrp->GetVal(), j + 1))
+ if (Find(g, jrp->GetVal(), k, j + 1))
return true;
*p = *pc = 0;
@@ -428,13 +437,13 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, int j)
case TYPE_JAR:
jar = (PJAR)jsp;
- if (all || (tdp->Xcol && !stricmp(tdp->Xcol, colname)))
+ if (all || (tdp->Xcol && !stricmp(tdp->Xcol, key)))
ars = jar->GetSize(false);
else
ars = MY_MIN(jar->GetSize(false), 1);
for (int k = 0; k < ars; k++) {
- if (!tdp->Xcol || stricmp(tdp->Xcol, colname)) {
+ if (!tdp->Xcol || stricmp(tdp->Xcol, key)) {
sprintf(buf, "%d", k);
if (tdp->Uri)
@@ -448,7 +457,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, int j)
} else
strncat(fmt, (tdp->Uri ? sep : "[*]"), 128);
- if (Find(g, jar->GetValue(k), j))
+ if (Find(g, jar->GetValue(k), "", j))
return true;
*p = *pc = 0;
@@ -522,7 +531,9 @@ void JSONDISC::AddColumn(PGLOBAL g)
n++;
} // endif jcp
- pjcp = jcp;
+ if (jcp)
+ pjcp = jcp;
+
} // end of AddColumn
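AddColumn now advances the previous-column pointer only when a column block was actually produced, so a skipped column can no longer leave pjcp pointing past the end of the chained list. The guarded append pattern in isolation:

// Hedged sketch of the guarded list-append used above.
// jcp is the block just built or merged; pjcp tracks the current chain tail.
if (jcp)
  pjcp = jcp;   // only a real block becomes the new tail; otherwise keep the old one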
@@ -549,7 +560,7 @@ JSONDEF::JSONDEF(void)
/***********************************************************************/
/* DefineAM: define specific AM block values. */
/***********************************************************************/
-bool JSONDEF::DefineAM(PGLOBAL g, LPCSTR, int poff)
+bool JSONDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
{
Schema = GetStringCatInfo(g, "DBname", Schema);
Jmode = (JMODE)GetIntCatInfo("Jmode", MODE_OBJECT);
@@ -561,7 +572,8 @@ bool JSONDEF::DefineAM(PGLOBAL g, LPCSTR, int poff)
Sep = *GetStringCatInfo(g, "Separator", ".");
Accept = GetBoolCatInfo("Accept", false);
- if (Uri = GetStringCatInfo(g, "Connect", NULL)) {
+ // Don't use url as uri when called from REST OEM module
+ if (stricmp(am, "REST") && (Uri = GetStringCatInfo(g, "Connect", NULL))) {
#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT)
Collname = GetStringCatInfo(g, "Name",
(Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name);
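The hunk above stops JSONDEF::DefineAM from treating the Connect option as a document-store URI when it is reached through the REST OEM module: the previously ignored am (access method) argument is now compared against "REST" before the URI branch is taken. The added guard in isolation:

// Hedged sketch of the guard added above; the URI branch body is elided.
if (stricmp(am, "REST") && (Uri = GetStringCatInfo(g, "Connect", NULL))) {
  // MongoDB/Java URI handling, deliberately skipped for REST callers
}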
@@ -670,7 +682,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
memset(G, 0, sizeof(GLOBAL));
G->Sarea_Size = Lrecl * 10;
G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size);
- PlugSubSet(G, G->Sarea, G->Sarea_Size);
+ PlugSubSet(G->Sarea, G->Sarea_Size);
G->jump_level = 0;
((TDBJSN*)tdbp)->G = G;
} else {
@@ -963,7 +975,7 @@ int TDBJSN::ReadDB(PGLOBAL g)
return rc;
// Recover the memory used for parsing
- PlugSubSet(G, G->Sarea, G->Sarea_Size);
+ PlugSubSet(G->Sarea, G->Sarea_Size);
if ((Row = ParseJson(G, To_Line, strlen(To_Line), &Pretty, &Comma))) {
Row = FindRow(g);
@@ -1079,13 +1091,13 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp)
} // end of PrepareWriting
/***********************************************************************/
-/* WriteDB: Data Base write routine for DOS access method. */
+/* WriteDB: Data Base write routine for JSON access method. */
/***********************************************************************/
int TDBJSN::WriteDB(PGLOBAL g)
{
int rc = TDBDOS::WriteDB(g);
- PlugSubSet(G, G->Sarea, G->Sarea_Size);
+ PlugSubSet(G->Sarea, G->Sarea_Size);
Row->Clear();
return rc;
} // end of WriteDB
@@ -2340,7 +2352,7 @@ void TDBJSON::CloseDB(PGLOBAL g)
TDBJCL::TDBJCL(PJDEF tdp) : TDBCAT(tdp)
{
Topt = tdp->GetTopt();
- Db = tdp->Schema;
+ Db = tdp->Schema;
Dsn = tdp->Uri;
} // end of TDBJCL constructor
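The signature change running through this file gives JSONDISC::Find the JSON key of the value being inspected, so the expand-column option (Xcol) is matched against the immediate key rather than the accumulated colname path, and array elements recurse with an empty key. How the array-expansion decision reads after the change, using the members shown in the hunks above:

// Hedged sketch of the expansion test with the new key parameter.
// key is the member name passed down by the caller ("" for array items).
bool expand = all || (tdp->Xcol && !stricmp(tdp->Xcol, key));
int  ars    = expand ? jar->GetSize(false)             // expand the whole array
                     : MY_MIN(jar->GetSize(false), 1); // otherwise sample one element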
diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h
index 2ff72905e86..8721a2a5ab7 100644
--- a/storage/connect/tabjson.h
+++ b/storage/connect/tabjson.h
@@ -1,11 +1,11 @@
/*************** tabjson H Declares Source Code File (.H) **************/
/* Name: tabjson.h Version 1.3 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2018 */
/* */
/* This file contains the JSON classes declares. */
/***********************************************************************/
-#include "osutil.h"
+//#include "osutil.h" // Unuseful and bad for OEM
#include "block.h"
#include "colblk.h"
#include "json.h"
@@ -16,7 +16,7 @@ typedef class JSONDEF *PJDEF;
typedef class TDBJSON *PJTDB;
typedef class JSONCOL *PJCOL;
class TDBJSN;
-PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info);
+DllExport PQRYRES JSONColumns(PGLOBAL, PCSZ, PCSZ, PTOS, bool);
/***********************************************************************/
/* The JSON tree node. Can be an Object or an Array. */
@@ -52,7 +52,7 @@ public:
// Functions
int GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt);
- bool Find(PGLOBAL g, PJVAL jvp, int j);
+ bool Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j);
void AddColumn(PGLOBAL g);
// Members
diff --git a/storage/connect/tabodbc.cpp b/storage/connect/tabodbc.cpp
index fddfb0c0420..0fa117c3d2f 100644
--- a/storage/connect/tabodbc.cpp
+++ b/storage/connect/tabodbc.cpp
@@ -5,7 +5,7 @@
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2000-2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2000-2018 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -95,23 +95,23 @@ bool ExactInfo(void);
/* Constructor. */
/***********************************************************************/
ODBCDEF::ODBCDEF(void)
- {
+{
Connect = NULL;
Catver = 0;
UseCnc = false;
- } // end of ODBCDEF constructor
+} // end of ODBCDEF constructor
/***********************************************************************/
/* DefineAM: define specific AM block values from XDB file. */
/***********************************************************************/
bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
- {
+{
Desc = Connect = GetStringCatInfo(g, "Connect", NULL);
if (!Connect && !Catfunc) {
sprintf(g->Message, "Missing connection for ODBC table %s", Name);
return true;
- } // endif Connect
+ } // endif Connect
if (EXTDEF::DefineAM(g, am, poff))
return true;
@@ -123,13 +123,13 @@ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
Qto= GetIntCatInfo("QueryTimeout", DEFAULT_QUERY_TIMEOUT);
UseCnc = GetBoolCatInfo("UseDSN", false);
return false;
- } // end of DefineAM
+} // end of DefineAM
/***********************************************************************/
/* GetTable: makes a new Table Description Block. */
/***********************************************************************/
PTDB ODBCDEF::GetTable(PGLOBAL g, MODE m)
- {
+{
PTDB tdbp = NULL;
/*********************************************************************/
@@ -158,10 +158,10 @@ PTDB ODBCDEF::GetTable(PGLOBAL g, MODE m)
tdbp = new(g) TDBMUL(tdbp);
else if (Multiple == 2)
strcpy(g->Message, MSG(NO_ODBC_MUL));
- } // endswitch Catfunc
+ } // endswitch Catfunc
return tdbp;
- } // end of GetTable
+} // end of GetTable
/* -------------------------- Class TDBODBC -------------------------- */
@@ -169,7 +169,7 @@ PTDB ODBCDEF::GetTable(PGLOBAL g, MODE m)
/* Implementation of the TDBODBC class. */
/***********************************************************************/
TDBODBC::TDBODBC(PODEF tdp) : TDBEXT(tdp)
- {
+{
Ocp = NULL;
Cnp = NULL;
@@ -191,19 +191,19 @@ TDBODBC::TDBODBC(PODEF tdp) : TDBEXT(tdp)
Ops.UseCnc = false;
} // endif tdp
- } // end of TDBODBC standard constructor
+} // end of TDBODBC standard constructor
TDBODBC::TDBODBC(PTDBODBC tdbp) : TDBEXT(tdbp)
- {
+{
Ocp = tdbp->Ocp; // is that right ?
Cnp = tdbp->Cnp;
Connect = tdbp->Connect;
Ops = tdbp->Ops;
- } // end of TDBODBC copy constructor
+} // end of TDBODBC copy constructor
// Method
PTDB TDBODBC::Clone(PTABS t)
- {
+{
PTDB tp;
PODBCCOL cp1, cp2;
PGLOBAL g = t->G; // Is this really useful ???
@@ -213,18 +213,18 @@ PTDB TDBODBC::Clone(PTABS t)
for (cp1 = (PODBCCOL)Columns; cp1; cp1 = (PODBCCOL)cp1->GetNext()) {
cp2 = new(g) ODBCCOL(cp1, tp); // Make a copy
NewPointer(t, cp1, cp2);
- } // endfor cp1
+ } // endfor cp1
return tp;
- } // end of CopyOne
+} // end of CopyOne
/***********************************************************************/
/* Allocate ODBC column description block. */
/***********************************************************************/
PCOL TDBODBC::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
- {
+{
return new(g) ODBCCOL(cdp, this, cprec, n);
- } // end of MakeCol
+} // end of MakeCol
/***********************************************************************/
/* Extract the filename from connect string and return it. */
@@ -232,7 +232,7 @@ PCOL TDBODBC::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
/* with a place holder to be used by SetFile. */
/***********************************************************************/
PCSZ TDBODBC::GetFile(PGLOBAL g)
- {
+{
if (Connect) {
char *p1, *p2;
int i;
@@ -263,18 +263,18 @@ PCSZ TDBODBC::GetFile(PGLOBAL g)
memcpy(MulConn, Connect, p1 - Connect);
MulConn[p1 - Connect] = '\0';
strcat(strcat(MulConn, "%s"), (p2) ? p2 : ";");
- } // endif p1
+ } // endif p1
- } // endif Connect
+ } // endif Connect
return (DBQ) ? DBQ : (PSZ)"???";
- } // end of GetFile
+} // end of GetFile
/***********************************************************************/
/* Set DBQ and get the new file name into the connect string. */
/***********************************************************************/
void TDBODBC::SetFile(PGLOBAL g, PCSZ fn)
- {
+{
if (MulConn) {
int n = strlen(MulConn) + strlen(fn) - 1;
@@ -283,20 +283,20 @@ void TDBODBC::SetFile(PGLOBAL g, PCSZ fn)
// of having to reallocate it is reduced.
BufSize = n + 6;
Connect = (char*)PlugSubAlloc(g, NULL, BufSize);
- } // endif n
+ } // endif n
// Make the complete connect string
sprintf(Connect, MulConn, fn);
- } // endif MultConn
+ } // endif MultConn
DBQ = PlugDup(g, fn);
- } // end of SetFile
+} // end of SetFile
/***********************************************************************/
/* MakeInsert: make the Insert statement used with ODBC connection. */
/***********************************************************************/
bool TDBODBC::MakeInsert(PGLOBAL g)
- {
+{
PCSZ schmp = NULL;
char *catp = NULL, buf[NAM_LEN * 3];
int len = 0;
@@ -377,7 +377,7 @@ bool TDBODBC::MakeInsert(PGLOBAL g)
} else
Query->Append(buf);
- } // endfor colp
+ } // endfor colp
Query->Append(") VALUES (");
@@ -390,32 +390,32 @@ bool TDBODBC::MakeInsert(PGLOBAL g)
Query->RepLast(')');
return oom;
- } // end of MakeInsert
+} // end of MakeInsert
/***********************************************************************/
/* ODBC Bind Parameter function. */
/***********************************************************************/
bool TDBODBC::BindParameters(PGLOBAL g)
- {
- PODBCCOL colp;
+{
+ PODBCCOL colp;
- for (colp = (PODBCCOL)Columns; colp; colp = (PODBCCOL)colp->Next) {
- colp->AllocateBuffers(g, 0);
+ for (colp = (PODBCCOL)Columns; colp; colp = (PODBCCOL)colp->Next) {
+ colp->AllocateBuffers(g, 0);
- if (Ocp->BindParam(colp))
- return true;
+ if (Ocp->BindParam(colp))
+ return true;
- } // endfor colp
+ } // endfor colp
- return false;
- } // end of BindParameters
+ return false;
+} // end of BindParameters
#if 0
/***********************************************************************/
/* MakeUpdate: make the SQL statement to send to ODBC connection. */
/***********************************************************************/
char *TDBODBC::MakeUpdate(PGLOBAL g)
- {
+{
char *qc, *stmt = NULL, cmd[8], tab[96], end[1024];
stmt = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 64);
@@ -440,60 +440,60 @@ char *TDBODBC::MakeUpdate(PGLOBAL g)
strcat(stmt, end);
return stmt;
- } // end of MakeUpdate
+} // end of MakeUpdate
/***********************************************************************/
/* MakeDelete: make the SQL statement to send to ODBC connection. */
/***********************************************************************/
char *TDBODBC::MakeDelete(PGLOBAL g)
- {
- char *qc, *stmt = NULL, cmd[8], from[8], tab[96], end[512];
-
- stmt = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 64);
- memset(end, 0, sizeof(end));
-
- if (sscanf(Qrystr, "%s %s `%[^`]`%511c", cmd, from, tab, end) > 2 ||
- sscanf(Qrystr, "%s %s \"%[^\"]\"%511c", cmd, from, tab, end) > 2)
- qc = Ocp->GetQuoteChar();
- else if (sscanf(Qrystr, "%s %s %s%511c", cmd, from, tab, end) > 2)
- qc = (Quoted) ? Quote : "";
- else {
- strcpy(g->Message, "Cannot use this DELETE command");
- return NULL;
- } // endif sscanf
-
- assert(!stricmp(cmd, "delete") && !stricmp(from, "from"));
- strcat(strcat(strcat(strcpy(stmt, "DELETE FROM "), qc), TableName), qc);
-
- if (*end) {
- for (int i = 0; end[i]; i++)
- if (end[i] == '`')
- end[i] = *qc;
-
- strcat(stmt, end);
- } // endif end
-
- return stmt;
- } // end of MakeDelete
+{
+ char *qc, *stmt = NULL, cmd[8], from[8], tab[96], end[512];
+
+ stmt = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 64);
+ memset(end, 0, sizeof(end));
+
+ if (sscanf(Qrystr, "%s %s `%[^`]`%511c", cmd, from, tab, end) > 2 ||
+ sscanf(Qrystr, "%s %s \"%[^\"]\"%511c", cmd, from, tab, end) > 2)
+ qc = Ocp->GetQuoteChar();
+ else if (sscanf(Qrystr, "%s %s %s%511c", cmd, from, tab, end) > 2)
+ qc = (Quoted) ? Quote : "";
+ else {
+ strcpy(g->Message, "Cannot use this DELETE command");
+ return NULL;
+ } // endif sscanf
+
+ assert(!stricmp(cmd, "delete") && !stricmp(from, "from"));
+ strcat(strcat(strcat(strcpy(stmt, "DELETE FROM "), qc), TableName), qc);
+
+ if (*end) {
+ for (int i = 0; end[i]; i++)
+ if (end[i] == '`')
+ end[i] = *qc;
+
+ strcat(stmt, end);
+ } // endif end
+
+ return stmt;
+} // end of MakeDelete
#endif // 0
/***********************************************************************/
/* ResetSize: call by TDBMUL when calculating size estimate. */
/***********************************************************************/
void TDBODBC::ResetSize(void)
- {
+{
MaxSize = -1;
if (Ocp && Ocp->IsOpen())
Ocp->Close();
- } // end of ResetSize
+} // end of ResetSize
/***********************************************************************/
/* ODBC Cardinality: returns table size in number of rows. */
/***********************************************************************/
int TDBODBC::Cardinality(PGLOBAL g)
- {
+{
if (!g)
return (Mode == MODE_ANY && !Srcdef) ? 1 : 0;
@@ -526,7 +526,7 @@ int TDBODBC::Cardinality(PGLOBAL g)
Cardinal = 10; // To make MySQL happy
return Cardinal;
- } // end of Cardinality
+} // end of Cardinality
/***********************************************************************/
/* ODBC Access Method opening routine. */
@@ -535,7 +535,7 @@ int TDBODBC::Cardinality(PGLOBAL g)
/* join block of next table if it exists or else are discarted. */
/***********************************************************************/
bool TDBODBC::OpenDB(PGLOBAL g)
- {
+{
bool rc = true;
if (trace(1))
@@ -571,7 +571,7 @@ bool TDBODBC::OpenDB(PGLOBAL g)
Fpos = 0;
Curpos = 1;
return false;
- } // endif use
+ } // endif use
/*********************************************************************/
/* Open an ODBC connection for this table. */
@@ -593,7 +593,7 @@ bool TDBODBC::OpenDB(PGLOBAL g)
Use = USE_OPEN; // Do it now in case we are recursively called
/*********************************************************************/
- /* Make the command and allocate whatever is used for getting results. */
+ /* Make the command and allocate whatever is used for getting results*/
/*********************************************************************/
if (Mode == MODE_READ || Mode == MODE_READX) {
if (Memory > 1 && !Srcdef) {
@@ -624,7 +624,7 @@ bool TDBODBC::OpenDB(PGLOBAL g)
} else
return true;
- } // endif Memory
+ } // endif Memory
if (!(rc = MakeSQL(g, false))) {
for (PODBCCOL colp = (PODBCCOL)Columns; colp;
@@ -635,7 +635,7 @@ bool TDBODBC::OpenDB(PGLOBAL g)
rc = (Mode == MODE_READ)
? ((Rows = Ocp->ExecDirectSQL(Query->GetStr(), (PODBCCOL)Columns)) < 0)
: false;
- } // endif rc
+ } // endif rc
} else if (Mode == MODE_INSERT) {
if (!(rc = MakeInsert(g))) {
@@ -645,7 +645,7 @@ bool TDBODBC::OpenDB(PGLOBAL g)
} else
rc = BindParameters(g);
- } // endif rc
+ } // endif rc
} else if (Mode == MODE_UPDATE || Mode == MODE_DELETE) {
rc = false; // wait for CheckCond before calling MakeCommand(g);
@@ -655,30 +655,30 @@ bool TDBODBC::OpenDB(PGLOBAL g)
if (rc) {
Ocp->Close();
return true;
- } // endif rc
+ } // endif rc
/*********************************************************************/
/* Reset statistics values. */
/*********************************************************************/
num_read = num_there = num_eq[0] = num_eq[1] = 0;
return false;
- } // end of OpenDB
+} // end of OpenDB
#if 0
/***********************************************************************/
/* GetRecpos: return the position of last read record. */
/***********************************************************************/
int TDBODBC::GetRecpos(void)
- {
+{
return Fpos;
- } // end of GetRecpos
+} // end of GetRecpos
#endif // 0
/***********************************************************************/
/* SetRecpos: set the position of next read record. */
/***********************************************************************/
bool TDBODBC::SetRecpos(PGLOBAL g, int recpos)
- {
+{
if (Ocp->m_Full) {
Fpos = 0;
CurNum = recpos - 1;
@@ -696,14 +696,15 @@ bool TDBODBC::SetRecpos(PGLOBAL g, int recpos)
} // endif recpos
} else {
- strcpy(g->Message, "This action requires a scrollable cursor");
+ strcpy(g->Message,
+ "This action requires Memory setting or a scrollable cursor");
return true;
} // endif's
// Indicate the table position was externally set
Placed = true;
return false;
- } // end of SetRecpos
+} // end of SetRecpos
/***********************************************************************/
/* Data Base indexed read routine for ODBC access method. */
@@ -721,7 +722,7 @@ bool TDBODBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr)
Rows = Ocp->ExecDirectSQL((char*)Query->GetStr(), (PODBCCOL)Columns);
Mode = MODE_READ;
return (Rows < 0);
- } // endif key
+ } // endif key
return false;
} else {
@@ -737,7 +738,7 @@ bool TDBODBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr)
if ((To_CondFil = hc->CheckCond(g, To_CondFil, Cond)))
PlugSubAlloc(g, NULL, strlen(To_CondFil->Body) + 1);
- } // endif active_index
+ } // endif active_index
if (To_CondFil)
if (Query->Append(" AND ") || Query->Append(To_CondFil->Body)) {
@@ -762,7 +763,7 @@ bool TDBODBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr)
/* VRDNDOS: Data Base read routine for odbc access method. */
/***********************************************************************/
int TDBODBC::ReadDB(PGLOBAL g)
- {
+{
int rc;
if (trace(2))
@@ -784,7 +785,7 @@ int TDBODBC::ReadDB(PGLOBAL g)
} else
return RC_FX; // Error
- } // endif Mode
+ } // endif Mode
/*********************************************************************/
/* Now start the reading process. */
@@ -813,7 +814,7 @@ int TDBODBC::ReadDB(PGLOBAL g)
Qrp->Nblin++;
Fpos++; // Used for memory and pos
- } // endif rc
+ } // endif rc
} // endif Placed
@@ -821,13 +822,13 @@ int TDBODBC::ReadDB(PGLOBAL g)
htrc(" Read: Rbuf=%d rc=%d\n", Rbuf, rc);
return rc;
- } // end of ReadDB
+} // end of ReadDB
/***********************************************************************/
/* Data Base Insert write routine for ODBC access method. */
/***********************************************************************/
int TDBODBC::WriteDB(PGLOBAL g)
- {
+{
int n = Ocp->ExecuteSQL();
if (n < 0) {
@@ -837,13 +838,13 @@ int TDBODBC::WriteDB(PGLOBAL g)
AftRows += n;
return RC_OK;
- } // end of WriteDB
+} // end of WriteDB
/***********************************************************************/
/* Data Base delete line routine for ODBC access method. */
/***********************************************************************/
int TDBODBC::DeleteDB(PGLOBAL g, int irc)
- {
+{
if (irc == RC_FX) {
if (!Query && MakeCommand(g))
return RC_FX;
@@ -863,13 +864,13 @@ int TDBODBC::DeleteDB(PGLOBAL g, int irc)
} else
return RC_OK; // Ignore
- } // end of DeleteDB
+} // end of DeleteDB
/***********************************************************************/
/* Data Base close routine for ODBC access method. */
/***********************************************************************/
void TDBODBC::CloseDB(PGLOBAL g)
- {
+{
if (Ocp)
Ocp->Close();
@@ -877,7 +878,7 @@ void TDBODBC::CloseDB(PGLOBAL g)
if (trace(1))
htrc("ODBC CloseDB: closing %s\n", Name);
- } // end of CloseDB
+} // end of CloseDB
/* --------------------------- ODBCCOL ------------------------------- */
@@ -886,33 +887,33 @@ void TDBODBC::CloseDB(PGLOBAL g)
/***********************************************************************/
ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
: EXTCOL(cdp, tdbp, cprec, i, am)
- {
+{
// Set additional ODBC access method information for column.
Slen = 0;
StrLen = &Slen;
Sqlbuf = NULL;
- } // end of ODBCCOL constructor
+} // end of ODBCCOL constructor
/***********************************************************************/
/* ODBCCOL private constructor. */
/***********************************************************************/
ODBCCOL::ODBCCOL(void) : EXTCOL()
- {
+{
Slen = 0;
StrLen = &Slen;
Sqlbuf = NULL;
- } // end of ODBCCOL constructor
+} // end of ODBCCOL constructor
/***********************************************************************/
/* ODBCCOL constructor used for copying columns. */
/* tdbp is the pointer to the new table descriptor. */
/***********************************************************************/
ODBCCOL::ODBCCOL(ODBCCOL *col1, PTDB tdbp) : EXTCOL(col1, tdbp)
- {
+{
Slen = col1->Slen;
StrLen = col1->StrLen;
Sqlbuf = col1->Sqlbuf;
- } // end of ODBCCOL copy constructor
+} // end of ODBCCOL copy constructor
/***********************************************************************/
/* ReadColumn: when SQLFetch is used there is nothing to do as the */
@@ -920,7 +921,7 @@ ODBCCOL::ODBCCOL(ODBCCOL *col1, PTDB tdbp) : EXTCOL(col1, tdbp)
/* when calculating MaxSize (Bufp is NULL even when Rows is not). */
/***********************************************************************/
void ODBCCOL::ReadColumn(PGLOBAL g)
- {
+{
PTDBODBC tdbp = (PTDBODBC)To_Tdb;
int i = tdbp->Fpos - 1, n = tdbp->CurNum;
@@ -953,7 +954,7 @@ void ODBCCOL::ReadColumn(PGLOBAL g)
else
Value->SetValue_pvblk(Blkp, n);
- } // endif Bufp
+ } // endif Bufp
if (Buf_Type == TYPE_DATE) {
struct tm dbtime;
@@ -980,7 +981,7 @@ void ODBCCOL::ReadColumn(PGLOBAL g)
htrc("ODBC Column %s: rows=%d buf=%p type=%d value=%s\n",
Name, tdbp->Rows, Bufp, Buf_Type, Value->GetCharString(buf));
- } // endif trace
+ } // endif trace
put:
if (tdbp->Memory != 2)
@@ -997,7 +998,7 @@ void ODBCCOL::ReadColumn(PGLOBAL g)
} else
Crp->Kdata->SetValue(Value, i);
- } // end of ReadColumn
+} // end of ReadColumn
/***********************************************************************/
/* AllocateBuffers: allocate the extended buffer for SQLExtendedFetch */
@@ -1005,7 +1006,7 @@ void ODBCCOL::ReadColumn(PGLOBAL g)
/* for the ending null character. */
/***********************************************************************/
void ODBCCOL::AllocateBuffers(PGLOBAL g, int rows)
- {
+{
if (Buf_Type == TYPE_DATE)
Sqlbuf = (TIMESTAMP_STRUCT*)PlugSubAlloc(g, NULL,
sizeof(TIMESTAMP_STRUCT));
@@ -1019,31 +1020,31 @@ void ODBCCOL::AllocateBuffers(PGLOBAL g, int rows)
Blkp = AllocValBlock(g, NULL, Buf_Type, rows, GetBuflen(),
GetScale(), true, false, false);
Bufp = Blkp->GetValPointer();
- } // endelse
+ } // endelse
if (rows > 1)
StrLen = (SQLLEN *)PlugSubAlloc(g, NULL, rows * sizeof(SQLLEN));
- } // end of AllocateBuffers
+} // end of AllocateBuffers
/***********************************************************************/
/* Returns the buffer to use for Fetch or Extended Fetch. */
/***********************************************************************/
void *ODBCCOL::GetBuffer(DWORD rows)
- {
+{
if (rows && To_Tdb) {
assert(rows == (DWORD)((TDBODBC*)To_Tdb)->Rows);
return Bufp;
} else
return (Buf_Type == TYPE_DATE) ? Sqlbuf : Value->GetTo_Val();
- } // end of GetBuffer
+} // end of GetBuffer
/***********************************************************************/
/* Returns the buffer length to use for Fetch or Extended Fetch. */
/***********************************************************************/
SWORD ODBCCOL::GetBuflen(void)
- {
+{
SWORD flen;
switch (Buf_Type) {
@@ -1059,13 +1060,13 @@ SWORD ODBCCOL::GetBuflen(void)
} // endswitch Buf_Type
return flen;
- } // end of GetBuflen
+} // end of GetBuflen
/***********************************************************************/
/* WriteColumn: make sure the bind buffer is updated. */
/***********************************************************************/
void ODBCCOL::WriteColumn(PGLOBAL g)
- {
+{
/*********************************************************************/
/* Do convert the column value if necessary. */
/*********************************************************************/
@@ -1095,7 +1096,7 @@ void ODBCCOL::WriteColumn(PGLOBAL g)
*StrLen = (Value->IsNull()) ? SQL_NULL_DATA :
(IsTypeChar(Buf_Type)) ? SQL_NTS : 0;
- } // end of WriteColumn
+} // end of WriteColumn
/* -------------------------- Class TDBXDBC -------------------------- */
@@ -1119,7 +1120,7 @@ TDBXDBC::TDBXDBC(PTDBXDBC tdbp) : TDBODBC(tdbp)
} // end of TDBXDBC copy constructor
PTDB TDBXDBC::Clone(PTABS t)
- {
+{
PTDB tp;
PXSRCCOL cp1, cp2;
PGLOBAL g = t->G; // Is this really useful ???
@@ -1129,29 +1130,29 @@ PTDB TDBXDBC::Clone(PTABS t)
for (cp1 = (PXSRCCOL)Columns; cp1; cp1 = (PXSRCCOL)cp1->GetNext()) {
cp2 = new(g) XSRCCOL(cp1, tp); // Make a copy
NewPointer(t, cp1, cp2);
- } // endfor cp1
+ } // endfor cp1
return tp;
- } // end of CopyOne
+} // end of CopyOne
/***********************************************************************/
/* Allocate XSRC column description block. */
/***********************************************************************/
PCOL TDBXDBC::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
- {
+{
PXSRCCOL colp = new(g) XSRCCOL(cdp, this, cprec, n);
if (!colp->Flag)
Cmdcol = colp->GetName();
return colp;
- } // end of MakeCol
+} // end of MakeCol
/***********************************************************************/
/* MakeCMD: make the SQL statement to send to ODBC connection. */
/***********************************************************************/
PCMD TDBXDBC::MakeCMD(PGLOBAL g)
- {
+{
PCMD xcmd = NULL;
if (To_CondFil) {
@@ -1171,14 +1172,14 @@ PCMD TDBXDBC::MakeCMD(PGLOBAL g)
xcmd = new(g) CMD(g, Srcdef);
return xcmd;
- } // end of MakeCMD
+} // end of MakeCMD
#if 0
/***********************************************************************/
/* ODBC Bind Parameter function. */
/***********************************************************************/
bool TDBXDBC::BindParameters(PGLOBAL g)
- {
+{
PODBCCOL colp;
for (colp = (PODBCCOL)Columns; colp; colp = (PODBCCOL)colp->Next) {
@@ -1190,19 +1191,19 @@ bool TDBXDBC::BindParameters(PGLOBAL g)
} // endfor colp
return false;
- } // end of BindParameters
+} // end of BindParameters
#endif // 0
/***********************************************************************/
/* XDBC GetMaxSize: returns table size (not always one row). */
/***********************************************************************/
int TDBXDBC::GetMaxSize(PGLOBAL g)
- {
+{
if (MaxSize < 0)
MaxSize = 10; // Just a guess
return MaxSize;
- } // end of GetMaxSize
+} // end of GetMaxSize
/***********************************************************************/
/* ODBC Access Method opening routine. */
@@ -1211,7 +1212,7 @@ int TDBXDBC::GetMaxSize(PGLOBAL g)
/* join block of next table if it exists or else are discarted. */
/***********************************************************************/
bool TDBXDBC::OpenDB(PGLOBAL g)
- {
+{
bool rc = false;
if (trace(1))
@@ -1221,7 +1222,7 @@ bool TDBXDBC::OpenDB(PGLOBAL g)
if (Use == USE_OPEN) {
strcpy(g->Message, "Multiple execution is not allowed");
return true;
- } // endif use
+ } // endif use
/*********************************************************************/
/* Open an ODBC connection for this table. */
@@ -1243,7 +1244,7 @@ bool TDBXDBC::OpenDB(PGLOBAL g)
if (Mode != MODE_READ && Mode != MODE_READX) {
strcpy(g->Message, "No INSERT/DELETE/UPDATE of XDBC tables");
return true;
- } // endif Mode
+ } // endif Mode
/*********************************************************************/
/* Get the command to execute. */
@@ -1256,13 +1257,13 @@ bool TDBXDBC::OpenDB(PGLOBAL g)
Rows = 1;
return false;
- } // end of OpenDB
+} // end of OpenDB
/***********************************************************************/
/* ReadDB: Data Base read routine for xdbc access method. */
/***********************************************************************/
int TDBXDBC::ReadDB(PGLOBAL g)
- {
+{
if (Cmdlist) {
if (!Query)
Query = new(g)STRING(g, 0, Cmdlist->Cmd);
@@ -1280,25 +1281,25 @@ int TDBXDBC::ReadDB(PGLOBAL g)
return RC_EF;
} // endif Cmdlist
- } // end of ReadDB
+} // end of ReadDB
/***********************************************************************/
-/* Data Base delete line routine for ODBC access method. */
+/* Data Base write line routine for XDBC access method. */
/***********************************************************************/
int TDBXDBC::WriteDB(PGLOBAL g)
- {
+{
strcpy(g->Message, "Execsrc tables are read only");
return RC_FX;
- } // end of DeleteDB
+} // end of DeleteDB
/***********************************************************************/
-/* Data Base delete line routine for ODBC access method. */
+/* Data Base delete line routine for XDBC access method. */
/***********************************************************************/
int TDBXDBC::DeleteDB(PGLOBAL g, int irc)
- {
+{
strcpy(g->Message, MSG(NO_ODBC_DELETE));
return RC_FX;
- } // end of DeleteDB
+} // end of DeleteDB
/* --------------------------- XSRCCOL ------------------------------- */
@@ -1307,25 +1308,25 @@ int TDBXDBC::DeleteDB(PGLOBAL g, int irc)
/***********************************************************************/
XSRCCOL::XSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
: ODBCCOL(cdp, tdbp, cprec, i, am)
- {
+{
// Set additional ODBC access method information for column.
Flag = cdp->GetOffset();
- } // end of XSRCCOL constructor
+} // end of XSRCCOL constructor
/***********************************************************************/
/* XSRCCOL constructor used for copying columns. */
/* tdbp is the pointer to the new table descriptor. */
/***********************************************************************/
XSRCCOL::XSRCCOL(XSRCCOL *col1, PTDB tdbp) : ODBCCOL(col1, tdbp)
- {
+{
Flag = col1->Flag;
- } // end of XSRCCOL copy constructor
+} // end of XSRCCOL copy constructor
/***********************************************************************/
/* ReadColumn: set column value according to Flag. */
/***********************************************************************/
void XSRCCOL::ReadColumn(PGLOBAL g)
- {
+{
PTDBXDBC tdbp = (PTDBXDBC)To_Tdb;
switch (Flag) {
@@ -1335,15 +1336,15 @@ void XSRCCOL::ReadColumn(PGLOBAL g)
default: Value->SetValue_psz("Invalid Flag"); break;
} // endswitch Flag
- } // end of ReadColumn
+} // end of ReadColumn
/***********************************************************************/
/* WriteColumn: Should never be called. */
/***********************************************************************/
void XSRCCOL::WriteColumn(PGLOBAL g)
- {
+{
// Should never be called
- } // end of WriteColumn
+} // end of WriteColumn
/* ---------------------------TDBDRV class --------------------------- */
@@ -1351,9 +1352,9 @@ void XSRCCOL::WriteColumn(PGLOBAL g)
/* GetResult: Get the list of ODBC drivers. */
/***********************************************************************/
PQRYRES TDBDRV::GetResult(PGLOBAL g)
- {
+{
return ODBCDrivers(g, Maxres, false);
- } // end of GetResult
+} // end of GetResult
/* ---------------------------TDBSRC class --------------------------- */
@@ -1361,9 +1362,9 @@ PQRYRES TDBDRV::GetResult(PGLOBAL g)
/* GetResult: Get the list of ODBC data sources. */
/***********************************************************************/
PQRYRES TDBSRC::GetResult(PGLOBAL g)
- {
+{
return ODBCDataSources(g, Maxres, false);
- } // end of GetResult
+} // end of GetResult
/* ---------------------------TDBOTB class --------------------------- */
@@ -1371,7 +1372,7 @@ PQRYRES TDBSRC::GetResult(PGLOBAL g)
/* TDBOTB class constructor. */
/***********************************************************************/
TDBOTB::TDBOTB(PODEF tdp) : TDBDRV(tdp)
- {
+{
Dsn = tdp->GetConnect();
Schema = tdp->GetTabschema();
Tab = tdp->GetTabname();
@@ -1381,15 +1382,15 @@ TDBOTB::TDBOTB(PODEF tdp) : TDBDRV(tdp)
Ops.Cto = tdp->Cto;
Ops.Qto = tdp->Qto;
Ops.UseCnc = tdp->UseCnc;
- } // end of TDBOTB constructor
+} // end of TDBOTB constructor
/***********************************************************************/
/* GetResult: Get the list of ODBC tables. */
/***********************************************************************/
PQRYRES TDBOTB::GetResult(PGLOBAL g)
- {
+{
return ODBCTables(g, Dsn, Schema, Tab, Tabtyp, Maxres, false, &Ops);
- } // end of GetResult
+} // end of GetResult
/* ---------------------------TDBOCL class --------------------------- */
@@ -1405,8 +1406,8 @@ TDBOCL::TDBOCL(PODEF tdp) : TDBOTB(tdp)
/* GetResult: Get the list of ODBC table columns. */
/***********************************************************************/
PQRYRES TDBOCL::GetResult(PGLOBAL g)
- {
+{
return ODBCColumns(g, Dsn, Schema, Tab, Colpat, Maxres, false, &Ops);
- } // end of GetResult
+} // end of GetResult
/* ------------------------ End of Tabodbc --------------------------- */
diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp
index c96e0844497..d808bd5ecd4 100644
--- a/storage/connect/tabxml.cpp
+++ b/storage/connect/tabxml.cpp
@@ -163,8 +163,11 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info)
return NULL;
tdp->Tabname = tab;
+ tdp->Tabname = (char*)GetStringTableOption(g, topt, "Tabname", tab);
+ tdp->Rowname = (char*)GetStringTableOption(g, topt, "Rownode", NULL);
tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false);
tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL);
+ tdp->Skip = GetBooleanTableOption(g, topt, "Skipnull", false);
if (!(op = GetStringTableOption(g, topt, "Xmlsup", NULL)))
#if defined(__WIN__)
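XMLColumns now honours three additional table options during discovery: Tabname (the table node to scan), Rownode (the row element) and Skipnull (omit columns that are empty in every sampled row). A minimal sketch of the lookups, using the option accessors already present in the hunk:

// Hedged sketch of the option retrieval added above.
tdp->Tabname = (char*)GetStringTableOption(g, topt, "Tabname", tab);  // defaults to the table name
tdp->Rowname = (char*)GetStringTableOption(g, topt, "Rownode", NULL); // NULL: auto-detected later
tdp->Skip    = GetBooleanTableOption(g, topt, "Skipnull", false);     // keep empty columns by default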
@@ -280,7 +283,9 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info)
if (!vp->atp)
node = vp->nl->GetItem(g, vp->k++, tdp->Usedom ? node : NULL);
- strncat(fmt, colname, XLEN(fmt));
+ if (!j)
+ strncat(fmt, colname, XLEN(fmt));
+
strncat(fmt, "/", XLEN(fmt));
strncat(xcol->Name, "_", XLEN(xcol->Name));
j++;
@@ -302,6 +307,7 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info)
case RC_INFO:
PushWarning(g, txmp);
case RC_OK:
+ xcol->Cbn = !strlen(buf);
break;
default:
goto err;
@@ -327,9 +333,9 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info)
xcp->Len = MY_MAX(xcp->Len, xcol->Len);
xcp->Scale = MY_MAX(xcp->Scale, xcol->Scale);
- xcp->Cbn |= xcol->Cbn;
+ xcp->Cbn |= (xcol->Cbn || !xcol->Len);
xcp->Found = true;
- } else {
+ } else if(xcol->Len || !tdp->Skip) {
// New column
xcp = new(g) XMCOL(g, xcol, fmt, i);
length[0] = MY_MAX(length[0], strlen(xcol->Name));
@@ -344,7 +350,8 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info)
n++;
} // endif xcp
- pxcp = xcp;
+ if (xcp)
+ pxcp = xcp;
if (vp->atp)
vp->atp = vp->atp->GetNext(g);
@@ -445,6 +452,7 @@ XMLDEF::XMLDEF(void)
Usedom = false;
Zipped = false;
Mulentries = false;
+ Skip = false;
} // end of XMLDEF constructor
/***********************************************************************/
@@ -682,6 +690,14 @@ PTDB TDBXML::Clone(PTABS t)
} // end of Clone
/***********************************************************************/
+/* Must not be in tabxml.h because of OEM tables */
+/***********************************************************************/
+const CHARSET_INFO *TDBXML::data_charset()
+{
+ return &my_charset_utf8_general_ci;
+} // end of data_charset
+
+/***********************************************************************/
/* Allocate XML column description block. */
/***********************************************************************/
PCOL TDBXML::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
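data_charset is moved out of tabxml.h so that OEM modules including the header no longer need my_charset_utf8_general_ci and the server headers that define it; the class keeps the declaration and the engine keeps the definition. Both halves as they appear in this patch:

// tabxml.h — declaration only, no charset dependency for OEM builds:
virtual const CHARSET_INFO *data_charset();

// tabxml.cpp — definition stays inside the engine, as added above:
const CHARSET_INFO *TDBXML::data_charset()
{
  return &my_charset_utf8_general_ci;
} // end of data_charset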
@@ -806,127 +822,141 @@ bool TDBXML::Initialize(PGLOBAL g)
} // endif Bufdone
#if !defined(UNIX)
- if (!Root) try {
+ if (!Root) try {
#else
- if (!Root) {
+ if (!Root) {
#endif
- char tabpath[64], filename[_MAX_PATH];
-
- // We used the file name relative to recorded datapath
- PlugSetPath(filename, Xfile, GetPath());
-
- // Load or re-use the table file
- rc = LoadTableFile(g, filename);
-
- if (rc == RC_OK) {
- // Get root node
- if (!(Root = Docp->GetRoot(g))) {
- // This should never happen as load should have failed
- strcpy(g->Message, MSG(EMPTY_DOC));
- goto error;
- } // endif Root
-
- // If tabname is not an Xpath,
- // construct one that will find it anywhere
- if (!strchr(Tabname, '/'))
- strcat(strcpy(tabpath, "//"), Tabname);
- else
- strcpy(tabpath, Tabname);
-
- // Evaluate table xpath
- if ((TabNode = Root->SelectSingleNode(g, tabpath))) {
- if (TabNode->GetType() != XML_ELEMENT_NODE) {
- sprintf(g->Message, MSG(BAD_NODE_TYPE), TabNode->GetType());
- goto error;
- } // endif Type
-
- } else if (Mode == MODE_INSERT && XmlDB) {
- // We are adding a new table to a multi-table file
-
- // If XmlDB is not an Xpath,
- // construct one that will find it anywhere
- if (!strchr(XmlDB, '/'))
- strcat(strcpy(tabpath, "//"), XmlDB);
- else
- strcpy(tabpath, XmlDB);
-
- if (!(DBnode = Root->SelectSingleNode(g, tabpath))) {
- // DB node does not exist yet; we cannot create it
- // because we don't know where it should be placed
- sprintf(g->Message, MSG(MISSING_NODE), XmlDB, Xfile);
- goto error;
- } // endif DBnode
-
- if (!(TabNode = DBnode->AddChildNode(g, Tabname))) {
- sprintf(g->Message, MSG(FAIL_ADD_NODE), Tabname);
- goto error;
- } // endif TabNode
-
- DBnode->AddText(g, "\n");
- } else
- TabNode = Root; // Try this ?
-
- } else if (rc == RC_NF || rc == RC_EF) {
- // The XML file does not exist or is void
- if (Mode == MODE_INSERT) {
- // New Document
- char buf[64];
-
- // Create the XML node
- if (Docp->NewDoc(g, "1.0")) {
- strcpy(g->Message, MSG(NEW_DOC_FAILED));
- goto error;
- } // endif NewDoc
-
- // Now we can link the Xblock
- To_Xb = Docp->LinkXblock(g, Mode, rc, filename);
-
- // Add a CONNECT comment node
- strcpy(buf, " Created by the MariaDB CONNECT Storage Engine");
- Docp->AddComment(g, buf);
-
- if (XmlDB) {
- // This is a multi-table file
- DBnode = Root = Docp->NewRoot(g, XmlDB);
- DBnode->AddText(g, "\n");
- TabNode = DBnode->AddChildNode(g, Tabname);
- DBnode->AddText(g, "\n");
- } else
- TabNode = Root = Docp->NewRoot(g, Tabname);
-
- if (TabNode == NULL || Root == NULL) {
- strcpy(g->Message, MSG(XML_INIT_ERROR));
- goto error;
- } else if (SetTabNode(g))
- goto error;
-
- } else {
- sprintf(g->Message, MSG(FILE_UNFOUND), Xfile);
-
- if (Mode == MODE_READ) {
- PushWarning(g, this);
- Void = true;
- } // endif Mode
-
- goto error;
- } // endif Mode
-
- } else if (rc == RC_INFO) {
- // Loading failed
- sprintf(g->Message, MSG(LOADING_FAILED), Xfile);
- goto error;
- } else // (rc == RC_FX)
- goto error;
-
- // Get row node list
- if (Rowname)
- Nlist = TabNode->SelectNodes(g, Rowname);
- else
- Nlist = TabNode->GetChildElements(g);
-
- Docp->SetNofree(true); // For libxml2
+ char tabpath[64], filename[_MAX_PATH];
+
+ // We used the file name relative to recorded datapath
+ PlugSetPath(filename, Xfile, GetPath());
+
+ // Load or re-use the table file
+ rc = LoadTableFile(g, filename);
+
+ if (rc == RC_OK) {
+ // Get root node
+ if (!(Root = Docp->GetRoot(g))) {
+ // This should never happen as load should have failed
+ strcpy(g->Message, MSG(EMPTY_DOC));
+ goto error;
+ } // endif Root
+
+ // If tabname is not an Xpath,
+ // construct one that will find it anywhere
+ if (!strchr(Tabname, '/'))
+ strcat(strcpy(tabpath, "//"), Tabname);
+ else
+ strcpy(tabpath, Tabname);
+
+ // Evaluate table xpath
+ if ((TabNode = Root->SelectSingleNode(g, tabpath))) {
+ if (TabNode->GetType() != XML_ELEMENT_NODE) {
+ sprintf(g->Message, MSG(BAD_NODE_TYPE), TabNode->GetType());
+ goto error;
+ } // endif Type
+
+ } else if (Mode == MODE_INSERT && XmlDB) {
+ // We are adding a new table to a multi-table file
+
+ // If XmlDB is not an Xpath,
+ // construct one that will find it anywhere
+ if (!strchr(XmlDB, '/'))
+ strcat(strcpy(tabpath, "//"), XmlDB);
+ else
+ strcpy(tabpath, XmlDB);
+
+ if (!(DBnode = Root->SelectSingleNode(g, tabpath))) {
+ // DB node does not exist yet; we cannot create it
+ // because we don't know where it should be placed
+ sprintf(g->Message, MSG(MISSING_NODE), XmlDB, Xfile);
+ goto error;
+ } // endif DBnode
+
+ if (!(TabNode = DBnode->AddChildNode(g, Tabname))) {
+ sprintf(g->Message, MSG(FAIL_ADD_NODE), Tabname);
+ goto error;
+ } // endif TabNode
+
+ DBnode->AddText(g, "\n");
+ } else {
+ TabNode = Root; // Try this ?
+ Tabname = TabNode->GetName(g);
+ } // endif's
+
+ } else if (rc == RC_NF || rc == RC_EF) {
+ // The XML file does not exist or is void
+ if (Mode == MODE_INSERT) {
+ // New Document
+ char buf[64];
+
+ // Create the XML node
+ if (Docp->NewDoc(g, "1.0")) {
+ strcpy(g->Message, MSG(NEW_DOC_FAILED));
+ goto error;
+ } // endif NewDoc
+
+ // Now we can link the Xblock
+ To_Xb = Docp->LinkXblock(g, Mode, rc, filename);
+
+ // Add a CONNECT comment node
+ strcpy(buf, " Created by the MariaDB CONNECT Storage Engine");
+ Docp->AddComment(g, buf);
+
+ if (XmlDB) {
+ // This is a multi-table file
+ DBnode = Root = Docp->NewRoot(g, XmlDB);
+ DBnode->AddText(g, "\n");
+ TabNode = DBnode->AddChildNode(g, Tabname);
+ DBnode->AddText(g, "\n");
+ } else
+ TabNode = Root = Docp->NewRoot(g, Tabname);
+
+ if (TabNode == NULL || Root == NULL) {
+ strcpy(g->Message, MSG(XML_INIT_ERROR));
+ goto error;
+ } else if (SetTabNode(g))
+ goto error;
+
+ } else {
+ sprintf(g->Message, MSG(FILE_UNFOUND), Xfile);
+
+ if (Mode == MODE_READ) {
+ PushWarning(g, this);
+ Void = true;
+ } // endif Mode
+
+ goto error;
+ } // endif Mode
+
+ } else if (rc == RC_INFO) {
+ // Loading failed
+ sprintf(g->Message, MSG(LOADING_FAILED), Xfile);
+ goto error;
+ } else // (rc == RC_FX)
+ goto error;
+
+ if (!Rowname) {
+ for (PXNODE n = TabNode->GetChild(g); n; n = n->GetNext(g))
+ if (n->GetType() == XML_ELEMENT_NODE) {
+ Rowname = n->GetName(g);
+ break;
+ } // endif Type
+
+ if (!Rowname)
+ Rowname = TabNode->GetName(g);
+ } // endif Rowname
+
+ // Get row node list
+ if (strcmp(Rowname, Tabname))
+ Nlist = TabNode->SelectNodes(g, Rowname);
+ else
+ Nrow = 1;
+
+
+ Docp->SetNofree(true); // For libxml2
#if defined(__WIN__)
- } catch(_com_error e) {
+ } catch (_com_error e) {
// We come here if a DOM command threw an error
char buf[128];
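Besides the re-indentation, TDBXML::Initialize gains a fallback for a missing Rowname: the first element child of the table node is used, or the table node itself, and when row and table node coincide the table is read as a single row (Nrow = 1) instead of selecting a node list. The fallback with explanatory comments:

// Hedged sketch of the Rowname fallback added above.
if (!Rowname) {
  for (PXNODE n = TabNode->GetChild(g); n; n = n->GetNext(g))
    if (n->GetType() == XML_ELEMENT_NODE) {
      Rowname = n->GetName(g);          // first element child becomes the row node
      break;
    }

  if (!Rowname)
    Rowname = TabNode->GetName(g);      // degenerate case: the table node itself
}

if (strcmp(Rowname, Tabname))
  Nlist = TabNode->SelectNodes(g, Rowname);  // usual case: list of row nodes
else
  Nrow = 1;                                  // table node and row node coincide: one row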
@@ -1213,10 +1243,14 @@ int TDBXML::ReadDB(PGLOBAL g)
htrc("TDBXML ReadDB: Irow=%d RowNode=%p\n", Irow, RowNode);
// Get the new row node
- if ((RowNode = Nlist->GetItem(g, Irow, RowNode)) == NULL) {
- sprintf(g->Message, MSG(MISSING_ROWNODE), Irow);
- return RC_FX;
- } // endif RowNode
+ if (Nlist) {
+ if ((RowNode = Nlist->GetItem(g, Irow, RowNode)) == NULL) {
+ sprintf(g->Message, MSG(MISSING_ROWNODE), Irow);
+ return RC_FX;
+ } // endif RowNode
+
+ } else
+ RowNode = TabNode;
if (Colname && Coltype == 2)
Clist = RowNode->SelectNodes(g, Colname, Clist);
@@ -1271,6 +1305,7 @@ int TDBXML::WriteDB(PGLOBAL g)
/***********************************************************************/
int TDBXML::DeleteDB(PGLOBAL g, int irc)
{
+ // TODO: Handle null Nlist
if (irc == RC_FX) {
// Delete all rows
for (Irow = 0; Irow < Nrow; Irow++)
@@ -2209,8 +2244,9 @@ void XPOSCOL::WriteColumn(PGLOBAL g)
TDBXCT::TDBXCT(PXMLDEF tdp) : TDBCAT(tdp)
{
Topt = tdp->GetTopt();
- Db = (char*)tdp->GetDB();
- Tabn = tdp->Tabname;
+ //Db = (char*)tdp->GetDB();
+ Db = (char*)tdp->Schema;
+ Tabn = tdp->Tabname;
} // end of TDBXCT constructor
/***********************************************************************/
diff --git a/storage/connect/tabxml.h b/storage/connect/tabxml.h
index f55b7d98de7..fb3913f08ea 100644
--- a/storage/connect/tabxml.h
+++ b/storage/connect/tabxml.h
@@ -9,6 +9,8 @@ typedef class XMLDEF *PXMLDEF;
typedef class TDBXML *PTDBXML;
typedef class XMLCOL *PXMLCOL;
+DllExport PQRYRES XMLColumns(PGLOBAL, char *, char *, PTOS, bool);
+
/* --------------------------- XML classes --------------------------- */
/***********************************************************************/
@@ -50,6 +52,7 @@ class DllExport XMLDEF : public TABDEF { /* Logical table description */
bool Usedom; /* True: DOM, False: libxml2 */
bool Zipped; /* True: Zipped XML file(s) */
bool Mulentries; /* True: multiple entries in zip file*/
+ bool Skip; /* Skip null columns */
}; // end of XMLDEF
#if defined(INCLUDE_TDBXML)
@@ -100,8 +103,7 @@ class DllExport TDBXML : public TDBASE {
virtual int DeleteDB(PGLOBAL g, int irc);
virtual void CloseDB(PGLOBAL g);
virtual int CheckWrite(PGLOBAL g) {Checked = true; return 0;}
- virtual const CHARSET_INFO *data_charset()
- {return &my_charset_utf8_general_ci;}
+ virtual const CHARSET_INFO *data_charset();
protected:
// Members
diff --git a/storage/connect/user_connect.cc b/storage/connect/user_connect.cc
index e2d3b664aeb..a2a8faf9b38 100644
--- a/storage/connect/user_connect.cc
+++ b/storage/connect/user_connect.cc
@@ -107,7 +107,7 @@ bool user_connect::user_init()
g= PlugInit(NULL, worksize);
// Check whether the initialization is complete
- if (!g || !g->Sarea || PlugSubSet(g, g->Sarea, g->Sarea_Size)
+ if (!g || !g->Sarea || PlugSubSet(g->Sarea, g->Sarea_Size)
|| !(dup= PlgMakeUser(g))) {
if (g)
printf("%s\n", g->Message);
@@ -172,7 +172,7 @@ bool user_connect::CheckCleanup(bool force)
} // endif worksize
- PlugSubSet(g, g->Sarea, g->Sarea_Size);
+ PlugSubSet(g->Sarea, g->Sarea_Size);
g->Xchk = NULL;
g->Createas = 0;
g->Alchecked = 0;
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index 11dceacf592..eb29d46daff 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -581,6 +581,8 @@ buf_page_is_corrupted(
ulint checksum_field1;
ulint checksum_field2;
+ ib_uint32_t crc32 = ULINT32_UNDEFINED;
+ bool crc32_inited = false;
if (!zip_size
&& memcmp(read_buf + FIL_PAGE_LSN + 4,
@@ -660,120 +662,124 @@ buf_page_is_corrupted(
return(FALSE);
}
- ulint page_no = mach_read_from_4(read_buf + FIL_PAGE_OFFSET);
- ulint space_id = mach_read_from_4(read_buf + FIL_PAGE_SPACE_ID);
const srv_checksum_algorithm_t curr_algo =
static_cast<srv_checksum_algorithm_t>(srv_checksum_algorithm);
switch (curr_algo) {
- case SRV_CHECKSUM_ALGORITHM_CRC32:
case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
-
- if (buf_page_is_checksum_valid_crc32(read_buf,
- checksum_field1, checksum_field2)) {
- return(FALSE);
+ if (buf_page_is_checksum_valid_crc32(read_buf, checksum_field1,
+ checksum_field2)) {
+ return FALSE;
}
- if (buf_page_is_checksum_valid_none(read_buf,
- checksum_field1, checksum_field2)) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_CRC32) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_NONE,
- space_id, page_no);
- }
+ return TRUE;
+ case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
+ if (buf_page_is_checksum_valid_innodb(read_buf, checksum_field1,
+ checksum_field2)) {
+ return FALSE;
+ }
- return(FALSE);
+ return TRUE;
+ case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
+ if (buf_page_is_checksum_valid_none(read_buf, checksum_field1,
+ checksum_field2)) {
+ return FALSE;
}
- if (buf_page_is_checksum_valid_innodb(read_buf,
- checksum_field1, checksum_field2)) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_CRC32) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_INNODB,
- space_id, page_no);
- }
+ return TRUE;
+ case SRV_CHECKSUM_ALGORITHM_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_INNODB:
+ /* Verify old versions of InnoDB only stored 8 byte lsn to the
+ start and end of the page. */
- return(FALSE);
- }
+ /* Since innodb_checksum_algorithm is not strict_* allow
+ any of the algos to match for the old field. */
- return(TRUE);
+ if (checksum_field2
+ != mach_read_from_4(read_buf + FIL_PAGE_LSN)
+ && checksum_field2 != BUF_NO_CHECKSUM_MAGIC) {
- case SRV_CHECKSUM_ALGORITHM_INNODB:
- case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
+ if (srv_checksum_algorithm
+ == SRV_CHECKSUM_ALGORITHM_CRC32) {
- if (buf_page_is_checksum_valid_innodb(read_buf,
- checksum_field1, checksum_field2)) {
- return(FALSE);
- }
+ crc32 = buf_calc_page_crc32(read_buf);
+ crc32_inited = true;
- if (buf_page_is_checksum_valid_none(read_buf,
- checksum_field1, checksum_field2)) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_NONE,
- space_id, page_no);
- }
+ if (checksum_field2 != crc32
+ && checksum_field2
+ != buf_calc_page_old_checksum(read_buf)) {
+ return TRUE;
+ }
+ } else {
+ ut_ad(srv_checksum_algorithm
+ == SRV_CHECKSUM_ALGORITHM_INNODB);
- return(FALSE);
- }
+ if (checksum_field2
+ != buf_calc_page_old_checksum(read_buf)) {
- if (buf_page_is_checksum_valid_crc32(read_buf,
- checksum_field1, checksum_field2)) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_CRC32,
- space_id, page_no);
- }
+ crc32 = buf_calc_page_crc32(read_buf);
+ crc32_inited = TRUE;
- return(FALSE);
+ if (checksum_field2 != crc32) {
+ return TRUE;
+ }
+ }
+ }
}
- return(TRUE);
+ /* Old field is fine, check the new field */
- case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
+ if (checksum_field1 != 0
+ && checksum_field1 != BUF_NO_CHECKSUM_MAGIC) {
- if (buf_page_is_checksum_valid_none(read_buf,
- checksum_field1, checksum_field2)) {
- return(FALSE);
- }
+ if (srv_checksum_algorithm
+ == SRV_CHECKSUM_ALGORITHM_CRC32) {
- if (buf_page_is_checksum_valid_crc32(read_buf,
- checksum_field1, checksum_field2)) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_CRC32,
- space_id, page_no);
- return(FALSE);
+ if (!crc32_inited) {
+ crc32 = buf_calc_page_crc32(read_buf);
+ crc32_inited = TRUE;
+ }
+
+ if (checksum_field1 != crc32
+ && checksum_field1
+ != buf_calc_page_new_checksum(read_buf)) {
+ return TRUE;
+ }
+ } else {
+ ut_ad(srv_checksum_algorithm
+ == SRV_CHECKSUM_ALGORITHM_INNODB);
+
+ if (checksum_field1
+ != buf_calc_page_new_checksum(read_buf)) {
+
+ if (!crc32_inited) {
+ crc32 = buf_calc_page_crc32(
+ read_buf);
+ crc32_inited = TRUE;
+ }
+
+ if (checksum_field1 != crc32) {
+ return TRUE;
+ }
+ }
+ }
}
- if (buf_page_is_checksum_valid_innodb(read_buf,
- checksum_field1, checksum_field2)) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_INNODB,
- space_id, page_no);
- return(FALSE);
+ if (crc32_inited
+ && ((checksum_field1 == crc32
+ && checksum_field2 != crc32)
+ || (checksum_field1 != crc32
+ && checksum_field2 == crc32))) {
+ return TRUE;
}
- return(TRUE);
+ break;
case SRV_CHECKSUM_ALGORITHM_NONE:
- /* should have returned FALSE earlier */
- break;
- /* no default so the compiler will emit a warning if new enum
- is added and not handled here */
+ ut_error;
}
- ut_error;
- return(FALSE);
+ return FALSE;
}
/** Dump a page to stderr.
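The rewritten buf_page_is_corrupted above computes the page CRC-32 at most once per call: the value is cached in crc32/crc32_inited, reused for both the trailer (old) and header (new) checksum fields, and a final test rejects pages where only one of the two fields matches the CRC-32. The caching pattern in isolation, not the full validation logic:

// Hedged sketch of the compute-once CRC-32 pattern used above.
ib_uint32_t crc32 = ULINT32_UNDEFINED;
bool        crc32_inited = false;

// wherever a CRC-32 comparison is needed:
if (!crc32_inited) {
	crc32 = buf_calc_page_crc32(read_buf);   // expensive; done at most once per page
	crc32_inited = true;
}

if (checksum_field1 != crc32
    && checksum_field1 != buf_calc_page_new_checksum(read_buf)) {
	return TRUE;                             // neither algorithm matches the stored value
}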
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index cc06fa7146f..0485f0470df 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2014, 2017, MariaDB Corporation.
@@ -3370,10 +3370,7 @@ dict_foreign_find_index(
table, col_names, columns, n_cols,
index, types_idx,
check_charsets, check_null,
- error, err_col_no,err_index)
- && (!(index->online_status ==
- ONLINE_INDEX_ABORTED_DROPPED
- ||index->online_status == ONLINE_INDEX_ABORTED))) {
+ error, err_col_no,err_index)) {
if (error) {
*error = DB_SUCCESS;
}
diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc
index 81a4cd9c6c6..c182aaba676 100644
--- a/storage/innobase/dict/dict0mem.cc
+++ b/storage/innobase/dict/dict0mem.cc
@@ -498,9 +498,7 @@ dict_mem_table_col_rename(
s += len + 1;
}
- /* This could fail if the data dictionaries are out of sync.
- Proceed with the renaming anyway. */
- ut_ad(!strcmp(from, s));
+ ut_ad(!my_strcasecmp(system_charset_info, from, s));
dict_mem_table_col_rename_low(table, nth_col, to, s);
}
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index f852a64e2e9..e05d9565507 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 2014, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -4914,6 +4914,8 @@ retry:
" failed with error %d",
node->name, start_offset, len + start_offset,
err);
+ } else {
+ os_file_flush(node->handle);
}
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
@@ -5025,7 +5027,7 @@ file_extended:
size_after_extend, *actual_size); */
mutex_exit(&fil_system->mutex);
- fil_flush(space_id);
+ fil_flush(space_id, true);
return(success);
}
@@ -5641,21 +5643,16 @@ fil_aio_wait(
}
#endif /* UNIV_HOTBACKUP */
-/**********************************************************************//**
-Flushes to disk possible writes cached by the OS. If the space does not exist
-or is being dropped, does not do anything. */
-UNIV_INTERN
-void
-fil_flush(
-/*======*/
- ulint space_id) /*!< in: file space id (this can be a group of
- log files or a tablespace of the database) */
+/** Make persistent possible writes cached by the OS.
+If the space does not exist or is being dropped, do nothing.
+@param[in] space_id tablespace identifier
+@param[in] metadata whether to update file system metadata */
+UNIV_INTERN void fil_flush(ulint space_id, bool metadata)
{
fil_space_t* space;
fil_node_t* node;
pfs_os_file_t file;
-
mutex_enter(&fil_system->mutex);
space = fil_space_get_by_id(space_id);
@@ -5684,8 +5681,10 @@ fil_flush(
}
#endif /* UNIV_DEBUG */
- mutex_exit(&fil_system->mutex);
- return;
+ if (!metadata) {
+ mutex_exit(&fil_system->mutex);
+ return;
+ }
}
space->n_pending_flushes++; /*!< prevent dropping of the space while
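fil_flush now takes a metadata flag: when it is set, file system metadata is flushed even if no page writes are pending, which the tablespace-extension path above requests (together with the added os_file_flush after a successful write) so that a file size change reaches the disk. The new shapes as they appear in this patch; how other callers obtain the old behaviour (a default argument or an explicit false) is declared in the header, which is not part of this excerpt:

// Hedged sketch of the new interface, signature copied from the hunk above.
UNIV_INTERN void fil_flush(ulint space_id, bool metadata);

// after extending a tablespace file, also persist the size change:
fil_flush(space_id, true);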
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index d4ca9cb3660..4891e572741 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -870,19 +870,19 @@ fts_drop_index(
err = fts_drop_index_tables(trx, index);
while (index->index_fts_syncing
- && !trx_is_interrupted(trx)) {
- DICT_BG_YIELD(trx);
- }
+ && !trx_is_interrupted(trx)) {
+ DICT_BG_YIELD(trx);
+ }
- fts_free(table);
+ fts_free(table);
return(err);
}
while (index->index_fts_syncing
- && !trx_is_interrupted(trx)) {
- DICT_BG_YIELD(trx);
- }
+ && !trx_is_interrupted(trx)) {
+ DICT_BG_YIELD(trx);
+ }
current_doc_id = table->fts->cache->next_doc_id;
first_doc_id = table->fts->cache->first_doc_id;
@@ -901,9 +901,9 @@ fts_drop_index(
if (index_cache != NULL) {
while (index->index_fts_syncing
- && !trx_is_interrupted(trx)) {
- DICT_BG_YIELD(trx);
- }
+ && !trx_is_interrupted(trx)) {
+ DICT_BG_YIELD(trx);
+ }
if (index_cache->words) {
fts_words_free(index_cache->words);
rbt_free(index_cache->words);
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 2244cd1101a..ecf2318bf81 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -2072,11 +2072,6 @@ innobase_get_lower_case_table_names(void)
{
return(lower_case_table_names);
}
-/** return one of the tmpdir path
-@return tmpdir path*/
-UNIV_INTERN
-char*
-innobase_mysql_tmpdir(void) { return (mysql_tmpdir); }
/** Create a temporary file in the location specified by the parameter
path. If the path is null, then it will be created in tmpdir.
@@ -5452,19 +5447,21 @@ ha_innobase::open(
ib_table = dict_table_open_on_name(norm_name, FALSE, TRUE, ignore_err);
if (ib_table
- && ((!DICT_TF2_FLAG_IS_SET(ib_table, DICT_TF2_FTS_HAS_DOC_ID)
- && table->s->stored_fields != dict_table_get_n_user_cols(ib_table))
- || (DICT_TF2_FLAG_IS_SET(ib_table, DICT_TF2_FTS_HAS_DOC_ID)
- && (table->s->fields
- != dict_table_get_n_user_cols(ib_table) - 1)))) {
+ && (table->s->stored_fields != dict_table_get_n_user_cols(ib_table)
+ - (DICT_TF2_FLAG_IS_SET(ib_table, DICT_TF2_FTS_HAS_DOC_ID)
+ ? 1 : 0))) {
ib_logf(IB_LOG_LEVEL_WARN,
"table %s contains %lu user defined columns "
"in InnoDB, but %lu columns in MySQL. Please "
"check INFORMATION_SCHEMA.INNODB_SYS_COLUMNS and "
REFMAN "innodb-troubleshooting.html "
"for how to resolve it",
- norm_name, (ulong) dict_table_get_n_user_cols(ib_table),
- (ulong) table->s->fields);
+ norm_name,
+ (ulong) (dict_table_get_n_user_cols(ib_table)
+ - DICT_TF2_FLAG_IS_SET(ib_table,
+ DICT_TF2_FTS_HAS_DOC_ID)
+ ? 1 : 0),
+ (ulong) table->s->stored_fields);
/* Mark this table as corrupted, so the drop table
or force recovery can still use it, but not others. */
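The ha_innobase::open check above is simplified: the user-column count reported by InnoDB is reduced by one when the hidden FTS_DOC_ID column is present and then compared against the server's stored_fields, replacing the previous pair of parallel conditions; the warning also reports stored_fields now. The comparison in isolation:

// Hedged sketch of the simplified mismatch test shown above.
if (ib_table) {
	ulint	n_innodb_cols = dict_table_get_n_user_cols(ib_table)
		- (DICT_TF2_FLAG_IS_SET(ib_table, DICT_TF2_FTS_HAS_DOC_ID)
		   ? 1 : 0);

	if (table->s->stored_fields != n_innodb_cols) {
		/* column count mismatch: warn and mark the table corrupted */
	}
}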
@@ -9825,16 +9822,6 @@ next_record:
return(HA_ERR_END_OF_FILE);
}
-/*************************************************************************
-*/
-
-void
-ha_innobase::ft_end()
-{
- fprintf(stderr, "ft_end()\n");
-
- rnd_end();
-}
#ifdef WITH_WSREP
extern dict_index_t*
wsrep_dict_foreign_find_index(
@@ -10264,7 +10251,6 @@ ha_innobase::wsrep_append_keys(
DBUG_RETURN(0);
}
#endif /* WITH_WSREP */
-
/*********************************************************************//**
Stores a reference to the current row to 'ref' field of the handle. Note
that in the case where we have generated the clustered index for the
@@ -10597,10 +10583,6 @@ err_col:
: ER_TABLESPACE_EXISTS, MYF(0), display_name);
}
- if (err == DB_SUCCESS && (flags2 & DICT_TF2_FTS)) {
- fts_optimize_add_table(table);
- }
-
error_ret:
DBUG_RETURN(convert_error_code_to_mysql(err, flags, thd));
}
@@ -11667,6 +11649,10 @@ ha_innobase::create(
trx_free_for_mysql(trx);
DBUG_RETURN(-1);
}
+
+ mutex_enter(&dict_sys->mutex);
+ fts_optimize_add_table(innobase_table);
+ mutex_exit(&dict_sys->mutex);
}
/* Note: We can't call update_thd() as prebuilt will not be
@@ -12118,36 +12104,35 @@ innobase_rename_table(
row_mysql_lock_data_dictionary(trx);
- dict_table_t* table = NULL;
- table = dict_table_open_on_name(norm_from, TRUE, FALSE,
- DICT_ERR_IGNORE_NONE);
+ dict_table_t* table = dict_table_open_on_name(norm_from, TRUE, FALSE,
+ DICT_ERR_IGNORE_NONE);
- /* Since DICT_BG_YIELD has sleep for 250 milliseconds,
+	/* Since DICT_BG_YIELD sleeps for 250 milliseconds,
Convert lock_wait_timeout unit from second to 250 milliseconds */
- long int lock_wait_timeout = thd_lock_wait_timeout(thd) * 4;
- if (table != NULL) {
- for (dict_index_t* index = dict_table_get_first_index(table);
- index != NULL;
- index = dict_table_get_next_index(index)) {
-
- if (index->type & DICT_FTS) {
- /* Found */
- while (index->index_fts_syncing
- && !trx_is_interrupted(trx)
- && (lock_wait_timeout--) > 0) {
- DICT_BG_YIELD(trx);
- }
- }
- }
- dict_table_close(table, TRUE, FALSE);
- }
+ long int lock_wait_timeout = thd_lock_wait_timeout(thd) * 4;
+ if (table != NULL) {
+ for (dict_index_t* index = dict_table_get_first_index(table);
+ index != NULL;
+ index = dict_table_get_next_index(index)) {
+
+ if (index->type & DICT_FTS) {
+ /* Found */
+ while (index->index_fts_syncing
+ && !trx_is_interrupted(trx)
+ && (lock_wait_timeout--) > 0) {
+ DICT_BG_YIELD(trx);
+ }
+ }
+ }
+ dict_table_close(table, TRUE, FALSE);
+ }
- /* FTS sync is in progress. We shall timeout this operation */
- if (lock_wait_timeout < 0) {
- error = DB_LOCK_WAIT_TIMEOUT;
- row_mysql_unlock_data_dictionary(trx);
- DBUG_RETURN(error);
- }
+ /* FTS sync is in progress. We shall timeout this operation */
+ if (lock_wait_timeout < 0) {
+ error = DB_LOCK_WAIT_TIMEOUT;
+ row_mysql_unlock_data_dictionary(trx);
+ DBUG_RETURN(error);
+ }
/* Transaction must be flagged as a locking transaction or it hasn't
been started yet. */
@@ -12302,13 +12287,11 @@ ha_innobase::rename_table(
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), to);
error = DB_ERROR;
- }
+ } else if (error == DB_LOCK_WAIT_TIMEOUT) {
+ my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0), to);
- else if (error == DB_LOCK_WAIT_TIMEOUT) {
- my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0), to);
-
- error = DB_LOCK_WAIT;
- }
+ error = DB_LOCK_WAIT;
+ }
DBUG_RETURN(convert_error_code_to_mysql(error, 0, NULL));
}
diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h
index 85e8f887060..db27cfe6e6f 100644
--- a/storage/innobase/handler/ha_innodb.h
+++ b/storage/innobase/handler/ha_innodb.h
@@ -161,7 +161,7 @@ class ha_innobase: public handler
int rnd_pos(uchar * buf, uchar *pos);
int ft_init();
- void ft_end();
+ void ft_end() { rnd_end(); }
FT_INFO *ft_init_ext(uint flags, uint inx, String* key);
int ft_read(uchar* buf);
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index 802ec2a6f13..a0a4a57dec9 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2005, 2018, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -3586,9 +3586,8 @@ check_if_ok_to_rename:
/* Check each index's column length to make sure they do not
exceed limit */
- for (ulint i = 0; i < ha_alter_info->index_add_count; i++) {
- const KEY* key = &ha_alter_info->key_info_buffer[
- ha_alter_info->index_add_buffer[i]];
+ for (ulint i = 0; i < ha_alter_info->key_count; i++) {
+ const KEY* key = &ha_alter_info->key_info_buffer[i];
if (key->flags & HA_FULLTEXT) {
/* The column length does not matter for
@@ -3695,12 +3694,14 @@ check_if_ok_to_rename:
continue;
}
+ dict_foreign_t* foreign;
+
for (dict_foreign_set::iterator it
= prebuilt->table->foreign_set.begin();
it != prebuilt->table->foreign_set.end();
++it) {
- dict_foreign_t* foreign = *it;
+ foreign = *it;
const char* fid = strchr(foreign->id, '/');
DBUG_ASSERT(fid);
@@ -3711,7 +3712,6 @@ check_if_ok_to_rename:
if (!my_strcasecmp(system_charset_info,
fid, drop->name)) {
- drop_fk[n_drop_fk++] = foreign;
goto found_fk;
}
}
@@ -3720,12 +3720,19 @@ check_if_ok_to_rename:
drop->name);
goto err_exit;
found_fk:
+ for (ulint i = n_drop_fk; i--; ) {
+ if (drop_fk[i] == foreign) {
+ goto dup_fk;
+ }
+ }
+ drop_fk[n_drop_fk++] = foreign;
+dup_fk:
continue;
}
DBUG_ASSERT(n_drop_fk > 0);
DBUG_ASSERT(n_drop_fk
- == ha_alter_info->alter_info->drop_list.elements);
+ <= ha_alter_info->alter_info->drop_list.elements);
} else {
drop_fk = NULL;
}
@@ -4177,23 +4184,11 @@ oom:
table. Either way, we should be seeing and
reporting a bogus duplicate key error. */
dup_key = NULL;
- } else if (prebuilt->trx->error_key_num == 0) {
+ } else {
+ DBUG_ASSERT(prebuilt->trx->error_key_num
+ < ha_alter_info->key_count);
dup_key = &ha_alter_info->key_info_buffer[
prebuilt->trx->error_key_num];
- } else {
- /* Check if there is generated cluster index column */
- if (ctx->num_to_add_index > ha_alter_info->key_count) {
- DBUG_ASSERT(prebuilt->trx->error_key_num
- <= ha_alter_info->key_count);
- dup_key = &ha_alter_info->key_info_buffer[
- prebuilt->trx->error_key_num - 1];
- }
- else {
- DBUG_ASSERT(prebuilt->trx->error_key_num
- < ha_alter_info->key_count);
- dup_key = &ha_alter_info->key_info_buffer[
- prebuilt->trx->error_key_num];
- }
}
print_keydup_error(altered_table, dup_key, MYF(0));
break;
@@ -4518,7 +4513,6 @@ innobase_rename_column_try(
pars_info_add_ull_literal(info, "tableid", user_table->id);
pars_info_add_int4_literal(info, "nth", nth_col);
- pars_info_add_str_literal(info, "old", from);
pars_info_add_str_literal(info, "new", to);
trx->op_info = "renaming column in SYS_COLUMNS";
@@ -4528,7 +4522,7 @@ innobase_rename_column_try(
"PROCEDURE RENAME_SYS_COLUMNS_PROC () IS\n"
"BEGIN\n"
"UPDATE SYS_COLUMNS SET NAME=:new\n"
- "WHERE TABLE_ID=:tableid AND NAME=:old\n"
+ "WHERE TABLE_ID=:tableid\n"
"AND POS=:nth;\n"
"END;\n",
FALSE, trx);
@@ -4551,35 +4545,40 @@ err_exit:
index != NULL;
index = dict_table_get_next_index(index)) {
+ bool has_prefixes = false;
+ for (size_t i = 0; i < dict_index_get_n_fields(index); i++) {
+ if (dict_index_get_nth_field(index, i)->prefix_len) {
+ has_prefixes = true;
+ break;
+ }
+ }
+
for (ulint i = 0; i < dict_index_get_n_fields(index); i++) {
- if (strcmp(dict_index_get_nth_field(index, i)->name,
- from)) {
+ const dict_field_t* field
+ = dict_index_get_nth_field(index, i);
+ if (my_strcasecmp(system_charset_info, field->name,
+ from)) {
continue;
}
info = pars_info_create();
+ ulint pos = i;
+ if (has_prefixes) {
+ pos = (pos << 16) + field->prefix_len;
+ }
+
pars_info_add_ull_literal(info, "indexid", index->id);
- pars_info_add_int4_literal(info, "nth", i);
- pars_info_add_str_literal(info, "old", from);
+ pars_info_add_int4_literal(info, "nth", pos);
pars_info_add_str_literal(info, "new", to);
error = que_eval_sql(
info,
"PROCEDURE RENAME_SYS_FIELDS_PROC () IS\n"
"BEGIN\n"
-
"UPDATE SYS_FIELDS SET COL_NAME=:new\n"
- "WHERE INDEX_ID=:indexid AND COL_NAME=:old\n"
+ "WHERE INDEX_ID=:indexid\n"
"AND POS=:nth;\n"
-
- /* Try again, in case there is a prefix_len
- encoded in SYS_FIELDS.POS */
-
- "UPDATE SYS_FIELDS SET COL_NAME=:new\n"
- "WHERE INDEX_ID=:indexid AND COL_NAME=:old\n"
- "AND POS>=65536*:nth AND POS<65536*(:nth+1);\n"
-
"END;\n",
FALSE, trx);
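Illustrative sketch (not part of the patch): when any field of the index carries a column prefix, SYS_FIELDS.POS packs the field position into the upper 16 bits and the prefix length into the lower 16 bits, which is why the UPDATE above keys on the encoded value. The helpers below are hypothetical and shown only to document the encoding.

	/* sketch only: encoding of SYS_FIELDS.POS when the index
	contains at least one prefixed field */
	static inline ulint sys_fields_pos_encode(ulint nth, ulint prefix_len)
	{
		return((nth << 16) + prefix_len);
	}

	static inline ulint sys_fields_pos_to_field_no(ulint pos)
	{
		return(pos >> 16);
	}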
@@ -4592,7 +4591,7 @@ err_exit:
rename_foreign:
trx->op_info = "renaming column in SYS_FOREIGN_COLS";
- std::list<dict_foreign_t*> fk_evict;
+ std::set<dict_foreign_t*> fk_evict;
bool foreign_modified;
for (dict_foreign_set::const_iterator it = user_table->foreign_set.begin();
@@ -4603,7 +4602,9 @@ rename_foreign:
foreign_modified = false;
for (unsigned i = 0; i < foreign->n_fields; i++) {
- if (strcmp(foreign->foreign_col_names[i], from)) {
+ if (my_strcasecmp(system_charset_info,
+ foreign->foreign_col_names[i],
+ from)) {
continue;
}
@@ -4611,7 +4612,6 @@ rename_foreign:
pars_info_add_str_literal(info, "id", foreign->id);
pars_info_add_int4_literal(info, "nth", i);
- pars_info_add_str_literal(info, "old", from);
pars_info_add_str_literal(info, "new", to);
error = que_eval_sql(
@@ -4620,8 +4620,7 @@ rename_foreign:
"BEGIN\n"
"UPDATE SYS_FOREIGN_COLS\n"
"SET FOR_COL_NAME=:new\n"
- "WHERE ID=:id AND POS=:nth\n"
- "AND FOR_COL_NAME=:old;\n"
+ "WHERE ID=:id AND POS=:nth;\n"
"END;\n",
FALSE, trx);
@@ -4632,7 +4631,7 @@ rename_foreign:
}
if (foreign_modified) {
- fk_evict.push_back(foreign);
+ fk_evict.insert(foreign);
}
}
@@ -4645,7 +4644,9 @@ rename_foreign:
dict_foreign_t* foreign = *it;
for (unsigned i = 0; i < foreign->n_fields; i++) {
- if (strcmp(foreign->referenced_col_names[i], from)) {
+ if (my_strcasecmp(system_charset_info,
+ foreign->referenced_col_names[i],
+ from)) {
continue;
}
@@ -4653,7 +4654,6 @@ rename_foreign:
pars_info_add_str_literal(info, "id", foreign->id);
pars_info_add_int4_literal(info, "nth", i);
- pars_info_add_str_literal(info, "old", from);
pars_info_add_str_literal(info, "new", to);
error = que_eval_sql(
@@ -4662,8 +4662,7 @@ rename_foreign:
"BEGIN\n"
"UPDATE SYS_FOREIGN_COLS\n"
"SET REF_COL_NAME=:new\n"
- "WHERE ID=:id AND POS=:nth\n"
- "AND REF_COL_NAME=:old;\n"
+ "WHERE ID=:id AND POS=:nth;\n"
"END;\n",
FALSE, trx);
@@ -4674,7 +4673,7 @@ rename_foreign:
}
if (foreign_modified) {
- fk_evict.push_back(foreign);
+ fk_evict.insert(foreign);
}
}
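Illustrative sketch (not part of the patch): changing fk_evict from std::list to std::set deduplicates the eviction list, so a constraint whose foreign and referenced columns both mention the renamed column is evicted from the cache only once.

	/* sketch only */
	std::set<dict_foreign_t*> fk_evict;
	fk_evict.insert(foreign);
	fk_evict.insert(foreign);	/* duplicate insert is a no-op */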
@@ -5070,7 +5069,7 @@ commit_try_rebuild(
& Alter_inplace_info::DROP_FOREIGN_KEY)
|| ctx->num_to_drop_fk > 0);
DBUG_ASSERT(ctx->num_to_drop_fk
- == ha_alter_info->alter_info->drop_list.elements);
+ <= ha_alter_info->alter_info->drop_list.elements);
for (dict_index_t* index = dict_table_get_first_index(rebuilt_table);
index;
@@ -5121,18 +5120,9 @@ commit_try_rebuild(
FTS_DOC_ID. */
dup_key = NULL;
} else {
- if (ctx->num_to_add_index > ha_alter_info->key_count) {
- DBUG_ASSERT(err_key <=
- ha_alter_info->key_count);
- dup_key = &ha_alter_info
- ->key_info_buffer[err_key - 1];
- }
- else {
- DBUG_ASSERT(err_key <
- ha_alter_info->key_count);
- dup_key = &ha_alter_info
- ->key_info_buffer[err_key];
- }
+ DBUG_ASSERT(err_key < ha_alter_info->key_count);
+ dup_key = &ha_alter_info
+ ->key_info_buffer[err_key];
}
print_keydup_error(altered_table, dup_key, MYF(0));
@@ -5331,7 +5321,7 @@ commit_try_norebuild(
& Alter_inplace_info::DROP_FOREIGN_KEY)
|| ctx->num_to_drop_fk > 0);
DBUG_ASSERT(ctx->num_to_drop_fk
- == ha_alter_info->alter_info->drop_list.elements);
+ <= ha_alter_info->alter_info->drop_list.elements);
for (ulint i = 0; i < ctx->num_to_add_index; i++) {
dict_index_t* index = ctx->add_index[i];
@@ -5652,7 +5642,6 @@ ha_innobase::commit_inplace_alter_table(
Alter_inplace_info* ha_alter_info,
bool commit)
{
- dberr_t error;
ha_innobase_inplace_ctx* ctx0
= static_cast<ha_innobase_inplace_ctx*>
(ha_alter_info->handler_ctx);
@@ -5719,7 +5708,7 @@ ha_innobase::commit_inplace_alter_table(
transactions collected during crash recovery could be
holding InnoDB locks only, not MySQL locks. */
- error = row_merge_lock_table(
+ dberr_t error = row_merge_lock_table(
prebuilt->trx, ctx->old_table, LOCK_X);
if (error != DB_SUCCESS) {
@@ -5904,9 +5893,9 @@ rollback_trx:
file operations that will be performed in
commit_cache_rebuild(), and if none, generate
the redo log for these operations. */
- error = fil_mtr_rename_log(ctx->old_table,
- ctx->new_table,
- ctx->tmp_name, &mtr);
+ dberr_t error = fil_mtr_rename_log(
+ ctx->old_table, ctx->new_table, ctx->tmp_name,
+ &mtr);
if (error != DB_SUCCESS) {
/* Out of memory or a problem will occur
when renaming files. */
@@ -6031,39 +6020,30 @@ rollback_trx:
/* Rename the tablespace files. */
commit_cache_rebuild(ctx);
- error = innobase_update_foreign_cache(ctx, user_thd);
- if (error != DB_SUCCESS) {
- goto foreign_fail;
+ if (innobase_update_foreign_cache(ctx, user_thd)
+ != DB_SUCCESS
+ && prebuilt->trx->check_foreigns) {
+foreign_fail:
+ push_warning_printf(
+ user_thd,
+ Sql_condition::WARN_LEVEL_WARN,
+ ER_ALTER_INFO,
+ "failed to load FOREIGN KEY"
+ " constraints");
}
} else {
- error = innobase_update_foreign_cache(ctx, user_thd);
+ bool fk_fail = innobase_update_foreign_cache(
+ ctx, user_thd) != DB_SUCCESS;
- if (error != DB_SUCCESS) {
-foreign_fail:
- /* The data dictionary cache
- should be corrupted now. The
- best solution should be to
- kill and restart the server,
- but the *.frm file has not
- been replaced yet. */
- my_error(ER_CANNOT_ADD_FOREIGN,
- MYF(0));
- sql_print_error(
- "InnoDB: dict_load_foreigns()"
- " returned %u for %s",
- (unsigned) error,
- thd_query_string(user_thd)
- ->str);
- ut_ad(0);
- } else {
- if (!commit_cache_norebuild(
- ctx, table, trx)) {
- ut_a(!prebuilt->trx->check_foreigns);
- }
+ if (!commit_cache_norebuild(ctx, table, trx)) {
+ fk_fail = true;
+ ut_ad(!prebuilt->trx->check_foreigns);
+ }
- innobase_rename_columns_cache(
- ha_alter_info, table,
- ctx->new_table);
+ innobase_rename_columns_cache(ha_alter_info, table,
+ ctx->new_table);
+ if (fk_fail && prebuilt->trx->check_foreigns) {
+ goto foreign_fail;
}
}
DBUG_INJECT_CRASH("ib_commit_inplace_crash",
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index cddab05f84d..a5a493ab769 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -1443,6 +1443,9 @@ struct buf_page_t{
by buf_pool->mutex. */
ib_uint32_t offset; /*!< page number; also protected
by buf_pool->mutex. */
+ buf_page_t* hash; /*!< node used in chaining to
+ buf_pool->page_hash or
+ buf_pool->zip_hash */
/** count of how manyfold this block is currently bufferfixed */
#ifdef PAGE_ATOMIC_REF_COUNT
ib_uint32_t buf_fix_count;
@@ -1489,9 +1492,6 @@ struct buf_page_t{
zip.data == NULL means an active
buf_pool->watch */
#ifndef UNIV_HOTBACKUP
- buf_page_t* hash; /*!< node used in chaining to
- buf_pool->page_hash or
- buf_pool->zip_hash */
#ifdef UNIV_DEBUG
ibool in_page_hash; /*!< TRUE if in buf_pool->page_hash */
ibool in_zip_hash; /*!< TRUE if in buf_pool->zip_hash */
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index 9b9c0cab43c..e190ddf9efd 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -929,8 +929,10 @@ struct dict_table_t{
table_id_t id; /*!< id of the table */
+ hash_node_t id_hash; /*!< hash chain node */
mem_heap_t* heap; /*!< memory heap */
char* name; /*!< table name */
+ hash_node_t name_hash; /*!< hash chain node */
const char* dir_path_of_temp_table;/*!< NULL or the directory path
where a TEMPORARY table that was explicitly
created by a user should be placed if
@@ -986,8 +988,6 @@ struct dict_table_t{
dictionary information and
MySQL FRM information mismatch. */
#ifndef UNIV_HOTBACKUP
- hash_node_t name_hash; /*!< hash chain node */
- hash_node_t id_hash; /*!< hash chain node */
UT_LIST_BASE_NODE_T(dict_index_t)
indexes; /*!< list of indexes of the table */
diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h
index 6e772e31772..ad279487a3d 100644
--- a/storage/innobase/include/fil0fil.h
+++ b/storage/innobase/include/fil0fil.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -245,7 +245,9 @@ struct fil_node_t {
struct fil_space_t {
char* name; /*!< space name = the path to the first file in
it */
+ hash_node_t name_hash;/*!< hash chain the name_hash table */
ulint id; /*!< space id */
+ hash_node_t hash; /*!< hash chain node */
ib_int64_t tablespace_version;
/*!< in DISCARD/IMPORT this timestamp
is used to check if we should ignore
@@ -292,8 +294,6 @@ struct fil_space_t {
trying to read a block.
Dropping of the tablespace is forbidden
if this is positive */
- hash_node_t hash; /*!< hash chain node */
- hash_node_t name_hash;/*!< hash chain the name_hash table */
#ifndef UNIV_HOTBACKUP
rw_lock_t latch; /*!< latch protecting the file space storage
allocation */
@@ -972,15 +972,11 @@ fil_aio_wait(
/*=========*/
ulint segment); /*!< in: the number of the segment in the aio
array to wait for */
-/**********************************************************************//**
-Flushes to disk possible writes cached by the OS. If the space does not exist
-or is being dropped, does not do anything. */
-UNIV_INTERN
-void
-fil_flush(
-/*======*/
- ulint space_id); /*!< in: file space id (this can be a group of
- log files or a tablespace of the database) */
+/** Make persistent any writes that the OS may have cached.
+If the space does not exist or is being dropped, do nothing.
+@param[in] space_id tablespace identifier
+@param[in] metadata whether to update file system metadata */
+UNIV_INTERN void fil_flush(ulint space_id, bool metadata = false);
/**********************************************************************//**
Flushes to disk writes in file spaces of the given type possibly cached by
the OS. */
diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h
index e73aeca6e5a..a78e3d64773 100644
--- a/storage/innobase/include/os0file.h
+++ b/storage/innobase/include/os0file.h
@@ -1,6 +1,6 @@
/***********************************************************************
-Copyright (c) 1995, 2018, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2013, 2017, MariaDB Corporation.
@@ -1314,10 +1314,6 @@ os_file_get_status(
file can be opened in RW mode */
#if !defined(UNIV_HOTBACKUP)
-
-/** return one of the tmpdir path
- @return tmpdir path*/
-char *innobase_mysql_tmpdir(void);
/** Create a temporary file in the location specified by the parameter
path. If the path is null, then it will be created in tmpdir.
@param[in] path location for creating temporary file
diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h
index 5cbbe30401f..5a572b1f314 100644
--- a/storage/innobase/include/page0page.h
+++ b/storage/innobase/include/page0page.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2016, MariaDB Corporation
+Copyright (c) 2013, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1123,19 +1123,6 @@ const rec_t*
page_find_rec_max_not_deleted(
const page_t* page);
-/** Issue a warning when the checksum that is stored in the page is valid,
-but different than the global setting innodb_checksum_algorithm.
-@param[in] current_algo current checksum algorithm
-@param[in] page_checksum page valid checksum
-@param[in] space_id tablespace id
-@param[in] page_no page number */
-void
-page_warn_strict_checksum(
- srv_checksum_algorithm_t curr_algo,
- srv_checksum_algorithm_t page_checksum,
- ulint space_id,
- ulint page_no);
-
#ifdef UNIV_MATERIALIZE
#undef UNIV_INLINE
#define UNIV_INLINE UNIV_INLINE_ORIGINAL
diff --git a/storage/innobase/include/page0zip.h b/storage/innobase/include/page0zip.h
index 4e362cec641..0bf77e2fcf3 100644
--- a/storage/innobase/include/page0zip.h
+++ b/storage/innobase/include/page0zip.h
@@ -546,21 +546,6 @@ from outside the buffer pool.
# define UNIV_INLINE UNIV_INLINE_ORIGINAL
#endif
-#ifdef UNIV_INNOCHECKSUM
-/** Issue a warning when the checksum that is stored in the page is valid,
-but different than the global setting innodb_checksum_algorithm.
-@param[in] current_algo current checksum algorithm
-@param[in] page_checksum page valid checksum
-@param[in] space_id tablespace id
-@param[in] page_no page number */
-void
-page_warn_strict_checksum(
- srv_checksum_algorithm_t curr_algo,
- srv_checksum_algorithm_t page_checksum,
- ulint space_id,
- ulint page_no);
-#endif /* UNIV_INNOCHECKSUM */
-
#ifndef UNIV_INNOCHECKSUM
#ifndef UNIV_NONINL
# include "page0zip.ic"
diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i
index a9d886607c0..f1bf416b370 100644
--- a/storage/innobase/include/univ.i
+++ b/storage/innobase/include/univ.i
@@ -45,7 +45,7 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_MAJOR 5
#define INNODB_VERSION_MINOR 6
-#define INNODB_VERSION_BUGFIX 42
+#define INNODB_VERSION_BUGFIX 43
/* The following is the InnoDB version as shown in
SELECT plugin_version FROM information_schema.plugins;
@@ -585,12 +585,14 @@ typedef void* os_thread_ret_t;
#include "ut0dbg.h"
#include "ut0ut.h"
#include "db0err.h"
+#include <my_valgrind.h>
+/* define UNIV macros in terms of my_valgrind.h */
+#define UNIV_MEM_INVALID(addr, size) MEM_UNDEFINED(addr, size)
+#define UNIV_MEM_FREE(addr, size) MEM_NOACCESS(addr, size)
+#define UNIV_MEM_ALLOC(addr, size) UNIV_MEM_INVALID(addr, size)
#ifdef UNIV_DEBUG_VALGRIND
# include <valgrind/memcheck.h>
# define UNIV_MEM_VALID(addr, size) VALGRIND_MAKE_MEM_DEFINED(addr, size)
-# define UNIV_MEM_INVALID(addr, size) VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
-# define UNIV_MEM_FREE(addr, size) VALGRIND_MAKE_MEM_NOACCESS(addr, size)
-# define UNIV_MEM_ALLOC(addr, size) VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
# define UNIV_MEM_DESC(addr, size) VALGRIND_CREATE_BLOCK(addr, size, #addr)
# define UNIV_MEM_UNDESC(b) VALGRIND_DISCARD(b)
# define UNIV_MEM_ASSERT_RW_LOW(addr, size, should_abort) do { \
@@ -625,9 +627,6 @@ typedef void* os_thread_ret_t;
} while (0)
#else
# define UNIV_MEM_VALID(addr, size) do {} while(0)
-# define UNIV_MEM_INVALID(addr, size) do {} while(0)
-# define UNIV_MEM_FREE(addr, size) do {} while(0)
-# define UNIV_MEM_ALLOC(addr, size) do {} while(0)
# define UNIV_MEM_DESC(addr, size) do {} while(0)
# define UNIV_MEM_UNDESC(b) do {} while(0)
# define UNIV_MEM_ASSERT_RW_LOW(addr, size, should_abort) do {} while(0)
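Illustrative sketch (not part of the patch): the UNIV_MEM_INVALID/FREE/ALLOC macros are now defined unconditionally on top of my_valgrind.h, so their behaviour follows whatever MEM_UNDEFINED and MEM_NOACCESS expand to for the build rather than being tied to UNIV_DEBUG_VALGRIND; buf and len below are placeholders.

	/* sketch only: what the remapped macros expand to */
	UNIV_MEM_INVALID(buf, len);	/* MEM_UNDEFINED(buf, len) */
	UNIV_MEM_FREE(buf, len);	/* MEM_NOACCESS(buf, len)  */
	UNIV_MEM_ALLOC(buf, len);	/* MEM_UNDEFINED(buf, len) */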
diff --git a/storage/innobase/os/os0proc.cc b/storage/innobase/os/os0proc.cc
index ff6d65e4ae6..f711fbb025b 100644
--- a/storage/innobase/os/os0proc.cc
+++ b/storage/innobase/os/os0proc.cc
@@ -192,7 +192,6 @@ os_mem_free_large(
ut_a(ut_total_allocated_memory >= size);
ut_total_allocated_memory -= size;
os_fast_mutex_unlock(&ut_list_mutex);
- UNIV_MEM_FREE(ptr, size);
return;
}
#endif /* HAVE_LARGE_PAGES && UNIV_LINUX */
@@ -208,7 +207,6 @@ os_mem_free_large(
ut_a(ut_total_allocated_memory >= size);
ut_total_allocated_memory -= size;
os_fast_mutex_unlock(&ut_list_mutex);
- UNIV_MEM_FREE(ptr, size);
}
#elif !defined OS_MAP_ANON
ut_free(ptr);
@@ -226,7 +224,6 @@ os_mem_free_large(
ut_a(ut_total_allocated_memory >= size);
ut_total_allocated_memory -= size;
os_fast_mutex_unlock(&ut_list_mutex);
- UNIV_MEM_FREE(ptr, size);
}
#endif
}
diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc
index fcd722f3492..dd1136eb23a 100644
--- a/storage/innobase/page/page0page.cc
+++ b/storage/innobase/page/page0page.cc
@@ -2807,45 +2807,3 @@ page_find_rec_max_not_deleted(
}
return(prev_rec);
}
-
-/** Issue a warning when the checksum that is stored in the page is valid,
-but different than the global setting innodb_checksum_algorithm.
-@param[in] current_algo current checksum algorithm
-@param[in] page_checksum page valid checksum
-@param[in] space_id tablespace id
-@param[in] page_no page number */
-void
-page_warn_strict_checksum(
- srv_checksum_algorithm_t curr_algo,
- srv_checksum_algorithm_t page_checksum,
- ulint space_id,
- ulint page_no)
-{
- srv_checksum_algorithm_t curr_algo_nonstrict;
- switch (curr_algo) {
- case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
- curr_algo_nonstrict = SRV_CHECKSUM_ALGORITHM_CRC32;
- break;
- case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
- curr_algo_nonstrict = SRV_CHECKSUM_ALGORITHM_INNODB;
- break;
- case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
- curr_algo_nonstrict = SRV_CHECKSUM_ALGORITHM_NONE;
- break;
- default:
- ut_error;
- }
-
- ib_logf(IB_LOG_LEVEL_WARN,
- "innodb_checksum_algorithm is set to \"%s\""
- " but the page [page id: space=" ULINTPF ","
- " page number=" ULINTPF "] contains a valid checksum \"%s\"."
- " Accepting the page as valid. Change innodb_checksum_algorithm"
- " to \"%s\" to silently accept such pages or rewrite all pages"
- " so that they contain \"%s\" checksum.",
- buf_checksum_algorithm_name(curr_algo),
- space_id, page_no,
- buf_checksum_algorithm_name(page_checksum),
- buf_checksum_algorithm_name(curr_algo_nonstrict),
- buf_checksum_algorithm_name(curr_algo_nonstrict));
-}
diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc
index 878a0b8728f..f9a4e38064c 100644
--- a/storage/innobase/page/page0zip.cc
+++ b/storage/innobase/page/page0zip.cc
@@ -2,7 +2,7 @@
Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2017, MariaDB Corporation.
+Copyright (c) 2014, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -48,8 +48,6 @@ using namespace std;
#include "btr0cur.h"
#include "page0types.h"
#include "log0recv.h"
-#else
-#define page_warn_strict_checksum(A,B,C,D)
#endif /* !UNIV_INNOCHECKSUM */
#include "zlib.h"
#ifndef UNIV_HOTBACKUP
@@ -4926,13 +4924,6 @@ page_zip_verify_checksum(
stored = static_cast<ib_uint32_t>(mach_read_from_4(
static_cast<const unsigned char*>(data) + FIL_PAGE_SPACE_OR_CHKSUM));
- ulint page_no MY_ATTRIBUTE((unused)) =
- mach_read_from_4(static_cast<const unsigned char*>
- (data) + FIL_PAGE_OFFSET);
- ulint space_id MY_ATTRIBUTE((unused)) =
- mach_read_from_4(static_cast<const unsigned char*>
- (data) + FIL_PAGE_SPACE_ID);
-
#if FIL_PAGE_LSN % 8
#error "FIL_PAGE_LSN must be 64 bit aligned"
#endif
@@ -4974,97 +4965,31 @@ page_zip_verify_checksum(
switch (curr_algo) {
case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
- case SRV_CHECKSUM_ALGORITHM_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
+ return stored == calc;
+ case SRV_CHECKSUM_ALGORITHM_CRC32:
if (stored == BUF_NO_CHECKSUM_MAGIC) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_CRC32) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_NONE,
- space_id, page_no);
- }
-
return(TRUE);
}
+ crc32 = calc;
innodb = static_cast<ib_uint32_t>(page_zip_calc_checksum(
data, size, SRV_CHECKSUM_ALGORITHM_INNODB));
-
- if (stored == innodb) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_CRC32) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_INNODB,
- space_id, page_no);
- }
-
- return(TRUE);
- }
-
break;
- case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
case SRV_CHECKSUM_ALGORITHM_INNODB:
-
if (stored == BUF_NO_CHECKSUM_MAGIC) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_NONE,
- space_id, page_no);
- }
-
- return(TRUE);
+ return TRUE;
}
crc32 = static_cast<ib_uint32_t>(page_zip_calc_checksum(
data, size, SRV_CHECKSUM_ALGORITHM_CRC32));
-
- if (stored == crc32) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_CRC32,
- space_id, page_no);
- }
-
- return(TRUE);
- }
-
- break;
- case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
-
- crc32 = static_cast<ib_uint32_t>(page_zip_calc_checksum(
- data, size, SRV_CHECKSUM_ALGORITHM_CRC32));
-
- if (stored == crc32) {
- page_warn_strict_checksum(
- curr_algo, SRV_CHECKSUM_ALGORITHM_CRC32,
- space_id, page_no);
-
- return(TRUE);
- }
-
- innodb = static_cast<ib_uint32_t>(page_zip_calc_checksum(
- data, size, SRV_CHECKSUM_ALGORITHM_INNODB));
-
- if (stored == innodb) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_INNODB,
- space_id, page_no);
- return(TRUE);
- }
-
+ innodb = calc;
break;
case SRV_CHECKSUM_ALGORITHM_NONE:
- ut_error;
- /* no default so the compiler will emit a warning if new enum
- is added and not handled here */
+ return TRUE;
}
- return(FALSE);
+ return (stored == crc32 || stored == innodb);
}
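Illustrative sketch (not part of the patch): after the simplification, a strict setting accepts only its own checksum, innodb_checksum_algorithm=none accepts anything, and the non-strict crc32/innodb settings accept BUF_NO_CHECKSUM_MAGIC or either algorithm's checksum (the per-page warning was removed). The hypothetical helper below condenses the final comparison.

	/* sketch only: final acceptance test for the non-strict settings,
	after crc32 and innodb have been filled in by the switch above */
	static inline bool
	page_zip_stored_checksum_ok(ib_uint32_t stored,
				    ib_uint32_t crc32,
				    ib_uint32_t innodb)
	{
		return(stored == crc32 || stored == innodb);
	}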
diff --git a/storage/innobase/row/row0ftsort.cc b/storage/innobase/row/row0ftsort.cc
index 757e268c3a7..ccc26478c72 100644
--- a/storage/innobase/row/row0ftsort.cc
+++ b/storage/innobase/row/row0ftsort.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2010, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -741,7 +741,7 @@ loop:
goto func_exit;
}
- UNIV_MEM_INVALID(block[t_ctx.buf_used][0], srv_sort_buf_size);
+ UNIV_MEM_INVALID(block[t_ctx.buf_used], srv_sort_buf_size);
buf[t_ctx.buf_used] = row_merge_buf_empty(buf[t_ctx.buf_used]);
mycount[t_ctx.buf_used] += t_ctx.rows_added[t_ctx.buf_used];
t_ctx.rows_added[t_ctx.buf_used] = 0;
@@ -834,8 +834,7 @@ exit:
goto func_exit;
}
- UNIV_MEM_INVALID(block[i][0],
- srv_sort_buf_size);
+ UNIV_MEM_INVALID(block[i], srv_sort_buf_size);
}
buf[i] = row_merge_buf_empty(buf[i]);
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index 5d3e2d2cca6..ec9b8f79e49 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2005, 2018, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2005, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2014, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -3138,10 +3138,9 @@ row_merge_file_create_low(
file APIs, add instrumentation to register with
performance schema */
struct PSI_file_locker* locker = NULL;
-
PSI_file_locker_state state;
if (!path) {
- path = innobase_mysql_tmpdir();
+ path = mysql_tmpdir;
}
static const char label[] = "/Innodb Merge Temp File";
char* name = static_cast<char*>(
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index d9e18a99201..f623845f289 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -4323,7 +4323,8 @@ do_drop:
char msg_tablename[MAX_FULL_NAME_LEN + 1];
innobase_format_name(
- msg_tablename, sizeof(tablename),
+ msg_tablename,
+ sizeof msg_tablename,
tablename, FALSE);
ib_logf(IB_LOG_LEVEL_INFO,
@@ -5128,9 +5129,6 @@ row_rename_table_for_mysql(
" = TO_BINARY(:old_table_name);\n"
"END;\n"
, FALSE, trx);
- if (err != DB_SUCCESS) {
- goto end;
- }
} else if (n_constraints_to_drop > 0) {
/* Drop some constraints of tmp tables. */
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index 9426953d173..c2954742f8e 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2018, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2015, 2018, MariaDB Corporation.
@@ -4550,7 +4550,7 @@ no_gap_lock:
prebuilt->new_rec_locks = 1;
}
err = DB_SUCCESS;
- break;
+ /* fall through */
case DB_SUCCESS:
break;
case DB_LOCK_WAIT:
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index c62c848c705..220e96d2c05 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -2257,6 +2257,10 @@ innobase_start_or_create_for_mysql()
break;
}
+ if (stat_info.type != OS_FILE_TYPE_FILE) {
+ break;
+ }
+
if (!srv_file_check_mode(logfilename)) {
return(DB_ERROR);
}
diff --git a/storage/tokudb/PerconaFT/COPYING.APACHEv2 b/storage/tokudb/PerconaFT/COPYING.APACHEv2
new file mode 100644
index 00000000000..ecbfc770fa9
--- /dev/null
+++ b/storage/tokudb/PerconaFT/COPYING.APACHEv2
@@ -0,0 +1,174 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/storage/tokudb/PerconaFT/README.md b/storage/tokudb/PerconaFT/README.md
index ffb646b67af..26333df877e 100644
--- a/storage/tokudb/PerconaFT/README.md
+++ b/storage/tokudb/PerconaFT/README.md
@@ -104,11 +104,14 @@ All source code and test contributions must be provided under a [BSD 2-Clause][b
License
-------
+Portions of the PerconaFT library (the 'locktree' and 'omt') are available under the Apache version 2 license.
PerconaFT is available under the GPL version 2, and AGPL version 3.
-See [COPYING.AGPLv3][agpllicense],
+See [COPYING.APACHEv2][apachelicense],
+[COPYING.AGPLv3][agpllicense],
[COPYING.GPLv2][gpllicense], and
[PATENTS][patents].
+[apachelicense]: http://github.com/Percona/PerconaFT/blob/master/COPYING.APACHEv2
[agpllicense]: http://github.com/Percona/PerconaFT/blob/master/COPYING.AGPLv3
[gpllicense]: http://github.com/Percona/PerconaFT/blob/master/COPYING.GPLv2
[patents]: http://github.com/Percona/PerconaFT/blob/master/PATENTS
diff --git a/storage/tokudb/PerconaFT/ft/txn/txn_manager.h b/storage/tokudb/PerconaFT/ft/txn/txn_manager.h
index 7cdc52c4f43..25fa6032112 100644
--- a/storage/tokudb/PerconaFT/ft/txn/txn_manager.h
+++ b/storage/tokudb/PerconaFT/ft/txn/txn_manager.h
@@ -46,11 +46,11 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
void set_test_txn_sync_callback(void (*) (pthread_t, void*), void*);
#define toku_test_txn_sync_callback(a) ((test_txn_sync_callback)? test_txn_sync_callback( a,test_txn_sync_callback_extra) : (void) 0)
-#if TOKU_DEBUG_TXN_SYNC
+#if defined(TOKU_DEBUG_TXN_SYNC)
#define toku_debug_txn_sync(a) toku_test_txn_sync_callback(a)
#else
#define toku_debug_txn_sync(a) ((void) 0)
-#endif
+#endif // defined(TOKU_DEBUG_TXN_SYNC)
typedef struct txn_manager *TXN_MANAGER;
diff --git a/storage/tokudb/PerconaFT/locktree/concurrent_tree.cc b/storage/tokudb/PerconaFT/locktree/concurrent_tree.cc
index 9347267db49..e07f32c98fb 100644
--- a/storage/tokudb/PerconaFT/locktree/concurrent_tree.cc
+++ b/storage/tokudb/PerconaFT/locktree/concurrent_tree.cc
@@ -32,6 +32,20 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/concurrent_tree.h b/storage/tokudb/PerconaFT/locktree/concurrent_tree.h
index 1eb339b7317..66a7ff176bb 100644
--- a/storage/tokudb/PerconaFT/locktree/concurrent_tree.h
+++ b/storage/tokudb/PerconaFT/locktree/concurrent_tree.h
@@ -32,6 +32,20 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/keyrange.cc b/storage/tokudb/PerconaFT/locktree/keyrange.cc
index 8c2a69d4703..2b4b3bbd4fd 100644
--- a/storage/tokudb/PerconaFT/locktree/keyrange.cc
+++ b/storage/tokudb/PerconaFT/locktree/keyrange.cc
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/keyrange.h b/storage/tokudb/PerconaFT/locktree/keyrange.h
index 079ac3d7a80..a454287cbc8 100644
--- a/storage/tokudb/PerconaFT/locktree/keyrange.h
+++ b/storage/tokudb/PerconaFT/locktree/keyrange.h
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/lock_request.cc b/storage/tokudb/PerconaFT/locktree/lock_request.cc
index 8d49ccf8a1f..3d4d43b9e25 100644
--- a/storage/tokudb/PerconaFT/locktree/lock_request.cc
+++ b/storage/tokudb/PerconaFT/locktree/lock_request.cc
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/lock_request.h b/storage/tokudb/PerconaFT/locktree/lock_request.h
index a8d8cb7785b..91a6ff12b52 100644
--- a/storage/tokudb/PerconaFT/locktree/lock_request.h
+++ b/storage/tokudb/PerconaFT/locktree/lock_request.h
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/locktree.cc b/storage/tokudb/PerconaFT/locktree/locktree.cc
index 069aae26f66..8ba3f0f00ae 100644
--- a/storage/tokudb/PerconaFT/locktree/locktree.cc
+++ b/storage/tokudb/PerconaFT/locktree/locktree.cc
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/locktree.h b/storage/tokudb/PerconaFT/locktree/locktree.h
index 1ba7a51b124..7006b6fb01d 100644
--- a/storage/tokudb/PerconaFT/locktree/locktree.h
+++ b/storage/tokudb/PerconaFT/locktree/locktree.h
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/manager.cc b/storage/tokudb/PerconaFT/locktree/manager.cc
index 6bb5c77bf32..21f8dc6cf01 100644
--- a/storage/tokudb/PerconaFT/locktree/manager.cc
+++ b/storage/tokudb/PerconaFT/locktree/manager.cc
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/range_buffer.cc b/storage/tokudb/PerconaFT/locktree/range_buffer.cc
index 3ddfd0faf97..d1f14fc4a52 100644
--- a/storage/tokudb/PerconaFT/locktree/range_buffer.cc
+++ b/storage/tokudb/PerconaFT/locktree/range_buffer.cc
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/range_buffer.h b/storage/tokudb/PerconaFT/locktree/range_buffer.h
index b0e36968e73..811b0f85e69 100644
--- a/storage/tokudb/PerconaFT/locktree/range_buffer.h
+++ b/storage/tokudb/PerconaFT/locktree/range_buffer.h
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/treenode.cc b/storage/tokudb/PerconaFT/locktree/treenode.cc
index cc3a4969643..0247242f975 100644
--- a/storage/tokudb/PerconaFT/locktree/treenode.cc
+++ b/storage/tokudb/PerconaFT/locktree/treenode.cc
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/treenode.h b/storage/tokudb/PerconaFT/locktree/treenode.h
index 08aad2b6636..981e8b5a9cf 100644
--- a/storage/tokudb/PerconaFT/locktree/treenode.h
+++ b/storage/tokudb/PerconaFT/locktree/treenode.h
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/txnid_set.cc b/storage/tokudb/PerconaFT/locktree/txnid_set.cc
index 82b59453156..bd4e9723155 100644
--- a/storage/tokudb/PerconaFT/locktree/txnid_set.cc
+++ b/storage/tokudb/PerconaFT/locktree/txnid_set.cc
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/txnid_set.h b/storage/tokudb/PerconaFT/locktree/txnid_set.h
index 109d7f798e4..81fd45b6dde 100644
--- a/storage/tokudb/PerconaFT/locktree/txnid_set.h
+++ b/storage/tokudb/PerconaFT/locktree/txnid_set.h
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/wfg.cc b/storage/tokudb/PerconaFT/locktree/wfg.cc
index 9a234f50060..26b7a3b5295 100644
--- a/storage/tokudb/PerconaFT/locktree/wfg.cc
+++ b/storage/tokudb/PerconaFT/locktree/wfg.cc
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/locktree/wfg.h b/storage/tokudb/PerconaFT/locktree/wfg.h
index c56886e1362..5c1599592e6 100644
--- a/storage/tokudb/PerconaFT/locktree/wfg.h
+++ b/storage/tokudb/PerconaFT/locktree/wfg.h
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
index d742555f878..786a6ef0546 100644
--- a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
+++ b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.cc
@@ -184,9 +184,9 @@ void toku_instr_file_io_end(toku_io_instrumentation &io_instr, ssize_t count) {
void toku_instr_mutex_init(const toku_instr_key &key, toku_mutex_t &mutex) {
mutex.psi_mutex = PSI_MUTEX_CALL(init_mutex)(key.id(), &mutex.pmutex);
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
mutex.instr_key_id = key.id();
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
}
void toku_instr_mutex_destroy(PSI_mutex *&mutex_instr) {
@@ -242,9 +242,9 @@ void toku_instr_mutex_unlock(PSI_mutex *mutex_instr) {
void toku_instr_cond_init(const toku_instr_key &key, toku_cond_t &cond) {
cond.psi_cond = PSI_COND_CALL(init_cond)(key.id(), &cond.pcond);
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
cond.instr_key_id = key.id();
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
}
void toku_instr_cond_destroy(PSI_cond *&cond_instr) {
@@ -295,9 +295,9 @@ void toku_instr_cond_broadcast(const toku_cond_t &cond) {
void toku_instr_rwlock_init(const toku_instr_key &key,
toku_pthread_rwlock_t &rwlock) {
rwlock.psi_rwlock = PSI_RWLOCK_CALL(init_rwlock)(key.id(), &rwlock.rwlock);
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
rwlock.instr_key_id = key.id();
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
}
void toku_instr_rwlock_destroy(PSI_rwlock *&rwlock_instr) {
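
The toku_instr_mysql.cc hunks above switch the guards from a value test (#if TOKU_PTHREAD_DEBUG) to a presence test (#if defined(TOKU_PTHREAD_DEBUG)). A minimal standalone sketch of that difference, with a made-up DEBUG_GUARD macro standing in for the real one:

    /* Sketch only; DEBUG_GUARD is hypothetical. Try building with no
       define, with -DDEBUG_GUARD=0, and with -DDEBUG_GUARD=1. */
    #include <cstdio>

    int main() {
    #if DEBUG_GUARD
        /* value test: taken only when the macro is defined to a nonzero
           value; an undefined macro silently evaluates to 0 here */
        std::puts("value test: enabled");
    #endif
    #if defined(DEBUG_GUARD)
        /* presence test: taken whenever the macro is defined at all,
           even as 0; only leaving it undefined skips the block */
        std::puts("presence test: enabled");
    #endif
        return 0;
    }
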
diff --git a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.h b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.h
index d6b0ed35ce9..beb833a163c 100644
--- a/storage/tokudb/PerconaFT/portability/toku_instr_mysql.h
+++ b/storage/tokudb/PerconaFT/portability/toku_instr_mysql.h
@@ -12,8 +12,15 @@
// undefine them here to avoid compilation errors.
#undef __STDC_FORMAT_MACROS
#undef __STDC_LIMIT_MACROS
-#include <mysql/psi/mysql_file.h> // PSI_file
-#include <mysql/psi/mysql_thread.h> // PSI_mutex
+#include "mysql/psi/mysql_file.h" // PSI_file
+#include "mysql/psi/mysql_thread.h" // PSI_mutex
+#include "mysql/psi/mysql_stage.h" // PSI_stage
+
+#if (MYSQL_VERSION_ID >= 80000) && (MYSQL_VERSION_ID <= 100000)
+#include "mysql/psi/mysql_cond.h"
+#include "mysql/psi/mysql_mutex.h"
+#include "mysql/psi/mysql_rwlock.h"
+#endif // (MYSQL_VERSION_ID >= 80000) && (MYSQL_VERSION_ID <= 100000)
#ifndef HAVE_PSI_MUTEX_INTERFACE
#error HAVE_PSI_MUTEX_INTERFACE required
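
The toku_instr_mysql.h hunk above gates the extra PSI includes on MYSQL_VERSION_ID, which encodes the server version as major*10000 + minor*100 + patch (so 10.0.38 is 100038 and falls outside the 80000..100000 window). A tiny self-contained sketch of how such a gate resolves, using a hard-coded DEMO_MYSQL_VERSION_ID rather than the real macro:

    /* Sketch only; DEMO_MYSQL_VERSION_ID is hypothetical. */
    #include <cstdio>

    #define DEMO_MYSQL_VERSION_ID 100038  /* MariaDB 10.0.38 as an example */

    int main() {
    #if (DEMO_MYSQL_VERSION_ID >= 80000) && (DEMO_MYSQL_VERSION_ID <= 100000)
        std::puts("inside the gate: the split PSI headers would be included");
    #else
        std::puts("outside the gate: only mysql_file.h/mysql_thread.h/mysql_stage.h apply");
    #endif
        return 0;
    }
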
diff --git a/storage/tokudb/PerconaFT/portability/toku_pthread.h b/storage/tokudb/PerconaFT/portability/toku_pthread.h
index a0dfcc246a7..d05c6fabf53 100644
--- a/storage/tokudb/PerconaFT/portability/toku_pthread.h
+++ b/storage/tokudb/PerconaFT/portability/toku_pthread.h
@@ -64,23 +64,23 @@ struct toku_mutex_t {
pthread_mutex_t pmutex;
struct PSI_mutex
*psi_mutex; /* The performance schema instrumentation hook */
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
pthread_t owner; // = pthread_self(); // for debugging
bool locked;
bool valid;
pfs_key_t instr_key_id;
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
};
struct toku_cond_t {
pthread_cond_t pcond;
struct PSI_cond *psi_cond;
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
pfs_key_t instr_key_id;
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
};
-#ifdef TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
#define TOKU_COND_INITIALIZER \
{ \
.pcond = PTHREAD_COND_INITIALIZER, .psi_cond = nullptr, \
@@ -89,14 +89,14 @@ struct toku_cond_t {
#else
#define TOKU_COND_INITIALIZER \
{ .pcond = PTHREAD_COND_INITIALIZER, .psi_cond = nullptr }
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
struct toku_pthread_rwlock_t {
pthread_rwlock_t rwlock;
struct PSI_rwlock *psi_rwlock;
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
pfs_key_t instr_key_id;
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
};
typedef struct toku_mutex_aligned {
@@ -117,7 +117,7 @@ typedef struct toku_mutex_aligned {
#define ZERO_MUTEX_INITIALIZER \
{}
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
#define TOKU_MUTEX_INITIALIZER \
{ \
.pmutex = PTHREAD_MUTEX_INITIALIZER, .psi_mutex = nullptr, .owner = 0, \
@@ -126,12 +126,12 @@ typedef struct toku_mutex_aligned {
#else
#define TOKU_MUTEX_INITIALIZER \
{ .pmutex = PTHREAD_MUTEX_INITIALIZER, .psi_mutex = nullptr }
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
// Darwin doesn't provide adaptive mutexes
#if defined(__APPLE__)
#define TOKU_MUTEX_ADAPTIVE PTHREAD_MUTEX_DEFAULT
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
#define TOKU_ADAPTIVE_MUTEX_INITIALIZER \
{ \
.pmutex = PTHREAD_MUTEX_INITIALIZER, .psi_mutex = nullptr, .owner = 0, \
@@ -140,10 +140,10 @@ typedef struct toku_mutex_aligned {
#else
#define TOKU_ADAPTIVE_MUTEX_INITIALIZER \
{ .pmutex = PTHREAD_MUTEX_INITIALIZER, .psi_mutex = nullptr }
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
#else // __FreeBSD__, __linux__, at least
#define TOKU_MUTEX_ADAPTIVE PTHREAD_MUTEX_ADAPTIVE_NP
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
#define TOKU_ADAPTIVE_MUTEX_INITIALIZER \
{ \
.pmutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, .psi_mutex = nullptr, \
@@ -152,8 +152,8 @@ typedef struct toku_mutex_aligned {
#else
#define TOKU_ADAPTIVE_MUTEX_INITIALIZER \
{ .pmutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, .psi_mutex = nullptr }
-#endif
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
+#endif // defined(__APPLE__)
// Different OSes implement mutexes as different amounts of nested structs.
// C++ will fill out all missing values with zeroes if you provide at least one
@@ -198,7 +198,7 @@ toku_mutexattr_destroy(toku_pthread_mutexattr_t *attr) {
assert_zero(r);
}
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
static inline void toku_mutex_assert_locked(const toku_mutex_t *mutex) {
invariant(mutex->locked);
invariant(mutex->owner == pthread_self());
@@ -207,7 +207,7 @@ static inline void toku_mutex_assert_locked(const toku_mutex_t *mutex) {
static inline void
toku_mutex_assert_locked(const toku_mutex_t *mutex __attribute__((unused))) {
}
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
// asserting that a mutex is unlocked only makes sense
// if the calling thread can guarantee that no other threads
@@ -217,7 +217,7 @@ toku_mutex_assert_locked(const toku_mutex_t *mutex __attribute__((unused))) {
// when a node is locked the caller knows that no other threads
// can be trying to lock its children's mutexes. the children
// are in one of two fixed states: locked or unlocked.
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
static inline void
toku_mutex_assert_unlocked(toku_mutex_t *mutex) {
invariant(mutex->owner == 0);
@@ -226,7 +226,7 @@ toku_mutex_assert_unlocked(toku_mutex_t *mutex) {
#else
static inline void toku_mutex_assert_unlocked(toku_mutex_t *mutex
__attribute__((unused))) {}
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
#define toku_mutex_lock(M) \
toku_mutex_lock_with_source_location(M, __FILE__, __LINE__)
@@ -241,13 +241,13 @@ static inline void toku_cond_init(toku_cond_t *cond,
toku_mutex_trylock_with_source_location(M, __FILE__, __LINE__)
inline void toku_mutex_unlock(toku_mutex_t *mutex) {
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
invariant(mutex->owner == pthread_self());
invariant(mutex->valid);
invariant(mutex->locked);
mutex->locked = false;
mutex->owner = 0;
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
toku_instr_mutex_unlock(mutex->psi_mutex);
int r = pthread_mutex_unlock(&mutex->pmutex);
assert_zero(r);
@@ -264,13 +264,13 @@ inline void toku_mutex_lock_with_source_location(toku_mutex_t *mutex,
toku_instr_mutex_lock_end(mutex_instr, r);
assert_zero(r);
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
invariant(mutex->valid);
invariant(!mutex->locked);
invariant(mutex->owner == 0);
mutex->locked = true;
mutex->owner = pthread_self();
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
}
inline int toku_mutex_trylock_with_source_location(toku_mutex_t *mutex,
@@ -283,7 +283,7 @@ inline int toku_mutex_trylock_with_source_location(toku_mutex_t *mutex,
const int r = pthread_mutex_lock(&mutex->pmutex);
toku_instr_mutex_lock_end(mutex_instr, r);
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
if (r == 0) {
invariant(mutex->valid);
invariant(!mutex->locked);
@@ -291,7 +291,7 @@ inline int toku_mutex_trylock_with_source_location(toku_mutex_t *mutex,
mutex->locked = true;
mutex->owner = pthread_self();
}
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
return r;
}
@@ -320,11 +320,11 @@ inline void toku_cond_wait_with_source_location(toku_cond_t *cond,
const char *src_file,
uint src_line) {
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
invariant(mutex->locked);
mutex->locked = false;
mutex->owner = 0;
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
/* Instrumentation start */
toku_cond_instrumentation cond_instr;
@@ -342,11 +342,11 @@ inline void toku_cond_wait_with_source_location(toku_cond_t *cond,
toku_instr_cond_wait_end(cond_instr, r);
assert_zero(r);
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
invariant(!mutex->locked);
mutex->locked = true;
mutex->owner = pthread_self();
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
}
inline int toku_cond_timedwait_with_source_location(toku_cond_t *cond,
@@ -354,11 +354,11 @@ inline int toku_cond_timedwait_with_source_location(toku_cond_t *cond,
toku_timespec_t *wakeup_at,
const char *src_file,
uint src_line) {
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
invariant(mutex->locked);
mutex->locked = false;
mutex->owner = 0;
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
/* Instrumentation start */
toku_cond_instrumentation cond_instr;
@@ -376,11 +376,11 @@ inline int toku_cond_timedwait_with_source_location(toku_cond_t *cond,
/* Instrumentation end */
toku_instr_cond_wait_end(cond_instr, r);
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
invariant(!mutex->locked);
mutex->locked = true;
mutex->owner = pthread_self();
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
return r;
}
@@ -399,26 +399,26 @@ inline void toku_cond_broadcast(toku_cond_t *cond) {
inline void toku_mutex_init(const toku_instr_key &key,
toku_mutex_t *mutex,
const toku_pthread_mutexattr_t *attr) {
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
mutex->valid = true;
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
toku_instr_mutex_init(key, *mutex);
const int r = pthread_mutex_init(&mutex->pmutex, attr);
assert_zero(r);
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
mutex->locked = false;
invariant(mutex->valid);
mutex->valid = true;
mutex->owner = 0;
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
}
inline void toku_mutex_destroy(toku_mutex_t *mutex) {
-#if TOKU_PTHREAD_DEBUG
+#if defined(TOKU_PTHREAD_DEBUG)
invariant(mutex->valid);
mutex->valid = false;
invariant(!mutex->locked);
-#endif
+#endif // defined(TOKU_PTHREAD_DEBUG)
toku_instr_mutex_destroy(mutex->psi_mutex);
int r = pthread_mutex_destroy(&mutex->pmutex);
assert_zero(r);
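
All of the toku_pthread.h hunks above touch the same debug-only bookkeeping: with TOKU_PTHREAD_DEBUG defined, each toku_mutex_t carries owner/locked fields that the lock, unlock and assert helpers maintain and check. A stripped-down sketch of that owner-tracking pattern, with invented names (dbg_mutex_t, dbg_lock, dbg_unlock) rather than the PerconaFT API itself:

    /* Sketch only; dbg_* names are hypothetical. */
    #include <cassert>
    #include <pthread.h>

    struct dbg_mutex_t {
        pthread_mutex_t pmutex;
        pthread_t owner;   /* meaningful only while locked */
        bool locked;
    };

    static void dbg_lock(dbg_mutex_t *m) {
        int r = pthread_mutex_lock(&m->pmutex);
        assert(r == 0);
        (void)r;
        /* record ownership only after the lock is actually held */
        m->locked = true;
        m->owner = pthread_self();
    }

    static void dbg_unlock(dbg_mutex_t *m) {
        /* the same checks toku_mutex_assert_locked() performs */
        assert(m->locked && pthread_equal(m->owner, pthread_self()));
        m->locked = false;
        m->owner = pthread_t();
        int r = pthread_mutex_unlock(&m->pmutex);
        assert(r == 0);
        (void)r;
    }

    int main() {
        dbg_mutex_t m = {};
        pthread_mutex_init(&m.pmutex, nullptr);
        dbg_lock(&m);
        dbg_unlock(&m);
        pthread_mutex_destroy(&m.pmutex);
        return 0;
    }
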
diff --git a/storage/tokudb/PerconaFT/util/growable_array.h b/storage/tokudb/PerconaFT/util/growable_array.h
index e8873ae4abd..ad60ea6395b 100644
--- a/storage/tokudb/PerconaFT/util/growable_array.h
+++ b/storage/tokudb/PerconaFT/util/growable_array.h
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/PerconaFT/util/omt.cc b/storage/tokudb/PerconaFT/util/omt.cc
index 1fae0712c77..846c4df7f54 100644
--- a/storage/tokudb/PerconaFT/util/omt.cc
+++ b/storage/tokudb/PerconaFT/util/omt.cc
@@ -32,1105 +32,1356 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
-#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
+#ident \
+ "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
-#include <string.h>
#include <db.h>
+#include <string.h>
#include <portability/memory.h>
namespace toku {
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::create(void) {
- this->create_internal(2);
- if (supports_marks) {
- this->convert_to_tree();
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::create(void) {
+ this->create_internal(2);
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
}
-}
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::create_no_array(void) {
- if (!supports_marks) {
- this->create_internal_no_array(0);
- } else {
- this->is_array = false;
- this->capacity = 0;
- this->d.t.nodes = nullptr;
- this->d.t.root.set_to_null();
- this->d.t.free_idx = 0;
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::create_from_sorted_array(const omtdata_t *const values, const uint32_t numvalues) {
- this->create_internal(numvalues);
- memcpy(this->d.a.values, values, numvalues * (sizeof values[0]));
- this->d.a.num_values = numvalues;
- if (supports_marks) {
- this->convert_to_tree();
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::create_steal_sorted_array(omtdata_t **const values, const uint32_t numvalues, const uint32_t new_capacity) {
- paranoid_invariant_notnull(values);
- this->create_internal_no_array(new_capacity);
- this->d.a.num_values = numvalues;
- this->d.a.values = *values;
- *values = nullptr;
- if (supports_marks) {
- this->convert_to_tree();
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-int omt<omtdata_t, omtdataout_t, supports_marks>::split_at(omt *const newomt, const uint32_t idx) {
- barf_if_marked(*this);
- paranoid_invariant_notnull(newomt);
- if (idx > this->size()) { return EINVAL; }
- this->convert_to_array();
- const uint32_t newsize = this->size() - idx;
- newomt->create_from_sorted_array(&this->d.a.values[this->d.a.start_idx + idx], newsize);
- this->d.a.num_values = idx;
- this->maybe_resize_array(idx);
- if (supports_marks) {
- this->convert_to_tree();
- }
- return 0;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::merge(omt *const leftomt, omt *const rightomt) {
- barf_if_marked(*this);
- paranoid_invariant_notnull(leftomt);
- paranoid_invariant_notnull(rightomt);
- const uint32_t leftsize = leftomt->size();
- const uint32_t rightsize = rightomt->size();
- const uint32_t newsize = leftsize + rightsize;
-
- if (leftomt->is_array) {
- if (leftomt->capacity - (leftomt->d.a.start_idx + leftomt->d.a.num_values) >= rightsize) {
- this->create_steal_sorted_array(&leftomt->d.a.values, leftomt->d.a.num_values, leftomt->capacity);
- this->d.a.start_idx = leftomt->d.a.start_idx;
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::create_no_array(void) {
+ if (!supports_marks) {
+ this->create_internal_no_array(0);
+ } else {
+ this->is_array = false;
+ this->capacity = 0;
+ this->d.t.nodes = nullptr;
+ this->d.t.root.set_to_null();
+ this->d.t.free_idx = 0;
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::create_from_sorted_array(
+ const omtdata_t *const values,
+ const uint32_t numvalues) {
+ this->create_internal(numvalues);
+ memcpy(this->d.a.values, values, numvalues * (sizeof values[0]));
+ this->d.a.num_values = numvalues;
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void
+ omt<omtdata_t, omtdataout_t, supports_marks>::create_steal_sorted_array(
+ omtdata_t **const values,
+ const uint32_t numvalues,
+ const uint32_t new_capacity) {
+ paranoid_invariant_notnull(values);
+ this->create_internal_no_array(new_capacity);
+ this->d.a.num_values = numvalues;
+ this->d.a.values = *values;
+ *values = nullptr;
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::split_at(
+ omt *const newomt,
+ const uint32_t idx) {
+ barf_if_marked(*this);
+ paranoid_invariant_notnull(newomt);
+ if (idx > this->size()) {
+ return EINVAL;
+ }
+ this->convert_to_array();
+ const uint32_t newsize = this->size() - idx;
+ newomt->create_from_sorted_array(
+ &this->d.a.values[this->d.a.start_idx + idx], newsize);
+ this->d.a.num_values = idx;
+ this->maybe_resize_array(idx);
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::merge(
+ omt *const leftomt,
+ omt *const rightomt) {
+ barf_if_marked(*this);
+ paranoid_invariant_notnull(leftomt);
+ paranoid_invariant_notnull(rightomt);
+ const uint32_t leftsize = leftomt->size();
+ const uint32_t rightsize = rightomt->size();
+ const uint32_t newsize = leftsize + rightsize;
+
+ if (leftomt->is_array) {
+ if (leftomt->capacity -
+ (leftomt->d.a.start_idx + leftomt->d.a.num_values) >=
+ rightsize) {
+ this->create_steal_sorted_array(&leftomt->d.a.values,
+ leftomt->d.a.num_values,
+ leftomt->capacity);
+ this->d.a.start_idx = leftomt->d.a.start_idx;
+ } else {
+ this->create_internal(newsize);
+ memcpy(&this->d.a.values[0],
+ &leftomt->d.a.values[leftomt->d.a.start_idx],
+ leftomt->d.a.num_values * (sizeof this->d.a.values[0]));
+ }
} else {
this->create_internal(newsize);
+ leftomt->fill_array_with_subtree_values(&this->d.a.values[0],
+ leftomt->d.t.root);
+ }
+ leftomt->destroy();
+ this->d.a.num_values = leftsize;
+
+ if (rightomt->is_array) {
+ memcpy(
+ &this->d.a.values[this->d.a.start_idx + this->d.a.num_values],
+ &rightomt->d.a.values[rightomt->d.a.start_idx],
+ rightomt->d.a.num_values * (sizeof this->d.a.values[0]));
+ } else {
+ rightomt->fill_array_with_subtree_values(
+ &this->d.a.values[this->d.a.start_idx + this->d.a.num_values],
+ rightomt->d.t.root);
+ }
+ rightomt->destroy();
+ this->d.a.num_values += rightsize;
+ paranoid_invariant(this->size() == newsize);
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::clone(const omt &src) {
+ barf_if_marked(*this);
+ this->create_internal(src.size());
+ if (src.is_array) {
memcpy(&this->d.a.values[0],
- &leftomt->d.a.values[leftomt->d.a.start_idx],
- leftomt->d.a.num_values * (sizeof this->d.a.values[0]));
- }
- } else {
- this->create_internal(newsize);
- leftomt->fill_array_with_subtree_values(&this->d.a.values[0], leftomt->d.t.root);
- }
- leftomt->destroy();
- this->d.a.num_values = leftsize;
-
- if (rightomt->is_array) {
- memcpy(&this->d.a.values[this->d.a.start_idx + this->d.a.num_values],
- &rightomt->d.a.values[rightomt->d.a.start_idx],
- rightomt->d.a.num_values * (sizeof this->d.a.values[0]));
- } else {
- rightomt->fill_array_with_subtree_values(&this->d.a.values[this->d.a.start_idx + this->d.a.num_values],
- rightomt->d.t.root);
- }
- rightomt->destroy();
- this->d.a.num_values += rightsize;
- paranoid_invariant(this->size() == newsize);
- if (supports_marks) {
- this->convert_to_tree();
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::clone(const omt &src) {
- barf_if_marked(*this);
- this->create_internal(src.size());
- if (src.is_array) {
- memcpy(&this->d.a.values[0], &src.d.a.values[src.d.a.start_idx], src.d.a.num_values * (sizeof this->d.a.values[0]));
- } else {
- src.fill_array_with_subtree_values(&this->d.a.values[0], src.d.t.root);
- }
- this->d.a.num_values = src.size();
- if (supports_marks) {
- this->convert_to_tree();
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::clear(void) {
- if (this->is_array) {
- this->d.a.start_idx = 0;
- this->d.a.num_values = 0;
- } else {
- this->d.t.root.set_to_null();
- this->d.t.free_idx = 0;
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::destroy(void) {
- this->clear();
- this->capacity = 0;
- if (this->is_array) {
- if (this->d.a.values != nullptr) {
- toku_free(this->d.a.values);
+ &src.d.a.values[src.d.a.start_idx],
+ src.d.a.num_values * (sizeof this->d.a.values[0]));
+ } else {
+ src.fill_array_with_subtree_values(&this->d.a.values[0],
+ src.d.t.root);
}
- this->d.a.values = nullptr;
- } else {
- if (this->d.t.nodes != nullptr) {
- toku_free(this->d.t.nodes);
+ this->d.a.num_values = src.size();
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::clear(void) {
+ if (this->is_array) {
+ this->d.a.start_idx = 0;
+ this->d.a.num_values = 0;
+ } else {
+ this->d.t.root.set_to_null();
+ this->d.t.free_idx = 0;
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::destroy(void) {
+ this->clear();
+ this->capacity = 0;
+ if (this->is_array) {
+ if (this->d.a.values != nullptr) {
+ toku_free(this->d.a.values);
+ }
+ this->d.a.values = nullptr;
+ } else {
+ if (this->d.t.nodes != nullptr) {
+ toku_free(this->d.t.nodes);
+ }
+ this->d.t.nodes = nullptr;
}
- this->d.t.nodes = nullptr;
}
-}
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-uint32_t omt<omtdata_t, omtdataout_t, supports_marks>::size(void) const {
- if (this->is_array) {
- return this->d.a.num_values;
- } else {
- return this->nweight(this->d.t.root);
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ uint32_t omt<omtdata_t, omtdataout_t, supports_marks>::size(void) const {
+ if (this->is_array) {
+ return this->d.a.num_values;
+ } else {
+ return this->nweight(this->d.t.root);
+ }
}
-}
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::insert(
+ const omtdata_t &value,
+ const omtcmp_t &v,
+ uint32_t *const idx) {
+ int r;
+ uint32_t insert_idx;
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::insert(const omtdata_t &value, const omtcmp_t &v, uint32_t *const idx) {
- int r;
- uint32_t insert_idx;
+ r = this->find_zero<omtcmp_t, h>(v, nullptr, &insert_idx);
+ if (r == 0) {
+ if (idx)
+ *idx = insert_idx;
+ return DB_KEYEXIST;
+ }
+ if (r != DB_NOTFOUND)
+ return r;
- r = this->find_zero<omtcmp_t, h>(v, nullptr, &insert_idx);
- if (r==0) {
- if (idx) *idx = insert_idx;
- return DB_KEYEXIST;
+ if ((r = this->insert_at(value, insert_idx)))
+ return r;
+ if (idx)
+ *idx = insert_idx;
+
+ return 0;
}
- if (r != DB_NOTFOUND) return r;
- if ((r = this->insert_at(value, insert_idx))) return r;
- if (idx) *idx = insert_idx;
+ // The following 3 functions implement a static if for us.
+ template <typename omtdata_t, typename omtdataout_t>
+ static void barf_if_marked(
+ const omt<omtdata_t, omtdataout_t, false> &UU(omt)) {}
- return 0;
-}
+ template <typename omtdata_t, typename omtdataout_t>
+ static void barf_if_marked(const omt<omtdata_t, omtdataout_t, true> &omt) {
+ invariant(!omt.has_marks());
+ }
-// The following 3 functions implement a static if for us.
-template<typename omtdata_t, typename omtdataout_t>
-static void barf_if_marked(const omt<omtdata_t, omtdataout_t, false> &UU(omt)) {
-}
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ bool omt<omtdata_t, omtdataout_t, supports_marks>::has_marks(void) const {
+ static_assert(supports_marks, "Does not support marks");
+ if (this->d.t.root.is_null()) {
+ return false;
+ }
+ const omt_node &node = this->d.t.nodes[this->d.t.root.get_index()];
+ return node.get_marks_below() || node.get_marked();
+ }
-template<typename omtdata_t, typename omtdataout_t>
-static void barf_if_marked(const omt<omtdata_t, omtdataout_t, true> &omt) {
- invariant(!omt.has_marks());
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-bool omt<omtdata_t, omtdataout_t, supports_marks>::has_marks(void) const {
- static_assert(supports_marks, "Does not support marks");
- if (this->d.t.root.is_null()) {
- return false;
- }
- const omt_node &node = this->d.t.nodes[this->d.t.root.get_index()];
- return node.get_marks_below() || node.get_marked();
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-int omt<omtdata_t, omtdataout_t, supports_marks>::insert_at(const omtdata_t &value, const uint32_t idx) {
- barf_if_marked(*this);
- if (idx > this->size()) { return EINVAL; }
-
- this->maybe_resize_or_convert(this->size() + 1);
- if (this->is_array && idx != this->d.a.num_values &&
- (idx != 0 || this->d.a.start_idx == 0)) {
- this->convert_to_tree();
- }
- if (this->is_array) {
- if (idx == this->d.a.num_values) {
- this->d.a.values[this->d.a.start_idx + this->d.a.num_values] = value;
- }
- else {
- this->d.a.values[--this->d.a.start_idx] = value;
- }
- this->d.a.num_values++;
- }
- else {
- subtree *rebalance_subtree = nullptr;
- this->insert_internal(&this->d.t.root, value, idx, &rebalance_subtree);
- if (rebalance_subtree != nullptr) {
- this->rebalance(rebalance_subtree);
- }
- }
- return 0;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-int omt<omtdata_t, omtdataout_t, supports_marks>::set_at(const omtdata_t &value, const uint32_t idx) {
- barf_if_marked(*this);
- if (idx >= this->size()) { return EINVAL; }
-
- if (this->is_array) {
- this->set_at_internal_array(value, idx);
- } else {
- this->set_at_internal(this->d.t.root, value, idx);
- }
- return 0;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-int omt<omtdata_t, omtdataout_t, supports_marks>::delete_at(const uint32_t idx) {
- barf_if_marked(*this);
- if (idx >= this->size()) { return EINVAL; }
-
- this->maybe_resize_or_convert(this->size() - 1);
- if (this->is_array && idx != 0 && idx != this->d.a.num_values - 1) {
- this->convert_to_tree();
- }
- if (this->is_array) {
- //Testing for 0 does not rule out it being the last entry.
- //Test explicitly for num_values-1
- if (idx != this->d.a.num_values - 1) {
- this->d.a.start_idx++;
- }
- this->d.a.num_values--;
- } else {
- subtree *rebalance_subtree = nullptr;
- this->delete_internal(&this->d.t.root, idx, nullptr, &rebalance_subtree);
- if (rebalance_subtree != nullptr) {
- this->rebalance(rebalance_subtree);
- }
- }
- return 0;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename iterate_extra_t,
- int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::iterate(iterate_extra_t *const iterate_extra) const {
- return this->iterate_on_range<iterate_extra_t, f>(0, this->size(), iterate_extra);
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename iterate_extra_t,
- int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_on_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) const {
- if (right > this->size()) { return EINVAL; }
- if (left == right) { return 0; }
- if (this->is_array) {
- return this->iterate_internal_array<iterate_extra_t, f>(left, right, iterate_extra);
- }
- return this->iterate_internal<iterate_extra_t, f>(left, right, this->d.t.root, 0, iterate_extra);
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename iterate_extra_t,
- int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_and_mark_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) {
- static_assert(supports_marks, "does not support marks");
- if (right > this->size()) { return EINVAL; }
- if (left == right) { return 0; }
- paranoid_invariant(!this->is_array);
- return this->iterate_and_mark_range_internal<iterate_extra_t, f>(left, right, this->d.t.root, 0, iterate_extra);
-}
-
-//TODO: We can optimize this if we steal 3 bits. 1 bit: this node is marked. 1 bit: left subtree has marks. 1 bit: right subtree has marks.
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename iterate_extra_t,
- int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_over_marked(iterate_extra_t *const iterate_extra) const {
- static_assert(supports_marks, "does not support marks");
- paranoid_invariant(!this->is_array);
- return this->iterate_over_marked_internal<iterate_extra_t, f>(this->d.t.root, 0, iterate_extra);
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::unmark(const subtree &subtree, const uint32_t index, GrowableArray<node_idx> *const indexes) {
- if (subtree.is_null()) { return; }
- omt_node &n = this->d.t.nodes[subtree.get_index()];
- const uint32_t index_root = index + this->nweight(n.left);
-
- const bool below = n.get_marks_below();
- if (below) {
- this->unmark(n.left, index, indexes);
- }
- if (n.get_marked()) {
- indexes->push(index_root);
- }
- n.clear_stolen_bits();
- if (below) {
- this->unmark(n.right, index_root + 1, indexes);
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::delete_all_marked(void) {
- static_assert(supports_marks, "does not support marks");
- if (!this->has_marks()) {
- return;
- }
- paranoid_invariant(!this->is_array);
- GrowableArray<node_idx> marked_indexes;
- marked_indexes.init();
-
- // Remove all marks.
- // We need to delete all the stolen bits before calling delete_at to prevent barfing.
- this->unmark(this->d.t.root, 0, &marked_indexes);
-
- for (uint32_t i = 0; i < marked_indexes.get_size(); i++) {
- // Delete from left to right, shift by number already deleted.
- // Alternative is delete from right to left.
- int r = this->delete_at(marked_indexes.fetch_unchecked(i) - i);
- lazy_assert_zero(r);
- }
- marked_indexes.deinit();
- barf_if_marked(*this);
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-uint32_t omt<omtdata_t, omtdataout_t, supports_marks>::verify_marks_consistent_internal(const subtree &subtree, const bool UU(allow_marks)) const {
- if (subtree.is_null()) {
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::insert_at(
+ const omtdata_t &value,
+ const uint32_t idx) {
+ barf_if_marked(*this);
+ if (idx > this->size()) {
+ return EINVAL;
+ }
+
+ this->maybe_resize_or_convert(this->size() + 1);
+ if (this->is_array && idx != this->d.a.num_values &&
+ (idx != 0 || this->d.a.start_idx == 0)) {
+ this->convert_to_tree();
+ }
+ if (this->is_array) {
+ if (idx == this->d.a.num_values) {
+ this->d.a.values[this->d.a.start_idx + this->d.a.num_values] =
+ value;
+ } else {
+ this->d.a.values[--this->d.a.start_idx] = value;
+ }
+ this->d.a.num_values++;
+ } else {
+ subtree *rebalance_subtree = nullptr;
+ this->insert_internal(
+ &this->d.t.root, value, idx, &rebalance_subtree);
+ if (rebalance_subtree != nullptr) {
+ this->rebalance(rebalance_subtree);
+ }
+ }
return 0;
}
- const omt_node &node = this->d.t.nodes[subtree.get_index()];
- uint32_t num_marks = verify_marks_consistent_internal(node.left, node.get_marks_below());
- num_marks += verify_marks_consistent_internal(node.right, node.get_marks_below());
- if (node.get_marks_below()) {
- paranoid_invariant(allow_marks);
- paranoid_invariant(num_marks > 0);
- } else {
- // redundant with invariant below, but nice to have explicitly
- paranoid_invariant(num_marks == 0);
- }
- if (node.get_marked()) {
- paranoid_invariant(allow_marks);
- ++num_marks;
- }
- return num_marks;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::verify_marks_consistent(void) const {
- static_assert(supports_marks, "does not support marks");
- paranoid_invariant(!this->is_array);
- this->verify_marks_consistent_internal(this->d.t.root, true);
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename iterate_extra_t,
- int (*f)(omtdata_t *, const uint32_t, iterate_extra_t *const)>
-void omt<omtdata_t, omtdataout_t, supports_marks>::iterate_ptr(iterate_extra_t *const iterate_extra) {
- if (this->is_array) {
- this->iterate_ptr_internal_array<iterate_extra_t, f>(0, this->size(), iterate_extra);
- } else {
- this->iterate_ptr_internal<iterate_extra_t, f>(0, this->size(), this->d.t.root, 0, iterate_extra);
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-int omt<omtdata_t, omtdataout_t, supports_marks>::fetch(const uint32_t idx, omtdataout_t *const value) const {
- if (idx >= this->size()) { return EINVAL; }
- if (this->is_array) {
- this->fetch_internal_array(idx, value);
- } else {
- this->fetch_internal(this->d.t.root, idx, value);
- }
- return 0;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename omtcmp_t,
- int (*h)(const omtdata_t &, const omtcmp_t &)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::find_zero(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const {
- uint32_t tmp_index;
- uint32_t *const child_idxp = (idxp != nullptr) ? idxp : &tmp_index;
- int r;
- if (this->is_array) {
- r = this->find_internal_zero_array<omtcmp_t, h>(extra, value, child_idxp);
- }
- else {
- r = this->find_internal_zero<omtcmp_t, h>(this->d.t.root, extra, value, child_idxp);
- }
- return r;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename omtcmp_t,
- int (*h)(const omtdata_t &, const omtcmp_t &)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::find(const omtcmp_t &extra, int direction, omtdataout_t *const value, uint32_t *const idxp) const {
- uint32_t tmp_index;
- uint32_t *const child_idxp = (idxp != nullptr) ? idxp : &tmp_index;
- paranoid_invariant(direction != 0);
- if (direction < 0) {
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::set_at(
+ const omtdata_t &value,
+ const uint32_t idx) {
+ barf_if_marked(*this);
+ if (idx >= this->size()) {
+ return EINVAL;
+ }
+
if (this->is_array) {
- return this->find_internal_minus_array<omtcmp_t, h>(extra, value, child_idxp);
+ this->set_at_internal_array(value, idx);
} else {
- return this->find_internal_minus<omtcmp_t, h>(this->d.t.root, extra, value, child_idxp);
+ this->set_at_internal(this->d.t.root, value, idx);
+ }
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::delete_at(
+ const uint32_t idx) {
+ barf_if_marked(*this);
+ if (idx >= this->size()) {
+ return EINVAL;
+ }
+
+ this->maybe_resize_or_convert(this->size() - 1);
+ if (this->is_array && idx != 0 && idx != this->d.a.num_values - 1) {
+ this->convert_to_tree();
}
- } else {
if (this->is_array) {
- return this->find_internal_plus_array<omtcmp_t, h>(extra, value, child_idxp);
+ // Testing for 0 does not rule out it being the last entry.
+ // Test explicitly for num_values-1
+ if (idx != this->d.a.num_values - 1) {
+ this->d.a.start_idx++;
+ }
+ this->d.a.num_values--;
} else {
- return this->find_internal_plus<omtcmp_t, h>(this->d.t.root, extra, value, child_idxp);
+ subtree *rebalance_subtree = nullptr;
+ this->delete_internal(
+ &this->d.t.root, idx, nullptr, &rebalance_subtree);
+ if (rebalance_subtree != nullptr) {
+ this->rebalance(rebalance_subtree);
+ }
}
+ return 0;
}
-}
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-size_t omt<omtdata_t, omtdataout_t, supports_marks>::memory_size(void) {
- if (this->is_array) {
- return (sizeof *this) + this->capacity * (sizeof this->d.a.values[0]);
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::iterate(
+ iterate_extra_t *const iterate_extra) const {
+ return this->iterate_on_range<iterate_extra_t, f>(
+ 0, this->size(), iterate_extra);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_on_range(
+ const uint32_t left,
+ const uint32_t right,
+ iterate_extra_t *const iterate_extra) const {
+ if (right > this->size()) {
+ return EINVAL;
+ }
+ if (left == right) {
+ return 0;
+ }
+ if (this->is_array) {
+ return this->iterate_internal_array<iterate_extra_t, f>(
+ left, right, iterate_extra);
+ }
+ return this->iterate_internal<iterate_extra_t, f>(
+ left, right, this->d.t.root, 0, iterate_extra);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_and_mark_range(
+ const uint32_t left,
+ const uint32_t right,
+ iterate_extra_t *const iterate_extra) {
+ static_assert(supports_marks, "does not support marks");
+ if (right > this->size()) {
+ return EINVAL;
+ }
+ if (left == right) {
+ return 0;
+ }
+ paranoid_invariant(!this->is_array);
+ return this->iterate_and_mark_range_internal<iterate_extra_t, f>(
+ left, right, this->d.t.root, 0, iterate_extra);
+ }
+
+ // TODO: We can optimize this if we steal 3 bits. 1 bit: this node is
+ // marked. 1 bit: left subtree has marks. 1 bit: right subtree has marks.
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_over_marked(
+ iterate_extra_t *const iterate_extra) const {
+ static_assert(supports_marks, "does not support marks");
+ paranoid_invariant(!this->is_array);
+ return this->iterate_over_marked_internal<iterate_extra_t, f>(
+ this->d.t.root, 0, iterate_extra);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::unmark(
+ const subtree &st,
+ const uint32_t index,
+ GrowableArray<node_idx> *const indexes) {
+ if (st.is_null()) {
+ return;
+ }
+ omt_node &n = this->d.t.nodes[st.get_index()];
+ const uint32_t index_root = index + this->nweight(n.left);
+
+ const bool below = n.get_marks_below();
+ if (below) {
+ this->unmark(n.left, index, indexes);
+ }
+ if (n.get_marked()) {
+ indexes->push(index_root);
+ }
+ n.clear_stolen_bits();
+ if (below) {
+ this->unmark(n.right, index_root + 1, indexes);
+ }
}
- return (sizeof *this) + this->capacity * (sizeof this->d.t.nodes[0]);
-}
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::delete_all_marked(void) {
+ static_assert(supports_marks, "does not support marks");
+ if (!this->has_marks()) {
+ return;
+ }
+ paranoid_invariant(!this->is_array);
+ GrowableArray<node_idx> marked_indexes;
+ marked_indexes.init();
+
+ // Remove all marks.
+ // We need to delete all the stolen bits before calling delete_at to
+ // prevent barfing.
+ this->unmark(this->d.t.root, 0, &marked_indexes);
+
+ for (uint32_t i = 0; i < marked_indexes.get_size(); i++) {
+ // Delete from left to right, shift by number already deleted.
+ // Alternative is delete from right to left.
+ int r = this->delete_at(marked_indexes.fetch_unchecked(i) - i);
+ lazy_assert_zero(r);
+ }
+ marked_indexes.deinit();
+ barf_if_marked(*this);
+ }
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::create_internal_no_array(const uint32_t new_capacity) {
- this->is_array = true;
- this->d.a.start_idx = 0;
- this->d.a.num_values = 0;
- this->d.a.values = nullptr;
- this->capacity = new_capacity;
-}
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ uint32_t omt<omtdata_t, omtdataout_t, supports_marks>::
+ verify_marks_consistent_internal(const subtree &st,
+ const bool UU(allow_marks)) const {
+ if (st.is_null()) {
+ return 0;
+ }
+ const omt_node &node = this->d.t.nodes[st.get_index()];
+ uint32_t num_marks =
+ verify_marks_consistent_internal(node.left, node.get_marks_below());
+ num_marks += verify_marks_consistent_internal(node.right,
+ node.get_marks_below());
+ if (node.get_marks_below()) {
+ paranoid_invariant(allow_marks);
+ paranoid_invariant(num_marks > 0);
+ } else {
+ // redundant with invariant below, but nice to have explicitly
+ paranoid_invariant(num_marks == 0);
+ }
+ if (node.get_marked()) {
+ paranoid_invariant(allow_marks);
+ ++num_marks;
+ }
+ return num_marks;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::verify_marks_consistent(
+ void) const {
+ static_assert(supports_marks, "does not support marks");
+ paranoid_invariant(!this->is_array);
+ this->verify_marks_consistent_internal(this->d.t.root, true);
+ }
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::create_internal(const uint32_t new_capacity) {
- this->create_internal_no_array(new_capacity);
- XMALLOC_N(this->capacity, this->d.a.values);
-}
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename iterate_extra_t,
+ int (*f)(omtdata_t *, const uint32_t, iterate_extra_t *const)>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::iterate_ptr(
+ iterate_extra_t *const iterate_extra) {
+ if (this->is_array) {
+ this->iterate_ptr_internal_array<iterate_extra_t, f>(
+ 0, this->size(), iterate_extra);
+ } else {
+ this->iterate_ptr_internal<iterate_extra_t, f>(
+ 0, this->size(), this->d.t.root, 0, iterate_extra);
+ }
+ }
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-uint32_t omt<omtdata_t, omtdataout_t, supports_marks>::nweight(const subtree &subtree) const {
- if (subtree.is_null()) {
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::fetch(
+ const uint32_t idx,
+ omtdataout_t *const value) const {
+ if (idx >= this->size()) {
+ return EINVAL;
+ }
+ if (this->is_array) {
+ this->fetch_internal_array(idx, value);
+ } else {
+ this->fetch_internal(this->d.t.root, idx, value);
+ }
return 0;
- } else {
- return this->d.t.nodes[subtree.get_index()].weight;
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-typename omt<omtdata_t, omtdataout_t, supports_marks>::node_idx omt<omtdata_t, omtdataout_t, supports_marks>::node_malloc(void) {
- paranoid_invariant(this->d.t.free_idx < this->capacity);
- omt_node &n = this->d.t.nodes[this->d.t.free_idx];
- n.clear_stolen_bits();
- return this->d.t.free_idx++;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::node_free(const node_idx UU(idx)) {
- paranoid_invariant(idx < this->capacity);
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::maybe_resize_array(const uint32_t n) {
- const uint32_t new_size = n<=2 ? 4 : 2*n;
- const uint32_t room = this->capacity - this->d.a.start_idx;
-
- if (room < n || this->capacity / 2 >= new_size) {
- omtdata_t *XMALLOC_N(new_size, tmp_values);
- memcpy(tmp_values, &this->d.a.values[this->d.a.start_idx],
- this->d.a.num_values * (sizeof tmp_values[0]));
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find_zero(
+ const omtcmp_t &extra,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ uint32_t tmp_index;
+ uint32_t *const child_idxp = (idxp != nullptr) ? idxp : &tmp_index;
+ int r;
+ if (this->is_array) {
+ r = this->find_internal_zero_array<omtcmp_t, h>(
+ extra, value, child_idxp);
+ } else {
+ r = this->find_internal_zero<omtcmp_t, h>(
+ this->d.t.root, extra, value, child_idxp);
+ }
+ return r;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find(
+ const omtcmp_t &extra,
+ int direction,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ uint32_t tmp_index;
+ uint32_t *const child_idxp = (idxp != nullptr) ? idxp : &tmp_index;
+ paranoid_invariant(direction != 0);
+ if (direction < 0) {
+ if (this->is_array) {
+ return this->find_internal_minus_array<omtcmp_t, h>(
+ extra, value, child_idxp);
+ } else {
+ return this->find_internal_minus<omtcmp_t, h>(
+ this->d.t.root, extra, value, child_idxp);
+ }
+ } else {
+ if (this->is_array) {
+ return this->find_internal_plus_array<omtcmp_t, h>(
+ extra, value, child_idxp);
+ } else {
+ return this->find_internal_plus<omtcmp_t, h>(
+ this->d.t.root, extra, value, child_idxp);
+ }
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ size_t omt<omtdata_t, omtdataout_t, supports_marks>::memory_size(void) {
+ if (this->is_array) {
+ return (sizeof *this) +
+ this->capacity * (sizeof this->d.a.values[0]);
+ }
+ return (sizeof *this) + this->capacity * (sizeof this->d.t.nodes[0]);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::create_internal_no_array(
+ const uint32_t new_capacity) {
+ this->is_array = true;
this->d.a.start_idx = 0;
- this->capacity = new_size;
- toku_free(this->d.a.values);
- this->d.a.values = tmp_values;
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::fill_array_with_subtree_values(omtdata_t *const array, const subtree &subtree) const {
- if (subtree.is_null()) return;
- const omt_node &tree = this->d.t.nodes[subtree.get_index()];
- this->fill_array_with_subtree_values(&array[0], tree.left);
- array[this->nweight(tree.left)] = tree.value;
- this->fill_array_with_subtree_values(&array[this->nweight(tree.left) + 1], tree.right);
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::convert_to_array(void) {
- if (!this->is_array) {
- const uint32_t num_values = this->size();
- uint32_t new_size = 2*num_values;
- new_size = new_size < 4 ? 4 : new_size;
-
- omtdata_t *XMALLOC_N(new_size, tmp_values);
- this->fill_array_with_subtree_values(tmp_values, this->d.t.root);
- toku_free(this->d.t.nodes);
- this->is_array = true;
- this->capacity = new_size;
- this->d.a.num_values = num_values;
- this->d.a.values = tmp_values;
- this->d.a.start_idx = 0;
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::rebuild_from_sorted_array(subtree *const subtree, const omtdata_t *const values, const uint32_t numvalues) {
- if (numvalues==0) {
- subtree->set_to_null();
- } else {
- const uint32_t halfway = numvalues/2;
- const node_idx newidx = this->node_malloc();
- omt_node *const newnode = &this->d.t.nodes[newidx];
- newnode->weight = numvalues;
- newnode->value = values[halfway];
- subtree->set_index(newidx);
- // update everything before the recursive calls so the second call can be a tail call.
- this->rebuild_from_sorted_array(&newnode->left, &values[0], halfway);
- this->rebuild_from_sorted_array(&newnode->right, &values[halfway+1], numvalues - (halfway+1));
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::convert_to_tree(void) {
- if (this->is_array) {
- const uint32_t num_nodes = this->size();
- uint32_t new_size = num_nodes*2;
- new_size = new_size < 4 ? 4 : new_size;
-
- omt_node *XMALLOC_N(new_size, new_nodes);
- omtdata_t *const values = this->d.a.values;
- omtdata_t *const tmp_values = &values[this->d.a.start_idx];
- this->is_array = false;
- this->d.t.nodes = new_nodes;
- this->capacity = new_size;
- this->d.t.free_idx = 0;
- this->d.t.root.set_to_null();
- this->rebuild_from_sorted_array(&this->d.t.root, tmp_values, num_nodes);
- toku_free(values);
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::maybe_resize_or_convert(const uint32_t n) {
- if (this->is_array) {
- this->maybe_resize_array(n);
- } else {
- const uint32_t new_size = n<=2 ? 4 : 2*n;
- const uint32_t num_nodes = this->nweight(this->d.t.root);
- if ((this->capacity/2 >= new_size) ||
- (this->d.t.free_idx >= this->capacity && num_nodes < n) ||
- (this->capacity<n)) {
- this->convert_to_array();
- // if we had a free list, the "supports_marks" version could
- // just resize, as it is now, we have to convert to and back
- // from an array.
- if (supports_marks) {
- this->convert_to_tree();
+ this->d.a.num_values = 0;
+ this->d.a.values = nullptr;
+ this->capacity = new_capacity;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::create_internal(
+ const uint32_t new_capacity) {
+ this->create_internal_no_array(new_capacity);
+ XMALLOC_N(this->capacity, this->d.a.values);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ uint32_t omt<omtdata_t, omtdataout_t, supports_marks>::nweight(
+ const subtree &st) const {
+ if (st.is_null()) {
+ return 0;
+ } else {
+ return this->d.t.nodes[st.get_index()].weight;
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ typename omt<omtdata_t, omtdataout_t, supports_marks>::node_idx
+ omt<omtdata_t, omtdataout_t, supports_marks>::node_malloc(void) {
+ paranoid_invariant(this->d.t.free_idx < this->capacity);
+ omt_node &n = this->d.t.nodes[this->d.t.free_idx];
+ n.clear_stolen_bits();
+ return this->d.t.free_idx++;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::node_free(
+ const node_idx UU(idx)) {
+ paranoid_invariant(idx < this->capacity);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::maybe_resize_array(
+ const uint32_t n) {
+ const uint32_t new_size = n <= 2 ? 4 : 2 * n;
+ const uint32_t room = this->capacity - this->d.a.start_idx;
+
+ if (room < n || this->capacity / 2 >= new_size) {
+ omtdata_t *XMALLOC_N(new_size, tmp_values);
+ memcpy(tmp_values,
+ &this->d.a.values[this->d.a.start_idx],
+ this->d.a.num_values * (sizeof tmp_values[0]));
+ this->d.a.start_idx = 0;
+ this->capacity = new_size;
+ toku_free(this->d.a.values);
+ this->d.a.values = tmp_values;
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::
+ fill_array_with_subtree_values(omtdata_t *const array,
+ const subtree &st) const {
+ if (st.is_null())
+ return;
+ const omt_node &tree = this->d.t.nodes[st.get_index()];
+ this->fill_array_with_subtree_values(&array[0], tree.left);
+ array[this->nweight(tree.left)] = tree.value;
+ this->fill_array_with_subtree_values(
+ &array[this->nweight(tree.left) + 1], tree.right);
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::convert_to_array(void) {
+ if (!this->is_array) {
+ const uint32_t num_values = this->size();
+ uint32_t new_size = 2 * num_values;
+ new_size = new_size < 4 ? 4 : new_size;
+
+ omtdata_t *XMALLOC_N(new_size, tmp_values);
+ this->fill_array_with_subtree_values(tmp_values, this->d.t.root);
+ toku_free(this->d.t.nodes);
+ this->is_array = true;
+ this->capacity = new_size;
+ this->d.a.num_values = num_values;
+ this->d.a.values = tmp_values;
+ this->d.a.start_idx = 0;
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void
+ omt<omtdata_t, omtdataout_t, supports_marks>::rebuild_from_sorted_array(
+ subtree *const st,
+ const omtdata_t *const values,
+ const uint32_t numvalues) {
+ if (numvalues == 0) {
+ st->set_to_null();
+ } else {
+ const uint32_t halfway = numvalues / 2;
+ const node_idx newidx = this->node_malloc();
+ omt_node *const newnode = &this->d.t.nodes[newidx];
+ newnode->weight = numvalues;
+ newnode->value = values[halfway];
+ st->set_index(newidx);
+ // update everything before the recursive calls so the second call
+ // can be a tail call.
+ this->rebuild_from_sorted_array(
+ &newnode->left, &values[0], halfway);
+ this->rebuild_from_sorted_array(&newnode->right,
+ &values[halfway + 1],
+ numvalues - (halfway + 1));
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::convert_to_tree(void) {
+ if (this->is_array) {
+ const uint32_t num_nodes = this->size();
+ uint32_t new_size = num_nodes * 2;
+ new_size = new_size < 4 ? 4 : new_size;
+
+ omt_node *XMALLOC_N(new_size, new_nodes);
+ omtdata_t *const values = this->d.a.values;
+ omtdata_t *const tmp_values = &values[this->d.a.start_idx];
+ this->is_array = false;
+ this->d.t.nodes = new_nodes;
+ this->capacity = new_size;
+ this->d.t.free_idx = 0;
+ this->d.t.root.set_to_null();
+ this->rebuild_from_sorted_array(
+ &this->d.t.root, tmp_values, num_nodes);
+ toku_free(values);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::maybe_resize_or_convert(
+ const uint32_t n) {
+ if (this->is_array) {
+ this->maybe_resize_array(n);
+ } else {
+ const uint32_t new_size = n <= 2 ? 4 : 2 * n;
+ const uint32_t num_nodes = this->nweight(this->d.t.root);
+ if ((this->capacity / 2 >= new_size) ||
+ (this->d.t.free_idx >= this->capacity && num_nodes < n) ||
+ (this->capacity < n)) {
+ this->convert_to_array();
+ // if we had a free list, the "supports_marks" version could
+ // just resize, as it is now, we have to convert to and back
+ // from an array.
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
}
}
}
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-bool omt<omtdata_t, omtdataout_t, supports_marks>::will_need_rebalance(const subtree &subtree, const int leftmod, const int rightmod) const {
- if (subtree.is_null()) { return false; }
- const omt_node &n = this->d.t.nodes[subtree.get_index()];
- // one of the 1's is for the root.
- // the other is to take ceil(n/2)
- const uint32_t weight_left = this->nweight(n.left) + leftmod;
- const uint32_t weight_right = this->nweight(n.right) + rightmod;
- return ((1+weight_left < (1+1+weight_right)/2)
- ||
- (1+weight_right < (1+1+weight_left)/2));
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::insert_internal(subtree *const subtreep, const omtdata_t &value, const uint32_t idx, subtree **const rebalance_subtree) {
- if (subtreep->is_null()) {
- paranoid_invariant_zero(idx);
- const node_idx newidx = this->node_malloc();
- omt_node *const newnode = &this->d.t.nodes[newidx];
- newnode->weight = 1;
- newnode->left.set_to_null();
- newnode->right.set_to_null();
- newnode->value = value;
- subtreep->set_index(newidx);
- } else {
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ bool omt<omtdata_t, omtdataout_t, supports_marks>::will_need_rebalance(
+ const subtree &st,
+ const int leftmod,
+ const int rightmod) const {
+ if (st.is_null()) {
+ return false;
+ }
+ const omt_node &n = this->d.t.nodes[st.get_index()];
+ // one of the 1's is for the root.
+ // the other is to take ceil(n/2)
+ const uint32_t weight_left = this->nweight(n.left) + leftmod;
+ const uint32_t weight_right = this->nweight(n.right) + rightmod;
+ return ((1 + weight_left < (1 + 1 + weight_right) / 2) ||
+ (1 + weight_right < (1 + 1 + weight_left) / 2));
+ }
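The two 1s in the comparison above make the test symmetric: a subtree needs rebalancing once one side, counted together with the root, falls below half (rounded up) of the other side. A minimal sketch of the same rule, with plain integer weights in place of the omt's subtree handles:

// Minimal sketch, assuming nothing beyond the arithmetic above: the weights
// are plain integers here, not the omt's subtree handles.
#include <cstdint>
#include <cstdio>

// Flag a rebalance when one side, counted with the root, falls below half of
// the other side (the extra +1 in the divisor rounds the half up).
static bool will_need_rebalance(uint32_t weight_left, uint32_t weight_right) {
    return (1 + weight_left < (1 + 1 + weight_right) / 2) ||
           (1 + weight_right < (1 + 1 + weight_left) / 2);
}

int main() {
    std::printf("%d\n", will_need_rebalance(3, 4));  // 0: 3 vs 4 is acceptable
    std::printf("%d\n", will_need_rebalance(1, 6));  // 1: 1 vs 6 needs a rebuild
    return 0;
}
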
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::insert_internal(
+ subtree *const subtreep,
+ const omtdata_t &value,
+ const uint32_t idx,
+ subtree **const rebalance_subtree) {
+ if (subtreep->is_null()) {
+ paranoid_invariant_zero(idx);
+ const node_idx newidx = this->node_malloc();
+ omt_node *const newnode = &this->d.t.nodes[newidx];
+ newnode->weight = 1;
+ newnode->left.set_to_null();
+ newnode->right.set_to_null();
+ newnode->value = value;
+ subtreep->set_index(newidx);
+ } else {
+ omt_node &n = this->d.t.nodes[subtreep->get_index()];
+ n.weight++;
+ if (idx <= this->nweight(n.left)) {
+ if (*rebalance_subtree == nullptr &&
+ this->will_need_rebalance(*subtreep, 1, 0)) {
+ *rebalance_subtree = subtreep;
+ }
+ this->insert_internal(&n.left, value, idx, rebalance_subtree);
+ } else {
+ if (*rebalance_subtree == nullptr &&
+ this->will_need_rebalance(*subtreep, 0, 1)) {
+ *rebalance_subtree = subtreep;
+ }
+ const uint32_t sub_index = idx - this->nweight(n.left) - 1;
+ this->insert_internal(
+ &n.right, value, sub_index, rebalance_subtree);
+ }
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::set_at_internal_array(
+ const omtdata_t &value,
+ const uint32_t idx) {
+ this->d.a.values[this->d.a.start_idx + idx] = value;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::set_at_internal(
+ const subtree &st,
+ const omtdata_t &value,
+ const uint32_t idx) {
+ paranoid_invariant(!st.is_null());
+ omt_node &n = this->d.t.nodes[st.get_index()];
+ const uint32_t leftweight = this->nweight(n.left);
+ if (idx < leftweight) {
+ this->set_at_internal(n.left, value, idx);
+ } else if (idx == leftweight) {
+ n.value = value;
+ } else {
+ this->set_at_internal(n.right, value, idx - leftweight - 1);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::delete_internal(
+ subtree *const subtreep,
+ const uint32_t idx,
+ omt_node *const copyn,
+ subtree **const rebalance_subtree) {
+ paranoid_invariant_notnull(subtreep);
+ paranoid_invariant_notnull(rebalance_subtree);
+ paranoid_invariant(!subtreep->is_null());
omt_node &n = this->d.t.nodes[subtreep->get_index()];
- n.weight++;
- if (idx <= this->nweight(n.left)) {
- if (*rebalance_subtree == nullptr && this->will_need_rebalance(*subtreep, 1, 0)) {
+ const uint32_t leftweight = this->nweight(n.left);
+ if (idx < leftweight) {
+ n.weight--;
+ if (*rebalance_subtree == nullptr &&
+ this->will_need_rebalance(*subtreep, -1, 0)) {
*rebalance_subtree = subtreep;
}
- this->insert_internal(&n.left, value, idx, rebalance_subtree);
+ this->delete_internal(&n.left, idx, copyn, rebalance_subtree);
+ } else if (idx == leftweight) {
+ if (n.left.is_null()) {
+ const uint32_t oldidx = subtreep->get_index();
+ *subtreep = n.right;
+ if (copyn != nullptr) {
+ copyn->value = n.value;
+ }
+ this->node_free(oldidx);
+ } else if (n.right.is_null()) {
+ const uint32_t oldidx = subtreep->get_index();
+ *subtreep = n.left;
+ if (copyn != nullptr) {
+ copyn->value = n.value;
+ }
+ this->node_free(oldidx);
+ } else {
+ if (*rebalance_subtree == nullptr &&
+ this->will_need_rebalance(*subtreep, 0, -1)) {
+ *rebalance_subtree = subtreep;
+ }
+ // don't need to copy up value, it's only used by this
+ // next call, and when that gets to the bottom there
+ // won't be any more recursion
+ n.weight--;
+ this->delete_internal(&n.right, 0, &n, rebalance_subtree);
+ }
} else {
- if (*rebalance_subtree == nullptr && this->will_need_rebalance(*subtreep, 0, 1)) {
+ n.weight--;
+ if (*rebalance_subtree == nullptr &&
+ this->will_need_rebalance(*subtreep, 0, -1)) {
*rebalance_subtree = subtreep;
}
- const uint32_t sub_index = idx - this->nweight(n.left) - 1;
- this->insert_internal(&n.right, value, sub_index, rebalance_subtree);
- }
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::set_at_internal_array(const omtdata_t &value, const uint32_t idx) {
- this->d.a.values[this->d.a.start_idx + idx] = value;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::set_at_internal(const subtree &subtree, const omtdata_t &value, const uint32_t idx) {
- paranoid_invariant(!subtree.is_null());
- omt_node &n = this->d.t.nodes[subtree.get_index()];
- const uint32_t leftweight = this->nweight(n.left);
- if (idx < leftweight) {
- this->set_at_internal(n.left, value, idx);
- } else if (idx == leftweight) {
- n.value = value;
- } else {
- this->set_at_internal(n.right, value, idx - leftweight - 1);
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::delete_internal(subtree *const subtreep, const uint32_t idx, omt_node *const copyn, subtree **const rebalance_subtree) {
- paranoid_invariant_notnull(subtreep);
- paranoid_invariant_notnull(rebalance_subtree);
- paranoid_invariant(!subtreep->is_null());
- omt_node &n = this->d.t.nodes[subtreep->get_index()];
- const uint32_t leftweight = this->nweight(n.left);
- if (idx < leftweight) {
- n.weight--;
- if (*rebalance_subtree == nullptr && this->will_need_rebalance(*subtreep, -1, 0)) {
- *rebalance_subtree = subtreep;
- }
- this->delete_internal(&n.left, idx, copyn, rebalance_subtree);
- } else if (idx == leftweight) {
- if (n.left.is_null()) {
- const uint32_t oldidx = subtreep->get_index();
- *subtreep = n.right;
- if (copyn != nullptr) {
- copyn->value = n.value;
+ this->delete_internal(
+ &n.right, idx - leftweight - 1, copyn, rebalance_subtree);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_internal_array(
+ const uint32_t left,
+ const uint32_t right,
+ iterate_extra_t *const iterate_extra) const {
+ int r;
+ for (uint32_t i = left; i < right; ++i) {
+ r = f(this->d.a.values[this->d.a.start_idx + i], i, iterate_extra);
+ if (r != 0) {
+ return r;
}
- this->node_free(oldidx);
- } else if (n.right.is_null()) {
- const uint32_t oldidx = subtreep->get_index();
- *subtreep = n.left;
- if (copyn != nullptr) {
- copyn->value = n.value;
+ }
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename iterate_extra_t,
+ int (*f)(omtdata_t *, const uint32_t, iterate_extra_t *const)>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::iterate_ptr_internal(
+ const uint32_t left,
+ const uint32_t right,
+ const subtree &st,
+ const uint32_t idx,
+ iterate_extra_t *const iterate_extra) {
+ if (!st.is_null()) {
+ omt_node &n = this->d.t.nodes[st.get_index()];
+ const uint32_t idx_root = idx + this->nweight(n.left);
+ if (left < idx_root) {
+ this->iterate_ptr_internal<iterate_extra_t, f>(
+ left, right, n.left, idx, iterate_extra);
}
- this->node_free(oldidx);
- } else {
- if (*rebalance_subtree == nullptr && this->will_need_rebalance(*subtreep, 0, -1)) {
- *rebalance_subtree = subtreep;
+ if (left <= idx_root && idx_root < right) {
+ int r = f(&n.value, idx_root, iterate_extra);
+ lazy_assert_zero(r);
+ }
+ if (idx_root + 1 < right) {
+ this->iterate_ptr_internal<iterate_extra_t, f>(
+ left, right, n.right, idx_root + 1, iterate_extra);
}
- // don't need to copy up value, it's only used by this
- // next call, and when that gets to the bottom there
- // won't be any more recursion
- n.weight--;
- this->delete_internal(&n.right, 0, &n, rebalance_subtree);
- }
- } else {
- n.weight--;
- if (*rebalance_subtree == nullptr && this->will_need_rebalance(*subtreep, 0, -1)) {
- *rebalance_subtree = subtreep;
- }
- this->delete_internal(&n.right, idx - leftweight - 1, copyn, rebalance_subtree);
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename iterate_extra_t,
- int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_internal_array(const uint32_t left, const uint32_t right,
- iterate_extra_t *const iterate_extra) const {
- int r;
- for (uint32_t i = left; i < right; ++i) {
- r = f(this->d.a.values[this->d.a.start_idx + i], i, iterate_extra);
- if (r != 0) {
- return r;
}
}
- return 0;
-}
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename iterate_extra_t,
- int (*f)(omtdata_t *, const uint32_t, iterate_extra_t *const)>
-void omt<omtdata_t, omtdataout_t, supports_marks>::iterate_ptr_internal(const uint32_t left, const uint32_t right,
- const subtree &subtree, const uint32_t idx,
- iterate_extra_t *const iterate_extra) {
- if (!subtree.is_null()) {
- omt_node &n = this->d.t.nodes[subtree.get_index()];
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename iterate_extra_t,
+ int (*f)(omtdata_t *, const uint32_t, iterate_extra_t *const)>
+ void
+ omt<omtdata_t, omtdataout_t, supports_marks>::iterate_ptr_internal_array(
+ const uint32_t left,
+ const uint32_t right,
+ iterate_extra_t *const iterate_extra) {
+ for (uint32_t i = left; i < right; ++i) {
+ int r =
+ f(&this->d.a.values[this->d.a.start_idx + i], i, iterate_extra);
+ lazy_assert_zero(r);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_internal(
+ const uint32_t left,
+ const uint32_t right,
+ const subtree &st,
+ const uint32_t idx,
+ iterate_extra_t *const iterate_extra) const {
+ if (st.is_null()) {
+ return 0;
+ }
+ int r;
+ const omt_node &n = this->d.t.nodes[st.get_index()];
const uint32_t idx_root = idx + this->nweight(n.left);
if (left < idx_root) {
- this->iterate_ptr_internal<iterate_extra_t, f>(left, right, n.left, idx, iterate_extra);
+ r = this->iterate_internal<iterate_extra_t, f>(
+ left, right, n.left, idx, iterate_extra);
+ if (r != 0) {
+ return r;
+ }
}
if (left <= idx_root && idx_root < right) {
- int r = f(&n.value, idx_root, iterate_extra);
- lazy_assert_zero(r);
+ r = f(n.value, idx_root, iterate_extra);
+ if (r != 0) {
+ return r;
+ }
}
if (idx_root + 1 < right) {
- this->iterate_ptr_internal<iterate_extra_t, f>(left, right, n.right, idx_root + 1, iterate_extra);
- }
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename iterate_extra_t,
- int (*f)(omtdata_t *, const uint32_t, iterate_extra_t *const)>
-void omt<omtdata_t, omtdataout_t, supports_marks>::iterate_ptr_internal_array(const uint32_t left, const uint32_t right,
- iterate_extra_t *const iterate_extra) {
- for (uint32_t i = left; i < right; ++i) {
- int r = f(&this->d.a.values[this->d.a.start_idx + i], i, iterate_extra);
- lazy_assert_zero(r);
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename iterate_extra_t,
- int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_internal(const uint32_t left, const uint32_t right,
- const subtree &subtree, const uint32_t idx,
- iterate_extra_t *const iterate_extra) const {
- if (subtree.is_null()) { return 0; }
- int r;
- const omt_node &n = this->d.t.nodes[subtree.get_index()];
- const uint32_t idx_root = idx + this->nweight(n.left);
- if (left < idx_root) {
- r = this->iterate_internal<iterate_extra_t, f>(left, right, n.left, idx, iterate_extra);
- if (r != 0) { return r; }
- }
- if (left <= idx_root && idx_root < right) {
- r = f(n.value, idx_root, iterate_extra);
- if (r != 0) { return r; }
- }
- if (idx_root + 1 < right) {
- return this->iterate_internal<iterate_extra_t, f>(left, right, n.right, idx_root + 1, iterate_extra);
- }
- return 0;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename iterate_extra_t,
- int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_and_mark_range_internal(const uint32_t left, const uint32_t right,
- const subtree &subtree, const uint32_t idx,
- iterate_extra_t *const iterate_extra) {
- paranoid_invariant(!subtree.is_null());
- int r;
- omt_node &n = this->d.t.nodes[subtree.get_index()];
- const uint32_t idx_root = idx + this->nweight(n.left);
- if (left < idx_root && !n.left.is_null()) {
- n.set_marks_below_bit();
- r = this->iterate_and_mark_range_internal<iterate_extra_t, f>(left, right, n.left, idx, iterate_extra);
- if (r != 0) { return r; }
- }
- if (left <= idx_root && idx_root < right) {
- n.set_marked_bit();
- r = f(n.value, idx_root, iterate_extra);
- if (r != 0) { return r; }
- }
- if (idx_root + 1 < right && !n.right.is_null()) {
- n.set_marks_below_bit();
- return this->iterate_and_mark_range_internal<iterate_extra_t, f>(left, right, n.right, idx_root + 1, iterate_extra);
- }
- return 0;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename iterate_extra_t,
- int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::iterate_over_marked_internal(const subtree &subtree, const uint32_t idx,
- iterate_extra_t *const iterate_extra) const {
- if (subtree.is_null()) { return 0; }
- int r;
- const omt_node &n = this->d.t.nodes[subtree.get_index()];
- const uint32_t idx_root = idx + this->nweight(n.left);
- if (n.get_marks_below()) {
- r = this->iterate_over_marked_internal<iterate_extra_t, f>(n.left, idx, iterate_extra);
- if (r != 0) { return r; }
- }
- if (n.get_marked()) {
- r = f(n.value, idx_root, iterate_extra);
- if (r != 0) { return r; }
- }
- if (n.get_marks_below()) {
- return this->iterate_over_marked_internal<iterate_extra_t, f>(n.right, idx_root + 1, iterate_extra);
- }
- return 0;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::fetch_internal_array(const uint32_t i, omtdataout_t *const value) const {
- if (value != nullptr) {
- copyout(value, &this->d.a.values[this->d.a.start_idx + i]);
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::fetch_internal(const subtree &subtree, const uint32_t i, omtdataout_t *const value) const {
- omt_node &n = this->d.t.nodes[subtree.get_index()];
- const uint32_t leftweight = this->nweight(n.left);
- if (i < leftweight) {
- this->fetch_internal(n.left, i, value);
- } else if (i == leftweight) {
- if (value != nullptr) {
- copyout(value, &n);
- }
- } else {
- this->fetch_internal(n.right, i - leftweight - 1, value);
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::fill_array_with_subtree_idxs(node_idx *const array, const subtree &subtree) const {
- if (!subtree.is_null()) {
- const omt_node &tree = this->d.t.nodes[subtree.get_index()];
- this->fill_array_with_subtree_idxs(&array[0], tree.left);
- array[this->nweight(tree.left)] = subtree.get_index();
- this->fill_array_with_subtree_idxs(&array[this->nweight(tree.left) + 1], tree.right);
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::rebuild_subtree_from_idxs(subtree *const subtree, const node_idx *const idxs, const uint32_t numvalues) {
- if (numvalues==0) {
- subtree->set_to_null();
- } else {
- uint32_t halfway = numvalues/2;
- subtree->set_index(idxs[halfway]);
- //node_idx newidx = idxs[halfway];
- omt_node &newnode = this->d.t.nodes[subtree->get_index()];
- newnode.weight = numvalues;
- // value is already in there.
- this->rebuild_subtree_from_idxs(&newnode.left, &idxs[0], halfway);
- this->rebuild_subtree_from_idxs(&newnode.right, &idxs[halfway+1], numvalues-(halfway+1));
- //n_idx = newidx;
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::rebalance(subtree *const subtree) {
- node_idx idx = subtree->get_index();
- if (idx==this->d.t.root.get_index()) {
- //Try to convert to an array.
- //If this fails, (malloc) nothing will have changed.
- //In the failure case we continue on to the standard rebalance
- //algorithm.
- this->convert_to_array();
- if (supports_marks) {
- this->convert_to_tree();
+ return this->iterate_internal<iterate_extra_t, f>(
+ left, right, n.right, idx_root + 1, iterate_extra);
}
- } else {
- const omt_node &n = this->d.t.nodes[idx];
- node_idx *tmp_array;
- size_t mem_needed = n.weight * (sizeof tmp_array[0]);
- size_t mem_free = (this->capacity - this->d.t.free_idx) * (sizeof this->d.t.nodes[0]);
- bool malloced;
- if (mem_needed<=mem_free) {
- //There is sufficient free space at the end of the nodes array
- //to hold enough node indexes to rebalance.
- malloced = false;
- tmp_array = reinterpret_cast<node_idx *>(&this->d.t.nodes[this->d.t.free_idx]);
- }
- else {
- malloced = true;
- XMALLOC_N(n.weight, tmp_array);
- }
- this->fill_array_with_subtree_idxs(tmp_array, *subtree);
- this->rebuild_subtree_from_idxs(subtree, tmp_array, n.weight);
- if (malloced) toku_free(tmp_array);
- }
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::copyout(omtdata_t *const out, const omt_node *const n) {
- *out = n->value;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::copyout(omtdata_t **const out, omt_node *const n) {
- *out = &n->value;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::copyout(omtdata_t *const out, const omtdata_t *const stored_value_ptr) {
- *out = *stored_value_ptr;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-void omt<omtdata_t, omtdataout_t, supports_marks>::copyout(omtdata_t **const out, omtdata_t *const stored_value_ptr) {
- *out = stored_value_ptr;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename omtcmp_t,
- int (*h)(const omtdata_t &, const omtcmp_t &)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_zero_array(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const {
- paranoid_invariant_notnull(idxp);
- uint32_t min = this->d.a.start_idx;
- uint32_t limit = this->d.a.start_idx + this->d.a.num_values;
- uint32_t best_pos = subtree::NODE_NULL;
- uint32_t best_zero = subtree::NODE_NULL;
-
- while (min!=limit) {
- uint32_t mid = (min + limit) / 2;
- int hv = h(this->d.a.values[mid], extra);
- if (hv<0) {
- min = mid+1;
- }
- else if (hv>0) {
- best_pos = mid;
- limit = mid;
- }
- else {
- best_zero = mid;
- limit = mid;
- }
- }
- if (best_zero!=subtree::NODE_NULL) {
- //Found a zero
- if (value != nullptr) {
- copyout(value, &this->d.a.values[best_zero]);
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::
+ iterate_and_mark_range_internal(const uint32_t left,
+ const uint32_t right,
+ const subtree &st,
+ const uint32_t idx,
+ iterate_extra_t *const iterate_extra) {
+ paranoid_invariant(!st.is_null());
+ int r;
+ omt_node &n = this->d.t.nodes[st.get_index()];
+ const uint32_t idx_root = idx + this->nweight(n.left);
+ if (left < idx_root && !n.left.is_null()) {
+ n.set_marks_below_bit();
+ r = this->iterate_and_mark_range_internal<iterate_extra_t, f>(
+ left, right, n.left, idx, iterate_extra);
+ if (r != 0) {
+ return r;
+ }
+ }
+ if (left <= idx_root && idx_root < right) {
+ n.set_marked_bit();
+ r = f(n.value, idx_root, iterate_extra);
+ if (r != 0) {
+ return r;
+ }
+ }
+ if (idx_root + 1 < right && !n.right.is_null()) {
+ n.set_marks_below_bit();
+ return this->iterate_and_mark_range_internal<iterate_extra_t, f>(
+ left, right, n.right, idx_root + 1, iterate_extra);
}
- *idxp = best_zero - this->d.a.start_idx;
return 0;
}
- if (best_pos!=subtree::NODE_NULL) *idxp = best_pos - this->d.a.start_idx;
- else *idxp = this->d.a.num_values;
- return DB_NOTFOUND;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename omtcmp_t,
- int (*h)(const omtdata_t &, const omtcmp_t &)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_zero(const subtree &subtree, const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const {
- paranoid_invariant_notnull(idxp);
- if (subtree.is_null()) {
- *idxp = 0;
- return DB_NOTFOUND;
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <
+ typename iterate_extra_t,
+ int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
+ int
+ omt<omtdata_t, omtdataout_t, supports_marks>::iterate_over_marked_internal(
+ const subtree &st,
+ const uint32_t idx,
+ iterate_extra_t *const iterate_extra) const {
+ if (st.is_null()) {
+ return 0;
+ }
+ int r;
+ const omt_node &n = this->d.t.nodes[st.get_index()];
+ const uint32_t idx_root = idx + this->nweight(n.left);
+ if (n.get_marks_below()) {
+ r = this->iterate_over_marked_internal<iterate_extra_t, f>(
+ n.left, idx, iterate_extra);
+ if (r != 0) {
+ return r;
+ }
+ }
+ if (n.get_marked()) {
+ r = f(n.value, idx_root, iterate_extra);
+ if (r != 0) {
+ return r;
+ }
+ }
+ if (n.get_marks_below()) {
+ return this->iterate_over_marked_internal<iterate_extra_t, f>(
+ n.right, idx_root + 1, iterate_extra);
+ }
+ return 0;
}
- omt_node &n = this->d.t.nodes[subtree.get_index()];
- int hv = h(n.value, extra);
- if (hv<0) {
- int r = this->find_internal_zero<omtcmp_t, h>(n.right, extra, value, idxp);
- *idxp += this->nweight(n.left)+1;
- return r;
- } else if (hv>0) {
- return this->find_internal_zero<omtcmp_t, h>(n.left, extra, value, idxp);
- } else {
- int r = this->find_internal_zero<omtcmp_t, h>(n.left, extra, value, idxp);
- if (r==DB_NOTFOUND) {
- *idxp = this->nweight(n.left);
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::fetch_internal_array(
+ const uint32_t i,
+ omtdataout_t *const value) const {
+ if (value != nullptr) {
+ copyout(value, &this->d.a.values[this->d.a.start_idx + i]);
+ }
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::fetch_internal(
+ const subtree &st,
+ const uint32_t i,
+ omtdataout_t *const value) const {
+ omt_node &n = this->d.t.nodes[st.get_index()];
+ const uint32_t leftweight = this->nweight(n.left);
+ if (i < leftweight) {
+ this->fetch_internal(n.left, i, value);
+ } else if (i == leftweight) {
if (value != nullptr) {
copyout(value, &n);
}
- r = 0;
+ } else {
+ this->fetch_internal(n.right, i - leftweight - 1, value);
}
- return r;
}
-}
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename omtcmp_t,
- int (*h)(const omtdata_t &, const omtcmp_t &)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_plus_array(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const {
- paranoid_invariant_notnull(idxp);
- uint32_t min = this->d.a.start_idx;
- uint32_t limit = this->d.a.start_idx + this->d.a.num_values;
- uint32_t best = subtree::NODE_NULL;
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void
+ omt<omtdata_t, omtdataout_t, supports_marks>::fill_array_with_subtree_idxs(
+ node_idx *const array,
+ const subtree &st) const {
+ if (!st.is_null()) {
+ const omt_node &tree = this->d.t.nodes[st.get_index()];
+ this->fill_array_with_subtree_idxs(&array[0], tree.left);
+ array[this->nweight(tree.left)] = st.get_index();
+ this->fill_array_with_subtree_idxs(
+ &array[this->nweight(tree.left) + 1], tree.right);
+ }
+ }
- while (min != limit) {
- const uint32_t mid = (min + limit) / 2;
- const int hv = h(this->d.a.values[mid], extra);
- if (hv > 0) {
- best = mid;
- limit = mid;
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void
+ omt<omtdata_t, omtdataout_t, supports_marks>::rebuild_subtree_from_idxs(
+ subtree *const st,
+ const node_idx *const idxs,
+ const uint32_t numvalues) {
+ if (numvalues == 0) {
+ st->set_to_null();
} else {
- min = mid + 1;
+ uint32_t halfway = numvalues / 2;
+ st->set_index(idxs[halfway]);
+ // node_idx newidx = idxs[halfway];
+ omt_node &newnode = this->d.t.nodes[st->get_index()];
+ newnode.weight = numvalues;
+ // value is already in there.
+ this->rebuild_subtree_from_idxs(&newnode.left, &idxs[0], halfway);
+ this->rebuild_subtree_from_idxs(
+ &newnode.right, &idxs[halfway + 1], numvalues - (halfway + 1));
+ // n_idx = newidx;
}
}
- if (best == subtree::NODE_NULL) { return DB_NOTFOUND; }
- if (value != nullptr) {
- copyout(value, &this->d.a.values[best]);
- }
- *idxp = best - this->d.a.start_idx;
- return 0;
-}
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename omtcmp_t,
- int (*h)(const omtdata_t &, const omtcmp_t &)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_plus(const subtree &subtree, const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const {
- paranoid_invariant_notnull(idxp);
- if (subtree.is_null()) {
- return DB_NOTFOUND;
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::rebalance(
+ subtree *const st) {
+ node_idx idx = st->get_index();
+ if (idx == this->d.t.root.get_index()) {
+ // Try to convert to an array.
+ // If this fails, (malloc) nothing will have changed.
+ // In the failure case we continue on to the standard rebalance
+ // algorithm.
+ this->convert_to_array();
+ if (supports_marks) {
+ this->convert_to_tree();
+ }
+ } else {
+ const omt_node &n = this->d.t.nodes[idx];
+ node_idx *tmp_array;
+ size_t mem_needed = n.weight * (sizeof tmp_array[0]);
+ size_t mem_free = (this->capacity - this->d.t.free_idx) *
+ (sizeof this->d.t.nodes[0]);
+ bool malloced;
+ if (mem_needed <= mem_free) {
+ // There is sufficient free space at the end of the nodes array
+ // to hold enough node indexes to rebalance.
+ malloced = false;
+ tmp_array = reinterpret_cast<node_idx *>(
+ &this->d.t.nodes[this->d.t.free_idx]);
+ } else {
+ malloced = true;
+ XMALLOC_N(n.weight, tmp_array);
+ }
+ this->fill_array_with_subtree_idxs(tmp_array, *st);
+ this->rebuild_subtree_from_idxs(st, tmp_array, n.weight);
+ if (malloced)
+ toku_free(tmp_array);
+ }
}
- omt_node *const n = &this->d.t.nodes[subtree.get_index()];
- int hv = h(n->value, extra);
- int r;
- if (hv > 0) {
- r = this->find_internal_plus<omtcmp_t, h>(n->left, extra, value, idxp);
- if (r == DB_NOTFOUND) {
- *idxp = this->nweight(n->left);
- if (value != nullptr) {
- copyout(value, n);
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::copyout(
+ omtdata_t *const out,
+ const omt_node *const n) {
+ *out = n->value;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::copyout(
+ omtdata_t **const out,
+ omt_node *const n) {
+ *out = &n->value;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::copyout(
+ omtdata_t *const out,
+ const omtdata_t *const stored_value_ptr) {
+ *out = *stored_value_ptr;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ void omt<omtdata_t, omtdataout_t, supports_marks>::copyout(
+ omtdata_t **const out,
+ omtdata_t *const stored_value_ptr) {
+ *out = stored_value_ptr;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_zero_array(
+ const omtcmp_t &extra,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ uint32_t min = this->d.a.start_idx;
+ uint32_t limit = this->d.a.start_idx + this->d.a.num_values;
+ uint32_t best_pos = subtree::NODE_NULL;
+ uint32_t best_zero = subtree::NODE_NULL;
+
+ while (min != limit) {
+ uint32_t mid = (min + limit) / 2;
+ int hv = h(this->d.a.values[mid], extra);
+ if (hv < 0) {
+ min = mid + 1;
+ } else if (hv > 0) {
+ best_pos = mid;
+ limit = mid;
+ } else {
+ best_zero = mid;
+ limit = mid;
}
- r = 0;
}
- } else {
- r = this->find_internal_plus<omtcmp_t, h>(n->right, extra, value, idxp);
- if (r == 0) {
- *idxp += this->nweight(n->left) + 1;
+ if (best_zero != subtree::NODE_NULL) {
+ // Found a zero
+ if (value != nullptr) {
+ copyout(value, &this->d.a.values[best_zero]);
+ }
+ *idxp = best_zero - this->d.a.start_idx;
+ return 0;
}
+ if (best_pos != subtree::NODE_NULL)
+ *idxp = best_pos - this->d.a.start_idx;
+ else
+ *idxp = this->d.a.num_values;
+ return DB_NOTFOUND;
}
- return r;
-}
-
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename omtcmp_t,
- int (*h)(const omtdata_t &, const omtcmp_t &)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_minus_array(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const {
- paranoid_invariant_notnull(idxp);
- uint32_t min = this->d.a.start_idx;
- uint32_t limit = this->d.a.start_idx + this->d.a.num_values;
- uint32_t best = subtree::NODE_NULL;
- while (min != limit) {
- const uint32_t mid = (min + limit) / 2;
- const int hv = h(this->d.a.values[mid], extra);
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_zero(
+ const subtree &st,
+ const omtcmp_t &extra,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ if (st.is_null()) {
+ *idxp = 0;
+ return DB_NOTFOUND;
+ }
+ omt_node &n = this->d.t.nodes[st.get_index()];
+ int hv = h(n.value, extra);
if (hv < 0) {
- best = mid;
- min = mid + 1;
+ int r = this->find_internal_zero<omtcmp_t, h>(
+ n.right, extra, value, idxp);
+ *idxp += this->nweight(n.left) + 1;
+ return r;
+ } else if (hv > 0) {
+ return this->find_internal_zero<omtcmp_t, h>(
+ n.left, extra, value, idxp);
} else {
- limit = mid;
+ int r = this->find_internal_zero<omtcmp_t, h>(
+ n.left, extra, value, idxp);
+ if (r == DB_NOTFOUND) {
+ *idxp = this->nweight(n.left);
+ if (value != nullptr) {
+ copyout(value, &n);
+ }
+ r = 0;
+ }
+ return r;
}
}
- if (best == subtree::NODE_NULL) { return DB_NOTFOUND; }
- if (value != nullptr) {
- copyout(value, &this->d.a.values[best]);
- }
- *idxp = best - this->d.a.start_idx;
- return 0;
-}
-template<typename omtdata_t, typename omtdataout_t, bool supports_marks>
-template<typename omtcmp_t,
- int (*h)(const omtdata_t &, const omtcmp_t &)>
-int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_minus(const subtree &subtree, const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const {
- paranoid_invariant_notnull(idxp);
- if (subtree.is_null()) {
- return DB_NOTFOUND;
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_plus_array(
+ const omtcmp_t &extra,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ uint32_t min = this->d.a.start_idx;
+ uint32_t limit = this->d.a.start_idx + this->d.a.num_values;
+ uint32_t best = subtree::NODE_NULL;
+
+ while (min != limit) {
+ const uint32_t mid = (min + limit) / 2;
+ const int hv = h(this->d.a.values[mid], extra);
+ if (hv > 0) {
+ best = mid;
+ limit = mid;
+ } else {
+ min = mid + 1;
+ }
+ }
+ if (best == subtree::NODE_NULL) {
+ return DB_NOTFOUND;
+ }
+ if (value != nullptr) {
+ copyout(value, &this->d.a.values[best]);
+ }
+ *idxp = best - this->d.a.start_idx;
+ return 0;
}
- omt_node *const n = &this->d.t.nodes[subtree.get_index()];
- int hv = h(n->value, extra);
- if (hv < 0) {
- int r = this->find_internal_minus<omtcmp_t, h>(n->right, extra, value, idxp);
- if (r == 0) {
- *idxp += this->nweight(n->left) + 1;
- } else if (r == DB_NOTFOUND) {
- *idxp = this->nweight(n->left);
- if (value != nullptr) {
- copyout(value, n);
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_plus(
+ const subtree &st,
+ const omtcmp_t &extra,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ if (st.is_null()) {
+ return DB_NOTFOUND;
+ }
+ omt_node *const n = &this->d.t.nodes[st.get_index()];
+ int hv = h(n->value, extra);
+ int r;
+ if (hv > 0) {
+ r = this->find_internal_plus<omtcmp_t, h>(
+ n->left, extra, value, idxp);
+ if (r == DB_NOTFOUND) {
+ *idxp = this->nweight(n->left);
+ if (value != nullptr) {
+ copyout(value, n);
+ }
+ r = 0;
+ }
+ } else {
+ r = this->find_internal_plus<omtcmp_t, h>(
+ n->right, extra, value, idxp);
+ if (r == 0) {
+ *idxp += this->nweight(n->left) + 1;
}
- r = 0;
}
return r;
- } else {
- return this->find_internal_minus<omtcmp_t, h>(n->left, extra, value, idxp);
}
-}
-} // namespace toku
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_minus_array(
+ const omtcmp_t &extra,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ uint32_t min = this->d.a.start_idx;
+ uint32_t limit = this->d.a.start_idx + this->d.a.num_values;
+ uint32_t best = subtree::NODE_NULL;
+
+ while (min != limit) {
+ const uint32_t mid = (min + limit) / 2;
+ const int hv = h(this->d.a.values[mid], extra);
+ if (hv < 0) {
+ best = mid;
+ min = mid + 1;
+ } else {
+ limit = mid;
+ }
+ }
+ if (best == subtree::NODE_NULL) {
+ return DB_NOTFOUND;
+ }
+ if (value != nullptr) {
+ copyout(value, &this->d.a.values[best]);
+ }
+ *idxp = best - this->d.a.start_idx;
+ return 0;
+ }
+
+ template <typename omtdata_t, typename omtdataout_t, bool supports_marks>
+ template <typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
+ int omt<omtdata_t, omtdataout_t, supports_marks>::find_internal_minus(
+ const subtree &st,
+ const omtcmp_t &extra,
+ omtdataout_t *const value,
+ uint32_t *const idxp) const {
+ paranoid_invariant_notnull(idxp);
+ if (st.is_null()) {
+ return DB_NOTFOUND;
+ }
+ omt_node *const n = &this->d.t.nodes[st.get_index()];
+ int hv = h(n->value, extra);
+ if (hv < 0) {
+ int r = this->find_internal_minus<omtcmp_t, h>(
+ n->right, extra, value, idxp);
+ if (r == 0) {
+ *idxp += this->nweight(n->left) + 1;
+ } else if (r == DB_NOTFOUND) {
+ *idxp = this->nweight(n->left);
+ if (value != nullptr) {
+ copyout(value, n);
+ }
+ r = 0;
+ }
+ return r;
+ } else {
+ return this->find_internal_minus<omtcmp_t, h>(
+ n->left, extra, value, idxp);
+ }
+ }
+} // namespace toku
diff --git a/storage/tokudb/PerconaFT/util/omt.h b/storage/tokudb/PerconaFT/util/omt.h
index 36946401381..dc26b2d5718 100644
--- a/storage/tokudb/PerconaFT/util/omt.h
+++ b/storage/tokudb/PerconaFT/util/omt.h
@@ -32,6 +32,19 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
+
+----------------------------------------
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc
index d64438088c0..204f2c0c40d 100644
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@ -7300,6 +7300,16 @@ int ha_tokudb::create(
tokudb_trx_data *trx = NULL;
THD* thd = ha_thd();
+ String database_name, table_name, dictionary_name;
+ tokudb_split_dname(name, database_name, table_name, dictionary_name);
+ if (database_name.is_empty() || table_name.is_empty()) {
+ push_warning_printf(thd,
+ Sql_condition::WARN_LEVEL_WARN,
+ ER_TABLE_NAME,
+ "TokuDB: Table Name or Database Name is empty");
+ DBUG_RETURN(ER_TABLE_NAME);
+ }
+
memset(&kc_info, 0, sizeof(kc_info));
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100999
diff --git a/storage/tokudb/hatoku_hton.cc b/storage/tokudb/hatoku_hton.cc
index 39ffa6daa70..dedc2047636 100644
--- a/storage/tokudb/hatoku_hton.cc
+++ b/storage/tokudb/hatoku_hton.cc
@@ -566,10 +566,10 @@ static int tokudb_init_func(void *p) {
db_env->set_update(db_env, tokudb_update_fun);
- db_env_set_direct_io(tokudb::sysvars::directio == TRUE);
+ db_env_set_direct_io(tokudb::sysvars::directio);
db_env_set_compress_buffers_before_eviction(
- tokudb::sysvars::compress_buffers_before_eviction == TRUE);
+ tokudb::sysvars::compress_buffers_before_eviction);
db_env->change_fsync_log_period(db_env, tokudb::sysvars::fsync_log_period);
diff --git a/storage/tokudb/hatoku_hton.h b/storage/tokudb/hatoku_hton.h
index c5b6aab1769..e90af067b00 100644
--- a/storage/tokudb/hatoku_hton.h
+++ b/storage/tokudb/hatoku_hton.h
@@ -190,7 +190,6 @@ inline bool tokudb_killed_thd_callback(void* extra,
return thd_killed(thd) != 0;
}
-extern HASH tokudb_open_tables;
extern const char* tokudb_hton_name;
extern int tokudb_hton_initialized;
extern tokudb::thread::rwlock_t tokudb_hton_initialized_lock;
diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_bin.py b/storage/tokudb/mysql-test/tokudb/t/change_column_bin.py
index 2c7d8dd9a54..2c7d8dd9a54 100755..100644
--- a/storage/tokudb/mysql-test/tokudb/t/change_column_bin.py
+++ b/storage/tokudb/mysql-test/tokudb/t/change_column_bin.py
diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_bin_rename.py b/storage/tokudb/mysql-test/tokudb/t/change_column_bin_rename.py
index 6bd5de38fe8..6bd5de38fe8 100755..100644
--- a/storage/tokudb/mysql-test/tokudb/t/change_column_bin_rename.py
+++ b/storage/tokudb/mysql-test/tokudb/t/change_column_bin_rename.py
diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_char.py b/storage/tokudb/mysql-test/tokudb/t/change_column_char.py
index c53442ade50..c53442ade50 100755..100644
--- a/storage/tokudb/mysql-test/tokudb/t/change_column_char.py
+++ b/storage/tokudb/mysql-test/tokudb/t/change_column_char.py
diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_char_binary.py b/storage/tokudb/mysql-test/tokudb/t/change_column_char_binary.py
index e92797918d5..e92797918d5 100755..100644
--- a/storage/tokudb/mysql-test/tokudb/t/change_column_char_binary.py
+++ b/storage/tokudb/mysql-test/tokudb/t/change_column_char_binary.py
diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_char_charbinary.py b/storage/tokudb/mysql-test/tokudb/t/change_column_char_charbinary.py
index 065e37b186d..065e37b186d 100755..100644
--- a/storage/tokudb/mysql-test/tokudb/t/change_column_char_charbinary.py
+++ b/storage/tokudb/mysql-test/tokudb/t/change_column_char_charbinary.py
diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_char_rename.py b/storage/tokudb/mysql-test/tokudb/t/change_column_char_rename.py
index fe73fce0d53..fe73fce0d53 100755..100644
--- a/storage/tokudb/mysql-test/tokudb/t/change_column_char_rename.py
+++ b/storage/tokudb/mysql-test/tokudb/t/change_column_char_rename.py
diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_int.py b/storage/tokudb/mysql-test/tokudb/t/change_column_int.py
index 6f69156e260..6f69156e260 100755..100644
--- a/storage/tokudb/mysql-test/tokudb/t/change_column_int.py
+++ b/storage/tokudb/mysql-test/tokudb/t/change_column_int.py
diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_int_key.py b/storage/tokudb/mysql-test/tokudb/t/change_column_int_key.py
index fd7e5868c40..fd7e5868c40 100755..100644
--- a/storage/tokudb/mysql-test/tokudb/t/change_column_int_key.py
+++ b/storage/tokudb/mysql-test/tokudb/t/change_column_int_key.py
diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_int_not_supported.py b/storage/tokudb/mysql-test/tokudb/t/change_column_int_not_supported.py
index 1708c65efde..1708c65efde 100755..100644
--- a/storage/tokudb/mysql-test/tokudb/t/change_column_int_not_supported.py
+++ b/storage/tokudb/mysql-test/tokudb/t/change_column_int_not_supported.py
diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_int_rename.py b/storage/tokudb/mysql-test/tokudb/t/change_column_int_rename.py
index 5222564a9a2..5222564a9a2 100755..100644
--- a/storage/tokudb/mysql-test/tokudb/t/change_column_int_rename.py
+++ b/storage/tokudb/mysql-test/tokudb/t/change_column_int_rename.py
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/PS-4979.result b/storage/tokudb/mysql-test/tokudb_bugs/r/PS-4979.result
new file mode 100644
index 00000000000..5bf7a270fe5
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/PS-4979.result
@@ -0,0 +1,2 @@
+CREATE TABLE `#mysql50#q.q`(f1 INT KEY) ENGINE=TOKUDB;
+ERROR 42000: Incorrect table name '#mysql50#q.q'
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/PS-4979.test b/storage/tokudb/mysql-test/tokudb_bugs/t/PS-4979.test
new file mode 100644
index 00000000000..cb902f6e52a
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/PS-4979.test
@@ -0,0 +1,13 @@
+--source include/have_tokudb.inc
+# PS-4979: Dropping a TokuDB table with non-alphanumeric characters could lead
+# to a crash
+#
+# `#mysql50#q.q` is an invalid table name, but the server side doesn't detect
+# it or complain. Instead it passes an empty table name to the engine. The
+# engine expects a table name in the form of a relative path like
+# "./databasename/tablename". InnoDB detects this while parsing the table name
+# during creation and returns an error.
+# In MariaDB, the server itself detects the invalid name and returns an error.
+
+--error ER_WRONG_TABLE_NAME
+CREATE TABLE `#mysql50#q.q`(f1 INT KEY) ENGINE=TOKUDB;
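The engine-side check added to ha_tokudb::create() above splits the dname it is given and refuses empty database or table components. A hypothetical sketch of that splitting, not TokuDB's actual tokudb_split_dname(): parse a "./database/table" dname and treat empty components as an error, which is what the invalid name `#mysql50#q.q` degenerates into.

// Hypothetical sketch only -- not TokuDB's tokudb_split_dname(). It mirrors
// the shape of the new check: split a "./database/table" dname and treat
// empty components as an error.
#include <cstdio>
#include <string>

static bool split_dname(const std::string &dname,
                        std::string *db, std::string *table) {
    // Expect "./<database>/<table>"; anything else is rejected.
    if (dname.rfind("./", 0) != 0) return false;
    const size_t sep = dname.find('/', 2);
    if (sep == std::string::npos) return false;
    *db = dname.substr(2, sep - 2);
    *table = dname.substr(sep + 1);
    return !db->empty() && !table->empty();
}

int main() {
    std::string db, table;
    std::printf("%d\n", split_dname("./test/t1", &db, &table));  // 1: ok
    std::printf("%d\n", split_dname("", &db, &table));           // 0: reject
    return 0;
}
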
diff --git a/storage/tokudb/tokudb_background.cc b/storage/tokudb/tokudb_background.cc
index 13e0e9321cc..19f03dbca65 100644
--- a/storage/tokudb/tokudb_background.cc
+++ b/storage/tokudb/tokudb_background.cc
@@ -182,14 +182,14 @@ void* job_manager_t::real_thread_func() {
if (res == tokudb::thread::semaphore_t::E_INTERRUPTED || _shutdown) {
break;
} else if (res == tokudb::thread::semaphore_t::E_SIGNALLED) {
-#if TOKUDB_DEBUG
+#if defined(TOKUDB_DEBUG)
if (TOKUDB_UNLIKELY(
tokudb::sysvars::debug_pause_background_job_manager)) {
_sem.signal();
tokudb::time::sleep_microsec(250000);
continue;
}
-#endif // TOKUDB_DEBUG
+#endif // defined(TOKUDB_DEBUG)
mutex_t_lock(_mutex);
assert_debug(_background_jobs.size() > 0);
diff --git a/storage/tokudb/tokudb_sysvars.cc b/storage/tokudb/tokudb_sysvars.cc
index 7771204dc11..88449242d36 100644
--- a/storage/tokudb/tokudb_sysvars.cc
+++ b/storage/tokudb/tokudb_sysvars.cc
@@ -661,13 +661,13 @@ static MYSQL_THDVAR_ULONGLONG(
~0ULL,
1);
-static MYSQL_THDVAR_STR(
- last_lock_timeout,
- PLUGIN_VAR_MEMALLOC,
- "last lock timeout",
- NULL,
- NULL,
- NULL);
+static MYSQL_THDVAR_STR(last_lock_timeout,
+ PLUGIN_VAR_MEMALLOC | PLUGIN_VAR_NOCMDOPT |
+ PLUGIN_VAR_READONLY,
+ "last lock timeout",
+ NULL,
+ NULL,
+ NULL);
static MYSQL_THDVAR_BOOL(
load_save_space,
diff --git a/storage/tokudb/tokudb_sysvars.h b/storage/tokudb/tokudb_sysvars.h
index 2454f8fefd2..23199baa7be 100644
--- a/storage/tokudb/tokudb_sysvars.h
+++ b/storage/tokudb/tokudb_sysvars.h
@@ -113,10 +113,10 @@ extern my_bool gdb_on_fatal;
extern my_bool check_jemalloc;
-#if TOKUDB_DEBUG
+#if defined(TOKUDB_DEBUG)
// used to control background job manager
extern my_bool debug_pause_background_job_manager;
-#endif // TOKUDB_DEBUG
+#endif // defined(TOKUDB_DEBUG)
// session/thread
my_bool alter_print_error(THD* thd);
diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc
index 2d9c47e7ad8..aaaf486dddb 100644
--- a/storage/xtradb/buf/buf0buf.cc
+++ b/storage/xtradb/buf/buf0buf.cc
@@ -653,6 +653,8 @@ buf_page_is_corrupted(
ulint checksum_field1;
ulint checksum_field2;
+ ib_uint32_t crc32 = ULINT32_UNDEFINED;
+ bool crc32_inited = false;
if (!zip_size
&& memcmp(read_buf + FIL_PAGE_LSN + 4,
@@ -732,120 +734,124 @@ buf_page_is_corrupted(
return(FALSE);
}
- ulint page_no = mach_read_from_4(read_buf + FIL_PAGE_OFFSET);
- ulint space_id = mach_read_from_4(read_buf + FIL_PAGE_SPACE_ID);
const srv_checksum_algorithm_t curr_algo =
static_cast<srv_checksum_algorithm_t>(srv_checksum_algorithm);
switch (curr_algo) {
- case SRV_CHECKSUM_ALGORITHM_CRC32:
case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
-
- if (buf_page_is_checksum_valid_crc32(read_buf,
- checksum_field1, checksum_field2)) {
- return(FALSE);
+ if (buf_page_is_checksum_valid_crc32(read_buf, checksum_field1,
+ checksum_field2)) {
+ return FALSE;
}
- if (buf_page_is_checksum_valid_none(read_buf,
- checksum_field1, checksum_field2)) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_CRC32) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_NONE,
- space_id, page_no);
- }
+ return TRUE;
- return(FALSE);
+ case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
+ if (buf_page_is_checksum_valid_innodb(read_buf, checksum_field1,
+ checksum_field2)) {
+ return FALSE;
}
- if (buf_page_is_checksum_valid_innodb(read_buf,
- checksum_field1, checksum_field2)) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_CRC32) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_INNODB,
- space_id, page_no);
- }
-
- return(FALSE);
+ return TRUE;
+ case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
+ if (buf_page_is_checksum_valid_none(read_buf, checksum_field1,
+ checksum_field2)) {
+ return FALSE;
}
- return(TRUE);
-
+ return TRUE;
+ case SRV_CHECKSUM_ALGORITHM_CRC32:
case SRV_CHECKSUM_ALGORITHM_INNODB:
- case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
+	/* Very old versions of InnoDB only stored 8 byte lsn to the
+	start and the end of the page. */
- if (buf_page_is_checksum_valid_innodb(read_buf,
- checksum_field1, checksum_field2)) {
- return(FALSE);
- }
+ /* Since innodb_checksum_algorithm is not strict_* allow
+ any of the algos to match for the old field. */
- if (buf_page_is_checksum_valid_none(read_buf,
- checksum_field1, checksum_field2)) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_NONE,
- space_id, page_no);
- }
+ if (checksum_field2
+ != mach_read_from_4(read_buf + FIL_PAGE_LSN)
+ && checksum_field2 != BUF_NO_CHECKSUM_MAGIC) {
- return(FALSE);
- }
+ if (srv_checksum_algorithm
+ == SRV_CHECKSUM_ALGORITHM_CRC32) {
- if (buf_page_is_checksum_valid_crc32(read_buf,
- checksum_field1, checksum_field2)) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_CRC32,
- space_id, page_no);
- }
+ crc32 = buf_calc_page_crc32(read_buf);
+ crc32_inited = true;
- return(FALSE);
- }
+ if (checksum_field2 != crc32
+ && checksum_field2
+ != buf_calc_page_old_checksum(read_buf)) {
+ return TRUE;
+ }
+ } else {
+ ut_ad(srv_checksum_algorithm
+ == SRV_CHECKSUM_ALGORITHM_INNODB);
- return(TRUE);
+ if (checksum_field2
+ != buf_calc_page_old_checksum(read_buf)) {
- case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
+ crc32 = buf_calc_page_crc32(read_buf);
+ crc32_inited = true;
- if (buf_page_is_checksum_valid_none(read_buf,
- checksum_field1, checksum_field2)) {
- return(FALSE);
+ if (checksum_field2 != crc32) {
+ return TRUE;
+ }
+ }
+ }
}
- if (buf_page_is_checksum_valid_crc32(read_buf,
- checksum_field1, checksum_field2)) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_CRC32,
- space_id, page_no);
- return(FALSE);
- }
+ /* Old field is fine, check the new field */
- if (buf_page_is_checksum_valid_innodb(read_buf,
- checksum_field1, checksum_field2)) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_INNODB,
- space_id, page_no);
- return(FALSE);
+ if (checksum_field1 != 0
+ && checksum_field1 != BUF_NO_CHECKSUM_MAGIC) {
+
+ if (srv_checksum_algorithm
+ == SRV_CHECKSUM_ALGORITHM_CRC32) {
+
+ if (!crc32_inited) {
+ crc32 = buf_calc_page_crc32(read_buf);
+ crc32_inited = true;
+ }
+
+ if (checksum_field1 != crc32
+ && checksum_field1
+ != buf_calc_page_new_checksum(read_buf)) {
+ return TRUE;
+ }
+ } else {
+ ut_ad(srv_checksum_algorithm
+ == SRV_CHECKSUM_ALGORITHM_INNODB);
+
+ if (checksum_field1
+ != buf_calc_page_new_checksum(read_buf)) {
+
+ if (!crc32_inited) {
+ crc32 = buf_calc_page_crc32(
+ read_buf);
+ crc32_inited = true;
+ }
+
+ if (checksum_field1 != crc32) {
+ return TRUE;
+ }
+ }
+ }
}
- return(TRUE);
+ if (crc32_inited
+ && ((checksum_field1 == crc32
+ && checksum_field2 != crc32)
+ || (checksum_field1 != crc32
+ && checksum_field2 == crc32))) {
+ return TRUE;
+ }
- case SRV_CHECKSUM_ALGORITHM_NONE:
- /* should have returned FALSE earlier */
break;
- /* no default so the compiler will emit a warning if new enum
- is added and not handled here */
+ case SRV_CHECKSUM_ALGORITHM_NONE:
+ ut_error;
}
- ut_error;
- return(FALSE);
+ return FALSE;
}
/** Dump a page to stderr.
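The rewritten buf_page_is_corrupted() above folds the strict and non-strict cases together: a strict_* setting accepts only its own checksum, while plain crc32/innodb accept either the CRC-32 or the legacy InnoDB checksum for each stored field, and additionally reject a page whose two checksum fields disagree about whether they carry the CRC-32. Below is a deliberately simplified, standalone sketch of that acceptance rule; it ignores the BUF_NO_CHECKSUM_MAGIC, LSN and compressed-page special cases, and page_sums_t / page_is_corrupted() are invented names for illustration, not InnoDB symbols.

#include <cassert>
#include <stdint.h>

struct page_sums_t {
	uint32_t stored1;	/* checksum stored at the start of the page */
	uint32_t stored2;	/* checksum stored at the end of the page */
	uint32_t crc32;		/* freshly computed CRC-32 of the page */
	uint32_t legacy1;	/* computed legacy "new" checksum (start) */
	uint32_t legacy2;	/* computed legacy "old" checksum (end) */
};

enum algo_t { ALGO_CRC32, ALGO_INNODB, ALGO_STRICT_CRC32, ALGO_STRICT_INNODB };

/* Return true if the page should be flagged as corrupted. */
static bool page_is_corrupted(algo_t algo, const page_sums_t& p)
{
	switch (algo) {
	case ALGO_STRICT_CRC32:
		/* strict_*: only the configured algorithm is accepted */
		return p.stored1 != p.crc32 || p.stored2 != p.crc32;
	case ALGO_STRICT_INNODB:
		return p.stored1 != p.legacy1 || p.stored2 != p.legacy2;
	case ALGO_CRC32:
	case ALGO_INNODB:
		/* non-strict: each field may match either algorithm... */
		if ((p.stored1 != p.crc32 && p.stored1 != p.legacy1)
		    || (p.stored2 != p.crc32 && p.stored2 != p.legacy2)) {
			return true;
		}
		/* ...but if exactly one of the two fields carries the
		CRC-32, the page was only half updated: reject it. */
		return (p.stored1 == p.crc32) != (p.stored2 == p.crc32);
	}
	return false;
}

int main()
{
	page_sums_t ok   = { 7, 7, 7, 9, 9 };	/* both fields are CRC-32 */
	page_sums_t torn = { 7, 9, 7, 5, 9 };	/* CRC-32 start, legacy end */
	assert(!page_is_corrupted(ALGO_CRC32, ok));
	assert(page_is_corrupted(ALGO_STRICT_INNODB, ok));
	assert(page_is_corrupted(ALGO_CRC32, torn));
	return 0;
}

The mixed-field rejection at the end of the sketch corresponds to the crc32_inited block added near the bottom of the switch above.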
diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc
index 29ada53e412..b055374c23b 100644
--- a/storage/xtradb/dict/dict0dict.cc
+++ b/storage/xtradb/dict/dict0dict.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2014, 2017, MariaDB Corporation.
@@ -3387,10 +3387,7 @@ dict_foreign_find_index(
table, col_names, columns, n_cols,
index, types_idx,
check_charsets, check_null,
- error, err_col_no,err_index)
- && (!(index->online_status ==
- ONLINE_INDEX_ABORTED_DROPPED
- ||index->online_status == ONLINE_INDEX_ABORTED))) {
+ error, err_col_no,err_index)) {
if (error) {
*error = DB_SUCCESS;
}
diff --git a/storage/xtradb/dict/dict0mem.cc b/storage/xtradb/dict/dict0mem.cc
index 12cbd2f4a97..fb6895867fe 100644
--- a/storage/xtradb/dict/dict0mem.cc
+++ b/storage/xtradb/dict/dict0mem.cc
@@ -499,9 +499,7 @@ dict_mem_table_col_rename(
s += len + 1;
}
- /* This could fail if the data dictionaries are out of sync.
- Proceed with the renaming anyway. */
- ut_ad(!strcmp(from, s));
+ ut_ad(!my_strcasecmp(system_charset_info, from, s));
dict_mem_table_col_rename_low(table, nth_col, to, s);
}
diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc
index 49c5f4b090b..755672d9962 100644
--- a/storage/xtradb/fil/fil0fil.cc
+++ b/storage/xtradb/fil/fil0fil.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 2014, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -4947,6 +4947,17 @@ retry:
} while (err == EINTR
&& srv_shutdown_state == SRV_SHUTDOWN_NONE);
+ success = !err;
+ if (!success) {
+ ib_logf(IB_LOG_LEVEL_ERROR, "extending file %s"
+ " from " INT64PF " to " INT64PF " bytes"
+ " failed with error %d",
+ node->name, start_offset, len + start_offset,
+ err);
+ } else {
+ os_file_flush(node->handle);
+ }
+
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
success = FALSE; os_has_said_disk_full = TRUE;);
@@ -5056,7 +5067,7 @@ file_extended:
size_after_extend, *actual_size); */
mutex_exit(&fil_system->mutex);
- fil_flush(space_id);
+ fil_flush(space_id, true);
return(success);
}
@@ -5705,21 +5716,16 @@ fil_aio_wait(
}
#endif /* UNIV_HOTBACKUP */
-/**********************************************************************//**
-Flushes to disk possible writes cached by the OS. If the space does not exist
-or is being dropped, does not do anything. */
-UNIV_INTERN
-void
-fil_flush(
-/*======*/
- ulint space_id) /*!< in: file space id (this can be a group of
- log files or a tablespace of the database) */
+/** Make persistent possible writes cached by the OS.
+If the space does not exist or is being dropped, do nothing.
+@param[in] space_id tablespace identifier
+@param[in] metadata whether to update file system metadata */
+UNIV_INTERN void fil_flush(ulint space_id, bool metadata)
{
fil_space_t* space;
fil_node_t* node;
pfs_os_file_t file;
-
mutex_enter(&fil_system->mutex);
space = fil_space_get_by_id(space_id);
@@ -5748,8 +5754,10 @@ fil_flush(
}
#endif /* UNIV_DEBUG */
- mutex_exit(&fil_system->mutex);
- return;
+ if (!metadata) {
+ mutex_exit(&fil_system->mutex);
+ return;
+ }
}
space->n_pending_flushes++; /*!< prevent dropping of the space while
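Two points in the fil0fil.cc change above are easy to miss: a failed file extension is now logged with its start and end offsets and a successful one is followed by os_file_flush(), and fil_flush() gains a metadata flag (defaulted to false in the header change further below) so the extension path can flush even when no page writes are pending. The following is a toy stand-in illustrating only that calling convention, not the InnoDB function itself.

#include <iostream>

/* fil_flush_sketch() is an invented stand-in: the second parameter
defaults to false, so existing call sites such as fil_flush(space_id)
keep compiling unchanged, while the tablespace-extension path can pass
true to ask for file metadata (the new size) to be made durable even
if no page writes are pending. */
static void fil_flush_sketch(unsigned long space_id, bool metadata = false)
{
	std::cout << "flush space " << space_id
		  << (metadata ? " including file metadata" : "") << '\n';
}

int main()
{
	fil_flush_sketch(0);		/* unchanged callers: data pages only */
	fil_flush_sketch(0, true);	/* extension path: also persist the size */
	return 0;
}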
diff --git a/storage/xtradb/fts/fts0fts.cc b/storage/xtradb/fts/fts0fts.cc
index 9e038c2edd5..e2a479bf0ae 100644
--- a/storage/xtradb/fts/fts0fts.cc
+++ b/storage/xtradb/fts/fts0fts.cc
@@ -870,19 +870,19 @@ fts_drop_index(
err = fts_drop_index_tables(trx, index);
while (index->index_fts_syncing
- && !trx_is_interrupted(trx)) {
- DICT_BG_YIELD(trx);
- }
+ && !trx_is_interrupted(trx)) {
+ DICT_BG_YIELD(trx);
+ }
- fts_free(table);
+ fts_free(table);
return(err);
}
while (index->index_fts_syncing
- && !trx_is_interrupted(trx)) {
- DICT_BG_YIELD(trx);
- }
+ && !trx_is_interrupted(trx)) {
+ DICT_BG_YIELD(trx);
+ }
current_doc_id = table->fts->cache->next_doc_id;
first_doc_id = table->fts->cache->first_doc_id;
@@ -901,9 +901,9 @@ fts_drop_index(
if (index_cache != NULL) {
while (index->index_fts_syncing
- && !trx_is_interrupted(trx)) {
- DICT_BG_YIELD(trx);
- }
+ && !trx_is_interrupted(trx)) {
+ DICT_BG_YIELD(trx);
+ }
if (index_cache->words) {
fts_words_free(index_cache->words);
rbt_free(index_cache->words);
diff --git a/storage/xtradb/fts/fts0pars.cc b/storage/xtradb/fts/fts0pars.cc
index b7fef4ea8ce..19917ccd26a 100644
--- a/storage/xtradb/fts/fts0pars.cc
+++ b/storage/xtradb/fts/fts0pars.cc
@@ -106,8 +106,8 @@ extern int ftserror(const char* p);
typedef int (*fts_scanner)(YYSTYPE* val, yyscan_t yyscanner);
struct fts_lexer_t {
- fts_scanner scanner;
- void* yyscanner;
+ fts_scanner scanner;
+ void* yyscanner;
};
diff --git a/storage/xtradb/fts/fts0pars.y b/storage/xtradb/fts/fts0pars.y
index 36dae9f7ceb..65c4189eece 100644
--- a/storage/xtradb/fts/fts0pars.y
+++ b/storage/xtradb/fts/fts0pars.y
@@ -52,8 +52,8 @@ extern int ftserror(const char* p);
typedef int (*fts_scanner)(YYSTYPE* val, yyscan_t yyscanner);
struct fts_lexer_struct {
- fts_scanner scanner;
- void* yyscanner;
+ fts_scanner scanner;
+ void* yyscanner;
};
%}
diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc
index 89539bfeed2..8c0db0231ed 100644
--- a/storage/xtradb/handler/ha_innodb.cc
+++ b/storage/xtradb/handler/ha_innodb.cc
@@ -6119,19 +6119,21 @@ ha_innobase::open(
ib_table = dict_table_open_on_name(norm_name, FALSE, TRUE, ignore_err);
if (ib_table
- && ((!DICT_TF2_FLAG_IS_SET(ib_table, DICT_TF2_FTS_HAS_DOC_ID)
- && table->s->stored_fields != dict_table_get_n_user_cols(ib_table))
- || (DICT_TF2_FLAG_IS_SET(ib_table, DICT_TF2_FTS_HAS_DOC_ID)
- && (table->s->fields
- != dict_table_get_n_user_cols(ib_table) - 1)))) {
+ && (table->s->stored_fields != dict_table_get_n_user_cols(ib_table)
+ - (DICT_TF2_FLAG_IS_SET(ib_table, DICT_TF2_FTS_HAS_DOC_ID)
+ ? 1 : 0))) {
ib_logf(IB_LOG_LEVEL_WARN,
"table %s contains %lu user defined columns "
"in InnoDB, but %lu columns in MySQL. Please "
"check INFORMATION_SCHEMA.INNODB_SYS_COLUMNS and "
REFMAN "innodb-troubleshooting.html "
"for how to resolve it",
- norm_name, (ulong) dict_table_get_n_user_cols(ib_table),
- (ulong) table->s->fields);
+ norm_name,
+ (ulong) (dict_table_get_n_user_cols(ib_table)
+ - (DICT_TF2_FLAG_IS_SET(ib_table,
+ DICT_TF2_FTS_HAS_DOC_ID)
+ ? 1 : 0)),
+ (ulong) table->s->stored_fields);
/* Mark this table as corrupted, so the drop table
or force recovery can still use it, but not others. */
@@ -10591,16 +10593,6 @@ next_record:
return(HA_ERR_END_OF_FILE);
}
-/*************************************************************************
-*/
-
-void
-ha_innobase::ft_end()
-{
- fprintf(stderr, "ft_end()\n");
-
- rnd_end();
-}
#ifdef WITH_WSREP
extern dict_index_t*
wsrep_dict_foreign_find_index(
@@ -11030,7 +11022,6 @@ ha_innobase::wsrep_append_keys(
DBUG_RETURN(0);
}
#endif /* WITH_WSREP */
-
/*********************************************************************//**
Stores a reference to the current row to 'ref' field of the handle. Note
that in the case where we have generated the clustered index for the
@@ -11364,10 +11355,6 @@ err_col:
: ER_TABLESPACE_EXISTS, MYF(0), display_name);
}
- if (err == DB_SUCCESS && (flags2 & DICT_TF2_FTS)) {
- fts_optimize_add_table(table);
- }
-
error_ret:
DBUG_RETURN(convert_error_code_to_mysql(err, flags, thd));
}
@@ -12439,6 +12426,10 @@ ha_innobase::create(
trx_free_for_mysql(trx);
DBUG_RETURN(-1);
}
+
+ mutex_enter(&dict_sys->mutex);
+ fts_optimize_add_table(innobase_table);
+ mutex_exit(&dict_sys->mutex);
}
/* Note: We can't call update_thd() as prebuilt will not be
@@ -12931,36 +12922,35 @@ innobase_rename_table(
row_mysql_lock_data_dictionary(trx);
- dict_table_t* table = NULL;
- table = dict_table_open_on_name(norm_from, TRUE, FALSE,
- DICT_ERR_IGNORE_NONE);
+ dict_table_t* table = dict_table_open_on_name(norm_from, TRUE, FALSE,
+ DICT_ERR_IGNORE_NONE);
- /* Since DICT_BG_YIELD has sleep for 250 milliseconds,
+ /* Since DICT_BG_YIELD has sleep for 250 milliseconds,
Convert lock_wait_timeout unit from second to 250 milliseconds */
- long int lock_wait_timeout = thd_lock_wait_timeout(thd) * 4;
- if (table != NULL) {
- for (dict_index_t* index = dict_table_get_first_index(table);
- index != NULL;
- index = dict_table_get_next_index(index)) {
-
- if (index->type & DICT_FTS) {
- /* Found */
- while (index->index_fts_syncing
- && !trx_is_interrupted(trx)
- && (lock_wait_timeout--) > 0) {
- DICT_BG_YIELD(trx);
- }
- }
- }
- dict_table_close(table, TRUE, FALSE);
- }
+ long int lock_wait_timeout = thd_lock_wait_timeout(thd) * 4;
+ if (table != NULL) {
+ for (dict_index_t* index = dict_table_get_first_index(table);
+ index != NULL;
+ index = dict_table_get_next_index(index)) {
+
+ if (index->type & DICT_FTS) {
+ /* Found */
+ while (index->index_fts_syncing
+ && !trx_is_interrupted(trx)
+ && (lock_wait_timeout--) > 0) {
+ DICT_BG_YIELD(trx);
+ }
+ }
+ }
+ dict_table_close(table, TRUE, FALSE);
+ }
- /* FTS sync is in progress. We shall timeout this operation */
- if (lock_wait_timeout < 0) {
- error = DB_LOCK_WAIT_TIMEOUT;
- row_mysql_unlock_data_dictionary(trx);
- DBUG_RETURN(error);
- }
+ /* FTS sync is in progress. We shall timeout this operation */
+ if (lock_wait_timeout < 0) {
+ error = DB_LOCK_WAIT_TIMEOUT;
+ row_mysql_unlock_data_dictionary(trx);
+ DBUG_RETURN(error);
+ }
/* Transaction must be flagged as a locking transaction or it hasn't
been started yet. */
@@ -13126,12 +13116,6 @@ ha_innobase::rename_table(
error = DB_LOCK_WAIT;
}
- else if (error == DB_LOCK_WAIT_TIMEOUT) {
- my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0), to);
-
- error = DB_LOCK_WAIT;
- }
-
DBUG_RETURN(convert_error_code_to_mysql(error, 0, NULL));
}
@@ -21194,15 +21178,6 @@ int ha_innobase::multi_range_read_explain_info(uint mrr_mode, char *str, size_t
return ds_mrr.dsmrr_explain_info(mrr_mode, str, size);
}
-/*
- A helper function used only in index_cond_func_innodb
-*/
-
-bool ha_innobase::is_thd_killed()
-{
- return thd_kill_level(user_thd);
-}
-
/**********************************************************************
Issue a warning that the row is too big. */
UNIV_INTERN
diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h
index 2b0e44213a6..ddafcd85749 100644
--- a/storage/xtradb/handler/ha_innodb.h
+++ b/storage/xtradb/handler/ha_innodb.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2017, MariaDB Corporation.
+Copyright (c) 2013, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -164,7 +164,7 @@ class ha_innobase: public handler
int rnd_pos(uchar * buf, uchar *pos);
int ft_init();
- void ft_end();
+ void ft_end() { rnd_end(); }
FT_INFO *ft_init_ext(uint flags, uint inx, String* key);
int ft_read(uchar* buf);
@@ -372,10 +372,6 @@ public:
* @return idx_cond if pushed; NULL if not pushed
*/
class Item* idx_cond_push(uint keyno, class Item* idx_cond);
-
- /* An helper function for index_cond_func_innodb: */
- bool is_thd_killed();
-
private:
/** The multi range read session object */
DsMrr_impl ds_mrr;
diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc
index e9f31a97e96..c54ea2e123c 100644
--- a/storage/xtradb/handler/handler0alter.cc
+++ b/storage/xtradb/handler/handler0alter.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2005, 2018, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -3600,9 +3600,8 @@ check_if_ok_to_rename:
/* Check each index's column length to make sure they do not
exceed limit */
- for (ulint i = 0; i < ha_alter_info->index_add_count; i++) {
- const KEY* key = &ha_alter_info->key_info_buffer[
- ha_alter_info->index_add_buffer[i]];
+ for (ulint i = 0; i < ha_alter_info->key_count; i++) {
+ const KEY* key = &ha_alter_info->key_info_buffer[i];
if (key->flags & HA_FULLTEXT) {
/* The column length does not matter for
@@ -3709,12 +3708,14 @@ check_if_ok_to_rename:
continue;
}
+ dict_foreign_t* foreign;
+
for (dict_foreign_set::iterator it
= prebuilt->table->foreign_set.begin();
it != prebuilt->table->foreign_set.end();
++it) {
- dict_foreign_t* foreign = *it;
+ foreign = *it;
const char* fid = strchr(foreign->id, '/');
DBUG_ASSERT(fid);
@@ -3725,7 +3726,6 @@ check_if_ok_to_rename:
if (!my_strcasecmp(system_charset_info,
fid, drop->name)) {
- drop_fk[n_drop_fk++] = foreign;
goto found_fk;
}
}
@@ -3734,12 +3734,19 @@ check_if_ok_to_rename:
drop->name);
goto err_exit;
found_fk:
+ for (ulint i = n_drop_fk; i--; ) {
+ if (drop_fk[i] == foreign) {
+ goto dup_fk;
+ }
+ }
+ drop_fk[n_drop_fk++] = foreign;
+dup_fk:
continue;
}
DBUG_ASSERT(n_drop_fk > 0);
DBUG_ASSERT(n_drop_fk
- == ha_alter_info->alter_info->drop_list.elements);
+ <= ha_alter_info->alter_info->drop_list.elements);
} else {
drop_fk = NULL;
}
@@ -4520,7 +4527,6 @@ innobase_rename_column_try(
pars_info_add_ull_literal(info, "tableid", user_table->id);
pars_info_add_int4_literal(info, "nth", nth_col);
- pars_info_add_str_literal(info, "old", from);
pars_info_add_str_literal(info, "new", to);
trx->op_info = "renaming column in SYS_COLUMNS";
@@ -4530,7 +4536,7 @@ innobase_rename_column_try(
"PROCEDURE RENAME_SYS_COLUMNS_PROC () IS\n"
"BEGIN\n"
"UPDATE SYS_COLUMNS SET NAME=:new\n"
- "WHERE TABLE_ID=:tableid AND NAME=:old\n"
+ "WHERE TABLE_ID=:tableid\n"
"AND POS=:nth;\n"
"END;\n",
FALSE, trx);
@@ -4553,35 +4559,40 @@ err_exit:
index != NULL;
index = dict_table_get_next_index(index)) {
+ bool has_prefixes = false;
+ for (size_t i = 0; i < dict_index_get_n_fields(index); i++) {
+ if (dict_index_get_nth_field(index, i)->prefix_len) {
+ has_prefixes = true;
+ break;
+ }
+ }
+
for (ulint i = 0; i < dict_index_get_n_fields(index); i++) {
- if (strcmp(dict_index_get_nth_field(index, i)->name,
- from)) {
+ const dict_field_t* field
+ = dict_index_get_nth_field(index, i);
+ if (my_strcasecmp(system_charset_info, field->name,
+ from)) {
continue;
}
info = pars_info_create();
+ ulint pos = i;
+ if (has_prefixes) {
+ pos = (pos << 16) + field->prefix_len;
+ }
+
pars_info_add_ull_literal(info, "indexid", index->id);
- pars_info_add_int4_literal(info, "nth", i);
- pars_info_add_str_literal(info, "old", from);
+ pars_info_add_int4_literal(info, "nth", pos);
pars_info_add_str_literal(info, "new", to);
error = que_eval_sql(
info,
"PROCEDURE RENAME_SYS_FIELDS_PROC () IS\n"
"BEGIN\n"
-
"UPDATE SYS_FIELDS SET COL_NAME=:new\n"
- "WHERE INDEX_ID=:indexid AND COL_NAME=:old\n"
+ "WHERE INDEX_ID=:indexid\n"
"AND POS=:nth;\n"
-
- /* Try again, in case there is a prefix_len
- encoded in SYS_FIELDS.POS */
-
- "UPDATE SYS_FIELDS SET COL_NAME=:new\n"
- "WHERE INDEX_ID=:indexid AND COL_NAME=:old\n"
- "AND POS>=65536*:nth AND POS<65536*(:nth+1);\n"
-
"END;\n",
FALSE, trx);
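The rewritten SYS_FIELDS update above no longer matches rows by COL_NAME; instead it computes the exact POS key, which packs the column-prefix length into the low 16 bits whenever the index contains any prefix field, and otherwise stores the plain ordinal position. A small standalone sketch of that encoding follows; field_t and sys_fields_pos() are invented names for illustration, not InnoDB symbols.

#include <cassert>
#include <stddef.h>

/* A hypothetical field descriptor standing in for dict_field_t. */
struct field_t {
	const char*   name;
	unsigned long prefix_len;	/* 0 = whole column is indexed */
};

/* Value stored in SYS_FIELDS.POS for the i-th field of an index: if any
field of the index has a column prefix, POS encodes both the ordinal
position (high 16 bits) and the prefix length (low 16 bits); otherwise
POS is just the ordinal position. */
static unsigned long sys_fields_pos(const field_t* fields, size_t n_fields,
				    size_t i)
{
	bool has_prefixes = false;
	for (size_t j = 0; j < n_fields; j++) {
		if (fields[j].prefix_len) {
			has_prefixes = true;
			break;
		}
	}

	unsigned long pos = static_cast<unsigned long>(i);
	return has_prefixes ? (pos << 16) + fields[i].prefix_len : pos;
}

int main()
{
	/* KEY(a, b(10), c): the b(10) prefix forces the packed encoding. */
	field_t idx[] = { { "a", 0 }, { "b", 10 }, { "c", 0 } };
	assert(sys_fields_pos(idx, 3, 0) == 0x00000);	/* a: pos 0, no prefix */
	assert(sys_fields_pos(idx, 3, 1) == 0x1000A);	/* b: pos 1, prefix 10 */
	assert(sys_fields_pos(idx, 3, 2) == 0x20000);	/* c: pos 2, prefix 0  */

	/* KEY(a, c): no prefixes anywhere, so POS stays a plain ordinal. */
	field_t plain[] = { { "a", 0 }, { "c", 0 } };
	assert(sys_fields_pos(plain, 2, 1) == 1);
	return 0;
}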
@@ -4594,7 +4605,7 @@ err_exit:
rename_foreign:
trx->op_info = "renaming column in SYS_FOREIGN_COLS";
- std::list<dict_foreign_t*> fk_evict;
+ std::set<dict_foreign_t*> fk_evict;
bool foreign_modified;
for (dict_foreign_set::const_iterator it = user_table->foreign_set.begin();
@@ -4605,7 +4616,9 @@ rename_foreign:
foreign_modified = false;
for (unsigned i = 0; i < foreign->n_fields; i++) {
- if (strcmp(foreign->foreign_col_names[i], from)) {
+ if (my_strcasecmp(system_charset_info,
+ foreign->foreign_col_names[i],
+ from)) {
continue;
}
@@ -4613,7 +4626,6 @@ rename_foreign:
pars_info_add_str_literal(info, "id", foreign->id);
pars_info_add_int4_literal(info, "nth", i);
- pars_info_add_str_literal(info, "old", from);
pars_info_add_str_literal(info, "new", to);
error = que_eval_sql(
@@ -4622,8 +4634,7 @@ rename_foreign:
"BEGIN\n"
"UPDATE SYS_FOREIGN_COLS\n"
"SET FOR_COL_NAME=:new\n"
- "WHERE ID=:id AND POS=:nth\n"
- "AND FOR_COL_NAME=:old;\n"
+ "WHERE ID=:id AND POS=:nth;\n"
"END;\n",
FALSE, trx);
@@ -4634,7 +4645,7 @@ rename_foreign:
}
if (foreign_modified) {
- fk_evict.push_back(foreign);
+ fk_evict.insert(foreign);
}
}
@@ -4647,7 +4658,9 @@ rename_foreign:
dict_foreign_t* foreign = *it;
for (unsigned i = 0; i < foreign->n_fields; i++) {
- if (strcmp(foreign->referenced_col_names[i], from)) {
+ if (my_strcasecmp(system_charset_info,
+ foreign->referenced_col_names[i],
+ from)) {
continue;
}
@@ -4655,7 +4668,6 @@ rename_foreign:
pars_info_add_str_literal(info, "id", foreign->id);
pars_info_add_int4_literal(info, "nth", i);
- pars_info_add_str_literal(info, "old", from);
pars_info_add_str_literal(info, "new", to);
error = que_eval_sql(
@@ -4664,8 +4676,7 @@ rename_foreign:
"BEGIN\n"
"UPDATE SYS_FOREIGN_COLS\n"
"SET REF_COL_NAME=:new\n"
- "WHERE ID=:id AND POS=:nth\n"
- "AND REF_COL_NAME=:old;\n"
+ "WHERE ID=:id AND POS=:nth;\n"
"END;\n",
FALSE, trx);
@@ -4676,7 +4687,7 @@ rename_foreign:
}
if (foreign_modified) {
- fk_evict.push_back(foreign);
+ fk_evict.insert(foreign);
}
}
@@ -5072,7 +5083,7 @@ commit_try_rebuild(
& Alter_inplace_info::DROP_FOREIGN_KEY)
|| ctx->num_to_drop_fk > 0);
DBUG_ASSERT(ctx->num_to_drop_fk
- == ha_alter_info->alter_info->drop_list.elements);
+ <= ha_alter_info->alter_info->drop_list.elements);
for (dict_index_t* index = dict_table_get_first_index(rebuilt_table);
index;
@@ -5326,7 +5337,7 @@ commit_try_norebuild(
& Alter_inplace_info::DROP_FOREIGN_KEY)
|| ctx->num_to_drop_fk > 0);
DBUG_ASSERT(ctx->num_to_drop_fk
- == ha_alter_info->alter_info->drop_list.elements);
+ <= ha_alter_info->alter_info->drop_list.elements);
for (ulint i = 0; i < ctx->num_to_add_index; i++) {
dict_index_t* index = ctx->add_index[i];
@@ -5647,7 +5658,6 @@ ha_innobase::commit_inplace_alter_table(
Alter_inplace_info* ha_alter_info,
bool commit)
{
- dberr_t error;
ha_innobase_inplace_ctx* ctx0
= static_cast<ha_innobase_inplace_ctx*>
(ha_alter_info->handler_ctx);
@@ -5714,7 +5724,7 @@ ha_innobase::commit_inplace_alter_table(
transactions collected during crash recovery could be
holding InnoDB locks only, not MySQL locks. */
- error = row_merge_lock_table(
+ dberr_t error = row_merge_lock_table(
prebuilt->trx, ctx->old_table, LOCK_X);
if (error != DB_SUCCESS) {
@@ -5899,9 +5909,9 @@ rollback_trx:
file operations that will be performed in
commit_cache_rebuild(), and if none, generate
the redo log for these operations. */
- error = fil_mtr_rename_log(ctx->old_table,
- ctx->new_table,
- ctx->tmp_name, &mtr);
+ dberr_t error = fil_mtr_rename_log(
+ ctx->old_table, ctx->new_table, ctx->tmp_name,
+ &mtr);
if (error != DB_SUCCESS) {
/* Out of memory or a problem will occur
when renaming files. */
@@ -6026,39 +6036,30 @@ rollback_trx:
/* Rename the tablespace files. */
commit_cache_rebuild(ctx);
- error = innobase_update_foreign_cache(ctx, user_thd);
- if (error != DB_SUCCESS) {
- goto foreign_fail;
+ if (innobase_update_foreign_cache(ctx, user_thd)
+ != DB_SUCCESS
+ && prebuilt->trx->check_foreigns) {
+foreign_fail:
+ push_warning_printf(
+ user_thd,
+ Sql_condition::WARN_LEVEL_WARN,
+ ER_ALTER_INFO,
+ "failed to load FOREIGN KEY"
+ " constraints");
}
} else {
- error = innobase_update_foreign_cache(ctx, user_thd);
+ bool fk_fail = innobase_update_foreign_cache(
+ ctx, user_thd) != DB_SUCCESS;
- if (error != DB_SUCCESS) {
-foreign_fail:
- /* The data dictionary cache
- should be corrupted now. The
- best solution should be to
- kill and restart the server,
- but the *.frm file has not
- been replaced yet. */
- my_error(ER_CANNOT_ADD_FOREIGN,
- MYF(0));
- sql_print_error(
- "InnoDB: dict_load_foreigns()"
- " returned %u for %s",
- (unsigned) error,
- thd_query_string(user_thd)
- ->str);
- ut_ad(0);
- } else {
- if (!commit_cache_norebuild(
- ctx, table, trx)) {
- ut_a(!prebuilt->trx->check_foreigns);
- }
+ if (!commit_cache_norebuild(ctx, table, trx)) {
+ fk_fail = true;
+ ut_ad(!prebuilt->trx->check_foreigns);
+ }
- innobase_rename_columns_cache(
- ha_alter_info, table,
- ctx->new_table);
+ innobase_rename_columns_cache(ha_alter_info, table,
+ ctx->new_table);
+ if (fk_fail && prebuilt->trx->check_foreigns) {
+ goto foreign_fail;
}
}
DBUG_INJECT_CRASH("ib_commit_inplace_crash",
diff --git a/storage/xtradb/include/buf0buf.h b/storage/xtradb/include/buf0buf.h
index a5b5cc84166..0f6b8b67250 100644
--- a/storage/xtradb/include/buf0buf.h
+++ b/storage/xtradb/include/buf0buf.h
@@ -1477,6 +1477,9 @@ struct buf_page_t{
ib_uint32_t space; /*!< tablespace id. */
ib_uint32_t offset; /*!< page number. */
+ buf_page_t* hash; /*!< node used in chaining to
+ buf_pool->page_hash or
+ buf_pool->zip_hash */
/** count of how manyfold this block is currently bufferfixed */
#ifdef PAGE_ATOMIC_REF_COUNT
ib_uint32_t buf_fix_count;
@@ -1530,9 +1533,6 @@ struct buf_page_t{
zip.data == NULL means an active
buf_pool->watch */
#ifndef UNIV_HOTBACKUP
- buf_page_t* hash; /*!< node used in chaining to
- buf_pool->page_hash or
- buf_pool->zip_hash */
#ifdef UNIV_DEBUG
ibool in_page_hash; /*!< TRUE if in buf_pool->page_hash */
ibool in_zip_hash; /*!< TRUE if in buf_pool->zip_hash */
diff --git a/storage/xtradb/include/data0type.ic b/storage/xtradb/include/data0type.ic
index c55ec38ad43..a7e2eb0682c 100644
--- a/storage/xtradb/include/data0type.ic
+++ b/storage/xtradb/include/data0type.ic
@@ -603,6 +603,7 @@ dtype_get_min_size_low(
return(0);
}
#endif /* UNIV_DEBUG */
+ /* fall through */
case DATA_CHAR:
case DATA_FIXBINARY:
case DATA_INT:
diff --git a/storage/xtradb/include/dict0mem.h b/storage/xtradb/include/dict0mem.h
index 8417eb26368..b20fee3e516 100644
--- a/storage/xtradb/include/dict0mem.h
+++ b/storage/xtradb/include/dict0mem.h
@@ -292,7 +292,8 @@ dict_mem_table_add_col(
const char* name, /*!< in: column name, or NULL */
ulint mtype, /*!< in: main datatype */
ulint prtype, /*!< in: precise type */
- ulint len); /*!< in: precision */
+ ulint len) /*!< in: precision */
+ MY_ATTRIBUTE((nonnull(1)));
/**********************************************************************//**
Renames a column of a table in the data dictionary cache. */
UNIV_INTERN
@@ -939,8 +940,10 @@ struct dict_table_t{
table_id_t id; /*!< id of the table */
+ hash_node_t id_hash; /*!< hash chain node */
mem_heap_t* heap; /*!< memory heap */
char* name; /*!< table name */
+ hash_node_t name_hash; /*!< hash chain node */
const char* dir_path_of_temp_table;/*!< NULL or the directory path
where a TEMPORARY table that was explicitly
created by a user should be placed if
@@ -996,8 +999,6 @@ struct dict_table_t{
dictionary information and
MySQL FRM information mismatch. */
#ifndef UNIV_HOTBACKUP
- hash_node_t name_hash; /*!< hash chain node */
- hash_node_t id_hash; /*!< hash chain node */
UT_LIST_BASE_NODE_T(dict_index_t)
indexes; /*!< list of indexes of the table */
diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h
index 86b1c561349..8e737f8fb0e 100644
--- a/storage/xtradb/include/fil0fil.h
+++ b/storage/xtradb/include/fil0fil.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -238,7 +238,9 @@ struct fil_node_t {
struct fil_space_t {
char* name; /*!< space name = the path to the first file in
it */
+ hash_node_t name_hash;/*!< hash chain the name_hash table */
ulint id; /*!< space id */
+ hash_node_t hash; /*!< hash chain node */
ib_int64_t tablespace_version;
/*!< in DISCARD/IMPORT this timestamp
is used to check if we should ignore
@@ -285,8 +287,6 @@ struct fil_space_t {
trying to read a block.
Dropping of the tablespace is forbidden
if this is positive */
- hash_node_t hash; /*!< hash chain node */
- hash_node_t name_hash;/*!< hash chain the name_hash table */
#ifndef UNIV_HOTBACKUP
prio_rw_lock_t latch; /*!< latch protecting the file space storage
allocation */
@@ -976,15 +976,11 @@ fil_aio_wait(
/*=========*/
ulint segment); /*!< in: the number of the segment in the aio
array to wait for */
-/**********************************************************************//**
-Flushes to disk possible writes cached by the OS. If the space does not exist
-or is being dropped, does not do anything. */
-UNIV_INTERN
-void
-fil_flush(
-/*======*/
- ulint space_id); /*!< in: file space id (this can be a group of
- log files or a tablespace of the database) */
+/** Make persistent possible writes cached by the OS.
+If the space does not exist or is being dropped, do nothing.
+@param[in] space_id tablespace identifier
+@param[in] metadata whether to update file system metadata */
+UNIV_INTERN void fil_flush(ulint space_id, bool metadata = false);
/**********************************************************************//**
Flushes to disk writes in file spaces of the given type possibly cached by
the OS. */
diff --git a/storage/xtradb/include/page0page.h b/storage/xtradb/include/page0page.h
index eefa0fa4c5b..26e71ff8081 100644
--- a/storage/xtradb/include/page0page.h
+++ b/storage/xtradb/include/page0page.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2013, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1109,23 +1110,6 @@ const rec_t*
page_find_rec_max_not_deleted(
const page_t* page);
-#endif /* #ifndef UNIV_INNOCHECKSUM */
-
-/** Issue a warning when the checksum that is stored in the page is valid,
-but different than the global setting innodb_checksum_algorithm.
-@param[in] current_algo current checksum algorithm
-@param[in] page_checksum page valid checksum
-@param[in] space_id tablespace id
-@param[in] page_no page number */
-void
-page_warn_strict_checksum(
- srv_checksum_algorithm_t curr_algo,
- srv_checksum_algorithm_t page_checksum,
- ulint space_id,
- ulint page_no);
-
-#ifndef UNIV_INNOCHECKSUM
-
#ifdef UNIV_MATERIALIZE
#undef UNIV_INLINE
#define UNIV_INLINE UNIV_INLINE_ORIGINAL
diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i
index 5df8a10bcf9..6522a19c128 100644
--- a/storage/xtradb/include/univ.i
+++ b/storage/xtradb/include/univ.i
@@ -45,10 +45,10 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_MAJOR 5
#define INNODB_VERSION_MINOR 6
-#define INNODB_VERSION_BUGFIX 39
+#define INNODB_VERSION_BUGFIX 42
#ifndef PERCONA_INNODB_VERSION
-#define PERCONA_INNODB_VERSION 84.1
+#define PERCONA_INNODB_VERSION 84.2
#endif
/* Enable UNIV_LOG_ARCHIVE in XtraDB */
@@ -607,12 +607,14 @@ typedef void* os_thread_ret_t;
#include "ut0dbg.h"
#include "ut0ut.h"
#include "db0err.h"
+#include <my_valgrind.h>
+/* define UNIV macros in terms of my_valgrind.h */
+#define UNIV_MEM_INVALID(addr, size) MEM_UNDEFINED(addr, size)
+#define UNIV_MEM_FREE(addr, size) MEM_NOACCESS(addr, size)
+#define UNIV_MEM_ALLOC(addr, size) UNIV_MEM_INVALID(addr, size)
#ifdef UNIV_DEBUG_VALGRIND
# include <valgrind/memcheck.h>
# define UNIV_MEM_VALID(addr, size) VALGRIND_MAKE_MEM_DEFINED(addr, size)
-# define UNIV_MEM_INVALID(addr, size) VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
-# define UNIV_MEM_FREE(addr, size) VALGRIND_MAKE_MEM_NOACCESS(addr, size)
-# define UNIV_MEM_ALLOC(addr, size) VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
# define UNIV_MEM_DESC(addr, size) VALGRIND_CREATE_BLOCK(addr, size, #addr)
# define UNIV_MEM_UNDESC(b) VALGRIND_DISCARD(b)
# define UNIV_MEM_ASSERT_RW_LOW(addr, size, should_abort) do { \
@@ -647,9 +649,6 @@ typedef void* os_thread_ret_t;
} while (0)
#else
# define UNIV_MEM_VALID(addr, size) do {} while(0)
-# define UNIV_MEM_INVALID(addr, size) do {} while(0)
-# define UNIV_MEM_FREE(addr, size) do {} while(0)
-# define UNIV_MEM_ALLOC(addr, size) do {} while(0)
# define UNIV_MEM_DESC(addr, size) do {} while(0)
# define UNIV_MEM_UNDESC(b) do {} while(0)
# define UNIV_MEM_ASSERT_RW_LOW(addr, size, should_abort) do {} while(0)
diff --git a/storage/xtradb/log/log0online.cc b/storage/xtradb/log/log0online.cc
index 736ef3e3d08..bf26690e8b3 100644
--- a/storage/xtradb/log/log0online.cc
+++ b/storage/xtradb/log/log0online.cc
@@ -1873,6 +1873,8 @@ log_online_purge_changed_page_bitmaps(
for (i = 0; i < bitmap_files.count; i++) {
+ char full_bmp_file_name[2 * FN_REFLEN + 2];
+
/* We consider the end LSN of the current bitmap, derived from
the start LSN of the subsequent bitmap file, to determine
whether to remove the current bitmap. Note that bitmap_files
@@ -1888,8 +1890,45 @@ log_online_purge_changed_page_bitmaps(
break;
}
+
+ /* In some non-trivial cases the sequence of .xdb files may
+ have gaps. For instance:
+ ib_modified_log_1_0.xdb
+ ib_modified_log_2_<mmm>.xdb
+ ib_modified_log_4_<nnn>.xdb
+ Adding this check as a safety precaution. */
+ if (bitmap_files.files[i].name[0] == '\0')
+ continue;
+
+ /* If redo log tracking is enabled, reuse 'bmp_file_home'
+ from 'log_bmp_sys'. Otherwise, compose the full '.xdb' file
+ path from 'srv_data_home', adding a path separator if
+ necessary. */
+ if (log_bmp_sys != NULL) {
+ ut_snprintf(full_bmp_file_name,
+ sizeof(full_bmp_file_name),
+ "%s%s", log_bmp_sys->bmp_file_home,
+ bitmap_files.files[i].name);
+ }
+ else {
+ char separator[2] = {0, 0};
+ const size_t srv_data_home_len =
+ strlen(srv_data_home);
+
+ ut_a(srv_data_home_len < FN_REFLEN);
+ if (srv_data_home_len != 0 &&
+ srv_data_home[srv_data_home_len - 1] !=
+ SRV_PATH_SEPARATOR) {
+ separator[0] = SRV_PATH_SEPARATOR;
+ }
+ ut_snprintf(full_bmp_file_name,
+ sizeof(full_bmp_file_name), "%s%s%s",
+ srv_data_home, separator,
+ bitmap_files.files[i].name);
+ }
+
if (!os_file_delete_if_exists(innodb_file_bmp_key,
- bitmap_files.files[i].name)) {
+ full_bmp_file_name)) {
os_file_get_last_error(TRUE);
result = TRUE;
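The purge loop above previously handed only the bare bitmap file name to os_file_delete_if_exists(); it now builds a full path, taking bmp_file_home from log_bmp_sys when redo log tracking is running and otherwise prepending srv_data_home, adding a path separator only when one is missing. A minimal sketch of that join rule; join_bmp_path() is an invented helper for illustration, not a server function.

#include <cassert>
#include <string>

/* Append a path separator after the home directory only when the home
is non-empty and does not already end with one, mirroring the separator
handling added above. */
static std::string join_bmp_path(const std::string& home,
				 const std::string& file, char sep = '/')
{
	if (home.empty() || home[home.size() - 1] == sep) {
		return home + file;
	}
	return home + sep + file;
}

int main()
{
	assert(join_bmp_path("/data/", "ib_modified_log_1_0.xdb")
	       == "/data/ib_modified_log_1_0.xdb");
	assert(join_bmp_path("/data", "ib_modified_log_1_0.xdb")
	       == "/data/ib_modified_log_1_0.xdb");
	assert(join_bmp_path("", "ib_modified_log_1_0.xdb")
	       == "ib_modified_log_1_0.xdb");
	return 0;
}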
diff --git a/storage/xtradb/os/os0proc.cc b/storage/xtradb/os/os0proc.cc
index 6c9116e1397..c64c37705b5 100644
--- a/storage/xtradb/os/os0proc.cc
+++ b/storage/xtradb/os/os0proc.cc
@@ -247,7 +247,6 @@ os_mem_free_large(
ut_a(ut_total_allocated_memory >= size);
ut_total_allocated_memory -= size;
os_fast_mutex_unlock(&ut_list_mutex);
- UNIV_MEM_FREE(ptr, size);
return;
}
#endif /* HAVE_LARGE_PAGES && UNIV_LINUX */
@@ -263,7 +262,6 @@ os_mem_free_large(
ut_a(ut_total_allocated_memory >= size);
ut_total_allocated_memory -= size;
os_fast_mutex_unlock(&ut_list_mutex);
- UNIV_MEM_FREE(ptr, size);
}
#elif !defined OS_MAP_ANON
ut_free(ptr);
@@ -281,7 +279,6 @@ os_mem_free_large(
ut_a(ut_total_allocated_memory >= size);
ut_total_allocated_memory -= size;
os_fast_mutex_unlock(&ut_list_mutex);
- UNIV_MEM_FREE(ptr, size);
}
#endif
}
diff --git a/storage/xtradb/page/page0page.cc b/storage/xtradb/page/page0page.cc
index 518400a9bf0..76c41941b08 100644
--- a/storage/xtradb/page/page0page.cc
+++ b/storage/xtradb/page/page0page.cc
@@ -2814,49 +2814,3 @@ page_find_rec_max_not_deleted(
}
#endif /* #ifndef UNIV_INNOCHECKSUM */
-
-/** Issue a warning when the checksum that is stored in the page is valid,
-but different than the global setting innodb_checksum_algorithm.
-@param[in] current_algo current checksum algorithm
-@param[in] page_checksum page valid checksum
-@param[in] space_id tablespace id
-@param[in] page_no page number */
-void
-page_warn_strict_checksum(
- srv_checksum_algorithm_t curr_algo,
- srv_checksum_algorithm_t page_checksum,
- ulint space_id,
- ulint page_no)
-{
- srv_checksum_algorithm_t curr_algo_nonstrict = srv_checksum_algorithm_t();
- switch (curr_algo) {
- case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
- curr_algo_nonstrict = SRV_CHECKSUM_ALGORITHM_CRC32;
- break;
- case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
- curr_algo_nonstrict = SRV_CHECKSUM_ALGORITHM_INNODB;
- break;
- case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
- curr_algo_nonstrict = SRV_CHECKSUM_ALGORITHM_NONE;
- break;
- default:
- ut_error;
- }
-
-#ifdef UNIV_INNOCHECKSUM
- fprintf(stderr,
-#else
- ib_logf(IB_LOG_LEVEL_WARN,
-#endif
- "innodb_checksum_algorithm is set to \"%s\""
- " but the page [page id: space=" ULINTPF ","
- " page number=" ULINTPF "] contains a valid checksum \"%s\"."
- " Accepting the page as valid. Change innodb_checksum_algorithm"
- " to \"%s\" to silently accept such pages or rewrite all pages"
- " so that they contain \"%s\" checksum.",
- buf_checksum_algorithm_name(curr_algo),
- space_id, page_no,
- buf_checksum_algorithm_name(page_checksum),
- buf_checksum_algorithm_name(curr_algo_nonstrict),
- buf_checksum_algorithm_name(curr_algo_nonstrict));
-}
diff --git a/storage/xtradb/page/page0zip.cc b/storage/xtradb/page/page0zip.cc
index 0d2bb7fb986..b9b05db24cc 100644
--- a/storage/xtradb/page/page0zip.cc
+++ b/storage/xtradb/page/page0zip.cc
@@ -2,7 +2,7 @@
Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2014, SkySQL Ab. All Rights Reserved.
+Copyright (c) 2014, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -4934,10 +4934,6 @@ page_zip_verify_checksum(
stored = static_cast<ib_uint32_t>(mach_read_from_4(
static_cast<const unsigned char*>(data) + FIL_PAGE_SPACE_OR_CHKSUM));
- ulint page_no = mach_read_from_4(static_cast<const unsigned char*> (data) + FIL_PAGE_OFFSET);
- ulint space_id = mach_read_from_4(static_cast<const unsigned char*>
- (data) + FIL_PAGE_SPACE_ID);
-
#if FIL_PAGE_LSN % 8
#error "FIL_PAGE_LSN must be 64 bit aligned"
#endif
@@ -4974,97 +4970,30 @@ page_zip_verify_checksum(
switch (curr_algo) {
case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
+ return stored == calc;
case SRV_CHECKSUM_ALGORITHM_CRC32:
-
if (stored == BUF_NO_CHECKSUM_MAGIC) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_CRC32) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_NONE,
- space_id, page_no);
- }
-
return(TRUE);
}
+ crc32 = calc;
innodb = static_cast<ib_uint32_t>(page_zip_calc_checksum(
data, size, SRV_CHECKSUM_ALGORITHM_INNODB));
-
- if (stored == innodb) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_CRC32) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_INNODB,
- space_id, page_no);
- }
-
- return(TRUE);
- }
-
break;
- case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
case SRV_CHECKSUM_ALGORITHM_INNODB:
-
if (stored == BUF_NO_CHECKSUM_MAGIC) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_NONE,
- space_id, page_no);
- }
-
return(TRUE);
}
crc32 = static_cast<ib_uint32_t>(page_zip_calc_checksum(
data, size, SRV_CHECKSUM_ALGORITHM_CRC32));
-
- if (stored == crc32) {
- if (curr_algo
- == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_CRC32,
- space_id, page_no);
- }
-
- return(TRUE);
- }
-
- break;
- case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
-
- crc32 = static_cast<ib_uint32_t>(page_zip_calc_checksum(
- data, size, SRV_CHECKSUM_ALGORITHM_CRC32));
-
- if (stored == crc32) {
- page_warn_strict_checksum(
- curr_algo, SRV_CHECKSUM_ALGORITHM_CRC32,
- space_id, page_no);
-
- return(TRUE);
- }
-
- innodb = static_cast<ib_uint32_t>(page_zip_calc_checksum(
- data, size, SRV_CHECKSUM_ALGORITHM_INNODB));
-
- if (stored == innodb) {
- page_warn_strict_checksum(
- curr_algo,
- SRV_CHECKSUM_ALGORITHM_INNODB,
- space_id, page_no);
- return(TRUE);
- }
-
+ innodb = calc;
break;
case SRV_CHECKSUM_ALGORITHM_NONE:
- ut_error;
- /* no default so the compiler will emit a warning if new enum
- is added and not handled here */
+ return TRUE;
}
- return(FALSE);
+ return (stored == crc32 || stored == innodb);
}
diff --git a/storage/xtradb/row/row0ftsort.cc b/storage/xtradb/row/row0ftsort.cc
index bb9821d4484..d5df122c9a8 100644
--- a/storage/xtradb/row/row0ftsort.cc
+++ b/storage/xtradb/row/row0ftsort.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2010, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -744,7 +744,7 @@ loop:
goto func_exit;
}
- UNIV_MEM_INVALID(block[t_ctx.buf_used][0], srv_sort_buf_size);
+ UNIV_MEM_INVALID(block[t_ctx.buf_used], srv_sort_buf_size);
buf[t_ctx.buf_used] = row_merge_buf_empty(buf[t_ctx.buf_used]);
mycount[t_ctx.buf_used] += t_ctx.rows_added[t_ctx.buf_used];
t_ctx.rows_added[t_ctx.buf_used] = 0;
@@ -837,8 +837,7 @@ exit:
goto func_exit;
}
- UNIV_MEM_INVALID(block[i][0],
- srv_sort_buf_size);
+ UNIV_MEM_INVALID(block[i], srv_sort_buf_size);
}
buf[i] = row_merge_buf_empty(buf[i]);
diff --git a/storage/xtradb/row/row0import.cc b/storage/xtradb/row/row0import.cc
index f3163a63754..5c42ab6f2bb 100644
--- a/storage/xtradb/row/row0import.cc
+++ b/storage/xtradb/row/row0import.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2012, 2018, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2015, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -1969,11 +1969,7 @@ PageConverter::update_index_page(
return(DB_SUCCESS);
}
- if (!page_is_leaf(block->frame)) {
- return (DB_SUCCESS);
- }
-
- return(update_records(block));
+ return page_is_leaf(block->frame) ? update_records(block) : DB_SUCCESS;
}
/**
@@ -3834,4 +3830,3 @@ row_import_for_mysql(
return(row_import_cleanup(prebuilt, trx, err));
}
-
diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc
index cb6e6bbcb1f..d51771820a2 100644
--- a/storage/xtradb/row/row0mysql.cc
+++ b/storage/xtradb/row/row0mysql.cc
@@ -4347,7 +4347,8 @@ do_drop:
char msg_tablename[MAX_FULL_NAME_LEN + 1];
innobase_format_name(
- msg_tablename, sizeof(tablename),
+ msg_tablename,
+ sizeof msg_tablename,
tablename, FALSE);
ib_logf(IB_LOG_LEVEL_INFO,
@@ -4995,18 +4996,6 @@ row_rename_table_for_mysql(
goto funct_exit;
}
- /* Wait for background fts sync to finish */
- for (retry = 1; dict_fts_index_syncing(table); ++retry) {
- DICT_BG_YIELD(trx);
- if (retry % 100 == 0) {
- ib_logf(IB_LOG_LEVEL_INFO,
- "Unable to rename table %s to new name"
- " %s because FTS sync is running on table."
- " Retrying\n",
- old_name, new_name);
- }
- }
-
/* We use the private SQL parser of Innobase to generate the query
graphs needed in updating the dictionary data from system tables. */
diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc
index ce5d74b5a3b..daef291248b 100644
--- a/storage/xtradb/row/row0sel.cc
+++ b/storage/xtradb/row/row0sel.cc
@@ -2736,7 +2736,8 @@ row_sel_field_store_in_mysql_format_func(
case DATA_SYS:
/* These column types should never be shipped to MySQL. */
ut_ad(0);
- break;
+ /* fall through */
+
case DATA_CHAR:
case DATA_FIXBINARY:
case DATA_FLOAT:
@@ -4566,7 +4567,7 @@ no_gap_lock:
prebuilt->new_rec_locks = 1;
}
err = DB_SUCCESS;
- break;
+ /* fall through */
case DB_SUCCESS:
break;
case DB_LOCK_WAIT:
diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc
index 34560969b67..3309a602e7f 100644
--- a/storage/xtradb/srv/srv0start.cc
+++ b/storage/xtradb/srv/srv0start.cc
@@ -2337,6 +2337,9 @@ innobase_start_or_create_for_mysql()
break;
}
+ if (stat_info.type != OS_FILE_TYPE_FILE) {
+ break;
+ }
if (!srv_file_check_mode(logfilename)) {
return(DB_ERROR);
}
diff --git a/support-files/mysql.server.sh b/support-files/mysql.server.sh
index 96444385e45..910870ae034 100644
--- a/support-files/mysql.server.sh
+++ b/support-files/mysql.server.sh
@@ -181,7 +181,11 @@ fi
user='@MYSQLD_USER@'
su_kill() {
- su - $user -s /bin/sh -c "kill $*" >/dev/null 2>&1
+ if test "$USER" = "$user"; then
+ kill $* >/dev/null 2>&1
+ else
+ su - $user -s /bin/sh -c "kill $*" >/dev/null 2>&1
+ fi
}
#
diff --git a/unittest/mysys/lf-t.c b/unittest/mysys/lf-t.c
index 573a56cc1d6..c1c89f60864 100644
--- a/unittest/mysys/lf-t.c
+++ b/unittest/mysys/lf-t.c
@@ -48,9 +48,6 @@ pthread_handler_t test_lf_pinbox(void *arg)
pins= lf_pinbox_get_pins(&lf_allocator.pinbox);
}
lf_pinbox_put_pins(pins);
- pthread_mutex_lock(&mutex);
- if (!--running_threads) pthread_cond_signal(&cond);
- pthread_mutex_unlock(&mutex);
if (with_my_thread_init)
my_thread_end();
@@ -105,7 +102,6 @@ pthread_handler_t test_lf_alloc(void *arg)
bad|= lf_allocator.mallocs - lf_alloc_pool_count(&lf_allocator);
#endif
}
- if (!--running_threads) pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
if (with_my_thread_init)
@@ -159,7 +155,6 @@ pthread_handler_t test_lf_hash(void *arg)
lf_hash.size, inserts);
bad|= lf_hash.count;
}
- if (!--running_threads) pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
if (with_my_thread_init)
my_thread_end();
diff --git a/unittest/mysys/my_atomic-t.c b/unittest/mysys/my_atomic-t.c
index 35e782eb360..5eb988e2e15 100644
--- a/unittest/mysys/my_atomic-t.c
+++ b/unittest/mysys/my_atomic-t.c
@@ -35,9 +35,6 @@ pthread_handler_t test_atomic_add(void *arg)
my_atomic_add32(&bad, -x);
my_atomic_rwlock_wrunlock(&rwl);
}
- pthread_mutex_lock(&mutex);
- if (!--running_threads) pthread_cond_signal(&cond);
- pthread_mutex_unlock(&mutex);
return 0;
}
@@ -58,13 +55,6 @@ pthread_handler_t test_atomic_add64(void *arg)
my_atomic_add64(&a64, -x);
my_atomic_rwlock_wrunlock(&rwl);
}
- pthread_mutex_lock(&mutex);
- if (!--running_threads)
- {
- bad= (a64 != 0);
- pthread_cond_signal(&cond);
- }
- pthread_mutex_unlock(&mutex);
return 0;
}
@@ -108,9 +98,6 @@ pthread_handler_t test_atomic_fas(void *arg)
my_atomic_add32(&bad, -x);
my_atomic_rwlock_wrunlock(&rwl);
- pthread_mutex_lock(&mutex);
- if (!--running_threads) pthread_cond_signal(&cond);
- pthread_mutex_unlock(&mutex);
return 0;
}
@@ -140,9 +127,6 @@ pthread_handler_t test_atomic_cas(void *arg)
my_atomic_rwlock_wrunlock(&rwl);
} while (!ok) ;
}
- pthread_mutex_lock(&mutex);
- if (!--running_threads) pthread_cond_signal(&cond);
- pthread_mutex_unlock(&mutex);
return 0;
}
@@ -178,6 +162,7 @@ void do_tests()
}
a64=0;
test_concurrently("my_atomic_add64", test_atomic_add64, THREADS, CYCLES);
+ bad= (a64 != 0);
my_atomic_rwlock_destroy(&rwl);
}
diff --git a/unittest/mysys/thr_template.c b/unittest/mysys/thr_template.c
index 7304eb50955..0e06bf6e731 100644
--- a/unittest/mysys/thr_template.c
+++ b/unittest/mysys/thr_template.c
@@ -20,35 +20,34 @@
#include <tap.h>
volatile uint32 bad;
-pthread_attr_t thr_attr;
pthread_mutex_t mutex;
-pthread_cond_t cond;
-uint running_threads;
void do_tests();
void test_concurrently(const char *test, pthread_handler handler, int n, int m)
{
- pthread_t t;
+ pthread_t *threads= malloc(n * sizeof(pthread_t));
+ int i;
ulonglong now= my_interval_timer();
+ assert(threads);
bad= 0;
diag("Testing %s with %d threads, %d iterations... ", test, n, m);
- for (running_threads= n ; n ; n--)
+ for (i= 0; i < n; i++)
{
- if (pthread_create(&t, &thr_attr, handler, &m) != 0)
+ if (pthread_create(&threads[i], 0, handler, &m) != 0)
{
diag("Could not create thread");
abort();
}
}
- pthread_mutex_lock(&mutex);
- while (running_threads)
- pthread_cond_wait(&cond, &mutex);
- pthread_mutex_unlock(&mutex);
+
+ for (i= 0; i < n; i++)
+ pthread_join(threads[i], 0);
now= my_interval_timer() - now;
+ free(threads);
ok(!bad, "tested %s in %g secs (%d)", test, ((double)now)/1e9, bad);
}
@@ -60,9 +59,6 @@ int main(int argc __attribute__((unused)), char **argv)
DBUG_SET_INITIAL(argv[1]);
pthread_mutex_init(&mutex, 0);
- pthread_cond_init(&cond, 0);
- pthread_attr_init(&thr_attr);
- pthread_attr_setdetachstate(&thr_attr,PTHREAD_CREATE_DETACHED);
#ifdef MY_ATOMIC_MODE_RWLOCKS
#if defined(HPUX11) || defined(__POWERPC__) /* showed to be very slow (scheduler-related) */
@@ -79,16 +75,7 @@ int main(int argc __attribute__((unused)), char **argv)
do_tests();
- /*
- workaround until we know why it crashes randomly on some machine
- (BUG#22320).
- */
-#ifdef NOT_USED
- sleep(2);
-#endif
pthread_mutex_destroy(&mutex);
- pthread_cond_destroy(&cond);
- pthread_attr_destroy(&thr_attr);
my_end(0);
return exit_status();
}
diff --git a/unittest/mysys/waiting_threads-t.c b/unittest/mysys/waiting_threads-t.c
index 35e86aca319..eca6ba408c3 100644
--- a/unittest/mysys/waiting_threads-t.c
+++ b/unittest/mysys/waiting_threads-t.c
@@ -136,10 +136,8 @@ retry:
pthread_mutex_unlock(&lock);
pthread_mutex_unlock(& thds[id].lock);
wt_thd_destroy(& thds[id].thd);
-
- if (!--running_threads) /* now, signal when everybody is done with deinit */
- pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
+
DBUG_PRINT("wt", ("exiting"));
my_thread_end();
return 0;
diff --git a/win/packaging/heidisql.cmake b/win/packaging/heidisql.cmake
index 772834e7c7d..29ecdd8eb4f 100644
--- a/win/packaging/heidisql.cmake
+++ b/win/packaging/heidisql.cmake
@@ -1,4 +1,4 @@
-SET(HEIDISQL_BASE_NAME "HeidiSQL_9.4_Portable")
+SET(HEIDISQL_BASE_NAME "HeidiSQL_9.5_Portable")
SET(HEIDISQL_ZIP "${HEIDISQL_BASE_NAME}.zip")
SET(HEIDISQL_URL "http://www.heidisql.com/downloads/releases/${HEIDISQL_ZIP}")
SET(HEIDISQL_DOWNLOAD_DIR ${THIRD_PARTY_DOWNLOAD_LOCATION}/${HEIDISQL_BASE_NAME})