-rwxr-xr-x  BUILD/SETUP.sh | 1
-rwxr-xr-x  BUILD/compile-pentium-debug-max | 2
-rw-r--r--  configure.in | 16
-rw-r--r--  extra/yassl/taocrypt/cmakelists.txt | 3
-rw-r--r--  include/my_sys.h | 2
-rw-r--r--  mysql-test/Makefile.am | 8
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_sv_relay_space.test | 22
-rw-r--r--  mysql-test/include/rpl_multi_engine3.inc | 4
-rwxr-xr-x  mysql-test/mysql-test-run.pl | 79
-rw-r--r--  mysql-test/mysql-test-run.sh | 2
-rw-r--r--  mysql-test/ndb/ndbcluster.sh | 6
-rw-r--r--  mysql-test/r/func_time.result | 25
-rw-r--r--  mysql-test/r/index_merge_innodb.result | 22
-rw-r--r--  mysql-test/r/ndb_basic.result | 7
-rw-r--r--  mysql-test/r/ndb_bitfield.result | 12
-rw-r--r--  mysql-test/r/ndb_partition_key.result | 14
-rw-r--r--  mysql-test/r/ndb_restore.result | 113
-rw-r--r--  mysql-test/r/partition_pruning.result | 10
-rw-r--r--  mysql-test/r/partition_range.result | 24
-rw-r--r--  mysql-test/r/ps.result | 130
-rw-r--r--  mysql-test/r/rpl_ndb_2innodb.result | 855
-rw-r--r--  mysql-test/r/rpl_ndb_2myisam.result | 855
-rw-r--r--  mysql-test/r/rpl_ndb_basic.result | 25
-rw-r--r--  mysql-test/r/rpl_ndb_dd_partitions.result | 726
-rw-r--r--  mysql-test/r/rpl_ndb_relay_space.result | 25
-rw-r--r--  mysql-test/r/rpl_relay_space_innodb.result | 8
-rw-r--r--  mysql-test/r/rpl_relay_space_myisam.result | 8
-rw-r--r--  mysql-test/std_data/ndb_backup50/BACKUP-1-0.1.Data | bin 0 -> 116228 bytes
-rw-r--r--  mysql-test/std_data/ndb_backup50/BACKUP-1-0.2.Data | bin 0 -> 113556 bytes
-rw-r--r--  mysql-test/std_data/ndb_backup50/BACKUP-1.1.ctl | bin 0 -> 7936 bytes
-rw-r--r--  mysql-test/std_data/ndb_backup50/BACKUP-1.1.log | bin 0 -> 7592 bytes
-rw-r--r--  mysql-test/std_data/ndb_backup50/BACKUP-1.2.ctl | bin 0 -> 7936 bytes
-rw-r--r--  mysql-test/std_data/ndb_backup50/BACKUP-1.2.log | bin 0 -> 7128 bytes
-rw-r--r--  mysql-test/std_data/ndb_backup51/BACKUP-1-0.1.Data | bin 0 -> 184636 bytes
-rw-r--r--  mysql-test/std_data/ndb_backup51/BACKUP-1-0.2.Data | bin 0 -> 181060 bytes
-rw-r--r--  mysql-test/std_data/ndb_backup51/BACKUP-1.1.ctl | bin 0 -> 12320 bytes
-rw-r--r--  mysql-test/std_data/ndb_backup51/BACKUP-1.1.log | bin 0 -> 21776 bytes
-rw-r--r--  mysql-test/std_data/ndb_backup51/BACKUP-1.2.ctl | bin 0 -> 12320 bytes
-rw-r--r--  mysql-test/std_data/ndb_backup51/BACKUP-1.2.log | bin 0 -> 19008 bytes
-rw-r--r--  mysql-test/t/disabled.def | 24
-rw-r--r--  mysql-test/t/func_time.test | 22
-rw-r--r--  mysql-test/t/index_merge_innodb.test | 52
-rw-r--r--  mysql-test/t/innodb.test | 13
-rw-r--r--  mysql-test/t/ndb_basic.test | 11
-rw-r--r--  mysql-test/t/ndb_partition_key.test | 18
-rw-r--r--  mysql-test/t/ndb_restore.test | 35
-rw-r--r--  mysql-test/t/partition_range.test | 28
-rw-r--r--  mysql-test/t/ps.test | 143
-rw-r--r--  mysql-test/t/rpl_ndb_basic.test | 31
-rw-r--r--  mysql-test/t/rpl_ndb_dd_partitions.test | 310
-rw-r--r--  mysql-test/t/rpl_ndb_relay_space.test | 21
-rw-r--r--  mysql-test/t/rpl_view-slave.opt | 1
-rw-r--r--  scripts/mysql_prepare_privilege_tables_for_5.sql | 53
-rw-r--r--  scripts/mysqld_safe-watch.sh | 150
-rw-r--r--  sql/field.cc | 10
-rw-r--r--  sql/ha_innodb.cc | 130
-rw-r--r--  sql/ha_innodb.h | 3
-rw-r--r--  sql/ha_ndbcluster.cc | 197
-rw-r--r--  sql/ha_ndbcluster.h | 28
-rw-r--r--  sql/ha_ndbcluster_binlog.cc | 172
-rw-r--r--  sql/ha_partition.cc | 283
-rw-r--r--  sql/ha_partition.h | 3
-rw-r--r--  sql/handler.h | 13
-rw-r--r--  sql/item_cmpfunc.cc | 12
-rw-r--r--  sql/item_row.cc | 16
-rw-r--r--  sql/item_row.h | 3
-rw-r--r--  sql/log_event.cc | 31
-rw-r--r--  sql/mysql_priv.h | 192
-rw-r--r--  sql/mysqld.cc | 52
-rw-r--r--  sql/opt_range.cc | 2
-rw-r--r--  sql/partition_element.h | 6
-rw-r--r--  sql/partition_info.cc | 51
-rw-r--r--  sql/partition_info.h | 12
-rw-r--r--  sql/set_var.cc | 52
-rw-r--r--  sql/set_var.h | 57
-rw-r--r--  sql/share/errmsg.txt | 4
-rw-r--r--  sql/sql_base.cc | 15
-rw-r--r--  sql/sql_class.cc | 119
-rw-r--r--  sql/sql_class.h | 37
-rw-r--r--  sql/sql_partition.cc | 1176
-rw-r--r--  sql/sql_prepare.cc | 17
-rw-r--r--  sql/sql_repl.cc | 18
-rw-r--r--  sql/sql_table.cc | 1073
-rw-r--r--  sql/sql_yacc.yy | 75
-rw-r--r--  sql/table.cc | 35
-rw-r--r--  sql/unireg.cc | 9
-rwxr-xr-x  sql/watchdog_mysqld | 126
-rw-r--r--  storage/myisam/mi_open.c | 2
-rwxr-xr-x  storage/ndb/home/bin/Linuxmkisofs | bin 503146 -> 0 bytes
-rwxr-xr-x  storage/ndb/home/bin/Solarismkisofs | bin 634084 -> 0 bytes
-rwxr-xr-x  storage/ndb/home/bin/cvs2cl.pl | 1865
-rwxr-xr-x  storage/ndb/home/bin/fix-cvs-root | 17
-rwxr-xr-x  storage/ndb/home/bin/import-from-bk.sh | 158
-rwxr-xr-x  storage/ndb/home/bin/ndb_deploy | 27
-rwxr-xr-x  storage/ndb/home/bin/ndbdoxy.pl | 184
-rwxr-xr-x  storage/ndb/home/bin/ngcalc | 78
-rw-r--r--  storage/ndb/home/bin/parseConfigFile.awk | 98
-rwxr-xr-x  storage/ndb/home/bin/setup-test.sh | 272
-rw-r--r--  storage/ndb/home/bin/signallog2html.lib/signallog2list.awk | 102
-rw-r--r--  storage/ndb/home/bin/signallog2html.lib/uniq_blocks.awk | 29
-rwxr-xr-x  storage/ndb/home/bin/signallog2html.sh | 349
-rwxr-xr-x  storage/ndb/home/bin/stripcr | 90
-rw-r--r--  storage/ndb/home/lib/funcs.sh | 294
-rw-r--r--  storage/ndb/include/kernel/signaldata/DropFilegroup.hpp | 1
-rw-r--r--  storage/ndb/include/kernel/signaldata/TupFrag.hpp | 3
-rw-r--r--  storage/ndb/include/ndbapi/NdbDictionary.hpp | 4
-rw-r--r--  storage/ndb/include/util/NdbSqlUtil.hpp | 6
-rw-r--r--  storage/ndb/include/util/ndb_opts.h | 6
-rw-r--r--  storage/ndb/src/common/util/NdbSqlUtil.cpp | 52
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp | 11
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 83
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 52
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp | 1
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp | 33
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp | 8
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp | 17
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp | 5
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp | 70
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionary.cpp | 36
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp | 42
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp | 3
-rw-r--r--  storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp | 59
-rw-r--r--  storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp | 7
-rw-r--r--  storage/ndb/src/ndbapi/NdbScanOperation.cpp | 5
-rw-r--r--  storage/ndb/src/ndbapi/NdbTransaction.cpp | 4
-rw-r--r--  storage/ndb/src/ndbapi/Ndbif.cpp | 3
-rw-r--r--  storage/ndb/src/ndbapi/TransporterFacade.cpp | 9
-rw-r--r--  storage/ndb/src/ndbapi/TransporterFacade.hpp | 3
-rw-r--r--  storage/ndb/test/ndbapi/bank/Bank.cpp | 3
-rw-r--r--  storage/ndb/test/ndbapi/bank/Bank.hpp | 2
-rw-r--r--  storage/ndb/test/ndbapi/bank/BankLoad.cpp | 4
-rw-r--r--  storage/ndb/test/ndbapi/bank/bankCreator.cpp | 3
-rw-r--r--  storage/ndb/test/ndbapi/testBasic.cpp | 39
-rw-r--r--  storage/ndb/test/run-test/daily-basic-tests.txt | 4
-rw-r--r--  storage/ndb/tools/restore/Restore.cpp | 82
-rw-r--r--  storage/ndb/tools/restore/Restore.hpp | 2
-rw-r--r--  storage/ndb/tools/restore/restore_main.cpp | 5
-rw-r--r--  unittest/examples/Makefile.am | 4
-rw-r--r--  unittest/mytap/t/Makefile.am | 3
139 files changed, 7020 insertions, 5164 deletions
diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh
index 3c3facae34f..6c83fffa8fb 100755
--- a/BUILD/SETUP.sh
+++ b/BUILD/SETUP.sh
@@ -116,6 +116,7 @@ valgrind_flags="$valgrind_flags -DMYSQL_SERVER_SUFFIX=-valgrind-max"
# Used in -debug builds
debug_cflags="-DUNIV_MUST_NOT_INLINE -DEXTRA_DEBUG -DFORCE_INIT_OF_VARS "
debug_cflags="$debug_cflags -DSAFEMALLOC -DPEDANTIC_SAFEMALLOC -DSAFE_MUTEX"
+error_inject="--with-error-inject "
#
# Base C++ flags for all builds
base_cxxflags="-felide-constructors -fno-exceptions -fno-rtti"
diff --git a/BUILD/compile-pentium-debug-max b/BUILD/compile-pentium-debug-max
index d799311526b..adb9b7899a5 100755
--- a/BUILD/compile-pentium-debug-max
+++ b/BUILD/compile-pentium-debug-max
@@ -4,6 +4,6 @@ path=`dirname $0`
. "$path/SETUP.sh" "$@" --with-debug=full
extra_flags="$pentium_cflags $debug_cflags"
-extra_configs="$pentium_configs $debug_configs $max_configs"
+extra_configs="$pentium_configs $debug_configs $max_configs $error_inject"
. "$path/FINISH.sh"
diff --git a/configure.in b/configure.in
index 6984f5b5f89..44e9d286e1f 100644
--- a/configure.in
+++ b/configure.in
@@ -666,6 +666,7 @@ else
AC_MSG_RESULT([no])
fi
+
MYSQL_SYS_LARGEFILE
# Types that must be checked AFTER large file support is checked
@@ -1608,6 +1609,21 @@ else
CXXFLAGS="$OPTIMIZE_CXXFLAGS -DDBUG_OFF $CXXFLAGS"
fi
+# If we should allow error injection tests
+AC_ARG_WITH(error-inject,
+ [ --with-error-inject Enable error injection in MySQL Server],
+ [ with_error_inject=$withval ],
+ [ with_error_inject=no ])
+
+if test $with_debug != "no"
+then
+ if test "$with_error_inject" = "yes"
+ then
+ AC_DEFINE([ERROR_INJECT_SUPPORT], [1],
+ [Enable error injection in MySQL Server])
+ fi
+fi
+
AC_ARG_WITH([fast-mutexes],
AC_HELP_STRING([--with-fast-mutexes],
[Compile with fast mutexes (default is disabled)]),
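A minimal usage sketch for the new switch, assuming a plain source tree: per the guard added above, ERROR_INJECT_SUPPORT is only defined when a debug build is also requested.

./configure --with-debug --with-error-inject
# or via the wrapper patched above, which now appends $error_inject
# (set in BUILD/SETUP.sh) to extra_configs:
BUILD/compile-pentium-debug-max
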
diff --git a/extra/yassl/taocrypt/cmakelists.txt b/extra/yassl/taocrypt/cmakelists.txt
index 6f4d0395ff6..3ad9195b372 100644
--- a/extra/yassl/taocrypt/cmakelists.txt
+++ b/extra/yassl/taocrypt/cmakelists.txt
@@ -1,8 +1,5 @@
INCLUDE_DIRECTORIES(../mySTL include)
-SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /GX-")
-SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GX-")
-
ADD_LIBRARY(taocrypt src/aes.cpp src/aestables.cpp src/algebra.cpp src/arc4.cpp src/asn.cpp src/coding.cpp
src/des.cpp src/dh.cpp src/dsa.cpp src/file.cpp src/hash.cpp src/integer.cpp src/md2.cpp
src/md5.cpp src/misc.cpp src/random.cpp src/ripemd.cpp src/rsa.cpp src/sha.cpp
diff --git a/include/my_sys.h b/include/my_sys.h
index 51883e8d6f9..41851b91cbd 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -559,7 +559,7 @@ extern File my_register_filename(File fd, const char *FileName,
enum file_type type_of_file,
uint error_message_number, myf MyFlags);
extern File my_create(const char *FileName,int CreateFlags,
- int AccsesFlags, myf MyFlags);
+ int AccessFlags, myf MyFlags);
extern int my_close(File Filedes,myf MyFlags);
extern File my_dup(File file, myf MyFlags);
extern int my_mkdir(const char *dir, int Flags, myf MyFlags);
diff --git a/mysql-test/Makefile.am b/mysql-test/Makefile.am
index 8ddf7668844..5aaddf36aa3 100644
--- a/mysql-test/Makefile.am
+++ b/mysql-test/Makefile.am
@@ -43,6 +43,8 @@ dist-hook:
$(distdir)/r \
$(distdir)/include \
$(distdir)/std_data \
+ $(distdir)/std_data/ndb_backup50 \
+ $(distdir)/std_data/ndb_backup51 \
$(distdir)/lib
-$(INSTALL_DATA) $(srcdir)/t/*.def $(distdir)/t
$(INSTALL_DATA) $(srcdir)/t/*.test $(distdir)/t
@@ -63,6 +65,8 @@ dist-hook:
$(INSTALL_DATA) $(srcdir)/std_data/*.pem $(distdir)/std_data
$(INSTALL_DATA) $(srcdir)/std_data/*.frm $(distdir)/std_data
$(INSTALL_DATA) $(srcdir)/std_data/*.cnf $(distdir)/std_data
+ $(INSTALL_DATA) $(srcdir)/std_data/ndb_backup50/BACKUP* $(distdir)/std_data/ndb_backup50
+ $(INSTALL_DATA) $(srcdir)/std_data/ndb_backup51/BACKUP* $(distdir)/std_data/ndb_backup51
$(INSTALL_DATA) $(srcdir)/lib/init_db.sql $(distdir)/lib
$(INSTALL_DATA) $(srcdir)/lib/*.pl $(distdir)/lib
@@ -74,6 +78,8 @@ install-data-local:
$(DESTDIR)$(testdir)/r \
$(DESTDIR)$(testdir)/include \
$(DESTDIR)$(testdir)/std_data \
+ $(DESTDIR)$(testdir)/std_data/ndb_backup50 \
+ $(DESTDIR)$(testdir)/std_data/ndb_backup51 \
$(DESTDIR)$(testdir)/lib
$(INSTALL_DATA) $(srcdir)/README $(DESTDIR)$(testdir)
-$(INSTALL_DATA) $(srcdir)/t/*.def $(DESTDIR)$(testdir)/t
@@ -98,6 +104,8 @@ install-data-local:
$(INSTALL_DATA) $(srcdir)/std_data/*.pem $(DESTDIR)$(testdir)/std_data
$(INSTALL_DATA) $(srcdir)/std_data/*.frm $(DESTDIR)$(testdir)/std_data
$(INSTALL_DATA) $(srcdir)/std_data/*.cnf $(DESTDIR)$(testdir)/std_data
+ $(INSTALL_DATA) $(srcdir)/std_data/ndb_backup50/BACKUP* $(DESTDIR)$(testdir)/std_data/ndb_backup50
+ $(INSTALL_DATA) $(srcdir)/std_data/ndb_backup51/BACKUP* $(DESTDIR)$(testdir)/std_data/ndb_backup51
$(INSTALL_DATA) $(srcdir)/lib/init_db.sql $(DESTDIR)$(testdir)/lib
$(INSTALL_DATA) $(srcdir)/lib/*.pl $(DESTDIR)$(testdir)/lib
diff --git a/mysql-test/extra/rpl_tests/rpl_sv_relay_space.test b/mysql-test/extra/rpl_tests/rpl_sv_relay_space.test
index 57b74c80b58..3b6fe7a0ef4 100644
--- a/mysql-test/extra/rpl_tests/rpl_sv_relay_space.test
+++ b/mysql-test/extra/rpl_tests/rpl_sv_relay_space.test
@@ -14,20 +14,22 @@ source include/master-slave.inc;
#
SHOW VARIABLES LIKE 'relay_log_space_limit';
+# Matz says: I have no idea what this is supposed to test, but it has
+# potential for generating different results with some storage engines
+# that process rows in an order not dependent on the insertion order.
+# For instance, I would assume that distributed storage engines (like
+# NDB) could process rows based on locality.
+
eval CREATE TABLE t1 (name varchar(64), age smallint(3))ENGINE=$engine_type;
-INSERT INTO t1 SET name='Andy', age=31;
-INSERT t1 SET name='Jacob', age=2;
-INSERT into t1 SET name='Caleb', age=1;
+INSERT INTO t1 SET name='Andy', age=31;
+INSERT INTO t1 SET name='Jacob', age=2;
+INSERT INTO t1 SET name='Caleb', age=1;
ALTER TABLE t1 ADD id int(8) ZEROFILL AUTO_INCREMENT PRIMARY KEY;
SELECT * FROM t1 ORDER BY id;
-save_master_pos;
-connection slave;
-sync_with_master;
+sync_slave_with_master;
SELECT * FROM t1 ORDER BY id;
connection master;
-drop table t1;
-save_master_pos;
-connection slave;
-sync_with_master;
+DROP TABLE t1;
+sync_slave_with_master;
# End of 4.1 tests
diff --git a/mysql-test/include/rpl_multi_engine3.inc b/mysql-test/include/rpl_multi_engine3.inc
index be89c1907f3..5d8f7e46409 100644
--- a/mysql-test/include/rpl_multi_engine3.inc
+++ b/mysql-test/include/rpl_multi_engine3.inc
@@ -28,6 +28,7 @@ INSERT INTO t1 VALUES(412,1,'Testing MySQL databases is a cool ',
select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
sync_slave_with_master;
+--sleep 5
--echo --- Select from t1 on slave ---
select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
@@ -41,9 +42,9 @@ SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
# Must give injector thread a little time to get update
# into the binlog other wise we will miss the update.
-sleep 3;
sync_slave_with_master;
+--sleep 5
--echo --- Check Update on slave ---
SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
@@ -55,6 +56,7 @@ DELETE FROM t1 WHERE id = 42;
SELECT COUNT(*) FROM t1;
sync_slave_with_master;
+--sleep 5
--echo --- Show current count on slave for t1 ---
SELECT COUNT(*) FROM t1;
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 40b3954b533..a095b4535fe 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -284,7 +284,7 @@ our $opt_start_from;
our $opt_strace_client;
-our $opt_timer;
+our $opt_timer= 1;
our $opt_user;
our $opt_user_test;
@@ -321,6 +321,8 @@ our $opt_with_ndbcluster_slave;
our $opt_with_ndbcluster_all= 0;
our $opt_with_ndbcluster_only= 0;
our $opt_ndb_extra_test= 0;
+our $opt_skip_master_binlog= 0;
+our $opt_skip_slave_binlog= 0;
our $exe_ndb_mgm;
our $path_ndb_tools_dir;
@@ -599,6 +601,8 @@ sub command_line_setup () {
'with-ndbcluster-all' => \$opt_with_ndbcluster_all,
'with-ndbcluster-only' => \$opt_with_ndbcluster_only,
'ndb-extra-test' => \$opt_ndb_extra_test,
+ 'skip-master-binlog' => \$opt_skip_master_binlog,
+ 'skip-slave-binlog' => \$opt_skip_slave_binlog,
'do-test=s' => \$opt_do_test,
'start-from=s' => \$opt_start_from,
'suite=s' => \$opt_suite,
@@ -682,7 +686,7 @@ sub command_line_setup () {
'socket=s' => \$opt_socket,
'start-dirty' => \$opt_start_dirty,
'start-and-exit' => \$opt_start_and_exit,
- 'timer' => \$opt_timer,
+ 'timer!' => \$opt_timer,
'unified-diff|udiff' => \$opt_udiff,
'user-test=s' => \$opt_user_test,
'user=s' => \$opt_user,
@@ -1520,6 +1524,7 @@ sub ndbcluster_start ($) {
if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
["--port=$opt_ndbcluster_port",
"--data-dir=$opt_vardir",
+ "--character-sets-dir=$path_charsetsdir",
"--verbose=2",
"--core"],
"", "/dev/null", "", "") )
@@ -2157,6 +2162,14 @@ sub run_testcase ($) {
if ( $using_ndbcluster_master and ! $master->[1]->{'pid'} )
{
# Test needs cluster, start an extra mysqld connected to cluster
+ # First wait for first mysql server to have created ndb system tables ok
+ if ( ! sleep_until_file_created("$master->[0]->{'path_myddir'}/cluster/apply_status.ndb",
+ $master->[0]->{'start_timeout'},
+ $master->[0]->{'pid'}))
+ {
+ report_failure_and_restart($tinfo);
+ return;
+ }
mtr_tofile($master->[1]->{'path_myerr'},"CURRENT_TEST: $tname\n");
$master->[1]->{'pid'}=
mysqld_start('master',1,$tinfo->{'master_opt'},[],
@@ -2555,8 +2568,11 @@ sub mysqld_arguments ($$$$$$) {
{
my $id= $idx > 0 ? $idx + 101 : 1;
- mtr_add_arg($args, "%s--log-bin=%s/log/master-bin%s", $prefix,
- $opt_vardir, $sidx);
+ if (! $opt_skip_master_binlog)
+ {
+ mtr_add_arg($args, "%s--log-bin=%s/log/master-bin%s", $prefix,
+ $opt_vardir, $sidx);
+ }
mtr_add_arg($args, "%s--pid-file=%s", $prefix,
$master->[$idx]->{'path_mypid'});
mtr_add_arg($args, "%s--port=%d", $prefix,
@@ -2597,9 +2613,12 @@ sub mysqld_arguments ($$$$$$) {
# FIXME slave get this option twice?!
mtr_add_arg($args, "%s--exit-info=256", $prefix);
mtr_add_arg($args, "%s--init-rpl-role=slave", $prefix);
- mtr_add_arg($args, "%s--log-bin=%s/log/slave%s-bin", $prefix,
- $opt_vardir, $sidx); # FIXME use own dir for binlogs
- mtr_add_arg($args, "%s--log-slave-updates", $prefix);
+ if (! $opt_skip_slave_binlog)
+ {
+ mtr_add_arg($args, "%s--log-bin=%s/log/slave%s-bin", $prefix,
+ $opt_vardir, $sidx); # FIXME use own dir for binlogs
+ mtr_add_arg($args, "%s--log-slave-updates", $prefix);
+ }
# FIXME option duplicated for slave
mtr_add_arg($args, "%s--log=%s", $prefix,
$slave->[$idx]->{'path_mylog'});
@@ -3595,15 +3614,17 @@ Options to control what engine/variation to run
compress Use the compressed protocol between client and server
ssl Use ssl protocol between client and server
skip-ssl Dont start server with support for ssl connections
- bench Run the benchmark suite FIXME
- small-bench FIXME
+ bench Run the benchmark suite
+ small-bench Run the benchmarks with --small-tests --small-tables
Options to control directories to use
- vardir=DIR The directory where files generated from the test run
- is stored(default: ./var). Specifying a ramdisk or tmpfs
- will speed up tests.
+ benchdir=DIR The directory where the benchmark suite is stored
+ (default: ../../mysql-bench)
tmpdir=DIR The directory where temporary files are stored
(default: ./var/tmp).
+ vardir=DIR The directory where files generated from the test run
+ is stored (default: ./var). Specifying a ramdisk or
+ tmpfs will speed up tests.
Options to control what test suites or cases to run
@@ -3618,8 +3639,9 @@ Options to control what test suites or cases to run
skip-rpl Skip the replication test cases.
skip-im Don't start IM, and skip the IM test cases
skip-test=PREFIX Skip test cases which name are prefixed with PREFIX
- big-test Pass "--big-test" to mysqltest which will set the environment
- variable BIG_TEST, which can be checked from test cases.
+ big-test Pass "--big-test" to mysqltest which will set the
+ environment variable BIG_TEST, which can be checked
+ from test cases.
Options that specify ports
@@ -3645,25 +3667,29 @@ Options to run test on running server
Options for debugging the product
- gdb Start the mysqld(s) in gdb
- manual-gdb Let user manually start mysqld in gdb, before running test(s)
- manual-debug Let user manually start mysqld in debugger, before running test(s)
+ client-ddd Start mysqltest client in ddd
+ client-debugger=NAME Start mysqltest in the selected debugger
client-gdb Start mysqltest client in gdb
ddd Start mysqld in ddd
- client-ddd Start mysqltest client in ddd
+ debug Dump trace output for all servers and client programs
debugger=NAME Start mysqld in the selected debugger
- client-debugger=NAME Start mysqltest in the selected debugger
- strace-client FIXME
+ gdb Start the mysqld(s) in gdb
+ manual-debug Let user manually start mysqld in debugger, before
+ running test(s)
+ manual-gdb Let user manually start mysqld in gdb, before running
+ test(s)
master-binary=PATH Specify the master "mysqld" to use
slave-binary=PATH Specify the slave "mysqld" to use
+ strace-client Create strace output for mysqltest client
Options for coverage, profiling etc
gcov FIXME
gprof FIXME
- valgrind Run the "mysqltest" and "mysqld" executables using valgrind
- valgrind-all Same as "valgrind" but will also add "verbose" and "--show-reachable"
- flags to valgrind
+ valgrind Run the "mysqltest" and "mysqld" executables using
+ valgrind
+ valgrind-all Same as "valgrind" but will also add "verbose" and
+ "--show-reachable" flags to valgrind
valgrind-mysqltest Run the "mysqltest" executable with valgrind
valgrind-mysqld Run the "mysqld" executable with valgrind
valgrind-options=ARGS Extra options to give valgrind
@@ -3672,10 +3698,10 @@ Options for coverage, profiling etc
Misc options
comment=STR Write STR to the output
+ notimer Don't show test case execution time
script-debug Debug this script itself
- timer Show test case execution time
- start-and-exit Only initiate and start the "mysqld" servers, use the startup
- settings for the specified test case if any
+ start-and-exit Only initiate and start the "mysqld" servers, use
+ the startup settings for the specified test case if any
start-dirty Only start the "mysqld" servers without initiation
fast Don't try to cleanup from earlier runs
reorder Reorder tests to get less server restarts
@@ -3690,7 +3716,6 @@ Deprecated options
Options not yet described, or that I want to look into more
- debug
local
local-master
netware
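A sketch of the new and changed mysql-test-run.pl switches in use (the test names are only placeholders):

# binlogging can now be suppressed per role:
./mysql-test-run.pl --skip-master-binlog --skip-slave-binlog rpl_ndb_basic
# 'timer!' makes the (now default-on) timer negatable:
./mysql-test-run.pl --notimer func_time
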
diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh
index aad71f89ef2..54c5ce20047 100644
--- a/mysql-test/mysql-test-run.sh
+++ b/mysql-test/mysql-test-run.sh
@@ -1230,7 +1230,7 @@ start_ndbcluster()
then
NDBCLUSTER_EXTRA_OPTS="--small"
fi
- OPTS="$NDBCLUSTER_OPTS $NDBCLUSTER_EXTRA_OPTS --verbose=$NDB_VERBOSE --initial --relative-config-data-dir --core"
+ OPTS="$NDBCLUSTER_OPTS $NDBCLUSTER_EXTRA_OPTS --character-sets-dir=$CHARSETSDIR --verbose=$NDB_VERBOSE --initial --relative-config-data-dir --core"
if [ "x$NDB_VERBOSE" != "x0" ] ; then
echo "Starting master ndbcluster " $OPTS
fi
diff --git a/mysql-test/ndb/ndbcluster.sh b/mysql-test/ndb/ndbcluster.sh
index 6812067a8ed..d55b53da2ae 100644
--- a/mysql-test/ndb/ndbcluster.sh
+++ b/mysql-test/ndb/ndbcluster.sh
@@ -66,6 +66,7 @@ VERBOSE=100
NDB_MGM_EXTRA_OPTS=
NDB_MGMD_EXTRA_OPTS=
NDBD_EXTRA_OPTS=
+CHARSETSDIR=
while test $# -gt 0; do
case "$1" in
@@ -119,6 +120,9 @@ while test $# -gt 0; do
--ndbd-extra-opts=*)
NDBD_EXTRA_OPTS=`echo "$1" | sed -e "s;--ndbd-extra-opts=;;"`
;;
+ --character-sets-dir=*)
+ CHARSETSDIR=`echo "$1" | sed -e "s;--character-sets-dir=;;"`
+ ;;
--core)
opt_core="--core"
;;
@@ -159,7 +163,7 @@ fi
exec_mgmtclient="$exec_mgmtclient --no-defaults $opt_core $NDB_MGM_EXTRA_OPTS"
exec_mgmtsrvr="$exec_mgmtsrvr --no-defaults $opt_core $NDB_MGMD_EXTRA_OPTS"
-exec_ndb="$exec_ndb --no-defaults $opt_core $NDBD_EXTRA_OPTS"
+exec_ndb="$exec_ndb --no-defaults $opt_core $NDBD_EXTRA_OPTS --character-sets-dir=$CHARSETSDIR"
exec_waiter="$exec_waiter --no-defaults $opt_core"
ndb_host="localhost"
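With the mysql-test-run changes above, the charset directory now flows from the test driver through ndbcluster.sh down to each ndbd process; an assumed end-to-end invocation (paths illustrative) would be:

mysql-test/ndb/ndbcluster --port=9350 --data-dir=./var \
  --character-sets-dir=./sql/share/charsets --verbose=2 --core
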
diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result
index edbdd6b08da..b5fd35d926f 100644
--- a/mysql-test/r/func_time.result
+++ b/mysql-test/r/func_time.result
@@ -847,3 +847,28 @@ timestampdiff(year,'2004-02-28','2005-02-28')
select timestampdiff(year,'2004-02-29','2005-02-28');
timestampdiff(year,'2004-02-29','2005-02-28')
0
+CREATE TABLE t1 (id int NOT NULL PRIMARY KEY, day date);
+CREATE TABLE t2 (id int NOT NULL PRIMARY KEY, day date);
+INSERT INTO t1 VALUES
+(1, '2005-06-01'), (2, '2005-02-01'), (3, '2005-07-01');
+INSERT INTO t2 VALUES
+(1, '2005-08-01'), (2, '2005-06-15'), (3, '2005-07-15');
+SELECT * FROM t1, t2
+WHERE t1.day BETWEEN
+'2005.09.01' - INTERVAL 6 MONTH AND t2.day;
+id day id day
+1 2005-06-01 1 2005-08-01
+3 2005-07-01 1 2005-08-01
+1 2005-06-01 2 2005-06-15
+1 2005-06-01 3 2005-07-15
+3 2005-07-01 3 2005-07-15
+SELECT * FROM t1, t2
+WHERE CAST(t1.day AS DATE) BETWEEN
+'2005.09.01' - INTERVAL 6 MONTH AND t2.day;
+id day id day
+1 2005-06-01 1 2005-08-01
+3 2005-07-01 1 2005-08-01
+1 2005-06-01 2 2005-06-15
+1 2005-06-01 3 2005-07-15
+3 2005-07-01 3 2005-07-15
+DROP TABLE t1,t2;
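The new results hinge on '2005.09.01' - INTERVAL 6 MONTH being evaluated as a date despite the dotted delimiters; reproducing that outside the suite should look roughly like this (connection options omitted):

mysql -e "SELECT '2005.09.01' - INTERVAL 6 MONTH;"
# expected: 2005-03-01
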
diff --git a/mysql-test/r/index_merge_innodb.result b/mysql-test/r/index_merge_innodb.result
index b20e66a1098..d7dcc7853cd 100644
--- a/mysql-test/r/index_merge_innodb.result
+++ b/mysql-test/r/index_merge_innodb.result
@@ -260,3 +260,25 @@ t_vers t_rele t_cust filler1
7.6 a
7.6 a
drop table t1;
+create table t1 (
+pk int(11) not null auto_increment,
+a int(11) not null default '0',
+b int(11) not null default '0',
+c int(11) not null default '0',
+filler1 datetime, filler2 varchar(15),
+filler3 longtext,
+kp1 varchar(4), kp2 varchar(7),
+kp3 varchar(2), kp4 varchar(4),
+kp5 varchar(7),
+filler4 char(1),
+primary key (pk),
+key idx1(a,b,c),
+key idx2(c),
+key idx3(kp1,kp2,kp3,kp4,kp5)
+) engine=innodb default charset=latin1;
+set @fill=NULL;
+SELECT COUNT(*) FROM t1 WHERE b = 0 AND a = 0 AND c = 13286427 AND
+kp1='279' AND kp2='ELM0678' AND kp3='6' AND kp4='10' AND kp5 = 'R ';
+COUNT(*)
+1
+drop table t1;
diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result
index 9477caf97ab..4a1f5f587df 100644
--- a/mysql-test/r/ndb_basic.result
+++ b/mysql-test/r/ndb_basic.result
@@ -6,13 +6,6 @@ attr1 INT NOT NULL,
attr2 INT,
attr3 VARCHAR(10)
) ENGINE=ndbcluster;
-drop table t1;
-CREATE TABLE t1 (
-pk1 INT NOT NULL PRIMARY KEY,
-attr1 INT NOT NULL,
-attr2 INT,
-attr3 VARCHAR(10)
-) ENGINE=ndbcluster;
SHOW INDEX FROM t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t1 0 PRIMARY 1 pk1 A 0 NULL NULL BTREE
diff --git a/mysql-test/r/ndb_bitfield.result b/mysql-test/r/ndb_bitfield.result
index 13fd31d7e88..9a8c571cfba 100644
--- a/mysql-test/r/ndb_bitfield.result
+++ b/mysql-test/r/ndb_bitfield.result
@@ -201,21 +201,21 @@ create table t1 (
pk1 bit(9) not null primary key,
b int
) engine=ndbcluster;
-ERROR HY000: Can't create table 'test.t1' (errno: 140)
+ERROR HY000: Can't create table 'test.t1' (errno: 906)
show warnings;
Level Code Message
-Error 1296 Got error 739 'Unsupported primary key length' from NDB
-Error 1005 Can't create table 'test.t1' (errno: 140)
+Error 1296 Got error 906 'Unsupported attribute type in index' from NDB
+Error 1005 Can't create table 'test.t1' (errno: 906)
create table t1 (
pk1 int not null primary key,
b bit(9),
key(b)
) engine=ndbcluster;
-ERROR HY000: Can't create table 'test.t1' (errno: 140)
+ERROR HY000: Can't create table 'test.t1' (errno: 906)
show warnings;
Level Code Message
-Error 1296 Got error 743 'Unsupported character set in table or index' from NDB
-Error 1005 Can't create table 'test.t1' (errno: 140)
+Error 1296 Got error 906 'Unsupported attribute type in index' from NDB
+Error 1005 Can't create table 'test.t1' (errno: 906)
create table t1 (
pk1 int primary key,
b bit(32) not null
diff --git a/mysql-test/r/ndb_partition_key.result b/mysql-test/r/ndb_partition_key.result
index 8225fca7a54..503283df532 100644
--- a/mysql-test/r/ndb_partition_key.result
+++ b/mysql-test/r/ndb_partition_key.result
@@ -165,6 +165,20 @@ ENGINE=NDB
PARTITION BY KEY(c3) PARTITIONS 5;
ALTER TABLE t1 COALESCE PARTITION 4;
DROP TABLE t1;
+CREATE TABLE t1 (a int primary key)
+ENGINE=NDB
+PARTITION BY KEY(a);
+ALTER TABLE t1 OPTIMIZE PARTITION p0;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+ALTER TABLE t1 CHECK PARTITION p0;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+ALTER TABLE t1 REPAIR PARTITION p0;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+ALTER TABLE t1 ANALYZE PARTITION p0;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+ALTER TABLE t1 REBUILD PARTITION p0;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+DROP TABLE t1;
CREATE TABLE t1 (
c1 MEDIUMINT NOT NULL AUTO_INCREMENT,
c2 TEXT NOT NULL,
diff --git a/mysql-test/r/ndb_restore.result b/mysql-test/r/ndb_restore.result
index 7dc4057e615..c745869daee 100644
--- a/mysql-test/r/ndb_restore.result
+++ b/mysql-test/r/ndb_restore.result
@@ -467,3 +467,116 @@ Create table test/def/t2_c failed: Translate frm error
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
drop table if exists t2_c;
520093696,<the_backup_id>
+DROP DATABASE IF EXISTS BANK;
+CREATE DATABASE BANK default charset=latin1 default collate=latin1_bin;
+USE BANK;
+SHOW TABLES;
+Tables_in_BANK
+ACCOUNT
+GL
+ACCOUNT_TYPE
+TRANSACTION
+SYSTEM_VALUES
+SELECT * FROM GL ORDER BY TIME,ACCOUNT_TYPE;
+TIME ACCOUNT_TYPE BALANCE DEPOSIT_COUNT DEPOSIT_SUM WITHDRAWAL_COUNT WITHDRAWAL_SUM PURGED
+0 0 10000000 0 0 0 0 1
+0 1 30000 0 0 0 0 1
+0 2 20000 0 0 0 0 1
+0 3 20000 0 0 0 0 1
+0 4 20000 0 0 0 0 1
+1 0 10000000 0 0 0 0 1
+1 1 30000 0 0 0 0 1
+1 2 20000 0 0 0 0 1
+1 3 20000 0 0 0 0 1
+1 4 20000 0 0 0 0 1
+2 0 9857062 54 225197 76 368135 1
+2 1 60601 174 822920 181 792319 1
+2 2 68832 117 531214 98 482382 1
+2 3 83550 106 521953 104 458403 1
+2 4 19955 118 532084 110 532129 1
+3 0 9732896 62 289563 88 413729 1
+3 1 51056 202 895888 193 905433 0
+3 2 67183 122 596787 127 598436 1
+3 3 97669 159 761743 141 747624 1
+3 4 141196 140 727808 136 606567 1
+4 0 9616621 138 603930 142 720205 0
+4 1 178927 348 1741521 344 1613650 0
+4 2 52141 236 1169929 232 1184971 0
+4 3 48938 228 1147957 244 1196688 0
+4 4 193373 246 1257982 234 1205805 0
+5 0 9515281 156 726253 166 827593 0
+5 1 253798 597 2840640 545 2765769 0
+5 2 102776 362 1821680 364 1771045 0
+5 3 87349 359 1778652 375 1740241 0
+5 4 130796 351 1727448 375 1790025 0
+SELECT * FROM ACCOUNT ORDER BY ACCOUNT_ID;
+ACCOUNT_ID OWNER BALANCE ACCOUNT_TYPE
+0 0 9531306 0
+1 3001 123844 1
+2 3002 30800 2
+3 3003 3133 3
+4 3004 6524 4
+5 3005 80152 1
+6 3006 107390 1
+7 3007 69448 2
+8 3008 663 3
+9 3009 136740 4
+SELECT COUNT(*) FROM TRANSACTION;
+COUNT(*)
+6649
+SELECT * FROM SYSTEM_VALUES ORDER BY SYSTEM_VALUES_ID;
+SYSTEM_VALUES_ID VALUE
+0 4767
+1 6
+TRUNCATE GL;
+TRUNCATE ACCOUNT;
+TRUNCATE TRANSACTION;
+TRUNCATE SYSTEM_VALUES;
+TRUNCATE ACCOUNT_TYPE;
+SELECT * FROM GL ORDER BY TIME,ACCOUNT_TYPE;
+TIME ACCOUNT_TYPE BALANCE DEPOSIT_COUNT DEPOSIT_SUM WITHDRAWAL_COUNT WITHDRAWAL_SUM PURGED
+0 0 10000000 0 0 0 0 1
+0 1 30000 0 0 0 0 1
+0 2 20000 0 0 0 0 1
+0 3 20000 0 0 0 0 1
+0 4 20000 0 0 0 0 1
+1 0 10000000 0 0 0 0 1
+1 1 30000 0 0 0 0 1
+1 2 20000 0 0 0 0 1
+1 3 20000 0 0 0 0 1
+1 4 20000 0 0 0 0 1
+2 0 10000000 0 0 0 0 1
+2 1 30000 0 0 0 0 1
+2 2 20000 0 0 0 0 1
+2 3 20000 0 0 0 0 1
+2 4 20000 0 0 0 0 1
+3 0 9963591 14 59111 19 95520 0
+3 1 44264 49 255559 53 241295 0
+3 2 25515 39 177806 36 172291 0
+3 3 16779 26 129200 29 132421 0
+3 4 39851 43 182771 34 162920 0
+4 0 9733661 141 632616 162 862546 0
+4 1 63853 426 2005337 415 1985748 0
+4 2 140473 314 1548632 297 1433674 0
+4 3 13481 310 1528043 324 1531341 0
+4 4 138532 316 1540206 309 1441525 0
+SELECT * FROM ACCOUNT ORDER BY ACCOUNT_ID;
+ACCOUNT_ID OWNER BALANCE ACCOUNT_TYPE
+0 0 9679579 0
+1 3001 18130 1
+2 3002 12318 2
+3 3003 3049 3
+4 3004 39517 4
+5 3005 37051 1
+6 3006 144497 1
+7 3007 130670 2
+8 3008 13747 3
+9 3009 11442 4
+SELECT COUNT(*) FROM TRANSACTION;
+COUNT(*)
+4056
+SELECT * FROM SYSTEM_VALUES ORDER BY SYSTEM_VALUES_ID;
+SYSTEM_VALUES_ID VALUE
+0 2297
+1 5
+DROP DATABASE BANK;
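The BANK results above come from restoring the backup fixtures added under std_data; a hedged sketch of such a restore (connectstring, node ids and backup id are assumptions, not taken from the test):

# restore metadata from the first node's files, then data from both nodes:
ndb_restore -c localhost:9350 -n 1 -b 1 -m -r mysql-test/std_data/ndb_backup50
ndb_restore -c localhost:9350 -n 2 -b 1 -r mysql-test/std_data/ndb_backup50
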
diff --git a/mysql-test/r/partition_pruning.result b/mysql-test/r/partition_pruning.result
index b13f0d55e39..405cc3e6e25 100644
--- a/mysql-test/r/partition_pruning.result
+++ b/mysql-test/r/partition_pruning.result
@@ -596,7 +596,7 @@ f_int1
NULL
explain partitions select * from t1 where f_int1 is null;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 part4_p2sp0 system NULL NULL NULL NULL 1
+1 SIMPLE t1 part4_part4sp0 system NULL NULL NULL NULL 1
drop table t1;
create table t1 (a int not null, b int not null)
partition by list(a)
@@ -625,16 +625,16 @@ insert into t1 values (1,1),(1,2),(1,3),(1,4),
(2,1),(2,2),(2,3),(2,4), (NULL,1);
explain partitions select * from t1 where a IS NULL AND (b=1 OR b=2);
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pn_p3sp0,pn_p3sp1 system NULL NULL NULL NULL 1
+1 SIMPLE t1 pn_pnsp0,pn_pnsp1 system NULL NULL NULL NULL 1
explain partitions select * from t1 where (a IS NULL or a < 1) AND (b=1 OR b=2);
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pn_p3sp0,pn_p3sp1 system NULL NULL NULL NULL 1
+1 SIMPLE t1 pn_pnsp0,pn_pnsp1 system NULL NULL NULL NULL 1
explain partitions select * from t1 where (a IS NULL or a < 2) AND (b=1 OR b=2);
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0_p0sp0,p0_p0sp1,pn_p3sp0,pn_p3sp1 ALL NULL NULL NULL NULL 5 Using where
+1 SIMPLE t1 p0_p0sp0,p0_p0sp1,pn_pnsp0,pn_pnsp1 ALL NULL NULL NULL NULL 5 Using where
explain partitions select * from t1 where (a IS NULL or a <= 1) AND (b=1 OR b=2);
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0_p0sp0,p0_p0sp1,pn_p3sp0,pn_p3sp1 ALL NULL NULL NULL NULL 5 Using where
+1 SIMPLE t1 p0_p0sp0,p0_p0sp1,pn_pnsp0,pn_pnsp1 ALL NULL NULL NULL NULL 5 Using where
drop table t1;
create table t1 ( a int) partition by list (MOD(a, 10))
( partition p0 values in (0), partition p1 values in (1),
diff --git a/mysql-test/r/partition_range.result b/mysql-test/r/partition_range.result
index fc9350f5902..c7257db4910 100644
--- a/mysql-test/r/partition_range.result
+++ b/mysql-test/r/partition_range.result
@@ -363,3 +363,27 @@ SELECT COUNT(*) FROM t1 WHERE c3 < '2000-12-31';
COUNT(*)
10
DROP TABLE t1;
+create table t1 (a int)
+partition by range (MOD(a,3))
+subpartition by hash(a)
+subpartitions 2
+(partition p0 values less than (1),
+partition p1 values less than (2),
+partition p2 values less than (3),
+partition p3 values less than (4));
+ALTER TABLE t1 DROP PARTITION p3;
+ALTER TABLE t1 DROP PARTITION p1;
+ALTER TABLE t1 DROP PARTITION p2;
+drop table t1;
+create table t1 (a int)
+partition by range (MOD(a,3))
+subpartition by hash(a)
+subpartitions 2
+(partition p0 values less than (1),
+partition p1 values less than (2),
+partition p2 values less than (3),
+partition p3 values less than (4));
+ALTER TABLE t1 DROP PARTITION p0;
+ALTER TABLE t1 DROP PARTITION p1;
+ALTER TABLE t1 DROP PARTITION p2;
+drop table t1;
diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result
index 6db5e8f8157..3de7ab61e73 100644
--- a/mysql-test/r/ps.result
+++ b/mysql-test/r/ps.result
@@ -108,6 +108,9 @@ set @fvar= 123.4567;
prepare stmt1 from @fvar;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '123.4567' at line 1
drop table t1,t2;
+deallocate prepare stmt3;
+deallocate prepare stmt4;
+deallocate prepare stmt5;
PREPARE stmt1 FROM "select _utf8 'A' collate utf8_bin = ?";
set @var='A';
EXECUTE stmt1 USING @var;
@@ -253,6 +256,7 @@ set names latin1;
execute `ü`;
1234
1234
+deallocate prepare `ü`;
set names default;
create table t1 (a varchar(10)) charset=utf8;
insert into t1 (a) values ('yahoo');
@@ -781,6 +785,7 @@ EXECUTE b12651;
1
DROP VIEW b12651_V1;
DROP TABLE b12651_T1, b12651_T2;
+DEALLOCATE PREPARE b12651;
prepare stmt from "select @@time_zone";
execute stmt;
@@time_zone
@@ -873,6 +878,130 @@ length(a)
10
drop table t1;
deallocate prepare stmt;
+create table t1 (col1 integer, col2 integer);
+insert into t1 values(100,100),(101,101),(102,102),(103,103);
+prepare stmt from 'select col1, col2 from t1 where (col1, col2) in ((?,?))';
+set @a=100, @b=100;
+execute stmt using @a,@b;
+col1 col2
+100 100
+set @a=101, @b=101;
+execute stmt using @a,@b;
+col1 col2
+101 101
+set @a=102, @b=102;
+execute stmt using @a,@b;
+col1 col2
+102 102
+set @a=102, @b=103;
+execute stmt using @a,@b;
+col1 col2
+deallocate prepare stmt;
+drop table t1;
+set @old_max_prepared_stmt_count= @@max_prepared_stmt_count;
+show variables like 'max_prepared_stmt_count';
+Variable_name Value
+max_prepared_stmt_count 16382
+show variables like 'prepared_stmt_count';
+Variable_name Value
+prepared_stmt_count 0
+select @@max_prepared_stmt_count, @@prepared_stmt_count;
+@@max_prepared_stmt_count @@prepared_stmt_count
+16382 0
+set global max_prepared_stmt_count=-1;
+select @@max_prepared_stmt_count;
+@@max_prepared_stmt_count
+0
+set global max_prepared_stmt_count=10000000000000000;
+select @@max_prepared_stmt_count;
+@@max_prepared_stmt_count
+1048576
+set global max_prepared_stmt_count=default;
+select @@max_prepared_stmt_count;
+@@max_prepared_stmt_count
+16382
+set @@max_prepared_stmt_count=1;
+ERROR HY000: Variable 'max_prepared_stmt_count' is a GLOBAL variable and should be set with SET GLOBAL
+set max_prepared_stmt_count=1;
+ERROR HY000: Variable 'max_prepared_stmt_count' is a GLOBAL variable and should be set with SET GLOBAL
+set local max_prepared_stmt_count=1;
+ERROR HY000: Variable 'max_prepared_stmt_count' is a GLOBAL variable and should be set with SET GLOBAL
+set local prepared_stmt_count=0;
+ERROR HY000: Variable 'prepared_stmt_count' is a read only variable
+set @@prepared_stmt_count=0;
+ERROR HY000: Variable 'prepared_stmt_count' is a read only variable
+set global prepared_stmt_count=1;
+ERROR HY000: Variable 'prepared_stmt_count' is a read only variable
+set global max_prepared_stmt_count=1;
+select @@max_prepared_stmt_count;
+@@max_prepared_stmt_count
+1
+set global max_prepared_stmt_count=0;
+select @@max_prepared_stmt_count, @@prepared_stmt_count;
+@@max_prepared_stmt_count @@prepared_stmt_count
+0 0
+prepare stmt from "select 1";
+ERROR 42000: Can't create more than max_prepared_stmt_count statements (current value: 0)
+select @@prepared_stmt_count;
+@@prepared_stmt_count
+0
+set global max_prepared_stmt_count=1;
+prepare stmt from "select 1";
+select @@prepared_stmt_count;
+@@prepared_stmt_count
+1
+prepare stmt1 from "select 1";
+ERROR 42000: Can't create more than max_prepared_stmt_count statements (current value: 1)
+select @@prepared_stmt_count;
+@@prepared_stmt_count
+1
+deallocate prepare stmt;
+select @@prepared_stmt_count;
+@@prepared_stmt_count
+0
+prepare stmt from "select 1";
+select @@prepared_stmt_count;
+@@prepared_stmt_count
+1
+prepare stmt from "select 2";
+select @@prepared_stmt_count;
+@@prepared_stmt_count
+1
+select @@prepared_stmt_count, @@max_prepared_stmt_count;
+@@prepared_stmt_count @@max_prepared_stmt_count
+1 1
+set global max_prepared_stmt_count=0;
+prepare stmt from "select 1";
+ERROR 42000: Can't create more than max_prepared_stmt_count statements (current value: 0)
+execute stmt;
+ERROR HY000: Unknown prepared statement handler (stmt) given to EXECUTE
+select @@prepared_stmt_count;
+@@prepared_stmt_count
+0
+prepare stmt from "select 1";
+ERROR 42000: Can't create more than max_prepared_stmt_count statements (current value: 0)
+select @@prepared_stmt_count;
+@@prepared_stmt_count
+0
+set global max_prepared_stmt_count=3;
+select @@max_prepared_stmt_count, @@prepared_stmt_count;
+@@max_prepared_stmt_count @@prepared_stmt_count
+3 0
+prepare stmt from "select 1";
+prepare stmt from "select 2";
+prepare stmt1 from "select 3";
+prepare stmt2 from "select 4";
+ERROR 42000: Can't create more than max_prepared_stmt_count statements (current value: 3)
+prepare stmt2 from "select 4";
+ERROR 42000: Can't create more than max_prepared_stmt_count statements (current value: 3)
+select @@max_prepared_stmt_count, @@prepared_stmt_count;
+@@max_prepared_stmt_count @@prepared_stmt_count
+3 3
+deallocate prepare stmt;
+select @@max_prepared_stmt_count, @@prepared_stmt_count;
+@@max_prepared_stmt_count @@prepared_stmt_count
+3 0
+set global max_prepared_stmt_count= @old_max_prepared_stmt_count;
create table t1 (id int);
prepare ins_call from "insert into t1 (id) values (1)";
execute ins_call;
@@ -883,6 +1012,7 @@ drop table t1;
create table t1 (a int, b int);
insert into t1 (a,b) values (2,8),(1,9),(3,7);
prepare stmt from "select * from t1 order by ?";
+set @a=NULL;
execute stmt using @a;
a b
2 8
diff --git a/mysql-test/r/rpl_ndb_2innodb.result b/mysql-test/r/rpl_ndb_2innodb.result
new file mode 100644
index 00000000000..f8ec4624062
--- /dev/null
+++ b/mysql-test/r/rpl_ndb_2innodb.result
@@ -0,0 +1,855 @@
+stop slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+reset master;
+reset slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+start slave;
+SET storage_engine=ndb;
+--- Doing pre test cleanup ---
+DROP TABLE IF EXISTS t1;
+--- Start test 1 Basic testing ---
+--- Create Table Section ---
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
+bc CHAR(255), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE,PRIMARY KEY(id));
+--- Show table on master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
+--- Show table on slave ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly --
+ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(id, total);
+ALTER TABLE t1 MODIFY vc TEXT;
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
+--- Make sure that our tables on slave are still same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 1 Basic testing ---
+--- Do Cleanup --
+DROP TABLE IF EXISTS t1;
+--- Start test 2 partition RANGE testing --
+--- Do setup --
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
+bc CHAR(255), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE)
+PARTITION BY RANGE (YEAR(t))
+(PARTITION p0 VALUES LESS THAN (1901),
+PARTITION p1 VALUES LESS THAN (1946),
+PARTITION p2 VALUES LESS THAN (1966),
+PARTITION p3 VALUES LESS THAN (1986),
+PARTITION p4 VALUES LESS THAN (2005),
+PARTITION p5 VALUES LESS THAN MAXVALUE);
+--- Show table on master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = ndbcluster, PARTITION p1 VALUES LESS THAN (1946) ENGINE = ndbcluster, PARTITION p2 VALUES LESS THAN (1966) ENGINE = ndbcluster, PARTITION p3 VALUES LESS THAN (1986) ENGINE = ndbcluster, PARTITION p4 VALUES LESS THAN (2005) ENGINE = ndbcluster, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = ndbcluster)
+--- Show table on slave --
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = InnoDB, PARTITION p1 VALUES LESS THAN (1946) ENGINE = InnoDB, PARTITION p2 VALUES LESS THAN (1966) ENGINE = InnoDB, PARTITION p3 VALUES LESS THAN (1986) ENGINE = InnoDB, PARTITION p4 VALUES LESS THAN (2005) ENGINE = InnoDB, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = InnoDB)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 ADD PRIMARY KEY(t,id);
+ALTER TABLE t1 MODIFY vc TEXT;
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date NOT NULL DEFAULT '0000-00-00',
+ PRIMARY KEY (`t`,`id`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = ndbcluster, PARTITION p1 VALUES LESS THAN (1946) ENGINE = ndbcluster, PARTITION p2 VALUES LESS THAN (1966) ENGINE = ndbcluster, PARTITION p3 VALUES LESS THAN (1986) ENGINE = ndbcluster, PARTITION p4 VALUES LESS THAN (2005) ENGINE = ndbcluster, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = ndbcluster)
+--- Make sure that our tables on slave are still same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date NOT NULL DEFAULT '0000-00-00',
+ PRIMARY KEY (`t`,`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = InnoDB, PARTITION p1 VALUES LESS THAN (1946) ENGINE = InnoDB, PARTITION p2 VALUES LESS THAN (1966) ENGINE = InnoDB, PARTITION p3 VALUES LESS THAN (1986) ENGINE = InnoDB, PARTITION p4 VALUES LESS THAN (2005) ENGINE = InnoDB, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = InnoDB)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 2 partition RANGE testing ---
+--- Do Cleanup ---
+DROP TABLE IF EXISTS t1;
+--- Start test 3 partition LIST testing ---
+--- Do setup ---
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
+bc CHAR(255), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE)
+PARTITION BY LIST(id)
+(PARTITION p0 VALUES IN (2, 4),
+PARTITION p1 VALUES IN (42, 142));
+--- Test 3 Alter to add partition ---
+ALTER TABLE t1 ADD PARTITION (PARTITION p2 VALUES IN (412));
+--- Show table on master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = ndbcluster, PARTITION p1 VALUES IN (42,142) ENGINE = ndbcluster, PARTITION p2 VALUES IN (412) ENGINE = ndbcluster)
+--- Show table on slave ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = InnoDB, PARTITION p1 VALUES IN (42,142) ENGINE = InnoDB, PARTITION p2 VALUES IN (412) ENGINE = InnoDB)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master ---
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 ADD PRIMARY KEY(id);
+ALTER TABLE t1 MODIFY vc TEXT;
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = ndbcluster, PARTITION p1 VALUES IN (42,142) ENGINE = ndbcluster, PARTITION p2 VALUES IN (412) ENGINE = ndbcluster)
+--- Make sure that our tables on slave are still the same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = InnoDB, PARTITION p1 VALUES IN (42,142) ENGINE = InnoDB, PARTITION p2 VALUES IN (412) ENGINE = InnoDB)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master ---
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 3 partition LIST testing ---
+--- Do Cleanup ---
+DROP TABLE IF EXISTS t1;
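LIST partitioning only accepts values that appear in some partition's value list, which is why the test must ALTER in p2 for 412 before that row can exist. A hedged sketch of the failure without it (illustrative only; the error is ER_NO_PARTITION_FOR_GIVEN_VALUE in this 5.1 tree, and the exact message text may differ):

-- With only p0 (2,4) and p1 (42,142) defined:
INSERT INTO t1 (id) VALUES (412);
-- ERROR: Table has no partition for value 412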
+--- Start test 4 partition HASH testing ---
+--- Do setup ---
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
+bc CHAR(255), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE)
+PARTITION BY HASH( YEAR(t) )
+PARTITIONS 4;
+--- Show that tables have been created correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master ---
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 ADD PRIMARY KEY(t,id);
+ALTER TABLE t1 MODIFY vc TEXT;
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date NOT NULL DEFAULT '0000-00-00',
+ PRIMARY KEY (`t`,`id`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+--- Make sure that our tables on slave are still the same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date NOT NULL DEFAULT '0000-00-00',
+ PRIMARY KEY (`t`,`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master ---
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 4 partition HASH testing ---
+--- Do Cleanup ---
+DROP TABLE IF EXISTS t1;
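For PARTITION BY HASH, the server places each row in partition MOD(expr, number_of_partitions), so the placements in this test can be worked out by hand. For example, the updated row (illustrative, not part of the recorded result):

SELECT MOD(YEAR('2006-02-22'), 4) AS target_partition;
-- Returns 2, so the row with t = '2006-02-22' sits in the third of the
-- four HASH partitions (p2); the 1965 rows map to MOD(1965, 4) = 1 (p1).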
+--- Start test 5 partition by key testing ---
+--- Create Table Section ---
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
+bc CHAR(255), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE,PRIMARY KEY(id))
+PARTITION BY KEY()
+PARTITIONS 4;
+--- Show that tables on master are ndbcluster tables ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Show table on slave ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master ---
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(id, total);
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Make sure that our tables on slave are still the right type ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master ---
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 MODIFY vc TEXT;
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Make sure that our tables on slave are still the same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master ---
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 5 key partition testing ---
+--- Do Cleanup ---
+DROP TABLE IF EXISTS t1;
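PARTITION BY KEY () with an empty column list hashes on the primary key, so the DROP PRIMARY KEY / ADD PRIMARY KEY(id, total) step above changes the distribution key even though the printed clause stays the same. One way the resulting spread could be inspected, assuming the INFORMATION_SCHEMA.PARTITIONS table introduced in 5.1 (a sketch, not part of the recorded result):

SELECT PARTITION_NAME, TABLE_ROWS
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 't1';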
diff --git a/mysql-test/r/rpl_ndb_2myisam.result b/mysql-test/r/rpl_ndb_2myisam.result
new file mode 100644
index 00000000000..00fb2f5455f
--- /dev/null
+++ b/mysql-test/r/rpl_ndb_2myisam.result
@@ -0,0 +1,855 @@
+stop slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+reset master;
+reset slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+start slave;
+SET storage_engine=ndb;
+--- Doing pre test cleanup ---
+DROP TABLE IF EXISTS t1;
+--- Start test 1 Basic testing ---
+--- Create Table Section ---
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
+bc CHAR(255), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE,PRIMARY KEY(id));
+--- Show table on master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
+--- Show table on slave ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master ---
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(id, total);
+ALTER TABLE t1 MODIFY vc TEXT;
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
+--- Make sure that our tables on slave are still the same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master ---
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 1 Basic testing ---
+--- Do Cleanup ---
+DROP TABLE IF EXISTS t1;
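This result file differs from rpl_ndb_2innodb.result only in the slave engine: the same binlog from an ndbcluster master is applied on a slave whose default engine is MyISAM, and the paired SHOW CREATE TABLE outputs confirm the table came out as MyISAM there. A minimal check of the same fact on the slave connection (illustrative, not part of the recorded result):

SELECT ENGINE FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 't1';
-- Expect MyISAM on this slave, InnoDB in the sibling test.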
+--- Start test 2 partition RANGE testing ---
+--- Do setup ---
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
+bc CHAR(255), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE)
+PARTITION BY RANGE (YEAR(t))
+(PARTITION p0 VALUES LESS THAN (1901),
+PARTITION p1 VALUES LESS THAN (1946),
+PARTITION p2 VALUES LESS THAN (1966),
+PARTITION p3 VALUES LESS THAN (1986),
+PARTITION p4 VALUES LESS THAN (2005),
+PARTITION p5 VALUES LESS THAN MAXVALUE);
+--- Show table on master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = ndbcluster, PARTITION p1 VALUES LESS THAN (1946) ENGINE = ndbcluster, PARTITION p2 VALUES LESS THAN (1966) ENGINE = ndbcluster, PARTITION p3 VALUES LESS THAN (1986) ENGINE = ndbcluster, PARTITION p4 VALUES LESS THAN (2005) ENGINE = ndbcluster, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = ndbcluster)
+--- Show table on slave ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = MyISAM, PARTITION p1 VALUES LESS THAN (1946) ENGINE = MyISAM, PARTITION p2 VALUES LESS THAN (1966) ENGINE = MyISAM, PARTITION p3 VALUES LESS THAN (1986) ENGINE = MyISAM, PARTITION p4 VALUES LESS THAN (2005) ENGINE = MyISAM, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = MyISAM)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master ---
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 ADD PRIMARY KEY(t,id);
+ALTER TABLE t1 MODIFY vc TEXT;
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date NOT NULL DEFAULT '0000-00-00',
+ PRIMARY KEY (`t`,`id`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = ndbcluster, PARTITION p1 VALUES LESS THAN (1946) ENGINE = ndbcluster, PARTITION p2 VALUES LESS THAN (1966) ENGINE = ndbcluster, PARTITION p3 VALUES LESS THAN (1986) ENGINE = ndbcluster, PARTITION p4 VALUES LESS THAN (2005) ENGINE = ndbcluster, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = ndbcluster)
+--- Make sure that our tables on slave are still the same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date NOT NULL DEFAULT '0000-00-00',
+ PRIMARY KEY (`t`,`id`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = MyISAM, PARTITION p1 VALUES LESS THAN (1946) ENGINE = MyISAM, PARTITION p2 VALUES LESS THAN (1966) ENGINE = MyISAM, PARTITION p3 VALUES LESS THAN (1986) ENGINE = MyISAM, PARTITION p4 VALUES LESS THAN (2005) ENGINE = MyISAM, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = MyISAM)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master ---
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 2 partition RANGE testing ---
+--- Do Cleanup ---
+DROP TABLE IF EXISTS t1;
+--- Start test 3 partition LIST testing ---
+--- Do setup ---
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
+bc CHAR(255), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE)
+PARTITION BY LIST(id)
+(PARTITION p0 VALUES IN (2, 4),
+PARTITION p1 VALUES IN (42, 142));
+--- Test 3 Alter to add partition ---
+ALTER TABLE t1 ADD PARTITION (PARTITION p2 VALUES IN (412));
+--- Show table on master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = ndbcluster, PARTITION p1 VALUES IN (42,142) ENGINE = ndbcluster, PARTITION p2 VALUES IN (412) ENGINE = ndbcluster)
+--- Show table on slave ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = MyISAM, PARTITION p1 VALUES IN (42,142) ENGINE = MyISAM, PARTITION p2 VALUES IN (412) ENGINE = MyISAM)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master ---
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 ADD PRIMARY KEY(id);
+ALTER TABLE t1 MODIFY vc TEXT;
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = ndbcluster, PARTITION p1 VALUES IN (42,142) ENGINE = ndbcluster, PARTITION p2 VALUES IN (412) ENGINE = ndbcluster)
+--- Make sure that our tables on slave are still the same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = MyISAM, PARTITION p1 VALUES IN (42,142) ENGINE = MyISAM, PARTITION p2 VALUES IN (412) ENGINE = MyISAM)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master ---
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 3 partition LIST testing ---
+--- Do Cleanup ---
+DROP TABLE IF EXISTS t1;
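ALTER TABLE ... ADD PARTITION is scheme-dependent: a LIST table can take a new partition for any values not yet listed, as p2 does for 412 above, while a RANGE table can only grow above its current top boundary and not at all once a MAXVALUE partition exists. A hedged sketch against the LIST definition used here (illustrative only):

ALTER TABLE t1 ADD PARTITION (PARTITION p3 VALUES IN (500, 501));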
+--- Start test 4 partition HASH testing ---
+--- Do setup ---
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
+bc CHAR(255), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE)
+PARTITION BY HASH( YEAR(t) )
+PARTITIONS 4;
+--- Show that tables have been created correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master ---
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 ADD PRIMARY KEY(t,id);
+ALTER TABLE t1 MODIFY vc TEXT;
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date NOT NULL DEFAULT '0000-00-00',
+ PRIMARY KEY (`t`,`id`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+--- Make sure that our tables on slave are still the same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date NOT NULL DEFAULT '0000-00-00',
+ PRIMARY KEY (`t`,`id`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master ---
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 4 partition HASH testing ---
+--- Do Cleanup ---
+DROP TABLE IF EXISTS t1;
+--- Start test 5 partition by key testing ---
+--- Create Table Section ---
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255),
+bc CHAR(255), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE,PRIMARY KEY(id))
+PARTITION BY KEY()
+PARTITIONS 4;
+--- Show that tables on master are ndbcluster tables ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Show that tables on slave ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(id, total);
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Make sure that our tables on slave are still right type ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 MODIFY vc TEXT;
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Make sure that our tables on slave are still same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` text,
+ `bc` char(255) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 5 key partition testing ---
+--- Do Cleanup ---
+DROP TABLE IF EXISTS t1;
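The result above exercises KEY() partitioning with an empty column list, which hashes on the primary key; redefining the primary key therefore also redirects the partitioning hash. A minimal standalone sketch of that pattern, assuming a running NDB cluster (the table name t1_key_demo is illustrative, not part of the suite):

CREATE TABLE t1_key_demo (
  id MEDIUMINT NOT NULL,
  total BIGINT UNSIGNED NOT NULL DEFAULT 0,
  PRIMARY KEY (id)
) ENGINE=NDBCLUSTER
PARTITION BY KEY()          -- empty list: hash on the primary key
PARTITIONS 4;
-- Changing the primary key changes what KEY() hashes on:
ALTER TABLE t1_key_demo DROP PRIMARY KEY, ADD PRIMARY KEY (id, total);
SHOW CREATE TABLE t1_key_demo;   -- still reports PARTITION BY KEY () PARTITIONS 4
DROP TABLE t1_key_demo;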
diff --git a/mysql-test/r/rpl_ndb_basic.result b/mysql-test/r/rpl_ndb_basic.result
index 40e3384be3b..b23e5f03f27 100644
--- a/mysql-test/r/rpl_ndb_basic.result
+++ b/mysql-test/r/rpl_ndb_basic.result
@@ -122,3 +122,28 @@ select * from t1 order by nid;
nid nom prenom
1 DEAD ABC1
DROP TABLE t1;
+CREATE TABLE t1 (c1 INT KEY) ENGINE=NDB;
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+ALTER TABLE t1 ADD c2 INT;
+SELECT * FROM t1 ORDER BY c1;
+c1 c2
+1 NULL
+2 NULL
+3 NULL
+4 NULL
+5 NULL
+6 NULL
+7 NULL
+8 NULL
+9 NULL
+10 NULL
+ALTER TABLE t1 CHANGE c2 c2 TEXT CHARACTER SET utf8;
+ALTER TABLE t1 CHANGE c2 c2 BLOB;
+SELECT * FROM t1 ORDER BY c1 LIMIT 5;
+c1 c2
+1 NULL
+2 NULL
+3 NULL
+4 NULL
+5 NULL
+DROP TABLE t1;
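The hunk above records a chain of column type changes on a replicated NDB table, checking that pre-existing rows keep their NULLs across each conversion. A reduced sketch of the same sequence, assuming an NDB master with a connected slave (alter_demo is an illustrative name):

CREATE TABLE alter_demo (c1 INT PRIMARY KEY) ENGINE=NDB;
INSERT INTO alter_demo VALUES (1),(2),(3);
ALTER TABLE alter_demo ADD c2 INT;                           -- existing rows get NULL
ALTER TABLE alter_demo CHANGE c2 c2 TEXT CHARACTER SET utf8;
ALTER TABLE alter_demo CHANGE c2 c2 BLOB;                    -- NULLs survive each conversion
SELECT c1, c2 FROM alter_demo ORDER BY c1;                   -- compare output on master and slave
DROP TABLE alter_demo;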
diff --git a/mysql-test/r/rpl_ndb_dd_partitions.result b/mysql-test/r/rpl_ndb_dd_partitions.result
new file mode 100644
index 00000000000..ece6b84c227
--- /dev/null
+++ b/mysql-test/r/rpl_ndb_dd_partitions.result
@@ -0,0 +1,726 @@
+stop slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+reset master;
+reset slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+start slave;
+--- Doing pre test cleanup ---
+DROP TABLE IF EXISTS t1;
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+ALTER LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile02.dat'
+INITIAL_SIZE = 4M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE 'datafile.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 12M
+ENGINE NDB;
+ALTER TABLESPACE ts1
+ADD DATAFILE 'datafile02.dat'
+INITIAL_SIZE = 4M
+ENGINE=NDB;
+--- Start test 2 partition RANGE testing --
+--- Do setup --
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE)
+TABLESPACE ts1 STORAGE DISK
+ENGINE=NDB
+PARTITION BY RANGE (YEAR(t))
+(PARTITION p0 VALUES LESS THAN (1901),
+PARTITION p1 VALUES LESS THAN (1946),
+PARTITION p2 VALUES LESS THAN (1966),
+PARTITION p3 VALUES LESS THAN (1986),
+PARTITION p4 VALUES LESS THAN (2005),
+PARTITION p5 VALUES LESS THAN MAXVALUE);
+--- Show table on master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = ndbcluster, PARTITION p1 VALUES LESS THAN (1946) ENGINE = ndbcluster, PARTITION p2 VALUES LESS THAN (1966) ENGINE = ndbcluster, PARTITION p3 VALUES LESS THAN (1986) ENGINE = ndbcluster, PARTITION p4 VALUES LESS THAN (2005) ENGINE = ndbcluster, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = ndbcluster)
+--- Show table on slave --
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = ndbcluster, PARTITION p1 VALUES LESS THAN (1946) ENGINE = ndbcluster, PARTITION p2 VALUES LESS THAN (1966) ENGINE = ndbcluster, PARTITION p3 VALUES LESS THAN (1986) ENGINE = ndbcluster, PARTITION p4 VALUES LESS THAN (2005) ENGINE = ndbcluster, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = ndbcluster)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = ndbcluster, PARTITION p1 VALUES LESS THAN (1946) ENGINE = ndbcluster, PARTITION p2 VALUES LESS THAN (1966) ENGINE = ndbcluster, PARTITION p3 VALUES LESS THAN (1986) ENGINE = ndbcluster, PARTITION p4 VALUES LESS THAN (2005) ENGINE = ndbcluster, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = ndbcluster)
+--- Make sure that our tables on slave are still same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = ndbcluster, PARTITION p1 VALUES LESS THAN (1946) ENGINE = ndbcluster, PARTITION p2 VALUES LESS THAN (1966) ENGINE = ndbcluster, PARTITION p3 VALUES LESS THAN (1986) ENGINE = ndbcluster, PARTITION p4 VALUES LESS THAN (2005) ENGINE = ndbcluster, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = ndbcluster)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 2 partition RANGE testing ---
+--- Do Cleanup ---
+DROP TABLE IF EXISTS t1;
+--- Start test 3 partition LIST testing ---
+--- Do setup ---
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE)
+TABLESPACE ts1 STORAGE DISK
+ENGINE=NDB
+PARTITION BY LIST(id)
+(PARTITION p0 VALUES IN (2, 4),
+PARTITION p1 VALUES IN (42, 142));
+--- Test 3 Alter to add partition ---
+ALTER TABLE t1 ADD PARTITION (PARTITION p2 VALUES IN (412));
+--- Show table on master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = ndbcluster, PARTITION p1 VALUES IN (42,142) ENGINE = ndbcluster, PARTITION p2 VALUES IN (412) ENGINE = ndbcluster)
+--- Show table on slave ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = ndbcluster, PARTITION p1 VALUES IN (42,142) ENGINE = ndbcluster, PARTITION p2 VALUES IN (412) ENGINE = ndbcluster)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = ndbcluster, PARTITION p1 VALUES IN (42,142) ENGINE = ndbcluster, PARTITION p2 VALUES IN (412) ENGINE = ndbcluster)
+--- Make sure that our tables on slave are still same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = ndbcluster, PARTITION p1 VALUES IN (42,142) ENGINE = ndbcluster, PARTITION p2 VALUES IN (412) ENGINE = ndbcluster)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 3 partition LIST testing ---
+--- Do Cleanup --
+DROP TABLE IF EXISTS t1;
+--- Start test 4 partition HASH testing ---
+--- Do setup ---
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE)
+TABLESPACE ts1 STORAGE DISK
+ENGINE=NDB
+PARTITION BY HASH( YEAR(t) )
+PARTITIONS 4;
+--- show that tables have been created correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+--- Make sure that our tables on slave are still same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 4 partition HASH testing ---
+--- Do Cleanup --
+DROP TABLE IF EXISTS t1;
+--- Start test 5 partition by key testing ---
+--- Create Table Section ---
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE,PRIMARY KEY(id))
+TABLESPACE ts1 STORAGE DISK
+ENGINE=NDB
+PARTITION BY KEY()
+PARTITIONS 4;
+--- Show that tables on master are ndbcluster tables ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Show that tables on slave ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(id, total);
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Make sure that our tables on slave are still right type ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Make sure that our tables on slave are still same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 5 key partition testing ---
+--- Do Cleanup ---
+DROP TABLE IF EXISTS t1;
+alter tablespace ts1
+drop datafile 'datafile.dat'
+engine=ndb;
+alter tablespace ts1
+drop datafile 'datafile02.dat'
+engine=ndb;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg1 ENGINE=NDB;
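The tail of this result file tears down the disk-data objects in the reverse of their creation order: datafiles first, then the tablespace, then the logfile group. A condensed sketch of the full lifecycle, assuming NDB is running (object names and sizes are illustrative):

CREATE LOGFILE GROUP lg_demo
ADD UNDOFILE 'undo_demo.dat'
INITIAL_SIZE 16M
UNDO_BUFFER_SIZE = 1M
ENGINE=NDB;
CREATE TABLESPACE ts_demo
ADD DATAFILE 'data_demo.dat'
USE LOGFILE GROUP lg_demo
INITIAL_SIZE 12M
ENGINE=NDB;
-- ... create and use tables declared with TABLESPACE ts_demo STORAGE DISK ...
ALTER TABLESPACE ts_demo
DROP DATAFILE 'data_demo.dat'
ENGINE=NDB;
DROP TABLESPACE ts_demo ENGINE=NDB;
DROP LOGFILE GROUP lg_demo ENGINE=NDB;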
diff --git a/mysql-test/r/rpl_ndb_relay_space.result b/mysql-test/r/rpl_ndb_relay_space.result
deleted file mode 100644
index 1e25b7fe8c1..00000000000
--- a/mysql-test/r/rpl_ndb_relay_space.result
+++ /dev/null
@@ -1,25 +0,0 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-SHOW VARIABLES LIKE 'relay_log_space_limit';
-Variable_name Value
-relay_log_space_limit 0
-CREATE TABLE t1 (name varchar(64), age smallint(3))ENGINE=NDB;
-INSERT INTO t1 SET name='Andy', age=31;
-INSERT t1 SET name='Jacob', age=2;
-INSERT into t1 SET name='Caleb', age=1;
-ALTER TABLE t1 ADD id int(8) ZEROFILL AUTO_INCREMENT PRIMARY KEY;
-SELECT * FROM t1 ORDER BY id;
-name age id
-Andy 31 00000001
-Caleb 1 00000002
-Jacob 2 00000003
-SELECT * FROM t1 ORDER BY id;
-name age id
-Andy 31 00000001
-Caleb 1 00000002
-Jacob 2 00000003
-drop table t1;
diff --git a/mysql-test/r/rpl_relay_space_innodb.result b/mysql-test/r/rpl_relay_space_innodb.result
index 80d8c48c241..54aac2eca35 100644
--- a/mysql-test/r/rpl_relay_space_innodb.result
+++ b/mysql-test/r/rpl_relay_space_innodb.result
@@ -8,9 +8,9 @@ SHOW VARIABLES LIKE 'relay_log_space_limit';
Variable_name Value
relay_log_space_limit 0
CREATE TABLE t1 (name varchar(64), age smallint(3))ENGINE=InnoDB;
-INSERT INTO t1 SET name='Andy', age=31;
-INSERT t1 SET name='Jacob', age=2;
-INSERT into t1 SET name='Caleb', age=1;
+INSERT INTO t1 SET name='Andy', age=31;
+INSERT INTO t1 SET name='Jacob', age=2;
+INSERT INTO t1 SET name='Caleb', age=1;
ALTER TABLE t1 ADD id int(8) ZEROFILL AUTO_INCREMENT PRIMARY KEY;
SELECT * FROM t1 ORDER BY id;
name age id
@@ -22,4 +22,4 @@ name age id
Andy 31 00000001
Jacob 2 00000002
Caleb 1 00000003
-drop table t1;
+DROP TABLE t1;
diff --git a/mysql-test/r/rpl_relay_space_myisam.result b/mysql-test/r/rpl_relay_space_myisam.result
index 02bff7ae881..e8d2d63e46e 100644
--- a/mysql-test/r/rpl_relay_space_myisam.result
+++ b/mysql-test/r/rpl_relay_space_myisam.result
@@ -8,9 +8,9 @@ SHOW VARIABLES LIKE 'relay_log_space_limit';
Variable_name Value
relay_log_space_limit 0
CREATE TABLE t1 (name varchar(64), age smallint(3))ENGINE=MyISAM;
-INSERT INTO t1 SET name='Andy', age=31;
-INSERT t1 SET name='Jacob', age=2;
-INSERT into t1 SET name='Caleb', age=1;
+INSERT INTO t1 SET name='Andy', age=31;
+INSERT INTO t1 SET name='Jacob', age=2;
+INSERT INTO t1 SET name='Caleb', age=1;
ALTER TABLE t1 ADD id int(8) ZEROFILL AUTO_INCREMENT PRIMARY KEY;
SELECT * FROM t1 ORDER BY id;
name age id
@@ -22,4 +22,4 @@ name age id
Andy 31 00000001
Jacob 2 00000002
Caleb 1 00000003
-drop table t1;
+DROP TABLE t1;
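These two result-file hunks only normalize the recorded statements; INTO is optional in INSERT ... SET, so INSERT t1 SET ... and INSERT INTO t1 SET ... are the same statement. A quick illustration (table name is illustrative):

CREATE TABLE people (name VARCHAR(64), age SMALLINT);
INSERT INTO people SET name='Andy', age=31;   -- canonical spelling
INSERT people SET name='Jacob', age=2;        -- INTO omitted: identical effect
SELECT name, age FROM people ORDER BY age;
DROP TABLE people;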
diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-1-0.1.Data b/mysql-test/std_data/ndb_backup50/BACKUP-1-0.1.Data
new file mode 100644
index 00000000000..32494d5a1e7
--- /dev/null
+++ b/mysql-test/std_data/ndb_backup50/BACKUP-1-0.1.Data
Binary files differ
diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-1-0.2.Data b/mysql-test/std_data/ndb_backup50/BACKUP-1-0.2.Data
new file mode 100644
index 00000000000..2141fb0a6e4
--- /dev/null
+++ b/mysql-test/std_data/ndb_backup50/BACKUP-1-0.2.Data
Binary files differ
diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-1.1.ctl b/mysql-test/std_data/ndb_backup50/BACKUP-1.1.ctl
new file mode 100644
index 00000000000..cbe548e0ca5
--- /dev/null
+++ b/mysql-test/std_data/ndb_backup50/BACKUP-1.1.ctl
Binary files differ
diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-1.1.log b/mysql-test/std_data/ndb_backup50/BACKUP-1.1.log
new file mode 100644
index 00000000000..e4e114d4b46
--- /dev/null
+++ b/mysql-test/std_data/ndb_backup50/BACKUP-1.1.log
Binary files differ
diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-1.2.ctl b/mysql-test/std_data/ndb_backup50/BACKUP-1.2.ctl
new file mode 100644
index 00000000000..cbe548e0ca5
--- /dev/null
+++ b/mysql-test/std_data/ndb_backup50/BACKUP-1.2.ctl
Binary files differ
diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-1.2.log b/mysql-test/std_data/ndb_backup50/BACKUP-1.2.log
new file mode 100644
index 00000000000..a1c89b7015c
--- /dev/null
+++ b/mysql-test/std_data/ndb_backup50/BACKUP-1.2.log
Binary files differ
diff --git a/mysql-test/std_data/ndb_backup51/BACKUP-1-0.1.Data b/mysql-test/std_data/ndb_backup51/BACKUP-1-0.1.Data
new file mode 100644
index 00000000000..22b99ddc18d
--- /dev/null
+++ b/mysql-test/std_data/ndb_backup51/BACKUP-1-0.1.Data
Binary files differ
diff --git a/mysql-test/std_data/ndb_backup51/BACKUP-1-0.2.Data b/mysql-test/std_data/ndb_backup51/BACKUP-1-0.2.Data
new file mode 100644
index 00000000000..159cff93053
--- /dev/null
+++ b/mysql-test/std_data/ndb_backup51/BACKUP-1-0.2.Data
Binary files differ
diff --git a/mysql-test/std_data/ndb_backup51/BACKUP-1.1.ctl b/mysql-test/std_data/ndb_backup51/BACKUP-1.1.ctl
new file mode 100644
index 00000000000..f814027a22b
--- /dev/null
+++ b/mysql-test/std_data/ndb_backup51/BACKUP-1.1.ctl
Binary files differ
diff --git a/mysql-test/std_data/ndb_backup51/BACKUP-1.1.log b/mysql-test/std_data/ndb_backup51/BACKUP-1.1.log
new file mode 100644
index 00000000000..58d42117eb5
--- /dev/null
+++ b/mysql-test/std_data/ndb_backup51/BACKUP-1.1.log
Binary files differ
diff --git a/mysql-test/std_data/ndb_backup51/BACKUP-1.2.ctl b/mysql-test/std_data/ndb_backup51/BACKUP-1.2.ctl
new file mode 100644
index 00000000000..f814027a22b
--- /dev/null
+++ b/mysql-test/std_data/ndb_backup51/BACKUP-1.2.ctl
Binary files differ
diff --git a/mysql-test/std_data/ndb_backup51/BACKUP-1.2.log b/mysql-test/std_data/ndb_backup51/BACKUP-1.2.log
new file mode 100644
index 00000000000..a0f9b792028
--- /dev/null
+++ b/mysql-test/std_data/ndb_backup51/BACKUP-1.2.log
Binary files differ
diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def
index c0827e09990..8766e9f3f6a 100644
--- a/mysql-test/t/disabled.def
+++ b/mysql-test/t/disabled.def
@@ -21,16 +21,15 @@ ndb_cache_multi2 : BUG#18597 2006-04-10 kent simultaneous drop table an
partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table
ps_7ndb : BUG#18950 2006-02-16 jmiller create table like does not obtain LOCK_open
rpl_deadlock_innodb : BUG#16920 2006-04-12 kent fails in show slave status (randomly)
-rpl_ndb_2innodb : BUG#19004 2006-03-22 tomas ndb: partition by range and update hangs
-rpl_ndb_2myisam : BUG#19004 2006-03-22 tomas ndb: partition by range and update hangs
+rpl_ndb_2innodb : BUG#19227 2006-04-20 pekka pk delete apparently not replicated
+rpl_ndb_2myisam : BUG#19227 2006-04-20 pekka pk delete apparently not replicated
rpl_ndb_auto_inc : BUG#17086 2006-02-16 jmiller CR: auto_increment_increment and auto_increment_offset produce duplicate key er
-rpl_ndb_ddl : result file needs update + test needs to checked
-rpl_ndb_innodb2ndb : BUG#18094 2006-03-16 mats Slave caches invalid table definition after atlters causes select failure
+rpl_ndb_dd_partitions : BUG#19259 2006-04-21 rpl_ndb_dd_partitions fails on Solaris
+rpl_ndb_ddl : BUG#18946 result file needs update + test needs to be checked
+rpl_ndb_innodb2ndb : BUG#17400 2006-04-19 tomas Cluster Replication: delete & update of rows in table without pk fails on slave.
rpl_ndb_log : BUG#18947 2006-03-21 tomas CRBR: order in binlog of create table and insert (on different table) not determ
-rpl_ndb_myisam2ndb : BUG#18094 2006-03-16 mats Slave caches invalid table definition after atlters causes select failure
-rpl_ndb_relay_space : BUG#16993 2006-02-16 jmiller RBR: ALTER TABLE ZEROFILL AUTO_INCREMENT is not replicated correctly
+rpl_ndb_myisam2ndb : BUG#17400 2006-04-19 tomas Cluster Replication: delete & update of rows in table without pk fails on slave.
rpl_switch_stm_row_mixed : BUG#18590 2006-03-28 brian
-rpl_row_basic_7ndb : BUG#17400 2006-04-09 brian Cluster Replication: delete & update of rows in table without pk fails on slave.
rpl_row_blob_innodb : BUG#18980 2006-04-10 kent Test fails randomly
rpl_row_func003 : BUG#19074 2006-13-04 andrei test failed
rpl_row_inexist_tbl : BUG#18948 2006-03-09 mats Disabled since patch makes this test wait forever
@@ -42,14 +41,3 @@ udf : BUG#18564 2006-03-27 ian (Permission by Brian)
# the below testcase have been reworked to avoid the bug, test contains comment, keep bug open
#ndb_binlog_ddl_multi : BUG#18976 2006-04-10 kent CRBR: multiple binlog, second binlog may miss schema log events
-
-# the below ndb failures have not been objerved for > 5 push builds, close bugs
-#ndb_gis : BUG#18600 2006-03-28 brian ndb_gis test failure
-#ndb_load : BUG#17233 2006-02-16 jmiller failed load data from infile causes mysqld dbug_assert, binlog not flushed
-#rpl_ndb_basic : BUG#18592 2006-03-28 brian rpl_ndb_basic failure
-#rpl_ndb_dd_advance : BUG#18924 2006-04-09 brian rpl_ndb_dd_advance failure
-#rpl_ndb_dd_basic : BUG#18569 2006-03-28 brian rpl_ndb_dd_basic failure
-#rpl_ndb_insert_ignore : BUG#18567 2006-03-28 brian rpl_ndb_insert_ignore failure
-#rpl_ndb_multi_update2 : BUG#18928 2006-04-09 brian rpl_ndb_multi_update2 failed
-#rpl_ndb_multi_update3 : BUG#18627 2006-03-29 monty Cluster Replication: rpl_ndb_multi_update3 fails on Intel 64 bit
-#rpl_ndb_trig004 : BUG#18977 2006-04-10 kent Test fails randomly
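For readers unfamiliar with disabled.def: each active line has the shape <test name> : <comment>, where the comment conventionally carries a bug number, date, owner, and one-line reason, and a leading # keeps an entry on file without disabling the test. A sketch of the convention (both entries are hypothetical):

# <test name> : <BUG#nnnnn> <YYYY-MM-DD> <owner> <one-line reason>
some_hypothetical_test : BUG#99999 2006-04-30 nobody example entry only, not a real test
#some_other_test : BUG#99998 2006-04-30 nobody commented out: test stays enabled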
diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test
index c98c1c94609..4e4fb8f777a 100644
--- a/mysql-test/t/func_time.test
+++ b/mysql-test/t/func_time.test
@@ -446,4 +446,26 @@ select timestampdiff(year,'1999-09-11','2001-9-11');
select timestampdiff(year,'2004-02-28','2005-02-28');
select timestampdiff(year,'2004-02-29','2005-02-28');
+#
+# Bug #18618: BETWEEN for dates with the second argument being a constant
+# expression and the first and the third arguments being fields
+#
+
+CREATE TABLE t1 (id int NOT NULL PRIMARY KEY, day date);
+CREATE TABLE t2 (id int NOT NULL PRIMARY KEY, day date);
+
+INSERT INTO t1 VALUES
+ (1, '2005-06-01'), (2, '2005-02-01'), (3, '2005-07-01');
+INSERT INTO t2 VALUES
+ (1, '2005-08-01'), (2, '2005-06-15'), (3, '2005-07-15');
+
+SELECT * FROM t1, t2
+ WHERE t1.day BETWEEN
+ '2005.09.01' - INTERVAL 6 MONTH AND t2.day;
+SELECT * FROM t1, t2
+ WHERE CAST(t1.day AS DATE) BETWEEN
+ '2005.09.01' - INTERVAL 6 MONTH AND t2.day;
+
+DROP TABLE t1,t2;
+
# End of 5.0 tests
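# Editor's sketch (hypothetical table and column names, not part of
# the patch): the Bug #18618 pattern in isolation -- BETWEEN whose
# second argument is a constant date expression while the first and
# third arguments are fields.
CREATE TABLE dates_t (d date, hi date);
INSERT INTO dates_t VALUES
  (1, '2005-06-01', '2005-12-31'), (2, '2005-01-01', '2005-12-31');
# the constant folds to '2005-03-01', so only the '2005-06-01' row
# should qualify
SELECT d FROM dates_t
  WHERE d BETWEEN '2005.09.01' - INTERVAL 6 MONTH AND hi;
DROP TABLE dates_t;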
diff --git a/mysql-test/t/index_merge_innodb.test b/mysql-test/t/index_merge_innodb.test
index a48626a9ec3..25f4e0b4e65 100644
--- a/mysql-test/t/index_merge_innodb.test
+++ b/mysql-test/t/index_merge_innodb.test
@@ -248,3 +248,55 @@ select t_vers,t_rele,t_cust,filler1 from t1 where t_vers = '7.6'
drop table t1;
+# BUG#19021: Crash in index_merge/ROR-intersection optimizer under
+# specific circumstances.
+create table t1 (
+ pk int(11) not null auto_increment,
+ a int(11) not null default '0',
+ b int(11) not null default '0',
+ c int(11) not null default '0',
+
+ filler1 datetime, filler2 varchar(15),
+ filler3 longtext,
+
+ kp1 varchar(4), kp2 varchar(7),
+ kp3 varchar(2), kp4 varchar(4),
+ kp5 varchar(7),
+ filler4 char(1),
+
+ primary key (pk),
+ key idx1(a,b,c),
+ key idx2(c),
+ key idx3(kp1,kp2,kp3,kp4,kp5)
+) engine=innodb default charset=latin1;
+--disable_query_log
+set @fill= uncompress(unhex(concat(
+'F91D0000789CDD993D6FDB301086F7FE0A6D4E0105B8E3F1335D5BA028DA0EEDE28E1D320408',
+'52A0713BF4D7571FB62C51A475924839080307B603E77DEE787C8FA41F9E9EEF7F1F8A87A7C3',
+'AFE280C5DF9F8F7FEE9F8B1B2CB114D6902E918455245DB91300FA16E42D5201FA4EE29DA05D',
+'B9FB3718A33718A3FA8C30AEFAFDE1F317D016AA67BA7A60FDE45BF5F8BA7B5BDE8812AA9F1A',
+'069DB03C9804346644F3A3A6A1338DB572756A3C4D1BCC804CABF912C654AE9BB855A2B85962',
+'3A479259CAE6A86C0411D01AE5483581EDCBD9A39C45252D532E533979EB9F82E971D979BDB4',
+'8531105670740AFBFD1E34AAB0029E4AD0A1D46A6D0946A21A16038A5CD965CD2D524673F712',
+'20C304477315CE18405EAF9BD0AFFEAC74FDA14F1FBF5BD34C769D73FBBEDF4750ADD4E5A99C',
+'5C8DC04934AFA275D483D536D174C11B12AF27F8F888B41B6FC9DBA569E1FD7BD72D698130B7',
+'91B23A98803512B3D31881E8DCDA2AC1754E3644C4BB3A8466750B911681274A39E35E8624B7',
+'444A42AC1213F354758E3CF1A4CDD5A688C767CF1B11ABC5867CB15D8A18E0B91E9EC275BB94',
+'58F33C2936F64690D55BC29E4A293D95A798D84217736CEAAA538CE1354269EE2162053FBC66',
+'496D90CB53323CB279D3A6AF651B4B22B9E430743D83BE48E995A09D4FC9871C22D8D189B945',
+'706911BCB8C3C774B9C08D2FC6ED853ADACA37A14A4CB2E027630E5B80ECACD939431B1CDF62',
+'7D71487536EA2C678F59685E91F4B6C144BCCB94C1EBA9FA6F5552DDCA4E4539BE326A2720CB',
+'45ED028EB3616AC93C46E775FEA9FA6DA7CFCEC6DEBA5FCD1F915EED4D983BDDB881528AD9AB',
+'43C1576F29AAB35BDFBC21D422F52B307D350589D45225A887AC46C8EDD72D99EC3ED2E1BCEF',
+'7AF26FC4C74097B6768A5EDAFA660CC64278F7E63F99AC954B')));
+prepare x from @fill;
+execute x;
+deallocate prepare x;
+--enable_query_log
+set @fill=NULL;
+SELECT COUNT(*) FROM t1 WHERE b = 0 AND a = 0 AND c = 13286427 AND
+ kp1='279' AND kp2='ELM0678' AND kp3='6' AND kp4='10' AND kp5 = 'R ';
+
+drop table t1;
+
+
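# Editor's sketch (not part of the patch) of the data-loading trick
# used above: the bulk INSERT is stored as HEX(COMPRESS(...)) to keep
# the test file small, then replayed through a prepared statement.
# The table and variable names below are hypothetical.
CREATE TABLE demo (a int);
SET @stmt= 'INSERT INTO demo VALUES (1),(2),(3)';
# what a test author would store in the .test file:
SET @packed= HEX(COMPRESS(@stmt));
# what the test replays at run time:
SET @fill= UNCOMPRESS(UNHEX(@packed));
PREPARE x FROM @fill;
EXECUTE x;
DEALLOCATE PREPARE x;
# expect 3
SELECT COUNT(*) FROM demo;
DROP TABLE demo;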
diff --git a/mysql-test/t/innodb.test b/mysql-test/t/innodb.test
index bcd80c18326..55316988bdb 100644
--- a/mysql-test/t/innodb.test
+++ b/mysql-test/t/innodb.test
@@ -2113,6 +2113,18 @@ disconnect a;
disconnect b;
#
+# Bug #14360: problem with intervals
+#
+
+create table t1(a date) engine=innodb;
+create table t2(a date, key(a)) engine=innodb;
+insert into t1 values('2005-10-01');
+insert into t2 values('2005-10-01');
+select * from t1, t2
+ where t2.a between t1.a - interval 2 day and t1.a + interval 2 day;
+drop table t1, t2;
+
+#
# Test that cascading updates leading to duplicate keys give the correct
# error message (bug #9680)
#
@@ -2164,7 +2176,6 @@ alter table t1 drop foreign key c2_fk;
show create table t1;
#
drop table t1, t2;
-
#
# Bug #14360: problem with intervals
#
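# Editor's sketch (hypothetical names, outside the patch): the same
# field-plus-INTERVAL BETWEEN as in the Bug #14360 test above, with
# EXPLAIN added to inspect whether the range on the indexed column is
# still usable by the optimizer.
CREATE TABLE ta (a date) ENGINE=InnoDB;
CREATE TABLE tb (a date, KEY(a)) ENGINE=InnoDB;
EXPLAIN SELECT * FROM ta, tb
  WHERE tb.a BETWEEN ta.a - INTERVAL 2 DAY AND ta.a + INTERVAL 2 DAY;
DROP TABLE ta, tb;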
diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test
index df94545abea..ab62e3dd13a 100644
--- a/mysql-test/t/ndb_basic.test
+++ b/mysql-test/t/ndb_basic.test
@@ -6,17 +6,6 @@ DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7;
drop database if exists mysqltest;
--enable_warnings
-# workaround for bug#16445
-# remove to reproduce bug and run tests drom ndb start
-# and with ndb_autodiscover disabled
-CREATE TABLE t1 (
- pk1 INT NOT NULL PRIMARY KEY,
- attr1 INT NOT NULL,
- attr2 INT,
- attr3 VARCHAR(10)
-) ENGINE=ndbcluster;
-drop table t1;
-
#
# Basic test to show that the NDB
# table handler is working
diff --git a/mysql-test/t/ndb_partition_key.test b/mysql-test/t/ndb_partition_key.test
index 87933671529..ce939663ab8 100644
--- a/mysql-test/t/ndb_partition_key.test
+++ b/mysql-test/t/ndb_partition_key.test
@@ -155,6 +155,24 @@ ALTER TABLE t1 COALESCE PARTITION 4;
DROP TABLE t1;
#
+# Bug 16822: OPTIMIZE TABLE hangs test
+#
+CREATE TABLE t1 (a int primary key)
+ENGINE=NDB
+PARTITION BY KEY(a);
+--error 1031
+ALTER TABLE t1 OPTIMIZE PARTITION p0;
+--error 1031
+ALTER TABLE t1 CHECK PARTITION p0;
+--error 1031
+ALTER TABLE t1 REPAIR PARTITION p0;
+--error 1031
+ALTER TABLE t1 ANALYZE PARTITION p0;
+--error 1031
+ALTER TABLE t1 REBUILD PARTITION p0;
+DROP TABLE t1;
+
+#
# BUG 16806: ALTER TABLE fails
#
CREATE TABLE t1 (
diff --git a/mysql-test/t/ndb_restore.test b/mysql-test/t/ndb_restore.test
index f11324492c2..01fdb2ecc5a 100644
--- a/mysql-test/t/ndb_restore.test
+++ b/mysql-test/t/ndb_restore.test
@@ -373,3 +373,38 @@ drop table if exists t2_c;
--exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults -d sys -D , SYSTAB_0 | grep 520093696, | sed "s/,$the_backup_id/,<the_backup_id>/"
# End of 4.1 tests
+
+#
+# Bug #18594 ndb_restore log broken in 5.1
+#
+
+--disable_warnings
+DROP DATABASE IF EXISTS BANK;
+--enable_warnings
+CREATE DATABASE BANK default charset=latin1 default collate=latin1_bin;
+USE BANK;
+--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 1 -p 1 -m -r $MYSQL_TEST_DIR/std_data/ndb_backup51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 2 -p 1 -r $MYSQL_TEST_DIR/std_data/ndb_backup51 >> $NDB_TOOLS_OUTPUT
+SHOW TABLES;
+SELECT * FROM GL ORDER BY TIME,ACCOUNT_TYPE;
+SELECT * FROM ACCOUNT ORDER BY ACCOUNT_ID;
+SELECT COUNT(*) FROM TRANSACTION;
+SELECT * FROM SYSTEM_VALUES ORDER BY SYSTEM_VALUES_ID;
+
+#
+# verify restore of 5.0 backup
+# here we must use the already created tables as restoring the old
+# table definitions will not work
+#
+TRUNCATE GL;
+TRUNCATE ACCOUNT;
+TRUNCATE TRANSACTION;
+TRUNCATE SYSTEM_VALUES;
+TRUNCATE ACCOUNT_TYPE;
+--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 1 -p 1 -r $MYSQL_TEST_DIR/std_data/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 2 -p 1 -r $MYSQL_TEST_DIR/std_data/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+SELECT * FROM GL ORDER BY TIME,ACCOUNT_TYPE;
+SELECT * FROM ACCOUNT ORDER BY ACCOUNT_ID;
+SELECT COUNT(*) FROM TRANSACTION;
+SELECT * FROM SYSTEM_VALUES ORDER BY SYSTEM_VALUES_ID;
+DROP DATABASE BANK;
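# Editor's note (not part of the patch): one hedged way to verify that
# the 5.0 and 5.1 restore passes above produced identical data is to
# compare table checksums after each pass, run before the final
# DROP DATABASE, e.g.:
CHECKSUM TABLE GL, ACCOUNT, SYSTEM_VALUES;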
diff --git a/mysql-test/t/partition_range.test b/mysql-test/t/partition_range.test
index a4d8c3740b7..ef539e2001f 100644
--- a/mysql-test/t/partition_range.test
+++ b/mysql-test/t/partition_range.test
@@ -388,3 +388,31 @@ SELECT COUNT(*) FROM t1 WHERE c3 BETWEEN '1996-12-31' AND '2000-12-31';
SELECT COUNT(*) FROM t1 WHERE c3 < '2000-12-31';
DROP TABLE t1;
+#
+# BUG 18962 Errors in DROP PARTITION
+#
+create table t1 (a int)
+partition by range (MOD(a,3))
+subpartition by hash(a)
+subpartitions 2
+(partition p0 values less than (1),
+ partition p1 values less than (2),
+ partition p2 values less than (3),
+ partition p3 values less than (4));
+ALTER TABLE t1 DROP PARTITION p3;
+ALTER TABLE t1 DROP PARTITION p1;
+ALTER TABLE t1 DROP PARTITION p2;
+drop table t1;
+
+create table t1 (a int)
+partition by range (MOD(a,3))
+subpartition by hash(a)
+subpartitions 2
+(partition p0 values less than (1),
+ partition p1 values less than (2),
+ partition p2 values less than (3),
+ partition p3 values less than (4));
+ALTER TABLE t1 DROP PARTITION p0;
+ALTER TABLE t1 DROP PARTITION p1;
+ALTER TABLE t1 DROP PARTITION p2;
+drop table t1;
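# Editor's sketch (hypothetical table, outside the patch): DROP
# PARTITION discards the dropped partition's rows together with both
# of its subpartitions, which is what the BUG 18962 tests above
# exercise.
CREATE TABLE t_demo (a int)
PARTITION BY RANGE (MOD(a,3))
SUBPARTITION BY HASH(a)
SUBPARTITIONS 2
(PARTITION p0 VALUES LESS THAN (1),
 PARTITION p1 VALUES LESS THAN (2),
 PARTITION p2 VALUES LESS THAN (3));
INSERT INTO t_demo VALUES (0),(1),(2),(3),(4),(5);
# dropping p1 discards the rows with MOD(a,3) = 1, i.e. 1 and 4
ALTER TABLE t_demo DROP PARTITION p1;
# expect 4
SELECT COUNT(*) FROM t_demo;
DROP TABLE t_demo;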
diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test
index b54efcad3a3..9ffdb72ca22 100644
--- a/mysql-test/t/ps.test
+++ b/mysql-test/t/ps.test
@@ -114,6 +114,9 @@ set @fvar= 123.4567;
prepare stmt1 from @fvar;
drop table t1,t2;
+deallocate prepare stmt3;
+deallocate prepare stmt4;
+deallocate prepare stmt5;
#
# Bug #4105: Server crash on attempt to prepare a statement with character
@@ -257,6 +260,7 @@ prepare `ü` from 'select 1234';
execute `ü` ;
set names latin1;
execute `ü`;
+deallocate prepare `ü`;
set names default;
@@ -823,6 +827,7 @@ EXECUTE b12651;
DROP VIEW b12651_V1;
DROP TABLE b12651_T1, b12651_T2;
+DEALLOCATE PREPARE b12651;
#
# Bug#9359 "Prepared statements take snapshot of system vars at PREPARE
@@ -921,6 +926,143 @@ select length(a) from t1;
drop table t1;
deallocate prepare stmt;
+#
+# Bug#16248 "WHERE (col1,col2) IN ((?,?)) gives wrong results":
+# check that ROW implementation is reexecution-friendly.
+#
+create table t1 (col1 integer, col2 integer);
+insert into t1 values(100,100),(101,101),(102,102),(103,103);
+prepare stmt from 'select col1, col2 from t1 where (col1, col2) in ((?,?))';
+set @a=100, @b=100;
+execute stmt using @a,@b;
+set @a=101, @b=101;
+execute stmt using @a,@b;
+set @a=102, @b=102;
+execute stmt using @a,@b;
+set @a=102, @b=103;
+execute stmt using @a,@b;
+deallocate prepare stmt;
+drop table t1;
+
+#
+# Bug#16365 Prepared Statements: DoS with too many open statements
+# Check that the limit @@max_prepared_stmt_count works.
+#
+# Save the old value
+set @old_max_prepared_stmt_count= @@max_prepared_stmt_count;
+#
+# Disable prepared statement protocol: in this test we set
+# @@max_prepared_stmt_count to 0 or 1 and would like to test the limit
+# manually.
+#
+--disable_ps_protocol
+#
+# A. Check that the new variables are present in SHOW VARIABLES list.
+#
+show variables like 'max_prepared_stmt_count';
+show variables like 'prepared_stmt_count';
+#
+# B. Check that the new variables are selectable.
+#
+select @@max_prepared_stmt_count, @@prepared_stmt_count;
+#
+# C. Check that max_prepared_stmt_count is settable (global only),
+# whereas prepared_stmt_count is readonly.
+#
+set global max_prepared_stmt_count=-1;
+select @@max_prepared_stmt_count;
+set global max_prepared_stmt_count=10000000000000000;
+select @@max_prepared_stmt_count;
+set global max_prepared_stmt_count=default;
+select @@max_prepared_stmt_count;
+--error ER_GLOBAL_VARIABLE
+set @@max_prepared_stmt_count=1;
+--error ER_GLOBAL_VARIABLE
+set max_prepared_stmt_count=1;
+--error ER_GLOBAL_VARIABLE
+set local max_prepared_stmt_count=1;
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+set local prepared_stmt_count=0;
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+set @@prepared_stmt_count=0;
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+set global prepared_stmt_count=1;
+# set to a reasonable limit works
+set global max_prepared_stmt_count=1;
+select @@max_prepared_stmt_count;
+#
+# D. Check that the variables actually work.
+#
+set global max_prepared_stmt_count=0;
+select @@max_prepared_stmt_count, @@prepared_stmt_count;
+--error ER_MAX_PREPARED_STMT_COUNT_REACHED
+prepare stmt from "select 1";
+select @@prepared_stmt_count;
+set global max_prepared_stmt_count=1;
+prepare stmt from "select 1";
+select @@prepared_stmt_count;
+--error ER_MAX_PREPARED_STMT_COUNT_REACHED
+prepare stmt1 from "select 1";
+select @@prepared_stmt_count;
+deallocate prepare stmt;
+select @@prepared_stmt_count;
+#
+# E. Check that we can prepare a statement with the same name
+# successfully, without hitting the limit.
+#
+prepare stmt from "select 1";
+select @@prepared_stmt_count;
+prepare stmt from "select 2";
+select @@prepared_stmt_count;
+#
+# F. We can set the max below the current count. In this case no new
+# statements should be allowed to prepare.
+#
+select @@prepared_stmt_count, @@max_prepared_stmt_count;
+set global max_prepared_stmt_count=0;
+--error ER_MAX_PREPARED_STMT_COUNT_REACHED
+prepare stmt from "select 1";
+# Result: the old statement is deallocated, the new is not created.
+--error 1243 # ER_UNKNOWN_STMT_HANDLER
+execute stmt;
+select @@prepared_stmt_count;
+--error ER_MAX_PREPARED_STMT_COUNT_REACHED
+prepare stmt from "select 1";
+select @@prepared_stmt_count;
+#
+# G. Show that the variables are up to date even after a connection with all
+# statements in it was terminated.
+#
+set global max_prepared_stmt_count=3;
+select @@max_prepared_stmt_count, @@prepared_stmt_count;
+prepare stmt from "select 1";
+connect (con1,localhost,root,,);
+connection con1;
+prepare stmt from "select 2";
+prepare stmt1 from "select 3";
+--error ER_MAX_PREPARED_STMT_COUNT_REACHED
+prepare stmt2 from "select 4";
+connection default;
+--error ER_MAX_PREPARED_STMT_COUNT_REACHED
+prepare stmt2 from "select 4";
+select @@max_prepared_stmt_count, @@prepared_stmt_count;
+disconnect con1;
+connection default;
+# Wait for the connection to die: deal with a possible race
+deallocate prepare stmt;
+let $count= `select @@prepared_stmt_count`;
+if ($count)
+{
+--sleep 2
+ let $count= `select @@prepared_stmt_count`;
+}
+select @@max_prepared_stmt_count, @@prepared_stmt_count;
+#
+# Restore the old value.
+#
+set global max_prepared_stmt_count= @old_max_prepared_stmt_count;
+--enable_ps_protocol
+
# End of 4.1 tests
#
@@ -946,6 +1088,7 @@ insert into t1 (a,b) values (2,8),(1,9),(3,7);
# Will order by index
prepare stmt from "select * from t1 order by ?";
+set @a=NULL;
execute stmt using @a;
set @a=1;
execute stmt using @a;
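# Editor's sketch (hypothetical session, not part of the patch) of the
# limit semantics tested above: with the global cap at 1, re-preparing
# under the same name succeeds because the old handle is replaced
# rather than added, while a second name is refused.
SET GLOBAL max_prepared_stmt_count= 1;
PREPARE s1 FROM 'SELECT 1';
# ok: replaces s1, the count stays at 1
PREPARE s1 FROM 'SELECT 2';
--error ER_MAX_PREPARED_STMT_COUNT_REACHED
PREPARE s2 FROM 'SELECT 3';
DEALLOCATE PREPARE s1;
SET GLOBAL max_prepared_stmt_count= DEFAULT;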
diff --git a/mysql-test/t/rpl_ndb_basic.test b/mysql-test/t/rpl_ndb_basic.test
index d7d1d50d88f..c702908ed68 100644
--- a/mysql-test/t/rpl_ndb_basic.test
+++ b/mysql-test/t/rpl_ndb_basic.test
@@ -146,4 +146,35 @@ select * from t1 order by nid;
# cleanup
--connection master
DROP TABLE t1;
+
+
+#
+# BUG#18094
+# Slave caches invalid table definition after alters, causing select failure
+#
+--connection master
+CREATE TABLE t1 (c1 INT KEY) ENGINE=NDB;
+
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+
+ALTER TABLE t1 ADD c2 INT;
+
+--sync_slave_with_master
+connection slave;
+SELECT * FROM t1 ORDER BY c1;
+
+connection master;
+ALTER TABLE t1 CHANGE c2 c2 TEXT CHARACTER SET utf8;
+ALTER TABLE t1 CHANGE c2 c2 BLOB;
+
+--sync_slave_with_master
+connection slave;
+# here we would get error 1412 prior to the bug fix
+SELECT * FROM t1 ORDER BY c1 LIMIT 5;
+
+
+
+# cleanup
+--connection master
+DROP TABLE t1;
-- source include/master-slave-end.inc
diff --git a/mysql-test/t/rpl_ndb_dd_partitions.test b/mysql-test/t/rpl_ndb_dd_partitions.test
new file mode 100644
index 00000000000..9291f38e8db
--- /dev/null
+++ b/mysql-test/t/rpl_ndb_dd_partitions.test
@@ -0,0 +1,310 @@
+#######################################
+# Author: JBM #
+# Date: 2006-03-09 #
+# Purpose: To test the replication of #
+# Cluster Disk Data using partitions #
+#######################################
+
+--source include/have_ndb.inc
+--source include/have_binlog_format_row.inc
+--source include/master-slave.inc
+
+--echo --- Doing pre test cleanup ---
+
+connection master;
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+
+# Start by creating a logfile group
+##################################
+
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+
+ALTER LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile02.dat'
+INITIAL_SIZE = 4M
+ENGINE=NDB;
+
+###################################################
+# Create a tablespace connected to the logfile group
+###################################################
+
+CREATE TABLESPACE ts1
+ADD DATAFILE 'datafile.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 12M
+ENGINE NDB;
+
+ALTER TABLESPACE ts1
+ADD DATAFILE 'datafile02.dat'
+INITIAL_SIZE = 4M
+ENGINE=NDB;
+
+#################################################################
+
+--echo --- Start test 2 partition RANGE testing --
+--echo --- Do setup --
+
+
+#################################################
+# Requirement: Create a table partitioned by   #
+# range on year, i.e. year(t), and replicate   #
+# basic operations such as insert, update and  #
+# delete between 2 different storage engines.  #
+# Alter the table and ensure it is handled     #
+# correctly on the slave.                      #
+#################################################
+
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+ bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+ f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+ y YEAR, t DATE)
+ TABLESPACE ts1 STORAGE DISK
+ ENGINE=NDB
+ PARTITION BY RANGE (YEAR(t))
+ (PARTITION p0 VALUES LESS THAN (1901),
+ PARTITION p1 VALUES LESS THAN (1946),
+ PARTITION p2 VALUES LESS THAN (1966),
+ PARTITION p3 VALUES LESS THAN (1986),
+ PARTITION p4 VALUES LESS THAN (2005),
+ PARTITION p5 VALUES LESS THAN MAXVALUE);
+
+--echo --- Show table on master ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Show table on slave --
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- Check that simple Alter statements are replicated correctly ---
+
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+
+--echo --- Show the new improved table on the master ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Make sure that our tables on slave are still same engine ---
+--echo --- and that the alter statements replicated correctly ---
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+--enable_query_log
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- End test 2 partition RANGE testing ---
+--echo --- Do Cleanup ---
+
+DROP TABLE IF EXISTS t1;
+
+########################################################
+
+--echo --- Start test 3 partition LIST testing ---
+--echo --- Do setup ---
+#################################################
+
+
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+ bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+ f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+ y YEAR, t DATE)
+ TABLESPACE ts1 STORAGE DISK
+ ENGINE=NDB
+ PARTITION BY LIST(id)
+ (PARTITION p0 VALUES IN (2, 4),
+ PARTITION p1 VALUES IN (42, 142));
+
+--echo --- Test 3 Alter to add partition ---
+
+ALTER TABLE t1 ADD PARTITION (PARTITION p2 VALUES IN (412));
+
+--echo --- Show table on master ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Show table on slave ---
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- Check that simple Alter statements are replicated correctly ---
+
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+
+--echo --- Show the new improved table on the master ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Make sure that our tables on slave are still same engine ---
+--echo --- and that the alter statements replicated correctly ---
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- End test 3 partition LIST testing ---
+--echo --- Do Cleanup --
+
+DROP TABLE IF EXISTS t1;
+
+########################################################
+
+--echo --- Start test 4 partition HASH testing ---
+--echo --- Do setup ---
+#################################################
+
+
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+ bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+ f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+ y YEAR, t DATE)
+ TABLESPACE ts1 STORAGE DISK
+ ENGINE=NDB
+ PARTITION BY HASH( YEAR(t) )
+ PARTITIONS 4;
+
+--echo --- show that tables have been created correctly ---
+
+SHOW CREATE TABLE t1;
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- Check that simple Alter statements are replicated correctly ---
+
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+
+--echo --- Show the new improved table on the master ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Make sure that our tables on slave are still same engine ---
+--echo --- and that the alter statements replicated correctly ---
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- End test 4 partition HASH testing ---
+--echo --- Do Cleanup --
+
+DROP TABLE IF EXISTS t1;
+
+########################################################
+
+--echo --- Start test 5 partition by key testing ---
+--echo --- Create Table Section ---
+
+#################################################
+
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+ bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+ f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+ y YEAR, t DATE,PRIMARY KEY(id))
+ TABLESPACE ts1 STORAGE DISK
+ ENGINE=NDB
+ PARTITION BY KEY()
+ PARTITIONS 4;
+
+--echo --- Show that tables on master are ndbcluster tables ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Show that tables on slave ---
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+# Okay, let's see how it holds up to table changes
+--echo --- Check that simple Alter statements are replicated correctly ---
+
+ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(id, total);
+
+--echo --- Show the new improved table on the master ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Make sure that our tables on slave are still right type ---
+--echo --- and that the alter statements replicated correctly ---
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- Check that simple Alter statements are replicated correctly ---
+
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+
+--echo --- Show the new improved table on the master ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Make sure that our tables on slave are still same engine ---
+--echo --- and that the alter statements replicated correctly ---
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- End test 5 key partition testing ---
+--echo --- Do Cleanup ---
+
+DROP TABLE IF EXISTS t1;
+alter tablespace ts1
+drop datafile 'datafile.dat'
+engine=ndb;
+alter tablespace ts1
+drop datafile 'datafile02.dat'
+engine=ndb;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg1 ENGINE=NDB;
+--sync_slave_with_master
+
+# End of 5.1 test case
diff --git a/mysql-test/t/rpl_ndb_relay_space.test b/mysql-test/t/rpl_ndb_relay_space.test
deleted file mode 100644
index 0484d807996..00000000000
--- a/mysql-test/t/rpl_ndb_relay_space.test
+++ /dev/null
@@ -1,21 +0,0 @@
-###################################
-# Wrapper rpl_sv_relay_space.test #
-# This test has to be wrapped as #
-# It tests ndb, innodb and MyISAM.#
-# By Wrapping we are saving some #
-# space and making the test more #
-# Maintainable by only having one #
-# test file and reusing the code #
-# In Addition, INNODB has to have #
-# Option files during this test #
-# to force innodb on the slave #
-# else the test will fail #
-###################################
-#Change Author: JBM #
-#Change Date: 2006-02-03 #
-#Change: Added Comments #
-###################################
---source include/have_ndb.inc
-let $engine_type=NDB;
--- source extra/rpl_tests/rpl_sv_relay_space.test
-
diff --git a/mysql-test/t/rpl_view-slave.opt b/mysql-test/t/rpl_view-slave.opt
new file mode 100644
index 00000000000..79b3bf6174b
--- /dev/null
+++ b/mysql-test/t/rpl_view-slave.opt
@@ -0,0 +1 @@
+--replicate-ignore-table=test.foo
diff --git a/scripts/mysql_prepare_privilege_tables_for_5.sql b/scripts/mysql_prepare_privilege_tables_for_5.sql
deleted file mode 100644
index a9b6d43aee0..00000000000
--- a/scripts/mysql_prepare_privilege_tables_for_5.sql
+++ /dev/null
@@ -1,53 +0,0 @@
-
-use mysql;
-
---
--- merging `host` table and `db`
---
-
-UPDATE IGNORE host SET Host='%' WHERE Host='';
-DELETE FROM host WHERE Host='';
-
-INSERT IGNORE INTO db (User, Host, Select_priv, Insert_priv, Update_priv,
- Delete_priv, Create_priv, Drop_priv, Grant_priv, References_priv,
- Index_priv, Alter_priv, Create_tmp_table_priv, Lock_tables_priv)
- SELECT d.User, h.Host,
- (d.Select_priv = 'Y' || h.Select_priv = 'Y') + 1,
- (d.Insert_priv = 'Y' || h.Select_priv = 'Y') + 1,
- (d.Update_priv = 'Y' || h.Update_priv = 'Y') + 1,
- (d.Delete_priv = 'Y' || h.Delete_priv = 'Y') + 1,
- (d.Create_priv = 'Y' || h.Create_priv = 'Y') + 1,
- (d.Drop_priv = 'Y' || h.Drop_priv = 'Y') + 1,
- (d.Grant_priv = 'Y' || h.Grant_priv = 'Y') + 1,
- (d.References_priv = 'Y' || h.References_priv = 'Y') + 1,
- (d.Index_priv = 'Y' || h.Index_priv = 'Y') + 1,
- (d.Alter_priv = 'Y' || h.Alter_priv = 'Y') + 1,
- (d.Create_tmp_table_priv = 'Y' || h.Create_tmp_table_priv = 'Y') + 1,
- (d.Lock_tables_priv = 'Y' || h.Lock_tables_priv = 'Y') + 1
- FROM db d, host h WHERE d.Host = '';
-
-UPDATE IGNORE db SET Host='%' WHERE Host = '';
-DELETE FROM db WHERE Host='';
-
-TRUNCATE TABLE host;
-
---
--- Adding missing users to `user` table
---
--- note that invalid password causes the user to be skipped during the
--- load of grand tables (at mysqld startup) thus three following inserts
--- do not affect anything
-
-INSERT IGNORE user (User, Host, Password) SELECT User, Host, "*" FROM db;
-INSERT IGNORE user (User, Host, Password) SELECT User, Host, "*" FROM tables_priv;
-INSERT IGNORE user (User, Host, Password) SELECT User, Host, "*" FROM columns_priv;
-
-SELECT DISTINCT
-"There are user accounts with the username 'PUBLIC'. In the SQL-1999
-(or later) standard this name is reserved for PUBLIC role and can
-not be used as a valid user name. Consider renaming these accounts before
-upgrading to MySQL-5.0.
-These accounts are:" x
-FROM user WHERE user='PUBLIC';
-SELECT CONCAT(user,'@',host) FROM user WHERE user='PUBLIC';
-
diff --git a/scripts/mysqld_safe-watch.sh b/scripts/mysqld_safe-watch.sh
deleted file mode 100644
index c59b3b2614d..00000000000
--- a/scripts/mysqld_safe-watch.sh
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/bin/sh
-# Copyright Abandoned 1996 TCX DataKonsult AB & Monty Program KB & Detron HB
-# This file is public domain and comes with NO WARRANTY of any kind
-#
-# scripts to start the MySQL demon and restart it if it dies unexpectedly
-#
-# This should be executed in the MySQL base directory if you are using a
-# binary installation that has other paths than you are using.
-#
-# mysql.server works by first doing a cd to the base directory and from there
-# executing mysqld_safe
-
-# Check if we are starting this relative (for the binary release)
-if test -f ./data/mysql/db.frm -a -f ./share/mysql/english/errmsg.sys -a \
- -x ./bin/mysqld
-then
- MY_BASEDIR_VERSION=`pwd` # Where bin, share and data is
- DATADIR=$MY_BASEDIR_VERSION/data # Where the databases are
- ledir=$MY_BASEDIR_VERSION/bin # Where mysqld are
-# Check if this is a 'moved install directory'
-elif test -f ./var/mysql/db.frm -a -f ./share/mysql/english/errmsg.sys -a \
- -x ./libexec/mysqld
-then
- MY_BASEDIR_VERSION=`pwd` # Where libexec, share and var is
- DATADIR=$MY_BASEDIR_VERSION/var # Where the databases are
- ledir=$MY_BASEDIR_VERSION/libexec # Where mysqld are
-else
- MY_BASEDIR_VERSION=/usr/local/mysql
- DATADIR=/usr/local/mysql/var
- ledir=/usr/local/mysql/libexec
-fi
-
-hostname=`@HOSTNAME@`
-pidfile=$DATADIR/$hostname.pid
-log=$DATADIR/$hostname.log
-err=$DATADIR/$hostname.err
-lockfile=$DATADIR/$hostname.lock
-
-#
-# If there exists an old pid file, check if the demon is already running
-# Note: The switches to 'ps' may depend on your operating system
-
-if test -f $pidfile
-then
- PID=`cat $pidfile`
- if /bin/kill -0 $PID
- then
- if /bin/ps -p $PID | grep mysqld > /dev/null
- then # The pid contains a mysqld process
- echo "A mysqld process already exists"
- echo "A mysqld process already exists at " `date` >> $log
- exit 1;
- fi
- fi
- rm -f $pidfile
- if test -f $pidfile
- then
- echo "Fatal error: Can't remove the pid file: $pidfile"
- echo "Fatal error: Can't remove the pid file: $pidfile at " `date` >> $log
- echo "Please remove it manually and start $0 again"
- echo "mysqld demon not started"
- exit 1;
- fi
-fi
-
-echo "Starting mysqld demon with databases from $DATADIR"
-
-#Default communication ports
-#MYSQL_TCP_PORT=3306
-if test -z "$MYSQL_UNIX_PORT"
-then
- MYSQL_UNIX_PORT="/tmp/mysql.sock"
- export MYSQL_UNIX_PORT
-fi
-#export MYSQL_TCP_PORT
-
-# Does this work on all systems?
-#if type ulimit | grep "shell builtin" > /dev/null
-#then
-# ulimit -n 256 > /dev/null 2>&1 # Fix for BSD and FreeBSD systems
-#fi
-
-echo "mysqld started on " `date` >> $log
-bin/zap -f $lockfile < /dev/null > /dev/null 2>&1
-rm -f $lockfile
-$MY_BASEDIR_VERSION/bin/watchdog_mysqld $lockfile $pidfile $MY_BASEDIR_VERSION/bin $DATADIR 3 10 >> $err 2>&1 &
-restart_pid=$!
-
-while true
-do
- rm -f $MYSQL_UNIX_PORT $pidfile # Some extra safety
- lockfile -1 -r10 $lockfile >/dev/null 2>&1
- if test "$#" -eq 0
- then
- nohup $ledir/mysqld --basedir=$MY_BASEDIR_VERSION --datadir=$DATADIR \
- --skip-locking >> $err 2>&1 &
- else
- nohup $ledir/mysqld --basedir=$MY_BASEDIR_VERSION --datadir=$DATADIR \
- --skip-locking "$@" >> $err 2>&1 &
- fi
- pid=$!
- rm -f $lockfile
- wait $pid;
-
- lockfile -1 -r10 $lockfile >/dev/null 2>&1
- rm -f $lockfile
- if test ! -f $pidfile # This is removed if normal shutdown
- then
- break;
- fi
- if true
- then
- # Test if one proces was hanging.
- # This is only a fix for Linux (running as base 3 mysqld processes)
- # but should work for the rest of the servers.
- # The only thing is ps x => redhat 5 gives warnings when using ps -x.
- # kill -9 is used or the proces won't react on the kill.
- numofproces=`ps x | grep -v "grep" | grep -c $ledir/mysqld`
- echo -e "\nNumber of processes running now: $numofproces" | tee -a $log
- I=1
- while test "$I" -le "$numofproces"
- do
- PROC=`ps x | grep $ledir/mysqld | grep -v "grep" | tail -1`
- for T in $PROC
- do
- break
- done
- # echo "TEST $I - $T **"
- if kill -9 $T
- then
- echo "mysqld proces hanging, pid $T - killed" | tee -a $log
- else
- break
- fi
- I=`expr $I + 1`
- done
- fi
- echo "mysqld restarted" | tee -a $log
- # Check all tables and repair any wrong tables.
- $MY_BASEDIR_VERSION/bin/isamchk -sf $DATADIR/*/*.ISM >> $err 2>&1
-done
-if test $restart_pid -gt 0
-then
- kill $restart_pid > /dev/null 2>&1
- sleep 1;
- kill -9 $restart_pid > /dev/null 2>&1
-fi
-
-echo -n "mysqld ended on " `date` >> $log
-echo "mysqld demon ended"
diff --git a/sql/field.cc b/sql/field.cc
index 1176257359f..9c504f186b3 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -5989,7 +5989,7 @@ int Field_str::store(double nr)
uint Field::is_equal(create_field *new_field)
{
- return (new_field->sql_type == type());
+ return (new_field->sql_type == real_type());
}
@@ -6001,7 +6001,7 @@ uint Field_str::is_equal(create_field *new_field)
(flags & (BINCMP_FLAG | BINARY_FLAG))))
return 0; /* One of the fields is binary and the other one isn't */
- return ((new_field->sql_type == type()) &&
+ return ((new_field->sql_type == real_type()) &&
new_field->charset == field_charset &&
new_field->length == max_length());
}
@@ -6798,7 +6798,7 @@ Field *Field_varstring::new_key_field(MEM_ROOT *root,
uint Field_varstring::is_equal(create_field *new_field)
{
- if (new_field->sql_type == type() &&
+ if (new_field->sql_type == real_type() &&
new_field->charset == field_charset)
{
if (new_field->length == max_length())
@@ -7957,12 +7957,12 @@ bool Field_num::eq_def(Field *field)
uint Field_num::is_equal(create_field *new_field)
{
- return ((new_field->sql_type == type()) &&
+ return ((new_field->sql_type == real_type()) &&
((new_field->flags & UNSIGNED_FLAG) == (uint) (flags &
UNSIGNED_FLAG)) &&
((new_field->flags & AUTO_INCREMENT_FLAG) ==
(uint) (flags & AUTO_INCREMENT_FLAG)) &&
- (new_field->length >= max_length()));
+ (new_field->length <= max_length()));
}
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index a24a083ec8a..42def845174 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -1769,25 +1769,6 @@ innobase_report_binlog_offset_and_commit(
trx->mysql_log_file_name = log_file_name;
trx->mysql_log_offset = (ib_longlong)end_offset;
-#ifdef HAVE_REPLICATION
- if (thd->variables.sync_replication) {
- /* Let us store the binlog file name and the position, so that
- we know how long to wait for the binlog to the replicated to
- the slave in synchronous replication. */
-
- if (trx->repl_wait_binlog_name == NULL) {
-
- trx->repl_wait_binlog_name =
- (char*)mem_alloc_noninline(FN_REFLEN + 100);
- }
-
- ut_a(strlen(log_file_name) < FN_REFLEN + 100);
-
- strcpy(trx->repl_wait_binlog_name, log_file_name);
-
- trx->repl_wait_binlog_pos = (ib_longlong)end_offset;
- }
-#endif /* HAVE_REPLICATION */
trx->flush_log_later = TRUE;
innobase_commit(thd, TRUE);
@@ -1856,117 +1837,6 @@ innobase_commit_complete(
trx_commit_complete_for_mysql(trx);
}
-#ifdef HAVE_REPLICATION
- if (thd->variables.sync_replication
- && trx->repl_wait_binlog_name
- && innobase_repl_state != 0) {
-
- struct timespec abstime;
- int cmp;
- int ret;
-
- /* In synchronous replication, let us wait until the MySQL
- replication has sent the relevant binlog segment to the
- replication slave. */
-
- pthread_mutex_lock(&innobase_repl_cond_mutex);
-try_again:
- if (innobase_repl_state == 0) {
-
- pthread_mutex_unlock(&innobase_repl_cond_mutex);
-
- return(0);
- }
-
- cmp = strcmp(innobase_repl_file_name,
- trx->repl_wait_binlog_name);
- if (cmp > 0
- || (cmp == 0 && innobase_repl_pos
- >= (my_off_t)trx->repl_wait_binlog_pos)) {
- /* We have already sent the relevant binlog to the
- slave: no need to wait here */
-
- pthread_mutex_unlock(&innobase_repl_cond_mutex);
-
-/* printf("Binlog now sent\n"); */
-
- return(0);
- }
-
- /* Let us update the info about the minimum binlog position
- of waiting threads in the innobase_repl_... variables */
-
- if (innobase_repl_wait_file_name_inited != 0) {
- cmp = strcmp(trx->repl_wait_binlog_name,
- innobase_repl_wait_file_name);
- if (cmp < 0
- || (cmp == 0
- && (my_off_t)trx->repl_wait_binlog_pos
- <= innobase_repl_wait_pos)) {
- /* This thd has an even lower position, let
- us update the minimum info */
-
- strcpy(innobase_repl_wait_file_name,
- trx->repl_wait_binlog_name);
-
- innobase_repl_wait_pos =
- trx->repl_wait_binlog_pos;
- }
- } else {
- strcpy(innobase_repl_wait_file_name,
- trx->repl_wait_binlog_name);
-
- innobase_repl_wait_pos = trx->repl_wait_binlog_pos;
-
- innobase_repl_wait_file_name_inited = 1;
- }
- set_timespec(abstime, thd->variables.sync_replication_timeout);
-
- /* Let us suspend this thread to wait on the condition;
- when replication has progressed far enough, we will release
- these waiting threads. The following call
- pthread_cond_timedwait also atomically unlocks
- innobase_repl_cond_mutex. */
-
- innobase_repl_n_wait_threads++;
-
-/* printf("Waiting for binlog to be sent\n"); */
-
- ret = pthread_cond_timedwait(&innobase_repl_cond,
- &innobase_repl_cond_mutex, &abstime);
- innobase_repl_n_wait_threads--;
-
- if (ret != 0) {
- ut_print_timestamp(stderr);
-
- sql_print_error("MySQL synchronous replication was "
- "not able to send the binlog to the "
- "slave within the timeout %lu. We "
- "assume that the slave has become "
- "inaccessible, and switch off "
- "synchronous replication until the "
- "communication to the slave works "
- "again. MySQL synchronous replication "
- "has sent binlog to the slave up to "
- "file %s, position %lu. This "
- "transaction needs it to be sent up "
- "to file %s, position %lu.",
- thd->variables.sync_replication_timeout,
- innobase_repl_file_name,
- (ulong) innobase_repl_pos,
- trx->repl_wait_binlog_name,
- (ulong) trx->repl_wait_binlog_pos);
-
- innobase_repl_state = 0;
-
- pthread_mutex_unlock(&innobase_repl_cond_mutex);
-
- return(0);
- }
-
- goto try_again;
- }
-#endif // HAVE_REPLICATION
return(0);
}
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index 6bbe3a562d7..4f0c9eb151b 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -316,9 +316,6 @@ int innobase_rollback_by_xid(
XID *xid); /* in : X/Open XA Transaction Identification */
-int innobase_repl_report_sent_binlog(THD *thd, char *log_file_name,
- my_off_t end_offset);
-
/***********************************************************************
Create a consistent view for a cursor based on current transaction
which is created if the corresponding MySQL thread still lacks one.
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 587eabb82d2..aa555e310cb 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -315,6 +315,14 @@ int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans)
/*
Place holder for ha_ndbcluster thread specific data
*/
+static
+byte *thd_ndb_share_get_key(THD_NDB_SHARE *thd_ndb_share, uint *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length= sizeof(thd_ndb_share->key);
+ return (byte*) thd_ndb_share->key;
+}
+
Thd_ndb::Thd_ndb()
{
ndb= new Ndb(g_ndb_cluster_connection, "");
@@ -324,6 +332,8 @@ Thd_ndb::Thd_ndb()
stmt= NULL;
error= 0;
options= 0;
+ (void) hash_init(&open_tables, &my_charset_bin, 5, 0, 0,
+ (hash_get_key)thd_ndb_share_get_key, 0, 0);
}
Thd_ndb::~Thd_ndb()
@@ -347,6 +357,44 @@ Thd_ndb::~Thd_ndb()
ndb= NULL;
}
changed_tables.empty();
+ hash_free(&open_tables);
+}
+
+void
+Thd_ndb::init_open_tables()
+{
+ count= 0;
+ error= 0;
+ my_hash_reset(&open_tables);
+}
+
+THD_NDB_SHARE *
+Thd_ndb::get_open_table(THD *thd, const void *key)
+{
+ DBUG_ENTER("Thd_ndb::get_open_table");
+ HASH_SEARCH_STATE state;
+ THD_NDB_SHARE *thd_ndb_share=
+ (THD_NDB_SHARE*)hash_first(&open_tables, (byte *)key, sizeof(key), &state);
+ while (thd_ndb_share && thd_ndb_share->key != key)
+ thd_ndb_share= (THD_NDB_SHARE*)hash_next(&open_tables, (byte *)key, sizeof(key), &state);
+ if (thd_ndb_share == 0)
+ {
+ thd_ndb_share= (THD_NDB_SHARE *) alloc_root(&thd->transaction.mem_root,
+ sizeof(THD_NDB_SHARE));
+ thd_ndb_share->key= key;
+ thd_ndb_share->stat.last_count= count;
+ thd_ndb_share->stat.no_uncommitted_rows_count= 0;
+ thd_ndb_share->stat.records= ~(ha_rows)0;
+ my_hash_insert(&open_tables, (byte *)thd_ndb_share);
+ }
+ else if (thd_ndb_share->stat.last_count != count)
+ {
+ thd_ndb_share->stat.last_count= count;
+ thd_ndb_share->stat.no_uncommitted_rows_count= 0;
+ thd_ndb_share->stat.records= ~(ha_rows)0;
+ }
+ DBUG_PRINT("exit", ("thd_ndb_share: 0x%x key: 0x%x", thd_ndb_share, key));
+ DBUG_RETURN(thd_ndb_share);
}
inline
@@ -359,12 +407,6 @@ Ndb *ha_ndbcluster::get_ndb()
 * manage uncommitted insert/deletes during transaction to get records correct
*/
-struct Ndb_local_table_statistics {
- int no_uncommitted_rows_count;
- ulong last_count;
- ha_rows records;
-};
-
void ha_ndbcluster::set_rec_per_key()
{
DBUG_ENTER("ha_ndbcluster::get_status_const");
@@ -380,14 +422,14 @@ void ha_ndbcluster::records_update()
if (m_ha_not_exact_count)
return;
DBUG_ENTER("ha_ndbcluster::records_update");
- struct Ndb_local_table_statistics *info=
- (struct Ndb_local_table_statistics *)m_table_info;
+ struct Ndb_local_table_statistics *info= m_table_info;
DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
((const NDBTAB *)m_table)->getTableId(),
info->no_uncommitted_rows_count));
// if (info->records == ~(ha_rows)0)
{
Ndb *ndb= get_ndb();
+ ndb->setDatabaseName(m_dbname);
struct Ndb_statistics stat;
if (ndb_get_table_statistics(ndb, m_tabname, &stat) == 0){
mean_rec_length= stat.row_size;
@@ -413,33 +455,12 @@ void ha_ndbcluster::no_uncommitted_rows_execute_failure()
DBUG_VOID_RETURN;
}
-void ha_ndbcluster::no_uncommitted_rows_init(THD *thd)
-{
- if (m_ha_not_exact_count)
- return;
- DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_init");
- struct Ndb_local_table_statistics *info=
- (struct Ndb_local_table_statistics *)m_table_info;
- Thd_ndb *thd_ndb= get_thd_ndb(thd);
- if (info->last_count != thd_ndb->count)
- {
- info->last_count= thd_ndb->count;
- info->no_uncommitted_rows_count= 0;
- info->records= ~(ha_rows)0;
- DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
- ((const NDBTAB *)m_table)->getTableId(),
- info->no_uncommitted_rows_count));
- }
- DBUG_VOID_RETURN;
-}
-
void ha_ndbcluster::no_uncommitted_rows_update(int c)
{
if (m_ha_not_exact_count)
return;
DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_update");
- struct Ndb_local_table_statistics *info=
- (struct Ndb_local_table_statistics *)m_table_info;
+ struct Ndb_local_table_statistics *info= m_table_info;
info->no_uncommitted_rows_count+= c;
DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
((const NDBTAB *)m_table)->getTableId(),
@@ -466,7 +487,7 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
# The mapped error code
*/
-int ha_ndbcluster::invalidate_dictionary_cache(bool global)
+int ha_ndbcluster::invalidate_dictionary_cache(bool global, const NDBTAB *ndbtab)
{
NDBDICT *dict= get_ndb()->getDictionary();
DBUG_ENTER("invalidate_dictionary_cache");
@@ -494,20 +515,17 @@ int ha_ndbcluster::invalidate_dictionary_cache(bool global)
DBUG_PRINT("info", ("Released ndbcluster mutex"));
}
#endif
- const NDBTAB *tab= dict->getTable(m_tabname);
- if (!tab)
- DBUG_RETURN(1);
- if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
+ if (!ndbtab)
{
- // Global cache has already been invalidated
- dict->removeCachedTable(m_tabname);
- global= FALSE;
- DBUG_PRINT("info", ("global: %d", global));
+ ndbtab= dict->getTable(m_tabname);
+ if (!ndbtab)
+ DBUG_RETURN(1);
}
- else
- dict->invalidateTable(m_tabname);
+ dict->invalidateTable(ndbtab);
table_share->version= 0L; /* Free when thread is ready */
}
+ else if (ndbtab)
+ dict->removeCachedTable(ndbtab);
else
dict->removeCachedTable(m_tabname);
@@ -564,7 +582,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
table_list.alias= table_list.table_name= m_tabname;
close_cached_tables(current_thd, 0, &table_list);
- invalidate_dictionary_cache(TRUE);
+ invalidate_dictionary_cache(TRUE, m_table);
if (err.code==284)
{
@@ -1041,7 +1059,7 @@ int ha_ndbcluster::get_metadata(const char *path)
// Check if thread has stale local cache
if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
{
- invalidate_dictionary_cache(FALSE);
+ invalidate_dictionary_cache(FALSE, tab);
if (!(tab= dict->getTable(m_tabname)))
ERR_RETURN(dict->getNdbError());
DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
@@ -1064,7 +1082,7 @@ int ha_ndbcluster::get_metadata(const char *path)
if (!invalidating_ndb_table)
{
DBUG_PRINT("info", ("Invalidating table"));
- invalidate_dictionary_cache(TRUE);
+ invalidate_dictionary_cache(TRUE, tab);
invalidating_ndb_table= TRUE;
}
else
@@ -1091,7 +1109,7 @@ int ha_ndbcluster::get_metadata(const char *path)
DBUG_RETURN(error);
m_table_version= tab->getObjectVersion();
- m_table= (void *)tab;
+ m_table= tab;
m_table_info= NULL; // Set in external lock
DBUG_RETURN(open_indexes(ndb, table, FALSE));
@@ -1150,7 +1168,7 @@ int ha_ndbcluster::table_changed(const void *pack_frm_data, uint pack_frm_len)
// Check if thread has stale local cache
if (orig_tab->getObjectStatus() == NdbDictionary::Object::Invalid)
{
- dict->removeCachedTable(m_tabname);
+ dict->removeCachedTable(orig_tab);
if (!(orig_tab= dict->getTable(m_tabname)))
ERR_RETURN(dict->getNdbError());
}
@@ -1219,13 +1237,31 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
int error= 0;
NDB_INDEX_TYPE idx_type= get_index_type_from_table(index_no);
m_index[index_no].type= idx_type;
- DBUG_ENTER("ha_ndbcluster::get_index_handle");
+ DBUG_ENTER("ha_ndbcluster::add_index_handle");
+ DBUG_PRINT("enter", ("table %s", m_tabname));
if (idx_type != PRIMARY_KEY_INDEX && idx_type != UNIQUE_INDEX)
{
DBUG_PRINT("info", ("Get handle to index %s", index_name));
- const NDBINDEX *index= dict->getIndex(index_name, m_tabname);
- if (!index) ERR_RETURN(dict->getNdbError());
+ const NDBINDEX *index;
+ do
+ {
+ index= dict->getIndex(index_name, m_tabname);
+ if (!index)
+ ERR_RETURN(dict->getNdbError());
+ DBUG_PRINT("info", ("index: 0x%x id: %d version: %d.%d status: %d",
+ index,
+ index->getObjectId(),
+ index->getObjectVersion() & 0xFFFFFF,
+ index->getObjectVersion() >> 24,
+ index->getObjectStatus()));
+ if (index->getObjectStatus() != NdbDictionary::Object::Retrieved)
+ {
+ dict->removeCachedIndex(index);
+ continue;
+ }
+ break;
+ } while (1);
m_index[index_no].index= (void *) index;
// ordered index - add stats
NDB_INDEX_DATA& d=m_index[index_no];
@@ -1254,8 +1290,25 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
m_has_unique_index= TRUE;
strxnmov(unique_index_name, FN_LEN, index_name, unique_suffix, NullS);
DBUG_PRINT("info", ("Get handle to unique_index %s", unique_index_name));
- const NDBINDEX *index= dict->getIndex(unique_index_name, m_tabname);
- if (!index) ERR_RETURN(dict->getNdbError());
+ const NDBINDEX *index;
+ do
+ {
+ index= dict->getIndex(unique_index_name, m_tabname);
+ if (!index)
+ ERR_RETURN(dict->getNdbError());
+ DBUG_PRINT("info", ("index: 0x%x id: %d version: %d.%d status: %d",
+ index,
+ index->getObjectId(),
+ index->getObjectVersion() & 0xFFFFFF,
+ index->getObjectVersion() >> 24,
+ index->getObjectStatus()));
+ if (index->getObjectStatus() != NdbDictionary::Object::Retrieved)
+ {
+ dict->removeCachedIndex(index);
+ continue;
+ }
+ break;
+ } while (1);
m_index[index_no].unique_index= (void *) index;
error= fix_unique_index_attr_order(m_index[index_no], index, key_info);
}
@@ -3544,6 +3597,7 @@ void ha_ndbcluster::info(uint flag)
if ((my_errno= check_ndb_connection()))
DBUG_VOID_RETURN;
Ndb *ndb= get_ndb();
+ ndb->setDatabaseName(m_dbname);
struct Ndb_statistics stat;
if (current_thd->variables.ndb_use_exact_count &&
ndb_get_table_statistics(ndb, m_tabname, &stat) == 0)
@@ -3883,7 +3937,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
trans= ndb->startTransaction();
if (trans == NULL)
ERR_RETURN(ndb->getNdbError());
- no_uncommitted_rows_reset(thd);
+ thd_ndb->init_open_tables();
thd_ndb->stmt= trans;
trans_register_ha(thd, FALSE, &ndbcluster_hton);
}
@@ -3898,7 +3952,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
trans= ndb->startTransaction();
if (trans == NULL)
ERR_RETURN(ndb->getNdbError());
- no_uncommitted_rows_reset(thd);
+ thd_ndb->init_open_tables();
thd_ndb->all= trans;
trans_register_ha(thd, TRUE, &ndbcluster_hton);
@@ -3943,8 +3997,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
{
NDBDICT *dict= ndb->getDictionary();
const NDBTAB *tab;
- void *tab_info;
- if (!(tab= dict->getTable(m_tabname, &tab_info)))
+ if (!(tab= dict->getTable(m_tabname)))
ERR_RETURN(dict->getNdbError());
DBUG_PRINT("info", ("Table schema version: %d",
tab->getObjectVersion()));
@@ -3954,8 +4007,8 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
if ((trans && tab->getObjectStatus() != NdbDictionary::Object::Retrieved)
|| tab->getObjectStatus() == NdbDictionary::Object::Invalid)
{
- invalidate_dictionary_cache(FALSE);
- if (!(tab= dict->getTable(m_tabname, &tab_info)))
+ invalidate_dictionary_cache(FALSE, tab);
+ if (!(tab= dict->getTable(m_tabname)))
ERR_RETURN(dict->getNdbError());
DBUG_PRINT("info", ("Table schema version: %d",
tab->getObjectVersion()));
@@ -3970,14 +4023,14 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
}
if (m_table != (void *)tab)
{
- m_table= (void *)tab;
+ m_table= tab;
m_table_version = tab->getObjectVersion();
if (!(my_errno= open_indexes(ndb, table, FALSE)))
DBUG_RETURN(my_errno);
}
- m_table_info= tab_info;
}
- no_uncommitted_rows_init(thd);
+ m_thd_ndb_share= thd_ndb->get_open_table(thd, m_table);
+ m_table_info= &m_thd_ndb_share->stat;
}
else
{
@@ -4711,7 +4764,10 @@ int ha_ndbcluster::create(const char *name,
DBUG_RETURN(my_errno);
}
-int ha_ndbcluster::create_handler_files(const char *file, HA_CREATE_INFO *info)
+int ha_ndbcluster::create_handler_files(const char *file,
+ const char *old_name,
+ int action_flag,
+ HA_CREATE_INFO *info)
{
char path[FN_REFLEN];
const char *name;
@@ -4723,6 +4779,10 @@ int ha_ndbcluster::create_handler_files(const char *file, HA_CREATE_INFO *info)
DBUG_ENTER("create_handler_files");
+ if (action_flag != CHF_INDEX_FLAG)
+ {
+ DBUG_RETURN(FALSE);
+ }
DBUG_PRINT("enter", ("file: %s", file));
if (!(ndb= get_ndb()))
DBUG_RETURN(HA_ERR_NO_CONNECTION);
@@ -4990,7 +5050,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
// Check if thread has stale local cache
if (orig_tab->getObjectStatus() == NdbDictionary::Object::Invalid)
{
- dict->removeCachedTable(m_tabname);
+ dict->removeCachedTable(orig_tab);
if (!(orig_tab= dict->getTable(m_tabname)))
ERR_RETURN(dict->getNdbError());
}
@@ -5002,7 +5062,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
DBUG_ASSERT(r == 0);
}
#endif
- m_table= (void *)orig_tab;
+ m_table= orig_tab;
// Change current database to that of target table
set_dbname(to);
ndb->setDatabaseName(m_dbname);
@@ -5518,9 +5578,6 @@ Thd_ndb* ha_ndbcluster::seize_thd_ndb()
DBUG_ENTER("seize_thd_ndb");
thd_ndb= new Thd_ndb();
- thd_ndb->ndb->getDictionary()->set_local_table_data_size(
- sizeof(Ndb_local_table_statistics)
- );
if (thd_ndb->ndb->init(max_transactions) != 0)
{
ERR_PRINT(thd_ndb->ndb->getNdbError());
@@ -5610,7 +5667,6 @@ int ndbcluster_discover(THD* thd, const char *db, const char *name,
DBUG_RETURN(HA_ERR_NO_CONNECTION);
ndb->setDatabaseName(db);
NDBDICT* dict= ndb->getDictionary();
- dict->set_local_table_data_size(sizeof(Ndb_local_table_statistics));
dict->invalidateTable(name);
build_table_filename(key, sizeof(key), db, name, "");
NDB_SHARE *share= get_share(key, 0, false);
@@ -5682,7 +5738,6 @@ int ndbcluster_table_exists_in_engine(THD* thd, const char *db, const char *name
ndb->setDatabaseName(db);
NDBDICT* dict= ndb->getDictionary();
- dict->set_local_table_data_size(sizeof(Ndb_local_table_statistics));
dict->invalidateTable(name);
if (!(tab= dict->getTable(name)))
{
@@ -6180,7 +6235,6 @@ static bool ndbcluster_init()
DBUG_PRINT("error", ("failed to create global ndb object"));
goto ndbcluster_init_error;
}
- g_ndb->getDictionary()->set_local_table_data_size(sizeof(Ndb_local_table_statistics));
if (g_ndb->init() != 0)
{
ERR_PRINT (g_ndb->getNdbError());
@@ -6475,8 +6529,7 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key,
{
// We must provide approx table rows
Uint64 table_rows=0;
- Ndb_local_table_statistics *info=
- (Ndb_local_table_statistics *)m_table_info;
+ Ndb_local_table_statistics *info= m_table_info;
if (info->records != ~(ha_rows)0 && info->records != 0)
{
table_rows = info->records;
@@ -9988,7 +10041,7 @@ bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts)
// Check if thread has stale local cache
if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
{
- invalidate_dictionary_cache(FALSE);
+ invalidate_dictionary_cache(FALSE, tab);
if (!(tab= dict->getTable(m_tabname)))
ERR_BREAK(dict->getNdbError(), err);
}
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index b375e30338f..0af65a373bd 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -523,11 +523,26 @@ enum THD_NDB_OPTIONS
TNO_NO_LOG_SCHEMA_OP= 1 << 0
};
+struct Ndb_local_table_statistics {
+ int no_uncommitted_rows_count;
+ ulong last_count;
+ ha_rows records;
+};
+
+typedef struct st_thd_ndb_share {
+ const void *key;
+ struct Ndb_local_table_statistics stat;
+} THD_NDB_SHARE;
+
class Thd_ndb
{
public:
Thd_ndb();
~Thd_ndb();
+
+ void init_open_tables();
+ THD_NDB_SHARE *get_open_table(THD *thd, const void *key);
+
Ndb *ndb;
ulong count;
uint lock_count;
@@ -536,6 +551,7 @@ class Thd_ndb
int error;
uint32 options;
List<NDB_SHARE> changed_tables;
+ HASH open_tables;
};
class ha_ndbcluster: public handler
@@ -610,7 +626,8 @@ class ha_ndbcluster: public handler
int rename_table(const char *from, const char *to);
int delete_table(const char *name);
int create(const char *name, TABLE *form, HA_CREATE_INFO *info);
- int create_handler_files(const char *file, HA_CREATE_INFO *info);
+ int create_handler_files(const char *file, const char *old_name,
+ int action_flag, HA_CREATE_INFO *info);
int get_default_no_partitions(ulonglong max_rows);
bool get_no_parts(const char *name, uint *no_parts);
void set_auto_partitions(partition_info *part_info);
@@ -778,7 +795,8 @@ private:
void print_results();
ulonglong get_auto_increment();
- int invalidate_dictionary_cache(bool global);
+ int invalidate_dictionary_cache(bool global,
+ const NdbDictionary::Table *ndbtab);
int ndb_err(NdbTransaction*);
bool uses_blob_value();
@@ -792,7 +810,6 @@ private:
void records_update();
void no_uncommitted_rows_execute_failure();
void no_uncommitted_rows_update(int);
- void no_uncommitted_rows_init(THD *);
void no_uncommitted_rows_reset(THD *);
/*
@@ -816,9 +833,9 @@ private:
NdbTransaction *m_active_trans;
NdbScanOperation *m_active_cursor;
- void *m_table;
+ const NdbDictionary::Table *m_table;
int m_table_version;
- void *m_table_info;
+ struct Ndb_local_table_statistics *m_table_info;
char m_dbname[FN_HEADLEN];
//char m_schemaname[FN_HEADLEN];
char m_tabname[FN_HEADLEN];
@@ -826,6 +843,7 @@ private:
THR_LOCK_DATA m_lock;
NDB_SHARE *m_share;
NDB_INDEX_DATA m_index[MAX_KEY];
+ THD_NDB_SHARE *m_thd_ndb_share;
// NdbRecAttr has no reference to blob
NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
byte m_ref[NDB_HIDDEN_PRIMARY_KEY_LENGTH];
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index 79e4fc790e0..a39d92ae7a5 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -1068,20 +1068,27 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
MY_BITMAP schema_subscribers;
uint32 bitbuf[sizeof(ndb_schema_object->slock)/4];
{
- int i;
+ int i, updated= 0;
+ int no_storage_nodes= g_ndb_cluster_connection->no_db_nodes();
bitmap_init(&schema_subscribers, bitbuf, sizeof(bitbuf)*8, false);
bitmap_set_all(&schema_subscribers);
(void) pthread_mutex_lock(&schema_share->mutex);
- for (i= 0; i < ndb_number_of_storage_nodes; i++)
+ for (i= 0; i < no_storage_nodes; i++)
{
MY_BITMAP *table_subscribers= &schema_share->subscriber_bitmap[i];
if (!bitmap_is_clear_all(table_subscribers))
+ {
bitmap_intersect(&schema_subscribers,
table_subscribers);
+ updated= 1;
+ }
}
(void) pthread_mutex_unlock(&schema_share->mutex);
- bitmap_clear_bit(&schema_subscribers, node_id);
-
+ if (updated)
+ bitmap_clear_bit(&schema_subscribers, node_id);
+ else
+ bitmap_clear_all(&schema_subscribers);
+
if (ndb_schema_object)
{
(void) pthread_mutex_lock(&ndb_schema_object->mutex);
@@ -1227,13 +1234,14 @@ end:
{
struct timespec abstime;
int i;
+ int no_storage_nodes= g_ndb_cluster_connection->no_db_nodes();
set_timespec(abstime, 1);
int ret= pthread_cond_timedwait(&injector_cond,
&ndb_schema_object->mutex,
&abstime);
(void) pthread_mutex_lock(&schema_share->mutex);
- for (i= 0; i < ndb_number_of_storage_nodes; i++)
+ for (i= 0; i < no_storage_nodes; i++)
{
/* remove any unsubscribed from schema_subscribers */
MY_BITMAP *tmp= &schema_share->subscriber_bitmap[i];
@@ -1430,6 +1438,10 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
NDB_SHARE *share)
{
DBUG_ENTER("ndb_handle_schema_change");
+ TABLE* table= share->table;
+ TABLE_SHARE *table_share= table->s;
+ const char *dbname= table_share->db.str;
+ const char *tabname= table_share->table_name.str;
bool do_close_cached_tables= FALSE;
bool is_online_alter_table= FALSE;
bool is_rename_table= FALSE;
@@ -1449,70 +1461,68 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
}
}
- if (is_remote_change) /* includes CLUSTER_FAILURE */
+ /*
+ Refresh local dictionary cache by
+    invalidating the table and all its indexes
+ */
+ ndb->setDatabaseName(dbname);
+ Thd_ndb *thd_ndb= get_thd_ndb(thd);
+ DBUG_ASSERT(thd_ndb != NULL);
+ Ndb* old_ndb= thd_ndb->ndb;
+ thd_ndb->ndb= ndb;
+ ha_ndbcluster table_handler(table_share);
+ (void)strxmov(table_handler.m_dbname, dbname, NullS);
+ (void)strxmov(table_handler.m_tabname, tabname, NullS);
+ table_handler.open_indexes(ndb, table, TRUE);
+ table_handler.invalidate_dictionary_cache(TRUE, 0);
+ thd_ndb->ndb= old_ndb;
+
+ /*
+ Refresh local frm file and dictionary cache if
+ remote on-line alter table
+ */
+ if (is_remote_change && is_online_alter_table)
{
- TABLE* table= share->table;
- TABLE_SHARE *table_share= table->s;
- const char *dbname= table_share->db.str;
+ const char *tabname= table_share->table_name.str;
+ char key[FN_REFLEN];
+ const void *data= 0, *pack_data= 0;
+ uint length, pack_length;
+ int error;
+ NDBDICT *dict= ndb->getDictionary();
+ const NDBTAB *altered_table= pOp->getTable();
- /*
- Invalidate table and all it's indexes
+ DBUG_PRINT("info", ("Detected frm change of table %s.%s",
+ dbname, tabname));
+ build_table_filename(key, FN_LEN-1, dbname, tabname, NullS);
+ /*
+      If the frm of the altered table is different from the one on
+ disk then overwrite it with the new table definition
*/
- ndb->setDatabaseName(dbname);
- Thd_ndb *thd_ndb= get_thd_ndb(thd);
- DBUG_ASSERT(thd_ndb != NULL);
- Ndb* old_ndb= thd_ndb->ndb;
- thd_ndb->ndb= ndb;
- ha_ndbcluster table_handler(table_share);
- table_handler.set_dbname(share->key);
- table_handler.set_tabname(share->key);
- table_handler.open_indexes(ndb, table, TRUE);
- table_handler.invalidate_dictionary_cache(TRUE);
- thd_ndb->ndb= old_ndb;
-
- if (is_online_alter_table)
- {
- const char *tabname= table_share->table_name.str;
- char key[FN_REFLEN];
- const void *data= 0, *pack_data= 0;
- uint length, pack_length;
- int error;
- NDBDICT *dict= ndb->getDictionary();
- const NDBTAB *altered_table= pOp->getTable();
-
- DBUG_PRINT("info", ("Detected frm change of table %s.%s",
- dbname, tabname));
- build_table_filename(key, FN_LEN-1, dbname, tabname, NullS);
- /*
- If the frm of the altered table is different than the one on
- disk then overwrite it with the new table definition
- */
- if (readfrm(key, &data, &length) == 0 &&
- packfrm(data, length, &pack_data, &pack_length) == 0 &&
- cmp_frm(altered_table, pack_data, pack_length))
+ if (readfrm(key, &data, &length) == 0 &&
+ packfrm(data, length, &pack_data, &pack_length) == 0 &&
+ cmp_frm(altered_table, pack_data, pack_length))
+ {
+ DBUG_DUMP("frm", (char*)altered_table->getFrmData(),
+ altered_table->getFrmLength());
+ pthread_mutex_lock(&LOCK_open);
+ const NDBTAB *old= dict->getTable(tabname);
+        if (!old ||
+ old->getObjectVersion() != altered_table->getObjectVersion())
+ dict->putTable(altered_table);
+
+ if ((error= unpackfrm(&data, &length, altered_table->getFrmData())) ||
+ (error= writefrm(key, data, length)))
{
- DBUG_DUMP("frm", (char*)altered_table->getFrmData(),
- altered_table->getFrmLength());
- pthread_mutex_lock(&LOCK_open);
- const NDBTAB *old= dict->getTable(tabname);
- if (!old &&
- old->getObjectVersion() != altered_table->getObjectVersion())
- dict->putTable(altered_table);
-
- if ((error= unpackfrm(&data, &length, altered_table->getFrmData())) ||
- (error= writefrm(key, data, length)))
- {
- sql_print_information("NDB: Failed write frm for %s.%s, error %d",
- dbname, tabname, error);
- }
- ndbcluster_binlog_close_table(thd, share);
- close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, TRUE);
- if ((error= ndbcluster_binlog_open_table(thd, share,
- table_share, table)))
- sql_print_information("NDB: Failed to re-open table %s.%s",
- dbname, tabname);
- pthread_mutex_unlock(&LOCK_open);
+ sql_print_information("NDB: Failed write frm for %s.%s, error %d",
+ dbname, tabname, error);
}
+ ndbcluster_binlog_close_table(thd, share);
+ close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, TRUE);
+ if ((error= ndbcluster_binlog_open_table(thd, share,
+ table_share, table)))
+ sql_print_information("NDB: Failed to re-open table %s.%s",
+ dbname, tabname);
+ pthread_mutex_unlock(&LOCK_open);
}
}
@@ -1540,6 +1550,21 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
share->table->s->db.length= strlen(share->db);
share->table->s->table_name.str= share->table_name;
share->table->s->table_name.length= strlen(share->table_name);
+ /*
+      Refresh the local dictionary cache by invalidating any
+      old table with the same name and all its indexes
+ */
+ ndb->setDatabaseName(dbname);
+ Thd_ndb *thd_ndb= get_thd_ndb(thd);
+ DBUG_ASSERT(thd_ndb != NULL);
+ Ndb* old_ndb= thd_ndb->ndb;
+ thd_ndb->ndb= ndb;
+ ha_ndbcluster table_handler(table_share);
+ table_handler.set_dbname(share->key);
+ table_handler.set_tabname(share->key);
+ table_handler.open_indexes(ndb, table, TRUE);
+ table_handler.invalidate_dictionary_cache(TRUE, 0);
+ thd_ndb->ndb= old_ndb;
}
DBUG_ASSERT(share->op == pOp || share->op_old == pOp);
if (share->op_old == pOp)
@@ -3070,6 +3095,9 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
Thd_ndb *thd_ndb=0;
int ndb_update_binlog_index= 1;
injector *inj= injector::instance();
+#ifdef RUN_NDB_BINLOG_TIMER
+ Timer main_timer;
+#endif
pthread_mutex_lock(&injector_mutex);
/*
@@ -3172,7 +3200,8 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
thd->proc_info= "Waiting for ndbcluster to start";
pthread_mutex_lock(&injector_mutex);
- while (!schema_share || !apply_status_share)
+ while (!schema_share ||
+ (ndb_binlog_running && !apply_status_share))
{
/* ndb not connected yet */
struct timespec abstime;
@@ -3207,9 +3236,6 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
thd->db= db;
}
-#ifdef RUN_NDB_BINLOG_TIMER
- Timer main_timer;
-#endif
for ( ; !((abort_loop || do_ndbcluster_binlog_close_connection) &&
ndb_latest_handled_binlog_epoch >= g_latest_trans_gci); )
{
@@ -3290,15 +3316,16 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
if (res > 0)
{
DBUG_PRINT("info", ("pollEvents res: %d", res));
-#ifdef RUN_NDB_BINLOG_TIMER
- Timer gci_timer, write_timer;
- int event_count= 0;
-#endif
thd->proc_info= "Processing events";
NdbEventOperation *pOp= i_ndb->nextEvent();
Binlog_index_row row;
while (pOp != NULL)
{
+#ifdef RUN_NDB_BINLOG_TIMER
+ Timer gci_timer, write_timer;
+ int event_count= 0;
+ gci_timer.start();
+#endif
gci= pOp->getGCI();
DBUG_PRINT("info", ("Handling gci: %d", (unsigned)gci));
// sometimes get TE_ALTER with invalid table
@@ -3477,6 +3504,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
DBUG_PRINT("info", ("COMMIT gci: %lld", gci));
if (ndb_update_binlog_index)
ndb_add_binlog_index(thd, &row);
+ ndb_latest_applied_binlog_epoch= gci;
}
ndb_latest_handled_binlog_epoch= gci;
#ifdef RUN_NDB_BINLOG_TIMER
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 3ee9a2954eb..1ab2c4270fd 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -404,88 +404,6 @@ int ha_partition::ha_initialise()
MODULE meta data changes
****************************************************************************/
/*
- Create partition names
-
- SYNOPSIS
- create_partition_name()
- out:out Created partition name string
- in1 First part
- in2 Second part
- name_variant Normal, temporary or renamed partition name
-
- RETURN VALUE
- NONE
-
- DESCRIPTION
- This method is used to calculate the partition name, service routine to
- the del_ren_cre_table method.
-*/
-
-#define NORMAL_PART_NAME 0
-#define TEMP_PART_NAME 1
-#define RENAMED_PART_NAME 2
-static void create_partition_name(char *out, const char *in1,
- const char *in2, uint name_variant,
- bool translate)
-{
- char transl_part_name[FN_REFLEN];
- const char *transl_part;
-
- if (translate)
- {
- tablename_to_filename(in2, transl_part_name, FN_REFLEN);
- transl_part= transl_part_name;
- }
- else
- transl_part= in2;
- if (name_variant == NORMAL_PART_NAME)
- strxmov(out, in1, "#P#", transl_part, NullS);
- else if (name_variant == TEMP_PART_NAME)
- strxmov(out, in1, "#P#", transl_part, "#TMP#", NullS);
- else if (name_variant == RENAMED_PART_NAME)
- strxmov(out, in1, "#P#", transl_part, "#REN#", NullS);
-}
-
-/*
- Create subpartition name
-
- SYNOPSIS
- create_subpartition_name()
- out:out Created partition name string
- in1 First part
- in2 Second part
- in3 Third part
- name_variant Normal, temporary or renamed partition name
-
- RETURN VALUE
- NONE
-
- DESCRIPTION
- This method is used to calculate the subpartition name, service routine to
- the del_ren_cre_table method.
-*/
-
-static void create_subpartition_name(char *out, const char *in1,
- const char *in2, const char *in3,
- uint name_variant)
-{
- char transl_part_name[FN_REFLEN], transl_subpart_name[FN_REFLEN];
-
- tablename_to_filename(in2, transl_part_name, FN_REFLEN);
- tablename_to_filename(in3, transl_subpart_name, FN_REFLEN);
- if (name_variant == NORMAL_PART_NAME)
- strxmov(out, in1, "#P#", transl_part_name,
- "#SP#", transl_subpart_name, NullS);
- else if (name_variant == TEMP_PART_NAME)
- strxmov(out, in1, "#P#", transl_part_name,
- "#SP#", transl_subpart_name, "#TMP#", NullS);
- else if (name_variant == RENAMED_PART_NAME)
- strxmov(out, in1, "#P#", transl_part_name,
- "#SP#", transl_subpart_name, "#REN#", NullS);
-}
-
-
-/*
Delete a table
SYNOPSIS
@@ -576,7 +494,9 @@ int ha_partition::rename_table(const char *from, const char *to)
and types of engines in the partitions.
*/
-int ha_partition::create_handler_files(const char *name,
+int ha_partition::create_handler_files(const char *path,
+ const char *old_path,
+ int action_flag,
HA_CREATE_INFO *create_info)
{
DBUG_ENTER("ha_partition::create_handler_files()");
@@ -585,10 +505,29 @@ int ha_partition::create_handler_files(const char *name,
We need to update total number of parts since we might write the handler
file as part of a partition management command
*/
- if (create_handler_file(name))
+ if (action_flag == CHF_DELETE_FLAG ||
+ action_flag == CHF_RENAME_FLAG)
{
- my_error(ER_CANT_CREATE_HANDLER_FILE, MYF(0));
- DBUG_RETURN(1);
+ char name[FN_REFLEN];
+ char old_name[FN_REFLEN];
+
+ strxmov(name, path, ha_par_ext, NullS);
+ strxmov(old_name, old_path, ha_par_ext, NullS);
+ if ((action_flag == CHF_DELETE_FLAG &&
+ my_delete(name, MYF(MY_WME))) ||
+ (action_flag == CHF_RENAME_FLAG &&
+ my_rename(old_name, name, MYF(MY_WME))))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else if (action_flag == CHF_CREATE_FLAG)
+ {
+ if (create_handler_file(path))
+ {
+ my_error(ER_CANT_CREATE_HANDLER_FILE, MYF(0));
+ DBUG_RETURN(1);
+ }
}
DBUG_RETURN(0);
}
@@ -654,45 +593,26 @@ int ha_partition::create(const char *name, TABLE *table_arg,
int ha_partition::drop_partitions(const char *path)
{
List_iterator<partition_element> part_it(m_part_info->partitions);
- List_iterator<partition_element> temp_it(m_part_info->temp_partitions);
char part_name_buff[FN_REFLEN];
uint no_parts= m_part_info->partitions.elements;
uint part_count= 0;
uint no_subparts= m_part_info->no_subparts;
uint i= 0;
uint name_variant;
- int error= 1;
- bool reorged_parts= (m_reorged_parts > 0);
- bool temp_partitions= (m_part_info->temp_partitions.elements > 0);
+ int ret_error;
+ int error= 0;
DBUG_ENTER("ha_partition::drop_partitions");
- if (temp_partitions)
- no_parts= m_part_info->temp_partitions.elements;
do
{
- partition_element *part_elem;
- if (temp_partitions)
- {
- /*
- We need to remove the reorganised partitions that were put in the
- temp_partitions-list.
- */
- part_elem= temp_it++;
- DBUG_ASSERT(part_elem->part_state == PART_TO_BE_DROPPED);
- }
- else
- part_elem= part_it++;
- if (part_elem->part_state == PART_TO_BE_DROPPED ||
- part_elem->part_state == PART_IS_CHANGED)
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_TO_BE_DROPPED)
{
handler *file;
/*
This part is to be dropped, meaning the part or all its subparts.
*/
name_variant= NORMAL_PART_NAME;
- if (part_elem->part_state == PART_IS_CHANGED ||
- (part_elem->part_state == PART_TO_BE_DROPPED && temp_partitions))
- name_variant= RENAMED_PART_NAME;
if (m_is_sub_partitioned)
{
List_iterator<partition_element> sub_it(part_elem->subpartitions);
@@ -704,12 +624,10 @@ int ha_partition::drop_partitions(const char *path)
create_subpartition_name(part_name_buff, path,
part_elem->partition_name,
sub_elem->partition_name, name_variant);
- if (reorged_parts)
- file= m_reorged_file[part_count++];
- else
- file= m_file[part];
+ file= m_file[part];
DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff));
- error= file->delete_table((const char *) part_name_buff);
+ if ((ret_error= file->delete_table((const char *) part_name_buff)))
+ error= ret_error;
} while (++j < no_subparts);
}
else
@@ -717,12 +635,10 @@ int ha_partition::drop_partitions(const char *path)
create_partition_name(part_name_buff, path,
part_elem->partition_name, name_variant,
TRUE);
- if (reorged_parts)
- file= m_reorged_file[part_count++];
- else
- file= m_file[i];
+ file= m_file[i];
DBUG_PRINT("info", ("Drop partition %s", part_name_buff));
- error= file->delete_table((const char *) part_name_buff);
+ if ((ret_error= file->delete_table((const char *) part_name_buff)))
+ error= ret_error;
}
if (part_elem->part_state == PART_IS_CHANGED)
part_elem->part_state= PART_NORMAL;
@@ -764,7 +680,8 @@ int ha_partition::rename_partitions(const char *path)
uint no_subparts= m_part_info->no_subparts;
uint i= 0;
uint j= 0;
- int error= 1;
+ int error= 0;
+ int ret_error;
uint temp_partitions= m_part_info->temp_partitions.elements;
handler *file;
partition_element *part_elem, *sub_elem;
@@ -772,6 +689,14 @@ int ha_partition::rename_partitions(const char *path)
if (temp_partitions)
{
+ /*
+ These are the reorganised partitions that have already been copied.
+      We delete the partitions and log the delete by deactivating the
+      delete log entry in the table log. We only need to synchronise
+      these writes before moving to the next loop since there is no
+      interaction among reorganised partitions; they cannot have the
+ same name.
+ */
do
{
part_elem= temp_it++;
@@ -782,39 +707,59 @@ int ha_partition::rename_partitions(const char *path)
{
sub_elem= sub_it++;
file= m_reorged_file[part_count++];
- create_subpartition_name(part_name_buff, path,
- part_elem->partition_name,
- sub_elem->partition_name,
- RENAMED_PART_NAME);
create_subpartition_name(norm_name_buff, path,
part_elem->partition_name,
sub_elem->partition_name,
NORMAL_PART_NAME);
- DBUG_PRINT("info", ("Rename subpartition from %s to %s",
- norm_name_buff, part_name_buff));
- error= file->rename_table((const char *) norm_name_buff,
- (const char *) part_name_buff);
+ DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff));
+ if ((ret_error= file->delete_table((const char *) norm_name_buff)))
+ error= ret_error;
+ else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
+ error= 1;
+ else
+ sub_elem->log_entry= NULL; /* Indicate success */
} while (++j < no_subparts);
}
else
{
file= m_reorged_file[part_count++];
- create_partition_name(part_name_buff, path,
- part_elem->partition_name, RENAMED_PART_NAME,
- TRUE);
create_partition_name(norm_name_buff, path,
part_elem->partition_name, NORMAL_PART_NAME,
TRUE);
- DBUG_PRINT("info", ("Rename partition from %s to %s",
- norm_name_buff, part_name_buff));
- error= file->rename_table((const char *) norm_name_buff,
- (const char *) part_name_buff);
+ DBUG_PRINT("info", ("Delete partition %s", norm_name_buff));
+ if ((ret_error= file->delete_table((const char *) norm_name_buff)))
+ error= ret_error;
+ else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
+ error= 1;
+ else
+ part_elem->log_entry= NULL; /* Indicate success */
}
} while (++i < temp_partitions);
+ VOID(sync_ddl_log());
}
i= 0;
do
{
+ /*
+ When state is PART_IS_CHANGED it means that we have created a new
+      TEMP partition that is to be renamed to the normal partition name,
+      and we will delete the old partition that currently has the normal name.
+
+ We perform this operation by
+ 1) Delete old partition with normal partition name
+ 2) Signal this in table log entry
+      3) Synch the table log to ensure consistency across crashes
+ 4) Rename temporary partition name to normal partition name
+ 5) Signal this to table log entry
+ It is not necessary to synch the last state since a new rename
+ should not corrupt things if there was no temporary partition.
+
+ The only other parts we need to cater for are new parts that
+ replace reorganised parts. The reorganised parts were deleted
+ by the code above that goes through the temp_partitions list.
+    Thus the synch above makes it safe to simply perform steps 4 and 5
+ for those entries.
+ */
part_elem= part_it++;
if (part_elem->part_state == PART_IS_CHANGED ||
(part_elem->part_state == PART_IS_ADDED && temp_partitions))
@@ -836,14 +781,12 @@ int ha_partition::rename_partitions(const char *path)
if (part_elem->part_state == PART_IS_CHANGED)
{
file= m_reorged_file[part_count++];
- create_subpartition_name(part_name_buff, path,
- part_elem->partition_name,
- sub_elem->partition_name,
- RENAMED_PART_NAME);
- DBUG_PRINT("info", ("Rename subpartition from %s to %s",
- norm_name_buff, part_name_buff));
- error= file->rename_table((const char *) norm_name_buff,
- (const char *) part_name_buff);
+ DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff));
+ if ((ret_error= file->delete_table((const char *) norm_name_buff)))
+ error= ret_error;
+ else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
+ error= 1;
+ VOID(sync_ddl_log());
}
file= m_new_file[part];
create_subpartition_name(part_name_buff, path,
@@ -852,8 +795,13 @@ int ha_partition::rename_partitions(const char *path)
TEMP_PART_NAME);
DBUG_PRINT("info", ("Rename subpartition from %s to %s",
part_name_buff, norm_name_buff));
- error= file->rename_table((const char *) part_name_buff,
- (const char *) norm_name_buff);
+ if ((ret_error= file->rename_table((const char *) part_name_buff,
+ (const char *) norm_name_buff)))
+ error= ret_error;
+ else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
+ error= 1;
+ else
+ sub_elem->log_entry= NULL;
} while (++j < no_subparts);
}
else
@@ -864,13 +812,12 @@ int ha_partition::rename_partitions(const char *path)
if (part_elem->part_state == PART_IS_CHANGED)
{
file= m_reorged_file[part_count++];
- create_partition_name(part_name_buff, path,
- part_elem->partition_name, RENAMED_PART_NAME,
- TRUE);
- DBUG_PRINT("info", ("Rename partition from %s to %s",
- norm_name_buff, part_name_buff));
- error= file->rename_table((const char *) norm_name_buff,
- (const char *) part_name_buff);
+ DBUG_PRINT("info", ("Delete partition %s", norm_name_buff));
+ if ((ret_error= file->delete_table((const char *) norm_name_buff)))
+ error= ret_error;
+ else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
+ error= 1;
+ VOID(sync_ddl_log());
}
file= m_new_file[i];
create_partition_name(part_name_buff, path,
@@ -878,11 +825,17 @@ int ha_partition::rename_partitions(const char *path)
TRUE);
DBUG_PRINT("info", ("Rename partition from %s to %s",
part_name_buff, norm_name_buff));
- error= file->rename_table((const char *) part_name_buff,
- (const char *) norm_name_buff);
+ if ((ret_error= file->rename_table((const char *) part_name_buff,
+ (const char *) norm_name_buff)))
+ error= ret_error;
+ else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
+ error= 1;
+ else
+ part_elem->log_entry= NULL;
}
}
} while (++i < no_parts);
+ VOID(sync_ddl_log());
DBUG_RETURN(error);
}
@@ -1204,7 +1157,6 @@ int ha_partition::prepare_new_partition(TABLE *table,
error:
if (create_flag)
VOID(file->delete_table(part_name));
- print_error(error, MYF(0));
DBUG_RETURN(error);
}
@@ -1331,7 +1283,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
(m_reorged_parts + 1))))
{
mem_alloc_error(sizeof(partition_element*)*(m_reorged_parts+1));
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(ER_OUTOFMEMORY);
}
/*
@@ -1363,7 +1315,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
(2*(no_remain_partitions + 1)))))
{
mem_alloc_error(sizeof(handler*)*2*(no_remain_partitions+1));
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(ER_OUTOFMEMORY);
}
m_added_file= &new_file_array[no_remain_partitions + 1];
@@ -1435,7 +1387,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
part_elem->engine_type)))
{
mem_alloc_error(sizeof(handler));
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(ER_OUTOFMEMORY);
}
} while (++j < no_subparts);
}
@@ -1483,7 +1435,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
(const char *)part_name_buff)))
{
cleanup_new_partition(part_count);
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(error);
}
m_added_file[part_count++]= new_file_array[part];
} while (++j < no_subparts);
@@ -1499,7 +1451,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
(const char *)part_name_buff)))
{
cleanup_new_partition(part_count);
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(error);
}
m_added_file[part_count++]= new_file_array[i];
}
@@ -1605,8 +1557,7 @@ int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
}
DBUG_RETURN(FALSE);
error:
- print_error(result, MYF(0));
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(result);
}
@@ -1873,8 +1824,8 @@ bool ha_partition::create_handler_file(const char *name)
{
part_elem= part_it++;
if (part_elem->part_state != PART_NORMAL &&
- part_elem->part_state != PART_IS_ADDED &&
- part_elem->part_state != PART_IS_CHANGED)
+ part_elem->part_state != PART_TO_BE_ADDED &&
+ part_elem->part_state != PART_CHANGED)
continue;
tablename_to_filename(part_elem->partition_name, part_name,
FN_REFLEN);
@@ -1925,8 +1876,8 @@ bool ha_partition::create_handler_file(const char *name)
{
part_elem= part_it++;
if (part_elem->part_state != PART_NORMAL &&
- part_elem->part_state != PART_IS_ADDED &&
- part_elem->part_state != PART_IS_CHANGED)
+ part_elem->part_state != PART_TO_BE_ADDED &&
+ part_elem->part_state != PART_CHANGED)
continue;
if (!m_is_sub_partitioned)
{
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index ecaa7e1e8fa..b31b9af28a3 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -179,7 +179,8 @@ public:
virtual int rename_table(const char *from, const char *to);
virtual int create(const char *name, TABLE *form,
HA_CREATE_INFO *create_info);
- virtual int create_handler_files(const char *name,
+ virtual int create_handler_files(const char *name,
+ const char *old_name, int action_flag,
HA_CREATE_INFO *create_info);
virtual void update_create_info(HA_CREATE_INFO *create_info);
virtual char *update_table_comment(const char *comment);
diff --git a/sql/handler.h b/sql/handler.h
index e93fdfe67e3..c7c3aa54c3b 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -632,6 +632,7 @@ typedef struct {
#define UNDEF_NODEGROUP 65535
class Item;
+struct st_table_log_memory_entry;
class partition_info;
@@ -639,7 +640,6 @@ struct st_partition_iter;
#define NOT_A_PARTITION_ID ((uint32)-1)
-
typedef struct st_ha_create_information
{
CHARSET_INFO *table_charset, *default_table_charset;
@@ -1379,8 +1379,15 @@ public:
virtual void drop_table(const char *name);
virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
- virtual int create_handler_files(const char *name, HA_CREATE_INFO *info)
- { return FALSE;}
+
+#define CHF_CREATE_FLAG 0
+#define CHF_DELETE_FLAG 1
+#define CHF_RENAME_FLAG 2
+#define CHF_INDEX_FLAG 3
+
+ virtual int create_handler_files(const char *name, const char *old_name,
+ int action_flag, HA_CREATE_INFO *info)
+ { return FALSE; }
virtual int change_partitions(HA_CREATE_INFO *create_info,
const char *path,
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 493c3dbc60e..acee912c912 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -52,7 +52,6 @@ static void agg_cmp_type(THD *thd, Item_result *type, Item **items, uint nitems)
{
uint i;
Field *field= NULL;
- bool all_constant= TRUE;
/* If the first argument is a FIELD_ITEM, pull out the field. */
if (items[0]->real_item()->type() == Item::FIELD_ITEM)
@@ -65,16 +64,9 @@ static void agg_cmp_type(THD *thd, Item_result *type, Item **items, uint nitems)
for (i= 1; i < nitems; i++)
{
type[0]= item_cmp_type(type[0], items[i]->result_type());
- if (field && !convert_constant_item(thd, field, &items[i]))
- all_constant= FALSE;
+ if (field && convert_constant_item(thd, field, &items[i]))
+ type[0]= INT_RESULT;
}
-
- /*
- If we had a field that can be compared as a longlong, and all constant
- items, then the aggregate result will be an INT_RESULT.
- */
- if (field && all_constant)
- type[0]= INT_RESULT;
}
diff --git a/sql/item_row.cc b/sql/item_row.cc
index 75c3f8a2922..f5c8d511025 100644
--- a/sql/item_row.cc
+++ b/sql/item_row.cc
@@ -26,7 +26,7 @@
*/
Item_row::Item_row(List<Item> &arg):
- Item(), used_tables_cache(0), array_holder(1), const_item_cache(1), with_null(0)
+ Item(), used_tables_cache(0), const_item_cache(1), with_null(0)
{
//TODO: think placing 2-3 component items in item (as it done for function)
@@ -85,6 +85,20 @@ bool Item_row::fix_fields(THD *thd, Item **ref)
}
+void Item_row::cleanup()
+{
+ DBUG_ENTER("Item_row::cleanup");
+
+ Item::cleanup();
+ /* Reset to the original values */
+ used_tables_cache= 0;
+ const_item_cache= 1;
+ with_null= 0;
+
+ DBUG_VOID_RETURN;
+}
+
+
void Item_row::split_sum_func(THD *thd, Item **ref_pointer_array,
List<Item> &fields)
{
diff --git a/sql/item_row.h b/sql/item_row.h
index 6fbe7436b72..d6dd4371372 100644
--- a/sql/item_row.h
+++ b/sql/item_row.h
@@ -19,7 +19,6 @@ class Item_row: public Item
Item **items;
table_map used_tables_cache;
uint arg_count;
- bool array_holder;
bool const_item_cache;
bool with_null;
public:
@@ -29,7 +28,6 @@ public:
items(item->items),
used_tables_cache(item->used_tables_cache),
arg_count(item->arg_count),
- array_holder(0),
const_item_cache(item->const_item_cache),
with_null(0)
{}
@@ -62,6 +60,7 @@ public:
return 0;
};
bool fix_fields(THD *thd, Item **ref);
+ void cleanup();
void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields);
table_map used_tables() const { return used_tables_cache; };
bool const_item() const { return const_item_cache; };
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 8a39b1fc4eb..d51a0ef4c9f 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -6579,18 +6579,6 @@ int Delete_rows_log_event::do_before_row_operations(TABLE *table)
if (!m_memory)
return HA_ERR_OUT_OF_MEM;
- if (table->s->keys > 0)
- {
- /* We have a key: search the table using the index */
- if (!table->file->inited)
- error= table->file->ha_index_init(0, FALSE);
- }
- else
- {
- /* We doesn't have a key: search the table using rnd_next() */
- error= table->file->ha_rnd_init(1);
- }
-
return error;
}
@@ -6638,6 +6626,20 @@ int Delete_rows_log_event::do_exec_row(TABLE *table)
{
DBUG_ASSERT(table != NULL);
+ if (table->s->keys > 0)
+ {
+ /* We have a key: search the table using the index */
+ if (!table->file->inited)
+ if (int error= table->file->ha_index_init(0, FALSE))
+ return error;
+ }
+ else
+ {
+    /* We don't have a key: search the table using rnd_next() */
+ if (int error= table->file->ha_rnd_init(1))
+ return error;
+ }
+
int error= find_and_fetch_row(table, m_key);
if (error)
return error;
@@ -6649,6 +6651,11 @@ int Delete_rows_log_event::do_exec_row(TABLE *table)
*/
error= table->file->ha_delete_row(table->record[0]);
+ /*
+ Have to restart the scan to be able to fetch the next row.
+ */
+ table->file->ha_index_or_rnd_end();
+
return error;
}
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index c471b11fee2..69b193f94ea 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -613,6 +613,100 @@ struct Query_cache_query_flags
#define query_cache_invalidate_by_MyISAM_filename_ref NULL
#endif /*HAVE_QUERY_CACHE*/
+/*
+  Error injector macros to enable easy testing of recovery after failures
+ in various error cases.
+*/
+#ifndef ERROR_INJECT_SUPPORT
+
+#define ERROR_INJECT(x) 0
+#define ERROR_INJECT_ACTION(x,action) 0
+#define ERROR_INJECT_CRASH(x) 0
+#define ERROR_INJECT_VALUE(x) 0
+#define ERROR_INJECT_VALUE_ACTION(x,action) 0
+#define ERROR_INJECT_VALUE_CRASH(x) 0
+#define SET_ERROR_INJECT_VALUE(x)
+
+#else
+
+inline bool check_and_unset_keyword(const char *dbug_str)
+{
+ const char *extra_str= "-d,";
+ char total_str[200];
+ if (_db_strict_keyword_ (dbug_str))
+ {
+ strxmov(total_str, extra_str, dbug_str, NullS);
+ DBUG_SET(total_str);
+ return 1;
+ }
+ return 0;
+}
+
+
+inline bool
+check_and_unset_inject_value(int value)
+{
+ THD *thd= current_thd;
+ if (thd->error_inject_value == (uint)value)
+ {
+ thd->error_inject_value= 0;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ ERROR INJECT MODULE:
+ --------------------
+  These macros are used to inject errors from the application code.
+  The error injections can be activated from SQL by using:
+  SET SESSION dbug=+d,code;
+
+ After the error has been injected, the macros will automatically
+  remove the debug code, similar to issuing:
+ SET SESSION dbug=-d,code
+ from SQL.
+
+  ERROR_INJECT_CRASH will inject a crash of the MySQL Server if the
+  code is set when the macro is called. ERROR_INJECT_CRASH can be used
+  in if-statements; it will always return FALSE unless, of course, it
+  crashes, in which case it doesn't return at all.
+
+  ERROR_INJECT_ACTION will inject the action specified in the action
+  parameter of the macro; before performing the action, the code will
+  be removed so that no more events occur. ERROR_INJECT_ACTION
+  can also be used in if-statements and always returns FALSE.
+  ERROR_INJECT can be used in a normal if-statement, where the action
+  part is performed in the if-block. The macro returns TRUE if the
+  error was activated, and otherwise returns FALSE. If activated, the
+  code is removed.
+
+  Sometimes it is necessary to perform error inject actions as a series
+  of events. In this case one can use a variable on the THD object:
+  set its value with e.g. SET_ERROR_INJECT_VALUE(100), and test for it
+  later with ERROR_INJECT_VALUE_CRASH, ERROR_INJECT_VALUE_ACTION and
+  ERROR_INJECT_VALUE. These have the same behaviour as the macros
+  described above, except that they use the error inject value instead
+  of a code used by the DBUG macros.
+*/
+#define SET_ERROR_INJECT_VALUE(x) \
+ current_thd->error_inject_value= (x)
+#define ERROR_INJECT_CRASH(code) \
+ DBUG_EVALUATE_IF(code, (abort(), 0), 0)
+#define ERROR_INJECT_ACTION(code, action) \
+ (check_and_unset_keyword(code) ? ((action), 0) : 0)
+#define ERROR_INJECT(code) \
+ check_and_unset_keyword(code)
+#define ERROR_INJECT_VALUE(value) \
+ check_and_unset_inject_value(value)
+#define ERROR_INJECT_VALUE_ACTION(value,action) \
+ (check_and_unset_inject_value(value) ? (action) : 0)
+#define ERROR_INJECT_VALUE_CRASH(value) \
+ ERROR_INJECT_VALUE_ACTION(value, (abort(), 0))
+
+#endif
+
uint build_table_path(char *buff, size_t bufflen, const char *db,
const char *table, const char *ext);
void write_bin_log(THD *thd, bool clear_error,
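
As a hypothetical usage sketch (not part of the patch; the keyword names
are invented), a server built with ERROR_INJECT_SUPPORT could instrument a
DDL code path like this:

    /* "fail_drop_partition" is an illustrative keyword, armed from SQL
       with SET SESSION dbug=+d,fail_drop_partition and unset again the
       first time the macro fires */
    if (ERROR_INJECT("fail_drop_partition"))
      DBUG_RETURN(ER_DDL_LOG_ERROR);
    /* aborts the server outright if "crash_drop_partition" is armed */
    ERROR_INJECT_CRASH("crash_drop_partition");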
@@ -1090,6 +1184,16 @@ uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info,
bool remove_table_from_cache(THD *thd, const char *db, const char *table,
uint flags);
+#define NORMAL_PART_NAME 0
+#define TEMP_PART_NAME 1
+#define RENAMED_PART_NAME 2
+void create_partition_name(char *out, const char *in1,
+ const char *in2, uint name_variant,
+ bool translate);
+void create_subpartition_name(char *out, const char *in1,
+ const char *in2, const char *in3,
+ uint name_variant);
+
typedef struct st_lock_param_type
{
ulonglong copied;
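
For reference, a minimal sketch of the name variants these helpers produce,
following the strxmov() patterns in the implementations removed from
ha_partition.cc above ("./test/t1" is an invented table path):

    char buff[FN_REFLEN];
    /* yields "./test/t1#P#p0"; TEMP_PART_NAME appends "#TMP#" and
       RENAMED_PART_NAME appends "#REN#" instead */
    create_partition_name(buff, "./test/t1", "p0", NORMAL_PART_NAME, TRUE);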
@@ -1109,14 +1213,94 @@ typedef struct st_lock_param_type
uint key_count;
uint db_options;
uint pack_frm_len;
+ partition_info *part_info;
} ALTER_PARTITION_PARAM_TYPE;
void mem_alloc_error(size_t size);
-#define WFRM_INITIAL_WRITE 1
-#define WFRM_CREATE_HANDLER_FILES 2
+
+enum ddl_log_entry_code
+{
+ /*
+ DDL_LOG_EXECUTE_CODE:
+ This is a code that indicates that this is a log entry to
+ be executed, from this entry a linked list of log entries
+ can be found and executed.
+ DDL_LOG_ENTRY_CODE:
+ An entry to be executed in a linked list from an execute log
+ entry.
+ DDL_IGNORE_LOG_ENTRY_CODE:
+ An entry that is to be ignored
+ */
+ DDL_LOG_EXECUTE_CODE = 'e',
+ DDL_LOG_ENTRY_CODE = 'l',
+ DDL_IGNORE_LOG_ENTRY_CODE = 'i'
+};
+
+enum ddl_log_action_code
+{
+ /*
+ The type of action that a DDL_LOG_ENTRY_CODE entry is to
+ perform.
+ DDL_LOG_DELETE_ACTION:
+ Delete an entity
+ DDL_LOG_RENAME_ACTION:
+ Rename an entity
+ DDL_LOG_REPLACE_ACTION:
+ Rename an entity after removing the previous entry with the
+ new name, that is replace this entry.
+ */
+ DDL_LOG_DELETE_ACTION = 'd',
+ DDL_LOG_RENAME_ACTION = 'r',
+ DDL_LOG_REPLACE_ACTION = 's'
+};
+
+
+typedef struct st_ddl_log_entry
+{
+ const char *name;
+ const char *from_name;
+ const char *handler_name;
+ uint next_entry;
+ uint entry_pos;
+ enum ddl_log_entry_code entry_type;
+ enum ddl_log_action_code action_type;
+ /*
+ Most actions have only one phase. REPLACE does however have two
+ phases. The first phase removes the file with the new name if
+ there was one there before and the second phase renames the
+ old name to the new name.
+ */
+ char phase;
+} DDL_LOG_ENTRY;
+
+typedef struct st_ddl_log_memory_entry
+{
+ uint entry_pos;
+ struct st_ddl_log_memory_entry *next_log_entry;
+ struct st_ddl_log_memory_entry *prev_log_entry;
+ struct st_ddl_log_memory_entry *next_active_log_entry;
+} DDL_LOG_MEMORY_ENTRY;
+
+
+bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
+ DDL_LOG_MEMORY_ENTRY **active_entry);
+bool write_execute_ddl_log_entry(uint first_entry,
+ bool complete,
+ DDL_LOG_MEMORY_ENTRY **active_entry);
+bool deactivate_ddl_log_entry(uint entry_no);
+void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry);
+bool sync_ddl_log();
+void release_ddl_log();
+void execute_ddl_log_recovery();
+bool execute_ddl_log_entry(THD *thd, uint first_entry);
+
+extern pthread_mutex_t LOCK_gdl;
+
+#define WFRM_WRITE_SHADOW 1
+#define WFRM_INSTALL_SHADOW 2
#define WFRM_PACK_FRM 4
bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags);
-bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt);
+int abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt);
void close_open_tables_and_downgrade(ALTER_PARTITION_PARAM_TYPE *lpt);
void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table);
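
A hypothetical sketch of how a caller might record a recoverable delete
through this interface (the partition path and engine name are invented,
and error handling is reduced to a flag):

    int error= 0;
    DDL_LOG_ENTRY ddl_log_entry;
    DDL_LOG_MEMORY_ENTRY *log_entry, *exec_entry= NULL;

    ddl_log_entry.entry_type=   DDL_LOG_ENTRY_CODE;
    ddl_log_entry.action_type=  DDL_LOG_DELETE_ACTION;
    ddl_log_entry.next_entry=   0;                /* end of the chain */
    ddl_log_entry.phase=        0;
    ddl_log_entry.handler_name= "myisam";         /* assumed engine name */
    ddl_log_entry.name=         "./test/t1#P#p0"; /* invented file path */
    ddl_log_entry.from_name=    NULL;             /* unused for DELETE */

    pthread_mutex_lock(&LOCK_gdl);
    if (write_ddl_log_entry(&ddl_log_entry, &log_entry) ||
        write_execute_ddl_log_entry(log_entry->entry_pos, FALSE, &exec_entry) ||
        sync_ddl_log())
      error= 1;
    pthread_mutex_unlock(&LOCK_gdl);

Once the file has actually been removed, the entry can be retired with
deactivate_ddl_log_entry() followed by another sync_ddl_log(), mirroring
what ha_partition::rename_partitions() does above.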
@@ -1296,6 +1480,7 @@ extern ulong slave_net_timeout, slave_trans_retries;
extern uint max_user_connections;
extern ulong what_to_log,flush_time;
extern ulong query_buff_size, thread_stack;
+extern ulong max_prepared_stmt_count, prepared_stmt_count;
extern ulong binlog_cache_size, max_binlog_cache_size, open_files_limit;
extern ulong max_binlog_size, max_relay_log_size;
#ifdef HAVE_ROW_BASED_REPLICATION
@@ -1350,6 +1535,7 @@ extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open, LOCK_lock_db,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
LOCK_slave_list, LOCK_active_mi, LOCK_manager, LOCK_global_read_lock,
LOCK_global_system_variables, LOCK_user_conn,
+ LOCK_prepared_stmt_count,
LOCK_bytes_sent, LOCK_bytes_received;
#ifdef HAVE_OPENSSL
extern pthread_mutex_t LOCK_des_key_file;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 541cc13aaea..fd640fb153c 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -500,6 +500,22 @@ ulong specialflag=0;
ulong binlog_cache_use= 0, binlog_cache_disk_use= 0;
ulong max_connections, max_connect_errors;
uint max_user_connections= 0;
+/*
+ Limit of the total number of prepared statements in the server.
+ Is necessary to protect the server against out-of-memory attacks.
+*/
+ulong max_prepared_stmt_count;
+/*
+ Current total number of prepared statements in the server. This number
+ is exact, and therefore may not be equal to the difference between
+ `com_stmt_prepare' and `com_stmt_close' (global status variables), as
+ the latter ones account for all registered attempts to prepare
+ a statement (including unsuccessful ones). Prepared statements are
+ currently connection-local: if the same SQL query text is prepared in
+ two different connections, this counts as two distinct prepared
+ statements.
+*/
+ulong prepared_stmt_count=0;
ulong thread_id=1L,current_pid;
ulong slow_launch_threads = 0, sync_binlog_period;
ulong expire_logs_days = 0;
@@ -577,6 +593,14 @@ pthread_mutex_t LOCK_mysql_create_db, LOCK_Acl, LOCK_open, LOCK_thread_count,
LOCK_crypt, LOCK_bytes_sent, LOCK_bytes_received,
LOCK_global_system_variables,
LOCK_user_conn, LOCK_slave_list, LOCK_active_mi;
+/*
+ The below lock protects access to two global server variables:
+ max_prepared_stmt_count and prepared_stmt_count. These variables
+ set the limit and hold the current total number of prepared statements
+  in the server, respectively. As the PREPARE/DEALLOCATE rate in a loaded
+ server may be fairly high, we need a dedicated lock.
+*/
+pthread_mutex_t LOCK_prepared_stmt_count;
#ifdef HAVE_OPENSSL
pthread_mutex_t LOCK_des_key_file;
#endif
@@ -1288,6 +1312,7 @@ static void clean_up_mutexes()
(void) pthread_mutex_destroy(&LOCK_global_system_variables);
(void) pthread_mutex_destroy(&LOCK_global_read_lock);
(void) pthread_mutex_destroy(&LOCK_uuid_generator);
+ (void) pthread_mutex_destroy(&LOCK_prepared_stmt_count);
(void) pthread_cond_destroy(&COND_thread_count);
(void) pthread_cond_destroy(&COND_refresh);
(void) pthread_cond_destroy(&COND_thread_cache);
@@ -2810,6 +2835,7 @@ static int init_thread_environment()
(void) pthread_mutex_init(&LOCK_active_mi, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_global_system_variables, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_global_read_lock, MY_MUTEX_INIT_FAST);
+ (void) pthread_mutex_init(&LOCK_prepared_stmt_count, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_uuid_generator, MY_MUTEX_INIT_FAST);
#ifdef HAVE_OPENSSL
(void) pthread_mutex_init(&LOCK_des_key_file,MY_MUTEX_INIT_FAST);
@@ -3579,6 +3605,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
unireg_abort(1);
}
}
+ execute_ddl_log_recovery();
create_shutdown_thread();
create_maintenance_thread();
@@ -3630,6 +3657,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
pthread_cond_wait(&COND_thread_count,&LOCK_thread_count);
(void) pthread_mutex_unlock(&LOCK_thread_count);
+ release_ddl_log();
#if defined(__WIN__) && !defined(EMBEDDED_LIBRARY)
if (Service.IsNT() && start_mode)
Service.Stop();
@@ -4634,7 +4662,8 @@ enum options_mysqld
OPT_MAX_BINLOG_CACHE_SIZE, OPT_MAX_BINLOG_SIZE,
OPT_MAX_CONNECTIONS, OPT_MAX_CONNECT_ERRORS,
OPT_MAX_DELAYED_THREADS, OPT_MAX_HEP_TABLE_SIZE,
- OPT_MAX_JOIN_SIZE, OPT_MAX_RELAY_LOG_SIZE, OPT_MAX_SORT_LENGTH,
+ OPT_MAX_JOIN_SIZE, OPT_MAX_PREPARED_STMT_COUNT,
+ OPT_MAX_RELAY_LOG_SIZE, OPT_MAX_SORT_LENGTH,
OPT_MAX_SEEKS_FOR_KEY, OPT_MAX_TMP_TABLES, OPT_MAX_USER_CONNECTIONS,
OPT_MAX_LENGTH_FOR_SORT_DATA,
OPT_MAX_WRITE_LOCK_COUNT, OPT_BULK_INSERT_BUFFER_SIZE,
@@ -5890,6 +5919,10 @@ The minimum value for this variable is 4096.",
(gptr*) &global_system_variables.max_length_for_sort_data,
(gptr*) &max_system_variables.max_length_for_sort_data, 0, GET_ULONG,
REQUIRED_ARG, 1024, 4, 8192*1024L, 0, 1, 0},
+ {"max_prepared_stmt_count", OPT_MAX_PREPARED_STMT_COUNT,
+ "Maximum numbrer of prepared statements in the server.",
+ (gptr*) &max_prepared_stmt_count, (gptr*) &max_prepared_stmt_count,
+ 0, GET_ULONG, REQUIRED_ARG, 16382, 0, 1*1024*1024, 0, 1, 0},
{"max_relay_log_size", OPT_MAX_RELAY_LOG_SIZE,
"If non-zero: relay log will be rotated automatically when the size exceeds this value; if zero (the default): when the size exceeds max_binlog_size. 0 excepted, the minimum value for this variable is 4096.",
(gptr*) &max_relay_log_size, (gptr*) &max_relay_log_size, 0, GET_ULONG,
@@ -6129,23 +6162,6 @@ The minimum value for this variable is 4096.",
{"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default.",
(gptr*) &opt_sync_frm, (gptr*) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0,
0, 0, 0, 0},
-#ifdef HAVE_REPLICATION
- {"sync-replication", OPT_SYNC_REPLICATION,
- "Enable synchronous replication.",
- (gptr*) &global_system_variables.sync_replication,
- (gptr*) &global_system_variables.sync_replication,
- 0, GET_ULONG, REQUIRED_ARG, 0, 0, 1, 0, 1, 0},
- {"sync-replication-slave-id", OPT_SYNC_REPLICATION_SLAVE_ID,
- "Synchronous replication is wished for this slave.",
- (gptr*) &global_system_variables.sync_replication_slave_id,
- (gptr*) &global_system_variables.sync_replication_slave_id,
- 0, GET_ULONG, REQUIRED_ARG, 0, 0, ~0L, 0, 1, 0},
- {"sync-replication-timeout", OPT_SYNC_REPLICATION_TIMEOUT,
- "Synchronous replication timeout.",
- (gptr*) &global_system_variables.sync_replication_timeout,
- (gptr*) &global_system_variables.sync_replication_timeout,
- 0, GET_ULONG, REQUIRED_ARG, 10, 0, ~0L, 0, 1, 0},
-#endif /* HAVE_REPLICATION */
{"table_cache", OPT_TABLE_OPEN_CACHE,
"Deprecated; use --table_open_cache instead.",
(gptr*) &table_cache_size, (gptr*) &table_cache_size, 0, GET_ULONG,
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 42f723eb382..3fddd780171 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -4370,7 +4370,7 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
sizeof(ROR_SCAN_INFO*)*
best_num)))
DBUG_RETURN(NULL);
- memcpy(trp->first_scan, ror_scan_mark, best_num*sizeof(ROR_SCAN_INFO*));
+ memcpy(trp->first_scan, tree->ror_scans, best_num*sizeof(ROR_SCAN_INFO*));
trp->last_scan= trp->first_scan + best_num;
trp->is_covering= TRUE;
trp->read_cost= total_cost;
diff --git a/sql/partition_element.h b/sql/partition_element.h
index d20715d2408..13693934c0f 100644
--- a/sql/partition_element.h
+++ b/sql/partition_element.h
@@ -36,6 +36,8 @@ enum partition_state {
PART_IS_ADDED= 8
};
+struct st_ddl_log_memory_entry;
+
class partition_element :public Sql_alloc {
public:
List<partition_element> subpartitions;
@@ -44,6 +46,7 @@ public:
ulonglong part_min_rows;
char *partition_name;
char *tablespace_name;
+ struct st_ddl_log_memory_entry *log_entry;
longlong range_value;
char* part_comment;
char* data_file_name;
@@ -55,7 +58,8 @@ public:
partition_element()
: part_max_rows(0), part_min_rows(0), partition_name(NULL),
- tablespace_name(NULL), range_value(0), part_comment(NULL),
+ tablespace_name(NULL), log_entry(NULL),
+ range_value(0), part_comment(NULL),
data_file_name(NULL), index_file_name(NULL),
engine_type(NULL),part_state(PART_NORMAL),
nodegroup_id(UNDEF_NODEGROUP), has_null_value(FALSE)
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index e2bf37d6ef3..dfc5dd2989b 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -88,10 +88,10 @@ partition_info *partition_info::get_clone()
The external routine needing this code is check_partition_info
*/
-#define MAX_PART_NAME_SIZE 16
+#define MAX_PART_NAME_SIZE 8
char *partition_info::create_default_partition_names(uint part_no, uint no_parts,
- uint start_no, bool is_subpart)
+ uint start_no)
{
char *ptr= sql_calloc(no_parts*MAX_PART_NAME_SIZE);
char *move_ptr= ptr;
@@ -102,10 +102,7 @@ char *partition_info::create_default_partition_names(uint part_no, uint no_parts
{
do
{
- if (is_subpart)
- my_sprintf(move_ptr, (move_ptr,"p%usp%u", part_no, (start_no + i)));
- else
- my_sprintf(move_ptr, (move_ptr,"p%u", (start_no + i)));
+ my_sprintf(move_ptr, (move_ptr,"p%u", (start_no + i)));
move_ptr+=MAX_PART_NAME_SIZE;
} while (++i < no_parts);
}
@@ -118,6 +115,36 @@ char *partition_info::create_default_partition_names(uint part_no, uint no_parts
/*
+ Create a unique name for the subpartition as part_name'sp''subpart_no'
+ SYNOPSIS
+ create_subpartition_name()
+ subpart_no Number of subpartition
+ part_name Name of partition
+ RETURN VALUES
+ >0 A reference to the created name string
+ 0 Memory allocation error
+*/
+
+char *partition_info::create_subpartition_name(uint subpart_no,
+ const char *part_name)
+{
+ uint size_alloc= strlen(part_name) + MAX_PART_NAME_SIZE;
+ char *ptr= sql_calloc(size_alloc);
+ DBUG_ENTER("create_subpartition_name");
+
+ if (likely(ptr != NULL))
+ {
+ my_sprintf(ptr, (ptr, "%ssp%u", part_name, subpart_no));
+ }
+ else
+ {
+ mem_alloc_error(size_alloc);
+ }
+ DBUG_RETURN(ptr);
+}
+
+
+/*
Set up all the default partitions not set-up by the user in the SQL
statement. Also perform a number of checks that the user hasn't tried
to use default values where no defaults exists.
@@ -167,8 +194,7 @@ bool partition_info::set_up_default_partitions(handler *file, ulonglong max_rows
goto end;
}
if (unlikely((!(default_name= create_default_partition_names(0, no_parts,
- start_no,
- FALSE)))))
+ start_no)))))
goto end;
i= 0;
do
@@ -238,18 +264,17 @@ bool partition_info::set_up_default_subpartitions(handler *file,
{
part_elem= part_it++;
j= 0;
- name_ptr= create_default_partition_names(i, no_subparts, (uint)0, TRUE);
- if (unlikely(!name_ptr))
- goto end;
do
{
partition_element *subpart_elem= new partition_element();
if (likely(subpart_elem != 0 &&
(!part_elem->subpartitions.push_back(subpart_elem))))
{
+ char *ptr= create_subpartition_name(j, part_elem->partition_name);
+ if (!ptr)
+ goto end;
subpart_elem->engine_type= default_engine_type;
- subpart_elem->partition_name= name_ptr;
- name_ptr+= MAX_PART_NAME_SIZE;
+ subpart_elem->partition_name= ptr;
}
else
{
diff --git a/sql/partition_info.h b/sql/partition_info.h
index 664c8834b0b..3a1e6be4050 100644
--- a/sql/partition_info.h
+++ b/sql/partition_info.h
@@ -28,7 +28,7 @@ typedef int (*get_part_id_func)(partition_info *part_info,
longlong *func_value);
typedef uint32 (*get_subpart_id_func)(partition_info *part_info);
-
+struct st_ddl_log_memory_entry;
class partition_info : public Sql_alloc
{
@@ -76,7 +76,11 @@ public:
Item *subpart_expr;
Item *item_free_list;
-
+
+ struct st_ddl_log_memory_entry *first_log_entry;
+ struct st_ddl_log_memory_entry *exec_log_entry;
+ struct st_ddl_log_memory_entry *frm_log_entry;
+
/*
A bitmap of partitions used by the current query.
Usage pattern:
@@ -191,6 +195,7 @@ public:
part_field_array(NULL), subpart_field_array(NULL),
full_part_field_array(NULL),
part_expr(NULL), subpart_expr(NULL), item_free_list(NULL),
+ first_log_entry(NULL), exec_log_entry(NULL), frm_log_entry(NULL),
list_array(NULL),
part_info_string(NULL),
part_func_string(NULL), subpart_func_string(NULL),
@@ -256,7 +261,8 @@ private:
uint start_no);
bool set_up_default_subpartitions(handler *file, ulonglong max_rows);
char *create_default_partition_names(uint part_no, uint no_parts,
- uint start_no, bool is_subpart);
+ uint start_no);
+ char *create_subpartition_name(uint subpart_no, const char *part_name);
bool has_unique_name(partition_element *element);
};
diff --git a/sql/set_var.cc b/sql/set_var.cc
index f2694f651f4..16a0c752639 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -161,6 +161,7 @@ static KEY_CACHE *create_key_cache(const char *name, uint length);
void fix_sql_mode_var(THD *thd, enum_var_type type);
static byte *get_error_count(THD *thd);
static byte *get_warning_count(THD *thd);
+static byte *get_prepared_stmt_count(THD *thd);
/*
Variable definition list
@@ -311,6 +312,10 @@ sys_var_thd_ha_rows sys_sql_max_join_size("sql_max_join_size",
&SV::max_join_size,
fix_max_join_size);
#endif
+static sys_var_long_ptr_global
+sys_max_prepared_stmt_count("max_prepared_stmt_count",
+ &max_prepared_stmt_count,
+ &LOCK_prepared_stmt_count);
sys_var_long_ptr sys_max_relay_log_size("max_relay_log_size",
&max_relay_log_size,
fix_max_relay_log_size);
@@ -431,14 +436,6 @@ sys_var_thd_storage_engine sys_storage_engine("storage_engine",
&SV::table_type);
#ifdef HAVE_REPLICATION
sys_var_sync_binlog_period sys_sync_binlog_period("sync_binlog", &sync_binlog_period);
-sys_var_thd_ulong sys_sync_replication("sync_replication",
- &SV::sync_replication);
-sys_var_thd_ulong sys_sync_replication_slave_id(
- "sync_replication_slave_id",
- &SV::sync_replication_slave_id);
-sys_var_thd_ulong sys_sync_replication_timeout(
- "sync_replication_timeout",
- &SV::sync_replication_timeout);
#endif
sys_var_bool_ptr sys_sync_frm("sync_frm", &opt_sync_frm);
sys_var_long_ptr sys_table_def_size("table_definition_cache",
@@ -604,6 +601,9 @@ static sys_var_readonly sys_warning_count("warning_count",
OPT_SESSION,
SHOW_LONG,
get_warning_count);
+static sys_var_readonly sys_prepared_stmt_count("prepared_stmt_count",
+ OPT_GLOBAL, SHOW_LONG,
+ get_prepared_stmt_count);
/* alias for last_insert_id() to be compatible with Sybase */
#ifdef HAVE_REPLICATION
@@ -847,6 +847,8 @@ SHOW_VAR init_vars[]= {
{sys_max_join_size.name, (char*) &sys_max_join_size, SHOW_SYS},
{sys_max_length_for_sort_data.name, (char*) &sys_max_length_for_sort_data,
SHOW_SYS},
+ {sys_max_prepared_stmt_count.name, (char*) &sys_max_prepared_stmt_count,
+ SHOW_SYS},
{sys_max_relay_log_size.name, (char*) &sys_max_relay_log_size, SHOW_SYS},
{sys_max_seeks_for_key.name, (char*) &sys_max_seeks_for_key, SHOW_SYS},
{sys_max_sort_length.name, (char*) &sys_max_sort_length, SHOW_SYS},
@@ -900,6 +902,7 @@ SHOW_VAR init_vars[]= {
SHOW_SYS},
{"pid_file", (char*) pidfile_name, SHOW_CHAR},
{"plugin_dir", (char*) opt_plugin_dir, SHOW_CHAR},
+ {sys_prepared_stmt_count.name, (char*) &sys_prepared_stmt_count, SHOW_SYS},
{"port", (char*) &mysqld_port, SHOW_INT},
{sys_preload_buff_size.name, (char*) &sys_preload_buff_size, SHOW_SYS},
{"protocol_version", (char*) &protocol_version, SHOW_INT},
@@ -955,11 +958,6 @@ SHOW_VAR init_vars[]= {
{sys_sync_binlog_period.name,(char*) &sys_sync_binlog_period, SHOW_SYS},
#endif
{sys_sync_frm.name, (char*) &sys_sync_frm, SHOW_SYS},
-#ifdef HAVE_REPLICATION
- {sys_sync_replication.name, (char*) &sys_sync_replication, SHOW_SYS},
- {sys_sync_replication_slave_id.name, (char*) &sys_sync_replication_slave_id,SHOW_SYS},
- {sys_sync_replication_timeout.name, (char*) &sys_sync_replication_timeout,SHOW_SYS},
-#endif
#ifdef HAVE_TZNAME
{"system_time_zone", system_time_zone, SHOW_CHAR},
#endif
@@ -1367,29 +1365,40 @@ static void fix_server_id(THD *thd, enum_var_type type)
server_id_supplied = 1;
}
-bool sys_var_long_ptr::check(THD *thd, set_var *var)
+
+sys_var_long_ptr::
+sys_var_long_ptr(const char *name_arg, ulong *value_ptr,
+ sys_after_update_func after_update_arg)
+ :sys_var_long_ptr_global(name_arg, value_ptr,
+ &LOCK_global_system_variables, after_update_arg)
+{}
+
+
+bool sys_var_long_ptr_global::check(THD *thd, set_var *var)
{
longlong v= var->value->val_int();
var->save_result.ulonglong_value= v < 0 ? 0 : v;
return 0;
}
-bool sys_var_long_ptr::update(THD *thd, set_var *var)
+bool sys_var_long_ptr_global::update(THD *thd, set_var *var)
{
ulonglong tmp= var->save_result.ulonglong_value;
- pthread_mutex_lock(&LOCK_global_system_variables);
+ pthread_mutex_lock(guard);
if (option_limits)
*value= (ulong) getopt_ull_limit_value(tmp, option_limits);
else
*value= (ulong) tmp;
- pthread_mutex_unlock(&LOCK_global_system_variables);
+ pthread_mutex_unlock(guard);
return 0;
}
-void sys_var_long_ptr::set_default(THD *thd, enum_var_type type)
+void sys_var_long_ptr_global::set_default(THD *thd, enum_var_type type)
{
+ pthread_mutex_lock(guard);
*value= (ulong) option_limits->def_value;
+ pthread_mutex_unlock(guard);
}
@@ -2824,6 +2833,13 @@ static byte *get_error_count(THD *thd)
return (byte*) &thd->sys_var_tmp.long_value;
}
+static byte *get_prepared_stmt_count(THD *thd)
+{
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ thd->sys_var_tmp.ulong_value= prepared_stmt_count;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+ return (byte*) &thd->sys_var_tmp.ulong_value;
+}
/****************************************************************************
Main handling of variables:
diff --git a/sql/set_var.h b/sql/set_var.h
index f62d6ce8d2a..8076f10bb0a 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -48,11 +48,7 @@ public:
sys_after_update_func after_update;
bool no_support_one_shot;
- sys_var(const char *name_arg)
- :name(name_arg), after_update(0)
- , no_support_one_shot(1)
- { add_sys_var(); }
- sys_var(const char *name_arg,sys_after_update_func func)
+ sys_var(const char *name_arg,sys_after_update_func func= NULL)
:name(name_arg), after_update(func)
, no_support_one_shot(1)
{ add_sys_var(); }
@@ -83,15 +79,35 @@ public:
};
-class sys_var_long_ptr :public sys_var
+/*
+  A base class for all variables that require their access to
+ be guarded with a mutex.
+*/
+
+class sys_var_global: public sys_var
+{
+protected:
+ pthread_mutex_t *guard;
+public:
+ sys_var_global(const char *name_arg, sys_after_update_func after_update_arg,
+ pthread_mutex_t *guard_arg)
+ :sys_var(name_arg, after_update_arg), guard(guard_arg) {}
+};
+
+
+/*
+ A global-only ulong variable that requires its access to be
+ protected with a mutex.
+*/
+
+class sys_var_long_ptr_global: public sys_var_global
{
public:
ulong *value;
- sys_var_long_ptr(const char *name_arg, ulong *value_ptr)
- :sys_var(name_arg),value(value_ptr) {}
- sys_var_long_ptr(const char *name_arg, ulong *value_ptr,
- sys_after_update_func func)
- :sys_var(name_arg,func), value(value_ptr) {}
+ sys_var_long_ptr_global(const char *name_arg, ulong *value_ptr,
+ pthread_mutex_t *guard_arg,
+ sys_after_update_func after_update_arg= NULL)
+ :sys_var_global(name_arg, after_update_arg, guard_arg), value(value_ptr) {}
bool check(THD *thd, set_var *var);
bool update(THD *thd, set_var *var);
void set_default(THD *thd, enum_var_type type);
@@ -101,6 +117,18 @@ public:
};
+/*
+ A global ulong variable that is protected by LOCK_global_system_variables
+*/
+
+class sys_var_long_ptr :public sys_var_long_ptr_global
+{
+public:
+ sys_var_long_ptr(const char *name_arg, ulong *value_ptr,
+ sys_after_update_func after_update_arg= NULL);
+};
+
+
class sys_var_ulonglong_ptr :public sys_var
{
public:
@@ -179,7 +207,7 @@ class sys_var_const_str :public sys_var
public:
char *value; // Pointer to const value
sys_var_const_str(const char *name_arg, const char *value_arg)
- :sys_var(name_arg), value((char*) value_arg)
+ :sys_var(name_arg),value((char*) value_arg)
{}
bool check(THD *thd, set_var *var)
{
@@ -226,10 +254,7 @@ public:
class sys_var_thd :public sys_var
{
public:
- sys_var_thd(const char *name_arg)
- :sys_var(name_arg)
- {}
- sys_var_thd(const char *name_arg, sys_after_update_func func)
+ sys_var_thd(const char *name_arg, sys_after_update_func func= NULL)
:sys_var(name_arg,func)
{}
bool check_type(enum_var_type type) { return 0; }
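
As a brief illustration of the new hierarchy (the variable and mutex names
below are invented), a global ulong guarded by a dedicated mutex is declared
the same way sys_max_prepared_stmt_count is in set_var.cc above:

    static ulong my_counter_limit;
    static pthread_mutex_t LOCK_my_counter;   /* hypothetical guard mutex */
    static sys_var_long_ptr_global
    sys_my_counter_limit("my_counter_limit", &my_counter_limit,
                         &LOCK_my_counter);

A plain sys_var_long_ptr, by contrast, now simply forwards to this base
class with LOCK_global_system_variables as the guard.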
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index 5702464a80d..1a505afb509 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -5826,9 +5826,13 @@ ER_NDB_CANT_SWITCH_BINLOG_FORMAT
eng "The NDB cluster engine does not support changing the binlog format on the fly yet"
ER_PARTITION_NO_TEMPORARY
eng "Cannot create temporary table with partitions"
+ER_DDL_LOG_ERROR
+ eng "Error in DDL log"
ER_NULL_IN_VALUES_LESS_THAN
eng "Not allowed to use NULL value in VALUES LESS THAN"
swe "Det är inte tillåtet att använda NULL-värden i VALUES LESS THAN"
ER_WRONG_PARTITION_NAME
eng "Incorrect partition name"
swe "Felaktigt partitionsnamn"
+ER_MAX_PREPARED_STMT_COUNT_REACHED 42000
+ eng "Can't create more than max_prepared_stmt_count statements (current value: %lu)"
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index e09fed180ba..94b7ce1f3cc 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -6140,9 +6140,8 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b)
abort_and_upgrade_lock()
lpt Parameter passing struct
All parameters passed through the ALTER_PARTITION_PARAM_TYPE object
- RETURN VALUES
- TRUE Failure
- FALSE Success
+ RETURN VALUE
+ 0
DESCRIPTION
Remember old lock level (for possible downgrade later on), abort all
waiting threads and ensure that all keeping locks currently are
@@ -6156,23 +6155,17 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b)
old_lock_level Old lock level
*/
-bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt)
+int abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt)
{
uint flags= RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG;
- int error= FALSE;
DBUG_ENTER("abort_and_upgrade_locks");
lpt->old_lock_type= lpt->table->reginfo.lock_type;
VOID(pthread_mutex_lock(&LOCK_open));
mysql_lock_abort(lpt->thd, lpt->table, TRUE);
VOID(remove_table_from_cache(lpt->thd, lpt->db, lpt->table_name, flags));
- if (lpt->thd->killed)
- {
- lpt->thd->no_warnings_for_error= 0;
- error= TRUE;
- }
VOID(pthread_mutex_unlock(&LOCK_open));
- DBUG_RETURN(error);
+ DBUG_RETURN(0);
}
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 123152c95ec..ac03585f0a4 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -223,6 +223,9 @@ THD::THD()
cuted_fields= sent_row_count= 0L;
limit_found_rows= 0;
statement_id_counter= 0UL;
+#ifdef ERROR_INJECT_SUPPORT
+ error_inject_value= 0UL;
+#endif
// Must be reset to handle error with THD's created for init of mysqld
lex->current_select= 0;
start_time=(time_t) 0;
@@ -447,7 +450,7 @@ THD::~THD()
net_end(&net);
}
#endif
- stmt_map.destroy(); /* close all prepared statements */
+ stmt_map.reset(); /* close all prepared statements */
DBUG_ASSERT(lock_info.n_cursors == 0);
if (!cleanup_done)
cleanup();
@@ -1769,21 +1772,72 @@ Statement_map::Statement_map() :
}
-int Statement_map::insert(Statement *statement)
+/*
+ Insert a new statement to the thread-local statement map.
+
+ DESCRIPTION
+ If there was an old statement with the same name, replace it with the
+ new one. Otherwise, check if max_prepared_stmt_count is not reached yet,
+ increase prepared_stmt_count, and insert the new statement. It's okay
+ to delete an old statement and fail to insert the new one.
+
+ POSTCONDITIONS
+ All named prepared statements are also present in names_hash.
+ Statement names in names_hash are unique.
+    The statement is added only if prepared_stmt_count < max_prepared_stmt_count
+ last_found_statement always points to a valid statement or is 0
+
+ RETURN VALUE
+ 0 success
+ 1 error: out of resources or max_prepared_stmt_count limit has been
+ reached. An error is sent to the client, the statement is deleted.
+*/
+
+int Statement_map::insert(THD *thd, Statement *statement)
{
- int res= my_hash_insert(&st_hash, (byte *) statement);
- if (res)
- return res;
- if (statement->name.str)
+ if (my_hash_insert(&st_hash, (byte*) statement))
{
- if ((res= my_hash_insert(&names_hash, (byte*)statement)))
- {
- hash_delete(&st_hash, (byte*)statement);
- return res;
- }
+ /*
+ Delete is needed only in case of an insert failure. In all other
+ cases hash_delete will also delete the statement.
+ */
+ delete statement;
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ goto err_st_hash;
+ }
+ if (statement->name.str && my_hash_insert(&names_hash, (byte*) statement))
+ {
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ goto err_names_hash;
+ }
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ /*
+ We don't check that prepared_stmt_count is <= max_prepared_stmt_count
+    because we want to allow lowering the total limit of prepared
+    statements below the current count. In that case
+ no new statements can be added until prepared_stmt_count drops below
+ the limit.
+ */
+ if (prepared_stmt_count >= max_prepared_stmt_count)
+ {
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+ my_error(ER_MAX_PREPARED_STMT_COUNT_REACHED, MYF(0),
+ max_prepared_stmt_count);
+ goto err_max;
}
+ prepared_stmt_count++;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+
last_found_statement= statement;
- return res;
+ return 0;
+
+err_max:
+ if (statement->name.str)
+ hash_delete(&names_hash, (byte*) statement);
+err_names_hash:
+ hash_delete(&st_hash, (byte*) statement);
+err_st_hash:
+ return 1;
}
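The comment above about not comparing prepared_stmt_count against the maximum at SET time describes the following behaviour; a hypothetical session sketch, assuming ten statements are already prepared on the connection:

    SET GLOBAL max_prepared_stmt_count = 5;  -- allowed even though 10 are open
    PREPARE s11 FROM 'SELECT 1';             -- fails: count (10) >= limit (5)
    DEALLOCATE PREPARE s1;                   -- count drops to 9
    -- new PREPAREs keep failing until the count has dropped below 5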
@@ -1797,6 +1851,47 @@ void Statement_map::close_transient_cursors()
}
+void Statement_map::erase(Statement *statement)
+{
+ if (statement == last_found_statement)
+ last_found_statement= 0;
+ if (statement->name.str)
+ hash_delete(&names_hash, (byte *) statement);
+
+ hash_delete(&st_hash, (byte *) statement);
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ DBUG_ASSERT(prepared_stmt_count > 0);
+ prepared_stmt_count--;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+}
+
+
+void Statement_map::reset()
+{
+  /* Must be first: my_hash_reset() below clears st_hash.records */
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ DBUG_ASSERT(prepared_stmt_count >= st_hash.records);
+ prepared_stmt_count-= st_hash.records;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+
+ my_hash_reset(&names_hash);
+ my_hash_reset(&st_hash);
+ last_found_statement= 0;
+}
+
+
+Statement_map::~Statement_map()
+{
+ /* Must go first, hash_free will reset st_hash.records */
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ DBUG_ASSERT(prepared_stmt_count >= st_hash.records);
+ prepared_stmt_count-= st_hash.records;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+
+ hash_free(&names_hash);
+ hash_free(&st_hash);
+}
+
bool select_dumpvar::send_data(List<Item> &items)
{
List_iterator_fast<Item_func_set_user_var> li(vars);
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 60ff558ac48..fdb70b6c991 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -241,11 +241,6 @@ struct system_variables
my_bool new_mode;
my_bool query_cache_wlock_invalidate;
my_bool engine_condition_pushdown;
-#ifdef HAVE_REPLICATION
- ulong sync_replication;
- ulong sync_replication_slave_id;
- ulong sync_replication_timeout;
-#endif /* HAVE_REPLICATION */
my_bool innodb_table_locks;
my_bool innodb_support_xa;
my_bool ndb_force_send;
@@ -545,7 +540,7 @@ class Statement_map
public:
Statement_map();
- int insert(Statement *statement);
+ int insert(THD *thd, Statement *statement);
Statement *find_by_name(LEX_STRING *name)
{
@@ -567,36 +562,16 @@ public:
}
return last_found_statement;
}
- void erase(Statement *statement)
- {
- if (statement == last_found_statement)
- last_found_statement= 0;
- if (statement->name.str)
- {
- hash_delete(&names_hash, (byte *) statement);
- }
- hash_delete(&st_hash, (byte *) statement);
- }
/*
Close all cursors of this connection that use tables of a storage
engine that has transaction-specific state and therefore can not
survive COMMIT or ROLLBACK. Currently all but MyISAM cursors are closed.
*/
void close_transient_cursors();
+ void erase(Statement *statement);
/* Erase all statements (calls Statement destructor) */
- void reset()
- {
- my_hash_reset(&names_hash);
- my_hash_reset(&st_hash);
- transient_cursor_list.empty();
- last_found_statement= 0;
- }
-
- void destroy()
- {
- hash_free(&names_hash);
- hash_free(&st_hash);
- }
+ void reset();
+ ~Statement_map();
private:
HASH st_hash;
HASH names_hash;
@@ -1119,6 +1094,9 @@ public:
query_id_t query_id, warn_id;
ulong thread_id, col_access;
+#ifdef ERROR_INJECT_SUPPORT
+ ulong error_inject_value;
+#endif
/* Statement id is thread-wide. This counter is used to generate ids */
ulong statement_id_counter;
ulong rand_saved_seed1, rand_saved_seed2;
@@ -1179,6 +1157,7 @@ public:
{
my_bool my_bool_value;
long long_value;
+ ulong ulong_value;
} sys_var_tmp;
struct {
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index aae80f07b71..0442ad724d2 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -1792,15 +1792,10 @@ char *generate_partition_syntax(partition_info *part_info,
char path[FN_REFLEN];
int err= 0;
List_iterator<partition_element> part_it(part_info->partitions);
- List_iterator<partition_element> temp_it(part_info->temp_partitions);
File fptr;
char *buf= NULL; //Return buffer
- uint use_temp= 0;
- uint no_temp_parts= part_info->temp_partitions.elements;
- bool write_part_state;
DBUG_ENTER("generate_partition_syntax");
- write_part_state= (part_info->part_state && !part_info->part_state_len);
if (unlikely(((fptr= create_temp_file(path,mysql_tmpdir,"psy",
O_RDWR | O_BINARY | O_TRUNC |
O_TEMPORARY, MYF(MY_WME)))) < 0))
@@ -1865,67 +1860,26 @@ char *generate_partition_syntax(partition_info *part_info,
err+= add_space(fptr);
}
}
- no_parts= part_info->no_parts;
- tot_no_parts= no_parts + no_temp_parts;
+ tot_no_parts= part_info->partitions.elements;
no_subparts= part_info->no_subparts;
if (write_all || (!part_info->use_default_partitions))
{
+ bool first= TRUE;
err+= add_begin_parenthesis(fptr);
i= 0;
do
{
- /*
- We need to do some clever list manipulation here since we have two
- different needs for our list processing and here we take some of the
- cost of using a simpler list processing for the other parts of the
- code.
-
- ALTER TABLE REORGANIZE PARTITIONS has the list of partitions to be
- the final list as the main list and the reorganised partitions is in
- the temporary partition list. Thus when finding the first part added
- we insert the temporary list if there is such a list. If there is no
- temporary list we are performing an ADD PARTITION.
- */
- if (use_temp && use_temp <= no_temp_parts)
- {
- part_elem= temp_it++;
- DBUG_ASSERT(no_temp_parts);
- no_temp_parts--;
- }
- else if (use_temp)
- {
- DBUG_ASSERT(no_parts);
- part_elem= save_part_elem;
- use_temp= 0;
- no_parts--;
- }
- else
- {
- part_elem= part_it++;
- if ((part_elem->part_state == PART_TO_BE_ADDED ||
- part_elem->part_state == PART_IS_ADDED) && no_temp_parts)
- {
- save_part_elem= part_elem;
- part_elem= temp_it++;
- no_temp_parts--;
- use_temp= 1;
- }
- else
- {
- DBUG_ASSERT(no_parts);
- no_parts--;
- }
- }
-
- if (part_elem->part_state != PART_IS_DROPPED)
+ part_elem= part_it++;
+ if (part_elem->part_state != PART_TO_BE_DROPPED &&
+ part_elem->part_state != PART_REORGED_DROPPED)
{
- if (write_part_state)
+ if (!first)
{
- uint32 part_state_id= part_info->part_state_len;
- part_info->part_state[part_state_id]= (uchar)part_elem->part_state;
- part_info->part_state_len= part_state_id+1;
+ err+= add_comma(fptr);
+ err+= add_space(fptr);
}
+ first= FALSE;
err+= add_partition(fptr);
err+= add_name_string(fptr, part_elem->partition_name);
err+= add_space(fptr);
@@ -1955,16 +1909,10 @@ char *generate_partition_syntax(partition_info *part_info,
err+= add_end_parenthesis(fptr);
} while (++j < no_subparts);
}
- if (i != (tot_no_parts-1))
- {
- err+= add_comma(fptr);
- err+= add_space(fptr);
- }
}
if (i == (tot_no_parts-1))
err+= add_end_parenthesis(fptr);
} while (++i < tot_no_parts);
- DBUG_ASSERT(!no_parts && !no_temp_parts);
}
if (err)
goto close_file;
@@ -3527,27 +3475,6 @@ set_engine_all_partitions(partition_info *part_info,
}
/*
SYNOPSIS
- fast_alter_partition_error_handler()
- lpt Container for parameters
-
- RETURN VALUES
- None
-
- DESCRIPTION
- Support routine to clean up after failures of on-line ALTER TABLE
- for partition management.
-*/
-
-static void fast_alter_partition_error_handler(ALTER_PARTITION_PARAM_TYPE *lpt)
-{
- DBUG_ENTER("fast_alter_partition_error_handler");
- /* TODO: WL 2826 Error handling */
- DBUG_VOID_RETURN;
-}
-
-
-/*
- SYNOPSIS
fast_end_partition()
thd Thread object
out:copied Number of records copied
@@ -3567,6 +3494,7 @@ static void fast_alter_partition_error_handler(ALTER_PARTITION_PARAM_TYPE *lpt)
static int fast_end_partition(THD *thd, ulonglong copied,
ulonglong deleted,
+ TABLE *table,
TABLE_LIST *table_list, bool is_empty,
ALTER_PARTITION_PARAM_TYPE *lpt,
bool written_bin_log)
@@ -3594,7 +3522,7 @@ static int fast_end_partition(THD *thd, ulonglong copied,
send_ok(thd,copied+deleted,0L,tmp_name);
DBUG_RETURN(FALSE);
}
- fast_alter_partition_error_handler(lpt);
+ table->file->print_error(error, MYF(0));
DBUG_RETURN(TRUE);
}
@@ -3843,7 +3771,8 @@ uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info,
after the change as before. Thus we can reply ok immediately
without any changes at all.
*/
- DBUG_RETURN(fast_end_partition(thd, ULL(0), ULL(0), NULL,
+ DBUG_RETURN(fast_end_partition(thd, ULL(0), ULL(0),
+ table, NULL,
TRUE, NULL, FALSE));
}
else if (new_part_no > curr_part_no)
@@ -4195,6 +4124,7 @@ that are reorganised.
my_error(ER_ROW_IS_REFERENCED, MYF(0));
DBUG_RETURN(TRUE);
}
+ tab_part_info->no_parts-= no_parts_dropped;
}
else if ((alter_info->flags & ALTER_OPTIMIZE_PARTITION) ||
(alter_info->flags & ALTER_ANALYZE_PARTITION) ||
@@ -4239,6 +4169,11 @@ that are reorganised.
my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), ptr);
DBUG_RETURN(TRUE);
}
+ if (!(*fast_alter_partition))
+ {
+ table->file->print_error(HA_ERR_WRONG_COMMAND, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
}
else if (alter_info->flags & ALTER_COALESCE_PARTITION)
{
@@ -4597,6 +4532,7 @@ the generated partition syntax in a correct manner.
/*
Make sure change of engine happens to all partitions.
*/
+ DBUG_PRINT("info", ("partition changed"));
set_engine_all_partitions(thd->work_part_info, create_info->db_type);
*partition_changed= TRUE;
}
@@ -4611,7 +4547,10 @@ the generated partition syntax in a correct manner.
using the partition handler.
*/
if (thd->work_part_info != table->part_info)
+ {
+ DBUG_PRINT("info", ("partition changed"));
*partition_changed= TRUE;
+ }
if (create_info->db_type == &partition_hton)
part_info->default_engine_type= table->part_info->default_engine_type;
else
@@ -4663,14 +4602,22 @@ the generated partition syntax in a correct manner.
static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
{
char path[FN_REFLEN+1];
+ int error;
+ handler *file= lpt->table->file;
DBUG_ENTER("mysql_change_partitions");
build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "");
- DBUG_RETURN(lpt->table->file->change_partitions(lpt->create_info, path,
- &lpt->copied,
- &lpt->deleted,
- lpt->pack_frm_data,
- lpt->pack_frm_len));
+ if ((error= file->change_partitions(lpt->create_info, path, &lpt->copied,
+ &lpt->deleted, lpt->pack_frm_data,
+ lpt->pack_frm_len)))
+ {
+ if (error != ER_OUTOFMEMORY)
+ file->print_error(error, MYF(0));
+ else
+ lpt->thd->fatal_error();
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
}
@@ -4696,10 +4643,17 @@ static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
static bool mysql_rename_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
{
char path[FN_REFLEN+1];
+ int error;
DBUG_ENTER("mysql_rename_partitions");
build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "");
- DBUG_RETURN(lpt->table->file->rename_partitions(path));
+ if ((error= lpt->table->file->rename_partitions(path)))
+ {
+ if (error != 1)
+ lpt->table->file->print_error(error, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
}
@@ -4730,11 +4684,13 @@ static bool mysql_drop_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
List_iterator<partition_element> part_it(part_info->partitions);
uint i= 0;
uint remove_count= 0;
+ int error;
DBUG_ENTER("mysql_drop_partitions");
build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "");
- if (lpt->table->file->drop_partitions(path))
+ if ((error= lpt->table->file->drop_partitions(path)))
{
+ lpt->table->file->print_error(error, MYF(0));
DBUG_RETURN(TRUE);
}
do
@@ -4752,6 +4708,767 @@ static bool mysql_drop_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
/*
+ Insert log entry into list
+ SYNOPSIS
+ insert_part_info_log_entry_list()
+    part_info                Partition info object to link the entry into
+    log_entry                Log entry to insert
+ RETURN VALUES
+ NONE
+*/
+
+static void insert_part_info_log_entry_list(partition_info *part_info,
+ DDL_LOG_MEMORY_ENTRY *log_entry)
+{
+ log_entry->next_active_log_entry= part_info->first_log_entry;
+ part_info->first_log_entry= log_entry;
+}
+
+
+/*
+ Release all log entries for this partition info struct
+ SYNOPSIS
+ release_part_info_log_entries()
+ first_log_entry First log entry in list to release
+ RETURN VALUES
+ NONE
+*/
+
+static void release_part_info_log_entries(DDL_LOG_MEMORY_ENTRY *log_entry)
+{
+ DBUG_ENTER("release_part_info_log_entries");
+
+ while (log_entry)
+ {
+ release_ddl_log_memory_entry(log_entry);
+ log_entry= log_entry->next_active_log_entry;
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+  Log a delete/rename of an frm file
+ SYNOPSIS
+ write_log_replace_delete_frm()
+ lpt Struct for parameters
+ next_entry Next reference to use in log record
+ from_path Name to rename from
+ to_path Name to rename to
+ replace_flag TRUE if replace, else delete
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ Support routine that writes a replace or delete of an frm file into the
+ ddl log. It also inserts an entry that keeps track of used space into
+ the partition info object
+*/
+
+static bool write_log_replace_delete_frm(ALTER_PARTITION_PARAM_TYPE *lpt,
+ uint next_entry,
+ const char *from_path,
+ const char *to_path,
+ bool replace_flag)
+{
+ DDL_LOG_ENTRY ddl_log_entry;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ DBUG_ENTER("write_log_replace_delete_frm");
+
+ if (replace_flag)
+ ddl_log_entry.action_type= DDL_LOG_REPLACE_ACTION;
+ else
+ ddl_log_entry.action_type= DDL_LOG_DELETE_ACTION;
+ ddl_log_entry.next_entry= next_entry;
+ ddl_log_entry.handler_name= reg_ext;
+ ddl_log_entry.name= to_path;
+ if (replace_flag)
+ ddl_log_entry.from_name= from_path;
+ if (write_ddl_log_entry(&ddl_log_entry, &log_entry))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ insert_part_info_log_entry_list(lpt->part_info, log_entry);
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Log final partition changes in change partition
+ SYNOPSIS
+ write_log_changed_partitions()
+ lpt Struct containing parameters
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ This code is used to perform safe ADD PARTITION for HASH partitions
+ and COALESCE for HASH partitions and REORGANIZE for any type of
+ partitions.
+ We prepare entries for all partitions except the reorganised partitions
+ in REORGANIZE partition, those are handled by
+ write_log_dropped_partitions. For those partitions that are replaced
+ special care is needed to ensure that this is performed correctly and
+ this requires a two-phased approach with this log as a helper for this.
+
+ This code is closely intertwined with the code in rename_partitions in
+ the partition handler.
+*/
+
+static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
+ uint *next_entry, const char *path)
+{
+ DDL_LOG_ENTRY ddl_log_entry;
+ partition_info *part_info= lpt->part_info;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ char tmp_path[FN_LEN];
+ char normal_path[FN_LEN];
+ List_iterator<partition_element> part_it(part_info->partitions);
+ uint temp_partitions= part_info->temp_partitions.elements;
+ uint no_elements= part_info->partitions.elements;
+ uint i= 0;
+ DBUG_ENTER("write_log_changed_partitions");
+
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_IS_CHANGED ||
+ (part_elem->part_state == PART_IS_ADDED && temp_partitions))
+ {
+ if (part_info->is_sub_partitioned())
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ uint no_subparts= part_info->no_subparts;
+ uint j= 0;
+ do
+ {
+ partition_element *sub_elem= sub_it++;
+ ddl_log_entry.next_entry= *next_entry;
+ ddl_log_entry.handler_name=
+ ha_resolve_storage_engine_name(sub_elem->engine_type);
+ create_subpartition_name(tmp_path, path,
+ part_elem->partition_name,
+ sub_elem->partition_name,
+ TEMP_PART_NAME);
+ create_subpartition_name(normal_path, path,
+ part_elem->partition_name,
+ sub_elem->partition_name,
+ NORMAL_PART_NAME);
+ ddl_log_entry.name= normal_path;
+ ddl_log_entry.from_name= tmp_path;
+ if (part_elem->part_state == PART_IS_CHANGED)
+ ddl_log_entry.action_type= DDL_LOG_REPLACE_ACTION;
+ else
+ ddl_log_entry.action_type= DDL_LOG_RENAME_ACTION;
+ if (write_ddl_log_entry(&ddl_log_entry, &log_entry))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ *next_entry= log_entry->entry_pos;
+ sub_elem->log_entry= log_entry;
+ insert_part_info_log_entry_list(part_info, log_entry);
+ } while (++j < no_subparts);
+ }
+ else
+ {
+ ddl_log_entry.next_entry= *next_entry;
+ ddl_log_entry.handler_name=
+ ha_resolve_storage_engine_name(part_elem->engine_type);
+ create_partition_name(tmp_path, path,
+ part_elem->partition_name,
+ TEMP_PART_NAME, TRUE);
+ create_partition_name(normal_path, path,
+ part_elem->partition_name,
+ NORMAL_PART_NAME, TRUE);
+ ddl_log_entry.name= normal_path;
+ ddl_log_entry.from_name= tmp_path;
+ if (part_elem->part_state == PART_IS_CHANGED)
+ ddl_log_entry.action_type= DDL_LOG_REPLACE_ACTION;
+ else
+ ddl_log_entry.action_type= DDL_LOG_RENAME_ACTION;
+ if (write_ddl_log_entry(&ddl_log_entry, &log_entry))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ *next_entry= log_entry->entry_pos;
+ part_elem->log_entry= log_entry;
+ insert_part_info_log_entry_list(part_info, log_entry);
+ }
+ }
+ } while (++i < no_elements);
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Log dropped partitions
+ SYNOPSIS
+ write_log_dropped_partitions()
+ lpt Struct containing parameters
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
+ uint *next_entry,
+ const char *path,
+ bool temp_list)
+{
+ DDL_LOG_ENTRY ddl_log_entry;
+ partition_info *part_info= lpt->part_info;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ char tmp_path[FN_LEN];
+ List_iterator<partition_element> part_it(part_info->partitions);
+ List_iterator<partition_element> temp_it(part_info->temp_partitions);
+ uint no_temp_partitions= part_info->temp_partitions.elements;
+ uint no_elements= part_info->partitions.elements;
+ uint i= 0;
+ DBUG_ENTER("write_log_dropped_partitions");
+
+ ddl_log_entry.action_type= DDL_LOG_DELETE_ACTION;
+ if (temp_list)
+ no_elements= no_temp_partitions;
+ while (no_elements--)
+ {
+ partition_element *part_elem;
+ if (temp_list)
+ part_elem= temp_it++;
+ else
+ part_elem= part_it++;
+ if (part_elem->part_state == PART_TO_BE_DROPPED ||
+ part_elem->part_state == PART_TO_BE_ADDED ||
+ part_elem->part_state == PART_CHANGED)
+ {
+ uint name_variant;
+ if (part_elem->part_state == PART_CHANGED ||
+ (part_elem->part_state == PART_TO_BE_ADDED &&
+ no_temp_partitions))
+ name_variant= TEMP_PART_NAME;
+ else
+ name_variant= NORMAL_PART_NAME;
+ if (part_info->is_sub_partitioned())
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ uint no_subparts= part_info->no_subparts;
+ uint j= 0;
+ do
+ {
+ partition_element *sub_elem= sub_it++;
+ ddl_log_entry.next_entry= *next_entry;
+ ddl_log_entry.handler_name=
+ ha_resolve_storage_engine_name(sub_elem->engine_type);
+ create_subpartition_name(tmp_path, path,
+ part_elem->partition_name,
+ sub_elem->partition_name,
+ name_variant);
+ ddl_log_entry.name= tmp_path;
+ if (write_ddl_log_entry(&ddl_log_entry, &log_entry))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ *next_entry= log_entry->entry_pos;
+ if (temp_list)
+ sub_elem->log_entry= log_entry;
+ insert_part_info_log_entry_list(part_info, log_entry);
+ } while (++j < no_subparts);
+ }
+ else
+ {
+ ddl_log_entry.next_entry= *next_entry;
+ ddl_log_entry.handler_name=
+ ha_resolve_storage_engine_name(part_elem->engine_type);
+ create_partition_name(tmp_path, path,
+ part_elem->partition_name,
+ name_variant, TRUE);
+ ddl_log_entry.name= tmp_path;
+ if (write_ddl_log_entry(&ddl_log_entry, &log_entry))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ *next_entry= log_entry->entry_pos;
+ if (temp_list)
+ part_elem->log_entry= log_entry;
+ insert_part_info_log_entry_list(part_info, log_entry);
+ }
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Set execute log entry in ddl log for this partitioned table
+ SYNOPSIS
+ set_part_info_exec_log_entry()
+ part_info Partition info object
+ exec_log_entry Log entry
+ RETURN VALUES
+ NONE
+*/
+
+static void set_part_info_exec_log_entry(partition_info *part_info,
+ DDL_LOG_MEMORY_ENTRY *exec_log_entry)
+{
+ part_info->exec_log_entry= exec_log_entry;
+ exec_log_entry->next_active_log_entry= NULL;
+}
+
+
+/*
+  Write the log entry to ensure that the shadow frm file is removed on
+  crash.
+  SYNOPSIS
+    write_log_drop_shadow_frm()
+    lpt                      Struct containing parameters
+  RETURN VALUES
+    TRUE                     Error
+    FALSE                    Success
+  DESCRIPTION
+    Prepare an entry in the ddl log indicating a drop of the shadow frm
+    file and its corresponding handler file.
+*/
+
+static bool write_log_drop_shadow_frm(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ DDL_LOG_ENTRY ddl_log_entry;
+ partition_info *part_info= lpt->part_info;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ DDL_LOG_MEMORY_ENTRY *exec_log_entry= NULL;
+ char shadow_path[FN_LEN];
+ DBUG_ENTER("write_log_drop_shadow_frm");
+
+ build_table_filename(shadow_path, sizeof(shadow_path), lpt->db,
+ lpt->table_name, "#");
+ pthread_mutex_lock(&LOCK_gdl);
+ if (write_log_replace_delete_frm(lpt, 0UL, NULL,
+ (const char*)shadow_path, FALSE))
+ goto error;
+ log_entry= part_info->first_log_entry;
+ if (write_execute_ddl_log_entry(log_entry->entry_pos,
+ FALSE, &exec_log_entry))
+ goto error;
+ pthread_mutex_unlock(&LOCK_gdl);
+ set_part_info_exec_log_entry(part_info, exec_log_entry);
+ DBUG_RETURN(FALSE);
+
+error:
+ release_part_info_log_entries(part_info->first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ part_info->first_log_entry= NULL;
+ my_error(ER_DDL_LOG_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Log renaming of shadow frm to real frm name and dropping of old frm
+ SYNOPSIS
+ write_log_rename_frm()
+ lpt Struct containing parameters
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ Prepare an entry to ensure that we complete the renaming of the frm
+ file if failure occurs in the middle of the rename process.
+*/
+
+static bool write_log_rename_frm(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ DDL_LOG_ENTRY ddl_log_entry;
+ partition_info *part_info= lpt->part_info;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ DDL_LOG_MEMORY_ENTRY *exec_log_entry= part_info->exec_log_entry;
+ char path[FN_LEN];
+ char shadow_path[FN_LEN];
+ DDL_LOG_MEMORY_ENTRY *old_first_log_entry= part_info->first_log_entry;
+ DBUG_ENTER("write_log_rename_frm");
+
+ part_info->first_log_entry= NULL;
+ build_table_filename(path, sizeof(path), lpt->db,
+ lpt->table_name, "");
+ build_table_filename(shadow_path, sizeof(shadow_path), lpt->db,
+ lpt->table_name, "#");
+ pthread_mutex_lock(&LOCK_gdl);
+ if (write_log_replace_delete_frm(lpt, 0UL, shadow_path, path, TRUE))
+ goto error;
+ log_entry= part_info->first_log_entry;
+ part_info->frm_log_entry= log_entry;
+ if (write_execute_ddl_log_entry(log_entry->entry_pos,
+ FALSE, &exec_log_entry))
+ goto error;
+ release_part_info_log_entries(old_first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ DBUG_RETURN(FALSE);
+
+error:
+ release_part_info_log_entries(part_info->first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ part_info->first_log_entry= old_first_log_entry;
+ part_info->frm_log_entry= NULL;
+ my_error(ER_DDL_LOG_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Write the log entries to ensure that the drop partition command is completed
+ even in the presence of a crash.
+
+ SYNOPSIS
+ write_log_drop_partition()
+ lpt Struct containing parameters
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ Prepare entries to the ddl log indicating all partitions to drop and to
+ install the shadow frm file and remove the old frm file.
+*/
+
+static bool write_log_drop_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ DDL_LOG_ENTRY ddl_log_entry;
+ partition_info *part_info= lpt->part_info;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ DDL_LOG_MEMORY_ENTRY *exec_log_entry= part_info->exec_log_entry;
+ char tmp_path[FN_LEN];
+ char path[FN_LEN];
+ uint next_entry= 0;
+ DDL_LOG_MEMORY_ENTRY *old_first_log_entry= part_info->first_log_entry;
+ DBUG_ENTER("write_log_drop_partition");
+
+ part_info->first_log_entry= NULL;
+ build_table_filename(path, sizeof(path), lpt->db,
+ lpt->table_name, "");
+ build_table_filename(tmp_path, sizeof(tmp_path), lpt->db,
+ lpt->table_name, "#");
+ pthread_mutex_lock(&LOCK_gdl);
+ if (write_log_dropped_partitions(lpt, &next_entry, (const char*)path,
+ FALSE))
+ goto error;
+ if (write_log_replace_delete_frm(lpt, next_entry, (const char*)tmp_path,
+ (const char*)path, TRUE))
+ goto error;
+ log_entry= part_info->first_log_entry;
+ part_info->frm_log_entry= log_entry;
+ if (write_execute_ddl_log_entry(log_entry->entry_pos,
+ FALSE, &exec_log_entry))
+ goto error;
+ release_part_info_log_entries(old_first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ DBUG_RETURN(FALSE);
+
+error:
+ release_part_info_log_entries(part_info->first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ part_info->first_log_entry= old_first_log_entry;
+ part_info->frm_log_entry= NULL;
+ my_error(ER_DDL_LOG_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+  Write the log entries to ensure that the add partition command is not
+  executed at all if a crash occurs before it has completed.
+
+ SYNOPSIS
+ write_log_add_change_partition()
+ lpt Struct containing parameters
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ Prepare entries to the ddl log indicating all partitions to drop and to
+ remove the shadow frm file.
+ We always inject entries backwards in the list in the ddl log since we
+ don't know the entry position until we have written it.
+*/
+
+static bool write_log_add_change_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ partition_info *part_info= lpt->part_info;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ DDL_LOG_MEMORY_ENTRY *exec_log_entry= NULL;
+ char tmp_path[FN_LEN];
+ char path[FN_LEN];
+ uint next_entry= 0;
+ DBUG_ENTER("write_log_add_change_partition");
+
+ build_table_filename(path, sizeof(path), lpt->db,
+ lpt->table_name, "");
+ build_table_filename(tmp_path, sizeof(tmp_path), lpt->db,
+ lpt->table_name, "#");
+ pthread_mutex_lock(&LOCK_gdl);
+ if (write_log_dropped_partitions(lpt, &next_entry, (const char*)path,
+ FALSE))
+ goto error;
+ if (write_log_replace_delete_frm(lpt, next_entry, NULL, tmp_path,
+ FALSE))
+ goto error;
+ log_entry= part_info->first_log_entry;
+ if (write_execute_ddl_log_entry(log_entry->entry_pos,
+ FALSE, &exec_log_entry))
+ goto error;
+ pthread_mutex_unlock(&LOCK_gdl);
+ set_part_info_exec_log_entry(part_info, exec_log_entry);
+ DBUG_RETURN(FALSE);
+
+error:
+ release_part_info_log_entries(part_info->first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ part_info->first_log_entry= NULL;
+ my_error(ER_DDL_LOG_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Write description of how to complete the operation after first phase of
+ change partitions.
+
+ SYNOPSIS
+ write_log_final_change_partition()
+ lpt Struct containing parameters
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+    We will write log entries that remove all reorganised partitions,
+    rename the others to reflect the new naming scheme, and install the
+    shadow frm file.
+*/
+
+static bool write_log_final_change_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ DDL_LOG_ENTRY ddl_log_entry;
+ partition_info *part_info= lpt->part_info;
+ DDL_LOG_MEMORY_ENTRY *log_entry;
+ DDL_LOG_MEMORY_ENTRY *exec_log_entry= part_info->exec_log_entry;
+ char path[FN_LEN];
+ char shadow_path[FN_LEN];
+ DDL_LOG_MEMORY_ENTRY *old_first_log_entry= part_info->first_log_entry;
+ uint next_entry= 0;
+ DBUG_ENTER("write_log_final_change_partition");
+
+ part_info->first_log_entry= NULL;
+ build_table_filename(path, sizeof(path), lpt->db,
+ lpt->table_name, "");
+ build_table_filename(shadow_path, sizeof(shadow_path), lpt->db,
+ lpt->table_name, "#");
+ pthread_mutex_lock(&LOCK_gdl);
+ if (write_log_dropped_partitions(lpt, &next_entry, (const char*)path,
+ TRUE))
+ goto error;
+ if (write_log_changed_partitions(lpt, &next_entry, (const char*)path))
+ goto error;
+ if (write_log_replace_delete_frm(lpt, 0UL, shadow_path, path, TRUE))
+ goto error;
+ log_entry= part_info->first_log_entry;
+ part_info->frm_log_entry= log_entry;
+ if (write_execute_ddl_log_entry(log_entry->entry_pos,
+ FALSE, &exec_log_entry))
+ goto error;
+ release_part_info_log_entries(old_first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ DBUG_RETURN(FALSE);
+
+error:
+ release_part_info_log_entries(part_info->first_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ part_info->first_log_entry= old_first_log_entry;
+ part_info->frm_log_entry= NULL;
+ my_error(ER_DDL_LOG_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Remove entry from ddl log and release resources for others to use
+
+ SYNOPSIS
+ write_log_completed()
+    lpt                      Struct containing parameters
+    dont_crash               Currently unused
+  RETURN VALUES
+    NONE
+*/
+
+static void write_log_completed(ALTER_PARTITION_PARAM_TYPE *lpt,
+ bool dont_crash)
+{
+ partition_info *part_info= lpt->part_info;
+ uint count_loop= 0;
+ bool not_success;
+ DDL_LOG_MEMORY_ENTRY *log_entry= part_info->exec_log_entry;
+ DBUG_ENTER("write_log_completed");
+
+ DBUG_ASSERT(log_entry);
+ pthread_mutex_lock(&LOCK_gdl);
+ if (write_execute_ddl_log_entry(0UL, TRUE, &log_entry))
+ {
+ /*
+      Failed to write. Bad...
+      We have completed the operation but are left with log records that
+      would REMOVE things that should not be removed. What clever things
+      could one do here? An error was already written to the error log
+      by the call above, so we do nothing further here.
+ */
+ ;
+ }
+ release_part_info_log_entries(part_info->first_log_entry);
+ release_part_info_log_entries(part_info->exec_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ part_info->exec_log_entry= NULL;
+ part_info->first_log_entry= NULL;
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Release all log entries
+ SYNOPSIS
+ release_log_entries()
+ part_info Partition info struct
+ RETURN VALUES
+ NONE
+*/
+
+static void release_log_entries(partition_info *part_info)
+{
+ pthread_mutex_lock(&LOCK_gdl);
+ release_part_info_log_entries(part_info->first_log_entry);
+ release_part_info_log_entries(part_info->exec_log_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ part_info->first_log_entry= NULL;
+ part_info->exec_log_entry= NULL;
+}
+
+
+/*
+ Handle errors for ALTER TABLE for partitioning
+ SYNOPSIS
+ handle_alter_part_error()
+ lpt Struct carrying parameters
+ not_completed Was request in complete phase when error occurred
+ RETURN VALUES
+ NONE
+*/
+
+void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt,
+ bool not_completed,
+ bool drop_partition,
+ bool frm_install)
+{
+ partition_info *part_info= lpt->part_info;
+ DBUG_ENTER("handle_alter_part_error");
+
+  if (part_info->first_log_entry &&
+      execute_ddl_log_entry(current_thd,
+                            part_info->first_log_entry->entry_pos))
+ {
+ /*
+      We couldn't recover from the error; manual intervention is most
+      likely required.
+ */
+ write_log_completed(lpt, FALSE);
+ release_log_entries(part_info);
+ if (not_completed)
+ {
+ if (drop_partition)
+ {
+ /* Table is still ok, but we left a shadow frm file behind. */
+ push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,
+ "%s %s",
+ "Operation was unsuccessful, table is still intact,",
+ "but it is possible that a shadow frm file was left behind");
+ }
+ else
+ {
+ push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,
+ "%s %s %s %s",
+ "Operation was unsuccessful, table is still intact,",
+ "but it is possible that a shadow frm file was left behind.",
+ "It is also possible that temporary partitions are left behind,",
+ "these could be empty or more or less filled with records");
+ }
+ }
+ else
+ {
+ if (frm_install)
+ {
+ /*
+ Failed during install of shadow frm file, table isn't intact
+ and dropped partitions are still there
+ */
+ push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,
+ "%s %s %s",
+ "Failed during alter of partitions, table is no longer intact.",
+ "The frm file is in an unknown state, and a backup",
+ "is required.");
+ }
+ else if (drop_partition)
+ {
+ /*
+          Table is ok, we have switched to the new table but left the
+          dropped partitions in place. We remove the log records and
+          ask the user to perform the drop manually.
+ */
+ push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,
+ "%s %s",
+ "Failed during drop of partitions, table is intact.",
+ "Manual drop of remaining partitions is required");
+ }
+ else
+ {
+ /*
+ We failed during renaming of partitions. The table is most
+          certainly in a very bad state, so we give the user a warning and
+          disable the table by writing an ancient frm version into it.
+ */
+ push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,
+ "%s %s %s",
+ "Failed during renaming of partitions. We are now in a position",
+ "where table is not reusable",
+ "Table is disabled by writing ancient frm file version into it");
+ }
+ }
+ }
+ else
+ {
+ release_log_entries(part_info);
+ if (not_completed)
+ {
+ /*
+        We hit an error before things were completed but managed to
+        recover from it. Everything has been restored to its original
+        state, so no further action is needed.
+ */
+ ;
+ }
+ else
+ {
+ /*
+ We hit an error after we had completed most of the operation
+        and the second attempt succeeded, so the operation is actually
+        successful now. We need to issue a warning that even though we
+        reported an error, the operation was successfully completed.
+ */
+ push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,"%s %s",
+ "Operation was successfully completed by failure handling,",
+ "after failure of normal operation");
+ }
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/*
Actually perform the change requested by ALTER TABLE of partitions
previously prepared.
@@ -4792,9 +5509,12 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
ALTER_PARTITION_PARAM_TYPE lpt_obj;
ALTER_PARTITION_PARAM_TYPE *lpt= &lpt_obj;
bool written_bin_log= TRUE;
+ bool not_completed= TRUE;
+ bool frm_install= FALSE;
DBUG_ENTER("fast_alter_partition_table");
lpt->thd= thd;
+ lpt->part_info= part_info;
lpt->create_info= create_info;
lpt->create_list= create_list;
lpt->key_list= key_list;
@@ -4826,17 +5546,18 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
In this case it is enough to call optimise_partitions, there is no
need to change frm files or anything else.
*/
+ int error;
written_bin_log= FALSE;
if (((alter_info->flags & ALTER_OPTIMIZE_PARTITION) &&
- (table->file->optimize_partitions(thd))) ||
+ (error= table->file->optimize_partitions(thd))) ||
((alter_info->flags & ALTER_ANALYZE_PARTITION) &&
- (table->file->analyze_partitions(thd))) ||
+ (error= table->file->analyze_partitions(thd))) ||
((alter_info->flags & ALTER_CHECK_PARTITION) &&
- (table->file->check_partitions(thd))) ||
+ (error= table->file->check_partitions(thd))) ||
((alter_info->flags & ALTER_REPAIR_PARTITION) &&
- (table->file->repair_partitions(thd))))
+ (error= table->file->repair_partitions(thd))))
{
- fast_alter_partition_error_handler(lpt);
+ table->file->print_error(error, MYF(0));
DBUG_RETURN(TRUE);
}
}
@@ -4881,10 +5602,9 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
1) Write the new frm, pack it and then delete it
2) Perform the change within the handler
*/
- if ((mysql_write_frm(lpt, WFRM_INITIAL_WRITE | WFRM_PACK_FRM)) ||
- (mysql_change_partitions(lpt)))
+ if (mysql_write_frm(lpt, WFRM_WRITE_SHADOW | WFRM_PACK_FRM) ||
+ mysql_change_partitions(lpt))
{
- fast_alter_partition_error_handler(lpt);
DBUG_RETURN(TRUE);
}
}
@@ -4912,32 +5632,62 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
after a DROP PARTITION) if one ensured that failed accesses to the
dropped partitions was aborted for sure (thus only possible for
transactional engines).
-
- 1) Lock the table in TL_WRITE_ONLY to ensure all other accesses to
+
+      0) Write an entry that removes the shadow frm file if a crash occurs
+ 1) Write the new frm file as a shadow frm
+ 2) Write the ddl log to ensure that the operation is completed
+ even in the presence of a MySQL Server crash
+ 3) Lock the table in TL_WRITE_ONLY to ensure all other accesses to
the table have completed
- 2) Write the new frm file where the partitions have changed but are
- still remaining with the state PART_TO_BE_DROPPED
- 3) Write the bin log
- 4) Prepare MyISAM handlers for drop of partitions
- 5) Ensure that any users that has opened the table but not yet
+ 4) Write the bin log
+         Unfortunately the writing of the binlog is not synchronised with
+         other logging activities, so no matter where the binlog write is
+         placed relative to the other activities there will always be
+         crash scenarios with strange outcomes. With this placement it
+         can happen that the ALTER TABLE DROP PARTITION is performed on
+         the master but not on the slaves if we crash after writing the
+         ddl log but before writing the binlog. A solution would require
+         writing the statement to the ddl log first and then, when
+         recovering from the crash, writing it to the binlog if it is
+         not already there.
+ 5) Install the previously written shadow frm file
+      6) Ensure that any users that have opened the table but not yet
reached the abort lock do that before downgrading the lock.
- 6) Drop the partitions
- 7) Write the frm file that the partition has been dropped
- 8) Wait until all accesses using the old frm file has completed
- 9) Complete query
+ 7) Prepare MyISAM handlers for drop of partitions
+ 8) Drop the partitions
+ 9) Remove entries from ddl log
+      10) Wait until all accesses using the old frm file have completed
+ 11) Complete query
+
+ We insert Error injections at all places where it could be interesting
+ to test if recovery is properly done.
*/
- if ((abort_and_upgrade_lock(lpt)) ||
- (mysql_write_frm(lpt, WFRM_INITIAL_WRITE)) ||
+ if (write_log_drop_shadow_frm(lpt) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_1") ||
+ mysql_write_frm(lpt, WFRM_WRITE_SHADOW) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_2") ||
+ write_log_drop_partition(lpt) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_3") ||
+ (not_completed= FALSE) ||
+ abort_and_upgrade_lock(lpt) || /* Always returns 0 */
((!thd->lex->no_write_to_binlog) &&
(write_bin_log(thd, FALSE,
- thd->query, thd->query_length), FALSE)) ||
- (table->file->extra(HA_EXTRA_PREPARE_FOR_DELETE)) ||
+ thd->query, thd->query_length), FALSE)) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_4") ||
+ (table->file->extra(HA_EXTRA_PREPARE_FOR_DELETE), FALSE) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_5") ||
+ ((frm_install= TRUE), FALSE) ||
+ mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) ||
+ ((frm_install= FALSE), FALSE) ||
(close_open_tables_and_downgrade(lpt), FALSE) ||
- (mysql_drop_partitions(lpt)) ||
- (mysql_write_frm(lpt, WFRM_CREATE_HANDLER_FILES)) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_6") ||
+ mysql_drop_partitions(lpt) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_7") ||
+ (write_log_completed(lpt, FALSE), FALSE) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_8") ||
(mysql_wait_completed_table(lpt, table), FALSE))
{
- fast_alter_partition_error_handler(lpt);
+ handle_alter_part_error(lpt, not_completed, TRUE, frm_install);
DBUG_RETURN(TRUE);
}
}
@@ -4954,28 +5704,45 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
miss updates made by a transaction serialised before it that are
inserted into the new partition.
- 1) Write the new frm file where state of added partitions is
- changed to PART_TO_BE_ADDED
+      0) Write an entry that removes the shadow frm file if a crash occurs
+ 1) Write the new frm file as a shadow frm file
+ 2) Log the changes to happen in ddl log
2) Add the new partitions
3) Lock all partitions in TL_WRITE_ONLY to ensure that no users
are still using the old partitioning scheme. Wait until all
ongoing users have completed before progressing.
- 4) Write a new frm file of the table where the partitions are added
- to the table.
- 5) Write binlog
- 6) Wait until all accesses using the old frm file has completed
- 7) Complete query
+ 4) Write binlog
+ 5) Now the change is completed except for the installation of the
+ new frm file. We thus write an action in the log to change to
+ the shadow frm file
+ 6) Install the new frm file of the table where the partitions are
+ added to the table.
+      7) Wait until all accesses using the old frm file have completed
+ 8) Remove entries from ddl log
+ 9) Complete query
*/
- if ((mysql_write_frm(lpt, WFRM_INITIAL_WRITE)) ||
- (mysql_change_partitions(lpt)) ||
- (abort_and_upgrade_lock(lpt)) ||
- (mysql_write_frm(lpt, WFRM_CREATE_HANDLER_FILES)) ||
+ if (write_log_add_change_partition(lpt) ||
+ ERROR_INJECT_CRASH("crash_add_partition_1") ||
+ mysql_write_frm(lpt, WFRM_WRITE_SHADOW) ||
+ ERROR_INJECT_CRASH("crash_add_partition_2") ||
+ mysql_change_partitions(lpt) ||
+ ERROR_INJECT_CRASH("crash_add_partition_3") ||
+ abort_and_upgrade_lock(lpt) || /* Always returns 0 */
((!thd->lex->no_write_to_binlog) &&
(write_bin_log(thd, FALSE,
thd->query, thd->query_length), FALSE)) ||
- (close_open_tables_and_downgrade(lpt), FALSE))
+ ERROR_INJECT_CRASH("crash_add_partition_4") ||
+ write_log_rename_frm(lpt) ||
+ (not_completed= FALSE) ||
+ ERROR_INJECT_CRASH("crash_add_partition_5") ||
+ ((frm_install= TRUE), FALSE) ||
+ mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) ||
+ ERROR_INJECT_CRASH("crash_add_partition_6") ||
+ (close_open_tables_and_downgrade(lpt), FALSE) ||
+ (write_log_completed(lpt, FALSE), FALSE) ||
+ ERROR_INJECT_CRASH("crash_add_partition_7"))
{
- fast_alter_partition_error_handler(lpt);
+ handle_alter_part_error(lpt, not_completed, FALSE, frm_install);
DBUG_RETURN(TRUE);
}
}
@@ -5012,44 +5779,57 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
use a lower lock level. This can be handled inside store_lock in the
respective handler.
- 1) Write the new frm file where state of added partitions is
- changed to PART_TO_BE_ADDED and the reorganised partitions
- are set in state PART_TO_BE_REORGED.
- 2) Add the new partitions
+      0) Write an entry that removes the shadow frm file if a crash occurs
+ 1) Write the shadow frm file of new partitioning
+      2) Write log entries so that temporary partitions added in the
+         change phase are removed if a crash occurs
+ 3) Add the new partitions
Copy from the reorganised partitions to the new partitions
- 3) Lock all partitions in TL_WRITE_ONLY to ensure that no users
+      4) Log that the operation is completed and log all actions
+         needed to complete the operation from here
+ 5) Lock all partitions in TL_WRITE_ONLY to ensure that no users
are still using the old partitioning scheme. Wait until all
ongoing users have completed before progressing.
- 4) Prepare MyISAM handlers for rename and delete of partitions
- 5) Write a new frm file of the table where the partitions are
- reorganised.
- 6) Rename the reorged partitions such that they are no longer
+ 6) Prepare MyISAM handlers for rename and delete of partitions
+ 7) Rename the reorged partitions such that they are no longer
used and rename those added to their real new names.
- 7) Write bin log
- 8) Wait until all accesses using the old frm file has completed
- 9) Drop the reorganised partitions
- 10)Write a new frm file of the table where the partitions are
- reorganised.
- 11)Wait until all accesses using the old frm file has completed
- 12)Complete query
+ 8) Write bin log
+ 9) Install the shadow frm file
+      10) Wait until all accesses using the old frm file have completed
+ 11) Drop the reorganised partitions
+ 12) Remove log entry
+      13) Wait until all accesses using the old frm file have completed
+      14) Complete query
*/
-
- if ((mysql_write_frm(lpt, WFRM_INITIAL_WRITE)) ||
- (mysql_change_partitions(lpt)) ||
- (abort_and_upgrade_lock(lpt)) ||
- (mysql_write_frm(lpt, WFRM_CREATE_HANDLER_FILES)) ||
- (table->file->extra(HA_EXTRA_PREPARE_FOR_DELETE)) ||
- (mysql_rename_partitions(lpt)) ||
+ if (write_log_add_change_partition(lpt) ||
+ ERROR_INJECT_CRASH("crash_change_partition_1") ||
+ mysql_write_frm(lpt, WFRM_WRITE_SHADOW) ||
+ ERROR_INJECT_CRASH("crash_change_partition_2") ||
+ mysql_change_partitions(lpt) ||
+ ERROR_INJECT_CRASH("crash_change_partition_3") ||
+ write_log_final_change_partition(lpt) ||
+ ERROR_INJECT_CRASH("crash_change_partition_4") ||
+ (not_completed= FALSE) ||
+ abort_and_upgrade_lock(lpt) || /* Always returns 0 */
((!thd->lex->no_write_to_binlog) &&
(write_bin_log(thd, FALSE,
thd->query, thd->query_length), FALSE)) ||
+ ERROR_INJECT_CRASH("crash_change_partition_5") ||
+ (table->file->extra(HA_EXTRA_PREPARE_FOR_DELETE), FALSE) ||
+ ERROR_INJECT_CRASH("crash_change_partition_6") ||
+ mysql_rename_partitions(lpt) ||
+ ((frm_install= TRUE), FALSE) ||
+ ERROR_INJECT_CRASH("crash_change_partition_7") ||
+ mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) ||
+ ERROR_INJECT_CRASH("crash_change_partition_8") ||
(close_open_tables_and_downgrade(lpt), FALSE) ||
- (mysql_drop_partitions(lpt)) ||
- (mysql_write_frm(lpt, 0UL)) ||
+ ERROR_INJECT_CRASH("crash_change_partition_9") ||
+ (write_log_completed(lpt, FALSE), FALSE) ||
+ ERROR_INJECT_CRASH("crash_change_partition_10") ||
(mysql_wait_completed_table(lpt, table), FALSE))
{
- fast_alter_partition_error_handler(lpt);
- DBUG_RETURN(TRUE);
+ handle_alter_part_error(lpt, not_completed, FALSE, frm_install);
+ DBUG_RETURN(TRUE);
}
}
/*
@@ -5057,7 +5837,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
user
*/
DBUG_RETURN(fast_end_partition(thd, lpt->copied, lpt->deleted,
- table_list, FALSE, lpt,
+ table, table_list, FALSE, lpt,
written_bin_log));
}
#endif
@@ -5709,5 +6489,85 @@ static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR *part_iter)
field->store(part_iter->field_vals.cur++, FALSE);
return part_iter->part_info->get_subpartition_id(part_iter->part_info);
}
+
+
+/*
+ Create partition names
+
+ SYNOPSIS
+ create_partition_name()
+ out:out Created partition name string
+ in1 First part
+ in2 Second part
+ name_variant Normal, temporary or renamed partition name
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+    This method is used to calculate the partition name, a service
+    routine for the del_ren_cre_table method.
+*/
+
+void create_partition_name(char *out, const char *in1,
+ const char *in2, uint name_variant,
+ bool translate)
+{
+ char transl_part_name[FN_REFLEN];
+ const char *transl_part;
+
+ if (translate)
+ {
+ tablename_to_filename(in2, transl_part_name, FN_REFLEN);
+ transl_part= transl_part_name;
+ }
+ else
+ transl_part= in2;
+ if (name_variant == NORMAL_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part, NullS);
+ else if (name_variant == TEMP_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part, "#TMP#", NullS);
+ else if (name_variant == RENAMED_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part, "#REN#", NullS);
+}
+
+
+/*
+ Create subpartition name
+
+ SYNOPSIS
+ create_subpartition_name()
+ out:out Created partition name string
+ in1 First part
+ in2 Second part
+ in3 Third part
+ name_variant Normal, temporary or renamed partition name
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+    This method is used to calculate the subpartition name, a service
+    routine for the del_ren_cre_table method.
+*/
+
+void create_subpartition_name(char *out, const char *in1,
+ const char *in2, const char *in3,
+ uint name_variant)
+{
+ char transl_part_name[FN_REFLEN], transl_subpart_name[FN_REFLEN];
+
+ tablename_to_filename(in2, transl_part_name, FN_REFLEN);
+ tablename_to_filename(in3, transl_subpart_name, FN_REFLEN);
+ if (name_variant == NORMAL_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part_name,
+ "#SP#", transl_subpart_name, NullS);
+ else if (name_variant == TEMP_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part_name,
+ "#SP#", transl_subpart_name, "#TMP#", NullS);
+ else if (name_variant == RENAMED_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part_name,
+ "#SP#", transl_subpart_name, "#REN#", NullS);
+}
#endif
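For reference, the naming scheme the two helpers above implement, as a sketch (the path and partition names are made up):

    char name[FN_REFLEN];
    create_partition_name(name, "./test/t1", "p0", TEMP_PART_NAME, TRUE);
    /* name now holds "./test/t1#P#p0#TMP#" */
    create_subpartition_name(name, "./test/t1", "p0", "sp1", NORMAL_PART_NAME);
    /* name now holds "./test/t1#P#p0#SP#sp1" */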
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index ced15b3f728..e0cf9095a22 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1848,10 +1848,13 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length)
if (! (stmt= new Prepared_statement(thd, &thd->protocol_prep)))
DBUG_VOID_RETURN; /* out of memory: error is set in Sql_alloc */
- if (thd->stmt_map.insert(stmt))
+ if (thd->stmt_map.insert(thd, stmt))
{
- delete stmt;
- DBUG_VOID_RETURN; /* out of memory */
+ /*
+ The error is set in the insert. The statement itself
+ will be also deleted there (this is how the hash works).
+ */
+ DBUG_VOID_RETURN;
}
/* Reset warnings from previous command */
@@ -2028,11 +2031,17 @@ void mysql_sql_stmt_prepare(THD *thd)
DBUG_VOID_RETURN; /* out of memory */
}
- if (stmt->set_name(name) || thd->stmt_map.insert(stmt))
+ /* Set the name first, insert should know that this statement has a name */
+ if (stmt->set_name(name))
{
delete stmt;
DBUG_VOID_RETURN;
}
+ if (thd->stmt_map.insert(thd, stmt))
+ {
+ /* The statement is deleted and an error is set if insert fails */
+ DBUG_VOID_RETURN;
+ }
if (stmt->prepare(query, query_len+1))
{
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 6ec010b8a44..918c9f507e2 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -694,7 +694,7 @@ impossible position";
if (loop_breaker)
break;
-
+
end_io_cache(&log);
(void) my_close(file, MYF(MY_WME));
@@ -834,7 +834,7 @@ int start_slave(THD* thd , MASTER_INFO* mi, bool net_report)
/* Issuing warning then started without --skip-slave-start */
if (!opt_skip_slave_start)
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
- ER_MISSING_SKIP_SLAVE,
+ ER_MISSING_SKIP_SLAVE,
ER(ER_MISSING_SKIP_SLAVE));
}
@@ -860,7 +860,7 @@ int start_slave(THD* thd , MASTER_INFO* mi, bool net_report)
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SLAVE_WAS_RUNNING,
ER(ER_SLAVE_WAS_RUNNING));
}
-
+
unlock_slave_threads(mi);
if (slave_errno)
@@ -1023,7 +1023,7 @@ err:
slave_server_id the slave's server id
*/
-
+
void kill_zombie_dump_threads(uint32 slave_server_id)
{
@@ -1088,9 +1088,9 @@ bool change_master(THD* thd, MASTER_INFO* mi)
*/
/*
- If the user specified host or port without binlog or position,
+ If the user specified host or port without binlog or position,
reset binlog's name to FIRST and position to 4.
- */
+ */
if ((lex_mi->host || lex_mi->port) && !lex_mi->log_file_name && !lex_mi->pos)
{
@@ -1117,7 +1117,7 @@ bool change_master(THD* thd, MASTER_INFO* mi)
mi->port = lex_mi->port;
if (lex_mi->connect_retry)
mi->connect_retry = lex_mi->connect_retry;
-
+
if (lex_mi->ssl != LEX_MASTER_INFO::SSL_UNCHANGED)
mi->ssl= (lex_mi->ssl == LEX_MASTER_INFO::SSL_ENABLE);
if (lex_mi->ssl_ca)
@@ -1133,7 +1133,7 @@ bool change_master(THD* thd, MASTER_INFO* mi)
#ifndef HAVE_OPENSSL
if (lex_mi->ssl || lex_mi->ssl_ca || lex_mi->ssl_capath ||
lex_mi->ssl_cert || lex_mi->ssl_cipher || lex_mi->ssl_key )
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
ER_SLAVE_IGNORED_SSL_PARAMS, ER(ER_SLAVE_IGNORED_SSL_PARAMS));
#endif
@@ -1500,7 +1500,7 @@ bool show_binlogs(THD* thd)
}
field_list.push_back(new Item_empty_string("Log_name", 255));
- field_list.push_back(new Item_return_int("File_size", 20,
+ field_list.push_back(new Item_return_int("File_size", 20,
MYSQL_TYPE_LONGLONG));
if (protocol->send_fields(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 6cc2ad266e5..ca13fb27f96 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -248,6 +248,904 @@ static int mysql_copy_key_list(List<Key> *orig_key,
DBUG_RETURN(FALSE);
}
+/*
+--------------------------------------------------------------------------
+
+ MODULE: DDL log
+ -----------------
+
+ This module is used to ensure that we can recover from crashes that occur
+ in the middle of a meta-data operation in MySQL. E.g. DROP TABLE t1, t2;
+  We need to ensure that both t1 and t2 are dropped, not only t1, and
+  also that each table drop is done entirely and not left "half-baked".
+
+ To support this we create log entries for each meta-data statement in the
+ ddl log while we are executing. These entries are dropped when the
+ operation is completed.
+
+ At recovery those entries that were not completed will be executed.
+
+ There is only one ddl log in the system and it is protected by a mutex
+ and there is a global struct that contains information about its current
+ state.
+
+ History:
+ First version written in 2006 by Mikael Ronstrom
+--------------------------------------------------------------------------
+*/
+
+
+typedef struct st_global_ddl_log
+{
+ /*
+ We need to adjust buffer size to be able to handle downgrades/upgrades
+ where IO_SIZE has changed. We'll set the buffer size such that we can
+    where IO_SIZE has changed. We'll set the buffer size such that we can
+    handle a buffer size up to 4 times bigger in the version
+ */
+ char file_entry_buf[4*IO_SIZE];
+ char file_name_str[FN_REFLEN];
+ char *file_name;
+ DDL_LOG_MEMORY_ENTRY *first_free;
+ DDL_LOG_MEMORY_ENTRY *first_used;
+ uint num_entries;
+ File file_id;
+ uint name_len;
+ uint io_size;
+ bool inited;
+ bool recovery_phase;
+} GLOBAL_DDL_LOG;
+
+GLOBAL_DDL_LOG global_ddl_log;
+
+pthread_mutex_t LOCK_gdl;
+
+#define DDL_LOG_ENTRY_TYPE_POS 0
+#define DDL_LOG_ACTION_TYPE_POS 1
+#define DDL_LOG_PHASE_POS 2
+#define DDL_LOG_NEXT_ENTRY_POS 4
+#define DDL_LOG_NAME_POS 8
+
+#define DDL_LOG_NUM_ENTRY_POS 0
+#define DDL_LOG_NAME_LEN_POS 4
+#define DDL_LOG_IO_SIZE_POS 8
+
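Pieced together from the offsets above and read_ddl_log_entry() further down, each log-entry block on disk looks as follows (a reading aid, not part of the patch):

    offset 0   entry type          (1 byte)
    offset 1   action type         (1 byte)
    offset 2   phase               (1 byte; byte 3 unused)
    offset 4   next entry number   (4 bytes)
    offset 8   name, from_name and handler_name, three consecutive
               fields of name_len bytes each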
+/*
+ Read one entry from ddl log file
+ SYNOPSIS
+ read_ddl_log_file_entry()
+ entry_no Entry number to read
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+static bool read_ddl_log_file_entry(uint entry_no)
+{
+ bool error= FALSE;
+ File file_id= global_ddl_log.file_id;
+ char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ uint io_size= global_ddl_log.io_size;
+ DBUG_ENTER("read_ddl_log_file_entry");
+
+ if (my_pread(file_id, (byte*)file_entry_buf, io_size, io_size * entry_no,
+ MYF(MY_WME)) != io_size)
+ error= TRUE;
+ DBUG_RETURN(error);
+}
+
+
+/*
+  Write one entry to the ddl log file
+  SYNOPSIS
+    write_ddl_log_file_entry()
+    entry_no                 Entry number to write
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+static bool write_ddl_log_file_entry(uint entry_no)
+{
+ bool error= FALSE;
+ File file_id= global_ddl_log.file_id;
+ char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ DBUG_ENTER("write_ddl_log_file_entry");
+
+ if (my_pwrite(file_id, (byte*)file_entry_buf,
+ IO_SIZE, IO_SIZE * entry_no, MYF(MY_WME)) != IO_SIZE)
+ error= TRUE;
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Write ddl log header
+ SYNOPSIS
+ write_ddl_log_header()
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+static bool write_ddl_log_header()
+{
+ uint16 const_var;
+ bool error= FALSE;
+ DBUG_ENTER("write_ddl_log_header");
+
+ int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NUM_ENTRY_POS],
+ global_ddl_log.num_entries);
+ const_var= FN_LEN;
+ int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_LEN_POS],
+ const_var);
+ const_var= IO_SIZE;
+ int4store(&global_ddl_log.file_entry_buf[DDL_LOG_IO_SIZE_POS],
+ const_var);
+ if (write_ddl_log_file_entry(0UL))
+ {
+ sql_print_error("Error writing ddl log header");
+ DBUG_RETURN(TRUE);
+ }
+ VOID(sync_ddl_log());
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Create ddl log file name
+ SYNOPSIS
+ create_ddl_log_file_name()
+ file_name Filename setup
+ RETURN VALUES
+ NONE
+*/
+
+static inline void create_ddl_log_file_name(char *file_name)
+{
+ strxmov(file_name, mysql_data_home, "/", "ddl_log.log", NullS);
+}
+
+
+/*
+ Read header of ddl log file
+ SYNOPSIS
+ read_ddl_log_header()
+ RETURN VALUES
+ > 0 Last entry in ddl log
+ 0 No entries in ddl log
+ DESCRIPTION
+ When we read the ddl log header we get information about maximum sizes
+ of names in the ddl log and we also get information about the number
+ of entries in the ddl log.
+*/
+
+static uint read_ddl_log_header()
+{
+ char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ char file_name[FN_REFLEN];
+ uint entry_no;
+ bool successful_open= FALSE;
+ DBUG_ENTER("read_ddl_log_header");
+
+ bzero(file_entry_buf, sizeof(global_ddl_log.file_entry_buf));
+ global_ddl_log.inited= FALSE;
+ global_ddl_log.recovery_phase= TRUE;
+ global_ddl_log.io_size= IO_SIZE;
+ create_ddl_log_file_name(file_name);
+ if ((global_ddl_log.file_id= my_open(file_name,
+ O_RDWR | O_BINARY, MYF(MY_WME))) >= 0)
+ {
+ if (read_ddl_log_file_entry(0UL))
+ {
+ /* Write message into error log */
+ sql_print_error("Failed to read ddl log file in recovery");
+ }
+ else
+ successful_open= TRUE;
+ }
+ entry_no= uint4korr(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS]);
+ global_ddl_log.name_len= uint4korr(&file_entry_buf[DDL_LOG_NAME_LEN_POS]);
+ if (successful_open)
+ {
+ global_ddl_log.io_size= uint4korr(&file_entry_buf[DDL_LOG_IO_SIZE_POS]);
+ DBUG_ASSERT(global_ddl_log.io_size <=
+ sizeof(global_ddl_log.file_entry_buf));
+ }
+ else
+ {
+ entry_no= 0;
+ }
+ global_ddl_log.first_free= NULL;
+ global_ddl_log.first_used= NULL;
+ global_ddl_log.num_entries= 0;
+ VOID(pthread_mutex_init(&LOCK_gdl, MY_MUTEX_INIT_FAST));
+ DBUG_RETURN(entry_no);
+}
+
+
+/*
+ Read a ddl log entry
+ SYNOPSIS
+ read_ddl_log_entry()
+ read_entry Number of entry to read
+ out:ddl_log_entry Information from entry
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ Read a specified entry in the ddl log
+*/
+
+bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry)
+{
+ char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ uint inx;
+ uchar single_char;
+ DBUG_ENTER("read_ddl_log_entry");
+
+ if (read_ddl_log_file_entry(read_entry))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ ddl_log_entry->entry_pos= read_entry;
+ single_char= file_entry_buf[DDL_LOG_ENTRY_TYPE_POS];
+ ddl_log_entry->entry_type= (enum ddl_log_entry_code)single_char;
+ single_char= file_entry_buf[DDL_LOG_ACTION_TYPE_POS];
+ ddl_log_entry->action_type= (enum ddl_log_action_code)single_char;
+ ddl_log_entry->phase= file_entry_buf[DDL_LOG_PHASE_POS];
+ ddl_log_entry->next_entry= uint4korr(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS]);
+ ddl_log_entry->name= &file_entry_buf[DDL_LOG_NAME_POS];
+ inx= DDL_LOG_NAME_POS + global_ddl_log.name_len;
+ ddl_log_entry->from_name= &file_entry_buf[inx];
+ inx+= global_ddl_log.name_len;
+ ddl_log_entry->handler_name= &file_entry_buf[inx];
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Initialise ddl log
+ SYNOPSIS
+ init_ddl_log()
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ Create the ddl log file and write its header, which records the
+ maximum name length and the io size. Also set the number of entries
+ to zero.
+*/
+
+static bool init_ddl_log()
+{
+ bool error= FALSE;
+ char file_name[FN_REFLEN];
+ DBUG_ENTER("init_ddl_log");
+
+ if (global_ddl_log.inited)
+ {
+ DBUG_RETURN(FALSE);
+ }
+ global_ddl_log.io_size= IO_SIZE;
+ create_ddl_log_file_name(file_name);
+ if ((global_ddl_log.file_id= my_create(file_name,
+ CREATE_MODE,
+ O_RDWR | O_TRUNC | O_BINARY,
+ MYF(MY_WME))) < 0)
+ {
+ /* Couldn't create ddl log file, this is a serious error */
+ sql_print_error("Failed to create ddl log file");
+ DBUG_RETURN(TRUE);
+ }
+ global_ddl_log.inited= TRUE;
+ if (write_ddl_log_header())
+ {
+ global_ddl_log.inited= FALSE;
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Execute one action in a ddl log entry
+ SYNOPSIS
+ execute_ddl_log_action()
+ ddl_log_entry Information in action entry to execute
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
+{
+ bool frm_action= FALSE;
+ LEX_STRING handler_name;
+ handler *file= NULL;
+ MEM_ROOT mem_root;
+ int error= TRUE;
+ char to_path[FN_REFLEN];
+ char from_path[FN_REFLEN];
+ char *par_ext= (char*)".par";
+ handlerton *hton;
+ DBUG_ENTER("execute_ddl_log_action");
+
+ if (ddl_log_entry->entry_type == DDL_IGNORE_LOG_ENTRY_CODE)
+ {
+ DBUG_RETURN(FALSE);
+ }
+ handler_name.str= (char*)ddl_log_entry->handler_name;
+ handler_name.length= strlen(ddl_log_entry->handler_name);
+ init_sql_alloc(&mem_root, TABLE_ALLOC_BLOCK_SIZE, 0);
+ if (!strcmp(ddl_log_entry->handler_name, reg_ext))
+ frm_action= TRUE;
+ else
+ {
+ TABLE_SHARE dummy;
+
+ hton= ha_resolve_by_name(thd, &handler_name);
+ if (!hton)
+ {
+ my_error(ER_ILLEGAL_HA, MYF(0), ddl_log_entry->handler_name);
+ goto error;
+ }
+ bzero(&dummy, sizeof(TABLE_SHARE));
+ file= get_new_handler(&dummy, &mem_root, hton);
+ if (!file)
+ {
+ mem_alloc_error(sizeof(handler));
+ goto error;
+ }
+ }
+ switch (ddl_log_entry->action_type)
+ {
+ case DDL_LOG_REPLACE_ACTION:
+ case DDL_LOG_DELETE_ACTION:
+ {
+ if (ddl_log_entry->phase == 0)
+ {
+ if (frm_action)
+ {
+ strxmov(to_path, ddl_log_entry->name, reg_ext, NullS);
+ if ((error= my_delete(to_path, MYF(MY_WME))))
+ {
+ if (my_errno != ENOENT)
+ break;
+ }
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ strxmov(to_path, ddl_log_entry->name, par_ext, NullS);
+ VOID(my_delete(to_path, MYF(MY_WME)));
+#endif
+ }
+ else
+ {
+ if ((error= file->delete_table(ddl_log_entry->name)))
+ {
+ if (error != ENOENT && error != HA_ERR_NO_SUCH_TABLE)
+ break;
+ }
+ }
+ if ((deactivate_ddl_log_entry(ddl_log_entry->entry_pos)))
+ break;
+ VOID(sync_ddl_log());
+ error= FALSE;
+ if (ddl_log_entry->action_type == DDL_LOG_DELETE_ACTION)
+ break;
+ }
+ DBUG_ASSERT(ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION);
+ /*
+ Fall through and perform the rename action of the replace
+ action. We have already indicated the success of the delete
+ action in the log entry by stepping up the phase.
+ */
+ }
+ case DDL_LOG_RENAME_ACTION:
+ {
+ error= TRUE;
+ if (frm_action)
+ {
+ strxmov(to_path, ddl_log_entry->name, reg_ext, NullS);
+ strxmov(from_path, ddl_log_entry->from_name, reg_ext, NullS);
+ if (my_rename(from_path, to_path, MYF(MY_WME)))
+ break;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ strxmov(to_path, ddl_log_entry->name, par_ext, NullS);
+ strxmov(from_path, ddl_log_entry->from_name, par_ext, NullS);
+ VOID(my_rename(from_path, to_path, MYF(MY_WME)));
+#endif
+ }
+ else
+ {
+ if (file->rename_table(ddl_log_entry->from_name,
+ ddl_log_entry->name))
+ break;
+ }
+ if ((deactivate_ddl_log_entry(ddl_log_entry->entry_pos)))
+ break;
+ VOID(sync_ddl_log());
+ error= FALSE;
+ break;
+ }
+ default:
+ DBUG_ASSERT(0);
+ break;
+ }
+ delete file;
+error:
+ free_root(&mem_root, MYF(0));
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Get a free entry in the ddl log
+ SYNOPSIS
+ get_free_ddl_log_entry()
+ out:active_entry A ddl log memory entry returned
+ out:write_header TRUE if a new entry was allocated and the
+ header must be rewritten
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry,
+ bool *write_header)
+{
+ DDL_LOG_MEMORY_ENTRY *used_entry;
+ DDL_LOG_MEMORY_ENTRY *first_used= global_ddl_log.first_used;
+ DBUG_ENTER("get_free_ddl_log_entry");
+
+ if (global_ddl_log.first_free == NULL)
+ {
+ if (!(used_entry= (DDL_LOG_MEMORY_ENTRY*)my_malloc(
+ sizeof(DDL_LOG_MEMORY_ENTRY), MYF(MY_WME))))
+ {
+ sql_print_error("Failed to allocate memory for ddl log free list");
+ DBUG_RETURN(TRUE);
+ }
+ global_ddl_log.num_entries++;
+ used_entry->entry_pos= global_ddl_log.num_entries;
+ *write_header= TRUE;
+ }
+ else
+ {
+ used_entry= global_ddl_log.first_free;
+ global_ddl_log.first_free= used_entry->next_log_entry;
+ *write_header= FALSE;
+ }
+ /*
+ Move from free list to used list
+ */
+ used_entry->next_log_entry= first_used;
+ used_entry->prev_log_entry= NULL;
+ global_ddl_log.first_used= used_entry;
+ if (first_used)
+ first_used->prev_log_entry= used_entry;
+
+ *active_entry= used_entry;
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ External interface methods for the DDL log Module
+ ---------------------------------------------------
+*/
+
+/*
+ Write a ddl log entry
+ SYNOPSIS
+ write_ddl_log_entry()
+ ddl_log_entry Information about log entry
+ out:active_entry Memory entry describing where the entry was written
+
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+
+ DESCRIPTION
+ A careful write of the ddl log is performed to ensure that we can
+ handle crashes occurring during CREATE and ALTER TABLE processing.
+*/
+
+bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
+ DDL_LOG_MEMORY_ENTRY **active_entry)
+{
+ bool error, write_header;
+ DBUG_ENTER("write_ddl_log_entry");
+
+ if (init_ddl_log())
+ {
+ DBUG_RETURN(TRUE);
+ }
+ global_ddl_log.file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]=
+ (char)DDL_LOG_ENTRY_CODE;
+ global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS]=
+ (char)ddl_log_entry->action_type;
+ global_ddl_log.file_entry_buf[DDL_LOG_PHASE_POS]= 0;
+ int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NEXT_ENTRY_POS],
+ ddl_log_entry->next_entry);
+ DBUG_ASSERT(strlen(ddl_log_entry->name) < FN_LEN);
+ strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS],
+ ddl_log_entry->name, FN_LEN - 1);
+ if (ddl_log_entry->action_type == DDL_LOG_RENAME_ACTION ||
+ ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION)
+ {
+ DBUG_ASSERT(strlen(ddl_log_entry->from_name) < FN_LEN);
+ strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN],
+ ddl_log_entry->from_name, FN_LEN - 1);
+ }
+ else
+ global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0;
+ DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < FN_LEN);
+ strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (2*FN_LEN)],
+ ddl_log_entry->handler_name, FN_LEN - 1);
+ if (get_free_ddl_log_entry(active_entry, &write_header))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ error= FALSE;
+ if (write_ddl_log_file_entry((*active_entry)->entry_pos))
+ {
+ error= TRUE;
+ sql_print_error("Failed to write entry_no = %u",
+ (*active_entry)->entry_pos);
+ }
+ if (write_header && !error)
+ {
+ VOID(sync_ddl_log());
+ if (write_ddl_log_header())
+ error= TRUE;
+ }
+ if (error)
+ release_ddl_log_memory_entry(*active_entry);
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Write final entry in the ddl log
+ SYNOPSIS
+ write_execute_ddl_log_entry()
+ first_entry First entry in linked list of entries
+ to execute. If 0 (= NULL) the entry is
+ removed and the entries are put into
+ the free list.
+ complete Flag indicating that we are merely
+ recording that the entry has been
+ completed
+ in:out:active_entry Entry to execute. 0 (= NULL) if the
+ entry is written for the first time and
+ needs to be returned; in that case the
+ written entry is returned in this
+ parameter
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+
+ DESCRIPTION
+ This is the last write in the ddl log. The previous log entries have
+ already been written but not yet synched to disk.
+ We write a number of log entries that describe the actions to
+ perform. These entries are set up in a linked list, but they are
+ only executed once an execute entry referring to the first of them
+ has been written. This routine writes that execute entry.
+*/
+
+bool write_execute_ddl_log_entry(uint first_entry,
+ bool complete,
+ DDL_LOG_MEMORY_ENTRY **active_entry)
+{
+ bool write_header= FALSE;
+ char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ DBUG_ENTER("write_execute_ddl_log_entry");
+
+ if (init_ddl_log())
+ {
+ DBUG_RETURN(TRUE);
+ }
+ if (!complete)
+ {
+ /*
+ We haven't synched the log entries yet, so we synch them now before
+ writing the execute entry. If complete is true we haven't written
+ any new log entries in this call; we are only here to overwrite the
+ execute entry to indicate that the work is done.
+ */
+ VOID(sync_ddl_log());
+ file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_LOG_EXECUTE_CODE;
+ }
+ else
+ file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_IGNORE_LOG_ENTRY_CODE;
+ file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= 0; /* Ignored for execute entries */
+ file_entry_buf[DDL_LOG_PHASE_POS]= 0;
+ int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], first_entry);
+ file_entry_buf[DDL_LOG_NAME_POS]= 0;
+ file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0;
+ file_entry_buf[DDL_LOG_NAME_POS + 2*FN_LEN]= 0;
+ if (!(*active_entry))
+ {
+ if (get_free_ddl_log_entry(active_entry, &write_header))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+ if (write_ddl_log_file_entry((*active_entry)->entry_pos))
+ {
+ sql_print_error("Error writing execute entry in ddl log");
+ release_ddl_log_memory_entry(*active_entry);
+ DBUG_RETURN(TRUE);
+ }
+ VOID(sync_ddl_log());
+ if (write_header)
+ {
+ if (write_ddl_log_header())
+ {
+ release_ddl_log_memory_entry(*active_entry);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
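+
+/*
+  Usage sketch (illustration only, not server code): a caller that wants
+  a crash-safe "drop y, then rename x to y" would do roughly the
+  following. Error handling is omitted, the table names are invented,
+  and callers typically hold LOCK_gdl around these calls.
+
+    DDL_LOG_ENTRY entry;
+    DDL_LOG_MEMORY_ENTRY *log_entry= NULL, *exec_entry= NULL;
+
+    entry.action_type=  DDL_LOG_REPLACE_ACTION;
+    entry.next_entry=   0;                       /* end of chain */
+    entry.name=         "./test/t1";
+    entry.from_name=    "./test/t1#temp";
+    entry.handler_name= reg_ext;                 /* act on frm files */
+    (void) write_ddl_log_entry(&entry, &log_entry);
+
+    /* Make the chain executable; the entries are synched inside. */
+    (void) write_execute_ddl_log_entry(log_entry->entry_pos, FALSE,
+                                       &exec_entry);
+    /* Perform the actions now; after a crash, recovery replays them. */
+    (void) execute_ddl_log_entry(current_thd, log_entry->entry_pos);
+    /* Finally mark the execute entry as done. */
+    (void) write_execute_ddl_log_entry(0, TRUE, &exec_entry);
+*/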
+
+
+/*
+ For complex rename operations we need to deactivate individual entries.
+ SYNOPSIS
+ deactivate_ddl_log_entry()
+ entry_no Entry position of record to change
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ Consider a replace operation where we start with an existing table
+ t1 and a replacement table t1#temp, and where we want to delete t1
+ and rename t1#temp to t1. This cannot be done safely unless the ddl
+ log is informed of the phases of the change.
+
+ A delete action is a 1-phase action that can be ignored immediately
+ after being executed.
+ A rename action from x to y is also a 1-phase action since there is
+ no interaction with any other handlers named x and y.
+ A replace action, where y is dropped and x is renamed to y, needs to
+ be a two-phase action: the first phase drops y and the second phase
+ renames x to y.
+
+bool deactivate_ddl_log_entry(uint entry_no)
+{
+ char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ DBUG_ENTER("deactivate_ddl_log_entry");
+
+ if (!read_ddl_log_file_entry(entry_no))
+ {
+ if (file_entry_buf[DDL_LOG_ENTRY_TYPE_POS] == DDL_LOG_ENTRY_CODE)
+ {
+ if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_DELETE_ACTION ||
+ file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_RENAME_ACTION ||
+ (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION &&
+ file_entry_buf[DDL_LOG_PHASE_POS] == 1))
+ file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= DDL_IGNORE_LOG_ENTRY_CODE;
+ else if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION)
+ {
+ DBUG_ASSERT(file_entry_buf[DDL_LOG_PHASE_POS] == 0);
+ file_entry_buf[DDL_LOG_PHASE_POS]= 1;
+ }
+ else
+ {
+ DBUG_ASSERT(0);
+ }
+ if (write_ddl_log_file_entry(entry_no))
+ {
+ sql_print_error("Error in deactivating log entry. Position = %u",
+ entry_no);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+ else
+ {
+ sql_print_error("Failed in reading entry before deactivating it");
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+}
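+
+/*
+  Worked example (illustration only): the life of a replace entry on
+  disk across calls to deactivate_ddl_log_entry().
+
+    entry state on disk                   after deactivation call
+    ---------------------------------    ------------------------------
+    REPLACE action, phase 0 (drop y)     phase stepped to 1
+    REPLACE action, phase 1 (x -> y)     entry type set to IGNORE
+    DELETE or RENAME action              entry type set to IGNORE
+
+  Each transition is made durable by the sync_ddl_log() call that the
+  callers issue after deactivation.
+*/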
+
+
+/*
+ Sync ddl log file
+ SYNOPSIS
+ sync_ddl_log()
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+bool sync_ddl_log()
+{
+ bool error= FALSE;
+ DBUG_ENTER("sync_ddl_log");
+
+ if ((!global_ddl_log.recovery_phase) &&
+ init_ddl_log())
+ {
+ DBUG_RETURN(TRUE);
+ }
+ if (my_sync(global_ddl_log.file_id, MYF(0)))
+ {
+ /* Write to error log */
+ sql_print_error("Failed to sync ddl log");
+ error= TRUE;
+ }
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Release a log memory entry
+ SYNOPSIS
+ release_ddl_log_memory_entry()
+ log_memory_entry Log memory entry to release
+ RETURN VALUES
+ NONE
+*/
+
+void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry)
+{
+ DDL_LOG_MEMORY_ENTRY *first_free= global_ddl_log.first_free;
+ DDL_LOG_MEMORY_ENTRY *next_log_entry= log_entry->next_log_entry;
+ DDL_LOG_MEMORY_ENTRY *prev_log_entry= log_entry->prev_log_entry;
+ DBUG_ENTER("release_ddl_log_memory_entry");
+
+ global_ddl_log.first_free= log_entry;
+ log_entry->next_log_entry= first_free;
+
+ if (prev_log_entry)
+ prev_log_entry->next_log_entry= next_log_entry;
+ else
+ global_ddl_log.first_used= next_log_entry;
+ if (next_log_entry)
+ next_log_entry->prev_log_entry= prev_log_entry;
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Execute one entry in the ddl log. Executing an entry means executing
+ a linked list of actions.
+ SYNOPSIS
+ execute_ddl_log_entry()
+ first_entry Reference to first action in entry
+ RETURN VALUES
+ FALSE Success (a failure to read or execute an entry is
+ logged and the rest of the chain is skipped)
+*/
+
+bool execute_ddl_log_entry(THD *thd, uint first_entry)
+{
+ DDL_LOG_ENTRY ddl_log_entry;
+ uint read_entry= first_entry;
+ DBUG_ENTER("execute_ddl_log_entry");
+
+ pthread_mutex_lock(&LOCK_gdl);
+ do
+ {
+ if (read_ddl_log_entry(read_entry, &ddl_log_entry))
+ {
+ /* Write to error log and continue with next log entry */
+ sql_print_error("Failed to read entry = %u from ddl log",
+ read_entry);
+ break;
+ }
+ DBUG_ASSERT(ddl_log_entry.entry_type == DDL_LOG_ENTRY_CODE ||
+ ddl_log_entry.entry_type == DDL_IGNORE_LOG_ENTRY_CODE);
+
+ if (execute_ddl_log_action(thd, &ddl_log_entry))
+ {
+ /* Write to error log and continue with next log entry */
+ sql_print_error("Failed to execute action for entry = %u from ddl log",
+ read_entry);
+ break;
+ }
+ read_entry= ddl_log_entry.next_entry;
+ } while (read_entry);
+ pthread_mutex_unlock(&LOCK_gdl);
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Execute the ddl log at recovery of MySQL Server
+ SYNOPSIS
+ execute_ddl_log_recovery()
+ RETURN VALUES
+ NONE
+*/
+
+void execute_ddl_log_recovery()
+{
+ uint num_entries, i;
+ THD *thd;
+ DDL_LOG_ENTRY ddl_log_entry;
+ char file_name[FN_REFLEN];
+ DBUG_ENTER("execute_ddl_log_recovery");
+
+ /*
+ To be able to run this from boot, we allocate a temporary THD
+ */
+ if (!(thd=new THD))
+ DBUG_VOID_RETURN;
+ thd->thread_stack= (char*) &thd;
+ thd->store_globals();
+
+ num_entries= read_ddl_log_header();
+ for (i= 1; i < num_entries + 1; i++)
+ {
+ if (read_ddl_log_entry(i, &ddl_log_entry))
+ {
+ sql_print_error("Failed to read entry no = %u from ddl log",
+ i);
+ continue;
+ }
+ if (ddl_log_entry.entry_type == DDL_LOG_EXECUTE_CODE)
+ {
+ if (execute_ddl_log_entry(thd, ddl_log_entry.next_entry))
+ {
+ /* A really unpleasant scenario, but we continue anyway. */
+ continue;
+ }
+ }
+ }
+ create_ddl_log_file_name(file_name);
+ VOID(my_delete(file_name, MYF(0)));
+ global_ddl_log.recovery_phase= FALSE;
+ delete thd;
+ /* Remember that we don't have a THD */
+ my_pthread_setspecific_ptr(THR_THD, 0);
+ DBUG_VOID_RETURN;
+}
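+
+/*
+  Recovery walk-through (illustration only, entry numbers invented):
+  given a log containing
+
+    entry 1: EXECUTE, next_entry= 2
+    entry 2: RENAME t1#temp -> t1, next_entry= 0
+
+  the loop above finds the execute entry at position 1 and replays the
+  chain starting at entry 2, after which the whole log file is deleted.
+*/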
+
+
+/*
+ Release all memory allocated to the ddl log
+ SYNOPSIS
+ release_ddl_log()
+ RETURN VALUES
+ NONE
+*/
+
+void release_ddl_log()
+{
+ DDL_LOG_MEMORY_ENTRY *free_list= global_ddl_log.first_free;
+ DDL_LOG_MEMORY_ENTRY *used_list= global_ddl_log.first_used;
+ DBUG_ENTER("release_ddl_log");
+
+ pthread_mutex_lock(&LOCK_gdl);
+ while (used_list)
+ {
+ DDL_LOG_MEMORY_ENTRY *tmp= used_list->next_log_entry;
+ my_free((char*)used_list, MYF(0));
+ used_list= tmp;
+ }
+ while (free_list)
+ {
+ DDL_LOG_MEMORY_ENTRY *tmp= free_list->next_log_entry;
+ my_free((char*)free_list, MYF(0));
+ free_list= tmp;
+ }
+ VOID(my_close(global_ddl_log.file_id, MYF(0)));
+ pthread_mutex_unlock(&LOCK_gdl);
+ VOID(pthread_mutex_destroy(&LOCK_gdl));
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+---------------------------------------------------------------------------
+
+ END MODULE DDL log
+ --------------------
+
+---------------------------------------------------------------------------
+*/
+
/*
SYNOPSIS
@@ -281,83 +1179,68 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
*/
int error= 0;
char path[FN_REFLEN+1];
+ char shadow_path[FN_REFLEN+1];
+ char shadow_frm_name[FN_REFLEN+1];
char frm_name[FN_REFLEN+1];
DBUG_ENTER("mysql_write_frm");
- if (flags & WFRM_INITIAL_WRITE)
+ /*
+ Build shadow frm file name
+ */
+ build_table_filename(shadow_path, sizeof(shadow_path), lpt->db,
+ lpt->table_name, "#");
+ strxmov(shadow_frm_name, shadow_path, reg_ext, NullS);
+ if (flags & WFRM_WRITE_SHADOW)
{
- error= mysql_copy_create_list(lpt->create_list,
- &lpt->new_create_list);
- error+= mysql_copy_key_list(lpt->key_list,
- &lpt->new_key_list);
- if (error)
+ if (mysql_copy_create_list(lpt->create_list,
+ &lpt->new_create_list) ||
+ mysql_copy_key_list(lpt->key_list,
+ &lpt->new_key_list) ||
+ mysql_prepare_table(lpt->thd, lpt->create_info,
+ &lpt->new_create_list,
+ &lpt->new_key_list,
+ /*tmp_table*/ 1,
+ &lpt->db_options,
+ lpt->table->file,
+ &lpt->key_info_buffer,
+ &lpt->key_count,
+ /*select_field_count*/ 0))
{
DBUG_RETURN(TRUE);
}
- }
- build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "");
- strxmov(frm_name, path, reg_ext, NullS);
- if ((flags & WFRM_INITIAL_WRITE) &&
- (mysql_prepare_table(lpt->thd, lpt->create_info, &lpt->new_create_list,
- &lpt->new_key_list,/*tmp_table*/ 1, &lpt->db_options,
- lpt->table->file, &lpt->key_info_buffer,
- &lpt->key_count, /*select_field_count*/ 0)))
- {
- DBUG_RETURN(TRUE);
- }
#ifdef WITH_PARTITION_STORAGE_ENGINE
- {
- partition_info *part_info= lpt->table->part_info;
- char *part_syntax_buf;
- uint syntax_len, i;
- bool any_unnormal_state= FALSE;
-
- if (part_info)
{
- uint max_part_state_len= part_info->partitions.elements +
- part_info->temp_partitions.elements;
- if (!(part_info->part_state= (uchar*)sql_alloc(max_part_state_len)))
- {
- DBUG_RETURN(TRUE);
- }
- part_info->part_state_len= 0;
- if (!(part_syntax_buf= generate_partition_syntax(part_info,
- &syntax_len,
- TRUE, FALSE)))
- {
- DBUG_RETURN(TRUE);
- }
- for (i= 0; i < part_info->part_state_len; i++)
- {
- enum partition_state part_state=
- (enum partition_state)part_info->part_state[i];
- if (part_state != PART_NORMAL && part_state != PART_IS_ADDED)
- any_unnormal_state= TRUE;
- }
- if (!any_unnormal_state)
+ partition_info *part_info= lpt->table->part_info;
+ char *part_syntax_buf;
+ uint syntax_len;
+
+ if (part_info)
{
- part_info->part_state= NULL;
- part_info->part_state_len= 0;
+ if (!(part_syntax_buf= generate_partition_syntax(part_info,
+ &syntax_len,
+ TRUE, FALSE)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ part_info->part_info_string= part_syntax_buf;
+ part_info->part_info_len= syntax_len;
}
- part_info->part_info_string= part_syntax_buf;
- part_info->part_info_len= syntax_len;
}
- }
#endif
- /*
- We write the frm file with the LOCK_open mutex since otherwise we could
- overwrite the frm file as another is reading it in open_table.
- */
- lpt->create_info->table_options= lpt->db_options;
- VOID(pthread_mutex_lock(&LOCK_open));
- if ((mysql_create_frm(lpt->thd, frm_name, lpt->db, lpt->table_name,
- lpt->create_info, lpt->new_create_list, lpt->key_count,
- lpt->key_info_buffer, lpt->table->file)) ||
- ((flags & WFRM_CREATE_HANDLER_FILES) &&
- lpt->table->file->create_handler_files(path, lpt->create_info)))
- {
- error= 1;
- goto end;
+ /* Write shadow frm file */
+ lpt->create_info->table_options= lpt->db_options;
+ if ((mysql_create_frm(lpt->thd, shadow_frm_name, lpt->db,
+ lpt->table_name, lpt->create_info,
+ lpt->new_create_list, lpt->key_count,
+ lpt->key_info_buffer, lpt->table->file)) ||
+ lpt->table->file->create_handler_files(shadow_path, NULL,
+ CHF_CREATE_FLAG,
+ lpt->create_info))
+ {
+ my_delete(shadow_frm_name, MYF(0));
+ error= 1;
+ goto end;
+ }
}
if (flags & WFRM_PACK_FRM)
{
@@ -369,7 +1252,7 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
*/
const void *data= 0;
uint length= 0;
- if (readfrm(path, &data, &length) ||
+ if (readfrm(shadow_path, &data, &length) ||
packfrm(data, length, &lpt->pack_frm_data, &lpt->pack_frm_len))
{
my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
@@ -378,11 +1261,56 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
error= 1;
goto end;
}
- error= my_delete(frm_name, MYF(MY_WME));
+ error= my_delete(shadow_frm_name, MYF(MY_WME));
}
- /* Frm file have been updated to reflect the change about to happen. */
+ if (flags & WFRM_INSTALL_SHADOW)
+ {
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ partition_info *part_info= lpt->part_info;
+#endif
+ /*
+ Build frm file name
+ */
+ build_table_filename(path, sizeof(path), lpt->db,
+ lpt->table_name, "");
+ strxmov(frm_name, path, reg_ext, NullS);
+ /*
+ When we change over to the new frm file we need to ensure that we
+ don't collide with another thread that is in the process of opening
+ the frm file. We start by deleting the .frm file and the possible
+ .par file. Then we record in the DDL log that we have completed the
+ delete phase by increasing the phase of the log entry. The next step
+ is to rename the new .frm file and the new .par file to the real
+ name. After completing this we write a new phase to the log entry
+ that deactivates it.
+ */
+ VOID(pthread_mutex_lock(&LOCK_open));
+ if (my_delete(frm_name, MYF(MY_WME)) ||
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ lpt->table->file->create_handler_files(path, shadow_path,
+ CHF_DELETE_FLAG, NULL) ||
+ deactivate_ddl_log_entry(part_info->frm_log_entry->entry_pos) ||
+ (sync_ddl_log(), FALSE) ||
+#endif
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ my_rename(shadow_frm_name, frm_name, MYF(MY_WME)) ||
+ lpt->table->file->create_handler_files(path, shadow_path,
+ CHF_RENAME_FLAG, NULL))
+#else
+ my_rename(shadow_frm_name, frm_name, MYF(MY_WME)))
+#endif
+ {
+ error= 1;
+ }
+ VOID(pthread_mutex_unlock(&LOCK_open));
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ deactivate_ddl_log_entry(part_info->frm_log_entry->entry_pos);
+ part_info->frm_log_entry= NULL;
+ VOID(sync_ddl_log());
+#endif
+ }
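+ /*
+ Worked example (illustration only, names invented): installing the
+ shadow frm for table test.t1 proceeds as
+
+ delete test/t1.frm (and t1.par) -> delete phase done
+ deactivate + sync -> a crash now redoes only the rename
+ rename test/t1#.frm to test/t1.frm (and the .par file)
+ deactivate + sync -> the log entry becomes a no-op
+ */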
+
end:
- VOID(pthread_mutex_unlock(&LOCK_open));
DBUG_RETURN(error);
}
@@ -4790,7 +5718,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
error= (mysql_create_frm(thd, reg_path, db, table_name,
create_info, prepared_create_list, key_count,
key_info_buffer, table->file) ||
- table->file->create_handler_files(path, create_info));
+ table->file->create_handler_files(reg_path, NULL, CHF_INDEX_FLAG,
+ create_info));
VOID(pthread_mutex_unlock(&LOCK_open));
if (error)
goto err;
@@ -4836,7 +5765,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
error= (mysql_create_frm(thd, reg_path, db, table_name,
create_info, prepared_create_list, key_count,
key_info_buffer, table->file) ||
- table->file->create_handler_files(path, create_info));
+ table->file->create_handler_files(reg_path, NULL, CHF_INDEX_FLAG,
+ create_info));
VOID(pthread_mutex_unlock(&LOCK_open));
if (error)
goto err;
@@ -5060,7 +5990,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
VOID(pthread_mutex_lock(&LOCK_open));
}
/* Tell the handler that a new frm file is in place. */
- if (table->file->create_handler_files(path, create_info))
+ if (table->file->create_handler_files(path, NULL, CHF_INDEX_FLAG,
+ create_info))
{
VOID(pthread_mutex_unlock(&LOCK_open));
goto err;
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 2f91472ad2d..23a74de7791 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -3558,75 +3558,14 @@ part_definition:
LEX *lex= Lex;
partition_info *part_info= lex->part_info;
partition_element *p_elem= new partition_element();
- uint part_id= part_info->partitions.elements +
- part_info->temp_partitions.elements;
- enum partition_state part_state;
+ uint part_id= part_info->partitions.elements;
- if (part_info->part_state)
- part_state= (enum partition_state)part_info->part_state[part_id];
- else
- part_state= PART_NORMAL;
- switch (part_state)
+ if (!p_elem || part_info->partitions.push_back(p_elem))
{
- case PART_TO_BE_DROPPED:
- /*
- This part is currently removed so we keep it in a
- temporary list for REPAIR TABLE to be able to handle
- failures during drop partition process.
- */
- case PART_TO_BE_ADDED:
- /*
- This part is currently being added so we keep it in a
- temporary list for REPAIR TABLE to be able to handle
- failures during add partition process.
- */
- if (!p_elem || part_info->temp_partitions.push_back(p_elem))
- {
- mem_alloc_error(sizeof(partition_element));
- YYABORT;
- }
- break;
- case PART_IS_ADDED:
- /*
- Part has been added and is now a normal partition
- */
- case PART_TO_BE_REORGED:
- /*
- This part is currently reorganised, it is still however
- used so we keep it in the list of partitions. We do
- however need the state to be able to handle REPAIR TABLE
- after failures in the reorganisation process.
- */
- case PART_REORGED_DROPPED:
- /*
- This part is currently reorganised as part of a
- COALESCE PARTITION and it will be dropped without a new
- replacement partition after completing the reorganisation.
- */
- case PART_CHANGED:
- /*
- This part is currently split or merged as part of ADD
- PARTITION for a hash partition or as part of COALESCE
- PARTITION for a hash partitioned table.
- */
- case PART_IS_CHANGED:
- /*
- This part has been split or merged as part of ADD
- PARTITION for a hash partition or as part of COALESCE
- PARTITION for a hash partitioned table.
- */
- case PART_NORMAL:
- if (!p_elem || part_info->partitions.push_back(p_elem))
- {
- mem_alloc_error(sizeof(partition_element));
- YYABORT;
- }
- break;
- default:
- mem_alloc_error((part_id * 1000) + part_state);
- YYABORT;
+ mem_alloc_error(sizeof(partition_element));
+ YYABORT;
}
- p_elem->part_state= part_state;
+ p_elem->part_state= PART_NORMAL;
part_info->curr_part_elem= p_elem;
part_info->current_partition= p_elem;
part_info->use_default_partitions= FALSE;
@@ -4801,7 +4740,7 @@ alter:
lex->sql_command= SQLCOM_CREATE_VIEW;
lex->create_view_mode= VIEW_ALTER;
/* first table in list is target VIEW name */
- lex->select_lex.add_table_to_list(thd, $6, NULL, 0);
+ lex->select_lex.add_table_to_list(thd, $6, NULL, TL_OPTION_UPDATING);
}
view_list_opt AS view_select view_check_option
{}
@@ -10904,7 +10843,7 @@ view_tail:
LEX *lex= thd->lex;
lex->sql_command= SQLCOM_CREATE_VIEW;
/* first table in list is target VIEW name */
- if (!lex->select_lex.add_table_to_list(thd, $3, NULL, 0))
+ if (!lex->select_lex.add_table_to_list(thd, $3, NULL, TL_OPTION_UPDATING))
YYABORT;
}
view_list_opt AS view_select view_check_option
diff --git a/sql/table.cc b/sql/table.cc
index 41621a19900..6ba66569f5c 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -667,36 +667,17 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
#endif
next_chunk+= 5 + partition_info_len;
}
- if (share->mysql_version > 50105 && next_chunk + 5 < buff_end)
+#if MYSQL_VERSION_ID < 50200
+ if (share->mysql_version >= 50106 && share->mysql_version <= 50109)
{
/*
- Partition state was introduced to support partition management in version 5.1.5
+ The partition state array was here in versions 5.1.6 to 5.1.9; this
+ code makes it possible to load tables from those versions in later
+ releases. It can most likely be removed at some point in time. It
+ will only be used for upgrades within the 5.1 series of versions;
+ an upgrade to 5.2 can only be done from newer 5.1 versions.
*/
- uint32 part_state_len= uint4korr(next_chunk);
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- if ((share->part_state_len= part_state_len))
- if (!(share->part_state=
- (uchar*) memdup_root(&share->mem_root, next_chunk + 4,
- part_state_len)))
- {
- my_free(buff, MYF(0));
- goto err;
- }
-#else
- if (part_state_len)
- {
- DBUG_PRINT("info", ("WITH_PARTITION_STORAGE_ENGINE is not defined"));
- my_free(buff, MYF(0));
- goto err;
- }
-#endif
- next_chunk+= 4 + part_state_len;
- }
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- else
- {
- share->part_state_len= 0;
- share->part_state= NULL;
+ next_chunk+= 4;
}
#endif
keyinfo= share->key_info;
diff --git a/sql/unireg.cc b/sql/unireg.cc
index bb197181e2a..bbb4d970d37 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -136,7 +136,6 @@ bool mysql_create_frm(THD *thd, const char *file_name,
if (part_info)
{
create_info->extra_size+= part_info->part_info_len;
- create_info->extra_size+= part_info->part_state_len;
}
#endif
@@ -209,12 +208,6 @@ bool mysql_create_frm(THD *thd, const char *file_name,
my_write(file, (const byte*)part_info->part_info_string,
part_info->part_info_len + 1, MYF_RW))
goto err;
- DBUG_PRINT("info", ("Part state len = %d", part_info->part_state_len));
- int4store(buff, part_info->part_state_len);
- if (my_write(file, (const byte*)buff, 4, MYF_RW) ||
- my_write(file, (const byte*)part_info->part_state,
- part_info->part_state_len, MYF_RW))
- goto err;
}
else
#endif
@@ -330,7 +323,7 @@ int rea_create_table(THD *thd, const char *path,
// Make sure mysql_create_frm didn't remove extension
DBUG_ASSERT(*fn_rext(frm_name));
- if (file->create_handler_files(path, create_info))
+ if (file->create_handler_files(path, NULL, CHF_CREATE_FLAG, create_info))
goto err_handler;
if (!create_info->frm_only && ha_create_table(thd, path, db, table_name,
create_info,0))
diff --git a/sql/watchdog_mysqld b/sql/watchdog_mysqld
deleted file mode 100755
index 0b26bb15acd..00000000000
--- a/sql/watchdog_mysqld
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/perl
-# Copyright (C) 1979-1998 TcX AB & Monty Program KB & Detron HB
-#
-# This software is distributed with NO WARRANTY OF ANY KIND. No author or
-# distributor accepts any responsibility for the consequences of using it, or
-# for whether it serves any particular purpose or works at all, unless he or
-# she says so in writing. Refer to the Free Public License (the "License")
-# for full details.
-#
-# Every copy of this file must include a copy of the License, normally in a
-# plain ASCII text file named PUBLIC. The License grants you the right to
-# copy, modify and redistribute this file, but only under certain conditions
-# described in the License. Among other things, the License requires that
-# the copyright notice and this notice be preserved on all copies. */
-
-#
-# This scripts is started by safe_mysqld. It checks that MySQL is alive and
-# working ( = answering to ping). If not, force mysqld down, check all
-# tables and let safe_mysqld restart the server.
-#
-# For this to work, you should have procmail installed as the commands
-# 'lockfile' and is used to sync with safe_mysqld
-#
-# NOTE: You should only use this script as a last resort if mysqld locks
-# up unexpectedly in a critical application and you have to get it to
-# work temporarily while waiting for a solution from mysql@tcx.se or
-# mysql-support@tcx.se
-
-
-use POSIX "waitpid";
-
-# Arguments from safe_mysqld
-
-if ($#ARGV != 4)
-{
- print "$0: Wrong number of arguments. Aborting\n";
- exit 1;
-}
-
-$lock_file=shift; # File to lock to sync with safe_mysqld
-$pid_file=shift; # Pid file used by mysqld
-$bin_dir=shift; # Directory where mysqladmin is
-$test_timeout=shift; # Time between testing if mysqld is alive
-$wait_timeout=shift; # How long time to wait for ping
-
-$|=1; # autoflush
-
-# Check that mysqld has started properly
-
-for ($i=1 ; $i < 10 ; $i ++)
-{
- last if (-e $pid_file);
-}
-sleep(1); # If server has just created the file
-if (($mysqld_pid=`cat $pid_file`) <= 0)
-{
- print "$0: Error: Invalid pidfile (contains '$mysqld_pid'). Aborting\n";
-}
-
-# Start pinging mysqld
-
-for (;;)
-{
- sleep($test_timeout); # Time between tests
- `lockfile $lock_file > /dev/null 2>&1`; # Sync with safe_mysqld
- if (($pid=fork()) == 0)
- {
- setpgrp(0,0);
- exit(int(system("$bin_dir/mysqladmin -w status > /dev/null")/256));
- }
- for ($i=0; ($res=waitpid(-1,&POSIX::WNOHANG)) == 0 && $i < $wait_timeout ; $i++)
- {
- sleep(1);
- }
- if ($res == 0)
- {
- print "$0: Warning: mysqld hanged; Killing it so that safe_mysqld can restart it!\n";
- $mysqld_pid= `cat $pid_file`;
- if ($mysqld_pid <= 0)
- {
- print "$0: Error: Invalid pidfile (contains '$mysqld_pid'). Aborting\n";
- system("rm -f $lock_file");
- kill(-9,$pid);
- exit 1;
- }
- print "$0: Sending signal 15 to $mysqld_pid\n";
- kill(-15, $pid,$mysqld_pid); # Give it a last change to die nicely
- for ($i=0 ; $i < 5 ; $i++) { sleep(1); } # Wait 5 seconds (signal safe)
- waitpid(-1,&POSIX::WNOHANG);
- if (kill(0,$pid,$mysqld_pid) != 0)
- {
- print "$0: Sending signal 9 to $mysqld_pid\n";
- kill(-9,$pid,$mysqld_pid); # No time to be nice anymore
- sleep(2); # Give system time to clean up
- waitpid(-1,&POSIX::WNOHANG);
- if (kill(0,$mysqld_pid) != 0)
- {
- print "$0: Warning: mysqld don't want to die. Aborting\n";
- system("rm -f $lock_file");
- exit 1;
- }
- }
- # safe_mysqld will not restart mysqld if the pid file doesn't exists
- system("rm $pid_file");
- system("touch $pid_file");
- }
- elsif ($res == -1)
- {
- print "$0: Error: waitpid returned $res when wating for pid $pid\nPlease verify that $0 is correct for your system\n";
- system("rm -f $lock_file");
- exit 1;
- }
- else
- {
- $exit_code=int($?/256);
- if ($exit_code != 0)
- {
- print "$0: Warning: mysqladmin returned exit code $exit_code\n";
- }
- else
- {
- #print "mysqld is alive and feeling well\n";
- }
- }
- system("rm -f $lock_file"); # safemysqld will now take over
-}
diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c
index abf1d1ea9a7..91bf438035f 100644
--- a/storage/myisam/mi_open.c
+++ b/storage/myisam/mi_open.c
@@ -96,7 +96,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
bzero((byte*) &info,sizeof(info));
my_realpath(name_buff, fn_format(org_name,name,"",MI_NAME_IEXT,
- MY_UNPACK_FILENAME|MY_APPEND_EXT),MYF(0));
+ MY_UNPACK_FILENAME),MYF(0));
pthread_mutex_lock(&THR_LOCK_myisam);
if (!(old_info=test_if_reopen(name_buff)))
{
diff --git a/storage/ndb/home/bin/Linuxmkisofs b/storage/ndb/home/bin/Linuxmkisofs
deleted file mode 100755
index a531f4cca7b..00000000000
--- a/storage/ndb/home/bin/Linuxmkisofs
+++ /dev/null
Binary files differ
diff --git a/storage/ndb/home/bin/Solarismkisofs b/storage/ndb/home/bin/Solarismkisofs
deleted file mode 100755
index b239eaed6ad..00000000000
--- a/storage/ndb/home/bin/Solarismkisofs
+++ /dev/null
Binary files differ
diff --git a/storage/ndb/home/bin/cvs2cl.pl b/storage/ndb/home/bin/cvs2cl.pl
deleted file mode 100755
index 9e6da5acf5b..00000000000
--- a/storage/ndb/home/bin/cvs2cl.pl
+++ /dev/null
@@ -1,1865 +0,0 @@
-#!/bin/sh
-exec perl -w -x $0 ${1+"$@"} # -*- mode: perl; perl-indent-level: 2; -*-
-#!perl -w
-
-##############################################################
-### ###
-### cvs2cl.pl: produce ChangeLog(s) from `cvs log` output. ###
-### ###
-##############################################################
-
-## $Revision: 2.38 $
-## $Date: 2001/02/12 19:54:35 $
-## $Author: kfogel $
-##
-## (C) 1999 Karl Fogel <kfogel@red-bean.com>, under the GNU GPL.
-##
-## (Extensively hacked on by Melissa O'Neill <oneill@cs.sfu.ca>.)
-##
-## cvs2cl.pl is free software; you can redistribute it and/or modify
-## it under the terms of the GNU General Public License as published by
-## the Free Software Foundation; either version 2, or (at your option)
-## any later version.
-##
-## cvs2cl.pl is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You may have received a copy of the GNU General Public License
-## along with cvs2cl.pl; see the file COPYING. If not, write to the
-## Free Software Foundation, Inc., 59 Temple Place - Suite 330,
-## Boston, MA 02111-1307, USA.
-
-
-
-use strict;
-use Text::Wrap;
-use Time::Local;
-use File::Basename;
-
-
-# The Plan:
-#
-# Read in the logs for multiple files, spit out a nice ChangeLog that
-# mirrors the information entered during `cvs commit'.
-#
-# The problem presents some challenges. In an ideal world, we could
-# detect files with the same author, log message, and checkin time --
-# each <filelist, author, time, logmessage> would be a changelog entry.
-# We'd sort them; and spit them out. Unfortunately, CVS is *not atomic*
-# so checkins can span a range of times. Also, the directory structure
-# could be hierarchical.
-#
-# Another question is whether we really want to have the ChangeLog
-# exactly reflect commits. An author could issue two related commits,
-# with different log entries, reflecting a single logical change to the
-# source. GNU style ChangeLogs group these under a single author/date.
-# We try to do the same.
-#
-# So, we parse the output of `cvs log', storing log messages in a
-# multilevel hash that stores the mapping:
-# directory => author => time => message => filelist
-# As we go, we notice "nearby" commit times and store them together
-# (i.e., under the same timestamp), so they appear in the same log
-# entry.
-#
-# When we've read all the logs, we twist this mapping into
-# a time => author => message => filelist mapping for each directory.
-#
-# If we're not using the `--distributed' flag, the directory is always
-# considered to be `./', even as descend into subdirectories.
-
-
-############### Globals ################
-
-
-# What we run to generate it:
-my $Log_Source_Command = "cvs log";
-
-# In case we have to print it out:
-my $VERSION = '$Revision: 2.38 $';
-$VERSION =~ s/\S+\s+(\S+)\s+\S+/$1/;
-
-## Vars set by options:
-
-# Print debugging messages?
-my $Debug = 0;
-
-# Just show version and exit?
-my $Print_Version = 0;
-
-# Just print usage message and exit?
-my $Print_Usage = 0;
-
-# Single top-level ChangeLog, or one per subdirectory?
-my $Distributed = 0;
-
-# What file should we generate (defaults to "ChangeLog")?
-my $Log_File_Name = "ChangeLog";
-
-# Grab most recent entry date from existing ChangeLog file, just add
-# to that ChangeLog.
-my $Cumulative = 0;
-
-# Expand usernames to email addresses based on a map file?
-my $User_Map_File = "";
-
-# Output to a file or to stdout?
-my $Output_To_Stdout = 0;
-
-# Eliminate empty log messages?
-my $Prune_Empty_Msgs = 0;
-
-# Don't call Text::Wrap on the body of the message
-my $No_Wrap = 0;
-
-# Separates header from log message. Code assumes it is either " " or
-# "\n\n", so if there's ever an option to set it to something else,
-# make sure to go through all conditionals that use this var.
-my $After_Header = " ";
-
-# Format more for programs than for humans.
-my $XML_Output = 0;
-
-# Do some special tweaks for log data that was written in FSF
-# ChangeLog style.
-my $FSF_Style = 0;
-
-# Show times in UTC instead of local time
-my $UTC_Times = 0;
-
-# Show day of week in output?
-my $Show_Day_Of_Week = 0;
-
-# Show revision numbers in output?
-my $Show_Revisions = 0;
-
-# Show tags (symbolic names) in output?
-my $Show_Tags = 0;
-
-# Show branches by symbolic name in output?
-my $Show_Branches = 0;
-
-# Show only revisions on these branches or their ancestors.
-my @Follow_Branches;
-
-# Don't bother with files matching this regexp.
-my @Ignore_Files;
-
-# How exactly we match entries. We definitely want "o",
-# and user might add "i" by using --case-insensitive option.
-my $Case_Insensitive = 0;
-
-# Maybe only show log messages matching a certain regular expression.
-my $Regexp_Gate = "";
-
-# Pass this global option string along to cvs, to the left of `log':
-my $Global_Opts = "";
-
-# Pass this option string along to the cvs log subcommand:
-my $Command_Opts = "";
-
-# Read log output from stdin instead of invoking cvs log?
-my $Input_From_Stdin = 0;
-
-# Don't show filenames in output.
-my $Hide_Filenames = 0;
-
-# Max checkin duration. CVS checkin is not atomic, so we may have checkin
-# times that span a range of time. We assume that checkins will last no
-# longer than $Max_Checkin_Duration seconds, and that similarly, no
-# checkins will happen from the same users with the same message less
-# than $Max_Checkin_Duration seconds apart.
-my $Max_Checkin_Duration = 180;
-
-# What to put at the front of [each] ChangeLog.
-my $ChangeLog_Header = "";
-
-## end vars set by options.
-
-# In 'cvs log' output, one long unbroken line of equal signs separates
-# files:
-my $file_separator = "======================================="
- . "======================================";
-
-# In 'cvs log' output, a shorter line of dashes separates log messages
-# within a file:
-my $logmsg_separator = "----------------------------";
-
-
-############### End globals ############
-
-
-
-
-&parse_options ();
-&derive_change_log ();
-
-
-
-### Everything below is subroutine definitions. ###
-
-# If accumulating, grab the boundary date from pre-existing ChangeLog.
-sub maybe_grab_accumulation_date ()
-{
- if (! $Cumulative) {
- return "";
- }
-
- # else
-
- open (LOG, "$Log_File_Name")
- or die ("trouble opening $Log_File_Name for reading ($!)");
-
- my $boundary_date;
- while (<LOG>)
- {
- if (/^(\d\d\d\d-\d\d-\d\d\s+\d\d:\d\d)/)
- {
- $boundary_date = "$1";
- last;
- }
- }
-
- close (LOG);
- return $boundary_date;
-}
-
-
-# Fills up a ChangeLog structure in the current directory.
-sub derive_change_log ()
-{
- # See "The Plan" above for a full explanation.
-
- my %grand_poobah;
-
- my $file_full_path;
- my $time;
- my $revision;
- my $author;
- my $msg_txt;
- my $detected_file_separator;
-
- # Might be adding to an existing ChangeLog
- my $accumulation_date = &maybe_grab_accumulation_date ();
- if ($accumulation_date) {
- $Log_Source_Command .= " -d\'>${accumulation_date}\'";
- }
-
- # We might be expanding usernames
- my %usermap;
-
- # In general, it's probably not very maintainable to use state
- # variables like this to tell the loop what it's doing at any given
- # moment, but this is only the first one, and if we never have more
- # than a few of these, it's okay.
- my $collecting_symbolic_names = 0;
- my %symbolic_names; # Where tag names get stored.
- my %branch_names; # We'll grab branch names while we're at it.
- my %branch_numbers; # Save some revisions for @Follow_Branches
- my @branch_roots; # For showing which files are branch ancestors.
-
- # Bleargh. Compensate for a deficiency of custom wrapping.
- if (($After_Header ne " ") and $FSF_Style)
- {
- $After_Header .= "\t";
- }
-
- if (! $Input_From_Stdin) {
- open (LOG_SOURCE, "$Log_Source_Command |")
- or die "unable to run \"${Log_Source_Command}\"";
- }
- else {
- open (LOG_SOURCE, "-") or die "unable to open stdin for reading";
- }
-
- %usermap = &maybe_read_user_map_file ();
-
- while (<LOG_SOURCE>)
- {
- # If on a new file and don't see filename, skip until we find it, and
- # when we find it, grab it.
- if ((! (defined $file_full_path)) and /^Working file: (.*)/)
- {
- $file_full_path = $1;
- if (@Ignore_Files)
- {
- my $base;
- ($base, undef, undef) = fileparse ($file_full_path);
- # Ouch, I wish trailing operators in regexps could be
- # evaluated on the fly!
- if ($Case_Insensitive) {
- if (grep ($file_full_path =~ m|$_|i, @Ignore_Files)) {
- undef $file_full_path;
- }
- }
- elsif (grep ($file_full_path =~ m|$_|, @Ignore_Files)) {
- undef $file_full_path;
- }
- }
- next;
- }
-
- # Just spin wheels if no file defined yet.
- next if (! $file_full_path);
-
- # Collect tag names in case we're asked to print them in the output.
- if (/^symbolic names:$/) {
- $collecting_symbolic_names = 1;
- next; # There's no more info on this line, so skip to next
- }
- if ($collecting_symbolic_names)
- {
- # All tag names are listed with whitespace in front in cvs log
- # output; so if see non-whitespace, then we're done collecting.
- if (/^\S/) {
- $collecting_symbolic_names = 0;
- }
- else # we're looking at a tag name, so parse & store it
- {
- # According to the Cederqvist manual, in node "Tags", tag
- # names must start with an uppercase or lowercase letter and
- # can contain uppercase and lowercase letters, digits, `-',
- # and `_'. However, it's not our place to enforce that, so
- # we'll allow anything CVS hands us to be a tag:
- /^\s+([^:]+): ([\d.]+)$/;
- my $tag_name = $1;
- my $tag_rev = $2;
-
- # A branch number either has an odd number of digit sections
- # (and hence an even number of dots), or has ".0." as the
- # second-to-last digit section. Test for these conditions.
- my $real_branch_rev = "";
- if (($tag_rev =~ /^(\d+\.\d+\.)+\d+$/) # Even number of dots...
- and (! ($tag_rev =~ /^(1\.)+1$/))) # ...but not "1.[1.]1"
- {
- $real_branch_rev = $tag_rev;
- }
- elsif ($tag_rev =~ /(\d+\.(\d+\.)+)0.(\d+)/) # Has ".0."
- {
- $real_branch_rev = $1 . $3;
- }
- # If we got a branch, record its number.
- if ($real_branch_rev)
- {
- $branch_names{$real_branch_rev} = $tag_name;
- if (@Follow_Branches) {
- if (grep ($_ eq $tag_name, @Follow_Branches)) {
- $branch_numbers{$tag_name} = $real_branch_rev;
- }
- }
- }
- else {
- # Else it's just a regular (non-branch) tag.
- push (@{$symbolic_names{$tag_rev}}, $tag_name);
- }
- }
- }
- # End of code for collecting tag names.
-
- # If have file name, but not revision, and see revision, then grab
- # it. (We collect unconditionally, even though we may or may not
- # ever use it.)
- if ((! (defined $revision)) and (/^revision (\d+\.[\d.]+)/))
- {
- $revision = $1;
-
- if (@Follow_Branches)
- {
- foreach my $branch (@Follow_Branches)
- {
- # Special case for following trunk revisions
- if (($branch =~ /^trunk$/i) and ($revision =~ /^[0-9]+\.[0-9]+$/))
- {
- goto dengo;
- }
-
- my $branch_number = $branch_numbers{$branch};
- if ($branch_number)
- {
- # Are we on one of the follow branches or an ancestor of
- # same?
- #
- # If this revision is a prefix of the branch number, or
- # possibly is less in the minormost number, OR if this
- # branch number is a prefix of the revision, then yes.
- # Otherwise, no.
- #
- # So below, we determine if any of those conditions are
- # met.
-
- # Trivial case: is this revision on the branch?
- # (Compare this way to avoid regexps that screw up Emacs
- # indentation, argh.)
- if ((substr ($revision, 0, ((length ($branch_number)) + 1)))
- eq ($branch_number . "."))
- {
- goto dengo;
- }
- # Non-trivial case: check if rev is ancestral to branch
- elsif ((length ($branch_number)) > (length ($revision)))
- {
- $revision =~ /^((?:\d+\.)+)(\d+)$/;
- my $r_left = $1; # still has the trailing "."
- my $r_end = $2;
-
- $branch_number =~ /^((?:\d+\.)+)(\d+)\.\d+$/;
- my $b_left = $1; # still has trailing "."
- my $b_mid = $2; # has no trailing "."
-
- if (($r_left eq $b_left)
- && ($r_end <= $b_mid))
- {
- goto dengo;
- }
- }
- }
- }
- }
- else # (! @Follow_Branches)
- {
- next;
- }
-
- # Else we are following branches, but this revision isn't on the
- # path. So skip it.
- undef $revision;
- dengo:
- next;
- }
-
- # If we don't have a revision right now, we couldn't possibly
- # be looking at anything useful.
- if (! (defined ($revision))) {
- $detected_file_separator = /^$file_separator$/o;
- if ($detected_file_separator) {
- # No revisions for this file; can happen, e.g. "cvs log -d DATE"
- goto CLEAR;
- }
- else {
- next;
- }
- }
-
- # If have file name but not date and author, and see date or
- # author, then grab them:
- unless (defined $time)
- {
- if (/^date: .*/)
- {
- ($time, $author) = &parse_date_and_author ($_);
- if (defined ($usermap{$author}) and $usermap{$author}) {
- $author = $usermap{$author};
- }
- }
- else {
- $detected_file_separator = /^$file_separator$/o;
- if ($detected_file_separator) {
- # No revisions for this file; can happen, e.g. "cvs log -d DATE"
- goto CLEAR;
- }
- }
- # If the date/time/author hasn't been found yet, we couldn't
- # possibly care about anything we see. So skip:
- next;
- }
-
- # A "branches: ..." line here indicates that one or more branches
- # are rooted at this revision. If we're showing branches, then we
- # want to show that fact as well, so we collect all the branches
- # that this is the latest ancestor of and store them in
- # @branch_roots. Just for reference, the format of the line we're
- # seeing at this point is:
- #
- # branches: 1.5.2; 1.5.4; ...;
- #
- # Okay, here goes:
-
- if (/^branches:\s+(.*);$/)
- {
- if ($Show_Branches)
- {
- my $lst = $1;
- $lst =~ s/(1\.)+1;|(1\.)+1$//; # ignore the trivial branch 1.1.1
- if ($lst) {
- @branch_roots = split (/;\s+/, $lst);
- }
- else {
- undef @branch_roots;
- }
- next;
- }
- else
- {
- # Ugh. This really bothers me. Suppose we see a log entry
- # like this:
- #
- # ----------------------------
- # revision 1.1
- # date: 1999/10/17 03:07:38; author: jrandom; state: Exp;
- # branches: 1.1.2;
- # Intended first line of log message begins here.
- # ----------------------------
- #
- # The question is, how we can tell the difference between that
- # log message and a *two*-line log message whose first line is
- #
- # "branches: 1.1.2;"
- #
- # See the problem? The output of "cvs log" is inherently
- # ambiguous.
- #
- # For now, we punt: we liberally assume that people don't
- # write log messages like that, and just toss a "branches:"
- # line if we see it but are not showing branches. I hope no
- # one ever loses real log data because of this.
- next;
- }
- }
-
- # If have file name, time, and author, then we're just grabbing
- # log message texts:
- $detected_file_separator = /^$file_separator$/o;
- if ($detected_file_separator && ! (defined $revision)) {
- # No revisions for this file; can happen, e.g. "cvs log -d DATE"
- goto CLEAR;
- }
- unless ($detected_file_separator || /^$logmsg_separator$/o)
- {
- $msg_txt .= $_; # Normally, just accumulate the message...
- next;
- }
- # ... until a msg separator is encountered:
- # Ensure the message contains something:
- if ((! $msg_txt)
- || ($msg_txt =~ /^\s*\.\s*$|^\s*$/)
- || ($msg_txt =~ /\*\*\* empty log message \*\*\*/))
- {
- if ($Prune_Empty_Msgs) {
- goto CLEAR;
- }
- # else
- $msg_txt = "[no log message]\n";
- }
-
- ### Store it all in the Grand Poobah:
- {
- my $dir_key; # key into %grand_poobah
- my %qunk; # complicated little jobbie, see below
-
- # Each revision of a file has a little data structure (a `qunk')
- # associated with it. That data structure holds not only the
- # file's name, but any additional information about the file
- # that might be needed in the output, such as the revision
- # number, tags, branches, etc. The reason to have these things
- # arranged in a data structure, instead of just appending them
- # textually to the file's name, is that we may want to do a
- # little rearranging later as we write the output. For example,
- # all the files on a given tag/branch will go together, followed
- # by the tag in parentheses (so trunk or otherwise non-tagged
- # files would go at the end of the file list for a given log
- # message). This rearrangement is a lot easier to do if we
- # don't have to reparse the text.
- #
- # A qunk looks like this:
- #
- # {
- # filename => "hello.c",
- # revision => "1.4.3.2",
- # time => a timegm() return value (moment of commit)
- # tags => [ "tag1", "tag2", ... ],
- # branch => "branchname" # There should be only one, right?
- # branchroots => [ "branchtag1", "branchtag2", ... ]
- # }
-
- if ($Distributed) {
- # Just the basename, don't include the path.
- ($qunk{'filename'}, $dir_key, undef) = fileparse ($file_full_path);
- }
- else {
- $dir_key = "./";
- $qunk{'filename'} = $file_full_path;
- }
-
- # This may someday be used in a more sophisticated calculation
- # of what other files are involved in this commit. For now, we
- # don't use it, because the common-commit-detection algorithm is
- # hypothesized to be "good enough" as it stands.
- $qunk{'time'} = $time;
-
- # We might be including revision numbers and/or tags and/or
- # branch names in the output. Most of the code from here to
- # loop-end deals with organizing these in qunk.
-
- $qunk{'revision'} = $revision;
-
- # Grab the branch, even though we may or may not need it:
- $qunk{'revision'} =~ /((?:\d+\.)+)\d+/;
- my $branch_prefix = $1;
- $branch_prefix =~ s/\.$//; # strip off final dot
- if ($branch_names{$branch_prefix}) {
- $qunk{'branch'} = $branch_names{$branch_prefix};
- }
-
- # If there's anything in the @branch_roots array, then this
- # revision is the root of at least one branch. We'll display
- # them as branch names instead of revision numbers, the
- # substitution for which is done directly in the array:
- if (@branch_roots) {
- my @roots = map { $branch_names{$_} } @branch_roots;
- $qunk{'branchroots'} = \@roots;
- }
-
- # Save tags too.
- if (defined ($symbolic_names{$revision})) {
- $qunk{'tags'} = $symbolic_names{$revision};
- delete $symbolic_names{$revision};
- }
-
- # Add this file to the list
- # (We use many spoonfuls of autovivication magic. Hashes and arrays
- # will spring into existence if they aren't there already.)
-
- &debug ("(pushing log msg for ${dir_key}$qunk{'filename'})\n");
-
- # Store with the files in this commit. Later we'll loop through
- # again, making sure that revisions with the same log message
- # and nearby commit times are grouped together as one commit.
- push (@{$grand_poobah{$dir_key}{$author}{$time}{$msg_txt}}, \%qunk);
- }
-
- CLEAR:
- # Make way for the next message
- undef $msg_txt;
- undef $time;
- undef $revision;
- undef $author;
- undef @branch_roots;
-
- # Maybe even make way for the next file:
- if ($detected_file_separator) {
- undef $file_full_path;
- undef %branch_names;
- undef %branch_numbers;
- undef %symbolic_names;
- }
- }
-
- close (LOG_SOURCE);
-
- ### Process each ChangeLog
-
- while (my ($dir,$authorhash) = each %grand_poobah)
- {
- &debug ("DOING DIR: $dir\n");
-
- # Here we twist our hash around, from being
- # author => time => message => filelist
- # in %$authorhash to
- # time => author => message => filelist
- # in %changelog.
- #
- # This is also where we merge entries. The algorithm proceeds
- # through the timeline of the changelog with a sliding window of
- # $Max_Checkin_Duration seconds; within that window, entries that
- # have the same log message are merged.
- #
- # (To save space, we zap %$authorhash after we've copied
- # everything out of it.)
-
- my %changelog;
- while (my ($author,$timehash) = each %$authorhash)
- {
- my $lasttime;
- my %stamptime;
- foreach my $time (sort {$main::a <=> $main::b} (keys %$timehash))
- {
- my $msghash = $timehash->{$time};
- while (my ($msg,$qunklist) = each %$msghash)
- {
- my $stamptime = $stamptime{$msg};
- if ((defined $stamptime)
- and (($time - $stamptime) < $Max_Checkin_Duration)
- and (defined $changelog{$stamptime}{$author}{$msg}))
- {
- push(@{$changelog{$stamptime}{$author}{$msg}}, @$qunklist);
- }
- else {
- $changelog{$time}{$author}{$msg} = $qunklist;
- $stamptime{$msg} = $time;
- }
- }
- }
- }
- undef (%$authorhash);
-
- ### Now we can write out the ChangeLog!
-
- my ($logfile_here, $logfile_bak, $tmpfile);
-
- if (! $Output_To_Stdout) {
- $logfile_here = $dir . $Log_File_Name;
- $logfile_here =~ s/^\.\/\//\//; # fix any leading ".//" problem
- $tmpfile = "${logfile_here}.cvs2cl$$.tmp";
- $logfile_bak = "${logfile_here}.bak";
-
- open (LOG_OUT, ">$tmpfile") or die "Unable to open \"$tmpfile\"";
- }
- else {
- open (LOG_OUT, ">-") or die "Unable to open stdout for writing";
- }
-
- print LOG_OUT $ChangeLog_Header;
-
- if ($XML_Output) {
- print LOG_OUT "<?xml version=\"1.0\"?>\n\n"
- . "<changelog xmlns=\"http://www.red-bean.com/xmlns/cvs2cl/\">\n\n";
- }
-
- foreach my $time (sort {$main::b <=> $main::a} (keys %changelog))
- {
- my $authorhash = $changelog{$time};
- while (my ($author,$mesghash) = each %$authorhash)
- {
- # If XML, escape in outer loop to avoid compound quoting:
- if ($XML_Output) {
- $author = &xml_escape ($author);
- }
-
- while (my ($msg,$qunklist) = each %$mesghash)
- {
- my $files = &pretty_file_list ($qunklist);
- my $header_line; # date and author
- my $body; # see below
- my $wholething; # $header_line + $body
-
- # Set up the date/author line.
- # kff todo: do some more XML munging here, on the header
- # part of the entry:
- my ($ignore,$min,$hour,$mday,$mon,$year,$wday)
- = $UTC_Times ? gmtime($time) : localtime($time);
-
-          # Since XML output includes everything else, we might as well
-          # make it always include Day Of Week too, for consistency.
- if ($Show_Day_Of_Week or $XML_Output) {
- $wday = ("Sunday", "Monday", "Tuesday", "Wednesday",
- "Thursday", "Friday", "Saturday")[$wday];
- $wday = ($XML_Output) ? "<weekday>${wday}</weekday>\n" : " $wday";
- }
- else {
- $wday = "";
- }
-
- if ($XML_Output) {
- $header_line =
- sprintf ("<date>%4u-%02u-%02u</date>\n"
- . "${wday}"
- . "<time>%02u:%02u</time>\n"
- . "<author>%s</author>\n",
- $year+1900, $mon+1, $mday, $hour, $min, $author);
- }
- else {
- $header_line =
- sprintf ("%4u-%02u-%02u${wday} %02u:%02u %s\n\n",
- $year+1900, $mon+1, $mday, $hour, $min, $author);
- }
-
- # Reshape the body according to user preferences.
- if ($XML_Output)
- {
- $msg = &preprocess_msg_text ($msg);
- $body = $files . $msg;
- }
- elsif ($No_Wrap)
- {
- $msg = &preprocess_msg_text ($msg);
- $files = wrap ("\t", " ", "$files");
- $msg =~ s/\n(.*)/\n\t$1/g;
- unless ($After_Header eq " ") {
- $msg =~ s/^(.*)/\t$1/g;
- }
- $body = $files . $After_Header . $msg;
- }
- else # do wrapping, either FSF-style or regular
- {
- if ($FSF_Style)
- {
- $files = wrap ("\t", " ", "$files");
-
- my $files_last_line_len = 0;
- if ($After_Header eq " ")
- {
- $files_last_line_len = &last_line_len ($files);
- $files_last_line_len += 1; # for $After_Header
- }
-
- $msg = &wrap_log_entry
- ($msg, "\t", 69 - $files_last_line_len, 69);
- $body = $files . $After_Header . $msg;
- }
- else # not FSF-style
- {
- $msg = &preprocess_msg_text ($msg);
- $body = $files . $After_Header . $msg;
- $body = wrap ("\t", " ", "$body");
- }
- }
-
- $wholething = $header_line . $body;
-
- if ($XML_Output) {
- $wholething = "<entry>\n${wholething}</entry>\n";
- }
-
- # One last check: make sure it passes the regexp test, if the
- # user asked for that. We have to do it here, so that the
- # test can match against information in the header as well
- # as in the text of the log message.
-
- # How annoying to duplicate so much code just because I
- # can't figure out a way to evaluate scalars on the trailing
- # operator portion of a regular expression. Grrr.
- if ($Case_Insensitive) {
- unless ($Regexp_Gate && ($wholething !~ /$Regexp_Gate/oi)) {
- print LOG_OUT "${wholething}\n";
- }
- }
- else {
- unless ($Regexp_Gate && ($wholething !~ /$Regexp_Gate/o)) {
- print LOG_OUT "${wholething}\n";
- }
- }
- }
- }
- }
-
- if ($XML_Output) {
- print LOG_OUT "</changelog>\n";
- }
-
- close (LOG_OUT);
-
- if (! $Output_To_Stdout)
- {
- # If accumulating, append old data to new before renaming. But
- # don't append the most recent entry, since it's already in the
- # new log due to CVS's idiosyncratic interpretation of "log -d".
- if ($Cumulative && -f $logfile_here)
- {
- open (NEW_LOG, ">>$tmpfile")
- or die "trouble appending to $tmpfile ($!)";
-
- open (OLD_LOG, "<$logfile_here")
- or die "trouble reading from $logfile_here ($!)";
-
- my $started_first_entry = 0;
- my $passed_first_entry = 0;
- while (<OLD_LOG>)
- {
- if (! $passed_first_entry)
- {
- if ((! $started_first_entry)
- && /^(\d\d\d\d-\d\d-\d\d\s+\d\d:\d\d)/) {
- $started_first_entry = 1;
- }
- elsif (/^(\d\d\d\d-\d\d-\d\d\s+\d\d:\d\d)/) {
- $passed_first_entry = 1;
- print NEW_LOG $_;
- }
- }
- else {
- print NEW_LOG $_;
- }
- }
-
- close (NEW_LOG);
- close (OLD_LOG);
- }
-
- if (-f $logfile_here) {
- rename ($logfile_here, $logfile_bak);
- }
- rename ($tmpfile, $logfile_here);
- }
- }
-}
-
-
-sub parse_date_and_author ()
-{
- # Parses the date/time and author out of a line like:
- #
- # date: 1999/02/19 23:29:05; author: apharris; state: Exp;
-
- my $line = shift;
-
- my ($year, $mon, $mday, $hours, $min, $secs, $author) = $line =~
- m#(\d+)/(\d+)/(\d+)\s+(\d+):(\d+):(\d+);\s+author:\s+([^;]+);#
- or die "Couldn't parse date ``$line''";
- die "Bad date or Y2K issues" unless ($year > 1969 and $year < 2258);
- # Kinda arbitrary, but useful as a sanity check
- my $time = timegm($secs,$min,$hours,$mday,$mon-1,$year-1900);
-
- return ($time, $author);
-}
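-
-# For example, the sample line above parses as:
-#
-#   my ($time, $author) = &parse_date_and_author
-#       ("date: 1999/02/19 23:29:05;  author: apharris;  state: Exp;");
-#
-# leaving $author eq "apharris" and $time == timegm(5, 29, 23, 19, 1, 99).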
-
-
-# Here we take a bunch of qunks and convert them into a printed
-# summary that will include all the information the user asked for.
-sub pretty_file_list ()
-{
- if ($Hide_Filenames and (! $XML_Output)) {
- return "";
- }
-
- my $qunksref = shift;
- my @qunkrefs = @$qunksref;
- my @filenames;
- my $beauty = ""; # The accumulating header string for this entry.
- my %non_unanimous_tags; # Tags found in a proper subset of qunks
- my %unanimous_tags; # Tags found in all qunks
- my %all_branches; # Branches found in any qunk
- my $common_dir = undef; # Dir prefix common to all files ("" if none)
- my $fbegun = 0; # Did we begin printing filenames yet?
-
- # First, loop over the qunks gathering all the tag/branch names.
- # We'll put them all in non_unanimous_tags, and take out the
- # unanimous ones later.
- foreach my $qunkref (@qunkrefs)
- {
- # Keep track of whether all the files in this commit were in the
- # same directory, and memorize it if so. We can make the output a
- # little more compact by mentioning the directory only once.
- if ((scalar (@qunkrefs)) > 1)
- {
- if (! (defined ($common_dir)))
- {
- my ($base, $dir);
- ($base, $dir, undef) = fileparse ($$qunkref{'filename'});
-
- if ((! (defined ($dir))) # this first case is sheer paranoia
- or ($dir eq "")
- or ($dir eq "./")
- or ($dir eq ".\\"))
- {
- $common_dir = "";
- }
- else
- {
- $common_dir = $dir;
- }
- }
- elsif ($common_dir ne "")
- {
- # Already have a common dir prefix, so how much of it can we preserve?
- $common_dir = &common_path_prefix ($$qunkref{'filename'}, $common_dir);
- }
- }
- else # only one file in this entry anyway, so common dir not an issue
- {
- $common_dir = "";
- }
-
- if (defined ($$qunkref{'branch'})) {
- $all_branches{$$qunkref{'branch'}} = 1;
- }
- if (defined ($$qunkref{'tags'})) {
- foreach my $tag (@{$$qunkref{'tags'}}) {
- $non_unanimous_tags{$tag} = 1;
- }
- }
- }
-
- # Any tag held by all qunks will be printed specially... but only if
- # there are multiple qunks in the first place!
- if ((scalar (@qunkrefs)) > 1) {
- foreach my $tag (keys (%non_unanimous_tags)) {
- my $everyone_has_this_tag = 1;
- foreach my $qunkref (@qunkrefs) {
- if ((! (defined ($$qunkref{'tags'})))
- or (! (grep ($_ eq $tag, @{$$qunkref{'tags'}})))) {
- $everyone_has_this_tag = 0;
- }
- }
- if ($everyone_has_this_tag) {
- $unanimous_tags{$tag} = 1;
- delete $non_unanimous_tags{$tag};
- }
- }
- }
-
- if ($XML_Output)
- {
- # If outputting XML, then our task is pretty simple, because we
- # don't have to detect common dir, common tags, branch prefixing,
- # etc. We just output exactly what we have, and don't worry about
- # redundancy or readability.
-
- foreach my $qunkref (@qunkrefs)
- {
- my $filename = $$qunkref{'filename'};
- my $revision = $$qunkref{'revision'};
- my $tags = $$qunkref{'tags'};
- my $branch = $$qunkref{'branch'};
- my $branchroots = $$qunkref{'branchroots'};
-
- $filename = &xml_escape ($filename); # probably paranoia
- $revision = &xml_escape ($revision); # definitely paranoia
-
- $beauty .= "<file>\n";
- $beauty .= "<name>${filename}</name>\n";
- $beauty .= "<revision>${revision}</revision>\n";
- if ($branch) {
- $branch = &xml_escape ($branch); # more paranoia
- $beauty .= "<branch>${branch}</branch>\n";
- }
- foreach my $tag (@$tags) {
- $tag = &xml_escape ($tag); # by now you're used to the paranoia
- $beauty .= "<tag>${tag}</tag>\n";
- }
- foreach my $root (@$branchroots) {
- $root = &xml_escape ($root); # which is good, because it will continue
- $beauty .= "<branchroot>${root}</branchroot>\n";
- }
- $beauty .= "</file>\n";
- }
-
- # Theoretically, we could go home now. But as long as we're here,
- # let's print out the common_dir and utags, as a convenience to
- # the receiver (after all, earlier code calculated that stuff
- # anyway, so we might as well take advantage of it).
-
- if ((scalar (keys (%unanimous_tags))) > 1) {
- foreach my $utag ((keys (%unanimous_tags))) {
- $utag = &xml_escape ($utag); # the usual paranoia
- $beauty .= "<utag>${utag}</utag>\n";
- }
- }
- if ($common_dir) {
- $common_dir = &xml_escape ($common_dir);
- $beauty .= "<commondir>${common_dir}</commondir>\n";
- }
-
- # That's enough for XML, time to go home:
- return $beauty;
- }
-
- # Else not XML output, so complexly compactify for chordate
- # consumption. At this point we have enough global information
- # about all the qunks to organize them non-redundantly for output.
-
- if ($common_dir) {
- # Note that $common_dir still has its trailing slash
- $beauty .= "$common_dir: ";
- }
-
- if ($Show_Branches)
- {
- # For trailing revision numbers.
- my @brevisions;
-
- foreach my $branch (keys (%all_branches))
- {
- foreach my $qunkref (@qunkrefs)
- {
- if ((defined ($$qunkref{'branch'}))
- and ($$qunkref{'branch'} eq $branch))
- {
- if ($fbegun) {
- # kff todo: comma-delimited in XML too? Sure.
- $beauty .= ", ";
- }
- else {
- $fbegun = 1;
- }
- my $fname = substr ($$qunkref{'filename'}, length ($common_dir));
- $beauty .= $fname;
- $$qunkref{'printed'} = 1; # Just setting a mark bit, basically
-
-            if ($Show_Tags && (defined $$qunkref{'tags'})) {
- my @tags = grep ($non_unanimous_tags{$_}, @{$$qunkref{'tags'}});
- if (@tags) {
- $beauty .= " (tags: ";
- $beauty .= join (', ', @tags);
- $beauty .= ")";
- }
- }
-
- if ($Show_Revisions) {
- # Collect the revision numbers' last components, but don't
- # print them -- they'll get printed with the branch name
- # later.
- $$qunkref{'revision'} =~ /.+\.([\d]+)$/;
- push (@brevisions, $1);
-
- # todo: we're still collecting branch roots, but we're not
- # showing them anywhere. If we do show them, it would be
-            # nifty to just call them revision "0" on the branch.
- # Yeah, that's the ticket.
- }
- }
- }
- $beauty .= " ($branch";
- if (@brevisions) {
- if ((scalar (@brevisions)) > 1) {
- $beauty .= ".[";
- $beauty .= (join (',', @brevisions));
- $beauty .= "]";
- }
- else {
- $beauty .= ".$brevisions[0]";
- }
- }
- $beauty .= ")";
- }
- }
-
- # Okay; any qunks that were done according to branch are taken care
- # of, and marked as printed. Now print everyone else.
-
- foreach my $qunkref (@qunkrefs)
- {
- next if (defined ($$qunkref{'printed'})); # skip if already printed
-
- if ($fbegun) {
- $beauty .= ", ";
- }
- else {
- $fbegun = 1;
- }
- $beauty .= substr ($$qunkref{'filename'}, length ($common_dir));
- # todo: Shlomo's change was this:
- # $beauty .= substr ($$qunkref{'filename'},
- # (($common_dir eq "./") ? "" : length ($common_dir)));
- $$qunkref{'printed'} = 1; # Set a mark bit.
-
- if ($Show_Revisions || $Show_Tags)
- {
- my $started_addendum = 0;
-
- if ($Show_Revisions) {
- $started_addendum = 1;
- $beauty .= " (";
- $beauty .= "$$qunkref{'revision'}";
- }
- if ($Show_Tags && (defined $$qunkref{'tags'})) {
- my @tags = grep ($non_unanimous_tags{$_}, @{$$qunkref{'tags'}});
- if ((scalar (@tags)) > 0) {
- if ($started_addendum) {
- $beauty .= ", ";
- }
- else {
- $beauty .= " (tags: ";
- }
- $beauty .= join (', ', @tags);
- $started_addendum = 1;
- }
- }
- if ($started_addendum) {
- $beauty .= ")";
- }
- }
- }
-
- # Unanimous tags always come last.
- if ($Show_Tags && %unanimous_tags)
- {
- $beauty .= " (utags: ";
- $beauty .= join (', ', keys (%unanimous_tags));
- $beauty .= ")";
- }
-
- # todo: still have to take care of branch_roots?
-
- $beauty = "* $beauty:";
-
- return $beauty;
-}
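-
-# For instance (hypothetical data), a three-file commit under sql/ with
-# --revisions on would come out roughly as:
-#
-#   * sql/: handler.h (1.12), ha_ndbcluster.cc (1.30), ha_ndbcluster.h (1.9):
-#
-# i.e. the common dir prefix once, then each file with its addenda.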
-
-
-sub common_path_prefix ()
-{
- my $path1 = shift;
- my $path2 = shift;
-
- my ($dir1, $dir2);
- (undef, $dir1, undef) = fileparse ($path1);
- (undef, $dir2, undef) = fileparse ($path2);
-
- # Transmogrify Windows filenames to look like Unix.
- # (It is far more likely that someone is running cvs2cl.pl under
- # Windows than that they would genuinely have backslashes in their
- # filenames.)
- $dir1 =~ tr#\\#/#;
- $dir2 =~ tr#\\#/#;
-
- my $accum1 = "";
- my $accum2 = "";
- my $last_common_prefix = "";
-
- while ($accum1 eq $accum2)
- {
- $last_common_prefix = $accum1;
- last if ($accum1 eq $dir1);
- my ($tmp1) = split (/\//, (substr ($dir1, length ($accum1))));
- my ($tmp2) = split (/\//, (substr ($dir2, length ($accum2))));
- $accum1 .= "$tmp1/" if ((defined ($tmp1)) and $tmp1);
- $accum2 .= "$tmp2/" if ((defined ($tmp2)) and $tmp2);
- }
-
- return $last_common_prefix;
-}
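-
-# For example, &common_path_prefix ("sql/ha_innodb.cc",
-# "sql/share/errmsg.txt") returns "sql/"; two paths that share no
-# leading directory yield "".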
-
-
-sub preprocess_msg_text ()
-{
- my $text = shift;
-
- # Strip out carriage returns (as they probably result from DOSsy editors).
- $text =~ s/\r\n/\n/g;
-
- # If it *looks* like two newlines, make it *be* two newlines:
- $text =~ s/\n\s*\n/\n\n/g;
-
- if ($XML_Output)
- {
- $text = &xml_escape ($text);
- $text = "<msg>${text}</msg>\n";
- }
- elsif (! $No_Wrap)
- {
- # Strip off lone newlines, but only for lines that don't begin with
- # whitespace or a mail-quoting character, since we want to preserve
- # that kind of formatting. Also don't strip newlines that follow a
- # period; we handle those specially next. And don't strip
- # newlines that precede an open paren.
- 1 while ($text =~ s/(^|\n)([^>\s].*[^.\n])\n([^>\n])/$1$2 $3/g);
-
- # If a newline follows a period, make sure that when we bring up the
- # bottom sentence, it begins with two spaces.
-    1 while ($text =~ s/(^|\n)([^>\s].*)\n([^>\n])/$1$2  $3/g);
- }
-
- return $text;
-}
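-
-# For example (non-XML, wrapping on), the text
-#   "Fix the parser.\nIt was broken.\nSee bug 42."
-# comes back as "Fix the parser.  It was broken.  See bug 42." -- lone
-# newlines are folded, keeping two spaces after sentence ends, while
-# indented or ">"-quoted lines keep their line breaks.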
-
-
-sub last_line_len ()
-{
- my $files_list = shift;
- my @lines = split (/\n/, $files_list);
- my $last_line = pop (@lines);
- return length ($last_line);
-}
-
-
-# A custom wrap function, sensitive to some common constructs used in
-# log entries.
-sub wrap_log_entry ()
-{
- my $text = shift; # The text to wrap.
- my $left_pad_str = shift; # String to pad with on the left.
-
- # These do NOT take left_pad_str into account:
- my $length_remaining = shift; # Amount left on current line.
- my $max_line_length = shift; # Amount left for a blank line.
-
- my $wrapped_text = ""; # The accumulating wrapped entry.
- my $user_indent = ""; # Inherited user_indent from prev line.
-
- my $first_time = 1; # First iteration of the loop?
- my $suppress_line_start_match = 0; # Set to disable line start checks.
-
- my @lines = split (/\n/, $text);
- while (@lines) # Don't use `foreach' here, it won't work.
- {
- my $this_line = shift (@lines);
- chomp $this_line;
-
- if ($this_line =~ /^(\s+)/) {
- $user_indent = $1;
- }
- else {
- $user_indent = "";
- }
-
- # If it matches any of the line-start regexps, print a newline now...
- if ($suppress_line_start_match)
- {
- $suppress_line_start_match = 0;
- }
- elsif (($this_line =~ /^(\s*)\*\s+[a-zA-Z0-9]/)
- || ($this_line =~ /^(\s*)\* [a-zA-Z0-9_\.\/\+-]+/)
- || ($this_line =~ /^(\s*)\([a-zA-Z0-9_\.\/\+-]+(\)|,\s*)/)
- || ($this_line =~ /^(\s+)(\S+)/)
- || ($this_line =~ /^(\s*)- +/)
- || ($this_line =~ /^()\s*$/)
- || ($this_line =~ /^(\s*)\*\) +/)
- || ($this_line =~ /^(\s*)[a-zA-Z0-9](\)|\.|\:) +/))
- {
- # Make a line break immediately, unless header separator is set
- # and this line is the first line in the entry, in which case
- # we're getting the blank line for free already and shouldn't
- # add an extra one.
- unless (($After_Header ne " ") and ($first_time))
- {
- if ($this_line =~ /^()\s*$/) {
- $suppress_line_start_match = 1;
- $wrapped_text .= "\n${left_pad_str}";
- }
-
- $wrapped_text .= "\n${left_pad_str}";
- }
-
- $length_remaining = $max_line_length - (length ($user_indent));
- }
-
- # Now that any user_indent has been preserved, strip off leading
- # whitespace, so up-folding has no ugly side-effects.
- $this_line =~ s/^\s*//;
-
- # Accumulate the line, and adjust parameters for next line.
- my $this_len = length ($this_line);
- if ($this_len == 0)
- {
- # Blank lines should cancel any user_indent level.
- $user_indent = "";
- $length_remaining = $max_line_length;
- }
- elsif ($this_len >= $length_remaining) # Line too long, try breaking it.
- {
- # Walk backwards from the end. At first acceptable spot, break
- # a new line.
- my $idx = $length_remaining - 1;
- if ($idx < 0) { $idx = 0 };
- while ($idx > 0)
- {
- if (substr ($this_line, $idx, 1) =~ /\s/)
- {
- my $line_now = substr ($this_line, 0, $idx);
- my $next_line = substr ($this_line, $idx);
- $this_line = $line_now;
-
- # Clean whitespace off the end.
- chomp $this_line;
-
- # The current line is ready to be printed.
- $this_line .= "\n${left_pad_str}";
-
- # Make sure the next line is allowed full room.
- $length_remaining = $max_line_length - (length ($user_indent));
-
- # Strip next_line, but then preserve any user_indent.
- $next_line =~ s/^\s*//;
-
- # Sneak a peek at the user_indent of the upcoming line, so
- # $next_line (which will now precede it) can inherit that
- # indent level. Otherwise, use whatever user_indent level
- # we currently have, which might be none.
- my $next_next_line = shift (@lines);
- if ((defined ($next_next_line)) && ($next_next_line =~ /^(\s+)/)) {
- $next_line = $1 . $next_line if (defined ($1));
- # $length_remaining = $max_line_length - (length ($1));
- $next_next_line =~ s/^\s*//;
- }
- else {
- $next_line = $user_indent . $next_line;
- }
- if (defined ($next_next_line)) {
- unshift (@lines, $next_next_line);
- }
- unshift (@lines, $next_line);
-
- # Our new next line might, coincidentally, begin with one of
- # the line-start regexps, so we temporarily turn off
- # sensitivity to that until we're past the line.
- $suppress_line_start_match = 1;
-
- last;
- }
- else
- {
- $idx--;
- }
- }
-
- if ($idx == 0)
- {
- # We bottomed out because the line is longer than the
- # available space. But that could be because the space is
- # small, or because the line is longer than even the maximum
- # possible space. Handle both cases below.
-
- if ($length_remaining == ($max_line_length - (length ($user_indent))))
- {
- # The line is simply too long -- there is no hope of ever
- # breaking it nicely, so just insert it verbatim, with
- # appropriate padding.
- $this_line = "\n${left_pad_str}${this_line}";
- }
- else
- {
- # Can't break it here, but may be able to on the next round...
- unshift (@lines, $this_line);
- $length_remaining = $max_line_length - (length ($user_indent));
- $this_line = "\n${left_pad_str}";
- }
- }
- }
- else # $this_len < $length_remaining, so tack on what we can.
- {
- # Leave a note for the next iteration.
- $length_remaining = $length_remaining - $this_len;
-
- if ($this_line =~ /\.$/)
- {
-        $this_line .= "  ";  # two spaces after a sentence end
- $length_remaining -= 2;
- }
- else # not a sentence end
- {
- $this_line .= " ";
- $length_remaining -= 1;
- }
- }
-
- # Unconditionally indicate that loop has run at least once.
- $first_time = 0;
-
- $wrapped_text .= "${user_indent}${this_line}";
- }
-
- # One last bit of padding.
- $wrapped_text .= "\n";
-
- return $wrapped_text;
-}
-
-
-sub xml_escape ()
-{
- my $txt = shift;
- $txt =~ s/&/&amp;/g;
- $txt =~ s/</&lt;/g;
- $txt =~ s/>/&gt;/g;
- return $txt;
-}
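-
-# E.g. &xml_escape ("a<b&c") returns "a&lt;b&amp;c".  Ampersands are
-# escaped first, so the entities added by the later substitutions
-# survive intact.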
-
-
-sub maybe_read_user_map_file ()
-{
- my %expansions;
-
- if ($User_Map_File)
- {
- open (MAPFILE, "<$User_Map_File")
- or die ("Unable to open $User_Map_File ($!)");
-
- while (<MAPFILE>)
- {
- next if /^\s*#/; # Skip comment lines.
- next if not /:/; # Skip lines without colons.
-
- # It is now safe to split on ':'.
- my ($username, $expansion) = split ':';
- chomp $expansion;
- $expansion =~ s/^'(.*)'$/$1/;
- $expansion =~ s/^"(.*)"$/$1/;
-
- # If it looks like the expansion has a real name already, then
- # we toss the username we got from CVS log. Otherwise, keep
- # it to use in combination with the email address.
-
- if ($expansion =~ /^\s*<{0,1}\S+@.*/) {
- # Also, add angle brackets if none present
- if (! ($expansion =~ /<\S+@\S+>/)) {
- $expansions{$username} = "$username <$expansion>";
- }
- else {
- $expansions{$username} = "$username $expansion";
- }
- }
- else {
- $expansions{$username} = $expansion;
- }
- }
-
- close (MAPFILE);
- }
-
- return %expansions;
-}
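-
-# For example, a UFILE line "jrandom:jrandom@red-bean.com" expands to
-# "jrandom <jrandom@red-bean.com>", while
-# "jrandom:'Jesse Q. Random <jrandom@red-bean.com>'" is used as-is
-# (after the surrounding quotes are stripped).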
-
-
-sub parse_options ()
-{
- # Check this internally before setting the global variable.
- my $output_file;
-
- # If this gets set, we encountered unknown options and will exit at
- # the end of this subroutine.
- my $exit_with_admonishment = 0;
-
- while (my $arg = shift (@ARGV))
- {
- if ($arg =~ /^-h$|^-help$|^--help$|^--usage$|^-?$/) {
- $Print_Usage = 1;
- }
- elsif ($arg =~ /^--debug$/) { # unadvertised option, heh
- $Debug = 1;
- }
- elsif ($arg =~ /^--version$/) {
- $Print_Version = 1;
- }
- elsif ($arg =~ /^-g$|^--global-opts$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- # Don't assume CVS is called "cvs" on the user's system:
- $Log_Source_Command =~ s/(^\S*)/$1 $narg/;
- }
- elsif ($arg =~ /^-l$|^--log-opts$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- $Log_Source_Command .= " $narg";
- }
- elsif ($arg =~ /^-f$|^--file$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- $output_file = $narg;
- }
- elsif ($arg =~ /^--accum$/) {
- $Cumulative = 1;
- }
- elsif ($arg =~ /^--fsf$/) {
- $FSF_Style = 1;
- }
- elsif ($arg =~ /^-U$|^--usermap$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- $User_Map_File = $narg;
- }
- elsif ($arg =~ /^-W$|^--window$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- $Max_Checkin_Duration = $narg;
- }
- elsif ($arg =~ /^-I$|^--ignore$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- push (@Ignore_Files, $narg);
- }
- elsif ($arg =~ /^-C$|^--case-insensitive$/) {
- $Case_Insensitive = 1;
- }
- elsif ($arg =~ /^-R$|^--regexp$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- $Regexp_Gate = $narg;
- }
- elsif ($arg =~ /^--stdout$/) {
- $Output_To_Stdout = 1;
- }
- elsif ($arg =~ /^-d$|^--distributed$/) {
- $Distributed = 1;
- }
- elsif ($arg =~ /^-P$|^--prune$/) {
- $Prune_Empty_Msgs = 1;
- }
- elsif ($arg =~ /^-S$|^--separate-header$/) {
- $After_Header = "\n\n";
- }
- elsif ($arg =~ /^--no-wrap$/) {
- $No_Wrap = 1;
- }
- elsif ($arg =~ /^--gmt$|^--utc$/) {
- $UTC_Times = 1;
- }
- elsif ($arg =~ /^-w$|^--day-of-week$/) {
- $Show_Day_Of_Week = 1;
- }
- elsif ($arg =~ /^-r$|^--revisions$/) {
- $Show_Revisions = 1;
- }
- elsif ($arg =~ /^-t$|^--tags$/) {
- $Show_Tags = 1;
- }
- elsif ($arg =~ /^-b$|^--branches$/) {
- $Show_Branches = 1;
- }
- elsif ($arg =~ /^-F$|^--follow$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- push (@Follow_Branches, $narg);
- }
- elsif ($arg =~ /^--stdin$/) {
- $Input_From_Stdin = 1;
- }
- elsif ($arg =~ /^--header$/) {
- my $narg = shift (@ARGV) || die "$arg needs argument.\n";
- $ChangeLog_Header = &slurp_file ($narg);
- if (! defined ($ChangeLog_Header)) {
- $ChangeLog_Header = "";
- }
- }
- elsif ($arg =~ /^--xml$/) {
- $XML_Output = 1;
- }
- elsif ($arg =~ /^--hide-filenames$/) {
- $Hide_Filenames = 1;
- $After_Header = "";
- }
- else {
- # Just add a filename as argument to the log command
- $Log_Source_Command .= " $arg";
- }
- }
-
- ## Check for contradictions...
-
- if ($Output_To_Stdout && $Distributed) {
- print STDERR "cannot pass both --stdout and --distributed\n";
- $exit_with_admonishment = 1;
- }
-
- if ($Output_To_Stdout && $output_file) {
- print STDERR "cannot pass both --stdout and --file\n";
- $exit_with_admonishment = 1;
- }
-
- if ($XML_Output && $Cumulative) {
- print STDERR "cannot pass both --xml and --accum\n";
- $exit_with_admonishment = 1;
- }
-
- # Or if any other error message has already been printed out, we
- # just leave now:
- if ($exit_with_admonishment) {
- &usage ();
- exit (1);
- }
- elsif ($Print_Usage) {
- &usage ();
- exit (0);
- }
- elsif ($Print_Version) {
- &version ();
- exit (0);
- }
-
- ## Else no problems, so proceed.
-
- if ($output_file) {
- $Log_File_Name = $output_file;
- }
-}
-
-
-sub slurp_file ()
-{
- my $filename = shift || die ("no filename passed to slurp_file()");
- my $retstr;
-
- open (SLURPEE, "<${filename}") or die ("unable to open $filename ($!)");
- my $saved_sep = $/;
- undef $/;
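-  # With $/ undefined, the next read returns the whole rest of the file
-  # in one gulp instead of a single line.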
- $retstr = <SLURPEE>;
- $/ = $saved_sep;
- close (SLURPEE);
- return $retstr;
-}
-
-
-sub debug ()
-{
- if ($Debug) {
- my $msg = shift;
- print STDERR $msg;
- }
-}
-
-
-sub version ()
-{
- print "cvs2cl.pl version ${VERSION}; distributed under the GNU GPL.\n";
-}
-
-
-sub usage ()
-{
- &version ();
- print <<'END_OF_INFO';
-Generate GNU-style ChangeLogs in CVS working copies.
-
-Notes about the output format(s):
-
- The default output of cvs2cl.pl is designed to be compact, formally
- unambiguous, but still easy for humans to read. It is largely
- self-explanatory, I hope; the one abbreviation that might not be
- obvious is "utags". That stands for "universal tags" -- a
- universal tag is one held by all the files in a given change entry.
-
- If you need output that's easy for a program to parse, use the
- --xml option. Note that with XML output, just about all available
- information is included with each change entry, whether you asked
- for it or not, on the theory that your parser can ignore anything
- it's not looking for.
-
-Notes about the options and arguments (the actual options are listed
-last in this usage message):
-
- * The -I and -F options may appear multiple times.
-
- * To follow trunk revisions, use "-F trunk" ("-F TRUNK" also works).
-    This is okay because no one would ever, ever be crazy enough to name
-    a
- branch "trunk", right? Right.
-
- * For the -U option, the UFILE should be formatted like
- CVSROOT/users. That is, each line of UFILE looks like this
- jrandom:jrandom@red-bean.com
- or maybe even like this
- jrandom:'Jesse Q. Random <jrandom@red-bean.com>'
- Don't forget to quote the portion after the colon if necessary.
-
- * Many people want to filter by date. To do so, invoke cvs2cl.pl
- like this:
- cvs2cl.pl -l "-d'DATESPEC'"
- where DATESPEC is any date specification valid for "cvs log -d".
-    (Note that CVS 1.10.7 and below require that there be no space between
- -d and its argument).
-
-Options/Arguments:
-
- -h, -help, --help, or -? Show this usage and exit
- --version Show version and exit
- -r, --revisions Show revision numbers in output
- -b, --branches Show branch names in revisions when possible
- -t, --tags Show tags (symbolic names) in output
- --stdin Read from stdin, don't run cvs log
- --stdout Output to stdout not to ChangeLog
- -d, --distributed Put ChangeLogs in subdirs
- -f FILE, --file FILE Write to FILE instead of "ChangeLog"
- --fsf Use this if log data is in FSF ChangeLog style
- -W SECS, --window SECS Window of time within which log entries unify
- -U UFILE, --usermap UFILE Expand usernames to email addresses from UFILE
- -R REGEXP, --regexp REGEXP Include only entries that match REGEXP
- -I REGEXP, --ignore REGEXP Ignore files whose names match REGEXP
- -C, --case-insensitive Any regexp matching is done case-insensitively
- -F BRANCH, --follow BRANCH Show only revisions on or ancestral to BRANCH
- -S, --separate-header Blank line between each header and log message
- --no-wrap Don't auto-wrap log message (recommend -S also)
- --gmt, --utc Show times in GMT/UTC instead of local time
- --accum Add to an existing ChangeLog (incompat w/ --xml)
- -w, --day-of-week Show day of week
- --header FILE Get ChangeLog header from FILE ("-" means stdin)
- --xml Output XML instead of ChangeLog format
- --hide-filenames Don't show filenames (ignored for XML output)
- -P, --prune Don't show empty log messages
- -g OPTS, --global-opts OPTS Invoke like this "cvs OPTS log ..."
- -l OPTS, --log-opts OPTS Invoke like this "cvs ... log OPTS"
- FILE1 [FILE2 ...] Show only log information for the named FILE(s)
-
-See http://www.red-bean.com/cvs2cl for maintenance and bug info.
-END_OF_INFO
-}
-
-__END__
-
-=head1 NAME
-
-cvs2cl.pl - produces GNU-style ChangeLogs in CVS working copies, by
- running "cvs log" and parsing the output. Shared log entries are
- unified in an intuitive way.
-
-=head1 DESCRIPTION
-
-This script generates GNU-style ChangeLog files from CVS log
-information. Basic usage: just run it inside a working copy and a
-ChangeLog will appear. It requires repository access (i.e., 'cvs log'
-must work). Run "cvs2cl.pl --help" to see more advanced options.
-
-See http://www.red-bean.com/cvs2cl for updates, and for instructions
-on getting anonymous CVS access to this script.
-
-Maintainer: Karl Fogel <kfogel@red-bean.com>
-Please report bugs to <bug-cvs2cl@red-bean.com>.
-
-=head1 README
-
-This script generates GNU-style ChangeLog files from CVS log
-information. Basic usage: just run it inside a working copy and a
-ChangeLog will appear. It requires repository access (i.e., 'cvs log'
-must work). Run "cvs2cl.pl --help" to see more advanced options.
-
-See http://www.red-bean.com/cvs2cl for updates, and for instructions
-on getting anonymous CVS access to this script.
-
-Maintainer: Karl Fogel <kfogel@red-bean.com>
-Please report bugs to <bug-cvs2cl@red-bean.com>.
-
-=head1 PREREQUISITES
-
-This script requires C<Text::Wrap>, C<Time::Local>, and
-C<File::Basename>.
-It also seems to require C<Perl 5.004_04> or higher.
-
-=pod OSNAMES
-
-any
-
-=pod SCRIPT CATEGORIES
-
-Version_Control/CVS
-
-=cut
-
-
--*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*-
-
-Note about a bug-slash-opportunity:
------------------------------------
-
-There's a bug in Text::Wrap, which affects cvs2cl. This script
-reveals it:
-
- #!/usr/bin/perl -w
-
- use Text::Wrap;
-
- my $test_text =
- "This script demonstrates a bug in Text::Wrap. The very long line
- following this paragraph will be relocated relative to the surrounding
- text:
-
- ====================================================================
-
- See? When the bug happens, we'll get the line of equal signs below
- this paragraph, even though it should be above.";
-
-
- # Print out the test text with no wrapping:
- print "$test_text";
- print "\n";
- print "\n";
-
- # Now print it out wrapped, and see the bug:
- print wrap ("\t", " ", "$test_text");
- print "\n";
- print "\n";
-
-If the line of equal signs were one character shorter, the bug
-wouldn't happen.  Interesting.
-
-Anyway, rather than fix this in Text::Wrap, we might as well write a
-new wrap() which has the following much-needed features:
-
-* initial indentation, like current Text::Wrap()
-* subsequent line indentation, like current Text::Wrap()
-* user chooses among: force-break long words, leave them alone, or die()?
-* preserve existing indentation: chopped chunks from an indented line
- are indented by same (like this line, not counting the asterisk!)
-* optional list of things to preserve on line starts, default ">"
-
-Note that the last two are essentially the same concept, so unify in
-implementation and give a good interface to controlling them.
-
-And how about:
-
-Optionally, when encountering a line pre-indented the same as the
-previous line, strip the newline and refill, but keep that indentation.
-Yeah...
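-
-A rough sketch of such a wrap(), showing just the preserve-indentation
-idea (hypothetical, untested code -- not what cvs2cl.pl uses):
-
-    sub indent_wrap {
-        my ($text, $width) = @_;
-        my $out = "";
-        foreach my $line (split (/\n/, $text)) {
-            # Chopped chunks inherit the line's own leading whitespace.
-            my ($indent) = ($line =~ /^(\s*)/);
-            $line =~ s/^\s*//;
-            my $room = $width - length ($indent);
-            $room = 1 if ($room < 1);
-            while (length ($line) > $room) {
-                my $idx = rindex ($line, " ", $room);
-                last if ($idx <= 0);    # unbreakable long word, leave it
-                $out .= $indent . substr ($line, 0, $idx) . "\n";
-                $line = substr ($line, $idx + 1);
-            }
-            $out .= $indent . $line . "\n";
-        }
-        return $out;
-    }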
diff --git a/storage/ndb/home/bin/fix-cvs-root b/storage/ndb/home/bin/fix-cvs-root
deleted file mode 100755
index 2c4f158f825..00000000000
--- a/storage/ndb/home/bin/fix-cvs-root
+++ /dev/null
@@ -1,17 +0,0 @@
-#! /bin/sh
-
-# change all CVS/Root to current CVSROOT
-
-[ "$CVSROOT" ] || { echo "no CVSROOT in environment" >&2; exit 1; }
-
-echo "changing all CVS/Root files under `pwd`"
-sleep 1
-
-find . -path '*/CVS/Root' -print |
-while read file; do
- echo "$file"
- chmod +w $file || exit 1
- echo $CVSROOT >$file || exit 1
-done
-
-echo "done"
diff --git a/storage/ndb/home/bin/import-from-bk.sh b/storage/ndb/home/bin/import-from-bk.sh
deleted file mode 100755
index 4e3957be6d5..00000000000
--- a/storage/ndb/home/bin/import-from-bk.sh
+++ /dev/null
@@ -1,158 +0,0 @@
-#! /bin/sh
-
-# XXX does not delete files
-# XXX does not handle nested new dirs
-# this script screams for perl, no time now
-# look for bk2cvs on the net
-
-PATH=/usr/local/bin:$PATH; export PATH
-LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH; export LD_LIBRARY_PATH
-
-batch=n
-if [ "$1" = "-batch" ]; then
- batch=y
- shift
-fi
-
-say() {
- echo "$*"
-}
-
-die() {
- case $# in
- 0) set -- "command failed" ;;
- esac
- say "$* -- aborted" >&2
- exit 1
-}
-
-usage() {
- die "usage: $0 [-batch] top -- copy from mysql/ndb to another NDB_TOP"
-}
-
-doit() {
- cmd="$*"
- if [ $batch = n ]; then
- echo -n "$cmd [y]"
- read junk
- sh -c "$cmd"
- return 0
- else
- echo "$cmd"
- sh -c "$cmd"
- return $?
- fi
-}
-
-say "======================"
-say "`date`"
-
-case $# in
-1) [ -d $1/src/CVS ] || die "$1 is not an NDB_TOP"
- top=$1 ;;
-*) usage ;;
-esac
-
-if ! fgrep ndb_kernel_version.h $top/include/kernel/CVS/Entries >/dev/null 2>&1; then
- die "$top is not an NDB_TOP"
-fi
-
-if find $top -path '*/CVS/Tag' -print | grep . >/dev/null; then
- die "$top: contains CVS/Tag files, not accepted"
-fi
-
-if [ ! -f include/SCCS/s.ndb_version.h ]; then
- die "current dir ($PWD) is not an NDB_TOP"
-fi
-
-doit "bk pull" || exit 1
-doit "bk -r clean"
-doit "bk -r get -q"
-
-files=`bk -r. sfiles -g |
- fgrep -v ' ' |
- fgrep -v /.cvsignore`
-
-n=0
-files2=
-for f in $files; do
- if [ ! -f $f ]; then
- die "$f: no such file"
- fi
- if [ -w $f ]; then
- say "$f: is writable, accept anyway"
- fi
- files2="$files2 $f"
- n=$((n+1))
-done
-files=$files2
-say "$n files..."
-
-adddirs= addfiles= updfiles=
-for f in $files; do
- d=`dirname $f`
- b=`basename $f`
- if [ ! -f $top/$d/CVS/Entries ]; then
- found=n
- for x in $adddirs; do
- if [ $x = $d ]; then found=y; break; fi
- done
- if [ $found = n ]; then
- say "$d: to create dir"
- adddirs="$adddirs $d"
- fi
- addfiles="$addfiles $f"
- say "$f: to create"
- elif ! fgrep "/$b/" $top/$d/CVS/Entries >/dev/null; then
- addfiles="$addfiles $f"
- say "$f: to create"
- else
- cmp $f $top/$f >/dev/null
- case $? in
- 0) continue ;;
- 1) ;;
- *) die "$f: unknown error" ;;
- esac
- updfiles="$updfiles $f"
- say "$f: to update"
- fi
-done
-
-for d in $adddirs; do
- doit "cd $top && mkdir -p $d" || die
-done
-
-for f in $addfiles $updfiles; do
- doit "cp -fp $f $top/$f" || die
-done
-
-for d in $adddirs; do
- # fix 1 level up
- d2=`dirname $d`
- if [ ! -d $top/$d2/CVS ]; then
- doit "cd $top && cvs add $d2" || die
- fi
- doit "cd $top && cvs add $d" || die
-done
-
-for f in $addfiles; do
- kb=
-  # Single-quote the perl so the shell leaves $_ alone (binary-file test).
-  if echo $f | perl -nle 'print(-B $_)' | grep 1 >/dev/null; then
- kb="-kb"
- fi
- doit "cd $top && cvs add $kb $f" || die
-done
-
-tag=import_bk_`date +%Y_%m_%d`
-
-doit "cd $top && cvs commit -m $tag" || die
-doit "cd $top && cvs tag -F $tag" || die
-
-env="NDB_TOP=$top; export NDB_TOP"
-env="$env; USER_FLAGS='-DAPI_TRACE -fmessage-length=0'; export USER_FLAGS"
-doit "$env; cd $top && ./configure"
-doit "$env; cd $top && sh config/GuessConfig.sh"
-doit "$env; cd $top && make clean nuke-deps vim-tags"
-doit "$env; cd $top && make" || die
-
-say "imported ok"
diff --git a/storage/ndb/home/bin/ndb_deploy b/storage/ndb/home/bin/ndb_deploy
deleted file mode 100755
index 773fc9b8fd7..00000000000
--- a/storage/ndb/home/bin/ndb_deploy
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh
-
-if [ $# -eq 0 ]
-then
- for i in $DEPLOY_DST
- do
- rsync -r -v --exclude '*.a' $NDB_TOP/bin $NDB_TOP/lib $i/
- done
-else
- while [ $# -gt 0 ]
- do
- arg=$1
- shift;
- if [ `echo $arg | grep -c lib` -eq 0 ]
- then
- dst=bin/
- else
- dst=lib/
- fi
-
- for i in $DEPLOY_DST
- do
- rsync -v $arg $i/$dst
- done
- done
-fi
-
diff --git a/storage/ndb/home/bin/ndbdoxy.pl b/storage/ndb/home/bin/ndbdoxy.pl
deleted file mode 100755
index 89b7de8440e..00000000000
--- a/storage/ndb/home/bin/ndbdoxy.pl
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/local/bin/perl
-#
-# ndbdoxy.pl Executes doxygen on a checked out version of NDB Cluster
-#
-# Written by Lars Thalmann, 2003.
-
-use strict;
-umask 000;
-
-# -----------------------------------------------------------------------------
-# Settings
-# -----------------------------------------------------------------------------
-
-my $root = "/home/elathal/public_html/cvsdoxy";
-
-$ENV{LD_LIBRARY_PATH} = "/usr/local/lib:/opt/as/local/lib";
-$ENV{LD_LIBRARY_PATH} = $ENV{LD_LIBRARY_PATH} . ":/opt/as/forte6/SUNWspro/lib";
-$ENV{PATH} = $ENV{PATH} . ":/usr/local/bin:/opt/as/local/bin";
-$ENV{PATH} = $ENV{PATH} . ":/opt/as/local/teTeX/bin/sparc-sun-solaris2.8";
-
-my $DOXYGEN = "doxygen";
-my $PDFLATEX = "pdflatex";
-my $MAKEINDEX = "makeindex";
-
-# -----------------------------------------------------------------------------
-# Argument handling
-# -----------------------------------------------------------------------------
-
-if (@ARGV != 3) {
- print<<END;
-Usage:
- ndbdoxy.pl <module> <title> <version>
-
- where
-    <module>  is the cvsdoxy module to doxygenify
-    <title>   is the title of the report
-    <version> is the version of NDB Cluster
-END
- exit;
-}
-my $module = $ARGV[0];
-my $title = $ARGV[1];
-my $version = $ARGV[2];
-my $destdir = ".";
-
-# -----------------------------------------------------------------------------
-# Execute Doxygen -g
-# -----------------------------------------------------------------------------
-
-if (-r "${root}/doxyfiles/${module}.doxyfile") {
- system("cd ${destdir}; \
- cp ${root}/doxyfiles/${module}.doxyfile Doxyfile");
-} elsif (-r "${root}/doxyfiles/default.doxyfile") {
- system("cd ${destdir}; \
- cp ${root}/doxyfiles/default.doxyfile Doxyfile");
-} else {
- system("cd ${destdir}; $DOXYGEN -g");
-}
-
-# -----------------------------------------------------------------------------
-# HTML Footer
-# -----------------------------------------------------------------------------
-
-if (-r "${root}/doxyfiles/htmlfooter") {
- system("cd ${destdir}; \
- cp ${root}/doxyfiles/htmlfooter footer.html");
-
- open (INFILE, "< ${destdir}/footer.html")
- or die "Error opening ${destdir}/footer.html.\n";
- open (OUTFILE, "> ${destdir}/footer.html.new")
- or die "Error opening ${destdir}/footer.html.new.\n";
- while (<INFILE>) {
- if (/(.*)DATE(.*)$/) {
- print OUTFILE $1 . localtime() . $2;
- } else {
- print OUTFILE;
- }
- }
- close INFILE;
- close OUTFILE;
-
- system("mv ${destdir}/footer.html.new ${destdir}/footer.html");
-} else {
-  print("Warning: No ${root}/doxyfiles/htmlfooter\n");
-}
-
-# -----------------------------------------------------------------------------
-# Execute Doxygen
-# -----------------------------------------------------------------------------
-
-system("cd ${destdir}; $DOXYGEN");
-
-# -----------------------------------------------------------------------------
-# Change a little in refman.tex
-# -----------------------------------------------------------------------------
-
-open (INFILE, "< ${destdir}/latex/refman.tex")
- or die "Error opening ${destdir}/latex/refman.tex.\n";
-open (OUTFILE, "> ${destdir}/latex/refman.tex.new")
- or die "Error opening ${destdir}/latex/refman.tex.new.\n";
-
-while (<INFILE>)
-{
- if (/(.*)Reference Manual(.*)$/) {
- print OUTFILE $1 .
- "\\mbox{}\\vspace{-3cm}\\mbox{}" .
- "\\hrule\\bigskip\\bigskip\\bigskip\\bigskip" .
- "\\Huge{" . $title . "}" . $2;
- } elsif (/(.*)Generated by Doxygen 1.2.1[0-9](.*)$/) {
- print OUTFILE $1 .
- "\\begin{center}" .
- "\\LARGE{MySQL AB}" .
- "\\end{center}".
- "\\hfill\\bigskip\\bigskip\\bigskip\\hrule" .
- "\\bigskip\\bigskip\\bigskip\\bigskip\\bigskip" .
- "\\bigskip\\bigskip\\bigskip\\bigskip\\bigskip" .
- "\\bigskip\\bigskip NDB Cluster Release " . $version .
- "\\bigskip\\bigskip\\bigskip\\bigskip\\bigskip\\hfill" .
- $2;
- } elsif (/\\chapter\{File Index\}/) {
- print OUTFILE "\%\\chapter{File Index}\n";
- } elsif (/\\input{files}/) {
- print OUTFILE "\%\\input{files}\n";
- } elsif (/\\chapter\{Page Index\}/) {
- print OUTFILE "\%\\chapter{Page Index}\n";
- } elsif (/\\input{pages}/) {
- print OUTFILE "\%\\input{pages}\n";
- } else {
- print OUTFILE;
- }
-}
-
-close INFILE;
-close OUTFILE;
-
-system("mv ${destdir}/latex/refman.tex.new ${destdir}/latex/refman.tex");
-
-# -----------------------------------------------------------------------------
-# Change a little in doxygen.sty
-# -----------------------------------------------------------------------------
-
-open (INFILE, "< ${destdir}/latex/doxygen.sty")
- or die "Error opening INFILE.\n";
-open (OUTFILE, "> ${destdir}/latex/doxygen.sty.new")
- or die "Error opening OUTFILE.\n";
-
-while (<INFILE>)
-{
- if (/\\rfoot/) {
- print OUTFILE "\\rfoot[\\fancyplain{}{\\bfseries\\small \\copyright~Copyright 2003 MySQL AB\\hfill support-cluster\@mysql.com}]{}\n";
- } elsif (/\\lfoot/) {
- print OUTFILE "\\lfoot[]{\\fancyplain{}{\\bfseries\\small support-cluster\@mysql.com\\hfill \\copyright~Copyright 2003 MySQL AB}}\n";
- } else {
- print OUTFILE;
- }
-}
-
-close INFILE;
-close OUTFILE;
-
-system("mv ${destdir}/latex/doxygen.sty.new ${destdir}/latex/doxygen.sty");
-
-# -----------------------------------------------------------------------------
-# Other
-# -----------------------------------------------------------------------------
-
-#system("cd ${root}/tmp/${module}; \
-# mkdir html.tar; \
-# cd html.tar; \
-# cp -r ../html ${module}; \
-# tar cf ${module}.html.tar ${module}; \
-# /usr/local/bin/gzip ${module}.html.tar; \
-# /bin/rm -rf ${root}/tmp/${module}/html.tar/${module}");
-
-#system("cd ${destdir}/latex/; \
-# $PDFLATEX refman.tex \
-# $MAKEINDEX refman.idx \
-# $PDFLATEX refman.tex \
-# mv -f refman.pdf ${module}.pdf");
-
-print<<END;
-Execute:
- latex refman; makeindex refman; latex refman
-END
diff --git a/storage/ndb/home/bin/ngcalc b/storage/ndb/home/bin/ngcalc
deleted file mode 100755
index a289d384db9..00000000000
--- a/storage/ndb/home/bin/ngcalc
+++ /dev/null
@@ -1,78 +0,0 @@
-#! /usr/local/bin/perl
-
-use strict;
-use Getopt::Long;
-
-sub usage {
- print <<END;
-ngcalc -- calculate node groups and table fragments
-usage: ngcalc [ options ] f1 f2 ...
--g num number of node groups (default 2)
--r num number of replicas (default 2)
--n list comma-separated list of db nodes (default 1,2,...)
-fX number of fragments per node group in table X (e.g. 1,2,8)
- (all replicas count as same fragment)
-END
- exit(1);
-};
-
-use vars qw($cnoOfNodeGroups $cnoReplicas $nodeArray);
-
-$cnoOfNodeGroups = 2;
-$cnoReplicas = 2;
-GetOptions(
- "g=i" => \$cnoOfNodeGroups,
- "r=i" => \$cnoReplicas,
- "n=s" => \$nodeArray,
-) or &usage;
-
-my @tableList = @ARGV;
-
-$cnoOfNodeGroups > 0 or &usage;
-$cnoReplicas > 0 or &usage;
-if (! defined($nodeArray)) {
- $nodeArray = join(',', 1..($cnoOfNodeGroups*$cnoReplicas));
-}
-$nodeArray =~ /^\d+(,\d+)*$/ or &usage;
-my @nodeArray = split(/,/, $nodeArray);
-@nodeArray == $cnoOfNodeGroups*$cnoReplicas or &usage;
-
-my @nodeGroupRecord;
-for (my $i = 0; $i < $cnoOfNodeGroups; $i++) {
- my $rec = {};
- my $nodes = [];
- for (my $j = 0; $j < $cnoReplicas; $j++) {
- push(@$nodes, $nodeArray[$i * $cnoReplicas + $j]);
- }
- $rec->{nodesInGroup} = $nodes;
- $rec->{nodeCount} = $cnoReplicas;
- $rec->{nextReplicaNode} = 0;
- $nodeGroupRecord[$i] = $rec;
- print "NG $i: ", join(" ", @{$rec->{nodesInGroup}}), "\n";
-}
-
-# see Dbdih::execCREATE_FRAGMENTATION_REQ
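-#
-# For example (hypothetical run), "ngcalc 2" with the defaults
-# (-g 2 -r 2, nodes 1,2,3,4) prints:
-#   NG 0: 1 2
-#   NG 1: 3 4
-#   00 1 2 3 4 2 1 4 3
-# i.e. table 0 gets 2 * 2 = 4 fragments, each listed with its replica
-# nodes in order.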
-
-my $c_nextNodeGroup = 0;
-for (my $t = 0; $t < @tableList; $t++) {
- use integer;
- my $f = $tableList[$t];
- my $ng = $c_nextNodeGroup++;
- $c_nextNodeGroup = 0 if $c_nextNodeGroup == $cnoOfNodeGroups;
- my $noOfFragments = $f * $cnoOfNodeGroups;
- my @fragments;
- for (my $fragNo = 0; $fragNo < $noOfFragments; $fragNo++) {
- my $rec = $nodeGroupRecord[$ng];
- my $max = $rec->{nodeCount};
- my $ind = $rec->{nextReplicaNode};
- $rec->{nextReplicaNode} = ($ind + 1 >= $max ? 0 : $ind + 1);
- for (my $replicaNo = 0; $replicaNo < $cnoReplicas; $replicaNo++) {
- my $nodeId = $rec->{nodesInGroup}[$ind++];
- push(@fragments, $nodeId);
- $ind = ($ind == $max ? 0 : $ind);
- }
- $ng++;
- $ng = ($ng == $cnoOfNodeGroups ? 0 : $ng);
- }
- printf "%02d %s\n", $t, join(" ", @fragments);
-}
diff --git a/storage/ndb/home/bin/parseConfigFile.awk b/storage/ndb/home/bin/parseConfigFile.awk
deleted file mode 100644
index 6903949156c..00000000000
--- a/storage/ndb/home/bin/parseConfigFile.awk
+++ /dev/null
@@ -1,98 +0,0 @@
-BEGIN{
- where=0;
- n_hosts=0;
- n_api=0;
- n_ndb=0;
- n_mgm=0;
- n_ports=0;
-}
-/COMPUTERS/ {
- where=1;
-}
-/\[[ \t]*COMPUTER[ \t]*\]/ {
- where=1;
-}
-/PROCESSES/ {
- where=2;
-}
-/Type: MGMT/ {
- if(where!=1){
- where=2;
- n_mgm++;
- }
-}
-/\[[ \t]*MGM[ \t]*\]/ {
- where=2;
- n_mgm++;
-}
-/Type: DB/ {
- if(where!=1){
- where=3;
- n_ndb++;
- }
-}
-/\[[ \t]*DB[ \t]*\]/ {
- where=3;
- n_ndb++;
-}
-/Type: API/ {
- if(where!=1){
- where=4;
- n_api++;
- }
-}
-/\[[ \t]*API[ \t]*\]/ {
- where=4;
- n_api++;
-}
-/HostName:/ {
- host_names[host_ids[n_hosts]]=$2;
-}
-
-/FileSystemPath:/ {
- if (where==3){
- ndb_fs[ndb_ids[n_ndb]]=$2;
- }
-}
-
-/Id:/{
- if(where==1){
- n_hosts++;
- host_ids[n_hosts]=$2;
- }
- if(where==2){
- mgm_ids[n_mgm]=$2;
- }
- if(where==3){
- ndb_ids[n_ndb]=$2;
- }
- if(where==4){
- api_ids[n_api]=$2;
- }
-}
-/ExecuteOnComputer:/{
- if(where==2){
- mgm_hosts[mgm_ids[n_mgm]]=host_names[$2];
- }
- if(where==3){
- ndb_hosts[ndb_ids[n_ndb]]=host_names[$2];
- }
- if(where==4){
- api_hosts[api_ids[n_api]]=host_names[$2];
- }
-}
-END {
- for(i=1; i<=n_mgm; i++){
- printf("mgm_%d=%s\n", mgm_ids[i], mgm_hosts[mgm_ids[i]]);
- }
- for(i=1; i<=n_ndb; i++){
- printf("ndb_%d=%s\n", ndb_ids[i], ndb_hosts[ndb_ids[i]]);
- printf("ndbfs_%d=%s\n", ndb_ids[i], ndb_fs[ndb_ids[i]]);
- }
- for(i=1; i<=n_api; i++){
- printf("api_%d=%s\n", api_ids[i], api_hosts[api_ids[i]]);
- }
- printf("mgm_nodes=%d\n", n_mgm);
- printf("ndb_nodes=%d\n", n_ndb);
- printf("api_nodes=%d\n", n_api);
-}
diff --git a/storage/ndb/home/bin/setup-test.sh b/storage/ndb/home/bin/setup-test.sh
deleted file mode 100755
index 61097c30027..00000000000
--- a/storage/ndb/home/bin/setup-test.sh
+++ /dev/null
@@ -1,272 +0,0 @@
-#!/bin/sh
-
-# NAME
-#   setup-test.sh - Set up and start a test run
-#
-# SYNOPSIS
-#   setup-test.sh [ -x <xterm> ] [ -n <ndb dir> ] [ -r <run dir> ]
-#
-# DESCRIPTION
-#   Sets up a test run: opens the management server, ndb nodes and api
-#   nodes in xterms on their configured hosts.
-#
-# OPTIONS
-#
-# EXAMPLES
-#
-# ENVIRONMENT
-# NDB_PROJ_HOME Home dir for ndb
-#
-# FILES
-# $NDB_PROJ_HOME/lib/funcs.sh shell script functions
-#
-# DIAGNOSTICS
-#
-# VERSION
-# 1.01
-#
-# AUTHOR
-# Jonas Oreland
-#
-#
-
-progname=`basename $0`
-synopsis="setup-test.sh [-x xterm] [ -n <ndb dir>] [ -r <run dir>]"
-
-: ${NDB_PROJ_HOME:?} # If undefined, exit with error message
-
-: ${RUN_NDB_NODE_OPTIONS:=--} # If undef, set to --. Keeps getopts happy.
- # You may have to experiment a bit
- # to get quoting right (if you need it).
-
-
-. $NDB_PROJ_HOME/lib/funcs.sh # Load some good stuff
-
-# defaults for options related variables
-#
-
-verbose=yes
-options=""
-ndb_dir=$NDB_TOP
-if [ -z "$ndb_dir" ]
-then
- ndb_dir=`pwd`
-fi
-
-local_dir=`pwd`
-own_host=`hostname`
-uniq_id=$$.$$
-
-_xterm=$XTERM
-_rlogin="ssh -X"
-
-# used if error when parsing the options environment variable
-#
-env_opterr="options environment variable: <<$options>>"
-
-
-# Option parsing, for the options variable as well as the command line.
-#
-# We want to be able to set options in an environment variable,
-# as well as on the command line. In order not to have to repeat
-# the same getopts information twice, we loop two times over the
-# getopts while loop. The first time, we process options from
-# the options environment variable, the second time we process
-# options from the command line.
-#
-# The things to change are the actual options and what they do.
-#
-#
-for optstring in "$options" "" # 1. options variable 2. cmd line
-do
- while getopts n:r:x: i $optstring # optstring empty => no arg => cmd line
- do
- case $i in
-
- n) ndb_dir=$OPTARG;; # Ndb dir
- r) run_dir=$OPTARG;; # Run dir
- x) _xterm=$OPTARG;;
- \?) syndie $env_opterr;; # print synopsis and exit
-
- esac
- done
-
- [ -n "$optstring" ] && OPTIND=1 # Reset for round 2, cmdline options
-
- env_opterr= # Round 2 should not use the value
-
-done
-shift `expr $OPTIND - 1`
-
-# --- option parsing done ---
-
-ndb_dir=`abspath $ndb_dir`
-run_dir=`abspath $run_dir`
-
-trace "Verifying arguments"
-
-if [ ! -d $ndb_dir/bin ] || [ ! -d $ndb_dir/lib ]
-then
-    msg "Ndb home path seems incorrect: either $ndb_dir/bin or $ndb_dir/lib not found"
- exit 1004
-fi
-
-ndb_bin=$ndb_dir/bin/ndb
-mgm_bin=$ndb_dir/bin/mgmtsrvr
-api_lib=$ndb_dir/lib/libNDB_API.so
-
-if [ ! -x $ndb_bin ]
-then
- msg "Ndb path seems incorrect ndb binary not found: $ndb_bin"
- exit 1004
-fi
-
-if [ ! -x $mgm_bin ]
-then
- msg "Ndb path seems incorrect management server binary not found: $mgm_bin"
- exit 1004
-fi
-
-init_config=$run_dir/mgm.1/initconfig.txt
-local_config=$run_dir/mgm.1/localcfg.txt
-if [ ! -r $init_config ] || [ ! -r $local_config ]
-then
-    msg "Run path seems incorrect: $init_config or $local_config not found"
- exit 1004
-fi
-
-trace "Parsing $init_config"
-awk -f $NDB_PROJ_HOME/bin/parseConfigFile.awk $init_config > /tmp/run-test.$uniq_id
-. /tmp/run-test.$uniq_id
-cat /tmp/run-test.$uniq_id
-rm -f /tmp/run-test.$uniq_id
-
-trace "Parsing $local_config"
-MgmPort=`grep -v "OwnProcessId" $local_config | cut -d " " -f 2`
-
-trace "Verifying that mgm port is empty"
-telnet $mgm_1 $MgmPort > /tmp/mgm_port.$uniq_id 2>&1 <<EOF
-EOF
-
-if [ 0 -lt `grep -c -i connected /tmp/mgm_port.$uniq_id` ]
-then
- rm /tmp/mgm_port.$uniq_id
- msg "There is already something using port $mgm_1:$MgmPort"
- exit 1003
-fi
-rm /tmp/mgm_port.$uniq_id
-
-fixhost(){
- if [ "$1" != localhost ]
- then
- echo $1
- else
- uname -n
- fi
-}
-
-do_xterm(){
- title=$1
- shift
- xterm -fg black -title "$title" -e $*
-}
-
-save_profile(){
- cp $HOME/.profile /tmp/.profile.$uniq_id
-}
-
-wait_restore_profile(){
- while [ -r /tmp/.profile.$uniq_id ]
- do
- sleep 1
- done
-}
-
-start_mgm(){
- trace "Starting Management server on: $mgm_1"
- save_profile
- mgm_1=`fixhost $mgm_1`
-
- (
- echo "PATH=$ndb_dir/bin:\$PATH"
- echo "LD_LIBRARY_PATH=$ndb_dir/lib:\$LD_LIBRARY_PATH"
- echo "export PATH LD_LIBRARY_PATH"
- echo "cd $run_dir/mgm.1"
- echo "ulimit -Sc unlimited"
- echo "mv /tmp/.profile.$uniq_id $HOME/.profile"
- ) >> $HOME/.profile
-    do_xterm "Mgm on $mgm_1" ${_rlogin} $mgm_1 &
- wait_restore_profile
-}
-
-start_ndb_node(){
- node_id=$1
- dir=$run_dir/ndb.$1
- ndb_host=`eval echo "\$"ndb_$node_id`
- ndb_host=`fixhost $ndb_host`
- ndb_fs=`eval echo "\$"ndbfs_$node_id`
-
- trace "Starting Ndb node $node_id on $ndb_host"
- save_profile
-
- (
- echo "PATH=$ndb_dir/bin:\$PATH"
- echo "LD_LIBRARY_PATH=$ndb_dir/lib:\$LD_LIBRARY_PATH"
- echo "mkdir -p $ndb_fs"
- echo "export PATH LD_LIBRARY_PATH"
- echo "cd $dir"
- echo "ulimit -Sc unlimited"
- echo "mv /tmp/.profile.$uniq_id $HOME/.profile"
- ) >> $HOME/.profile
- do_xterm "Ndb: $node_id on $ndb_host" ${_rlogin} $ndb_host &
- wait_restore_profile
-}
-
-start_api_node(){
- node_id=$1
- dir=$run_dir/api.$1
- api_host=`eval echo "\$"api_$node_id`
- api_host=`fixhost $api_host`
-
- trace "Starting api node $node_id on $api_host"
- save_profile
-
- (
- echo "PATH=$ndb_dir/bin:\$PATH"
- echo "LD_LIBRARY_PATH=$ndb_dir/lib:\$LD_LIBRARY_PATH"
- echo "export PATH LD_LIBRARY_PATH NDB_PROJ_HOME"
- echo "cd $dir"
- echo "ulimit -Sc unlimited"
- echo "mv /tmp/.profile.$uniq_id $HOME/.profile"
- ) >> $HOME/.profile
- do_xterm "API: $node_id on $api_host" ${_rlogin} $api_host &
- wait_restore_profile
-}
-
-for_each_ndb_node(){
- i=1
- j=`expr $mgm_nodes + 1`
- while [ $i -le $ndb_nodes ]
- do
- $* $j
- j=`expr $j + 1`
- i=`expr $i + 1`
- done
-}
-
-for_each_api_node(){
- i=1
- j=`expr $mgm_nodes + $ndb_nodes + 1`
- while [ $i -le $api_nodes ]
- do
- $* $j
- j=`expr $j + 1`
- i=`expr $i + 1`
- done
-}
-
-start_mgm
-for_each_ndb_node start_ndb_node
-for_each_api_node start_api_node
-
-exit 0
-
diff --git a/storage/ndb/home/bin/signallog2html.lib/signallog2list.awk b/storage/ndb/home/bin/signallog2html.lib/signallog2list.awk
deleted file mode 100644
index 9839f314556..00000000000
--- a/storage/ndb/home/bin/signallog2html.lib/signallog2list.awk
+++ /dev/null
@@ -1,102 +0,0 @@
-BEGIN{
- PRINT=0;
- SIGNAL_ARRAY[0]="";
- BLOCK_ID=0;
- SIGNAL_ID=-22;
-}
-{
- SIGNAL_ARRAY[SIGNAL_ID]=SIGNAL_ID;
-}
-
-/^---- Send ----- Signal ----------------/ {
- DIRECTION="S";
- SENDER="";
- SENDPROCESS="";
- RECEIVER="";
- RECPROCESS="";
- SIGNAL="";
- RECSIGID="?";
- SIGID="?";
- DELAY="N/A";
-}
-
-/^---- Send delay Signal/ {
- DIRECTION="SD";
- SENDER="";
- SENDPROCESS="";
- RECEIVER="";
- RECPROCESS="";
- SIGNAL="";
- RECSIGID="?";
- SIGID="?";
- DELAY=$5;
-
- LEN=length(DELAY);
- DELAY=substr(DELAY,2,LEN);
-}
-
-/^---- Received - Signal ----------------/ {
- DIRECTION="R";
- SENDER="";
- SENDPROCESS="";
- RECEIVER="";
- RECPROCESS="";
- SIGNAL="";
- RECSIGID="?";
- SIGID="?";
- DELAY="N/A";
-}
-
-/r.bn:/{
-
- RECEIVER=$3;
- RECPROCESS=$5;
-
- if(DIRECTION == "R"){
- SIGNAL=$10;
- RECSIGID=$7;
- }
- else
- SIGNAL=$8;
-}
-
-/s.bn:/{
-
- SENDER=$3;
- SIGID=$7;
-
- if(SIGID == SIGNAL_ARRAY[SIGID]){
- PRINT=1;
- if(DIRECTION == "R"){
- SIGNAL_ARRAY[RECSIGID]=RECSIGID;
- };
- }
-
- SENDPROCESS=$5;
-
- LEN=length(RECEIVER);
- RECEIVER=substr(RECEIVER,2,LEN-3);
-
- if(BLOCK_ID == "ALL" || RECEIVER==BLOCK_ID){PRINT=1; }
-
- LEN=length(SENDER);
- SENDER=substr(SENDER,2,LEN-3);
- if(BLOCK_ID == "ALL" || SENDER == BLOCK_ID){ PRINT=1;}
-
- LEN=length(SIGNAL);
- SIGNAL=substr(SIGNAL,2,LEN-2);
-
- LEN=length(SENDPROCESS);
- SENDPROCESS=substr(SENDPROCESS,1,LEN-1);
-
- LEN=length(RECPROCESS);
- RECPROCESS=substr(RECPROCESS,1,LEN-1);
-
- if( PRINT == 1){
- print DIRECTION" "SENDPROCESS" "SENDER" "RECPROCESS" "RECEIVER" "SIGNAL" "SIGID" "RECSIGID" "DELAY;
- }
-
- PRINT=0;
-}
-
-
diff --git a/storage/ndb/home/bin/signallog2html.lib/uniq_blocks.awk b/storage/ndb/home/bin/signallog2html.lib/uniq_blocks.awk
deleted file mode 100644
index 43f48d1cde1..00000000000
--- a/storage/ndb/home/bin/signallog2html.lib/uniq_blocks.awk
+++ /dev/null
@@ -1,29 +0,0 @@
-BEGIN{
- NAMES[""]="";
- ORDER[0]="";
- NUM=0;
-}
-
-{
- if(NAMES[$2$3]!=$2$3){
- NAMES[$2$3]=$2$3;
- ORDER[NUM]=$2$3;
- NUM++;
- }
-
- if(NAMES[$4$5]!=$4$5){
- NAMES[$4$5]=$4$5;
- ORDER[NUM]=$4$5;
- NUM++;
- }
-
-
-}
-END{
- for(i=0; i<NUM; i++){
- LIST=ORDER[i]" "LIST;
-
- }
- print LIST;
-}
-
diff --git a/storage/ndb/home/bin/signallog2html.sh b/storage/ndb/home/bin/signallog2html.sh
deleted file mode 100755
index 5665275807c..00000000000
--- a/storage/ndb/home/bin/signallog2html.sh
+++ /dev/null
@@ -1,349 +0,0 @@
-#!/bin/sh
-# NAME
-# signallog2html.sh
-#
-# SYNOPSIS
-# signallog2html.sh [ -b <block_name | ALL> ] [ -s <signal_id> ] -f signal_log_file
-#
-# DESCRIPTION
-# Creates a signal sequence diagram in HTML format that can be
-# viewed from a web browser. The HTML file is created from a signal
-# log file and it contains a big table with jpeg files in every
-# table cell. Every row in the table is a signal. The block_name
-# could be one of the following: CMVMI MISSRA NDBFS NDBCNTR DBACC
-# DBDICT DBLQH DBDIH DBTC DBTUP QMGR ALL. The signal_id is a
-# number. If no block_name or signal_id is given the default
-# block_name "ALL" is used.
-#
-#
-#
-# OPTIONS
-#
-# EXAMPLES
-#
-#
-# ENVIRONMENT
-# NDB_PROJ_HOME Home dir for ndb
-#
-# FILES
-# $NDB_PROJ_HOME/lib/funcs.sh General shell script functions.
-# uniq_blocks.awk Creates a list of unique blocks
-# in the signal_log_file.
-# signallog2list.awk Creates a list file from the signal_log_file.
-# empty.JPG Jpeg file, must exist in the HTML file
-# directory for viewing.
-# left_line.JPG
-# line.JPG
-# right_line.JPG
-# self_line.JPG
-#
-#
-# SEE ALSO
-#
-# DIAGNOSTICS
-#
-# VERSION
-# 1.0
-#
-# DATE
-# 011029
-#
-# AUTHOR
-# Jan Markborg
-#
-
-progname=`basename $0`
-synopsis="signallog2html.sh [ -b <block_name | ALL> ] [ -s <signal_id> ] -f signal_log_file"
-block_name=""
-signal_id=""
-verbose=yes
-signal_log_file=""
-
-: ${NDB_PROJ_HOME:?} # If undefined, exit with error message
-
-: ${NDB_LOCAL_BUILD_OPTIONS:=--} # If undef, set to --. Keeps getopts happy.
- # You may have to experiment a bit
- # to get quoting right (if you need it).
-
-
-. $NDB_PROJ_HOME/lib/funcs.sh # Load some good stuff
-
-# defaults for options related variables
-#
-report_date=`date '+%Y-%m-%d'`
-
-# Option parsing for the the command line.
-#
-
-while getopts f:b:s: i
-do
- case $i in
- f) signal_log_file=$OPTARG;;
- b) block_name=$OPTARG;;
- s) signal_id=$OPTARG;;
- \?) syndie ;; # print synopsis and exit
- esac
-done
-
-# -- Verify
-trace "Verifying signal_log_file $signal_log_file"
-
-if [ x$signal_log_file = "x" ]
-then
- syndie "Invalid signal_log_file name: $signal_log_file not found"
-fi
-
-
-if [ ! -r $signal_log_file ]
-then
- syndie "Invalid signal_log_file name: $signal_log_file not found"
-fi
-
-
-
-if [ blocknameSET = 1 ]
-then
-
- trace "Verifying block_name"
- case $block_name in
- CMVMI| MISSRA| NDBFS| NDBCNTR| DBACC| DBDICT| DBLQH| DBDIH| DBTC| DBTUP| QMGR);;
- ALL) trace "Signals to/from every block will be traced!";;
- *) syndie "Unknown block name: $block_name";;
- esac
-fi
-
-if [ block_name="" -a signal_id="" ]
-then
- block_name=ALL
- trace "block_name = $block_name"
-fi
-
-trace "Arguments OK"
-
-###
-#
-# General html functions
-header(){
- cat <<EOF
-<html><head><title>$*</title></head>
-<body>
-EOF
-}
-
-footer(){
- cat <<EOF
-</body></html>
-EOF
-}
-
-heading(){
- h=$1; shift
- cat <<EOF
-<h$h>$*</h$h>
-EOF
-}
-
-table(){
- echo "<table $*>"
-}
-
-table_header(){
- echo "<th>$*</th>"
-}
-
-end_table(){
- echo "</table>"
-}
-
-row(){
- echo "<tr>"
-}
-
-end_row(){
- echo "</tr>"
-}
-
-c_column(){
- cat <<EOF
-<td valign=center align=center>$*</td>
-EOF
-}
-
-bold(){
- cat <<EOF
-<b>$*</b>
-EOF
-}
-
-column(){
- cat <<EOF
-<td align=left>$*</td>
-EOF
-}
-
-para(){
- cat <<EOF
-<p></p>
-EOF
-}
-
-hr(){
- cat <<EOF
-<hr>
-EOF
-}
-
-img_column(){
- cat <<EOF
-<td><center><$* height=100% width=100%></center></td>
-EOF
-}
-
-# Check the direction of arrow.
-# arrowDirection(){ $columnarray $sendnode$sendblock $recnode$recblock
-arrowDirection(){
-if [ $2 = $3 ]
-then
- arrow=SELF
- return;
-else
- for x in $1
- do
- if [ $x = $2 ]
- then
- arrow=RIGHT
- break
- elif [ $x = $3 ]
- then
- arrow=LEFT
- break
- fi
- done
-fi
-}
-
-drawImages(){
-for x in $columnarray
-do
- case $arrow in
- SELF)
- if [ $x = $sendnode$sendblock ]
- then
- img_column img SRC=\"self_line.JPG\"
- else
- img_column img SRC=\"empty.JPG\"
- fi;;
-
- RIGHT)
- if [ $x = $recnode$recblock ]
- then
- img_column img SRC=\"right_line.JPG\"
- weHavePassedRec=1
- elif [ $x = $sendnode$sendblock ]
- then
- img_column img SRC=\"empty.JPG\"
- weHavePassedSen=1
- elif [ $weHavePassedRec = 1 -o $weHavePassedSen = 0 ]
- then
- img_column img SRC=\"empty.JPG\"
- elif [ $weHavePassedRec = 0 -a $weHavePassedSen = 1 ]
- then
- img_column img SRC=\"line.JPG\"
- fi;;
-
- LEFT)
- if [ $x = $recnode$recblock ]
- then
- img_column img SRC=\"empty.JPG\"
- weHaveJustPassedRec=1
- weHavePassedRec=1
- continue
- fi
- if [ $x = $sendnode$sendblock -a $weHaveJustPassedRec = 1 ]
- then
- img_column img SRC=\"left_line.JPG\"
- weHaveJustPassedRec=0
- weHavePassedSen=1
- continue
- fi
- if [ $x = $sendnode$sendblock ]
- then
- img_column img SRC=\"line.JPG\"
- weHavePassedSen=1
- continue
- fi
- if [ $weHaveJustPassedRec = 1 ]
- then
- img_column img SRC=\"left_line.JPG\"
- weHaveJustPassedRec=0
- continue
- fi
- if [ $weHavePassedSen = 1 -o $weHavePassedRec = 0 ]
- then
- img_column img SRC=\"empty.JPG\"
- continue
- fi
-
- if [ $weHavePassedRec = 1 -a $weHavePassedSen = 0 ]
- then
- img_column img SRC=\"line.JPG\"
- continue
-
- fi
- column ERROR;;
-
- *)
- echo ERROR;;
- esac
-done
-column $signal
-}
-
-### Main
-trace "Making HTML file"
-(
- header "Signal sequence diagram $report_date"
- heading 1 "Signal sequence diagram $report_date"
-
- trace "Making list file"
- #make a signal list file from the signal log file.
- `awk -f /home/ndb/bin/signallog2html.lib/signallog2list.awk SIGNAL_ID=$signal_id BLOCK_ID=$block_name $signal_log_file > $signal_log_file.list`
-
- COLUMNS=`awk -f /home/ndb/bin/signallog2html.lib/uniq_blocks.awk $signal_log_file.list | wc -w`
-
- table "border=0 cellspacing=0 cellpadding=0 cols=`expr $COLUMNS + 1`"
-
- columnarray=`awk -f /home/ndb/bin/signallog2html.lib/uniq_blocks.awk $signal_log_file.list`
-
- row
- column #make an empty first column!
- for col in $columnarray
- do
- table_header $col
- done
-
- grep "" $signal_log_file.list | \
- while read direction sendnode sendblock recnode recblock signal sigid recsigid delay
- do
- if [ $direction = "R" ]
- then
- row
- weHavePassedRec=0
- weHavePassedSen=0
- weHaveJustPassedRec=0
- arrow=""
-
- # calculate the direction of the arrow.
- arrowDirection "$columnarray" "$sendnode$sendblock" "$recnode$recblock"
-
- # Draw the arrow images.
- drawImages
- end_row
- fi
- done
- end_table
-
- footer
-) > $signal_log_file.html
-
-exit 0
diff --git a/storage/ndb/home/bin/stripcr b/storage/ndb/home/bin/stripcr
deleted file mode 100755
index 540418f88cf..00000000000
--- a/storage/ndb/home/bin/stripcr
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/bin/sh
-
-
-# NAME
-# stripcr - a program for removing carriage return chars from dos-files.
-#
-# SYNOPSIS
-# stripcr [file...]
-#
-# DESCRIPTION
-# stripcr deletes all CR characters from the given files.
-# The files are edited in place.
-# If no files are given, stdin and stdout are used instead.
-#
-# OPTIONS
-# -s extension Make a copy of the original of each file, and
-# give it the given extension (.bak, .orig, -bak, ...).
-#
-# EXAMPLES
-# stripcr file.txt innerloop.cc
-# stripcr -i.bak *.cc
-#
-# ENVIRONMENT
-# NDB_PROJ_HOME Home dir for ndb
-#
-# FILES
-# $NDB_PROJ_HOME/lib/funcs.sh Some useful functions for safe execution
-# of commands, printing, and tracing.
-#
-# VERSION
-# 1.0
-#
-# AUTHOR
-# Jonas Mölsä
-#
-
-
-progname=`basename $0`
-synopsis="stripcr [-s extension] [file...]"
-
-
-: ${NDB_PROJ_HOME:?} # If undefined, exit with error message
-
-: ${STRIPCR_OPTIONS:=--} # If undefined, set to --, to keep getopts happy.
- # You may have to experiment, to get quoting right.
-
-. $NDB_PROJ_HOME/lib/funcs.sh
-
-
-# defaults for options related variables
-#
-extension=
-options="$STRIPCR_OPTIONS"
-
-# used if error when parsing the options environment variable
-#
-env_opterr="options environment variable: <<$options>>"
-
-
-
-# We want to be able to set options in an environment variable,
-# as well as on the command line. In order not to have to repeat
-# the same getopts information twice, we loop two times over the
-# getopts while loop. The first time, we process options from
-# the options environment variable, the second time we process
-# options from the command line.
-#
-# The things to change are the actual options and what they do.
-#
-#
-for optstring in "$options" "" # 1. options variable 2. cmd line
-do
- while getopts s: i $optstring # optstring empty => no arg => cmd line
- do
- case $i in
-
- s) extension="$OPTARG";;
- \?) syndie $env_opterr;; # print synopsis and exit
-
- esac
- done
-
- [ -n "$optstring" ] && OPTIND=1 # Reset for round 2, cmd line options
-
- env_opterr= # Round 2 should not use the value
-done
-shift `expr $OPTIND - 1`
-
-
-safe perl -i$extension -lpe 'tr/\r//d' $*
diff --git a/storage/ndb/home/lib/funcs.sh b/storage/ndb/home/lib/funcs.sh
deleted file mode 100644
index b7d8914035e..00000000000
--- a/storage/ndb/home/lib/funcs.sh
+++ /dev/null
@@ -1,294 +0,0 @@
-# NAME
-# safe, safe_eval, die, rawdie, syndie, msg, errmsg,
-# rawmsg, rawerrmsg, trace, errtrace, is_wordmatch
-# - functions for safe execution and convenient printing and tracing
-#
-# abspath - make a path absolute
-#
-# SYNOPSIS
-# . funcs.sh
-#
-# is_wordmatch requires perl.
-#
-# DESCRIPTION
-# Funcs.sh is a collection of somewhat related functions.
-# The main categories and their respective functions are:
-# Controlled execution - safe, safe_eval
-# Exiting with a message - die, rawdie, syndie
-# Printing messages - msg, errmsg, rawmsg, rawerrmsg
-# Tracing - trace, errtrace
-# Pattern matching - is_wordmatch
-#
-#
-# ENVIRONMENT
-# These variables are not exported, but they are still visible
-# to, and used by, these functions.
-#
-# progname basename of $0
-# verbose empty or non-empty, used for tracing
-# synopsis string describing the syntax of $progname
-#
-# VERSION
-# 2.0
-#
-# AUTHOR
-# Jonas Mölsä
-# Jonas Oreland - added abspath
-
-
-
-
-
-# Safely executes the given command and exits
-# with the given commands exit code if != 0,
-# else the return value ("the functions exit
-# code") is 0. Eg: safely cd $install_dir
-#
-safely ()
-{
- "$@"
- safely_code__=$?
- [ $safely_code__ -ne 0 ] &&
- { errmsg "Command failed: $@. Exit code: $safely_code__.";
- exit $safely_code__; }
-
- : # return "exit code" 0 from function
-}
-
-
-
-
-# Safely_eval executes "eval command" and exits
-# with the given commands exit code if != 0,
-# else the return value (the functions "exit
-# code") is 0.
-#
-# Safely_eval is just like safely, but safely_eval does
-# "eval command" instead of just "command"
-#
-# Safely_eval even works with pipes etc., but you have to quote
-# the special characters. Eg: safely_eval ls \| wc \> tst.txt 2\>\&1
-#
-#
-safely_eval ()
-{
- eval "$@"
- safely_eval_code__=$?
- [ $safely_eval_code__ -ne 0 ] &&
- { errmsg "Command failed: $@. Exit code: $safely_eval_code__.";
- exit $safely_eval_code__; }
-
- : # return "exit code" 0 from function
-}
-
-
-
-
-
-
-#
-# safe and safe_eval are deprecated, use safely and safely_eval instead
-#
-
-# Safe executes the given command and exits
-# with the given commands exit code if != 0,
-# else the return value ("the functions exit
-# code") is 0.
-#
-safe ()
-{
- "$@"
- safe_code__=$?
- [ $safe_code__ -ne 0 ] &&
- { errmsg "Command failed: $@. Exit code: $safe_code__.";
- exit $safe_code__; }
-
- : # return "exit code" 0 from function
-}
-
-
-
-
-# Safe_eval executes "eval command" and exits
-# with the given commands exit code if != 0,
-# else the return value (the functions "exit
-# code") is 0.
-#
-# Safe_eval is just like safe, but safe_eval does
-# "eval command" instead of just "command"
-#
-# Safe_eval even works with pipes etc., but you have to quote
-# the special characters. Eg: safe_eval ls \| wc \> tst.txt 2\>\&1
-#
-#
-safe_eval ()
-{
- eval "$@"
- safe_eval_code__=$?
- [ $safe_eval_code__ -ne 0 ] &&
- { errmsg "Command failed: $@. Exit code: $safe_eval_code__.";
- exit $safe_eval_code__; }
-
- : # return "exit code" 0 from function
-}
-
-
-
-
-
-
-# die prints the supplied message to stderr,
-# prefixed with the program name, and exits
-# with the exit code given by "-e num" or
-# 1, if no -e option is present.
-#
-die ()
-{
- die_code__=1
- [ "X$1" = X-e ] && { die_code__=$2; shift 2; }
- [ "X$1" = X-- ] && shift
- errmsg "$@"
- exit $die_code__
-}
-
-
-
-# rawdie prints the supplied message to stderr.
-# It then exits with the exit code given with "-e num"
-# or 1, if no -e option is present.
-#
-rawdie ()
-{
- rawdie_code__=1
- [ "X$1" = X-e ] && { rawdie_code__=$2; shift 2; }
- [ "X$1" = X-- ] && shift
- rawerrmsg "$@"
- exit $rawdie_code__
-}
-
-
-
-
-# Syndie prints the supplied message (if present) to stderr,
-# prefixed with the program name, on the first line.
-# On the second line, it prints $synopsis.
-# It then exits with the exit code given with "-e num"
-# or 1, if no -e option is present.
-#
-syndie ()
-{
- syndie_code__=1
- [ "X$1" = X-e ] && { syndie_code__=$2; shift 2; }
- [ "X$1" = X-- ] && shift
- [ -n "$*" ] && msg "$*"
- rawdie -e $syndie_code__ "Synopsis: $synopsis"
-}
-
-
-
-
-# msg prints the supplied message to stdout,
-# prefixed with the program name.
-#
-msg ()
-{
- echo "${progname:-<no program name set>}:" "$@"
-}
-
-
-
-# msg prints the supplied message to stderr,
-# prefixed with the program name.
-#
-errmsg ()
-{
- echo "${progname:-<no program name set>}:" "$@" >&2
-}
-
-
-
-rawmsg () { echo "$*"; } # print the supplied message to stdout
-rawerrmsg () { echo "$*" >&2; } # print the supplied message to stderr
-
-
-
-# trace prints the supplied message to stdout if verbose is non-null
-#
-trace ()
-{
- [ -n "$verbose" ] && msg "$@"
-}
-
-
-# errtrace prints the supplied message to stderr if verbose is non-null
-#
-errtrace ()
-{
- [ -n "$verbose" ] && msg "$@" >&2
-}
-
-
-
-# SYNTAX
-# is_wordmatch candidatelist wordlist
-#
-# DESCRIPTION
-# is_wordmatch returns true if any of the words (candidates)
-# in candidatelist is present in wordlist, otherwise it
-# returns false.
-#
-# EXAMPLES
-# is_wordmatch "tuareg nixdorf low content" "xx yy zz low fgj turn roff sd"
-# returns true, since "low" in candidatelist is present in wordlist.
-#
-# is_wordmatch "tuareg nixdorf low content" "xx yy zz slow fgj turn roff sd"
-# returns false, since none of the words in candidatelist occurs in wordlist.
-#
-# is_wordmatch "tuareg nixdorf low content" "xx yy zz low fgj tuareg roff"
-# returns true, since "low" and "tuareg" in candidatelist occurs in wordlist.
-#
-is_wordmatch ()
-{
- is_wordmatch_pattern__=`echo $1 |
- sed 's/^/\\\\b/;
- s/[ ][ ]*/\\\\b|\\\\b/g;
- s/$/\\\\b/;'`
- shift
- echo "$*" |
- perl -lne "m/$is_wordmatch_pattern__/ || exit 1"
-}
-
-#
-# abspath
-#
-# Stolen from http://oase-shareware.org/shell/shelltips/script_programmer.html
-#
-abspath()
-{
- __abspath_D=`dirname "$1"`
- __abspath_B=`basename "$1"`
- echo "`cd \"$__abspath_D\" 2>/dev/null && pwd || echo \"$__abspath_D\"`/$__abspath_B"
-}
-
-#
-#
-# NdbExit
-#
-#
-NdbExit()
-{
- echo "NdbExit: $1"
- exit $1
-}
-
-NdbGetExitCode()
-{
- __res__=`echo $* | awk '{if($1=="NdbExit:") print $2;}'`
- if [ -n $__res__ ]
- then
- echo $__res__
- else
- echo 255
- fi
-}
-
diff --git a/storage/ndb/include/kernel/signaldata/DropFilegroup.hpp b/storage/ndb/include/kernel/signaldata/DropFilegroup.hpp
index 7cf275b1f9e..c1dbc95380c 100644
--- a/storage/ndb/include/kernel/signaldata/DropFilegroup.hpp
+++ b/storage/ndb/include/kernel/signaldata/DropFilegroup.hpp
@@ -150,6 +150,7 @@ struct DropFileRef {
enum ErrorCode {
NoError = 0,
Busy = 701,
+ NotMaster = 702,
NoSuchFile = 766,
DropUndoFileNotSupported = 769,
InvalidSchemaObjectVersion = 774
diff --git a/storage/ndb/include/kernel/signaldata/TupFrag.hpp b/storage/ndb/include/kernel/signaldata/TupFrag.hpp
index 53581dec56d..d8f2139de61 100644
--- a/storage/ndb/include/kernel/signaldata/TupFrag.hpp
+++ b/storage/ndb/include/kernel/signaldata/TupFrag.hpp
@@ -147,7 +147,8 @@ public:
enum ErrorCode {
NoError = 0,
InvalidCharset = 743,
- TooManyBitsUsed = 831
+ TooManyBitsUsed = 831,
+ UnsupportedType = 906
};
private:
Uint32 userPtr;
diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp
index 28e238d6049..b31b35cba89 100644
--- a/storage/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp
@@ -1745,11 +1745,15 @@ public:
const char * tableName);
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ void removeCachedTable(const Table *table);
+ void removeCachedIndex(const Index *index);
+ void invalidateTable(const Table *table);
/**
* Invalidate cached index object
*/
void invalidateIndex(const char * indexName,
const char * tableName);
+ void invalidateIndex(const Index *index);
/**
* Force gcp and wait for gcp complete
*/
diff --git a/storage/ndb/include/util/NdbSqlUtil.hpp b/storage/ndb/include/util/NdbSqlUtil.hpp
index 3e98dcd1805..36a75136c45 100644
--- a/storage/ndb/include/util/NdbSqlUtil.hpp
+++ b/storage/ndb/include/util/NdbSqlUtil.hpp
@@ -117,9 +117,9 @@ public:
/**
* Check character set.
*/
- static bool usable_in_pk(Uint32 typeId, const void* info);
- static bool usable_in_hash_index(Uint32 typeId, const void* info);
- static bool usable_in_ordered_index(Uint32 typeId, const void* info);
+ static uint check_column_for_pk(Uint32 typeId, const void* info);
+ static uint check_column_for_hash_index(Uint32 typeId, const void* info);
+ static uint check_column_for_ordered_index(Uint32 typeId, const void* info);
/**
* Get number of length bytes and length from variable length string.
diff --git a/storage/ndb/include/util/ndb_opts.h b/storage/ndb/include/util/ndb_opts.h
index 787c32f06fd..08ab4a2e9df 100644
--- a/storage/ndb/include/util/ndb_opts.h
+++ b/storage/ndb/include/util/ndb_opts.h
@@ -84,7 +84,10 @@ const char *opt_debug= 0;
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\
{ "core-file", OPT_WANT_CORE, "Write core on errors.",\
(gptr*) &opt_core, (gptr*) &opt_core, 0,\
- GET_BOOL, NO_ARG, OPT_WANT_CORE_DEFAULT, 0, 0, 0, 0, 0}
+ GET_BOOL, NO_ARG, OPT_WANT_CORE_DEFAULT, 0, 0, 0, 0, 0},\
+ {"character-sets-dir", OPT_CHARSETS_DIR,\
+ "Directory where character sets are.", (gptr*) &charsets_dir,\
+ (gptr*) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}\
#ifndef DBUG_OFF
#define NDB_STD_OPTS(prog_name) \
@@ -111,6 +114,7 @@ enum ndb_std_options {
OPT_WANT_CORE,
OPT_NDB_MGMD,
OPT_NDB_NODEID,
+ OPT_CHARSETS_DIR,
NDB_STD_OPTIONS_LAST /* should always be last in this enum */
};
diff --git a/storage/ndb/src/common/util/NdbSqlUtil.cpp b/storage/ndb/src/common/util/NdbSqlUtil.cpp
index 09e150dbacf..f2506eda6d4 100644
--- a/storage/ndb/src/common/util/NdbSqlUtil.cpp
+++ b/storage/ndb/src/common/util/NdbSqlUtil.cpp
@@ -872,8 +872,8 @@ NdbSqlUtil::likeLongvarbinary(const void* info, const void* p1, unsigned n1, con
// check charset
-bool
-NdbSqlUtil::usable_in_pk(Uint32 typeId, const void* info)
+uint
+NdbSqlUtil::check_column_for_pk(Uint32 typeId, const void* info)
{
const Type& type = getType(typeId);
switch (type.m_typeId) {
@@ -882,12 +882,14 @@ NdbSqlUtil::usable_in_pk(Uint32 typeId, const void* info)
case Type::Longvarchar:
{
const CHARSET_INFO *cs = (const CHARSET_INFO*)info;
- return
- cs != 0 &&
- cs->cset != 0 &&
- cs->coll != 0 &&
- cs->coll->strnxfrm != 0 &&
- cs->strxfrm_multiply <= MAX_XFRM_MULTIPLY;
+ if(cs != 0 &&
+ cs->cset != 0 &&
+ cs->coll != 0 &&
+ cs->coll->strnxfrm != 0 &&
+ cs->strxfrm_multiply <= MAX_XFRM_MULTIPLY)
+ return 0;
+ else
+ return 743;
}
break;
case Type::Undefined:
@@ -896,19 +898,19 @@ NdbSqlUtil::usable_in_pk(Uint32 typeId, const void* info)
case Type::Bit:
break;
default:
- return true;
+ return 0;
}
- return false;
+ return 906;
}
-bool
-NdbSqlUtil::usable_in_hash_index(Uint32 typeId, const void* info)
+uint
+NdbSqlUtil::check_column_for_hash_index(Uint32 typeId, const void* info)
{
- return usable_in_pk(typeId, info);
+ return check_column_for_pk(typeId, info);
}
-bool
-NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info)
+uint
+NdbSqlUtil::check_column_for_ordered_index(Uint32 typeId, const void* info)
{
const Type& type = getType(typeId);
if (type.m_cmp == NULL)
@@ -919,13 +921,15 @@ NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info)
case Type::Longvarchar:
{
const CHARSET_INFO *cs = (const CHARSET_INFO*)info;
- return
- cs != 0 &&
- cs->cset != 0 &&
- cs->coll != 0 &&
- cs->coll->strnxfrm != 0 &&
- cs->coll->strnncollsp != 0 &&
- cs->strxfrm_multiply <= MAX_XFRM_MULTIPLY;
+ if (cs != 0 &&
+ cs->cset != 0 &&
+ cs->coll != 0 &&
+ cs->coll->strnxfrm != 0 &&
+ cs->coll->strnncollsp != 0 &&
+ cs->strxfrm_multiply <= MAX_XFRM_MULTIPLY)
+ return 0;
+ else
+ return 743;
}
break;
case Type::Undefined:
@@ -934,9 +938,9 @@ NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info)
case Type::Bit: // can be fixed
break;
default:
- return true;
+ return 0;
}
- return false;
+ return 906;
}
// utilities
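
The hunk above converts the boolean usable_in_* predicates into check_column_for_* functions that return 0 on success and a concrete NDB error code on failure (743 for an unusable character set, 906 for an unsupported type), so callers such as Dbtux and NdbDictInterface further down can propagate the precise reason instead of a hard-coded 743. A minimal sketch of the 0-or-error-code pattern; the types and the MAX_XFRM_MULTIPLY value are stand-ins, not the real CHARSET_INFO machinery:

// Editor's sketch (hypothetical types): returning 0-or-error-code instead
// of bool lets one check site report distinct failure reasons.
#include <cstdio>

struct FakeCharset { bool has_strnxfrm; unsigned strxfrm_multiply; };

static const unsigned MAX_XFRM_MULTIPLY = 8; // assumed limit for the sketch

// 0 = usable, 743 = invalid charset, 906 = unsupported type
unsigned check_column_for_pk(int type_id, const FakeCharset* cs)
{
  const bool is_string_type = (type_id == 1);   // pretend 1 is Char/Varchar
  if (is_string_type)
    return (cs && cs->has_strnxfrm && cs->strxfrm_multiply <= MAX_XFRM_MULTIPLY)
           ? 0 : 743;
  const bool is_unsupported = (type_id == 2);   // pretend 2 is Blob/Bit
  return is_unsupported ? 906 : 0;
}

int main()
{
  FakeCharset utf8 = { true, 3 };
  unsigned err;
  if ((err = check_column_for_pk(1, &utf8)))    // same call-site idiom as the patch
    std::printf("rejected, error %u\n", err);
  else
    std::printf("column accepted\n");
  if ((err = check_column_for_pk(2, 0)))
    std::printf("rejected, error %u\n", err);
  return 0;
}
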
diff --git a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp
index 92680a5b6c9..9fa5800c120 100644
--- a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp
+++ b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp
@@ -144,6 +144,17 @@ struct BackupFormat {
// If TriggerEvent & 0x10000 == true then GCI is right after data
Uint32 TriggerEvent;
Uint32 FragId;
+ Uint32 Data[1]; // Len = Length - 3
+ };
+
+ /**
+ * Log Entry pre NDBD_FRAGID_VERSION
+ */
+ struct LogEntry_no_fragid {
+ Uint32 Length;
+ Uint32 TableId;
+ // If TriggerEvent & 0x10000 == true then GCI is right after data
+ Uint32 TriggerEvent;
Uint32 Data[1]; // Len = Length - 2
};
};
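
The companion LogEntry_no_fragid struct preserves the pre-NDBD_FRAGID_VERSION on-disk layout: the current entry carries an extra FragId word, so its attribute data spans Length - 3 words instead of Length - 2. A reader sketch under assumed layouts (the version constant here is hypothetical):

// Editor's sketch: picking the log-entry layout by backup version.
// The structs mimic the header above; the version value is assumed.
#include <cstdio>
#include <cstdint>

typedef uint32_t Uint32;
static const Uint32 NDBD_FRAGID_VERSION = 0x00050100; // hypothetical constant

struct LogEntry           { Uint32 Length, TableId, TriggerEvent, FragId, Data[1]; };
struct LogEntry_no_fragid { Uint32 Length, TableId, TriggerEvent, Data[1]; };

// Returns a pointer to the attribute data and its length in words.
const Uint32* attr_data(const Uint32* raw, Uint32 version, Uint32* len_out)
{
  if (version < NDBD_FRAGID_VERSION) {
    const LogEntry_no_fragid* e = (const LogEntry_no_fragid*)raw;
    *len_out = e->Length - 2;      // Length counts TableId..Data
    return e->Data;
  }
  const LogEntry* e = (const LogEntry*)raw;
  *len_out = e->Length - 3;        // one extra word for FragId
  return e->Data;
}

int main()
{
  Uint32 old_entry[] = { 3, 7, 0, 42 }; // Length=3: TableId, TriggerEvent, 1 data word
  Uint32 len;
  const Uint32* d = attr_data(old_entry, 0x00050000, &len);
  std::printf("old format: %u data word(s), first=%u\n", len, d[0]);
  return 0;
}
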
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index 87bd1d7c53b..57aa9890f24 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -13396,6 +13396,24 @@ Dbdict::execCREATE_FILE_REQ(Signal* signal){
Uint32 requestInfo = req->requestInfo;
do {
+ if(getOwnNodeId() != c_masterNodeId){
+ jam();
+ ref->errorCode = CreateFileRef::NotMaster;
+ ref->status = 0;
+ ref->errorKey = 0;
+ ref->errorLine = __LINE__;
+ break;
+ }
+
+ if (c_blockState != BS_IDLE){
+ jam();
+ ref->errorCode = CreateFileRef::Busy;
+ ref->status = 0;
+ ref->errorKey = 0;
+ ref->errorLine = __LINE__;
+ break;
+ }
+
Ptr<SchemaTransaction> trans_ptr;
if (! c_Trans.seize(trans_ptr)){
ref->errorCode = CreateFileRef::Busy;
@@ -13455,6 +13473,9 @@ Dbdict::execCREATE_FILE_REQ(Signal* signal){
tmp.init<CreateObjRef>(rg, GSN_CREATE_OBJ_REF, trans_key);
sendSignal(rg, GSN_CREATE_OBJ_REQ, signal,
CreateObjReq::SignalLength, JBB);
+
+ c_blockState = BS_CREATE_TAB;
+
return;
} while(0);
@@ -13480,15 +13501,6 @@ Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal){
Uint32 type = req->objType;
do {
- Ptr<SchemaTransaction> trans_ptr;
- if (! c_Trans.seize(trans_ptr)){
- ref->errorCode = CreateFilegroupRef::Busy;
- ref->status = 0;
- ref->errorKey = 0;
- ref->errorLine = __LINE__;
- break;
- }
-
if(getOwnNodeId() != c_masterNodeId){
jam();
ref->errorCode = CreateFilegroupRef::NotMaster;
@@ -13506,6 +13518,15 @@ Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal){
ref->errorLine = __LINE__;
break;
}
+
+ Ptr<SchemaTransaction> trans_ptr;
+ if (! c_Trans.seize(trans_ptr)){
+ ref->errorCode = CreateFilegroupRef::Busy;
+ ref->status = 0;
+ ref->errorKey = 0;
+ ref->errorLine = __LINE__;
+ break;
+ }
const Uint32 trans_key = ++c_opRecordSequence;
trans_ptr.p->key = trans_key;
@@ -13554,6 +13575,9 @@ Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal){
tmp.init<CreateObjRef>(rg, GSN_CREATE_OBJ_REF, trans_key);
sendSignal(rg, GSN_CREATE_OBJ_REQ, signal,
CreateObjReq::SignalLength, JBB);
+
+ c_blockState = BS_CREATE_TAB;
+
return;
} while(0);
@@ -13581,6 +13605,22 @@ Dbdict::execDROP_FILE_REQ(Signal* signal)
Uint32 version = req->file_version;
do {
+ if(getOwnNodeId() != c_masterNodeId){
+ jam();
+ ref->errorCode = DropFileRef::NotMaster;
+ ref->errorKey = 0;
+ ref->errorLine = __LINE__;
+ break;
+ }
+
+ if (c_blockState != BS_IDLE){
+ jam();
+ ref->errorCode = DropFileRef::Busy;
+ ref->errorKey = 0;
+ ref->errorLine = __LINE__;
+ break;
+ }
+
Ptr<File> file_ptr;
if (!c_file_hash.find(file_ptr, objId))
{
@@ -13636,6 +13676,9 @@ Dbdict::execDROP_FILE_REQ(Signal* signal)
tmp.init<CreateObjRef>(rg, GSN_DROP_OBJ_REF, trans_key);
sendSignal(rg, GSN_DROP_OBJ_REQ, signal,
DropObjReq::SignalLength, JBB);
+
+ c_blockState = BS_CREATE_TAB;
+
return;
} while(0);
@@ -13663,6 +13706,22 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal)
Uint32 version = req->filegroup_version;
do {
+ if(getOwnNodeId() != c_masterNodeId){
+ jam();
+ ref->errorCode = DropFilegroupRef::NotMaster;
+ ref->errorKey = 0;
+ ref->errorLine = __LINE__;
+ break;
+ }
+
+ if (c_blockState != BS_IDLE){
+ jam();
+ ref->errorCode = DropFilegroupRef::Busy;
+ ref->errorKey = 0;
+ ref->errorLine = __LINE__;
+ break;
+ }
+
Ptr<Filegroup> filegroup_ptr;
if (!c_filegroup_hash.find(filegroup_ptr, objId))
{
@@ -13718,6 +13777,9 @@ Dbdict::execDROP_FILEGROUP_REQ(Signal* signal)
tmp.init<CreateObjRef>(rg, GSN_DROP_OBJ_REF, trans_key);
sendSignal(rg, GSN_DROP_OBJ_REQ, signal,
DropObjReq::SignalLength, JBB);
+
+ c_blockState = BS_CREATE_TAB;
+
return;
} while(0);
@@ -13892,6 +13954,7 @@ Dbdict::trans_commit_complete_done(Signal* signal,
//@todo check api failed
sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILEGROUP_CONF, signal,
CreateFilegroupConf::SignalLength, JBB);
+
break;
}
case GSN_CREATE_FILE_REQ:{
@@ -13935,6 +13998,7 @@ Dbdict::trans_commit_complete_done(Signal* signal,
}
c_Trans.release(trans_ptr);
+ ndbrequire(c_blockState == BS_CREATE_TAB);
c_blockState = BS_IDLE;
return;
}
@@ -14047,6 +14111,7 @@ Dbdict::trans_abort_complete_done(Signal* signal,
}
c_Trans.release(trans_ptr);
+ ndbrequire(c_blockState == BS_CREATE_TAB);
c_blockState = BS_IDLE;
return;
}
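
Taken together, the Dbdict hunks give all four file/filegroup DDL handlers the same early-out sequence: reject with NotMaster first, then with Busy if another schema operation owns the block, then (for filegroups, now after the guards) seize the transaction record, and finally set c_blockState = BS_CREATE_TAB so the commit/abort completion paths can ndbrequire that state before restoring BS_IDLE. A compressed sketch of the ordering; the state and error names come from the patch, everything else is invented:

// Editor's sketch of the guard ordering above: NotMaster outranks Busy,
// resources are seized only after both checks, and completion verifies
// the state it is about to clear.
#include <cstdio>
#include <cstdlib>

enum BlockState { BS_IDLE, BS_CREATE_TAB };
enum ErrorCode  { NoError = 0, Busy = 701, NotMaster = 702 };

struct Dict {
  int own_node, master_node;
  BlockState state;
  bool records_left;

  ErrorCode begin_ddl()
  {
    if (own_node != master_node) return NotMaster; // checked first
    if (state != BS_IDLE)        return Busy;      // then the busy check
    if (!records_left)           return Busy;      // seize happens last
    state = BS_CREATE_TAB;
    return NoError;
  }
  void complete_ddl()
  {
    // mirrors: ndbrequire(c_blockState == BS_CREATE_TAB); c_blockState = BS_IDLE;
    if (state != BS_CREATE_TAB) std::abort();
    state = BS_IDLE;
  }
};

int main()
{
  Dict d = { 2, 1, BS_CREATE_TAB, true };
  std::printf("non-master, busy node: %d\n", d.begin_ddl()); // 702 wins over 701
  d.own_node = 1; d.state = BS_IDLE;
  std::printf("idle master: %d\n", d.begin_ddl());           // 0
  d.complete_ddl();
  return 0;
}
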
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 69db36c8517..2b452e9529b 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -5016,12 +5016,13 @@ void Dblqh::packLqhkeyreqLab(Signal* signal)
Uint32 nextNodeId = regTcPtr->nextReplica;
Uint32 nextVersion = getNodeInfo(nextNodeId).m_version;
+ UintR TAiLen = regTcPtr->reclenAiLqhkey;
UintR TapplAddressIndicator = (regTcPtr->nextSeqNoReplica == 0 ? 0 : 1);
LqhKeyReq::setApplicationAddressFlag(Treqinfo, TapplAddressIndicator);
LqhKeyReq::setInterpretedFlag(Treqinfo, regTcPtr->opExec);
LqhKeyReq::setSeqNoReplica(Treqinfo, regTcPtr->nextSeqNoReplica);
- LqhKeyReq::setAIInLqhKeyReq(Treqinfo, regTcPtr->reclenAiLqhkey);
+ LqhKeyReq::setAIInLqhKeyReq(Treqinfo, TAiLen);
if (unlikely(nextVersion < NDBD_ROWID_VERSION))
{
@@ -5124,22 +5125,32 @@ void Dblqh::packLqhkeyreqLab(Signal* signal)
lqhKeyReq->variableData[nextPos + 0] = sig0;
nextPos += LqhKeyReq::getGCIFlag(Treqinfo);
- sig0 = regTcPtr->firstAttrinfo[0];
- sig1 = regTcPtr->firstAttrinfo[1];
- sig2 = regTcPtr->firstAttrinfo[2];
- sig3 = regTcPtr->firstAttrinfo[3];
- sig4 = regTcPtr->firstAttrinfo[4];
- UintR TAiLen = regTcPtr->reclenAiLqhkey;
BlockReference lqhRef = calcLqhBlockRef(regTcPtr->nextReplica);
+
+ if (likely(nextPos + TAiLen + LqhKeyReq::FixedSignalLength <= 25))
+ {
+ jam();
+ sig0 = regTcPtr->firstAttrinfo[0];
+ sig1 = regTcPtr->firstAttrinfo[1];
+ sig2 = regTcPtr->firstAttrinfo[2];
+ sig3 = regTcPtr->firstAttrinfo[3];
+ sig4 = regTcPtr->firstAttrinfo[4];
- lqhKeyReq->variableData[nextPos] = sig0;
- lqhKeyReq->variableData[nextPos + 1] = sig1;
- lqhKeyReq->variableData[nextPos + 2] = sig2;
- lqhKeyReq->variableData[nextPos + 3] = sig3;
- lqhKeyReq->variableData[nextPos + 4] = sig4;
-
- nextPos += TAiLen;
-
+ lqhKeyReq->variableData[nextPos] = sig0;
+ lqhKeyReq->variableData[nextPos + 1] = sig1;
+ lqhKeyReq->variableData[nextPos + 2] = sig2;
+ lqhKeyReq->variableData[nextPos + 3] = sig3;
+ lqhKeyReq->variableData[nextPos + 4] = sig4;
+
+ nextPos += TAiLen;
+ TAiLen = 0;
+ }
+ else
+ {
+ Treqinfo &= ~(Uint32)(RI_AI_IN_THIS_MASK << RI_AI_IN_THIS_SHIFT);
+ lqhKeyReq->requestInfo = Treqinfo;
+ }
+
sendSignal(lqhRef, GSN_LQHKEYREQ, signal,
nextPos + LqhKeyReq::FixedSignalLength, JBB);
if (regTcPtr->primKeyLen > 4) {
@@ -5165,6 +5176,17 @@ void Dblqh::packLqhkeyreqLab(Signal* signal)
signal->theData[0] = sig0;
signal->theData[1] = sig1;
signal->theData[2] = sig2;
+
+ if (unlikely(nextPos + TAiLen + LqhKeyReq::FixedSignalLength > 25))
+ {
+ jam();
+ /**
+ * 4 replicas...
+ */
+ memcpy(signal->theData+3, regTcPtr->firstAttrinfo, TAiLen << 2);
+ sendSignal(lqhRef, GSN_ATTRINFO, signal, 3 + TAiLen, JBB);
+ }
+
AttrbufPtr regAttrinbufptr;
regAttrinbufptr.i = regTcPtr->firstAttrinbuf;
while (regAttrinbufptr.i != RNIL) {
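
packLqhkeyreqLab previously copied the first five attrinfo words into the LQHKEYREQ unconditionally; with four replicas the variable part can grow past the 25-word signal limit. The patch hoists TAiLen, inlines the attrinfo only when nextPos + TAiLen + FixedSignalLength fits in 25 words, and otherwise clears the AI-in-this-signal bits in requestInfo and ships the words in a follow-up ATTRINFO signal. A toy sketch of the packing decision (sizes and names assumed):

// Editor's sketch: inline small payloads in the main message, spill large
// ones to a follow-up message. The 25-word cap and field sizes are assumed.
#include <cstdio>
#include <cstring>
#include <cstdint>

static const unsigned SIGNAL_WORDS = 25;
static const unsigned FIXED_LEN = 10;  // stand-in for LqhKeyReq::FixedSignalLength

struct Signal { uint32_t data[SIGNAL_WORDS]; unsigned len; };

void send(const char* kind, const Signal& s)
{
  std::printf("send %s, %u words\n", kind, s.len);
}

void pack_and_send(const uint32_t* attrinfo, unsigned ai_len, unsigned next_pos)
{
  Signal req; req.len = FIXED_LEN + next_pos;
  if (next_pos + ai_len + FIXED_LEN <= SIGNAL_WORDS) {
    std::memcpy(req.data + next_pos, attrinfo, ai_len * 4); // inline the attrinfo
    req.len += ai_len;
    ai_len = 0;                                             // nothing left to spill
  }
  send("LQHKEYREQ", req);                 // AI flag would be cleared in the else case
  if (ai_len) {
    Signal extra; extra.len = 3 + ai_len; // 3 header words, then the data
    std::memcpy(extra.data + 3, attrinfo, ai_len * 4);
    send("ATTRINFO", extra);              // the spill path
  }
}

int main()
{
  uint32_t ai[5] = {1, 2, 3, 4, 5};
  pack_and_send(ai, 5, 8);   // 8+5+10 <= 25: one signal
  pack_and_send(ai, 5, 14);  // 14+5+10 > 25: spills to ATTRINFO
  return 0;
}
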
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
index 4e507d1b690..273ccb9e1e6 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
@@ -170,7 +170,6 @@ void Dbtup::execTUP_ABORTREQ(Signal* signal)
/**
* Aborting last operation that performed ALLOC
*/
- ndbout_c("clearing ALLOC");
tuple_ptr->m_header_bits &= ~(Uint32)Tuple_header::ALLOC;
tuple_ptr->m_header_bits |= Tuple_header::FREED;
}
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
index 782679eac18..fc3419e694a 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
@@ -473,13 +473,16 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
ptrCheckGuard(regTabPtr, no_of_tablerec, tablerec);
PagePtr page;
- Tuple_header* tuple_ptr= 0;
+ Tuple_header* tuple_ptr= (Tuple_header*)
+ get_ptr(&page, &regOperPtr.p->m_tuple_location, regTabPtr.p);
+
+ bool get_page = false;
if(regOperPtr.p->op_struct.m_load_diskpage_on_commit)
{
+ Page_cache_client::Request req;
ndbassert(regOperPtr.p->is_first_operation() &&
regOperPtr.p->is_last_operation());
- Page_cache_client::Request req;
/**
* Check for page
*/
@@ -490,15 +493,33 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
memcpy(&req.m_page,
tmp->get_disk_ref_ptr(regTabPtr.p), sizeof(Local_key));
+
+ if (unlikely(regOperPtr.p->op_struct.op_type == ZDELETE &&
+ tmp->m_header_bits & Tuple_header::DISK_ALLOC))
+ {
+ jam();
+ /**
+ * Insert+Delete
+ */
+ regOperPtr.p->op_struct.m_load_diskpage_on_commit = 0;
+ regOperPtr.p->op_struct.m_wait_log_buffer = 0;
+ disk_page_abort_prealloc(signal, regFragPtr.p,
+ &req.m_page, req.m_page.m_page_idx);
+
+ c_lgman->free_log_space(regFragPtr.p->m_logfile_group_id,
+ regOperPtr.p->m_undo_buffer_space);
+ ndbout_c("insert+delete");
+ goto skip_disk;
+ }
}
else
{
// initial delete
ndbassert(regOperPtr.p->op_struct.op_type == ZDELETE);
- tuple_ptr= (Tuple_header*)
- get_ptr(&page, &regOperPtr.p->m_tuple_location, regTabPtr.p);
memcpy(&req.m_page,
tuple_ptr->get_disk_ref_ptr(regTabPtr.p), sizeof(Local_key));
+
+ ndbassert(tuple_ptr->m_header_bits & Tuple_header::DISK_PART);
}
req.m_callback.m_callbackData= regOperPtr.i;
req.m_callback.m_callbackFunction =
@@ -522,6 +543,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
ndbrequire("NOT YET IMPLEMENTED" == 0);
break;
}
+ get_page = true;
disk_page_set_dirty(*(Ptr<Page>*)&m_pgman.m_ptr);
regOperPtr.p->m_commit_disk_callback_page= res;
regOperPtr.p->op_struct.m_load_diskpage_on_commit= 0;
@@ -555,6 +577,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
tuple_ptr = (Tuple_header*)
get_ptr(&page, &regOperPtr.p->m_tuple_location,regTabPtr.p);
}
+skip_disk:
req_struct.m_tuple_ptr = tuple_ptr;
if(get_tuple_state(regOperPtr.p) == TUPLE_PREPARED)
@@ -599,6 +622,8 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
else
{
removeActiveOpList(regOperPtr.p, tuple_ptr);
+ if (get_page)
+ ndbassert(tuple_ptr->m_header_bits & Tuple_header::DISK_PART);
dealloc_tuple(signal, gci, page.p, tuple_ptr,
regOperPtr.p, regFragPtr.p, regTabPtr.p);
}
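
The commit-path change handles an insert and a delete of the same row inside one transaction on a disk table: the insert preallocated a disk page (DISK_ALLOC is still set) but no row survives, so rather than paging the data in, the code releases the preallocation and the reserved undo-log space and jumps to the new skip_disk label. tuple_ptr is now fetched up front, and the get_page flag guards the later DISK_PART assertion. Reduced to a sketch (all names assumed):

// Editor's sketch: a delete that meets its own transaction's disk
// preallocation cancels it instead of touching the page.
#include <cstdio>

struct Op    { bool load_diskpage_on_commit, wait_log_buffer; };
struct Tuple { bool disk_alloc; };

void commit_delete(Op& op, Tuple& t, unsigned undo_space)
{
  if (op.load_diskpage_on_commit && t.disk_alloc) {
    op.load_diskpage_on_commit = false;   // no page fetch needed
    op.wait_log_buffer = false;           // no undo record will be written
    std::printf("freeing prealloc + %u words of undo space\n", undo_space);
    return;                               // corresponds to "goto skip_disk"
  }
  std::printf("normal disk commit path\n");
}

int main()
{
  Op op = { true, true };
  Tuple fresh = { true };   // inserted earlier in the same transaction
  commit_delete(op, fresh, 12);
  Op op2 = { true, true };
  Tuple old = { false };
  commit_delete(op2, old, 0);
  return 0;
}
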
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
index d74e4b6811e..ec3231f55f5 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
@@ -1053,6 +1053,7 @@ Dbtup::disk_page_abort_prealloc_callback(Signal* signal,
Ptr<Fragrecord> fragPtr;
getFragmentrec(fragPtr, pagePtr.p->m_fragment_id, tabPtr.p);
+ disk_page_set_dirty(pagePtr);
disk_page_abort_prealloc_callback_1(signal, fragPtr.p, pagePtr, sz);
}
@@ -1074,6 +1075,13 @@ Dbtup::disk_page_abort_prealloc_callback_1(Signal* signal,
ddassert(alloc.calc_page_free_bits(free - used) == old_idx);
Uint32 new_idx = alloc.calc_page_free_bits(free - used + sz);
+#ifdef VM_TRACE
+ Local_key key;
+ key.m_page_no = pagePtr.p->m_page_no;
+ key.m_file_no = pagePtr.p->m_file_no;
+ ndbout << "disk_page_abort_prealloc_callback_1" << key << endl;
+#endif
+
Ptr<Extent_info> extentPtr;
c_extent_pool.getPtr(extentPtr, ext);
if (old_idx != new_idx)
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
index 7305827b6ac..ff917c8482d 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
@@ -1382,8 +1382,9 @@ int Dbtup::handleInsertReq(Signal* signal,
regOperPtr.p->userpointer,
&regOperPtr.p->m_tuple_location);
- ((Tuple_header*)ptr)->m_operation_ptr_i= regOperPtr.i;
- ((Tuple_header*)ptr)->m_header_bits= Tuple_header::ALLOC |
+ base = (Tuple_header*)ptr;
+ base->m_operation_ptr_i= regOperPtr.i;
+ base->m_header_bits= Tuple_header::ALLOC |
(varsize ? Tuple_header::CHAINED_ROW : 0);
regOperPtr.p->m_tuple_location.m_page_no = real_page_id;
}
@@ -1407,6 +1408,8 @@ int Dbtup::handleInsertReq(Signal* signal,
}
req_struct->m_use_rowid = false;
base->m_header_bits &= ~(Uint32)Tuple_header::FREE;
+ base->m_header_bits |= Tuple_header::ALLOC &
+ (regOperPtr.p->is_first_operation() ? ~0 : 1);
}
else
{
@@ -1415,6 +1418,8 @@ int Dbtup::handleInsertReq(Signal* signal,
{
ndbout_c("no mem insert but rowid (same)");
base->m_header_bits &= ~(Uint32)Tuple_header::FREE;
+ base->m_header_bits |= Tuple_header::ALLOC &
+ (regOperPtr.p->is_first_operation() ? ~0 : 1);
}
else
{
@@ -1467,7 +1472,7 @@ int Dbtup::handleInsertReq(Signal* signal,
size_change_error:
jam();
terrorCode = ZMEM_NOMEM_ERROR;
- goto disk_prealloc_error;
+ goto exit_error;
undo_buffer_error:
jam();
@@ -1501,9 +1506,13 @@ update_error:
regOperPtr.p->op_struct.in_active_list = false;
regOperPtr.p->m_tuple_location.setNull();
}
-disk_prealloc_error:
+exit_error:
tupkeyErrorLab(signal);
return -1;
+
+disk_prealloc_error:
+ base->m_header_bits |= Tuple_header::FREED;
+ goto exit_error;
}
/* ---------------------------------------------------------------- */
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
index 7703b3e6ab8..5911dead4a0 100644
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
@@ -215,11 +215,12 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
break;
}
if (descAttr.m_charset != 0) {
+ uint err;
CHARSET_INFO *cs = all_charsets[descAttr.m_charset];
ndbrequire(cs != 0);
- if (! NdbSqlUtil::usable_in_ordered_index(descAttr.m_typeId, cs)) {
+ if ((err = NdbSqlUtil::check_column_for_ordered_index(descAttr.m_typeId, cs))) {
jam();
- errorCode = TuxAddAttrRef::InvalidCharset;
+ errorCode = (TuxAddAttrRef::ErrorCode) err;
break;
}
}
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
index e0324c2c8ea..b0a4d6264fb 100644
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
@@ -312,11 +312,12 @@ void AsyncFile::openReq(Request* request)
Uint32 new_flags = 0;
  // Convert file open flags from Solaris to Linux
- if(flags & FsOpenReq::OM_CREATE){
+ if (flags & FsOpenReq::OM_CREATE)
+ {
new_flags |= O_CREAT;
}
-
- if(flags & FsOpenReq::OM_TRUNCATE){
+
+ if (flags & FsOpenReq::OM_TRUNCATE){
#if 0
if(Global_unlinkO_CREAT){
unlink(theFileName.c_str());
@@ -330,25 +331,25 @@ void AsyncFile::openReq(Request* request)
m_syncFrequency = 1024*1024; // Hard coded to 1M
}
- if(flags & FsOpenReq::OM_APPEND){
+ if (flags & FsOpenReq::OM_APPEND){
new_flags |= O_APPEND;
}
- if((flags & FsOpenReq::OM_SYNC) && ! (flags & FsOpenReq::OM_INIT))
+ if ((flags & FsOpenReq::OM_SYNC) && ! (flags & FsOpenReq::OM_INIT))
{
#ifdef O_SYNC
new_flags |= O_SYNC;
#endif
}
-#ifndef NDB_NO_O_DIRECT /* to allow tmpfs */
+//#ifndef NDB_NO_O_DIRECT /* to allow tmpfs */
#ifdef O_DIRECT
if (flags & FsOpenReq::OM_DIRECT)
{
new_flags |= O_DIRECT;
}
#endif
-#endif
+//#endif
switch(flags & 0x3){
case FsOpenReq::OM_READONLY:
@@ -370,44 +371,73 @@ void AsyncFile::openReq(Request* request)
const int mode = S_IRUSR | S_IWUSR |
S_IRGRP | S_IWGRP |
S_IROTH | S_IWOTH;
- if(flags & FsOpenReq::OM_CREATE_IF_NONE){
- if((theFd = ::open(theFileName.c_str(), new_flags, mode)) != -1) {
+ if (flags & FsOpenReq::OM_CREATE_IF_NONE)
+ {
+ Uint32 tmp_flags = new_flags;
+#ifdef O_DIRECT
+ tmp_flags &= ~O_DIRECT;
+#endif
+ if ((theFd = ::open(theFileName.c_str(), tmp_flags, mode)) != -1)
+ {
close(theFd);
request->error = FsRef::fsErrFileExists;
return;
}
new_flags |= O_CREAT;
}
-
- if (-1 == (theFd = ::open(theFileName.c_str(), new_flags, mode))) {
+
+no_odirect:
+ if (-1 == (theFd = ::open(theFileName.c_str(), new_flags, mode)))
+ {
PRINT_ERRORANDFLAGS(new_flags);
- if( (errno == ENOENT ) && (new_flags & O_CREAT ) ) {
+ if ((errno == ENOENT) && (new_flags & O_CREAT))
+ {
createDirectories();
- if (-1 == (theFd = ::open(theFileName.c_str(), new_flags, mode))) {
+ if (-1 == (theFd = ::open(theFileName.c_str(), new_flags, mode)))
+ {
+#ifdef O_DIRECT
+ if (new_flags & O_DIRECT)
+ {
+ new_flags &= ~O_DIRECT;
+ goto no_odirect;
+ }
+#endif
PRINT_ERRORANDFLAGS(new_flags);
request->error = errno;
return;
}
- } else {
+ }
+#ifdef O_DIRECT
+ else if (new_flags & O_DIRECT)
+ {
+ new_flags &= ~O_DIRECT;
+ goto no_odirect;
+ }
+#endif
+ else
+ {
request->error = errno;
return;
}
}
- if(flags & FsOpenReq::OM_CHECK_SIZE)
+ if (flags & FsOpenReq::OM_CHECK_SIZE)
{
struct stat buf;
- if((fstat(theFd, &buf) == -1))
+ if ((fstat(theFd, &buf) == -1))
{
request->error = errno;
- } else if(buf.st_size != request->par.open.file_size){
+ }
+ else if(buf.st_size != request->par.open.file_size)
+ {
request->error = FsRef::fsErrInvalidFileSize;
}
- if(request->error)
+ if (request->error)
return;
}
-
- if(flags & FsOpenReq::OM_INIT){
+
+ if (flags & FsOpenReq::OM_INIT)
+ {
off_t off = 0;
const off_t sz = request->par.open.file_size;
Uint32 tmp[sizeof(SignalHeader)+25];
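
openReq now treats O_DIRECT as a best-effort hint: the NDB_NO_O_DIRECT guard is commented out, the OM_CREATE_IF_NONE existence probe masks O_DIRECT off, and both open() failure paths strip the flag and retry through the no_odirect label, so filesystems that reject O_DIRECT (tmpfs, notably) still work. The retry shape as a standalone POSIX sketch (path hypothetical):

// Editor's sketch: retry open() without O_DIRECT when the filesystem
// rejects it, mirroring the no_odirect label above. POSIX only.
#include <cstdio>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

int open_maybe_direct(const char* path, int base_flags, mode_t mode)
{
  int flags = base_flags;
#ifdef O_DIRECT
  flags |= O_DIRECT;
#endif
retry:
  int fd = ::open(path, flags, mode);
  if (fd == -1) {
#ifdef O_DIRECT
    if (flags & O_DIRECT) {       // strip the hint and try once more
      flags &= ~O_DIRECT;
      goto retry;
    }
#endif
    std::perror("open");
  }
  return fd;
}

int main()
{
  int fd = open_maybe_direct("/tmp/ndb_sketch.dat", O_CREAT | O_RDWR, 0644);
  if (fd != -1) {
    std::printf("opened fd %d\n", fd);
    close(fd);
  }
  return 0;
}
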
diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp
index 0c9243887d0..b0ebf90915f 100644
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp
@@ -772,17 +772,17 @@ NdbDictionary::Index::getLogging() const {
NdbDictionary::Object::Status
NdbDictionary::Index::getObjectStatus() const {
- return m_impl.m_status;
+ return m_impl.m_table->m_status;
}
int
NdbDictionary::Index::getObjectVersion() const {
- return m_impl.m_version;
+ return m_impl.m_table->m_version;
}
int
NdbDictionary::Index::getObjectId() const {
- return m_impl.m_id;
+ return m_impl.m_table->m_id;
}
@@ -1396,12 +1396,24 @@ NdbDictionary::Dictionary::invalidateTable(const char * name){
}
void
+NdbDictionary::Dictionary::invalidateTable(const Table *table){
+ NdbTableImpl &t = NdbTableImpl::getImpl(*table);
+ m_impl.invalidateObject(t);
+}
+
+void
NdbDictionary::Dictionary::removeCachedTable(const char * name){
NdbTableImpl * t = m_impl.getTable(name);
if(t)
m_impl.removeCachedObject(* t);
}
+void
+NdbDictionary::Dictionary::removeCachedTable(const Table *table){
+ NdbTableImpl &t = NdbTableImpl::getImpl(*table);
+ m_impl.removeCachedObject(t);
+}
+
int
NdbDictionary::Dictionary::createIndex(const Index & ind)
{
@@ -1426,6 +1438,15 @@ NdbDictionary::Dictionary::getIndex(const char * indexName,
}
void
+NdbDictionary::Dictionary::invalidateIndex(const Index *index){
+ DBUG_ENTER("NdbDictionary::Dictionary::invalidateIndex");
+ NdbIndexImpl &i = NdbIndexImpl::getImpl(*index);
+ assert(i.m_table != 0);
+ m_impl.invalidateObject(* i.m_table);
+ DBUG_VOID_RETURN;
+}
+
+void
NdbDictionary::Dictionary::invalidateIndex(const char * indexName,
const char * tableName){
DBUG_ENTER("NdbDictionaryImpl::invalidateIndex");
@@ -1444,6 +1465,15 @@ NdbDictionary::Dictionary::forceGCPWait()
}
void
+NdbDictionary::Dictionary::removeCachedIndex(const Index *index){
+ DBUG_ENTER("NdbDictionary::Dictionary::removeCachedIndex");
+ NdbIndexImpl &i = NdbIndexImpl::getImpl(*index);
+ assert(i.m_table != 0);
+ m_impl.removeCachedObject(* i.m_table);
+ DBUG_VOID_RETURN;
+}
+
+void
NdbDictionary::Dictionary::removeCachedIndex(const char * indexName,
const char * tableName){
NdbIndexImpl * i = m_impl.getIndex(indexName, tableName);
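
The new Dictionary overloads take an already-held Table or Index pointer, so a caller that knows its object is stale can invalidate or evict exactly that cache entry without a name lookup; the Index variants act on the underlying index table (i.m_table), matching how Index::getObjectStatus() and friends now delegate to m_impl.m_table. The idea in miniature, with a toy cache rather than the NDB API:

// Editor's sketch: pointer-based invalidation skips the name lookup that
// the string-based variant must do. A tiny stand-in cache, not the NDB API.
#include <cstdio>
#include <map>
#include <string>

struct Table { std::string name; int version; bool valid = true; };

struct DictCache {
  std::map<std::string, Table*> by_name;

  Table* get(const std::string& n)       // name-based: lookup first
  {
    auto it = by_name.find(n);
    return it == by_name.end() ? nullptr : it->second;
  }
  void invalidate(const std::string& n)  // may miss if the entry is stale
  {
    if (Table* t = get(n)) t->valid = false;
  }
  void invalidate(Table* t)              // caller already holds the object
  {
    t->valid = false;
  }
};

int main()
{
  Table t{"t1", 1};
  DictCache c; c.by_name["t1"] = &t;
  Table* held = c.get("t1");
  c.invalidate(held);                    // no second lookup needed
  std::printf("t1 valid: %d\n", (int)held->valid);
  return 0;
}
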
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 6bed6310052..4808d22b9e0 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -43,6 +43,7 @@
#include <my_sys.h>
#include <NdbEnv.h>
#include <NdbMem.h>
+#include <ndb_version.h>
#define DEBUG_PRINT 0
#define INCOMPATIBLE_VERSION -2
@@ -1963,7 +1964,8 @@ indexTypeMapping[] = {
int
NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
const Uint32 * data, Uint32 len,
- bool fullyQualifiedNames)
+ bool fullyQualifiedNames,
+ Uint32 version)
{
SimplePropertiesLinearReader it(data, len);
DictTabInfo::Table *tableDesc;
@@ -2142,7 +2144,14 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
* ret = impl;
NdbMem_Free((void*)tableDesc);
- DBUG_ASSERT(impl->m_fragmentCount > 0);
+ if (version < MAKE_VERSION(5,1,3))
+ {
+ ;
+ }
+ else
+ {
+ DBUG_ASSERT(impl->m_fragmentCount > 0);
+ }
DBUG_RETURN(0);
}
@@ -2292,7 +2301,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
NdbTableImpl & impl,
bool alter)
{
- unsigned i;
+ unsigned i, err;
char *ts_names[MAX_NDB_PARTITIONS];
DBUG_ENTER("NdbDictInterface::createOrAlterTable");
@@ -2593,8 +2602,10 @@ loop:
DBUG_RETURN(-1);
}
// primary key type check
- if (col->m_pk && ! NdbSqlUtil::usable_in_pk(col->m_type, col->m_cs)) {
- m_error.code= (col->m_cs != 0 ? 743 : 739);
+ if (col->m_pk &&
+ (err = NdbSqlUtil::check_column_for_pk(col->m_type, col->m_cs)))
+ {
+ m_error.code= err;
DBUG_RETURN(-1);
}
// distribution key not supported for Char attribute
@@ -3025,7 +3036,7 @@ NdbDictInterface::createIndex(Ndb & ndb,
{
//validate();
//aggregate();
- unsigned i;
+ unsigned i, err;
UtilBufferWriter w(m_buffer);
const size_t len = strlen(impl.m_externalName.c_str()) + 1;
if(len > MAX_TAB_NAME_SIZE) {
@@ -3074,10 +3085,12 @@ NdbDictInterface::createIndex(Ndb & ndb,
// index key type check
if (it == DictTabInfo::UniqueHashIndex &&
- ! NdbSqlUtil::usable_in_hash_index(col->m_type, col->m_cs) ||
+ (err = NdbSqlUtil::check_column_for_hash_index(col->m_type, col->m_cs))
+ ||
it == DictTabInfo::OrderedIndex &&
- ! NdbSqlUtil::usable_in_ordered_index(col->m_type, col->m_cs)) {
- m_error.code = 743;
+ (err = NdbSqlUtil::check_column_for_ordered_index(col->m_type, col->m_cs)))
+ {
+ m_error.code = err;
return -1;
}
// API uses external column number to talk to DICT
@@ -3840,9 +3853,10 @@ NdbDictionaryImpl::dropBlobEvents(const NdbEventImpl& evnt)
if (! c.getBlobType() || c.getPartSize() == 0)
continue;
n--;
- char bename[MAX_TAB_NAME_SIZE];
- NdbBlob::getBlobEventName(bename, &evnt, &c);
- (void)dropEvent(bename);
+ NdbEventImpl* blob_evnt = getBlobEvent(evnt, i);
+ if (blob_evnt == NULL)
+ continue;
+ (void)dropEvent(*blob_evnt);
}
} else {
// loop over MAX_ATTRIBUTES_IN_TABLE ...
@@ -4396,7 +4410,7 @@ NdbDictInterface::create_file(const NdbFileImpl & file,
ptr[0].p = (Uint32*)m_buffer.get_data();
ptr[0].sz = m_buffer.length() / 4;
- int err[] = { CreateFileRef::Busy, 0};
+ int err[] = { CreateFileRef::Busy, CreateFileRef::NotMaster, 0};
/*
Send signal without time-out since creating files can take a very long
time if the file is very big.
@@ -4440,7 +4454,7 @@ NdbDictInterface::drop_file(const NdbFileImpl & file){
req->file_id = file.m_id;
req->file_version = file.m_version;
- int err[] = { DropFileRef::Busy, 0};
+ int err[] = { DropFileRef::Busy, DropFileRef::NotMaster, 0};
DBUG_RETURN(dictSignal(&tSignal, 0, 0,
0, // master
WAIT_CREATE_INDX_REQ,
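
On the API side, CreateFileRef::NotMaster and DropFileRef::NotMaster join the zero-terminated err[] lists handed to dictSignal(), which appears to treat the listed codes as temporary and re-send the request once the master has been re-resolved. The list idiom itself:

// Editor's sketch of the zero-terminated retry-list idiom: an error is
// retried iff it appears in the list. dictSignal's exact behaviour is assumed.
#include <cstdio>

static bool is_retryable(int code, const int* list)
{
  for (; *list; ++list)            // 0 terminates the list
    if (*list == code) return true;
  return false;
}

int main()
{
  const int err[] = { 701 /*Busy*/, 702 /*NotMaster*/, 0 };
  std::printf("701 retryable: %d\n", is_retryable(701, err));
  std::printf("766 retryable: %d\n", is_retryable(766, err));
  return 0;
}
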
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
index 01aa6b09c90..38033e7237b 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
@@ -454,7 +454,8 @@ public:
static int parseTableInfo(NdbTableImpl ** dst,
const Uint32 * data, Uint32 len,
- bool fullyQualifiedNames);
+ bool fullyQualifiedNames,
+ Uint32 version= 0xFFFFFFFF);
static int parseFileInfo(NdbFileImpl &dst,
const Uint32 * data, Uint32 len);
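
parseTableInfo gains a version argument defaulted to 0xFFFFFFFF, so existing callers keep the strict behaviour while Restore.cpp (further down) passes the backup file's NdbVersion; metadata written before 5.1.3 may legitimately carry m_fragmentCount == 0 and no longer trips the assertion. The version-gated check as a sketch (MAKE_VERSION packing assumed):

// Editor's sketch: a defaulted version parameter keeps old call sites strict,
// while version-aware callers can relax checks for old on-disk data.
#include <cassert>
#include <cstdio>
#include <cstdint>

#define MAKE_VERSION(a,b,c) ((uint32_t(a) << 16) | (uint32_t(b) << 8) | uint32_t(c))

int parse_table_info(unsigned frag_count, uint32_t version = 0xFFFFFFFF)
{
  if (version >= MAKE_VERSION(5,1,3))
    assert(frag_count > 0);        // strict for current-format data
  std::printf("parsed (version 0x%x, frags %u)\n", version, frag_count);
  return 0;
}

int main()
{
  parse_table_info(4);                        // default: strict path
  parse_table_info(0, MAKE_VERSION(5,1,2));   // old backup: allowed
  return 0;
}
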
diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
index f282ab825f0..36037fba9ed 100644
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
@@ -46,7 +46,7 @@
#include <EventLogger.hpp>
extern EventLogger g_eventLogger;
-static Gci_container g_empty_gci_container;
+static Gci_container_pod g_empty_gci_container;
static const Uint32 ACTIVE_GCI_DIRECTORY_SIZE = 4;
static const Uint32 ACTIVE_GCI_MASK = ACTIVE_GCI_DIRECTORY_SIZE - 1;
@@ -1224,11 +1224,21 @@ operator<<(NdbOut& out, const Gci_container& gci)
}
static
+NdbOut&
+operator<<(NdbOut& out, const Gci_container_pod& gci)
+{
+ Gci_container* ptr = (Gci_container*)&gci;
+ out << *ptr;
+ return out;
+}
+
+
+static
Gci_container*
-find_bucket_chained(Vector<Gci_container> * active, Uint64 gci)
+find_bucket_chained(Vector<Gci_container_pod> * active, Uint64 gci)
{
Uint32 pos = (gci & ACTIVE_GCI_MASK);
- Gci_container *bucket= active->getBase() + pos;
+ Gci_container *bucket= ((Gci_container*)active->getBase()) + pos;
if(gci > bucket->m_gci)
{
@@ -1237,8 +1247,9 @@ find_bucket_chained(Vector<Gci_container> * active, Uint64 gci)
do
{
active->fill(move_pos, g_empty_gci_container);
- bucket = active->getBase() + pos; // Needs to recomputed after fill
- move = active->getBase() + move_pos;
+ // Needs to recomputed after fill
+ bucket = ((Gci_container*)active->getBase()) + pos;
+ move = ((Gci_container*)active->getBase()) + move_pos;
if(move->m_gcp_complete_rep_count == 0)
{
memcpy(move, bucket, sizeof(Gci_container));
@@ -1269,16 +1280,33 @@ find_bucket_chained(Vector<Gci_container> * active, Uint64 gci)
inline
Gci_container*
-find_bucket(Vector<Gci_container> * active, Uint64 gci)
+find_bucket(Vector<Gci_container_pod> * active, Uint64 gci)
{
Uint32 pos = (gci & ACTIVE_GCI_MASK);
- Gci_container *bucket= active->getBase() + pos;
+ Gci_container *bucket= ((Gci_container*)active->getBase()) + pos;
if(likely(gci == bucket->m_gci))
return bucket;
return find_bucket_chained(active,gci);
}
+static
+void
+crash_on_invalid_SUB_GCP_COMPLETE_REP(const Gci_container* bucket,
+ const SubGcpCompleteRep * const rep,
+ Uint32 nodes)
+{
+ Uint32 old_cnt = bucket->m_gcp_complete_rep_count;
+
+ ndbout_c("INVALID SUB_GCP_COMPLETE_REP");
+ ndbout_c("gci: %d", rep->gci);
+ ndbout_c("sender: %x", rep->senderRef);
+ ndbout_c("count: %d", rep->gcp_complete_rep_count);
+ ndbout_c("bucket count: %u", old_cnt);
+ ndbout_c("nodes: %u", nodes);
+ abort();
+}
+
void
NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep)
{
@@ -1317,9 +1345,13 @@ NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep)
old_cnt = m_system_nodes;
}
- assert(old_cnt >= cnt);
+ //assert(old_cnt >= cnt);
+ if (unlikely(! (old_cnt >= cnt)))
+ {
+ crash_on_invalid_SUB_GCP_COMPLETE_REP(bucket, rep, m_system_nodes);
+ }
bucket->m_gcp_complete_rep_count = old_cnt - cnt;
-
+
if(old_cnt == cnt)
{
if(likely(gci == m_latestGCI + 1 || m_latestGCI == 0))
@@ -1349,7 +1381,8 @@ NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep)
{
/** out of order something */
ndbout_c("out of order bucket: %d gci: %lld m_latestGCI: %lld",
- bucket-m_active_gci.getBase(), gci, m_latestGCI);
+ bucket-(Gci_container*)m_active_gci.getBase(),
+ gci, m_latestGCI);
bucket->m_state = Gci_container::GC_COMPLETE;
bucket->m_gcp_complete_rep_count = 1; // Prevent from being reused
m_latest_complete_GCI = gci;
@@ -1366,7 +1399,7 @@ NdbEventBuffer::complete_outof_order_gcis()
Uint64 stop_gci = m_latest_complete_GCI;
const Uint32 size = m_active_gci.size();
- Gci_container* array= m_active_gci.getBase();
+ Gci_container* array= (Gci_container*)m_active_gci.getBase();
ndbout_c("complete_outof_order_gcis");
for(Uint32 i = 0; i<size; i++)
@@ -1469,7 +1502,7 @@ NdbEventBuffer::completeClusterFailed()
Uint32 sz= m_active_gci.size();
Uint64 gci= ~0;
Gci_container* bucket = 0;
- Gci_container* array = m_active_gci.getBase();
+ Gci_container* array = (Gci_container*)m_active_gci.getBase();
for(Uint32 i = 0; i<sz; i++)
{
if(array[i].m_gcp_complete_rep_count && array[i].m_gci < gci)
@@ -2517,5 +2550,5 @@ EventBufData_hash::search(Pos& hpos, NdbEventOperationImpl* op, LinearSectionPtr
DBUG_VOID_RETURN_EVENT;
}
-template class Vector<Gci_container>;
+template class Vector<Gci_container_pod>;
template class Vector<NdbEventBuffer::EventBufData_chunk*>;
diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp
index bffc2174be5..8d413cc8d14 100644
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp
@@ -272,6 +272,11 @@ struct Gci_container
EventBufData_hash m_data_hash;
};
+struct Gci_container_pod
+{
+ char data[sizeof(Gci_container)];
+};
+
class NdbEventOperationImpl : public NdbEventOperation {
public:
NdbEventOperationImpl(NdbEventOperation &f,
@@ -365,7 +370,7 @@ public:
~NdbEventBuffer();
const Uint32 &m_system_nodes;
- Vector<Gci_container> m_active_gci;
+ Vector<Gci_container_pod> m_active_gci;
NdbEventOperation *createEventOperation(const char* eventName,
NdbError &);
NdbEventOperationImpl *createEventOperation(NdbEventImpl& evnt,
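
Gci_container owns an EventBufData_hash and is therefore not a POD, yet the Vector template moves its elements with memcpy and fill(); Gci_container_pod wraps the same bytes in a char array so the byte-level moves are legal from the template's point of view, and every access site casts getBase() back to Gci_container*. The trick in isolation; note it is only sound if the wrapped type really tolerates being moved bytewise, which the patch assumes for Gci_container:

// Editor's sketch: a POD shell the same size (and, via alignas, alignment)
// as the payload lets a memcpy-based container hold a non-POD; access sites
// cast back to the real type.
#include <cstdio>
#include <cstring>

struct Payload { int gci; int count; };            // stand-in for Gci_container
struct PayloadPod { alignas(Payload) char data[sizeof(Payload)]; };

int main()
{
  PayloadPod raw[4];
  std::memset(raw, 0, sizeof(raw));                // what a fill() might do

  Payload* slots = (Payload*)raw;                  // the cast used at access sites
  slots[1].gci = 17; slots[1].count = 2;

  std::memcpy(&raw[3], &raw[1], sizeof(PayloadPod)); // bytewise move of an element
  std::printf("moved gci=%d count=%d\n", slots[3].gci, slots[3].count);
  return 0;
}
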
diff --git a/storage/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
index ea7fbe4077d..6a4e657d172 100644
--- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
@@ -1106,6 +1106,11 @@ int
NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo,
int type, const void* aValue)
{
+ if (!tAttrInfo)
+ {
+ setErrorCodeAbort(4318); // Invalid attribute
+ return -1;
+ }
if (theOperationType == OpenRangeScanRequest &&
(0 <= type && type <= 4)) {
// insert bound type
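
setBound now rejects a null column handle up front with error 4318 (invalid attribute) instead of dereferencing it later, which is the case a caller hits when the column name it resolved does not exist. The validate-before-use shape:

// Editor's sketch: validate the handle before use and surface a specific
// error code, rather than crashing on a null dereference.
#include <cstdio>

struct Column { const char* name; };

int set_bound(const Column* col, int type)
{
  if (!col) {
    std::printf("error 4318: invalid attribute\n");  // code from the hunk above
    return -1;
  }
  std::printf("bound of type %d on %s\n", type, col->name);
  return 0;
}

int main()
{
  Column c{"a"};
  set_bound(&c, 0);
  set_bound(nullptr, 0);   // e.g. a failed column lookup returned null
  return 0;
}
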
diff --git a/storage/ndb/src/ndbapi/NdbTransaction.cpp b/storage/ndb/src/ndbapi/NdbTransaction.cpp
index 3d98dea7edc..3158dca5c40 100644
--- a/storage/ndb/src/ndbapi/NdbTransaction.cpp
+++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp
@@ -1192,9 +1192,9 @@ NdbTransaction::getNdbIndexScanOperation(const NdbIndexImpl* index,
if(tOp)
{
tOp->m_currentTable = table;
+    // Mark that this really is an NdbIndexScanOperation
+ tOp->m_type = NdbOperation::OrderedIndexScan;
}
- // Mark that this really an NdbIndexScanOperation
- tOp->m_type = NdbOperation::OrderedIndexScan;
return tOp;
} else {
setOperationErrorCodeAbort(4271);
diff --git a/storage/ndb/src/ndbapi/Ndbif.cpp b/storage/ndb/src/ndbapi/Ndbif.cpp
index b0692eb8236..7799a71749e 100644
--- a/storage/ndb/src/ndbapi/Ndbif.cpp
+++ b/storage/ndb/src/ndbapi/Ndbif.cpp
@@ -1435,8 +1435,7 @@ NdbTransaction::sendTC_COMMIT_ACK(TransporterFacade *tp,
Uint32 * dataPtr = aSignal->getDataPtrSend();
dataPtr[0] = transId1;
dataPtr[1] = transId2;
-
- tp->sendSignal(aSignal, refToNode(aTCRef));
+ tp->sendSignalUnCond(aSignal, refToNode(aTCRef));
}
int
diff --git a/storage/ndb/src/ndbapi/TransporterFacade.cpp b/storage/ndb/src/ndbapi/TransporterFacade.cpp
index 7005d5d2325..15127953051 100644
--- a/storage/ndb/src/ndbapi/TransporterFacade.cpp
+++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp
@@ -343,7 +343,7 @@ execute(void * callbackObj, SignalHeader * const header,
Uint32 aNodeId= refToNode(ref);
tSignal.theReceiversBlockNumber= refToBlock(ref);
tSignal.theVerId_signalNumber= GSN_SUB_GCP_COMPLETE_ACK;
- theFacade->sendSignal(&tSignal, aNodeId);
+ theFacade->sendSignalUnCond(&tSignal, aNodeId);
}
break;
}
@@ -987,7 +987,7 @@ TransporterFacade::sendSignal(NdbApiSignal * aSignal, NodeId aNode){
LinearSectionPtr ptr[3];
signalLogger.sendSignal(* aSignal,
1,
- aSignal->getDataPtr(),
+ tDataPtr,
aNode, ptr, 0);
signalLogger.flushSignalLog();
aSignal->theSendersBlockRef = tmp;
@@ -1014,6 +1014,7 @@ TransporterFacade::sendSignal(NdbApiSignal * aSignal, NodeId aNode){
int
TransporterFacade::sendSignalUnCond(NdbApiSignal * aSignal, NodeId aNode){
+ Uint32* tDataPtr = aSignal->getDataPtrSend();
#ifdef API_TRACE
if(setSignalLog() && TRACE_GSN(aSignal->theVerId_signalNumber)){
Uint32 tmp = aSignal->theSendersBlockRef;
@@ -1021,7 +1022,7 @@ TransporterFacade::sendSignalUnCond(NdbApiSignal * aSignal, NodeId aNode){
LinearSectionPtr ptr[3];
signalLogger.sendSignal(* aSignal,
0,
- aSignal->getDataPtr(),
+ tDataPtr,
aNode, ptr, 0);
signalLogger.flushSignalLog();
aSignal->theSendersBlockRef = tmp;
@@ -1032,7 +1033,7 @@ TransporterFacade::sendSignalUnCond(NdbApiSignal * aSignal, NodeId aNode){
(aSignal->theReceiversBlockNumber != 0));
SendStatus ss = theTransporterRegistry->prepareSend(aSignal,
0,
- aSignal->getDataPtr(),
+ tDataPtr,
aNode,
0);
diff --git a/storage/ndb/src/ndbapi/TransporterFacade.hpp b/storage/ndb/src/ndbapi/TransporterFacade.hpp
index b64ce2e8614..eb2f162da88 100644
--- a/storage/ndb/src/ndbapi/TransporterFacade.hpp
+++ b/storage/ndb/src/ndbapi/TransporterFacade.hpp
@@ -175,7 +175,8 @@ private:
friend class GrepSS;
friend class Ndb;
friend class Ndb_cluster_connection_impl;
-
+ friend class NdbTransaction;
+
int sendSignalUnCond(NdbApiSignal *, NodeId nodeId);
bool isConnected(NodeId aNodeId);
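
Two related API-send fixes land here: TC_COMMIT_ACK and SUB_GCP_COMPLETE_ACK now go through sendSignalUnCond (hence NdbTransaction joining the friend list), and both send paths log and transmit the pointer obtained once from getDataPtrSend() rather than mixing it with getDataPtr(). A sketch of why pinning one pointer matters; the two-buffer layout below is an assumption for illustration, not the real NdbApiSignal:

// Editor's sketch: if a signal class exposes separate read/write buffers,
// writing via getDataPtrSend() but transmitting getDataPtr() sends stale
// data. The split is assumed here purely to show the hazard.
#include <cstdio>
#include <cstdint>

struct ApiSignal {
  uint32_t recv_buf[2] = {0, 0};
  uint32_t send_buf[2] = {0, 0};
  const uint32_t* getDataPtr() const { return recv_buf; }  // read side
  uint32_t* getDataPtrSend()         { return send_buf; }  // write side
};

void transmit(const char* what, const uint32_t* d)
{
  std::printf("%s: %u %u\n", what, d[0], d[1]);
}

int main()
{
  ApiSignal sig;
  uint32_t* d = sig.getDataPtrSend();   // fetch once, as the patch does
  d[0] = 7; d[1] = 9;
  transmit("correct (send ptr)", d);                // 7 9
  transmit("buggy   (read ptr)", sig.getDataPtr()); // 0 0: wrong buffer
  return 0;
}
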
diff --git a/storage/ndb/test/ndbapi/bank/Bank.cpp b/storage/ndb/test/ndbapi/bank/Bank.cpp
index 5ef01533e07..80edbef7e74 100644
--- a/storage/ndb/test/ndbapi/bank/Bank.cpp
+++ b/storage/ndb/test/ndbapi/bank/Bank.cpp
@@ -22,7 +22,8 @@
Bank::Bank(Ndb_cluster_connection& con, bool _init, const char * dbase):
m_ndb(&con, dbase),
m_maxAccount(-1),
- m_initialized(false)
+ m_initialized(false),
+ m_skip_create(false)
{
if(_init)
init();
diff --git a/storage/ndb/test/ndbapi/bank/Bank.hpp b/storage/ndb/test/ndbapi/bank/Bank.hpp
index 60ec7745b59..494f39930eb 100644
--- a/storage/ndb/test/ndbapi/bank/Bank.hpp
+++ b/storage/ndb/test/ndbapi/bank/Bank.hpp
@@ -29,6 +29,7 @@ public:
Bank(Ndb_cluster_connection&, bool init = true, const char *dbase="BANK");
+  void setSkipCreate(bool skip) { m_skip_create = skip; }
int createAndLoadBank(bool overWrite, bool disk= false, int num_accounts=10);
int dropBank();
@@ -140,6 +141,7 @@ private:
Ndb m_ndb;
int m_maxAccount;
bool m_initialized;
+ bool m_skip_create;
};
#endif
diff --git a/storage/ndb/test/ndbapi/bank/BankLoad.cpp b/storage/ndb/test/ndbapi/bank/BankLoad.cpp
index 45d6a860a3d..5a81a4d2498 100644
--- a/storage/ndb/test/ndbapi/bank/BankLoad.cpp
+++ b/storage/ndb/test/ndbapi/bank/BankLoad.cpp
@@ -58,7 +58,7 @@ int Bank::createAndLoadBank(bool ovrWrt, bool disk, int num_accounts){
m_ndb.init();
if (m_ndb.waitUntilReady() != 0)
return NDBT_FAILED;
-
+
const NdbDictionary::Table* pSysValTab =
m_ndb.getDictionary()->getTable("SYSTEM_VALUES");
if (pSysValTab != NULL){
@@ -69,7 +69,7 @@ int Bank::createAndLoadBank(bool ovrWrt, bool disk, int num_accounts){
}
}
- if (createTables(disk) != NDBT_OK)
+ if (!m_skip_create && createTables(disk) != NDBT_OK)
return NDBT_FAILED;
if (clearTables() != NDBT_OK)
diff --git a/storage/ndb/test/ndbapi/bank/bankCreator.cpp b/storage/ndb/test/ndbapi/bank/bankCreator.cpp
index 39e4920867f..30c024d799c 100644
--- a/storage/ndb/test/ndbapi/bank/bankCreator.cpp
+++ b/storage/ndb/test/ndbapi/bank/bankCreator.cpp
@@ -31,10 +31,12 @@ int main(int argc, const char** argv){
int _help = 0;
char * _database = "BANK";
int disk = 0;
+ int skip_create = 0;
struct getargs args[] = {
{ "database", 'd', arg_string, &_database, "Database name", ""},
{ "disk", 0, arg_flag, &disk, "Use disk tables", "" },
+ { "skip-create", 0, arg_flag, &skip_create, "Skip create", "" },
{ "usage", '?', arg_flag, &_help, "Print help", "" }
};
int num_args = sizeof(args) / sizeof(args[0]);
@@ -55,6 +57,7 @@ int main(int argc, const char** argv){
Bank bank(con,_database);
int overWriteExisting = true;
+ bank.setSkipCreate(skip_create);
if (bank.createAndLoadBank(overWriteExisting, disk) != NDBT_OK)
return NDBT_ProgramExit(NDBT_FAILED);
return NDBT_ProgramExit(NDBT_OK);
diff --git a/storage/ndb/test/ndbapi/testBasic.cpp b/storage/ndb/test/ndbapi/testBasic.cpp
index 879a4979220..69f3d8daef6 100644
--- a/storage/ndb/test/ndbapi/testBasic.cpp
+++ b/storage/ndb/test/ndbapi/testBasic.cpp
@@ -1034,6 +1034,38 @@ runMassiveRollback2(NDBT_Context* ctx, NDBT_Step* step){
return result;
}
+int
+runMassiveRollback3(NDBT_Context* ctx, NDBT_Step* step){
+
+ int result = NDBT_OK;
+ HugoOperations hugoOps(*ctx->getTab());
+ Ndb* pNdb = GETNDB(step);
+
+ const Uint32 BATCH = 10;
+ const Uint32 OPS_TOTAL = 20;
+ const Uint32 LOOPS = 100;
+
+ for(Uint32 loop = 0; loop<LOOPS; loop++)
+ {
+ CHECK(hugoOps.startTransaction(pNdb) == 0);
+ bool ok = true;
+ for (Uint32 i = 0; i<OPS_TOTAL; i+= BATCH)
+ {
+ CHECK(hugoOps.pkInsertRecord(pNdb, i, BATCH, 0) == 0);
+ if (hugoOps.execute_NoCommit(pNdb) != 0)
+ {
+ ok = false;
+ break;
+ }
+ }
+ hugoOps.execute_Rollback(pNdb);
+ CHECK(hugoOps.closeTransaction(pNdb) == 0);
+ }
+
+ hugoOps.closeTransaction(pNdb);
+ return result;
+}
+
/**
* TUP errors
*/
@@ -1360,6 +1392,13 @@ TESTCASE("MassiveRollback2",
INITIALIZER(runMassiveRollback2);
FINALIZER(runClearTable2);
}
+TESTCASE("MassiveRollback3",
+ "Test repeated rollback of batched inserts"){
+ INITIALIZER(runClearTable2);
+ STEP(runMassiveRollback3);
+ STEP(runMassiveRollback3);
+ FINALIZER(runClearTable2);
+}
TESTCASE("MassiveTransaction",
"Test very large insert transaction"){
INITIALIZER(runLoadTable2);
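runMassiveRollback3 builds each transaction as NoCommit batches (BATCH rows per execute, OPS_TOTAL rows per transaction) and then rolls the whole transaction back; the TESTCASE registers the step twice so two copies run concurrently. A standalone sketch of the batching arithmetic, with a stub in place of the HugoOperations calls:

// Standalone sketch of the MassiveRollback3 loop structure; the stub
// replaces HugoOperations, so only the control flow is illustrated.
#include <cstdio>

static int execute_NoCommit() { return 0; }   // stub: 0 means success

int main() {
  const unsigned BATCH = 10, OPS_TOTAL = 20, LOOPS = 100;
  unsigned attempted = 0;
  for (unsigned loop = 0; loop < LOOPS; loop++) {
    // startTransaction()
    for (unsigned i = 0; i < OPS_TOTAL; i += BATCH) {
      attempted += BATCH;                      // pkInsertRecord(i, BATCH)
      if (execute_NoCommit() != 0)
        break;                                 // stop batching on error
    }
    // execute_Rollback() + closeTransaction(): nothing is committed
  }
  std::printf("attempted %u inserts, committed 0\n", attempted);
  return 0;
}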
diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt
index 08467a652f0..ba415d657a0 100644
--- a/storage/ndb/test/run-test/daily-basic-tests.txt
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt
@@ -209,6 +209,10 @@ args: -n MassiveRollback2 T1 T6 T13 D1 D2
max-time: 500
cmd: testBasic
+args: -n MassiveRollback3 T1 D1
+
+max-time: 500
+cmd: testBasic
args: -n TupError
max-time: 500
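Entries in daily-basic-tests.txt are blank-line separated max-time/cmd/args triples, which is why the hunk above can splice a new test in by reusing the neighbouring max-time and cmd lines. For reference, a complete entry (with a hypothetical test name) looks like:

max-time: 500
cmd: testBasic
args: -n SomeNewTest T1 D1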
diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp
index 928cfbc6580..7762785ef61 100644
--- a/storage/ndb/tools/restore/Restore.cpp
+++ b/storage/ndb/tools/restore/Restore.cpp
@@ -25,6 +25,7 @@
#include <SimpleProperties.hpp>
#include <signaldata/DictTabInfo.hpp>
#include <ndb_limits.h>
+#include <NdbAutoPtr.hpp>
#include "../../../../sql/ha_ndbcluster_tables.h"
@@ -291,6 +292,7 @@ RestoreMetaData::markSysTables()
strcmp(tableName, "NDB$EVENTS_0") == 0 ||
strcmp(tableName, "sys/def/SYSTAB_0") == 0 ||
strcmp(tableName, "sys/def/NDB$EVENTS_0") == 0 ||
+ strcmp(tableName, "cluster_replication/def/" NDB_APPLY_TABLE) == 0 ||
strcmp(tableName, NDB_REP_DB "/def/" NDB_APPLY_TABLE) == 0 ||
strcmp(tableName, NDB_REP_DB "/def/" NDB_SCHEMA_TABLE)== 0 )
table->isSysTable = true;
@@ -377,7 +379,8 @@ bool
RestoreMetaData::parseTableDescriptor(const Uint32 * data, Uint32 len)
{
NdbTableImpl* tableImpl = 0;
- int ret = NdbDictInterface::parseTableInfo(&tableImpl, data, len, false);
+ int ret = NdbDictInterface::parseTableInfo(&tableImpl, data, len, false,
+ m_fileHeader.NdbVersion);
if (ret != 0) {
err << "parseTableInfo " << " failed" << endl;
@@ -956,14 +959,17 @@ RestoreLogIterator::RestoreLogIterator(const RestoreMetaData & md)
}
const LogEntry *
-RestoreLogIterator::getNextLogEntry(int & res, bool *alloc_flag) {
+RestoreLogIterator::getNextLogEntry(int & res) {
// Read record length
- typedef BackupFormat::LogFile::LogEntry LogE;
-
- LogE * logE= 0;
- Uint32 len= ~0;
const Uint32 stopGCP = m_metaData.getStopGCP();
+ Uint32 tableId;
+ Uint32 triggerEvent;
+ Uint32 frag_id;
+ Uint32 *attr_data;
+ Uint32 attr_data_len;
do {
+ Uint32 len;
+ Uint32 *logEntryPtr;
if (buffer_read_ahead(&len, sizeof(Uint32), 1) != 1){
res= -1;
return 0;
@@ -971,7 +977,7 @@ RestoreLogIterator::getNextLogEntry(int & res, bool *alloc_flag) {
len= ntohl(len);
Uint32 data_len = sizeof(Uint32) + len*4;
- if (buffer_get_ptr((void **)(&logE), 1, data_len) != data_len) {
+ if (buffer_get_ptr((void **)(&logEntryPtr), 1, data_len) != data_len) {
res= -2;
return 0;
}
@@ -980,7 +986,8 @@ RestoreLogIterator::getNextLogEntry(int & res, bool *alloc_flag) {
res= 0;
return 0;
}
- if (m_metaData.getFileHeader().NdbVersion < NDBD_FRAGID_VERSION)
+
+ if (unlikely(m_metaData.getFileHeader().NdbVersion < NDBD_FRAGID_VERSION))
{
/*
FragId was introduced in LogEntry in version
@@ -989,35 +996,37 @@ RestoreLogIterator::getNextLogEntry(int & res, bool *alloc_flag) {
do not support restore of user defined partitioned
tables.
*/
- int i;
- LogE *tmpLogE = (LogE*)NdbMem_Allocate(data_len + 4);
- if (!tmpLogE)
- {
- res = -2;
- return 0;
- }
- tmpLogE->Length = logE->Length;
- tmpLogE->TableId = logE->TableId;
- tmpLogE->TriggerEvent = logE->TriggerEvent;
- tmpLogE->FragId = 0;
- for (i = 0; i < len - 3; i++)
- tmpLogE->Data[i] = logE->Data[i-1];
- *alloc_flag= true;
+ typedef BackupFormat::LogFile::LogEntry_no_fragid LogE_no_fragid;
+ LogE_no_fragid * logE_no_fragid= (LogE_no_fragid *)logEntryPtr;
+ tableId= ntohl(logE_no_fragid->TableId);
+ triggerEvent= ntohl(logE_no_fragid->TriggerEvent);
+ frag_id= 0;
+ attr_data= &logE_no_fragid->Data[0];
+ attr_data_len= len - ((offsetof(LogE_no_fragid, Data) >> 2) - 1);
+ }
+ else /* normal case */
+ {
+ typedef BackupFormat::LogFile::LogEntry LogE;
+ LogE * logE= (LogE *)logEntryPtr;
+ tableId= ntohl(logE->TableId);
+ triggerEvent= ntohl(logE->TriggerEvent);
+ frag_id= ntohl(logE->FragId);
+ attr_data= &logE->Data[0];
+ attr_data_len= len - ((offsetof(LogE, Data) >> 2) - 1);
}
- logE->TableId= ntohl(logE->TableId);
- logE->TriggerEvent= ntohl(logE->TriggerEvent);
-
- const bool hasGcp= (logE->TriggerEvent & 0x10000) != 0;
- logE->TriggerEvent &= 0xFFFF;
+ const bool hasGcp= (triggerEvent & 0x10000) != 0;
+ triggerEvent &= 0xFFFF;
+
if(hasGcp){
- len--;
- m_last_gci = ntohl(logE->Data[len-2]);
+ // last attr_data is gci info
+ attr_data_len--;
+ m_last_gci = ntohl(*(attr_data + attr_data_len));
}
} while(m_last_gci > stopGCP + 1);
-
- m_logEntry.m_table = m_metaData.getTable(logE->TableId);
- switch(logE->TriggerEvent){
+
+ m_logEntry.m_table = m_metaData.getTable(tableId);
+ switch(triggerEvent){
case TriggerEvent::TE_INSERT:
m_logEntry.m_type = LogEntry::LE_INSERT;
break;
@@ -1035,10 +1044,10 @@ RestoreLogIterator::getNextLogEntry(int & res, bool *alloc_flag) {
const TableS * tab = m_logEntry.m_table;
m_logEntry.clear();
- AttributeHeader * ah = (AttributeHeader *)&logE->Data[0];
- AttributeHeader *end = (AttributeHeader *)&logE->Data[len - 2];
+ AttributeHeader * ah = (AttributeHeader *)attr_data;
+ AttributeHeader *end = (AttributeHeader *)(attr_data + attr_data_len);
AttributeS * attr;
- m_logEntry.m_frag_id = ntohl(logE->FragId);
+ m_logEntry.m_frag_id = frag_id;
while(ah < end){
attr= m_logEntry.add_attr();
if(attr == NULL) {
@@ -1047,6 +1056,9 @@ RestoreLogIterator::getNextLogEntry(int & res, bool *alloc_flag) {
return 0;
}
+ if(unlikely(!m_hostByteOrder))
+ *(Uint32*)ah = Twiddle32(*(Uint32*)ah);
+
attr->Desc = (* tab)[ah->getAttributeId()];
assert(attr->Desc != 0);
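The rewritten getNextLogEntry() no longer copies pre-NDBD_FRAGID_VERSION entries into a freshly allocated new-format struct; it decodes the record in place through whichever layout matches the backup's NdbVersion and lifts TableId, TriggerEvent, FragId and the attribute words into locals, byte-swapping attribute headers via Twiddle32 when the backup and host byte orders differ. A self-contained sketch of the dual-layout decode, with simplified structs standing in for the real BackupFormat::LogFile layouts:

// Self-contained sketch of the dual-layout log entry decode
// (simplified field set; real layouts are BackupFormat::LogFile::*).
#include <arpa/inet.h>   // ntohl/htonl
#include <cstddef>       // offsetof
#include <cstdint>
#include <cstdio>

struct LogEntry_no_fragid {                 // layout before NDBD_FRAGID_VERSION
  uint32_t Length, TableId, TriggerEvent;
  uint32_t Data[1];                         // attribute words follow
};
struct LogEntry {                           // current layout adds FragId
  uint32_t Length, TableId, TriggerEvent, FragId;
  uint32_t Data[1];
};

// `ptr` points at the Length word; `len` is the word count after it.
static void decode(const uint32_t* ptr, uint32_t len, bool has_fragid)
{
  uint32_t tableId, triggerEvent, frag_id, attr_data_len;
  const uint32_t* attr_data;
  if (!has_fragid) {
    const LogEntry_no_fragid* e = (const LogEntry_no_fragid*)ptr;
    tableId      = ntohl(e->TableId);
    triggerEvent = ntohl(e->TriggerEvent);
    frag_id      = 0;                       // old backups never stored it
    attr_data    = e->Data;
    attr_data_len = len - ((offsetof(LogEntry_no_fragid, Data) >> 2) - 1);
  } else {
    const LogEntry* e = (const LogEntry*)ptr;
    tableId      = ntohl(e->TableId);
    triggerEvent = ntohl(e->TriggerEvent);
    frag_id      = ntohl(e->FragId);
    attr_data    = e->Data;
    attr_data_len = len - ((offsetof(LogEntry, Data) >> 2) - 1);
  }
  std::printf("table=%u event=%u frag=%u attr_words=%u first_attr=%u\n",
              tableId, triggerEvent & 0xFFFF, frag_id, attr_data_len,
              attr_data_len ? ntohl(attr_data[0]) : 0);
}

int main()
{
  // Fake new-format record: Length counts the words that follow it.
  uint32_t rec[6];
  rec[0] = htonl(5);    // TableId + TriggerEvent + FragId + 2 data words
  rec[1] = htonl(7);    // TableId
  rec[2] = htonl(1);    // TriggerEvent (low 16 bits = event type)
  rec[3] = htonl(2);    // FragId
  rec[4] = htonl(100);  // attribute word 0
  rec[5] = htonl(200);  // attribute word 1
  decode(rec, ntohl(rec[0]), /*has_fragid=*/true);
  return 0;
}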
diff --git a/storage/ndb/tools/restore/Restore.hpp b/storage/ndb/tools/restore/Restore.hpp
index 516225cfab6..8698d0943e2 100644
--- a/storage/ndb/tools/restore/Restore.hpp
+++ b/storage/ndb/tools/restore/Restore.hpp
@@ -386,7 +386,7 @@ public:
RestoreLogIterator(const RestoreMetaData &);
virtual ~RestoreLogIterator() {};
- const LogEntry * getNextLogEntry(int & res, bool *alloc_flag);
+ const LogEntry * getNextLogEntry(int & res);
};
NdbOut& operator<<(NdbOut& ndbout, const TableS&);
diff --git a/storage/ndb/tools/restore/restore_main.cpp b/storage/ndb/tools/restore/restore_main.cpp
index bf57664ba9b..20bcb552a1a 100644
--- a/storage/ndb/tools/restore/restore_main.cpp
+++ b/storage/ndb/tools/restore/restore_main.cpp
@@ -615,14 +615,11 @@ main(int argc, char** argv)
}
const LogEntry * logEntry = 0;
- bool alloc_flag = false;
- while ((logEntry = logIter.getNextLogEntry(res= 0, &alloc_flag)) != 0)
+ while ((logEntry = logIter.getNextLogEntry(res= 0)) != 0)
{
if (checkSysTable(logEntry->m_table))
for(Uint32 i= 0; i < g_consumers.size(); i++)
g_consumers[i]->logEntry(* logEntry);
- if (alloc_flag)
- NdbMem_Free((void*)logEntry);
}
if (res < 0)
{
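With the version-compatibility copy gone, getNextLogEntry() always returns a pointer into state owned by the iterator, so the alloc_flag out-parameter and the caller-side NdbMem_Free disappear. A minimal sketch of that borrowed-pointer contract, with hypothetical types:

// Hypothetical iterator illustrating the ownership contract: next()
// returns a pointer into the iterator's own storage, never a heap
// allocation the caller must free.
#include <cstdio>

struct Entry { int seq; };

class LogIter {
public:
  const Entry* next(int& res) {
    if (m_seq >= 3) { res = 0; return nullptr; }  // end of log
    m_entry.seq = m_seq++;
    res = 1;
    return &m_entry;                              // borrowed, not owned
  }
private:
  Entry m_entry;
  int m_seq = 0;
};

int main() {
  LogIter it;
  int res = 0;
  while (const Entry* e = it.next(res))
    std::printf("entry %d\n", e->seq);            // no free() needed
  return res < 0 ? 1 : 0;
}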
diff --git a/unittest/examples/Makefile.am b/unittest/examples/Makefile.am
index 797784760ff..94a67927d12 100644
--- a/unittest/examples/Makefile.am
+++ b/unittest/examples/Makefile.am
@@ -1,5 +1,5 @@
-AM_CPPFLAGS = -I$(srcdir) -I$(top_builddir)/include
-AM_CPPFLAGS += -I$(top_srcdir)/unittest/mytap
+AM_CPPFLAGS = -I$(srcdir) -I$(top_builddir)/include \
+ -I$(top_srcdir)/unittest/mytap -I$(top_srcdir)/include
AM_LDFLAGS = -L$(top_builddir)/unittest/mytap
diff --git a/unittest/mytap/t/Makefile.am b/unittest/mytap/t/Makefile.am
index 948132783bc..88c31cfeb7f 100644
--- a/unittest/mytap/t/Makefile.am
+++ b/unittest/mytap/t/Makefile.am
@@ -1,6 +1,5 @@
-AM_CPPFLAGS = -I$(srcdir) -I$(top_builddir)/include
-AM_CPPFLAGS += -I$(srcdir)/..
+AM_CPPFLAGS = -I$(srcdir) -I$(top_builddir)/include -I$(srcdir)/.. -I$(top_srcdir)/include
AM_LDFLAGS = -L$(top_builddir)/unittest/mytap