-rw-r--r--  .bzrignore | 3
-rwxr-xr-x  BUILD/compile-ndb-autotest | 9
-rwxr-xr-x  BUILD/compile-pentium-gcov | 2
-rw-r--r--  config/ac-macros/compiler_flag.m4 | 22
-rw-r--r--  configure.in | 4
-rw-r--r--  include/my_sys.h | 8
-rwxr-xr-x  mysql-test/mysql-test-run.pl | 4
-rw-r--r--  mysql-test/r/insert_select.result | 5
-rw-r--r--  mysql-test/r/key.result | 10
-rw-r--r--  mysql-test/r/log_state.result | 22
-rw-r--r--  mysql-test/r/ndb_blob_partition.result | 104
-rw-r--r--  mysql-test/r/ndb_dd_backuprestore.result | 323
-rw-r--r--  mysql-test/r/ndb_dd_ddl.result | 76
-rw-r--r--  mysql-test/r/ndb_partition_error.result | 47
-rw-r--r--  mysql-test/r/ndb_partition_key.result | 199
-rw-r--r--  mysql-test/r/ndb_partition_list.result | 51
-rw-r--r--  mysql-test/r/ndb_partition_range.result | 263
-rw-r--r--  mysql-test/r/partition_innodb.result | 15
-rw-r--r--  mysql-test/r/partition_pruning.result | 2
-rw-r--r--  mysql-test/r/renamedb.result | 4
-rw-r--r--  mysql-test/r/rpl_ndb_dd_partitions.result | 726
-rw-r--r--  mysql-test/r/show_check.result | 6
-rw-r--r--  mysql-test/r/view_grant.result | 12
-rw-r--r--  mysql-test/t/events_stress.test | 3
-rw-r--r--  mysql-test/t/insert_select.test | 9
-rw-r--r--  mysql-test/t/key.test | 11
-rw-r--r--  mysql-test/t/log_state.test | 16
-rw-r--r--  mysql-test/t/ndb_blob_partition.test | 93
-rw-r--r--  mysql-test/t/ndb_dd_backuprestore.test | 170
-rw-r--r--  mysql-test/t/ndb_dd_ddl.test | 97
-rw-r--r--  mysql-test/t/ndb_partition_error.test | 71
-rw-r--r--  mysql-test/t/ndb_partition_key.test | 198
-rw-r--r--  mysql-test/t/ndb_partition_list.test | 64
-rw-r--r--  mysql-test/t/ndb_partition_range.test | 260
-rw-r--r--  mysql-test/t/partition_innodb.test | 12
-rw-r--r--  mysql-test/t/renamedb.test | 8
-rw-r--r--  mysql-test/t/rpl_ndb_auto_inc.test | 3
-rw-r--r--  mysql-test/t/rpl_ndb_dd_partitions.test | 310
-rw-r--r--  mysql-test/t/rpl_openssl.test | 4
-rw-r--r--  mysql-test/t/show_check.test | 13
-rw-r--r--  mysql-test/t/view_grant.test | 20
-rw-r--r--  mysys/my_delete.c | 51
-rw-r--r--  mysys/my_handler.c | 3
-rw-r--r--  mysys/my_init.c | 24
-rw-r--r--  sql/field.cc | 30
-rw-r--r--  sql/field.h | 3
-rw-r--r--  sql/filesort.cc | 10
-rw-r--r--  sql/ha_ndbcluster.cc | 225
-rw-r--r--  sql/ha_ndbcluster.h | 3
-rw-r--r--  sql/ha_ndbcluster_binlog.cc | 27
-rw-r--r--  sql/handler.h | 2
-rw-r--r--  sql/key.cc | 4
-rw-r--r--  sql/log.cc | 4
-rw-r--r--  sql/log_event.cc | 2
-rw-r--r--  sql/mysql_priv.h | 2
-rw-r--r--  sql/mysqld.cc | 2
-rw-r--r--  sql/partition_info.cc | 27
-rw-r--r--  sql/partition_info.h | 8
-rw-r--r--  sql/share/errmsg.txt | 26
-rw-r--r--  sql/sql_base.cc | 2
-rw-r--r--  sql/sql_class.cc | 9
-rw-r--r--  sql/sql_db.cc | 6
-rw-r--r--  sql/sql_parse.cc | 53
-rw-r--r--  sql/sql_partition.cc | 20
-rw-r--r--  sql/sql_partition.h | 2
-rw-r--r--  sql/sql_table.cc | 10
-rw-r--r--  sql/table.cc | 21
-rw-r--r--  storage/ndb/include/kernel/GlobalSignalNumbers.h | 4
-rw-r--r--  storage/ndb/include/kernel/signaldata/BackupContinueB.hpp | 3
-rw-r--r--  storage/ndb/include/kernel/signaldata/BackupImpl.hpp | 22
-rw-r--r--  storage/ndb/include/kernel/signaldata/BackupSignalData.hpp | 8
-rw-r--r--  storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp | 4
-rw-r--r--  storage/ndb/include/kernel/signaldata/DictTabInfo.hpp | 11
-rw-r--r--  storage/ndb/include/kernel/signaldata/LqhFrag.hpp | 8
-rw-r--r--  storage/ndb/include/kernel/signaldata/SystemError.hpp | 3
-rw-r--r--  storage/ndb/include/kernel/signaldata/TupFrag.hpp | 13
-rw-r--r--  storage/ndb/include/ndbapi/NdbDictionary.hpp | 60
-rw-r--r--  storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp | 6
-rw-r--r--  storage/ndb/include/ndbapi/NdbScanOperation.hpp | 4
-rw-r--r--  storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp | 6
-rw-r--r--  storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp | 6
-rw-r--r--  storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp | 9
-rw-r--r--  storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp | 6
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/Backup.cpp | 214
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/Backup.hpp | 8
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp | 17
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/BackupInit.cpp | 3
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 126
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp | 1
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp | 3
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 20
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 10
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 94
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 9
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp | 6
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 124
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp | 11
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp | 7
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp | 10
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 7
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp | 8
-rw-r--r--  storage/ndb/src/kernel/blocks/suma/Suma.cpp | 3
-rw-r--r--  storage/ndb/src/kernel/vm/RequestTracker.hpp | 4
-rw-r--r--  storage/ndb/src/kernel/vm/SafeCounter.hpp | 22
-rw-r--r--  storage/ndb/src/mgmsrv/ConfigInfo.cpp | 15
-rw-r--r--  storage/ndb/src/mgmsrv/MgmtSrvr.cpp | 10
-rw-r--r--  storage/ndb/src/mgmsrv/MgmtSrvr.hpp | 4
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionary.cpp | 121
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp | 120
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp | 26
-rw-r--r--  storage/ndb/src/ndbapi/NdbReceiver.cpp | 10
-rw-r--r--  storage/ndb/src/ndbapi/NdbScanOperation.cpp | 82
-rw-r--r--  storage/ndb/src/ndbapi/ndberror.c | 1
-rw-r--r--  storage/ndb/test/ndbapi/testScan.cpp | 102
-rw-r--r--  storage/ndb/test/ndbapi/test_event.cpp | 57
-rw-r--r--  storage/ndb/tools/restore/Restore.cpp | 58
-rw-r--r--  storage/ndb/tools/restore/Restore.hpp | 15
-rw-r--r--  storage/ndb/tools/restore/consumer_restore.cpp | 23
-rw-r--r--  strings/Makefile.am | 6
-rw-r--r--  support-files/mysql.spec.sh | 13
121 files changed, 4813 insertions(+), 587 deletions(-)
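
For orientation: the bulk of this changeset is new NDB partitioning test
coverage. A sketch of running the new tests, assuming a tree built with NDB
cluster support (mysql-test-run.pl accepts test names as arguments):

  # Illustrative only; test names match the files added below.
  cd mysql-test
  ./mysql-test-run.pl ndb_partition_error ndb_partition_key \
                      ndb_partition_list ndb_partition_range \
                      ndb_blob_partition ndb_dd_backuprestore
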
diff --git a/.bzrignore b/.bzrignore
index e07aa0cf86a..d570c36c7f5 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -7,6 +7,8 @@
*.d
*.da
*.exe
+*.gcda
+*.gcno
*.gcov
*.idb
*.la
@@ -1778,3 +1780,4 @@ vio/viotest-sslconnect.cpp
vio/viotest.cpp
zlib/*.ds?
zlib/*.vcproj
+server-tools/instance-manager/net_serv.cc
diff --git a/BUILD/compile-ndb-autotest b/BUILD/compile-ndb-autotest
new file mode 100755
index 00000000000..3ef0091c155
--- /dev/null
+++ b/BUILD/compile-ndb-autotest
@@ -0,0 +1,9 @@
+#! /bin/sh
+
+path=`dirname $0`
+. "$path/SETUP.sh"
+
+extra_flags="$fast_cflags $max_cflags -g"
+extra_configs="$max_configs --with-ndb-test --with-ndb-ccflags="-DERROR_INSERT""
+
+. "$path/FINISH.sh"
diff --git a/BUILD/compile-pentium-gcov b/BUILD/compile-pentium-gcov
index b024bba49bf..ca37f78e283 100755
--- a/BUILD/compile-pentium-gcov
+++ b/BUILD/compile-pentium-gcov
@@ -14,7 +14,7 @@ export LDFLAGS="-fprofile-arcs -ftest-coverage"
# The -fprofile-arcs and -ftest-coverage options cause GCC to instrument the
# code with profiling information used by gcov.
# the -DDISABLE_TAO_ASM is needed to avoid build failures in Yassl.
-extra_flags="$pentium_cflags -fprofile-arcs -ftest-coverage -DDISABLE_TAO_ASM"
+extra_flags="$pentium_cflags -fprofile-arcs -ftest-coverage -DDISABLE_TAO_ASM -DHAVE_MUTEX_THREAD_ONLY"
extra_configs="$pentium_configs $debug_configs --disable-shared $static_link"
extra_configs="$extra_configs $max_configs"
diff --git a/config/ac-macros/compiler_flag.m4 b/config/ac-macros/compiler_flag.m4
index a236f61a198..88097c7a62e 100644
--- a/config/ac-macros/compiler_flag.m4
+++ b/config/ac-macros/compiler_flag.m4
@@ -38,3 +38,25 @@ AC_DEFUN([AC_SYS_OS_COMPILER_FLAG],
fi
])
+AC_DEFUN([AC_CHECK_NOEXECSTACK],
+[
+ AC_CACHE_CHECK(whether --noexecstack is desirable for .S files,
+ mysql_cv_as_noexecstack, [dnl
+ cat > conftest.c <<EOF
+void foo (void) { }
+EOF
+ if AC_TRY_COMMAND([${CC-cc} $CFLAGS $CPPFLAGS
+ -S -o conftest.s conftest.c 1>&AS_MESSAGE_LOG_FD]) \
+ && grep .note.GNU-stack conftest.s >/dev/null \
+ && AC_TRY_COMMAND([${CC-cc} $CCASFLAGS $CPPFLAGS -Wa,--noexecstack
+ -c -o conftest.o conftest.s 1>&AS_MESSAGE_LOG_FD])
+ then
+ mysql_cv_as_noexecstack=yes
+ else
+ mysql_cv_as_noexecstack=no
+ fi
+ rm -f conftest*])
+ if test $mysql_cv_as_noexecstack = yes; then
+ CCASFLAGS="$CCASFLAGS -Wa,--noexecstack"
+ fi
+])
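
The new AC_CHECK_NOEXECSTACK macro can be reproduced by hand; roughly what it
does at configure time, assuming gcc and GNU as:

  cat > conftest.c <<'EOF'
  void foo (void) { }
  EOF
  # GCC emits a .note.GNU-stack section on platforms where the
  # non-executable-stack annotation is meaningful.
  gcc -S -o conftest.s conftest.c
  if grep -q .note.GNU-stack conftest.s &&
     gcc -Wa,--noexecstack -c -o conftest.o conftest.s
  then
    echo 'CCASFLAGS gets -Wa,--noexecstack'
  fi
  rm -f conftest*
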
diff --git a/configure.in b/configure.in
index 120863dafbd..1306a3cc74d 100644
--- a/configure.in
+++ b/configure.in
@@ -482,6 +482,10 @@ AM_PROG_CC_STDC
# We need an assembler, too
AM_PROG_AS
+CCASFLAGS="$CCASFLAGS $ASFLAGS"
+
+# Check if we need noexec stack for assembler
+AC_CHECK_NOEXECSTACK
if test "$am_cv_prog_cc_stdc" = "no"
then
diff --git a/include/my_sys.h b/include/my_sys.h
index 5d155eb20cc..2dc4053f70d 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -531,6 +531,7 @@ typedef int (*Process_option_func)(void *ctx, const char *group_name,
#include <my_alloc.h>
+
/* Prototypes for mysys and my_func functions */
extern int my_copy(const char *from,const char *to,myf MyFlags);
@@ -604,6 +605,13 @@ extern File my_sopen(const char *path, int oflag, int shflag, int pmode);
extern int check_if_legal_filename(const char *path);
extern int check_if_legal_tablename(const char *path);
+#if defined(__WIN__) && defined(__NT__)
+extern int nt_share_delete(const char *name,myf MyFlags);
+#define my_delete_allow_opened(fname,flags) nt_share_delete((fname),(flags))
+#else
+#define my_delete_allow_opened(fname,flags) my_delete((fname),(flags))
+#endif
+
#ifndef TERMINATE
extern void TERMINATE(FILE *file);
#endif
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index ca2eb6641cd..f757c59d90e 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -1049,7 +1049,9 @@ sub executable_setup () {
# New CMake locations.
"$glob_basedir/client/release",
"$glob_basedir/client/debug");
- $exe_mysqld= mtr_exe_exists ("$path_client_bindir/mysqld-nt",
+ $exe_mysqld= mtr_exe_exists ("$path_client_bindir/mysqld-max-nt",
+ "$path_client_bindir/mysqld-max",
+ "$path_client_bindir/mysqld-nt",
"$path_client_bindir/mysqld",
"$path_client_bindir/mysqld-debug",
"$path_client_bindir/mysqld-max",
diff --git a/mysql-test/r/insert_select.result b/mysql-test/r/insert_select.result
index bcad0460acc..390e76caf2c 100644
--- a/mysql-test/r/insert_select.result
+++ b/mysql-test/r/insert_select.result
@@ -690,3 +690,8 @@ CREATE TABLE t1 (a int PRIMARY KEY);
INSERT INTO t1 values (1), (2);
INSERT INTO t1 SELECT a + 2 FROM t1 LIMIT 1;
DROP TABLE t1;
+CREATE TABLE t1 (x int, y int);
+CREATE TABLE t2 (z int, y int);
+CREATE TABLE t3 (a int, b int);
+INSERT INTO t3 (SELECT x, y FROM t1 JOIN t2 USING (y) WHERE z = 1);
+DROP TABLE IF EXISTS t1,t2,t3;
diff --git a/mysql-test/r/key.result b/mysql-test/r/key.result
index 68174774531..af1db92b1ef 100644
--- a/mysql-test/r/key.result
+++ b/mysql-test/r/key.result
@@ -330,6 +330,16 @@ alter table t1 add key (c1,c1,c2);
ERROR 42S21: Duplicate column name 'c1'
drop table t1;
create table t1 (
+i1 INT NOT NULL,
+i2 INT NOT NULL,
+UNIQUE i1idx (i1),
+UNIQUE i2idx (i2));
+desc t1;
+Field Type Null Key Default Extra
+i1 int(11) NO UNI
+i2 int(11) NO UNI
+drop table t1;
+create table t1 (
c1 int,
c2 varchar(20) not null,
primary key (c1),
diff --git a/mysql-test/r/log_state.result b/mysql-test/r/log_state.result
index df81f05fea5..43735243787 100644
--- a/mysql-test/r/log_state.result
+++ b/mysql-test/r/log_state.result
@@ -14,15 +14,15 @@ set global general_log= ON;
create table t1(f1 int);
select * from mysql.general_log;
event_time user_host thread_id server_id command_type argument
-TIMESTAMP root[root] @ localhost [] # 1 Query create table t1(f1 int)
-TIMESTAMP root[root] @ localhost [] # 1 Query select * from mysql.general_log
+TIMESTAMP USER_HOST # 1 Query create table t1(f1 int)
+TIMESTAMP USER_HOST # 1 Query select * from mysql.general_log
set global general_log= OFF;
drop table t1;
select * from mysql.general_log;
event_time user_host thread_id server_id command_type argument
-TIMESTAMP root[root] @ localhost [] # 1 Query create table t1(f1 int)
-TIMESTAMP root[root] @ localhost [] # 1 Query select * from mysql.general_log
-TIMESTAMP root[root] @ localhost [] # 1 Query set global general_log= OFF
+TIMESTAMP USER_HOST # 1 Query create table t1(f1 int)
+TIMESTAMP USER_HOST # 1 Query select * from mysql.general_log
+TIMESTAMP USER_HOST # 1 Query set global general_log= OFF
set global general_log= ON;
flush logs;
show global variables
@@ -46,7 +46,7 @@ sleep(2)
0
select * from mysql.slow_log;
start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text
-TIMESTAMP, root[root] @ localhost [] USER_HOST, QUERY_TIME 1 0 test 0 0 1 select sleep(2)
+TIMESTAMP USER_HOST QUERY_TIME 00:00:00 1 0 test 0 0 1 select sleep(2)
show global variables
where Variable_name = 'log' or Variable_name = 'log_slow_queries' or
Variable_name = 'general_log' or Variable_name = 'slow_query_log';
@@ -67,9 +67,9 @@ create table t1(f1 int);
drop table t1;
select * from mysql.general_log;
event_time user_host thread_id server_id command_type argument
-TIMESTAMP root[root] @ localhost [] # 1 Query create table t1(f1 int)
-TIMESTAMP root[root] @ localhost [] # 1 Query drop table t1
-TIMESTAMP root[root] @ localhost [] # 1 Query select * from mysql.general_log
+TIMESTAMP USER_HOST # 1 Query create table t1(f1 int)
+TIMESTAMP USER_HOST # 1 Query drop table t1
+TIMESTAMP USER_HOST # 1 Query select * from mysql.general_log
set global general_log= OFF;
truncate table mysql.general_log;
select * from mysql.general_log;
@@ -151,5 +151,5 @@ set global general_log=ON;
drop table t1;
select * from mysql.general_log;
event_time user_host thread_id server_id command_type argument
-TIMESTAMP root[root] @ localhost [] # 1 Query drop table t1
-TIMESTAMP root[root] @ localhost [] # 1 Query select * from mysql.general_log
+TIMESTAMP USER_HOST # 1 Query drop table t1
+TIMESTAMP USER_HOST # 1 Query select * from mysql.general_log
diff --git a/mysql-test/r/ndb_blob_partition.result b/mysql-test/r/ndb_blob_partition.result
new file mode 100644
index 00000000000..b08a91f0cdd
--- /dev/null
+++ b/mysql-test/r/ndb_blob_partition.result
@@ -0,0 +1,104 @@
+drop table if exists t1;
+create table t1 (
+a mediumint not null,
+b text not null,
+c int not null,
+d longblob,
+primary key using hash (a,c),
+unique key (c)
+)
+engine=ndb
+partition by range (c)
+partitions 3
+( partition p1 values less than (200),
+partition p2 values less than (300),
+partition p3 values less than (400));
+insert into t1 values (1, @v1, 101, @v2);
+insert into t1 values (1, @v2, 102, @v3);
+insert into t1 values (1, @v3, 103, @v4);
+insert into t1 values (2, @v4, 201, @v5);
+insert into t1 values (2, @v5, 202, @v6);
+insert into t1 values (2, @v6, 203, @v7);
+insert into t1 values (3, @v7, 301, @v8);
+insert into t1 values (3, @v8, 302, @v9);
+insert into t1 values (3, @v9, 303, @v1);
+select a, sha1(b), c, sha1(d) from t1 order by a;
+a sha1(b) c sha1(d)
+1 1d42dd9090cf78314a06665d4ea938c35cc760f4 101 10d3c783026b310218d10b7188da96a2401648c6
+1 10d3c783026b310218d10b7188da96a2401648c6 102 a33549d9844092289a58ac348dd59f09fc28406a
+1 a33549d9844092289a58ac348dd59f09fc28406a 103 daa61c6de36a0526f0d47dc29d6b9de7e6d2630c
+2 daa61c6de36a0526f0d47dc29d6b9de7e6d2630c 201 70fc9a7d08beebc522258bfb02000a30c77a8f1d
+2 70fc9a7d08beebc522258bfb02000a30c77a8f1d 202 090565c580809efed3d369481a4bbb168b20713e
+2 090565c580809efed3d369481a4bbb168b20713e 203 1e0070bec426871a46291de27b9bd6e4255ab4e5
+3 1e0070bec426871a46291de27b9bd6e4255ab4e5 301 acbaba01bc2e682f015f40e79d9cbe475db3002e
+3 acbaba01bc2e682f015f40e79d9cbe475db3002e 302 9ee30d99162574f79c66ae95cdf132dcf9cbc259
+3 9ee30d99162574f79c66ae95cdf132dcf9cbc259 303 1d42dd9090cf78314a06665d4ea938c35cc760f4
+select a, sha1(b), c, sha1(d) from t1 where a = 1 and c = 101;
+a sha1(b) c sha1(d)
+1 1d42dd9090cf78314a06665d4ea938c35cc760f4 101 10d3c783026b310218d10b7188da96a2401648c6
+select a, sha1(b), c, sha1(d) from t1 where a = 2 and c = 201;
+a sha1(b) c sha1(d)
+2 daa61c6de36a0526f0d47dc29d6b9de7e6d2630c 201 70fc9a7d08beebc522258bfb02000a30c77a8f1d
+select a, sha1(b), c, sha1(d) from t1 where a = 3 and c = 301;
+a sha1(b) c sha1(d)
+3 1e0070bec426871a46291de27b9bd6e4255ab4e5 301 acbaba01bc2e682f015f40e79d9cbe475db3002e
+update t1 set b = @v3, d = @v4 where a = 1 and c = 102;
+update t1 set b = @v6, d = @v7 where a = 2 and c = 202;
+update t1 set b = @v9, d = @v1 where a = 3 and c = 302;
+select a, sha1(b), c, sha1(d) from t1 order by a;
+a sha1(b) c sha1(d)
+1 1d42dd9090cf78314a06665d4ea938c35cc760f4 101 10d3c783026b310218d10b7188da96a2401648c6
+1 a33549d9844092289a58ac348dd59f09fc28406a 102 daa61c6de36a0526f0d47dc29d6b9de7e6d2630c
+1 a33549d9844092289a58ac348dd59f09fc28406a 103 daa61c6de36a0526f0d47dc29d6b9de7e6d2630c
+2 daa61c6de36a0526f0d47dc29d6b9de7e6d2630c 201 70fc9a7d08beebc522258bfb02000a30c77a8f1d
+2 090565c580809efed3d369481a4bbb168b20713e 202 1e0070bec426871a46291de27b9bd6e4255ab4e5
+2 090565c580809efed3d369481a4bbb168b20713e 203 1e0070bec426871a46291de27b9bd6e4255ab4e5
+3 1e0070bec426871a46291de27b9bd6e4255ab4e5 301 acbaba01bc2e682f015f40e79d9cbe475db3002e
+3 9ee30d99162574f79c66ae95cdf132dcf9cbc259 302 1d42dd9090cf78314a06665d4ea938c35cc760f4
+3 9ee30d99162574f79c66ae95cdf132dcf9cbc259 303 1d42dd9090cf78314a06665d4ea938c35cc760f4
+update t1 set b = @v4, d = @v5 where c = 103;
+update t1 set b = @v7, d = @v8 where c = 203;
+update t1 set b = @v1, d = @v2 where c = 303;
+select a, sha1(b), c, sha1(d) from t1 order by a;
+a sha1(b) c sha1(d)
+1 1d42dd9090cf78314a06665d4ea938c35cc760f4 101 10d3c783026b310218d10b7188da96a2401648c6
+1 a33549d9844092289a58ac348dd59f09fc28406a 102 daa61c6de36a0526f0d47dc29d6b9de7e6d2630c
+1 daa61c6de36a0526f0d47dc29d6b9de7e6d2630c 103 70fc9a7d08beebc522258bfb02000a30c77a8f1d
+2 daa61c6de36a0526f0d47dc29d6b9de7e6d2630c 201 70fc9a7d08beebc522258bfb02000a30c77a8f1d
+2 090565c580809efed3d369481a4bbb168b20713e 202 1e0070bec426871a46291de27b9bd6e4255ab4e5
+2 1e0070bec426871a46291de27b9bd6e4255ab4e5 203 acbaba01bc2e682f015f40e79d9cbe475db3002e
+3 1e0070bec426871a46291de27b9bd6e4255ab4e5 301 acbaba01bc2e682f015f40e79d9cbe475db3002e
+3 9ee30d99162574f79c66ae95cdf132dcf9cbc259 302 1d42dd9090cf78314a06665d4ea938c35cc760f4
+3 1d42dd9090cf78314a06665d4ea938c35cc760f4 303 10d3c783026b310218d10b7188da96a2401648c6
+update t1 set b = @v5, d = @v6;
+select a, sha1(b), c, sha1(d) from t1 order by a;
+a sha1(b) c sha1(d)
+1 70fc9a7d08beebc522258bfb02000a30c77a8f1d 101 090565c580809efed3d369481a4bbb168b20713e
+1 70fc9a7d08beebc522258bfb02000a30c77a8f1d 102 090565c580809efed3d369481a4bbb168b20713e
+1 70fc9a7d08beebc522258bfb02000a30c77a8f1d 103 090565c580809efed3d369481a4bbb168b20713e
+2 70fc9a7d08beebc522258bfb02000a30c77a8f1d 201 090565c580809efed3d369481a4bbb168b20713e
+2 70fc9a7d08beebc522258bfb02000a30c77a8f1d 202 090565c580809efed3d369481a4bbb168b20713e
+2 70fc9a7d08beebc522258bfb02000a30c77a8f1d 203 090565c580809efed3d369481a4bbb168b20713e
+3 70fc9a7d08beebc522258bfb02000a30c77a8f1d 301 090565c580809efed3d369481a4bbb168b20713e
+3 70fc9a7d08beebc522258bfb02000a30c77a8f1d 302 090565c580809efed3d369481a4bbb168b20713e
+3 70fc9a7d08beebc522258bfb02000a30c77a8f1d 303 090565c580809efed3d369481a4bbb168b20713e
+update t1 set b = @v1, d = @v2 where 100 < c and c < 200;
+update t1 set b = @v4, d = @v5 where 200 < c and c < 300;
+update t1 set b = @v7, d = @v8 where 300 < c and c < 400;
+select a, sha1(b), c, sha1(d) from t1 order by a;
+a sha1(b) c sha1(d)
+1 1d42dd9090cf78314a06665d4ea938c35cc760f4 101 10d3c783026b310218d10b7188da96a2401648c6
+1 1d42dd9090cf78314a06665d4ea938c35cc760f4 102 10d3c783026b310218d10b7188da96a2401648c6
+1 1d42dd9090cf78314a06665d4ea938c35cc760f4 103 10d3c783026b310218d10b7188da96a2401648c6
+2 daa61c6de36a0526f0d47dc29d6b9de7e6d2630c 201 70fc9a7d08beebc522258bfb02000a30c77a8f1d
+2 daa61c6de36a0526f0d47dc29d6b9de7e6d2630c 202 70fc9a7d08beebc522258bfb02000a30c77a8f1d
+2 daa61c6de36a0526f0d47dc29d6b9de7e6d2630c 203 70fc9a7d08beebc522258bfb02000a30c77a8f1d
+3 1e0070bec426871a46291de27b9bd6e4255ab4e5 301 acbaba01bc2e682f015f40e79d9cbe475db3002e
+3 1e0070bec426871a46291de27b9bd6e4255ab4e5 302 acbaba01bc2e682f015f40e79d9cbe475db3002e
+3 1e0070bec426871a46291de27b9bd6e4255ab4e5 303 acbaba01bc2e682f015f40e79d9cbe475db3002e
+delete from t1 where a = 1 and c = 101;
+delete from t1 where c = 102;
+delete from t1;
+select a, sha1(b), c, sha1(d) from t1 order by a;
+a sha1(b) c sha1(d)
+drop table t1;
diff --git a/mysql-test/r/ndb_dd_backuprestore.result b/mysql-test/r/ndb_dd_backuprestore.result
index 33edf6783e6..cb6c62b16da 100644
--- a/mysql-test/r/ndb_dd_backuprestore.result
+++ b/mysql-test/r/ndb_dd_backuprestore.result
@@ -155,10 +155,333 @@ DROP TABLE test.t1;
DROP TABLE test.t2;
DROP TABLE test.t3;
DROP TABLE test.t4;
+**** Test 3 Adding partition Test backup and restore ****
+CREATE TABLESPACE table_space2
+ADD DATAFILE './table_space2/datafile.dat'
+USE LOGFILE GROUP log_group1
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(150) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space1 STORAGE DISK ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 4;
+CREATE TABLE test.t4 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(180) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 2;
+CREATE TABLE test.t2 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY KEY(c3) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
+CREATE TABLE test.t5 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY KEY(pk1) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
+CREATE TABLE test.t3 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(202) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY RANGE (c3) PARTITIONS 3 (PARTITION x1 VALUES LESS THAN (105), PARTITION x2 VALUES LESS THAN (333), PARTITION x3 VALUES LESS THAN (720));
+CREATE TABLE test.t6 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(220) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY RANGE (pk1) PARTITIONS 2 (PARTITION x1 VALUES LESS THAN (333), PARTITION x2 VALUES LESS THAN (720));
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
+ `c2` varchar(150) NOT NULL,
+ `c3` int(11) NOT NULL,
+ `c4` bit(1) NOT NULL,
+ PRIMARY KEY (`pk1`,`c3`)
+) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 4 */
+SHOW CREATE TABLE test.t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
+ `c2` text NOT NULL,
+ `c3` int(11) NOT NULL,
+ `c4` bit(1) NOT NULL,
+ PRIMARY KEY (`pk1`,`c3`)
+) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
+SHOW CREATE TABLE test.t3;
+Table Create Table
+t3 CREATE TABLE `t3` (
+ `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
+ `c2` varchar(202) NOT NULL,
+ `c3` int(11) NOT NULL,
+ `c4` bit(1) NOT NULL,
+ PRIMARY KEY (`pk1`,`c3`)
+) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster) */
+SHOW CREATE TABLE test.t4;
+Table Create Table
+t4 CREATE TABLE `t4` (
+ `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
+ `c2` varchar(180) NOT NULL,
+ `c3` int(11) NOT NULL,
+ `c4` bit(1) NOT NULL,
+ PRIMARY KEY (`pk1`,`c3`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 2 */
+SHOW CREATE TABLE test.t5;
+Table Create Table
+t5 CREATE TABLE `t5` (
+ `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
+ `c2` text NOT NULL,
+ `c3` int(11) NOT NULL,
+ `c4` bit(1) NOT NULL,
+ PRIMARY KEY (`pk1`,`c3`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
+SHOW CREATE TABLE test.t6;
+Table Create Table
+t6 CREATE TABLE `t6` (
+ `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
+ `c2` varchar(220) NOT NULL,
+ `c3` int(11) NOT NULL,
+ `c4` bit(1) NOT NULL,
+ PRIMARY KEY (`pk1`,`c3`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */
+SELECT * FROM information_schema.partitions WHERE table_name= 't1';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
+NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t1 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t1 p2 NULL 3 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t1 p3 NULL 4 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+SELECT * FROM information_schema.partitions WHERE table_name= 't2';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
+NULL test t2 p0 NULL 1 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t2 p1 NULL 2 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+SELECT * FROM information_schema.partitions WHERE table_name= 't3';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
+NULL test t3 x1 NULL 1 NULL RANGE NULL c3 NULL 105 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t3 x2 NULL 2 NULL RANGE NULL c3 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t3 x3 NULL 3 NULL RANGE NULL c3 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+SELECT * FROM information_schema.partitions WHERE table_name= 't4';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
+NULL test t4 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t4 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+SELECT * FROM information_schema.partitions WHERE table_name= 't5';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
+NULL test t5 p0 NULL 1 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t5 p1 NULL 2 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+SELECT * FROM information_schema.partitions WHERE table_name= 't6';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
+NULL test t6 x1 NULL 1 NULL RANGE NULL pk1 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t6 x2 NULL 2 NULL RANGE NULL pk1 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+SELECT COUNT(*) FROM test.t1;
+COUNT(*)
+250
+SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5;
+pk1 c2 c3 hex(c4)
+250 Sweden, Texas 2 0
+249 Sweden, Texas 4 0
+248 Sweden, Texas 6 0
+247 Sweden, Texas 8 0
+246 Sweden, Texas 10 0
+SELECT COUNT(*) FROM test.t2;
+COUNT(*)
+250
+SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5;
+pk1 c2 c3 hex(c4)
+250 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 1 1
+249 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 3 1
+248 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 5 1
+247 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 7 1
+246 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 9 1
+SELECT COUNT(*) FROM test.t3;
+COUNT(*)
+250
+SELECT pk1, c2, c3, hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5;
+pk1 c2 c3 hex(c4)
+250 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 0 1
+249 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 2 1
+248 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 4 1
+247 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 6 1
+246 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 8 1
+SELECT COUNT(*) FROM test.t4;
+COUNT(*)
+250
+SELECT pk1, c2, c3, hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5;
+pk1 c2 c3 hex(c4)
+250 Sweden, Texas 2 0
+249 Sweden, Texas 4 0
+248 Sweden, Texas 6 0
+247 Sweden, Texas 8 0
+246 Sweden, Texas 10 0
+SELECT COUNT(*) FROM test.t5;
+COUNT(*)
+250
+SELECT pk1, c2, c3, hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5;
+pk1 c2 c3 hex(c4)
+250 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 1 1
+249 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 3 1
+248 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 5 1
+247 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 7 1
+246 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 9 1
+SELECT COUNT(*) FROM test.t6;
+COUNT(*)
+250
+SELECT pk1, c2, c3, hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5;
+pk1 c2 c3 hex(c4)
+250 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 0 1
+249 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 2 1
+248 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 4 1
+247 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 6 1
+246 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 8 1
+CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
+DELETE FROM test.backup_info;
+LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
+SELECT @the_backup_id:=backup_id FROM test.backup_info;
+@the_backup_id:=backup_id
+<the_backup_id>
+DROP TABLE test.backup_info;
+DROP TABLE test.t1;
+DROP TABLE test.t2;
+DROP TABLE test.t3;
+DROP TABLE test.t4;
+DROP TABLE test.t5;
+DROP TABLE test.t6;
ALTER TABLESPACE table_space1
DROP DATAFILE './table_space1/datafile.dat'
ENGINE = NDB;
+ALTER TABLESPACE table_space2
+DROP DATAFILE './table_space2/datafile.dat'
+ENGINE = NDB;
DROP TABLESPACE table_space1
ENGINE = NDB;
+DROP TABLESPACE table_space2
+ENGINE = NDB;
DROP LOGFILE GROUP log_group1
ENGINE =NDB;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
+ `c2` varchar(150) NOT NULL,
+ `c3` int(11) NOT NULL,
+ `c4` bit(1) NOT NULL,
+ PRIMARY KEY (`pk1`,`c3`)
+) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 4 */
+SHOW CREATE TABLE test.t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
+ `c2` text NOT NULL,
+ `c3` int(11) NOT NULL,
+ `c4` bit(1) NOT NULL,
+ PRIMARY KEY (`pk1`,`c3`)
+) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (c3) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
+SHOW CREATE TABLE test.t3;
+Table Create Table
+t3 CREATE TABLE `t3` (
+ `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
+ `c2` varchar(202) NOT NULL,
+ `c3` int(11) NOT NULL,
+ `c4` bit(1) NOT NULL,
+ PRIMARY KEY (`pk1`,`c3`)
+) TABLESPACE table_space2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (c3) (PARTITION x1 VALUES LESS THAN (105) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (720) ENGINE = ndbcluster) */
+SHOW CREATE TABLE test.t4;
+Table Create Table
+t4 CREATE TABLE `t4` (
+ `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
+ `c2` varchar(180) NOT NULL,
+ `c3` int(11) NOT NULL,
+ `c4` bit(1) NOT NULL,
+ PRIMARY KEY (`pk1`,`c3`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY HASH (c3) PARTITIONS 2 */
+SHOW CREATE TABLE test.t5;
+Table Create Table
+t5 CREATE TABLE `t5` (
+ `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
+ `c2` text NOT NULL,
+ `c3` int(11) NOT NULL,
+ `c4` bit(1) NOT NULL,
+ PRIMARY KEY (`pk1`,`c3`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (pk1) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
+SHOW CREATE TABLE test.t6;
+Table Create Table
+t6 CREATE TABLE `t6` (
+ `pk1` mediumint(9) NOT NULL AUTO_INCREMENT,
+ `c2` varchar(220) NOT NULL,
+ `c3` int(11) NOT NULL,
+ `c4` bit(1) NOT NULL,
+ PRIMARY KEY (`pk1`,`c3`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */
+SELECT * FROM information_schema.partitions WHERE table_name= 't1';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
+NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t1 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t1 p2 NULL 3 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t1 p3 NULL 4 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+SELECT * FROM information_schema.partitions WHERE table_name= 't2';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
+NULL test t2 p0 NULL 1 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t2 p1 NULL 2 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+SELECT * FROM information_schema.partitions WHERE table_name= 't3';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
+NULL test t3 x1 NULL 1 NULL RANGE NULL c3 NULL 105 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t3 x2 NULL 2 NULL RANGE NULL c3 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t3 x3 NULL 3 NULL RANGE NULL c3 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+SELECT * FROM information_schema.partitions WHERE table_name= 't4';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
+NULL test t4 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t4 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+SELECT * FROM information_schema.partitions WHERE table_name= 't5';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
+NULL test t5 p0 NULL 1 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t5 p1 NULL 2 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+SELECT * FROM information_schema.partitions WHERE table_name= 't6';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
+NULL test t6 x1 NULL 1 NULL RANGE NULL pk1 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+NULL test t6 x2 NULL 2 NULL RANGE NULL pk1 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default 0 default
+SELECT COUNT(*) FROM test.t1;
+COUNT(*)
+250
+SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5;
+pk1 c2 c3 hex(c4)
+250 Sweden, Texas 2 0
+249 Sweden, Texas 4 0
+248 Sweden, Texas 6 0
+247 Sweden, Texas 8 0
+246 Sweden, Texas 10 0
+SELECT COUNT(*) FROM test.t2;
+COUNT(*)
+250
+SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5;
+pk1 c2 c3 hex(c4)
+250 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 1 1
+249 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 3 1
+248 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 5 1
+247 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 7 1
+246 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 9 1
+SELECT COUNT(*) FROM test.t3;
+COUNT(*)
+250
+SELECT pk1, c2, c3, hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5;
+pk1 c2 c3 hex(c4)
+250 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 0 1
+249 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 2 1
+248 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 4 1
+247 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 6 1
+246 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 8 1
+SELECT COUNT(*) FROM test.t4;
+COUNT(*)
+250
+SELECT pk1, c2, c3, hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5;
+pk1 c2 c3 hex(c4)
+250 Sweden, Texas 2 0
+249 Sweden, Texas 4 0
+248 Sweden, Texas 6 0
+247 Sweden, Texas 8 0
+246 Sweden, Texas 10 0
+SELECT COUNT(*) FROM test.t5;
+COUNT(*)
+250
+SELECT pk1, c2, c3, hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5;
+pk1 c2 c3 hex(c4)
+250 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 1 1
+249 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 3 1
+248 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 5 1
+247 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 7 1
+246 Sweden, Texas, ITALY, Kyle, JO, JBM,TU 9 1
+SELECT COUNT(*) FROM test.t6;
+COUNT(*)
+250
+SELECT pk1, c2, c3, hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5;
+pk1 c2 c3 hex(c4)
+250 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 0 1
+249 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 2 1
+248 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 4 1
+247 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 6 1
+246 TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU 8 1
+DROP TABLE test.t1;
+DROP TABLE test.t2;
+DROP TABLE test.t3;
+DROP TABLE test.t4;
+DROP TABLE test.t5;
+DROP TABLE test.t6;
+ALTER TABLESPACE table_space1 DROP DATAFILE './table_space1/datafile.dat' ENGINE=NDB;
+ALTER TABLESPACE table_space2 DROP DATAFILE './table_space2/datafile.dat' ENGINE=NDB;
+DROP TABLESPACE table_space1 ENGINE = NDB;
+DROP TABLESPACE table_space2 ENGINE = NDB;
+DROP LOGFILE GROUP log_group1 ENGINE = NDB;
diff --git a/mysql-test/r/ndb_dd_ddl.result b/mysql-test/r/ndb_dd_ddl.result
index 47b95214024..eea80090768 100644
--- a/mysql-test/r/ndb_dd_ddl.result
+++ b/mysql-test/r/ndb_dd_ddl.result
@@ -4,12 +4,12 @@ CREATE DATABASE mysqltest;
**** Begin Duplicate Statement Testing ****
CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
-INITIAL_SIZE 16M
+INITIAL_SIZE 1M
UNDO_BUFFER_SIZE = 1M
ENGINE=NDB;
CREATE LOGFILE GROUP lg2
ADD UNDOFILE 'undofile2.dat'
-INITIAL_SIZE 16M
+INITIAL_SIZE 1M
UNDO_BUFFER_SIZE 1M
ENGINE NDB;
ERROR HY000: Failed to create LOGFILE GROUP
@@ -19,35 +19,35 @@ Error 1296 Got error 1514 'Currently there is a limit of one logfile group' from
Error 1515 Failed to create LOGFILE GROUP
CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
-INITIAL_SIZE 16M
+INITIAL_SIZE 1M
UNDO_BUFFER_SIZE = 1M
ENGINE=NDB;
ERROR HY000: Failed to create LOGFILE GROUP
ALTER LOGFILE GROUP lg1
ADD UNDOFILE 'undofile02.dat'
-INITIAL_SIZE 4M ENGINE NDB;
+INITIAL_SIZE 1M ENGINE NDB;
ALTER LOGFILE GROUP lg1
ADD UNDOFILE 'undofile02.dat'
-INITIAL_SIZE 4M ENGINE=NDB;
+INITIAL_SIZE 1M ENGINE=NDB;
ERROR HY000: Failed to alter: CREATE UNDOFILE
CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
-INITIAL_SIZE 12M
+INITIAL_SIZE 1M
ENGINE NDB;
CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
-INITIAL_SIZE 12M
+INITIAL_SIZE 1M
ENGINE NDB;
ERROR HY000: Failed to create TABLESPACE
ALTER TABLESPACE ts1
ADD DATAFILE 'datafile2.dat'
-INITIAL_SIZE 12M
+INITIAL_SIZE 1M
ENGINE=NDB;
ALTER TABLESPACE ts1
ADD DATAFILE 'datafile2.dat'
-INITIAL_SIZE 12M
+INITIAL_SIZE 1M
ENGINE=NDB;
ERROR HY000: Failed to alter: CREATE DATAFILE
CREATE TABLE mysqltest.t1
@@ -94,20 +94,20 @@ DROP DATABASE IF EXISTS mysqltest;
**** Begin Statment CaSe Testing ****
creaTE LOgfilE GrOuP lg1
adD undoFILE 'undofile.dat'
-initiAL_siZE 16M
+initiAL_siZE 1M
UnDo_BuFfEr_SiZe = 1M
ENGInE=NDb;
altER LOgFiLE GrOUp lg1
AdD UnDOfILe 'uNdOfiLe02.daT'
-INItIAl_SIzE 4M ENgINE nDB;
+INItIAl_SIzE 1M ENgINE nDB;
CrEAtE TABLEspaCE ts1
ADD DATAfilE 'datafile.dat'
UsE LoGFiLE GRoUP lg1
-INITiaL_SizE 12M
+INITiaL_SizE 1M
ENGiNe NDb;
AlTeR tAbLeSpAcE ts1
AdD DaTaFiLe 'dAtAfiLe2.daT'
-InItIaL_SiZe 12M
+InItIaL_SiZe 1M
EnGiNe=NDB;
CREATE TABLE t1
(pk1 int not null primary key, b int not null, c int not null)
@@ -129,21 +129,21 @@ EnGiNe=nDb;
**** Begin = And No = Testing ****
CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
-INITIAL_SIZE=16M
+INITIAL_SIZE=1M
UNDO_BUFFER_SIZE=1M
ENGINE=NDB;
ALTER LOGFILE GROUP lg1
ADD UNDOFILE 'undofile02.dat'
-INITIAL_SIZE=4M
+INITIAL_SIZE=1M
ENGINE=NDB;
CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
-INITIAL_SIZE=12M
+INITIAL_SIZE=1M
ENGINE=NDB;
ALTER TABLESPACE ts1
ADD DATAFILE 'datafile2.dat'
-INITIAL_SIZE=12M
+INITIAL_SIZE=1M
ENGINE=NDB;
CREATE TABLE t1
(pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL)
@@ -165,21 +165,21 @@ ENGINE=NDB;
CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
-INITIAL_SIZE 16M
+INITIAL_SIZE 1M
UNDO_BUFFER_SIZE 1M
ENGINE NDB;
ALTER LOGFILE GROUP lg1
ADD UNDOFILE 'undofile02.dat'
-INITIAL_SIZE 4M
+INITIAL_SIZE 1M
ENGINE NDB;
CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
-INITIAL_SIZE 12M
+INITIAL_SIZE 1M
ENGINE NDB;
ALTER TABLESPACE ts1
ADD DATAFILE 'datafile2.dat'
-INITIAL_SIZE 12M
+INITIAL_SIZE 1M
ENGINE NDB;
CREATE TABLE t1
(pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL)
@@ -188,6 +188,19 @@ ENGINE NDB;
CREATE INDEX b_i on t1(b);
CREATE INDEX bc_i on t1(b, c);
DROP TABLE t1;
+CREATE TABLESPACE ts2
+ADD DATAFILE 'datafile3.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 1M
+ENGINE NDB;
+ALTER TABLESPACE ts1
+DROP DATAFILE 'datafile3.dat'
+ENGINE NDB;
+ERROR HY000: Failed to alter: NO SUCH FILE
+ALTER TABLESPACE ts2
+DROP DATAFILE 'datafile2.dat'
+ENGINE NDB;
+ERROR HY000: Failed to alter: NO SUCH FILE
ALTER TABLESPACE ts1
DROP DATAFILE 'datafile2.dat'
ENGINE NDB;
@@ -196,6 +209,27 @@ DROP DATAFILE 'datafile.dat'
ENGINE NDB;
DROP TABLESPACE ts1
ENGINE NDB;
+ALTER TABLESPACE ts2
+DROP DATAFILE 'datafile3.dat'
+ENGINE NDB;
+DROP TABLESPACE ts2
+ENGINE NDB;
DROP LOGFILE GROUP lg1
ENGINE NDB;
**** End = And No = ****
+create table t1 (a int primary key) engine = myisam;
+create logfile group lg1 add undofile 'MYSQLTEST_VARDIR/master-data/test/t1.frm' initial_size 1M undo_buffer_size = 1M engine=ndb;;
+ERROR HY000: Failed to create UNDOFILE
+create logfile group lg1
+add undofile 'undofile.dat'
+initial_size 1M
+undo_buffer_size = 1M
+engine=ndb;
+create tablespace ts1 add datafile 'MYSQLTEST_VARDIR/master-data/test/t1.frm' use logfile group lg1 initial_size 1M engine ndb;;
+ERROR HY000: Failed to create DATAFILE
+drop tablespace ts1
+engine ndb;
+ERROR HY000: Failed to drop TABLESPACE
+drop logfile group lg1
+engine ndb;
+drop table t1;
diff --git a/mysql-test/r/ndb_partition_error.result b/mysql-test/r/ndb_partition_error.result
new file mode 100644
index 00000000000..d86dc382185
--- /dev/null
+++ b/mysql-test/r/ndb_partition_error.result
@@ -0,0 +1,47 @@
+drop table if exists t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b),
+index (a))
+engine = ndb
+partition by range (a)
+partitions 3
+(partition x1 values less than (5) nodegroup 12,
+partition x2 values less than (10) nodegroup 13,
+partition x3 values less than (20) nodegroup 14);
+ERROR HY000: Can't create table 'test.t1' (errno: 140)
+show warnings;
+Level Code Message
+Error 1296 Got error 771 'Given NODEGROUP doesn't exist in this cluster' from NDB
+Error 1005 Can't create table 'test.t1' (errno: 140)
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a))
+engine = ndb
+partition by range (a)
+partitions 3
+(partition x1 values less than (5),
+partition x2 values less than (10),
+partition x3 values less than (20));
+drop table t1;
+CREATE TABLE t1 (id INT) ENGINE=NDB
+PARTITION BY LIST(id)
+(PARTITION p0 VALUES IN (2, 4),
+PARTITION p1 VALUES IN (42, 142));
+INSERT INTO t1 VALUES (2);
+UPDATE t1 SET id=5 WHERE id=2;
+ERROR HY000: Table has no partition for value 5
+DROP TABLE t1;
+create table t1 (a int,b int, c int)
+engine = ndb
+partition by list(a)
+partitions 2
+(partition x123 values in (11, 12),
+partition x234 values in (5, 1));
+insert into t1 values (NULL,1,1);
+ERROR HY000: Table has no partition for value NULL
+drop table t1;
diff --git a/mysql-test/r/ndb_partition_key.result b/mysql-test/r/ndb_partition_key.result
new file mode 100644
index 00000000000..fd793c4c2c7
--- /dev/null
+++ b/mysql-test/r/ndb_partition_key.result
@@ -0,0 +1,199 @@
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a int, b int, c int, d int, PRIMARY KEY(a,b,c))
+ENGINE = NDB
+PARTITION BY KEY (a,b);
+insert into t1 values (1,1,1,1);
+select * from t1;
+a b c d
+1 1 1 1
+update t1 set d = 2 where a = 1 and b = 1 and c = 1;
+select * from t1;
+a b c d
+1 1 1 2
+delete from t1;
+select * from t1;
+a b c d
+drop table t1;
+CREATE TABLE t1 (a int, b int, c int, d int, PRIMARY KEY(a,b))
+ENGINE = NDB
+PARTITION BY KEY (c);
+ERROR HY000: A PRIMARY KEY need to include all fields in the partition function
+CREATE TABLE t1 (a int, b int, c int, PRIMARY KEY(a,b))
+ENGINE = NDB
+PARTITION BY KEY (a);
+insert into t1 values
+(1,1,3),(1,2,3),(1,3,3),(1,4,3),(1,5,3),(1,6,3),
+(1,7,3),(1,8,3),(1,9,3),(1,10,3),(1,11,3),(1,12,3);
+select * from t1 order by b;
+a b c
+1 1 3
+1 2 3
+1 3 3
+1 4 3
+1 5 3
+1 6 3
+1 7 3
+1 8 3
+1 9 3
+1 10 3
+1 11 3
+1 12 3
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b CHAR(10) COLLATE latin1_bin, c INT, d INT,
+PRIMARY KEY (a,b,c) USING HASH)
+ENGINE=NDB
+DEFAULT CHARSET=latin1
+PARTITION BY KEY (b);
+insert into t1 values (1,"a",1,1),(2,"a",1,1),(3,"a",1,1);
+-- t1 --
+
+Fragment type: 5
+K Value: 6
+Min load factor: 78
+Max load factor: 80
+Temporary table: no
+Number of attributes: 4
+Number of primary keys: 3
+Length of frm data: #
+Row Checksum: 1
+Row GCI: 1
+TableStatus: Retrieved
+-- Attributes --
+a Int PRIMARY KEY AT=FIXED ST=MEMORY
+b Char(10;latin1_bin) PRIMARY KEY DISTRIBUTION KEY AT=FIXED ST=MEMORY
+c Int PRIMARY KEY AT=FIXED ST=MEMORY
+d Int NULL AT=FIXED ST=MEMORY
+
+-- Indexes --
+PRIMARY KEY(a, b, c) - UniqueHashIndex
+
+
+NDBT_ProgramExit: 0 - OK
+
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL DEFAULT '0',
+ `b` char(10) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '',
+ `c` int(11) NOT NULL DEFAULT '0',
+ `d` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`,`b`,`c`) USING HASH
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (b) */
+DROP TABLE t1;
+CREATE TABLE t1 (a int not null primary key)
+PARTITION BY KEY(a)
+(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
+drop table t1;
+CREATE TABLE t1 (a int not null primary key);
+ALTER TABLE t1
+PARTITION BY KEY(a)
+(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
+drop table t1;
+create table t1 (a int)
+engine=ndb
+partition by key(a)
+(partition p0, partition p1);
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
+alter table t1 engine=heap;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+) ENGINE=MEMORY DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = MEMORY, PARTITION p1 ENGINE = MEMORY) */
+alter table t1 engine=ndb;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
+alter table t1 engine=heap remove partitioning;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+) ENGINE=MEMORY DEFAULT CHARSET=latin1
+alter table t1 engine=ndb
+partition by key(a)
+(partition p0, partition p1 engine = ndb);
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
+alter table t1
+partition by key (a)
+(partition p0 engine=ndb, partition p1 engine=ndb);
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
+alter table t1 remove partitioning;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+alter table t1
+partition by key(a)
+(partition p0 engine=ndb, partition p1);
+ERROR HY000: The mix of handlers in the partitions is not allowed in this version of MySQL
+alter table t1
+engine=ndb
+partition by key(a)
+(partition p0 engine=ndb, partition p1 engine = ndb);
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY KEY (a) (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster) */
+drop table t1;
+CREATE TABLE t1 (
+c1 MEDIUMINT NOT NULL AUTO_INCREMENT,
+c2 TEXT NOT NULL,
+c3 INT NOT NULL,
+c4 BIT NOT NULL,
+c5 FLOAT,
+c6 VARCHAR(255),
+c7 TIMESTAMP,
+PRIMARY KEY(c1,c3))
+ENGINE=NDB
+PARTITION BY KEY(c3) PARTITIONS 5;
+ALTER TABLE t1 COALESCE PARTITION 4;
+DROP TABLE t1;
+CREATE TABLE t1 (a int primary key)
+ENGINE=NDB
+PARTITION BY KEY(a);
+ALTER TABLE t1 OPTIMIZE PARTITION p0;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+ALTER TABLE t1 CHECK PARTITION p0;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+ALTER TABLE t1 REPAIR PARTITION p0;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+ALTER TABLE t1 ANALYZE PARTITION p0;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+ALTER TABLE t1 REBUILD PARTITION p0;
+ERROR HY000: Table storage engine for 't1' doesn't have this option
+DROP TABLE t1;
+CREATE TABLE t1 (
+c1 MEDIUMINT NOT NULL AUTO_INCREMENT,
+c2 TEXT NOT NULL,
+c3 INT NOT NULL,
+PRIMARY KEY(c1,c3))
+ENGINE=NDB
+PARTITION BY KEY(c3) PARTITIONS 5;
+ALTER TABLE t1 ADD COLUMN c4 INT AFTER c1;
+DROP TABLE t1;
+CREATE TABLE t1 (
+c1 MEDIUMINT NOT NULL AUTO_INCREMENT,
+c2 TEXT NOT NULL,
+c3 INT NOT NULL,
+PRIMARY KEY(c1,c3))
+ENGINE=NDB
+PARTITION BY KEY(c3);
+ALTER TABLE t1 ADD COLUMN c4 INT AFTER c1;
+DROP TABLE t1;
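
The '-- t1 --' dump earlier in this result file (Fragment type, attributes,
indexes) is output from the ndb_desc utility in storage/ndb/tools. A sketch of
producing such a dump by hand; the connect string here is an assumption for
illustration:

  # Hypothetical invocation; -c gives the management-server connect
  # string, -d the database of the table to describe.
  ndb_desc -c localhost:1186 -d test t1
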
diff --git a/mysql-test/r/ndb_partition_list.result b/mysql-test/r/ndb_partition_list.result
new file mode 100644
index 00000000000..ce2574ddcc4
--- /dev/null
+++ b/mysql-test/r/ndb_partition_list.result
@@ -0,0 +1,51 @@
+drop table if exists t1;
+CREATE TABLE t1 ( f_int1 INTEGER NOT NULL, f_int2 INTEGER NOT NULL,
+f_char1 CHAR(10),
+f_char2 CHAR(10), f_charbig VARCHAR(1000),
+PRIMARY KEY (f_int1,f_int2))
+ENGINE = NDB
+PARTITION BY LIST(MOD(f_int1 + f_int2,4))
+(PARTITION part_3 VALUES IN (-3),
+PARTITION part_2 VALUES IN (-2),
+PARTITION part_1 VALUES IN (-1),
+PARTITION part0 VALUES IN (0),
+PARTITION part1 VALUES IN (1),
+PARTITION part2 VALUES IN (2),
+PARTITION part3 VALUES IN (3,4,5));
+INSERT INTO t1 SET f_int1 = -2, f_int2 = 20, f_char1 = '20', f_char2 = '20', f_charbig = '===20===';
+INSERT INTO t1 SET f_int1 = 1, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+INSERT INTO t1 SET f_int1 = 2, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+INSERT INTO t1 SET f_int1 = 3, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+INSERT INTO t1 SET f_int1 = 4, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+INSERT INTO t1 SET f_int1 = 5, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+INSERT INTO t1 SET f_int1 = 20, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+SELECT * FROM t1 ORDER BY f_int1;
+f_int1 f_int2 f_char1 f_char2 f_charbig
+-2 20 20 20 ===20===
+1 1 1 1 ===1===
+2 1 1 1 ===1===
+3 1 1 1 ===1===
+4 1 1 1 ===1===
+5 1 1 1 ===1===
+20 1 1 1 ===1===
+DROP TABLE t1;
+CREATE TABLE t1 ( f_int1 INTEGER, f_int2 INTEGER, f_char1 CHAR(10),
+f_char2 CHAR(10), f_charbig VARCHAR(1000))
+ENGINE = NDB
+PARTITION BY LIST(f_int1)
+(PARTITION part_1 VALUES IN (-1),
+PARTITION part0 VALUES IN (0,1),
+PARTITION part1 VALUES IN (2));
+INSERT INTO t1 SET f_int1 = -1, f_int2 = 20, f_char1 = '20', f_char2 = '20', f_charbig = '===20===';
+INSERT INTO t1 SET f_int1 = 0, f_int2 = 20, f_char1 = '20', f_char2 = '20', f_charbig = '===20===';
+INSERT INTO t1 SET f_int1 = 1, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+INSERT INTO t1 SET f_int1 = 2, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+INSERT INTO t1 SET f_int1 = 20, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+ERROR HY000: Table has no partition for value 20
+SELECT * FROM t1 ORDER BY f_int1;
+f_int1 f_int2 f_char1 f_char2 f_charbig
+-1 20 20 20 ===20===
+0 20 20 20 ===20===
+1 1 1 1 ===1===
+2 1 1 1 ===1===
+DROP TABLE t1;
diff --git a/mysql-test/r/ndb_partition_range.result b/mysql-test/r/ndb_partition_range.result
new file mode 100644
index 00000000000..9cc9aa2cda9
--- /dev/null
+++ b/mysql-test/r/ndb_partition_range.result
@@ -0,0 +1,263 @@
+drop table if exists t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b),
+index (a))
+engine = ndb
+partition by range (a)
+partitions 3
+(partition x1 values less than (5),
+partition x2 values less than (10),
+partition x3 values less than (20));
+INSERT into t1 values (1, 1, 1);
+INSERT into t1 values (6, 1, 1);
+INSERT into t1 values (10, 1, 1);
+INSERT into t1 values (15, 1, 1);
+select * from information_schema.partitions where table_name= 't1';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
+NULL test t1 x1 NULL 1 NULL RANGE NULL a NULL 5 0 0 0 # 0 0 # # NULL NULL default 0 default
+NULL test t1 x2 NULL 2 NULL RANGE NULL a NULL 10 0 0 0 # 0 0 # # NULL NULL default 0 default
+NULL test t1 x3 NULL 3 NULL RANGE NULL a NULL 20 0 0 0 # 0 0 # # NULL NULL default 0 default
+select * from t1 order by a;
+a b c
+1 1 1
+6 1 1
+10 1 1
+15 1 1
+select * from t1 where a=1 order by a;
+a b c
+1 1 1
+select * from t1 where a=15 and b=1 order by a;
+a b c
+15 1 1
+select * from t1 where a=21 and b=1 order by a;
+a b c
+select * from t1 where a=21 order by a;
+a b c
+select * from t1 where a in (1,6,10,21) order by a;
+a b c
+1 1 1
+6 1 1
+10 1 1
+select * from t1 where b=1 and a in (1,6,10,21) order by a;
+a b c
+1 1 1
+6 1 1
+10 1 1
+drop table t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(b),
+unique (a))
+engine = ndb
+partition by range (b)
+partitions 3
+(partition x1 values less than (5),
+partition x2 values less than (10),
+partition x3 values less than (20));
+INSERT into t1 values (1, 1, 1);
+INSERT into t1 values (2, 6, 1);
+INSERT into t1 values (3, 10, 1);
+INSERT into t1 values (4, 15, 1);
+select * from t1 order by a;
+a b c
+1 1 1
+2 6 1
+3 10 1
+4 15 1
+UPDATE t1 set a = 5 WHERE b = 15;
+select * from t1 order by a;
+a b c
+1 1 1
+2 6 1
+3 10 1
+5 15 1
+UPDATE t1 set a = 6 WHERE a = 5;
+select * from t1 order by a;
+a b c
+1 1 1
+2 6 1
+3 10 1
+6 15 1
+select * from t1 where b=1 order by b;
+a b c
+1 1 1
+select * from t1 where b=15 and a=1 order by b;
+a b c
+select * from t1 where b=21 and a=1 order by b;
+a b c
+select * from t1 where b=21 order by b;
+a b c
+select * from t1 where b in (1,6,10,21) order by b;
+a b c
+1 1 1
+2 6 1
+3 10 1
+select * from t1 where a in (1,2,5,6) order by b;
+a b c
+1 1 1
+2 6 1
+6 15 1
+select * from t1 where a=1 and b in (1,6,10,21) order by b;
+a b c
+1 1 1
+DELETE from t1 WHERE b = 6;
+DELETE from t1 WHERE a = 6;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` int(11) NOT NULL,
+ `c` int(11) NOT NULL,
+ PRIMARY KEY (`b`),
+ UNIQUE KEY `a` (`a`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (b) (PARTITION x1 VALUES LESS THAN (5) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (10) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (20) ENGINE = ndbcluster) */
+drop table t1;
+CREATE TABLE t1
+(id MEDIUMINT NOT NULL,
+b1 BIT(8),
+vc VARCHAR(255),
+bc CHAR(255),
+d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0,
+total BIGINT UNSIGNED,
+y YEAR,
+t DATE) ENGINE=NDB
+PARTITION BY RANGE (YEAR(t))
+(PARTITION p0 VALUES LESS THAN (1901),
+PARTITION p1 VALUES LESS THAN (1946),
+PARTITION p2 VALUES LESS THAN (1966),
+PARTITION p3 VALUES LESS THAN (1986),
+PARTITION p4 VALUES LESS THAN (2005),
+PARTITION p5 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (0,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
+SELECT * FROM t1;
+id b1 vc bc d f total y t
+0 NULL NULL NULL NULL NULL NULL NULL NULL
+ALTER TABLE t1 ENGINE=MYISAM;
+SELECT * FROM t1;
+id b1 vc bc d f total y t
+0 NULL NULL NULL NULL NULL NULL NULL NULL
+DROP TABLE t1;
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE=1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE 'datafile.dat'
+ USE LOGFILE GROUP lg1
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (
+a1 INT,
+a2 TEXT NOT NULL,
+a3 BIT NOT NULL,
+a4 DECIMAL(8,3),
+a5 INT NOT NULL,
+a6 INT,
+PRIMARY KEY(a1))
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB
+PARTITION BY LIST (a1)
+(PARTITION p0 VALUES IN (1,2,3,4,5),
+PARTITION p1 VALUES IN (6,7,8,9, 10),
+PARTITION p2 VALUES IN (11, 12, 13, 14, 15));
+ALTER TABLE test.t1 DROP COLUMN a6;
+ALTER TABLE test.t1 ADD COLUMN a6 VARCHAR(255);
+SELECT COUNT(*) FROM test.t1;
+COUNT(*)
+15
+ALTER TABLE test.t1 DROP COLUMN a4;
+SELECT COUNT(*) FROM test.t1;
+COUNT(*)
+15
+DROP TABLE t1;
+CREATE TABLE test.t1 (
+a1 INT,
+a2 TEXT NOT NULL,
+a3 BIT NOT NULL,
+a4 DECIMAL(8,3),
+a5 INT NOT NULL,
+a6 VARCHAR(255),
+PRIMARY KEY(a1))
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB
+PARTITION BY HASH(a1)
+PARTITIONS 4;
+SELECT COUNT(*) FROM test.t1;
+COUNT(*)
+15
+ALTER TABLE test.t1 DROP COLUMN a4;
+SELECT COUNT(*) FROM test.t1;
+COUNT(*)
+15
+DROP TABLE t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE 'datafile.dat'
+ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg1 ENGINE=NDB;
+CREATE TABLE t1
+(id MEDIUMINT NOT NULL,
+b1 BIT(8),
+vc VARCHAR(255),
+bc CHAR(255),
+d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0,
+total BIGINT UNSIGNED,
+y YEAR,
+t DATE) ENGINE=NDB
+PARTITION BY LIST(id)
+(PARTITION p0 VALUES IN (2, 4),
+PARTITION p1 VALUES IN (42, 142));
+INSERT INTO t1 VALUES (2,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
+SELECT * FROM t1;
+id b1 vc bc d f total y t
+2 NULL NULL NULL NULL NULL NULL NULL NULL
+ALTER TABLE t1 ADD PARTITION
+(PARTITION p2 VALUES IN (412));
+SELECT * FROM t1;
+id b1 vc bc d f total y t
+2 NULL NULL NULL NULL NULL NULL NULL NULL
+DROP TABLE t1;
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null)
+partition by list(a)
+partitions 2
+(partition x123 values in (1,5,6),
+partition x234 values in (4,7,8));
+INSERT into t1 VALUES (5,1,1);
+select * from t1;
+a b c
+5 1 1
+UPDATE t1 SET a=8 WHERE a=5 AND b=1;
+select * from t1;
+a b c
+8 1 1
+drop table t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) engine=ndb
+PARTITION BY RANGE(f1)
+( PARTITION part1 VALUES LESS THAN (2),
+PARTITION part2 VALUES LESS THAN (1000));
+INSERT INTO t1 VALUES(1, '---1---');
+INSERT INTO t1 VALUES(2, '---2---');
+select * from t1 order by f1;
+f1 f2
+1 ---1---
+2 ---2---
+UPDATE t1 SET f1 = f1 + 4 WHERE f1 = 2;
+select * from t1 order by f1;
+f1 f2
+1 ---1---
+6 ---2---
+UPDATE t1 SET f1 = f1 + 4 WHERE f1 = 1;
+select * from t1 order by f1;
+f1 f2
+5 ---1---
+6 ---2---
+drop table t1;
diff --git a/mysql-test/r/partition_innodb.result b/mysql-test/r/partition_innodb.result
index 5b1221dd64c..5e5931fdbf8 100644
--- a/mysql-test/r/partition_innodb.result
+++ b/mysql-test/r/partition_innodb.result
@@ -92,3 +92,18 @@ DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t0_aux;
DROP TABLE IF EXISTS t0_definition;
DROP TABLE IF EXISTS t0_template;
+create table t1 (id varchar(64) primary key) engine=innodb
+partition by key(id) partitions 5;
+insert into t1 values ('a');
+insert into t1 values ('aa');
+insert into t1 values ('aaa');
+select * from t1 where id = 'a';
+id
+a
+select * from t1 where id = 'aa';
+id
+aa
+select * from t1 where id = 'aaa';
+id
+aaa
+drop table t1;
diff --git a/mysql-test/r/partition_pruning.result b/mysql-test/r/partition_pruning.result
index 950a83c6d4f..ee294242bf7 100644
--- a/mysql-test/r/partition_pruning.result
+++ b/mysql-test/r/partition_pruning.result
@@ -31,7 +31,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 p0,p1 ALL NULL NULL NULL NULL 3 Using where
explain partitions select * from t2 where a=1 and b=1;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 p0 ALL NULL NULL NULL NULL 3 Using where
+1 SIMPLE t2 p0 ALL NULL NULL NULL NULL 2 Using where
create table t3 (
a int
)
diff --git a/mysql-test/r/renamedb.result b/mysql-test/r/renamedb.result
index 26ae42e72aa..b22322fbe8d 100644
--- a/mysql-test/r/renamedb.result
+++ b/mysql-test/r/renamedb.result
@@ -27,3 +27,7 @@ a
2
3
drop database testdb2;
+create database testdb1;
+rename database testdb1 to testdb1;
+ERROR HY000: Can't create database 'testdb1'; database exists
+drop database testdb1;
diff --git a/mysql-test/r/rpl_ndb_dd_partitions.result b/mysql-test/r/rpl_ndb_dd_partitions.result
new file mode 100644
index 00000000000..ece6b84c227
--- /dev/null
+++ b/mysql-test/r/rpl_ndb_dd_partitions.result
@@ -0,0 +1,726 @@
+stop slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+reset master;
+reset slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+start slave;
+--- Doing pre test cleanup ---
+DROP TABLE IF EXISTS t1;
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+ALTER LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile02.dat'
+INITIAL_SIZE = 4M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE 'datafile.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 12M
+ENGINE NDB;
+ALTER TABLESPACE ts1
+ADD DATAFILE 'datafile02.dat'
+INITIAL_SIZE = 4M
+ENGINE=NDB;
+--- Start test 2 partition RANGE testing --
+--- Do setup --
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE)
+TABLESPACE ts1 STORAGE DISK
+ENGINE=NDB
+PARTITION BY RANGE (YEAR(t))
+(PARTITION p0 VALUES LESS THAN (1901),
+PARTITION p1 VALUES LESS THAN (1946),
+PARTITION p2 VALUES LESS THAN (1966),
+PARTITION p3 VALUES LESS THAN (1986),
+PARTITION p4 VALUES LESS THAN (2005),
+PARTITION p5 VALUES LESS THAN MAXVALUE);
+--- Show table on master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = ndbcluster, PARTITION p1 VALUES LESS THAN (1946) ENGINE = ndbcluster, PARTITION p2 VALUES LESS THAN (1966) ENGINE = ndbcluster, PARTITION p3 VALUES LESS THAN (1986) ENGINE = ndbcluster, PARTITION p4 VALUES LESS THAN (2005) ENGINE = ndbcluster, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = ndbcluster)
+--- Show table on slave --
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = ndbcluster, PARTITION p1 VALUES LESS THAN (1946) ENGINE = ndbcluster, PARTITION p2 VALUES LESS THAN (1966) ENGINE = ndbcluster, PARTITION p3 VALUES LESS THAN (1986) ENGINE = ndbcluster, PARTITION p4 VALUES LESS THAN (2005) ENGINE = ndbcluster, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = ndbcluster)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = ndbcluster, PARTITION p1 VALUES LESS THAN (1946) ENGINE = ndbcluster, PARTITION p2 VALUES LESS THAN (1966) ENGINE = ndbcluster, PARTITION p3 VALUES LESS THAN (1986) ENGINE = ndbcluster, PARTITION p4 VALUES LESS THAN (2005) ENGINE = ndbcluster, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = ndbcluster)
+--- Make sure that our tables on slave are still same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (YEAR(t)) (PARTITION p0 VALUES LESS THAN (1901) ENGINE = ndbcluster, PARTITION p1 VALUES LESS THAN (1946) ENGINE = ndbcluster, PARTITION p2 VALUES LESS THAN (1966) ENGINE = ndbcluster, PARTITION p3 VALUES LESS THAN (1986) ENGINE = ndbcluster, PARTITION p4 VALUES LESS THAN (2005) ENGINE = ndbcluster, PARTITION p5 VALUES LESS THAN MAXVALUE ENGINE = ndbcluster)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 2 partition RANGE testing ---
+--- Do Cleanup ---
+DROP TABLE IF EXISTS t1;
+--- Start test 3 partition LIST testing ---
+--- Do setup ---
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE)
+TABLESPACE ts1 STORAGE DISK
+ENGINE=NDB
+PARTITION BY LIST(id)
+(PARTITION p0 VALUES IN (2, 4),
+PARTITION p1 VALUES IN (42, 142));
+--- Test 3 Alter to add partition ---
+ALTER TABLE t1 ADD PARTITION (PARTITION p2 VALUES IN (412));
+--- Show table on master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = ndbcluster, PARTITION p1 VALUES IN (42,142) ENGINE = ndbcluster, PARTITION p2 VALUES IN (412) ENGINE = ndbcluster)
+--- Show table on slave ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = ndbcluster, PARTITION p1 VALUES IN (42,142) ENGINE = ndbcluster, PARTITION p2 VALUES IN (412) ENGINE = ndbcluster)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = ndbcluster, PARTITION p1 VALUES IN (42,142) ENGINE = ndbcluster, PARTITION p2 VALUES IN (412) ENGINE = ndbcluster)
+--- Make sure that our tables on slave are still same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY LIST (id) (PARTITION p0 VALUES IN (2,4) ENGINE = ndbcluster, PARTITION p1 VALUES IN (42,142) ENGINE = ndbcluster, PARTITION p2 VALUES IN (412) ENGINE = ndbcluster)
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 3 partition LIST testing ---
+--- Do Cleanup --
+DROP TABLE IF EXISTS t1;
+--- Start test 4 partition HASH testing ---
+--- Do setup ---
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE)
+TABLESPACE ts1 STORAGE DISK
+ENGINE=NDB
+PARTITION BY HASH( YEAR(t) )
+PARTITIONS 4;
+--- show that tables have been created correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+--- Make sure that our tables on slave are still same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH ( YEAR(t)) PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 4 partition HASH testing ---
+--- Do Cleanup --
+DROP TABLE IF EXISTS t1;
+--- Start test 5 partition by key testing ---
+--- Create Table Section ---
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+y YEAR, t DATE,PRIMARY KEY(id))
+TABLESPACE ts1 STORAGE DISK
+ENGINE=NDB
+PARTITION BY KEY()
+PARTITIONS 4;
+--- Show that tables on master are ndbcluster tables ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Show that tables on slave ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned DEFAULT NULL,
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(id, total);
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Make sure that our tables on slave are still right type ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(63) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- Check that simple Alter statements are replicated correctly ---
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+--- Show the new improved table on the master ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Make sure that our tables on slave are still same engine ---
+--- and that the alter statements replicated correctly ---
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` mediumint(9) NOT NULL,
+ `b1` bit(8) DEFAULT NULL,
+ `vc` varchar(255) DEFAULT NULL,
+ `bc` char(63) DEFAULT NULL,
+ `d` decimal(10,4) DEFAULT '0.0000',
+ `f` float DEFAULT '0',
+ `total` bigint(20) unsigned NOT NULL DEFAULT '0',
+ `y` year(4) DEFAULT NULL,
+ `t` date DEFAULT NULL,
+ PRIMARY KEY (`id`,`total`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () PARTITIONS 4
+--- Perform basic operation on master ---
+--- and ensure replicated correctly ---
+"--- Insert into t1 --" as "";
+--- Select from t1 on master ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Select from t1 on slave ---
+select id,hex(b1),vc,bc,d,f,total,y,t from t1 order by id;
+id hex(b1) vc bc d f total y t
+2 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1965-11-14
+4 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1985-11-14
+42 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1905-11-14
+142 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 1995-11-14
+412 1 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2005-11-14
+--- Update t1 on master --
+UPDATE t1 SET b1 = 0, t="2006-02-22" WHERE id = 412;
+--- Check the update on master ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Check Update on slave ---
+SELECT id,hex(b1),vc,bc,d,f,total,y,t FROM t1 WHERE id = 412;
+id hex(b1) vc bc d f total y t
+412 0 Testing MySQL databases is a cool Must make it bug free for the customer 654321.4321 15.21 0 1965 2006-02-22
+--- Remove a record from t1 on master ---
+DELETE FROM t1 WHERE id = 42;
+--- Show current count on master for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+--- Show current count on slave for t1 ---
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+DELETE FROM t1;
+--- End test 5 key partition testing ---
+--- Do Cleanup ---
+DROP TABLE IF EXISTS t1;
+alter tablespace ts1
+drop datafile 'datafile.dat'
+engine=ndb;
+alter tablespace ts1
+drop datafile 'datafile02.dat'
+engine=ndb;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg1 ENGINE=NDB;
diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result
index bb6c2c3beee..7237cf11fc0 100644
--- a/mysql-test/r/show_check.result
+++ b/mysql-test/r/show_check.result
@@ -623,4 +623,10 @@ delete from mysql.db where user='mysqltest_4';
delete from mysql.tables_priv where user='mysqltest_4';
flush privileges;
drop database mysqltest;
+show full plugin;
+show warnings;
+Level Code Message
+Warning 1541 The syntax 'SHOW PLUGIN' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW PLUGINS' instead.
+show plugin;
+show plugins;
End of 5.1 tests
diff --git a/mysql-test/r/view_grant.result b/mysql-test/r/view_grant.result
index ad6028b8a8c..5f91127d284 100644
--- a/mysql-test/r/view_grant.result
+++ b/mysql-test/r/view_grant.result
@@ -649,3 +649,15 @@ DROP VIEW mysqltest_db1.view1;
DROP TABLE mysqltest_db1.t1;
DROP SCHEMA mysqltest_db1;
DROP USER mysqltest_db1@localhost;
+CREATE DATABASE test1;
+CREATE DATABASE test2;
+CREATE TABLE test1.t0 (a VARCHAR(20));
+CREATE TABLE test2.t1 (a VARCHAR(20));
+CREATE VIEW test2.t3 AS SELECT * FROM test1.t0;
+CREATE OR REPLACE VIEW test.v1 AS
+SELECT ta.a AS col1, tb.a AS col2 FROM test2.t3 ta, test2.t1 tb;
+DROP VIEW test.v1;
+DROP VIEW test2.t3;
+DROP TABLE test2.t1, test1.t0;
+DROP DATABASE test2;
+DROP DATABASE test1;
diff --git a/mysql-test/t/events_stress.test b/mysql-test/t/events_stress.test
index a3a683a0a1a..6546bce3a76 100644
--- a/mysql-test/t/events_stress.test
+++ b/mysql-test/t/events_stress.test
@@ -1,5 +1,6 @@
# Can't test with embedded server that doesn't support grants
--- source include/not_embedded.inc
+--source include/not_embedded.inc
+--source include/big_test.inc
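+# (big_test.inc marks this as a long-running stress test, so it should only
+# run when the suite is started with --big-test.)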
CREATE DATABASE IF NOT EXISTS events_test;
#
diff --git a/mysql-test/t/insert_select.test b/mysql-test/t/insert_select.test
index 0b9a0e86ba9..05953a1fd49 100644
--- a/mysql-test/t/insert_select.test
+++ b/mysql-test/t/insert_select.test
@@ -238,3 +238,12 @@ INSERT INTO t1 SELECT a + 2 FROM t1 LIMIT 1;
DROP TABLE t1;
# End of 4.1 tests
+
+#
+# Bug #18080: INSERT ... SELECT ... JOIN results in ambiguous field list error
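+# The USING (y) join leaves a single y column in the result, so the
+# INSERT ... SELECT below is expected to succeed without that error.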
+#
+CREATE TABLE t1 (x int, y int);
+CREATE TABLE t2 (z int, y int);
+CREATE TABLE t3 (a int, b int);
+INSERT INTO t3 (SELECT x, y FROM t1 JOIN t2 USING (y) WHERE z = 1);
+DROP TABLE IF EXISTS t1,t2,t3;
diff --git a/mysql-test/t/key.test b/mysql-test/t/key.test
index f95cee8e8eb..34a66febcac 100644
--- a/mysql-test/t/key.test
+++ b/mysql-test/t/key.test
@@ -326,6 +326,17 @@ alter table t1 add key (c1,c1,c2);
drop table t1;
#
+# Bug#11228: DESC shows arbitrary column as "PRI"
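+# Without an explicit primary key, the first UNIQUE index over NOT NULL
+# columns (i1idx) serves as the primary key, so DESC should report i1 as
+# "PRI" and i2 as "UNI".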
+#
+create table t1 (
+ i1 INT NOT NULL,
+ i2 INT NOT NULL,
+ UNIQUE i1idx (i1),
+ UNIQUE i2idx (i2));
+desc t1;
+drop table t1;
+
+#
# Bug#12565 - ERROR 1034 when running simple UPDATE or DELETE
# on large MyISAM table
#
diff --git a/mysql-test/t/log_state.test b/mysql-test/t/log_state.test
index d3dec841dc1..41fbd068dce 100644
--- a/mysql-test/t/log_state.test
+++ b/mysql-test/t/log_state.test
@@ -11,11 +11,11 @@ Variable_name = 'general_log' or Variable_name = 'slow_query_log';
flush logs;
set global general_log= ON;
create table t1(f1 int);
---replace_column 1 TIMESTAMP 3 #
+--replace_column 1 TIMESTAMP 2 USER_HOST 3 #
select * from mysql.general_log;
set global general_log= OFF;
drop table t1;
---replace_column 1 TIMESTAMP 3 #
+--replace_column 1 TIMESTAMP 2 USER_HOST 3 #
select * from mysql.general_log;
set global general_log= ON;
flush logs;
@@ -27,7 +27,7 @@ connect (con1,localhost,root,,);
connection con1;
set session long_query_time=1;
select sleep(2);
---replace_column 1 TIMESTAMP, 3 USER_HOST, 4 QUERY_TIME
+--replace_column 1 TIMESTAMP 2 USER_HOST 3 QUERY_TIME
select * from mysql.slow_log;
connection default;
@@ -35,7 +35,7 @@ set global slow_query_log= ON;
connection con1;
set session long_query_time=1;
select sleep(2);
---replace_column 1 TIMESTAMP, 3 USER_HOST, 4 QUERY_TIME
+--replace_column 1 TIMESTAMP 2 USER_HOST 3 QUERY_TIME
select * from mysql.slow_log;
disconnect con1;
connection default;
@@ -54,11 +54,11 @@ set global general_log= ON;
truncate table mysql.general_log;
create table t1(f1 int);
drop table t1;
---replace_column 1 TIMESTAMP 3 #
+--replace_column 1 TIMESTAMP 2 USER_HOST 3 #
select * from mysql.general_log;
set global general_log= OFF;
truncate table mysql.general_log;
---replace_column 1 TIMESTAMP 3 #
+--replace_column 1 TIMESTAMP 2 USER_HOST 3 #
select * from mysql.general_log;
set global general_log= ON;
show global variables
@@ -109,14 +109,14 @@ truncate table mysql.general_log;
show variables like 'log_output';
set global general_log=ON;
create table t1(f1 int);
---replace_column 1 TIMESTAMP 3 #
+--replace_column 1 TIMESTAMP 2 USER_HOST 3 #
select * from mysql.general_log;
set global general_log=OFF;
set global log_output="FILE,TABLE";
show variables like 'log_output';
set global general_log=ON;
drop table t1;
---replace_column 1 TIMESTAMP 3 #
+--replace_column 1 TIMESTAMP 2 USER_HOST 3 #
select * from mysql.general_log;
--enable_ps_protocol
diff --git a/mysql-test/t/ndb_blob_partition.test b/mysql-test/t/ndb_blob_partition.test
new file mode 100644
index 00000000000..a3948cc9491
--- /dev/null
+++ b/mysql-test/t/ndb_blob_partition.test
@@ -0,0 +1,93 @@
+--source include/have_ndb.inc
+-- source include/not_embedded.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+#
+# Minimal NDB blobs test with range partitions.
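+# (NDB stores blob/text data in separate part tables tied to the main row,
+# so the inserts, updates, and deletes below exercise blob handling in each
+# range partition.)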
+#
+
+create table t1 (
+ a mediumint not null,
+ b text not null,
+ c int not null,
+ d longblob,
+ primary key using hash (a,c),
+ unique key (c)
+)
+ engine=ndb
+ partition by range (c)
+ partitions 3
+ ( partition p1 values less than (200),
+ partition p2 values less than (300),
+ partition p3 values less than (400));
+
+--disable_query_log
+sleep 1;
+
+# length 61
+set @s0 = 'rggurloniukyehuxdbfkkyzlceixzrehqhvxvxbpwizzvjzpucqmzrhzxzfau';
+set @s1 = 'ykyymbzqgqlcjhlhmyqelfoaaohvtbekvifukdtnvcrrjveevfakxarxexomz';
+set @s2 = 'dbnfqyzgtqxalcrwtfsqabknvtfcbpoonxsjiqvmhnfikxxhcgoexlkoezvah';
+
+set @v1 = repeat(@s0, 100); -- 1d42dd9090cf78314a06665d4ea938c35cc760f4
+set @v2 = repeat(@s1, 200); -- 10d3c783026b310218d10b7188da96a2401648c6
+set @v3 = repeat(@s2, 300); -- a33549d9844092289a58ac348dd59f09fc28406a
+set @v4 = repeat(@s0, 400); -- daa61c6de36a0526f0d47dc29d6b9de7e6d2630c
+set @v5 = repeat(@s1, 500); -- 70fc9a7d08beebc522258bfb02000a30c77a8f1d
+set @v6 = repeat(@s2, 600); -- 090565c580809efed3d369481a4bbb168b20713e
+set @v7 = repeat(@s0, 700); -- 1e0070bec426871a46291de27b9bd6e4255ab4e5
+set @v8 = repeat(@s1, 800); -- acbaba01bc2e682f015f40e79d9cbe475db3002e
+set @v9 = repeat(@s2, 900); -- 9ee30d99162574f79c66ae95cdf132dcf9cbc259
+--enable_query_log
+
+# -- insert --
+insert into t1 values (1, @v1, 101, @v2);
+insert into t1 values (1, @v2, 102, @v3);
+insert into t1 values (1, @v3, 103, @v4);
+insert into t1 values (2, @v4, 201, @v5);
+insert into t1 values (2, @v5, 202, @v6);
+insert into t1 values (2, @v6, 203, @v7);
+insert into t1 values (3, @v7, 301, @v8);
+insert into t1 values (3, @v8, 302, @v9);
+insert into t1 values (3, @v9, 303, @v1);
+select a, sha1(b), c, sha1(d) from t1 order by a;
+
+# -- pk read --
+select a, sha1(b), c, sha1(d) from t1 where a = 1 and c = 101;
+select a, sha1(b), c, sha1(d) from t1 where a = 2 and c = 201;
+select a, sha1(b), c, sha1(d) from t1 where a = 3 and c = 301;
+
+# -- pk update --
+update t1 set b = @v3, d = @v4 where a = 1 and c = 102;
+update t1 set b = @v6, d = @v7 where a = 2 and c = 202;
+update t1 set b = @v9, d = @v1 where a = 3 and c = 302;
+select a, sha1(b), c, sha1(d) from t1 order by a;
+
+# -- hash index update --
+update t1 set b = @v4, d = @v5 where c = 103;
+update t1 set b = @v7, d = @v8 where c = 203;
+update t1 set b = @v1, d = @v2 where c = 303;
+select a, sha1(b), c, sha1(d) from t1 order by a;
+
+# -- full scan update --
+update t1 set b = @v5, d = @v6;
+select a, sha1(b), c, sha1(d) from t1 order by a;
+
+# -- range scan update
+update t1 set b = @v1, d = @v2 where 100 < c and c < 200;
+update t1 set b = @v4, d = @v5 where 200 < c and c < 300;
+update t1 set b = @v7, d = @v8 where 300 < c and c < 400;
+select a, sha1(b), c, sha1(d) from t1 order by a;
+
+# -- delete --
+delete from t1 where a = 1 and c = 101;
+delete from t1 where c = 102;
+# delete from t1 where c < 300; # XXX coredump
+delete from t1;
+select a, sha1(b), c, sha1(d) from t1 order by a;
+
+# -- clean up --
+drop table t1;
diff --git a/mysql-test/t/ndb_dd_backuprestore.test b/mysql-test/t/ndb_dd_backuprestore.test
index 1508cccb46d..be6d73e27b4 100644
--- a/mysql-test/t/ndb_dd_backuprestore.test
+++ b/mysql-test/t/ndb_dd_backuprestore.test
@@ -159,15 +159,185 @@ DROP TABLE test.t1;
DROP TABLE test.t2;
DROP TABLE test.t3;
DROP TABLE test.t4;
+###################### Adding partition #################################
+-- echo **** Test 3 Adding partition Test backup and restore ****
+
+CREATE TABLESPACE table_space2
+ADD DATAFILE './table_space2/datafile.dat'
+USE LOGFILE GROUP log_group1
+INITIAL_SIZE 12M
+ENGINE NDB;
+
+CREATE TABLE test.t1 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(150) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space1 STORAGE DISK ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 4;
+
+CREATE TABLE test.t4 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(180) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY HASH(c3) PARTITIONS 2;
+
+CREATE TABLE test.t2 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY KEY(c3) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
+
+CREATE TABLE test.t5 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 TEXT NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY KEY(pk1) (PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
+
+CREATE TABLE test.t3 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(202) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))TABLESPACE table_space2 STORAGE DISK ENGINE=NDB PARTITION BY RANGE (c3) PARTITIONS 3 (PARTITION x1 VALUES LESS THAN (105), PARTITION x2 VALUES LESS THAN (333), PARTITION x3 VALUES LESS THAN (720));
+
+CREATE TABLE test.t6 (pk1 MEDIUMINT NOT NULL AUTO_INCREMENT, c2 VARCHAR(220) NOT NULL, c3 INT NOT NULL, c4 BIT NOT NULL, PRIMARY KEY(pk1,c3))ENGINE=NDB PARTITION BY RANGE (pk1) PARTITIONS 2 (PARTITION x1 VALUES LESS THAN (333), PARTITION x2 VALUES LESS THAN (720));
+
+SHOW CREATE TABLE test.t1;
+
+SHOW CREATE TABLE test.t2;
+
+SHOW CREATE TABLE test.t3;
+
+SHOW CREATE TABLE test.t4;
+
+SHOW CREATE TABLE test.t5;
+
+SHOW CREATE TABLE test.t6;
+
+SELECT * FROM information_schema.partitions WHERE table_name= 't1';
+
+SELECT * FROM information_schema.partitions WHERE table_name= 't2';
+
+SELECT * FROM information_schema.partitions WHERE table_name= 't3';
+
+SELECT * FROM information_schema.partitions WHERE table_name= 't4';
+
+SELECT * FROM information_schema.partitions WHERE table_name= 't5';
+
+SELECT * FROM information_schema.partitions WHERE table_name= 't6';
+
+
+let $j= 500;
+--disable_query_log
+while ($j)
+{
+ eval INSERT INTO test.t1 VALUES (NULL, "Sweden, Texas", $j, b'0');
+ eval INSERT INTO test.t4 VALUES (NULL, "Sweden, Texas", $j, b'0');
+ dec $j;
+ eval INSERT INTO test.t2 VALUES (NULL, "Sweden, Texas, ITALY, Kyle, JO, JBM,TU", $j, b'1');
+ eval INSERT INTO test.t5 VALUES (NULL, "Sweden, Texas, ITALY, Kyle, JO, JBM,TU", $j, b'1');
+ dec $j;
+ eval INSERT INTO test.t3 VALUES (NULL, "TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU", $j, b'1');
+ eval INSERT INTO test.t6 VALUES (NULL, "TEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXASTEXAS, ITALY, Kyle, JO, JBM,TU", $j, b'1');
+}
+--enable_query_log
+
+SELECT COUNT(*) FROM test.t1;
+
+SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5;
+
+SELECT COUNT(*) FROM test.t2;
+
+SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5;
+
+SELECT COUNT(*) FROM test.t3;
+
+SELECT pk1, c2, c3, hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5;
+
+SELECT COUNT(*) FROM test.t4;
+
+SELECT pk1, c2, c3, hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5;
+
+SELECT COUNT(*) FROM test.t5;
+
+SELECT pk1, c2, c3, hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5;
+
+SELECT COUNT(*) FROM test.t6;
+
+SELECT pk1, c2, c3, hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5;
+
+-- source include/ndb_backup.inc
+
+DROP TABLE test.t1;
+DROP TABLE test.t2;
+DROP TABLE test.t3;
+DROP TABLE test.t4;
+DROP TABLE test.t5;
+DROP TABLE test.t6;
ALTER TABLESPACE table_space1
DROP DATAFILE './table_space1/datafile.dat'
ENGINE = NDB;
+ALTER TABLESPACE table_space2
+DROP DATAFILE './table_space2/datafile.dat'
+ENGINE = NDB;
+
DROP TABLESPACE table_space1
ENGINE = NDB;
+DROP TABLESPACE table_space2
+ENGINE = NDB;
+
DROP LOGFILE GROUP log_group1
ENGINE =NDB;
+-- source include/ndb_restore_master.inc
+
+
+SHOW CREATE TABLE test.t1;
+
+SHOW CREATE TABLE test.t2;
+
+SHOW CREATE TABLE test.t3;
+
+SHOW CREATE TABLE test.t4;
+
+SHOW CREATE TABLE test.t5;
+
+SHOW CREATE TABLE test.t6;
+
+SELECT * FROM information_schema.partitions WHERE table_name= 't1';
+
+SELECT * FROM information_schema.partitions WHERE table_name= 't2';
+
+SELECT * FROM information_schema.partitions WHERE table_name= 't3';
+
+SELECT * FROM information_schema.partitions WHERE table_name= 't4';
+
+SELECT * FROM information_schema.partitions WHERE table_name= 't5';
+
+SELECT * FROM information_schema.partitions WHERE table_name= 't6';
+
+SELECT COUNT(*) FROM test.t1;
+
+SELECT pk1, c2, c3, hex(c4) FROM test.t1 ORDER BY c3 LIMIT 5;
+
+SELECT COUNT(*) FROM test.t2;
+
+SELECT pk1, c2, c3, hex(c4) FROM test.t2 ORDER BY c3 LIMIT 5;
+
+SELECT COUNT(*) FROM test.t3;
+
+SELECT pk1, c2, c3, hex(c4) FROM test.t3 ORDER BY c3 LIMIT 5;
+
+SELECT COUNT(*) FROM test.t4;
+
+SELECT pk1, c2, c3, hex(c4) FROM test.t4 ORDER BY c3 LIMIT 5;
+
+SELECT COUNT(*) FROM test.t5;
+
+SELECT pk1, c2, c3, hex(c4) FROM test.t5 ORDER BY c3 LIMIT 5;
+
+SELECT COUNT(*) FROM test.t6;
+
+SELECT pk1, c2, c3, hex(c4) FROM test.t6 ORDER BY c3 LIMIT 5;
+
+# Cleanup
+
+DROP TABLE test.t1;
+DROP TABLE test.t2;
+DROP TABLE test.t3;
+DROP TABLE test.t4;
+DROP TABLE test.t5;
+DROP TABLE test.t6;
+
+ALTER TABLESPACE table_space1 DROP DATAFILE './table_space1/datafile.dat' ENGINE=NDB;
+
+ALTER TABLESPACE table_space2 DROP DATAFILE './table_space2/datafile.dat' ENGINE=NDB;
+
+DROP TABLESPACE table_space1 ENGINE = NDB;
+
+DROP TABLESPACE table_space2 ENGINE = NDB;
+
+DROP LOGFILE GROUP log_group1 ENGINE = NDB;
+
#End 5.1 test case
+
+
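The backup/restore cycle exercised above reduces to one pattern: create the disk-data objects and partitioned tables, load rows, back up, drop everything, restore, and re-verify. A minimal sketch of that cycle (object names here are illustrative, not taken from the test):

    CREATE LOGFILE GROUP lg_demo
      ADD UNDOFILE 'undo_demo.dat' INITIAL_SIZE 16M ENGINE NDB;
    CREATE TABLESPACE ts_demo
      ADD DATAFILE 'data_demo.dat' USE LOGFILE GROUP lg_demo
      INITIAL_SIZE 12M ENGINE NDB;
    CREATE TABLE test.demo (pk INT PRIMARY KEY, v INT)
      TABLESPACE ts_demo STORAGE DISK ENGINE NDB
      PARTITION BY HASH (pk) PARTITIONS 4;
    -- back up (ndb_backup.inc), drop the table and the file groups, then
    -- restore (ndb_restore_master.inc) and re-check SHOW CREATE TABLE,
    -- information_schema.partitions and the SELECT COUNT(*) output.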
diff --git a/mysql-test/t/ndb_dd_ddl.test b/mysql-test/t/ndb_dd_ddl.test
index 339f7bc2f22..1a470d52c6c 100644
--- a/mysql-test/t/ndb_dd_ddl.test
+++ b/mysql-test/t/ndb_dd_ddl.test
@@ -40,7 +40,7 @@ CREATE DATABASE mysqltest;
CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
-INITIAL_SIZE 16M
+INITIAL_SIZE 1M
UNDO_BUFFER_SIZE = 1M
ENGINE=NDB;
@@ -48,7 +48,7 @@ ENGINE=NDB;
--error ER_CREATE_FILEGROUP_FAILED
CREATE LOGFILE GROUP lg2
ADD UNDOFILE 'undofile2.dat'
-INITIAL_SIZE 16M
+INITIAL_SIZE 1M
UNDO_BUFFER_SIZE 1M
ENGINE NDB;
SHOW WARNINGS;
@@ -56,42 +56,42 @@ SHOW WARNINGS;
--error ER_CREATE_FILEGROUP_FAILED
CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
-INITIAL_SIZE 16M
+INITIAL_SIZE 1M
UNDO_BUFFER_SIZE = 1M
ENGINE=NDB;
ALTER LOGFILE GROUP lg1
ADD UNDOFILE 'undofile02.dat'
-INITIAL_SIZE 4M ENGINE NDB;
+INITIAL_SIZE 1M ENGINE NDB;
--error ER_ALTER_FILEGROUP_FAILED
ALTER LOGFILE GROUP lg1
ADD UNDOFILE 'undofile02.dat'
-INITIAL_SIZE 4M ENGINE=NDB;
+INITIAL_SIZE 1M ENGINE=NDB;
CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
-INITIAL_SIZE 12M
+INITIAL_SIZE 1M
ENGINE NDB;
--error ER_CREATE_FILEGROUP_FAILED
CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
-INITIAL_SIZE 12M
+INITIAL_SIZE 1M
ENGINE NDB;
ALTER TABLESPACE ts1
ADD DATAFILE 'datafile2.dat'
-INITIAL_SIZE 12M
+INITIAL_SIZE 1M
ENGINE=NDB;
--error ER_ALTER_FILEGROUP_FAILED
ALTER TABLESPACE ts1
ADD DATAFILE 'datafile2.dat'
-INITIAL_SIZE 12M
+INITIAL_SIZE 1M
ENGINE=NDB;
CREATE TABLE mysqltest.t1
@@ -155,23 +155,23 @@ DROP DATABASE IF EXISTS mysqltest;
creaTE LOgfilE GrOuP lg1
adD undoFILE 'undofile.dat'
-initiAL_siZE 16M
+initiAL_siZE 1M
UnDo_BuFfEr_SiZe = 1M
ENGInE=NDb;
altER LOgFiLE GrOUp lg1
AdD UnDOfILe 'uNdOfiLe02.daT'
-INItIAl_SIzE 4M ENgINE nDB;
+INItIAl_SIzE 1M ENgINE nDB;
CrEAtE TABLEspaCE ts1
ADD DATAfilE 'datafile.dat'
UsE LoGFiLE GRoUP lg1
-INITiaL_SizE 12M
+INITiaL_SizE 1M
ENGiNe NDb;
AlTeR tAbLeSpAcE ts1
AdD DaTaFiLe 'dAtAfiLe2.daT'
-InItIaL_SiZe 12M
+InItIaL_SiZe 1M
EnGiNe=NDB;
CREATE TABLE t1
@@ -203,24 +203,24 @@ EnGiNe=nDb;
CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
-INITIAL_SIZE=16M
+INITIAL_SIZE=1M
UNDO_BUFFER_SIZE=1M
ENGINE=NDB;
ALTER LOGFILE GROUP lg1
ADD UNDOFILE 'undofile02.dat'
-INITIAL_SIZE=4M
+INITIAL_SIZE=1M
ENGINE=NDB;
CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
-INITIAL_SIZE=12M
+INITIAL_SIZE=1M
ENGINE=NDB;
ALTER TABLESPACE ts1
ADD DATAFILE 'datafile2.dat'
-INITIAL_SIZE=12M
+INITIAL_SIZE=1M
ENGINE=NDB;
CREATE TABLE t1
@@ -250,24 +250,24 @@ ENGINE=NDB;
CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
-INITIAL_SIZE 16M
+INITIAL_SIZE 1M
UNDO_BUFFER_SIZE 1M
ENGINE NDB;
ALTER LOGFILE GROUP lg1
ADD UNDOFILE 'undofile02.dat'
-INITIAL_SIZE 4M
+INITIAL_SIZE 1M
ENGINE NDB;
CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
-INITIAL_SIZE 12M
+INITIAL_SIZE 1M
ENGINE NDB;
ALTER TABLESPACE ts1
ADD DATAFILE 'datafile2.dat'
-INITIAL_SIZE 12M
+INITIAL_SIZE 1M
ENGINE NDB;
CREATE TABLE t1
@@ -280,6 +280,25 @@ CREATE INDEX bc_i on t1(b, c);
DROP TABLE t1;
+# bug#20053
+
+CREATE TABLESPACE ts2
+ADD DATAFILE 'datafile3.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 1M
+ENGINE NDB;
+
+--error ER_ALTER_FILEGROUP_FAILED
+ALTER TABLESPACE ts1
+DROP DATAFILE 'datafile3.dat'
+ENGINE NDB;
+
+--error ER_ALTER_FILEGROUP_FAILED
+ALTER TABLESPACE ts2
+DROP DATAFILE 'datafile2.dat'
+ENGINE NDB;
+# bug#20053
+
ALTER TABLESPACE ts1
DROP DATAFILE 'datafile2.dat'
ENGINE NDB;
@@ -291,11 +310,45 @@ ENGINE NDB;
DROP TABLESPACE ts1
ENGINE NDB;
+ALTER TABLESPACE ts2
+DROP DATAFILE 'datafile3.dat'
+ENGINE NDB;
+
+DROP TABLESPACE ts2
+ENGINE NDB;
+
DROP LOGFILE GROUP lg1
ENGINE NDB;
--echo **** End = And No = ****
############ End = And No = ##################
-# End 5.1 test
+###
+#
+# bug#16341
+create table t1 (a int primary key) engine = myisam;
+
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+--error ER_CREATE_FILEGROUP_FAILED
+--eval create logfile group lg1 add undofile '$MYSQLTEST_VARDIR/master-data/test/t1.frm' initial_size 1M undo_buffer_size = 1M engine=ndb;
+
+create logfile group lg1
+add undofile 'undofile.dat'
+initial_size 1M
+undo_buffer_size = 1M
+engine=ndb;
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+--error ER_CREATE_FILEGROUP_FAILED
+--eval create tablespace ts1 add datafile '$MYSQLTEST_VARDIR/master-data/test/t1.frm' use logfile group lg1 initial_size 1M engine ndb;
+
+--error ER_DROP_FILEGROUP_FAILED
+drop tablespace ts1
+engine ndb;
+
+drop logfile group lg1
+engine ndb;
+
+drop table t1;
+
+# End 5.1 test
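The bug#20053 block added above pins down datafile ownership: a datafile may only be dropped through the tablespace it was added to, so both cross-tablespace ALTERs must fail with ER_ALTER_FILEGROUP_FAILED. The essence, with the names used in the test:

    ALTER TABLESPACE ts1 DROP DATAFILE 'datafile3.dat' ENGINE NDB; -- fails: owned by ts2
    ALTER TABLESPACE ts2 DROP DATAFILE 'datafile2.dat' ENGINE NDB; -- fails: owned by ts1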
diff --git a/mysql-test/t/ndb_partition_error.test b/mysql-test/t/ndb_partition_error.test
new file mode 100644
index 00000000000..06581f1270f
--- /dev/null
+++ b/mysql-test/t/ndb_partition_error.test
@@ -0,0 +1,71 @@
+-- source include/have_ndb.inc
+#--disable_abort_on_error
+#
+# Simple test for the partition storage engine
+# Focuses on range partitioning tests
+#
+#-- source include/have_partition.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+#
+# Partition by range, generate node group error
+#
+--error 1005
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b),
+index (a))
+engine = ndb
+partition by range (a)
+partitions 3
+(partition x1 values less than (5) nodegroup 12,
+ partition x2 values less than (10) nodegroup 13,
+ partition x3 values less than (20) nodegroup 14);
+show warnings;
+
+#
+# Partition by range, create normal valid table
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a))
+engine = ndb
+partition by range (a)
+partitions 3
+(partition x1 values less than (5),
+ partition x2 values less than (10),
+ partition x3 values less than (20));
+
+drop table t1;
+
+#
+# Bug #17763 mysqld cores with list partitioning if update to missing partition
+#
+CREATE TABLE t1 (id INT) ENGINE=NDB
+ PARTITION BY LIST(id)
+ (PARTITION p0 VALUES IN (2, 4),
+ PARTITION p1 VALUES IN (42, 142));
+INSERT INTO t1 VALUES (2);
+--error ER_NO_PARTITION_FOR_GIVEN_VALUE
+UPDATE t1 SET id=5 WHERE id=2;
+DROP TABLE t1;
+
+#
+# NULL for LIST partition
+#
+create table t1 (a int,b int, c int)
+engine = ndb
+partition by list(a)
+partitions 2
+(partition x123 values in (11, 12),
+ partition x234 values in (5, 1));
+--error ER_NO_PARTITION_FOR_GIVEN_VALUE
+insert into t1 values (NULL,1,1);
+drop table t1;
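The final case shows a NULL partitioning value being rejected because no partition lists it. For contrast, a sketch of a variant that would accept the row, assuming the usual LIST syntax for NULL (p_null is a hypothetical name, not from the test):

    CREATE TABLE t1 (a INT, b INT, c INT)
    ENGINE = NDB
    PARTITION BY LIST (a)
    (PARTITION p_null VALUES IN (NULL),
     PARTITION x123 VALUES IN (11, 12));
    INSERT INTO t1 VALUES (NULL, 1, 1); -- lands in p_null instead of failing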
diff --git a/mysql-test/t/ndb_partition_key.test b/mysql-test/t/ndb_partition_key.test
new file mode 100644
index 00000000000..fb0581eb6f6
--- /dev/null
+++ b/mysql-test/t/ndb_partition_key.test
@@ -0,0 +1,198 @@
+-- source include/have_ndb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+#
+# Basic syntax test
+#
+
+# Support for partition key verified
+CREATE TABLE t1 (a int, b int, c int, d int, PRIMARY KEY(a,b,c))
+ ENGINE = NDB
+ PARTITION BY KEY (a,b);
+
+insert into t1 values (1,1,1,1);
+select * from t1;
+update t1 set d = 2 where a = 1 and b = 1 and c = 1;
+select * from t1;
+delete from t1;
+select * from t1;
+
+drop table t1;
+
+# a partition key is only supported on columns that are part of the primary key
+--error ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF
+CREATE TABLE t1 (a int, b int, c int, d int, PRIMARY KEY(a,b))
+ ENGINE = NDB
+ PARTITION BY KEY (c);
+
+CREATE TABLE t1 (a int, b int, c int, PRIMARY KEY(a,b))
+ ENGINE = NDB
+ PARTITION BY KEY (a);
+
+insert into t1 values
+ (1,1,3),(1,2,3),(1,3,3),(1,4,3),(1,5,3),(1,6,3),
+ (1,7,3),(1,8,3),(1,9,3),(1,10,3),(1,11,3),(1,12,3);
+
+select * from t1 order by b;
+
+DROP TABLE t1;
+
+#
+# Test partition and char support
+#
+
+CREATE TABLE t1 (a INT, b CHAR(10) COLLATE latin1_bin, c INT, d INT,
+ PRIMARY KEY (a,b,c) USING HASH)
+ ENGINE=NDB
+ DEFAULT CHARSET=latin1
+ PARTITION BY KEY (b);
+
+insert into t1 values (1,"a",1,1),(2,"a",1,1),(3,"a",1,1);
+
+# should show only one attribute with DISTRIBUTION KEY
+--exec $NDB_TOOLS_DIR/ndb_desc --no-defaults -d test t1 | sed 's/Version: [0-9]*//' | sed 's/\(Length of frm data: \)[0-9]*/\1#/'
+
+#
+# Test that explicit partition info is not shown in SHOW CREATE TABLE;
+# the result should not contain (PARTITION p0 ... etc), since that is what
+# shows up in mysqldump and we don't want that info there
+#
+show create table t1;
+
+DROP TABLE t1;
+
+#
+# Bug #13155: Problem in Create Table using SHOW CREATE TABLE syntax
+#
+CREATE TABLE t1 (a int not null primary key)
+PARTITION BY KEY(a)
+(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
+
+drop table t1;
+
+CREATE TABLE t1 (a int not null primary key);
+ALTER TABLE t1
+PARTITION BY KEY(a)
+(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
+
+drop table t1;
+
+#
+# Bug #17754 Improper handling of removal of partitioning in ALTER TABLE
+# Also added a number of general test cases in the same area
+#
+create table t1 (a int)
+engine=ndb
+partition by key(a)
+(partition p0, partition p1);
+show create table t1;
+
+alter table t1 engine=heap;
+show create table t1;
+
+alter table t1 engine=ndb;
+show create table t1;
+
+alter table t1 engine=heap remove partitioning;
+show create table t1;
+
+alter table t1 engine=ndb
+partition by key(a)
+(partition p0, partition p1 engine = ndb);
+show create table t1;
+
+alter table t1
+partition by key (a)
+(partition p0 engine=ndb, partition p1 engine=ndb);
+show create table t1;
+
+alter table t1 remove partitioning;
+show create table t1;
+
+--error ER_MIX_HANDLER_ERROR
+alter table t1
+partition by key(a)
+(partition p0 engine=ndb, partition p1);
+
+alter table t1
+engine=ndb
+partition by key(a)
+(partition p0 engine=ndb, partition p1 engine = ndb);
+show create table t1;
+
+drop table t1;
+
+#
+# BUG 16810 Out of memory when coalesce partition
+#
+CREATE TABLE t1 (
+ c1 MEDIUMINT NOT NULL AUTO_INCREMENT,
+ c2 TEXT NOT NULL,
+ c3 INT NOT NULL,
+ c4 BIT NOT NULL,
+ c5 FLOAT,
+ c6 VARCHAR(255),
+ c7 TIMESTAMP,
+ PRIMARY KEY(c1,c3))
+ ENGINE=NDB
+ PARTITION BY KEY(c3) PARTITIONS 5;
+
+let $j= 11;
+--disable_query_log
+while ($j)
+{
+ eval INSERT INTO t1 VALUES (NULL, "Tested Remotely from Texas, USA", $j,
+b'0',
+ $j.00,"By JBM $j","2006-01-26");
+ dec $j;
+}
+--enable_query_log
+ALTER TABLE t1 COALESCE PARTITION 4;
+
+DROP TABLE t1;
+
+#
+# Bug 16822: OPTIMIZE TABLE hangs test
+#
+CREATE TABLE t1 (a int primary key)
+ENGINE=NDB
+PARTITION BY KEY(a);
+--error 1031
+ALTER TABLE t1 OPTIMIZE PARTITION p0;
+--error 1031
+ALTER TABLE t1 CHECK PARTITION p0;
+--error 1031
+ALTER TABLE t1 REPAIR PARTITION p0;
+--error 1031
+ALTER TABLE t1 ANALYZE PARTITION p0;
+--error 1031
+ALTER TABLE t1 REBUILD PARTITION p0;
+DROP TABLE t1;
+
+#
+# BUG 16806: ALTER TABLE fails
+#
+CREATE TABLE t1 (
+ c1 MEDIUMINT NOT NULL AUTO_INCREMENT,
+ c2 TEXT NOT NULL,
+ c3 INT NOT NULL,
+ PRIMARY KEY(c1,c3))
+ ENGINE=NDB
+ PARTITION BY KEY(c3) PARTITIONS 5;
+
+ALTER TABLE t1 ADD COLUMN c4 INT AFTER c1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ c1 MEDIUMINT NOT NULL AUTO_INCREMENT,
+ c2 TEXT NOT NULL,
+ c3 INT NOT NULL,
+ PRIMARY KEY(c1,c3))
+ ENGINE=NDB
+ PARTITION BY KEY(c3);
+
+ALTER TABLE t1 ADD COLUMN c4 INT AFTER c1;
+DROP TABLE t1;
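The ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF case near the top of this file encodes the rule that every column in the partitioning key must be part of every unique key on the table. A minimal accepted/rejected pair (sketch, table names hypothetical):

    -- Rejected: c is not part of the primary key
    CREATE TABLE bad (a INT, b INT, c INT, PRIMARY KEY (a, b))
      ENGINE = NDB PARTITION BY KEY (c);
    -- Accepted: a is part of the primary key
    CREATE TABLE ok (a INT, b INT, c INT, PRIMARY KEY (a, b))
      ENGINE = NDB PARTITION BY KEY (a);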
diff --git a/mysql-test/t/ndb_partition_list.test b/mysql-test/t/ndb_partition_list.test
new file mode 100644
index 00000000000..2ad37b8768c
--- /dev/null
+++ b/mysql-test/t/ndb_partition_list.test
@@ -0,0 +1,64 @@
+--source include/have_ndb.inc
+#
+# Simple test for the partition storage engine
+# Focuses on range partitioning tests
+#
+#-- source include/have_partition.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+#
+# Partition by list, basic
+#
+
+CREATE TABLE t1 ( f_int1 INTEGER NOT NULL, f_int2 INTEGER NOT NULL,
+ f_char1 CHAR(10),
+ f_char2 CHAR(10), f_charbig VARCHAR(1000),
+PRIMARY KEY (f_int1,f_int2))
+ENGINE = NDB
+PARTITION BY LIST(MOD(f_int1 + f_int2,4))
+(PARTITION part_3 VALUES IN (-3),
+ PARTITION part_2 VALUES IN (-2),
+ PARTITION part_1 VALUES IN (-1),
+ PARTITION part0 VALUES IN (0),
+ PARTITION part1 VALUES IN (1),
+ PARTITION part2 VALUES IN (2),
+ PARTITION part3 VALUES IN (3,4,5));
+
+INSERT INTO t1 SET f_int1 = -2, f_int2 = 20, f_char1 = '20', f_char2 = '20', f_charbig = '===20===';
+INSERT INTO t1 SET f_int1 = 1, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+INSERT INTO t1 SET f_int1 = 2, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+INSERT INTO t1 SET f_int1 = 3, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+INSERT INTO t1 SET f_int1 = 4, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+INSERT INTO t1 SET f_int1 = 5, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+INSERT INTO t1 SET f_int1 = 20, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+
+SELECT * FROM t1 ORDER BY f_int1;
+
+DROP TABLE t1;
+
+#
+# Partition by list, no pk
+#
+
+CREATE TABLE t1 ( f_int1 INTEGER, f_int2 INTEGER, f_char1 CHAR(10),
+ f_char2 CHAR(10), f_charbig VARCHAR(1000))
+ENGINE = NDB
+PARTITION BY LIST(f_int1)
+(PARTITION part_1 VALUES IN (-1),
+ PARTITION part0 VALUES IN (0,1),
+ PARTITION part1 VALUES IN (2));
+
+INSERT INTO t1 SET f_int1 = -1, f_int2 = 20, f_char1 = '20', f_char2 = '20', f_charbig = '===20===';
+INSERT INTO t1 SET f_int1 = 0, f_int2 = 20, f_char1 = '20', f_char2 = '20', f_charbig = '===20===';
+INSERT INTO t1 SET f_int1 = 1, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+INSERT INTO t1 SET f_int1 = 2, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+--error ER_NO_PARTITION_FOR_GIVEN_VALUE
+INSERT INTO t1 SET f_int1 = 20, f_int2 = 1, f_char1 = '1', f_char2 = '1', f_charbig = '===1===';
+
+SELECT * FROM t1 ORDER BY f_int1;
+
+DROP TABLE t1;
+
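In the first table above the target partition is the value of MOD(f_int1 + f_int2, 4), which is negative when the sum is negative — hence the part_3..part3 spread. For example:

    SELECT MOD(-2 + 20, 4); -- 2, so the (-2, 20) row is stored in part2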
diff --git a/mysql-test/t/ndb_partition_range.test b/mysql-test/t/ndb_partition_range.test
new file mode 100644
index 00000000000..981467d4055
--- /dev/null
+++ b/mysql-test/t/ndb_partition_range.test
@@ -0,0 +1,260 @@
+-- source include/have_ndb.inc
+#--disable_abort_on_error
+#
+# Simple test for the partition storage engine
+# Focuses on range partitioning tests
+#
+#-- source include/have_partition.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+#
+# Partition by range, basic
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(a,b),
+index (a))
+engine = ndb
+partition by range (a)
+partitions 3
+(partition x1 values less than (5),
+ partition x2 values less than (10),
+ partition x3 values less than (20));
+
+# Simple insert and verify test
+INSERT into t1 values (1, 1, 1);
+INSERT into t1 values (6, 1, 1);
+INSERT into t1 values (10, 1, 1);
+INSERT into t1 values (15, 1, 1);
+
+--replace_column 16 # 19 # 20 #
+select * from information_schema.partitions where table_name= 't1';
+
+select * from t1 order by a;
+
+select * from t1 where a=1 order by a;
+select * from t1 where a=15 and b=1 order by a;
+select * from t1 where a=21 and b=1 order by a;
+select * from t1 where a=21 order by a;
+select * from t1 where a in (1,6,10,21) order by a;
+select * from t1 where b=1 and a in (1,6,10,21) order by a;
+
+drop table t1;
+
+#
+# Partition by range, basic
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null,
+primary key(b),
+unique (a))
+engine = ndb
+partition by range (b)
+partitions 3
+(partition x1 values less than (5),
+ partition x2 values less than (10),
+ partition x3 values less than (20));
+
+# Simple insert and verify test
+INSERT into t1 values (1, 1, 1);
+INSERT into t1 values (2, 6, 1);
+INSERT into t1 values (3, 10, 1);
+INSERT into t1 values (4, 15, 1);
+
+select * from t1 order by a;
+UPDATE t1 set a = 5 WHERE b = 15;
+select * from t1 order by a;
+UPDATE t1 set a = 6 WHERE a = 5;
+select * from t1 order by a;
+
+select * from t1 where b=1 order by b;
+select * from t1 where b=15 and a=1 order by b;
+select * from t1 where b=21 and a=1 order by b;
+select * from t1 where b=21 order by b;
+select * from t1 where b in (1,6,10,21) order by b;
+select * from t1 where a in (1,2,5,6) order by b;
+select * from t1 where a=1 and b in (1,6,10,21) order by b;
+
+DELETE from t1 WHERE b = 6;
+DELETE from t1 WHERE a = 6;
+
+#
+# Test that explicit partition info _is_ shown in SHOW CREATE TABLE;
+# the result _should_ contain (PARTITION x1 ... etc)
+#
+show create table t1;
+
+drop table t1;
+
+#
+# Bug #17499, #17687
+# Alter partitioned NDB table causes mysqld to core
+#
+
+CREATE TABLE t1
+ (id MEDIUMINT NOT NULL,
+ b1 BIT(8),
+ vc VARCHAR(255),
+ bc CHAR(255),
+ d DECIMAL(10,4) DEFAULT 0,
+ f FLOAT DEFAULT 0,
+ total BIGINT UNSIGNED,
+ y YEAR,
+ t DATE) ENGINE=NDB
+ PARTITION BY RANGE (YEAR(t))
+ (PARTITION p0 VALUES LESS THAN (1901),
+ PARTITION p1 VALUES LESS THAN (1946),
+ PARTITION p2 VALUES LESS THAN (1966),
+ PARTITION p3 VALUES LESS THAN (1986),
+ PARTITION p4 VALUES LESS THAN (2005),
+ PARTITION p5 VALUES LESS THAN MAXVALUE);
+
+INSERT INTO t1 VALUES (0,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
+SELECT * FROM t1;
+ALTER TABLE t1 ENGINE=MYISAM;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE LOGFILE GROUP lg1
+ ADD UNDOFILE 'undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE=1M
+ ENGINE=NDB;
+
+CREATE TABLESPACE ts1
+ ADD DATAFILE 'datafile.dat'
+ USE LOGFILE GROUP lg1
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+CREATE TABLE test.t1 (
+ a1 INT,
+ a2 TEXT NOT NULL,
+ a3 BIT NOT NULL,
+ a4 DECIMAL(8,3),
+ a5 INT NOT NULL,
+ a6 INT,
+ PRIMARY KEY(a1))
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB
+ PARTITION BY LIST (a1)
+ (PARTITION p0 VALUES IN (1,2,3,4,5),
+ PARTITION p1 VALUES IN (6,7,8,9, 10),
+ PARTITION p2 VALUES IN (11, 12, 13, 14, 15));
+
+# Alter the table directly without any statements in between
+ALTER TABLE test.t1 DROP COLUMN a6;
+ALTER TABLE test.t1 ADD COLUMN a6 VARCHAR(255);
+
+let $j= 15;
+--disable_query_log
+while ($j)
+{
+eval INSERT INTO test.t1 VALUES ($j, "Tested Remotely from Texas, USA",
+b'1',$j.00,$j+1,"By NIK $j");
+dec $j;
+}
+--enable_query_log
+SELECT COUNT(*) FROM test.t1;
+
+ALTER TABLE test.t1 DROP COLUMN a4;
+SELECT COUNT(*) FROM test.t1;
+
+DROP TABLE t1;
+
+CREATE TABLE test.t1 (
+ a1 INT,
+ a2 TEXT NOT NULL,
+ a3 BIT NOT NULL,
+ a4 DECIMAL(8,3),
+ a5 INT NOT NULL,
+ a6 VARCHAR(255),
+ PRIMARY KEY(a1))
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB
+ PARTITION BY HASH(a1)
+ PARTITIONS 4;
+
+let $j= 15;
+--disable_query_log
+while ($j)
+{
+eval INSERT INTO test.t1 VALUES ($j, "Tested Remotely from Texas, USA",
+b'1',$j.00,$j+1,"By NIK $j");
+dec $j;
+}
+--enable_query_log
+SELECT COUNT(*) FROM test.t1;
+
+ALTER TABLE test.t1 DROP COLUMN a4;
+SELECT COUNT(*) FROM test.t1;
+
+DROP TABLE t1;
+
+ALTER TABLESPACE ts1
+ DROP DATAFILE 'datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg1 ENGINE=NDB;
+
+
+#
+# Bug #17701 ALTER TABLE t1 ADD PARTITION for PARTITION BY LIST hangs test
+#
+
+CREATE TABLE t1
+ (id MEDIUMINT NOT NULL,
+ b1 BIT(8),
+ vc VARCHAR(255),
+ bc CHAR(255),
+ d DECIMAL(10,4) DEFAULT 0,
+ f FLOAT DEFAULT 0,
+ total BIGINT UNSIGNED,
+ y YEAR,
+ t DATE) ENGINE=NDB
+ PARTITION BY LIST(id)
+ (PARTITION p0 VALUES IN (2, 4),
+ PARTITION p1 VALUES IN (42, 142));
+
+INSERT INTO t1 VALUES (2,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
+SELECT * FROM t1;
+ALTER TABLE t1 ADD PARTITION
+ (PARTITION p2 VALUES IN (412));
+SELECT * FROM t1;
+DROP TABLE t1;
+
+#
+# Bug #17806 Update on NDB table with list partition causes mysqld to core
+# Bug #16385 Partitions: crash when updating a range partitioned NDB table
+#
+CREATE TABLE t1 (
+a int not null,
+b int not null,
+c int not null)
+partition by list(a)
+partitions 2
+(partition x123 values in (1,5,6),
+ partition x234 values in (4,7,8));
+INSERT into t1 VALUES (5,1,1);
+select * from t1;
+UPDATE t1 SET a=8 WHERE a=5 AND b=1;
+select * from t1;
+drop table t1;
+
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) engine=ndb
+PARTITION BY RANGE(f1)
+( PARTITION part1 VALUES LESS THAN (2),
+PARTITION part2 VALUES LESS THAN (1000));
+INSERT INTO t1 VALUES(1, '---1---');
+INSERT INTO t1 VALUES(2, '---2---');
+select * from t1 order by f1;
+UPDATE t1 SET f1 = f1 + 4 WHERE f1 = 2;
+select * from t1 order by f1;
+UPDATE t1 SET f1 = f1 + 4 WHERE f1 = 1;
+select * from t1 order by f1;
+drop table t1;
diff --git a/mysql-test/t/partition_innodb.test b/mysql-test/t/partition_innodb.test
index a31d0793565..51f5b0fec01 100644
--- a/mysql-test/t/partition_innodb.test
+++ b/mysql-test/t/partition_innodb.test
@@ -66,3 +66,15 @@ DROP TABLE IF EXISTS t0_definition;
DROP TABLE IF EXISTS t0_template;
--enable_warnings
+#
+# Bug#20086: Can't get data from key partitioned tables with VARCHAR key
+#
+create table t1 (id varchar(64) primary key) engine=innodb
+partition by key(id) partitions 5;
+insert into t1 values ('a');
+insert into t1 values ('aa');
+insert into t1 values ('aaa');
+select * from t1 where id = 'a';
+select * from t1 where id = 'aa';
+select * from t1 where id = 'aaa';
+drop table t1;
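Bug#20086 concerns lookups rather than storage: rows were hashed into partitions under one interpretation of the VARCHAR key but probed under another, so point queries like those above could come back empty. With the fix each SELECT returns its row; the actual placement can be inspected with a query such as:

    SELECT partition_name, table_rows
      FROM information_schema.partitions
     WHERE table_schema = 'test' AND table_name = 't1';
    -- (table_rows is an estimate for InnoDB)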
diff --git a/mysql-test/t/renamedb.test b/mysql-test/t/renamedb.test
index 5cfb2ce0c12..1e71adb3bf3 100644
--- a/mysql-test/t/renamedb.test
+++ b/mysql-test/t/renamedb.test
@@ -16,3 +16,11 @@ select database();
show tables;
select a from t1 order by a;
drop database testdb2;
+
+#
+# Bug#19392 Rename Database: Crash if case change
+#
+create database testdb1;
+--error 1007
+rename database testdb1 to testdb1;
+drop database testdb1;
diff --git a/mysql-test/t/rpl_ndb_auto_inc.test b/mysql-test/t/rpl_ndb_auto_inc.test
index a9cc9563d88..0fc31de9b3a 100644
--- a/mysql-test/t/rpl_ndb_auto_inc.test
+++ b/mysql-test/t/rpl_ndb_auto_inc.test
@@ -6,7 +6,8 @@
# Date: 2006-02-10
# Change: Augmented test to use with cluster
#####################################
--- source include/master-slave.inc
+--source include/master-slave.inc
+--source include/have_binlog_format_mixed_or_row.inc
--echo ***************** Test 1 ************************
--echo
diff --git a/mysql-test/t/rpl_ndb_dd_partitions.test b/mysql-test/t/rpl_ndb_dd_partitions.test
new file mode 100644
index 00000000000..9291f38e8db
--- /dev/null
+++ b/mysql-test/t/rpl_ndb_dd_partitions.test
@@ -0,0 +1,310 @@
+#######################################
+# Author: JBM                          #
+# Date: 2006-03-09                     #
+# Purpose: To test the replication of #
+# Cluster Disk Data using partitions  #
+#######################################
+
+--source include/have_ndb.inc
+--source include/have_binlog_format_row.inc
+--source include/master-slave.inc
+
+--echo --- Doing pre test cleanup ---
+
+connection master;
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+
+# Start by creating a logfile group
+##################################
+
+CREATE LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile.dat'
+INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+
+ALTER LOGFILE GROUP lg1
+ADD UNDOFILE 'undofile02.dat'
+INITIAL_SIZE = 4M
+ENGINE=NDB;
+
+###################################################
+# Create a tablespace connected to the logfile group
+###################################################
+
+CREATE TABLESPACE ts1
+ADD DATAFILE 'datafile.dat'
+USE LOGFILE GROUP lg1
+INITIAL_SIZE 12M
+ENGINE NDB;
+
+ALTER TABLESPACE ts1
+ADD DATAFILE 'datafile02.dat'
+INITIAL_SIZE = 4M
+ENGINE=NDB;
+
+#################################################################
+
+--echo --- Start test 2 partition RANGE testing --
+--echo --- Do setup --
+
+
+#################################################
+# Requirement: Create a table that is           #
+# partitioned by range on year, i.e. YEAR(t),   #
+# and replicate basic operations such as        #
+# insert, update and delete between 2           #
+# different storage engines. Alter the table    #
+# and ensure it is handled correctly on the     #
+# slave.                                        #
+#################################################
+
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+ bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+ f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+ y YEAR, t DATE)
+ TABLESPACE ts1 STORAGE DISK
+ ENGINE=NDB
+ PARTITION BY RANGE (YEAR(t))
+ (PARTITION p0 VALUES LESS THAN (1901),
+ PARTITION p1 VALUES LESS THAN (1946),
+ PARTITION p2 VALUES LESS THAN (1966),
+ PARTITION p3 VALUES LESS THAN (1986),
+ PARTITION p4 VALUES LESS THAN (2005),
+ PARTITION p5 VALUES LESS THAN MAXVALUE);
+
+--echo --- Show table on master ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Show table on slave --
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- Check that simple Alter statements are replicated correctly ---
+
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+
+--echo --- Show the new improved table on the master ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Make sure that our tables on slave are still same engine ---
+--echo --- and that the alter statements replicated correctly ---
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- End test 2 partition RANGE testing ---
+--echo --- Do Cleanup ---
+
+DROP TABLE IF EXISTS t1;
+
+########################################################
+
+--echo --- Start test 3 partition LIST testing ---
+--echo --- Do setup ---
+#################################################
+
+
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+ bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+ f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+ y YEAR, t DATE)
+ TABLESPACE ts1 STORAGE DISK
+ ENGINE=NDB
+ PARTITION BY LIST(id)
+ (PARTITION p0 VALUES IN (2, 4),
+ PARTITION p1 VALUES IN (42, 142));
+
+--echo --- Test 3 Alter to add partition ---
+
+ALTER TABLE t1 ADD PARTITION (PARTITION p2 VALUES IN (412));
+
+--echo --- Show table on master ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Show table on slave ---
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- Check that simple Alter statements are replicated correctly ---
+
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+
+--echo --- Show the new improved table on the master ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Make sure that our tables on slave are still same engine ---
+--echo --- and that the alter statements replicated correctly ---
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- End test 3 partition LIST testing ---
+--echo --- Do Cleanup --
+
+DROP TABLE IF EXISTS t1;
+
+########################################################
+
+--echo --- Start test 4 partition HASH testing ---
+--echo --- Do setup ---
+#################################################
+
+
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+ bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+ f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+ y YEAR, t DATE)
+ TABLESPACE ts1 STORAGE DISK
+ ENGINE=NDB
+ PARTITION BY HASH( YEAR(t) )
+ PARTITIONS 4;
+
+--echo --- show that tables have been created correctly ---
+
+SHOW CREATE TABLE t1;
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- Check that simple Alter statements are replicated correctly ---
+
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+
+--echo --- Show the new improved table on the master ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Make sure that our tables on slave are still same engine ---
+--echo --- and that the alter statements replicated correctly ---
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- End test 4 partition HASH testing ---
+--echo --- Do Cleanup --
+
+DROP TABLE IF EXISTS t1;
+
+########################################################
+
+--echo --- Start test 5 partition by key testing ---
+--echo --- Create Table Section ---
+
+#################################################
+
+CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(63),
+ bc CHAR(63), d DECIMAL(10,4) DEFAULT 0,
+ f FLOAT DEFAULT 0, total BIGINT UNSIGNED,
+ y YEAR, t DATE,PRIMARY KEY(id))
+ TABLESPACE ts1 STORAGE DISK
+ ENGINE=NDB
+ PARTITION BY KEY()
+ PARTITIONS 4;
+
+--echo --- Show that tables on master are ndbcluster tables ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Show that tables on slave ---
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+# Okay, let's see how it holds up to table changes
+--echo --- Check that simple Alter statements are replicated correctly ---
+
+ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(id, total);
+
+--echo --- Show the new improved table on the master ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Make sure that our tables on slave are still right type ---
+--echo --- and that the alter statements replicated correctly ---
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- Check that simple Alter statements are replicated correctly ---
+
+ALTER TABLE t1 MODIFY vc VARCHAR(255);
+
+--echo --- Show the new improved table on the master ---
+
+SHOW CREATE TABLE t1;
+
+--echo --- Make sure that our tables on slave are still same engine ---
+--echo --- and that the alter statements replicated correctly ---
+
+sync_slave_with_master;
+SHOW CREATE TABLE t1;
+
+--echo --- Perform basic operation on master ---
+--echo --- and ensure replicated correctly ---
+
+--source include/rpl_multi_engine3.inc
+
+--echo --- End test 5 key partition testing ---
+--echo --- Do Cleanup ---
+
+DROP TABLE IF EXISTS t1;
+alter tablespace ts1
+drop datafile 'datafile.dat'
+engine=ndb;
+alter tablespace ts1
+drop datafile 'datafile02.dat'
+engine=ndb;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg1 ENGINE=NDB;
+--sync_slave_with_master
+
+# End of 5.1 test case
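Test 5 relies on PARTITION BY KEY() with an empty column list, which hashes on the primary key by default, so the mid-test ALTER of the primary key also changes the distribution key. In outline (sketch, columns trimmed from the test's table):

    CREATE TABLE t1 (id MEDIUMINT NOT NULL, total BIGINT UNSIGNED,
                     PRIMARY KEY (id))
      ENGINE = NDB PARTITION BY KEY () PARTITIONS 4;
    -- KEY () hashes on (id) here; after the next statement, on (id, total)
    ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY (id, total);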
diff --git a/mysql-test/t/rpl_openssl.test b/mysql-test/t/rpl_openssl.test
index 00ae5c935bf..313e22193f5 100644
--- a/mysql-test/t/rpl_openssl.test
+++ b/mysql-test/t/rpl_openssl.test
@@ -1,3 +1,7 @@
+# TODO: THIS TEST DOES NOT WORK ON WINDOWS
+# This should be fixed.
+--source include/not_windows.inc
+
source include/have_openssl.inc;
source include/master-slave.inc;
diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test
index 2f44f4e97c5..94894ef50de 100644
--- a/mysql-test/t/show_check.test
+++ b/mysql-test/t/show_check.test
@@ -445,4 +445,17 @@ delete from mysql.tables_priv where user='mysqltest_4';
flush privileges;
drop database mysqltest;
+#
+# Ensure that show plugin code is tested
+#
+
+--disable_result_log
+show full plugin;
+--enable_result_log
+show warnings;
+--disable_result_log
+show plugin;
+show plugins;
+--enable_result_log
+
--echo End of 5.1 tests
diff --git a/mysql-test/t/view_grant.test b/mysql-test/t/view_grant.test
index 2a1977702c2..801bd13fab7 100644
--- a/mysql-test/t/view_grant.test
+++ b/mysql-test/t/view_grant.test
@@ -852,3 +852,23 @@ DROP VIEW mysqltest_db1.view1;
DROP TABLE mysqltest_db1.t1;
DROP SCHEMA mysqltest_db1;
DROP USER mysqltest_db1@localhost;
+#
+# BUG#20482: failure on Create join view with sources views/tables
+# in different schemas
+#
+--disable_warnings
+CREATE DATABASE test1;
+CREATE DATABASE test2;
+--enable_warnings
+
+CREATE TABLE test1.t0 (a VARCHAR(20));
+CREATE TABLE test2.t1 (a VARCHAR(20));
+CREATE VIEW test2.t3 AS SELECT * FROM test1.t0;
+CREATE OR REPLACE VIEW test.v1 AS
+ SELECT ta.a AS col1, tb.a AS col2 FROM test2.t3 ta, test2.t1 tb;
+
+DROP VIEW test.v1;
+DROP VIEW test2.t3;
+DROP TABLE test2.t1, test1.t0;
+DROP DATABASE test2;
+DROP DATABASE test1;
diff --git a/mysys/my_delete.c b/mysys/my_delete.c
index 5670f03da64..de2a9814a56 100644
--- a/mysys/my_delete.c
+++ b/mysys/my_delete.c
@@ -32,3 +32,54 @@ int my_delete(const char *name, myf MyFlags)
}
DBUG_RETURN(err);
} /* my_delete */
+
+#if defined(__WIN__) && defined(__NT__)
+/*
+ Delete a file which is possibly not yet closed.
+
+ This function is intended to be used exclusively as a temporary solution
+ for Win NT in the case when it is needed to delete a file which is not
+ yet closed (note that the file must be opened everywhere with the
+ FILE_SHARE_DELETE mode). Deleting not-closed files cannot be supported
+ on Win 98/ME (and is for that reason considered harmful).
+
+ The function deletes the file by renaming it first. This is done because
+ when a not-closed share-delete file is deleted, it still lives on disk
+ until it has been closed everywhere, which may conflict with an attempt
+ to create a new file with the same name. The deleted file is renamed to
+ <name>.<num>.deleted, where <name> is the initial name of the file and
+ <num> is a hexadecimal number chosen to make the temporary name unique.
+*/
+int nt_share_delete(const char *name, myf MyFlags)
+{
+ char buf[MAX_PATH + 20];
+ ulong cnt;
+ DBUG_ENTER("nt_share_delete");
+ DBUG_PRINT("my",("name %s MyFlags %d", name, MyFlags));
+
+ for (cnt= GetTickCount(); cnt; cnt--)
+ {
+ sprintf(buf, "%s.%08X.deleted", name, cnt);
+ if (MoveFile(name, buf))
+ break;
+
+ if ((errno= GetLastError()) == ERROR_ALREADY_EXISTS)
+ continue;
+
+ DBUG_PRINT("warning", ("Failed to rename %s to %s, errno: %d",
+ name, buf, errno));
+ break;
+ }
+
+ if (DeleteFile(buf))
+ DBUG_RETURN(0);
+
+ my_errno= GetLastError();
+ if (MyFlags & (MY_FAE+MY_WME))
+ my_error(EE_DELETE, MYF(ME_BELL + ME_WAITTANG + (MyFlags & ME_NOINPUT)),
+ name, my_errno);
+
+ DBUG_RETURN(-1);
+}
+#endif
diff --git a/mysys/my_handler.c b/mysys/my_handler.c
index da619a49ffd..bfec44d57a4 100644
--- a/mysys/my_handler.c
+++ b/mysys/my_handler.c
@@ -548,7 +548,8 @@ HA_KEYSEG *ha_find_null(HA_KEYSEG *keyseg, uchar *a)
case HA_KEYTYPE_DOUBLE:
a= end;
break;
- case HA_KEYTYPE_END:
+ case HA_KEYTYPE_END: /* purecov: inspected */
+ /* keep compiler happy */
DBUG_ASSERT(0);
break;
}
diff --git a/mysys/my_init.c b/mysys/my_init.c
index 588bb6f46d6..dca68637161 100644
--- a/mysys/my_init.c
+++ b/mysys/my_init.c
@@ -246,6 +246,22 @@ void setEnvString(char *ret, const char *name, const char *value)
DBUG_VOID_RETURN ;
}
+/*
+ my_parameter_handler
+
+ Invalid parameter handler that we use instead of the one "baked" into the
+ CRT for MSC v8. This one just prints out what invalid parameter was
+ encountered. By providing this routine, routines like lseek will return
+ -1 when we expect them to, instead of crashing.
+*/
+void my_parameter_handler(const wchar_t * expression, const wchar_t * function,
+ const wchar_t * file, unsigned int line,
+ uintptr_t pReserved)
+{
+ DBUG_PRINT("my",("Expression: %s function: %s file: %s, line: %d",
+ expression, function, file, line));
+}
+
+
static void my_win_init(void)
{
HKEY hSoftMysql ;
@@ -263,12 +279,18 @@ static void my_win_init(void)
setlocale(LC_CTYPE, ""); /* To get right sortorder */
-#if defined(_MSC_VER) && (_MSC_VER < 1300)
+#if defined(_MSC_VER)
+#if _MSC_VER < 1300
/*
Clear the OS system variable TZ and avoid the 100% CPU usage
Only for old versions of Visual C++
*/
_putenv( "TZ=" );
+#endif
+#if _MSC_VER >= 1400
+ /* this is required to make crt functions return -1 appropriately */
+ _set_invalid_parameter_handler(my_parameter_handler);
+#endif
#endif
_tzset();
diff --git a/sql/field.cc b/sql/field.cc
index 1002725ef53..bb4530dc377 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1243,6 +1243,21 @@ uint Field::offset()
}
+void Field::hash(ulong *nr, ulong *nr2)
+{
+ if (is_null())
+ {
+ *nr^= (*nr << 1) | 1;
+ }
+ else
+ {
+ uint len= pack_length();
+ CHARSET_INFO *cs= charset();
+ cs->coll->hash_sort(cs, (uchar*) ptr, len, nr, nr2);
+ }
+}
+
+
void Field::copy_from_tmp(int row_offset)
{
memcpy(ptr,ptr+row_offset,pack_length());
@@ -6925,6 +6940,21 @@ uint Field_varstring::is_equal(create_field *new_field)
}
+void Field_varstring::hash(ulong *nr, ulong *nr2)
+{
+ if (is_null())
+ {
+ *nr^= (*nr << 1) | 1;
+ }
+ else
+ {
+ uint len= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
+ CHARSET_INFO *cs= charset();
+ cs->coll->hash_sort(cs, (uchar*) ptr + length_bytes, len, nr, nr2);
+ }
+}
+
+
/****************************************************************************
** blob type
** A blob is saved as a length and a pointer. The length is stored in the
diff --git a/sql/field.h b/sql/field.h
index b19d0ebe139..55f2c037109 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -351,6 +351,8 @@ public:
return field_length / charset()->mbmaxlen;
}
+ /* Hash value */
+ virtual void hash(ulong *nr, ulong *nr2);
friend bool reopen_table(THD *,struct st_table *,bool);
friend int cre_myisam(my_string name, register TABLE *form, uint options,
ulonglong auto_increment_value);
@@ -1120,6 +1122,7 @@ public:
char *new_ptr, uchar *new_null_ptr,
uint new_null_bit);
uint is_equal(create_field *new_field);
+ void hash(ulong *nr, ulong *nr2);
};
diff --git a/sql/filesort.cc b/sql/filesort.cc
index e712bed9e13..f41d72ac07a 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -688,9 +688,17 @@ static void make_sortkey(register SORTPARAM *param,
bzero((char*) to-1,sort_field->length+1);
else
{
+ /* purecov: begin deadcode */
+ /*
+ This should only happen during extreme conditions if we run out
+ of memory or have an item marked not null when it can be null.
+ This code is here mainly to avoid a hard crash in this case.
+ */
+ DBUG_ASSERT(0);
DBUG_PRINT("warning",
("Got null on something that shouldn't be null"));
bzero((char*) to,sort_field->length); // Avoid crash
+ /* purecov: end */
}
break;
}
@@ -699,7 +707,7 @@ static void make_sortkey(register SORTPARAM *param,
diff=(int) (sort_field_length - length);
if (diff < 0)
{
- diff=0; /* purecov: inspected */
+ diff=0;
length= sort_field_length;
}
if (sort_field->suffix_length)
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 997072bd2a5..bbeea2ca1ba 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -186,8 +186,8 @@ static int update_status_variables(Ndb_cluster_connection *c)
SHOW_VAR ndb_status_variables[]= {
{"cluster_node_id", (char*) &ndb_cluster_node_id, SHOW_LONG},
- {"connected_host", (char*) &ndb_connected_host, SHOW_CHAR_PTR},
- {"connected_port", (char*) &ndb_connected_port, SHOW_LONG},
+ {"config_from_host", (char*) &ndb_connected_host, SHOW_CHAR_PTR},
+ {"config_from_port", (char*) &ndb_connected_port, SHOW_LONG},
// {"number_of_replicas", (char*) &ndb_number_of_replicas, SHOW_LONG},
{"number_of_storage_nodes",(char*) &ndb_number_of_storage_nodes, SHOW_LONG},
{NullS, NullS, SHOW_LONG}
@@ -414,6 +414,37 @@ void ha_ndbcluster::set_rec_per_key()
DBUG_VOID_RETURN;
}
+ha_rows ha_ndbcluster::records()
+{
+ ha_rows retval;
+ DBUG_ENTER("ha_ndbcluster::records");
+ struct Ndb_local_table_statistics *info= m_table_info;
+ DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
+ ((const NDBTAB *)m_table)->getTableId(),
+ info->no_uncommitted_rows_count));
+
+ Ndb *ndb= get_ndb();
+ ndb->setDatabaseName(m_dbname);
+ struct Ndb_statistics stat;
+ if (ndb_get_table_statistics(ndb, m_table, &stat) == 0)
+ {
+ retval= stat.row_count;
+ }
+ else
+ {
+ /**
+ * Be consistent with BUG#19914 until we fix it properly
+ */
+ DBUG_RETURN(-1);
+ }
+
+ THD *thd= current_thd;
+ if (get_thd_ndb(thd)->error)
+ info->no_uncommitted_rows_count= 0;
+
+ DBUG_RETURN(retval + info->no_uncommitted_rows_count);
+}
+
void ha_ndbcluster::records_update()
{
if (m_ha_not_exact_count)
@@ -426,8 +457,8 @@ void ha_ndbcluster::records_update()
// if (info->records == ~(ha_rows)0)
{
Ndb *ndb= get_ndb();
- ndb->setDatabaseName(m_dbname);
struct Ndb_statistics stat;
+ ndb->setDatabaseName(m_dbname);
if (ndb_get_table_statistics(ndb, m_table, &stat) == 0)
{
stats.mean_rec_length= stat.row_size;
@@ -3563,6 +3594,7 @@ void ha_ndbcluster::info(uint flag)
Ndb *ndb= get_ndb();
ndb->setDatabaseName(m_dbname);
struct Ndb_statistics stat;
+ ndb->setDatabaseName(m_dbname);
if (current_thd->variables.ndb_use_exact_count &&
ndb_get_table_statistics(ndb, m_table, &stat) == 0)
{
@@ -4667,7 +4699,15 @@ int ha_ndbcluster::create(const char *name,
// reset at return
m_table= ndbtab_g.get_table();
// TODO check also that we have the same frm...
- DBUG_ASSERT(m_table != 0);
+ if (!m_table)
+ {
+ /* purecov: begin deadcode */
+ const NdbError err= dict->getNdbError();
+ ERR_PRINT(err);
+ my_errno= ndb_to_mysql_error(&err);
+ DBUG_RETURN(my_errno);
+ /* purecov: end */
+ }
DBUG_PRINT("info", ("Table %s/%s created successfully",
m_dbname, m_tabname));
@@ -5455,7 +5495,8 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | \
HA_PRIMARY_KEY_REQUIRED_FOR_DELETE | \
HA_PARTIAL_COLUMN_READ | \
- HA_HAS_OWN_BINLOGGING
+ HA_HAS_OWN_BINLOGGING | \
+ HA_HAS_RECORDS
ha_ndbcluster::ha_ndbcluster(TABLE_SHARE *table_arg):
handler(&ndbcluster_hton, table_arg),
@@ -7371,6 +7412,7 @@ ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab,
DBUG_ENTER("ndb_get_table_statistics");
DBUG_PRINT("enter", ("table: %s", ndbtab->getName()));
NdbTransaction* pTrans;
+ NdbError error;
int retries= 10;
int retry_sleep= 30 * 1000; /* 30 milliseconds */
@@ -7378,57 +7420,54 @@ ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab,
do
{
- pTrans= ndb->startTransaction();
- if (pTrans == NULL)
+ Uint64 rows, commits, mem;
+ Uint32 size;
+ Uint32 count= 0;
+ Uint64 sum_rows= 0;
+ Uint64 sum_commits= 0;
+ Uint64 sum_row_size= 0;
+ Uint64 sum_mem= 0;
+ NdbScanOperation *pOp;
+ NdbResultSet *rs;
+ int check;
+
+ if ((pTrans= ndb->startTransaction()) == NULL)
{
- if (ndb->getNdbError().status == NdbError::TemporaryError &&
- retries--)
- {
- my_sleep(retry_sleep);
- continue;
- }
- ERR_RETURN(ndb->getNdbError());
+ error= ndb->getNdbError();
+ goto retry;
+ }
+
+ if ((pOp= pTrans->getNdbScanOperation(ndbtab)) == NULL)
+ {
+ error= pTrans->getNdbError();
+ goto retry;
}
-
- NdbScanOperation* pOp= pTrans->getNdbScanOperation(ndbtab);
- if (pOp == NULL)
- break;
if (pOp->readTuples(NdbOperation::LM_CommittedRead))
- break;
+ {
+ error= pOp->getNdbError();
+ goto retry;
+ }
- int check= pOp->interpret_exit_last_row();
- if (check == -1)
- break;
+ if (pOp->interpret_exit_last_row() == -1)
+ {
+ error= pOp->getNdbError();
+ goto retry;
+ }
- Uint64 rows, commits, mem;
- Uint32 size;
pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&rows);
pOp->getValue(NdbDictionary::Column::COMMIT_COUNT, (char*)&commits);
pOp->getValue(NdbDictionary::Column::ROW_SIZE, (char*)&size);
pOp->getValue(NdbDictionary::Column::FRAGMENT_MEMORY, (char*)&mem);
- check= pTrans->execute(NdbTransaction::NoCommit,
- NdbTransaction::AbortOnError,
- TRUE);
- if (check == -1)
+ if (pTrans->execute(NdbTransaction::NoCommit,
+ NdbTransaction::AbortOnError,
+ TRUE) == -1)
{
- if (pTrans->getNdbError().status == NdbError::TemporaryError &&
- retries--)
- {
- ndb->closeTransaction(pTrans);
- pTrans= 0;
- my_sleep(retry_sleep);
- continue;
- }
- break;
+ error= pTrans->getNdbError();
+ goto retry;
}
-
- Uint32 count= 0;
- Uint64 sum_rows= 0;
- Uint64 sum_commits= 0;
- Uint64 sum_row_size= 0;
- Uint64 sum_mem= 0;
+
while ((check= pOp->nextResult(TRUE, TRUE)) == 0)
{
sum_rows+= rows;
@@ -7440,7 +7479,10 @@ ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab,
}
if (check == -1)
- break;
+ {
+ error= pOp->getNdbError();
+ goto retry;
+ }
pOp->close(TRUE);
@@ -7457,12 +7499,21 @@ ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab,
sum_mem, count));
DBUG_RETURN(0);
+retry:
+ if (pTrans)
+ {
+ ndb->closeTransaction(pTrans);
+ pTrans= NULL;
+ }
+ if (error.status == NdbError::TemporaryError && retries--)
+ {
+ my_sleep(retry_sleep);
+ continue;
+ }
+ break;
} while(1);
-
- if (pTrans)
- ndb->closeTransaction(pTrans);
- DBUG_PRINT("exit", ("failed"));
- DBUG_RETURN(-1);
+ DBUG_PRINT("exit", ("failed, error %u(%s)", error.code, error.message));
+ ERR_RETURN(error);
}
/*
@@ -9628,10 +9679,22 @@ static bool adjusted_frag_count(uint no_fragments, uint no_nodes,
return (reported_frags < no_fragments);
}
-int ha_ndbcluster::get_default_no_partitions(ulonglong max_rows)
+int ha_ndbcluster::get_default_no_partitions(HA_CREATE_INFO *info)
{
+ ha_rows max_rows, min_rows;
+ if (info)
+ {
+ max_rows= info->max_rows;
+ min_rows= info->min_rows;
+ }
+ else
+ {
+ max_rows= table_share->max_rows;
+ min_rows= table_share->min_rows;
+ }
uint reported_frags;
- uint no_fragments= get_no_fragments(max_rows);
+ uint no_fragments=
+ get_no_fragments(max_rows >= min_rows ? max_rows : min_rows);
uint no_nodes= g_ndb_cluster_connection->no_db_nodes();
if (adjusted_frag_count(no_fragments, no_nodes, reported_frags))
{
@@ -9879,7 +9942,17 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
} while (++i < part_info->no_parts);
tab->setDefaultNoPartitionsFlag(part_info->use_default_no_partitions);
tab->setLinearFlag(part_info->linear_hash_ind);
- tab->setMaxRows(table->s->max_rows);
+ {
+ ha_rows max_rows= table_share->max_rows;
+ ha_rows min_rows= table_share->min_rows;
+ if (max_rows < min_rows)
+ max_rows= min_rows;
+ if (max_rows != (ha_rows)0) /* default setting, don't set fragmentation */
+ {
+ tab->setMaxRows(max_rows);
+ tab->setMinRows(min_rows);
+ }
+ }
tab->setTablespaceNames(ts_names, fd_index*sizeof(char*));
tab->setFragmentCount(fd_index);
tab->setFragmentData(&frag_data, fd_index*2);
@@ -9982,8 +10055,9 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info)
{
DBUG_RETURN(HA_ERR_NO_CONNECTION);
}
-
- NDBDICT *dict = ndb->getDictionary();
+
+ NdbError err;
+ NDBDICT *dict= ndb->getDictionary();
int error;
const char * errmsg;
LINT_INIT(errmsg);
@@ -9995,6 +10069,7 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info)
NdbDictionary::Tablespace ndb_ts;
NdbDictionary::Datafile ndb_df;
+ NdbDictionary::ObjectId objid;
if (set_up_tablespace(info, &ndb_ts))
{
DBUG_RETURN(1);
@@ -10004,7 +10079,7 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info)
DBUG_RETURN(1);
}
errmsg= "TABLESPACE";
- if (dict->createTablespace(ndb_ts))
+ if (dict->createTablespace(ndb_ts, &objid))
{
DBUG_PRINT("error", ("createTablespace returned %d", error));
goto ndberror;
@@ -10013,8 +10088,17 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info)
errmsg= "DATAFILE";
if (dict->createDatafile(ndb_df))
{
+ err= dict->getNdbError();
+ NdbDictionary::Tablespace tmp= dict->getTablespace(ndb_ts.getName());
+ if (dict->getNdbError().code == 0 &&
+ tmp.getObjectId() == objid.getObjectId() &&
+ tmp.getObjectVersion() == objid.getObjectVersion())
+ {
+ dict->dropTablespace(tmp);
+ }
+
DBUG_PRINT("error", ("createDatafile returned %d", error));
- goto ndberror;
+ goto ndberror2;
}
is_tablespace= 1;
break;
@@ -10037,9 +10121,12 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info)
}
else if(info->ts_alter_tablespace_type == ALTER_TABLESPACE_DROP_FILE)
{
- NdbDictionary::Datafile df = dict->getDatafile(0,
- info->data_file_name);
- if (strcmp(df.getPath(), info->data_file_name) == 0)
+ NdbDictionary::Tablespace ts= dict->getTablespace(info->tablespace_name);
+ NdbDictionary::Datafile df= dict->getDatafile(0, info->data_file_name);
+ NdbDictionary::ObjectId objid;
+ df.getTablespaceId(&objid);
+ if (ts.getObjectId() == objid.getObjectId() &&
+ strcmp(df.getPath(), info->data_file_name) == 0)
{
errmsg= " DROP DATAFILE";
if (dict->dropDatafile(df))
@@ -10068,6 +10155,7 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info)
error= ER_CREATE_FILEGROUP_FAILED;
NdbDictionary::LogfileGroup ndb_lg;
NdbDictionary::Undofile ndb_uf;
+ NdbDictionary::ObjectId objid;
if (info->undo_file_name == NULL)
{
/*
@@ -10080,7 +10168,7 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info)
DBUG_RETURN(1);
}
errmsg= "LOGFILE GROUP";
- if (dict->createLogfileGroup(ndb_lg))
+ if (dict->createLogfileGroup(ndb_lg, &objid))
{
goto ndberror;
}
@@ -10092,7 +10180,15 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info)
errmsg= "UNDOFILE";
if (dict->createUndofile(ndb_uf))
{
- goto ndberror;
+ err= dict->getNdbError();
+ NdbDictionary::LogfileGroup tmp= dict->getLogfileGroup(ndb_lg.getName());
+ if (dict->getNdbError().code == 0 &&
+ tmp.getObjectId() == objid.getObjectId() &&
+ tmp.getObjectVersion() == objid.getObjectVersion())
+ {
+ dict->dropLogfileGroup(tmp);
+ }
+ goto ndberror2;
}
break;
}
@@ -10169,7 +10265,8 @@ int ndbcluster_alter_tablespace(THD* thd, st_alter_tablespace *info)
DBUG_RETURN(FALSE);
ndberror:
- const NdbError err= dict->getNdbError();
+ err= dict->getNdbError();
+ndberror2:
ERR_PRINT(err);
ndb_to_mysql_error(&err);
@@ -10358,10 +10455,12 @@ static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables,
table->field[c++]->set_null(); // TABLE_NAME
// LOGFILE_GROUP_NAME
+ NdbDictionary::ObjectId objid;
+ uf.getLogfileGroupId(&objid);
table->field[c++]->store(uf.getLogfileGroup(),
strlen(uf.getLogfileGroup()),
system_charset_info);
- table->field[c++]->store(uf.getLogfileGroupId()); // LOGFILE_GROUP_NUMBER
+ table->field[c++]->store(objid.getObjectId()); // LOGFILE_GROUP_NUMBER
table->field[c++]->store(ndbcluster_hton_name,
ndbcluster_hton_name_length,
system_charset_info); // ENGINE
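Two of the changes above are directly observable from SQL. The renamed status variables surface as Ndb_config_from_host / Ndb_config_from_port, and the reworked get_default_no_partitions() means MAX_ROWS (or MIN_ROWS, if larger) now drives the default fragment count when no partition count is given. A sketch (sizes illustrative):

    SHOW GLOBAL STATUS LIKE 'Ndb_config_from%';

    CREATE TABLE test.big (a BIGINT PRIMARY KEY, b VARCHAR(255))
      ENGINE = NDB
      MAX_ROWS = 100000000
      PARTITION BY KEY (a); -- partition count derived from MAX_ROWS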
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 2e78a00ef94..5e13a6664f0 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -622,6 +622,7 @@ class ha_ndbcluster: public handler
int read_multi_range_next(KEY_MULTI_RANGE **found_range_p);
bool get_error_message(int error, String *buf);
+ ha_rows records();
void info(uint);
void get_dynamic_partition_info(PARTITION_INFO *stat_info, uint part_id);
int extra(enum ha_extra_function operation);
@@ -651,7 +652,7 @@ class ha_ndbcluster: public handler
int create(const char *name, TABLE *form, HA_CREATE_INFO *info);
int create_handler_files(const char *file, const char *old_name,
int action_flag, HA_CREATE_INFO *info);
- int get_default_no_partitions(ulonglong max_rows);
+ int get_default_no_partitions(HA_CREATE_INFO *info);
bool get_no_parts(const char *name, uint *no_parts);
void set_auto_partitions(partition_info *part_info);
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index 40a98563613..b88002b8529 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -125,6 +125,7 @@ static TABLE_LIST binlog_tables;
*/
#ifndef DBUG_OFF
+/* purecov: begin deadcode */
static void print_records(TABLE *table, const char *record)
{
for (uint j= 0; j < table->s->fields; j++)
@@ -144,6 +145,7 @@ static void print_records(TABLE *table, const char *record)
DBUG_PRINT("info",("[%u]field_ptr[0->%d]: %s", j, n, buf));
}
}
+/* purecov: end */
#else
#define print_records(a,b)
#endif
@@ -1770,8 +1772,31 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
/* acknowledge this query _after_ epoch completion */
post_epoch_unlock= 1;
break;
- case SOT_CREATE_TABLE:
case SOT_TRUNCATE_TABLE:
+ {
+ char key[FN_REFLEN];
+ build_table_filename(key, sizeof(key), schema->db, schema->name, "");
+ NDB_SHARE *share= get_share(key, 0, FALSE, FALSE);
+ // invalidation already handled by binlog thread
+ if (!share || !share->op)
+ {
+ {
+ injector_ndb->setDatabaseName(schema->db);
+ Ndb_table_guard ndbtab_g(injector_ndb->getDictionary(),
+ schema->name);
+ ndbtab_g.invalidate();
+ }
+ TABLE_LIST table_list;
+ bzero((char*) &table_list,sizeof(table_list));
+ table_list.db= schema->db;
+ table_list.alias= table_list.table_name= schema->name;
+ close_cached_tables(thd, 0, &table_list, FALSE);
+ }
+ if (share)
+ free_share(&share);
+ }
+ // fall through
+ case SOT_CREATE_TABLE:
pthread_mutex_lock(&LOCK_open);
if (ndb_create_table_from_engine(thd, schema->db, schema->name))
{
diff --git a/sql/handler.h b/sql/handler.h
index fb5f0f4ba05..94f4519a2e7 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1335,7 +1335,7 @@ public:
virtual const char *table_type() const =0;
virtual const char **bas_ext() const =0;
- virtual int get_default_no_partitions(ulonglong max_rows) { return 1;}
+ virtual int get_default_no_partitions(HA_CREATE_INFO *info) { return 1;}
virtual void set_auto_partitions(partition_info *part_info) { return; }
virtual bool get_no_parts(const char *name,
uint *no_parts)
diff --git a/sql/key.cc b/sql/key.cc
index 11dd267875f..69557d971e8 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -210,9 +210,13 @@ void key_restore(byte *to_record, byte *from_key, KEY *key_info,
}
else if (key_part->key_part_flag & HA_VAR_LENGTH_PART)
{
+ my_bitmap_map *old_map;
key_length-= HA_KEY_BLOB_LENGTH;
length= min(key_length, key_part->length);
+ old_map= dbug_tmp_use_all_columns(key_part->field->table,
+ key_part->field->table->write_set);
key_part->field->set_key_image((char *) from_key, length);
+ dbug_tmp_restore_column_map(key_part->field->table->write_set, old_map);
from_key+= HA_KEY_BLOB_LENGTH;
}
else
diff --git a/sql/log.cc b/sql/log.cc
index 7168f7e2da7..ec73400ea3c 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -2492,14 +2492,14 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd)
for (;;)
{
- my_delete(linfo.log_file_name, MYF(MY_WME));
+ my_delete_allow_opened(linfo.log_file_name, MYF(MY_WME));
if (find_next_log(&linfo, 0))
break;
}
/* Start logging with a new file */
close(LOG_CLOSE_INDEX);
- my_delete(index_file_name, MYF(MY_WME)); // Reset (open will update)
+ my_delete_allow_opened(index_file_name, MYF(MY_WME)); // Reset (open will update)
if (!thd->slave_thread)
need_start_event=1;
if (!open_index_file(index_file_name, 0))
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 7cffa552954..36805e0043d 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -5918,6 +5918,7 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli)
if (col <= tsh->fields)
{
+ /* purecov: begin inspected */
/*
If we get here, the number of columns in the event didn't
match the number of columns in the table on the slave, *or*
@@ -5950,6 +5951,7 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli)
thd->query_error= 1;
error= ERR_BAD_TABLE_DEF;
goto err;
+ /* purecov: end */
}
/*
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 72fd60169fd..c0b453b7d69 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -532,6 +532,8 @@ void cleanup_items(Item *item);
class THD;
void close_thread_tables(THD *thd, bool locked=0, bool skip_derived=0);
bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *tables);
+bool check_single_table_access(THD *thd, ulong privilege,
+ TABLE_LIST *tables);
bool check_routine_access(THD *thd,ulong want_access,char *db,char *name,
bool is_proc, bool no_errors);
bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table);
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 4ab1b365f73..6e57993a61a 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -3223,9 +3223,11 @@ server.");
/* fall back to the log files if tables are not present */
if (have_csv_db == SHOW_OPTION_NO)
{
+ /* purecov: begin inspected */
sql_print_error("CSV engine is not present, falling back to the "
"log files");
log_output_options= (log_output_options & ~LOG_TABLE) | LOG_FILE;
+ /* purecov: end */
}
logger.set_handlers(LOG_FILE, opt_slow_log ? log_output_options:LOG_NONE,
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index 39c8d976732..286637bd9aa 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -153,7 +153,7 @@ char *partition_info::create_subpartition_name(uint subpart_no,
SYNOPSIS
set_up_default_partitions()
file A reference to a handler of the table
- max_rows Maximum number of rows stored in the table
+ info Create info
start_no Starting partition number
RETURN VALUE
@@ -169,7 +169,8 @@ char *partition_info::create_subpartition_name(uint subpart_no,
The external routine needing this code is check_partition_info
*/
-bool partition_info::set_up_default_partitions(handler *file, ulonglong max_rows,
+bool partition_info::set_up_default_partitions(handler *file,
+ HA_CREATE_INFO *info,
uint start_no)
{
uint i;
@@ -188,7 +189,7 @@ bool partition_info::set_up_default_partitions(handler *file, ulonglong max_rows
goto end;
}
if (no_parts == 0)
- no_parts= file->get_default_no_partitions(max_rows);
+ no_parts= file->get_default_no_partitions(info);
if (unlikely(no_parts > MAX_PARTITIONS))
{
my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
@@ -228,7 +229,7 @@ end:
SYNOPSIS
set_up_default_subpartitions()
file A reference to a handler of the table
- max_rows Maximum number of rows stored in the table
+ info Create info
RETURN VALUE
TRUE Error, attempted default values not possible
@@ -244,7 +245,7 @@ end:
*/
bool partition_info::set_up_default_subpartitions(handler *file,
- ulonglong max_rows)
+ HA_CREATE_INFO *info)
{
uint i, j;
char *default_name, *name_ptr;
@@ -254,7 +255,7 @@ bool partition_info::set_up_default_subpartitions(handler *file,
DBUG_ENTER("partition_info::set_up_default_subpartitions");
if (no_subparts == 0)
- no_subparts= file->get_default_no_partitions(max_rows);
+ no_subparts= file->get_default_no_partitions(info);
if (unlikely((no_parts * no_subparts) > MAX_PARTITIONS))
{
my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
@@ -296,7 +297,7 @@ end:
SYNOPSIS
set_up_defaults_for_partitioning()
file A reference to a handler of the table
- max_rows Maximum number of rows stored in the table
+ info Create info
start_no Starting partition number
RETURN VALUE
@@ -309,7 +310,7 @@ end:
*/
bool partition_info::set_up_defaults_for_partitioning(handler *file,
- ulonglong max_rows,
+ HA_CREATE_INFO *info,
uint start_no)
{
DBUG_ENTER("partition_info::set_up_defaults_for_partitioning");
@@ -318,10 +319,10 @@ bool partition_info::set_up_defaults_for_partitioning(handler *file,
{
default_partitions_setup= TRUE;
if (use_default_partitions)
- DBUG_RETURN(set_up_default_partitions(file, max_rows, start_no));
+ DBUG_RETURN(set_up_default_partitions(file, info, start_no));
if (is_sub_partitioned() &&
use_default_subpartitions)
- DBUG_RETURN(set_up_default_subpartitions(file, max_rows));
+ DBUG_RETURN(set_up_default_subpartitions(file, info));
}
DBUG_RETURN(FALSE);
}
@@ -692,7 +693,7 @@ end:
SYNOPSIS
check_partition_info()
file A reference to a handler of the table
- max_rows Maximum number of rows stored in the table
+ info Create info
engine_type Return value for used engine in partitions
RETURN VALUE
@@ -708,7 +709,7 @@ end:
*/
bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
- handler *file, ulonglong max_rows)
+ handler *file, HA_CREATE_INFO *info)
{
handlerton **engine_array= NULL;
uint part_count= 0;
@@ -743,7 +744,7 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
my_error(ER_SUBPARTITION_ERROR, MYF(0));
goto end;
}
- if (unlikely(set_up_defaults_for_partitioning(file, max_rows, (uint)0)))
+ if (unlikely(set_up_defaults_for_partitioning(file, info, (uint)0)))
goto end;
tot_partitions= get_tot_partitions();
if (unlikely(tot_partitions > MAX_PARTITIONS))
diff --git a/sql/partition_info.h b/sql/partition_info.h
index 3d8c6a40221..d938d21653a 100644
--- a/sql/partition_info.h
+++ b/sql/partition_info.h
@@ -243,21 +243,21 @@ public:
return no_parts * (is_sub_partitioned() ? no_subparts : 1);
}
- bool set_up_defaults_for_partitioning(handler *file, ulonglong max_rows,
+ bool set_up_defaults_for_partitioning(handler *file, HA_CREATE_INFO *info,
uint start_no);
char *has_unique_names();
static bool check_engine_mix(handlerton **engine_array, uint no_parts);
bool check_range_constants();
bool check_list_constants();
bool check_partition_info(THD *thd, handlerton **eng_type,
- handler *file, ulonglong max_rows);
+ handler *file, HA_CREATE_INFO *info);
void print_no_partition_found(TABLE *table);
private:
static int list_part_cmp(const void* a, const void* b);
static int list_part_cmp_unsigned(const void* a, const void* b);
- bool set_up_default_partitions(handler *file, ulonglong max_rows,
+ bool set_up_default_partitions(handler *file, HA_CREATE_INFO *info,
uint start_no);
- bool set_up_default_subpartitions(handler *file, ulonglong max_rows);
+ bool set_up_default_subpartitions(handler *file, HA_CREATE_INFO *info);
char *create_default_partition_names(uint part_no, uint no_parts,
uint start_no);
char *create_subpartition_name(uint subpart_no, const char *part_name);
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index 476bc2f2f02..8d2822370f2 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -54,7 +54,7 @@ ER_CANT_CREATE_FILE
cze "Nemohu vytvo-Bøit soubor '%-.64s' (chybový kód: %d)"
dan "Kan ikke oprette filen '%-.64s' (Fejlkode: %d)"
nla "Kan file '%-.64s' niet aanmaken (Errcode: %d)"
- eng "Can't create file '%-.64s' (errno: %d)"
+ eng "Can't create file '%-.200s' (errno: %d)"
est "Ei suuda luua faili '%-.64s' (veakood: %d)"
fre "Ne peut créer le fichier '%-.64s' (Errcode: %d)"
ger "Kann Datei '%-.64s' nicht erzeugen (Fehler: %d)"
@@ -278,7 +278,7 @@ ER_CANT_GET_STAT
cze "Nemohu z-Bískat stav '%-.64s' (chybový kód: %d)"
dan "Kan ikke læse status af '%-.64s' (Fejlkode: %d)"
nla "Kan de status niet krijgen van '%-.64s' (Errcode: %d)"
- eng "Can't get status of '%-.64s' (errno: %d)"
+ eng "Can't get status of '%-.200s' (errno: %d)"
jps "'%-.64s' ‚̃XƒeƒCƒ^ƒX‚ª“¾‚ç‚ê‚Ü‚¹‚ñ. (errno: %d)",
est "Ei suuda lugeda '%-.64s' olekut (veakood: %d)"
fre "Ne peut obtenir le status de '%-.64s' (Errcode: %d)"
@@ -353,7 +353,7 @@ ER_CANT_OPEN_FILE
cze "Nemohu otev-Bøít soubor '%-.64s' (chybový kód: %d)"
dan "Kan ikke åbne fil: '%-.64s' (Fejlkode: %d)"
nla "Kan de file '%-.64s' niet openen (Errcode: %d)"
- eng "Can't open file: '%-.64s' (errno: %d)"
+ eng "Can't open file: '%-.200s' (errno: %d)"
jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ðŠJ‚­Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)",
est "Ei suuda avada faili '%-.64s' (veakood: %d)"
fre "Ne peut ouvrir le fichier: '%-.64s' (Errcode: %d)"
@@ -378,7 +378,7 @@ ER_FILE_NOT_FOUND
cze "Nemohu naj-Bít soubor '%-.64s' (chybový kód: %d)"
dan "Kan ikke finde fila: '%-.64s' (Fejlkode: %d)"
nla "Kan de file: '%-.64s' niet vinden (Errcode: %d)"
- eng "Can't find file: '%-.64s' (errno: %d)"
+ eng "Can't find file: '%-.200s' (errno: %d)"
jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ðŒ©•t‚¯‚鎖‚ª‚Å‚«‚Ü‚¹‚ñ.(errno: %d)",
est "Ei suuda leida faili '%-.64s' (veakood: %d)"
fre "Ne peut trouver le fichier: '%-.64s' (Errcode: %d)"
@@ -549,7 +549,7 @@ ER_ERROR_ON_READ
cze "Chyba p-Bøi ètení souboru '%-.64s' (chybový kód: %d)"
dan "Fejl ved læsning af '%-.64s' (Fejlkode: %d)"
nla "Fout bij het lezen van file '%-.64s' (Errcode: %d)"
- eng "Error reading file '%-.64s' (errno: %d)"
+ eng "Error reading file '%-.200s' (errno: %d)"
jps "'%-.64s' ƒtƒ@ƒCƒ‹‚Ì“Ç‚Ýž‚݃Gƒ‰[ (errno: %d)",
est "Viga faili '%-.64s' lugemisel (veakood: %d)"
fre "Erreur en lecture du fichier '%-.64s' (Errcode: %d)"
@@ -599,7 +599,7 @@ ER_ERROR_ON_WRITE
cze "Chyba p-Bøi zápisu do souboru '%-.64s' (chybový kód: %d)"
dan "Fejl ved skriving av filen '%-.64s' (Fejlkode: %d)"
nla "Fout bij het wegschrijven van file '%-.64s' (Errcode: %d)"
- eng "Error writing file '%-.64s' (errno: %d)"
+ eng "Error writing file '%-.200s' (errno: %d)"
jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ð‘‚­Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)",
est "Viga faili '%-.64s' kirjutamisel (veakood: %d)"
fre "Erreur d'écriture du fichier '%-.64s' (Errcode: %d)"
@@ -772,7 +772,7 @@ ER_NOT_FORM_FILE
cze "Nespr-Bávná informace v souboru '%-.64s'"
dan "Forkert indhold i: '%-.64s'"
nla "Verkeerde info in file: '%-.64s'"
- eng "Incorrect information in file: '%-.64s'"
+ eng "Incorrect information in file: '%-.200s'"
jps "ƒtƒ@ƒCƒ‹ '%-.64s' ‚Ì info ‚ªŠÔˆá‚Á‚Ä‚¢‚é‚悤‚Å‚·",
est "Vigane informatsioon failis '%-.64s'"
fre "Information erronnée dans le fichier: '%-.64s'"
@@ -797,7 +797,7 @@ ER_NOT_KEYFILE
cze "Nespr-Bávný klíè pro tabulku '%-.64s'; pokuste se ho opravit"
dan "Fejl i indeksfilen til tabellen '%-.64s'; prøv at reparere den"
nla "Verkeerde zoeksleutel file voor tabel: '%-.64s'; probeer het te repareren"
- eng "Incorrect key file for table '%-.64s'; try to repair it"
+ eng "Incorrect key file for table '%-.200s'; try to repair it"
jps "'%-.64s' ƒe[ƒuƒ‹‚Ì key file ‚ªŠÔˆá‚Á‚Ä‚¢‚é‚悤‚Å‚·. C•œ‚ð‚µ‚Ä‚­‚¾‚³‚¢",
est "Tabeli '%-.64s' võtmefail on vigane; proovi seda parandada"
fre "Index corrompu dans la table: '%-.64s'; essayez de le réparer"
@@ -2044,7 +2044,7 @@ ER_TEXTFILE_NOT_READABLE
cze "Soubor '%-.64s' mus-Bí být v adresáøi databáze nebo èitelný pro v¹echny"
dan "Filen '%-.64s' skal være i database-folderen og kunne læses af alle"
nla "Het bestand '%-.64s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn."
- eng "The file '%-.64s' must be in the database directory or be readable by all"
+ eng "The file '%-.128s' must be in the database directory or be readable by all"
jps "ƒtƒ@ƒCƒ‹ '%-.64s' ‚Í databse ‚Ì directory ‚É‚ ‚é‚©‘S‚Ẵ†[ƒU[‚ª“Ç‚ß‚é‚悤‚É‹–‰Â‚³‚ê‚Ä‚¢‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ.",
est "Fail '%-.64s' peab asuma andmebaasi kataloogis või olema kõigile loetav"
fre "Le fichier '%-.64s' doit être dans le répertoire de la base et lisible par tous"
@@ -2069,7 +2069,7 @@ ER_FILE_EXISTS_ERROR
cze "Soubor '%-.64s' ji-B¾ existuje"
dan "Filen '%-.64s' eksisterer allerede"
nla "Het bestand '%-.64s' bestaat reeds"
- eng "File '%-.80s' already exists"
+ eng "File '%-.200s' already exists"
jps "File '%-.64s' ‚ÍŠù‚É‘¶Ý‚µ‚Ü‚·",
est "Fail '%-.80s' juba eksisteerib"
fre "Le fichier '%-.64s' existe déjà"
@@ -2345,7 +2345,7 @@ ER_NO_UNIQUE_LOGFILE
cze "Nemohu vytvo-Bøit jednoznaèné jméno logovacího souboru %s.(1-999)\n"
dan "Kan ikke lave unikt log-filnavn %s.(1-999)\n"
nla "Het is niet mogelijk een unieke naam te maken voor de logfile %s.(1-999)\n"
- eng "Can't generate a unique log-filename %-.64s.(1-999)\n"
+ eng "Can't generate a unique log-filename %-.200s.(1-999)\n"
est "Ei suuda luua unikaalset logifaili nime %-.64s.(1-999)\n"
fre "Ne peut générer un unique nom de journal %s.(1-999)\n"
ger "Kann keinen eindeutigen Dateinamen für die Logdatei %-.64s(1-999) erzeugen\n"
@@ -5193,7 +5193,7 @@ ER_FPARSER_BAD_HEADER
rus "îÅ×ÅÒÎÙÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÁ ÆÁÊÌÁ '%-.64s'"
ukr "îÅצÒÎÉÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÕ Õ ÆÁÊ̦ '%-.64s'"
ER_FPARSER_EOF_IN_COMMENT
- eng "Unexpected end of file while parsing comment '%-.64s'"
+ eng "Unexpected end of file while parsing comment '%-.200s'"
ger "Unerwartetes Dateiende beim Parsen des Kommentars '%-.64s'"
rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ × ËÏÍÅÎÔÁÒÉÉ '%-.64s'"
ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ËÏÍÅÎÔÁÒ¦ '%-.64s'"
@@ -5362,7 +5362,7 @@ ER_LOGGING_PROHIBIT_CHANGING_OF
eng "Binary logging and replication forbid changing the global server %s"
ger "Binärlogs und Replikation verhindern Wechsel des globalen Servers %s"
ER_NO_FILE_MAPPING
- eng "Can't map file: %-.64s, errno: %d"
+ eng "Can't map file: %-.200s, errno: %d"
ger "Kann Datei nicht abbilden: %-.64s, Fehler: %d"
ER_WRONG_MAGIC
eng "Wrong magic in %-.64s"
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index f8ec4531995..5b039f6bcc0 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -5471,7 +5471,7 @@ bool setup_tables_and_check_access(THD *thd,
for (; leaves_tmp; leaves_tmp= leaves_tmp->next_leaf)
{
if (leaves_tmp->belong_to_view &&
- check_one_table_access(thd, want_access, leaves_tmp))
+ check_single_table_access(thd, want_access, leaves_tmp))
{
tables->hide_view_error(thd);
return TRUE;
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 7fa0173ddea..c8c8ff16199 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1941,15 +1941,10 @@ bool select_dumpvar::send_data(List<Item> &items)
Item_func_set_user_var *xx;
Item_splocal *yy;
my_var *zz;
- DBUG_ENTER("send_data");
- if (unit->offset_limit_cnt)
- { // using limit offset,count
- unit->offset_limit_cnt--;
- DBUG_RETURN(0);
- }
+ DBUG_ENTER("select_dumpvar::send_data");
if (unit->offset_limit_cnt)
- { // Using limit offset,count
+ { // using limit offset,count
unit->offset_limit_cnt--;
DBUG_RETURN(0);
}
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index 8dd62fc8494..bcd1b99b91a 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -134,9 +134,9 @@ void lock_db_delete(const char *name, uint length)
{
my_dblock_t *opt;
safe_mutex_assert_owner(&LOCK_lock_db);
- opt= (my_dblock_t *)hash_search(&lock_db_cache, (const byte*) name, length);
- DBUG_ASSERT(opt != NULL);
- hash_delete(&lock_db_cache, (byte*) opt);
+ if ((opt= (my_dblock_t *)hash_search(&lock_db_cache,
+ (const byte*) name, length)))
+ hash_delete(&lock_db_cache, (byte*) opt);
}
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 1ecc7b78315..232df095816 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -3382,15 +3382,6 @@ end_with_restore_list:
&lex->value_list,
lex->duplicates, lex->ignore)))
{
- /*
- Skip first table, which is the table we are inserting in.
- Below we set context.table_list again because the call above to
- mysql_insert_select_prepare() calls resolve_in_table_list_only(),
- which in turn resets context.table_list and
- context.first_name_resolution_table.
- */
- select_lex->context.table_list=
- select_lex->context.first_name_resolution_table= second_table;
res= handle_select(thd, lex, result, OPTION_SETUP_TABLES_DONE);
/*
Invalidate the table in the query cache if something changed
@@ -5218,7 +5209,7 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
even if the query itself redirects the output.
*/
if (!(result= new select_send()))
- return 1;
+ return 1; /* purecov: inspected */
thd->send_explain_fields(result);
res= mysql_explain_union(thd, &thd->lex->unit, result);
if (lex->describe & DESCRIBE_EXTENDED)
@@ -5237,7 +5228,7 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
else
{
if (!result && !(result= new select_send()))
- return 1;
+ return 1; /* purecov: inspected */
query_cache_store_query(thd, all_tables);
res= handle_select(thd, lex, result, 0);
if (result != lex->result)
@@ -5249,11 +5240,10 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
/*
- Check grants for commands which work only with one table and all other
- tables belonging to subselects or implicitly opened tables.
+ Check grants for commands which work only with one table.
SYNOPSIS
- check_one_table_access()
+ check_single_table_access()
thd Thread handler
privilege requested privilege
all_tables global table list of query
@@ -5263,7 +5253,8 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
1 - access denied, error is sent to client
*/
-bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables)
+bool check_single_table_access(THD *thd, ulong privilege,
+ TABLE_LIST *all_tables)
{
Security_context * backup_ctx= thd->security_ctx;
@@ -5288,19 +5279,41 @@ bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables)
goto deny;
thd->security_ctx= backup_ctx;
+ return 0;
+
+deny:
+ thd->security_ctx= backup_ctx;
+ return 1;
+}
+
+/*
+ Check grants for commands which work only with one table and all other
+ tables belonging to subselects or implicitly opened tables.
+
+ SYNOPSIS
+ check_one_table_access()
+ thd Thread handler
+ privilege requested privilege
+ all_tables global table list of query
+
+ RETURN
+ 0 - OK
+ 1 - access denied, error is sent to client
+*/
+
+bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables)
+{
+ if (check_single_table_access(thd, privilege, all_tables))
+ return 1;
/* Check rights on tables of subselects and implicitly opened tables */
TABLE_LIST *subselects_tables;
if ((subselects_tables= all_tables->next_global))
{
if ((check_table_access(thd, SELECT_ACL, subselects_tables, 0)))
- goto deny;
+ return 1;
}
return 0;
-
-deny:
- thd->security_ctx= backup_ctx;
- return 1;
}
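The refactor above splits the old check in two: check_single_table_access() covers exactly the first table, and check_one_table_access() becomes a wrapper that adds the SELECT check on tables reached through next_global (subqueries, implicitly opened tables). A shape-only sketch with boolean stand-ins for the privilege logic:

    #include <cstdio>

    struct TABLE_LIST { const char *name; TABLE_LIST *next_global; };

    // Checks privileges on the first table only (stand-in logic).
    static bool check_single_table_access(TABLE_LIST *t) {
      return false;                      // false == OK, mirroring the 0/1 convention
    }

    // Wrapper: the first table, then everything reachable via next_global.
    static bool check_one_table_access(TABLE_LIST *t) {
      if (check_single_table_access(t))
        return true;
      for (TABLE_LIST *s = t->next_global; s; s = s->next_global)
        if (/* the SELECT_ACL check on s would go here */ false)
          return true;
      return false;
    }

    int main() {
      TABLE_LIST sub = { "subquery_table", 0 };
      TABLE_LIST top = { "t1", &sub };
      std::printf("denied: %d\n", (int)check_one_table_access(&top));
    }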
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 00c15c2dbca..44c0b8ffcd9 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -2103,26 +2103,15 @@ static inline longlong part_val_int(Item *item_expr)
static uint32 calculate_key_value(Field **field_array)
{
- uint32 hashnr= 0;
+ ulong nr1= 1;
ulong nr2= 4;
do
{
Field *field= *field_array;
- if (field->is_null())
- {
- hashnr^= (hashnr << 1) | 1;
- }
- else
- {
- uint len= field->pack_length();
- ulong nr1= 1;
- CHARSET_INFO *cs= field->charset();
- cs->coll->hash_sort(cs, (uchar*)field->ptr, len, &nr1, &nr2);
- hashnr^= (uint32)nr1;
- }
+ field->hash(&nr1, &nr2);
} while (*(++field_array));
- return hashnr;
+ return (uint32) nr1;
}
@@ -3834,14 +3823,13 @@ uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info,
if (alter_info->flags == ALTER_TABLE_REORG)
{
uint new_part_no, curr_part_no;
- ulonglong max_rows= table->s->max_rows;
if (tab_part_info->part_type != HASH_PARTITION ||
tab_part_info->use_default_no_partitions)
{
my_error(ER_REORG_NO_PARAM_ERROR, MYF(0));
DBUG_RETURN(TRUE);
}
- new_part_no= table->file->get_default_no_partitions(max_rows);
+ new_part_no= table->file->get_default_no_partitions(create_info);
curr_part_no= tab_part_info->no_parts;
if (new_part_no == curr_part_no)
{
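In the first hunk above, calculate_key_value() now delegates to Field::hash(), which folds every field, NULL or not, into one running (nr1, nr2) pair instead of XOR-combining per-field results. A standalone model of that accumulate-across-fields pattern, using a toy hash rather than the server's collation-aware one:

    #include <cstdio>
    #include <cstring>

    typedef unsigned long ulong;
    typedef unsigned int uint32;

    // Toy per-field hash that updates the running pair, like Field::hash().
    static void field_hash(const char *val, ulong *nr1, ulong *nr2) {
      if (!val) { *nr1 ^= (*nr1 << 1) | 1; return; }   // NULL handling folded in
      for (size_t i = 0; i < std::strlen(val); i++) {
        *nr1 ^= (((*nr1 & 63) + *nr2) * (unsigned char)val[i]) + (*nr1 << 8);
        *nr2 += 3;
      }
    }

    static uint32 calculate_key_value(const char **fields, size_t n) {
      ulong nr1 = 1, nr2 = 4;
      for (size_t i = 0; i < n; i++)
        field_hash(fields[i], &nr1, &nr2);   // one running state across all fields
      return (uint32)nr1;
    }

    int main() {
      const char *row[] = { "abc", 0, "xyz" };
      std::printf("hash: %u\n", calculate_key_value(row, 3));
    }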
diff --git a/sql/sql_partition.h b/sql/sql_partition.h
index 845180ad592..e34d71dfdc5 100644
--- a/sql/sql_partition.h
+++ b/sql/sql_partition.h
@@ -65,7 +65,7 @@ int get_part_for_delete(const byte *buf, const byte *rec0,
partition_info *part_info, uint32 *part_id);
void prune_partition_set(const TABLE *table, part_id_range *part_spec);
bool check_partition_info(partition_info *part_info,handlerton **eng_type,
- TABLE *table, handler *file, ulonglong max_rows);
+ TABLE *table, handler *file, HA_CREATE_INFO *info);
bool fix_partition_func(THD *thd, TABLE *table, bool create_table_ind);
char *generate_partition_syntax(partition_info *part_info,
uint *buf_length, bool use_sql_alloc,
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 2ecbc94541a..7d8631e3236 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -3185,8 +3185,7 @@ bool mysql_create_table_internal(THD *thd,
}
DBUG_PRINT("info", ("db_type = %d",
ha_legacy_type(part_info->default_engine_type)));
- if (part_info->check_partition_info(thd, &engine_type, file,
- create_info->max_rows))
+ if (part_info->check_partition_info(thd, &engine_type, file, create_info))
goto err;
part_info->default_engine_type= engine_type;
@@ -3224,7 +3223,8 @@ bool mysql_create_table_internal(THD *thd,
*/
if (part_info->use_default_no_partitions &&
part_info->no_parts &&
- (int)part_info->no_parts != file->get_default_no_partitions(0ULL))
+ (int)part_info->no_parts !=
+ file->get_default_no_partitions(create_info))
{
uint i;
List_iterator<partition_element> part_it(part_info->partitions);
@@ -3237,10 +3237,10 @@ bool mysql_create_table_internal(THD *thd,
part_info->use_default_no_subpartitions &&
part_info->no_subparts &&
(int)part_info->no_subparts !=
- file->get_default_no_partitions(0ULL))
+ file->get_default_no_partitions(create_info))
{
DBUG_ASSERT(thd->lex->sql_command != SQLCOM_CREATE_TABLE);
- part_info->no_subparts= file->get_default_no_partitions(0ULL);
+ part_info->no_subparts= file->get_default_no_partitions(create_info);
}
}
else if (create_info->db_type != engine_type)
diff --git a/sql/table.cc b/sql/table.cc
index a96ca0da881..c7b851949fc 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1025,27 +1025,6 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
if (share->key_info[key].flags & HA_FULLTEXT)
share->key_info[key].algorithm= HA_KEY_ALG_FULLTEXT;
- if (primary_key >= MAX_KEY && (keyinfo->flags & HA_NOSAME))
- {
- /*
- If the UNIQUE key doesn't have NULL columns and is not a part key
- declare this as a primary key.
- */
- primary_key=key;
- for (i=0 ; i < keyinfo->key_parts ;i++)
- {
- uint fieldnr= key_part[i].fieldnr;
- if (!fieldnr ||
- share->field[fieldnr-1]->null_ptr ||
- share->field[fieldnr-1]->key_length() !=
- key_part[i].length)
- {
- primary_key=MAX_KEY; // Can't be used
- break;
- }
- }
- }
-
for (i=0 ; i < keyinfo->key_parts ; key_part++,i++)
{
Field *field;
diff --git a/storage/ndb/include/kernel/GlobalSignalNumbers.h b/storage/ndb/include/kernel/GlobalSignalNumbers.h
index 950b2629d9e..4b2c69e4bc6 100644
--- a/storage/ndb/include/kernel/GlobalSignalNumbers.h
+++ b/storage/ndb/include/kernel/GlobalSignalNumbers.h
@@ -691,6 +691,8 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_BACKUP_FRAGMENT_REF 546
#define GSN_BACKUP_FRAGMENT_CONF 547
+#define GSN_BACKUP_FRAGMENT_COMPLETE_REP 575
+
#define GSN_STOP_BACKUP_REQ 548
#define GSN_STOP_BACKUP_REF 549
#define GSN_STOP_BACKUP_CONF 550
@@ -740,7 +742,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_SUB_STOP_REQ 572
#define GSN_SUB_STOP_REF 573
#define GSN_SUB_STOP_CONF 574
-/* 575 unused */
+/* 575 used by GSN_BACKUP_FRAGMENT_COMPLETE_REP */
#define GSN_SUB_CREATE_REQ 576
#define GSN_SUB_CREATE_REF 577
#define GSN_SUB_CREATE_CONF 578
diff --git a/storage/ndb/include/kernel/signaldata/BackupContinueB.hpp b/storage/ndb/include/kernel/signaldata/BackupContinueB.hpp
index d3d3f79f310..fe3f48444ec 100644
--- a/storage/ndb/include/kernel/signaldata/BackupContinueB.hpp
+++ b/storage/ndb/include/kernel/signaldata/BackupContinueB.hpp
@@ -31,7 +31,8 @@ private:
BUFFER_UNDERFLOW = 1,
BUFFER_FULL_SCAN = 2,
BUFFER_FULL_FRAG_COMPLETE = 3,
- BUFFER_FULL_META = 4
+ BUFFER_FULL_META = 4,
+ BACKUP_FRAGMENT_INFO = 5
};
};
diff --git a/storage/ndb/include/kernel/signaldata/BackupImpl.hpp b/storage/ndb/include/kernel/signaldata/BackupImpl.hpp
index c7bfd07a63d..82fd24558b7 100644
--- a/storage/ndb/include/kernel/signaldata/BackupImpl.hpp
+++ b/storage/ndb/include/kernel/signaldata/BackupImpl.hpp
@@ -252,15 +252,31 @@ class BackupFragmentConf {
friend bool printBACKUP_FRAGMENT_CONF(FILE *, const Uint32 *, Uint32, Uint16);
public:
- STATIC_CONST( SignalLength = 6 );
+ STATIC_CONST( SignalLength = 8 );
private:
Uint32 backupId;
Uint32 backupPtr;
Uint32 tableId;
Uint32 fragmentNo;
- Uint32 noOfRecords;
- Uint32 noOfBytes;
+ Uint32 noOfRecordsLow;
+ Uint32 noOfBytesLow;
+ Uint32 noOfRecordsHigh;
+ Uint32 noOfBytesHigh;
+};
+
+class BackupFragmentCompleteRep {
+public:
+ STATIC_CONST( SignalLength = 8 );
+
+ Uint32 backupId;
+ Uint32 backupPtr;
+ Uint32 tableId;
+ Uint32 fragmentNo;
+ Uint32 noOfTableRowsLow;
+ Uint32 noOfFragmentRowsLow;
+ Uint32 noOfTableRowsHigh;
+ Uint32 noOfFragmentRowsHigh;
};
class StopBackupReq {
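Signal words are 32-bit, so every 64-bit counter in these signal classes travels as a Low/High pair; the expression low + (((Uint64)high) << 32) recurs through the rest of this patch. The round trip in isolation:

    #include <cstdio>

    typedef unsigned int Uint32;
    typedef unsigned long long Uint64;

    int main() {
      Uint64 noOfBytes = 0x1234567890ULL;       // > 4 GB, overflows a Uint32

      // Split for the signal...
      Uint32 lo = (Uint32)(noOfBytes & 0xFFFFFFFF);
      Uint32 hi = (Uint32)(noOfBytes >> 32);

      // ...and reassemble on the receiving side.
      Uint64 back = lo + (((Uint64)hi) << 32);
      std::printf("round trip ok: %d\n", back == noOfBytes);
    }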
diff --git a/storage/ndb/include/kernel/signaldata/BackupSignalData.hpp b/storage/ndb/include/kernel/signaldata/BackupSignalData.hpp
index e1b8c6203a1..9e34ea3a211 100644
--- a/storage/ndb/include/kernel/signaldata/BackupSignalData.hpp
+++ b/storage/ndb/include/kernel/signaldata/BackupSignalData.hpp
@@ -201,17 +201,19 @@ class BackupCompleteRep {
friend bool printBACKUP_COMPLETE_REP(FILE *, const Uint32 *, Uint32, Uint16);
public:
- STATIC_CONST( SignalLength = 8 + NdbNodeBitmask::Size );
+ STATIC_CONST( SignalLength = 10 + NdbNodeBitmask::Size );
private:
Uint32 senderData;
Uint32 backupId;
Uint32 startGCP;
Uint32 stopGCP;
- Uint32 noOfBytes;
- Uint32 noOfRecords;
+ Uint32 noOfBytesLow;
+ Uint32 noOfRecordsLow;
Uint32 noOfLogBytes;
Uint32 noOfLogRecords;
NdbNodeBitmask nodes;
+ Uint32 noOfBytesHigh;
+ Uint32 noOfRecordsHigh;
};
/**
diff --git a/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp b/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp
index 810f9cdfd03..59e4a33b89d 100644
--- a/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp
+++ b/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp
@@ -159,7 +159,8 @@ struct CreateFileRef {
InvalidFilegroupVersion = 754,
FilenameAlreadyExists = 760,
OutOfFileRecords = 751,
- InvalidFileType = 750
+ InvalidFileType = 750,
+ NotSupportedWhenDiskless = 775
};
Uint32 senderData;
@@ -193,6 +194,7 @@ struct CreateFileConf {
Uint32 senderData;
Uint32 senderRef;
Uint32 fileId;
+ Uint32 fileVersion;
};
#endif
diff --git a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
index 42e06fba381..1382b09eabf 100644
--- a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
+++ b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
@@ -117,6 +117,7 @@ public:
CustomTriggerId = 25,
FrmLen = 26,
FrmData = 27,
+
FragmentCount = 128, // No of fragments in table (!fragment replicas)
FragmentDataLen = 129,
FragmentData = 130, // CREATE_FRAGMENTATION reply
@@ -132,6 +133,8 @@ public:
MaxRowsHigh = 140,
DefaultNoPartFlag = 141,
LinearHashFlag = 142,
+ MinRowsLow = 143,
+ MinRowsHigh = 144,
RowGCIFlag = 150,
RowChecksumFlag = 151,
@@ -312,8 +315,6 @@ public:
Uint32 CustomTriggerId;
Uint32 TablespaceId;
Uint32 TablespaceVersion;
- Uint32 MaxRowsLow;
- Uint32 MaxRowsHigh;
Uint32 DefaultNoPartFlag;
Uint32 LinearHashFlag;
/*
@@ -328,6 +329,12 @@ public:
Uint16 ReplicaData[MAX_FRAGMENT_DATA_BYTES];
Uint32 FragmentDataLen;
Uint16 FragmentData[3*MAX_NDB_PARTITIONS];
+
+ Uint32 MaxRowsLow;
+ Uint32 MaxRowsHigh;
+ Uint32 MinRowsLow;
+ Uint32 MinRowsHigh;
+
Uint32 TablespaceDataLen;
Uint32 TablespaceData[2*MAX_NDB_PARTITIONS];
Uint32 RangeListDataLen;
diff --git a/storage/ndb/include/kernel/signaldata/LqhFrag.hpp b/storage/ndb/include/kernel/signaldata/LqhFrag.hpp
index 05845d5fd21..97481ea2c3e 100644
--- a/storage/ndb/include/kernel/signaldata/LqhFrag.hpp
+++ b/storage/ndb/include/kernel/signaldata/LqhFrag.hpp
@@ -106,7 +106,7 @@ class LqhFragReq {
friend bool printLQH_FRAG_REQ(FILE *, const Uint32 *, Uint32, Uint16);
public:
- STATIC_CONST( SignalLength = 20 );
+ STATIC_CONST( SignalLength = 24 );
enum RequestInfo {
CreateInRunning = 0x8000000,
@@ -123,7 +123,7 @@ private:
Uint32 kValue;
Uint32 schemaVersion;
Uint32 nextLCP;
- Uint32 noOfNewAttr; // noOfCharsets in upper half
+ Uint32 noOfCharsets;
Uint32 startGci;
Uint32 tableType; // DictTabInfo::TableType
Uint32 primaryTableId; // table of index or RNIL
@@ -140,6 +140,10 @@ private:
Uint8 checksumIndicator;
Uint8 GCPIndicator;
Uint32 logPartId;
+ Uint32 maxRowsLow;
+ Uint32 maxRowsHigh;
+ Uint32 minRowsLow;
+ Uint32 minRowsHigh;
};
class LqhFragConf {
diff --git a/storage/ndb/include/kernel/signaldata/SystemError.hpp b/storage/ndb/include/kernel/signaldata/SystemError.hpp
index b3646a858f6..afc25fb004f 100644
--- a/storage/ndb/include/kernel/signaldata/SystemError.hpp
+++ b/storage/ndb/include/kernel/signaldata/SystemError.hpp
@@ -45,7 +45,8 @@ public:
CopyFragRefError = 5,
TestStopOnError = 6,
CopySubscriptionRef = 7,
- CopySubscriberRef = 8
+ CopySubscriberRef = 8,
+ StartFragRefError = 9
};
Uint32 errorRef;
diff --git a/storage/ndb/include/kernel/signaldata/TupFrag.hpp b/storage/ndb/include/kernel/signaldata/TupFrag.hpp
index d8f2139de61..bc44877bb1c 100644
--- a/storage/ndb/include/kernel/signaldata/TupFrag.hpp
+++ b/storage/ndb/include/kernel/signaldata/TupFrag.hpp
@@ -30,7 +30,7 @@ class TupFragReq {
friend class Dblqh;
friend class Dbtup;
public:
- STATIC_CONST( SignalLength = 15 );
+ STATIC_CONST( SignalLength = 17 );
private:
Uint32 userPtr;
Uint32 userRef;
@@ -38,7 +38,16 @@ private:
Uint32 tableId;
Uint32 noOfAttr;
Uint32 fragId;
- Uint32 todo[8];
+ Uint32 maxRowsLow;
+ Uint32 maxRowsHigh;
+ Uint32 minRowsLow;
+ Uint32 minRowsHigh;
+ Uint32 noOfNullAttr;
+ Uint32 schemaVersion;
+ Uint32 noOfKeyAttr;
+ Uint32 noOfCharsets;
+ Uint32 checksumIndicator;
+ Uint32 globalCheckpointIdIndicator;
Uint32 tablespaceid;
};
diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp
index 27e0aede36d..ea4a2a9ca29 100644
--- a/storage/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp
@@ -163,6 +163,31 @@ public:
};
};
+ class Dictionary; // Forward declaration
+
+ class ObjectId : public Object
+ {
+ public:
+ ObjectId();
+ virtual ~ObjectId();
+
+ /**
+ * Get status of object
+ */
+ virtual Status getObjectStatus() const;
+
+ /**
+ * Get version of object
+ */
+ virtual int getObjectVersion() const;
+
+ virtual int getObjectId() const;
+
+ private:
+ friend class NdbDictObjectImpl;
+ class NdbDictObjectImpl & m_impl;
+ };
+
class Table; // forward declaration
class Tablespace; // forward declaration
// class NdbEventOperation; // forward declaration
@@ -806,13 +831,6 @@ public:
virtual int getObjectVersion() const;
/**
- * Set/Get Maximum number of rows in table (only used to calculate
- * number of partitions).
- */
- void setMaxRows(Uint64 maxRows);
- Uint64 getMaxRows() const;
-
- /**
* Set/Get indicator if default number of partitions is used in table.
*/
void setDefaultNoPartitionsFlag(Uint32 indicator);
@@ -863,6 +881,20 @@ public:
*/
void setObjectType(Object::Type type);
+ /**
+ * Set/Get Maximum number of rows in table (only used to calculate
+ * number of partitions).
+ */
+ void setMaxRows(Uint64 maxRows);
+ Uint64 getMaxRows() const;
+
+ /**
+ * Set/Get Minimum number of rows in table (only used to calculate
+ * number of partitions).
+ */
+ void setMinRows(Uint64 minRows);
+ Uint64 getMinRows() const;
+
/** @} *******************************************************************/
/**
@@ -1437,11 +1469,11 @@ public:
void setSize(Uint64);
Uint64 getSize() const;
Uint64 getFree() const;
-
+
void setTablespace(const char * name);
void setTablespace(const class Tablespace &);
const char * getTablespace() const;
- Uint32 getTablespaceId() const;
+ void getTablespaceId(ObjectId * dst) const;
void setNode(Uint32 nodeId);
Uint32 getNode() const;
@@ -1484,7 +1516,7 @@ public:
void setLogfileGroup(const char * name);
void setLogfileGroup(const class LogfileGroup &);
const char * getLogfileGroup() const;
- Uint32 getLogfileGroupId() const;
+ void getLogfileGroupId(ObjectId * dst) const;
void setNode(Uint32 nodeId);
Uint32 getNode() const;
@@ -1781,20 +1813,20 @@ public:
* @{
*/
- int createLogfileGroup(const LogfileGroup &);
+ int createLogfileGroup(const LogfileGroup &, ObjectId* = 0);
int dropLogfileGroup(const LogfileGroup&);
LogfileGroup getLogfileGroup(const char * name);
- int createTablespace(const Tablespace &);
+ int createTablespace(const Tablespace &, ObjectId* = 0);
int dropTablespace(const Tablespace&);
Tablespace getTablespace(const char * name);
Tablespace getTablespace(Uint32 tablespaceId);
- int createDatafile(const Datafile &, bool overwrite_existing = false);
+ int createDatafile(const Datafile &, bool overwrite_existing = false, ObjectId* = 0);
int dropDatafile(const Datafile&);
Datafile getDatafile(Uint32 node, const char * path);
- int createUndofile(const Undofile &, bool overwrite_existing = false);
+ int createUndofile(const Undofile &, bool overwrite_existing = false, ObjectId * = 0);
int dropUndofile(const Undofile&);
Undofile getUndofile(Uint32 node, const char * path);
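getTablespaceId() and getLogfileGroupId() switch from returning a bare Uint32 to filling the new ObjectId, which also carries object status and version, and the create* calls can report the id of what they created through the same type. A call-site sketch, assuming an already-connected dictionary; error handling omitted:

    #include <NdbDictionary.hpp>

    void show_tablespace_id(NdbDictionary::Dictionary *dict)
    {
      NdbDictionary::Datafile df = dict->getDatafile(0, "data1.dat");

      NdbDictionary::ObjectId id;
      df.getTablespaceId(&id);       // out-parameter instead of a returned Uint32
      // id now carries the numeric id plus object status/version.
      int tablespace_id = id.getObjectId();
      (void)tablespace_id;
    }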
diff --git a/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp b/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp
index 15b07f5d598..eeafe98b46e 100644
--- a/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp
+++ b/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp
@@ -42,7 +42,9 @@ public:
* @param parallel No of fragments to scan in parallel (0=max)
*/
virtual int readTuples(LockMode lock_mode = LM_Read,
- Uint32 scan_flags = 0, Uint32 parallel = 0);
+ Uint32 scan_flags = 0,
+ Uint32 parallel = 0,
+ Uint32 batch = 0);
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
@@ -70,7 +72,7 @@ public:
(SF_ReadRangeNo & -(Int32)read_range_no) |
(SF_KeyInfo & -(Int32)keyinfo);
- return readTuples(lock_mode, scan_flags, parallel);
+ return readTuples(lock_mode, scan_flags, parallel, batch);
}
#endif
diff --git a/storage/ndb/include/ndbapi/NdbScanOperation.hpp b/storage/ndb/include/ndbapi/NdbScanOperation.hpp
index 0c1fe6d4eaf..34f62defa66 100644
--- a/storage/ndb/include/ndbapi/NdbScanOperation.hpp
+++ b/storage/ndb/include/ndbapi/NdbScanOperation.hpp
@@ -58,7 +58,9 @@ public:
*/
virtual
int readTuples(LockMode lock_mode = LM_Read,
- Uint32 scan_flags = 0, Uint32 parallel = 0);
+ Uint32 scan_flags = 0,
+ Uint32 parallel = 0,
+ Uint32 batch = 0);
#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
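Both scan classes gain a trailing batch argument defaulting to 0 (automatic), so existing callers compile unchanged while new callers can cap rows per batch. A hedged call-site sketch, assuming scan was created on an open transaction:

    #include <NdbScanOperation.hpp>

    int start_scan(NdbScanOperation *scan)
    {
      // Same defaults as before; the new 4th argument caps rows per batch.
      return scan->readTuples(NdbScanOperation::LM_Read,
                              /* scan_flags */ 0,
                              /* parallel   */ 0,
                              /* batch      */ 64);
    }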
diff --git a/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp b/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp
index 8d624ea311e..bc321203590 100644
--- a/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp
@@ -92,8 +92,10 @@ printBACKUP_FRAGMENT_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 b){
BackupFragmentConf* sig = (BackupFragmentConf*)data;
fprintf(out, " backupPtr: %d backupId: %d\n",
sig->backupPtr, sig->backupId);
- fprintf(out, " tableId: %d fragmentNo: %d records: %d bytes: %d\n",
- sig->tableId, sig->fragmentNo, sig->noOfRecords, sig->noOfBytes);
+ fprintf(out, " tableId: %d fragmentNo: %d records: %llu bytes: %llu\n",
+ sig->tableId, sig->fragmentNo,
+ sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32),
+ sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32));
return true;
}
diff --git a/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp b/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp
index 4b0a0e07b66..27fed22ac72 100644
--- a/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp
@@ -72,11 +72,11 @@ printBACKUP_ABORT_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 bno){
bool
printBACKUP_COMPLETE_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 b){
BackupCompleteRep* sig = (BackupCompleteRep*)data;
- fprintf(out, " senderData: %d backupId: %d records: %d bytes: %d\n",
+ fprintf(out, " senderData: %d backupId: %d records: %llu bytes: %llu\n",
sig->senderData,
sig->backupId,
- sig->noOfRecords,
- sig->noOfBytes);
+ sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32),
+ sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32));
return true;
}
diff --git a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
index e46fc2ff7da..2ed97892488 100644
--- a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
@@ -62,6 +62,10 @@ DictTabInfo::TableMapping[] = {
DTIMAP(Table, TablespaceVersion, TablespaceVersion),
DTIMAP(Table, RowGCIFlag, RowGCIFlag),
DTIMAP(Table, RowChecksumFlag, RowChecksumFlag),
+ DTIMAP(Table, MaxRowsLow, MaxRowsLow),
+ DTIMAP(Table, MaxRowsHigh, MaxRowsHigh),
+ DTIMAP(Table, MinRowsLow, MinRowsLow),
+ DTIMAP(Table, MinRowsHigh, MinRowsHigh),
DTIBREAK(AttributeName)
};
@@ -154,6 +158,11 @@ DictTabInfo::Table::init(){
RowGCIFlag = ~0;
RowChecksumFlag = ~0;
+
+ MaxRowsLow = 0;
+ MaxRowsHigh = 0;
+ MinRowsLow = 0;
+ MinRowsHigh = 0;
}
void
diff --git a/storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp b/storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp
index 6d727959a67..3175582c3a2 100644
--- a/storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp
@@ -37,8 +37,10 @@ printLQH_FRAG_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 recB
fprintf(output, " noOfAttributes: %d noOfNullAttributes: %d keyLength: %d\n",
sig->noOfAttributes, sig->noOfNullAttributes, sig->keyLength);
- fprintf(output, " noOfPagesToPreAllocate: %d schemaVersion: %d nextLCP: %d\n",
- sig->noOfPagesToPreAllocate, sig->schemaVersion, sig->nextLCP);
+ fprintf(output, " maxRowsLow/High: %u/%u minRowsLow/High: %u/%u\n",
+ sig->maxRowsLow, sig->maxRowsHigh, sig->minRowsLow, sig->minRowsHigh);
+ fprintf(output, " schemaVersion: %d nextLCP: %d\n",
+ sig->schemaVersion, sig->nextLCP);
return true;
}
diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp
index 07df1db862b..2e8d8b548ce 100644
--- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -170,6 +170,65 @@ Backup::execCONTINUEB(Signal* signal)
const Uint32 Tdata2 = signal->theData[2];
switch(Tdata0) {
+ case BackupContinueB::BACKUP_FRAGMENT_INFO:
+ {
+ const Uint32 ptr_I = Tdata1;
+ Uint32 tabPtr_I = Tdata2;
+ Uint32 fragPtr_I = signal->theData[3];
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptr_I);
+ TablePtr tabPtr;
+ ptr.p->tables.getPtr(tabPtr, tabPtr_I);
+ FragmentPtr fragPtr;
+ tabPtr.p->fragments.getPtr(fragPtr, fragPtr_I);
+
+ BackupFilePtr filePtr;
+ ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
+
+ const Uint32 sz = sizeof(BackupFormat::CtlFile::FragmentInfo) >> 2;
+ Uint32 * dst;
+ if (!filePtr.p->operation.dataBuffer.getWritePtr(&dst, sz))
+ {
+ sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 4);
+ return;
+ }
+
+ BackupFormat::CtlFile::FragmentInfo * fragInfo =
+ (BackupFormat::CtlFile::FragmentInfo*)dst;
+ fragInfo->SectionType = htonl(BackupFormat::FRAGMENT_INFO);
+ fragInfo->SectionLength = htonl(sz);
+ fragInfo->TableId = htonl(fragPtr.p->tableId);
+ fragInfo->FragmentNo = htonl(fragPtr_I);
+ fragInfo->NoOfRecordsLow = htonl(fragPtr.p->noOfRecords & 0xFFFFFFFF);
+ fragInfo->NoOfRecordsHigh = htonl(fragPtr.p->noOfRecords >> 32);
+ fragInfo->FilePosLow = htonl(0 & 0xFFFFFFFF);
+ fragInfo->FilePosHigh = htonl(0 >> 32);
+
+ filePtr.p->operation.dataBuffer.updateWritePtr(sz);
+
+ fragPtr_I++;
+ if (fragPtr_I == tabPtr.p->fragments.getSize())
+ {
+ signal->theData[0] = tabPtr.p->tableId;
+ signal->theData[1] = 0; // unlock
+ EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2);
+
+ fragPtr_I = 0;
+ ptr.p->tables.next(tabPtr);
+ if ((tabPtr_I = tabPtr.i) == RNIL)
+ {
+ closeFiles(signal, ptr);
+ return;
+ }
+ }
+ signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO;
+ signal->theData[1] = ptr_I;
+ signal->theData[2] = tabPtr_I;
+ signal->theData[3] = fragPtr_I;
+ sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB);
+ return;
+ }
case BackupContinueB::START_FILE_THREAD:
case BackupContinueB::BUFFER_UNDERFLOW:
{
@@ -372,7 +431,7 @@ Backup::findTable(const BackupRecordPtr & ptr,
return false;
}
-static Uint32 xps(Uint32 x, Uint64 ms)
+static Uint32 xps(Uint64 x, Uint64 ms)
{
float fx = x;
float fs = ms;
@@ -386,9 +445,9 @@ static Uint32 xps(Uint32 x, Uint64 ms)
}
struct Number {
- Number(Uint32 r) { val = r;}
- Number & operator=(Uint32 r) { val = r; return * this; }
- Uint32 val;
+ Number(Uint64 r) { val = r;}
+ Number & operator=(Uint64 r) { val = r; return * this; }
+ Uint64 val;
};
NdbOut &
@@ -462,8 +521,10 @@ Backup::execBACKUP_COMPLETE_REP(Signal* signal)
startTime = NdbTick_CurrentMillisecond() - startTime;
ndbout_c("Backup %d has completed", rep->backupId);
- const Uint32 bytes = rep->noOfBytes;
- const Uint32 records = rep->noOfRecords;
+ const Uint64 bytes =
+ rep->noOfBytesLow + (((Uint64)rep->noOfBytesHigh) << 32);
+ const Uint64 records =
+ rep->noOfRecordsLow + (((Uint64)rep->noOfRecordsHigh) << 32);
Number rps = xps(records, startTime);
Number bps = xps(bytes, startTime);
@@ -1722,8 +1783,10 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal)
const Uint32 tableId = conf->tableId;
const Uint32 fragmentNo = conf->fragmentNo;
const Uint32 nodeId = refToNode(signal->senderBlockRef());
- const Uint32 noOfBytes = conf->noOfBytes;
- const Uint32 noOfRecords = conf->noOfRecords;
+ const Uint64 noOfBytes =
+ conf->noOfBytesLow + (((Uint64)conf->noOfBytesHigh) << 32);
+ const Uint64 noOfRecords =
+ conf->noOfRecordsLow + (((Uint64)conf->noOfRecordsHigh) << 32);
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
@@ -1735,9 +1798,13 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal)
TablePtr tabPtr;
ndbrequire(findTable(ptr, tabPtr, tableId));
+ tabPtr.p->noOfRecords += noOfRecords;
+
FragmentPtr fragPtr;
tabPtr.p->fragments.getPtr(fragPtr, fragmentNo);
+ fragPtr.p->noOfRecords = noOfRecords;
+
ndbrequire(fragPtr.p->scanned == 0);
ndbrequire(fragPtr.p->scanning == 1);
ndbrequire(fragPtr.p->node == nodeId);
@@ -1761,6 +1828,24 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal)
}
else
{
+ NodeBitmask nodes = ptr.p->nodes;
+ nodes.clear(getOwnNodeId());
+ if (!nodes.isclear())
+ {
+ BackupFragmentCompleteRep *rep =
+ (BackupFragmentCompleteRep*)signal->getDataPtrSend();
+ rep->backupId = ptr.p->backupId;
+ rep->backupPtr = ptr.i;
+ rep->tableId = tableId;
+ rep->fragmentNo = fragmentNo;
+ rep->noOfTableRowsLow = (Uint32)(tabPtr.p->noOfRecords & 0xFFFFFFFF);
+ rep->noOfTableRowsHigh = (Uint32)(tabPtr.p->noOfRecords >> 32);
+ rep->noOfFragmentRowsLow = (Uint32)(noOfRecords & 0xFFFFFFFF);
+ rep->noOfFragmentRowsHigh = (Uint32)(noOfRecords >> 32);
+ NodeReceiverGroup rg(BACKUP, ptr.p->nodes);
+ sendSignal(rg, GSN_BACKUP_FRAGMENT_COMPLETE_REP, signal,
+ BackupFragmentCompleteRep::SignalLength, JBB);
+ }
nextFragment(signal, ptr);
}
}
@@ -1823,6 +1908,29 @@ err:
execABORT_BACKUP_ORD(signal);
}
+void
+Backup::execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal)
+{
+ jamEntry();
+ BackupFragmentCompleteRep * rep =
+ (BackupFragmentCompleteRep*)signal->getDataPtr();
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, rep->backupPtr);
+
+ TablePtr tabPtr;
+ ndbrequire(findTable(ptr, tabPtr, rep->tableId));
+
+ tabPtr.p->noOfRecords =
+ rep->noOfTableRowsLow + (((Uint64)rep->noOfTableRowsHigh) << 32);
+
+ FragmentPtr fragPtr;
+ tabPtr.p->fragments.getPtr(fragPtr, rep->fragmentNo);
+
+ fragPtr.p->noOfRecords =
+ rep->noOfFragmentRowsLow + (((Uint64)rep->noOfFragmentRowsHigh) << 32);
+}
+
/*****************************************************************************
*
* Slave functionality - Drop triggers
@@ -1876,19 +1984,18 @@ Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr)
gcp->StartGCP = htonl(ptr.p->startGCP);
gcp->StopGCP = htonl(ptr.p->stopGCP - 1);
filePtr.p->operation.dataBuffer.updateWritePtr(gcpSz);
- }
- { // UNLOCK while dropping trigger for better timeslicing
- TablePtr tabPtr;
- for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL;
- ptr.p->tables.next(tabPtr))
{
- signal->theData[0] = tabPtr.p->tableId;
- signal->theData[1] = 0; // unlock
- EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2);
+ TablePtr tabPtr;
+ ptr.p->tables.first(tabPtr);
+
+ signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO;
+ signal->theData[1] = ptr.i;
+ signal->theData[2] = tabPtr.i;
+ signal->theData[3] = 0;
+ sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB);
}
}
- closeFiles(signal, ptr);
}
}
@@ -2051,8 +2158,10 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
rep->senderData = ptr.p->clientData;
rep->startGCP = ptr.p->startGCP;
rep->stopGCP = ptr.p->stopGCP;
- rep->noOfBytes = ptr.p->noOfBytes;
- rep->noOfRecords = ptr.p->noOfRecords;
+ rep->noOfBytesLow = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF);
+ rep->noOfRecordsLow = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF);
+ rep->noOfBytesHigh = (Uint32)(ptr.p->noOfBytes >> 32);
+ rep->noOfRecordsHigh = (Uint32)(ptr.p->noOfRecords >> 32);
rep->noOfLogBytes = ptr.p->noOfLogBytes;
rep->noOfLogRecords = ptr.p->noOfLogRecords;
rep->nodes = ptr.p->nodes;
@@ -2065,12 +2174,14 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
signal->theData[2] = ptr.p->backupId;
signal->theData[3] = ptr.p->startGCP;
signal->theData[4] = ptr.p->stopGCP;
- signal->theData[5] = ptr.p->noOfBytes;
- signal->theData[6] = ptr.p->noOfRecords;
+ signal->theData[5] = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF);
+ signal->theData[6] = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF);
signal->theData[7] = ptr.p->noOfLogBytes;
signal->theData[8] = ptr.p->noOfLogRecords;
ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9);
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB);
+ signal->theData[9+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfBytes >> 32);
+ signal->theData[10+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfRecords >> 32);
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 11+NdbNodeBitmask::Size, JBB);
}
else
{
@@ -2912,6 +3023,7 @@ Backup::parseTableDescription(Signal* signal,
/**
* Initialize table object
*/
+ tabPtr.p->noOfRecords = 0;
tabPtr.p->schemaVersion = tmpTab.TableVersion;
tabPtr.p->noOfAttributes = tmpTab.NoOfAttributes;
tabPtr.p->noOfNull = 0;
@@ -3637,9 +3749,10 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr)
conf->backupPtr = ptr.i;
conf->tableId = filePtr.p->tableId;
conf->fragmentNo = filePtr.p->fragmentNo;
- conf->noOfRecords = op.noOfRecords;
- conf->noOfBytes = op.noOfBytes;
-
+ conf->noOfRecordsLow = (Uint32)(op.noOfRecords & 0xFFFFFFFF);
+ conf->noOfRecordsHigh = (Uint32)(op.noOfRecords >> 32);
+ conf->noOfBytesLow = (Uint32)(op.noOfBytes & 0xFFFFFFFF);
+ conf->noOfBytesHigh = (Uint32)(op.noOfBytes >> 32);
sendSignal(ptr.p->masterRef, GSN_BACKUP_FRAGMENT_CONF, signal,
BackupFragmentConf::SignalLength, JBB);
@@ -3819,19 +3932,37 @@ Backup::checkFile(Signal* signal, BackupFilePtr filePtr)
FsAppendReq::SignalLength, JBA);
return;
}//if
-
- filePtr.p->fileRunning = 0;
- filePtr.p->fileClosing = 1;
-
- FsCloseReq * req = (FsCloseReq *)signal->getDataPtrSend();
- req->filePointer = filePtr.p->filePointer;
- req->userPointer = filePtr.i;
- req->userReference = reference();
- req->fileFlag = 0;
+
+#ifdef DEBUG_ABORT
+ Uint32 running= filePtr.p->fileRunning;
+ Uint32 closing= filePtr.p->fileClosing;
+#endif
+
+ if(!filePtr.p->fileClosing)
+ {
+ filePtr.p->fileRunning = 0;
+ filePtr.p->fileClosing = 1;
+
+ FsCloseReq * req = (FsCloseReq *)signal->getDataPtrSend();
+ req->filePointer = filePtr.p->filePointer;
+ req->userPointer = filePtr.i;
+ req->userReference = reference();
+ req->fileFlag = 0;
#ifdef DEBUG_ABORT
- ndbout_c("***** a FSCLOSEREQ filePtr.i = %u", filePtr.i);
+ ndbout_c("***** a FSCLOSEREQ filePtr.i = %u run=%d cl=%d", filePtr.i,
+ running, closing);
#endif
- sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, FsCloseReq::SignalLength, JBA);
+ sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, FsCloseReq::SignalLength, JBA);
+ }
+ else
+ {
+#ifdef DEBUG_ABORT
+ ndbout_c("***** a NOT SENDING FSCLOSEREQ filePtr.i = %u run=%d cl=%d",
+ filePtr.i,
+ running, closing);
+#endif
+
+ }
}
@@ -4082,9 +4213,7 @@ Backup::closeFiles(Signal* sig, BackupRecordPtr ptr)
jam();
continue;
}//if
-
- filePtr.p->fileClosing = 1;
-
+
if(filePtr.p->fileRunning == 1){
jam();
#ifdef DEBUG_ABORT
@@ -4093,7 +4222,10 @@ Backup::closeFiles(Signal* sig, BackupRecordPtr ptr)
filePtr.p->operation.dataBuffer.eof();
} else {
jam();
-
+ filePtr.p->fileClosing = 1;
+ filePtr.p->operation.dataBuffer.eof();
+ checkFile(sig, filePtr); // make sure we write everything before closing
+
FsCloseReq * req = (FsCloseReq *)sig->getDataPtrSend();
req->filePointer = filePtr.p->filePointer;
req->userPointer = filePtr.i;
@@ -4555,7 +4687,6 @@ Backup::execLCP_PREPARE_REQ(Signal* signal)
jam();
BackupFilePtr filePtr;
c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr);
- filePtr.p->fileClosing = 1;
filePtr.p->operation.dataBuffer.eof();
}
@@ -4647,7 +4778,6 @@ Backup::execEND_LCPREQ(Signal* signal)
BackupFilePtr filePtr;
c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr);
- filePtr.p->fileClosing = 1;
filePtr.p->operation.dataBuffer.eof();
return;
}
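The new BACKUP_FRAGMENT_INFO case above writes one fragment-info record per CONTINUEB round trip, re-sending the signal to itself with advanced (table, fragment) indices, and re-queues the same indices with a delay when the data buffer is full, so a large backup never monopolizes the block thread. A standalone model of the self-continuation loop:

    #include <cstdio>
    #include <queue>

    struct Continue { unsigned tab, frag; };

    static const unsigned TABLES = 2, FRAGS = 3;

    int main() {
      std::queue<Continue> signals;            // stands in for the signal queue
      signals.push(Continue{0, 0});

      while (!signals.empty()) {
        Continue c = signals.front(); signals.pop();

        std::printf("write fragment info tab=%u frag=%u\n", c.tab, c.frag);

        // Advance, rolling over to the next table at the end of a table.
        if (++c.frag == FRAGS) { c.frag = 0; ++c.tab; }
        if (c.tab == TABLES)
          break;                               // done: close the files
        signals.push(c);                       // CONTINUEB to ourselves
      }
    }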
diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.hpp b/storage/ndb/src/kernel/blocks/backup/Backup.hpp
index 73f898261ca..afacf01ab2f 100644
--- a/storage/ndb/src/kernel/blocks/backup/Backup.hpp
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.hpp
@@ -70,6 +70,7 @@ protected:
void execBACKUP_DATA(Signal* signal);
void execSTART_BACKUP_REQ(Signal* signal);
void execBACKUP_FRAGMENT_REQ(Signal* signal);
+ void execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal);
void execSTOP_BACKUP_REQ(Signal* signal);
void execBACKUP_STATUS_REQ(Signal* signal);
void execABORT_BACKUP_ORD(Signal* signal);
@@ -192,6 +193,7 @@ public:
typedef Ptr<Attribute> AttributePtr;
struct Fragment {
+ Uint64 noOfRecords;
Uint32 tableId;
Uint16 node;
Uint16 fragmentId;
@@ -205,6 +207,8 @@ public:
struct Table {
Table(ArrayPool<Attribute> &, ArrayPool<Fragment> &);
+ Uint64 noOfRecords;
+
Uint32 tableId;
Uint32 schemaVersion;
Uint32 tableType;
@@ -280,8 +284,8 @@ public:
Uint32 tablePtr; // Ptr.i to current table
FsBuffer dataBuffer;
- Uint32 noOfRecords;
- Uint32 noOfBytes;
+ Uint64 noOfRecords;
+ Uint64 noOfBytes;
Uint32 maxRecordSize;
private:
diff --git a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp
index 9fa5800c120..dddcf8192d5 100644
--- a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp
+++ b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp
@@ -32,7 +32,8 @@ struct BackupFormat {
FRAGMENT_FOOTER = 3,
TABLE_LIST = 4,
TABLE_DESCRIPTION = 5,
- GCP_ENTRY = 6
+ GCP_ENTRY = 6,
+ FRAGMENT_INFO = 7
};
struct FileHeader {
@@ -128,6 +129,20 @@ struct BackupFormat {
Uint32 StartGCP;
Uint32 StopGCP;
};
+
+ /**
+ * Fragment Info
+ */
+ struct FragmentInfo {
+ Uint32 SectionType;
+ Uint32 SectionLength;
+ Uint32 TableId;
+ Uint32 FragmentNo;
+ Uint32 NoOfRecordsLow;
+ Uint32 NoOfRecordsHigh;
+ Uint32 FilePosLow;
+ Uint32 FilePosHigh;
+ };
};
/**
diff --git a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp
index 5f49a1a8725..38a60ac04d6 100644
--- a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp
+++ b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp
@@ -95,6 +95,9 @@ Backup::Backup(Block_context& ctx) :
addRecSignal(GSN_BACKUP_FRAGMENT_REQ, &Backup::execBACKUP_FRAGMENT_REQ);
addRecSignal(GSN_BACKUP_FRAGMENT_REF, &Backup::execBACKUP_FRAGMENT_REF);
addRecSignal(GSN_BACKUP_FRAGMENT_CONF, &Backup::execBACKUP_FRAGMENT_CONF);
+
+ addRecSignal(GSN_BACKUP_FRAGMENT_COMPLETE_REP,
+ &Backup::execBACKUP_FRAGMENT_COMPLETE_REP);
addRecSignal(GSN_STOP_BACKUP_REQ, &Backup::execSTOP_BACKUP_REQ);
addRecSignal(GSN_STOP_BACKUP_REF, &Backup::execSTOP_BACKUP_REF);
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index a4cb2b706e5..32bfd5d7146 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -459,6 +459,8 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w,
w.add(DictTabInfo::DefaultNoPartFlag, tablePtr.p->defaultNoPartFlag);
w.add(DictTabInfo::LinearHashFlag, tablePtr.p->linearHashFlag);
w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount);
+ w.add(DictTabInfo::MinRowsLow, tablePtr.p->minRowsLow);
+ w.add(DictTabInfo::MinRowsHigh, tablePtr.p->minRowsHigh);
if(signal)
{
@@ -1855,6 +1857,8 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
tablePtr.p->defaultNoPartFlag = true;
tablePtr.p->linearHashFlag = true;
tablePtr.p->m_bits = 0;
+ tablePtr.p->minRowsLow = 0;
+ tablePtr.p->minRowsHigh = 0;
tablePtr.p->tableType = DictTabInfo::UserTable;
tablePtr.p->primaryTableId = RNIL;
// volatile elements
@@ -4731,11 +4735,6 @@ Dbdict::alterTab_writeTableConf(Signal* signal,
SegmentedSectionPtr tabInfoPtr;
getSection(tabInfoPtr, alterTabPtr.p->m_tabInfoPtrI);
signal->setSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO);
-#ifndef DBUG_OFF
- ndbout_c("DICT_TAB_INFO in DICT");
- SimplePropertiesSectionReader reader(tabInfoPtr, getSectionSegmentPool());
- reader.printAll(ndbout);
-#endif
EXECUTE_DIRECT(SUMA, GSN_ALTER_TAB_REQ, signal,
AlterTabReq::SignalLength);
releaseSections(signal);
@@ -5331,6 +5330,13 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
Uint32 lhPageBits = 0;
::calcLHbits(&lhPageBits, &lhDistrBits, fragId, fragCount);
+ Uint64 maxRows = tabPtr.p->maxRowsLow +
+ (((Uint64)tabPtr.p->maxRowsHigh) << 32);
+ Uint64 minRows = tabPtr.p->minRowsLow +
+ (((Uint64)tabPtr.p->minRowsHigh) << 32);
+ maxRows = (maxRows + fragCount - 1) / fragCount;
+ minRows = (minRows + fragCount - 1) / fragCount;
+
{
LqhFragReq* req = (LqhFragReq*)signal->getDataPtrSend();
req->senderData = senderData;
@@ -5346,16 +5352,17 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
req->lh3PageBits = 0; //lhPageBits;
req->noOfAttributes = tabPtr.p->noOfAttributes;
req->noOfNullAttributes = tabPtr.p->noOfNullBits;
- req->noOfPagesToPreAllocate = 0;
+ req->maxRowsLow = maxRows & 0xFFFFFFFF;
+ req->maxRowsHigh = maxRows >> 32;
+ req->minRowsLow = minRows & 0xFFFFFFFF;
+ req->minRowsHigh = minRows >> 32;
req->schemaVersion = tabPtr.p->tableVersion;
Uint32 keyLen = tabPtr.p->tupKeyLength;
req->keyLength = keyLen; // wl-2066 no more "long keys"
req->nextLCP = lcpNo;
req->noOfKeyAttr = tabPtr.p->noOfPrimkey;
- req->noOfNewAttr = 0;
- // noOfCharsets passed to TUP in upper half
- req->noOfNewAttr |= (tabPtr.p->noOfCharsets << 16);
+ req->noOfCharsets = tabPtr.p->noOfCharsets;
req->checksumIndicator = 1;
req->GCPIndicator = 1;
req->startGci = startGci;
@@ -5976,9 +5983,16 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
tablePtr.p->m_tablespace_id = c_tableDesc.TablespaceId;
tablePtr.p->maxRowsLow = c_tableDesc.MaxRowsLow;
tablePtr.p->maxRowsHigh = c_tableDesc.MaxRowsHigh;
+ tablePtr.p->minRowsLow = c_tableDesc.MinRowsLow;
+ tablePtr.p->minRowsHigh = c_tableDesc.MinRowsHigh;
tablePtr.p->defaultNoPartFlag = c_tableDesc.DefaultNoPartFlag;
tablePtr.p->linearHashFlag = c_tableDesc.LinearHashFlag;
+ Uint64 maxRows =
+ (((Uint64)tablePtr.p->maxRowsHigh) << 32) + tablePtr.p->maxRowsLow;
+ Uint64 minRows =
+ (((Uint64)tablePtr.p->minRowsHigh) << 32) + tablePtr.p->minRowsLow;
+
{
Rope frm(c_rope_pool, tablePtr.p->frmData);
tabRequire(frm.assign(c_tableDesc.FrmData, c_tableDesc.FrmLen),
@@ -7002,13 +7016,37 @@ void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash)
{
TableRecordPtr tablePtr;
c_tableRecordPool.getPtr(tablePtr, tableId);
- if (removeFromHash){
+ if (removeFromHash)
+ {
jam();
release_object(tablePtr.p->m_obj_ptr_i);
}
+ else
+ {
+ Rope tmp(c_rope_pool, tablePtr.p->tableName);
+ tmp.erase();
+ }
- Rope frm(c_rope_pool, tablePtr.p->frmData);
- frm.erase();
+ {
+ Rope tmp(c_rope_pool, tablePtr.p->frmData);
+ tmp.erase();
+ }
+
+ {
+ Rope tmp(c_rope_pool, tablePtr.p->tsData);
+ tmp.erase();
+ }
+
+ {
+ Rope tmp(c_rope_pool, tablePtr.p->ngData);
+ tmp.erase();
+ }
+
+ {
+ Rope tmp(c_rope_pool, tablePtr.p->rangeData);
+ tmp.erase();
+ }
+
tablePtr.p->tabState = TableRecord::NOT_DEFINED;
LocalDLFifoList<AttributeRecord> list(c_attributeRecordPool,
@@ -9499,7 +9537,14 @@ Dbdict::createEventComplete_RT_USER_GET(Signal* signal,
NodeReceiverGroup rg(DBDICT, c_aliveNodes);
RequestTracker & p = evntRecPtr.p->m_reqTracker;
- p.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF, evntRecPtr.i);
+ if (!p.init<CreateEvntRef>(c_counterMgr, rg, GSN_CREATE_EVNT_REF,
+ evntRecPtr.i))
+ {
+ jam();
+ evntRecPtr.p->m_errorCode = 701;
+ createEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
sendSignal(rg, GSN_CREATE_EVNT_REQ, signal, CreateEvntReq::SignalLength, JBB);
}
@@ -9787,8 +9832,12 @@ void Dbdict::execSUB_START_REQ(Signal* signal)
return;
}
OpSubEventPtr subbPtr;
+ Uint32 errCode = 0;
if (!c_opSubEvent.seize(subbPtr)) {
+ errCode = SubStartRef::Busy;
+busy:
SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend();
+
{ // fix
Uint32 subcriberRef = ((SubStartReq*)signal->getDataPtr())->subscriberRef;
ref->subscriberRef = subcriberRef;
@@ -9798,7 +9847,7 @@ void Dbdict::execSUB_START_REQ(Signal* signal)
// ret->setErrorLine(__LINE__);
// ret->setErrorNode(reference());
ref->senderRef = reference();
- ref->errorCode = SubStartRef::Busy;
+ ref->errorCode = errCode;
sendSignal(origSenderRef, GSN_SUB_START_REF, signal,
SubStartRef::SignalLength2, JBB);
@@ -9821,7 +9870,12 @@ void Dbdict::execSUB_START_REQ(Signal* signal)
subbPtr.p->m_senderRef = origSenderRef; // not sure if API sets correctly
NodeReceiverGroup rg(DBDICT, c_aliveNodes);
RequestTracker & p = subbPtr.p->m_reqTracker;
- p.init<SubStartRef>(c_counterMgr, rg, GSN_SUB_START_REF, subbPtr.i);
+ if (!p.init<SubStartRef>(c_counterMgr, rg, GSN_SUB_START_REF, subbPtr.i))
+ {
+ c_opSubEvent.release(subbPtr);
+ errCode = SubStartRef::Busy;
+ goto busy;
+ }
SubStartReq* req = (SubStartReq*) signal->getDataPtrSend();
@@ -10011,14 +10065,17 @@ void Dbdict::execSUB_STOP_REQ(Signal* signal)
return;
}
OpSubEventPtr subbPtr;
+ Uint32 errCode = 0;
if (!c_opSubEvent.seize(subbPtr)) {
+ errCode = SubStopRef::Busy;
+busy:
SubStopRef * ref = (SubStopRef *)signal->getDataPtrSend();
jam();
// ret->setErrorCode(SubStartRef::SeizeError);
// ret->setErrorLine(__LINE__);
// ret->setErrorNode(reference());
ref->senderRef = reference();
- ref->errorCode = SubStopRef::Busy;
+ ref->errorCode = errCode;
sendSignal(origSenderRef, GSN_SUB_STOP_REF, signal,
SubStopRef::SignalLength, JBB);
@@ -10043,10 +10100,16 @@ void Dbdict::execSUB_STOP_REQ(Signal* signal)
subbPtr.p->m_senderRef = origSenderRef; // not sure if API sets correctly
NodeReceiverGroup rg(DBDICT, c_aliveNodes);
RequestTracker & p = subbPtr.p->m_reqTracker;
- p.init<SubStopRef>(c_counterMgr, rg, GSN_SUB_STOP_REF, subbPtr.i);
-
+ if (!p.init<SubStopRef>(c_counterMgr, rg, GSN_SUB_STOP_REF, subbPtr.i))
+ {
+ jam();
+ c_opSubEvent.release(subbPtr);
+ errCode = SubStopRef::Busy;
+ goto busy;
+ }
+
SubStopReq* req = (SubStopReq*) signal->getDataPtrSend();
-
+
req->senderRef = reference();
req->senderData = subbPtr.i;
@@ -10336,9 +10399,14 @@ Dbdict::dropEventUTIL_EXECUTE_READ(Signal* signal,
NodeReceiverGroup rg(DBDICT, c_aliveNodes);
RequestTracker & p = evntRecPtr.p->m_reqTracker;
- p.init<SubRemoveRef>(c_counterMgr, rg, GSN_SUB_REMOVE_REF,
- evntRecPtr.i);
-
+ if (!p.init<SubRemoveRef>(c_counterMgr, rg, GSN_SUB_REMOVE_REF,
+ evntRecPtr.i))
+ {
+ evntRecPtr.p->m_errorCode = 701;
+ dropEvent_sendReply(signal, evntRecPtr);
+ return;
+ }
+
SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend();
req->senderRef = reference();
@@ -14277,7 +14345,8 @@ Dbdict::trans_commit_complete_done(Signal* signal,
conf->senderRef = reference();
conf->senderData = trans_ptr.p->m_senderData;
conf->fileId = f_ptr.p->key;
-
+ conf->fileVersion = f_ptr.p->m_version;
+
//@todo check api failed
sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILE_CONF, signal,
CreateFileConf::SignalLength, JBB);
@@ -15444,6 +15513,17 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){
break;
}
+ {
+ Uint32 dl;
+ const ndb_mgm_configuration_iterator * p =
+ m_ctx.m_config.getOwnConfigIterator();
+ if(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &dl) && dl)
+ {
+ op->m_errorCode = CreateFileRef::NotSupportedWhenDiskless;
+ break;
+ }
+ }
+
// Loop through all filenames...
if(!c_obj_pool.seize(obj_ptr)){
op->m_errorCode = CreateTableRef::NoMoreTableRecords;
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index 58656023e4e..228dab57650 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -206,6 +206,8 @@ public:
TableRecord(){}
Uint32 maxRowsLow;
Uint32 maxRowsHigh;
+ Uint32 minRowsLow;
+ Uint32 minRowsHigh;
/* Table id (array index in DICT and other blocks) */
Uint32 tableId;
Uint32 m_obj_ptr_i;
diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
index a8dad87d81c..46effed867f 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
@@ -683,6 +683,7 @@ private:
void execGETGCIREQ(Signal *);
void execDIH_RESTARTREQ(Signal *);
void execSTART_RECCONF(Signal *);
+ void execSTART_FRAGREF(Signal *);
void execSTART_FRAGCONF(Signal *);
void execADD_FRAGCONF(Signal *);
void execADD_FRAGREF(Signal *);
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
index ec4ac3c812a..468a52f23f5 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
@@ -257,6 +257,9 @@ Dbdih::Dbdih(Block_context& ctx):
addRecSignal(GSN_DICT_LOCK_CONF, &Dbdih::execDICT_LOCK_CONF);
addRecSignal(GSN_DICT_LOCK_REF, &Dbdih::execDICT_LOCK_REF);
+ addRecSignal(GSN_START_FRAGREF,
+ &Dbdih::execSTART_FRAGREF);
+
apiConnectRecord = 0;
connectRecord = 0;
fileRecord = 0;
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 620f7aeca85..c265f54bf30 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -1107,6 +1107,26 @@ void Dbdih::execSTART_FRAGCONF(Signal* signal)
return;
}//Dbdih::execSTART_FRAGCONF()
+void Dbdih::execSTART_FRAGREF(Signal* signal)
+{
+ jamEntry();
+
+ /**
+ * Kill starting node
+ */
+ Uint32 errCode = signal->theData[1];
+ Uint32 nodeId = signal->theData[2];
+
+ SystemError * const sysErr = (SystemError*)&signal->theData[0];
+ sysErr->errorCode = SystemError::StartFragRefError;
+ sysErr->errorRef = reference();
+ sysErr->data1 = errCode;
+ sysErr->data2 = 0;
+ sendSignal(calcNdbCntrBlockRef(nodeId), GSN_SYSTEM_ERROR, signal,
+ SystemError::SignalLength, JBB);
+ return;
+}//Dbdih::execSTART_FRAGREF()
+
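
execSTART_FRAGREF() reads its input words before overlaying the outgoing SystemError struct, because both views alias the same signal buffer. A standalone sketch of that idiom (struct layout and constants are stand-ins, not the kernel's real values):

    #include <cstdint>
    #include <cassert>

    struct SystemError { uint32_t errorCode, errorRef, data1, data2; };

    int main() {
      uint32_t theData[25] = { 0, 42 /* error code */, 7 /* node id */ };

      // read incoming words first; the overlay below overwrites them
      uint32_t errCode = theData[1];
      uint32_t nodeId  = theData[2];

      SystemError* sysErr = (SystemError*)&theData[0];
      sysErr->errorCode = 99;      // stands in for StartFragRefError
      sysErr->errorRef  = 0x1000;  // dummy sender reference
      sysErr->data1     = errCode;
      sysErr->data2     = 0;

      assert(theData[2] == errCode && nodeId == 7);  // word 2 now holds data1
      return 0;
    }
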
void Dbdih::execSTART_MEREF(Signal* signal)
{
jamEntry();
diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index f1d1fdbf000..c1d4175833e 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -431,7 +431,6 @@ public:
UintR dictConnectptr;
UintR fragmentPtr;
UintR nextAddfragrec;
- UintR noOfAllocPages;
UintR schemaVer;
UintR tupConnectptr;
UintR tuxConnectptr;
@@ -449,13 +448,16 @@ public:
Uint16 totalAttrReceived;
Uint16 fragCopyCreation;
Uint16 noOfKeyAttr;
- Uint32 noOfNewAttr; // noOfCharsets in upper half
- Uint16 noOfAttributeGroups;
+ Uint16 noOfCharsets;
Uint16 lh3DistrBits;
Uint16 tableType;
Uint16 primaryTableId;
Uint32 tablespace_id;
- };// Size 108 bytes
+ Uint32 maxRowsLow;
+ Uint32 maxRowsHigh;
+ Uint32 minRowsLow;
+ Uint32 minRowsHigh;
+ };// Size 128 bytes
typedef Ptr<AddFragRecord> AddFragRecordPtr;
/* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index bdd19839a02..3890fb69b2e 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -999,12 +999,15 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
Uint8 tlh = req->lh3PageBits;
Uint32 tnoOfAttr = req->noOfAttributes;
Uint32 tnoOfNull = req->noOfNullAttributes;
- Uint32 noOfAlloc = req->noOfPagesToPreAllocate;
+ Uint32 maxRowsLow = req->maxRowsLow;
+ Uint32 maxRowsHigh = req->maxRowsHigh;
+ Uint32 minRowsLow = req->minRowsLow;
+ Uint32 minRowsHigh = req->minRowsHigh;
Uint32 tschemaVersion = req->schemaVersion;
Uint32 ttupKeyLength = req->keyLength;
Uint32 nextLcp = req->nextLCP;
Uint32 noOfKeyAttr = req->noOfKeyAttr;
- Uint32 noOfNewAttr = req->noOfNewAttr;
+ Uint32 noOfCharsets = req->noOfCharsets;
Uint32 checksumIndicator = req->checksumIndicator;
Uint32 gcpIndicator = req->GCPIndicator;
Uint32 startGci = req->startGci;
@@ -1112,7 +1115,10 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
addfragptr.p->m_senderAttrPtr = RNIL;
addfragptr.p->noOfAttr = tnoOfAttr;
addfragptr.p->noOfNull = tnoOfNull;
- addfragptr.p->noOfAllocPages = noOfAlloc;
+ addfragptr.p->maxRowsLow = maxRowsLow;
+ addfragptr.p->maxRowsHigh = maxRowsHigh;
+ addfragptr.p->minRowsLow = minRowsLow;
+ addfragptr.p->minRowsHigh = minRowsHigh;
addfragptr.p->tabId = tabptr.i;
addfragptr.p->totalAttrReceived = 0;
addfragptr.p->attrSentToTup = ZNIL;/* TO FIND PROGRAMMING ERRORS QUICKLY */
@@ -1121,7 +1127,7 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
addfragptr.p->fragCopyCreation = (tmp == 0 ? 0 : 1);
addfragptr.p->addfragErrorCode = 0;
addfragptr.p->noOfKeyAttr = noOfKeyAttr;
- addfragptr.p->noOfNewAttr = noOfNewAttr;
+ addfragptr.p->noOfCharsets = noOfCharsets;
addfragptr.p->checksumIndicator = checksumIndicator;
addfragptr.p->GCPIndicator = gcpIndicator;
addfragptr.p->lh3DistrBits = tlhstar;
@@ -1257,44 +1263,49 @@ Dblqh::sendAddFragReq(Signal* signal)
fragptr.i = addfragptr.p->fragmentPtr;
c_fragment_pool.getPtr(fragptr);
if (addfragptr.p->addfragStatus == AddFragRecord::WAIT_TUP){
+ TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend();
if (DictTabInfo::isTable(addfragptr.p->tableType) ||
DictTabInfo::isHashIndex(addfragptr.p->tableType)) {
jam();
- signal->theData[0] = addfragptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = 0; /* ADD TABLE */
- signal->theData[3] = addfragptr.p->tabId;
- signal->theData[4] = addfragptr.p->noOfAttr;
- signal->theData[5] = addfragptr.p->addFragid;
- signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1;
- signal->theData[7] = addfragptr.p->noOfNull;
- signal->theData[8] = addfragptr.p->schemaVer;
- signal->theData[9] = addfragptr.p->noOfKeyAttr;
- signal->theData[10] = addfragptr.p->noOfNewAttr;
- signal->theData[11] = addfragptr.p->checksumIndicator;
- signal->theData[12] = addfragptr.p->noOfAttributeGroups;
- signal->theData[13] = addfragptr.p->GCPIndicator;
- signal->theData[14] = addfragptr.p->tablespace_id;
+ tupFragReq->userPtr = addfragptr.i;
+ tupFragReq->userRef = cownref;
+ tupFragReq->reqInfo = 0; /* ADD TABLE */
+ tupFragReq->tableId = addfragptr.p->tabId;
+ tupFragReq->noOfAttr = addfragptr.p->noOfAttr;
+ tupFragReq->fragId = addfragptr.p->addFragid;
+ tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow;
+ tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh;
+ tupFragReq->minRowsLow = addfragptr.p->minRowsLow;
+ tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh;
+ tupFragReq->noOfNullAttr = addfragptr.p->noOfNull;
+ tupFragReq->schemaVersion = addfragptr.p->schemaVer;
+ tupFragReq->noOfKeyAttr = addfragptr.p->noOfKeyAttr;
+ tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets;
+ tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator;
+ tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator;
+ tupFragReq->tablespaceid = addfragptr.p->tablespace_id;
sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ,
signal, TupFragReq::SignalLength, JBB);
return;
}
if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
jam();
- signal->theData[0] = addfragptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = 0; /* ADD TABLE */
- signal->theData[3] = addfragptr.p->tabId;
- signal->theData[4] = 1; /* ordered index: one array attr */
- signal->theData[5] = addfragptr.p->addFragid;
- signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1;
- signal->theData[7] = 0; /* ordered index: no nullable */
- signal->theData[8] = addfragptr.p->schemaVer;
- signal->theData[9] = 1; /* ordered index: one key */
- signal->theData[10] = addfragptr.p->noOfNewAttr;
- signal->theData[11] = addfragptr.p->checksumIndicator;
- signal->theData[12] = addfragptr.p->noOfAttributeGroups;
- signal->theData[13] = addfragptr.p->GCPIndicator;
+ tupFragReq->userPtr = addfragptr.i;
+ tupFragReq->userRef = cownref;
+ tupFragReq->reqInfo = 0; /* ADD TABLE */
+ tupFragReq->tableId = addfragptr.p->tabId;
+ tupFragReq->noOfAttr = 1; /* ordered index: one array attr */
+ tupFragReq->fragId = addfragptr.p->addFragid;
+ tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow;
+ tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh;
+ tupFragReq->minRowsLow = addfragptr.p->minRowsLow;
+ tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh;
+ tupFragReq->noOfNullAttr = 0; /* ordered index: no nullable */
+ tupFragReq->schemaVersion = addfragptr.p->schemaVer;
+ tupFragReq->noOfKeyAttr = 1; /* ordered index: one key */
+ tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets;
+ tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator;
+ tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator;
sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ,
signal, TupFragReq::SignalLength, JBB);
return;
@@ -1598,16 +1609,19 @@ void Dblqh::abortAddFragOps(Signal* signal)
{
fragptr.i = addfragptr.p->fragmentPtr;
c_fragment_pool.getPtr(fragptr);
- signal->theData[0] = (Uint32)-1;
if (addfragptr.p->tupConnectptr != RNIL) {
jam();
- signal->theData[1] = addfragptr.p->tupConnectptr;
+ TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend();
+ tupFragReq->userPtr = (Uint32)-1;
+ tupFragReq->userRef = addfragptr.p->tupConnectptr;
sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
addfragptr.p->tupConnectptr = RNIL;
}
if (addfragptr.p->tuxConnectptr != RNIL) {
jam();
- signal->theData[1] = addfragptr.p->tuxConnectptr;
+ TuxFragReq* const tuxFragReq = (TuxFragReq*)signal->getDataPtrSend();
+ tuxFragReq->userPtr = (Uint32)-1;
+ tuxFragReq->userRef = addfragptr.p->tuxConnectptr;
sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
addfragptr.p->tuxConnectptr = RNIL;
}
@@ -8059,15 +8073,15 @@ void Dblqh::scanLockReleasedLab(Signal* signal)
scanptr.p->m_curr_batch_size_rows = 0;
scanptr.p->m_curr_batch_size_bytes = 0;
closeScanLab(signal);
+ } else if (scanptr.p->m_last_row && !scanptr.p->scanLockHold) {
+ jam();
+ closeScanLab(signal);
+ return;
} else if (scanptr.p->check_scan_batch_completed() &&
scanptr.p->scanLockHold != ZTRUE) {
jam();
scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
sendScanFragConf(signal, ZFALSE);
- } else if (scanptr.p->m_last_row && !scanptr.p->scanLockHold) {
- jam();
- closeScanLab(signal);
- return;
} else {
jam();
/*
diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index 4ff6e069963..9bc916c8c22 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -246,6 +246,7 @@ inline const Uint32* ALIGN_WORD(const void* ptr)
#define ZTUP_SCAN 10
#define ZFREE_EXTENT 11
#define ZUNMAP_PAGES 12
+#define ZFREE_VAR_PAGES 13
#define ZSCAN_PROCEDURE 0
#define ZCOPY_PROCEDURE 2
@@ -327,7 +328,8 @@ typedef Ptr<Attrbufrec> AttrbufrecPtr;
struct Fragoperrec {
- bool definingFragment;
+ Uint64 minRows;
+ Uint64 maxRows;
Uint32 nextFragoprec;
Uint32 lqhPtrFrag;
Uint32 fragidFrag;
@@ -340,6 +342,7 @@ struct Fragoperrec {
Uint32 m_var_attributes_size[2]; // In bytes
BlockReference lqhBlockrefFrag;
bool inUse;
+ bool definingFragment;
};
typedef Ptr<Fragoperrec> FragoperrecPtr;
@@ -601,6 +604,8 @@ struct Fragrecord {
Uint32 currentPageRange;
Uint32 rootPageRange;
Uint32 noOfPages;
+ Uint32 noOfPagesToGrow;
+
DLList<Page>::Head emptyPrimPage; // allocated pages (not init)
DLList<Page>::Head thFreeFirst; // pages with at least 1 free record
SLList<Page>::Head m_empty_pages; // Empty pages not in logical/physical map
@@ -620,6 +625,7 @@ struct Fragrecord {
Uint32 m_tablespace_id;
Uint32 m_logfile_group_id;
Disk_alloc_info m_disk_alloc_info;
+ Uint32 m_var_page_chunks;
};
typedef Ptr<Fragrecord> FragrecordPtr;
@@ -2335,6 +2341,7 @@ private:
void releaseFragment(Signal* signal, Uint32 tableId);
+ void drop_fragment_free_var_pages(Signal*);
void drop_fragment_free_exent(Signal*, TablerecPtr, FragrecordPtr, Uint32);
void drop_fragment_unmap_pages(Signal*, TablerecPtr, FragrecordPtr, Uint32);
void drop_fragment_unmap_page_callback(Signal* signal, Uint32, Uint32);
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
index 8a68905cef9..c59cf4015af 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
@@ -227,6 +227,12 @@ void Dbtup::execCONTINUEB(Signal* signal)
drop_fragment_unmap_pages(signal, tabPtr, fragPtr, signal->theData[3]);
return;
}
+ case ZFREE_VAR_PAGES:
+ {
+ ljam();
+ drop_fragment_free_var_pages(signal);
+ return;
+ }
default:
ndbrequire(false);
break;
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index 5cfd1f8cb77..88845a6ef64 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -37,7 +37,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
{
ljamEntry();
- if (signal->theData[0] == (Uint32)-1) {
+ TupFragReq* tupFragReq = (TupFragReq*)signal->getDataPtr();
+ if (tupFragReq->userPtr == (Uint32)-1) {
ljam();
abortAddFragOp(signal);
return;
@@ -47,31 +48,32 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
FragrecordPtr regFragPtr;
TablerecPtr regTabPtr;
- Uint32 userptr= signal->theData[0];
- Uint32 userblockref= signal->theData[1];
- Uint32 reqinfo= signal->theData[2];
- regTabPtr.i= signal->theData[3];
- Uint32 noOfAttributes= signal->theData[4];
- Uint32 fragId= signal->theData[5];
- Uint32 pages= signal->theData[6];
- Uint32 noOfNullAttr= signal->theData[7];
- /* Uint32 schemaVersion= signal->theData[8];*/
- Uint32 noOfKeyAttr= signal->theData[9];
-
- //Uint32 noOfNewAttr= (signal->theData[10] & 0xFFFF);
- /* DICT sends number of character sets in upper half */
- Uint32 noOfCharsets= (signal->theData[10] >> 16);
- Uint32 gcpIndicator = signal->theData[13];
- Uint32 tablespace= signal->theData[14];
-
- Uint32 checksumIndicator= signal->theData[11];
+ Uint32 userptr = tupFragReq->userPtr;
+ Uint32 userblockref = tupFragReq->userRef;
+ Uint32 reqinfo = tupFragReq->reqInfo;
+ regTabPtr.i = tupFragReq->tableId;
+ Uint32 noOfAttributes = tupFragReq->noOfAttr;
+ Uint32 fragId = tupFragReq->fragId;
+ Uint32 noOfNullAttr = tupFragReq->noOfNullAttr;
+ /* Uint32 schemaVersion = tupFragReq->schemaVersion;*/
+ Uint32 noOfKeyAttr = tupFragReq->noOfKeyAttr;
+ Uint32 noOfCharsets = tupFragReq->noOfCharsets;
+
+ Uint32 checksumIndicator = tupFragReq->checksumIndicator;
+ Uint32 gcpIndicator = tupFragReq->globalCheckpointIdIndicator;
+ Uint32 tablespace_id= tupFragReq->tablespaceid;
+
+ Uint64 maxRows =
+ (((Uint64)tupFragReq->maxRowsHigh) << 32) + tupFragReq->maxRowsLow;
+ Uint64 minRows =
+ (((Uint64)tupFragReq->minRowsHigh) << 32) + tupFragReq->minRowsLow;
#ifndef VM_TRACE
// config mismatch - do not crash if release compiled
if (regTabPtr.i >= cnoOfTablerec) {
ljam();
- signal->theData[0] = userptr;
- signal->theData[1] = 800;
+ tupFragReq->userPtr = userptr;
+ tupFragReq->userRef = 800;
sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB);
return;
}
@@ -80,8 +82,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
if (cfirstfreeFragopr == RNIL) {
ljam();
- signal->theData[0]= userptr;
- signal->theData[1]= ZNOFREE_FRAGOP_ERROR;
+ tupFragReq->userPtr = userptr;
+ tupFragReq->userRef = ZNOFREE_FRAGOP_ERROR;
sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB);
return;
}
@@ -101,6 +103,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
sizeof(fragOperPtr.p->m_var_attributes_size));
fragOperPtr.p->charsetIndex = 0;
+ fragOperPtr.p->minRows = minRows;
+ fragOperPtr.p->maxRows = maxRows;
ndbrequire(reqinfo == ZADDFRAG);
@@ -136,19 +140,11 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
regFragPtr.p->fragTableId= regTabPtr.i;
regFragPtr.p->fragmentId= fragId;
- regFragPtr.p->m_tablespace_id= tablespace;
+ regFragPtr.p->m_tablespace_id= tablespace_id;
regFragPtr.p->m_undo_complete= false;
regFragPtr.p->m_lcp_scan_op = RNIL;
regFragPtr.p->m_lcp_keep_list = RNIL;
-
- Uint32 noAllocatedPages= allocFragPages(regFragPtr.p, pages);
-
- if (noAllocatedPages == 0) {
- ljam();
- terrorCode= ZNO_PAGES_ALLOCATED_ERROR;
- fragrefuse3Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
- return;
- }
+ regFragPtr.p->m_var_page_chunks = RNIL;
if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId ||
ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId) {
@@ -538,6 +534,28 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
}
#endif
+ {
+ Uint32 fix_tupheader = regTabPtr.p->m_offsets[MM].m_fix_header_size;
+ if(regTabPtr.p->m_attributes[MM].m_no_of_varsize != 0)
+ fix_tupheader += Tuple_header::HeaderSize + 1;
+ ndbassert(fix_tupheader > 0);
+ Uint32 noRowsPerPage = ZWORDS_ON_PAGE / fix_tupheader;
+ Uint32 noAllocatedPages =
+ (fragOperPtr.p->minRows + noRowsPerPage - 1) / noRowsPerPage;
+ if (fragOperPtr.p->minRows == 0)
+ noAllocatedPages = 2;
+ else if (noAllocatedPages == 0)
+ noAllocatedPages = 2;
+ noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages);
+
+ if (noAllocatedPages == 0) {
+ ljam();
+ terrorCode = ZNO_PAGES_ALLOCATED_ERROR;
+ addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
+ return;
+ }//if
+ }
+
CreateFilegroupImplReq rep;
if(regTabPtr.p->m_no_of_disk_attributes)
{
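
The block added to execTUP_ADD_ATTRREQ() derives the preallocation page count from MinRows instead of the removed noOfPagesToPreAllocate word. A standalone sketch of the arithmetic (the page and header sizes are assumed values, not NDB's):

    #include <cstdint>
    #include <iostream>

    static const uint32_t WORDS_ON_PAGE = 8192;  // stand-in for ZWORDS_ON_PAGE

    uint32_t pagesForMinRows(uint64_t minRows, uint32_t fixTupheaderWords) {
      uint32_t rowsPerPage = WORDS_ON_PAGE / fixTupheaderWords;
      uint32_t pages = (uint32_t)((minRows + rowsPerPage - 1) / rowsPerPage);
      if (pages < 2)
        pages = 2;  // the hunk falls back to 2 pages for tiny or zero MinRows
      return pages;
    }

    int main() {
      std::cout << pagesForMinRows(100000, 36) << "\n";  // 441 with these numbers
      std::cout << pagesForMinRows(0, 36) << "\n";       // floor of 2
      return 0;
    }
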
@@ -970,7 +988,7 @@ Dbtup::drop_fragment_unmap_pages(Signal *signal,
case -1:
break;
default:
- ndbrequire(res == pagePtr.i);
+ ndbrequire((Uint32)res == pagePtr.i);
drop_fragment_unmap_page_callback(signal, pos, res);
}
return;
@@ -1052,6 +1070,44 @@ Dbtup::drop_fragment_free_exent(Signal *signal,
}
}
+ signal->theData[0] = ZFREE_VAR_PAGES;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = fragPtr.i;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+}
+
+void
+Dbtup::drop_fragment_free_var_pages(Signal* signal)
+{
+ ljam();
+ Uint32 tableId = signal->theData[1];
+ Uint32 fragPtrI = signal->theData[2];
+
+ TablerecPtr tabPtr;
+ tabPtr.i= tableId;
+ ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
+
+ FragrecordPtr fragPtr;
+ fragPtr.i = fragPtrI;
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
+
+ PagePtr pagePtr;
+ if ((pagePtr.i = fragPtr.p->m_var_page_chunks) != RNIL)
+ {
+ c_page_pool.getPtr(pagePtr);
+ Var_page* page = (Var_page*)pagePtr.p;
+ fragPtr.p->m_var_page_chunks = page->next_chunk;
+
+ Uint32 sz = page->chunk_size;
+ returnCommonArea(pagePtr.i, sz);
+
+ signal->theData[0] = ZFREE_VAR_PAGES;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = fragPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }
+
releaseFragPages(fragPtr.p);
Uint32 i;
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
index db6f5e3b185..90fdd8c69d7 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
@@ -299,6 +299,11 @@ void Dbtup::releaseFragPages(Fragrecord* regFragPtr)
LocalDLList<Page> tmp(c_page_pool, regFragPtr->thFreeFirst);
tmp.remove();
}
+
+ {
+ LocalSLList<Page> tmp(c_page_pool, regFragPtr->m_empty_pages);
+ tmp.remove();
+ }
return;
} else {
@@ -346,6 +351,7 @@ void Dbtup::initFragRange(Fragrecord* const regFragPtr)
regFragPtr->rootPageRange = RNIL;
regFragPtr->currentPageRange = RNIL;
regFragPtr->noOfPages = 0;
+ regFragPtr->noOfPagesToGrow = 2;
regFragPtr->nextStartRange = 0;
}//initFragRange()
@@ -421,9 +427,10 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested
void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr)
{
- Uint32 noAllocPages = regFragPtr->noOfPages >> 3; // 12.5%
- noAllocPages += regFragPtr->noOfPages >> 4; // 6.25%
+ Uint32 noAllocPages = regFragPtr->noOfPagesToGrow >> 3; // 12.5%
+ noAllocPages += regFragPtr->noOfPagesToGrow >> 4; // 6.25%
noAllocPages += 2;
+ regFragPtr->noOfPagesToGrow += noAllocPages;
/* -----------------------------------------------------------------*/
// We will grow by 18.75% plus two more additional pages to grow
// a little bit quicker in the beginning.
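
Basing growth on the new noOfPagesToGrow counter rather than noOfPages keeps fragments that were preallocated large via MinRows from growing by 18.75% of their whole size on every extension. A standalone sketch of the schedule:

    #include <cstdint>
    #include <iostream>

    int main() {
      uint32_t noOfPagesToGrow = 2;  // initFragRange() now seeds this at 2
      for (int i = 0; i < 5; i++) {
        uint32_t noAllocPages = (noOfPagesToGrow >> 3)   // 12.5%
                              + (noOfPagesToGrow >> 4)   // 6.25%
                              + 2;
        noOfPagesToGrow += noAllocPages;
        std::cout << "grow by " << noAllocPages
                  << ", counter now " << noOfPagesToGrow << "\n";
      }
      return 0;
    }
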
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp
index 94bd75108a4..52ab66b5c0e 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp
@@ -323,6 +323,13 @@ Dbtup::get_empty_var_page(Fragrecord* fragPtr)
ptr.p->nextList = RNIL;
list.add(ret.i + 1, ptr);
}
+
+ c_page_pool.getPtr(ret);
+
+ Var_page* page = (Var_page*)ret.p;
+ page->chunk_size = cnt;
+ page->next_chunk = fragPtr->m_var_page_chunks;
+ fragPtr->m_var_page_chunks = ret.i;
return ret.i;
}
diff --git a/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp b/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp
index 04ed18da58d..4b4df909061 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp
@@ -107,8 +107,14 @@ struct Tup_varsize_page
Uint32 page_state;
Uint32 next_page;
Uint32 prev_page;
- Uint32 first_cluster_page;
- Uint32 last_cluster_page;
+ union {
+ Uint32 first_cluster_page;
+ Uint32 chunk_size;
+ };
+ union {
+ Uint32 last_cluster_page;
+ Uint32 next_chunk;
+ };
Uint32 next_cluster_page;
Uint32 prev_cluster_page;
Uint32 frag_page_id;
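
The unions let a page that heads a var-size chunk reuse two cluster-page words for chunk bookkeeping without enlarging the header. A minimal sketch of the overlay (the end-of-chain marker value is an assumption):

    #include <cstdint>
    #include <cassert>

    struct PageWords {
      union { uint32_t first_cluster_page; uint32_t chunk_size; };
      union { uint32_t last_cluster_page;  uint32_t next_chunk; };
    };

    int main() {
      PageWords p;
      p.chunk_size = 16;          // this page heads a 16-page chunk
      p.next_chunk = 0xffffff00;  // RNIL-style end-of-chain marker (assumed)
      assert(sizeof(PageWords) == 8);  // the unions add no storage
      return 0;
    }
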
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
index 7c48ebb5e8b..c3140bea25b 100644
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
@@ -205,6 +205,13 @@ void Ndbcntr::execSYSTEM_ERROR(Signal* signal)
killingNode, data1);
break;
+ case SystemError::StartFragRefError:
+ BaseString::snprintf(buf, sizeof(buf),
+ "Node %d killed this node because "
+ "it replied StartFragRef error code: %u.",
+ killingNode, data1);
+ break;
+
case SystemError::CopySubscriptionRef:
BaseString::snprintf(buf, sizeof(buf),
"Node %d killed this node because "
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
index b0a4d6264fb..a3b6104a059 100644
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
@@ -228,6 +228,7 @@ AsyncFile::run()
endReq();
return;
default:
+ DEBUG(ndbout_c("Invalid Request"));
abort();
break;
}//switch
@@ -676,6 +677,7 @@ AsyncFile::extendfile(Request* request) {
return 0;
#else
request = request;
+ DEBUG(ndbout_c("no pwrite"));
abort();
return -1;
#endif
@@ -792,6 +794,7 @@ AsyncFile::writeBuffer(const char * buf, size_t size, off_t offset,
bytes_written = return_value;
if(bytes_written == 0){
+ DEBUG(ndbout_c("no bytes written"));
abort();
}
@@ -830,8 +833,10 @@ AsyncFile::closeReq(Request * request)
#else
if (-1 == ::close(theFd)) {
#ifndef DBUG_OFF
- if (theFd == -1)
+ if (theFd == -1) {
+ DEBUG(ndbout_c("close on fd = -1"));
abort();
+ }
#endif
request->error = errno;
}
@@ -899,6 +904,7 @@ AsyncFile::appendReq(Request * request){
return;
}
if(n == 0){
+ DEBUG(ndbout_c("append with n=0"));
abort();
}
size -= n;
diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
index 686ae476879..42666a9e5d9 100644
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -2480,7 +2480,8 @@ Suma::execSUB_STOP_REQ(Signal* signal){
TablePtr tabPtr;
tabPtr.i = subPtr.p->m_table_ptrI;
- if (!(tabPtr.p = c_tables.getPtr(tabPtr.i)) ||
+ if (tabPtr.i == RNIL ||
+ !(tabPtr.p = c_tables.getPtr(tabPtr.i)) ||
tabPtr.p->m_tableId != subPtr.p->m_tableId)
{
jam();
diff --git a/storage/ndb/src/kernel/vm/RequestTracker.hpp b/storage/ndb/src/kernel/vm/RequestTracker.hpp
index 5fd1ae7255a..ac9ed85ae4b 100644
--- a/storage/ndb/src/kernel/vm/RequestTracker.hpp
+++ b/storage/ndb/src/kernel/vm/RequestTracker.hpp
@@ -26,12 +26,12 @@ public:
void init() { m_confs.clear(); m_nRefs = 0; }
template<typename SignalClass>
- void init(SafeCounterManager& mgr,
+ bool init(SafeCounterManager& mgr,
NodeReceiverGroup rg, Uint16 GSN, Uint32 senderData)
{
init();
SafeCounter tmp(mgr, m_sc);
- tmp.init<SignalClass>(rg, GSN, senderData);
+ return tmp.init<SignalClass>(rg, GSN, senderData);
}
bool ignoreRef(SafeCounterManager& mgr, Uint32 nodeId)
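
init() previously discarded the SafeCounter seize result; returning it lets callers fail soft. A standalone analogue of the new caller contract (types simplified, not the real NDB classes):

    #include <iostream>

    struct Tracker {
      bool init(bool counterAvailable) { return counterAvailable; }
    };

    int main() {
      Tracker p;
      if (!p.init(/*counterAvailable=*/false)) {
        // the DBDICT callers above release their operation record and
        // answer with Busy (or error 701) instead of losing the request
        std::cout << "init failed: send REF to the requester\n";
      }
      return 0;
    }
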
diff --git a/storage/ndb/src/kernel/vm/SafeCounter.hpp b/storage/ndb/src/kernel/vm/SafeCounter.hpp
index 3ee5e076ab8..917a67f2508 100644
--- a/storage/ndb/src/kernel/vm/SafeCounter.hpp
+++ b/storage/ndb/src/kernel/vm/SafeCounter.hpp
@@ -230,10 +230,13 @@ inline
bool
SafeCounter::init(NodeReceiverGroup rg, Uint16 GSN, Uint32 senderData){
- bool b = init<Ref>(rg.m_block, GSN, senderData);
- m_nodes = rg.m_nodes;
- m_count = m_nodes.count();
- return b;
+ if (init<Ref>(rg.m_block, GSN, senderData))
+ {
+ m_nodes = rg.m_nodes;
+ m_count = m_nodes.count();
+ return true;
+ }
+ return false;
}
template<typename Ref>
@@ -241,10 +244,13 @@ inline
bool
SafeCounter::init(NodeReceiverGroup rg, Uint32 senderData){
- bool b = init<Ref>(rg.m_block, Ref::GSN, senderData);
- m_nodes = rg.m_nodes;
- m_count = m_nodes.count();
- return b;
+ if (init<Ref>(rg.m_block, Ref::GSN, senderData))
+ {
+ m_nodes = rg.m_nodes;
+ m_count = m_nodes.count();
+ return true;
+ }
+ return false;
}
inline
diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp
index 42776798385..6c172a29819 100644
--- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp
+++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp
@@ -30,6 +30,7 @@ extern my_bool opt_core;
#define MAX_LINE_LENGTH 255
#define KEY_INTERNAL 0
#define MAX_INT_RNIL 0xfffffeff
+#define MAX_PORT_NO 65535
#define _STR_VALUE(x) #x
#define STR_VALUE(x) _STR_VALUE(x)
@@ -422,7 +423,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::CI_INT,
UNDEFINED,
"1",
- STR_VALUE(MAX_INT_RNIL) },
+ STR_VALUE(MAX_PORT_NO) },
{
CFG_DB_NO_REPLICAS,
@@ -877,7 +878,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::CI_INT,
"8",
- "1",
+ "3",
STR_VALUE(MAX_INT_RNIL) },
{
@@ -1510,7 +1511,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::CI_INT,
NDB_PORT,
"0",
- STR_VALUE(MAX_INT_RNIL) },
+ STR_VALUE(MAX_PORT_NO) },
{
KEY_INTERNAL,
@@ -1522,7 +1523,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::CI_INT,
UNDEFINED,
"0",
- STR_VALUE(MAX_INT_RNIL) },
+ STR_VALUE(MAX_PORT_NO) },
{
CFG_NODE_ARBIT_RANK,
@@ -1664,7 +1665,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::CI_INT,
MANDATORY,
"0",
- STR_VALUE(MAX_INT_RNIL) },
+ STR_VALUE(MAX_PORT_NO) },
{
CFG_TCP_SEND_BUFFER_SIZE,
@@ -1770,7 +1771,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::CI_INT,
MANDATORY,
"0",
- STR_VALUE(MAX_INT_RNIL) },
+ STR_VALUE(MAX_PORT_NO) },
{
CFG_SHM_SIGNUM,
@@ -1992,7 +1993,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::CI_INT,
MANDATORY,
"0",
- STR_VALUE(MAX_INT_RNIL) },
+ STR_VALUE(MAX_PORT_NO) },
{
CFG_SCI_HOST1_ID_0,
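
Port-valued parameters were previously capped at MAX_INT_RNIL; bounding them by MAX_PORT_NO rejects impossible values at config-check time. A standalone sketch of the tightened check (helper name is illustrative):

    #include <cstdint>
    #include <cassert>

    #define MAX_PORT_NO 65535  // as defined above

    static bool validPortParam(uint32_t v) {
      return v <= MAX_PORT_NO;  // some port params additionally require >= 1
    }

    int main() {
      assert(validPortParam(1186));         // typical management-server port
      assert(!validPortParam(0xfffffeff));  // previously accepted nonsense
      return 0;
    }
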
diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
index 511536d2fdd..50a623920d2 100644
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -2487,14 +2487,20 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted)
event.Event = BackupEvent::BackupCompleted;
event.Completed.BackupId = rep->backupId;
- event.Completed.NoOfBytes = rep->noOfBytes;
+ event.Completed.NoOfBytes = rep->noOfBytesLow;
event.Completed.NoOfLogBytes = rep->noOfLogBytes;
- event.Completed.NoOfRecords = rep->noOfRecords;
+ event.Completed.NoOfRecords = rep->noOfRecordsLow;
event.Completed.NoOfLogRecords = rep->noOfLogRecords;
event.Completed.stopGCP = rep->stopGCP;
event.Completed.startGCP = rep->startGCP;
event.Nodes = rep->nodes;
+ if (signal->header.theLength >= BackupCompleteRep::SignalLength)
+ {
+ event.Completed.NoOfBytes += ((Uint64)rep->noOfBytesHigh) << 32;
+ event.Completed.NoOfRecords += ((Uint64)rep->noOfRecordsHigh) << 32;
+ }
+
backupId = rep->backupId;
return 0;
}
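
The backup byte/record counters become 64-bit, with the high words read only when the sender's signal is long enough to contain them, so older nodes stay compatible. A standalone sketch of the guarded combination (lengths and struct are stand-ins):

    #include <cstdint>
    #include <cassert>

    struct Rep { uint32_t noOfBytesLow, noOfBytesHigh; };

    int main() {
      Rep rep = { 0x80000000u, 3 };            // 3 * 2^32 + 2^31 bytes
      uint32_t received = 12, fullLength = 12; // pretend signal lengths

      uint64_t bytes = rep.noOfBytesLow;       // old senders give only this word
      if (received >= fullLength)              // new senders add the high word
        bytes += ((uint64_t)rep.noOfBytesHigh) << 32;

      assert(bytes == (3ULL << 32) + 0x80000000u);
      return 0;
    }
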
diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
index a00ba2cda12..1473ec90c33 100644
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -323,9 +323,9 @@ public:
Uint32 ErrorCode;
} FailedToStart ;
struct {
+ Uint64 NoOfBytes;
+ Uint64 NoOfRecords;
Uint32 BackupId;
- Uint32 NoOfBytes;
- Uint32 NoOfRecords;
Uint32 NoOfLogBytes;
Uint32 NoOfLogRecords;
Uint32 startGCP;
diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp
index c71689d2e81..4948095f970 100644
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp
@@ -18,6 +18,32 @@
#include "NdbDictionaryImpl.hpp"
#include <NdbOut.hpp>
+NdbDictionary::ObjectId::ObjectId()
+ : m_impl(* new NdbDictObjectImpl(NdbDictionary::Object::TypeUndefined))
+{
+}
+
+NdbDictionary::ObjectId::~ObjectId()
+{
+ NdbDictObjectImpl * tmp = &m_impl;
+ delete tmp;
+}
+
+NdbDictionary::Object::Status
+NdbDictionary::ObjectId::getObjectStatus() const {
+ return m_impl.m_status;
+}
+
+int
+NdbDictionary::ObjectId::getObjectVersion() const {
+ return m_impl.m_version;
+}
+
+int
+NdbDictionary::ObjectId::getObjectId() const {
+ return m_impl.m_id;
+}
+
/*****************************************************************
* Column facade
*/
@@ -426,6 +452,18 @@ NdbDictionary::Table::getMaxRows() const
}
void
+NdbDictionary::Table::setMinRows(Uint64 minRows)
+{
+ m_impl.m_min_rows = minRows;
+}
+
+Uint64
+NdbDictionary::Table::getMinRows() const
+{
+ return m_impl.m_min_rows;
+}
+
+void
NdbDictionary::Table::setDefaultNoPartitionsFlag(Uint32 flag)
{
m_impl.m_default_no_part_flag = flag;
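
A usage sketch of the new accessor against the API surface added above (assumes the standard NdbApi.hpp umbrella header and an existing Table object):

    #include <NdbApi.hpp>

    // sketch only: size a table up front so TUP preallocates pages at create
    void sizeTable(NdbDictionary::Table& tab) {
      tab.setMinRows(1000000);  // lower bound drives the page preallocation
      tab.setMaxRows(5000000);  // upper bound, as before
    }
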
@@ -1198,9 +1236,14 @@ NdbDictionary::Datafile::getTablespace() const {
return m_impl.m_filegroup_name.c_str();
}
-Uint32
-NdbDictionary::Datafile::getTablespaceId() const {
- return m_impl.m_filegroup_id;
+void
+NdbDictionary::Datafile::getTablespaceId(NdbDictionary::ObjectId* dst) const
+{
+ if (dst)
+ {
+ NdbDictObjectImpl::getImpl(* dst).m_id = m_impl.m_filegroup_id;
+ NdbDictObjectImpl::getImpl(* dst).m_version = m_impl.m_filegroup_version;
+ }
}
NdbDictionary::Object::Status
@@ -1284,9 +1327,14 @@ NdbDictionary::Undofile::getLogfileGroup() const {
return m_impl.m_filegroup_name.c_str();
}
-Uint32
-NdbDictionary::Undofile::getLogfileGroupId() const {
- return m_impl.m_filegroup_id;
+void
+NdbDictionary::Undofile::getLogfileGroupId(NdbDictionary::ObjectId * dst)const
+{
+ if (dst)
+ {
+ NdbDictObjectImpl::getImpl(* dst).m_id = m_impl.m_filegroup_id;
+ NdbDictObjectImpl::getImpl(* dst).m_version = m_impl.m_filegroup_version;
+ }
}
NdbDictionary::Object::Status
@@ -1799,17 +1847,23 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
}
int
-NdbDictionary::Dictionary::createLogfileGroup(const LogfileGroup & lg){
- return m_impl.createLogfileGroup(NdbLogfileGroupImpl::getImpl(lg));
+NdbDictionary::Dictionary::createLogfileGroup(const LogfileGroup & lg,
+ ObjectId * obj)
+{
+ return m_impl.createLogfileGroup(NdbLogfileGroupImpl::getImpl(lg),
+ obj ?
+ & NdbDictObjectImpl::getImpl(* obj) : 0);
}
int
-NdbDictionary::Dictionary::dropLogfileGroup(const LogfileGroup & lg){
+NdbDictionary::Dictionary::dropLogfileGroup(const LogfileGroup & lg)
+{
return m_impl.dropLogfileGroup(NdbLogfileGroupImpl::getImpl(lg));
}
NdbDictionary::LogfileGroup
-NdbDictionary::Dictionary::getLogfileGroup(const char * name){
+NdbDictionary::Dictionary::getLogfileGroup(const char * name)
+{
NdbDictionary::LogfileGroup tmp;
m_impl.m_receiver.get_filegroup(NdbLogfileGroupImpl::getImpl(tmp),
NdbDictionary::Object::LogfileGroup, name);
@@ -1817,17 +1871,23 @@ NdbDictionary::Dictionary::getLogfileGroup(const char * name){
}
int
-NdbDictionary::Dictionary::createTablespace(const Tablespace & lg){
- return m_impl.createTablespace(NdbTablespaceImpl::getImpl(lg));
+NdbDictionary::Dictionary::createTablespace(const Tablespace & lg,
+ ObjectId * obj)
+{
+ return m_impl.createTablespace(NdbTablespaceImpl::getImpl(lg),
+ obj ?
+ & NdbDictObjectImpl::getImpl(* obj) : 0);
}
int
-NdbDictionary::Dictionary::dropTablespace(const Tablespace & lg){
+NdbDictionary::Dictionary::dropTablespace(const Tablespace & lg)
+{
return m_impl.dropTablespace(NdbTablespaceImpl::getImpl(lg));
}
NdbDictionary::Tablespace
-NdbDictionary::Dictionary::getTablespace(const char * name){
+NdbDictionary::Dictionary::getTablespace(const char * name)
+{
NdbDictionary::Tablespace tmp;
m_impl.m_receiver.get_filegroup(NdbTablespaceImpl::getImpl(tmp),
NdbDictionary::Object::Tablespace, name);
@@ -1835,7 +1895,8 @@ NdbDictionary::Dictionary::getTablespace(const char * name){
}
NdbDictionary::Tablespace
-NdbDictionary::Dictionary::getTablespace(Uint32 tablespaceId){
+NdbDictionary::Dictionary::getTablespace(Uint32 tablespaceId)
+{
NdbDictionary::Tablespace tmp;
m_impl.m_receiver.get_filegroup(NdbTablespaceImpl::getImpl(tmp),
NdbDictionary::Object::Tablespace,
@@ -1844,17 +1905,24 @@ NdbDictionary::Dictionary::getTablespace(Uint32 tablespaceId){
}
int
-NdbDictionary::Dictionary::createDatafile(const Datafile & df, bool force){
- return m_impl.createDatafile(NdbDatafileImpl::getImpl(df), force);
+NdbDictionary::Dictionary::createDatafile(const Datafile & df,
+ bool force,
+ ObjectId * obj)
+{
+ return m_impl.createDatafile(NdbDatafileImpl::getImpl(df),
+ force,
+ obj ? & NdbDictObjectImpl::getImpl(* obj) : 0);
}
int
-NdbDictionary::Dictionary::dropDatafile(const Datafile& df){
+NdbDictionary::Dictionary::dropDatafile(const Datafile& df)
+{
return m_impl.dropDatafile(NdbDatafileImpl::getImpl(df));
}
NdbDictionary::Datafile
-NdbDictionary::Dictionary::getDatafile(Uint32 node, const char * path){
+NdbDictionary::Dictionary::getDatafile(Uint32 node, const char * path)
+{
NdbDictionary::Datafile tmp;
m_impl.m_receiver.get_file(NdbDatafileImpl::getImpl(tmp),
NdbDictionary::Object::Datafile,
@@ -1863,17 +1931,24 @@ NdbDictionary::Dictionary::getDatafile(Uint32 node, const char * path){
}
int
-NdbDictionary::Dictionary::createUndofile(const Undofile & df, bool force){
- return m_impl.createUndofile(NdbUndofileImpl::getImpl(df), force);
+NdbDictionary::Dictionary::createUndofile(const Undofile & df,
+ bool force,
+ ObjectId * obj)
+{
+ return m_impl.createUndofile(NdbUndofileImpl::getImpl(df),
+ force,
+ obj ? & NdbDictObjectImpl::getImpl(* obj) : 0);
}
int
-NdbDictionary::Dictionary::dropUndofile(const Undofile& df){
+NdbDictionary::Dictionary::dropUndofile(const Undofile& df)
+{
return m_impl.dropUndofile(NdbUndofileImpl::getImpl(df));
}
NdbDictionary::Undofile
-NdbDictionary::Dictionary::getUndofile(Uint32 node, const char * path){
+NdbDictionary::Dictionary::getUndofile(Uint32 node, const char * path)
+{
NdbDictionary::Undofile tmp;
m_impl.m_receiver.get_file(NdbUndofileImpl::getImpl(tmp),
NdbDictionary::Object::Undofile,
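
With the create calls now filling an ObjectId, callers obtain both the id and the version of a freshly created dictionary object. A usage sketch (assumes NdbApi.hpp and a connected Dictionary):

    #include <NdbApi.hpp>

    // sketch: capture the created tablespace's id/version via the new ObjectId
    int createTs(NdbDictionary::Dictionary* dict,
                 const NdbDictionary::Tablespace& ts)
    {
      NdbDictionary::ObjectId objId;
      if (dict->createTablespace(ts, &objId) != 0)
        return -1;
      return objId.getObjectId();  // version via objId.getObjectVersion()
    }
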
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 22a5d2f20a5..1e33a843a42 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -445,7 +445,6 @@ NdbTableImpl::init(){
m_hashpointerValue= 0;
m_linear_flag= true;
m_primaryTable.clear();
- m_max_rows = 0;
m_default_no_part_flag = 1;
m_logging= true;
m_row_gci = true;
@@ -461,6 +460,8 @@ NdbTableImpl::init(){
m_noOfDistributionKeys= 0;
m_noOfBlobs= 0;
m_replicaCount= 0;
+ m_min_rows = 0;
+ m_max_rows = 0;
m_tablespace_name.clear();
m_tablespace_id = ~0;
m_tablespace_version = ~0;
@@ -729,6 +730,9 @@ NdbTableImpl::assign(const NdbTableImpl& org)
m_version = org.m_version;
m_status = org.m_status;
+ m_max_rows = org.m_max_rows;
+ m_min_rows = org.m_min_rows;
+
m_tablespace_name = org.m_tablespace_name;
m_tablespace_id= org.m_tablespace_id;
m_tablespace_version = org.m_tablespace_version;
@@ -2066,6 +2070,9 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
Uint64 max_rows = ((Uint64)tableDesc->MaxRowsHigh) << 32;
max_rows += tableDesc->MaxRowsLow;
impl->m_max_rows = max_rows;
+ Uint64 min_rows = ((Uint64)tableDesc->MinRowsHigh) << 32;
+ min_rows += tableDesc->MinRowsLow;
+ impl->m_min_rows = min_rows;
impl->m_default_no_part_flag = tableDesc->DefaultNoPartFlag;
impl->m_linear_flag = tableDesc->LinearHashFlag;
impl->m_logging = tableDesc->TableLoggedFlag;
@@ -2521,6 +2528,8 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
tmpTab->NoOfAttributes = sz;
tmpTab->MaxRowsHigh = (Uint32)(impl.m_max_rows >> 32);
tmpTab->MaxRowsLow = (Uint32)(impl.m_max_rows & 0xFFFFFFFF);
+ tmpTab->MinRowsHigh = (Uint32)(impl.m_min_rows >> 32);
+ tmpTab->MinRowsLow = (Uint32)(impl.m_min_rows & 0xFFFFFFFF);
tmpTab->DefaultNoPartFlag = impl.m_default_no_part_flag;
tmpTab->LinearHashFlag = impl.m_linear_flag;
@@ -4391,19 +4400,23 @@ NdbUndofileImpl::assign(const NdbUndofileImpl& org)
}
int
-NdbDictionaryImpl::createDatafile(const NdbDatafileImpl & file, bool force){
+NdbDictionaryImpl::createDatafile(const NdbDatafileImpl & file,
+ bool force,
+ NdbDictObjectImpl* obj)
+
+{
DBUG_ENTER("NdbDictionaryImpl::createDatafile");
NdbFilegroupImpl tmp(NdbDictionary::Object::Tablespace);
if(file.m_filegroup_version != ~(Uint32)0){
tmp.m_id = file.m_filegroup_id;
tmp.m_version = file.m_filegroup_version;
- DBUG_RETURN(m_receiver.create_file(file, tmp));
+ DBUG_RETURN(m_receiver.create_file(file, tmp, force, obj));
}
if(m_receiver.get_filegroup(tmp, NdbDictionary::Object::Tablespace,
file.m_filegroup_name.c_str()) == 0){
- DBUG_RETURN(m_receiver.create_file(file, tmp, force));
+ DBUG_RETURN(m_receiver.create_file(file, tmp, force, obj));
}
DBUG_RETURN(-1);
}
@@ -4414,53 +4427,65 @@ NdbDictionaryImpl::dropDatafile(const NdbDatafileImpl & file){
}
int
-NdbDictionaryImpl::createUndofile(const NdbUndofileImpl & file, bool force){
+NdbDictionaryImpl::createUndofile(const NdbUndofileImpl & file,
+ bool force,
+ NdbDictObjectImpl* obj)
+{
DBUG_ENTER("NdbDictionaryImpl::createUndofile");
NdbFilegroupImpl tmp(NdbDictionary::Object::LogfileGroup);
if(file.m_filegroup_version != ~(Uint32)0){
tmp.m_id = file.m_filegroup_id;
tmp.m_version = file.m_filegroup_version;
- DBUG_RETURN(m_receiver.create_file(file, tmp));
+ DBUG_RETURN(m_receiver.create_file(file, tmp, force, obj));
}
if(m_receiver.get_filegroup(tmp, NdbDictionary::Object::LogfileGroup,
file.m_filegroup_name.c_str()) == 0){
- DBUG_RETURN(m_receiver.create_file(file, tmp, force));
+ DBUG_RETURN(m_receiver.create_file(file, tmp, force, obj));
}
DBUG_PRINT("info", ("Failed to find filegroup"));
DBUG_RETURN(-1);
}
int
-NdbDictionaryImpl::dropUndofile(const NdbUndofileImpl & file){
+NdbDictionaryImpl::dropUndofile(const NdbUndofileImpl & file)
+{
return m_receiver.drop_file(file);
}
int
-NdbDictionaryImpl::createTablespace(const NdbTablespaceImpl & fg){
- return m_receiver.create_filegroup(fg);
+NdbDictionaryImpl::createTablespace(const NdbTablespaceImpl & fg,
+ NdbDictObjectImpl* obj)
+{
+ return m_receiver.create_filegroup(fg, obj);
}
int
-NdbDictionaryImpl::dropTablespace(const NdbTablespaceImpl & fg){
+NdbDictionaryImpl::dropTablespace(const NdbTablespaceImpl & fg)
+{
return m_receiver.drop_filegroup(fg);
}
int
-NdbDictionaryImpl::createLogfileGroup(const NdbLogfileGroupImpl & fg){
- return m_receiver.create_filegroup(fg);
+NdbDictionaryImpl::createLogfileGroup(const NdbLogfileGroupImpl & fg,
+ NdbDictObjectImpl* obj)
+{
+ return m_receiver.create_filegroup(fg, obj);
}
int
-NdbDictionaryImpl::dropLogfileGroup(const NdbLogfileGroupImpl & fg){
+NdbDictionaryImpl::dropLogfileGroup(const NdbLogfileGroupImpl & fg)
+{
return m_receiver.drop_filegroup(fg);
}
int
NdbDictInterface::create_file(const NdbFileImpl & file,
const NdbFilegroupImpl & group,
- bool overwrite){
+ bool overwrite,
+ NdbDictObjectImpl* obj)
+{
DBUG_ENTER("NdbDictInterface::create_file");
UtilBufferWriter w(m_buffer);
DictFilegroupInfo::File f; f.init();
@@ -4503,23 +4528,39 @@ NdbDictInterface::create_file(const NdbFileImpl & file,
Send signal without time-out since creating files can take a very long
time if the file is very big.
*/
- DBUG_RETURN(dictSignal(&tSignal, ptr, 1,
- 0, // master
- WAIT_CREATE_INDX_REQ,
- -1, 100,
- err));
+ int ret = dictSignal(&tSignal, ptr, 1,
+ 0, // master
+ WAIT_CREATE_INDX_REQ,
+ -1, 100,
+ err);
+
+ if (ret == 0 && obj)
+ {
+ Uint32* data = (Uint32*)m_buffer.get_data();
+ obj->m_id = data[0];
+ obj->m_version = data[1];
+ }
+
+ DBUG_RETURN(ret);
}
void
NdbDictInterface::execCREATE_FILE_CONF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
+ LinearSectionPtr ptr[3])
{
+ const CreateFileConf* conf=
+ CAST_CONSTPTR(CreateFileConf, signal->getDataPtr());
+ m_buffer.grow(4 * 2); // 2 words
+ Uint32* data = (Uint32*)m_buffer.get_data();
+ data[0] = conf->fileId;
+ data[1] = conf->fileVersion;
+
m_waiter.signal(NO_WAIT);
}
void
NdbDictInterface::execCREATE_FILE_REF(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
+ LinearSectionPtr ptr[3])
{
const CreateFileRef* ref =
CAST_CONSTPTR(CreateFileRef, signal->getDataPtr());
@@ -4529,7 +4570,8 @@ NdbDictInterface::execCREATE_FILE_REF(NdbApiSignal * signal,
}
int
-NdbDictInterface::drop_file(const NdbFileImpl & file){
+NdbDictInterface::drop_file(const NdbFileImpl & file)
+{
DBUG_ENTER("NdbDictInterface::drop_file");
NdbApiSignal tSignal(m_reference);
tSignal.theReceiversBlockNumber = DBDICT;
@@ -4569,7 +4611,9 @@ NdbDictInterface::execDROP_FILE_REF(NdbApiSignal * signal,
}
int
-NdbDictInterface::create_filegroup(const NdbFilegroupImpl & group){
+NdbDictInterface::create_filegroup(const NdbFilegroupImpl & group,
+ NdbDictObjectImpl* obj)
+{
DBUG_ENTER("NdbDictInterface::create_filegroup");
UtilBufferWriter w(m_buffer);
DictFilegroupInfo::Filegroup fg; fg.init();
@@ -4638,17 +4682,32 @@ NdbDictInterface::create_filegroup(const NdbFilegroupImpl & group){
ptr[0].sz = m_buffer.length() / 4;
int err[] = { CreateFilegroupRef::Busy, CreateFilegroupRef::NotMaster, 0};
- DBUG_RETURN(dictSignal(&tSignal, ptr, 1,
- 0, // master
- WAIT_CREATE_INDX_REQ,
- DICT_WAITFOR_TIMEOUT, 100,
- err));
+ int ret = dictSignal(&tSignal, ptr, 1,
+ 0, // master
+ WAIT_CREATE_INDX_REQ,
+ DICT_WAITFOR_TIMEOUT, 100,
+ err);
+
+ if (ret == 0 && obj)
+ {
+ Uint32* data = (Uint32*)m_buffer.get_data();
+ obj->m_id = data[0];
+ obj->m_version = data[1];
+ }
+
+ DBUG_RETURN(ret);
}
void
NdbDictInterface::execCREATE_FILEGROUP_CONF(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
+ const CreateFilegroupConf* conf=
+ CAST_CONSTPTR(CreateFilegroupConf, signal->getDataPtr());
+ m_buffer.grow(4 * 2); // 2 words
+ Uint32* data = (Uint32*)m_buffer.get_data();
+ data[0] = conf->filegroupId;
+ data[1] = conf->filegroupVersion;
m_waiter.signal(NO_WAIT);
}
@@ -4664,7 +4723,8 @@ NdbDictInterface::execCREATE_FILEGROUP_REF(NdbApiSignal * signal,
}
int
-NdbDictInterface::drop_filegroup(const NdbFilegroupImpl & group){
+NdbDictInterface::drop_filegroup(const NdbFilegroupImpl & group)
+{
DBUG_ENTER("NdbDictInterface::drop_filegroup");
NdbApiSignal tSignal(m_reference);
tSignal.theReceiversBlockNumber = DBDICT;
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
index b6961edd019..35db103aa9f 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
@@ -46,7 +46,17 @@ public:
NdbDictionary::Object::Status m_status;
bool change();
+
+ static NdbDictObjectImpl & getImpl(NdbDictionary::ObjectId & t) {
+ return t.m_impl;
+ }
+ static const NdbDictObjectImpl & getImpl(const NdbDictionary::ObjectId & t){
+ return t.m_impl;
+ }
+
protected:
+ friend class NdbDictionary::ObjectId;
+
NdbDictObjectImpl(NdbDictionary::Object::Type type) :
m_type(type),
m_status(NdbDictionary::Object::New) {
@@ -184,6 +194,7 @@ public:
Vector<Uint16> m_fragments;
Uint64 m_max_rows;
+ Uint64 m_min_rows;
Uint32 m_default_no_part_flag;
bool m_linear_flag;
bool m_logging;
@@ -468,9 +479,10 @@ public:
static int parseFilegroupInfo(NdbFilegroupImpl &dst,
const Uint32 * data, Uint32 len);
- int create_file(const NdbFileImpl &, const NdbFilegroupImpl&, bool overwrite = false);
+ int create_file(const NdbFileImpl &, const NdbFilegroupImpl&,
+ bool overwrite, NdbDictObjectImpl*);
int drop_file(const NdbFileImpl &);
- int create_filegroup(const NdbFilegroupImpl &);
+ int create_filegroup(const NdbFilegroupImpl &, NdbDictObjectImpl*);
int drop_filegroup(const NdbFilegroupImpl &);
int get_filegroup(NdbFilegroupImpl&, NdbDictionary::Object::Type, Uint32);
@@ -622,17 +634,17 @@ public:
NdbEventImpl * getBlobEvent(const NdbEventImpl& ev, uint col_no);
NdbEventImpl * getEventImpl(const char * internalName);
- int createDatafile(const NdbDatafileImpl &, bool force = false);
+ int createDatafile(const NdbDatafileImpl &, bool force, NdbDictObjectImpl*);
int dropDatafile(const NdbDatafileImpl &);
- int createUndofile(const NdbUndofileImpl &, bool force = false);
+ int createUndofile(const NdbUndofileImpl &, bool force, NdbDictObjectImpl*);
int dropUndofile(const NdbUndofileImpl &);
- int createTablespace(const NdbTablespaceImpl &);
+ int createTablespace(const NdbTablespaceImpl &, NdbDictObjectImpl*);
int dropTablespace(const NdbTablespaceImpl &);
- int createLogfileGroup(const NdbLogfileGroupImpl &);
+ int createLogfileGroup(const NdbLogfileGroupImpl &, NdbDictObjectImpl*);
int dropLogfileGroup(const NdbLogfileGroupImpl &);
-
+
const NdbError & getNdbError() const;
NdbError m_error;
Uint32 m_local_table_data_size;
diff --git a/storage/ndb/src/ndbapi/NdbReceiver.cpp b/storage/ndb/src/ndbapi/NdbReceiver.cpp
index 0e83be75679..dae825202d9 100644
--- a/storage/ndb/src/ndbapi/NdbReceiver.cpp
+++ b/storage/ndb/src/ndbapi/NdbReceiver.cpp
@@ -121,7 +121,15 @@ NdbReceiver::calculate_batch_size(Uint32 key_size,
* no more than MAX_SCAN_BATCH_SIZE is sent from all nodes in total per
* batch.
*/
- batch_byte_size= max_batch_byte_size;
+ if (batch_size == 0)
+ {
+ batch_byte_size= max_batch_byte_size;
+ }
+ else
+ {
+ batch_byte_size= batch_size * tot_size;
+ }
+
if (batch_byte_size * parallelism > max_scan_batch_size) {
batch_byte_size= max_scan_batch_size / parallelism;
}
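
The batch byte budget now honours a caller-chosen row count instead of always taking the maximum. A standalone sketch of the selection rule (parameter names are assumptions):

    #include <cstdint>
    #include <cassert>

    uint32_t chooseBatchBytes(uint32_t batch_size, uint32_t tot_size,
                              uint32_t max_batch_byte_size)
    {
      return batch_size == 0 ? max_batch_byte_size    // old behaviour
                             : batch_size * tot_size; // honour the user's batch
    }

    int main() {
      assert(chooseBatchBytes(0, 128, 32768) == 32768);
      assert(chooseBatchBytes(64, 128, 32768) == 8192);
      return 0;
    }
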
diff --git a/storage/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
index 7379433e1d5..5852570a686 100644
--- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
@@ -119,7 +119,8 @@ NdbScanOperation::init(const NdbTableImpl* tab, NdbTransaction* myConnection)
int
NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
Uint32 scan_flags,
- Uint32 parallel)
+ Uint32 parallel,
+ Uint32 batch)
{
m_ordered = m_descending = false;
Uint32 fragCount = m_currentTable->m_fragmentCount;
@@ -191,8 +192,11 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
tupScan = false;
}
- theParallelism = parallel;
-
+ if (rangeScan && (scan_flags & SF_OrderBy))
+ parallel = fragCount;
+
+ theParallelism = parallel;
+
if(fix_receivers(parallel) == -1){
setErrorCodeAbort(4000);
return -1;
@@ -211,6 +215,7 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
req->tableSchemaVersion = m_accessTable->m_version;
req->storedProcId = 0xFFFF;
req->buddyConPtr = theNdbCon->theBuddyConPtr;
+ req->first_batch_size = batch; // Save user specified batch size
Uint32 reqInfo = 0;
ScanTabReq::setParallelism(reqInfo, parallel);
@@ -768,13 +773,14 @@ int NdbScanOperation::prepareSendScan(Uint32 aTC_ConnectPtr,
* The number of records sent by each LQH is calculated and the kernel
* is informed of this number by updating the SCAN_TABREQ signal
*/
- Uint32 batch_size, batch_byte_size, first_batch_size;
+ ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
+ Uint32 batch_size = req->first_batch_size; // User specified
+ Uint32 batch_byte_size, first_batch_size;
theReceiver.calculate_batch_size(key_size,
theParallelism,
batch_size,
batch_byte_size,
first_batch_size);
- ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
ScanTabReq::setScanBatch(req->requestInfo, batch_size);
req->batch_byte_size= batch_byte_size;
req->first_batch_size= first_batch_size;
@@ -1268,13 +1274,14 @@ NdbIndexScanOperation::getKeyFromSCANTABREQ(Uint32* data, Uint32 size)
int
NdbIndexScanOperation::readTuples(LockMode lm,
Uint32 scan_flags,
- Uint32 parallel)
+ Uint32 parallel,
+ Uint32 batch)
{
const bool order_by = scan_flags & SF_OrderBy;
const bool order_desc = scan_flags & SF_Descending;
const bool read_range_no = scan_flags & SF_ReadRangeNo;
-
- int res = NdbScanOperation::readTuples(lm, scan_flags, 0);
+
+ int res = NdbScanOperation::readTuples(lm, scan_flags, parallel, batch);
if(!res && read_range_no)
{
m_read_range_no = 1;
@@ -1567,13 +1574,68 @@ NdbScanOperation::close_impl(TransporterFacade* tp, bool forceSend,
return -1;
}
+ bool holdLock = false;
+ if (theSCAN_TABREQ)
+ {
+ ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
+ holdLock = ScanTabReq::getHoldLockFlag(req->requestInfo);
+ }
+
+ /**
+ * When using locks, force close of scan directly
+ */
+ if (holdLock && theError.code == 0 &&
+ (m_sent_receivers_count + m_conf_receivers_count + m_api_receivers_count))
+ {
+ NdbApiSignal tSignal(theNdb->theMyRef);
+ tSignal.setSignal(GSN_SCAN_NEXTREQ);
+
+ Uint32* theData = tSignal.getDataPtrSend();
+ Uint64 transId = theNdbCon->theTransactionId;
+ theData[0] = theNdbCon->theTCConPtr;
+ theData[1] = 1;
+ theData[2] = transId;
+ theData[3] = (Uint32) (transId >> 32);
+
+ tSignal.setLength(4);
+ int ret = tp->sendSignal(&tSignal, nodeId);
+ if (ret)
+ {
+ setErrorCode(4008);
+ return -1;
+ }
+
+ /**
+ * If no receiver is outstanding...
+ * set it to 1 as execCLOSE_SCAN_REP resets it
+ */
+ m_sent_receivers_count = m_sent_receivers_count ? m_sent_receivers_count : 1;
+
+ while(theError.code == 0 && (m_sent_receivers_count + m_conf_receivers_count))
+ {
+ int return_code = poll_guard->wait_scan(WAITFOR_SCAN_TIMEOUT, nodeId, forceSend);
+ switch(return_code){
+ case 0:
+ break;
+ case -1:
+ setErrorCode(4008);
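+      // fall through: the -2 cleanup below also applies after a timeout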
+ case -2:
+ m_api_receivers_count = 0;
+ m_conf_receivers_count = 0;
+ m_sent_receivers_count = 0;
+ theNdbCon->theReleaseOnClose = true;
+ return -1;
+ }
+ }
+ return 0;
+ }
+
/**
* Wait for outstanding
*/
while(theError.code == 0 && m_sent_receivers_count)
{
- int return_code= poll_guard->wait_scan(WAITFOR_SCAN_TIMEOUT, nodeId,
- false);
+ int return_code= poll_guard->wait_scan(WAITFOR_SCAN_TIMEOUT, nodeId, forceSend);
switch(return_code){
case 0:
break;
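
A usage sketch of the extended readTuples() signature (assumes NdbApi.hpp and an open transaction; batch = 0 keeps the previous automatic sizing):

    #include <NdbApi.hpp>

    // sketch: 4-way parallel tup scan delivering 64-row batches
    int startScan(NdbTransaction* trans, NdbScanOperation*& op)
    {
      op = trans->getNdbScanOperation("t1");
      if (op == 0)
        return -1;
      return op->readTuples(NdbOperation::LM_CommittedRead,
                            NdbScanOperation::SF_TupScan,
                            /*parallel=*/4,
                            /*batch=*/64);
    }
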
diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c
index 4816cda1c74..486d78538f0 100644
--- a/storage/ndb/src/ndbapi/ndberror.c
+++ b/storage/ndb/src/ndbapi/ndberror.c
@@ -420,6 +420,7 @@ ErrorBundle ErrorCodes[] = {
{ 1514, DMEC, SE, "Currently there is a limit of one logfile group" },
{ 773, DMEC, SE, "Out of string memory, please modify StringMemory config parameter" },
+ { 775, DMEC, SE, "Create file is not supported when Diskless=1" },
/**
* FunctionNotImplemented
diff --git a/storage/ndb/test/ndbapi/testScan.cpp b/storage/ndb/test/ndbapi/testScan.cpp
index d8c45985630..097454f69b2 100644
--- a/storage/ndb/test/ndbapi/testScan.cpp
+++ b/storage/ndb/test/ndbapi/testScan.cpp
@@ -1151,70 +1151,76 @@ runScanVariants(NDBT_Context* ctx, NDBT_Step* step)
{
for(int flags = 0; flags < 4; flags++)
{
- for (int par = 0; par < 16; par += 1 + (rand() % 3))
+ for (int batch = 0; batch < 100; batch += (1 + batch + (batch >> 3)))
{
- bool disk = flags & 1;
- bool tups = flags & 2;
- g_info << "lm: " << lm
- << " disk: " << disk
- << " tup scan: " << tups
- << " par: " << par
- << endl;
-
- NdbConnection* pCon = pNdb->startTransaction();
- NdbScanOperation* pOp = pCon->getNdbScanOperation(pTab->getName());
- if (pOp == NULL) {
- ERR(pCon->getNdbError());
- return NDBT_FAILED;
- }
-
- if( pOp->readTuples((NdbOperation::LockMode)lm,
- tups ? NdbScanOperation::SF_TupScan : 0,
- par) != 0)
+ for (int par = 0; par < 16; par += 1 + (rand() % 3))
{
- ERR(pCon->getNdbError());
- return NDBT_FAILED;
- }
-
- int check = pOp->interpret_exit_ok();
- if( check == -1 ) {
- ERR(pCon->getNdbError());
- return NDBT_FAILED;
- }
-
- // Define attributes to read
- bool found_disk = false;
- for(int a = 0; a<pTab->getNoOfColumns(); a++){
- if (pTab->getColumn(a)->getStorageType() == NdbDictionary::Column::StorageTypeDisk)
- {
- found_disk = true;
- if (!disk)
- continue;
+ bool disk = flags & 1;
+ bool tups = flags & 2;
+ g_info << "lm: " << lm
+ << " disk: " << disk
+ << " tup scan: " << tups
+ << " par: " << par
+ << " batch: " << batch
+ << endl;
+
+ NdbConnection* pCon = pNdb->startTransaction();
+ NdbScanOperation* pOp = pCon->getNdbScanOperation(pTab->getName());
+ if (pOp == NULL) {
+ ERR(pCon->getNdbError());
+ return NDBT_FAILED;
}
- if((pOp->getValue(pTab->getColumn(a)->getName())) == 0) {
+ if( pOp->readTuples((NdbOperation::LockMode)lm,
+ tups ? NdbScanOperation::SF_TupScan : 0,
+ par,
+ batch) != 0)
+ {
ERR(pCon->getNdbError());
return NDBT_FAILED;
}
- }
-
- if (! (disk && !found_disk))
- {
- check = pCon->execute(NoCommit);
+
+ int check = pOp->interpret_exit_ok();
if( check == -1 ) {
ERR(pCon->getNdbError());
return NDBT_FAILED;
}
- int res;
- int row = 0;
- while((res = pOp->nextResult()) == 0);
+ // Define attributes to read
+ bool found_disk = false;
+ for(int a = 0; a<pTab->getNoOfColumns(); a++){
+ if (pTab->getColumn(a)->getStorageType() ==
+ NdbDictionary::Column::StorageTypeDisk)
+ {
+ found_disk = true;
+ if (!disk)
+ continue;
+ }
+
+ if((pOp->getValue(pTab->getColumn(a)->getName())) == 0) {
+ ERR(pCon->getNdbError());
+ return NDBT_FAILED;
+ }
+ }
+
+ if (! (disk && !found_disk))
+ {
+ check = pCon->execute(NoCommit);
+ if( check == -1 ) {
+ ERR(pCon->getNdbError());
+ return NDBT_FAILED;
+ }
+
+ int res;
+ int row = 0;
+ while((res = pOp->nextResult()) == 0);
+ }
+ pCon->close();
}
- pCon->close();
}
}
}
-
+
return NDBT_OK;
}
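
The rewritten loop nest above adds an outer batch-size dimension to runScanVariants. The step expression 1 + batch + (batch >> 3) grows batch slightly faster than doubling, so only a handful of sizes are exercised per flag combination; a self-contained illustration of exactly which ones:

    #include <iostream>

    // Enumerates the batch sizes the new outer loop in runScanVariants tries.
    int main() {
      for (int batch = 0; batch < 100; batch += (1 + batch + (batch >> 3)))
        std::cout << batch << ' ';   // prints: 0 1 3 7 15 32 69
      std::cout << '\n';
      return 0;
    }
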
diff --git a/storage/ndb/test/ndbapi/test_event.cpp b/storage/ndb/test/ndbapi/test_event.cpp
index d8939f06b14..d132ec103ee 100644
--- a/storage/ndb/test/ndbapi/test_event.cpp
+++ b/storage/ndb/test/ndbapi/test_event.cpp
@@ -1559,6 +1559,56 @@ static int runCreateDropNR(NDBT_Context* ctx, NDBT_Step* step)
DBUG_RETURN(result);
}
+static
+int
+runSubscribeUnsubscribe(NDBT_Context* ctx, NDBT_Step* step)
+{
+ char buf[1024];
+ const NdbDictionary::Table & tab = * ctx->getTab();
+ sprintf(buf, "%s_EVENT", tab.getName());
+ Ndb* ndb = GETNDB(step);
+ int loops = 5 * ctx->getNumLoops();
+
+ while (--loops)
+ {
+ NdbEventOperation *pOp= ndb->createEventOperation(buf);
+ if (pOp == 0)
+ {
+ g_err << "createEventOperation: "
+ << ndb->getNdbError().code << " "
+ << ndb->getNdbError().message << endl;
+ return NDBT_FAILED;
+ }
+
+ int n_columns= tab.getNoOfColumns();
+ for (int j = 0; j < n_columns; j++)
+ {
+ pOp->getValue(tab.getColumn(j)->getName());
+ pOp->getPreValue(tab.getColumn(j)->getName());
+ }
+ if ( pOp->execute() )
+ {
+ g_err << "pOp->execute(): "
+ << pOp->getNdbError().code << " "
+ << pOp->getNdbError().message << endl;
+
+ ndb->dropEventOperation(pOp);
+
+ return NDBT_FAILED;
+ }
+
+ if (ndb->dropEventOperation(pOp))
+ {
+      g_err << "dropEventOperation: "
+ << ndb->getNdbError().code << " "
+ << ndb->getNdbError().message << endl;
+ return NDBT_FAILED;
+ }
+ }
+
+ return NDBT_OK;
+}
+
NDBT_TESTSUITE(test_event);
TESTCASE("BasicEventOperation",
"Verify that we can listen to Events"
@@ -1673,6 +1723,13 @@ TESTCASE("CreateDropNR",
"NOTE! No errors are allowed!" ){
FINALIZER(runCreateDropNR);
}
+TESTCASE("SubscribeUnsubscribe",
+         "A bunch of threads doing subscribe/unsubscribe in loop. "
+ "NOTE! No errors are allowed!" ){
+ INITIALIZER(runCreateEvent);
+ STEPS(runSubscribeUnsubscribe, 16);
+ FINALIZER(runDropEvent);
+}
NDBT_TESTSUITE_END(test_event);
int main(int argc, const char** argv){
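
The new SubscribeUnsubscribe test registers 16 parallel steps, each looping over create/drop of an event operation on the same event. A rough sketch of that concurrency shape, with subscribe()/unsubscribe() as hypothetical stand-ins for createEventOperation()/dropEventOperation():

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    static std::atomic<int> active{0};
    static void subscribe()   { active.fetch_add(1); }
    static void unsubscribe() { active.fetch_sub(1); }

    int main() {
      std::vector<std::thread> steps;
      for (int s = 0; s < 16; s++)           // STEPS(..., 16)
        steps.emplace_back([] {
          for (int loops = 5; --loops; ) {   // same loop shape as the test
            subscribe();
            unsubscribe();
          }
        });
      for (std::thread& t : steps) t.join();
      std::printf("active subscriptions at end: %d\n", active.load());
      return active.load() == 0 ? 0 : 1;     // any leaked subscription fails
    }
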
diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp
index 7762785ef61..ef535cf9e26 100644
--- a/storage/ndb/tools/restore/Restore.cpp
+++ b/storage/ndb/tools/restore/Restore.cpp
@@ -85,7 +85,12 @@ RestoreMetaData::RestoreMetaData(const char* path, Uint32 nodeId, Uint32 bNo) {
RestoreMetaData::~RestoreMetaData(){
for(Uint32 i= 0; i < allTables.size(); i++)
- delete allTables[i];
+ {
+ TableS *table = allTables[i];
+ for(Uint32 j= 0; j < table->m_fragmentInfo.size(); j++)
+ delete table->m_fragmentInfo[j];
+ delete table;
+ }
allTables.clear();
}
@@ -118,6 +123,9 @@ RestoreMetaData::loadContent()
return 0;
if(!readGCPEntry())
return 0;
+
+ if(!readFragmentInfo())
+ return 0;
return 1;
}
@@ -353,6 +361,52 @@ RestoreMetaData::readGCPEntry() {
return true;
}
+bool
+RestoreMetaData::readFragmentInfo()
+{
+ BackupFormat::CtlFile::FragmentInfo fragInfo;
+ TableS * table = 0;
+ Uint32 tableId = RNIL;
+
+ while (buffer_read(&fragInfo, 4, 2) == 2)
+ {
+ fragInfo.SectionType = ntohl(fragInfo.SectionType);
+ fragInfo.SectionLength = ntohl(fragInfo.SectionLength);
+
+ if (fragInfo.SectionType != BackupFormat::FRAGMENT_INFO)
+ {
+ err << "readFragmentInfo invalid section type: " <<
+ fragInfo.SectionType << endl;
+ return false;
+ }
+
+ if (buffer_read(&fragInfo.TableId, (fragInfo.SectionLength-2)*4, 1) != 1)
+ {
+ err << "readFragmentInfo invalid section length: " <<
+ fragInfo.SectionLength << endl;
+ return false;
+ }
+
+ fragInfo.TableId = ntohl(fragInfo.TableId);
+ if (fragInfo.TableId != tableId)
+ {
+ tableId = fragInfo.TableId;
+ table = getTable(tableId);
+ }
+
+ FragmentInfo * tmp = new FragmentInfo;
+ tmp->fragmentNo = ntohl(fragInfo.FragmentNo);
+ tmp->noOfRecords = ntohl(fragInfo.NoOfRecordsLow) +
+ (((Uint64)ntohl(fragInfo.NoOfRecordsHigh)) << 32);
+ tmp->filePosLow = ntohl(fragInfo.FilePosLow);
+ tmp->filePosHigh = ntohl(fragInfo.FilePosHigh);
+
+ table->m_fragmentInfo.push_back(tmp);
+ table->m_noOfRecords += tmp->noOfRecords;
+ }
+ return true;
+}
+
TableS::TableS(Uint32 version, NdbTableImpl* tableImpl)
: m_dictTable(tableImpl)
{
@@ -360,6 +414,7 @@ TableS::TableS(Uint32 version, NdbTableImpl* tableImpl)
m_noOfNullable = m_nullBitmaskSize = 0;
m_auto_val_id= ~(Uint32)0;
m_max_auto_val= 0;
+ m_noOfRecords= 0;
backupVersion = version;
isSysTable = false;
@@ -1161,4 +1216,5 @@ operator<<(NdbOut& ndbout, const TableS & table){
template class Vector<TableS*>;
template class Vector<AttributeS*>;
template class Vector<AttributeDesc*>;
+template class Vector<FragmentInfo*>;
template class Vector<DictObject>;
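
readFragmentInfo above walks the FRAGMENT_INFO sections of the backup CTL file and recombines each fragment's record count from two big-endian 32-bit words. A standalone sketch of that reconstruction, assuming a POSIX ntohl/htonl; the input values are invented for illustration:

    #include <arpa/inet.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t lowBE  = htonl(0x89abcdefu);  // NoOfRecordsLow on the wire
      uint32_t highBE = htonl(0x01234567u);  // NoOfRecordsHigh on the wire
      uint64_t records = ntohl(lowBE) +
                         (((uint64_t)ntohl(highBE)) << 32);
      std::printf("records = 0x%016llx\n",   // 0x0123456789abcdef
                  (unsigned long long)records);
      return 0;
    }
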
diff --git a/storage/ndb/tools/restore/Restore.hpp b/storage/ndb/tools/restore/Restore.hpp
index 8698d0943e2..e824f1bbdc0 100644
--- a/storage/ndb/tools/restore/Restore.hpp
+++ b/storage/ndb/tools/restore/Restore.hpp
@@ -114,6 +114,14 @@ public:
AttributeData * getData(int i) const;
}; // class TupleS
+struct FragmentInfo
+{
+ Uint32 fragmentNo;
+ Uint64 noOfRecords;
+ Uint32 filePosLow;
+ Uint32 filePosHigh;
+};
+
class TableS {
friend class TupleS;
@@ -136,6 +144,9 @@ class TableS {
bool isSysTable;
+ Uint64 m_noOfRecords;
+ Vector<FragmentInfo *> m_fragmentInfo;
+
void createAttr(NdbDictionary::Column *column);
public:
@@ -146,6 +157,9 @@ public:
Uint32 getTableId() const {
return m_dictTable->getTableId();
}
+  Uint64 getNoOfRecords() const {
+ return m_noOfRecords;
+ }
/*
void setMysqlTableName(char * tableName) {
strpcpy(mysqlTableName, tableName);
@@ -286,6 +300,7 @@ class RestoreMetaData : public BackupFile {
bool markSysTables();
bool readGCPEntry();
+ bool readFragmentInfo();
Uint32 readMetaTableList();
Uint32 m_startGCP;
diff --git a/storage/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp
index 6b0d42ee0d2..b190652232e 100644
--- a/storage/ndb/tools/restore/consumer_restore.cpp
+++ b/storage/ndb/tools/restore/consumer_restore.cpp
@@ -533,9 +533,11 @@ BackupRestore::object(Uint32 type, const void * ptr)
if (!m_no_restore_disk)
{
NdbDictionary::Datafile old(*(NdbDictionary::Datafile*)ptr);
- NdbDictionary::Tablespace * ts = m_tablespaces[old.getTablespaceId()];
+ NdbDictionary::ObjectId objid;
+ old.getTablespaceId(&objid);
+ NdbDictionary::Tablespace * ts = m_tablespaces[objid.getObjectId()];
debug << "Connecting datafile " << old.getPath()
- << " to tablespace: oldid: " << old.getTablespaceId()
+ << " to tablespace: oldid: " << objid.getObjectId()
<< " newid: " << ts->getObjectId() << endl;
old.setTablespace(* ts);
info << "Creating datafile \"" << old.getPath() << "\"..." << flush;
@@ -554,10 +556,11 @@ BackupRestore::object(Uint32 type, const void * ptr)
if (!m_no_restore_disk)
{
NdbDictionary::Undofile old(*(NdbDictionary::Undofile*)ptr);
- NdbDictionary::LogfileGroup * lg =
- m_logfilegroups[old.getLogfileGroupId()];
+ NdbDictionary::ObjectId objid;
+ old.getLogfileGroupId(&objid);
+ NdbDictionary::LogfileGroup * lg = m_logfilegroups[objid.getObjectId()];
debug << "Connecting undofile " << old.getPath()
- << " to logfile group: oldid: " << old.getLogfileGroupId()
+ << " to logfile group: oldid: " << objid.getObjectId()
<< " newid: " << lg->getObjectId()
<< " " << (void*)lg << endl;
old.setLogfileGroup(* lg);
@@ -709,6 +712,16 @@ BackupRestore::table(const TableS & table){
copy.setFragmentData((const void *)ng_array, no_parts << 1);
}
+ /*
+    update min and max rows to reflect the restored record count; this
+    ensures that memory is allocated properly in the ndb kernel
+ */
+ copy.setMinRows(table.getNoOfRecords());
+ if (table.getNoOfRecords() > copy.getMaxRows())
+ {
+ copy.setMaxRows(table.getNoOfRecords());
+ }
+
if (dict->createTable(copy) == -1)
{
err << "Create table " << table.getTableName() << " failed: "
diff --git a/strings/Makefile.am b/strings/Makefile.am
index 1cba4bab3f0..416442dd564 100644
--- a/strings/Makefile.am
+++ b/strings/Makefile.am
@@ -67,12 +67,6 @@ conf_to_src_LDFLAGS= @NOINST_LDFLAGS@
#strtoull.o: @CHARSET_OBJS@
-if ASSEMBLER
-# On Linux gcc can compile the assembly files
-%.o : %.s
- $(AS) $(ASFLAGS) -o $@ $<
-endif
-
FLAGS=$(DEFS) $(INCLUDES) $(CPPFLAGS) $(CFLAGS) @NOINST_LDFLAGS@
str_test: str_test.c $(pkglib_LIBRARIES)
diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh
index 501403a0880..72e230da0c2 100644
--- a/support-files/mysql.spec.sh
+++ b/support-files/mysql.spec.sh
@@ -148,6 +148,19 @@ They should be used with caution.
%{see_base}
+%package bench
+Requires: %{name}-client perl-DBI perl
+Summary: MySQL - Benchmarks and test system
+Group: Applications/Databases
+Provides: mysql-bench
+Obsoletes: mysql-bench
+AutoReqProv: no
+
+%description bench
+This package contains MySQL benchmark scripts and data.
+
+%{see_base}
+
%package devel
Summary: MySQL - Development header files and libraries
Group: Applications/Databases