author | unknown <cmiller@zippy.(none)> | 2006-07-10 13:38:22 -0400
committer | unknown <cmiller@zippy.(none)> | 2006-07-10 13:38:22 -0400
commit | d6e3a9ddcb6a877feb65a98953b3ec9e6426be47 (patch)
tree | 555b6e52dd857768fcf99821dd6c45b62b80dd30
parent | 9b6a1384114a41beb8b492aed335096ef03c86df (diff)
parent | 8154ca618b68784098dcb8b84ba2cf77707c80e4 (diff)
download | mariadb-git-d6e3a9ddcb6a877feb65a98953b3ec9e6426be47.tar.gz
Merge zippy.(none):/home/cmiller/work/mysql/merge/mysql-5.1
into zippy.(none):/home/cmiller/work/mysql/merge/mysql-5.1-new-maint
mysql-test/mysql-test-run.pl:
Auto merged
mysql-test/r/create.result:
Auto merged
mysql-test/r/ps.result:
Auto merged
mysql-test/r/sp.result:
Auto merged
mysql-test/t/create.test:
Auto merged
mysql-test/t/ndb_autodiscover3.test:
Auto merged
mysql-test/t/ps.test:
Auto merged
mysql-test/t/sp.test:
Auto merged
mysql-test/t/wait_timeout.test:
Auto merged
sql/field.cc:
Auto merged
sql/field.h:
Auto merged
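
One behavioural change worth noting from the merged create.result hunk below: when `CREATE TABLE ... LIKE` names an unqualified source table and no database is currently selected, the expected error is now 3D000 rather than the old 42000. A minimal, runnable recap of the scenario as the test stages it (the source table `x` is deliberately nonexistent there):

```sql
create database mysqltest;
use mysqltest;
drop database mysqltest;       -- the current database is now gone
create table test.t1 like x;   -- now: ERROR 3D000: No database selected
                               -- was: ERROR 42000: Incorrect database name 'NULL'
```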
137 files changed, 5022 insertions, 910 deletions
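
The func_sapdb.result and func_time.result hunks below also pin down behaviour at the upper edge of the DATE range; condensed into a runnable snippet, with the expected values from the new test results in comments:

```sql
SELECT MAKEDATE(9999,365);  -- 9999-12-31, the last representable DATE
SELECT MAKEDATE(9999,366);  -- NULL: one day past the supported range
SELECT DATE_SUB(STR_TO_DATE('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'),
                INTERVAL 1 MINUTE);  -- 9999-12-31 00:00:00
SELECT DATE_ADD(STR_TO_DATE('9999-12-30 23:59:00','%Y-%m-%d %H:%i:%s'),
                INTERVAL 1 MINUTE);  -- 9999-12-31 00:00:00
```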
diff --git a/include/my_sys.h b/include/my_sys.h
index 2dc4053f70d..f2b08e2b372 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -163,7 +163,7 @@ extern gptr my_realloc(gptr oldpoint,uint Size,myf MyFlags);
 extern void my_no_flags_free(gptr ptr);
 extern gptr my_memdup(const byte *from,uint length,myf MyFlags);
 extern char *my_strdup(const char *from,myf MyFlags);
-extern char *my_strndup(const byte *from, uint length,
+extern char *my_strndup(const char *from, uint length,
                         myf MyFlags);
 /* we do use FG (as a no-op) in below so that a typo on FG is caught */
 #define my_free(PTR,FG) ((void)FG,my_no_flags_free(PTR))
diff --git a/include/sql_common.h b/include/sql_common.h
index c07a4a831bb..9fc8d4f457b 100644
--- a/include/sql_common.h
+++ b/include/sql_common.h
@@ -22,6 +22,7 @@ extern const char *not_error_sqlstate;
 extern "C" {
 #endif
 
+extern CHARSET_INFO *default_client_charset_info;
 MYSQL_FIELD *unpack_fields(MYSQL_DATA *data,MEM_ROOT *alloc,uint fields,
                            my_bool default_value, uint server_capabilities);
 void free_rows(MYSQL_DATA *cur);
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index 9e763df8a0a..4995e904bc0 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -37,6 +37,8 @@ extern "C"
 int check_user(THD *thd, enum enum_server_command command,
                const char *passwd, uint passwd_len, const char *db,
                bool check_count);
+void thd_init_client_charset(THD *thd, uint cs_number);
+
 C_MODE_START
 #include <mysql.h>
@@ -604,11 +606,14 @@ err:
   return NULL;
 }
 
+
 #ifdef NO_EMBEDDED_ACCESS_CHECKS
 int check_embedded_connection(MYSQL *mysql)
 {
   int result;
   THD *thd= (THD*)mysql->thd;
+  thd_init_client_charset(thd, mysql->charset->number);
+  thd->update_charset();
   Security_context *sctx= thd->security_ctx;
   sctx->host_or_ip= sctx->host= (char*) my_localhost;
   strmake(sctx->priv_host, (char*) my_localhost, MAX_HOSTNAME-1);
@@ -627,6 +632,8 @@ int check_embedded_connection(MYSQL *mysql)
   char scramble_buff[SCRAMBLE_LENGTH];
   int passwd_len;
 
+  thd_init_client_charset(thd, mysql->charset->number);
+  thd->update_charset();
   if (mysql->options.client_ip)
   {
     sctx->host= my_strdup(mysql->options.client_ip, MYF(0));
diff --git a/libmysqld/libmysqld.c b/libmysqld/libmysqld.c
index cad1bd4c47b..5df61783451 100644
--- a/libmysqld/libmysqld.c
+++ b/libmysqld/libmysqld.c
@@ -90,49 +90,7 @@ static void end_server(MYSQL *mysql)
 }
 
-static int mysql_init_charset(MYSQL *mysql)
-{
-  char charset_name_buff[16], *charset_name;
-
-  if ((charset_name=mysql->options.charset_name))
-  {
-    const char *save=charsets_dir;
-    if (mysql->options.charset_dir)
-      charsets_dir=mysql->options.charset_dir;
-    mysql->charset=get_charset_by_name(mysql->options.charset_name,
-                                       MYF(MY_WME));
-    charsets_dir=save;
-  }
-  else if (mysql->server_language)
-  {
-    charset_name=charset_name_buff;
-    sprintf(charset_name,"%d",mysql->server_language);  /* In case of errors */
-    mysql->charset=get_charset((uint8) mysql->server_language, MYF(MY_WME));
-  }
-  else
-    mysql->charset=default_charset_info;
-
-  if (!mysql->charset)
-  {
-    mysql->net.last_errno=CR_CANT_READ_CHARSET;
-    strmov(mysql->net.sqlstate, "HY0000");
-    if (mysql->options.charset_dir)
-      sprintf(mysql->net.last_error,ER(mysql->net.last_errno),
-              charset_name ? charset_name : "unknown",
-              mysql->options.charset_dir);
-    else
-    {
-      char cs_dir_name[FN_REFLEN];
-      get_charsets_dir(cs_dir_name);
-      sprintf(mysql->net.last_error,ER(mysql->net.last_errno),
-              charset_name ? charset_name : "unknown",
-              cs_dir_name);
-    }
-    return mysql->net.last_errno;
-  }
-  return 0;
-}
-
+int mysql_init_character_set(MYSQL *mysql);
 
 MYSQL * STDCALL
 mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
@@ -222,10 +180,10 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
 
   init_embedded_mysql(mysql, client_flag, db_name);
 
-  if (check_embedded_connection(mysql))
+  if (mysql_init_character_set(mysql))
     goto error;
 
-  if (mysql_init_charset(mysql))
+  if (check_embedded_connection(mysql))
     goto error;
 
   mysql->server_status= SERVER_STATUS_AUTOCOMMIT;
diff --git a/mysql-test/include/ndb_default_cluster.inc b/mysql-test/include/ndb_default_cluster.inc
index 2f900b6a0b4..de7eda3c596 100644
--- a/mysql-test/include/ndb_default_cluster.inc
+++ b/mysql-test/include/ndb_default_cluster.inc
@@ -1,4 +1,4 @@
 -- require r/ndb_default_cluster.require
 disable_query_log;
-show status like "Ndb_connected_host";
+show status like "Ndb_config_from_host";
 enable_query_log;
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 43cab03f12a..cce8decc271 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -552,6 +552,11 @@ sub command_line_setup () {
               "($opt_master_myport - $opt_master_myport + 10)");
   }
 
+  # This is needed for test log evaluation in "gen-build-status-page"
+  # in all cases where the calling tool does not log the commands
+  # directly before it executes them, like "make test-force-pl" in RPM builds.
+  print "Logging: $0 ", join(" ", @ARGV), "\n";
+
   # Read the command line
   # Note: Keep list, and the order, in sync with usage at end of this file
diff --git a/mysql-test/r/archive.result b/mysql-test/r/archive.result
index cacf4aaf304..1dfec8ff713 100644
--- a/mysql-test/r/archive.result
+++ b/mysql-test/r/archive.result
@@ -13812,6 +13812,8 @@ select * from t1 where i between 2 and 4 and v in ('def','3r4f','lmn');
 i	v
 4	3r4f
 alter table t1 data directory="$MYSQLTEST_VARDIR/tmp";
+Warnings:
+Warning	0	DATA DIRECTORY option ignored
 select * from t1;
 i	v
 1	def
diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result
index c3710865b15..b9d97751e0d 100644
--- a/mysql-test/r/create.result
+++ b/mysql-test/r/create.result
@@ -620,7 +620,7 @@ create database mysqltest;
 use mysqltest;
 drop database mysqltest;
 create table test.t1 like x;
-ERROR 42000: Incorrect database name 'NULL'
+ERROR 3D000: No database selected
 drop table if exists test.t1;
 create database mysqltest;
 use mysqltest;
diff --git a/mysql-test/r/events_logs_tests.result b/mysql-test/r/events_logs_tests.result
index 9202d63fd2c..950090399d5 100644
--- a/mysql-test/r/events_logs_tests.result
+++ b/mysql-test/r/events_logs_tests.result
@@ -49,8 +49,8 @@ USER_HOST SLEEPVAL events_test SELECT SLEEP(2)
 SET SESSION long_query_time=300;
 "Make it quite long"
 TRUNCATE mysql.slow_log;
-SET SESSION long_query_time=1;
 CREATE TABLE slow_event_test (slo_val tinyint, val tinyint);
+SET SESSION long_query_time=1;
 "This won't go to the slow log"
 CREATE EVENT long_event ON SCHEDULE EVERY 1 MINUTE DO INSERT INTO slow_event_test SELECT @@long_query_time, SLEEP(3);
 SELECT * FROM slow_event_test;
diff --git a/mysql-test/r/func_sapdb.result b/mysql-test/r/func_sapdb.result
index 0be9ea9cf86..7e9bba9710c 100644
--- a/mysql-test/r/func_sapdb.result
+++ b/mysql-test/r/func_sapdb.result
@@ -81,6 +81,12 @@ makedate(1997,1)
 select makedate(1997,0);
 makedate(1997,0)
 NULL
+select makedate(9999,365);
+makedate(9999,365)
+9999-12-31
+select makedate(9999,366);
+makedate(9999,366)
+NULL
 select addtime("1997-12-31 23:59:59.999999", "1 1:1:1.000002");
 addtime("1997-12-31 23:59:59.999999", "1 1:1:1.000002")
 1998-01-02 01:01:01.000001
diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result
index 34f4f5fb98d..5760ccdaa95 100644
--- a/mysql-test/r/func_time.result
+++ b/mysql-test/r/func_time.result
@@ -361,6 +361,12 @@ extract(SECOND FROM "1999-01-02 10:11:12")
 select extract(MONTH FROM "2001-02-00");
 extract(MONTH FROM "2001-02-00")
 2
+SELECT DATE_SUB(str_to_date('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE);
+DATE_SUB(str_to_date('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE)
+9999-12-31 00:00:00
+SELECT DATE_ADD(str_to_date('9999-12-30 23:59:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE);
+DATE_ADD(str_to_date('9999-12-30 23:59:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE)
+9999-12-31 00:00:00
 SELECT EXTRACT(QUARTER FROM '2004-01-15') AS quarter;
 quarter
 1
diff --git a/mysql-test/r/gis-rtree.result b/mysql-test/r/gis-rtree.result
index cd6a2510001..e2e7a612dec 100644
--- a/mysql-test/r/gis-rtree.result
+++ b/mysql-test/r/gis-rtree.result
@@ -816,3 +816,43 @@ check table t1 extended;
 Table	Op	Msg_type	Msg_text
 test.t1	check	status	OK
 drop table t1;
+CREATE TABLE t1 (
+c1 geometry NOT NULL default '',
+SPATIAL KEY i1 (c1(32))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
+                          -18.6055555000 -66.8158332999,
+                          -18.7186111000 -66.8102777000,
+                          -18.7211111000 -66.9269443999,
+                          -18.6086111000 -66.9327777000))'));
+CHECK TABLE t1 EXTENDED;
+Table	Op	Msg_type	Msg_text
+test.t1	check	status	OK
+DROP TABLE t1;
+CREATE TABLE t1 (
+c1 geometry NOT NULL default '',
+SPATIAL KEY i1 (c1(32))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
+                          -18.6055555000 -66.8158332999,
+                          -18.7186111000 -66.8102777000,
+                          -18.7211111000 -66.9269443999,
+                          -18.6086111000 -66.9327777000))'));
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-65.7402776999 -96.6686111000,
+                          -65.7372222000 -96.5516666000,
+                          -65.8502777000 -96.5461111000,
+                          -65.8527777000 -96.6627777000,
+                          -65.7402776999 -96.6686111000))'));
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
+                          -18.6055555000 -66.8158332999,
+                          -18.7186111000 -66.8102777000,
+                          -18.7211111000 -66.9269443999,
+                          -18.6086111000 -66.9327777000))'));
+CHECK TABLE t1 EXTENDED;
+Table	Op	Msg_type	Msg_text
+test.t1	check	status	OK
+DROP TABLE t1;
diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result
index 13e2d56d83e..7a0f689df36 100644
--- a/mysql-test/r/gis.result
+++ b/mysql-test/r/gis.result
@@ -694,3 +694,13 @@ alter table t1 add primary key pti(pt);
 ERROR 42000: BLOB/TEXT column 'pt' used in key specification without a key length
 alter table t1 add primary key pti(pt(20));
 drop table t1;
+create table t1 (g GEOMETRY);
+select * from t1;
+Catalog	Database	Table	Table_alias	Column	Column_alias	Type	Length	Max length	Is_null	Flags	Decimals	Charsetnr
+def	test	t1	t1	g	g	255	4294967295	0	Y	144	0	63
+g
+select asbinary(g) from t1;
+Catalog	Database	Table	Table_alias	Column	Column_alias	Type	Length	Max length	Is_null	Flags	Decimals	Charsetnr
+def				asbinary(g)	252	8192	0	Y	128	0	63
+asbinary(g)
+drop table t1;
diff --git a/mysql-test/r/lock_multi.result b/mysql-test/r/lock_multi.result
index f8cf539bd02..8ff02d898a3 100644
--- a/mysql-test/r/lock_multi.result
+++ b/mysql-test/r/lock_multi.result
@@ -66,6 +66,21 @@ Select_priv
 N
 use test;
 use test;
+CREATE TABLE t1 (c1 int);
+LOCK TABLE t1 WRITE;
+ FLUSH TABLES WITH READ LOCK;
+CREATE TABLE t2 (c1 int);
+UNLOCK TABLES;
+UNLOCK TABLES;
+DROP TABLE t1, t2;
+CREATE TABLE t1 (c1 int);
+LOCK TABLE t1 WRITE;
+ FLUSH TABLES WITH READ LOCK;
+CREATE TABLE t2 AS SELECT * FROM t1;
+ERROR HY000: Table 't2' was not locked with LOCK TABLES
+UNLOCK TABLES;
+UNLOCK TABLES;
+DROP TABLE t1;
 CREATE DATABASE mysqltest_1;
 FLUSH TABLES WITH READ LOCK;
 DROP DATABASE mysqltest_1;
@@ -80,19 +95,3 @@ lock tables t1 write;
 alter table t1 auto_increment=0;
 alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; //
 unlock tables;
 drop table t1;
-use mysql;
-LOCK TABLES columns_priv WRITE, db WRITE, host WRITE, user WRITE;
-FLUSH TABLES;
-use mysql;
- SELECT user.Select_priv FROM user, db WHERE user.user = db.user LIMIT 1;
-OPTIMIZE TABLES columns_priv, db, host, user;
-Table	Op	Msg_type	Msg_text
-mysql.columns_priv	optimize	status	OK
-mysql.db	optimize	status	OK
-mysql.host	optimize	status	OK
-mysql.user	optimize	status	OK
-UNLOCK TABLES;
-Select_priv
-N
-use test;
-use test;
diff --git a/mysql-test/r/log_state.result b/mysql-test/r/log_state.result
index 43735243787..0547c5a5bbf 100644
--- a/mysql-test/r/log_state.result
+++ b/mysql-test/r/log_state.result
@@ -102,7 +102,7 @@ show variables like 'general_log_file';
 Variable_name	Value
 general_log_file	#
 set global general_log= OFF;
-set global general_log_file='/tmp/log.master';
+set global general_log_file='MYSQLTEST_VARDIR/tmp/log.master';
 set global general_log= ON;
 create table t1(f1 int);
 drop table t1;
diff --git a/mysql-test/r/ndb_dd_advance.result b/mysql-test/r/ndb_dd_advance.result
new file mode 100644
index 00000000000..09fe75805d5
--- /dev/null
+++ b/mysql-test/r/ndb_dd_advance.result
@@ -0,0 +1,1088 @@
+DROP TABLE IF EXISTS test.t1;
+DROP TABLE IF EXISTS test.t2;
+**** Test Setup Section ****
+CREATE LOGFILE GROUP log_group1
+ADD UNDOFILE './log_group1/undofile.dat'
+INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE table_space1
+ADD DATAFILE './table_space1/datafile.dat'
+USE LOGFILE GROUP log_group1
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1
+(pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL)
+TABLESPACE table_space1 STORAGE DISK
+ENGINE=NDB;
+CREATE TABLE test.t2
+(pk2 INT NOT NULL PRIMARY KEY, b2 INT NOT NULL, c2 INT NOT NULL)
+ENGINE=NDB;
+
+**** Data load for first test ****
+INSERT INTO test.t1 VALUES
+(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
+(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
+(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15),
+(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20),
+(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25),
+(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30),
+(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35),
+(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40),
+(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45),
+(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50),
+(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55),
+(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60),
+(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65),
+(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70),
+(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75);
+INSERT INTO test.t2 VALUES
+(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
+(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
+(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), +(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), +(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), +(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), +(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), +(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), +(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), +(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), +(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), +(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), +(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), +(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), +(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); + +*** Test 1 Section Begins *** +SELECT COUNT(*) FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); +COUNT(*) +1 +SELECT * FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); +pk2 b2 c2 pk1 b c +4 4 4 4 4 4 +SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 WHERE b IN (4); +COUNT(*) +1 +SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2 WHERE pk1 IN (75); +COUNT(*) +1 +SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; +b c +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +16 16 +17 17 +18 18 +19 19 +20 20 +21 21 +22 22 +23 23 +24 24 +25 25 +26 26 +27 27 +28 28 +29 29 +30 30 +31 31 +32 32 +33 33 +34 34 +35 35 +36 36 +37 37 +38 38 +39 39 +40 40 +41 41 +42 42 +43 43 +44 44 +45 45 +46 46 +47 47 +48 48 +49 49 +50 50 +51 51 +52 52 +53 53 +54 54 +55 55 +56 56 +57 57 +58 58 +59 59 +60 60 +61 61 +62 62 +63 63 +64 64 +65 65 +66 66 +67 67 +68 68 +69 69 +70 70 +71 71 +72 72 +73 73 +74 74 +75 75 + +*** Setup for test 2 **** +DELETE FROM test.t1; +INSERT INTO test.t1 VALUES +(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), +(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), +(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), +(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), +(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), +(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), +(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), +(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), +(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45); + +**** Test Section 2 **** +SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; +b c +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +16 16 +17 17 +18 18 +19 19 +20 20 +21 21 +22 22 +23 23 +24 24 +25 25 +26 26 +27 27 +28 28 +29 29 +30 30 +31 31 +32 32 +33 33 +34 34 +35 35 +36 36 +37 37 +38 38 +39 39 +40 40 +41 41 +42 42 +43 43 +44 44 +45 45 +SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2; +COUNT(*) +45 +SELECT COUNT(*) FROM test.t1 RIGHT JOIN test.t2 ON b=b2; +COUNT(*) +75 +SHOW CREATE TABLE test.t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `pk2` int(11) NOT NULL, + `b2` int(11) NOT NULL, + `c2` int(11) NOT NULL, + PRIMARY KEY (`pk2`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk1` int(11) NOT NULL, + `b` int(11) NOT NULL, + `c` int(11) NOT NULL, + PRIMARY KEY (`pk1`) +) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t2 TABLESPACE table_space1 STORAGE DISK +ENGINE=NDB; +SHOW CREATE TABLE test.t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `pk2` int(11) NOT NULL, + `b2` int(11) NOT NULL, + `c2` int(11) NOT NULL, + PRIMARY KEY (`pk2`) +) TABLESPACE table_space1 
STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 ENGINE=NDBCLUSTER; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk1` int(11) NOT NULL, + `b` int(11) NOT NULL, + `c` int(11) NOT NULL, + PRIMARY KEY (`pk1`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 + +DROP TABLE test.t1; +DROP TABLE test.t2; +*** Setup for Test Section 3 *** +CREATE TABLE test.t1 ( +usr_id INT unsigned NOT NULL, +uniq_id INT unsigned NOT NULL AUTO_INCREMENT, +start_num INT unsigned NOT NULL DEFAULT 1, +increment INT unsigned NOT NULL DEFAULT 1, +PRIMARY KEY (uniq_id), +INDEX usr_uniq_idx (usr_id, uniq_id), +INDEX uniq_usr_idx (uniq_id, usr_id)) +TABLESPACE table_space1 STORAGE DISK +ENGINE=NDB; +CREATE TABLE test.t2 ( +id INT unsigned NOT NULL DEFAULT 0, +usr2_id INT unsigned NOT NULL DEFAULT 0, +max INT unsigned NOT NULL DEFAULT 0, +c_amount INT unsigned NOT NULL DEFAULT 0, +d_max INT unsigned NOT NULL DEFAULT 0, +d_num INT unsigned NOT NULL DEFAULT 0, +orig_time INT unsigned NOT NULL DEFAULT 0, +c_time INT unsigned NOT NULL DEFAULT 0, +active ENUM ("no","yes") NOT NULL, +PRIMARY KEY (id,usr2_id), +INDEX id_idx (id), +INDEX usr2_idx (usr2_id)) +ENGINE=NDB; +INSERT INTO test.t1 VALUES (3,NULL,0,50),(3,NULL,0,200),(3,NULL,0,25),(3,NULL,0,84676),(3,NULL,0,235),(3,NULL,0,10),(3,NULL,0,3098),(3,NULL,0,2947),(3,NULL,0,8987),(3,NULL,0,8347654),(3,NULL,0,20398),(3,NULL,0,8976),(3,NULL,0,500),(3,NULL,0,198); + +**** Test Section 3 **** +SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, +test.t2.usr2_id,test.t2.c_amount,test.t2.max +FROM test.t1 +LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id +WHERE test.t1.uniq_id = 4 +ORDER BY test.t2.c_amount; +usr_id uniq_id increment usr2_id c_amount max +3 4 84676 NULL NULL NULL +INSERT INTO test.t2 VALUES (2,3,3000,6000,0,0,746584,837484,'yes'); +INSERT INTO test.t2 VALUES (4,3,3000,6000,0,0,746584,837484,'yes'); +INSERT INTO test.t2 VALUES (7,3,1000,2000,0,0,746294,937484,'yes'); +SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, +test.t2.usr2_id,test.t2.c_amount,test.t2.max +FROM test.t1 +LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id +WHERE test.t1.uniq_id = 4 +ORDER BY test.t2.c_amount; +usr_id uniq_id increment usr2_id c_amount max +3 4 84676 3 6000 3000 + +DROP TABLE test.t1; +DROP TABLE test.t2; +ALTER TABLESPACE table_space1 +DROP DATAFILE './table_space1/datafile.dat' +ENGINE = NDB; +DROP TABLESPACE table_space1 +ENGINE = NDB; +DROP LOGFILE GROUP log_group1 +ENGINE =NDB; +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLESPACE ts2 +ADD DATAFILE './table_space2/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE t1 (a int NOT NULL PRIMARY KEY, b int) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TABLE t2 (a int NOT NULL PRIMARY KEY, b int) +ENGINE=NDB; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (1,1); +INSERT INTO t1 VALUES (2,2); +SELECT * FROM t1 order by a; +a b +1 1 +2 2 +INSERT INTO t2(a,b) SELECT * FROM 
t1; +SELECT * FROM t2 order by a; +a b +1 1 +2 2 +TRUNCATE t1; +TRUNCATE t2; +INSERT INTO t2 VALUES (3,3); +INSERT INTO t2 VALUES (4,4); +INSERT INTO t1(a,b) SELECT * FROM t2; +SELECT * FROM t1 order by a; +a b +3 3 +4 4 +DROP TABLE t1, t2; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts1 ENGINE NDB; +ALTER TABLESPACE ts2 +DROP DATAFILE './table_space2/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts2 ENGINE NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +CREATE LOGFILE GROUP lg +ADD UNDOFILE './undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts +ADD DATAFILE './datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t ( +a smallint NOT NULL, +b int NOT NULL, +c bigint NOT NULL, +d char(10), +e TEXT, +f VARCHAR(255), +PRIMARY KEY(a) +) TABLESPACE ts STORAGE DISK ENGINE=NDB; +ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f); +SHOW CREATE TABLE test.t; +Table Create Table +t CREATE TABLE `t` ( + `a` smallint(6) NOT NULL, + `b` int(11) NOT NULL, + `c` bigint(20) NOT NULL, + `d` char(10) DEFAULT NULL, + `e` text, + `f` varchar(255) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `d` (`d`), + KEY `f` (`f`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +SELECT * FROM test.t order by a; +a b c d e f +1 2 3 aaa1 bbb1 ccccc1 +2 3 4 aaa2 bbb2 ccccc2 +3 4 5 aaa3 bbb3 ccccc3 +4 5 6 aaa4 bbb4 ccccc4 +5 6 7 aaa5 bbb5 ccccc5 +6 7 8 aaa6 bbb6 ccccc6 +7 8 9 aaa7 bbb7 ccccc7 +8 9 10 aaa8 bbb8 ccccc8 +9 10 11 aaa9 bbb9 ccccc9 +10 11 12 aaa10 bbb10 ccccc10 +11 12 13 aaa11 bbb11 ccccc11 +12 13 14 aaa12 bbb12 ccccc12 +13 14 15 aaa13 bbb13 ccccc13 +14 15 16 aaa14 bbb14 ccccc14 +15 16 17 aaa15 bbb15 ccccc15 +16 17 18 aaa16 bbb16 ccccc16 +17 18 19 aaa17 bbb17 ccccc17 +18 19 20 aaa18 bbb18 ccccc18 +19 20 21 aaa19 bbb19 ccccc19 +20 21 22 aaa20 bbb20 ccccc20 +21 22 23 aaa21 bbb21 ccccc21 +22 23 24 aaa22 bbb22 ccccc22 +23 24 25 aaa23 bbb23 ccccc23 +24 25 26 aaa24 bbb24 ccccc24 +25 26 27 aaa25 bbb25 ccccc25 +26 27 28 aaa26 bbb26 ccccc26 +27 28 29 aaa27 bbb27 ccccc27 +28 29 30 aaa28 bbb28 ccccc28 +29 30 31 aaa29 bbb29 ccccc29 +30 31 32 aaa30 bbb30 ccccc30 +31 32 33 aaa31 bbb31 ccccc31 +32 33 34 aaa32 bbb32 ccccc32 +33 34 35 aaa33 bbb33 ccccc33 +34 35 36 aaa34 bbb34 ccccc34 +35 36 37 aaa35 bbb35 ccccc35 +36 37 38 aaa36 bbb36 ccccc36 +37 38 39 aaa37 bbb37 ccccc37 +38 39 40 aaa38 bbb38 ccccc38 +39 40 41 aaa39 bbb39 ccccc39 +40 41 42 aaa40 bbb40 ccccc40 +41 42 43 aaa41 bbb41 ccccc41 +42 43 44 aaa42 bbb42 ccccc42 +43 44 45 aaa43 bbb43 ccccc43 +44 45 46 aaa44 bbb44 ccccc44 +45 46 47 aaa45 bbb45 ccccc45 +46 47 48 aaa46 bbb46 ccccc46 +47 48 49 aaa47 bbb47 ccccc47 +48 49 50 aaa48 bbb48 ccccc48 +49 50 51 aaa49 bbb49 ccccc49 +50 51 52 aaa50 bbb50 ccccc50 +51 52 53 aaa51 bbb51 ccccc51 +52 53 54 aaa52 bbb52 ccccc52 +53 54 55 aaa53 bbb53 ccccc53 +54 55 56 aaa54 bbb54 ccccc54 +55 56 57 aaa55 bbb55 ccccc55 +56 57 58 aaa56 bbb56 ccccc56 +57 58 59 aaa57 bbb57 ccccc57 +58 59 60 aaa58 bbb58 ccccc58 +59 60 61 aaa59 bbb59 ccccc59 +60 61 62 aaa60 bbb60 ccccc60 +61 62 63 aaa61 bbb61 ccccc61 +62 63 64 aaa62 bbb62 ccccc62 +63 64 65 aaa63 bbb63 ccccc63 +64 65 66 aaa64 bbb64 ccccc64 +65 66 67 aaa65 bbb65 ccccc65 +66 67 68 aaa66 bbb66 ccccc66 +67 68 69 aaa67 bbb67 ccccc67 +68 69 70 aaa68 bbb68 ccccc68 +69 70 71 aaa69 bbb69 ccccc69 +70 71 72 aaa70 bbb70 ccccc70 +71 72 73 aaa71 bbb71 ccccc71 +72 73 74 aaa72 bbb72 ccccc72 +73 74 75 aaa73 bbb73 ccccc73 +74 75 76 aaa74 bbb74 ccccc74 +75 76 77 aaa75 bbb75 ccccc75 +76 77 78 aaa76 bbb76 ccccc76 +77 78 
79 aaa77 bbb77 ccccc77 +78 79 80 aaa78 bbb78 ccccc78 +79 80 81 aaa79 bbb79 ccccc79 +80 81 82 aaa80 bbb80 ccccc80 +81 82 83 aaa81 bbb81 ccccc81 +82 83 84 aaa82 bbb82 ccccc82 +83 84 85 aaa83 bbb83 ccccc83 +84 85 86 aaa84 bbb84 ccccc84 +85 86 87 aaa85 bbb85 ccccc85 +86 87 88 aaa86 bbb86 ccccc86 +87 88 89 aaa87 bbb87 ccccc87 +88 89 90 aaa88 bbb88 ccccc88 +89 90 91 aaa89 bbb89 ccccc89 +90 91 92 aaa90 bbb90 ccccc90 +91 92 93 aaa91 bbb91 ccccc91 +92 93 94 aaa92 bbb92 ccccc92 +93 94 95 aaa93 bbb93 ccccc93 +94 95 96 aaa94 bbb94 ccccc94 +95 96 97 aaa95 bbb95 ccccc95 +96 97 98 aaa96 bbb96 ccccc96 +97 98 99 aaa97 bbb97 ccccc97 +98 99 100 aaa98 bbb98 ccccc98 +99 100 101 aaa99 bbb99 ccccc99 +100 101 102 aaa100 bbb100 ccccc100 +DROP TABLE test.t; +USE test; +show tables; +Tables_in_test +t +SELECT * FROM test.t order by a; +a b c d e f +1 2 3 aaa1 bbb1 ccccc1 +2 3 4 aaa2 bbb2 ccccc2 +3 4 5 aaa3 bbb3 ccccc3 +4 5 6 aaa4 bbb4 ccccc4 +5 6 7 aaa5 bbb5 ccccc5 +6 7 8 aaa6 bbb6 ccccc6 +7 8 9 aaa7 bbb7 ccccc7 +8 9 10 aaa8 bbb8 ccccc8 +9 10 11 aaa9 bbb9 ccccc9 +10 11 12 aaa10 bbb10 ccccc10 +11 12 13 aaa11 bbb11 ccccc11 +12 13 14 aaa12 bbb12 ccccc12 +13 14 15 aaa13 bbb13 ccccc13 +14 15 16 aaa14 bbb14 ccccc14 +15 16 17 aaa15 bbb15 ccccc15 +16 17 18 aaa16 bbb16 ccccc16 +17 18 19 aaa17 bbb17 ccccc17 +18 19 20 aaa18 bbb18 ccccc18 +19 20 21 aaa19 bbb19 ccccc19 +20 21 22 aaa20 bbb20 ccccc20 +21 22 23 aaa21 bbb21 ccccc21 +22 23 24 aaa22 bbb22 ccccc22 +23 24 25 aaa23 bbb23 ccccc23 +24 25 26 aaa24 bbb24 ccccc24 +25 26 27 aaa25 bbb25 ccccc25 +26 27 28 aaa26 bbb26 ccccc26 +27 28 29 aaa27 bbb27 ccccc27 +28 29 30 aaa28 bbb28 ccccc28 +29 30 31 aaa29 bbb29 ccccc29 +30 31 32 aaa30 bbb30 ccccc30 +31 32 33 aaa31 bbb31 ccccc31 +32 33 34 aaa32 bbb32 ccccc32 +33 34 35 aaa33 bbb33 ccccc33 +34 35 36 aaa34 bbb34 ccccc34 +35 36 37 aaa35 bbb35 ccccc35 +36 37 38 aaa36 bbb36 ccccc36 +37 38 39 aaa37 bbb37 ccccc37 +38 39 40 aaa38 bbb38 ccccc38 +39 40 41 aaa39 bbb39 ccccc39 +40 41 42 aaa40 bbb40 ccccc40 +41 42 43 aaa41 bbb41 ccccc41 +42 43 44 aaa42 bbb42 ccccc42 +43 44 45 aaa43 bbb43 ccccc43 +44 45 46 aaa44 bbb44 ccccc44 +45 46 47 aaa45 bbb45 ccccc45 +46 47 48 aaa46 bbb46 ccccc46 +47 48 49 aaa47 bbb47 ccccc47 +48 49 50 aaa48 bbb48 ccccc48 +49 50 51 aaa49 bbb49 ccccc49 +50 51 52 aaa50 bbb50 ccccc50 +51 52 53 aaa51 bbb51 ccccc51 +52 53 54 aaa52 bbb52 ccccc52 +53 54 55 aaa53 bbb53 ccccc53 +54 55 56 aaa54 bbb54 ccccc54 +55 56 57 aaa55 bbb55 ccccc55 +56 57 58 aaa56 bbb56 ccccc56 +57 58 59 aaa57 bbb57 ccccc57 +58 59 60 aaa58 bbb58 ccccc58 +59 60 61 aaa59 bbb59 ccccc59 +60 61 62 aaa60 bbb60 ccccc60 +61 62 63 aaa61 bbb61 ccccc61 +62 63 64 aaa62 bbb62 ccccc62 +63 64 65 aaa63 bbb63 ccccc63 +64 65 66 aaa64 bbb64 ccccc64 +65 66 67 aaa65 bbb65 ccccc65 +66 67 68 aaa66 bbb66 ccccc66 +67 68 69 aaa67 bbb67 ccccc67 +68 69 70 aaa68 bbb68 ccccc68 +69 70 71 aaa69 bbb69 ccccc69 +70 71 72 aaa70 bbb70 ccccc70 +71 72 73 aaa71 bbb71 ccccc71 +72 73 74 aaa72 bbb72 ccccc72 +73 74 75 aaa73 bbb73 ccccc73 +74 75 76 aaa74 bbb74 ccccc74 +75 76 77 aaa75 bbb75 ccccc75 +76 77 78 aaa76 bbb76 ccccc76 +77 78 79 aaa77 bbb77 ccccc77 +78 79 80 aaa78 bbb78 ccccc78 +79 80 81 aaa79 bbb79 ccccc79 +80 81 82 aaa80 bbb80 ccccc80 +81 82 83 aaa81 bbb81 ccccc81 +82 83 84 aaa82 bbb82 ccccc82 +83 84 85 aaa83 bbb83 ccccc83 +84 85 86 aaa84 bbb84 ccccc84 +85 86 87 aaa85 bbb85 ccccc85 +86 87 88 aaa86 bbb86 ccccc86 +87 88 89 aaa87 bbb87 ccccc87 +88 89 90 aaa88 bbb88 ccccc88 +89 90 91 aaa89 bbb89 ccccc89 +90 91 92 aaa90 bbb90 ccccc90 +91 92 93 aaa91 bbb91 ccccc91 +92 93 94 aaa92 bbb92 ccccc92 +93 94 95 
aaa93 bbb93 ccccc93 +94 95 96 aaa94 bbb94 ccccc94 +95 96 97 aaa95 bbb95 ccccc95 +96 97 98 aaa96 bbb96 ccccc96 +97 98 99 aaa97 bbb97 ccccc97 +98 99 100 aaa98 bbb98 ccccc98 +99 100 101 aaa99 bbb99 ccccc99 +100 101 102 aaa100 bbb100 ccccc100 +DROP TABLE test.t; +ALTER TABLESPACE ts +DROP DATAFILE './datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts ENGINE NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +DROP table IF EXISTS test.t1; +Warnings: +Note 1051 Unknown table 't1' +DROP table IF EXISTS test.t2; +Warnings: +Note 1051 Unknown table 't2' +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLESPACE ts2 +ADD DATAFILE './table_space2/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 ( +a1 smallint NOT NULL, +a2 int NOT NULL, +a3 bigint NOT NULL, +a4 char(10), +a5 decimal(5,1), +a6 time, +a7 date, +a8 datetime, +a9 VARCHAR(255), +a10 blob, +PRIMARY KEY(a1) +) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a8); +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` smallint(6) NOT NULL, + `a2` int(11) NOT NULL, + `a3` bigint(20) NOT NULL, + `a4` char(10) DEFAULT NULL, + `a5` decimal(5,1) DEFAULT NULL, + `a6` time DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` datetime DEFAULT NULL, + `a9` varchar(255) DEFAULT NULL, + `a10` blob, + PRIMARY KEY (`a1`), + KEY `a2` (`a2`), + KEY `a3` (`a3`), + KEY `a8` (`a8`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +CREATE TABLE test.t2 ( +b1 smallint NOT NULL, +b2 int NOT NULL, +b3 bigint NOT NULL, +b4 char(10), +b5 decimal(5,1), +b6 time, +b7 date, +b8 datetime, +b9 VARCHAR(255), +b10 blob, +PRIMARY KEY(b1) +) ENGINE=NDB; +ALTER TABLE test.t2 ADD INDEX (b2), ADD INDEX (b3), ADD INDEX (b8); +SHOW CREATE TABLE test.t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `b1` smallint(6) NOT NULL, + `b2` int(11) NOT NULL, + `b3` bigint(20) NOT NULL, + `b4` char(10) DEFAULT NULL, + `b5` decimal(5,1) DEFAULT NULL, + `b6` time DEFAULT NULL, + `b7` date DEFAULT NULL, + `b8` datetime DEFAULT NULL, + `b9` varchar(255) DEFAULT NULL, + `b10` blob, + PRIMARY KEY (`b1`), + KEY `b2` (`b2`), + KEY `b3` (`b3`), + KEY `b8` (`b8`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +SELECT * FROM test.t1 order by a1; +a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 +1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data +2 3 2000000002 aaa2 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data +3 4 2000000003 aaa3 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data +4 5 2000000004 aaa4 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data +5 6 2000000005 aaa5 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data +6 7 2000000006 aaa6 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data +7 8 2000000007 aaa7 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data +8 9 2000000008 aaa8 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data +9 10 2000000009 aaa9 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data +10 11 2000000010 aaa10 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data +11 12 2000000011 aaa11 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data +12 13 2000000012 
aaa12 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data +13 14 2000000013 aaa13 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data +14 15 2000000014 aaa14 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data +15 16 2000000015 aaa15 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data +16 17 2000000016 aaa16 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data +17 18 2000000017 aaa17 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data +18 19 2000000018 aaa18 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data +19 20 2000000019 aaa19 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data +20 21 2000000020 aaa20 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data +SELECT * FROM test.t2 order by b1; +b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 +3 4 3000000001 aaa1 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data +4 5 3000000002 aaa2 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data +5 6 3000000003 aaa3 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data +6 7 3000000004 aaa4 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data +7 8 3000000005 aaa5 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data +8 9 3000000006 aaa6 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data +9 10 3000000007 aaa7 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data +10 11 3000000008 aaa8 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data +11 12 3000000009 aaa9 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data +12 13 3000000010 aaa10 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data +13 14 3000000011 aaa11 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data +14 15 3000000012 aaa12 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data +15 16 3000000013 aaa13 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data +16 17 3000000014 aaa14 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data +17 18 3000000015 aaa15 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data +18 19 3000000016 aaa16 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data +19 20 3000000017 aaa17 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data +20 21 3000000018 aaa18 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data +21 22 3000000019 aaa19 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data +22 23 3000000020 aaa20 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data +SELECT COUNT(a1), a1, COUNT(a1)*a1 FROM test.t1 GROUP BY a1; +COUNT(a1) a1 COUNT(a1)*a1 +1 1 1 +1 2 2 +1 3 3 +1 4 4 +1 5 5 +1 6 6 +1 7 7 +1 8 8 +1 9 9 +1 10 10 +1 11 11 +1 12 12 +1 13 13 +1 14 14 +1 15 15 +1 16 16 +1 17 17 +1 18 18 +1 19 19 +1 20 20 +SELECT COUNT(a2), (a2+1), COUNT(a2)*(a2+0) FROM test.t1 GROUP BY a2; +COUNT(a2) (a2+1) COUNT(a2)*(a2+0) +1 3 2 +1 4 3 +1 5 4 +1 6 5 +1 7 6 +1 8 7 +1 9 8 +1 10 9 +1 11 10 +1 12 11 +1 13 12 +1 14 13 +1 15 14 +1 16 15 +1 17 16 +1 18 17 +1 19 18 +1 20 19 +1 21 20 +1 22 21 +DROP TABLE test.t1; +DROP TABLE test.t2; +create table test.t1 (a int not null,b char(5), c text) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +Warnings: +Error 1465 Table storage engine 'ndbcluster' does 
not support the create option 'Binlog of table with BLOB attribute and no PK' +insert into test.t1 (a) values (1),(2),(3),(4),(1),(2),(3),(4); +select distinct a from test.t1 group by b,a having a > 2 order by a desc; +a +4 +3 +select distinct a,c from test.t1 group by b,c,a having a > 2 order by a desc; +a c +4 NULL +3 NULL +select distinct a from test.t1 group by b,a having a > 2 order by a asc; +a +3 +4 +select distinct a,c from test.t1 group by b,c,a having a > 2 order by a asc; +a c +3 NULL +4 NULL +drop table test.t1; +create table test.t1 (a char(1), key(a)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +insert into test.t1 values('1'),('1'),('1'),('2'),('2'),('3'),('3'); +select * from test.t1 where a >= '1' order by a; +a +1 +1 +1 +2 +2 +3 +3 +select distinct a from test.t1 order by a desc; +a +3 +2 +1 +select distinct a from test.t1 where a >= '1' order by a desc; +a +3 +2 +1 +select distinct a from test.t1 where a >= '1' order by a asc; +a +1 +2 +3 +drop table test.t1; +CREATE TABLE test.t1 (email varchar(50), infoID BIGINT, dateentered DATETIME) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t2 (infoID BIGINT, shipcode varchar(10)) ENGINE=NDB; +INSERT INTO test.t1 (email, infoID, dateentered) VALUES +('test1@testdomain.com', 1, '2002-07-30 22:56:38'), +('test1@testdomain.com', 1, '2002-07-27 22:58:16'), +('test2@testdomain.com', 1, '2002-06-19 15:22:19'), +('test2@testdomain.com', 2, '2002-06-18 14:23:47'), +('test3@testdomain.com', 1, '2002-05-19 22:17:32'); +INSERT INTO test.t2(infoID, shipcode) VALUES +(1, 'Z001'), +(2, 'R002'); +SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email, shipcode; +email shipcode +test1@testdomain.com Z001 +test2@testdomain.com R002 +test2@testdomain.com Z001 +test3@testdomain.com Z001 +SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC; +email +test1@testdomain.com +test2@testdomain.com +test3@testdomain.com +SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE test.t1.infoID=test.t2.infoID ORDER BY dateentered DESC; +email shipcode +test1@testdomain.com Z001 +test2@testdomain.com Z001 +test2@testdomain.com R002 +test3@testdomain.com Z001 +drop table test.t1,test.t2; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts1 ENGINE NDB; +ALTER TABLESPACE ts2 +DROP DATAFILE './table_space2/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts2 ENGINE NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +DROP TABLE IF EXISTS test.t; +Warnings: +Note 1051 Unknown table 't' +create table test.t (f1 varchar(50) primary key, f2 text,f3 int) engine=NDB; +insert into test.t (f1,f2,f3)VALUES("111111","aaaaaa",1); +insert into test.t (f1,f2,f3)VALUES("222222","bbbbbb",2); +select * from test.t order by f1; +f1 f2 f3 +111111 aaaaaa 1 +222222 bbbbbb 2 +select f1,f2 from test.t order by f2; +f1 f2 +111111 aaaaaa +222222 bbbbbb +select f2 from test.t order by f2; +f2 +aaaaaa +bbbbbb +select f1,f2 from test.t order by f1; +f1 f2 +111111 aaaaaa +222222 bbbbbb +drop table test.t; +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts +ADD DATAFILE './table_space/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=InnoDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) DEFAULT NULL, + `a2` blob, + `a3` text +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 
TABLESPACE ts STORAGE DISK ENGINE=NDB; +Warnings: +Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK' +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) DEFAULT NULL, + `a2` blob, + `a3` text +) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +DROP TABLE test.t1; +CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=MyISAM; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) DEFAULT NULL, + `a2` blob, + `a3` text +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; +Warnings: +Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK' +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) DEFAULT NULL, + `a2` blob, + `a3` text +) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +DROP TABLE test.t1; +CREATE TABLE test.t1 (a1 INT PRIMARY KEY, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` blob, + `a3` text, + PRIMARY KEY (`a1`) +) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 ENGINE=InnoDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` blob, + `a3` text, + PRIMARY KEY (`a1`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE test.t1; +CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; +Warnings: +Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK' +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) DEFAULT NULL, + `a2` blob, + `a3` text +) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 ENGINE=MyISAM; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) DEFAULT NULL, + `a2` blob, + `a3` text +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE test.t1; +CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; +ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` float DEFAULT NULL, + `a3` double DEFAULT NULL, + `a4` bit(1) DEFAULT NULL, + `a5` tinyint(4) DEFAULT NULL, + `a6` bigint(20) DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` time DEFAULT NULL, + `a9` datetime DEFAULT NULL, + `a10` tinytext, + `a11` mediumtext, + `a12` longtext, + `a13` text, + `a14` blob, + PRIMARY KEY (`a1`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a5), ADD INDEX (a6), +ADD INDEX (a7), ADD INDEX (a8); +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` float DEFAULT NULL, + `a3` double DEFAULT NULL, + `a4` bit(1) DEFAULT NULL, + `a5` tinyint(4) DEFAULT NULL, + `a6` bigint(20) DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` time DEFAULT NULL, + `a9` datetime DEFAULT NULL, + `a10` tinytext, + `a11` mediumtext, + `a12` longtext, + `a13` text, + `a14` blob, 
+ PRIMARY KEY (`a1`), + KEY `a2` (`a2`), + KEY `a3` (`a3`), + KEY `a5` (`a5`), + KEY `a6` (`a6`), + KEY `a7` (`a7`), + KEY `a8` (`a8`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +DROP TABLE test.t1; +CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; +ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` float DEFAULT NULL, + `a3` double DEFAULT NULL, + `a4` bit(1) DEFAULT NULL, + `a5` tinyint(4) DEFAULT NULL, + `a6` bigint(20) DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` time DEFAULT NULL, + `a9` datetime DEFAULT NULL, + `a10` tinytext, + `a11` mediumtext, + `a12` longtext, + `a13` text, + `a14` blob, + PRIMARY KEY (`a1`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 DROP a14; +ALTER TABLE test.t1 DROP a13; +ALTER TABLE test.t1 DROP a12; +ALTER TABLE test.t1 DROP a11; +ALTER TABLE test.t1 DROP a10; +ALTER TABLE test.t1 DROP a9; +ALTER TABLE test.t1 DROP a8; +ALTER TABLE test.t1 DROP a7; +ALTER TABLE test.t1 DROP a6; +ALTER TABLE test.t1 DROP PRIMARY KEY; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` float DEFAULT NULL, + `a3` double DEFAULT NULL, + `a4` bit(1) DEFAULT NULL, + `a5` tinyint(4) DEFAULT NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +DROP TABLE test.t1; +ALTER TABLESPACE ts +DROP DATAFILE './table_space/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts ENGINE NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; diff --git a/mysql-test/r/ndb_dd_advance2.result b/mysql-test/r/ndb_dd_advance2.result new file mode 100644 index 00000000000..c7fcda650e6 --- /dev/null +++ b/mysql-test/r/ndb_dd_advance2.result @@ -0,0 +1,746 @@ +DROP TABLE IF EXISTS test.t1; +DROP TABLE IF EXISTS test.t2; +DROP TABLE IF EXISTS test.t3; +***** +**** Copy data from table in one table space to table in different table space +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLESPACE ts2 +ADD DATAFILE './table_space2/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) +TABLESPACE ts2 STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` varchar(256) DEFAULT NULL, + `a3` blob, + PRIMARY KEY (`a1`) +) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +SHOW CREATE TABLE test.t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a1` int(11) NOT NULL, + `a2` varchar(256) DEFAULT NULL, + `a3` blob, + PRIMARY KEY (`a1`) +) TABLESPACE ts2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +INSERT INTO test.t1 VALUES (1,'111111','aaaaaaaa'); +INSERT INTO test.t1 VALUES (2,'222222','bbbbbbbb'); +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 111111 aaaaaaaa +2 222222 bbbbbbbb +INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1; +SELECT * FROM test.t2 ORDER BY a1; +a1 a2 a3 +1 111111 aaaaaaaa +2 222222 bbbbbbbb +DROP TABLE test.t1, test.t2; +set 
@vc1 = repeat('a', 200); +set @vc2 = repeat('b', 500); +set @vc3 = repeat('c', 1000); +set @vc4 = repeat('d', 4000); +set @x0 = '01234567012345670123456701234567'; +set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0); +set @b1 = 'b1'; +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@x0); +set @d1 = 'dd1'; +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @b2 = 'b2'; +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @d2 = 'dd2'; +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +select length(@x0),length(@b1),length(@d1) from dual; +length(@x0) length(@b1) length(@d1) +256 2256 3000 +select length(@x0),length(@b2),length(@d2) from dual; +length(@x0) length(@b2) length(@d2) +256 20000 30000 +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) +TABLESPACE ts2 STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` varchar(5000) DEFAULT NULL, + `a3` blob, + PRIMARY KEY (`a1`) +) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +SHOW CREATE TABLE test.t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a1` int(11) NOT NULL, + `a2` varchar(5000) DEFAULT NULL, + `a3` blob, + PRIMARY KEY (`a1`) +) TABLESPACE ts2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +INSERT INTO test.t1 VALUES (1,@vc1,@d1); +INSERT INTO test.t1 VALUES (2,@vc2,@b1); +INSERT INTO test.t1 VALUES (3,@vc3,@d2); +INSERT INTO test.t1 VALUES (4,@vc4,@b2); +SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3) +FROM test.t1 WHERE a1=1; +a1 length(a2) substr(a2,180,2) length(a3) substr(a3,1+3*900,3) +1 200 aa 3000 dd1 +SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3) +FROM test.t1 where a1=2; +a1 length(a2) substr(a2,480,2) length(a3) substr(a3,1+2*900,3) +2 500 bb 2256 b1b +INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1; +SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3) +FROM test.t2 WHERE a1=1; +a1 length(a2) substr(a2,180,2) length(a3) substr(a3,1+3*900,3) +1 200 aa 3000 dd1 +SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3) +FROM test.t2 where a1=2; +a1 length(a2) substr(a2,480,2) length(a3) substr(a3,1+2*900,3) +2 500 bb 2256 b1b +DROP TABLE test.t1, test.t2; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts1 ENGINE NDB; +ALTER TABLESPACE ts2 +DROP DATAFILE './table_space2/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts2 ENGINE NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +**** Insert, Update, Delete from NDB table with BLOB fields +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE 
'./table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +set @vc1 = repeat('a', 200); +set @vc2 = repeat('b', 500); +set @vc3 = repeat('c', 1000); +set @vc4 = repeat('d', 4000); +set @vc5 = repeat('d', 5000); +set @bb1 = repeat('1', 2000); +set @bb2 = repeat('2', 5000); +set @bb3 = repeat('3', 10000); +set @bb4 = repeat('4', 40000); +set @bb5 = repeat('5', 50000); +select length(@vc1),length(@vc2),length(@vc3),length(@vc4),length(@vc5) from dual; +length(@vc1) length(@vc2) length(@vc3) length(@vc4) length(@vc5) +200 500 1000 4000 5000 +select length(@bb1),length(@bb2),length(@bb3),length(@bb4),length(@bb5) from dual; +length(@bb1) length(@bb2) length(@bb3) length(@bb4) length(@bb5) +2000 5000 10000 40000 50000 +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +INSERT INTO test.t1 VALUES (1,@vc1,@bb1); +INSERT INTO test.t1 VALUES (2,@vc2,@bb2); +INSERT INTO test.t1 VALUES (3,@vc3,@bb3); +INSERT INTO test.t1 VALUES (4,@vc4,@bb4); +INSERT INTO test.t1 VALUES (5,@vc5,@bb5); +UPDATE test.t1 SET a2=@vc5, a3=@bb5 WHERE a1=1; +SELECT a1,length(a2),substr(a2,4998,2),length(a3),substr(a3,49997,3) +FROM test.t1 WHERE a1=1; +a1 length(a2) substr(a2,4998,2) length(a3) substr(a3,49997,3) +1 5000 dd 50000 555 +UPDATE test.t1 SET a2=@vc4, a3=@bb4 WHERE a1=2; +SELECT a1,length(a2),substr(a2,3998,2),length(a3),substr(a3,39997,3) +FROM test.t1 WHERE a1=2; +a1 length(a2) substr(a2,3998,2) length(a3) substr(a3,39997,3) +2 4000 dd 40000 444 +UPDATE test.t1 SET a2=@vc2, a3=@bb2 WHERE a1=3; +SELECT a1,length(a2),substr(a2,498,2),length(a3),substr(a3,3997,3) +FROM test.t1 WHERE a1=3; +a1 length(a2) substr(a2,498,2) length(a3) substr(a3,3997,3) +3 500 bb 5000 222 +UPDATE test.t1 SET a2=@vc3, a3=@bb3 WHERE a1=4; +SELECT a1,length(a2),substr(a2,998,2),length(a3),substr(a3,9997,3) +FROM test.t1 WHERE a1=4; +a1 length(a2) substr(a2,998,2) length(a3) substr(a3,9997,3) +4 1000 cc 10000 333 +UPDATE test.t1 SET a2=@vc1, a3=@bb1 WHERE a1=5; +SELECT a1,length(a2),substr(a2,198,2),length(a3),substr(a3,1997,3) +FROM test.t1 WHERE a1=5; +a1 length(a2) substr(a2,198,2) length(a3) substr(a3,1997,3) +5 200 aa 2000 111 +DELETE FROM test.t1 where a1=5; +SELECT count(*) from test.t1; +count(*) +4 +DELETE FROM test.t1 where a1=4; +SELECT count(*) from test.t1; +count(*) +3 +DELETE FROM test.t1 where a1=3; +SELECT count(*) from test.t1; +count(*) +2 +DELETE FROM test.t1 where a1=2; +SELECT count(*) from test.t1; +count(*) +1 +DELETE FROM test.t1 where a1=1; +SELECT count(*) from test.t1; +count(*) +0 +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts1 ENGINE NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +**** Create Stored procedures that use disk based tables +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB// +CREATE PROCEDURE test.sp1() +BEGIN +INSERT INTO test.t1 values (1,'111111','aaaaaaaa'); +END// +CALL test.sp1(); +SELECT * FROM test.t1; +a1 a2 a3 +1 111111 aaaaaaaa +CREATE PROCEDURE test.sp2(n INT, vc VARCHAR(256), blb BLOB) +BEGIN +UPDATE test.t1 SET a2=vc, a3=blb where a1=n; +END// +CALL test.sp2(1,'222222','bbbbbbbb'); +SELECT * FROM test.t1; 
+a1 a2 a3 +1 222222 bbbbbbbb +DELETE FROM test.t1; +DROP PROCEDURE test.sp1; +DROP PROCEDURE test.sp2; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create function that operate on disk based tables +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE FUNCTION test.fn1(n INT) RETURNS INT +BEGIN +DECLARE v INT; +SELECT a1 INTO v FROM test.t1 WHERE a1=n; +RETURN v; +END// +CREATE FUNCTION test.fn2(n INT, blb BLOB) RETURNS BLOB +BEGIN +DECLARE vv BLOB; +UPDATE test.t1 SET a3=blb where a1=n; +SELECT a3 INTO vv FROM test.t1 WHERE a1=n; +RETURN vv; +END// +SELECT test.fn1(10) FROM DUAL; +test.fn1(10) +10 +SELECT test.fn2(50, 'new BLOB content') FROM DUAL; +test.fn2(50, 'new BLOB content') +new BLOB content +DELETE FROM test.t1; +DROP FUNCTION test.fn1; +DROP FUNCTION test.fn2; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create triggers that operate on disk based tables +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TRIGGER test.trg1 BEFORE INSERT ON test.t1 FOR EACH ROW +BEGIN +if isnull(new.a2) then +set new.a2:= 'trg1 works on a2 field'; +end if; +if isnull(new.a3) then +set new.a3:= 'trg1 works on a3 field'; +end if; +end// +insert into test.t1 (a1) values (1)// +insert into test.t1 (a1,a2) values (2, 'ccccccc')// +select * from test.t1 order by a1// +a1 a2 a3 +1 trg1 works on a2 field trg1 works on a3 field +2 ccccccc trg1 works on a3 field +DELETE FROM test.t1; +DROP TRIGGER test.trg1; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create, update views that operate on disk based tables +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE VIEW test.v1 AS SELECT * FROM test.t1; +SELECT * FROM test.v1 order by a1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +2 aaaaa2 bbbbb2 +3 aaaaa3 bbbbb3 +4 aaaaa4 bbbbb4 +5 aaaaa5 bbbbb5 +6 aaaaa6 bbbbb6 +7 aaaaa7 bbbbb7 +8 aaaaa8 bbbbb8 +9 aaaaa9 bbbbb9 +10 aaaaa10 bbbbb10 +CHECK TABLE test.v1, test.t1; +Table Op Msg_type Msg_text +test.v1 check status OK +test.t1 check note The storage engine for the table doesn't support check +UPDATE test.v1 SET a2='zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' WHERE a1=5; +SELECT * FROM test.v1 order by a1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +2 aaaaa2 bbbbb2 +3 aaaaa3 bbbbb3 +4 aaaaa4 
bbbbb4 +5 zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz bbbbb5 +6 aaaaa6 bbbbb6 +7 aaaaa7 bbbbb7 +8 aaaaa8 bbbbb8 +9 aaaaa9 bbbbb9 +10 aaaaa10 bbbbb10 +DROP VIEW test.v1; +DELETE FROM test.t1; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create and use disk based table that use auto inc +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 aaaaa10 bbbbb10 +2 aaaaa9 bbbbb9 +3 aaaaa8 bbbbb8 +4 aaaaa7 bbbbb7 +5 aaaaa6 bbbbb6 +6 aaaaa5 bbbbb5 +7 aaaaa4 bbbbb4 +8 aaaaa3 bbbbb3 +9 aaaaa2 bbbbb2 +10 aaaaa1 bbbbb1 +DELETE FROM test.t1; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create test that use transaction (commit, rollback) +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +SET AUTOCOMMIT=0; +CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); +COMMIT; +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); +ROLLBACK; +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +DELETE FROM test.t1; +DROP TABLE test.t1; +SET AUTOCOMMIT=1; +CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +START TRANSACTION; +INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); +COMMIT; +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +START TRANSACTION; +INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); +ROLLBACK; +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +DELETE FROM test.t1; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create test that uses locks +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +drop table if exists test.t1; +CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +LOCK TABLES test.t1 write; +INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); +INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +2 aaaaa2 bbbbb2 +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +2 aaaaa2 bbbbb2 +INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3"); +UNLOCK TABLES; +INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3"); +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 aaaaa1 
bbbbb1 +2 aaaaa2 bbbbb2 +3 aaaaa3 bbbbb3 +4 aaaaa3 bbbbb3 +DELETE FROM test.t1; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create large disk base table, do random queries, check cache hits +***** +set @vc1 = repeat('a', 200); +SELECT @vc1 FROM DUAL; +@vc1 +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +set @vc2 = repeat('b', 500); +set @vc3 = repeat('b', 998); +set @x0 = '01234567012345670123456701234567'; +set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0); +set @b1 = 'b1'; +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@x0); +set @d1 = 'dd1'; +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @b2 = 'b2'; +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @d2 = 'dd2'; +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +select length(@x0),length(@b1),length(@d1) from dual; +length(@x0) length(@b1) length(@d1) +256 2256 3000 +select length(@x0),length(@b2),length(@d2) from dual; +length(@x0) length(@b2) length(@d2) +256 20000 30000 +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(1000), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +INSERT INTO test.t1 values(1,@vc1,@d1); +INSERT INTO test.t1 values(2,@vc2,@d2); +explain SELECT * from test.t1 WHERE a1 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 +SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3) +FROM test.t1 WHERE a1=1 ORDER BY a1; +a1 length(a2) substr(a2,1+2*900,2) length(a3) substr(a3,1+3*900,3) +1 200 3000 dd1 +SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3) +FROM test.t1 where a1=2 ORDER BY a1; +a1 length(a2) substr(a2,1+2*9000,2) length(a3) substr(a3,1+3*9000,3) +2 500 30000 dd2 +UPDATE test.t1 set a2=@vc2,a3=@d2 where a1=1; +UPDATE test.t1 set a2=@vc1,a3=@d1 where a1=2; +SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3) +FROM test.t1 where a1=1; +a1 length(a2) substr(a2,1+2*9000,2) length(a3) substr(a3,1+3*9000,3) +1 500 30000 dd2 +SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3) +FROM test.t1 where a1=2; +a1 length(a2) substr(a2,1+2*900,2) length(a3) substr(a3,1+3*900,3) +2 200 3000 dd1 +DELETE FROM test.t1; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE 
GROUP lg +ENGINE=NDB; +***** +***** Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB, a4 DATE, a5 CHAR(250)) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +SELECT COUNT(*) from test.t1; +COUNT(*) +100 +SELECT SUM(a1) from test.t1; +SUM(a1) +5050 +SELECT MIN(a1) from test.t1; +MIN(a1) +1 +SELECT MAX(a1) from test.t1; +MAX(a1) +100 +SELECT a5 from test.t1 where a1=50; +a5 +root@localhost +SELECT * from test.t1 order by a1; +a1 a2 a3 a4 a5 +1 aaaaaaaaaaaaaaaa1 bbbbbbbbbbbbbbbbbb1 2006-06-20 root@localhost +2 aaaaaaaaaaaaaaaa2 bbbbbbbbbbbbbbbbbb2 2006-06-20 root@localhost +3 aaaaaaaaaaaaaaaa3 bbbbbbbbbbbbbbbbbb3 2006-06-20 root@localhost +4 aaaaaaaaaaaaaaaa4 bbbbbbbbbbbbbbbbbb4 2006-06-20 root@localhost +5 aaaaaaaaaaaaaaaa5 bbbbbbbbbbbbbbbbbb5 2006-06-20 root@localhost +6 aaaaaaaaaaaaaaaa6 bbbbbbbbbbbbbbbbbb6 2006-06-20 root@localhost +7 aaaaaaaaaaaaaaaa7 bbbbbbbbbbbbbbbbbb7 2006-06-20 root@localhost +8 aaaaaaaaaaaaaaaa8 bbbbbbbbbbbbbbbbbb8 2006-06-20 root@localhost +9 aaaaaaaaaaaaaaaa9 bbbbbbbbbbbbbbbbbb9 2006-06-20 root@localhost +10 aaaaaaaaaaaaaaaa10 bbbbbbbbbbbbbbbbbb10 2006-06-20 root@localhost +11 aaaaaaaaaaaaaaaa11 bbbbbbbbbbbbbbbbbb11 2006-06-20 root@localhost +12 aaaaaaaaaaaaaaaa12 bbbbbbbbbbbbbbbbbb12 2006-06-20 root@localhost +13 aaaaaaaaaaaaaaaa13 bbbbbbbbbbbbbbbbbb13 2006-06-20 root@localhost +14 aaaaaaaaaaaaaaaa14 bbbbbbbbbbbbbbbbbb14 2006-06-20 root@localhost +15 aaaaaaaaaaaaaaaa15 bbbbbbbbbbbbbbbbbb15 2006-06-20 root@localhost +16 aaaaaaaaaaaaaaaa16 bbbbbbbbbbbbbbbbbb16 2006-06-20 root@localhost +17 aaaaaaaaaaaaaaaa17 bbbbbbbbbbbbbbbbbb17 2006-06-20 root@localhost +18 aaaaaaaaaaaaaaaa18 bbbbbbbbbbbbbbbbbb18 2006-06-20 root@localhost +19 aaaaaaaaaaaaaaaa19 bbbbbbbbbbbbbbbbbb19 2006-06-20 root@localhost +20 aaaaaaaaaaaaaaaa20 bbbbbbbbbbbbbbbbbb20 2006-06-20 root@localhost +21 aaaaaaaaaaaaaaaa21 bbbbbbbbbbbbbbbbbb21 2006-06-20 root@localhost +22 aaaaaaaaaaaaaaaa22 bbbbbbbbbbbbbbbbbb22 2006-06-20 root@localhost +23 aaaaaaaaaaaaaaaa23 bbbbbbbbbbbbbbbbbb23 2006-06-20 root@localhost +24 aaaaaaaaaaaaaaaa24 bbbbbbbbbbbbbbbbbb24 2006-06-20 root@localhost +25 aaaaaaaaaaaaaaaa25 bbbbbbbbbbbbbbbbbb25 2006-06-20 root@localhost +26 aaaaaaaaaaaaaaaa26 bbbbbbbbbbbbbbbbbb26 2006-06-20 root@localhost +27 aaaaaaaaaaaaaaaa27 bbbbbbbbbbbbbbbbbb27 2006-06-20 root@localhost +28 aaaaaaaaaaaaaaaa28 bbbbbbbbbbbbbbbbbb28 2006-06-20 root@localhost +29 aaaaaaaaaaaaaaaa29 bbbbbbbbbbbbbbbbbb29 2006-06-20 root@localhost +30 aaaaaaaaaaaaaaaa30 bbbbbbbbbbbbbbbbbb30 2006-06-20 root@localhost +31 aaaaaaaaaaaaaaaa31 bbbbbbbbbbbbbbbbbb31 2006-06-20 root@localhost +32 aaaaaaaaaaaaaaaa32 bbbbbbbbbbbbbbbbbb32 2006-06-20 root@localhost +33 aaaaaaaaaaaaaaaa33 bbbbbbbbbbbbbbbbbb33 2006-06-20 root@localhost +34 aaaaaaaaaaaaaaaa34 bbbbbbbbbbbbbbbbbb34 2006-06-20 root@localhost +35 aaaaaaaaaaaaaaaa35 bbbbbbbbbbbbbbbbbb35 2006-06-20 root@localhost +36 aaaaaaaaaaaaaaaa36 bbbbbbbbbbbbbbbbbb36 2006-06-20 root@localhost +37 aaaaaaaaaaaaaaaa37 bbbbbbbbbbbbbbbbbb37 2006-06-20 root@localhost +38 aaaaaaaaaaaaaaaa38 bbbbbbbbbbbbbbbbbb38 2006-06-20 root@localhost +39 aaaaaaaaaaaaaaaa39 bbbbbbbbbbbbbbbbbb39 2006-06-20 root@localhost +40 aaaaaaaaaaaaaaaa40 bbbbbbbbbbbbbbbbbb40 2006-06-20 
root@localhost +41 aaaaaaaaaaaaaaaa41 bbbbbbbbbbbbbbbbbb41 2006-06-20 root@localhost +42 aaaaaaaaaaaaaaaa42 bbbbbbbbbbbbbbbbbb42 2006-06-20 root@localhost +43 aaaaaaaaaaaaaaaa43 bbbbbbbbbbbbbbbbbb43 2006-06-20 root@localhost +44 aaaaaaaaaaaaaaaa44 bbbbbbbbbbbbbbbbbb44 2006-06-20 root@localhost +45 aaaaaaaaaaaaaaaa45 bbbbbbbbbbbbbbbbbb45 2006-06-20 root@localhost +46 aaaaaaaaaaaaaaaa46 bbbbbbbbbbbbbbbbbb46 2006-06-20 root@localhost +47 aaaaaaaaaaaaaaaa47 bbbbbbbbbbbbbbbbbb47 2006-06-20 root@localhost +48 aaaaaaaaaaaaaaaa48 bbbbbbbbbbbbbbbbbb48 2006-06-20 root@localhost +49 aaaaaaaaaaaaaaaa49 bbbbbbbbbbbbbbbbbb49 2006-06-20 root@localhost +50 aaaaaaaaaaaaaaaa50 bbbbbbbbbbbbbbbbbb50 2006-06-20 root@localhost +51 aaaaaaaaaaaaaaaa51 bbbbbbbbbbbbbbbbbb51 2006-06-20 root@localhost +52 aaaaaaaaaaaaaaaa52 bbbbbbbbbbbbbbbbbb52 2006-06-20 root@localhost +53 aaaaaaaaaaaaaaaa53 bbbbbbbbbbbbbbbbbb53 2006-06-20 root@localhost +54 aaaaaaaaaaaaaaaa54 bbbbbbbbbbbbbbbbbb54 2006-06-20 root@localhost +55 aaaaaaaaaaaaaaaa55 bbbbbbbbbbbbbbbbbb55 2006-06-20 root@localhost +56 aaaaaaaaaaaaaaaa56 bbbbbbbbbbbbbbbbbb56 2006-06-20 root@localhost +57 aaaaaaaaaaaaaaaa57 bbbbbbbbbbbbbbbbbb57 2006-06-20 root@localhost +58 aaaaaaaaaaaaaaaa58 bbbbbbbbbbbbbbbbbb58 2006-06-20 root@localhost +59 aaaaaaaaaaaaaaaa59 bbbbbbbbbbbbbbbbbb59 2006-06-20 root@localhost +60 aaaaaaaaaaaaaaaa60 bbbbbbbbbbbbbbbbbb60 2006-06-20 root@localhost +61 aaaaaaaaaaaaaaaa61 bbbbbbbbbbbbbbbbbb61 2006-06-20 root@localhost +62 aaaaaaaaaaaaaaaa62 bbbbbbbbbbbbbbbbbb62 2006-06-20 root@localhost +63 aaaaaaaaaaaaaaaa63 bbbbbbbbbbbbbbbbbb63 2006-06-20 root@localhost +64 aaaaaaaaaaaaaaaa64 bbbbbbbbbbbbbbbbbb64 2006-06-20 root@localhost +65 aaaaaaaaaaaaaaaa65 bbbbbbbbbbbbbbbbbb65 2006-06-20 root@localhost +66 aaaaaaaaaaaaaaaa66 bbbbbbbbbbbbbbbbbb66 2006-06-20 root@localhost +67 aaaaaaaaaaaaaaaa67 bbbbbbbbbbbbbbbbbb67 2006-06-20 root@localhost +68 aaaaaaaaaaaaaaaa68 bbbbbbbbbbbbbbbbbb68 2006-06-20 root@localhost +69 aaaaaaaaaaaaaaaa69 bbbbbbbbbbbbbbbbbb69 2006-06-20 root@localhost +70 aaaaaaaaaaaaaaaa70 bbbbbbbbbbbbbbbbbb70 2006-06-20 root@localhost +71 aaaaaaaaaaaaaaaa71 bbbbbbbbbbbbbbbbbb71 2006-06-20 root@localhost +72 aaaaaaaaaaaaaaaa72 bbbbbbbbbbbbbbbbbb72 2006-06-20 root@localhost +73 aaaaaaaaaaaaaaaa73 bbbbbbbbbbbbbbbbbb73 2006-06-20 root@localhost +74 aaaaaaaaaaaaaaaa74 bbbbbbbbbbbbbbbbbb74 2006-06-20 root@localhost +75 aaaaaaaaaaaaaaaa75 bbbbbbbbbbbbbbbbbb75 2006-06-20 root@localhost +76 aaaaaaaaaaaaaaaa76 bbbbbbbbbbbbbbbbbb76 2006-06-20 root@localhost +77 aaaaaaaaaaaaaaaa77 bbbbbbbbbbbbbbbbbb77 2006-06-20 root@localhost +78 aaaaaaaaaaaaaaaa78 bbbbbbbbbbbbbbbbbb78 2006-06-20 root@localhost +79 aaaaaaaaaaaaaaaa79 bbbbbbbbbbbbbbbbbb79 2006-06-20 root@localhost +80 aaaaaaaaaaaaaaaa80 bbbbbbbbbbbbbbbbbb80 2006-06-20 root@localhost +81 aaaaaaaaaaaaaaaa81 bbbbbbbbbbbbbbbbbb81 2006-06-20 root@localhost +82 aaaaaaaaaaaaaaaa82 bbbbbbbbbbbbbbbbbb82 2006-06-20 root@localhost +83 aaaaaaaaaaaaaaaa83 bbbbbbbbbbbbbbbbbb83 2006-06-20 root@localhost +84 aaaaaaaaaaaaaaaa84 bbbbbbbbbbbbbbbbbb84 2006-06-20 root@localhost +85 aaaaaaaaaaaaaaaa85 bbbbbbbbbbbbbbbbbb85 2006-06-20 root@localhost +86 aaaaaaaaaaaaaaaa86 bbbbbbbbbbbbbbbbbb86 2006-06-20 root@localhost +87 aaaaaaaaaaaaaaaa87 bbbbbbbbbbbbbbbbbb87 2006-06-20 root@localhost +88 aaaaaaaaaaaaaaaa88 bbbbbbbbbbbbbbbbbb88 2006-06-20 root@localhost +89 aaaaaaaaaaaaaaaa89 bbbbbbbbbbbbbbbbbb89 2006-06-20 root@localhost +90 aaaaaaaaaaaaaaaa90 bbbbbbbbbbbbbbbbbb90 2006-06-20 root@localhost +91 aaaaaaaaaaaaaaaa91 
bbbbbbbbbbbbbbbbbb91 2006-06-20 root@localhost +92 aaaaaaaaaaaaaaaa92 bbbbbbbbbbbbbbbbbb92 2006-06-20 root@localhost +93 aaaaaaaaaaaaaaaa93 bbbbbbbbbbbbbbbbbb93 2006-06-20 root@localhost +94 aaaaaaaaaaaaaaaa94 bbbbbbbbbbbbbbbbbb94 2006-06-20 root@localhost +95 aaaaaaaaaaaaaaaa95 bbbbbbbbbbbbbbbbbb95 2006-06-20 root@localhost +96 aaaaaaaaaaaaaaaa96 bbbbbbbbbbbbbbbbbb96 2006-06-20 root@localhost +97 aaaaaaaaaaaaaaaa97 bbbbbbbbbbbbbbbbbb97 2006-06-20 root@localhost +98 aaaaaaaaaaaaaaaa98 bbbbbbbbbbbbbbbbbb98 2006-06-20 root@localhost +99 aaaaaaaaaaaaaaaa99 bbbbbbbbbbbbbbbbbb99 2006-06-20 root@localhost +100 aaaaaaaaaaaaaaaa100 bbbbbbbbbbbbbbbbbb100 2006-06-20 root@localhost +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; diff --git a/mysql-test/r/ndb_default_cluster.require b/mysql-test/r/ndb_default_cluster.require index aa4988cdca3..3616ae0f343 100644 --- a/mysql-test/r/ndb_default_cluster.require +++ b/mysql-test/r/ndb_default_cluster.require @@ -1,2 +1,2 @@ Variable_name Value -Ndb_connected_host localhost +Ndb_config_from_host localhost diff --git a/mysql-test/r/ndb_restore.result b/mysql-test/r/ndb_restore.result index 7dc4057e615..b946d97bea1 100644 --- a/mysql-test/r/ndb_restore.result +++ b/mysql-test/r/ndb_restore.result @@ -1,6 +1,6 @@ use test; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; CREATE TABLE `t1_c` ( `capgoaledatta` smallint(5) unsigned NOT NULL auto_increment, `goaledatta` char(2) NOT NULL default '', @@ -116,6 +116,8 @@ CREATE TABLE `t9_c` ( PRIMARY KEY (`kattjame`,`hunderaaarbagefa`,`hassetistart`,`hassetino`) ) ENGINE=ndbcluster DEFAULT CHARSET=latin1; INSERT INTO `t9_c` VALUES ('3g4jh8gar2t','joe','q3.net','elredun.com','q3.net','436643316120','436643316939','91341234568968','695595699','1.1.1.1','2.2.6.2','3','86989','34','x','x','2012-03-12 18:35:04','2012-12-05 12:35:04',3123123,9569,6565,1),('4tt45345235','pap','q3plus.qt','q3plus.qt','q3.net','436643316120','436643316939','8956234534568968','5254595969','1.1.1.1','8.6.2.2','4','86989','34','x','x','2012-03-12 12:55:34','2012-12-05 11:20:04',3223433,3369,9565,2),('4545435545','john','q3.net','q3.net','acne.li','436643316120','436643316939','45345234568968','995696699','1.1.1.1','2.9.9.2','2','86998','34','x','x','2012-03-12 11:35:03','2012-12-05 08:50:04',8823123,169,3565,3); +CREATE TABLE t10_c (a INT AUTO_INCREMENT KEY) ENGINE=ndbcluster DEFAULT CHARSET=latin1; +INSERT INTO t10_c VALUES (1),(2),(3); create table t1 engine=myisam as select * from t1_c; create table t2 engine=myisam as select * from t2_c; create table t3 engine=myisam as select * from t3_c; @@ -125,6 +127,7 @@ create table t6 engine=myisam as select * from t6_c; create table t7 engine=myisam as select * from t7_c; create table t8 engine=myisam as select * from t8_c; create table t9 engine=myisam as select * from t9_c; +create table t10 engine=myisam as select * from t10_c; CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; DELETE FROM test.backup_info; LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; @@ -132,7 +135,7 @@ SELECT @the_backup_id:=backup_id FROM test.backup_info; @the_backup_id:=backup_id <the_backup_id> DROP TABLE test.backup_info; -drop 
table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; select count(*) from t1; count(*) 5 @@ -232,20 +235,11 @@ from (select * from t9 union select * from t9_c) a; count(*) 3 -ALTER TABLE t1_c -PARTITION BY RANGE (`capgoaledatta`) -(PARTITION p0 VALUES LESS THAN MAXVALUE); -ALTER TABLE t2_c -PARTITION BY LIST(`capgotod`) -(PARTITION p0 VALUES IN (0,1,2,3,4,5,6)); -ALTER TABLE t3_c -PARTITION BY HASH (`CapGoaledatta`); -ALTER TABLE t5_c -PARTITION BY HASH (`capfa`) -PARTITIONS 4; -ALTER TABLE t6_c -PARTITION BY LINEAR HASH (`relatta`) -PARTITIONS 4; +select * from t10_c order by a; +a +1 +2 +3 ALTER TABLE t7_c PARTITION BY LINEAR KEY (`dardtestard`); CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; @@ -255,7 +249,7 @@ SELECT @the_backup_id:=backup_id FROM test.backup_info; @the_backup_id:=backup_id <the_backup_id> DROP TABLE test.backup_info; -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; select count(*) from t1; count(*) 5 @@ -355,7 +349,7 @@ from (select * from t9 union select * from t9_c) a; count(*) 3 -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; select count(*) from t1; count(*) 5 @@ -455,7 +449,7 @@ from (select * from t9 union select * from t9_c) a; count(*) 3 -drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; DELETE FROM test.backup_info; LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; @@ -463,7 +457,6 @@ SELECT @the_backup_id:=backup_id FROM test.backup_info; @the_backup_id:=backup_id <the_backup_id> DROP TABLE test.backup_info; -Create table test/def/t2_c failed: Translate frm error -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; drop table if exists t2_c; 520093696,<the_backup_id> diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result index 3e3981c0050..c046fa2a0c6 100644 --- a/mysql-test/r/ps.result +++ b/mysql-test/r/ps.result @@ -1158,3 +1158,108 @@ Warnings: Error 1146 Table 'test.t4' doesn't exist deallocate prepare stmt; drop table t1, t2, t3; +create database mysqltest_long_database_name_to_thrash_heap; +use test; +create table t1 (i int); +prepare stmt from "alter table test.t1 rename t1"; +use mysqltest_long_database_name_to_thrash_heap; +execute stmt; +show tables like 't1'; +Tables_in_mysqltest_long_database_name_to_thrash_heap (t1) +prepare stmt from "alter table test.t1 rename t1"; +use test; +execute stmt; +show tables like 't1'; +Tables_in_test (t1) +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +Tables_in_mysqltest_long_database_name_to_thrash_heap (t1) +t1 +deallocate prepare stmt; +use mysqltest_long_database_name_to_thrash_heap; +prepare stmt_create from "create table t1 (i int)"; +prepare stmt_insert from "insert into t1 (i) values (1)"; +prepare stmt_update from "update t1 set i=2"; +prepare stmt_delete from "delete from t1 where i=2"; +prepare stmt_select from "select * from t1"; +prepare stmt_alter from "alter table t1 add column (b int)"; +prepare stmt_alter1 from "alter table t1 drop column b"; +prepare stmt_analyze from "analyze table t1"; +prepare stmt_optimize from "optimize table t1"; +prepare stmt_show from "show tables like 
't1'"; +prepare stmt_truncate from "truncate table t1"; +prepare stmt_drop from "drop table t1"; +drop table t1; +use test; +execute stmt_create; +show tables like 't1'; +Tables_in_test (t1) +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +Tables_in_mysqltest_long_database_name_to_thrash_heap (t1) +t1 +use test; +execute stmt_insert; +select * from mysqltest_long_database_name_to_thrash_heap.t1; +i +1 +execute stmt_update; +select * from mysqltest_long_database_name_to_thrash_heap.t1; +i +2 +execute stmt_delete; +execute stmt_select; +i +execute stmt_alter; +show columns from mysqltest_long_database_name_to_thrash_heap.t1; +Field Type Null Key Default Extra +i int(11) YES NULL +b int(11) YES NULL +execute stmt_alter1; +show columns from mysqltest_long_database_name_to_thrash_heap.t1; +Field Type Null Key Default Extra +i int(11) YES NULL +execute stmt_analyze; +Table Op Msg_type Msg_text +mysqltest_long_database_name_to_thrash_heap.t1 analyze status Table is already up to date +execute stmt_optimize; +Table Op Msg_type Msg_text +mysqltest_long_database_name_to_thrash_heap.t1 optimize status Table is already up to date +execute stmt_show; +Tables_in_mysqltest_long_database_name_to_thrash_heap (t1) +t1 +execute stmt_truncate; +execute stmt_drop; +show tables like 't1'; +Tables_in_test (t1) +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +Tables_in_mysqltest_long_database_name_to_thrash_heap (t1) +drop database mysqltest_long_database_name_to_thrash_heap; +prepare stmt_create from "create table t1 (i int)"; +ERROR 3D000: No database selected +prepare stmt_insert from "insert into t1 (i) values (1)"; +ERROR 3D000: No database selected +prepare stmt_update from "update t1 set i=2"; +ERROR 3D000: No database selected +prepare stmt_delete from "delete from t1 where i=2"; +ERROR 3D000: No database selected +prepare stmt_select from "select * from t1"; +ERROR 3D000: No database selected +prepare stmt_alter from "alter table t1 add column (b int)"; +ERROR 3D000: No database selected +prepare stmt_alter1 from "alter table t1 drop column b"; +ERROR 3D000: No database selected +prepare stmt_analyze from "analyze table t1"; +ERROR 3D000: No database selected +prepare stmt_optimize from "optimize table t1"; +ERROR 3D000: No database selected +prepare stmt_show from "show tables like 't1'"; +ERROR 3D000: No database selected +prepare stmt_truncate from "truncate table t1"; +ERROR 3D000: No database selected +prepare stmt_drop from "drop table t1"; +ERROR 3D000: No database selected +create temporary table t1 (i int); +ERROR 3D000: No database selected +use test; diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index 45f96b08425..db72d190441 100644 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@ -4984,6 +4984,52 @@ CALL bug18037_p2()| DROP FUNCTION bug18037_f1| DROP PROCEDURE bug18037_p1| DROP PROCEDURE bug18037_p2| +use test| +create table t3 (i int)| +insert into t3 values (1), (2)| +create database mysqltest1| +use mysqltest1| +create function bug17199() returns varchar(2) deterministic return 'ok'| +use test| +select *, mysqltest1.bug17199() from t3| +i mysqltest1.bug17199() +1 ok +2 ok +use mysqltest1| +create function bug18444(i int) returns int no sql deterministic return i + 1| +use test| +select mysqltest1.bug18444(i) from t3| +mysqltest1.bug18444(i) +2 +3 +drop database mysqltest1| +create database mysqltest1 charset=utf8| +create database mysqltest2 charset=utf8| +create procedure mysqltest1.p1() +begin +-- alters the 
default collation of database test +alter database character set koi8r; +end| +use mysqltest1| +call p1()| +show create database mysqltest1| +Database Create Database +mysqltest1 CREATE DATABASE `mysqltest1` /*!40100 DEFAULT CHARACTER SET koi8r */ +show create database mysqltest2| +Database Create Database +mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET utf8 */ +alter database mysqltest1 character set utf8| +use mysqltest2| +call mysqltest1.p1()| +show create database mysqltest1| +Database Create Database +mysqltest1 CREATE DATABASE `mysqltest1` /*!40100 DEFAULT CHARACTER SET koi8r */ +show create database mysqltest2| +Database Create Database +mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET utf8 */ +drop database mysqltest1| +drop database mysqltest2| +use test| drop table if exists t3| drop procedure if exists bug15217| create table t3 as select 1| diff --git a/mysql-test/r/symlink.result b/mysql-test/r/symlink.result index 3b24210dd5d..efd6f8710aa 100644 --- a/mysql-test/r/symlink.result +++ b/mysql-test/r/symlink.result @@ -74,18 +74,24 @@ t9 CREATE TABLE `t9` ( ) ENGINE=MyISAM AUTO_INCREMENT=16725 DEFAULT CHARSET=latin1 DATA DIRECTORY='MYSQLTEST_VARDIR/tmp/' INDEX DIRECTORY='MYSQLTEST_VARDIR/run/' drop database mysqltest; create table t1 (a int not null) engine=myisam; +Warnings: +Warning 0 DATA DIRECTORY option ignored show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 alter table t1 add b int; +Warnings: +Warning 0 DATA DIRECTORY option ignored show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) NOT NULL, `b` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 +Warnings: +Warning 0 INDEX DIRECTORY option ignored show create table t1; Table Create Table t1 CREATE TABLE `t1` ( diff --git a/mysql-test/r/wait_timeout.result b/mysql-test/r/wait_timeout.result index 683986abf5d..b865a17454d 100644 --- a/mysql-test/r/wait_timeout.result +++ b/mysql-test/r/wait_timeout.result @@ -1,3 +1,7 @@ +select 0; +0 +0 +flush status; select 1; 1 1 diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test index b4ba3878580..140cdccc218 100644 --- a/mysql-test/t/create.test +++ b/mysql-test/t/create.test @@ -521,7 +521,7 @@ DROP TABLE t12913; create database mysqltest; use mysqltest; drop database mysqltest; ---error 1102 +--error ER_NO_DB_ERROR create table test.t1 like x; --disable_warnings drop table if exists test.t1; diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index 6f26847f8d7..ebe61e1af4a 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -18,9 +18,6 @@ #im_life_cycle : Bug#20368 2006-06-10 alik im_life_cycle test fails ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog -#ndb_binlog_discover : BUG#19395 2006-04-28 tomas/knielsen mysqld does not always detect cluster shutdown -#ndb_cache2 : BUG#18597 2006-03-28 brian simultaneous drop table and ndb statistics update triggers node failure -#ndb_cache_multi2 : BUG#18597 2006-04-10 kent simultaneous drop table and ndb statistics update triggers node failure ndb_load : BUG#17233 2006-05-04 tomas failed load data from infile causes mysqld dbug_assert, binlog not flushed partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table ps_7ndb : BUG#18950 2006-02-16 jmiller create table like does not 
obtain LOCK_open diff --git a/mysql-test/t/events_logs_tests.test b/mysql-test/t/events_logs_tests.test index 5f4ec852cd3..5c252b1174b 100644 --- a/mysql-test/t/events_logs_tests.test +++ b/mysql-test/t/events_logs_tests.test @@ -69,8 +69,8 @@ SELECT user_host, query_time, db, sql_text FROM mysql.slow_log; SET SESSION long_query_time=300; --echo "Make it quite long" TRUNCATE mysql.slow_log; -SET SESSION long_query_time=1; CREATE TABLE slow_event_test (slo_val tinyint, val tinyint); +SET SESSION long_query_time=1; --echo "This won't go to the slow log" CREATE EVENT long_event ON SCHEDULE EVERY 1 MINUTE DO INSERT INTO slow_event_test SELECT @@long_query_time, SLEEP(3); SELECT * FROM slow_event_test; diff --git a/mysql-test/t/func_sapdb.test b/mysql-test/t/func_sapdb.test index 6189712b5fe..97101fba615 100644 --- a/mysql-test/t/func_sapdb.test +++ b/mysql-test/t/func_sapdb.test @@ -43,6 +43,8 @@ select weekofyear("1997-11-30 23:59:59.000001"); select makedate(1997,1); select makedate(1997,0); +select makedate(9999,365); +select makedate(9999,366); #Time functions diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index 7428ac45422..cce8f8e5b7a 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -143,6 +143,10 @@ select extract(SECOND FROM "1999-01-02 10:11:12"); select extract(MONTH FROM "2001-02-00"); # +# MySQL Bugs: #12356: DATE_SUB or DATE_ADD incorrectly returns null +# +SELECT DATE_SUB(str_to_date('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE); +SELECT DATE_ADD(str_to_date('9999-12-30 23:59:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE); # test EXTRACT QUARTER (Bug #18100) # diff --git a/mysql-test/t/gis-rtree.test b/mysql-test/t/gis-rtree.test index 02e45861706..163f2806ad2 100644 --- a/mysql-test/t/gis-rtree.test +++ b/mysql-test/t/gis-rtree.test @@ -187,4 +187,48 @@ check table t1 extended; drop table t1; +# +# Bug#17877 - Corrupted spatial index +# +CREATE TABLE t1 ( + c1 geometry NOT NULL default '', + SPATIAL KEY i1 (c1(32)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1; +INSERT INTO t1 (c1) VALUES ( + PolygonFromText('POLYGON((-18.6086111000 -66.9327777000, + -18.6055555000 -66.8158332999, + -18.7186111000 -66.8102777000, + -18.7211111000 -66.9269443999, + -18.6086111000 -66.9327777000))')); +# This showed a missing key. +CHECK TABLE t1 EXTENDED; +DROP TABLE t1; +# +CREATE TABLE t1 ( + c1 geometry NOT NULL default '', + SPATIAL KEY i1 (c1(32)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1; +INSERT INTO t1 (c1) VALUES ( + PolygonFromText('POLYGON((-18.6086111000 -66.9327777000, + -18.6055555000 -66.8158332999, + -18.7186111000 -66.8102777000, + -18.7211111000 -66.9269443999, + -18.6086111000 -66.9327777000))')); +INSERT INTO t1 (c1) VALUES ( + PolygonFromText('POLYGON((-65.7402776999 -96.6686111000, + -65.7372222000 -96.5516666000, + -65.8502777000 -96.5461111000, + -65.8527777000 -96.6627777000, + -65.7402776999 -96.6686111000))')); +# This is the same as the first insert to get a non-unique key. +INSERT INTO t1 (c1) VALUES ( + PolygonFromText('POLYGON((-18.6086111000 -66.9327777000, + -18.6055555000 -66.8158332999, + -18.7186111000 -66.8102777000, + -18.7211111000 -66.9269443999, + -18.6086111000 -66.9327777000))')); +# This showed (and still shows) OK. 
+CHECK TABLE t1 EXTENDED; +DROP TABLE t1; + # End of 4.1 tests diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index bb3f621d194..4c6ff9b2fe7 100644 --- a/mysql-test/t/gis.test +++ b/mysql-test/t/gis.test @@ -409,3 +409,10 @@ create table t1(pt GEOMETRY); alter table t1 add primary key pti(pt); alter table t1 add primary key pti(pt(20)); drop table t1; + +--enable_metadata +create table t1 (g GEOMETRY); +select * from t1; +select asbinary(g) from t1; +--disable_metadata +drop table t1; diff --git a/mysql-test/t/lock_multi.test b/mysql-test/t/lock_multi.test index 5bebec49b88..9c9e68f931f 100644 --- a/mysql-test/t/lock_multi.test +++ b/mysql-test/t/lock_multi.test @@ -156,6 +156,55 @@ connection locker; use test; # connection default; +# +# Test if CREATE TABLE with LOCK TABLE deadlocks. +# +connection writer; +CREATE TABLE t1 (c1 int); +LOCK TABLE t1 WRITE; +# +# This waits until t1 is unlocked. +connection locker; +send FLUSH TABLES WITH READ LOCK; +--sleep 1 +# +# This must not block. +connection writer; +CREATE TABLE t2 (c1 int); +UNLOCK TABLES; +# +# This awakes now. +connection locker; +reap; +UNLOCK TABLES; +# +connection default; +DROP TABLE t1, t2; +# +# Test if CREATE TABLE SELECT with LOCK TABLE deadlocks. +# +connection writer; +CREATE TABLE t1 (c1 int); +LOCK TABLE t1 WRITE; +# +# This waits until t1 is unlocked. +connection locker; +send FLUSH TABLES WITH READ LOCK; +--sleep 1 +# +# This must not block. +connection writer; +--error 1100 +CREATE TABLE t2 AS SELECT * FROM t1; +UNLOCK TABLES; +# +# This awakes now. +connection locker; +reap; +UNLOCK TABLES; +# +connection default; +DROP TABLE t1; # # Bug#19815 - CREATE/RENAME/DROP DATABASE can deadlock on a global read lock @@ -218,32 +267,4 @@ connection locker; drop table t1; # End of 5.0 tests -# Bug#16986 - Deadlock condition with MyISAM tables -# -connection locker; -use mysql; -LOCK TABLES columns_priv WRITE, db WRITE, host WRITE, user WRITE; -FLUSH TABLES; ---sleep 1 -# -connection reader; -use mysql; -#NOTE: This must be a multi-table select, otherwise the deadlock will not occur -send SELECT user.Select_priv FROM user, db WHERE user.user = db.user LIMIT 1; ---sleep 1 -# -connection locker; -# Make test case independent from earlier grants. 
---replace_result "Table is already up to date" "OK" -OPTIMIZE TABLES columns_priv, db, host, user; -UNLOCK TABLES; -# -connection reader; -reap; -use test; -# -connection locker; -use test; -# -connection default; diff --git a/mysql-test/t/log_state.test b/mysql-test/t/log_state.test index 41fbd068dce..6fc0f3421a7 100644 --- a/mysql-test/t/log_state.test +++ b/mysql-test/t/log_state.test @@ -80,7 +80,8 @@ set global general_log_file=''; --replace_column 2 # show variables like 'general_log_file'; set global general_log= OFF; -set global general_log_file='/tmp/log.master'; +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +eval set global general_log_file='$MYSQLTEST_VARDIR/tmp/log.master'; set global general_log= ON; create table t1(f1 int); drop table t1; diff --git a/mysql-test/t/ndb_autodiscover3.test b/mysql-test/t/ndb_autodiscover3.test index 80d7d41ef87..95f13cff69d 100644 --- a/mysql-test/t/ndb_autodiscover3.test +++ b/mysql-test/t/ndb_autodiscover3.test @@ -2,7 +2,6 @@ -- source include/have_multi_ndb.inc -- source include/not_embedded.inc - --disable_warnings drop table if exists t1, t2; --enable_warnings diff --git a/mysql-test/t/ndb_blob_partition.test b/mysql-test/t/ndb_blob_partition.test index a3948cc9491..6173c9d9851 100644 --- a/mysql-test/t/ndb_blob_partition.test +++ b/mysql-test/t/ndb_blob_partition.test @@ -1,6 +1,10 @@ --source include/have_ndb.inc -- source include/not_embedded.inc +--disable_query_log +set new=on; +--enable_query_log + --disable_warnings drop table if exists t1; --enable_warnings diff --git a/mysql-test/t/ndb_dd_advance.test b/mysql-test/t/ndb_dd_advance.test new file mode 100755 index 00000000000..e882ec794c1 --- /dev/null +++ b/mysql-test/t/ndb_dd_advance.test @@ -0,0 +1,630 @@ +############################################################## +# Author: JBM +# Date: 2006-01-12 +# Purpose: To test using ndb memory and disk tables together. +############################################################## + +############################################################## +# Author: Nikolay +# Date: 2006-05-12 +# Purpose: To test using ndb memory and disk tables together. +# +# Select from disk into memory table +# Select from disk into memory table +# Create test that loads data, use mysql dump to dump data, drop table, +# create table and load from mysql dump. 
+# Use group by asc and dec; Use having; Use order by +# ALTER Tests (Meta data testing): +# ALTER from InnoDB to Cluster Disk Data +# ALTER from MyISAM to Cluster Disk Data +# ALTER from Cluster Disk Data to InnoDB +# ALTER from Cluster Disk Data to MyISAM +# ALTER DD Tables and add columns +# ALTER DD Tables and add Indexes +# ALTER DD Tables and drop columns +# +############################################################## + +-- source include/have_ndb.inc +-- source include/not_embedded.inc + +--disable_warnings +DROP TABLE IF EXISTS test.t1; +DROP TABLE IF EXISTS test.t2; +--enable_warnings + +############ Test Setup Section ############# +-- echo **** Test Setup Section **** + +CREATE LOGFILE GROUP log_group1 +ADD UNDOFILE './log_group1/undofile.dat' +INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; + +CREATE TABLESPACE table_space1 +ADD DATAFILE './table_space1/datafile.dat' +USE LOGFILE GROUP log_group1 +INITIAL_SIZE 12M +ENGINE NDB; + + +CREATE TABLE test.t1 +(pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL) +TABLESPACE table_space1 STORAGE DISK +ENGINE=NDB; + +CREATE TABLE test.t2 +(pk2 INT NOT NULL PRIMARY KEY, b2 INT NOT NULL, c2 INT NOT NULL) +ENGINE=NDB; + +--echo +##################### Data load for first test #################### +--echo **** Data load for first test **** + +INSERT INTO test.t1 VALUES +(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), +(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), +(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), +(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), +(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), +(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), +(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), +(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), +(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), +(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), +(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), +(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), +(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), +(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), +(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); + + +INSERT INTO test.t2 VALUES +(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), +(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), +(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), +(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), +(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), +(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), +(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), +(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), +(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), +(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), +(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), +(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), +(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), +(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), +(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); + +--echo +##################### Test 1 Section Begins ############### +--echo *** Test 1 Section Begins *** +SELECT COUNT(*) FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); +SELECT * FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); +SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 WHERE b IN (4); +SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2 WHERE pk1 IN (75); +SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; +--echo +####################### Test 
1 Section End ################ + +##################### Setup for test 2 #################### +--echo *** Setup for test 2 **** +DELETE FROM test.t1; +INSERT INTO test.t1 VALUES +(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), +(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), +(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), +(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), +(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), +(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), +(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), +(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), +(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45); +--echo +############################# Test Section 2 ############### +--echo **** Test Section 2 **** +SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; +SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2; +SELECT COUNT(*) FROM test.t1 RIGHT JOIN test.t2 ON b=b2; +SHOW CREATE TABLE test.t2; +SHOW CREATE TABLE test.t1; +ALTER TABLE test.t2 TABLESPACE table_space1 STORAGE DISK +ENGINE=NDB; +SHOW CREATE TABLE test.t2; +ALTER TABLE test.t1 ENGINE=NDBCLUSTER; +SHOW CREATE TABLE test.t1; +--echo +######################### End Test Section 2 ################# +DROP TABLE test.t1; +DROP TABLE test.t2; +##################### Setup for Test Section 3 ############### +--echo *** Setup for Test Section 3 *** +CREATE TABLE test.t1 ( + usr_id INT unsigned NOT NULL, + uniq_id INT unsigned NOT NULL AUTO_INCREMENT, + start_num INT unsigned NOT NULL DEFAULT 1, + increment INT unsigned NOT NULL DEFAULT 1, + PRIMARY KEY (uniq_id), + INDEX usr_uniq_idx (usr_id, uniq_id), + INDEX uniq_usr_idx (uniq_id, usr_id)) +TABLESPACE table_space1 STORAGE DISK +ENGINE=NDB; + + +CREATE TABLE test.t2 ( + id INT unsigned NOT NULL DEFAULT 0, + usr2_id INT unsigned NOT NULL DEFAULT 0, + max INT unsigned NOT NULL DEFAULT 0, + c_amount INT unsigned NOT NULL DEFAULT 0, + d_max INT unsigned NOT NULL DEFAULT 0, + d_num INT unsigned NOT NULL DEFAULT 0, + orig_time INT unsigned NOT NULL DEFAULT 0, + c_time INT unsigned NOT NULL DEFAULT 0, + active ENUM ("no","yes") NOT NULL, + PRIMARY KEY (id,usr2_id), + INDEX id_idx (id), + INDEX usr2_idx (usr2_id)) +ENGINE=NDB; + +INSERT INTO test.t1 VALUES (3,NULL,0,50),(3,NULL,0,200),(3,NULL,0,25),(3,NULL,0,84676),(3,NULL,0,235),(3,NULL,0,10),(3,NULL,0,3098),(3,NULL,0,2947),(3,NULL,0,8987),(3,NULL,0,8347654),(3,NULL,0,20398),(3,NULL,0,8976),(3,NULL,0,500),(3,NULL,0,198); + +--echo +###################### Test Section 3 ###################### +--echo **** Test Section 3 **** +SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, +test.t2.usr2_id,test.t2.c_amount,test.t2.max +FROM test.t1 +LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id +WHERE test.t1.uniq_id = 4 +ORDER BY test.t2.c_amount; + +INSERT INTO test.t2 VALUES (2,3,3000,6000,0,0,746584,837484,'yes'); +INSERT INTO test.t2 VALUES (4,3,3000,6000,0,0,746584,837484,'yes'); +INSERT INTO test.t2 VALUES (7,3,1000,2000,0,0,746294,937484,'yes'); + +SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, +test.t2.usr2_id,test.t2.c_amount,test.t2.max +FROM test.t1 +LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id +WHERE test.t1.uniq_id = 4 +ORDER BY test.t2.c_amount; +--echo +####################### End Section 3 ######################### +DROP TABLE test.t1; +DROP TABLE test.t2; +ALTER TABLESPACE table_space1 +DROP DATAFILE './table_space1/datafile.dat' +ENGINE = NDB; + +DROP TABLESPACE table_space1 +ENGINE = NDB; + +DROP LOGFILE GROUP log_group1 +ENGINE =NDB; + 
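Section 4 below repeats the same NDB disk-data lifecycle several more times. As a reference while reading this patch, here is a minimal sketch of the required ordering; the names lg_demo, ts_demo, test.demo and the file paths are illustrative only and are not part of the patch. Objects must be created as logfile group, then tablespace, then table, and torn down in the reverse order, dropping each datafile before its tablespace:

# Minimal NDB disk-data lifecycle (illustrative sketch, not part of the patch)
CREATE LOGFILE GROUP lg_demo
ADD UNDOFILE './lg_demo/undofile.dat'
INITIAL_SIZE 16M
UNDO_BUFFER_SIZE = 1M
ENGINE=NDB;
CREATE TABLESPACE ts_demo
ADD DATAFILE './ts_demo/datafile.dat'
USE LOGFILE GROUP lg_demo
INITIAL_SIZE 12M
ENGINE NDB;
CREATE TABLE test.demo (pk INT NOT NULL PRIMARY KEY, payload VARCHAR(256))
TABLESPACE ts_demo STORAGE DISK ENGINE=NDB;
# Teardown must reverse creation: table, then datafile, then tablespace, then logfile group.
DROP TABLE test.demo;
ALTER TABLESPACE ts_demo
DROP DATAFILE './ts_demo/datafile.dat'
ENGINE NDB;
DROP TABLESPACE ts_demo ENGINE NDB;
DROP LOGFILE GROUP lg_demo
ENGINE=NDB;
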
+####################### Section 4 ######################### + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLESPACE ts2 + ADD DATAFILE './table_space2/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + +### Select from disk into memory table ### + + CREATE TABLE t1 (a int NOT NULL PRIMARY KEY, b int) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + CREATE TABLE t2 (a int NOT NULL PRIMARY KEY, b int) + ENGINE=NDB; + + SHOW CREATE TABLE t1; + SHOW CREATE TABLE t2; + + INSERT INTO t1 VALUES (1,1); + INSERT INTO t1 VALUES (2,2); + SELECT * FROM t1 order by a; + INSERT INTO t2(a,b) SELECT * FROM t1; + SELECT * FROM t2 order by a; + +### Select from disk into memory table ### + + TRUNCATE t1; + TRUNCATE t2; + INSERT INTO t2 VALUES (3,3); + INSERT INTO t2 VALUES (4,4); + INSERT INTO t1(a,b) SELECT * FROM t2; + SELECT * FROM t1 order by a; + + DROP TABLE t1, t2; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; + + DROP TABLESPACE ts1 ENGINE NDB; + + ALTER TABLESPACE ts2 + DROP DATAFILE './table_space2/datafile.dat' + ENGINE NDB; + + DROP TABLESPACE ts2 ENGINE NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create test that loads data, use mysql dump to dump data, drop table, +#### create table and load from mysql dump. + +# DROP DATABASE IF EXISTS test; + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts + ADD DATAFILE './datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + +# CREATE DATABASE test; + +CREATE TABLE test.t ( + a smallint NOT NULL, + b int NOT NULL, + c bigint NOT NULL, + d char(10), + e TEXT, + f VARCHAR(255), + PRIMARY KEY(a) +) TABLESPACE ts STORAGE DISK ENGINE=NDB; + + ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f); + SHOW CREATE TABLE test.t; + +# insert records into tables + + let $1=100; + disable_query_log; + while ($1) + { + eval insert into test.t values($1, $1+1, $1+2, "aaa$1", "bbb$1", "ccccc$1"); + dec $1; + } + enable_query_log; + + SELECT * FROM test.t order by a; +--exec $MYSQL_DUMP --skip-comments --databases test > $MYSQLTEST_VARDIR/tmp/t_dump.sql +DROP TABLE test.t; +--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/t_dump.sql +USE test; +show tables; + +SELECT * FROM test.t order by a; + + DROP TABLE test.t; +# DROP DATABASE test; + + ALTER TABLESPACE ts + DROP DATAFILE './datafile.dat' + ENGINE NDB; + + DROP TABLESPACE ts ENGINE NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### BUG 18856 test case comented out +##### Use "SELECT * INTO OUTFILE" to dump data and "LOAD DATA INFILE" to load ##### data back to the data file. 
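The block that follows is intentionally commented out: per the note inside it, TRUNCATE on the disk-data table failed with error 1205 (lock wait timeout) under Bug#18856. For reference, the dump-and-reload round trip it was meant to verify is sketched here, using the same table test.t as the disabled block; this shows the intended sequence only and is not code added by the patch:

# Sketch of the OUTFILE/LOAD DATA round trip disabled by Bug#18856 (illustrative)
SELECT * INTO OUTFILE 't_backup' FROM test.t;
# this TRUNCATE is the step that hit the 1205 lock wait timeout
TRUNCATE test.t;
# expect 0 here once TRUNCATE succeeds
SELECT count(*) FROM test.t;
LOAD DATA INFILE 't_backup' INTO TABLE test.t;
# data should be restored to its pre-dump state
SELECT * FROM test.t ORDER BY a;
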
+ +# CREATE LOGFILE GROUP lg +# ADD UNDOFILE './undofile.dat' +# INITIAL_SIZE 16M +# UNDO_BUFFER_SIZE = 1M +# ENGINE=NDB; + +# CREATE TABLESPACE ts +# ADD DATAFILE './datafile.dat' +# USE LOGFILE GROUP lg +# INITIAL_SIZE 12M +# ENGINE NDB; + +#CREATE DATABASE test; + +#CREATE TABLE test.t ( +# a smallint NOT NULL, +# b int NOT NULL, +# c bigint NOT NULL, +# d char(10), +# e TEXT, +# f VARCHAR(255), +# PRIMARY KEY(a) +#) TABLESPACE ts STORAGE DISK ENGINE=NDB; + +# ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f); +# SHOW CREATE TABLE test.t; + +# insert records into tables + +# let $1=100; +# disable_query_log; +# while ($1) +# { +# eval insert into test.t values($1, $1+1, $1+2, "aaa$1", "bbb$1", "ccccc$1"); +# dec $1; +# } +# enable_query_log; + +# SELECT * FROM test.t order by a; + +# SELECT * INTO OUTFILE 't_backup' FROM test.t; +# TRUNCATE test.t; + +#'TRUNCATE test.t' failed: 1205: Lock wait timeout exceeded; try restarting #transaction. TABLESPACE ts STORAGE DISK ENGINE=NDB; + +# SELECT count(*) FROM test.t; +# LOAD DATA INFILE 't_backup' INTO TABLE test.t; + +# SELECT * FROM test.t order by a; + +# DROP TABLE test.t; +# DROP DATABASE test; + +# ALTER TABLESPACE ts +# DROP DATAFILE './datafile.dat' +# ENGINE NDB; +# DROP TABLESPACE ts ENGINE NDB; +# DROP LOGFILE GROUP lg +# ENGINE=NDB; + +#### Use group by asc and dec; Use having; Use order by. #### + +# DROP DATABASE IF EXISTS test; + DROP table IF EXISTS test.t1; + DROP table IF EXISTS test.t2; + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLESPACE ts2 + ADD DATAFILE './table_space2/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + +# CREATE DATABASE test; + +CREATE TABLE test.t1 ( + a1 smallint NOT NULL, + a2 int NOT NULL, + a3 bigint NOT NULL, + a4 char(10), + a5 decimal(5,1), + a6 time, + a7 date, + a8 datetime, + a9 VARCHAR(255), + a10 blob, + PRIMARY KEY(a1) +) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + + ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a8); + SHOW CREATE TABLE test.t1; + +CREATE TABLE test.t2 ( + b1 smallint NOT NULL, + b2 int NOT NULL, + b3 bigint NOT NULL, + b4 char(10), + b5 decimal(5,1), + b6 time, + b7 date, + b8 datetime, + b9 VARCHAR(255), + b10 blob, + PRIMARY KEY(b1) +) ENGINE=NDB; + + ALTER TABLE test.t2 ADD INDEX (b2), ADD INDEX (b3), ADD INDEX (b8); + SHOW CREATE TABLE test.t2; + +let $1=20; +disable_query_log; +while ($1) +{ + eval insert into test.t1 values($1, $1+1, $1+2000000000, "aaa$1", 34.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data"); + eval insert into test.t2 values($1+2, $1+3, $1+3000000000, "aaa$1", 35.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data"); + dec $1; +} +enable_query_log; + +SELECT * FROM test.t1 order by a1; +SELECT * FROM test.t2 order by b1; +SELECT COUNT(a1), a1, COUNT(a1)*a1 FROM test.t1 GROUP BY a1; +SELECT COUNT(a2), (a2+1), COUNT(a2)*(a2+0) FROM test.t1 GROUP BY a2; + +DROP TABLE test.t1; +DROP TABLE test.t2; + +create table test.t1 (a int not null,b char(5), c text) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + +insert into test.t1 (a) values (1),(2),(3),(4),(1),(2),(3),(4); +select distinct a from test.t1 group by b,a having a > 2 order by a desc; +select distinct a,c from test.t1 group by b,c,a having a > 2 order by a desc; +select distinct a from test.t1 
group by b,a having a > 2 order by a asc; +select distinct a,c from test.t1 group by b,c,a having a > 2 order by a asc; +drop table test.t1; + +create table test.t1 (a char(1), key(a)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +insert into test.t1 values('1'),('1'),('1'),('2'),('2'),('3'),('3'); +select * from test.t1 where a >= '1' order by a; +select distinct a from test.t1 order by a desc; +select distinct a from test.t1 where a >= '1' order by a desc; +select distinct a from test.t1 where a >= '1' order by a asc; +drop table test.t1; + +CREATE TABLE test.t1 (email varchar(50), infoID BIGINT, dateentered DATETIME) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t2 (infoID BIGINT, shipcode varchar(10)) ENGINE=NDB; + +INSERT INTO test.t1 (email, infoID, dateentered) VALUES + ('test1@testdomain.com', 1, '2002-07-30 22:56:38'), + ('test1@testdomain.com', 1, '2002-07-27 22:58:16'), + ('test2@testdomain.com', 1, '2002-06-19 15:22:19'), + ('test2@testdomain.com', 2, '2002-06-18 14:23:47'), + ('test3@testdomain.com', 1, '2002-05-19 22:17:32'); + +INSERT INTO test.t2(infoID, shipcode) VALUES + (1, 'Z001'), + (2, 'R002'); + +SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email, shipcode; +SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC; +SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE test.t1.infoID=test.t2.infoID ORDER BY dateentered DESC; +drop table test.t1,test.t2; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; + DROP TABLESPACE ts1 ENGINE NDB; + ALTER TABLESPACE ts2 + DROP DATAFILE './table_space2/datafile.dat' + ENGINE NDB; + DROP TABLESPACE ts2 ENGINE NDB; + DROP LOGFILE GROUP lg + ENGINE=NDB; +#################################################################### + + +#### Customer posted order by test case + +DROP TABLE IF EXISTS test.t; +create table test.t (f1 varchar(50) primary key, f2 text,f3 int) engine=NDB; +insert into test.t (f1,f2,f3)VALUES("111111","aaaaaa",1); +insert into test.t (f1,f2,f3)VALUES("222222","bbbbbb",2); +select * from test.t order by f1; +select f1,f2 from test.t order by f2; +select f2 from test.t order by f2; +select f1,f2 from test.t order by f1; +drop table test.t; + +################## ALTER Tests (Meta data testing) #################### + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts + ADD DATAFILE './table_space/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + +#### Try to ALTER from InnoDB to Cluster Disk Data + +CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=InnoDB; +SHOW CREATE TABLE test.t1; +ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +DROP TABLE test.t1; + +#### Try to ALTER from MyISAM to Cluster Disk Data + +CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=MyISAM; +SHOW CREATE TABLE test.t1; +ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +DROP TABLE test.t1; + +#### Try to ALTER from Cluster Disk Data to InnoDB + +CREATE TABLE test.t1 (a1 INT PRIMARY KEY, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +ALTER TABLE test.t1 ENGINE=InnoDB; +SHOW CREATE TABLE test.t1; +DROP TABLE test.t1; + +#### Try to ALTER from Cluster Disk Data to MyISAM + +CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +ALTER TABLE 
test.t1 ENGINE=MyISAM; +SHOW CREATE TABLE test.t1; +DROP TABLE test.t1; + +#### Try to ALTER DD Tables and add columns + +CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; + +ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; + +SHOW CREATE TABLE test.t1; + +#### Try to ALTER DD Tables and add Indexes + +ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a5), ADD INDEX (a6), +ADD INDEX (a7), ADD INDEX (a8); + +SHOW CREATE TABLE test.t1; + +DROP TABLE test.t1; + +#### Try to ALTER DD Tables and drop columns + +CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; + +ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; + +SHOW CREATE TABLE test.t1; + +ALTER TABLE test.t1 DROP a14; +ALTER TABLE test.t1 DROP a13; +ALTER TABLE test.t1 DROP a12; +ALTER TABLE test.t1 DROP a11; +ALTER TABLE test.t1 DROP a10; +ALTER TABLE test.t1 DROP a9; +ALTER TABLE test.t1 DROP a8; +ALTER TABLE test.t1 DROP a7; +ALTER TABLE test.t1 DROP a6; +ALTER TABLE test.t1 DROP PRIMARY KEY; + +SHOW CREATE TABLE test.t1; + +DROP TABLE test.t1; + + ALTER TABLESPACE ts + DROP DATAFILE './table_space/datafile.dat' + ENGINE NDB; + DROP TABLESPACE ts ENGINE NDB; + DROP LOGFILE GROUP lg + ENGINE=NDB; + +####################### End section 4 ######################### +#End 5.1 test case + diff --git a/mysql-test/t/ndb_dd_advance2.test b/mysql-test/t/ndb_dd_advance2.test new file mode 100755 index 00000000000..7b7a15ef01a --- /dev/null +++ b/mysql-test/t/ndb_dd_advance2.test @@ -0,0 +1,724 @@ +############################################################## +# Author: Nikolay +# Date: 2006-04-01 +# Purpose: Specific Blob and Varchar testing using disk tables. +############################################################## +# Create Stored procedures that use disk based tables. +# Create function that operate on disk based tables. +# Create triggers that operate on disk based tables. +# Create views that operate on disk based tables. +# Try to create FK constraints on disk based tables. +# Create and use disk based table that use auto inc. +# Create test that use transaction (commit, rollback) +# Create large disk base table, do random queries, check cache hits, do same +# query 10 times check cache hits. +# Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), ... other built in # SQL functions +# Create test that uses locks. +# Create test using truncate. +############################################################## + +-- source include/have_ndb.inc + +--disable_warnings +DROP TABLE IF EXISTS test.t1; +DROP TABLE IF EXISTS test.t2; +DROP TABLE IF EXISTS test.t3; +--enable_warnings + +#### Copy data from table in one table space to table in different table space. 
#### +--echo ***** +--echo **** Copy data from table in one table space to table in different table space +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLESPACE ts2 + ADD DATAFILE './table_space2/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts2 STORAGE DISK ENGINE=NDB; + + SHOW CREATE TABLE test.t1; + SHOW CREATE TABLE test.t2; + + INSERT INTO test.t1 VALUES (1,'111111','aaaaaaaa'); + INSERT INTO test.t1 VALUES (2,'222222','bbbbbbbb'); + SELECT * FROM test.t1 ORDER BY a1; + INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1; + SELECT * FROM test.t2 ORDER BY a1; + + DROP TABLE test.t1, test.t2; + + # populate BLOB field with large data + +set @vc1 = repeat('a', 200); +set @vc2 = repeat('b', 500); +set @vc3 = repeat('c', 1000); +set @vc4 = repeat('d', 4000); + +# x0 size 256 +set @x0 = '01234567012345670123456701234567'; +set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0); + +# b1 length 2000+256 +set @b1 = 'b1'; +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@x0); +# d1 length 3000 +set @d1 = 'dd1'; +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); + +# b2 length 20000 +set @b2 = 'b2'; +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +# d2 length 30000 +set @d2 = 'dd2'; +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); + +select length(@x0),length(@b1),length(@d1) from dual; +select length(@x0),length(@b2),length(@d2) from dual; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) + TABLESPACE ts2 STORAGE DISK ENGINE=NDB; + + SHOW CREATE TABLE test.t1; + SHOW CREATE TABLE test.t2; + + INSERT INTO test.t1 VALUES (1,@vc1,@d1); + INSERT INTO test.t1 VALUES (2,@vc2,@b1); + INSERT INTO test.t1 VALUES (3,@vc3,@d2); + INSERT INTO test.t1 VALUES (4,@vc4,@b2); + + SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3) + FROM test.t1 WHERE a1=1; + SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3) + FROM test.t1 where a1=2; + + INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1; + SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3) + FROM test.t2 WHERE a1=1; + SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3) + FROM test.t2 where a1=2; + + + DROP TABLE test.t1, test.t2; + + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; + DROP 
TABLESPACE ts1 ENGINE NDB; + + ALTER TABLESPACE ts2 + DROP DATAFILE './table_space2/datafile.dat' + ENGINE NDB; + DROP TABLESPACE ts2 ENGINE NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Insert, Update, Delete from NDB table with BLOB fields #### +--echo ***** +--echo **** Insert, Update, Delete from NDB table with BLOB fields +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + +set @vc1 = repeat('a', 200); +set @vc2 = repeat('b', 500); +set @vc3 = repeat('c', 1000); +set @vc4 = repeat('d', 4000); +set @vc5 = repeat('d', 5000); + +set @bb1 = repeat('1', 2000); +set @bb2 = repeat('2', 5000); +set @bb3 = repeat('3', 10000); +set @bb4 = repeat('4', 40000); +set @bb5 = repeat('5', 50000); + +select length(@vc1),length(@vc2),length(@vc3),length(@vc4),length(@vc5) from dual; +select length(@bb1),length(@bb2),length(@bb3),length(@bb4),length(@bb5) from dual; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +# CREATE TABLE test.t2 (a1 int NOT NULL, a2 VARCHAR(5000), a3 BLOB) +# TABLESPACE ts2 STORAGE DISK ENGINE=NDB; + + INSERT INTO test.t1 VALUES (1,@vc1,@bb1); + INSERT INTO test.t1 VALUES (2,@vc2,@bb2); + INSERT INTO test.t1 VALUES (3,@vc3,@bb3); + INSERT INTO test.t1 VALUES (4,@vc4,@bb4); + INSERT INTO test.t1 VALUES (5,@vc5,@bb5); + + UPDATE test.t1 SET a2=@vc5, a3=@bb5 WHERE a1=1; + SELECT a1,length(a2),substr(a2,4998,2),length(a3),substr(a3,49997,3) + FROM test.t1 WHERE a1=1; + + UPDATE test.t1 SET a2=@vc4, a3=@bb4 WHERE a1=2; + SELECT a1,length(a2),substr(a2,3998,2),length(a3),substr(a3,39997,3) + FROM test.t1 WHERE a1=2; + + UPDATE test.t1 SET a2=@vc2, a3=@bb2 WHERE a1=3; + SELECT a1,length(a2),substr(a2,498,2),length(a3),substr(a3,3997,3) + FROM test.t1 WHERE a1=3; + + UPDATE test.t1 SET a2=@vc3, a3=@bb3 WHERE a1=4; + SELECT a1,length(a2),substr(a2,998,2),length(a3),substr(a3,9997,3) + FROM test.t1 WHERE a1=4; + + UPDATE test.t1 SET a2=@vc1, a3=@bb1 WHERE a1=5; + SELECT a1,length(a2),substr(a2,198,2),length(a3),substr(a3,1997,3) + FROM test.t1 WHERE a1=5; + + DELETE FROM test.t1 where a1=5; + SELECT count(*) from test.t1; + DELETE FROM test.t1 where a1=4; + SELECT count(*) from test.t1; + DELETE FROM test.t1 where a1=3; + SELECT count(*) from test.t1; + DELETE FROM test.t1 where a1=2; + SELECT count(*) from test.t1; + DELETE FROM test.t1 where a1=1; + SELECT count(*) from test.t1; + + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; + DROP TABLESPACE ts1 ENGINE NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +##### Create Stored procedures that use disk based tables ##### +--echo ***** +--echo **** Create Stored procedures that use disk based tables +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + +delimiter //; + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB// + CREATE PROCEDURE test.sp1() + BEGIN + INSERT INTO test.t1 values (1,'111111','aaaaaaaa'); + END// +delimiter ;// + + CALL test.sp1(); + SELECT * FROM test.t1; + +delimiter //; + CREATE PROCEDURE 
test.sp2(n INT, vc VARCHAR(256), blb BLOB) + BEGIN + UPDATE test.t1 SET a2=vc, a3=blb where a1=n; + END// +delimiter ;// + + CALL test.sp2(1,'222222','bbbbbbbb'); + SELECT * FROM test.t1; + + DELETE FROM test.t1; + DROP PROCEDURE test.sp1; + DROP PROCEDURE test.sp2; + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create functions that operate on disk based tables #### +--echo ***** +--echo ***** Create functions that operate on disk based tables +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + let $1=100; + disable_query_log; + while ($1) + { + eval insert into test.t1 values($1, "aaaaa$1", "bbbbb$1"); + dec $1; + } + enable_query_log; + + delimiter //; + CREATE FUNCTION test.fn1(n INT) RETURNS INT + BEGIN + DECLARE v INT; + SELECT a1 INTO v FROM test.t1 WHERE a1=n; + RETURN v; + END// + delimiter ;// + +delimiter //; + CREATE FUNCTION test.fn2(n INT, blb BLOB) RETURNS BLOB + BEGIN + DECLARE vv BLOB; + UPDATE test.t1 SET a3=blb where a1=n; + SELECT a3 INTO vv FROM test.t1 WHERE a1=n; + RETURN vv; + END// + delimiter ;// + + SELECT test.fn1(10) FROM DUAL; + SELECT test.fn2(50, 'new BLOB content') FROM DUAL; + + DELETE FROM test.t1; + DROP FUNCTION test.fn1; + DROP FUNCTION test.fn2; + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create triggers that operate on disk based tables #### +--echo ***** +--echo ***** Create triggers that operate on disk based tables +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + + delimiter //; + CREATE TRIGGER test.trg1 BEFORE INSERT ON test.t1 FOR EACH ROW + BEGIN + if isnull(new.a2) then + set new.a2:= 'trg1 works on a2 field'; + end if; + if isnull(new.a3) then + set new.a3:= 'trg1 works on a3 field'; + end if; + end// + insert into test.t1 (a1) values (1)// + insert into test.t1 (a1,a2) values (2, 'ccccccc')// + select * from test.t1 order by a1// + delimiter ;// + + DELETE FROM test.t1; + DROP TRIGGER test.trg1; + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create, update views that operate on disk based tables #### +--echo ***** +--echo ***** Create, update views that operate on disk based tables +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts1 STORAGE DISK 
ENGINE=NDB; + let $1=10; + disable_query_log; + while ($1) + { + eval insert into test.t1 values($1, "aaaaa$1", "bbbbb$1"); + dec $1; + } + enable_query_log; + CREATE VIEW test.v1 AS SELECT * FROM test.t1; + SELECT * FROM test.v1 order by a1; + CHECK TABLE test.v1, test.t1; + + UPDATE test.v1 SET a2='zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' WHERE a1=5; + SELECT * FROM test.v1 order by a1; + + DROP VIEW test.v1; + DELETE FROM test.t1; + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create and use disk based table that uses auto inc #### +--echo ***** +--echo ***** Create and use disk based table that uses auto inc +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + let $1=10; + disable_query_log; + while ($1) + { + eval insert into test.t1 values(NULL, "aaaaa$1", "bbbbb$1"); + dec $1; + } + enable_query_log; + SELECT * FROM test.t1 ORDER BY a1; + DELETE FROM test.t1; + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create test that uses transactions (commit, rollback) #### +--echo ***** +--echo ***** Create test that uses transactions (commit, rollback) +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + SET AUTOCOMMIT=0; + CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + + INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); + COMMIT; + SELECT * FROM test.t1 ORDER BY a1; + INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); + ROLLBACK; + SELECT * FROM test.t1 ORDER BY a1; + + DELETE FROM test.t1; + DROP TABLE test.t1; + SET AUTOCOMMIT=1; + +# Now do the same thing with START TRANSACTION without using AUTOCOMMIT. 
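+# (START TRANSACTION temporarily suspends autocommit for the statements that
+# follow, so with AUTOCOMMIT=1 the COMMIT and ROLLBACK below are expected to
+# behave exactly as in the AUTOCOMMIT=0 block above: the first insert is kept,
+# the second one is rolled back.)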
+ + CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + + START TRANSACTION; + INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); + COMMIT; + SELECT * FROM test.t1 ORDER BY a1; + + START TRANSACTION; + INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); + ROLLBACK; + SELECT * FROM test.t1 ORDER BY a1; + + DELETE FROM test.t1; + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create test that uses locks #### +--echo ***** +--echo ***** Create test that uses locks +--echo ***** + + connect (con1,localhost,root,,); + connect (con2,localhost,root,,); + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + +# connection con1; +--disable_warnings + drop table if exists test.t1; + CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +--enable_warnings + + LOCK TABLES test.t1 write; + INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); + INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); + SELECT * FROM test.t1 ORDER BY a1; + + connection con2; + SELECT * FROM test.t1 ORDER BY a1; + INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3"); + + connection con1; + UNLOCK TABLES; + + connection con2; + INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3"); + SELECT * FROM test.t1 ORDER BY a1; + DELETE FROM test.t1; + DROP TABLE test.t1; + + #connection default; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create large disk based table, do random queries, check cache hits #### +--echo ***** +--echo ***** Create large disk based table, do random queries, check cache hits +--echo ***** + +set @vc1 = repeat('a', 200); +SELECT @vc1 FROM DUAL; +set @vc2 = repeat('b', 500); +set @vc3 = repeat('b', 998); + +# x0 size 256 +set @x0 = '01234567012345670123456701234567'; +set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0); + +# b1 length 2000+256 (blob part aligned) +set @b1 = 'b1'; +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@x0); +# d1 length 3000 +set @d1 = 'dd1'; +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); + +# b2 length 20000 +set @b2 = 'b2'; +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +# d2 length 30000 +set @d2 = 'dd2'; +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); + +select length(@x0),length(@b1),length(@d1) from dual; +select length(@x0),length(@b2),length(@d2) from dual; + + CREATE LOGFILE GROUP lg + ADD UNDOFILE 
'./lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(1000), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + + INSERT INTO test.t1 values(1,@vc1,@d1); + INSERT INTO test.t1 values(2,@vc2,@d2); + explain SELECT * from test.t1 WHERE a1 = 1; + + SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3) + FROM test.t1 WHERE a1=1 ORDER BY a1; + SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3) + FROM test.t1 where a1=2 ORDER BY a1; + + UPDATE test.t1 set a2=@vc2,a3=@d2 where a1=1; + UPDATE test.t1 set a2=@vc1,a3=@d1 where a1=2; + + SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3) + FROM test.t1 where a1=1; + SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3) + FROM test.t1 where a1=2; + + #SHOW VARIABLES LIKE 'have_query_cache'; + #SHOW STATUS LIKE 'Qcache%'; + + DELETE FROM test.t1; + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE #### +--echo ***** +--echo ***** Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB, a4 DATE, a5 CHAR(250)) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + let $1=100; + disable_query_log; + while ($1) + { + eval insert into test.t1 values($1, "aaaaaaaaaaaaaaaa$1", "bbbbbbbbbbbbbbbbbb$1", '2006-06-20' , USER()); + dec $1; + } + enable_query_log; + + SELECT COUNT(*) from test.t1; + SELECT SUM(a1) from test.t1; + SELECT MIN(a1) from test.t1; + SELECT MAX(a1) from test.t1; + SELECT a5 from test.t1 where a1=50; + + + SELECT * from test.t1 order by a1; + + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + + +#End 5.1 test case + diff --git a/mysql-test/t/ndb_dd_backuprestore.test b/mysql-test/t/ndb_dd_backuprestore.test index be6d73e27b4..48db8ec3e0b 100644 --- a/mysql-test/t/ndb_dd_backuprestore.test +++ b/mysql-test/t/ndb_dd_backuprestore.test @@ -5,6 +5,12 @@ ######################################## -- source include/have_ndb.inc +-- source include/ndb_default_cluster.inc +-- source include/not_embedded.inc + +--disable_query_log +set new=on; +--enable_query_log --disable_warnings DROP TABLE IF EXISTS test.t1; diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test index d2dc0561955..36018e6c679 100644 --- a/mysql-test/t/ndb_multi.test +++ b/mysql-test/t/ndb_multi.test @@ -3,7 +3,6 @@ -- source include/not_embedded.inc -- source include/have_binlog_format_statement.inc - --disable_warnings connection server2; drop table if exists t1, t2, t3, t4; diff --git a/mysql-test/t/ndb_partition_error.test b/mysql-test/t/ndb_partition_error.test index 06581f1270f..9db2a6a6f6d 100644 --- a/mysql-test/t/ndb_partition_error.test +++ 
b/mysql-test/t/ndb_partition_error.test @@ -10,6 +10,9 @@ drop table if exists t1; --enable_warnings +--disable_query_log +set new=on; +--enable_query_log # # Partition by range, generate node group error # diff --git a/mysql-test/t/ndb_partition_list.test b/mysql-test/t/ndb_partition_list.test index 2ad37b8768c..ccfcdbc84f4 100644 --- a/mysql-test/t/ndb_partition_list.test +++ b/mysql-test/t/ndb_partition_list.test @@ -5,6 +5,10 @@ # #-- source include/have_partition.inc +--disable_query_log +set new=on; +--enable_query_log + --disable_warnings drop table if exists t1; --enable_warnings diff --git a/mysql-test/t/ndb_partition_range.test b/mysql-test/t/ndb_partition_range.test index 981467d4055..7952ba502d2 100644 --- a/mysql-test/t/ndb_partition_range.test +++ b/mysql-test/t/ndb_partition_range.test @@ -6,6 +6,10 @@ # #-- source include/have_partition.inc +--disable_query_log +set new=on; +--enable_query_log + --disable_warnings drop table if exists t1; --enable_warnings diff --git a/mysql-test/t/ndb_restore.test b/mysql-test/t/ndb_restore.test index f11324492c2..9030dfbe304 100644 --- a/mysql-test/t/ndb_restore.test +++ b/mysql-test/t/ndb_restore.test @@ -4,8 +4,8 @@ --disable_warnings use test; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; --enable_warnings CREATE TABLE `t1_c` ( @@ -132,6 +132,13 @@ CREATE TABLE `t9_c` ( ) ENGINE=ndbcluster DEFAULT CHARSET=latin1; INSERT INTO `t9_c` VALUES ('3g4jh8gar2t','joe','q3.net','elredun.com','q3.net','436643316120','436643316939','91341234568968','695595699','1.1.1.1','2.2.6.2','3','86989','34','x','x','2012-03-12 18:35:04','2012-12-05 12:35:04',3123123,9569,6565,1),('4tt45345235','pap','q3plus.qt','q3plus.qt','q3.net','436643316120','436643316939','8956234534568968','5254595969','1.1.1.1','8.6.2.2','4','86989','34','x','x','2012-03-12 12:55:34','2012-12-05 11:20:04',3223433,3369,9565,2),('4545435545','john','q3.net','q3.net','acne.li','436643316120','436643316939','45345234568968','995696699','1.1.1.1','2.9.9.2','2','86998','34','x','x','2012-03-12 11:35:03','2012-12-05 08:50:04',8823123,169,3565,3); +# Bug #20820 +# auto inc table not handled correctly when restored from cluster backup +# - before the fix ndb_restore would not set the auto inc value correctly, +# as seen by the select below +CREATE TABLE t10_c (a INT AUTO_INCREMENT KEY) ENGINE=ndbcluster DEFAULT CHARSET=latin1; +INSERT INTO t10_c VALUES (1),(2),(3); + create table t1 engine=myisam as select * from t1_c; create table t2 engine=myisam as select * from t2_c; create table t3 engine=myisam as select * from t3_c; @@ -141,10 +148,11 @@ create table t6 engine=myisam as select * from t6_c; create table t7 engine=myisam as select * from t7_c; create table t8 engine=myisam as select * from t8_c; create table t9 engine=myisam as select * from t9_c; +create table t10 engine=myisam as select * from t10_c; --source include/ndb_backup.inc -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT @@ -205,33 +213,17 @@ select 
count(*) from (select * from t9 union select * from t9_c) a; +# Bug #20820 cont'd +select * from t10_c order by a; + # # Try Partitioned tables as well # -ALTER TABLE t1_c -PARTITION BY RANGE (`capgoaledatta`) -(PARTITION p0 VALUES LESS THAN MAXVALUE); - -ALTER TABLE t2_c -PARTITION BY LIST(`capgotod`) -(PARTITION p0 VALUES IN (0,1,2,3,4,5,6)); - -ALTER TABLE t3_c -PARTITION BY HASH (`CapGoaledatta`); - -ALTER TABLE t5_c -PARTITION BY HASH (`capfa`) -PARTITIONS 4; - -ALTER TABLE t6_c -PARTITION BY LINEAR HASH (`relatta`) -PARTITIONS 4; - ALTER TABLE t7_c PARTITION BY LINEAR KEY (`dardtestard`); --source include/ndb_backup.inc -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT @@ -289,7 +281,7 @@ select count(*) from (select * from t9 union select * from t9_c) a; -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --ndb-nodegroup_map '(0,0)' --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT @@ -353,7 +345,7 @@ select count(*) # guaranteed to be from t2_c, this since order of tables in backup # is non-deterministic # -drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; --source include/ndb_backup.inc --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --core=0 -b $the_backup_id -n 1 -m -r --ndb-nodegroup_map '(0,1)' $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id 2>&1 | grep Translate || true @@ -362,7 +354,7 @@ drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; # --disable_warnings -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; drop table if exists t2_c; --enable_warnings @@ -372,4 +364,4 @@ drop table if exists t2_c; --exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults -d sys -D , SYSTAB_0 | grep 520093696, | sed "s/,$the_backup_id/,<the_backup_id>/" -# End of 4.1 tests +# End of 5.0 tests (4.1 tests intermixed to save test time) diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test index 4a336962293..2313f179a48 100644 --- a/mysql-test/t/ps.test +++ b/mysql-test/t/ps.test @@ -1146,4 +1146,122 @@ execute stmt; execute stmt; deallocate prepare stmt; drop table t1, t2, t3; + +# +# Bug#17199 "Table not found" error occurs if the query contains a call +# to a function from another database. +# Test prepared statements-related behaviour. +# +# +# ALTER TABLE RENAME and Prepared Statements: wrong DB name buffer was used +# in ALTER ... RENAME which caused memory corruption in prepared statements. +# No need to fix this problem in 4.1 as ALTER TABLE is not allowed in +# Prepared Statements in 4.1. 
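+# An unqualified RENAME target resolves against the database that was current
+# at PREPARE time, so below the statement is prepared from each of the two
+# databases in turn and the table is looked up in both places after EXECUTE.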
+# +create database mysqltest_long_database_name_to_thrash_heap; +use test; +create table t1 (i int); +prepare stmt from "alter table test.t1 rename t1"; +use mysqltest_long_database_name_to_thrash_heap; +execute stmt; +show tables like 't1'; +prepare stmt from "alter table test.t1 rename t1"; +use test; +execute stmt; +show tables like 't1'; +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +deallocate prepare stmt; +# +# Check that a prepared statement initializes its current database at +# PREPARE, and then works correctly even if the current database has been +# changed. +# +use mysqltest_long_database_name_to_thrash_heap; +# Necessary for preparation of INSERT/UPDATE/DELETE to succeed +prepare stmt_create from "create table t1 (i int)"; +prepare stmt_insert from "insert into t1 (i) values (1)"; +prepare stmt_update from "update t1 set i=2"; +prepare stmt_delete from "delete from t1 where i=2"; +prepare stmt_select from "select * from t1"; +prepare stmt_alter from "alter table t1 add column (b int)"; +prepare stmt_alter1 from "alter table t1 drop column b"; +prepare stmt_analyze from "analyze table t1"; +prepare stmt_optimize from "optimize table t1"; +prepare stmt_show from "show tables like 't1'"; +prepare stmt_truncate from "truncate table t1"; +prepare stmt_drop from "drop table t1"; +# Drop the table that was used to prepare INSERT/UPDATE/DELETE: we will +# create a new one by executing stmt_create +drop table t1; +# Switch the current database +use test; +# Check that all prepared statements operate on the database that was +# active at PREPARE +execute stmt_create; +# should return empty set +show tables like 't1'; +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +use test; +execute stmt_insert; +select * from mysqltest_long_database_name_to_thrash_heap.t1; +execute stmt_update; +select * from mysqltest_long_database_name_to_thrash_heap.t1; +execute stmt_delete; +execute stmt_select; +execute stmt_alter; +show columns from mysqltest_long_database_name_to_thrash_heap.t1; +execute stmt_alter1; +show columns from mysqltest_long_database_name_to_thrash_heap.t1; +execute stmt_analyze; +execute stmt_optimize; +execute stmt_show; +execute stmt_truncate; +execute stmt_drop; +show tables like 't1'; +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +# +# Attempt a statement PREPARE when there is no current database: +# is expected to return an error. +# +drop database mysqltest_long_database_name_to_thrash_heap; +--error ER_NO_DB_ERROR +prepare stmt_create from "create table t1 (i int)"; +--error ER_NO_DB_ERROR +prepare stmt_insert from "insert into t1 (i) values (1)"; +--error ER_NO_DB_ERROR +prepare stmt_update from "update t1 set i=2"; +--error ER_NO_DB_ERROR +prepare stmt_delete from "delete from t1 where i=2"; +--error ER_NO_DB_ERROR +prepare stmt_select from "select * from t1"; +--error ER_NO_DB_ERROR +prepare stmt_alter from "alter table t1 add column (b int)"; +--error ER_NO_DB_ERROR +prepare stmt_alter1 from "alter table t1 drop column b"; +--error ER_NO_DB_ERROR +prepare stmt_analyze from "analyze table t1"; +--error ER_NO_DB_ERROR +prepare stmt_optimize from "optimize table t1"; +--error ER_NO_DB_ERROR +prepare stmt_show from "show tables like 't1'"; +--error ER_NO_DB_ERROR +prepare stmt_truncate from "truncate table t1"; +--error ER_NO_DB_ERROR +prepare stmt_drop from "drop table t1"; +# +# The above has automatically deallocated all our statements. 
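+# (A PREPARE that reuses an existing statement name implicitly deallocates the
+# old statement first, and the old statement stays gone even when the new
+# PREPARE itself fails with an error, so no explicit DEALLOCATE is needed.)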
+# +# Attempt to CREATE a temporary table when no DB used: it should fail +# This proves that no table can be used without explicit specification of +# its database if there is no current database. +# +--error ER_NO_DB_ERROR +create temporary table t1 (i int); +# +# Restore the old environment +# +use test; # End of 5.0 tests diff --git a/mysql-test/t/rpl_ndb_dd_advance.test b/mysql-test/t/rpl_ndb_dd_advance.test index 80ff533ec5b..30d5deb47ad 100644 --- a/mysql-test/t/rpl_ndb_dd_advance.test +++ b/mysql-test/t/rpl_ndb_dd_advance.test @@ -7,6 +7,8 @@ #### Include Section #### --source include/have_ndb.inc --source include/have_binlog_format_row.inc +--source include/ndb_default_cluster.inc +--source include/not_embedded.inc #--source include/have_ndb_extra.inc --source include/master-slave.inc diff --git a/mysql-test/t/rpl_ndb_sync.test b/mysql-test/t/rpl_ndb_sync.test index 95f56609ed7..20d4f5707f8 100644 --- a/mysql-test/t/rpl_ndb_sync.test +++ b/mysql-test/t/rpl_ndb_sync.test @@ -1,4 +1,6 @@ --source include/have_ndb.inc +--source include/ndb_default_cluster.inc +--source include/not_embedded.inc --source include/have_binlog_format_row.inc --source include/master-slave.inc diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test index 201d362f2da..99f3bbbbd14 100644 --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@ -5877,6 +5877,52 @@ DROP PROCEDURE bug18037_p1| DROP PROCEDURE bug18037_p2| # +# Bug#17199: "Table not found" error occurs if the query contains a call +# to a function from another database. +# See also ps.test for an additional test case for this bug. +# +use test| +create table t3 (i int)| +insert into t3 values (1), (2)| +create database mysqltest1| +use mysqltest1| +create function bug17199() returns varchar(2) deterministic return 'ok'| +use test| +select *, mysqltest1.bug17199() from t3| +# +# Bug#18444: Fully qualified stored function names don't work correctly +# in select statements +# +use mysqltest1| +create function bug18444(i int) returns int no sql deterministic return i + 1| +use test| +select mysqltest1.bug18444(i) from t3| +drop database mysqltest1| +# +# Check that current database has no influence on a stored procedure +# +create database mysqltest1 charset=utf8| +create database mysqltest2 charset=utf8| +create procedure mysqltest1.p1() +begin +-- alters the default collation of the procedure's database + alter database character set koi8r; +end| +use mysqltest1| +call p1()| +show create database mysqltest1| +show create database mysqltest2| +alter database mysqltest1 character set utf8| +use mysqltest2| +call mysqltest1.p1()| +show create database mysqltest1| +show create database mysqltest2| +drop database mysqltest1| +drop database mysqltest2| +# +# Restore the old environment +use test| +# # Bug#15217 "Using a SP cursor on a table created with PREPARE fails with # weird error". 
Check that the code that is supposed to work at # the first execution of a stored procedure actually works for diff --git a/mysql-test/t/wait_timeout.test b/mysql-test/t/wait_timeout.test index fef77f7cdc7..dbd792e48d8 100644 --- a/mysql-test/t/wait_timeout.test +++ b/mysql-test/t/wait_timeout.test @@ -9,16 +9,20 @@ # Connect with another connection and reset counters --disable_query_log connect (wait_con,localhost,root,,test,,); -flush status; # Reset counters connection wait_con; set session wait_timeout=100; let $retries=300; -let $aborted_clients = `SHOW STATUS LIKE 'aborted_clients'`; set @aborted_clients= 0; --enable_query_log # Disable reconnect and do the query connection default; +# If slow host (Valgrind...), we may have already timed out here. +# So force a reconnect if necessary, using a dummy query. And issue a +# 'flush status' to reset the 'aborted_clients' counter. +--enable_reconnect +select 0; +flush status; --disable_reconnect select 1; @@ -49,6 +53,9 @@ connection default; select 2; --enable_reconnect select 3; +# Disconnect so that we will not be confused by a future abort from this +# connection. +disconnect default; + # # Do the same test as above on a TCP connection @@ -59,7 +66,6 @@ select 3; connection wait_con; flush status; # Reset counters let $retries=300; -let $aborted_clients = `SHOW STATUS LIKE 'aborted_clients'`; set @aborted_clients= 0; --enable_query_log diff --git a/mysys/my_malloc.c b/mysys/my_malloc.c index 3fb3866f79c..9dd5530bd28 100644 --- a/mysys/my_malloc.c +++ b/mysys/my_malloc.c @@ -83,7 +83,7 @@ char *my_strdup(const char *from, myf my_flags) } -char *my_strndup(const byte *from, uint length, myf my_flags) +char *my_strndup(const char *from, uint length, myf my_flags) { gptr ptr; if ((ptr=my_malloc(length+1,my_flags)) != 0) diff --git a/mysys/safemalloc.c b/mysys/safemalloc.c index e40fd751037..b431667063a 100644 --- a/mysys/safemalloc.c +++ b/mysys/safemalloc.c @@ -525,9 +525,8 @@ char *_my_strdup(const char *from, const char *filename, uint lineno, } /* _my_strdup */ -char *_my_strndup(const byte *from, uint length, - const char *filename, uint lineno, - myf MyFlags) +char *_my_strndup(const char *from, uint length, const char *filename, + uint lineno, myf MyFlags) { gptr ptr; if ((ptr=_mymalloc(length+1,filename,lineno,MyFlags)) != 0) diff --git a/mysys/thr_lock.c b/mysys/thr_lock.c index 74d6f7431a8..36cb83ae754 100644 --- a/mysys/thr_lock.c +++ b/mysys/thr_lock.c @@ -204,6 +204,8 @@ static void check_locks(THR_LOCK *lock, const char *where, { if ((int) data->type == (int) TL_READ_NO_INSERT) count++; + /* Protect against infinite loop. 
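If the lock lists are corrupted, the running count of TL_READ_NO_INSERT locks can exceed the total recorded in read_no_write_count; the assertion below stops such a scan in debug builds instead of letting it loop forever.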
*/ + DBUG_ASSERT(count <= lock->read_no_write_count); } if (count != lock->read_no_write_count) { diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh index 1b071a294ed..58799880769 100644 --- a/scripts/make_binary_distribution.sh +++ b/scripts/make_binary_distribution.sh @@ -138,7 +138,7 @@ BIN_FILES="extra/comp_err$BS extra/replace$BS extra/perror$BS \ client/mysqlslap$BS \ client/mysqldump$BS client/mysqlimport$BS \ client/mysqltest$BS client/mysqlcheck$BS \ - client/mysqlbinlog$BS \ + client/mysqlbinlog$BS client/mysql_upgrade$BS \ tests/mysql_client_test$BS \ libmysqld/examples/mysql_client_test_embedded$BS \ libmysqld/examples/mysqltest_embedded$BS \ @@ -181,11 +181,21 @@ if [ $BASE_SYSTEM = "netware" ] ; then fi copyfileto $BASE/lib \ - libmysql/.libs/libmysqlclient.a libmysql/.libs/libmysqlclient.so* \ - libmysql/libmysqlclient.* libmysql_r/.libs/libmysqlclient_r.a \ - libmysql_r/.libs/libmysqlclient_r.so* libmysql_r/libmysqlclient_r.* \ + libmysql/.libs/libmysqlclient.a \ + libmysql/.libs/libmysqlclient.so* \ + libmysql/.libs/libmysqlclient.sl* \ + libmysql/.libs/libmysqlclient*.dylib \ + libmysql/libmysqlclient.* \ + libmysql_r/.libs/libmysqlclient_r.a \ + libmysql_r/.libs/libmysqlclient_r.so* \ + libmysql_r/.libs/libmysqlclient_r.sl* \ + libmysql_r/.libs/libmysqlclient_r*.dylib \ + libmysql_r/libmysqlclient_r.* \ + libmysqld/.libs/libmysqld.a \ + libmysqld/.libs/libmysqld.so* \ + libmysqld/.libs/libmysqld.sl* \ + libmysqld/.libs/libmysqld*.dylib \ mysys/libmysys.a strings/libmystrings.a dbug/libdbug.a \ - libmysqld/.libs/libmysqld.a libmysqld/.libs/libmysqld.so* \ libmysqld/libmysqld.a netware/libmysql.imp \ zlib/.libs/libz.a diff --git a/server-tools/instance-manager/parse.h b/server-tools/instance-manager/parse.h index ae29c7eb64a..fd970f54d29 100644 --- a/server-tools/instance-manager/parse.h +++ b/server-tools/instance-manager/parse.h @@ -69,7 +69,7 @@ private: inline char *Named_value::alloc_str(const LEX_STRING *str) { - return my_strndup((const byte *) str->str, str->length, MYF(0)); + return my_strndup(str->str, str->length, MYF(0)); } inline char *Named_value::alloc_str(const char *str) diff --git a/sql-common/client.c b/sql-common/client.c index 08d87f9d083..160f6eb2602 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -125,6 +125,8 @@ static void mysql_close_free(MYSQL *mysql); static int wait_for_data(my_socket fd, uint timeout); #endif +CHARSET_INFO *default_client_charset_info = &my_charset_latin1; + /**************************************************************************** A modified version of connect(). my_connect() allows you to specify @@ -1426,7 +1428,7 @@ mysql_init(MYSQL *mysql) bzero((char*) (mysql), sizeof(*(mysql))); mysql->options.connect_timeout= CONNECT_TIMEOUT; mysql->last_used_con= mysql->next_slave= mysql->master = mysql; - mysql->charset=default_charset_info; + mysql->charset=default_client_charset_info; strmov(mysql->net.sqlstate, not_error_sqlstate); /* By default, we are a replication pivot. 
The caller must reset it @@ -1655,6 +1657,50 @@ static MYSQL_METHODS client_methods= #endif }; +C_MODE_START +int mysql_init_character_set(MYSQL *mysql) +{ + NET *net= &mysql->net; + /* Set character set */ + if (!mysql->options.charset_name && + !(mysql->options.charset_name= + my_strdup(MYSQL_DEFAULT_CHARSET_NAME,MYF(MY_WME)))) + return 1; + + { + const char *save= charsets_dir; + if (mysql->options.charset_dir) + charsets_dir=mysql->options.charset_dir; + mysql->charset=get_charset_by_csname(mysql->options.charset_name, + MY_CS_PRIMARY, MYF(MY_WME)); + charsets_dir= save; + } + + if (!mysql->charset) + { + net->last_errno=CR_CANT_READ_CHARSET; + strmov(net->sqlstate, unknown_sqlstate); + if (mysql->options.charset_dir) + my_snprintf(net->last_error, sizeof(net->last_error)-1, + ER(net->last_errno), + mysql->options.charset_name, + mysql->options.charset_dir); + else + { + char cs_dir_name[FN_REFLEN]; + get_charsets_dir(cs_dir_name); + my_snprintf(net->last_error, sizeof(net->last_error)-1, + ER(net->last_errno), + mysql->options.charset_name, + cs_dir_name); + } + return 1; + } + return 0; +} +C_MODE_END + + MYSQL * CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, const char *passwd, const char *db, @@ -1992,42 +2038,8 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, goto error; } - /* Set character set */ - if (!mysql->options.charset_name && - !(mysql->options.charset_name= - my_strdup(MYSQL_DEFAULT_CHARSET_NAME,MYF(MY_WME)))) - goto error; - - { - const char *save= charsets_dir; - if (mysql->options.charset_dir) - charsets_dir=mysql->options.charset_dir; - mysql->charset=get_charset_by_csname(mysql->options.charset_name, - MY_CS_PRIMARY, MYF(MY_WME)); - charsets_dir= save; - } - - if (!mysql->charset) - { - net->last_errno=CR_CANT_READ_CHARSET; - strmov(net->sqlstate, unknown_sqlstate); - if (mysql->options.charset_dir) - my_snprintf(net->last_error, sizeof(net->last_error)-1, - ER(net->last_errno), - mysql->options.charset_name, - mysql->options.charset_dir); - else - { - char cs_dir_name[FN_REFLEN]; - get_charsets_dir(cs_dir_name); - my_snprintf(net->last_error, sizeof(net->last_error)-1, - ER(net->last_errno), - mysql->options.charset_name, - cs_dir_name); - } + if (mysql_init_character_set(mysql)) goto error; - } - /* Save connection information */ if (!my_multi_malloc(MYF(0), diff --git a/sql/event_timed.cc b/sql/event_timed.cc index 4ec875f32a3..98369e0e055 100644 --- a/sql/event_timed.cc +++ b/sql/event_timed.cc @@ -143,24 +143,13 @@ Event_timed::init_name(THD *thd, sp_name *spn) MEM_ROOT *root= thd->mem_root; /* We have to copy strings to get them into the right memroot */ - if (spn) - { - dbname.length= spn->m_db.length; - if (spn->m_db.length == 0) - dbname.str= NULL; - else - dbname.str= strmake_root(root, spn->m_db.str, spn->m_db.length); - name.length= spn->m_name.length; - name.str= strmake_root(root, spn->m_name.str, spn->m_name.length); + dbname.length= spn->m_db.length; + dbname.str= strmake_root(root, spn->m_db.str, spn->m_db.length); + name.length= spn->m_name.length; + name.str= strmake_root(root, spn->m_name.str, spn->m_name.length); - if (spn->m_qname.length == 0) - spn->init_qname(thd); - } - else if (thd->db) - { - dbname.length= thd->db_length; - dbname.str= strmake_root(root, thd->db, dbname.length); - } + if (spn->m_qname.length == 0) + spn->init_qname(thd); DBUG_PRINT("dbname", ("len=%d db=%s",dbname.length, dbname.str)); DBUG_PRINT("name", ("len=%d name=%s",name.length, name.str)); diff --git 
a/sql/events.cc b/sql/events.cc index d67c42326e3..210cc2c4735 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -598,8 +598,9 @@ db_create_event(THD *thd, Event_timed *et, my_bool create_if_not, int ret= 0; CHARSET_INFO *scs= system_charset_info; TABLE *table; - char olddb[128]; - bool dbchanged= false; + char old_db_buf[NAME_LEN+1]; + LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; + bool dbchanged= FALSE; DBUG_ENTER("db_create_event"); DBUG_PRINT("enter", ("name: %.*s", et->name.length, et->name.str)); @@ -626,8 +627,7 @@ db_create_event(THD *thd, Event_timed *et, my_bool create_if_not, } DBUG_PRINT("info", ("non-existant, go forward")); - if ((ret= sp_use_new_db(thd, et->dbname.str,olddb, sizeof(olddb),0, - &dbchanged))) + if ((ret= sp_use_new_db(thd, et->dbname, &old_db, 0, &dbchanged))) { my_error(ER_BAD_DB_ERROR, MYF(0)); goto err; @@ -691,14 +691,14 @@ db_create_event(THD *thd, Event_timed *et, my_bool create_if_not, *rows_affected= 1; ok: if (dbchanged) - (void) mysql_change_db(thd, olddb, 1); + (void) mysql_change_db(thd, old_db.str, 1); if (table) close_thread_tables(thd); DBUG_RETURN(EVEX_OK); err: if (dbchanged) - (void) mysql_change_db(thd, olddb, 1); + (void) mysql_change_db(thd, old_db.str, 1); if (table) close_thread_tables(thd); DBUG_RETURN(EVEX_GENERAL_ERROR); diff --git a/sql/field.cc b/sql/field.cc index 57279298cbd..d4bd38724ae 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1536,7 +1536,8 @@ bool Field::optimize_range(uint idx, uint part) } -Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table) +Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table, + bool keep_type __attribute__((unused))) { Field *tmp; if (!(tmp= (Field*) memdup_root(root,(char*) this,size_of()))) @@ -1561,7 +1562,7 @@ Field *Field::new_key_field(MEM_ROOT *root, struct st_table *new_table, uint new_null_bit) { Field *tmp; - if ((tmp= new_field(root, new_table))) + if ((tmp= new_field(root, new_table, table == new_table))) { tmp->ptr= new_ptr; tmp->null_ptr= new_null_ptr; @@ -6383,11 +6384,12 @@ uint Field_string::max_packed_col_length(uint max_length) } -Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table) +Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table, + bool keep_type) { Field *field; - if (type() != MYSQL_TYPE_VAR_STRING || table == new_table) - return Field::new_field(root, new_table); + if (type() != MYSQL_TYPE_VAR_STRING || keep_type) + return Field::new_field(root, new_table, keep_type); /* Old VARCHAR field which should be modified to a VARCHAR on copy @@ -6396,17 +6398,7 @@ Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table) */ if ((field= new Field_varstring(field_length, maybe_null(), field_name, new_table->s, charset()))) - { field->init(new_table); - /* - delayed_insert::get_local_table() needs a ptr copied from old table. - This is what other new_field() methods do too. The above method of - Field_varstring sets ptr to NULL. 
- */ - field->ptr= ptr; - field->null_ptr= null_ptr; - field->null_bit= null_bit; - } return field; } @@ -6908,9 +6900,11 @@ int Field_varstring::cmp_binary(const char *a_ptr, const char *b_ptr, } -Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table) +Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table, + bool keep_type) { - Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table); + Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table, + keep_type); if (res) res->length_bytes= length_bytes; return res; diff --git a/sql/field.h b/sql/field.h index b1221e34cc5..3a4118df01e 100644 --- a/sql/field.h +++ b/sql/field.h @@ -219,7 +219,8 @@ public: */ virtual bool can_be_compared_as_longlong() const { return FALSE; } virtual void free() {} - virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table); + virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table, + bool keep_type); virtual Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, char *new_ptr, uchar *new_null_ptr, uint new_null_bit); @@ -1045,7 +1046,7 @@ public: enum_field_types real_type() const { return FIELD_TYPE_STRING; } bool has_charset(void) const { return charset() == &my_charset_bin ? FALSE : TRUE; } - Field *new_field(MEM_ROOT *root, struct st_table *new_table); + Field *new_field(MEM_ROOT *root, struct st_table *new_table, bool keep_type); }; @@ -1118,7 +1119,7 @@ public: enum_field_types real_type() const { return MYSQL_TYPE_VARCHAR; } bool has_charset(void) const { return charset() == &my_charset_bin ? FALSE : TRUE; } - Field *new_field(MEM_ROOT *root, struct st_table *new_table); + Field *new_field(MEM_ROOT *root, struct st_table *new_table, bool keep_type); Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, char *new_ptr, uchar *new_null_ptr, uint new_null_bit); diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc index 91111a433dc..a44000be19c 100644 --- a/sql/ha_federated.cc +++ b/sql/ha_federated.cc @@ -629,10 +629,8 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, DBUG_PRINT("info", ("Length: %d", table->s->connect_string.length)); DBUG_PRINT("info", ("String: '%.*s'", table->s->connect_string.length, table->s->connect_string.str)); - share->scheme= my_strndup((const byte*)table->s-> - connect_string.str, - table->s->connect_string.length, - MYF(0)); + share->scheme= my_strndup(table->s->connect_string.str, + table->s->connect_string.length, MYF(0)); // Add a null for later termination of table name share->scheme[table->s->connect_string.length]= 0; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index bbeea2ca1ba..8b17dae9d7e 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4781,7 +4781,7 @@ int ha_ndbcluster::create(const char *name, expect it to be there. */ if (!ndbcluster_create_event(ndb, m_table, event_name.c_ptr(), share, - share && do_event_op /* push warning */)) + share && do_event_op ? 2 : 1/* push warning */)) { if (ndb_extra_logging) sql_print_information("NDB Binlog: CREATE TABLE Event: %s", @@ -5175,7 +5175,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) const NDBTAB *ndbtab= ndbtab_g2.get_table(); if (!ndbcluster_create_event(ndb, ndbtab, event_name.c_ptr(), share, - share && ndb_binlog_running /* push warning */)) + share && ndb_binlog_running ? 
2 : 1/* push warning */)) { if (ndb_extra_logging) sql_print_information("NDB Binlog: RENAME Event: %s", @@ -7420,7 +7420,7 @@ ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab, do { - Uint64 rows, commits, mem; + Uint64 rows, commits, fixed_mem, var_mem; Uint32 size; Uint32 count= 0; Uint64 sum_rows= 0; @@ -7458,7 +7458,10 @@ ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab, pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&rows); pOp->getValue(NdbDictionary::Column::COMMIT_COUNT, (char*)&commits); pOp->getValue(NdbDictionary::Column::ROW_SIZE, (char*)&size); - pOp->getValue(NdbDictionary::Column::FRAGMENT_MEMORY, (char*)&mem); + pOp->getValue(NdbDictionary::Column::FRAGMENT_FIXED_MEMORY, + (char*)&fixed_mem); + pOp->getValue(NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY, + (char*)&var_mem); if (pTrans->execute(NdbTransaction::NoCommit, NdbTransaction::AbortOnError, @@ -7474,7 +7477,7 @@ ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab, sum_commits+= commits; if (sum_row_size < size) sum_row_size= size; - sum_mem+= mem; + sum_mem+= fixed_mem + var_mem; count++; } @@ -9866,7 +9869,6 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info, } else { -#ifdef NOT_YET if (!current_thd->variables.new_mode) { push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, @@ -9875,9 +9877,8 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info, ndbcluster_hton_name, "LIST, RANGE and HASH partition disabled by default," " use --new option to enable"); - return HA_ERR_UNSUPPORTED; + DBUG_RETURN(HA_ERR_UNSUPPORTED); } -#endif /* Create a shadow field for those tables that have user defined partitioning. This field stores the value of the partition diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index b88002b8529..8e9f0077dd0 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -1220,7 +1220,7 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share, type_str= "create table"; break; case SOT_ALTER_TABLE: - type_str= "create table"; + type_str= "alter table"; break; case SOT_DROP_DB: type_str= "drop db"; @@ -2500,7 +2500,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, /* failed, print a warning */ - if (push_warning) + if (push_warning > 1) push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), dict->getNdbError().code, @@ -2528,7 +2528,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, if (dict->getNdbError().code == NDB_INVALID_SCHEMA_OBJECT && dict->dropEvent(my_event.getName())) { - if (push_warning) + if (push_warning > 1) push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), dict->getNdbError().code, @@ -2547,7 +2547,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, */ if (dict->createEvent(my_event)) { - if (push_warning) + if (push_warning > 1) push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), dict->getNdbError().code, @@ -3442,17 +3442,31 @@ restart: // wait for the first event thd->proc_info= "Waiting for first event from ndbcluster"; DBUG_PRINT("info", ("Waiting for the first event")); - int schema_res= 0; - Uint64 schema_gci= 0; - while (schema_res == 0 && !abort_loop) + int schema_res, res; + Uint64 schema_gci; + do { + if (abort_loop) + goto err; schema_res= s_ndb->pollEvents(100, &schema_gci); + } while (ndb_latest_received_binlog_epoch == schema_gci); + if (ndb_binlog_running) + { + Uint64 gci= i_ndb->getLatestGCI(); + while (gci < 
schema_gci || gci == ndb_latest_received_binlog_epoch) + { + if (abort_loop) + goto err; + res= i_ndb->pollEvents(10, &gci); + } + if (gci > schema_gci) + { + schema_gci= gci; + } } // now check that we have epochs consistent with what we had before the restart DBUG_PRINT("info", ("schema_res: %d schema_gci: %d", schema_res, schema_gci)); - if (schema_res > 0) { - i_ndb->pollEvents(0); i_ndb->flushIncompleteEvents(schema_gci); s_ndb->flushIncompleteEvents(schema_gci); if (schema_gci < ndb_latest_handled_binlog_epoch) @@ -3466,6 +3480,17 @@ restart: ndb_latest_applied_binlog_epoch= 0; ndb_latest_received_binlog_epoch= 0; } + else if (ndb_latest_applied_binlog_epoch > 0) + { + sql_print_warning("NDB Binlog: cluster has reconnected. " + "Changes to the database that occurred while " + "disconnected will not be in the binlog"); + } + if (ndb_extra_logging) + { + sql_print_information("NDB Binlog: starting log at epoch %u", + (unsigned)schema_gci); + } } } { diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h index c45fb88a48a..1f64fdba609 100644 --- a/sql/item_geofunc.h +++ b/sql/item_geofunc.h @@ -32,6 +32,7 @@ public: Item_geometry_func(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {} Item_geometry_func(List<Item> &list) :Item_str_func(list) {} void fix_length_and_dec(); + enum_field_types field_type() const { return MYSQL_TYPE_GEOMETRY; } }; class Item_func_geometry_from_text: public Item_geometry_func @@ -67,6 +68,7 @@ public: Item_func_as_wkb(Item *a): Item_geometry_func(a) {} const char *func_name() const { return "aswkb"; } String *val_str(String *); + enum_field_types field_type() const { return MYSQL_TYPE_BLOB; } }; class Item_func_geometry_type: public Item_str_func diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 4d16e7743b2..2b1fd96601d 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -1667,13 +1667,13 @@ String *Item_func_database::val_str(String *str) { DBUG_ASSERT(fixed == 1); THD *thd= current_thd; - if (!thd->db) + if (thd->db == NULL) { null_value= 1; return 0; } else - str->copy((const char*) thd->db,(uint) strlen(thd->db),system_charset_info); + str->copy(thd->db, thd->db_length, system_charset_info); return str; } diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index d38069e54da..dadedef4db9 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -27,6 +27,7 @@ /* TODO: Move month and days to language files */ +/* Day number for Dec 31st, 9999 */ #define MAX_DAY_NUMBER 3652424L static const char *month_names[]= @@ -408,7 +409,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, if (yearday > 0) { uint days= calc_daynr(l_time->year,1,1) + yearday - 1; - if (days <= 0 || days >= MAX_DAY_NUMBER) + if (days <= 0 || days > MAX_DAY_NUMBER) goto err; get_date_from_daynr(days,&l_time->year,&l_time->month,&l_time->day); } @@ -454,7 +455,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, (weekday - 1); } - if (days <= 0 || days >= MAX_DAY_NUMBER) + if (days <= 0 || days > MAX_DAY_NUMBER) goto err; get_date_from_daynr(days,&l_time->year,&l_time->month,&l_time->day); } @@ -1962,7 +1963,6 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) if (date_sub_interval) interval.neg = !interval.neg; - if (ltime->year < YY_MAGIC_BELOW) return (null_value=1); @@ -2450,7 +2450,7 @@ String *Item_func_makedate::val_str(String *str) days= calc_daynr(yearnr,1,1) + daynr - 1; /* Day number from year 0 to 9999-12-31 */ - if (days >= 0 && days < MAX_DAY_NUMBER) + if (days >= 0 && days <= MAX_DAY_NUMBER) { null_value=0; 
get_date_from_daynr(days,&l_time.year,&l_time.month,&l_time.day); diff --git a/sql/lock.cc b/sql/lock.cc index e5003325df6..8e75ea42f7d 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -935,7 +935,7 @@ void unlock_table_name(THD *thd, TABLE_LIST *table_list) if (table_list->table) { hash_delete(&open_cache, (byte*) table_list->table); - (void) pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); } } @@ -1037,9 +1037,9 @@ end: (default 0, which will unlock all tables) NOTES - One must have a lock on LOCK_open when calling this - This function will send a COND_refresh signal to inform other threads - that the name locks are removed + One must have a lock on LOCK_open when calling this. + This function will broadcast refresh signals to inform other threads + that the name locks are removed. RETURN 0 ok @@ -1054,7 +1054,7 @@ void unlock_table_names(THD *thd, TABLE_LIST *table_list, table != last_table; table= table->next_local) unlock_table_name(thd,table); - pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); DBUG_VOID_RETURN; } @@ -1344,3 +1344,39 @@ bool make_global_read_lock_block_commit(THD *thd) thd->exit_cond(old_message); // this unlocks LOCK_global_read_lock DBUG_RETURN(error); } + + +/* + Broadcast COND_refresh and COND_global_read_lock. + + SYNOPSIS + broadcast_refresh() + void No parameters. + + DESCRIPTION + Due to a bug in a threading library it could happen that a signal + did not reach its target. A condition for this was that the same + condition variable was used with different mutexes in + pthread_cond_wait(). Some time ago we changed LOCK_open to + LOCK_global_read_lock in global read lock handling. So COND_refresh + was used with LOCK_open and LOCK_global_read_lock. + + We have now also changed from COND_refresh to COND_global_read_lock + in global read lock handling. But now it is necessary to signal + both conditions at the same time. + + NOTE + When signalling COND_global_read_lock within the global read lock + handling, it is not necessary to also signal COND_refresh. + + RETURN + void +*/ + +void broadcast_refresh(void) +{ + VOID(pthread_cond_broadcast(&COND_refresh)); + VOID(pthread_cond_broadcast(&COND_global_read_lock)); +} + + diff --git a/sql/log_event.cc b/sql/log_event.cc index 7a59147b0dd..823fad1e8e2 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1928,9 +1928,10 @@ end: don't suffer from these assignments to 0 as DROP TEMPORARY TABLE uses the db.table syntax. 
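Note that the replacement code below uses thd->reset_db(NULL, 0), which clears db and db_length together, where the old code assigned the pointer and the length in separate statements.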
*/ - thd->db= thd->catalog= 0; // prevent db from being freed + thd->catalog= 0; + thd->reset_db(NULL, 0); // prevent db from being freed thd->query= 0; // just to be sure - thd->query_length= thd->db_length =0; + thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); close_thread_tables(thd); free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); @@ -2954,7 +2955,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, TABLE_LIST tables; bzero((char*) &tables,sizeof(tables)); - tables.db = thd->db; + tables.db= thd->strmake(thd->db, thd->db_length); tables.alias = tables.table_name = (char*) table_name; tables.lock_type = TL_WRITE; tables.updating= 1; @@ -3049,7 +3050,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, ex.skip_lines = skip_lines; List<Item> field_list; thd->main_lex.select_lex.context.resolve_in_table_list_only(&tables); - set_fields(thd->db, field_list, &thd->main_lex.select_lex.context); + set_fields(tables.db, field_list, &thd->main_lex.select_lex.context); thd->variables.pseudo_thread_id= thread_id; List<Item> set_fields; if (net) @@ -3096,11 +3097,12 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, error: thd->net.vio = 0; - char *save_db= thd->db; + const char *remember_db= thd->db; VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->db= thd->catalog= 0; + thd->catalog= 0; + thd->reset_db(NULL, 0); thd->query= 0; - thd->query_length= thd->db_length= 0; + thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); close_thread_tables(thd); if (thd->query_error) @@ -3117,7 +3119,7 @@ error: } slave_print_msg(ERROR_LEVEL, rli, sql_errno,"\ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'", - err, (char*)table_name, print_slave_db_safe(save_db)); + err, (char*)table_name, print_slave_db_safe(remember_db)); free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); return 1; } @@ -3127,7 +3129,7 @@ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'", { slave_print_msg(ERROR_LEVEL, rli, ER_UNKNOWN_ERROR, "\ Fatal error running LOAD DATA INFILE on table '%s'. 
Default database: '%s'", - (char*)table_name, print_slave_db_safe(save_db)); + (char*)table_name, print_slave_db_safe(remember_db)); return 1; } @@ -3202,8 +3204,7 @@ Rotate_log_event::Rotate_log_event(const char* new_log_ident_arg, llstr(pos_arg, buff), flags)); #endif if (flags & DUP_NAME) - new_log_ident= my_strndup((const byte*) new_log_ident_arg, - ident_len, MYF(MY_WME)); + new_log_ident= my_strndup(new_log_ident_arg, ident_len, MYF(MY_WME)); DBUG_VOID_RETURN; } #endif @@ -3226,9 +3227,7 @@ Rotate_log_event::Rotate_log_event(const char* buf, uint event_len, (header_size+post_header_len)); ident_offset = post_header_len; set_if_smaller(ident_len,FN_REFLEN-1); - new_log_ident= my_strndup((byte*) buf + ident_offset, - (uint) ident_len, - MYF(MY_WME)); + new_log_ident= my_strndup(buf + ident_offset, (uint) ident_len, MYF(MY_WME)); DBUG_VOID_RETURN; } @@ -5840,7 +5839,7 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli) if (memory == NULL) DBUG_RETURN(HA_ERR_OUT_OF_MEM); - uint32 dummy_len; + uint dummy_len; bzero(table_list, sizeof(*table_list)); table_list->db = db_mem; table_list->alias= table_list->table_name = tname_mem; diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index c0b453b7d69..7cff7d7531c 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1696,6 +1696,7 @@ void start_waiting_global_read_lock(THD *thd); bool make_global_read_lock_block_commit(THD *thd); bool set_protect_against_global_read_lock(void); void unset_protect_against_global_read_lock(void); +void broadcast_refresh(void); /* Lock based on name */ int lock_and_wait_for_table_name(THD *thd, TABLE_LIST *table_list); diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc index 143cd027b5f..c01b5189887 100644 --- a/sql/rpl_filter.cc +++ b/sql/rpl_filter.cc @@ -513,7 +513,7 @@ Rpl_filter::get_wild_ignore_table(String* str) const char* -Rpl_filter::get_rewrite_db(const char* db, uint32 *new_len) +Rpl_filter::get_rewrite_db(const char* db, uint *new_len) { if (rewrite_db.is_empty() || !db) return db; diff --git a/sql/rpl_filter.h b/sql/rpl_filter.h index 58d2b97c9c6..718fd401c56 100644 --- a/sql/rpl_filter.h +++ b/sql/rpl_filter.h @@ -70,7 +70,7 @@ public: void get_wild_do_table(String* str); void get_wild_ignore_table(String* str); - const char* get_rewrite_db(const char* db, uint32 *new_len); + const char* get_rewrite_db(const char* db, uint *new_len); I_List<i_string>* get_do_db(); I_List<i_string>* get_ignore_db(); diff --git a/sql/set_var.cc b/sql/set_var.cc index b0ecc7eccef..bb9ef4d453f 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -1097,7 +1097,7 @@ bool update_sys_var_str(sys_var_str *var_str, rw_lock_t *var_mutex, uint new_length= (var ? 
var->value->str_value.length() : 0); if (!old_value) old_value= (char*) ""; - if (!(res= my_strndup((byte*)old_value, new_length, MYF(0)))) + if (!(res= my_strndup(old_value, new_length, MYF(0)))) return 1; /* Replace the old value in such a way that any thread using @@ -2632,7 +2632,7 @@ bool update_sys_var_str_path(THD *thd, sys_var_str *var_str, old_value= make_default_log_name(buff, log_ext); str_length= strlen(old_value); } - if (!(res= my_strndup((byte*)old_value, str_length, MYF(MY_FAE+MY_WME)))) + if (!(res= my_strndup(old_value, str_length, MYF(MY_FAE+MY_WME)))) { result= 1; goto err; diff --git a/sql/set_var.h b/sql/set_var.h index d01ce833d14..b968b4c7840 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -1055,7 +1055,7 @@ public: uint name_length_arg, gptr data_arg) :name_length(name_length_arg), data(data_arg) { - name= my_strndup((byte*) name_arg, name_length, MYF(MY_WME)); + name= my_strndup(name_arg, name_length, MYF(MY_WME)); links->push_back(this); } inline bool cmp(const char *name_cmp, uint length) diff --git a/sql/slave.cc b/sql/slave.cc index d9895323b92..2bc3b1c4bfd 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1347,9 +1347,8 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, // save old db in case we are creating in a different database save_db = thd->db; save_db_length= thd->db_length; - thd->db = (char*)db; - DBUG_ASSERT(thd->db != 0); - thd->db_length= strlen(thd->db); + DBUG_ASSERT(db != 0); + thd->reset_db((char*)db, strlen(db)); mysql_parse(thd, thd->query, packet_len); // run create table thd->db = save_db; // leave things the way they were before thd->db_length= save_db_length; @@ -3511,8 +3510,9 @@ err: sql_print_information("Slave I/O thread exiting, read up to log '%s', position %s", IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff)); VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->query = thd->db = 0; // extra safety - thd->query_length= thd->db_length= 0; + thd->query= 0; // extra safety + thd->query_length= 0; + thd->reset_db(NULL, 0); VOID(pthread_mutex_unlock(&LOCK_thread_count)); if (mysql) { @@ -3760,8 +3760,10 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ should already have done these assignments (each event which sets these variables is supposed to set them to 0 before terminating)).
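The create_table_from_dump() and slave-thread hunks above all follow the same borrow/switch/restore idiom that reset_db(), defined in sql_class.h later in this patch, was added for: a shallow pointer assignment with no allocation or free, safe only while the caller guarantees the string outlives the switch. Condensed to its core (a sketch, not the literal patch; the patch restores the two fields by direct assignment, which reset_db() merely packages):

char *save_db= thd->db;                  /* borrow the old pointer */
uint save_db_length= thd->db_length;
thd->reset_db((char*) db, strlen(db));   /* shallow switch, no copy */
mysql_parse(thd, thd->query, packet_len);
thd->reset_db(save_db, save_db_length);  /* put things back as they were */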
*/ - thd->query= thd->db= thd->catalog= 0; - thd->query_length= thd->db_length= 0; + thd->catalog= 0; + thd->reset_db(NULL, 0); + thd->query= 0; + thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); thd->proc_info = "Waiting for slave mutex on exit"; pthread_mutex_lock(&rli->run_lock); diff --git a/sql/sp.cc b/sql/sp.cc index 93e21170156..e5c565150d8 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -406,7 +406,8 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, { LEX *old_lex= thd->lex, newlex; String defstr; - char olddb[128]; + char old_db_buf[NAME_LEN+1]; + LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; bool dbchanged; ulong old_sql_mode= thd->variables.sql_mode; ha_rows old_select_limit= thd->variables.select_limit; @@ -450,9 +451,7 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, goto end; } - dbchanged= FALSE; - if ((ret= sp_use_new_db(thd, name->m_db.str, olddb, sizeof(olddb), - 1, &dbchanged))) + if ((ret= sp_use_new_db(thd, name->m_db, &old_db, 1, &dbchanged))) goto end; lex_start(thd, (uchar*)defstr.c_ptr(), defstr.length()); @@ -462,14 +461,14 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, { sp_head *sp= newlex.sphead; - if (dbchanged && (ret= mysql_change_db(thd, olddb, 1))) + if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1))) goto end; delete sp; ret= SP_PARSE_ERROR; } else { - if (dbchanged && (ret= mysql_change_db(thd, olddb, 1))) + if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1))) goto end; *sphp= newlex.sphead; (*sphp)->set_definer(&definer_user_name, &definer_host_name); @@ -507,15 +506,14 @@ db_create_routine(THD *thd, int type, sp_head *sp) int ret; TABLE *table; char definer[USER_HOST_BUFF_SIZE]; - char olddb[128]; + char old_db_buf[NAME_LEN+1]; + LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; bool dbchanged; DBUG_ENTER("db_create_routine"); DBUG_PRINT("enter", ("type: %d name: %.*s",type,sp->m_name.length, sp->m_name.str)); - dbchanged= FALSE; - if ((ret= sp_use_new_db(thd, sp->m_db.str, olddb, sizeof(olddb), - 0, &dbchanged))) + if ((ret= sp_use_new_db(thd, sp->m_db, &old_db, 0, &dbchanged))) { ret= SP_NO_DB_ERROR; goto done; @@ -642,7 +640,7 @@ db_create_routine(THD *thd, int type, sp_head *sp) done: close_thread_tables(thd); if (dbchanged) - (void)mysql_change_db(thd, olddb, 1); + (void) mysql_change_db(thd, old_db.str, 1); DBUG_RETURN(ret); } @@ -1815,49 +1813,76 @@ create_string(THD *thd, String *buf, } -// -// Utilities... -// + +/* + Change the current database if needed. + + SYNOPSIS + sp_use_new_db() + thd thread handle + + new_db new database name (a string and its length) + + old_db [IN] str points to a buffer in which to store the old + database; length contains the size of the buffer + [OUT] if the old db was not NULL, its name is copied + to the buffer pointed at by str and length is updated + accordingly. Otherwise str[0] is set to '\0' and length + is set to 0. The out parameter should be used only if + the database name has been changed (see dbchangedp). + + dbchangedp [OUT] is set to TRUE if the current database is changed, + FALSE otherwise. A database is not changed if the old + name is the same as the new one, both names are empty, + or an error has occurred.
+ + RETURN VALUE + 0 success + 1 access denied or out of memory (the error message is + set in THD) +*/ int -sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddblen, +sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db, bool no_access_check, bool *dbchangedp) { - bool changeit; + int ret; + static char empty_c_string[1]= {0}; /* used when no db is defined */ DBUG_ENTER("sp_use_new_db"); - DBUG_PRINT("enter", ("newdb: %s", newdb)); + DBUG_PRINT("enter", ("newdb: %s", new_db.str)); - if (! newdb) - newdb= (char *)""; - if (thd->db && thd->db[0]) + /* + Set new_db to an empty string if it's NULL, because mysql_change_db + requires a non-NULL argument. + new_db.str can be NULL only if we're restoring the old database after + execution of a stored procedure and there was no current database + selected. The stored procedure itself must always have its database + initialized. + */ + if (new_db.str == NULL) + new_db.str= empty_c_string; + + if (thd->db) { - if (my_strcasecmp(system_charset_info, thd->db, newdb) == 0) - changeit= 0; - else - { - changeit= 1; - strnmov(olddb, thd->db, olddblen); - } + old_db->length= (strmake(old_db->str, thd->db, old_db->length) - + old_db->str); } else - { // thd->db empty - if (newdb[0]) - changeit= 1; - else - changeit= 0; - olddb[0] = '\0'; + { + old_db->str[0]= '\0'; + old_db->length= 0; } - if (!changeit) + + /* Don't change the database if the new name is the same as the old one. */ + if (my_strcasecmp(system_charset_info, old_db->str, new_db.str) == 0) { *dbchangedp= FALSE; DBUG_RETURN(0); } - else - { - int ret= mysql_change_db(thd, newdb, no_access_check); - if (! ret) - *dbchangedp= TRUE; - DBUG_RETURN(ret); - } + ret= mysql_change_db(thd, new_db.str, no_access_check); + + *dbchangedp= ret == 0; + DBUG_RETURN(ret); } + @@ -104,15 +104,15 @@ extern "C" byte* sp_sroutine_key(const byte *ptr, uint *plen, my_bool first); TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup); void close_proc_table(THD *thd, Open_tables_state *backup); -// -// Utilities... -// -// Do a "use newdb". The current db is stored at olddb. -// If newdb is the same as the current one, nothing is changed. -// dbchangedp is set to true if the db was actually changed. +/* + Do a "use new_db". The current db is stored at old_db. If new_db is the + same as the current one, nothing is changed. dbchangedp is set to true if + the db was actually changed. +*/ + int -sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddbmax, +sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db, bool no_access_check, bool *dbchangedp); #endif /* _SP_H_ */ diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 88460337526..81f5d502ec9 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -385,24 +385,6 @@ sp_name::init_qname(THD *thd) m_name.length, m_name.str); } -sp_name * -sp_name_current_db_new(THD *thd, LEX_STRING name) -{ - sp_name *qname; - - if (! thd->db) - qname= new sp_name(name); - else - { - LEX_STRING db; - - db.length= strlen(thd->db); - db.str= thd->strmake(thd->db, db.length); - qname= new sp_name(db, name); - } - qname->init_qname(thd); - return qname; -} /* Check that the name 'ident' is ok.
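All callers of the new signature, db_load_routine() and db_create_routine() above and sp_head::execute() further down, share one shape: a stack buffer wrapped in a LEX_STRING receives the old name, and the switch is undone only when dbchangedp came back TRUE. A condensed sketch of that caller pattern (error paths abbreviated; the specific error code is the one db_create_routine() uses):

char old_db_buf[NAME_LEN+1];
LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
bool dbchanged;

if (sp_use_new_db(thd, sp->m_db, &old_db, 0, &dbchanged))
  DBUG_RETURN(SP_NO_DB_ERROR);      /* access denied or out of memory */
/* ... work inside the routine's database ... */
if (dbchanged)
  (void) mysql_change_db(thd, old_db.str, 1);  /* 1 = no access check */

Passing the buffer size in old_db.length on the way in, and getting the actual name length back in the same field, is what lets the new code drop the separate olddblen argument of the old signature.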
It's assumed to be an 'ident' @@ -513,14 +495,14 @@ sp_head::init_strings(THD *thd, LEX *lex, sp_name *name) /* During parsing, we must use thd->mem_root */ MEM_ROOT *root= thd->mem_root; - /* We have to copy strings to get them into the right memroot */ if (name) { + /* Must be initialized in the parser */ + DBUG_ASSERT(name->m_db.str && name->m_db.length); + + /* We have to copy strings to get them into the right memroot */ m_db.length= name->m_db.length; - if (name->m_db.length == 0) - m_db.str= NULL; - else - m_db.str= strmake_root(root, name->m_db.str, name->m_db.length); + m_db.str= strmake_root(root, name->m_db.str, name->m_db.length); m_name.length= name->m_name.length; m_name.str= strmake_root(root, name->m_name.str, name->m_name.length); @@ -529,10 +511,15 @@ sp_head::init_strings(THD *thd, LEX *lex, sp_name *name) m_qname.length= name->m_qname.length; m_qname.str= strmake_root(root, name->m_qname.str, m_qname.length); } - else if (thd->db) + else { - m_db.length= thd->db_length; - m_db.str= strmake_root(root, thd->db, m_db.length); + /* + FIXME: the only use case when name is NULL is events, and it should + be rewritten soon. Remove the else part and replace 'if' with + an assert when this is done. + */ + LEX_STRING str_reset= { NULL, 0 }; + m_db= m_name= m_qname= str_reset; } if (m_param_begin && m_param_end) @@ -949,7 +936,8 @@ bool sp_head::execute(THD *thd) { DBUG_ENTER("sp_head::execute"); - char olddb[128]; + char old_db_buf[NAME_LEN+1]; + LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; bool dbchanged; sp_rcontext *ctx; bool err_status= FALSE; @@ -996,10 +984,8 @@ sp_head::execute(THD *thd) m_first_instance->m_last_cached_sp == this) || (m_recursion_level + 1 == m_next_cached_sp->m_recursion_level)); - dbchanged= FALSE; if (m_db.length && - (err_status= sp_use_new_db(thd, m_db.str, olddb, sizeof(olddb), 0, - &dbchanged))) + (err_status= sp_use_new_db(thd, m_db, &old_db, 0, &dbchanged))) goto done; if ((ctx= thd->spcont)) @@ -1170,10 +1156,10 @@ sp_head::execute(THD *thd) { /* No access check when changing back to where we came from. - (It would generate an error from mysql_change_db() when olddb=="") + (It would generate an error from mysql_change_db() when old_db=="") */ if (! 
thd->killed) - err_status|= mysql_change_db(thd, olddb, 1); + err_status|= mysql_change_db(thd, old_db.str, 1); } m_flags&= ~IS_INVOKED; DBUG_PRINT("info", @@ -1857,9 +1843,6 @@ sp_head::reset_thd_mem_root(THD *thd) (ulong) &mem_root, (ulong) &thd->mem_root)); free_list= thd->free_list; // Keep the old list thd->free_list= NULL; // Start a new one - /* Copy the db, since substatements will point to it */ - m_thd_db= thd->db; - thd->db= thd->strmake(thd->db, thd->db_length); m_thd= thd; DBUG_VOID_RETURN; } @@ -1875,7 +1858,6 @@ sp_head::restore_thd_mem_root(THD *thd) DBUG_PRINT("info", ("mem_root 0x%lx returned from thd mem root 0x%lx", (ulong) &mem_root, (ulong) &thd->mem_root)); thd->free_list= flist; // Restore the old one - thd->db= m_thd_db; // Restore the original db pointer thd->mem_root= m_thd_root; m_thd= NULL; DBUG_VOID_RETURN; diff --git a/sql/sp_head.h b/sql/sp_head.h index 791343f0061..2fd38b7176c 100644 --- a/sql/sp_head.h +++ b/sql/sp_head.h @@ -61,13 +61,6 @@ public: */ LEX_STRING m_sroutines_key; - sp_name(LEX_STRING name) - : m_name(name) - { - m_db.str= m_qname.str= m_sroutines_key.str= 0; - m_db.length= m_qname.length= m_sroutines_key.length= 0; - } - sp_name(LEX_STRING db, LEX_STRING name) : m_db(db), m_name(name) { @@ -101,8 +94,6 @@ public: {} }; -sp_name * -sp_name_current_db_new(THD *thd, LEX_STRING name); bool check_routine_name(LEX_STRING name); @@ -356,7 +347,6 @@ private: MEM_ROOT *m_thd_root; // Temp. store for thd's mem_root THD *m_thd; // Set if we have reset mem_root - char *m_thd_db; // Original thd->db pointer sp_pcontext *m_pcont; // Parse context List<LEX> m_lex; // Temp. store for the other lex diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 5b039f6bcc0..c2ebd140720 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -1104,7 +1104,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived) if (found_old_table) { /* Tell threads waiting for refresh that something has happened */ - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } if (!lock_in_use) VOID(pthread_mutex_unlock(&LOCK_open)); @@ -1674,7 +1674,7 @@ TABLE *unlink_open_table(THD *thd, TABLE *list, TABLE *find) } *prev=0; // Notify any 'refresh' threads - pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); return start; } @@ -2224,7 +2224,7 @@ static bool reopen_table(TABLE *table) if (table->triggers) table->triggers->set_table(table); - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); error=0; end: @@ -2325,7 +2325,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh) { my_afree((gptr) tables); } - VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh + broadcast_refresh(); *prev=0; DBUG_RETURN(error); } @@ -2361,7 +2361,7 @@ void close_old_data_files(THD *thd, TABLE *table, bool abort_locks, } } if (found) - VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh + broadcast_refresh(); DBUG_VOID_RETURN; } @@ -2514,6 +2514,8 @@ TABLE *drop_locked_tables(THD *thd,const char *db, const char *table_name) } } *prev=0; + if (found) + broadcast_refresh(); if (thd->locked_tables && thd->locked_tables->table_count == 0) { my_free((gptr) thd->locked_tables,MYF(0)); @@ -6194,7 +6196,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name, Signal any thread waiting for tables to be freed to reopen their tables */ - (void) pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); DBUG_PRINT("info", ("Waiting for refresh signal")); if (!(flags & RTFC_CHECK_KILLED_FLAG) || 
!thd->killed) { diff --git a/sql/sql_class.cc b/sql/sql_class.cc index c8c8ff16199..1a7d5d4b89a 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1015,7 +1015,7 @@ bool select_send::send_data(List<Item> &items) Protocol *protocol= thd->protocol; char buff[MAX_FIELD_WIDTH]; String buffer(buff, sizeof(buff), &my_charset_bin); - DBUG_ENTER("send_data"); + DBUG_ENTER("select_send::send_data"); protocol->prepare_for_resend(); Item *item; @@ -1228,7 +1228,7 @@ select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u) bool select_export::send_data(List<Item> &items) { - DBUG_ENTER("send_data"); + DBUG_ENTER("select_export::send_data"); char buff[MAX_FIELD_WIDTH],null_buff[2],space[MAX_FIELD_WIDTH]; bool space_inited=0; String tmp(buff,sizeof(buff),&my_charset_bin),*res; @@ -1385,7 +1385,7 @@ bool select_dump::send_data(List<Item> &items) String tmp(buff,sizeof(buff),&my_charset_bin),*res; tmp.length(0); Item *item; - DBUG_ENTER("send_data"); + DBUG_ENTER("select_dump::send_data"); if (unit->offset_limit_cnt) { // using limit offset,count diff --git a/sql/sql_class.h b/sql/sql_class.h index 5222e75f309..b79f0753603 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1135,7 +1135,7 @@ public: uint tmp_table, global_read_lock; uint server_status,open_options; enum enum_thread_type system_thread; - uint32 db_length; + uint db_length; uint select_number; //number of select (used for EXPLAIN) /* variables.transaction_isolation is reset to this after each commit */ enum_tx_isolation session_tx_isolation; @@ -1443,6 +1443,47 @@ public: current_stmt_binlog_row_based= FALSE; #endif } + + /* + Initialize the current database from a NULL-terminated string with length + */ + void set_db(const char *new_db, uint new_db_len) + { + if (new_db) + { + /* Do not reallocate memory if current chunk is big enough. */ + if (db && db_length >= new_db_len) + memcpy(db, new_db, new_db_len+1); + else + { + safeFree(db); + db= my_strndup(new_db, new_db_len, MYF(MY_WME)); + } + db_length= db ? new_db_len: 0; + } + } + void reset_db(char *new_db, uint new_db_len) + { + db= new_db; + db_length= new_db_len; + } + /* + Copy the current database to the argument. Use the current arena to + allocate memory for a deep copy: current database may be freed after + a statement is parsed but before it's executed. 
+ */ + bool copy_db_to(char **p_db, uint *p_db_length) + { + if (db == NULL) + { + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); + return TRUE; + } + *p_db= strmake(db, db_length); + if (p_db_length) + *p_db_length= db_length; + return FALSE; + } }; @@ -1790,7 +1831,7 @@ typedef struct st_sort_buffer { class Table_ident :public Sql_alloc { - public: +public: LEX_STRING db; LEX_STRING table; SELECT_LEX_UNIT *sel; diff --git a/sql/sql_db.cc b/sql/sql_db.cc index bcd1b99b91a..41e7e5df1f7 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -930,8 +930,7 @@ exit: { if (!(thd->slave_thread)) /* a slave thread will free it itself */ x_free(thd->db); - thd->db= 0; - thd->db_length= 0; + thd->reset_db(NULL, 0); } VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); start_waiting_global_read_lock(thd); @@ -1345,14 +1344,10 @@ end: { if (!(thd->slave_thread)) my_free(dbname, MYF(0)); - thd->db= NULL; - thd->db_length= 0; + thd->reset_db(NULL, 0); } else - { - thd->db= dbname; // THD::~THD will free this - thd->db_length= db_length; - } + thd->reset_db(dbname, db_length); // THD::~THD will free this #ifndef NO_EMBEDDED_ACCESS_CHECKS if (!no_access_check) sctx->db_access= db_access; diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index bf035401bea..0d893a6c9be 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -254,7 +254,8 @@ err: DESCRIPTION Though this function takes a list of tables, only the first list entry - will be closed. Broadcasts a COND_refresh condition. + will be closed. + Broadcasts refresh if it closed the table. RETURN FALSE ok @@ -291,7 +292,7 @@ bool mysql_ha_close(THD *thd, TABLE_LIST *tables) if (close_thread_table(thd, table_ptr)) { /* Tell threads waiting for refresh that something has happened */ - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } VOID(pthread_mutex_unlock(&LOCK_open)); } @@ -615,7 +616,7 @@ err0: tables are closed (if MYSQL_HA_FLUSH_ALL) is set. If 'tables' is NULL and MYSQL_HA_FLUSH_ALL is not set, all HANDLER tables marked for flush are closed. - Broadcasts a COND_refresh condition, for every table closed. + Broadcasts refresh for every table closed. NOTE Since mysql_ha_flush() is called when the base table has to be closed, @@ -712,7 +713,7 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags, MYSQL_HA_REOPEN_ON_USAGE mark for reopen. DESCRIPTION - Broadcasts a COND_refresh condition, for every table closed. + Broadcasts refresh if it closed the table. The caller must lock LOCK_open. RETURN @@ -750,7 +751,7 @@ static int mysql_ha_flush_table(THD *thd, TABLE **table_ptr, uint mode_flags) if (close_thread_table(thd, table_ptr)) { /* Tell threads waiting for refresh that something has happened */ - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } DBUG_RETURN(0); diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 088bd3e59e5..1f93bc99483 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -17,6 +17,44 @@ /* Insert of records */ +/* + INSERT DELAYED + + Insert delayed is distinguished from a normal insert by lock_type == + TL_WRITE_DELAYED instead of TL_WRITE. It first tries to open a + "delayed" table (delayed_get_table()), but falls back to + open_and_lock_tables() on error and then proceeds as a normal insert. + + Opening a "delayed" table means finding a delayed insert thread that + already has the table open. If this fails, a new thread is created and + waited for while it opens and locks the table.
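Stepping back briefly to the THD database accessors defined in sql_class.h just above, since the delayed-insert code is among their first consumers: the three methods carve up ownership quite differently, and the distinction is easy to sketch (editorial illustration with made-up values):

thd->set_db("test", 4);      /* deep copy; THD owns and may reuse the heap chunk */
thd->reset_db(NULL, 0);      /* shallow assignment; nothing allocated or freed */

char *db; uint db_len;
if (thd->copy_db_to(&db, &db_len))  /* deep copy into the statement arena; */
  return TRUE;                      /* raises ER_NO_DB_ERROR when no db is set */

The arena copy in copy_db_to() matters because thd->db itself may be freed after parsing but before execution, exactly the hazard the doc comment above it describes. The INSERT DELAYED walkthrough resumes below.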
+ + If accessing the thread succeeded, its table is copied for local use + in delayed_insert::get_local_table(). A copy is required because the + normal insert logic works on a target table, but the other thread's + table object must not be used. The insert logic uses the record buffer + to create a record, and the delayed insert thread uses the record + buffer to pass the record to the table handler. So there must be two + different objects. Also the copied table is not included in the lock, + so that the statement can proceed even if the real table cannot be + accessed at this moment. + + Copying a table object is not a trivial operation. Besides the TABLE + object there are the field pointer array, the field objects and the + record buffer. After copying the field objects, their pointers into + the record must be "moved" to point to the new record buffer. + + After this setup the normal insert logic is used, except that for + delayed inserts write_delayed() is called instead of write_record(). + It inserts the rows into a queue and signals the delayed insert thread + instead of writing directly to the table. + + The delayed insert thread awakes from the signal. It locks the table, + inserts the rows from the queue, unlocks the table, and waits for the + next signal. It normally lives until a FLUSH TABLES or SHUTDOWN. + +*/ + #include "mysql_priv.h" #include "sp_head.h" #include "sql_trigger.h" @@ -312,9 +350,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, { if (thd->locked_tables) { - if (find_locked_table(thd, - table_list->db ? table_list->db : thd->db, - table_list->table_name)) + DBUG_ASSERT(table_list->db); /* Must be set in the parser */ + if (find_locked_table(thd, table_list->db, table_list->table_name)) { my_error(ER_DELAYED_INSERT_TABLE_LOCKED, MYF(0), table_list->table_name); @@ -1401,8 +1438,8 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) TABLE *table; DBUG_ENTER("delayed_get_table"); - if (!table_list->db) - table_list->db=thd->db; + /* Must be set in the parser */ + DBUG_ASSERT(table_list->db); /* Find the thread which handles this table. */ if (!(tmp=find_handler(thd,table_list))) @@ -1421,18 +1458,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) */ if (! (tmp= find_handler(thd, table_list))) { - /* - Avoid that a global read lock steps in while we are creating the - new thread. It would block trying to open the table. Hence, the - DI thread and this thread would wait until after the global - readlock is gone. Since the insert thread needs to wait for a - global read lock anyway, we do it right now. Note that - wait_if_global_read_lock() sets a protection against a new - global read lock when it succeeds. This needs to be released by - start_waiting_global_read_lock().
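The life cycle in the comment above is a textbook producer/consumer handoff. Schematically, with illustrative member and helper names (the real queue, mutex and condition members live in class delayed_insert and differ in detail):

/* Client thread (producer): */
pthread_mutex_lock(&di->mutex);
enqueue_row_copy(di, record);         /* illustrative: queue one record */
pthread_cond_signal(&di->cond);       /* wake the delayed insert thread */
pthread_mutex_unlock(&di->mutex);

/* Delayed insert thread (consumer), simplified main loop: */
for (;;)
{
  pthread_mutex_lock(&di->mutex);
  while (queue_is_empty(di) && !di->thd.killed)
    pthread_cond_wait(&di->cond, &di->mutex);  /* sleep until signalled */
  pthread_mutex_unlock(&di->mutex);
  if (di->thd.killed)
    break;                            /* FLUSH TABLES or shutdown */
  write_queued_rows_under_table_lock(di);      /* illustrative helper */
}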
- */ - if (wait_if_global_read_lock(thd, 0, 1)) - goto err; if (!(tmp=new delayed_insert())) { my_error(ER_OUTOFMEMORY,MYF(0),sizeof(delayed_insert)); @@ -1441,15 +1466,15 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) pthread_mutex_lock(&LOCK_thread_count); thread_count++; pthread_mutex_unlock(&LOCK_thread_count); - if (!(tmp->thd.db=my_strdup(table_list->db,MYF(MY_WME))) || - !(tmp->thd.query=my_strdup(table_list->table_name,MYF(MY_WME)))) + tmp->thd.set_db(table_list->db, strlen(table_list->db)); + tmp->thd.query= my_strdup(table_list->table_name,MYF(MY_WME)); + if (tmp->thd.db == NULL || tmp->thd.query == NULL) { delete tmp; my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); goto err1; } tmp->table_list= *table_list; // Needed to open table - tmp->table_list.db= tmp->thd.db; tmp->table_list.alias= tmp->table_list.table_name= tmp->thd.query; tmp->lock(); pthread_mutex_lock(&tmp->mutex); @@ -1473,11 +1498,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) pthread_cond_wait(&tmp->cond_client,&tmp->mutex); } pthread_mutex_unlock(&tmp->mutex); - /* - Release the protection against the global read lock and wake - everyone, who might want to set a global read lock. - */ - start_waiting_global_read_lock(thd); thd->proc_info="got old table"; if (tmp->thd.killed) { @@ -1513,11 +1533,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) err1: thd->fatal_error(); - /* - Release the protection against the global read lock and wake - everyone, who might want to set a global read lock. - */ - start_waiting_global_read_lock(thd); err: pthread_mutex_unlock(&LOCK_delayed_create); DBUG_RETURN(0); // Continue with normal insert @@ -1538,6 +1553,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) TABLE *copy; TABLE_SHARE *share= table->s; byte *bitmap; + DBUG_ENTER("delayed_insert::get_local_table"); /* First request insert thread to get a lock */ status=1; @@ -1561,6 +1577,13 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) } } + /* + Allocate memory for the TABLE object, the field pointers array, and + one record buffer of reclength size. Normally a table has three + record buffers of rec_buff_length size, which includes alignment + bytes. Since the table copy is used for creating one record only, + the other record buffers and alignment are unnecessary. + */ client_thd->proc_info="allocating local table"; copy= (TABLE*) client_thd->alloc(sizeof(*copy)+ (share->fields+1)*sizeof(Field**)+ @@ -1568,23 +1591,28 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) share->column_bitmap_size*2); if (!copy) goto error; - *copy= *table; + /* Copy the TABLE object. */ + *copy= *table; /* We don't need to change the file handler here */ - field= copy->field= (Field**) (copy+1); - bitmap= (byte*) (field+share->fields+1); - copy->record[0]= (bitmap+ share->column_bitmap_size*2); - memcpy((char*) copy->record[0],(char*) table->record[0],share->reclength); - - /* Make a copy of all fields */ - - adjust_ptrs=PTR_BYTE_DIFF(copy->record[0],table->record[0]); - - found_next_number_field=table->found_next_number_field; - for (org_field=table->field ; *org_field ; org_field++,field++) + /* Assign the pointers for the field pointers array and the record. */ + field= copy->field= (Field**) (copy + 1); + bitmap= (byte*) (field + share->fields + 1); + copy->record[0]= (bitmap + share->column_bitmap_size * 2); + memcpy((char*) copy->record[0], (char*) table->record[0], share->reclength); + /* + Make a copy of all fields. 
+ The copied fields need to point into the copied record. This is done + by copying the field objects with their old pointer values and then + "moving" the pointers by the distance between the original and copied + records. That way we preserve the relative positions in the records. + */ + adjust_ptrs= PTR_BYTE_DIFF(copy->record[0], table->record[0]); + found_next_number_field= table->found_next_number_field; + for (org_field= table->field; *org_field; org_field++, field++) { - if (!(*field= (*org_field)->new_field(client_thd->mem_root,copy))) - return 0; + if (!(*field= (*org_field)->new_field(client_thd->mem_root, copy, 1))) + DBUG_RETURN(0); (*field)->orig_table= copy; // Remove connection (*field)->move_field_offset(adjust_ptrs); // Point at copy->record[0] if (*org_field == found_next_number_field) @@ -1617,14 +1645,14 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) copy->read_set= &copy->def_read_set; copy->write_set= &copy->def_write_set; - return copy; + DBUG_RETURN(copy); /* Got fatal error */ error: tables_in_use--; status=1; pthread_cond_signal(&cond); // Inform thread about abort - return 0; + DBUG_RETURN(0); } @@ -2876,7 +2904,7 @@ bool select_create::send_eof() if (!table->s->tmp_table) { if (close_thread_table(thd, &table)) - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } thd->extra_lock=0; table=0; @@ -2906,7 +2934,7 @@ void select_create::abort() quick_rm_table(table_type, create_table->db, create_table->table_name); /* Tell threads waiting for refresh that something has happened */ if (version != refresh_version) - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } else if (!create_info->table_existed) close_temporary_table(thd, table, 1, 1); diff --git a/sql/sql_lex.h b/sql/sql_lex.h index a46aaa0bab7..c889c2c5f94 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -808,6 +808,11 @@ public: *this= *state; } + /* + Direct addition to the list of query tables. + If you are using this function, you must ensure that the table + object, in particular the table->db member, is initialized. + */ void add_to_query_tables(TABLE_LIST *table) { *(table->prev_global= query_tables_last)= table; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 232df095816..308f7a0ac60 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -66,7 +66,6 @@ static void time_out_user_resource_limits(THD *thd, USER_CONN *uc); static int check_for_max_user_connections(THD *thd, USER_CONN *uc); #endif static void decrease_user_connections(USER_CONN *uc); -static bool check_db_used(THD *thd,TABLE_LIST *tables); static bool check_multi_update_lock(THD *thd); static void remove_escape(char *name); static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables); @@ -111,8 +110,6 @@ const char *xa_state_names[]={ "NON-EXISTING", "ACTIVE", "IDLE", "PREPARED" }; -static char empty_c_string[1]= {0}; // Used for not defined 'db' - #ifdef __WIN__ static void test_signal(int sig_ptr) { @@ -315,8 +312,7 @@ int check_user(THD *thd, enum enum_server_command command, thd->db is saved in caller and needs to be freed by caller if this function returns 0 */ - thd->db= 0; - thd->db_length= 0; + thd->reset_db(NULL, 0); if (mysql_change_db(thd, db, FALSE)) { /* Send the error to the client */ @@ -356,9 +352,8 @@ int check_user(THD *thd, enum enum_server_command command, if connect failed. Also in case of 'CHANGE USER' failure, current database will be switched to 'no database selected'.
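The pointer arithmetic in delayed_insert::get_local_table() above is the general recipe for cloning any object graph together with a buffer it points into: copy the objects bit-for-bit, then shift every interior pointer by the distance between the old and the new buffer. Reduced to the essential step (this is conceptually what move_field_offset() does; the real method also adjusts the null-bit pointer):

byte *old_buf= table->record[0];
byte *new_buf= copy->record[0];
my_ptrdiff_t adjust= new_buf - old_buf;    /* same idea as PTR_BYTE_DIFF() */

/* A cloned field whose ptr still points into old_buf is rebased: */
field->ptr+= adjust;                       /* now points into new_buf */

Because every field keeps its offset relative to the start of the record, one shared adjustment value rebases the whole field array in a single pass.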
*/ - thd->db= 0; - thd->db_length= 0; - + thd->reset_db(NULL, 0); + USER_RESOURCES ur; int res= acl_getroot(thd, &ur, passwd, passwd_len); #ifndef EMBEDDED_LIBRARY @@ -823,6 +818,37 @@ static void reset_mqh(LEX_USER *lu, bool get_them= 0) #endif /* NO_EMBEDDED_ACCESS_CHECKS */ } +void thd_init_client_charset(THD *thd, uint cs_number) +{ + /* + Use server character set and collation if + - opt_character_set_client_handshake is not set + - client has not specified a character set + - client character set is the same as the server's + - client character set doesn't exist in the server + */ + if (!opt_character_set_client_handshake || + !(thd->variables.character_set_client= get_charset(cs_number, MYF(0))) || + !my_strcasecmp(&my_charset_latin1, + global_system_variables.character_set_client->name, + thd->variables.character_set_client->name)) + { + thd->variables.character_set_client= + global_system_variables.character_set_client; + thd->variables.collation_connection= + global_system_variables.collation_connection; + thd->variables.character_set_results= + global_system_variables.character_set_results; + } + else + { + thd->variables.character_set_results= + thd->variables.collation_connection= + thd->variables.character_set_client; + } +} + + /* Perform handshake, authorize client and update thd ACL variables. SYNOPSIS @@ -958,33 +984,7 @@ static int check_connection(THD *thd) thd->client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16; thd->max_client_packet_length= uint4korr(net->read_pos+4); DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8])); - /* - Use server character set and collation if - - opt_character_set_client_handshake is not set - - client has not specified a character set - - client character set is the same as the servers - - client character set doesn't exists in server - */ - if (!opt_character_set_client_handshake || - !(thd->variables.character_set_client= - get_charset((uint) net->read_pos[8], MYF(0))) || - !my_strcasecmp(&my_charset_latin1, - global_system_variables.character_set_client->name, - thd->variables.character_set_client->name)) - { - thd->variables.character_set_client= - global_system_variables.character_set_client; - thd->variables.collation_connection= - global_system_variables.collation_connection; - thd->variables.character_set_results= - global_system_variables.character_set_results; - } - else - { - thd->variables.character_set_results= - thd->variables.collation_connection= - thd->variables.character_set_client; - } + thd_init_client_charset(thd, (uint) net->read_pos[8]); thd->update_charset(); end= (char*) net->read_pos+32; } @@ -1365,7 +1365,8 @@ end: DBUG_RETURN(0); } - /* This works because items are allocated with sql_alloc() */ + +/* This works because items are allocated with sql_alloc() */ void free_items(Item *item) { @@ -1379,7 +1380,7 @@ void free_items(Item *item) DBUG_VOID_RETURN; } - /* This works because items are allocated with sql_alloc() */ +/* This works because items are allocated with sql_alloc() */ void cleanup_items(Item *item) { @@ -1389,7 +1390,26 @@ void cleanup_items(Item *item) DBUG_VOID_RETURN; } -int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd) +/* + Handle COM_TABLE_DUMP command + + SYNOPSIS + mysql_table_dump + thd thread handle + db database name or an empty string. If empty, + the current database of the connection is used + tbl_name name of the table to dump + + NOTES + This function is written to handle one specific command only.
+ + RETURN VALUE + 0 success + 1 error, the error message is set in THD +*/ + +static +int mysql_table_dump(THD* thd, char* db, char* tbl_name) { TABLE* table; TABLE_LIST* table_list; @@ -1426,7 +1446,7 @@ int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd) goto err; } net_flush(&thd->net); - if ((error= table->file->dump(thd,fd))) + if ((error= table->file->dump(thd,-1))) my_error(ER_GET_ERRNO, MYF(0), error); err: @@ -1678,7 +1698,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, } tbl_name= strmake(db, packet + 1, db_len)+1; strmake(tbl_name, packet + db_len + 2, tbl_len); - mysql_table_dump(thd, db, tbl_name, -1); + mysql_table_dump(thd, db, tbl_name); break; } case COM_CHANGE_USER: @@ -1853,11 +1873,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_FIELDS], &LOCK_status); bzero((char*) &table_list,sizeof(table_list)); - if (!(table_list.db=thd->db)) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); + if (thd->copy_db_to(&table_list.db, 0)) break; - } pend= strend(packet); thd->convert_string(&conv_name, system_charset_info, packet, (uint) (pend-packet), thd->charset()); @@ -2205,6 +2222,34 @@ void log_slow_statement(THD *thd) } +/* + Create a TABLE_LIST object for an INFORMATION_SCHEMA table. + + SYNOPSIS + prepare_schema_table() + thd thread handle + lex current lex + table_ident table alias if it's used + schema_table_idx the type of the INFORMATION_SCHEMA table to be + created + + DESCRIPTION + This function is used in the parser to convert a SHOW or DESCRIBE + table_name command to a SELECT from INFORMATION_SCHEMA. + It prepares a SELECT_LEX and a TABLE_LIST object to represent the + given command as a SELECT parse tree. + + NOTES + Due to the way this function works with memory and LEX it cannot + be used outside the parser (parse tree transformations outside + the parser break PS and SP). + + RETURN VALUE + 0 success + 1 out of memory or SHOW commands are not allowed + in this version of the server. +*/ + int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, enum enum_schema_tables schema_table_idx) { @@ -2233,13 +2278,13 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, DBUG_RETURN(1); #else { - char *db= lex->select_lex.db ? lex->select_lex.db : thd->db; - if (!db) + char *db; + if (lex->select_lex.db == NULL && + thd->copy_db_to(&lex->select_lex.db, 0)) { - my_message(ER_NO_DB_ERROR, - ER(ER_NO_DB_ERROR), MYF(0)); /* purecov: inspected */ - DBUG_RETURN(1); /* purecov: inspected */ + DBUG_RETURN(1); } + db= lex->select_lex.db; remove_escape(db); // Fix escaped '_' if (check_db_name(db)) { @@ -2256,11 +2301,6 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, db); DBUG_RETURN(1); } - /* - We need to do a copy to make this prepared statement safe if this - was thd->db - */ - lex->select_lex.db= thd->strdup(db); break; } #endif @@ -2390,17 +2430,37 @@ static void reset_one_shot_variables(THD *thd) } -/**************************************************************************** -** mysql_execute_command -** Execute command saved in thd and current_lex->sql_command -****************************************************************************/ +/* + Execute command saved in thd and current_lex->sql_command + + SYNOPSIS + mysql_execute_command() + thd Thread handle + + IMPLEMENTATION + + Before every operation that can request a write lock for a table + wait if a global read lock exists. 
However do not wait if this + thread has locked tables already. No new locks can be requested + until the other locks are released. The thread that requests the + global read lock waits for write locked tables to become unlocked. + + Note that wait_if_global_read_lock() sets a protection against a new + global read lock when it succeeds. This needs to be released by + start_waiting_global_read_lock() after the operation. + + RETURN + FALSE OK + TRUE Error +*/ bool mysql_execute_command(THD *thd) { - bool res= FALSE; - int result= 0; - LEX *lex= thd->lex; + bool res= FALSE; + bool need_start_waiting= FALSE; // have protection against global read lock + int result= 0; + LEX *lex= thd->lex; /* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */ SELECT_LEX *select_lex= &lex->select_lex; /* first table of first SELECT_LEX */ @@ -2677,8 +2737,7 @@ mysql_execute_command(THD *thd) case SQLCOM_BACKUP_TABLE: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, SELECT_ACL, all_tables, 0) || + if (check_table_access(thd, SELECT_ACL, all_tables, 0) || check_global_access(thd, FILE_ACL)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; @@ -2690,8 +2749,7 @@ mysql_execute_command(THD *thd) case SQLCOM_RESTORE_TABLE: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, INSERT_ACL, all_tables, 0) || + if (check_table_access(thd, INSERT_ACL, all_tables, 0) || check_global_access(thd, FILE_ACL)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; @@ -2703,8 +2761,7 @@ mysql_execute_command(THD *thd) case SQLCOM_ASSIGN_TO_KEYCACHE: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_access(thd, INDEX_ACL, first_table->db, + if (check_access(thd, INDEX_ACL, first_table->db, &first_table->grant.privilege, 0, 0, test(first_table->schema_table))) goto error; @@ -2714,8 +2771,7 @@ mysql_execute_command(THD *thd) case SQLCOM_PRELOAD_KEYS: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_access(thd, INDEX_ACL, first_table->db, + if (check_access(thd, INDEX_ACL, first_table->db, &first_table->grant.privilege, 0, 0, test(first_table->schema_table))) goto error; @@ -2777,8 +2833,8 @@ mysql_execute_command(THD *thd) case SQLCOM_LOAD_MASTER_TABLE: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (!first_table->db) - first_table->db= thd->db; + DBUG_ASSERT(first_table->db); /* Must be set in the parser */ + if (check_access(thd, CREATE_ACL, first_table->db, &first_table->grant.privilege, 0, 0, test(first_table->schema_table))) @@ -2865,7 +2921,8 @@ mysql_execute_command(THD *thd) TABLE in the same way. That way we avoid that a new table is created during a gobal read lock. 
*/ - if (wait_if_global_read_lock(thd, 0, 1)) + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) { res= 1; goto end_with_restore_list; @@ -2901,7 +2958,7 @@ mysql_execute_command(THD *thd) { update_non_unique_table_error(create_table, "CREATE", duplicate); res= 1; - goto end_with_restart_wait; + goto end_with_restore_list; } } /* If we create merge table, we have to test tables in merge, too */ @@ -2917,7 +2974,7 @@ mysql_execute_command(THD *thd) { update_non_unique_table_error(tab, "CREATE", duplicate); res= 1; - goto end_with_restart_wait; + goto end_with_restore_list; } } } @@ -2962,13 +3019,6 @@ mysql_execute_command(THD *thd) send_ok(thd); } -end_with_restart_wait: - /* - Release the protection against the global read lock and wake - everyone, who might want to set a global read lock. - */ - start_waiting_global_read_lock(thd); - /* put tables back for PS rexecuting */ end_with_restore_list: lex->link_first_table_back(create_table, link_to_local); @@ -3039,25 +3089,8 @@ end_with_restore_list: my_error(ER_WRONG_TABLE_NAME, MYF(0), lex->name); goto error; } - if (!select_lex->db) - { - /* - In the case of ALTER TABLE ... RENAME we should supply the - default database if the new name is not explicitly qualified - by a database. (Bug #11493) - */ - if (lex->alter_info.flags & ALTER_RENAME) - { - if (! thd->db) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); - goto error; - } - select_lex->db= thd->db; - } - else - select_lex->db= first_table->db; - } + /* Must be set in the parser */ + DBUG_ASSERT(select_lex->db); if (check_access(thd, priv_needed, first_table->db, &first_table->grant.privilege, 0, 0, test(first_table->schema_table)) || @@ -3084,11 +3117,24 @@ end_with_restore_list: } } /* Don't yet allow changing of symlinks with ALTER TABLE */ + if (lex->create_info.data_file_name) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0, + "DATA DIRECTORY option ignored"); + if (lex->create_info.index_file_name) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0, + "INDEX DIRECTORY option ignored"); lex->create_info.data_file_name=lex->create_info.index_file_name=0; /* ALTER TABLE ends previous transaction */ if (end_active_trans(thd)) goto error; + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + thd->enable_slow_log= opt_log_slow_admin_statements; res= mysql_alter_table(thd, select_lex->db, lex->name, &lex->create_info, @@ -3105,8 +3151,6 @@ end_with_restore_list: { DBUG_ASSERT(first_table == all_tables && first_table != 0); TABLE_LIST *table; - if (check_db_used(thd, all_tables)) - goto error; for (table= first_table; table; table= table->next_local->next_local) { if (check_access(thd, ALTER_ACL | DROP_ACL, table->db, @@ -3163,8 +3207,7 @@ end_with_restore_list: if (lex->only_view) first_table->skip_temporary= 1; - if (check_db_used(thd, all_tables) || - check_access(thd, SELECT_ACL | EXTRA_ACL, first_table->db, + if (check_access(thd, SELECT_ACL | EXTRA_ACL, first_table->db, &first_table->grant.privilege, 0, 0, test(first_table->schema_table))) goto error; @@ -3177,8 +3220,7 @@ end_with_restore_list: case SQLCOM_CHECKSUM: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, SELECT_ACL | EXTRA_ACL, all_tables, 0)) + if (check_table_access(thd, SELECT_ACL | EXTRA_ACL, all_tables, 0)) goto error; /* purecov: inspected */ res = mysql_checksum_table(thd, first_table, &lex->check_opt); break; 
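The hunks above and below repeat one shape: mysql_execute_command() now takes the global-read-lock protection per statement, records it in need_start_waiting, and releases it at a single exit point, where the old code paired wait_if_global_read_lock() and start_waiting_global_read_lock() inside each branch (and inside helpers such as delayed_get_table() and mysql_create_table_internal(), whose private copies this patch removes). Condensed into one schematic branch; do_write_statement() is an illustrative placeholder for the per-statement work:

bool need_start_waiting= FALSE;     /* TRUE once we hold the protection */

if (!thd->locked_tables &&
    !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
  return TRUE;                      /* a global read lock blocked us */

res= do_write_statement(thd);       /* illustrative placeholder */

if (need_start_waiting)             /* the single release point (finish:) */
  start_waiting_global_read_lock(thd);
return res;

Centralizing the release also removes the old end_with_restart_wait label in the CREATE TABLE branch, which existed only to undo the protection on its error paths.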
@@ -3186,8 +3228,7 @@ end_with_restore_list: case SQLCOM_REPAIR: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) + if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; res= mysql_repair_table(thd, first_table, &lex->check_opt); @@ -3208,8 +3249,7 @@ end_with_restore_list: case SQLCOM_CHECK: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, SELECT_ACL | EXTRA_ACL , all_tables, 0)) + if (check_table_access(thd, SELECT_ACL | EXTRA_ACL , all_tables, 0)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; res = mysql_check_table(thd, first_table, &lex->check_opt); @@ -3220,8 +3260,7 @@ end_with_restore_list: case SQLCOM_ANALYZE: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) + if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; res= mysql_analyze_table(thd, first_table, &lex->check_opt); @@ -3243,8 +3282,7 @@ end_with_restore_list: case SQLCOM_OPTIMIZE: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) + if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; res= (specialflag & (SPECIAL_SAFE_MODE | SPECIAL_NO_NEW_FUNC)) ? @@ -3345,6 +3383,14 @@ end_with_restore_list: break; /* Skip first table, which is the table we are inserting in */ select_lex->context.table_list= first_table->next_local; + + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + res= mysql_insert(thd, all_tables, lex->field_list, lex->many_values, lex->update_list, lex->value_list, lex->duplicates, lex->ignore); @@ -3368,6 +3414,14 @@ end_with_restore_list: select_lex->options|= SELECT_NO_UNLOCK; unit->set_limit(select_lex); + + if (! thd->locked_tables && + ! (need_start_waiting= ! 
wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + if (!(res= open_and_lock_tables(thd, all_tables))) { /* Skip first table, which is the table we are inserting in */ @@ -3435,6 +3489,14 @@ end_with_restore_list: break; DBUG_ASSERT(select_lex->offset_limit == 0); unit->set_limit(select_lex); + + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + res = mysql_delete(thd, all_tables, select_lex->where, &select_lex->order_list, unit->select_limit_cnt, select_lex->options, @@ -3448,6 +3510,13 @@ end_with_restore_list: (TABLE_LIST *)thd->lex->auxilliary_table_list.first; multi_delete *result; + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + if ((res= multi_delete_precheck(thd, all_tables))) break; @@ -3633,7 +3702,7 @@ end_with_restore_list: break; case SQLCOM_LOCK_TABLES: unlock_locked_tables(thd); - if (check_db_used(thd, all_tables) || end_active_trans(thd)) + if (end_active_trans(thd)) goto error; if (check_table_access(thd, LOCK_TABLES_ACL | SELECT_ACL, all_tables, 0)) goto error; @@ -3772,12 +3841,8 @@ end_with_restore_list: } case SQLCOM_ALTER_DB: { - char *db= lex->name ? lex->name : thd->db; - if (!db) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); - break; - } + char *db= lex->name; + DBUG_ASSERT(db); /* Must be set in the parser */ if (!strip_sp(db) || check_db_name(db)) { my_error(ER_WRONG_DB_NAME, MYF(0), db); @@ -4114,7 +4179,7 @@ end_with_restore_list: case SQLCOM_FLUSH: { bool write_to_binlog; - if (check_global_access(thd,RELOAD_ACL) || check_db_used(thd, all_tables)) + if (check_global_access(thd,RELOAD_ACL)) goto error; /* reload_acl_and_cache() will tell us if we are allowed to write to the @@ -4163,15 +4228,12 @@ end_with_restore_list: #endif case SQLCOM_HA_OPEN: DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, SELECT_ACL, all_tables, 0)) + if (check_table_access(thd, SELECT_ACL, all_tables, 0)) goto error; res= mysql_ha_open(thd, first_table, 0); break; case SQLCOM_HA_CLOSE: DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables)) - goto error; res= mysql_ha_close(thd, first_table); break; case SQLCOM_HA_READ: @@ -4181,8 +4243,6 @@ end_with_restore_list: if a user has no permissions to read a table, he won't be able to open it (with SQLCOM_HA_OPEN) in the first place. 
*/ - if (check_db_used(thd, all_tables)) - goto error; unit->set_limit(select_lex); res= mysql_ha_read(thd, first_table, lex->ha_read_mode, lex->ident.str, lex->insert_list, lex->ha_rkey_mode, select_lex->where, @@ -4312,23 +4372,11 @@ end_with_restore_list: case SQLCOM_CREATE_SPFUNCTION: { uint namelen; - char *name, *db; + char *name; int result; DBUG_ASSERT(lex->sphead != 0); - - if (!lex->sphead->m_db.str || !lex->sphead->m_db.str[0]) - { - if (!thd->db) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); - delete lex->sphead; - lex->sphead= 0; - goto error; - } - lex->sphead->m_db.length= strlen(thd->db); - lex->sphead->m_db.str= thd->db; - } + DBUG_ASSERT(lex->sphead->m_db.str); /* Must be initialized in the parser */ if (check_access(thd, CREATE_PROC_ACL, lex->sphead->m_db.str, 0, 0, 0, is_schema_db(lex->sphead->m_db.str))) @@ -4445,34 +4493,17 @@ end_with_restore_list: } #endif /* NO_EMBEDDED_ACCESS_CHECKS */ - /* - We need to copy name and db in order to use them for - check_routine_access which is called after lex->sphead has - been deleted. - */ - name= thd->strdup(name); - lex->sphead->m_db.str= db= thd->strmake(lex->sphead->m_db.str, - lex->sphead->m_db.length); res= (result= lex->sphead->create(thd)); if (result == SP_OK) { - /* - We must cleanup the unit and the lex here because - sp_grant_privileges calls (indirectly) db_find_routine, - which in turn may call MYSQLparse with THD::lex. - TODO: fix db_find_routine to use a temporary lex. - */ - lex->unit.cleanup(); - delete lex->sphead; - lex->sphead= 0; #ifndef NO_EMBEDDED_ACCESS_CHECKS /* only add privileges if really necessary */ if (sp_automatic_privileges && !opt_noacl && check_routine_access(thd, DEFAULT_CREATE_PROC_ACLS, - db, name, + lex->sphead->m_db.str, name, lex->sql_command == SQLCOM_CREATE_PROCEDURE, 1)) { - if (sp_grant_privileges(thd, db, name, + if (sp_grant_privileges(thd, lex->sphead->m_db.str, name, lex->sql_command == SQLCOM_CREATE_PROCEDURE)) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_PROC_AUTO_GRANT_FAIL, @@ -4480,6 +4511,9 @@ end_with_restore_list: close_thread_tables(thd); } #endif + lex->unit.cleanup(); + delete lex->sphead; + lex->sphead= 0; send_ok(thd); } else @@ -4896,7 +4930,8 @@ end_with_restore_list: view_store_options(thd, first_table, &buff); buff.append(STRING_WITH_LEN("VIEW ")); /* Test if user supplied a db (ie: we did not use thd->db) */ - if (first_table->db != thd->db && first_table->db[0]) + if (first_table->db && first_table->db[0] && + (thd->db == NULL || strcmp(first_table->db, thd->db))) { append_identifier(thd, &buff, first_table->db, first_table->db_length); @@ -5178,11 +5213,22 @@ end: */ if (!(sql_command_flags[lex->sql_command] & CF_HAS_ROW_COUNT)) thd->row_count_func= -1; - DBUG_RETURN(res || thd->net.report_error); + + goto finish; error: - res= 1; // would be better to set res=1 before "goto error" - goto end; + res= TRUE; + +finish: + if (need_start_waiting) + { + /* + Release the protection against the global read lock and wake + everyone who might want to set a global read lock.
+ */ + start_waiting_global_read_lock(thd); + } + DBUG_RETURN(res || thd->net.report_error); } @@ -5549,7 +5595,7 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables, (want_access & ~EXTRA_ACL) && thd->db) tables->grant.privilege= want_access; - else if (tables->db && tables->db == thd->db) + else if (tables->db && thd->db && strcmp(tables->db, thd->db) == 0) { if (found && !grant_option) // db already checked tables->grant.privilege=found_access; @@ -5695,24 +5741,6 @@ bool check_merge_table_access(THD *thd, char *db, } -static bool check_db_used(THD *thd,TABLE_LIST *tables) -{ - for (; tables; tables= tables->next_global) - { - if (!tables->db) - { - if (!(tables->db=thd->db)) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), - MYF(0)); /* purecov: tested */ - return TRUE; /* purecov: tested */ - } - } - } - return FALSE; -} - - /**************************************************************************** Check stack size; Send error if there isn't enough stack to continue ****************************************************************************/ @@ -6354,19 +6382,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, ptr->db= table->db.str; ptr->db_length= table->db.length; } - else if (thd->db) - { - ptr->db= thd->db; - ptr->db_length= thd->db_length; - } - else - { - /* The following can't be "" as we may do 'casedn_str()' on it */ - ptr->db= empty_c_string; - ptr->db_length= 0; - } - if (thd->stmt_arena->is_stmt_prepare_or_first_sp_execute()) - ptr->db= thd->strdup(ptr->db); + else if (thd->copy_db_to(&ptr->db, &ptr->db_length)) + DBUG_RETURN(0); ptr->alias= alias_str; if (lower_case_table_names && table->table.length) @@ -7419,8 +7436,7 @@ bool multi_delete_precheck(THD *thd, TABLE_LIST *tables) /* sql_yacc guarantees that tables and aux_tables are not zero */ DBUG_ASSERT(aux_tables != 0); - if (check_db_used(thd, tables) || check_db_used(thd,aux_tables) || - check_table_access(thd, SELECT_ACL, tables, 0)) + if (check_table_access(thd, SELECT_ACL, tables, 0)) DBUG_RETURN(TRUE); /* @@ -7520,8 +7536,7 @@ bool update_precheck(THD *thd, TABLE_LIST *tables) my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0)); DBUG_RETURN(TRUE); } - DBUG_RETURN(check_db_used(thd, tables) || - check_one_table_access(thd, UPDATE_ACL, tables)); + DBUG_RETURN(check_one_table_access(thd, UPDATE_ACL, tables)); } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index d34ff070eb1..5b70d1649e7 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -8058,7 +8058,8 @@ Field *create_tmp_field_from_field(THD *thd, Field *org_field, org_field->field_name, table->s, org_field->charset()); else - new_field= org_field->new_field(thd->mem_root, table); + new_field= org_field->new_field(thd->mem_root, table, + table == org_field->table); if (new_field) { new_field->init(table); @@ -13166,7 +13167,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, saved value */ Field *field= item->field; - item->result_field=field->new_field(thd->mem_root,field->table); + item->result_field=field->new_field(thd->mem_root,field->table, 1); char *tmp=(char*) sql_alloc(field->pack_length()+1); if (!tmp) goto err; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index d5d33373e6c..d9a43e4580f 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -3312,8 +3312,7 @@ bool mysql_create_table_internal(THD *thd, my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias); goto err; } - if (wait_if_global_read_lock(thd, 0, 1)) - goto err; + VOID(pthread_mutex_lock(&LOCK_open)); if (!internal_tmp_table && 
!(create_info->options & HA_LEX_CREATE_TMP_TABLE)) { @@ -3389,7 +3388,6 @@ bool mysql_create_table_internal(THD *thd, error= FALSE; unlock_and_end: VOID(pthread_mutex_unlock(&LOCK_open)); - start_waiting_global_read_lock(thd); err: thd->proc_info="After create"; @@ -3621,7 +3619,7 @@ void close_cached_table(THD *thd, TABLE *table) thd->open_tables=unlink_open_table(thd,thd->open_tables,table); /* When lock on LOCK_open is freed other threads can continue */ - pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); DBUG_VOID_RETURN; } @@ -4408,7 +4406,8 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, { DBUG_RETURN(TRUE); } - src_db= table_ident->db.str ? table_ident->db.str : thd->db; + DBUG_ASSERT(table_ident->db.str); /* Must be set in the parser */ + src_db= table_ident->db.str; /* Validate the source table @@ -6133,7 +6132,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, } } VOID(pthread_mutex_unlock(&LOCK_open)); - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); /* The ALTER TABLE is always in its own transaction. Commit must not be called while LOCK_open is locked. It could call diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index 0ea87f3dfe4..5abdfa27d58 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -733,7 +733,8 @@ bool Table_triggers_list::prepare_record1_accessors(TABLE *table) QQ: it is supposed that it is ok to use this function for field cloning... */ - if (!(*old_fld= (*fld)->new_field(&table->mem_root, table))) + if (!(*old_fld= (*fld)->new_field(&table->mem_root, table, + table == (*fld)->table))) return 1; (*old_fld)->move_field_offset((my_ptrdiff_t)(table->record[1] - table->record[0])); @@ -928,8 +929,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, save_db.str= thd->db; save_db.length= thd->db_length; - thd->db_length= strlen(db); - thd->db= (char *) db; + thd->reset_db((char*) db, strlen(db)); while ((trg_create_str= it++)) { trg_sql_mode= itm++; @@ -1031,8 +1031,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, lex_end(&lex); } - thd->db= save_db.str; - thd->db_length= save_db.length; + thd->reset_db(save_db.str, save_db.length); thd->lex= old_lex; thd->spcont= save_spcont; thd->variables.sql_mode= save_sql_mode; @@ -1045,8 +1044,7 @@ err_with_lex_cleanup: thd->lex= old_lex; thd->spcont= save_spcont; thd->variables.sql_mode= save_sql_mode; - thd->db= save_db.str; - thd->db_length= save_db.length; + thd->reset_db(save_db.str, save_db.length); DBUG_RETURN(1); } diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc index 4b9de6905fe..b92a9363a22 100644 --- a/sql/sql_udf.cc +++ b/sql/sql_udf.cc @@ -114,7 +114,8 @@ void udf_init() READ_RECORD read_record_info; TABLE *table; int error; - DBUG_ENTER("udf_init"); + DBUG_ENTER("ufd_init"); + char db[]= "mysql"; /* A subject to casednstr, can't be constant */ if (initialized) DBUG_VOID_RETURN; @@ -135,13 +136,12 @@ void udf_init() initialized = 1; new_thd->thread_stack= (char*) &new_thd; new_thd->store_globals(); - new_thd->db= my_strdup("mysql", MYF(0)); - new_thd->db_length=5; + new_thd->set_db(db, sizeof(db)-1); bzero((gptr) &tables,sizeof(tables)); tables.alias= tables.table_name= (char*) "func"; tables.lock_type = TL_READ; - tables.db=new_thd->db; + tables.db= db; if (simple_open_n_lock_tables(new_thd, &tables)) { diff --git a/sql/sql_view.cc b/sql/sql_view.cc index d1e7ba80ecf..94ece79da81 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -452,15 +452,15 @@ bool mysql_create_view(THD *thd, */ for (sl= 
select_lex; sl; sl= sl->next_select()) { - char *db= view->db ? view->db : thd->db; + DBUG_ASSERT(view->db); /* Must be set in the parser */ List_iterator_fast<Item> it(sl->item_list); Item *item; - fill_effective_table_privileges(thd, &view->grant, db, + fill_effective_table_privileges(thd, &view->grant, view->db, view->table_name); while ((item= it++)) { Item_field *fld; - uint priv= (get_column_grant(thd, &view->grant, db, + uint priv= (get_column_grant(thd, &view->grant, view->db, view->table_name, item->name) & VIEW_ANY_ACL); if ((fld= item->filed_for_view_update())) @@ -643,8 +643,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view, if (!parser->ok() || !is_equal(&view_type, parser->type())) { - my_error(ER_WRONG_OBJECT, MYF(0), - (view->db ? view->db : thd->db), view->table_name, "VIEW"); + my_error(ER_WRONG_OBJECT, MYF(0), view->db, view->table_name, "VIEW"); DBUG_RETURN(-1); } diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 0632e2298cd..68aeefc1278 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1594,12 +1594,18 @@ sp_name: } | ident { + THD *thd= YYTHD; + LEX_STRING db; if (check_routine_name($1)) { my_error(ER_SP_WRONG_NAME, MYF(0), $1.str); YYABORT; } - $$= sp_name_current_db_new(YYTHD, $1); + if (thd->copy_db_to(&db.str, &db.length)) + YYABORT; + $$= new sp_name(db, $1); + if ($$) + $$->init_qname(YYTHD); } ; @@ -3184,14 +3190,26 @@ create2: | LIKE table_ident { LEX *lex=Lex; + THD *thd= lex->thd; if (!(lex->like_name= $2)) YYABORT; + if ($2->db.str == NULL && + thd->copy_db_to(&($2->db.str), &($2->db.length))) + { + YYABORT; + } } | '(' LIKE table_ident ')' { LEX *lex=Lex; + THD *thd= lex->thd; if (!(lex->like_name= $3)) YYABORT; + if ($3->db.str == NULL && + thd->copy_db_to(&($3->db.str), &($3->db.length))) + { + YYABORT; + } } ; @@ -4640,8 +4658,10 @@ alter: lex->key_list.empty(); lex->col_list.empty(); lex->select_lex.init_order(); - lex->select_lex.db=lex->name= 0; + lex->name= 0; lex->like_name= 0; + lex->select_lex.db= + ((TABLE_LIST*) lex->select_lex.table_list.first)->db; bzero((char*) &lex->create_info,sizeof(lex->create_info)); lex->create_info.db_type= 0; lex->create_info.default_table_charset= NULL; @@ -4660,8 +4680,11 @@ alter: opt_create_database_options { LEX *lex=Lex; + THD *thd= Lex->thd; lex->sql_command=SQLCOM_ALTER_DB; lex->name= $3; + if (lex->name == NULL && thd->copy_db_to(&lex->name, NULL)) + YYABORT; } | ALTER PROCEDURE sp_name { @@ -5096,14 +5119,20 @@ alter_list_item: | RENAME opt_to table_ident { LEX *lex=Lex; + THD *thd= lex->thd; lex->select_lex.db=$3->db.str; - lex->name= $3->table.str; + if (lex->select_lex.db == NULL && + thd->copy_db_to(&lex->select_lex.db, NULL)) + { + YYABORT; + } if (check_table_name($3->table.str,$3->table.length) || $3->db.str && check_db_name($3->db.str)) { my_error(ER_WRONG_TABLE_NAME, MYF(0), $3->table.str); YYABORT; } + lex->name= $3->table.str; lex->alter_info.flags|= ALTER_RENAME; } | CONVERT_SYM TO_SYM charset charset_name_or_default opt_collate @@ -6449,7 +6478,13 @@ simple_expr: #endif /* HAVE_DLOPEN */ { LEX *lex= Lex; - sp_name *name= sp_name_current_db_new(YYTHD, $1); + THD *thd= lex->thd; + LEX_STRING db; + if (thd->copy_db_to(&db.str, &db.length)) + YYABORT; + sp_name *name= new sp_name(db, $1); + if (name) + name->init_qname(thd); sp_add_used_routine(lex, YYTHD, name, TYPE_ENUM_FUNCTION); if ($4) @@ -10328,7 +10363,9 @@ grant_ident: '*' { LEX *lex= Lex; - lex->current_select->db= lex->thd->db; + THD *thd= lex->thd; + if (thd->copy_db_to(&lex->current_select->db, NULL)) + YYABORT; if 
(lex->grant == GLOBAL_ACLS) lex->grant = DB_ACLS & ~GRANT_ACL; else if (lex->columns.elements) diff --git a/sql/table.cc b/sql/table.cc index a96ca0da881..8bee8bf1598 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1456,7 +1456,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, Create a new field for the key part that matches the index */ field= key_part->field=field->new_field(&outparam->mem_root, - outparam); + outparam, 0); field->field_length= key_part->length; } } diff --git a/sql/table.h b/sql/table.h index 71a199d946e..067c9d15887 100644 --- a/sql/table.h +++ b/sql/table.h @@ -695,7 +695,8 @@ typedef struct st_table_list thr_lock_type lock_type; uint outer_join; /* Which join type */ uint shared; /* Used in multi-upd */ - uint32 db_length, table_name_length; + uint db_length; + uint32 table_name_length; bool updatable; /* VIEW/TABLE can be updated now */ bool straight; /* optimize with prev table */ bool updating; /* for replicate-do/ignore table */ diff --git a/sql/time.cc b/sql/time.cc index ae776a32aab..0461f7723c6 100644 --- a/sql/time.cc +++ b/sql/time.cc @@ -749,6 +749,7 @@ void make_truncated_value_warning(THD *thd, const char *str_val, ER_TRUNCATED_WRONG_VALUE, warn_buff); } +/* Daynumber from year 0 to 9999-12-31 */ #define MAX_DAY_NUMBER 3652424L bool date_add_interval(TIME *ltime, interval_type int_type, INTERVAL interval) @@ -804,7 +805,7 @@ bool date_add_interval(TIME *ltime, interval_type int_type, INTERVAL interval) ltime->hour= (uint) (sec/3600); daynr= calc_daynr(ltime->year,ltime->month,1) + days; /* Day number from year 0 to 9999-12-31 */ - if ((ulonglong) daynr >= MAX_DAY_NUMBER) + if ((ulonglong) daynr > MAX_DAY_NUMBER) goto invalid_date; get_date_from_daynr((long) daynr, &ltime->year, &ltime->month, &ltime->day); @@ -815,7 +816,7 @@ bool date_add_interval(TIME *ltime, interval_type int_type, INTERVAL interval) period= (calc_daynr(ltime->year,ltime->month,ltime->day) + sign * (long) interval.day); /* Daynumber from year 0 to 9999-12-31 */ - if ((ulong) period >= MAX_DAY_NUMBER) + if ((ulong) period > MAX_DAY_NUMBER) goto invalid_date; get_date_from_daynr((long) period,&ltime->year,&ltime->month,&ltime->day); break; diff --git a/sql/tztime.cc b/sql/tztime.cc index f8de9bb48aa..4f6542bd043 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -1560,6 +1560,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) TABLE *table; Tz_names_entry *tmp_tzname; my_bool return_val= 1; + char db[]= "mysql"; int res; DBUG_ENTER("my_tz_init"); @@ -1616,13 +1617,12 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) leap seconds shared by all time zones. */ - thd->db= my_strdup("mysql",MYF(0)); - thd->db_length= 5; // Safety + thd->set_db(db, sizeof(db)-1); bzero((char*) &tables_buff, sizeof(TABLE_LIST)); tables_buff[0].alias= tables_buff[0].table_name= (char*)"time_zone_leap_second"; tables_buff[0].lock_type= TL_READ; - tables_buff[0].db= thd->db; + tables_buff[0].db= db; /* Fill TABLE_LIST for the rest of the time zone describing tables and link it to first one. diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c index d91597e9138..91c04866b5a 100644 --- a/storage/myisam/mi_check.c +++ b/storage/myisam/mi_check.c @@ -1158,13 +1158,14 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend) #ifdef HAVE_RTREE_KEYS (keyinfo->flag & HA_SPATIAL) ? 
rtree_find_first(info, key, info->lastkey, key_length, - SEARCH_SAME) : + MBR_EQUAL | MBR_DATA) : #endif _mi_search(info,keyinfo,info->lastkey,key_length, SEARCH_SAME, info->s->state.key_root[key]); if (search_result) { - mi_check_print_error(param,"Record at: %10s Can't find key for index: %2d", + mi_check_print_error(param,"Record at: %10s " + "Can't find key for index: %2d", llstr(start_recpos,llbuff),key+1); if (error++ > MAXERR || !(param->testflag & T_VERBOSE)) goto err2; diff --git a/storage/myisam/mi_create.c b/storage/myisam/mi_create.c index 22cbde278be..c5a9af08def 100644 --- a/storage/myisam/mi_create.c +++ b/storage/myisam/mi_create.c @@ -60,6 +60,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, my_off_t key_root[MI_MAX_POSSIBLE_KEY],key_del[MI_MAX_KEY_BLOCK_SIZE]; MI_CREATE_INFO tmp_create_info; DBUG_ENTER("mi_create"); + DBUG_PRINT("enter", ("keys: %u columns: %u uniques: %u flags: %u", + keys, columns, uniques, flags)); if (!ci) { @@ -482,6 +484,16 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, uniques * MI_UNIQUEDEF_SIZE + (key_segs + unique_key_parts)*HA_KEYSEG_SIZE+ columns*MI_COLUMNDEF_SIZE); + DBUG_PRINT("info", ("info_length: %u", info_length)); + /* There are only 16 bits for the total header length. */ + if (info_length > 65535) + { + my_printf_error(0, "MyISAM table '%s' has too many columns and/or " + "indexes and/or unique constraints.", + MYF(0), name + dirname_length(name)); + my_errno= HA_WRONG_CREATE_OPTION; + goto err; + } bmove(share.state.header.file_version,(byte*) myisam_file_magic,4); ci->old_options=options| (ci->old_options & HA_OPTION_TEMP_COMPRESS_RECORD ? @@ -650,6 +662,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, errpos=3; } + DBUG_PRINT("info", ("write state info and base info")); if (mi_state_info_write(file, &share.state, 2) || mi_base_info_write(file, &share.base)) goto err; @@ -663,6 +676,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, #endif /* Write key and keyseg definitions */ + DBUG_PRINT("info", ("write key and keyseg definitions")); for (i=0 ; i < share.base.keys - uniques; i++) { uint sp_segs=(keydefs[i].flag & HA_SPATIAL) ? 
2*SPDIMS : 0; @@ -713,6 +727,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, } /* Save unique definition */ + DBUG_PRINT("info", ("write unique definitions")); for (i=0 ; i < share.state.header.uniques ; i++) { HA_KEYSEG *keyseg_end; @@ -743,6 +758,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, goto err; } } + DBUG_PRINT("info", ("write field definitions")); for (i=0 ; i < share.base.fields ; i++) if (mi_recinfo_write(file, &recinfo[i])) goto err; @@ -757,6 +773,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, #endif /* Enlarge files */ + DBUG_PRINT("info", ("enlarge to keystart: %lu", (ulong) share.base.keystart)); if (my_chsize(file,(ulong) share.base.keystart,0,MYF(0))) goto err; diff --git a/storage/myisam/mi_delete_table.c b/storage/myisam/mi_delete_table.c index df0e9deb3ec..b72e97d3215 100644 --- a/storage/myisam/mi_delete_table.c +++ b/storage/myisam/mi_delete_table.c @@ -34,12 +34,24 @@ int mi_delete_table(const char *name) #ifdef USE_RAID { MI_INFO *info; - /* we use 'open_for_repair' to be able to delete a crashed table */ - if (!(info=mi_open(name, O_RDONLY, HA_OPEN_FOR_REPAIR))) - DBUG_RETURN(my_errno); - raid_type = info->s->base.raid_type; - raid_chunks = info->s->base.raid_chunks; - mi_close(info); + /* + When built with RAID support, we need to determine if this table + makes use of the raid feature. If yes, we need to remove all raid + chunks. This is done with my_raid_delete(). Unfortunately it is + necessary to open the table just to check this. We use + 'open_for_repair' to be able to open even a crashed table. If even + this open fails, we assume no raid configuration for this table + and try to remove the normal data file only. This may however + leave the raid chunks behind. + */ + if (!(info= mi_open(name, O_RDONLY, HA_OPEN_FOR_REPAIR))) + raid_type= 0; + else + { + raid_type= info->s->base.raid_type; + raid_chunks= info->s->base.raid_chunks; + mi_close(info); + } } #ifdef EXTRA_DEBUG check_table_is_closed(name,"delete"); diff --git a/storage/myisam/mi_dynrec.c b/storage/myisam/mi_dynrec.c index 9d76a1fb9a5..0487500ad33 100644 --- a/storage/myisam/mi_dynrec.c +++ b/storage/myisam/mi_dynrec.c @@ -1329,6 +1329,9 @@ int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, byte *buf) info->rec_cache.pos_in_file <= block_info.next_filepos && flush_io_cache(&info->rec_cache)) goto err; + /* A corrupted table can have wrong pointers. (Bug# 19835) */ + if (block_info.next_filepos == HA_OFFSET_ERROR) + goto panic; info->rec_cache.seek_not_done=1; if ((b_type=_mi_get_block_info(&block_info,file, block_info.next_filepos)) diff --git a/storage/myisam/mi_rkey.c b/storage/myisam/mi_rkey.c index e6f4d39ab49..a9a8cbacb4b 100644 --- a/storage/myisam/mi_rkey.c +++ b/storage/myisam/mi_rkey.c @@ -68,6 +68,7 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len, if (fast_mi_readinfo(info)) goto err; + if (share->concurrent_insert) rw_rdlock(&share->key_root_lock[inx]); @@ -90,24 +91,35 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len, case HA_KEY_ALG_BTREE: default: if (!_mi_search(info, keyinfo, key_buff, use_key_length, - myisam_read_vec[search_flag], info->s->state.key_root[inx])) + myisam_read_vec[search_flag], info->s->state.key_root[inx])) { - while (info->lastpos >= info->state->data_file_length) + /* + If we are searching for an exact key (including the data pointer) + and this was added by an concurrent insert, + then the result is "key not found". 
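
The new branch above changes what an exact-key hit means under concurrent insert: a row position at or beyond the saved data_file_length belongs to a row appended after this reader took key_root_lock, so the lookup must report "key not found" instead of skipping forward. A compact restatement of that rule (hide_concurrent_insert is not a function in the patch; MyISAM internals per myisamdef.h):

    #include "myisamdef.h"

    /* Returns 1 after flagging "key not found" when the exact-key hit points
       at a row appended after this reader's snapshot of the data file. */
    static int hide_concurrent_insert(MI_INFO *info,
                                      enum ha_rkey_function search_flag)
    {
      if (search_flag == HA_READ_KEY_EXACT &&
          info->lastpos >= info->state->data_file_length)
      {
        my_errno= HA_ERR_KEY_NOT_FOUND;
        info->lastpos= HA_OFFSET_ERROR;
        return 1;
      }
      return 0;
    }
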
+ */ + if ((search_flag == HA_READ_KEY_EXACT) && + (info->lastpos >= info->state->data_file_length)) + { + my_errno= HA_ERR_KEY_NOT_FOUND; + info->lastpos= HA_OFFSET_ERROR; + } + else while (info->lastpos >= info->state->data_file_length) { /* Skip rows that are inserted by other threads since we got a lock Note that this can only happen if we are not searching after an exact key, because the keys are sorted according to position */ - if (_mi_search_next(info, keyinfo, info->lastkey, - info->lastkey_length, - myisam_readnext_vec[search_flag], - info->s->state.key_root[inx])) + info->lastkey_length, + myisam_readnext_vec[search_flag], + info->s->state.key_root[inx])) break; } } } + if (share->concurrent_insert) rw_unlock(&share->key_root_lock[inx]); diff --git a/storage/myisam/rt_index.c b/storage/myisam/rt_index.c index 97554dca4e6..1806476dc39 100644 --- a/storage/myisam/rt_index.c +++ b/storage/myisam/rt_index.c @@ -183,9 +183,11 @@ int rtree_find_first(MI_INFO *info, uint keynr, uchar *key, uint key_length, return -1; } - /* Save searched key */ - memcpy(info->first_mbr_key, key, keyinfo->keylength - - info->s->base.rec_reflength); + /* + Save searched key, include data pointer. + The data pointer is required if the search_flag contains MBR_DATA. + */ + memcpy(info->first_mbr_key, key, keyinfo->keylength); info->last_rkey_length = key_length; info->rtree_recursion_depth = -1; diff --git a/storage/myisam/rt_mbr.c b/storage/myisam/rt_mbr.c index c43daec2f7c..897862c1c9a 100644 --- a/storage/myisam/rt_mbr.c +++ b/storage/myisam/rt_mbr.c @@ -52,10 +52,14 @@ if (EQUAL_CMP(amin, amax, bmin, bmax)) \ return 1; \ } \ - else /* if (nextflag & MBR_DISJOINT) */ \ + else if (nextflag & MBR_DISJOINT) \ { \ if (DISJOINT_CMP(amin, amax, bmin, bmax)) \ return 1; \ + }\ + else /* if unknown comparison operator */ \ + { \ + DBUG_ASSERT(0); \ } #define RT_CMP_KORR(type, korr_func, len, nextflag) \ diff --git a/storage/ndb/include/kernel/AttributeHeader.hpp b/storage/ndb/include/kernel/AttributeHeader.hpp index b17bb456bf0..b78b5912bec 100644 --- a/storage/ndb/include/kernel/AttributeHeader.hpp +++ b/storage/ndb/include/kernel/AttributeHeader.hpp @@ -39,12 +39,13 @@ public: STATIC_CONST( RANGE_NO = 0xFFFB ); // Read range no (when batched ranges) STATIC_CONST( ROW_SIZE = 0xFFFA ); - STATIC_CONST( FRAGMENT_MEMORY= 0xFFF9 ); + STATIC_CONST( FRAGMENT_FIXED_MEMORY= 0xFFF9 ); STATIC_CONST( RECORDS_IN_RANGE = 0xFFF8 ); STATIC_CONST( DISK_REF = 0xFFF7 ); STATIC_CONST( ROWID = 0xFFF6 ); STATIC_CONST( ROW_GCI = 0xFFF5 ); + STATIC_CONST( FRAGMENT_VARSIZED_MEMORY = 0xFFF4 ); // NOTE: in 5.1 ctors and init take size in bytes diff --git a/storage/ndb/include/mgmapi/ndbd_exit_codes.h b/storage/ndb/include/mgmapi/ndbd_exit_codes.h index b16f1a63a8d..79df36e7955 100644 --- a/storage/ndb/include/mgmapi/ndbd_exit_codes.h +++ b/storage/ndb/include/mgmapi/ndbd_exit_codes.h @@ -71,6 +71,7 @@ typedef ndbd_exit_classification_enum ndbd_exit_classification; #define NDBD_EXIT_INDEX_NOTINRANGE 2304 #define NDBD_EXIT_ARBIT_SHUTDOWN 2305 #define NDBD_EXIT_POINTER_NOTINRANGE 2306 +#define NDBD_EXIT_PARTITIONED_SHUTDOWN 2307 #define NDBD_EXIT_SR_OTHERNODEFAILED 2308 #define NDBD_EXIT_NODE_NOT_DEAD 2309 #define NDBD_EXIT_SR_REDOLOG 2310 diff --git a/storage/ndb/include/ndbapi/Ndb.hpp b/storage/ndb/include/ndbapi/Ndb.hpp index dcd03cdc467..07f11f6e78a 100644 --- a/storage/ndb/include/ndbapi/Ndb.hpp +++ b/storage/ndb/include/ndbapi/Ndb.hpp @@ -1553,6 +1553,7 @@ private: const char* aCatalogName, const char* aSchemaName); void 
connected(Uint32 block_reference); + void report_node_connected(Uint32 nodeId); NdbTransaction* startTransactionLocal(Uint32 aPrio, Uint32 aFragmentId); diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp index ea4a2a9ca29..35b0d927bda 100644 --- a/storage/ndb/include/ndbapi/NdbDictionary.hpp +++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp @@ -525,7 +525,8 @@ public: const char* getDefaultValue() const; static const Column * FRAGMENT; - static const Column * FRAGMENT_MEMORY; + static const Column * FRAGMENT_FIXED_MEMORY; + static const Column * FRAGMENT_VARSIZED_MEMORY; static const Column * ROW_COUNT; static const Column * COMMIT_COUNT; static const Column * ROW_SIZE; diff --git a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp index 7304a46a278..a96d0de0560 100644 --- a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp +++ b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp @@ -365,6 +365,8 @@ const GsnName SignalNames [] = { ,{ GSN_EVENT_SUBSCRIBE_REF, "EVENT_SUBSCRIBE_REF" } ,{ GSN_DUMP_STATE_ORD, "DUMP_STATE_ORD" } + ,{ GSN_NODE_START_REP, "NODE_START_REP" } + ,{ GSN_START_INFOREQ, "START_INFOREQ" } ,{ GSN_START_INFOREF, "START_INFOREF" } ,{ GSN_START_INFOCONF, "START_INFOCONF" } diff --git a/storage/ndb/src/kernel/blocks/dbdict/DictLock.txt b/storage/ndb/src/kernel/blocks/dbdict/DictLock.txt index 17f24119e9d..72e23ed15a5 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/DictLock.txt +++ b/storage/ndb/src/kernel/blocks/dbdict/DictLock.txt @@ -85,10 +85,14 @@ DIH/s START_MECONF DIH/s -* sp7 - release DICT lock +* (copy data, omitted) -DIH/s - DICT_UNLOCK_ORD - DICT/m +* SL_STARTED - release DICT lock + +CNTR/s + NODE_START_REP + DIH/s + DICT_UNLOCK_ORD + DICT/m # vim: set et sw=4: diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp index 46effed867f..9d9ea6af2f5 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp +++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp @@ -1609,6 +1609,9 @@ private: void dump_replica_info(); + // DIH specifics for execNODE_START_REP (sendDictUnlockOrd) + void exec_node_start_rep(Signal* signal); + /* * Lock master DICT. Only current use is by starting node * during NR. A pool of slave records is convenient anyway. diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index c265f54bf30..0595c018b2e 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -1387,24 +1387,6 @@ void Dbdih::execNDB_STTOR(Signal* signal) } ndbrequire(false); break; - case ZNDB_SPH7: - jam(); - switch (typestart) { - case NodeState::ST_INITIAL_START: - case NodeState::ST_SYSTEM_RESTART: - jam(); - ndbsttorry10Lab(signal, __LINE__); - return; - case NodeState::ST_NODE_RESTART: - case NodeState::ST_INITIAL_NODE_RESTART: - jam(); - sendDictUnlockOrd(signal, c_dictLockSlavePtrI_nodeRestart); - c_dictLockSlavePtrI_nodeRestart = RNIL; - ndbsttorry10Lab(signal, __LINE__); - return; - } - ndbrequire(false); - break; default: jam(); ndbsttorry10Lab(signal, __LINE__); @@ -1413,6 +1395,27 @@ void Dbdih::execNDB_STTOR(Signal* signal) }//Dbdih::execNDB_STTOR() void +Dbdih::exec_node_start_rep(Signal* signal) +{ + /* + * Send DICT_UNLOCK_ORD when this node is SL_STARTED. 
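
NODE_START_REP reaches DBDIH through a default-empty virtual hook on SimulatedBlock (see the SimulatedBlock.cpp and SimulatedBlock.hpp hunks further down). A stripped-down sketch of that shape, with hypothetical class names; the idempotent guard mirrors the c_dictLockSlavePtrI_nodeRestart check:

    #include <cstdio>

    class BlockSketch {
    public:
      virtual ~BlockSketch() {}
      void execNODE_START_REP() {           // common entry point, all blocks
        exec_node_start_rep();              // block-specific extension point
      }
    protected:
      virtual void exec_node_start_rep() {} // default: nothing to do
    };

    class DihSketch : public BlockSketch {
    public:
      DihSketch() : m_lock_ptr(RNIL_) {}
    protected:
      virtual void exec_node_start_rep() {
        if (m_lock_ptr != RNIL_) {          // a second REP is then a no-op
          std::printf("DICT_UNLOCK_ORD for slave record %u\n", m_lock_ptr);
          m_lock_ptr = RNIL_;
        }
      }
    private:
      static const unsigned RNIL_ = 0xffffff00u; // stands in for NDB's RNIL
      unsigned m_lock_ptr;
    };
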
+ * + * Sending it before (sp 7) conflicts with code which assumes + * SL_STARTING means we are in copy phase of NR. + * + * NodeState::starting.restartType is not supposed to be used + * when SL_STARTED. Also it seems NODE_START_REP can arrive twice. + * + * For these reasons there are no consistency checks and + * we rely on c_dictLockSlavePtrI_nodeRestart alone. + */ + if (c_dictLockSlavePtrI_nodeRestart != RNIL) { + sendDictUnlockOrd(signal, c_dictLockSlavePtrI_nodeRestart); + c_dictLockSlavePtrI_nodeRestart = RNIL; + } +} + +void Dbdih::createMutexes(Signal * signal, Uint32 count){ Callback c = { safe_cast(&Dbdih::createMutex_done), count }; @@ -1636,6 +1639,7 @@ void Dbdih::nodeRestartPh2Lab(Signal* signal) void Dbdih::recvDictLockConf_nodeRestart(Signal* signal, Uint32 data, Uint32 ret) { ndbrequire(c_dictLockSlavePtrI_nodeRestart == RNIL); + ndbrequire(data != RNIL); c_dictLockSlavePtrI_nodeRestart = data; nodeRestartPh2Lab2(signal); diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index c1d4175833e..be52e06eb81 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -238,6 +238,7 @@ class Dbtup; #define ZSCAN_MARKERS 18 #define ZOPERATION_EVENT_REP 19 #define ZPREP_DROP_TABLE 20 +#define ZENABLE_EXPAND_CHECK 21 /* ------------------------------------------------------------------------- */ /* NODE STATE DURING SYSTEM RESTART, VARIABLES CNODES_SR_STATE */ diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 3890fb69b2e..695580d556c 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -416,6 +416,35 @@ void Dblqh::execCONTINUEB(Signal* signal) checkDropTab(signal); return; break; + case ZENABLE_EXPAND_CHECK: + { + jam(); + fragptr.i = signal->theData[1]; + if (fragptr.i != RNIL) + { + jam(); + c_redo_complete_fragments.getPtr(fragptr); + signal->theData[0] = fragptr.p->tabRef; + signal->theData[1] = fragptr.p->fragId; + sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB); + + c_redo_complete_fragments.next(fragptr); + signal->theData[0] = ZENABLE_EXPAND_CHECK; + signal->theData[1] = fragptr.i; + sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 2, JBB); + return; + } + else + { + jam(); + c_redo_complete_fragments.remove(); + StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend(); + conf->startingNodeId = getOwnNodeId(); + sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal, + StartRecConf::SignalLength, JBB); + return; + } + } default: ndbrequire(false); break; @@ -469,6 +498,7 @@ void Dblqh::execSTTOR(Signal* signal) csignalKey = signal->theData[6]; #if defined VM_TRACE || defined ERROR_INSERT || defined NDBD_TRACENR char *name; + FILE *out = 0; #endif switch (tstartPhase) { case ZSTART_PHASE1: @@ -480,8 +510,14 @@ void Dblqh::execSTTOR(Signal* signal) sendsttorryLab(signal); #if defined VM_TRACE || defined ERROR_INSERT || defined NDBD_TRACENR - name = NdbConfig_SignalLogFileName(getOwnNodeId()); - tracenrout = new NdbOut(* new FileOutputStream(fopen(name, "w+"))); +#ifdef VM_TRACE + out = globalSignalLoggers.getOutputStream(); +#endif + if (out == 0) { + name = NdbConfig_SignalLogFileName(getOwnNodeId()); + out = fopen(name, "a"); + } + tracenrout = new NdbOut(* new FileOutputStream(out)); #endif #ifdef ERROR_INSERT @@ -15658,24 +15694,23 @@ void Dblqh::srFourthComp(Signal* signal) } else if ((cstartType == 
NodeState::ST_NODE_RESTART) || (cstartType == NodeState::ST_SYSTEM_RESTART)) { jam(); - - + if(cstartType == NodeState::ST_SYSTEM_RESTART) + { + jam(); + if (c_redo_complete_fragments.first(fragptr)) + { + jam(); + signal->theData[0] = ZENABLE_EXPAND_CHECK; + signal->theData[1] = fragptr.i; + sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 2, JBB); + return; + } + } StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend(); conf->startingNodeId = getOwnNodeId(); sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal, - StartRecConf::SignalLength, JBB); - - if(cstartType == NodeState::ST_SYSTEM_RESTART){ - c_redo_complete_fragments.first(fragptr); - while(fragptr.i != RNIL){ - signal->theData[0] = fragptr.p->tabRef; - signal->theData[1] = fragptr.p->fragId; - sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB); - c_redo_complete_fragments.next(fragptr); - } - c_redo_complete_fragments.remove(); - } + StartRecConf::SignalLength, JBB); } else { ndbrequire(false); }//if diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index 9bc916c8c22..3cf62fe08ec 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -604,6 +604,7 @@ struct Fragrecord { Uint32 currentPageRange; Uint32 rootPageRange; Uint32 noOfPages; + Uint32 noOfVarPages; Uint32 noOfPagesToGrow; DLList<Page>::Head emptyPrimPage; // allocated pages (not init) diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp index 90fdd8c69d7..82bac432545 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp @@ -351,6 +351,7 @@ void Dbtup::initFragRange(Fragrecord* const regFragPtr) regFragPtr->rootPageRange = RNIL; regFragPtr->currentPageRange = RNIL; regFragPtr->noOfPages = 0; + regFragPtr->noOfVarPages = 0; regFragPtr->noOfPagesToGrow = 2; regFragPtr->nextStartRange = 0; }//initFragRange() diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp index 940ccf54ba7..677eff53559 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp @@ -1135,13 +1135,20 @@ Dbtup::read_pseudo(Uint32 attrId, case AttributeHeader::FRAGMENT: * outBuffer = fragptr.p->fragmentId; return 1; - case AttributeHeader::FRAGMENT_MEMORY: - { - Uint64 tmp= fragptr.p->noOfPages; - tmp*= 32768; - memcpy(outBuffer,&tmp,8); - } - return 2; + case AttributeHeader::FRAGMENT_FIXED_MEMORY: + { + Uint64 tmp= fragptr.p->noOfPages; + tmp*= 32768; + memcpy(outBuffer,&tmp,8); + } + return 2; + case AttributeHeader::FRAGMENT_VARSIZED_MEMORY: + { + Uint64 tmp= fragptr.p->noOfVarPages; + tmp*= 32768; + memcpy(outBuffer,&tmp,8); + } + return 2; case AttributeHeader::ROW_SIZE: * outBuffer = tabptr.p->m_offsets[MM].m_fix_header_size << 2; return 1; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp index 52ab66b5c0e..5f6dd68956a 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp @@ -302,6 +302,7 @@ Dbtup::get_empty_var_page(Fragrecord* fragPtr) Uint32 cnt; allocConsPages(10, cnt, ptr.i); + fragPtr->noOfVarPages+= cnt; if (unlikely(cnt == 0)) { return RNIL; diff --git a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp index 
70c0fdfc988..de080237668 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp +++ b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp @@ -248,6 +248,7 @@ private: void execAPI_FAILCONF(Signal* signal); void execREAD_NODESREQ(Signal* signal); void execSET_VAR_REQ(Signal* signal); + void execAPI_FAILREQ(Signal* signal); void execREAD_NODESREF(Signal* signal); void execREAD_NODESCONF(Signal* signal); diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp index 6ee24561b0a..8ec5e681045 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp +++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp @@ -81,6 +81,7 @@ Qmgr::Qmgr(Block_context& ctx) addRecSignal(GSN_API_REGREQ, &Qmgr::execAPI_REGREQ); addRecSignal(GSN_API_VERSION_REQ, &Qmgr::execAPI_VERSION_REQ); addRecSignal(GSN_DISCONNECT_REP, &Qmgr::execDISCONNECT_REP); + addRecSignal(GSN_API_FAILREQ, &Qmgr::execAPI_FAILREQ); addRecSignal(GSN_API_FAILCONF, &Qmgr::execAPI_FAILCONF); addRecSignal(GSN_READ_NODESREQ, &Qmgr::execREAD_NODESREQ); addRecSignal(GSN_SET_VAR_REQ, &Qmgr::execSET_VAR_REQ); diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index 1eac369ec65..0da2de3b7a2 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -438,6 +438,7 @@ void Qmgr::execCONNECT_REP(Signal* signal) void Qmgr::execREAD_NODESCONF(Signal* signal) { + jamEntry(); check_readnodes_reply(signal, refToNode(signal->getSendersBlockRef()), GSN_READ_NODESCONF); @@ -446,6 +447,7 @@ Qmgr::execREAD_NODESCONF(Signal* signal) void Qmgr::execREAD_NODESREF(Signal* signal) { + jamEntry(); check_readnodes_reply(signal, refToNode(signal->getSendersBlockRef()), GSN_READ_NODESREF); @@ -907,9 +909,9 @@ retry: char buf[255]; BaseString::snprintf(buf, sizeof(buf), - "Partitioned cluster! 
check StartPartialTimeout, " - " node %d thinks %d is president, " - " I think president is: %d", + "check StartPartialTimeout, " + "node %d thinks %d is president, " + "I think president is: %d", nodeId, president, cpresident); ndbout_c(buf); @@ -941,7 +943,7 @@ retry: CRASH_INSERTION(932); progError(__LINE__, - NDBD_EXIT_ARBIT_SHUTDOWN, + NDBD_EXIT_PARTITIONED_SHUTDOWN, buf); ndbrequire(false); @@ -2338,6 +2340,8 @@ void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo) ndbrequire(failedNodePtr.p->failState == NORMAL); failedNodePtr.p->failState = WAITING_FOR_FAILCONF1; + NodeReceiverGroup rg(QMGR, c_clusterNodes); + sendSignal(rg, GSN_API_FAILREQ, signal, 2, JBA); sendSignal(DBTC_REF, GSN_API_FAILREQ, signal, 2, JBA); sendSignal(DBDICT_REF, GSN_API_FAILREQ, signal, 2, JBA); sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA); @@ -2361,6 +2365,27 @@ void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo) CloseComReqConf::SignalLength, JBA); }//Qmgr::sendApiFailReq() +void Qmgr::execAPI_FAILREQ(Signal* signal) +{ + jamEntry(); + NodeRecPtr failedNodePtr; + failedNodePtr.i = signal->theData[0]; + // signal->theData[1] == QMGR_REF + ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec); + + ndbrequire(getNodeInfo(failedNodePtr.i).getType() != NodeInfo::DB); + + // ignore if api not active + if (failedNodePtr.p->phase != ZAPI_ACTIVE) + return; + + signal->theData[0] = NDB_LE_Disconnected; + signal->theData[1] = failedNodePtr.i; + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); + + node_failed(signal, failedNodePtr.i); +} + void Qmgr::execAPI_FAILCONF(Signal* signal) { NodeRecPtr failedNodePtr; @@ -2798,7 +2823,7 @@ void Qmgr::failReportLab(Signal* signal, Uint16 aFailedNode, break; case FailRep::ZPARTITIONED_CLUSTER: { - code = NDBD_EXIT_ARBIT_SHUTDOWN; + code = NDBD_EXIT_PARTITIONED_SHUTDOWN; char buf1[100], buf2[100]; c_clusterNodes.getText(buf1); if (signal->getLength()== FailRep::SignalLength + FailRep::ExtraLength && @@ -2809,16 +2834,14 @@ void Qmgr::failReportLab(Signal* signal, Uint16 aFailedNode, part.assign(NdbNodeBitmask::Size, rep->partition); part.getText(buf2); BaseString::snprintf(extra, sizeof(extra), - "Partitioned cluster!" - " Our cluster: %s other cluster: %s", + "Our cluster: %s other cluster: %s", buf1, buf2); } else { jam(); BaseString::snprintf(extra, sizeof(extra), - "Partitioned cluster!" 
- " Our cluster: %s ", buf1); + "Our cluster: %s", buf1); } msg = extra; break; diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp index 42666a9e5d9..2b746fdbdd8 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp @@ -1445,12 +1445,13 @@ Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr) tabPtr.p->m_error = 0; tabPtr.p->m_schemaVersion = RNIL; tabPtr.p->m_state = Table::DEFINING; - tabPtr.p->m_hasTriggerDefined[0] = 0; - tabPtr.p->m_hasTriggerDefined[1] = 0; - tabPtr.p->m_hasTriggerDefined[2] = 0; - tabPtr.p->m_triggerIds[0] = ILLEGAL_TRIGGER_ID; - tabPtr.p->m_triggerIds[1] = ILLEGAL_TRIGGER_ID; - tabPtr.p->m_triggerIds[2] = ILLEGAL_TRIGGER_ID; + tabPtr.p->m_drop_subbPtr.p = 0; + for (int j= 0; j < 3; j++) + { + tabPtr.p->m_hasTriggerDefined[j] = 0; + tabPtr.p->m_hasOutstandingTriggerReq[j] = 0; + tabPtr.p->m_triggerIds[j] = ILLEGAL_TRIGGER_ID; + } c_tables.add(tabPtr); @@ -2491,6 +2492,13 @@ Suma::execSUB_STOP_REQ(Signal* signal){ DBUG_VOID_RETURN; } + if (tabPtr.p->m_drop_subbPtr.p != 0) { + jam(); + DBUG_PRINT("error", ("table locked")); + sendSubStopRef(signal, 1420); + DBUG_VOID_RETURN; + } + DBUG_PRINT("info",("subscription: %u tableId: %u[i=%u] id: %u key: %u", subPtr.i, subPtr.p->m_tableId, tabPtr.i, subPtr.p->m_subscriptionId,subPtr.p->m_subscriptionKey)); @@ -2543,7 +2551,7 @@ Suma::execSUB_STOP_REQ(Signal* signal){ subPtr.p->m_senderRef = senderRef; // store ref to requestor subPtr.p->m_senderData = senderData; // store ref to requestor - tabPtr.p->m_drop_subbPtr= subbPtr; + tabPtr.p->m_drop_subbPtr = subbPtr; if (subPtr.p->m_state == Subscription::DEFINED) { @@ -2560,6 +2568,7 @@ Suma::execSUB_STOP_REQ(Signal* signal){ tabPtr.p->m_tableId, tabPtr.p->n_subscribers)); tabPtr.p->checkRelease(*this); sendSubStopComplete(signal, tabPtr.p->m_drop_subbPtr); + tabPtr.p->m_drop_subbPtr.p = 0; } else { @@ -2667,7 +2676,8 @@ Suma::reportAllSubscribers(Signal *signal, { SubTableData * data = (SubTableData*)signal->getDataPtrSend(); - if (table_event == NdbDictionary::Event::_TE_SUBSCRIBE) + if (table_event == NdbDictionary::Event::_TE_SUBSCRIBE && + !c_startup.m_restart_server_node_id) { data->gci = m_last_complete_gci + 1; data->tableId = subPtr.p->m_tableId; @@ -2893,6 +2903,9 @@ Suma::Table::dropTrigger(Signal* signal,Suma& suma) jam(); DBUG_ENTER("Suma::dropTrigger"); + m_hasOutstandingTriggerReq[0] = + m_hasOutstandingTriggerReq[1] = + m_hasOutstandingTriggerReq[2] = 1; for(Uint32 j = 0; j<3; j++){ jam(); suma.suma_ndbrequire(m_triggerIds[j] != ILLEGAL_TRIGGER_ID); @@ -2971,14 +2984,18 @@ Suma::Table::runDropTrigger(Signal* signal, suma.suma_ndbrequire(type < 3); suma.suma_ndbrequire(m_triggerIds[type] == triggerId); + suma.suma_ndbrequire(m_hasTriggerDefined[type] > 0); + suma.suma_ndbrequire(m_hasOutstandingTriggerReq[type] == 1); m_hasTriggerDefined[type]--; + m_hasOutstandingTriggerReq[type] = 0; if (m_hasTriggerDefined[type] == 0) { jam(); m_triggerIds[type] = ILLEGAL_TRIGGER_ID; } - if( m_hasTriggerDefined[0] != m_hasTriggerDefined[1] || - m_hasTriggerDefined[0] != m_hasTriggerDefined[2]) + if( m_hasOutstandingTriggerReq[0] || + m_hasOutstandingTriggerReq[1] || + m_hasOutstandingTriggerReq[2]) { // more to come jam(); @@ -2996,6 +3013,7 @@ Suma::Table::runDropTrigger(Signal* signal, checkRelease(suma); suma.sendSubStopComplete(signal, m_drop_subbPtr); + m_drop_subbPtr.p = 0; } void Suma::suma_ndbrequire(bool v) { ndbrequire(v); } @@ -3550,13 +3568,17 @@ 
Suma::execDROP_TAB_CONF(Signal *signal) DBUG_PRINT("info",("drop table id: %d[i=%u]", tableId, tabPtr.i)); tabPtr.p->m_state = Table::DROPPED; - tabPtr.p->m_hasTriggerDefined[0] = 0; - tabPtr.p->m_hasTriggerDefined[1] = 0; - tabPtr.p->m_hasTriggerDefined[2] = 0; - tabPtr.p->m_triggerIds[0] = ILLEGAL_TRIGGER_ID; - tabPtr.p->m_triggerIds[1] = ILLEGAL_TRIGGER_ID; - tabPtr.p->m_triggerIds[2] = ILLEGAL_TRIGGER_ID; - + for (int j= 0; j < 3; j++) + { + if (!tabPtr.p->m_hasOutstandingTriggerReq[j]) + { + tabPtr.p->m_hasTriggerDefined[j] = 0; + tabPtr.p->m_hasOutstandingTriggerReq[j] = 0; + tabPtr.p->m_triggerIds[j] = ILLEGAL_TRIGGER_ID; + } + else + tabPtr.p->m_hasTriggerDefined[j] = 1; + } if (senderRef == 0) { DBUG_VOID_RETURN; diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.hpp b/storage/ndb/src/kernel/blocks/suma/Suma.hpp index 51f5fa4a8c8..4408d6aff8d 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.hpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.hpp @@ -301,7 +301,8 @@ public: union { Uint32 m_tableId; Uint32 key; }; Uint32 m_schemaVersion; - Uint32 m_hasTriggerDefined[3]; // Insert/Update/Delete + Uint8 m_hasTriggerDefined[3]; // Insert/Update/Delete + Uint8 m_hasOutstandingTriggerReq[3]; // Insert/Update/Delete Uint32 m_triggerIds[3]; // Insert/Update/Delete Uint32 m_error; diff --git a/storage/ndb/src/kernel/error/ndbd_exit_codes.c b/storage/ndb/src/kernel/error/ndbd_exit_codes.c index 172125c35a1..2c32c31a15f 100644 --- a/storage/ndb/src/kernel/error/ndbd_exit_codes.c +++ b/storage/ndb/src/kernel/error/ndbd_exit_codes.c @@ -54,6 +54,8 @@ static const ErrStruct errArray[] = {NDBD_EXIT_ARBIT_SHUTDOWN, XAE, "Node lost connection to other nodes and " "can not form a unpartitioned cluster, please investigate if there are " "error(s) on other node(s)"}, + {NDBD_EXIT_PARTITIONED_SHUTDOWN, XAE, "Partitioned cluster detected. 
" + "Please check if cluster is already running"}, {NDBD_EXIT_POINTER_NOTINRANGE, XIE, "Pointer too large"}, {NDBD_EXIT_SR_OTHERNODEFAILED, XRE, "Another node failed during system " "restart, please investigate error(s) on other node(s)"}, diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp index 1de47197867..4e01038d343 100644 --- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp +++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp @@ -921,6 +921,15 @@ SimulatedBlock::execCONTINUE_FRAGMENTED(Signal * signal){ void SimulatedBlock::execNODE_START_REP(Signal* signal) { + // common stuff for all blocks + + // block specific stuff by virtual method override (default empty) + exec_node_start_rep(signal); +} + +void +SimulatedBlock::exec_node_start_rep(Signal* signal) +{ } void diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp index ab698f7826c..3e90b20705e 100644 --- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp +++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp @@ -446,7 +446,8 @@ private: void execCONTINUE_FRAGMENTED(Signal* signal); void execAPI_START_REP(Signal* signal); void execNODE_START_REP(Signal* signal); - + virtual void exec_node_start_rep(Signal* signal); + Uint32 c_fragmentIdCounter; ArrayPool<FragmentInfo> c_fragmentInfoPool; DLHashTable<FragmentInfo> c_fragmentInfoHash; diff --git a/storage/ndb/src/ndbapi/ClusterMgr.cpp b/storage/ndb/src/ndbapi/ClusterMgr.cpp index 63fdb73c49f..49815ae6c13 100644 --- a/storage/ndb/src/ndbapi/ClusterMgr.cpp +++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp @@ -396,6 +396,8 @@ ClusterMgr::execNF_COMPLETEREP(const Uint32 * theData){ void ClusterMgr::reportConnected(NodeId nodeId){ + DBUG_ENTER("ClusterMgr::reportConnected"); + DBUG_PRINT("info", ("nodeId: %u", nodeId)); /** * Ensure that we are sending heartbeat every 100 ms * until we have got the first reply from NDB providing @@ -421,6 +423,7 @@ ClusterMgr::reportConnected(NodeId nodeId){ theNode.nfCompleteRep = true; theFacade.ReportNodeAlive(nodeId); + DBUG_VOID_RETURN; } void diff --git a/storage/ndb/src/ndbapi/DictCache.cpp b/storage/ndb/src/ndbapi/DictCache.cpp index 8a0bf2f8e8b..c06bb6fc62a 100644 --- a/storage/ndb/src/ndbapi/DictCache.cpp +++ b/storage/ndb/src/ndbapi/DictCache.cpp @@ -312,12 +312,15 @@ GlobalDictCache::invalidate_all() if (vers->size()) { TableVersion * ver = & vers->back(); - ver->m_impl->m_status = NdbDictionary::Object::Invalid; - ver->m_status = DROPPED; - if (ver->m_refCount == 0) + if (ver->m_status != RETREIVING) { - delete ver->m_impl; - vers->erase(vers->size() - 1); + ver->m_impl->m_status = NdbDictionary::Object::Invalid; + ver->m_status = DROPPED; + if (ver->m_refCount == 0) + { + delete ver->m_impl; + vers->erase(vers->size() - 1); + } } } curr = m_tableHash.getNext(curr); diff --git a/storage/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp index 5b0a9e9d330..5eddbc35665 100644 --- a/storage/ndb/src/ndbapi/Ndb.cpp +++ b/storage/ndb/src/ndbapi/Ndb.cpp @@ -1025,14 +1025,19 @@ int Ndb::initAutoIncrement() setDatabaseName("sys"); setDatabaseSchemaName("def"); - m_sys_tab_0 = getDictionary()->getTableGlobal("SYSTAB_0"); + m_sys_tab_0 = theDictionary->getTableGlobal("SYSTAB_0"); // Restore current name space setDatabaseName(currentDb.c_str()); setDatabaseSchemaName(currentSchema.c_str()); + if (m_sys_tab_0 == NULL) { + assert(theDictionary->m_error.code != 0); + theError.code = theDictionary->m_error.code; + return -1; + } - return (m_sys_tab_0 == NULL); + 
return 0; } int @@ -1043,19 +1048,19 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, Uint32 aTableId = table->m_id; DBUG_PRINT("enter", ("table=%u value=%llu op=%u", aTableId, opValue, op)); - NdbTransaction* tConnection; - NdbOperation* tOperation= 0; // Compiler warning if not initialized + NdbTransaction* tConnection = NULL; + NdbOperation* tOperation = NULL; Uint64 tValue; NdbRecAttr* tRecAttrResult; - CHECK_STATUS_MACRO_ZERO; + CHECK_STATUS_MACRO; - if (initAutoIncrement()) - goto error_return; + if (initAutoIncrement() == -1) + goto error_handler; tConnection = this->startTransaction(); if (tConnection == NULL) - goto error_return; + goto error_handler; tOperation = tConnection->getNdbOperation(m_sys_tab_0); if (tOperation == NULL) @@ -1065,7 +1070,7 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, { case 0: tOperation->interpretedUpdateTuple(); - tOperation->equal("SYSKEY_0", aTableId ); + tOperation->equal("SYSKEY_0", aTableId); tOperation->incValue("NEXTID", opValue); tRecAttrResult = tOperation->getValue("NEXTID"); @@ -1130,14 +1135,21 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, DBUG_RETURN(0); - error_handler: - theError.code = tConnection->theError.code; - this->closeTransaction(tConnection); - error_return: +error_handler: DBUG_PRINT("error", ("ndb=%d con=%d op=%d", theError.code, - tConnection ? tConnection->theError.code : -1, - tOperation ? tOperation->theError.code : -1)); + tConnection != NULL ? tConnection->theError.code : -1, + tOperation != NULL ? tOperation->theError.code : -1)); + + if (theError.code == 0 && tConnection != NULL) + theError.code = tConnection->theError.code; + if (theError.code == 0 && tOperation != NULL) + theError.code = tOperation->theError.code; + DBUG_ASSERT(theError.code != 0); + + if (tConnection != NULL) + this->closeTransaction(tConnection); + DBUG_RETURN(-1); } diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 1e33a843a42..b9c03f0b209 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -328,9 +328,14 @@ NdbColumnImpl::create_pseudo(const char * name){ col->m_impl.m_attrId = AttributeHeader::FRAGMENT; col->m_impl.m_attrSize = 4; col->m_impl.m_arraySize = 1; - } else if(!strcmp(name, "NDB$FRAGMENT_MEMORY")){ + } else if(!strcmp(name, "NDB$FRAGMENT_FIXED_MEMORY")){ col->setType(NdbDictionary::Column::Bigunsigned); - col->m_impl.m_attrId = AttributeHeader::FRAGMENT_MEMORY; + col->m_impl.m_attrId = AttributeHeader::FRAGMENT_FIXED_MEMORY; + col->m_impl.m_attrSize = 8; + col->m_impl.m_arraySize = 1; + } else if(!strcmp(name, "NDB$FRAGMENT_VARSIZED_MEMORY")){ + col->setType(NdbDictionary::Column::Bigunsigned); + col->m_impl.m_attrId = AttributeHeader::FRAGMENT_VARSIZED_MEMORY; col->m_impl.m_attrSize = 8; col->m_impl.m_arraySize = 1; } else if(!strcmp(name, "NDB$ROW_COUNT")){ @@ -1316,7 +1321,8 @@ NdbDictionaryImpl::~NdbDictionaryImpl() m_globalHash->lock(); if(--f_dictionary_count == 0){ delete NdbDictionary::Column::FRAGMENT; - delete NdbDictionary::Column::FRAGMENT_MEMORY; + delete NdbDictionary::Column::FRAGMENT_FIXED_MEMORY; + delete NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY; delete NdbDictionary::Column::ROW_COUNT; delete NdbDictionary::Column::COMMIT_COUNT; delete NdbDictionary::Column::ROW_SIZE; @@ -1326,7 +1332,8 @@ NdbDictionaryImpl::~NdbDictionaryImpl() delete NdbDictionary::Column::ROWID; delete NdbDictionary::Column::ROW_GCI; NdbDictionary::Column::FRAGMENT= 0; - NdbDictionary::Column::FRAGMENT_MEMORY= 
0; + NdbDictionary::Column::FRAGMENT_FIXED_MEMORY= 0; + NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY= 0; NdbDictionary::Column::ROW_COUNT= 0; NdbDictionary::Column::COMMIT_COUNT= 0; NdbDictionary::Column::ROW_SIZE= 0; @@ -1483,8 +1490,10 @@ NdbDictionaryImpl::setTransporter(class Ndb* ndb, if(f_dictionary_count++ == 0){ NdbDictionary::Column::FRAGMENT= NdbColumnImpl::create_pseudo("NDB$FRAGMENT"); - NdbDictionary::Column::FRAGMENT_MEMORY= - NdbColumnImpl::create_pseudo("NDB$FRAGMENT_MEMORY"); + NdbDictionary::Column::FRAGMENT_FIXED_MEMORY= + NdbColumnImpl::create_pseudo("NDB$FRAGMENT_FIXED_MEMORY"); + NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY= + NdbColumnImpl::create_pseudo("NDB$FRAGMENT_VARSIZED_MEMORY"); NdbDictionary::Column::ROW_COUNT= NdbColumnImpl::create_pseudo("NDB$ROW_COUNT"); NdbDictionary::Column::COMMIT_COUNT= @@ -5041,7 +5050,8 @@ template class Vector<NdbTableImpl*>; template class Vector<NdbColumnImpl*>; const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT = 0; -const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT_MEMORY = 0; +const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT_FIXED_MEMORY = 0; +const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY = 0; const NdbDictionary::Column * NdbDictionary::Column::ROW_COUNT = 0; const NdbDictionary::Column * NdbDictionary::Column::COMMIT_COUNT = 0; const NdbDictionary::Column * NdbDictionary::Column::ROW_SIZE = 0; diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index 6f096046440..06b0d7ea5b9 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -942,6 +942,7 @@ NdbEventBuffer::NdbEventBuffer(Ndb *ndb) : { #ifdef VM_TRACE m_latest_command= "NdbEventBuffer::NdbEventBuffer"; + m_flush_gci = 0; #endif if ((p_cond = NdbCondition_Create()) == NULL) { @@ -1109,6 +1110,8 @@ NdbEventBuffer::flushIncompleteEvents(Uint64 gci) /** * Find min complete gci */ + // called by user thread, so we need to lock the data + lock(); Uint32 i; Uint32 sz= m_active_gci.size(); Gci_container* array = (Gci_container*)m_active_gci.getBase(); @@ -1127,6 +1130,10 @@ NdbEventBuffer::flushIncompleteEvents(Uint64 gci) bzero(tmp, sizeof(Gci_container)); } } +#ifdef VM_TRACE + m_flush_gci = gci; +#endif + unlock(); return 0; } @@ -1301,7 +1308,11 @@ operator<<(NdbOut& out, const Gci_container_pod& gci) static Gci_container* -find_bucket_chained(Vector<Gci_container_pod> * active, Uint64 gci) +find_bucket_chained(Vector<Gci_container_pod> * active, Uint64 gci +#ifdef VM_TRACE + ,Uint64 flush_gci +#endif + ) { Uint32 pos = (gci & ACTIVE_GCI_MASK); Gci_container *bucket= ((Gci_container*)active->getBase()) + pos; @@ -1322,6 +1333,13 @@ find_bucket_chained(Vector<Gci_container_pod> * active, Uint64 gci) bzero(bucket, sizeof(Gci_container)); bucket->m_gci = gci; bucket->m_gcp_complete_rep_count = ~(Uint32)0; +#ifdef VM_TRACE + if (gci < flush_gci) + { + ndbout_c("received old gci %llu < flush gci %llu", gci, flush_gci); + assert(false); + } +#endif return bucket; } move_pos += ACTIVE_GCI_DIRECTORY_SIZE; @@ -1336,7 +1354,16 @@ find_bucket_chained(Vector<Gci_container_pod> * active, Uint64 gci) bucket += ACTIVE_GCI_DIRECTORY_SIZE; if(bucket->m_gci == gci) + { +#ifdef VM_TRACE + if (gci < flush_gci) + { + ndbout_c("received old gci %llu < flush gci %llu", gci, flush_gci); + assert(false); + } +#endif return bucket; + } } while(pos < size); @@ -1346,14 +1373,22 @@ 
find_bucket_chained(Vector<Gci_container_pod> * active, Uint64 gci) inline Gci_container* -find_bucket(Vector<Gci_container_pod> * active, Uint64 gci) +find_bucket(Vector<Gci_container_pod> * active, Uint64 gci +#ifdef VM_TRACE + ,Uint64 flush_gci +#endif + ) { Uint32 pos = (gci & ACTIVE_GCI_MASK); Gci_container *bucket= ((Gci_container*)active->getBase()) + pos; if(likely(gci == bucket->m_gci)) return bucket; - return find_bucket_chained(active,gci); + return find_bucket_chained(active,gci +#ifdef VM_TRACE + , flush_gci +#endif + ); } static @@ -1386,7 +1421,11 @@ NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep) const Uint64 gci= rep->gci; const Uint32 cnt= rep->gcp_complete_rep_count; - Gci_container *bucket = find_bucket(&m_active_gci, gci); + Gci_container *bucket = find_bucket(&m_active_gci, gci +#ifdef VM_TRACE + , m_flush_gci +#endif + ); if (unlikely(bucket == 0)) { @@ -1522,6 +1561,46 @@ NdbEventBuffer::complete_outof_order_gcis() } void +NdbEventBuffer::report_node_connected(Uint32 node_id) +{ + NdbEventOperation* op= m_ndb->getEventOperation(0); + if (op == 0) + return; + + DBUG_ENTER("NdbEventBuffer::report_node_connected"); + SubTableData data; + LinearSectionPtr ptr[3]; + bzero(&data, sizeof(data)); + bzero(ptr, sizeof(ptr)); + + data.tableId = ~0; + data.operation = NdbDictionary::Event::_TE_ACTIVE; + data.req_nodeid = (Uint8)node_id; + data.ndbd_nodeid = (Uint8)node_id; + data.logType = SubTableData::LOG; + data.gci = m_latestGCI + 1; + /** + * Insert this event for each operation + */ + { + // no need to lock()/unlock(), receive thread calls this + NdbEventOperationImpl* impl = &op->m_impl; + do if (!impl->m_node_bit_mask.isclear()) + { + data.senderData = impl->m_oid; + insertDataL(impl, &data, ptr); + } while((impl = impl->m_next)); + for (impl = m_dropped_ev_op; impl; impl = impl->m_next) + if (!impl->m_node_bit_mask.isclear()) + { + data.senderData = impl->m_oid; + insertDataL(impl, &data, ptr); + } + } + DBUG_VOID_RETURN; +} + +void NdbEventBuffer::report_node_failure(Uint32 node_id) { NdbEventOperation* op= m_ndb->getEventOperation(0); @@ -1579,6 +1658,10 @@ NdbEventBuffer::completeClusterFailed() data.logType = SubTableData::LOG; data.gci = m_latestGCI + 1; +#ifdef VM_TRACE + m_flush_gci = 0; +#endif + /** * Insert this event for each operation */ @@ -1712,7 +1795,11 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, if ( likely((Uint32)op->mi_type & (1 << (Uint32)sdata->operation)) ) { - Gci_container* bucket= find_bucket(&m_active_gci, gci); + Gci_container* bucket= find_bucket(&m_active_gci, gci +#ifdef VM_TRACE + , m_flush_gci +#endif + ); DBUG_PRINT_EVENT("info", ("data insertion in eventId %d", op->m_eventId)); DBUG_PRINT_EVENT("info", ("gci=%d tab=%d op=%d node=%d", diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp index c14ca83128f..561e79a137e 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp @@ -422,6 +422,7 @@ public: void execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep); void complete_outof_order_gcis(); + void report_node_connected(Uint32 node_id); void report_node_failure(Uint32 node_id); void completeClusterFailed(); @@ -462,6 +463,7 @@ public: #ifdef VM_TRACE const char *m_latest_command; + Uint64 m_flush_gci; #endif Ndb *m_ndb; diff --git a/storage/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp index 5852570a686..21caf8349b6 100644 --- 
a/storage/ndb/src/ndbapi/NdbScanOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp @@ -1574,62 +1574,6 @@ NdbScanOperation::close_impl(TransporterFacade* tp, bool forceSend, return -1; } - bool holdLock = false; - if (theSCAN_TABREQ) - { - ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend()); - holdLock = ScanTabReq::getHoldLockFlag(req->requestInfo); - } - - /** - * When using locks, force close of scan directly - */ - if (holdLock && theError.code == 0 && - (m_sent_receivers_count + m_conf_receivers_count + m_api_receivers_count)) - { - NdbApiSignal tSignal(theNdb->theMyRef); - tSignal.setSignal(GSN_SCAN_NEXTREQ); - - Uint32* theData = tSignal.getDataPtrSend(); - Uint64 transId = theNdbCon->theTransactionId; - theData[0] = theNdbCon->theTCConPtr; - theData[1] = 1; - theData[2] = transId; - theData[3] = (Uint32) (transId >> 32); - - tSignal.setLength(4); - int ret = tp->sendSignal(&tSignal, nodeId); - if (ret) - { - setErrorCode(4008); - return -1; - } - - /** - * If no receiver is outstanding... - * set it to 1 as execCLOSE_SCAN_REP resets it - */ - m_sent_receivers_count = m_sent_receivers_count ? m_sent_receivers_count : 1; - - while(theError.code == 0 && (m_sent_receivers_count + m_conf_receivers_count)) - { - int return_code = poll_guard->wait_scan(WAITFOR_SCAN_TIMEOUT, nodeId, forceSend); - switch(return_code){ - case 0: - break; - case -1: - setErrorCode(4008); - case -2: - m_api_receivers_count = 0; - m_conf_receivers_count = 0; - m_sent_receivers_count = 0; - theNdbCon->theReleaseOnClose = true; - return -1; - } - } - return 0; - } - /** * Wait for outstanding */ diff --git a/storage/ndb/src/ndbapi/Ndbif.cpp b/storage/ndb/src/ndbapi/Ndbif.cpp index ecaf6a3f435..0527744afe1 100644 --- a/storage/ndb/src/ndbapi/Ndbif.cpp +++ b/storage/ndb/src/ndbapi/Ndbif.cpp @@ -177,6 +177,7 @@ Ndb::executeMessage(void* NdbObject, void Ndb::connected(Uint32 ref) { +// cluster connect, a_node == own reference theMyRef= ref; Uint32 tmpTheNode= refToNode(ref); Uint64 tBlockNo= refToBlock(ref); @@ -209,16 +210,30 @@ void Ndb::connected(Uint32 ref) theNode= tmpTheNode; // flag that Ndb object is initialized } +void Ndb::report_node_connected(Uint32 nodeId) +{ + if (theEventBuffer) + { + // node connected + // eventOperations in the ndb object should be notified + theEventBuffer->report_node_connected(nodeId); + } +} + void Ndb::statusMessage(void* NdbObject, Uint32 a_node, bool alive, bool nfComplete) { DBUG_ENTER("Ndb::statusMessage"); + DBUG_PRINT("info", ("a_node: %u alive: %u nfComplete: %u", + a_node, alive, nfComplete)); Ndb* tNdb = (Ndb*)NdbObject; if (alive) { if (nfComplete) { + // cluster connect, a_node == own reference tNdb->connected(a_node); DBUG_VOID_RETURN; }//if + tNdb->report_node_connected(a_node); } else { if (nfComplete) { tNdb->report_node_failure_completed(a_node); diff --git a/storage/ndb/src/ndbapi/TransporterFacade.cpp b/storage/ndb/src/ndbapi/TransporterFacade.cpp index 15127953051..2f421271e91 100644 --- a/storage/ndb/src/ndbapi/TransporterFacade.cpp +++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp @@ -794,6 +794,8 @@ TransporterFacade::connected() void TransporterFacade::ReportNodeDead(NodeId tNodeId) { + DBUG_ENTER("TransporterFacade::ReportNodeDead"); + DBUG_PRINT("enter",("nodeid= %d", tNodeId)); /** * When a node fails we must report this to each Ndb object. * The function that is used for communicating node failures is called. 
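
With the Ndbif.cpp changes above, Ndb::statusMessage() now distinguishes all four (alive, nfComplete) combinations instead of ignoring the "node became alive" case. A sketch of the resulting dispatch; NdbSketch is a hypothetical stand-in whose method names mirror the patch:

    #include <cstdio>

    struct NdbSketch {
      void connected(unsigned ref) { std::printf("own ref %u\n", ref); }
      void report_node_connected(unsigned n) { std::printf("node %u up\n", n); }
      void report_node_failure(unsigned n) { std::printf("node %u down\n", n); }
      void report_node_failure_completed(unsigned n)
      { std::printf("node %u NF complete\n", n); }
    };

    static void status_message_sketch(NdbSketch *ndb, unsigned a_node,
                                      bool alive, bool nfComplete)
    {
      if (alive && nfComplete)
        ndb->connected(a_node);             // cluster connect, own reference
      else if (alive)
        ndb->report_node_connected(a_node); // notify event operations
      else if (nfComplete)
        ndb->report_node_failure_completed(a_node);
      else
        ndb->report_node_failure(a_node);
    }
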
@@ -810,6 +812,7 @@ TransporterFacade::ReportNodeDead(NodeId tNodeId) (*RegPC) (obj, tNodeId, false, false); } } + DBUG_VOID_RETURN; } void diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c index 486d78538f0..d0d26c19cfa 100644 --- a/storage/ndb/src/ndbapi/ndberror.c +++ b/storage/ndb/src/ndbapi/ndberror.c @@ -484,6 +484,8 @@ ErrorBundle ErrorCodes[] = { { 1418, DMEC, SE, "Subscription dropped, no new subscribers allowed" }, { 1419, DMEC, SE, "Subscription already dropped" }, + { 1420, DMEC, TR, "Subscriber manager busy with adding/removing a table" }, + { 4004, DMEC, AE, "Attribute name not found in the Table" }, { 4100, DMEC, AE, "Status Error in NDB" }, diff --git a/storage/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp index 5474837228a..ad1ea5ed6f2 100644 --- a/storage/ndb/test/ndbapi/testNodeRestart.cpp +++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp @@ -294,6 +294,7 @@ int runRestarts(NDBT_Context* ctx, NDBT_Step* step){ } i++; } + ctx->stopTest(); return result; } diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt index 8d893f11288..68e653b1ead 100644 --- a/storage/ndb/test/run-test/daily-basic-tests.txt +++ b/storage/ndb/test/run-test/daily-basic-tests.txt @@ -531,6 +531,10 @@ max-time: 1500 cmd: testDict args: -n TemporaryTables T1 T6 T7 T8 +max-time: 1500 +cmd: testDict +args: -n Restart_NR2 T1 + # # TEST NDBAPI # diff --git a/storage/ndb/tools/desc.cpp b/storage/ndb/tools/desc.cpp index 7c5ce68c950..49f188d12c0 100644 --- a/storage/ndb/tools/desc.cpp +++ b/storage/ndb/tools/desc.cpp @@ -293,7 +293,8 @@ void print_part_info(Ndb* pNdb, NDBT_Table* pTab) { "Partition", 0, NdbDictionary::Column::FRAGMENT }, { "Row count", 0, NdbDictionary::Column::ROW_COUNT }, { "Commit count", 0, NdbDictionary::Column::COMMIT_COUNT }, - { "Frag memory", 0, NdbDictionary::Column::FRAGMENT_MEMORY }, + { "Frag fixed memory", 0, NdbDictionary::Column::FRAGMENT_FIXED_MEMORY }, + { "Frag varsized memory", 0, NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY }, { 0, 0, 0 } }; diff --git a/storage/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp index b190652232e..6f9b025222c 100644 --- a/storage/ndb/tools/restore/consumer_restore.cpp +++ b/storage/ndb/tools/restore/consumer_restore.cpp @@ -148,17 +148,38 @@ BackupRestore::finalize_table(const TableS & table){ bool ret= true; if (!m_restore && !m_restore_meta) return ret; - if (table.have_auto_inc()) + if (!table.have_auto_inc()) + return ret; + + Uint64 max_val= table.get_max_auto_val(); + do { - Uint64 max_val= table.get_max_auto_val(); - Uint64 auto_val; + Uint64 auto_val = ~(Uint64)0; int r= m_ndb->readAutoIncrementValue(get_table(table.m_dictTable), auto_val); - if (r == -1 && m_ndb->getNdbError().code != 626) + if (r == -1 && m_ndb->getNdbError().status == NdbError::TemporaryError) + { + NdbSleep_MilliSleep(50); + continue; // retry + } + else if (r == -1 && m_ndb->getNdbError().code != 626) + { ret= false; - else if (r == -1 || max_val+1 > auto_val) - ret= m_ndb->setAutoIncrementValue(get_table(table.m_dictTable), max_val+1, false) != -1; - } - return ret; + } + else if ((r == -1 && m_ndb->getNdbError().code == 626) || + max_val+1 > auto_val || auto_val == ~(Uint64)0) + { + r= m_ndb->setAutoIncrementValue(get_table(table.m_dictTable), + max_val+1, false); + if (r == -1 && + m_ndb->getNdbError().status == NdbError::TemporaryError) + { + NdbSleep_MilliSleep(50); + continue; // retry + } 
+ ret = (r == 0); + } + return (ret); + } while (1); } @@ -792,8 +813,6 @@ BackupRestore::table(const TableS & table){ } info << "Successfully restored table event " << event_name << endl ; } - - m_ndb->setAutoIncrementValue(tab, ~(Uint64)0, false); } const NdbDictionary::Table* null = 0; m_new_tables.fill(table.m_dictTable->getTableId(), null); diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index 72e230da0c2..1c22fb2586b 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -478,17 +478,7 @@ chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir # Initiate databases if needed %{_bindir}/mysql_install_db --rpm --user=%{mysqld_user} -# Upgrade databases if needed -# This must be done as database user "root", who should be password-protected, -# but this password is not available here. -# So ensure the server is isolated as much as possible, and start it so that -# passwords are not checked. -# See the related change in the start script "/etc/init.d/mysql". -chmod 700 $mysql_datadir -%{_sysconfdir}/init.d/mysql start --skip-networking --skip-grant-tables -%{_bindir}/mysql_upgrade -%{_sysconfdir}/init.d/mysql stop --skip-networking --skip-grant-tables -chmod 755 $mysql_datadir +# Upgrade databases if needed would go here - but it cannot be automated yet # Change permissions again to fix any new files. chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir @@ -584,6 +574,7 @@ fi %attr(755, root, root) %{_bindir}/mysqlbug %attr(755, root, root) %{_bindir}/mysqld_multi %attr(755, root, root) %{_bindir}/mysqld_safe +%attr(755, root, root) %{_bindir}/mysqldumpslow %attr(755, root, root) %{_bindir}/mysqlhotcopy %attr(755, root, root) %{_bindir}/mysqltest %attr(755, root, root) %{_bindir}/perror @@ -613,7 +604,6 @@ fi %attr(755, root, root) %{_bindir}/mysqlbinlog %attr(755, root, root) %{_bindir}/mysqlcheck %attr(755, root, root) %{_bindir}/mysqldump -%attr(755, root, root) %{_bindir}/mysqldumpslow %attr(755, root, root) %{_bindir}/mysqlimport %attr(755, root, root) %{_bindir}/mysqlshow %attr(755, root, root) %{_bindir}/mysqlslap @@ -701,6 +691,20 @@ fi # itself - note that they must be ordered by date (important when # merging BK trees) %changelog +* Tue Jun 27 2006 Joerg Bruehe <joerg@mysql.com> + +- move "mysqldumpslow" from the client RPM to the server RPM (bug#20216) + +- Revert all previous attempts to call "mysql_upgrade" during RPM upgrade, + there are some more aspects which need to be solved before this is possible. + For now, just ensure the binary "mysql_upgrade" is delivered and installed. + +* Thu Jun 22 2006 Joerg Bruehe <joerg@mysql.com> + +- Close a gap of the previous version by explicitly using + a newly created temporary directory for the socket to be used + in the "mysql_upgrade" operation, overriding any local setting. + * Tue Jun 20 2006 Joerg Bruehe <joerg@mysql.com> - To run "mysql_upgrade", we need a running server; |