89 files changed, 973 insertions, 3280 deletions
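The core of this change set is mysqldump's handling of NDB disk-data objects: a new --no-tablespaces (-y) option suppresses all tablespace output, and partial dumps now emit only the LOGFILE GROUP and TABLESPACE statements actually referenced by the dumped tables. dump_tablespaces() builds the filter against INFORMATION_SCHEMA; for a database-level dump the assembled datafile query comes out roughly as below (a sketch pieced together from the string fragments in the patch, with 'db1' and 'db2' standing in for the dumped databases):

SELECT DISTINCT TABLESPACE_NAME, FILE_NAME, LOGFILE_GROUP_NAME,
       EXTENT_SIZE, INITIAL_SIZE, ENGINE
  FROM INFORMATION_SCHEMA.FILES
 WHERE FILE_TYPE = 'DATAFILE'
   AND TABLESPACE_NAME IN (SELECT DISTINCT TABLESPACE_NAME
                             FROM INFORMATION_SCHEMA.PARTITIONS
                            WHERE TABLE_SCHEMA IN ('db1', 'db2'))
 ORDER BY TABLESPACE_NAME, LOGFILE_GROUP_NAME;

The UNDO LOG query is restricted the same way, via the LOGFILE_GROUP_NAME values of those datafiles, so dumping a single database no longer drags in every tablespace on the server.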
diff --git a/client/mysqldump.c b/client/mysqldump.c
index 4a32d1617c2..0421f0b70aa 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -100,7 +100,7 @@ static my_bool  verbose= 0, opt_no_create_info= 0, opt_no_data= 0,
                 opt_replace_into= 0,
                 opt_dump_triggers= 0, opt_routines=0, opt_tz_utc=1,
                 opt_events= 0,
-                opt_alltspcs=0;
+                opt_alltspcs=0, opt_notspcs= 0;
 static ulong opt_max_allowed_packet, opt_net_buffer_length;
 static MYSQL mysql_connection,*mysql=0;
 static my_bool insert_pat_inited= 0, info_flag;
@@ -172,6 +172,10 @@ static struct my_option my_long_options[] =
    "Dump all the tablespaces.",
    (gptr*) &opt_alltspcs, (gptr*) &opt_alltspcs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
    0, 0},
+  {"no-tablespaces", 'y',
+   "Do not dump any tablespace information.",
+   (gptr*) &opt_notspcs, (gptr*) &opt_notspcs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
+   0, 0},
   {"add-drop-database", OPT_DROP_DATABASE,
    "Add a 'DROP DATABASE' before each create.",
    (gptr*) &opt_drop_database, (gptr*) &opt_drop_database, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -450,6 +454,10 @@ char check_if_ignore_table(const char *table_name, char *table_type);
 static char *primary_key_fields(const char *table_name);
 static my_bool get_view_structure(char *table, char* db);
 static my_bool dump_all_views_in_db(char *database);
+static int dump_all_tablespaces();
+static int dump_tablespaces_for_tables(char *db, char **table_names, int tables);
+static int dump_tablespaces_for_databases(char** databases);
+static int dump_tablespaces(char* ts_where);
 
 #include <help_start.h>
 
@@ -2837,9 +2845,80 @@ static char *getTableName(int reset)
 
 static int dump_all_tablespaces()
 {
+  return dump_tablespaces(NULL);
+}
+
+static int dump_tablespaces_for_tables(char *db, char **table_names, int tables)
+{
+  DYNAMIC_STRING where;
+  int r;
+  int i;
+  char name_buff[NAME_LEN*2+3];
+
+  mysql_real_escape_string(mysql, name_buff, db, strlen(db));
+
+  init_dynamic_string(&where, " AND TABLESPACE_NAME IN ("
+                      "SELECT DISTINCT TABLESPACE_NAME FROM"
+                      " INFORMATION_SCHEMA.PARTITIONS"
+                      " WHERE"
+                      " TABLE_SCHEMA='", 256, 1024);
+  dynstr_append(&where, name_buff);
+  dynstr_append(&where, "' AND TABLE_NAME IN (");
+
+  for (i=0 ; i<tables ; i++)
+  {
+    mysql_real_escape_string(mysql, name_buff,
+                             table_names[i], strlen(table_names[i]));
+
+    dynstr_append(&where, "'");
+    dynstr_append(&where, name_buff);
+    dynstr_append(&where, "',");
+  }
+  dynstr_trunc(&where, 1);
+  dynstr_append(&where,"))");
+
+  DBUG_PRINT("info",("Dump TS for Tables where: %s",where));
+  r= dump_tablespaces(where.str);
+  dynstr_free(&where);
+  return r;
+}
+
+static int dump_tablespaces_for_databases(char** databases)
+{
+  DYNAMIC_STRING where;
+  int r;
+  int i;
+
+  init_dynamic_string(&where, " AND TABLESPACE_NAME IN ("
+                      "SELECT DISTINCT TABLESPACE_NAME FROM"
+                      " INFORMATION_SCHEMA.PARTITIONS"
+                      " WHERE"
+                      " TABLE_SCHEMA IN (", 256, 1024);
+
+  for (i=0 ; databases[i]!=NULL ; i++)
+  {
+    char db_name_buff[NAME_LEN*2+3];
+    mysql_real_escape_string(mysql, db_name_buff,
+                             databases[i], strlen(databases[i]));
+    dynstr_append(&where, "'");
+    dynstr_append(&where, db_name_buff);
+    dynstr_append(&where, "',");
+  }
+  dynstr_trunc(&where, 1);
+  dynstr_append(&where,"))");
+
+  DBUG_PRINT("info",("Dump TS for DBs where: %s",where));
+  r= dump_tablespaces(where.str);
+  dynstr_free(&where);
+  return r;
+}
+
+static int dump_tablespaces(char* ts_where)
+{
   MYSQL_ROW row;
   MYSQL_RES *tableres;
   char buf[FN_REFLEN];
+  DYNAMIC_STRING sqlbuf;
   int first;
   /*
     The following are used for parsing the EXTRA field
@@ -2848,21 +2927,51 @@
   char *ubs;
   char *endsemi;
 
-  if (mysql_query_with_error_report(mysql, &tableres,
-                                    "SELECT"
-                                    " LOGFILE_GROUP_NAME,"
-                                    " FILE_NAME,"
-                                    " TOTAL_EXTENTS,"
-                                    " INITIAL_SIZE,"
-                                    " ENGINE,"
-                                    " EXTRA"
-                                    " FROM INFORMATION_SCHEMA.FILES"
-                                    " WHERE FILE_TYPE = \"UNDO LOG\""
-                                    " AND FILE_NAME IS NOT NULL"
-                                    " GROUP BY LOGFILE_GROUP_NAME, FILE_NAME"
-                                    ", ENGINE"
-                                    " ORDER BY LOGFILE_GROUP_NAME"))
+  init_dynamic_string(&sqlbuf,
+                      "SELECT LOGFILE_GROUP_NAME,"
+                      " FILE_NAME,"
+                      " TOTAL_EXTENTS,"
+                      " INITIAL_SIZE,"
+                      " ENGINE,"
+                      " EXTRA"
+                      " FROM INFORMATION_SCHEMA.FILES"
+                      " WHERE FILE_TYPE = 'UNDO LOG'"
+                      " AND FILE_NAME IS NOT NULL",
+                      256, 1024);
+  if(ts_where)
+  {
+    dynstr_append(&sqlbuf,
+                  " AND LOGFILE_GROUP_NAME IN ("
+                  "SELECT DISTINCT LOGFILE_GROUP_NAME"
+                  " FROM INFORMATION_SCHEMA.FILES"
+                  " WHERE FILE_TYPE = 'DATAFILE'"
+                  );
+    dynstr_append(&sqlbuf, ts_where);
+    dynstr_append(&sqlbuf, ")");
+  }
+  dynstr_append(&sqlbuf,
+                " GROUP BY LOGFILE_GROUP_NAME, FILE_NAME"
+                ", ENGINE"
+                " ORDER BY LOGFILE_GROUP_NAME");
+
+  if (mysql_query(mysql, sqlbuf.str) ||
+      !(tableres = mysql_store_result(mysql)))
+  {
+    if (mysql_errno(mysql) == ER_BAD_TABLE_ERROR ||
+        mysql_errno(mysql) == ER_BAD_DB_ERROR ||
+        mysql_errno(mysql) == ER_UNKNOWN_TABLE)
+    {
+      fprintf(md_result_file,
+              "\n--\n-- Not dumping tablespaces as no INFORMATION_SCHEMA.FILES"
+              " table on this server\n--\n");
+      check_io(md_result_file);
+      return 0;
+    }
+
+    my_printf_error(0, "Error: Couldn't dump tablespaces %s",
+                    MYF(0), mysql_error(mysql));
     return 1;
+  }
 
   buf[0]= 0;
   while ((row= mysql_fetch_row(tableres)))
@@ -2912,18 +3021,24 @@ static int dump_all_tablespaces()
       strxmov(buf, row[0], NullS);
     }
   }
-
-  if (mysql_query_with_error_report(mysql, &tableres,
-                                    "SELECT DISTINCT"
-                                    " TABLESPACE_NAME,"
-                                    " FILE_NAME,"
-                                    " LOGFILE_GROUP_NAME,"
-                                    " EXTENT_SIZE,"
-                                    " INITIAL_SIZE,"
-                                    " ENGINE"
-                                    " FROM INFORMATION_SCHEMA.FILES"
-                                    " WHERE FILE_TYPE = \"DATAFILE\""
-                                    " ORDER BY TABLESPACE_NAME, LOGFILE_GROUP_NAME"))
+  dynstr_free(&sqlbuf);
+  init_dynamic_string(&sqlbuf,
+                      "SELECT DISTINCT TABLESPACE_NAME,"
+                      " FILE_NAME,"
+                      " LOGFILE_GROUP_NAME,"
+                      " EXTENT_SIZE,"
+                      " INITIAL_SIZE,"
+                      " ENGINE"
+                      " FROM INFORMATION_SCHEMA.FILES"
+                      " WHERE FILE_TYPE = 'DATAFILE'",
+                      256, 1024);
+
+  if(ts_where)
+    dynstr_append(&sqlbuf, ts_where);
+
+  dynstr_append(&sqlbuf, " ORDER BY TABLESPACE_NAME, LOGFILE_GROUP_NAME");
+
+  if (mysql_query_with_error_report(mysql, &tableres, sqlbuf.str))
     return 1;
 
   buf[0]= 0;
@@ -2969,6 +3084,8 @@ static int dump_all_tablespaces()
       strxmov(buf, row[0], NullS);
     }
   }
+
+  dynstr_free(&sqlbuf);
 
   return 0;
 }
@@ -4024,15 +4141,23 @@ int main(int argc, char **argv)
     dump_all_tablespaces();
 
   if (opt_alldbs)
+  {
+    if (!opt_alltspcs && !opt_notspcs)
+      dump_all_tablespaces();
     dump_all_databases();
+  }
   else if (argc > 1 && !opt_databases)
   {
     /* Only one database and selected table(s) */
+    if (!opt_alltspcs && !opt_notspcs)
+      dump_tablespaces_for_tables(*argv, (argv + 1), (argc -1));
     dump_selected_tables(*argv, (argv + 1), (argc - 1));
   }
   else
   {
     /* One or more databases, all tables */
+    if (!opt_alltspcs && !opt_notspcs)
+      dump_tablespaces_for_databases(argv);
     dump_databases(argv);
   }
 #ifdef HAVE_SMEM
diff --git a/configure.in b/configure.in
index 8cf3ebe6b0c..f314a9ca754 100644
--- a/configure.in
+++ b/configure.in
@@ -700,6 +700,8 @@ AC_CHECK_HEADERS(fcntl.h float.h floatingpoint.h ieeefp.h limits.h \
 sys/ioctl.h malloc.h sys/malloc.h sys/ipc.h sys/shm.h linux/config.h \
 sys/resource.h sys/param.h)
+AC_CHECK_HEADERS([xfs/xfs.h]) + #-------------------------------------------------------------------- # Check for system libraries. Adds the library to $LIBS # and defines HAVE_LIBM etc diff --git a/include/my_sys.h b/include/my_sys.h index 7d8e2de1c27..d185913cda1 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -785,6 +785,7 @@ my_bool dynstr_append_mem(DYNAMIC_STRING *str, const char *append, uint length); extern my_bool dynstr_set(DYNAMIC_STRING *str, const char *init_str); extern my_bool dynstr_realloc(DYNAMIC_STRING *str, ulong additional_size); +extern my_bool dynstr_trunc(DYNAMIC_STRING *str, int n); extern void dynstr_free(DYNAMIC_STRING *str); #ifdef HAVE_MLOCK extern byte *my_malloc_lock(uint length,myf flags); diff --git a/mysql-test/r/information_schema_part.result b/mysql-test/r/information_schema_part.result index 6ba980e0f21..df3abdbee0a 100644 --- a/mysql-test/r/information_schema_part.result +++ b/mysql-test/r/information_schema_part.result @@ -28,18 +28,18 @@ partitions 3; select * from information_schema.partitions where table_schema="test" and table_name="t3"; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t3 p0 NULL 1 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default -NULL test t3 p1 NULL 2 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default -NULL test t3 p2 NULL 3 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default +NULL test t3 p0 NULL 1 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default NULL +NULL test t3 p1 NULL 2 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default NULL +NULL test t3 p2 NULL 3 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default NULL create table t4 (f1 date, f2 int) partition by key(f1,f2) partitions 3; select * from information_schema.partitions where table_schema="test" and table_name="t4"; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t4 p0 NULL 1 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default -NULL test t4 p1 NULL 2 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default -NULL test t4 p2 NULL 3 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default +NULL test t4 p0 NULL 1 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default NULL +NULL test t4 p1 NULL 2 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default NULL +NULL test t4 p2 NULL 3 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default NULL drop table t1,t2,t3,t4; create table t1 (a int not null,b int not null,c int not null,primary key (a,b)) partition by range (a) @@ -99,7 +99,7 @@ drop table t1; create table t1(f1 int, f2 int); 
select * from information_schema.partitions where table_schema="test"; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t1 NULL NULL NULL NULL NULL NULL NULL NULL NULL 0 0 0 # 1024 0 # # NULL NULL +NULL test t1 NULL NULL NULL NULL NULL NULL NULL NULL NULL 0 0 0 # 1024 0 # # NULL NULL NULL drop table t1; create table t1 (f1 date) partition by linear hash(month(f1)) @@ -107,9 +107,9 @@ partitions 3; select * from information_schema.partitions where table_schema="test" and table_name="t1"; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t1 p0 NULL 1 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default -NULL test t1 p1 NULL 2 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default -NULL test t1 p2 NULL 3 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default default +NULL test t1 p0 NULL 1 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default NULL +NULL test t1 p1 NULL 2 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default NULL +NULL test t1 p2 NULL 3 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default default NULL drop table t1; create table t1 (a int) PARTITION BY RANGE (a) diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result index e0255ddb9e7..b7e893faf29 100644 --- a/mysql-test/r/ndb_basic.result +++ b/mysql-test/r/ndb_basic.result @@ -758,4 +758,19 @@ c VARCHAR(255) NOT NULL, CONSTRAINT pk_b_c_id PRIMARY KEY (b,c), CONSTRAINT fk_a FOREIGN KEY(a) REFERENCES t1(a))engine=ndb; drop table t1, t2; +create table t1 (a int not null primary key, b int) engine=ndb; +insert into t1 values(1,1),(2,2),(3,3); +create table t2 like t1; +insert into t2 select * from t1; +select * from t1 order by a; +a b +1 1 +2 2 +3 3 +select * from t2 order by a; +a b +1 1 +2 2 +3 3 +drop table t1, t2; End of 5.1 tests diff --git a/mysql-test/r/ndb_dd_backuprestore.result b/mysql-test/r/ndb_dd_backuprestore.result index 3c0815cbec8..1dd609b932d 100644 --- a/mysql-test/r/ndb_dd_backuprestore.result +++ b/mysql-test/r/ndb_dd_backuprestore.result @@ -223,31 +223,31 @@ t6 CREATE TABLE `t6` ( ) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */ SELECT * FROM information_schema.partitions WHERE table_name= 't1'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME 
CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t1 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t1 p2 NULL 3 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t1 p3 NULL 4 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default +NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space1 +NULL test t1 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space1 +NULL test t1 p2 NULL 3 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space1 +NULL test t1 p3 NULL 4 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space1 SELECT * FROM information_schema.partitions WHERE table_name= 't2'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t2 p0 NULL 1 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t2 p1 NULL 2 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default +NULL test t2 p0 NULL 1 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space2 +NULL test t2 p1 NULL 2 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space2 SELECT * FROM information_schema.partitions WHERE table_name= 't3'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t3 x1 NULL 1 NULL RANGE NULL c3 NULL 105 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t3 x2 NULL 2 NULL RANGE NULL c3 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t3 x3 NULL 3 NULL RANGE NULL c3 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default +NULL test t3 x1 NULL 1 NULL RANGE NULL c3 NULL 105 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space2 +NULL test t3 x2 NULL 2 NULL RANGE NULL c3 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space2 +NULL test t3 x3 NULL 3 NULL RANGE NULL c3 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space2 SELECT * FROM information_schema.partitions WHERE table_name= 't4'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t4 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 
NULL NULL NULL NULL default default default -NULL test t4 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default +NULL test t4 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default NULL +NULL test t4 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default NULL SELECT * FROM information_schema.partitions WHERE table_name= 't5'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t5 p0 NULL 1 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t5 p1 NULL 2 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default +NULL test t5 p0 NULL 1 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default NULL +NULL test t5 p1 NULL 2 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default NULL SELECT * FROM information_schema.partitions WHERE table_name= 't6'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t6 x1 NULL 1 NULL RANGE NULL pk1 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t6 x2 NULL 2 NULL RANGE NULL pk1 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default +NULL test t6 x1 NULL 1 NULL RANGE NULL pk1 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default default NULL +NULL test t6 x2 NULL 2 NULL RANGE NULL pk1 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default default NULL SELECT COUNT(*) FROM test.t1; COUNT(*) 250 @@ -389,31 +389,31 @@ t6 CREATE TABLE `t6` ( ) ENGINE=ndbcluster DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (pk1) (PARTITION x1 VALUES LESS THAN (333) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (720) ENGINE = ndbcluster) */ SELECT * FROM information_schema.partitions WHERE table_name= 't1'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t1 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t1 p2 NULL 3 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t1 p3 NULL 4 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default +NULL test t1 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space1 +NULL test t1 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 
0 NULL NULL NULL NULL default default table_space1 +NULL test t1 p2 NULL 3 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space1 +NULL test t1 p3 NULL 4 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space1 SELECT * FROM information_schema.partitions WHERE table_name= 't2'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t2 p0 NULL 1 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t2 p1 NULL 2 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default +NULL test t2 p0 NULL 1 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space2 +NULL test t2 p1 NULL 2 NULL KEY NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space2 SELECT * FROM information_schema.partitions WHERE table_name= 't3'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t3 x1 NULL 1 NULL RANGE NULL c3 NULL 105 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t3 x2 NULL 2 NULL RANGE NULL c3 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t3 x3 NULL 3 NULL RANGE NULL c3 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default +NULL test t3 x1 NULL 1 NULL RANGE NULL c3 NULL 105 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space2 +NULL test t3 x2 NULL 2 NULL RANGE NULL c3 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space2 +NULL test t3 x3 NULL 3 NULL RANGE NULL c3 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default default table_space2 SELECT * FROM information_schema.partitions WHERE table_name= 't4'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t4 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t4 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default +NULL test t4 p0 NULL 1 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default NULL +NULL test t4 p1 NULL 2 NULL HASH NULL c3 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default NULL SELECT * FROM information_schema.partitions WHERE table_name= 't5'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION 
TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t5 p0 NULL 1 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t5 p1 NULL 2 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default +NULL test t5 p0 NULL 1 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default NULL +NULL test t5 p1 NULL 2 NULL KEY NULL pk1 NULL NULL 0 0 0 NULL 0 0 NULL NULL NULL NULL default default NULL SELECT * FROM information_schema.partitions WHERE table_name= 't6'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t6 x1 NULL 1 NULL RANGE NULL pk1 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default -NULL test t6 x2 NULL 2 NULL RANGE NULL pk1 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default default default +NULL test t6 x1 NULL 1 NULL RANGE NULL pk1 NULL 333 0 0 0 NULL 0 0 NULL NULL NULL NULL default default NULL +NULL test t6 x2 NULL 2 NULL RANGE NULL pk1 NULL 720 0 0 0 NULL 0 0 NULL NULL NULL NULL default default NULL SELECT COUNT(*) FROM test.t1; COUNT(*) 250 diff --git a/mysql-test/r/ndb_index_ordered.result b/mysql-test/r/ndb_index_ordered.result index 3b8415b9267..788c0d68259 100644 --- a/mysql-test/r/ndb_index_ordered.result +++ b/mysql-test/r/ndb_index_ordered.result @@ -796,3 +796,42 @@ Variable_name Value ndb_index_stat_cache_entries 32 ndb_index_stat_enable OFF ndb_index_stat_update_freq 20 +create table t1 (a int primary key) engine = ndb; +insert into t1 values (1), (2), (3); +begin; +delete from t1 where a > 1; +rollback; +select * from t1 order by a; +a +1 +2 +3 +begin; +delete from t1 where a > 1; +rollback; +begin; +select * from t1 order by a; +a +1 +2 +3 +delete from t1 where a > 2; +select * from t1 order by a; +a +1 +2 +delete from t1 where a > 1; +select * from t1 order by a; +a +1 +delete from t1 where a > 0; +select * from t1 order by a; +a +rollback; +select * from t1 order by a; +a +1 +2 +3 +delete from t1; +drop table t1; diff --git a/mysql-test/r/ndb_index_unique.result b/mysql-test/r/ndb_index_unique.result index 563024a8800..fbc3a2bb915 100644 --- a/mysql-test/r/ndb_index_unique.result +++ b/mysql-test/r/ndb_index_unique.result @@ -144,7 +144,40 @@ b int unsigned not null, c int unsigned, UNIQUE (b, c) USING HASH ) engine=ndbcluster; -ERROR 42000: Table handler doesn't support NULL in given index. 
Please change column 'c' to be NOT NULL or use another handler +Warnings: +Warning 1121 Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan +insert t2 values(1,1,NULL),(2,2,2),(3,3,NULL),(4,4,4),(5,5,NULL),(6,6,6),(7,7,NULL),(8,3,NULL),(9,3,NULL); +select * from t2 where c IS NULL order by a; +a b c +1 1 NULL +3 3 NULL +5 5 NULL +7 7 NULL +8 3 NULL +9 3 NULL +select * from t2 where b = 3 AND c IS NULL order by a; +a b c +3 3 NULL +8 3 NULL +9 3 NULL +select * from t2 where (b = 3 OR b = 5) AND c IS NULL order by a; +a b c +3 3 NULL +5 5 NULL +8 3 NULL +9 3 NULL +set @old_ecpd = @@session.engine_condition_pushdown; +set engine_condition_pushdown = true; +explain select * from t2 where (b = 3 OR b = 5) AND c IS NULL AND a < 9 order by a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range PRIMARY,b PRIMARY 4 NULL 1 Using where with pushed condition +select * from t2 where (b = 3 OR b = 5) AND c IS NULL AND a < 9 order by a; +a b c +3 3 NULL +5 5 NULL +8 3 NULL +set engine_condition_pushdown = @old_ecpd; +drop table t2; CREATE TABLE t3 ( a int unsigned NOT NULL, b int unsigned not null, diff --git a/mysql-test/r/ndb_multi.result b/mysql-test/r/ndb_multi.result index 1c9633589e0..2bc49bf9b45 100644 --- a/mysql-test/r/ndb_multi.result +++ b/mysql-test/r/ndb_multi.result @@ -97,3 +97,27 @@ c1 3 5 drop table t1; +create database db; +use db; +create table t1(x int) engine=ndb; +use db; +show tables; +Tables_in_db +t1 +drop database db; +show tables; +ERROR 42000: Unknown database 'db' +create database db; +use db; +create table t1(x int) engine=ndb; +use db; +create table t2(x int) engine=myisam; +show tables; +Tables_in_db +t1 +t2 +drop database db; +show tables; +Tables_in_db +t2 +drop database db; diff --git a/mysql-test/r/ndb_partition_range.result b/mysql-test/r/ndb_partition_range.result index f4bae479239..8057ac59613 100644 --- a/mysql-test/r/ndb_partition_range.result +++ b/mysql-test/r/ndb_partition_range.result @@ -17,9 +17,9 @@ INSERT into t1 values (10, 1, 1); INSERT into t1 values (15, 1, 1); select * from information_schema.partitions where table_name= 't1'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME -NULL test t1 x1 NULL 1 NULL RANGE NULL a NULL 5 0 0 0 # 0 0 # # NULL NULL default default default -NULL test t1 x2 NULL 2 NULL RANGE NULL a NULL 10 0 0 0 # 0 0 # # NULL NULL default default default -NULL test t1 x3 NULL 3 NULL RANGE NULL a NULL 20 0 0 0 # 0 0 # # NULL NULL default default default +NULL test t1 x1 NULL 1 NULL RANGE NULL a NULL 5 0 0 0 # 0 0 # # NULL NULL default default NULL +NULL test t1 x2 NULL 2 NULL RANGE NULL a NULL 10 0 0 0 # 0 0 # # NULL NULL default default NULL +NULL test t1 x3 NULL 3 NULL RANGE NULL a NULL 20 0 0 0 # 0 0 # # NULL NULL default default NULL select * from t1 order by a; a b c 1 1 1 diff --git a/mysql-test/r/ndb_update.result b/mysql-test/r/ndb_update.result index 7603686ea35..919b8c44a40 100644 --- a/mysql-test/r/ndb_update.result +++ b/mysql-test/r/ndb_update.result @@ -18,7 +18,7 @@ pk1 b c 2 2 2 4 1 1 UPDATE t1 set pk1 = 4 where pk1 = 2; -ERROR 23000: Duplicate entry '4' for key 1 +ERROR 23000: 
Duplicate entry '4' for key 'PRIMARY' UPDATE IGNORE t1 set pk1 = 4 where pk1 = 2; select * from t1 order by pk1; pk1 b c @@ -27,6 +27,7 @@ pk1 b c 4 1 1 UPDATE t1 set pk1 = 1, c = 2 where pk1 = 4; ERROR 23000: Duplicate entry '' for key '*UNKNOWN*' +UPDATE IGNORE t1 set pk1 = 1, c = 2 where pk1 = 4; select * from t1 order by pk1; pk1 b c 0 0 0 diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test index bbe5d263e36..3348a94c044 100644 --- a/mysql-test/t/ndb_basic.test +++ b/mysql-test/t/ndb_basic.test @@ -738,4 +738,13 @@ CREATE TABLE t2(a VARCHAR(255) NOT NULL, CONSTRAINT fk_a FOREIGN KEY(a) REFERENCES t1(a))engine=ndb; drop table t1, t2; +# bug#24301 +create table t1 (a int not null primary key, b int) engine=ndb; +insert into t1 values(1,1),(2,2),(3,3); +create table t2 like t1; +insert into t2 select * from t1; +select * from t1 order by a; +select * from t2 order by a; +drop table t1, t2; + --echo End of 5.1 tests diff --git a/mysql-test/t/ndb_index_ordered.test b/mysql-test/t/ndb_index_ordered.test index b9a47725b85..bba0c5ca53f 100644 --- a/mysql-test/t/ndb_index_ordered.test +++ b/mysql-test/t/ndb_index_ordered.test @@ -430,3 +430,28 @@ set ndb_index_stat_update_freq = @@global.ndb_index_stat_update_freq; show session variables like 'ndb_index_stat_%'; # End of 4.1 tests + +# bug#24039 + +create table t1 (a int primary key) engine = ndb; +insert into t1 values (1), (2), (3); +begin; +delete from t1 where a > 1; +rollback; +select * from t1 order by a; +begin; +delete from t1 where a > 1; +rollback; + +begin; +select * from t1 order by a; +delete from t1 where a > 2; +select * from t1 order by a; +delete from t1 where a > 1; +select * from t1 order by a; +delete from t1 where a > 0; +select * from t1 order by a; +rollback; +select * from t1 order by a; +delete from t1; +drop table t1; diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test index 0f098c96fa8..a50b3ef28ea 100644 --- a/mysql-test/t/ndb_multi.test +++ b/mysql-test/t/ndb_multi.test @@ -87,3 +87,40 @@ connection server1; select * from t1 order by c1; drop table t1; # End of 4.1 tests + +# Check distributed drop of database in 5.1 +create database db; +use db; +create table t1(x int) engine=ndb; + +connection server2; +use db; +show tables; + +connection server1; +drop database db; + +connection server2; +--error 1049 +show tables; + +connection server1; + +# bug#21495 +create database db; +use db; +create table t1(x int) engine=ndb; + +connection server2; +use db; +create table t2(x int) engine=myisam; +show tables; + +connection server1; +drop database db; + +connection server2; +show tables; +drop database db; + +connection server1; diff --git a/mysys/string.c b/mysys/string.c index dfd42d137dd..368f7344aa6 100644 --- a/mysys/string.c +++ b/mysys/string.c @@ -115,6 +115,12 @@ my_bool dynstr_append_mem(DYNAMIC_STRING *str, const char *append, return FALSE; } +my_bool dynstr_trunc(DYNAMIC_STRING *str, int n) +{ + str->length-=n; + str->str[str->length]= '\0'; + return FALSE; +} void dynstr_free(DYNAMIC_STRING *str) { diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 6203a1038ae..59f213394ad 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -966,8 +966,7 @@ bool ha_ndbcluster::uses_blob_value() blob_index_end= blob_index + table_share->blob_fields; do { - if (bitmap_is_set(table->write_set, - table->field[*blob_index]->field_index)) + if (bitmap_is_set(bitmap, table->field[*blob_index]->field_index)) return TRUE; } while (++blob_index != blob_index_end); return 
FALSE; @@ -1256,6 +1255,9 @@ int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error) m_index[i].index= m_index[i].unique_index= NULL; else break; + m_index[i].null_in_unique_index= false; + if (check_index_fields_not_null(key_info)) + m_index[i].null_in_unique_index= true; } if (error && !ignore_error) @@ -1398,7 +1400,7 @@ NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_key(uint inx, ORDERED_INDEX); } -int ha_ndbcluster::check_index_fields_not_null(KEY* key_info) +bool ha_ndbcluster::check_index_fields_not_null(KEY* key_info) { KEY_PART_INFO* key_part= key_info->key_part; KEY_PART_INFO* end= key_part+key_info->key_parts; @@ -1408,14 +1410,10 @@ int ha_ndbcluster::check_index_fields_not_null(KEY* key_info) { Field* field= key_part->field; if (field->maybe_null()) - { - my_printf_error(ER_NULL_COLUMN_IN_INDEX,ER(ER_NULL_COLUMN_IN_INDEX), - MYF(0),field->field_name); - DBUG_RETURN(ER_NULL_COLUMN_IN_INDEX); - } + DBUG_RETURN(true); } - DBUG_RETURN(0); + DBUG_RETURN(false); } void ha_ndbcluster::release_metadata(THD *thd, Ndb *ndb) @@ -1515,6 +1513,12 @@ inline NDB_INDEX_TYPE ha_ndbcluster::get_index_type(uint idx_no) const return m_index[idx_no].type; } +inline bool ha_ndbcluster::has_null_in_unique_index(uint idx_no) const +{ + DBUG_ASSERT(idx_no < MAX_KEY); + return m_index[idx_no].null_in_unique_index; +} + /* Get the flags for an index @@ -2435,6 +2439,78 @@ guess_scan_flags(NdbOperation::LockMode lm, return flags; } + +/* + Unique index scan in NDB (full table scan with scan filter) + */ + +int ha_ndbcluster::unique_index_scan(const KEY* key_info, + const byte *key, + uint key_len, + byte *buf) +{ + int res; + NdbScanOperation *op; + NdbTransaction *trans= m_active_trans; + part_id_range part_spec; + + DBUG_ENTER("unique_index_scan"); + DBUG_PRINT("enter", ("Starting new scan on %s", m_tabname)); + + NdbOperation::LockMode lm= + (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); + int flags= guess_scan_flags(lm, m_table, table->read_set); + if (!(op=trans->getNdbScanOperation((const NDBTAB *) m_table)) || + op->readTuples(lm, flags, parallelism)) + ERR_RETURN(trans->getNdbError()); + m_active_cursor= op; + + if (m_use_partition_function) + { + part_spec.start_part= 0; + part_spec.end_part= m_part_info->get_tot_partitions() - 1; + prune_partition_set(table, &part_spec); + DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u", + part_spec.start_part, part_spec.end_part)); + /* + If partition pruning has found no partition in set + we can return HA_ERR_END_OF_FILE + If partition pruning has found exactly one partition in set + we can optimize scan to run towards that partition only. + */ + if (part_spec.start_part > part_spec.end_part) + { + DBUG_RETURN(HA_ERR_END_OF_FILE); + } + else if (part_spec.start_part == part_spec.end_part) + { + /* + Only one partition is required to scan, if sorted is required we + don't need it any more since output from one ordered partitioned + index is always sorted. 
+ */ + m_active_cursor->setPartitionId(part_spec.start_part); + } + // If table has user defined partitioning + // and no primary key, we need to read the partition id + // to support ORDER BY queries + if ((table_share->primary_key == MAX_KEY) && + (get_ndb_partition_id(op))) + ERR_RETURN(trans->getNdbError()); + } + + if (generate_scan_filter_from_key(op, key_info, key, key_len, buf)) + DBUG_RETURN(ndb_err(trans)); + if ((res= define_read_attrs(buf, op))) + DBUG_RETURN(res); + + if (execute_no_commit(this,trans,false) != 0) + DBUG_RETURN(ndb_err(trans)); + DBUG_PRINT("exit", ("Scan started successfully")); + DBUG_RETURN(next_result(buf)); +} + + /* Start full table scan in NDB */ @@ -3415,6 +3491,11 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key, DBUG_RETURN(error); DBUG_RETURN(unique_index_read(start_key->key, start_key->length, buf)); } + else if (type == UNIQUE_INDEX) + DBUG_RETURN(unique_index_scan(key_info, + start_key->key, + start_key->length, + buf)); break; default: break; @@ -5032,8 +5113,13 @@ int ha_ndbcluster::create_index(const char *name, KEY *key_info, error= create_unique_index(unique_name, key_info); break; case UNIQUE_INDEX: - if (!(error= check_index_fields_not_null(key_info))) - error= create_unique_index(unique_name, key_info); + if (check_index_fields_not_null(key_info)) + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_NULL_COLUMN_IN_INDEX, + "Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan"); + } + error= create_unique_index(unique_name, key_info); break; case ORDERED_INDEX: error= create_ordered_index(name, key_info); @@ -7755,6 +7841,30 @@ ha_ndbcluster::release_completed_operations(NdbTransaction *trans, trans->releaseCompletedOperations(); } +bool +ha_ndbcluster::null_value_index_search(KEY_MULTI_RANGE *ranges, + KEY_MULTI_RANGE *end_range, + HANDLER_BUFFER *buffer) +{ + DBUG_ENTER("null_value_index_search"); + KEY* key_info= table->key_info + active_index; + KEY_MULTI_RANGE *range= ranges; + ulong reclength= table->s->reclength; + byte *curr= (byte*)buffer->buffer; + byte *end_of_buffer= (byte*)buffer->buffer_end; + + for (; range<end_range && curr+reclength <= end_of_buffer; + range++) + { + const byte *key= range->start_key.key; + uint key_len= range->start_key.length; + if (check_null_in_key(key_info, key, key_len)) + DBUG_RETURN(true); + curr += reclength; + } + DBUG_RETURN(false); +} + int ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, KEY_MULTI_RANGE *ranges, @@ -7772,11 +7882,14 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, NdbOperation* op; Thd_ndb *thd_ndb= get_thd_ndb(current_thd); - if (uses_blob_value()) + /** + * blobs and unique hash index with NULL can't be batched currently + */ + if (uses_blob_value() || + (index_type == UNIQUE_INDEX && + has_null_in_unique_index(active_index) && + null_value_index_search(ranges, ranges+range_count, buffer))) { - /** - * blobs can't be batched currently - */ m_disable_multi_read= TRUE; DBUG_RETURN(handler::read_multi_range_first(found_range_p, ranges, @@ -9728,31 +9841,12 @@ ha_ndbcluster::generate_scan_filter(Ndb_cond_stack *ndb_cond_stack, NdbScanOperation *op) { DBUG_ENTER("generate_scan_filter"); + if (ndb_cond_stack) { - DBUG_PRINT("info", ("Generating scan filter")); NdbScanFilter filter(op); - bool multiple_cond= FALSE; - // Wrap an AND group around multiple conditions - if (ndb_cond_stack->next) { - multiple_cond= TRUE; - if 
(filter.begin() == -1) - DBUG_RETURN(1); - } - for (Ndb_cond_stack *stack= ndb_cond_stack; - (stack); - stack= stack->next) - { - Ndb_cond *cond= stack->ndb_cond; - - if (build_scan_filter(cond, &filter)) - { - DBUG_PRINT("info", ("build_scan_filter failed")); - DBUG_RETURN(1); - } - } - if (multiple_cond && filter.end() == -1) - DBUG_RETURN(1); + + DBUG_RETURN(generate_scan_filter_from_cond(ndb_cond_stack, filter)); } else { @@ -9762,6 +9856,89 @@ ha_ndbcluster::generate_scan_filter(Ndb_cond_stack *ndb_cond_stack, DBUG_RETURN(0); } +int +ha_ndbcluster::generate_scan_filter_from_cond(Ndb_cond_stack *ndb_cond_stack, + NdbScanFilter& filter) +{ + DBUG_ENTER("generate_scan_filter_from_cond"); + bool multiple_cond= FALSE; + + DBUG_PRINT("info", ("Generating scan filter")); + // Wrap an AND group around multiple conditions + if (ndb_cond_stack->next) + { + multiple_cond= TRUE; + if (filter.begin() == -1) + DBUG_RETURN(1); + } + for (Ndb_cond_stack *stack= ndb_cond_stack; + (stack); + stack= stack->next) + { + Ndb_cond *cond= stack->ndb_cond; + + if (build_scan_filter(cond, &filter)) + { + DBUG_PRINT("info", ("build_scan_filter failed")); + DBUG_RETURN(1); + } + } + if (multiple_cond && filter.end() == -1) + DBUG_RETURN(1); + + DBUG_RETURN(0); +} + +int ha_ndbcluster::generate_scan_filter_from_key(NdbScanOperation *op, + const KEY* key_info, + const byte *key, + uint key_len, + byte *buf) +{ + KEY_PART_INFO* key_part= key_info->key_part; + KEY_PART_INFO* end= key_part+key_info->key_parts; + NdbScanFilter filter(op); + int res; + + DBUG_ENTER("generate_scan_filter_from_key"); + filter.begin(NdbScanFilter::AND); + for (; key_part != end; key_part++) + { + Field* field= key_part->field; + uint32 pack_len= field->pack_length(); + const byte* ptr= key; + char buf[256]; + DBUG_PRINT("info", ("Filtering value for %s", field->field_name)); + DBUG_DUMP("key", (char*)ptr, pack_len); + if (key_part->null_bit) + { + DBUG_PRINT("info", ("Generating ISNULL filter")); + if (filter.isnull(key_part->fieldnr-1) == -1) + DBUG_RETURN(1); + } + else + { + DBUG_PRINT("info", ("Generating EQ filter")); + if (filter.cmp(NdbScanFilter::COND_EQ, + key_part->fieldnr-1, + ptr, + pack_len) == -1) + DBUG_RETURN(1); + } + key += key_part->store_length; + } + // Add any pushed condition + if (m_cond_stack && + (res= generate_scan_filter_from_cond(m_cond_stack, filter))) + DBUG_RETURN(res); + + if (filter.end() == -1) + DBUG_RETURN(1); + + DBUG_RETURN(0); +} + + /* get table space info for SHOW CREATE TABLE */ diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 4f0db20d0b0..8e153e0bbc4 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -73,6 +73,7 @@ typedef struct ndb_index_data { const NdbDictionary::Index *index; const NdbDictionary::Index *unique_index; unsigned char *unique_index_attrid_map; + bool null_in_unique_index; // In this version stats are not shared between threads NdbIndexStat* index_stat; uint index_stat_cache_entries; @@ -670,6 +671,9 @@ class ha_ndbcluster: public handler KEY_MULTI_RANGE*ranges, uint range_count, bool sorted, HANDLER_BUFFER *buffer); int read_multi_range_next(KEY_MULTI_RANGE **found_range_p); + bool null_value_index_search(KEY_MULTI_RANGE *ranges, + KEY_MULTI_RANGE *end_range, + HANDLER_BUFFER *buffer); bool get_error_message(int error, String *buf); ha_rows records(); @@ -814,7 +818,8 @@ private: NDB_INDEX_TYPE get_index_type_from_table(uint index_no) const; NDB_INDEX_TYPE get_index_type_from_key(uint index_no, KEY *key_info, bool primary) const; - int 
check_index_fields_not_null(KEY *key_info); + bool has_null_in_unique_index(uint idx_no) const; + bool check_index_fields_not_null(KEY *key_info); uint set_up_partition_info(partition_info *part_info, TABLE *table, @@ -829,6 +834,12 @@ private: const key_range *end_key, bool sorted, bool descending, byte* buf, part_id_range *part_spec); + int unique_index_read(const byte *key, uint key_len, + byte *buf); + int unique_index_scan(const KEY* key_info, + const byte *key, + uint key_len, + byte *buf); int full_table_scan(byte * buf); bool check_all_operations_for_error(NdbTransaction *trans, @@ -836,8 +847,6 @@ private: const NdbOperation *last, uint errcode); int peek_indexed_rows(const byte *record); - int unique_index_read(const byte *key, uint key_len, - byte *buf); int fetch_next(NdbScanOperation* op); int next_result(byte *buf); int define_read_attrs(byte* buf, NdbOperation* op); @@ -903,6 +912,13 @@ private: int build_scan_filter(Ndb_cond* &cond, NdbScanFilter* filter); int generate_scan_filter(Ndb_cond_stack* cond_stack, NdbScanOperation* op); + int generate_scan_filter_from_cond(Ndb_cond_stack* cond_stack, + NdbScanFilter& filter); + int generate_scan_filter_from_key(NdbScanOperation* op, + const KEY* key_info, + const byte *key, + uint key_len, + byte *buf); friend int execute_commit(ha_ndbcluster*, NdbTransaction*); friend int execute_no_commit_ignore_no_key(ha_ndbcluster*, NdbTransaction*); diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 865fa0bde94..92bc82f3114 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -16,6 +16,7 @@ */ #include "mysql_priv.h" +#include "sql_show.h" #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE #include "ha_ndbcluster.h" @@ -1830,14 +1831,27 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb, log_query= 1; break; case SOT_DROP_DB: - run_query(thd, schema->query, - schema->query + schema->query_length, - TRUE, /* print error */ - TRUE); /* don't binlog the query */ - /* binlog dropping database after any table operations */ - post_epoch_log_list->push_back(schema, mem_root); - /* acknowledge this query _after_ epoch completion */ - post_epoch_unlock= 1; + /* Drop the database locally if it only contains ndb tables */ + if (! ndbcluster_check_if_local_tables_in_db(thd, schema->db)) + { + run_query(thd, schema->query, + schema->query + schema->query_length, + TRUE, /* print error */ + TRUE); /* don't binlog the query */ + /* binlog dropping database after any table operations */ + post_epoch_log_list->push_back(schema, mem_root); + /* acknowledge this query _after_ epoch completion */ + post_epoch_unlock= 1; + } + else + { + /* Database contained local tables, leave it */ + sql_print_error("NDB binlog: Skipping drop database '%s' since it contained local tables " + "binlog schema event '%s' from node %d. 
", + schema->db, schema->query, + schema->node_id); + log_query= 1; + } break; case SOT_CREATE_DB: /* fall through */ @@ -2336,6 +2350,32 @@ ndbcluster_check_if_local_table(const char *dbname, const char *tabname) DBUG_RETURN(false); } +bool +ndbcluster_check_if_local_tables_in_db(THD *thd, const char *dbname) +{ + DBUG_ENTER("ndbcluster_check_if_local_tables_in_db"); + DBUG_PRINT("info", ("Looking for files in directory %s", dbname)); + char *tabname; + List<char> files; + char path[FN_REFLEN]; + + build_table_filename(path, sizeof(path), dbname, "", "", 0); + if (find_files(thd, &files, dbname, path, NullS, 0) != FIND_FILES_OK) + { + DBUG_PRINT("info", ("Failed to find files")); + DBUG_RETURN(true); + } + DBUG_PRINT("info",("found: %d files", files.elements)); + while ((tabname= files.pop())) + { + DBUG_PRINT("info", ("Found table %s", tabname)); + if (ndbcluster_check_if_local_table(dbname, tabname)) + DBUG_RETURN(true); + } + + DBUG_RETURN(false); +} + /* Common function for setting up everything for logging a table at create/discover. diff --git a/sql/ha_ndbcluster_binlog.h b/sql/ha_ndbcluster_binlog.h index 233d1a58aaa..5cf3cedf03b 100644 --- a/sql/ha_ndbcluster_binlog.h +++ b/sql/ha_ndbcluster_binlog.h @@ -124,6 +124,7 @@ void ndbcluster_binlog_init_handlerton(); void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *table); bool ndbcluster_check_if_local_table(const char *dbname, const char *tabname); +bool ndbcluster_check_if_local_tables_in_db(THD *thd, const char *dbname); int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key, uint key_len, diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 227cc37cdaf..f582e9508a6 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -491,13 +491,7 @@ bool mysqld_show_column_types(THD *thd) FIND_FILES_DIR no such directory, or directory can't be read */ -enum find_files_result { - FIND_FILES_OK, - FIND_FILES_OOM, - FIND_FILES_DIR -}; -static find_files_result find_files(THD *thd, List<char> *files, const char *db, const char *path, const char *wild, bool dir) @@ -3919,10 +3913,12 @@ static void collect_partition_expr(List<char> &field_list, String *str) } -static void store_schema_partitions_record(THD *thd, TABLE *table, +static void store_schema_partitions_record(THD *thd, TABLE *schema_table, + TABLE *showing_table, partition_element *part_elem, handler *file, uint part_id) { + TABLE* table= schema_table; CHARSET_INFO *cs= system_charset_info; PARTITION_INFO stat_info; TIME time; @@ -3973,11 +3969,22 @@ static void store_schema_partitions_record(THD *thd, TABLE *table, table->field[23]->store((longlong) part_elem->nodegroup_id, TRUE); else table->field[23]->store(STRING_WITH_LEN("default"), cs); + + table->field[24]->set_notnull(); if (part_elem->tablespace_name) table->field[24]->store(part_elem->tablespace_name, strlen(part_elem->tablespace_name), cs); else - table->field[24]->store(STRING_WITH_LEN("default"), cs); + { + char *ts= showing_table->file->get_tablespace_name(thd); + if(ts) + { + table->field[24]->store(ts, strlen(ts), cs); + my_free(ts, MYF(0)); + } + else + table->field[24]->set_null(); + } } return; } @@ -4160,7 +4167,7 @@ static int get_schema_partitions_record(THD *thd, struct st_table_list *tables, table->field[6]->store((longlong) ++subpart_pos, TRUE); table->field[6]->set_notnull(); - store_schema_partitions_record(thd, table, subpart_elem, + store_schema_partitions_record(thd, table, show_table, subpart_elem, file, part_id); part_id++; if(schema_table_store_record(thd, table)) @@ -4169,7 +4176,7 @@ static 
int get_schema_partitions_record(THD *thd, struct st_table_list *tables, } else { - store_schema_partitions_record(thd, table, part_elem, + store_schema_partitions_record(thd, table, show_table, part_elem, file, part_id); part_id++; if(schema_table_store_record(thd, table)) @@ -4181,7 +4188,7 @@ static int get_schema_partitions_record(THD *thd, struct st_table_list *tables, else #endif { - store_schema_partitions_record(thd, table, 0, file, 0); + store_schema_partitions_record(thd, table, show_table, 0, file, 0); if(schema_table_store_record(thd, table)) DBUG_RETURN(1); } @@ -5546,7 +5553,7 @@ ST_FIELD_INFO partitions_fields_info[]= {"CHECKSUM", 21 , MYSQL_TYPE_LONG, 0, 1, 0}, {"PARTITION_COMMENT", 80, MYSQL_TYPE_STRING, 0, 0, 0}, {"NODEGROUP", 12 , MYSQL_TYPE_STRING, 0, 0, 0}, - {"TABLESPACE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLESPACE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} }; diff --git a/sql/sql_show.h b/sql/sql_show.h index 681d1232b39..29cd52eb9fd 100644 --- a/sql/sql_show.h +++ b/sql/sql_show.h @@ -10,6 +10,15 @@ struct st_table_list; typedef st_ha_create_information HA_CREATE_INFO; typedef st_table_list TABLE_LIST; +enum find_files_result { + FIND_FILES_OK, + FIND_FILES_OOM, + FIND_FILES_DIR +}; + +find_files_result find_files(THD *thd, List<char> *files, const char *db, + const char *path, const char *wild, bool dir); + int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, HA_CREATE_INFO *create_info_arg); int view_store_create_info(THD *thd, TABLE_LIST *table, String *buff); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 547095d191f..17643efe1ed 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -4744,8 +4744,9 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, my_copy(src_path, dst_path, MYF(MY_DONT_OVERWRITE_FILE)); #endif dst_path[dst_path_length - reg_ext_length]= '\0'; // Remove .frm + pthread_mutex_lock(&LOCK_open); err= ha_create_table(thd, dst_path, db, table_name, create_info, 1); - + pthread_mutex_unlock(&LOCK_open); if (create_info->options & HA_LEX_CREATE_TMP_TABLE) { if (err || !open_temporary_table(thd, dst_path, db, table_name, 1)) diff --git a/storage/ndb/include/debugger/EventLogger.hpp b/storage/ndb/include/debugger/EventLogger.hpp index 6d09be70fe0..044688c9817 100644 --- a/storage/ndb/include/debugger/EventLogger.hpp +++ b/storage/ndb/include/debugger/EventLogger.hpp @@ -172,7 +172,6 @@ private: Uint32 m_filterLevel; STATIC_CONST(MAX_TEXT_LENGTH = 256); - char m_text[MAX_TEXT_LENGTH]; }; diff --git a/storage/ndb/include/logger/FileLogHandler.hpp b/storage/ndb/include/logger/FileLogHandler.hpp index 8fb25e72be7..60a455390b5 100644 --- a/storage/ndb/include/logger/FileLogHandler.hpp +++ b/storage/ndb/include/logger/FileLogHandler.hpp @@ -102,7 +102,7 @@ private: bool setMaxFiles(const BaseString &files); int m_maxNoFiles; - long m_maxFileSize; + off_t m_maxFileSize; unsigned int m_maxLogEntries; File_class* m_pLogFile; }; diff --git a/storage/ndb/include/logger/Logger.hpp b/storage/ndb/include/logger/Logger.hpp index 3414468d42d..0a0906aca9e 100644 --- a/storage/ndb/include/logger/Logger.hpp +++ b/storage/ndb/include/logger/Logger.hpp @@ -155,8 +155,6 @@ public: /** * Create a default handler that logs to the syslog. * - * On OSE a ConsoleHandler will be created since there is no syslog support. - * * @return true if successful. 
*/ bool createSyslogHandler(); @@ -276,6 +274,8 @@ public: protected: + NdbMutex *m_mutex; + void log(LoggerLevel logLevel, const char* msg, va_list ap) const; private: @@ -290,7 +290,9 @@ private: LogHandlerList* m_pHandlerList; const char* m_pCategory; + /* Default handlers */ + NdbMutex *m_handler_mutex; LogHandler* m_pConsoleHandler; LogHandler* m_pFileHandler; LogHandler* m_pSyslogHandler; diff --git a/storage/ndb/include/mgmapi/mgmapi.h b/storage/ndb/include/mgmapi/mgmapi.h index b6b87ebaaa9..70dda4d3b66 100644 --- a/storage/ndb/include/mgmapi/mgmapi.h +++ b/storage/ndb/include/mgmapi/mgmapi.h @@ -17,6 +17,11 @@ #ifndef MGMAPI_H #define MGMAPI_H +#include "mgmapi_config_parameters.h" +#include "ndb_logevent.h" + +#define MGM_LOGLEVELS CFG_MAX_LOGLEVEL - CFG_MIN_LOGLEVEL + 1 + /** * @mainpage MySQL Cluster Management API * @@ -843,6 +848,30 @@ extern "C" { * * @param handle NDB management handle * + * @param loglevel A vector of seven (NDB_MGM_EVENT_SEVERITY_ALL) + * elements of struct ndb_mgm_severity, + * where each element contains + * 1 if a severity indicator is enabled and 0 if not. + * A severity level is stored at position + * ndb_mgm_clusterlog_level; + * for example the "error" level is stored in position + * [NDB_MGM_EVENT_SEVERITY_ERROR]. + * The first element [NDB_MGM_EVENT_SEVERITY_ON] in + * the vector signals whether the cluster log + * is disabled or enabled. + * @param severity_size The size of the vector (NDB_MGM_EVENT_SEVERITY_ALL) + * @return Number of returned severities or -1 on error + */ + int ndb_mgm_get_clusterlog_severity_filter(NdbMgmHandle handle, + struct ndb_mgm_severity* severity, + unsigned int severity_size); + +#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED + /** + * Get clusterlog severity filter + * + * @param handle NDB management handle + * * @return A vector of seven elements, * where each element contains * 1 if a severity indicator is enabled and 0 if not. @@ -855,7 +884,8 @@ extern "C" { * whether the cluster log * is disabled or enabled. */ - const unsigned int *ndb_mgm_get_clusterlog_severity_filter(NdbMgmHandle handle); + const unsigned int *ndb_mgm_get_clusterlog_severity_filter_old(NdbMgmHandle handle); +#endif /** * Set log category and levels for the cluster log @@ -872,6 +902,23 @@ extern "C" { enum ndb_mgm_event_category category, int level, struct ndb_mgm_reply* reply); + + /** + * get log category and levels + * + * @param handle NDB management handle. 
+ * @param loglevel A vector of twelve (MGM_LOGLEVELS) elements + * of struct ndb_mgm_loglevel, + * where each element contains + * loglevel of corresponding category + * @param loglevel_size The size of the vector (MGM_LOGLEVELS) + * @return Number of returned loglevels or -1 on error + */ + int ndb_mgm_get_clusterlog_loglevel(NdbMgmHandle handle, + struct ndb_mgm_loglevel* loglevel, + unsigned int loglevel_size); + +#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED /** * get log category and levels * @@ -880,7 +927,9 @@ extern "C" { * where each element contains * loglevel of corresponding category */ - const unsigned int *ndb_mgm_get_clusterlog_loglevel(NdbMgmHandle handle); + const unsigned int *ndb_mgm_get_clusterlog_loglevel_old(NdbMgmHandle handle); +#endif + /** @} *********************************************************************/ /** @@ -1153,10 +1202,14 @@ extern "C" { int e, struct ndb_mgm_reply* r) { return ndb_mgm_set_clusterlog_severity_filter(h,(ndb_mgm_event_severity)s, e,r); } - + struct ndb_mgm_severity { + enum ndb_mgm_event_severity category; + unsigned int value; + }; + inline - const unsigned int *ndb_mgm_get_logfilter(NdbMgmHandle h) - { return ndb_mgm_get_clusterlog_severity_filter(h); } + const unsigned int * ndb_mgm_get_logfilter(NdbMgmHandle h) + { return ndb_mgm_get_clusterlog_severity_filter_old(h); } inline int ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle h, int n, @@ -1164,9 +1217,14 @@ extern "C" { int l, struct ndb_mgm_reply* r) { return ndb_mgm_set_clusterlog_loglevel(h,n,c,l,r); } + struct ndb_mgm_loglevel { + enum ndb_mgm_event_category category; + unsigned int value; + }; + inline - const unsigned int *ndb_mgm_get_loglevel_clusterlog(NdbMgmHandle h) - { return ndb_mgm_get_clusterlog_loglevel(h); } + const unsigned int * ndb_mgm_get_loglevel_clusterlog(NdbMgmHandle h) + { return ndb_mgm_get_clusterlog_loglevel_old(h); } #endif diff --git a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h index d1feaa1a7d3..70af187db77 100644 --- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h +++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h @@ -149,9 +149,9 @@ #define CFG_SCI_SEND_LIMIT 554 #define CFG_SCI_BUFFER_MEM 555 -#define CFG_OSE_PRIO_A_SIZE 602 -#define CFG_OSE_PRIO_B_SIZE 603 -#define CFG_OSE_RECEIVE_ARRAY_SIZE 604 +#define CFG_602 602 // Removed: was OSE +#define CFG_603 603 // Removed: was OSE +#define CFG_604 604 // Removed: was OSE /** * API Config variables @@ -178,6 +178,6 @@ #define CONNECTION_TYPE_TCP 0 #define CONNECTION_TYPE_SHM 1 #define CONNECTION_TYPE_SCI 2 -#define CONNECTION_TYPE_OSE 3 +#define CONNECTION_TYPE_OSE 3 // Removed. #endif diff --git a/storage/ndb/include/ndbapi/Ndb.hpp b/storage/ndb/include/ndbapi/Ndb.hpp index 5c6ad76c063..726a58c591d 100644 --- a/storage/ndb/include/ndbapi/Ndb.hpp +++ b/storage/ndb/include/ndbapi/Ndb.hpp @@ -501,12 +501,11 @@ There are four conditions leading to the transfer of database operations from Ndb object buffers to the NDB kernel: - -# The NDB Transporter (TCP/IP, OSE, SCI or shared memory) + -# The NDB Transporter (TCP/IP, SCI or shared memory) decides that a buffer is full and sends it off. The buffer size is implementation-dependent and may change between MySQL Cluster releases. On TCP/IP the buffer size is usually around 64 KB; - on OSE/Delta it is usually less than 2000 bytes. Since each Ndb object provides a single buffer per storage node, the notion of a "full" buffer is local to this storage node. 
-# The accumulation of statistical data on transferred information @@ -991,16 +990,7 @@ template <class T> struct Ndb_free_list_t; typedef void (* NdbEventCallback)(NdbEventOperation*, Ndb*, void*); -#if defined NDB_OSE -/** - * Default time to wait for response after request has been sent to - * NDB Cluster (Set to 10 seconds usually, but to 100 s in - * the OSE operating system) - */ -#define WAITFOR_RESPONSE_TIMEOUT 100000 // Milliseconds -#else #define WAITFOR_RESPONSE_TIMEOUT 120000 // Milliseconds -#endif #define NDB_SYSTEM_DATABASE "sys" #define NDB_SYSTEM_SCHEMA "def" diff --git a/storage/ndb/include/portlib/NdbMain.h b/storage/ndb/include/portlib/NdbMain.h index 7cc7a877750..b39847964bc 100644 --- a/storage/ndb/include/portlib/NdbMain.h +++ b/storage/ndb/include/portlib/NdbMain.h @@ -17,50 +17,10 @@ #ifndef NDBMAIN_H #define NDBMAIN_H -#if defined NDB_SOFTOSE || defined NDB_OSE -#include <ose.h> -#include <shell.h> - -/* Define an OSE_PROCESS that can be started from osemain.con */ -#define NDB_MAIN(name) \ -int main_ ## name(int argc, const char** argv); \ -OS_PROCESS(name){ \ - main_ ## name(0, 0); \ - stop(current_process()); \ - exit(0); \ -} \ -int main_ ## name(int argc, const char** argv) - -/* Define an function that can be started from the command line */ -#define NDB_COMMAND(name, str_name, syntax, description, stacksize) \ -int main_ ## name(int argc, const char** argv); \ - \ -static int run_ ## name(int argc, char *argv[]){ \ - return main_ ## name (argc, argv); \ -} \ - \ -OS_PROCESS(init_ ## name){ \ - shell_add_cmd_attrs(str_name, syntax, description, \ - run_ ## name, OS_PRI_PROC, 25, stacksize); \ - stop(current_process()); \ - return; \ -} \ - \ -int main_ ## name(int argc, const char** argv) - - - - -#else - #define NDB_MAIN(name) \ int main(int argc, const char** argv) #define NDB_COMMAND(name, str_name, syntax, description, stacksize) \ int main(int argc, const char** argv) - -#endif - - #endif diff --git a/storage/ndb/include/portlib/NdbMutex.h b/storage/ndb/include/portlib/NdbMutex.h index b0b985ecef5..33d04ea2927 100644 --- a/storage/ndb/include/portlib/NdbMutex.h +++ b/storage/ndb/include/portlib/NdbMutex.h @@ -28,10 +28,7 @@ extern "C" { #endif -#if defined NDB_OSE || defined NDB_SOFTOSE -#include <ose.h> -typedef SEMAPHORE NdbMutex; -#elif defined NDB_WIN32 +#if defined NDB_WIN32 typedef CRITICAL_SECTION NdbMutex; #else #include <pthread.h> diff --git a/storage/ndb/include/portlib/NdbTCP.h b/storage/ndb/include/portlib/NdbTCP.h index 9ed5b5e7f96..98431fe73cf 100644 --- a/storage/ndb/include/portlib/NdbTCP.h +++ b/storage/ndb/include/portlib/NdbTCP.h @@ -20,27 +20,7 @@ #include <ndb_global.h> #include <ndb_net.h> -#if defined NDB_OSE || defined NDB_SOFTOSE -/** - * Include files needed - */ -#include "inet.h" - -#include <netdb.h> - -#define NDB_NONBLOCK FNDELAY -#define NDB_SOCKET_TYPE int -#define NDB_INVALID_SOCKET -1 -#define _NDB_CLOSE_SOCKET(x) close(x) - -/** - * socklen_t not defined in the header files of OSE - */ -typedef int socklen_t; - -#define InetErrno (* inet_errno()) - -#elif defined NDB_WIN32 +#if defined NDB_WIN32 /** * Include files needed diff --git a/storage/ndb/include/portlib/NdbTick.h b/storage/ndb/include/portlib/NdbTick.h index 9bd8eca22bd..3f589ae8f42 100644 --- a/storage/ndb/include/portlib/NdbTick.h +++ b/storage/ndb/include/portlib/NdbTick.h @@ -23,11 +23,7 @@ extern "C" { #endif -#if defined NDB_OSE || defined NDB_SOFTOSE -typedef unsigned long NDB_TICKS; -#else typedef Uint64 NDB_TICKS; -#endif /** * Returns the current 
millisecond since 1970 diff --git a/storage/ndb/include/transporter/TransporterRegistry.hpp b/storage/ndb/include/transporter/TransporterRegistry.hpp index 89ae3a19e87..b82505ebdee 100644 --- a/storage/ndb/include/transporter/TransporterRegistry.hpp +++ b/storage/ndb/include/transporter/TransporterRegistry.hpp @@ -49,8 +49,8 @@ enum IOState { enum TransporterType { tt_TCP_TRANSPORTER = 1, tt_SCI_TRANSPORTER = 2, - tt_SHM_TRANSPORTER = 3, - tt_OSE_TRANSPORTER = 4 + tt_SHM_TRANSPORTER = 3 + // ID 4 was OSE Transporter which has been removed. Don't use ID 4. }; static const char *performStateString[] = @@ -63,7 +63,6 @@ class Transporter; class TCP_Transporter; class SCI_Transporter; class SHM_Transporter; -class OSE_Transporter; class TransporterRegistry; class SocketAuthenticator; @@ -89,7 +88,6 @@ public: * @brief ... */ class TransporterRegistry { - friend class OSE_Receiver; friend class SHM_Transporter; friend class Transporter; friend class TransporterService; @@ -202,7 +200,6 @@ public: bool createTCPTransporter(struct TransporterConfiguration * config); bool createSCITransporter(struct TransporterConfiguration * config); bool createSHMTransporter(struct TransporterConfiguration * config); - bool createOSETransporter(struct TransporterConfiguration * config); /** * Get free buffer space @@ -288,7 +285,6 @@ private: int nTCPTransporters; int nSCITransporters; int nSHMTransporters; - int nOSETransporters; /** * Arrays holding all transporters in the order they are created @@ -296,7 +292,6 @@ private: TCP_Transporter** theTCPTransporters; SCI_Transporter** theSCITransporters; SHM_Transporter** theSHMTransporters; - OSE_Transporter** theOSETransporters; /** * Array, indexed by nodeId, holding all transporters @@ -304,24 +299,6 @@ private: TransporterType* theTransporterTypes; Transporter** theTransporters; - /** - * OSE Receiver - */ - class OSE_Receiver * theOSEReceiver; - - /** - * In OSE you for some bizar reason needs to create a socket - * the first thing you do when using inet functions. - * - * Furthermore a process doing select has to "own" a socket - * - */ - int theOSEJunkSocketSend; - int theOSEJunkSocketRecv; -#if defined NDB_OSE || defined NDB_SOFTOSE - PROCESS theReceiverPid; -#endif - /** * State arrays, index by host id */ @@ -355,7 +332,6 @@ private: int tcpReadSelectReply; fd_set tcpReadset; - Uint32 poll_OSE(Uint32 timeOutMillis); Uint32 poll_TCP(Uint32 timeOutMillis); Uint32 poll_SCI(Uint32 timeOutMillis); Uint32 poll_SHM(Uint32 timeOutMillis); diff --git a/storage/ndb/include/util/File.hpp b/storage/ndb/include/util/File.hpp index fc71394c8c5..89c03d60cae 100644 --- a/storage/ndb/include/util/File.hpp +++ b/storage/ndb/include/util/File.hpp @@ -50,7 +50,7 @@ public: * @param f a pointer to a FILE descriptor. * @return the size of the file. */ - static long size(FILE* f); + static off_t size(FILE* f); /** * Renames a file. @@ -182,7 +182,7 @@ public: * * @return the file size. */ - long size() const; + off_t size() const; /** * Returns the filename. 
diff --git a/storage/ndb/include/util/OutputStream.hpp b/storage/ndb/include/util/OutputStream.hpp
index 66adb549da0..723aeffb30c 100644
--- a/storage/ndb/include/util/OutputStream.hpp
+++ b/storage/ndb/include/util/OutputStream.hpp
@@ -53,15 +53,6 @@ public:
   int println(const char * fmt, ...);
 };
-class SoftOseOutputStream : public OutputStream {
-public:
-  SoftOseOutputStream();
-  virtual ~SoftOseOutputStream() {}
-
-  int print(const char * fmt, ...);
-  int println(const char * fmt, ...);
-};
-
 class NullOutputStream : public OutputStream {
 public:
   virtual ~NullOutputStream() {}
diff --git a/storage/ndb/include/util/Vector.hpp b/storage/ndb/include/util/Vector.hpp
index cd01d914741..95af0c8fbfc 100644
--- a/storage/ndb/include/util/Vector.hpp
+++ b/storage/ndb/include/util/Vector.hpp
@@ -96,6 +96,8 @@ void Vector<T>::push_back(const T & t){
   if(m_size == m_arraySize){
     T * tmp = new T [m_arraySize + m_incSize];
+    if(!tmp)
+      abort();
     for (unsigned k = 0; k < m_size; k++)
       tmp[k] = m_items[k];
     delete[] m_items;
diff --git a/storage/ndb/include/util/ndb_opts.h b/storage/ndb/include/util/ndb_opts.h
index 053cc8613f7..181d0fe3c26 100644
--- a/storage/ndb/include/util/ndb_opts.h
+++ b/storage/ndb/include/util/ndb_opts.h
@@ -42,7 +42,7 @@ const char *opt_debug= 0;
 #endif
 #define OPT_NDB_CONNECTSTRING 'c'
-#if defined VM_TRACE && ( ! ( defined NDB_OSE || defined NDB_SOFTOSE) )
+#if defined VM_TRACE
 #define OPT_WANT_CORE_DEFAULT 1
 #else
 #define OPT_WANT_CORE_DEFAULT 0
diff --git a/storage/ndb/src/common/debugger/EventLogger.cpp b/storage/ndb/src/common/debugger/EventLogger.cpp
index ca1d2381693..45df07fefaf 100644
--- a/storage/ndb/src/common/debugger/EventLogger.cpp
+++ b/storage/ndb/src/common/debugger/EventLogger.cpp
@@ -1003,6 +1003,7 @@ EventLogger::log(int eventType, const Uint32* theData, NodeId nodeId,
   Logger::LoggerLevel severity = Logger::LL_WARNING;
   LogLevel::EventCategory cat= LogLevel::llInvalid;
   EventTextFunction textF;
+  char log_text[MAX_TEXT_LENGTH];
   DBUG_ENTER("EventLogger::log");
   DBUG_PRINT("enter",("eventType=%d, nodeid=%d", eventType, nodeId));
@@ -1016,29 +1017,29 @@ EventLogger::log(int eventType, const Uint32* theData, NodeId nodeId,
   DBUG_PRINT("info",("m_logLevel.getLogLevel=%d", m_logLevel.getLogLevel(cat)));
   if (threshold <= set){
-    getText(m_text,sizeof(m_text),textF,theData,nodeId);
+    getText(log_text,sizeof(log_text),textF,theData,nodeId);
     switch (severity){
     case Logger::LL_ALERT:
-      alert(m_text);
+      alert(log_text);
       break;
     case Logger::LL_CRITICAL:
-      critical(m_text);
+      critical(log_text);
       break;
     case Logger::LL_WARNING:
-      warning(m_text);
+      warning(log_text);
       break;
     case Logger::LL_ERROR:
-      error(m_text);
+      error(log_text);
       break;
     case Logger::LL_INFO:
-      info(m_text);
+      info(log_text);
       break;
     case Logger::LL_DEBUG:
-      debug(m_text);
+      debug(log_text);
       break;
     default:
-      info(m_text);
+      info(log_text);
       break;
     }
   } // if (..
@@ -1056,7 +1057,3 @@ EventLogger::setFilterLevel(int filterLevel)
 {
   m_filterLevel = filterLevel;
 }
-
-//
-// PRIVATE
-//
diff --git a/storage/ndb/src/common/logger/FileLogHandler.cpp b/storage/ndb/src/common/logger/FileLogHandler.cpp
index b8859630406..cf757064aa0 100644
--- a/storage/ndb/src/common/logger/FileLogHandler.cpp
+++ b/storage/ndb/src/common/logger/FileLogHandler.cpp
@@ -125,8 +125,6 @@ FileLogHandler::writeFooter()
   }
   callCount++;
-  // Needed on Cello since writes to the flash disk does not happen until
-  // we flush and fsync.
m_pLogFile->flush(); } diff --git a/storage/ndb/src/common/logger/Logger.cpp b/storage/ndb/src/common/logger/Logger.cpp index 48e084a782b..94f949eefd3 100644 --- a/storage/ndb/src/common/logger/Logger.cpp +++ b/storage/ndb/src/common/logger/Logger.cpp @@ -23,7 +23,7 @@ #include <FileLogHandler.hpp> #include "LogHandlerList.hpp" -#if !defined NDB_OSE || !defined NDB_SOFTOSE || !defined NDB_WIN32 +#if !defined NDB_WIN32 #include <SysLogHandler.hpp> #endif @@ -46,6 +46,8 @@ Logger::Logger() : m_pSyslogHandler(NULL) { m_pHandlerList = new LogHandlerList(); + m_mutex= NdbMutex_Create(); + m_handler_mutex= NdbMutex_Create(); disable(LL_ALL); enable(LL_ON); enable(LL_INFO); @@ -53,20 +55,25 @@ Logger::Logger() : Logger::~Logger() { - removeAllHandlers(); + removeAllHandlers(); delete m_pHandlerList; + NdbMutex_Destroy(m_handler_mutex); + NdbMutex_Destroy(m_mutex); } void Logger::setCategory(const char* pCategory) { + Guard g(m_mutex); m_pCategory = pCategory; } bool Logger::createConsoleHandler() { + Guard g(m_handler_mutex); bool rc = true; + if (m_pConsoleHandler == NULL) { m_pConsoleHandler = new ConsoleLogHandler(); @@ -84,6 +91,7 @@ Logger::createConsoleHandler() void Logger::removeConsoleHandler() { + Guard g(m_handler_mutex); if (removeHandler(m_pConsoleHandler)) { m_pConsoleHandler = NULL; @@ -93,6 +101,7 @@ Logger::removeConsoleHandler() bool Logger::createFileHandler() { + Guard g(m_handler_mutex); bool rc = true; if (m_pFileHandler == NULL) { @@ -111,6 +120,7 @@ Logger::createFileHandler() void Logger::removeFileHandler() { + Guard g(m_handler_mutex); if (removeHandler(m_pFileHandler)) { m_pFileHandler = NULL; @@ -120,10 +130,11 @@ Logger::removeFileHandler() bool Logger::createSyslogHandler() { + Guard g(m_handler_mutex); bool rc = true; if (m_pSyslogHandler == NULL) { -#if defined NDB_OSE || defined NDB_SOFTOSE || defined NDB_WIN32 +#if defined NDB_WIN32 m_pSyslogHandler = new ConsoleLogHandler(); #else m_pSyslogHandler = new SysLogHandler(); @@ -142,6 +153,7 @@ Logger::createSyslogHandler() void Logger::removeSyslogHandler() { + Guard g(m_handler_mutex); if (removeHandler(m_pSyslogHandler)) { m_pSyslogHandler = NULL; @@ -151,6 +163,7 @@ Logger::removeSyslogHandler() bool Logger::addHandler(LogHandler* pHandler) { + Guard g(m_mutex); assert(pHandler != NULL); bool rc = pHandler->open(); @@ -224,6 +237,7 @@ Logger::addHandler(const BaseString &logstring, int *err, int len, char* errStr) bool Logger::removeHandler(LogHandler* pHandler) { + Guard g(m_mutex); int rc = false; if (pHandler != NULL) { @@ -236,12 +250,14 @@ Logger::removeHandler(LogHandler* pHandler) void Logger::removeAllHandlers() { + Guard g(m_mutex); m_pHandlerList->removeAll(); } bool Logger::isEnable(LoggerLevel logLevel) const { + Guard g(m_mutex); if (logLevel == LL_ALL) { for (unsigned i = 1; i < MAX_LOG_LEVELS; i++) @@ -255,6 +271,7 @@ Logger::isEnable(LoggerLevel logLevel) const void Logger::enable(LoggerLevel logLevel) { + Guard g(m_mutex); if (logLevel == LL_ALL) { for (unsigned i = 0; i < MAX_LOG_LEVELS; i++) @@ -271,6 +288,7 @@ Logger::enable(LoggerLevel logLevel) void Logger::enable(LoggerLevel fromLogLevel, LoggerLevel toLogLevel) { + Guard g(m_mutex); if (fromLogLevel > toLogLevel) { LoggerLevel tmp = toLogLevel; @@ -287,6 +305,7 @@ Logger::enable(LoggerLevel fromLogLevel, LoggerLevel toLogLevel) void Logger::disable(LoggerLevel logLevel) { + Guard g(m_mutex); if (logLevel == LL_ALL) { for (unsigned i = 0; i < MAX_LOG_LEVELS; i++) @@ -359,6 +378,7 @@ Logger::debug(const char* pMsg, ...) 
const void Logger::log(LoggerLevel logLevel, const char* pMsg, va_list ap) const { + Guard g(m_mutex); if (m_logLevels[LL_ON] && m_logLevels[logLevel]) { char buf[MAX_LOG_MESSAGE_SIZE]; diff --git a/storage/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp b/storage/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp index 990d2e0eada..9c476d67497 100644 --- a/storage/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp +++ b/storage/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp @@ -20,9 +20,7 @@ #include <ConsoleLogHandler.hpp> #include <FileLogHandler.hpp> -#if !defined NDB_OSE || !defined NDB_SOFTOSE #include <SysLogHandler.hpp> -#endif #include <NdbOut.hpp> #include <NdbMain.h> @@ -53,11 +51,7 @@ NDB_COMMAND(loggertest, "loggertest", "loggertest -console | -file", { if (argc < 2) { -#if defined NDB_OSE || defined NDB_SOFTOSE - ndbout << "Usage: loggertest -console | -file" << endl; -#else ndbout << "Usage: loggertest -console | -file | -syslog" << endl; -#endif return 0; } @@ -70,12 +64,10 @@ NDB_COMMAND(loggertest, "loggertest", "loggertest -console | -file", logger.createFileHandler(); //logger.addHandler(new FileLogHandler(argv[2])); } -#if !defined NDB_OSE || !defined NDB_SOFTOSE else if (strcmp(argv[1], "-syslog") == 0) { logger.createSyslogHandler(); } -#endif logger.disable(Logger::LL_ALL); @@ -101,8 +93,8 @@ NDB_COMMAND(loggertest, "loggertest", "loggertest -console | -file", ndbout << endl << "-- " << testCount - testFailed << " passed, " << testFailed << " failed --" << endl; - logger.removeAllHandlers(); // Need to remove all for OSE, - // because logger is global + logger.removeAllHandlers(); + return 0; } diff --git a/storage/ndb/src/common/mgmcommon/IPCConfig.cpp b/storage/ndb/src/common/mgmcommon/IPCConfig.cpp index bc442ffc3ef..eae622acbc9 100644 --- a/storage/ndb/src/common/mgmcommon/IPCConfig.cpp +++ b/storage/ndb/src/common/mgmcommon/IPCConfig.cpp @@ -358,18 +358,6 @@ IPCConfig::configureTransporters(Uint32 nodeId, "maxReceiveSize = %d", conf.tcp.sendBufferSize, conf.tcp.maxReceiveSize)); break; - case CONNECTION_TYPE_OSE: - if(iter.get(CFG_OSE_PRIO_A_SIZE, &conf.ose.prioASignalSize)) break; - if(iter.get(CFG_OSE_PRIO_B_SIZE, &conf.ose.prioBSignalSize)) break; - - if(!tr.createOSETransporter(&conf)){ - ndbout << "Failed to create OSE Transporter from: " - << nodeId << " to: " << remoteNodeId << endl; - } else { - noOfTransportersCreated++; - } - break; - default: ndbout << "Unknown transporter type from: " << nodeId << " to: " << remoteNodeId << endl; diff --git a/storage/ndb/src/common/portlib/NdbPortLibTest.cpp b/storage/ndb/src/common/portlib/NdbPortLibTest.cpp index d7892411851..0ace8d7a3bb 100644 --- a/storage/ndb/src/common/portlib/NdbPortLibTest.cpp +++ b/storage/ndb/src/common/portlib/NdbPortLibTest.cpp @@ -474,11 +474,6 @@ NDB_COMMAND(PortLibTest, "portlibtest", "portlibtest", "Test the portable functi testMicros(iter); ndbout << "Testing microsecond timer - COMPLETED" << endl; -#if defined NDB_OSE || defined NDB_SOFTOSE - ndbout << "system_tick() = " << system_tick() << " us per tick" << endl; -#endif - - ndbout << "= TEST10 ===============================" << endl; testmutex = NdbMutex_Create(); diff --git a/storage/ndb/src/common/portlib/mmslist.cpp b/storage/ndb/src/common/portlib/mmslist.cpp deleted file mode 100644 index 05538785293..00000000000 --- a/storage/ndb/src/common/portlib/mmslist.cpp +++ /dev/null @@ -1,103 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the 
terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#include <ndb_common.h> - -#include <NdbOut.hpp> -#include <NdbMain.h> - -#include <ose.h> -#include <mms.sig> -#include <mms_err.h> -#include <NdbOut.hpp> - -/** - * NOTE: To use NdbMem from a OSE system ose_mms has to be defined - * as a "Required External Process"(see OSE Kernel User's Guide/R1.1(p. 148)), - * like this: - * EXT_PROC(ose_mms, ose_mms, 50000) - * This will create a global variable ose_mms_ that is used from here. - */ - -union SIGNAL -{ - SIGSELECT sigNo; - struct MmsListDomainRequest mmsListDomainRequest; - struct MmsListDomainReply mmsListDomainReply; -}; /* union SIGNAL */ - -extern PROCESS ose_mms_; - -struct ARegion -{ - unsigned long int address; - unsigned long int size; - char name[32]; - - U32 resident; /* Boolean, nonzero if resident. */ - U32 access; /* See values for AccessType (above) .*/ - U32 type; /* either RAM-mem (1) or Io-mem (2) */ - U32 cache; /* 0-copyback,1-writethrough, 2-CacheInhibit.*/ -}; - -NDB_COMMAND(mmslist, "mmslist", "mmslist", "LIst the MMS memory segments", 4096){ - if (argc == 1){ - - static SIGSELECT allocate_sig[] = {1,MMS_LIST_DOMAIN_REPLY}; - union SIGNAL *sig; - - /* Send request to list all segments and regions. */ - sig = alloc(sizeof(struct MmsListDomainRequest), - MMS_LIST_DOMAIN_REQUEST); - send(&sig, ose_mms_); - - while (true){ - sig = receive(allocate_sig); - if (sig != NIL){ - if (sig->mmsListDomainReply.status == MMS_SUCCESS){ - /* Print domain info */ - ndbout << "=================================" << endl; - ndbout << "domain: " << sig->mmsListDomainReply.domain << endl; - ndbout << "name : " << sig->mmsListDomainReply.name << endl; - ndbout << "used : " << sig->mmsListDomainReply.used << endl; - ndbout << "lock : " << sig->mmsListDomainReply.lock << endl; - ndbout << "numOfRegions:" << sig->mmsListDomainReply.numOfRegions << endl; - struct ARegion * tmp = (struct ARegion*)&sig->mmsListDomainReply.regions[0]; - for (int i = 0; i < sig->mmsListDomainReply.numOfRegions && i < 256; i++){ - ndbout << i << ": adress=" << tmp->address << - ", size=" << tmp->size << - ", name=" << tmp->name << - ", resident=" << tmp->resident << - ", access=" << tmp->access << - ", type=" << tmp->type << - ", cache=" << tmp->cache << endl; - tmp++; - } - - free_buf(&sig); - }else{ - free_buf(&sig); - break; - } - } - - } - - }else{ - ndbout << "Usage: mmslist" << endl; - } - return NULL; -} diff --git a/storage/ndb/src/common/transporter/OSE_Receiver.cpp b/storage/ndb/src/common/transporter/OSE_Receiver.cpp deleted file mode 100644 index 63a33fc8f24..00000000000 --- a/storage/ndb/src/common/transporter/OSE_Receiver.cpp +++ /dev/null @@ -1,359 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#include <NdbOut.hpp> -#include "OSE_Receiver.hpp" -#include "OSE_Transporter.hpp" -#include "TransporterCallback.hpp" -#include <TransporterRegistry.hpp> -#include "TransporterInternalDefinitions.hpp" - -OSE_Receiver::OSE_Receiver(TransporterRegistry * tr, - int _recBufSize, - NodeId _localNodeId) { - theTransporterRegistry = tr; - - recBufSize = _recBufSize; - recBufReadIndex = 0; - recBufWriteIndex = 0; - receiveBuffer = new union SIGNAL * [recBufSize]; - - waitStackCount = 0; - waitStackSize = _recBufSize; - waitStack = new union SIGNAL * [waitStackSize]; - - nextSigId = new Uint32[MAX_NTRANSPORTERS]; - for (int i = 0; i < MAX_NTRANSPORTERS; i++) - nextSigId[i] = 0; - - phantomCreated = false; - localNodeId = _localNodeId; - BaseString::snprintf(localHostName, sizeof(localHostName), - "ndb_node%d", localNodeId); - - DEBUG("localNodeId = " << localNodeId << " -> localHostName = " - << localHostName); -} - -OSE_Receiver::~OSE_Receiver(){ - while(recBufReadIndex != recBufWriteIndex){ - free_buf(&receiveBuffer[recBufReadIndex]); - recBufReadIndex = (recBufReadIndex + 1) % recBufSize; - } - delete [] receiveBuffer; - destroyPhantom(); -} - -PROCESS -OSE_Receiver::createPhantom(){ - redir.sig = 1; - redir.pid = current_process(); - - if(!phantomCreated){ - phantomPid = create_process - (OS_PHANTOM, // Type - localHostName, // Name - NULL, // Entry point - 0, // Stack size - 0, // Prio - Not used - (OSTIME)0, // Timeslice - Not used - 0, // Block - current block - &redir, - (OSVECTOR)0, // vector - (OSUSER)0); // user - phantomCreated = true; - DEBUG("Created phantom pid: " << hex << phantomPid); - } - return phantomPid; -} - -void -OSE_Receiver::destroyPhantom(){ - if(phantomCreated){ - DEBUG("Destroying phantom pid: " << hex << phantomPid); - kill_proc(phantomPid); - phantomCreated = false; - } -} - -static SIGSELECT PRIO_A_SIGNALS[] = { 6, - NDB_TRANSPORTER_PRIO_A, - NDB_TRANSPORTER_HUNT, - NDB_TRANSPORTER_CONNECT_REQ, - NDB_TRANSPORTER_CONNECT_REF, - NDB_TRANSPORTER_CONNECT_CONF, - NDB_TRANSPORTER_DISCONNECT_ORD -}; - -static SIGSELECT PRIO_B_SIGNALS[] = { 1, - NDB_TRANSPORTER_DATA -}; - -/** - * Check waitstack for signals that are next in sequence - * Put any found signal in receive buffer - * Returns true if one signal is found - */ -bool -OSE_Receiver::checkWaitStack(NodeId _nodeId){ - - for(int i = 0; i < waitStackCount; i++){ - if (waitStack[i]->dataSignal.senderNodeId == _nodeId && - waitStack[i]->dataSignal.sigId == nextSigId[_nodeId]){ - - ndbout_c("INFO: signal popped from waitStack, sigId = %d", - waitStack[i]->dataSignal.sigId); - - if(isFull()){ - ndbout_c("ERROR: receiveBuffer is full"); - reportError(callbackObj, _nodeId, TE_RECEIVE_BUFFER_FULL); - return false; - } - - // The next signal was found, put it in the receive buffer - insertReceiveBuffer(waitStack[i]); - - // Increase sequence id, set it to the next expected id - nextSigId[_nodeId]++; - - // Move signals below up one step - for(int j = i; j < waitStackCount-1; j++) - waitStack[j] = waitStack[j+1]; - waitStack[waitStackCount] = NULL; - waitStackCount--; - - // return 
true since signal was found - return true; - } - } - return false; -} - -/** - * Clear waitstack for signals from node with _nodeId - */ -void -OSE_Receiver::clearWaitStack(NodeId _nodeId){ - - for(int i = 0; i < waitStackCount; i++){ - if (waitStack[i]->dataSignal.senderNodeId == _nodeId){ - - // Free signal buffer - free_buf(&waitStack[i]); - - // Move signals below up one step - for(int j = i; j < waitStackCount-1; j++) - waitStack[j] = waitStack[j+1]; - waitStack[waitStackCount] = NULL; - waitStackCount--; - } - } - nextSigId[_nodeId] = 0; -} - - -inline -void -OSE_Receiver::insertWaitStack(union SIGNAL* _sig){ - if (waitStackCount <= waitStackSize){ - waitStack[waitStackCount] = _sig; - waitStackCount++; - } else { - ndbout_c("ERROR: waitStack is full"); - reportError(callbackObj, localNodeId, TE_WAIT_STACK_FULL); - } -} - -bool -OSE_Receiver::doReceive(Uint32 timeOutMillis) { - if(isFull()) - return false; - - union SIGNAL * sig = receive_w_tmo(0, - PRIO_A_SIGNALS); - if(sig == NIL){ - sig = receive_w_tmo(timeOutMillis, - PRIO_B_SIGNALS); - if(sig == NIL) - return false; - } - - DEBUG("Received signal: " << sig->sigNo << " " - << sigNo2String(sig->sigNo)); - - switch(sig->sigNo){ - case NDB_TRANSPORTER_PRIO_A: - { - OSE_Transporter * t = getTransporter(sig->dataSignal.senderNodeId); - if (t != 0 && t->isConnected()){ - insertReceiveBuffer(sig); - } else { - free_buf(&sig); - } - } - break; - case NDB_TRANSPORTER_DATA: - { - OSE_Transporter * t = getTransporter(sig->dataSignal.senderNodeId); - if (t != 0 && t->isConnected()){ - int nodeId = sig->dataSignal.senderNodeId; - Uint32 currSigId = sig->dataSignal.sigId; - - /** - * Check if signal is the next in sequence - * nextSigId is always set to the next sigId to wait for - */ - if (nextSigId[nodeId] == currSigId){ - - // Insert in receive buffer - insertReceiveBuffer(sig); - - // Increase sequence id, set it to the next expected id - nextSigId[nodeId]++; - - // Check if there are any signal in the wait stack - if (waitStackCount > 0){ - while(checkWaitStack(nodeId)); - } - } else { - // Signal was not received in correct order - // Check values and put it in the waitStack - ndbout_c("WARNING: sigId out of order," - " currSigId = %d, nextSigId = %d", - currSigId, nextSigId[nodeId]); - - if (currSigId < nextSigId[nodeId]){ - // Current recieved sigId was smaller than nextSigId - // There is no use to put it in the waitStack - ndbout_c("ERROR: recieved sigId was smaller than nextSigId"); - reportError(callbackObj, nodeId, TE_TOO_SMALL_SIGID); - return false; - } - - if (currSigId > (nextSigId[nodeId] + waitStackSize)){ - // Current sigId was larger than nextSigId + size of waitStack - // we can never "save" so many signal's on the stack - ndbout_c("ERROR: currSigId > (nextSigId + size of waitStack)"); - reportError(callbackObj, nodeId, TE_TOO_LARGE_SIGID); - return false; - } - - // Insert in wait stack - insertWaitStack(sig); - } - } else { - free_buf(&sig); - } - } - break; - case NDB_TRANSPORTER_HUNT: - { - NdbTransporterHunt * s = (NdbTransporterHunt*)sig; - OSE_Transporter * t = getTransporter(s->remoteNodeId); - if(t != 0) - t->huntReceived(s); - free_buf(&sig); - } - break; - case NDB_TRANSPORTER_CONNECT_REQ: - { - NdbTransporterConnectReq * s = (NdbTransporterConnectReq*)sig; - OSE_Transporter * t = getTransporter(s->senderNodeId); - if(t != 0){ - if(t->connectReq(s)){ - clearWaitStack(s->senderNodeId); - clearRecvBuffer(s->senderNodeId); - } - } - free_buf(&sig); - } - break; - case NDB_TRANSPORTER_CONNECT_REF: - { - 
NdbTransporterConnectRef * s = (NdbTransporterConnectRef*)sig; - OSE_Transporter * t = getTransporter(s->senderNodeId); - if(t != 0){ - if(t->connectRef(s)){ - clearWaitStack(s->senderNodeId); - clearRecvBuffer(s->senderNodeId); - } - } - free_buf(&sig); - } - break; - case NDB_TRANSPORTER_CONNECT_CONF: - { - NdbTransporterConnectConf * s = (NdbTransporterConnectConf*)sig; - OSE_Transporter * t = getTransporter(s->senderNodeId); - if(t != 0){ - if(t->connectConf(s)){ - clearWaitStack(s->senderNodeId); - clearRecvBuffer(s->senderNodeId); - } - } - free_buf(&sig); - } - break; - case NDB_TRANSPORTER_DISCONNECT_ORD: - { - NdbTransporterDisconnectOrd * s = (NdbTransporterDisconnectOrd*)sig; - OSE_Transporter * t = getTransporter(s->senderNodeId); - if(t != 0){ - if(t->disconnectOrd(s)){ - clearWaitStack(s->senderNodeId); - clearRecvBuffer(s->senderNodeId); - } - } - free_buf(&sig); - } - } - return true; -} - -OSE_Transporter * -OSE_Receiver::getTransporter(NodeId nodeId){ - if(theTransporterRegistry->theTransporterTypes[nodeId] != tt_OSE_TRANSPORTER) - return 0; - return (OSE_Transporter *) - theTransporterRegistry->theTransporters[nodeId]; -} - -void -OSE_Receiver::clearRecvBuffer(NodeId nodeId){ - int tmpIndex = 0; - union SIGNAL** tmp = new union SIGNAL * [recBufSize]; - - /** - * Put all signal that I want to keep into tmp - */ - while(recBufReadIndex != recBufWriteIndex){ - if(receiveBuffer[recBufReadIndex]->dataSignal.senderNodeId != nodeId){ - tmp[tmpIndex] = receiveBuffer[recBufReadIndex]; - tmpIndex++; - } else { - free_buf(&receiveBuffer[recBufReadIndex]); - } - recBufReadIndex = (recBufReadIndex + 1) % recBufSize; - } - - /** - * Put all signals that I kept back into receiveBuffer - */ - for(int i = 0; i<tmpIndex; i++) - insertReceiveBuffer(tmp[i]); - - delete [] tmp; -} diff --git a/storage/ndb/src/common/transporter/OSE_Receiver.hpp b/storage/ndb/src/common/transporter/OSE_Receiver.hpp deleted file mode 100644 index 1812ab51065..00000000000 --- a/storage/ndb/src/common/transporter/OSE_Receiver.hpp +++ /dev/null @@ -1,119 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifndef OSE_RECEIVER_HPP -#define OSE_RECEIVER_HPP - -#include "ose.h" -#include "OSE_Signals.hpp" -#include <kernel_types.h> - -class OSE_Receiver { -public: - OSE_Receiver(class TransporterRegistry *, - int recBufSize, - NodeId localNodeId); - - ~OSE_Receiver(); - - bool hasData() const ; - bool isFull() const ; - - Uint32 getReceiveData(NodeId * remoteNodeId, - Uint32 ** readPtr); - - void updateReceiveDataPtr(Uint32 szRead); - - bool doReceive(Uint32 timeOutMillis); - - PROCESS createPhantom(); - void destroyPhantom(); - -private: - class TransporterRegistry * theTransporterRegistry; - - NodeId localNodeId; - char localHostName[255]; - - bool phantomCreated; - PROCESS phantomPid; - struct OS_redir_entry redir; - - int recBufReadIndex; - int recBufWriteIndex; - int recBufSize; - union SIGNAL **receiveBuffer; - - // Stack for signals that are received out of order - int waitStackCount; - int waitStackSize; - union SIGNAL** waitStack; - - // Counters for the next signal id - Uint32* nextSigId; - - class OSE_Transporter * getTransporter(NodeId nodeId); - - void insertReceiveBuffer(union SIGNAL * _sig); - void clearRecvBuffer(NodeId _nodeId); - bool checkWaitStack(NodeId _nodeId); - void clearWaitStack(NodeId _nodeId); - void insertWaitStack(union SIGNAL* _sig); -}; - -inline -bool -OSE_Receiver::hasData () const { - return recBufReadIndex != recBufWriteIndex; -} - -inline -bool -OSE_Receiver::isFull () const { - return ((recBufWriteIndex + 1) % recBufSize) == recBufWriteIndex; -} - -inline -Uint32 -OSE_Receiver::getReceiveData(NodeId * remoteNodeId, - Uint32 ** readPtr){ - NdbTransporterData *s = (NdbTransporterData *)receiveBuffer[recBufReadIndex]; - if(recBufReadIndex != recBufWriteIndex){ - * remoteNodeId = s->senderNodeId; - * readPtr = &s->data[0]; - return s->length; - } - return 0; -} - -inline -void -OSE_Receiver::updateReceiveDataPtr(Uint32 bytesRead){ - if(bytesRead != 0){ - free_buf(&receiveBuffer[recBufReadIndex]); - recBufReadIndex = (recBufReadIndex + 1) % recBufSize; - } -} - -inline -void -OSE_Receiver::insertReceiveBuffer(union SIGNAL * _sig){ - receiveBuffer[recBufWriteIndex] = _sig; - recBufWriteIndex = (recBufWriteIndex + 1) % recBufSize; -} - - -#endif diff --git a/storage/ndb/src/common/transporter/OSE_Signals.hpp b/storage/ndb/src/common/transporter/OSE_Signals.hpp deleted file mode 100644 index 3f6cc07b473..00000000000 --- a/storage/ndb/src/common/transporter/OSE_Signals.hpp +++ /dev/null @@ -1,144 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifndef OSE_SIGNALS_HPP -#define OSE_SIGNALS_HPP - -#include <ose.h> -#include <kernel_types.h> - -#define NDB_TRANSPORTER_SIGBASE 3000 - -#define NDB_TRANSPORTER_DATA (NDB_TRANSPORTER_SIGBASE + 1) /* !-SIGNO(struct NdbTransporterData)-! */ -#define NDB_TRANSPORTER_HUNT (NDB_TRANSPORTER_SIGBASE + 2) /* !-SIGNO(struct NdbTransporterHunt)-! */ -#define NDB_TRANSPORTER_CONNECT_REQ (NDB_TRANSPORTER_SIGBASE + 3) /* !-SIGNO(struct NdbTransporterConnectReq)-! */ -#define NDB_TRANSPORTER_CONNECT_REF (NDB_TRANSPORTER_SIGBASE + 4) /* !-SIGNO(struct NdbTransporterConnectRef)-! */ -#define NDB_TRANSPORTER_CONNECT_CONF (NDB_TRANSPORTER_SIGBASE + 5) /* !-SIGNO(struct NdbTransporterConnectConf)-! */ -#define NDB_TRANSPORTER_DISCONNECT_ORD (NDB_TRANSPORTER_SIGBASE + 6) /* !-SIGNO(struct NdbTransporterDisconnectOrd)-! */ -#define NDB_TRANSPORTER_PRIO_A (NDB_TRANSPORTER_SIGBASE + 7) - -inline -const char * -sigNo2String(SIGSELECT sigNo){ - switch(sigNo){ - case NDB_TRANSPORTER_PRIO_A: - return "PRIO_A_DATA"; - break; - case NDB_TRANSPORTER_DATA: - return "PRIO_B_DATA"; - break; - case NDB_TRANSPORTER_HUNT: - return "HUNT"; - break; - case NDB_TRANSPORTER_CONNECT_REQ: - return "CONNECT_REQ"; - break; - case NDB_TRANSPORTER_CONNECT_REF: - return "CONNECT_REF"; - break; - case NDB_TRANSPORTER_CONNECT_CONF: - return "CONNECT_CONF"; - break; - case NDB_TRANSPORTER_DISCONNECT_ORD: - return "DISCONNECT_ORD"; - break; - } - return "UNKNOWN"; -} - -struct NdbTransporterData -{ - SIGSELECT sigNo; - Uint32 sigId; // Sequence number for this signal - Uint32 senderNodeId; - Uint32 length; - Uint32 data[1]; -}; - -struct NdbTransporterData_PrioA -{ - SIGSELECT sigNo; - Uint32 sigId; // Sequence number for this signal - Uint32 senderNodeId; - Uint32 length; - Uint32 data[1]; -}; - -struct NdbTransporterHunt -{ - SIGSELECT sigNo; - NodeId remoteNodeId; -}; - - -struct NdbTransporterConnectReq -{ - SIGSELECT sigNo; - NodeId remoteNodeId; - NodeId senderNodeId; -}; - - -struct NdbTransporterConnectConf -{ - SIGSELECT sigNo; - NodeId remoteNodeId; - NodeId senderNodeId; -}; - -struct NdbTransporterConnectRef -{ - SIGSELECT sigNo; - NodeId remoteNodeId; - NodeId senderNodeId; - Uint32 reason; - - /** - * Node is not accepting connections - */ - static const Uint32 INVALID_STATE = 1; -}; - -struct NdbTransporterDisconnectOrd -{ - SIGSELECT sigNo; - NodeId senderNodeId; - Uint32 reason; - - /** - * Process died - */ - static const Uint32 PROCESS_DIED = 1; - - /** - * Ndb disconnected - */ - static const Uint32 NDB_DISCONNECT = 2; -}; - -union SIGNAL -{ - SIGSELECT sigNo; - struct NdbTransporterData dataSignal; - struct NdbTransporterData prioAData; - struct NdbTransporterHunt ndbHunt; - struct NdbTransporterConnectReq ndbConnectReq; - struct NdbTransporterConnectRef ndbConnectRef; - struct NdbTransporterConnectConf ndbConnectConf; - struct NdbTransporterDisconnectOrd ndbDisconnect; -}; - -#endif diff --git a/storage/ndb/src/common/transporter/OSE_Transporter.cpp b/storage/ndb/src/common/transporter/OSE_Transporter.cpp deleted file mode 100644 index ad67791fc0c..00000000000 --- a/storage/ndb/src/common/transporter/OSE_Transporter.cpp +++ /dev/null @@ -1,487 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License 
as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#include <ose.h> -#include "OSE_Transporter.hpp" -#include "OSE_Signals.hpp" - -#include <TransporterCallback.hpp> -#include "TransporterInternalDefinitions.hpp" - -#include <NdbMutex.h> - -#include <NdbHost.h> -#include <NdbOut.hpp> -#include <time.h> - -OSE_Transporter::OSE_Transporter(int _prioASignalSize, - int _prioBSignalSize, - NodeId localNodeId, - const char * lHostName, - NodeId remoteNodeId, - NodeId serverNodeId, - const char * rHostName, - int byteorder, - bool compression, - bool checksum, - bool signalId, - Uint32 reportFreq) : - Transporter(localNodeId, - remoteNodeId, - serverNodeId, - byteorder, - compression, - checksum, - signalId), - isServer(localNodeId < remoteNodeId) -{ - - signalIdCounter = 0; - prioBSignalSize = _prioBSignalSize; - - if (strcmp(lHostName, rHostName) == 0){ - BaseString::snprintf(remoteNodeName, sizeof(remoteNodeName), - "ndb_node%d", remoteNodeId); - } else { - BaseString::snprintf(remoteNodeName, sizeof(remoteNodeName), - "%s/ndb_node%d", rHostName, remoteNodeId); - } - - prioBSignal = NIL; -} - -OSE_Transporter::~OSE_Transporter() { - -#if 0 - /** - * Don't free these buffers since they have already been freed - * when the process allocating them died (wild pointers) - */ - if(prioBSignal != NIL) - free_buf(&prioBSignal); -#endif -} - -bool -OSE_Transporter::initTransporter() { - - struct OS_pcb * pcb = get_pcb(current_process()); - if(pcb != NULL){ - if(pcb->type != OS_ILLEGAL){ - if(prioBSignalSize > pcb->max_sigsize){ - DEBUG("prioBSignalSize(" << prioBSignalSize << ") > max_sigsize(" - << pcb->max_sigsize << ") using max_sigsize"); - prioBSignalSize = pcb->max_sigsize; - } - } - free_buf((union SIGNAL **)&pcb); - } - - maxPrioBDataSize = prioBSignalSize; - maxPrioBDataSize -= (sizeof(NdbTransporterData) + MAX_MESSAGE_SIZE - 4); - - if(maxPrioBDataSize < 0){ - -#ifdef DEBUG_TRANSPORTER - printf("maxPrioBDataSize < 0 %d\n", - maxPrioBDataSize); -#endif - return false; - } - - initSignals(); - - return true; -} - -void -OSE_Transporter::initSignals(){ - if(prioBSignal == NIL){ - prioBSignal = alloc(prioBSignalSize, NDB_TRANSPORTER_DATA); - prioBInsertPtr = &prioBSignal->dataSignal.data[0]; - - prioBSignal->dataSignal.length = 0; - prioBSignal->dataSignal.senderNodeId = localNodeId; - } - dataToSend = 0; -} - -NdbTransporterData * -OSE_Transporter::allocPrioASignal(Uint32 messageLenBytes) const -{ - - const Uint32 lenBytes = messageLenBytes + sizeof(NdbTransporterData) - 4; - - NdbTransporterData * sig = - (NdbTransporterData*)alloc(lenBytes, NDB_TRANSPORTER_PRIO_A); - - sig->length = 0; - sig->senderNodeId = localNodeId; - - return sig; -} - -Uint32 * -OSE_Transporter::getWritePtr(Uint32 lenBytes, Uint32 prio){ - if(prio >= 1){ - prio = 1; - insertPtr = prioBInsertPtr; - signal = (NdbTransporterData*)prioBSignal; - } else { - signal = allocPrioASignal(lenBytes); - insertPtr = &signal->data[0]; - } - return insertPtr; -} - -void -OSE_Transporter::updateWritePtr(Uint32 lenBytes, 
Uint32 prio){ - - Uint32 bufferSize = signal->length; - bufferSize += lenBytes; - signal->length = bufferSize; - if(prio >= 1){ - prioBInsertPtr += (lenBytes / 4); - if(bufferSize >= maxPrioBDataSize) - doSend(); - } else { - /** - * Prio A signal are sent directly - */ - signal->sigId = 0; - - ::send((union SIGNAL**)&signal, remoteNodePid); - } -} - -#if 0 -int getSeq(int _seq){ - if (_seq > 0){ - switch (_seq % 100){ - case 10: - return _seq - 1; - case 9: - return _seq + 1; - default: - return _seq; - } - }else{ - return _seq; - } -} -int getSeq(int _seq){ - - switch (_seq % 40){ - case 10: - return _seq-4; - case 9: - return _seq-2; - case 8: - return _seq; - case 7: - return _seq+2; - case 6: - return _seq+4; - - - case 30: - return _seq-9; - case 29: - return _seq-7; - case 28: - return _seq-5; - case 27: - return _seq-3; - case 26: - return _seq-1; - case 25: - return _seq+1; - case 24: - return _seq+3; - case 23: - return _seq+5; - case 22: - return _seq+7; - case 21: - return _seq+9; - - default: - return _seq; - - } -} -#endif - -void -OSE_Transporter::doSend() { - /** - * restore is always called to make sure the signal buffer is taken over - * by a process that is alive, this will otherwise lead to that these buffers - * are removed when the process that allocated them dies - */ - restore(prioBSignal); - if(prioBSignal->dataSignal.length > 0){ - - prioBSignal->dataSignal.sigId = signalIdCounter; - signalIdCounter++; - - ::send(&prioBSignal, remoteNodePid); - } - - initSignals(); -} - -void -OSE_Transporter::doConnect() { - - NdbMutex_Lock(theMutexPtr); - if(_connecting || _disconnecting || _connected){ - NdbMutex_Unlock(theMutexPtr); - return; - } - - _connecting = true; - signalIdCounter = 0; - - if(isServer){ - DEBUG("Waiting for connect req: "); - state = WAITING_FOR_CONNECT_REQ; - } else { - state = WAITING_FOR_HUNT; - - DEBUG("Hunting for: " << remoteNodeName); - - union SIGNAL* huntsig; - huntsig = alloc(sizeof(NdbTransporterHunt), NDB_TRANSPORTER_HUNT); - huntsig->ndbHunt.remoteNodeId = remoteNodeId; - hunt(remoteNodeName, 0, NULL, &huntsig); - } - NdbMutex_Unlock(theMutexPtr); -} - -void -OSE_Transporter::doDisconnect() { - NdbMutex_Lock(theMutexPtr); - - switch(state){ - case DISCONNECTED: - case WAITING_FOR_HUNT: - case WAITING_FOR_CONNECT_REQ: - case WAITING_FOR_CONNECT_CONF: - break; - case CONNECTED: - { -#if 0 - /** - * There should not be anything in the buffer that needs to be sent here - */ - DEBUG("Doing send before disconnect"); - doSend(); -#endif - union SIGNAL * sig = alloc(sizeof(NdbTransporterDisconnectOrd), - NDB_TRANSPORTER_DISCONNECT_ORD); - sig->ndbDisconnect.senderNodeId = localNodeId; - sig->ndbDisconnect.reason = NdbTransporterDisconnectOrd::NDB_DISCONNECT; - ::send(&sig, remoteNodePid); - detach(&remoteNodeRef); - - } - break; - } - state = DISCONNECTED; - - _connected = false; - _connecting = false; - _disconnecting = false; - - NdbMutex_Unlock(theMutexPtr); -} - -void -OSE_Transporter::huntReceived(struct NdbTransporterHunt * sig){ - if(isServer){ - WARNING("Hunt received for server: remoteNodeId: " << - sig->remoteNodeId); - return; - } - - if(state != WAITING_FOR_HUNT){ - WARNING("Hunt received while in state: " << state); - return; - } - remoteNodePid = sender((union SIGNAL**)&sig); - union SIGNAL * signal = alloc(sizeof(NdbTransporterConnectReq), - NDB_TRANSPORTER_CONNECT_REQ); - signal->ndbConnectReq.remoteNodeId = remoteNodeId; - signal->ndbConnectReq.senderNodeId = localNodeId; - - DEBUG("Sending connect req to pid: " << hex << 
remoteNodePid); - - ::send(&signal, remoteNodePid); - state = WAITING_FOR_CONNECT_CONF; - return; -} - -bool -OSE_Transporter::connectReq(struct NdbTransporterConnectReq * sig){ - if(!isServer){ - WARNING("OSE Connect Req received for client: senderNodeId: " << - sig->senderNodeId); - return false; - } - - if(state != WAITING_FOR_CONNECT_REQ){ - PROCESS pid = sender((union SIGNAL**)&sig); - union SIGNAL * signal = alloc(sizeof(NdbTransporterConnectRef), - NDB_TRANSPORTER_CONNECT_REF); - signal->ndbConnectRef.senderNodeId = localNodeId; - signal->ndbConnectRef.reason = NdbTransporterConnectRef::INVALID_STATE; - - DEBUG("Sending connect ref to pid: " << hex << pid); - - ::send(&signal, pid); - return false; - } - - NdbMutex_Lock(theMutexPtr); - - if(prioBSignal != NIL){ - restore(prioBSignal); - free_buf(&prioBSignal); - } - initSignals(); - - remoteNodePid = sender((union SIGNAL**)&sig); - union SIGNAL * signal = alloc(sizeof(NdbTransporterConnectRef), - NDB_TRANSPORTER_CONNECT_CONF); - signal->ndbConnectConf.senderNodeId = localNodeId; - signal->ndbConnectConf.remoteNodeId = remoteNodeId; - - union SIGNAL * discon = alloc(sizeof(NdbTransporterDisconnectOrd), - NDB_TRANSPORTER_DISCONNECT_ORD); - discon->ndbDisconnect.senderNodeId = remoteNodeId; - discon->ndbDisconnect.reason = NdbTransporterDisconnectOrd::PROCESS_DIED; - - DEBUG("Attaching to pid: " << hex << remoteNodePid); - - remoteNodeRef = attach(&discon, remoteNodePid); - - DEBUG("Sending connect conf to pid: " << hex << remoteNodePid); - - ::send(&signal, remoteNodePid); - state = CONNECTED; - - _connected = true; - _connecting = false; - _disconnecting = false; - - NdbMutex_Unlock(theMutexPtr); - - return true; -} - -bool -OSE_Transporter::connectRef(struct NdbTransporterConnectRef * sig){ - if(isServer){ - WARNING("OSE Connect Ref received for server: senderNodeId: " << - sig->senderNodeId); - return false; - } - if(state != WAITING_FOR_CONNECT_CONF){ - WARNING("OSE Connect Ref received for client while in state: " << - state << " senderNodeId: " << sig->senderNodeId); - return false; - } - doDisconnect(); -#if 0 - /** - * Don't call connect directly, wait until the next time - * checkConnections is called which will trigger a new connect attempt - */ - doConnect(); -#endif - return true; -} - - -bool -OSE_Transporter::connectConf(struct NdbTransporterConnectConf * sig){ - if(isServer){ - WARNING("OSE Connect Conf received for server: senderNodeId: " << - sig->senderNodeId); - return false; - } - if(state != WAITING_FOR_CONNECT_CONF){ - WARNING("OSE Connect Conf received while in state: " << - state); - return false; - } - NdbMutex_Lock(theMutexPtr); - - // Free the buffers to get rid of any "junk" that they might contain - if(prioBSignal != NIL){ - restore(prioBSignal); - free_buf(&prioBSignal); - } - initSignals(); - - union SIGNAL * discon = alloc(sizeof(NdbTransporterDisconnectOrd), - NDB_TRANSPORTER_DISCONNECT_ORD); - discon->ndbDisconnect.senderNodeId = remoteNodeId; - discon->ndbDisconnect.reason= NdbTransporterDisconnectOrd::PROCESS_DIED; - - remoteNodeRef = attach(&discon, remoteNodePid); - - state = CONNECTED; - _connected = true; - _connecting = false; - _disconnecting = false; - - // Free the buffers to get rid of any "junk" that they might contain - if(prioBSignal != NIL){ - restore(prioBSignal); - free_buf(&prioBSignal); - } - initSignals(); - - NdbMutex_Unlock(theMutexPtr); - return true; -} - - -bool -OSE_Transporter::disconnectOrd(struct NdbTransporterDisconnectOrd * sig){ - if(state != CONNECTED){ - WARNING("OSE 
Disconnect Ord received while in state: " << state << - " reason: " << sig->reason); - return false; - } - - if(sig->reason == NdbTransporterDisconnectOrd::PROCESS_DIED){ - state = DISCONNECTED; - } - - doDisconnect(); - reportDisconnect(callbackObj, remoteNodeId,0); - return true; -} - - - - - - - diff --git a/storage/ndb/src/common/transporter/OSE_Transporter.hpp b/storage/ndb/src/common/transporter/OSE_Transporter.hpp deleted file mode 100644 index 898352366ba..00000000000 --- a/storage/ndb/src/common/transporter/OSE_Transporter.hpp +++ /dev/null @@ -1,159 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -//**************************************************************************** -// -// AUTHOR -// Magnus Svensson -// -// NAME -// OSE_Transporter -// -// DESCRIPTION -// A OSE_Transporter instance is created when OSE-signal communication -// shall be used (user specified). It handles connect, disconnect, -// send and receive. -// -// -// -//***************************************************************************/ -#ifndef OSE_Transporter_H -#define OSE_Transporter_H - -#include "Transporter.hpp" - -#include "ose.h" - -class OSE_Transporter : public Transporter { - friend class OSE_Receiver; - friend class TransporterRegistry; -public: - - // Initialize member variables - OSE_Transporter(int prioASignalSize, - int prioBSignalSize, - NodeId localNodeId, - const char * lHostName, - NodeId remoteNodeId, - NodeId serverNodeId, - const char * rHostName, - int byteorder, - bool compression, - bool checksum, - bool signalId, - Uint32 reportFreq = 4096); - - // Disconnect, delete send buffers and receive buffer - ~OSE_Transporter(); - - /** - * Allocate buffers for sending and receiving - */ - bool initTransporter(); - - /** - * Connect - */ - virtual void doConnect(); - - /** - * Disconnect - */ - virtual void doDisconnect(); - - Uint32 * getWritePtr(Uint32 lenBytes, Uint32 prio); - void updateWritePtr(Uint32 lenBytes, Uint32 prio); - - /** - * Retrieves the contents of the send buffers, copies it into - * an OSE signal and sends it. 
Until the send buffers are empty - */ - void doSend(); - - bool hasDataToSend() const { - return prioBSignal->dataSignal.length > 0; - } - -protected: - /** - * Not implemented - * OSE uses async connect/disconnect - */ - virtual bool connectImpl(Uint32 timeOut){ - return false; - } - - /** - * Not implemented - * OSE uses async connect/disconnect - */ - virtual void disconnectImpl(){ - } - -private: - const bool isServer; - - int maxPrioBDataSize; - - /** - * Remote node name - * On same machine: ndb_node1 - * On remote machine: rhost/ndb_node1 - **/ - PROCESS remoteNodePid; - OSATTREF remoteNodeRef; - char remoteNodeName[256]; - - Uint32 signalIdCounter; - - int prioBSignalSize; - - Uint32 * prioBInsertPtr; - union SIGNAL * prioBSignal; - - struct NdbTransporterData * allocPrioASignal(Uint32 lenBytes) const; - - /** - * Statistics - */ - Uint32 reportFreq; - Uint32 receiveCount; - Uint64 receiveSize; - Uint32 sendCount; - Uint64 sendSize; - - void initSignals(); - - /** - * OSE Receiver callbacks - */ - void huntReceived(struct NdbTransporterHunt * sig); - bool connectReq(struct NdbTransporterConnectReq * sig); - bool connectRef(struct NdbTransporterConnectRef * sig); - bool connectConf(struct NdbTransporterConnectConf * sig); - bool disconnectOrd(struct NdbTransporterDisconnectOrd * sig); - - enum OSETransporterState { - DISCONNECTED = 0, - WAITING_FOR_HUNT = 1, - WAITING_FOR_CONNECT_REQ = 2, - WAITING_FOR_CONNECT_CONF = 3, - CONNECTED = 4 - } state; -}; - -// Define of OSE_Transporter_H -#endif diff --git a/storage/ndb/src/common/transporter/TCP_Transporter.cpp b/storage/ndb/src/common/transporter/TCP_Transporter.cpp index 5db12d3985c..a2ceaea339f 100644 --- a/storage/ndb/src/common/transporter/TCP_Transporter.cpp +++ b/storage/ndb/src/common/transporter/TCP_Transporter.cpp @@ -22,12 +22,6 @@ #include <NdbSleep.h> // End of stuff to be moved -#if defined NDB_OSE || defined NDB_SOFTOSE -#define inet_send inet_send -#else -#define inet_send send -#endif - #ifdef NDB_WIN32 class ndbstrerror { @@ -221,22 +215,6 @@ TCP_Transporter::setSocketNonBlocking(NDB_SOCKET_TYPE socket){ bool TCP_Transporter::sendIsPossible(struct timeval * timeout) { -#ifdef NDB_OSE - /** - * In OSE you cant do select without owning a socket, - * and since this method might be called by any thread in the api - * we choose not to implementet and always return true after sleeping - * a while. 
- * - * Note that this only sensible as long as the sockets are non blocking - */ - if(theSocket >= 0){ - Uint32 timeOutMillis = timeout->tv_sec * 1000 + timeout->tv_usec / 1000; - NdbSleep_MilliSleep(timeOutMillis); - return true; - } - return false; -#else if(theSocket != NDB_INVALID_SOCKET){ fd_set writeset; FD_ZERO(&writeset); @@ -250,7 +228,6 @@ TCP_Transporter::sendIsPossible(struct timeval * timeout) { return false; } return false; -#endif } Uint32 @@ -334,7 +311,7 @@ TCP_Transporter::doSend() { const char * const sendPtr = m_sendBuffer.sendPtr; const Uint32 sizeToSend = m_sendBuffer.sendDataSize; if (sizeToSend > 0){ - const int nBytesSent = inet_send(theSocket, sendPtr, sizeToSend, 0); + const int nBytesSent = send(theSocket, sendPtr, sizeToSend, 0); if (nBytesSent > 0) { m_sendBuffer.bytesSent(nBytesSent); diff --git a/storage/ndb/src/common/transporter/TCP_Transporter.hpp b/storage/ndb/src/common/transporter/TCP_Transporter.hpp index df4149531b4..a6019f3572f 100644 --- a/storage/ndb/src/common/transporter/TCP_Transporter.hpp +++ b/storage/ndb/src/common/transporter/TCP_Transporter.hpp @@ -153,10 +153,6 @@ private: Uint64 sendSize; ReceiveBuffer receiveBuffer; - -#if defined NDB_OSE || defined NDB_SOFTOSE - PROCESS theReceiverPid; -#endif }; inline diff --git a/storage/ndb/src/common/transporter/TransporterInternalDefinitions.hpp b/storage/ndb/src/common/transporter/TransporterInternalDefinitions.hpp index 624b495422f..db536c19c38 100644 --- a/storage/ndb/src/common/transporter/TransporterInternalDefinitions.hpp +++ b/storage/ndb/src/common/transporter/TransporterInternalDefinitions.hpp @@ -31,10 +31,6 @@ #define NDB_SCI_TRANSPORTER #endif -#ifdef HAVE_NDB_OSE -#define NDB_OSE_TRANSPORTER -#endif - #ifdef DEBUG_TRANSPORTER #define DEBUG(x) ndbout << x << endl #else diff --git a/storage/ndb/src/common/transporter/TransporterRegistry.cpp b/storage/ndb/src/common/transporter/TransporterRegistry.cpp index 4a0be702a86..71f39b63ba1 100644 --- a/storage/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/storage/ndb/src/common/transporter/TransporterRegistry.cpp @@ -27,11 +27,6 @@ #include "TCP_Transporter.hpp" #endif -#ifdef NDB_OSE_TRANSPORTER -#include "OSE_Receiver.hpp" -#include "OSE_Transporter.hpp" -#endif - #ifdef NDB_SCI_TRANSPORTER #include "SCI_Transporter.hpp" #endif @@ -95,7 +90,6 @@ TransporterRegistry::TransporterRegistry(void * callback, theTCPTransporters = new TCP_Transporter * [maxTransporters]; theSCITransporters = new SCI_Transporter * [maxTransporters]; theSHMTransporters = new SHM_Transporter * [maxTransporters]; - theOSETransporters = new OSE_Transporter * [maxTransporters]; theTransporterTypes = new TransporterType [maxTransporters]; theTransporters = new Transporter * [maxTransporters]; performStates = new PerformState [maxTransporters]; @@ -106,21 +100,16 @@ TransporterRegistry::TransporterRegistry(void * callback, nTCPTransporters = 0; nSCITransporters = 0; nSHMTransporters = 0; - nOSETransporters = 0; // Initialize the transporter arrays for (unsigned i=0; i<maxTransporters; i++) { theTCPTransporters[i] = NULL; theSCITransporters[i] = NULL; theSHMTransporters[i] = NULL; - theOSETransporters[i] = NULL; theTransporters[i] = NULL; performStates[i] = DISCONNECTED; ioStates[i] = NoHalt; } - theOSEReceiver = 0; - theOSEJunkSocketSend = 0; - theOSEJunkSocketRecv = 0; DBUG_VOID_RETURN; } @@ -155,19 +144,11 @@ TransporterRegistry::~TransporterRegistry() delete[] theTCPTransporters; delete[] theSCITransporters; delete[] theSHMTransporters; - delete[] 
theOSETransporters; delete[] theTransporterTypes; delete[] theTransporters; delete[] performStates; delete[] ioStates; -#ifdef NDB_OSE_TRANSPORTER - if(theOSEReceiver != NULL){ - theOSEReceiver->destroyPhantom(); - delete theOSEReceiver; - theOSEReceiver = 0; - } -#endif if (m_mgm_handle) ndb_mgm_destroy_handle(&m_mgm_handle); @@ -327,60 +308,6 @@ TransporterRegistry::createTCPTransporter(TransporterConfiguration *config) { nTransporters++; nTCPTransporters++; -#if defined NDB_OSE || defined NDB_SOFTOSE - t->theReceiverPid = theReceiverPid; -#endif - - return true; -#else - return false; -#endif -} - -bool -TransporterRegistry::createOSETransporter(TransporterConfiguration *conf) { -#ifdef NDB_OSE_TRANSPORTER - - if(!nodeIdSpecified){ - init(conf->localNodeId); - } - - if(conf->localNodeId != localNodeId) - return false; - - if(theTransporters[conf->remoteNodeId] != NULL) - return false; - - if(theOSEReceiver == NULL){ - theOSEReceiver = new OSE_Receiver(this, - 10, - localNodeId); - } - - OSE_Transporter * t = new OSE_Transporter(conf->ose.prioASignalSize, - conf->ose.prioBSignalSize, - localNodeId, - conf->localHostName, - conf->remoteNodeId, - conf->serverNodeId, - conf->remoteHostName, - conf->checksum, - conf->signalId); - if (t == NULL) - return false; - else if (!t->initTransporter()) { - delete t; - return false; - } - // Put the transporter in the transporter arrays - theOSETransporters[nOSETransporters] = t; - theTransporters[t->getRemoteNodeId()] = t; - theTransporterTypes[t->getRemoteNodeId()] = tt_OSE_TRANSPORTER; - performStates[t->getRemoteNodeId()] = DISCONNECTED; - - nTransporters++; - nOSETransporters++; - return true; #else return false; @@ -550,19 +477,8 @@ TransporterRegistry::removeTransporter(NodeId nodeId) { nSHMTransporters --; #endif break; - case tt_OSE_TRANSPORTER: -#ifdef NDB_OSE_TRANSPORTER - for(; ind < nOSETransporters; ind++) - if(theOSETransporters[ind]->getRemoteNodeId() == nodeId) - break; - ind++; - for(; ind<nOSETransporters; ind++) - theOSETransporters[ind-1] = theOSETransporters[ind]; - nOSETransporters --; -#endif - break; } - + nTransporters--; // Delete the transporter and remove it from theTransporters array @@ -742,12 +658,7 @@ TransporterRegistry::external_IO(Uint32 timeOutMillis) { Uint32 TransporterRegistry::pollReceive(Uint32 timeOutMillis){ Uint32 retVal = 0; -#ifdef NDB_OSE_TRANSPORTER - retVal |= poll_OSE(timeOutMillis); - retVal |= poll_TCP(0); - return retVal; -#endif - + if((nSCITransporters) > 0) { timeOutMillis=0; @@ -824,18 +735,6 @@ TransporterRegistry::poll_SHM(Uint32 timeOutMillis) } #endif -#ifdef NDB_OSE_TRANSPORTER -Uint32 -TransporterRegistry::poll_OSE(Uint32 timeOutMillis) -{ - if(theOSEReceiver != NULL){ - return theOSEReceiver->doReceive(timeOutMillis); - } - NdbSleep_MilliSleep(timeOutMillis); - return 0; -} -#endif - #ifdef NDB_TCP_TRANSPORTER Uint32 TransporterRegistry::poll_TCP(Uint32 timeOutMillis) @@ -847,20 +746,8 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis) } struct timeval timeout; -#ifdef NDB_OSE - // Return directly if there are no TCP transporters configured - - if(timeOutMillis <= 1){ - timeout.tv_sec = 0; - timeout.tv_usec = 1025; - } else { - timeout.tv_sec = timeOutMillis / 1000; - timeout.tv_usec = (timeOutMillis % 1000) * 1000; - } -#else timeout.tv_sec = timeOutMillis / 1000; timeout.tv_usec = (timeOutMillis % 1000) * 1000; -#endif NDB_SOCKET_TYPE maxSocketValue = -1; @@ -908,33 +795,6 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis) void TransporterRegistry::performReceive() { -#ifdef 
NDB_OSE_TRANSPORTER - if(theOSEReceiver != 0) - { - while(theOSEReceiver->hasData()) - { - NodeId remoteNodeId; - Uint32 * readPtr; - Uint32 sz = theOSEReceiver->getReceiveData(&remoteNodeId, &readPtr); - transporter_recv_from(callbackObj, remoteNodeId); - Uint32 szUsed = unpack(readPtr, - sz, - remoteNodeId, - ioStates[remoteNodeId]); -#ifdef DEBUG_TRANSPORTER - /** - * OSE transporter can handle executions of - * half signals - */ - assert(sz == szUsed); -#endif - theOSEReceiver->updateReceiveDataPtr(szUsed); - theOSEReceiver->doReceive(0); - // checkJobBuffer(); - } - } -#endif - #ifdef NDB_TCP_TRANSPORTER if(tcpReadSelectReply > 0) { @@ -1008,67 +868,7 @@ TransporterRegistry::performSend() { int i; sendCounter = 1; - -#ifdef NDB_OSE_TRANSPORTER - for (int i = 0; i < nOSETransporters; i++) - { - OSE_Transporter *t = theOSETransporters[i]; - if(is_connected(t->getRemoteNodeId()) &&& (t->isConnected())) - { - t->doSend(); - }//if - }//for -#endif - -#ifdef NDB_TCP_TRANSPORTER -#ifdef NDB_OSE - { - int maxSocketValue = 0; - - // Needed for TCP/IP connections - // The writeset are used by select - fd_set writeset; - FD_ZERO(&writeset); - - // Prepare for sending and receiving - for (i = 0; i < nTCPTransporters; i++) { - TCP_Transporter * t = theTCPTransporters[i]; - - // If the transporter is connected - if ((t->hasDataToSend()) && (t->isConnected())) { - const int socket = t->getSocket(); - // Find the highest socket value. It will be used by select - if (socket > maxSocketValue) { - maxSocketValue = socket; - }//if - FD_SET(socket, &writeset); - }//if - }//for - - // The highest socket value plus one - if(maxSocketValue == 0) - return; - - maxSocketValue++; - struct timeval timeout = { 0, 1025 }; - Uint32 tmp = select(maxSocketValue, 0, &writeset, 0, &timeout); - - if (tmp == 0) - { - return; - }//if - for (i = 0; i < nTCPTransporters; i++) { - TCP_Transporter *t = theTCPTransporters[i]; - const NodeId nodeId = t->getRemoteNodeId(); - const int socket = t->getSocket(); - if(is_connected(nodeId)){ - if(t->isConnected() && FD_ISSET(socket, &writeset)) { - t->doSend(); - }//if - }//if - }//for - } -#endif + #ifdef NDB_TCP_TRANSPORTER for (i = m_transp_count; i < nTCPTransporters; i++) { @@ -1091,7 +891,6 @@ TransporterRegistry::performSend() m_transp_count++; if (m_transp_count == nTCPTransporters) m_transp_count = 0; #endif -#endif #ifdef NDB_SCI_TRANSPORTER //scroll through the SCI transporters, // get each transporter, check if connected, send data @@ -1470,21 +1269,6 @@ void TransporterRegistry::startReceiving() { DBUG_ENTER("TransporterRegistry::startReceiving"); -#ifdef NDB_OSE_TRANSPORTER - if(theOSEReceiver != NULL){ - theOSEReceiver->createPhantom(); - } -#endif - -#ifdef NDB_OSE - theOSEJunkSocketRecv = socket(AF_INET, SOCK_STREAM, 0); -#endif - -#if defined NDB_OSE || defined NDB_SOFTOSE - theReceiverPid = current_process(); - for(int i = 0; i<nTCPTransporters; i++) - theTCPTransporters[i]->theReceiverPid = theReceiverPid; -#endif #ifdef NDB_SHM_TRANSPORTER m_shm_own_pid = getpid(); @@ -1513,41 +1297,20 @@ TransporterRegistry::startReceiving() void TransporterRegistry::stopReceiving(){ -#ifdef NDB_OSE_TRANSPORTER - if(theOSEReceiver != NULL){ - theOSEReceiver->destroyPhantom(); - } -#endif - /** * Disconnect all transporters, this includes detach from remote node * and since that must be done from the same process that called attach * it's done here in the receive thread */ disconnectAll(); - -#if defined NDB_OSE || defined NDB_SOFTOSE - if(theOSEJunkSocketRecv > 0) - 
close(theOSEJunkSocketRecv); - theOSEJunkSocketRecv = -1; -#endif - } void TransporterRegistry::startSending(){ -#if defined NDB_OSE || defined NDB_SOFTOSE - theOSEJunkSocketSend = socket(AF_INET, SOCK_STREAM, 0); -#endif } void TransporterRegistry::stopSending(){ -#if defined NDB_OSE || defined NDB_SOFTOSE - if(theOSEJunkSocketSend > 0) - close(theOSEJunkSocketSend); - theOSEJunkSocketSend = -1; -#endif } NdbOut & operator <<(NdbOut & out, SignalHeader & sh){ diff --git a/storage/ndb/src/common/transporter/basictest/basicTransporterTest.cpp b/storage/ndb/src/common/transporter/basictest/basicTransporterTest.cpp index c0a437c4907..54c9cb4c453 100644 --- a/storage/ndb/src/common/transporter/basictest/basicTransporterTest.cpp +++ b/storage/ndb/src/common/transporter/basictest/basicTransporterTest.cpp @@ -57,21 +57,6 @@ TCP_TransporterConfiguration tcpTemplate = { true // signalId; }; -OSE_TransporterConfiguration oseTemplate = { - "", // remoteHostName; - "", // localHostName; - 0, // remoteNodeId; - 0, // localNodeId; - false, // compression; - true, // checksum; - true, // signalId; - 0, // byteOrder; - - 2000, // prioASignalSize; - 1000, // prioBSignalSize; - 10 -}; - SHM_TransporterConfiguration shmTemplate = { 0, //remoteNodeId 0, //localNodeId; @@ -85,16 +70,12 @@ SHM_TransporterConfiguration shmTemplate = { TransporterRegistry *tReg = 0; -#ifndef OSE_DELTA #include <signal.h> -#endif extern "C" void signalHandler(int signo){ -#ifndef OSE_DELTA ::signal(13, signalHandler); -#endif char buf[255]; sprintf(buf,"Signal: %d\n", signo); ndbout << buf << endl; @@ -114,7 +95,6 @@ typedef void (* CreateTransporterFunc)(void * conf, const char * localHostName, const char * remoteHostName); -void createOSETransporter(void *, NodeId, NodeId, const char *, const char *); void createSCITransporter(void *, NodeId, NodeId, const char *, const char *); void createTCPTransporter(void *, NodeId, NodeId, const char *, const char *); void createSHMTransporter(void *, NodeId, NodeId, const char *, const char *); @@ -172,9 +152,6 @@ main(int argc, const char **argv){ if(strcasecmp(type, "tcp") == 0){ func = createTCPTransporter; confTemplate = &tcpTemplate; - } else if(strcasecmp(type, "ose") == 0){ - func = createOSETransporter; - confTemplate = &oseTemplate; } else if(strcasecmp(type, "sci") == 0){ func = createSCITransporter; confTemplate = &sciTemplate; diff --git a/storage/ndb/src/common/transporter/perftest/perfTransporterTest.cpp b/storage/ndb/src/common/transporter/perftest/perfTransporterTest.cpp index 71df9f12a4c..aaee2471801 100644 --- a/storage/ndb/src/common/transporter/perftest/perfTransporterTest.cpp +++ b/storage/ndb/src/common/transporter/perftest/perfTransporterTest.cpp @@ -70,33 +70,14 @@ TCP_TransporterConfiguration tcpTemplate = { true // signalId; }; -OSE_TransporterConfiguration oseTemplate = { - "", // remoteHostName; - "", // localHostName; - 0, // remoteNodeId; - 0, // localNodeId; - false, // compression; - true, // checksum; - true, // signalId; - 0, // byteOrder; - - 2000, // prioASignalSize; - 2000, // prioBSignalSize; - 10 // Recv buf size -}; - TransporterRegistry *tReg = 0; -#ifndef OSE_DELTA #include <signal.h> -#endif extern "C" void signalHandler(int signo){ -#ifndef OSE_DELTA ::signal(13, signalHandler); -#endif char buf[255]; sprintf(buf,"Signal: %d\n", signo); ndbout << buf << endl; @@ -119,8 +100,6 @@ typedef void (* CreateTransporterFunc)(void * conf, int sendBuf, int recvBuf); -void -createOSETransporter(void*, NodeId, NodeId, const char*, const char*, int, int); 
void createTCPTransporter(void*, NodeId, NodeId, const char*, const char*, int, int); void @@ -455,9 +434,6 @@ main(int argc, const char **argv){ if(strcasecmp(type, "tcp") == 0){ func = createTCPTransporter; confTemplate = &tcpTemplate; - } else if(strcasecmp(type, "ose") == 0){ - func = createOSETransporter; - confTemplate = &oseTemplate; } else if(strcasecmp(type, "sci") == 0){ func = createSCITransporter; confTemplate = &sciTemplate; @@ -632,43 +608,6 @@ checkJobBuffer() { } void -createOSETransporter(void * _conf, - NodeId localNodeId, - NodeId remoteNodeId, - const char * localHostName, - const char * remoteHostName, - int sendBuf, - int recvBuf){ - - ndbout << "Creating OSE transporter from node " - << localNodeId << "(" << localHostName << ") to " - << remoteNodeId << "(" << remoteHostName << ")..." << endl;; - - OSE_TransporterConfiguration * conf = (OSE_TransporterConfiguration*)_conf; - - if(sendBuf != -1){ - conf->prioBSignalSize = sendBuf; - } - if(recvBuf != -1){ - conf->receiveBufferSize = recvBuf; - } - - ndbout << "\tSendBufferSize: " << conf->prioBSignalSize << endl; - ndbout << "\tReceiveBufferSize: " << conf->receiveBufferSize << endl; - - conf->localNodeId = localNodeId; - conf->localHostName = localHostName; - conf->remoteNodeId = remoteNodeId; - conf->remoteHostName = remoteHostName; - bool res = tReg->createTransporter(conf); - if(res) - ndbout << "... -- Success " << endl; - else - ndbout << "... -- Failure " << endl; -} - - -void createSCITransporter(void * _conf, NodeId localNodeId, NodeId remoteNodeId, diff --git a/storage/ndb/src/common/transporter/priotest/prioOSE/Makefile b/storage/ndb/src/common/transporter/priotest/prioOSE/Makefile deleted file mode 100644 index 4df66fa35e0..00000000000 --- a/storage/ndb/src/common/transporter/priotest/prioOSE/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -include .defs.mk - -TYPE := ndbapi - -BIN_TARGET := perfOSE -BIN_TARGET_ARCHIVES := perftransportertest transporter portlib - -CCFLAGS_LOC += -I.. - -SOURCES = perfOSE.cpp - -include $(NDB_TOP)/Epilogue.mk - - - - - diff --git a/storage/ndb/src/common/transporter/priotest/prioTransporterTest.cpp b/storage/ndb/src/common/transporter/priotest/prioTransporterTest.cpp index 6c5623a49a6..b65e41cf065 100644 --- a/storage/ndb/src/common/transporter/priotest/prioTransporterTest.cpp +++ b/storage/ndb/src/common/transporter/priotest/prioTransporterTest.cpp @@ -71,33 +71,14 @@ TCP_TransporterConfiguration tcpTemplate = { true // signalId; }; -OSE_TransporterConfiguration oseTemplate = { - "", // remoteHostName; - "", // localHostName; - 0, // remoteNodeId; - 0, // localNodeId; - false, // compression; - true, // checksum; - true, // signalId; - 0, // byteOrder; - - 2000, // prioASignalSize; - 2000, // prioBSignalSize; - 10 // Recv buf size -}; - TransporterRegistry *tReg = 0; -#ifndef OSE_DELTA #include <signal.h> -#endif extern "C" void signalHandler(int signo){ -#ifndef OSE_DELTA ::signal(13, signalHandler); -#endif char buf[255]; sprintf(buf,"Signal: %d\n", signo); ndbout << buf << endl; @@ -120,43 +101,6 @@ typedef void (* CreateTransporterFunc)(void * conf, int recvBuf); void -createOSETransporter(void * _conf, - NodeId localNodeId, - NodeId remoteNodeId, - const char * localHostName, - const char * remoteHostName, - int sendBuf, - int recvBuf){ - - ndbout << "Creating OSE transporter from node " - << localNodeId << "(" << localHostName << ") to " - << remoteNodeId << "(" << remoteHostName << ")..." 
<< endl;; - - OSE_TransporterConfiguration * conf = (OSE_TransporterConfiguration*)_conf; - - if(sendBuf != -1){ - conf->prioBSignalSize = sendBuf; - } - if(recvBuf != -1){ - conf->receiveBufferSize = recvBuf; - } - - ndbout << "\tSendBufferSize: " << conf->prioBSignalSize << endl; - ndbout << "\tReceiveBufferSize: " << conf->receiveBufferSize << endl; - - conf->localNodeId = localNodeId; - conf->localHostName = localHostName; - conf->remoteNodeId = remoteNodeId; - conf->remoteHostName = remoteHostName; - bool res = tReg->createTransporter(conf); - if(res) - ndbout << "... -- Success " << endl; - else - ndbout << "... -- Failure " << endl; -} - - -void createSCITransporter(void * _conf, NodeId localNodeId, NodeId remoteNodeId, @@ -578,10 +522,6 @@ prioTransporterTest(TestType tt, const char * progName, func = createTCPTransporter; confTemplate = &tcpTemplate; break; - case TestOSE: - func = createOSETransporter; - confTemplate = &oseTemplate; - break; case TestSCI: func = createSCITransporter; confTemplate = &sciTemplate; diff --git a/storage/ndb/src/common/transporter/priotest/prioTransporterTest.hpp b/storage/ndb/src/common/transporter/priotest/prioTransporterTest.hpp index 787a9f46433..f6d7e3e09df 100644 --- a/storage/ndb/src/common/transporter/priotest/prioTransporterTest.hpp +++ b/storage/ndb/src/common/transporter/priotest/prioTransporterTest.hpp @@ -20,7 +20,6 @@ enum TestType { TestTCP, - TestOSE, TestSCI, TestSHM }; diff --git a/storage/ndb/src/common/util/File.cpp b/storage/ndb/src/common/util/File.cpp index 056b7ff199b..f0aecc4f8e4 100644 --- a/storage/ndb/src/common/util/File.cpp +++ b/storage/ndb/src/common/util/File.cpp @@ -45,17 +45,16 @@ File_class::exists(const char* aFileName) return (my_stat(aFileName, &stmp, MYF(0))!=NULL); } -long +off_t File_class::size(FILE* f) { - long cur_pos = 0, length = 0; - - cur_pos = ::ftell(f); - ::fseek(f, 0, SEEK_END); - length = ::ftell(f); - ::fseek(f, cur_pos, SEEK_SET); // restore original position + MY_STAT s; + + // Note that my_fstat behaves *differently* than my_stat. ARGGGHH! + if(my_fstat(::fileno(f), &s, MYF(0))) + return 0; - return length; + return s.st_size; } bool @@ -168,8 +167,8 @@ File_class::writeChar(const char* buf) { return writeChar(buf, 0, ::strlen(buf)); } - -long + +off_t File_class::size() const { return File_class::size(m_file); @@ -184,14 +183,5 @@ File_class::getName() const int File_class::flush() const { -#if defined NDB_OSE || defined NDB_SOFTOSE - ::fflush(m_file); - return ::fsync(::fileno(m_file)); -#else - return 0; -#endif + return ::fflush(m_file);; } - -// -// PRIVATE -// diff --git a/storage/ndb/src/common/util/NdbErrHnd.cpp b/storage/ndb/src/common/util/NdbErrHnd.cpp deleted file mode 100644 index 38a67f29853..00000000000 --- a/storage/ndb/src/common/util/NdbErrHnd.cpp +++ /dev/null @@ -1,492 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
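The File_class::size() rewrite above drops the ftell/fseek/ftell round trip and asks the OS for the length directly, so the stream position is never moved and the result widens from long to off_t. A minimal sketch of the same idea with plain POSIX calls (fstat and fileno stand in here for the my_fstat wrapper used in the patch; the return-0-on-failure convention mirrors it):

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

/* Size of an already-open stream, taken from the inode instead of seeking. */
static off_t stream_size(FILE *f)
{
  struct stat s;
  if (fstat(fileno(f), &s) != 0)
    return 0;                 /* same convention as the patched code: 0 on error */
  return s.st_size;
}
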
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - - -#if defined NDB_OSE || defined NDB_SOFTOSE - -#include <NdbOut.hpp> -#include <ndb_types.h> - -#include "ose.h" -#include "ose_err.h" -#include "osetypes.h" - - -#define BUFSIZE 100 - -typedef struct { - char header1[BUFSIZE]; - char header2[BUFSIZE]; - char error_code_line[BUFSIZE]; - char subcode_line[BUFSIZE]; - char product_line[BUFSIZE]; - char header_file_line[BUFSIZE]; - char extra_line[BUFSIZE]; - char user_called_line[BUFSIZE]; - char current_process_id_line[BUFSIZE]; - char current_process_name_line[BUFSIZE]; - char file_line[BUFSIZE]; - char line_line[BUFSIZE]; - char err_hnd_file[BUFSIZE]; -} Error_message; - -char assert_line[BUFSIZE]; -char unknown_signal_line[BUFSIZE]; -char signal_number_line[BUFSIZE]; -char sender_line[BUFSIZE]; -char receiver_line[BUFSIZE]; - -extern "C" OSBOOLEAN ndb_err_hnd(bool user_called, - Uint32 error_code, - Uint32 extra) -{ - static Error_message error_message; - bool error_handled; - Uint32 subcode; - - char* subcode_mnemonic; - char* product_name; - char* file_name; - - /*The subcode (bit 16 - 30) is extracted from error_code */ - subcode = (error_code & 0x7fff0000) >> 16; - - if (user_called) { - switch (subcode) { - case 0x0050 : - subcode_mnemonic= "OSE_PRH_PLS"; - product_name= "Program Loader"; - file_name = "prherr.h"; - break; - case 0x0051 : - subcode_mnemonic = "OSE_PRH_START_PRH"; - product_name= "start_prh"; - file_name= " start_prh.c"; - break; - case 0x0052 : - subcode_mnemonic= "OSE_PRH_ASF"; - product_name= "Archive Server"; - file_name = "prherr.h"; - break; - case 0x0058 : - case 0x4058 : - case 0x3fff : - case 0x8058 : - subcode_mnemonic= "OSE_MMS_EBASE"; - product_name= "MMS"; - file_name= "mms_err.h"; - break; - /*Link Handler G3***************************************/ - case 0x0060 : - case 0x8060 : - subcode_mnemonic= "OSE_GLH_EBASE"; - product_name= "General Link Handler"; - file_name= "glherr.h"; - break; - case 0x0064 : - case 0x8064 : - subcode_mnemonic= "OSE_GPL_EBASE"; - product_name= "General Protocol Link Handler"; - file_name= "gplerr.h"; - break; - case 0x0066 : - case 0x8066 : - subcode_mnemonic= "OSE_UDPPDR_EBASE"; - product_name= "UDP driver for GPL"; - file_name= "udppdrerr.h"; - break; - case 0x0067 : - case 0x8067 : - subcode_mnemonic= "OSE_SERPDR_EBASE"; - product_name= "Serial driver for GPL"; - file_name= "serpdrerr.h"; - break; - case 0x0068 : - case 0x8068 : - subcode_mnemonic= "OSE_ETHPDR_EBASE"; - product_name= "Ethernet driver for GPL"; - file_name= "ethpdrerr.h"; - break; - /*Link handler G4***************************************/ - case 0x0061 : - subcode_mnemonic= "OSE_OTL_EBASE"; - product_name= "OSE Transport Layer"; - file_name= "otlerr.h"; - break; - case 0x0062 : - subcode_mnemonic= "OSE_LALUDP_EBASE"; - product_name= "Link Adaption Layer for UDP"; - file_name= "header file unknown"; - break; - /*Internet Utilities************************************/ - case 0x0069 : - subcode_mnemonic= "OSE_TFTPD"; - product_name= "TFTP server"; - file_name= "inetutilerr.h"; - break; - case 0x006a : - subcode_mnemonic= "OSE_TELUDPD"; - product_name= "TELNET/UDP server"; - file_name= "inetutilerr.h"; - break; - case 0x006b : - subcode_mnemonic= "OSE_FTPD"; - product_name= "FTP server"; - file_name= "inetutilerr.h"; - break; - case 0x006c : - subcode_mnemonic= "OSE_TELNETD"; - product_name= "TELNET 
server"; - file_name= "inetutilerr.h"; - break; - case 0x006d : - subcode_mnemonic= "OSE_SURFER"; - product_name= "OSE System Surfer"; - file_name= "inetutilerr.h"; - break; - case 0x006e : - subcode_mnemonic= "OSE_BOOTP"; - product_name= "BOOTP client"; - file_name= "inetutilerr.h"; - break; - case 0x006f : - switch((error_code & 0x0000f000)){ - case 0x00000000 : - subcode_mnemonic= "OSE_RES"; - product_name= "DNS resolver"; - file_name= "inetutilerr.h"; - break; - case 0x00001000 : - subcode_mnemonic= "OSE_DHCPC"; - product_name= "DHCP client"; - file_name= "inetutilerr.h"; - break; - case 0x00002000 : - subcode_mnemonic= "OSE_FTP"; - product_name= "FTP client"; - file_name= "inetutilerr.h"; - break; - default : - subcode_mnemonic= "Unknown error"; - product_name= "unknown product"; - file_name = "header file unknown"; - break; - } - break; - case 0x00c2 : - subcode_mnemonic= "OSE_DNS"; - product_name= "DNS server"; - file_name= "dns_err.h"; - break; - /*INET**************************/ - case 0x0070 : - subcode_mnemonic= "INET_ERRBASE"; - product_name= "Internet Protocols (INET)"; - file_name= "ineterr.h"; - break; - case 0x0071 : - subcode_mnemonic= "WEBS_ERRBASE"; - product_name= "Web Server (WEBS)"; - file_name= "webserr.h"; - break; - case 0x0072 : - subcode_mnemonic= "SNMP"; - product_name= "SNMP"; - file_name= "header file unknown"; - break; - case 0x0073 : - subcode_mnemonic= "STP_BRIDGE"; - product_name= "STP bridge"; - file_name= "header file unknown"; - break; - case 0x0200 : - case 0x0201 : - case 0x0202 : - case 0x0203 : - case 0x0204 : - case 0x0205 : - case 0x0206 : - case 0x0207 : - case 0x0208 : - case 0x0209 : - case 0x020a : - case 0x020b : - case 0x020c : - case 0x020d : - case 0x020e : - case 0x020f : - subcode_mnemonic = "INETINIT_ERR_BASE"; - product_name = "INET"; - file_name = "startinet.c"; - break; - /*Miscellanous******************************************/ - case 0x0082 : - subcode_mnemonic= "OSE_HEAP_EBASE"; - product_name= "Heap Manager"; - file_name= "heap_err.h"; - break; - case 0x0088 : - subcode_mnemonic= "OSE_BSP"; - product_name= "Board Support Package"; - file_name= "bsperr.h"; - break; - case 0x008a : - subcode_mnemonic= "OSE_TOSV_EBASE"; - product_name= "Time Out Server"; - file_name= "tosverr.h"; - break; - case 0x008b : - subcode_mnemonic= "OSE_RTC_EBASE"; - product_name= "Real Time Clock"; - file_name= "rtcerr.h"; - break; - case 0x008d : - case 0x808d : - subcode_mnemonic= "OSENS_ERR_BASE"; - product_name= "Name Server"; - file_name= "osens_err.h"; - break; - case 0x008e : - subcode_mnemonic= "PMD_ERR_BASE"; - product_name= "Post Mortem Dump"; - file_name= "pmderr.h"; - break; - /*Embedded File System***********************************/ - case 0x0090 : - subcode_mnemonic= "OSE_EFS_COMMON"; - product_name= "EFS common"; - file_name= "efs_err.h"; - break; - case 0x0091 : - subcode_mnemonic= "OSE_EFS_FLIB"; - product_name= "EFS function library"; - file_name= "efs_err.h"; - break; - case 0x0092 : - subcode_mnemonic= "OSE_EFS_SERDD"; - product_name= "EFS serdd"; - file_name= "efs_err.h"; - break; - case 0x0093 : - subcode_mnemonic= "OSE_EFS_SHELL"; - product_name= "OSE shell"; - file_name= "efs_err.h"; - break; - case 0x0094 : - subcode_mnemonic= "OSE_EFS_STARTEFS"; - product_name= "EFS startefs.c"; - file_name= "efs_err.h"; - break; - /*Debugger related***************************************/ - case 0x00a0 : - subcode_mnemonic= "DBGSERVER_ERR_BASE"; - product_name= "Debug server for Illuminator"; - file_name= "degservererr.h"; - break; - case 0x00b2 
: - subcode_mnemonic= "OSE_MDM"; - product_name= "Multi INDRT monitor"; - file_name= "header file unknown"; - break; - /*Miscellanous*******************************************/ - case 0x00c0 : - subcode_mnemonic= "OSE_POTS_EBASE"; - product_name= "POTS tutorial example"; - file_name= "pots_err.h"; - break; - case 0x00c1 : - subcode_mnemonic= "OSE_PTH_ECODE_BASE"; - product_name= "Pthreads"; - file_name= "pthread_err.h"; - break; - case 0x00c3 : - subcode_mnemonic= "OSE_NTP_EBASE"; - product_name= "OSE NTP/SNTP"; - file_name= "ntp_err.h"; - break; - case 0x00c4 : - subcode_mnemonic= "TRILLIUM_BASE"; - product_name= "Trillium OSE port"; - file_name= "sk_ss.c"; - break; - case 0x00c5 : - subcode_mnemonic= "OSE_OSECPP_EBASE"; - product_name= "C++ Support with libosecpp.a"; - file_name= "cpp_err.h"; - break; - case 0x00c6 : - subcode_mnemonic= "OSE_RIP_ERR_BASE"; - product_name= "OSE RIP"; - file_name= "oserip.h"; - break; - /*Unknown error_code*************************************/ - default : - subcode_mnemonic= "Unknown error"; - product_name= "unknown product"; - file_name = "header file unknown"; - break; - } - } else { - /* user_called = 0, i.e. reported by the kernel */ - subcode_mnemonic= "OSE_KRN"; - product_name= "Kernel"; - file_name = "ose_err.h"; - } - - BaseString::snprintf(error_message.header1, - BUFSIZE, - "This is the OSE Example System Error handler\r\n"); - - BaseString::snprintf(error_message.err_hnd_file, - BUFSIZE, - "located in: " __FILE__ "\r\n"); - - BaseString::snprintf(error_message.header2, - BUFSIZE, - "An Error has been reported:\r\n"); - - if (user_called == (OSBOOLEAN) 0 ) { - BaseString::snprintf(error_message.user_called_line, - BUFSIZE, - "user_called: 0x%x (Error detected by the kernel)\r\n", - user_called); - } - else { - BaseString::snprintf(error_message.user_called_line, - BUFSIZE, - "user_called: 0x%x (Error detected by an application)\r\n", - user_called); - } - - BaseString::snprintf(error_message.error_code_line, - BUFSIZE, - "error code: 0x%08x\r\n", - error_code); - - BaseString::snprintf(error_message.subcode_line, - BUFSIZE, - " subcode: %s (0x%08x)\r\n", - subcode_mnemonic, - ( subcode << 16)); - - BaseString::snprintf(error_message.product_line, - BUFSIZE, - " product: %s\r\n", - product_name); - - BaseString::snprintf(error_message.header_file_line, - BUFSIZE, - " header file: %s\r\n", - file_name); - - BaseString::snprintf(error_message.extra_line, - BUFSIZE, - "extra: 0x%08x\r\n", - extra); - - if (error_code != OSE_ENO_KERN_SPACE || user_called){ - struct OS_pcb *pcb = get_pcb(current_process()); - const char *process_name = &pcb->strings[pcb->name]; - - BaseString::snprintf(error_message.current_process_id_line, - BUFSIZE, - "Current Process: 0x%08x\r\n", - current_process()); - - BaseString::snprintf(error_message.current_process_name_line, - BUFSIZE, - "Process Name: %s\r\n", - process_name); - - BaseString::snprintf(error_message.file_line, - BUFSIZE, - "File: %s\r\n", - &pcb->strings[pcb->file]); - - BaseString::snprintf(error_message.line_line, - BUFSIZE, - "Line: %d\r\n", - pcb->line); - - free_buf((union SIGNAL **)&pcb); - } - - if ( !(((error_code & OSE_EFATAL_MASK) != 0) && (user_called == 0))){ - /* If the error is reported by the kernel and the fatal flag is set, - * dbgprintf can't be trusted */ - ndbout << error_message.header1; - ndbout << error_message.err_hnd_file; - ndbout << error_message.header2; - ndbout << error_message.user_called_line; - ndbout << error_message.error_code_line; - ndbout << 
error_message.subcode_line; - ndbout << error_message.product_line; - ndbout << error_message.header_file_line; - ndbout << error_message.extra_line; - ndbout << error_message.current_process_id_line; - ndbout << error_message.current_process_name_line; - ndbout << error_message.file_line; - ndbout << error_message.line_line; - ndbout << endl; - } - - if(user_called){ - switch (error_code) { - /* Check for assertion failure (see oseassert.h and assert.c). */ - case (OSERRCODE) 0xffffffff: - { - if(extra != 0){ - char *expr = ((char **)extra)[0]; - char *file = ((char **)extra)[1]; - unsigned line = ((unsigned *)extra)[2]; - BaseString::snprintf(assert_line, BUFSIZE, "Assertion Failed: %s:%u: %s\r\n", file, line, expr); - ndbout << assert_line; - } - } - /* Check for unknown signal */ - case (OSERRCODE) 0xfffffffe: - { - union SIGNAL *sig = (union SIGNAL *)extra; - SIGSELECT signo = *(SIGSELECT*)sig; - PROCESS rcv_ = current_process(); - PROCESS snd_ = sender(&sig); - struct OS_pcb *rcv = get_pcb(rcv_); - const char *rcv_name = &rcv->strings[rcv->name]; - struct OS_pcb *snd = get_pcb(snd_); - const char *snd_name = &snd->strings[snd->name]; - BaseString::snprintf(unknown_signal_line, BUFSIZE, - "Unknown Signal Received\r\n"); - BaseString::snprintf(unknown_signal_line, BUFSIZE, - "Signal Number: 0x%08lx\r\n", signo); - BaseString::snprintf(unknown_signal_line, BUFSIZE, - "Sending Process: 0x%08lx (%s))\r\n", snd_, snd_name); - BaseString::snprintf(unknown_signal_line, BUFSIZE, - "Receiving Process: 0x%08lx (%s))\r\n", rcv_, rcv_name); - free_buf((union SIGNAL **)&rcv); - free_buf((union SIGNAL **)&snd); } - ndbout << unknown_signal_line; - ndbout << signal_number_line; - ndbout << sender_line; - ndbout << receiver_line; - } /* switch */ - } /* if */ - - /* Zero means the error has not been fixed by the error handler. 
*/ - error_handled = 0; - return error_handled; -} - -#endif diff --git a/storage/ndb/src/common/util/OutputStream.cpp b/storage/ndb/src/common/util/OutputStream.cpp index a41eef649dd..57d9eff91a3 100644 --- a/storage/ndb/src/common/util/OutputStream.cpp +++ b/storage/ndb/src/common/util/OutputStream.cpp @@ -64,36 +64,3 @@ SocketOutputStream::println(const char * fmt, ...){ va_end(ap); return ret; } - -#ifdef NDB_SOFTOSE -#include <dbgprintf.h> -int -SoftOseOutputStream::print(const char * fmt, ...){ - va_list ap; - char buf[1000]; - - va_start(ap, fmt); - if (fmt != 0) - BaseString::vsnprintf(buf, sizeof(buf)-1, fmt, ap); - else - buf[0] = 0; - va_end(ap); - dbgprintf(buf); -} - -int -SoftOseOutputStream::println(const char * fmt, ...){ - va_list ap; - char buf[1000]; - - va_start(ap, fmt); - if (fmt != 0) - BaseString::vsnprintf(buf, sizeof(buf)-1, fmt, ap); - else - buf[0] = 0; - va_end(ap); - - strcat(buf, "\n\r"); - dbgprintf(buf); -} -#endif diff --git a/storage/ndb/src/common/util/SocketServer.cpp b/storage/ndb/src/common/util/SocketServer.cpp index f9d2c7463be..6972c363e03 100644 --- a/storage/ndb/src/common/util/SocketServer.cpp +++ b/storage/ndb/src/common/util/SocketServer.cpp @@ -264,21 +264,6 @@ SocketServer::startSession(SessionInstance & si){ NDB_THREAD_PRIO_LOW); } -static -bool -transfer(NDB_SOCKET_TYPE sock){ -#if defined NDB_OSE || defined NDB_SOFTOSE - const PROCESS p = current_process(); - const size_t ps = sizeof(PROCESS); - int res = setsockopt(sock, SOL_SOCKET, SO_OSEOWNER, &p, ps); - if(res != 0){ - ndbout << "Failed to transfer ownership of socket" << endl; - return false; - } -#endif - return true; -} - void SocketServer::foreachSession(void (*func)(SocketServer::Session*, void *), void *data) { @@ -350,11 +335,6 @@ void* sessionThread_C(void* _sc){ SocketServer::Session * si = (SocketServer::Session *)_sc; - if(!transfer(si->m_socket)){ - si->m_stopped = true; - return 0; - } - /** * may have m_stopped set if we're transforming a mgm * connection into a transporter connection. 
diff --git a/storage/ndb/src/common/util/socket_io.cpp b/storage/ndb/src/common/util/socket_io.cpp index 74edbf94a6f..fec41a5d08e 100644 --- a/storage/ndb/src/common/util/socket_io.cpp +++ b/storage/ndb/src/common/util/socket_io.cpp @@ -103,6 +103,11 @@ readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, ptr += t; len -= t; } + if (i > 0 && buf[i-1] == '\r') + { + buf[i-1] = '\n'; + ptr--; + } ptr[0]= 0; return ptr - buf; } diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index 957248bcf56..e4b2c018c1a 100644 --- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -719,6 +719,7 @@ Cmvmi::execTEST_ORD(Signal * signal){ // Do nothing break; } + globalSignalLoggers.flushSignalLog(); } #endif diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp index 8a994db4fbc..ff05aac0b9b 100644 --- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp @@ -4394,7 +4394,8 @@ void Dbacc::commitOperation(Signal* signal) Uint32 opbits = operationRecPtr.p->m_op_bits; Uint32 op = opbits & Operationrec::OP_MASK; ndbrequire((opbits & Operationrec::OP_STATE_MASK) == Operationrec::OP_STATE_EXECUTED); - if ((opbits & Operationrec::OP_COMMIT_DELETE_CHECK) == 0 && (op != ZREAD)) + if ((opbits & Operationrec::OP_COMMIT_DELETE_CHECK) == 0 && + (op != ZREAD && op != ZSCAN_OP)) { jam(); /* This method is used to check whether the end result of the transaction diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 1c305d74863..8d0c427d488 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -3248,9 +3248,15 @@ Dbdict::restartCreateTab_dihComplete(Signal* signal, CreateTableRecordPtr createTabPtr; ndbrequire(c_opCreateTable.find(createTabPtr, callbackData)); - - //@todo check error - ndbrequire(createTabPtr.p->m_errorCode == 0); + + if(createTabPtr.p->m_errorCode) + { + char buf[100]; + BaseString::snprintf(buf, sizeof(buf), "Failed to create table during" + " restart, Error: %u", + createTabPtr.p->m_errorCode); + progError(__LINE__, NDBD_EXIT_RESOURCE_ALLOC_ERROR, buf); + } Callback callback; callback.m_callbackData = callbackData; diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp index 82554746a0f..89d393557c0 100644 --- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp +++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp @@ -18,6 +18,10 @@ #include <my_sys.h> #include <my_pthread.h> +#ifdef HAVE_XFS_XFS_H +#include <xfs/xfs.h> +#endif + #include "AsyncFile.hpp" #include <ErrorHandlingMacros.hpp> @@ -36,7 +40,7 @@ #undef HAVE_PREAD #endif -#if defined NDB_WIN32 || defined NDB_OSE || defined NDB_SOFTOSE +#if defined NDB_WIN32 #else // For readv and writev #include <sys/uio.h> @@ -60,11 +64,7 @@ void printErrorAndFlags(Uint32 used_flags); #endif // Define the size of the write buffer (for each thread) -#if defined NDB_SOFTOSE || defined NDB_OSE -#define WRITEBUFFERSIZE 65536 -#else #define WRITEBUFFERSIZE 262144 -#endif const char *actionName[] = { "open", @@ -463,6 +463,18 @@ no_odirect: Uint32 index = 0; Uint32 block = refToBlock(request->theUserReference); +#ifdef HAVE_XFS_XFS_H + if(platform_test_xfs_fd(theFd)) + { + ndbout_c("Using xfsctl(XFS_IOC_RESVSP64) to allocate disk space"); + xfs_flock64_t fl; 
+ fl.l_whence= 0; + fl.l_start= 0; + fl.l_len= (off64_t)sz; + if(xfsctl(NULL, theFd, XFS_IOC_RESVSP64, &fl) < 0) + ndbout_c("failed to optimally allocate disk space"); + } +#endif #ifdef HAVE_POSIX_FALLOCATE posix_fallocate(theFd, 0, sz); #endif @@ -1142,7 +1154,6 @@ void printErrorAndFlags(Uint32 used_flags) { case EOPNOTSUPP: strcat(buf, "EOPNOTSUPP"); break; -#if !defined NDB_OSE && !defined NDB_SOFTOSE case EMULTIHOP : strcat(buf, "EMULTIHOP"); break; @@ -1155,7 +1166,6 @@ void printErrorAndFlags(Uint32 used_flags) { case EOVERFLOW : strcat(buf, "EOVERFLOW"); break; -#endif case EROFS : strcat(buf, "EROFS"); break; @@ -1188,9 +1198,6 @@ void printErrorAndFlags(Uint32 used_flags) { break; } strcat(buf, "\" "); -#if defined NDB_OSE - strcat(buf, strerror(errno) << " "); -#endif strcat(buf, " flags: "); switch(used_flags & 3){ case O_RDONLY: @@ -1218,7 +1225,6 @@ void printErrorAndFlags(Uint32 used_flags) { strcat(buf, "O_NONBLOCK, "); if((used_flags & O_TRUNC)==O_TRUNC) strcat(buf, "O_TRUNC, "); -#if !defined NDB_OSE && !defined NDB_SOFTOSE if((used_flags & O_DSYNC)==O_DSYNC) strcat(buf, "O_DSYNC, "); if((used_flags & O_NDELAY)==O_NDELAY) @@ -1230,7 +1236,6 @@ void printErrorAndFlags(Uint32 used_flags) { strcat(buf, "O_SYNC, "); #endif DEBUG(ndbout_c(buf)); -#endif } #endif diff --git a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp index f46cc66fe16..5c1245fe0d7 100644 --- a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp +++ b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp @@ -65,10 +65,6 @@ // T : item from the channel or zero if channel is empty. // -#if defined NDB_OSE || defined NDB_SOFTOSE -#include "MemoryChannelOSE.hpp" -#else - #include "ErrorHandlingMacros.hpp" #include "CircularIndex.hpp" #include "NdbMutex.h" @@ -179,7 +175,5 @@ template <class T> T* MemoryChannel<T>::tryReadChannel() return tmp; } -#endif - #endif // MemoryChannel_H diff --git a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelOSE.hpp b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelOSE.hpp deleted file mode 100644 index ca90bc60153..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelOSE.hpp +++ /dev/null @@ -1,204 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifndef MemoryChannelOSE_H -#define MemoryChannelOSE_H - -//=========================================================================== -// -// .DESCRIPTION -// Pointer based communication channel for communication between two -// thread. It sends the pointer to the other signal via an OSE signal -// -// .TYPICAL USE: -// to communicate between threads. 
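The XFS branch added to AsyncFile.cpp above reserves the whole extent in one XFS_IOC_RESVSP64 request before the existing posix_fallocate call runs, which should keep large data files less fragmented. A rough, self-contained sketch of that pattern, assuming a build where <xfs/xfs.h> is present and HAVE_XFS_XFS_H is defined (the reserve_space helper and its early-return error handling are illustrative only):

#include <fcntl.h>
#include <sys/types.h>
#ifdef HAVE_XFS_XFS_H
#include <xfs/xfs.h>
#endif

/* Try to reserve 'len' bytes for fd, preferring the XFS-specific ioctl. */
static void reserve_space(int fd, off_t len)
{
#ifdef HAVE_XFS_XFS_H
  if (platform_test_xfs_fd(fd))        /* is fd on an XFS filesystem? */
  {
    xfs_flock64_t fl;
    fl.l_whence = 0;                   /* offsets relative to start of file */
    fl.l_start  = 0;
    fl.l_len    = len;                 /* reserve the full size up front */
    if (xfsctl(NULL, fd, XFS_IOC_RESVSP64, &fl) == 0)
      return;
  }
#endif
  (void)posix_fallocate(fd, 0, len);   /* generic fallback */
}
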
-// -// .EXAMPLE: -// See AsyncFile.C -//=========================================================================== -// -// -// MemoryChannel( int size= 256); -// Constuctor -// Parameters: -// size : is ignored in OSE version -// -// void operator ++ (); -// increments the index with one, if size is reached it is set to zero -// -// virtual void write( T *t); -// Puts the item in the channel if the channel is full an error is reported. -// Parameters: -// t: pointer to item to put in the channel, after this the item -// is shared with the other thread. -// errors -// AFS_ERROR_CHANNALFULL, channel is full -// -// T* read(); -// Reads a itemn from the channel, if channel is empty it blocks untill -// an item can be read. -// return -// T : item from the channel -// -// T* tryRead(); -// Reads a item from the channel, if channel is empty it returns zero. -// return -// T : item from the channel or zero if channel is empty. -// - -#include <ose.h> -#include "ErrorHandlingMacros.hpp" -#include "Error.hpp" -#include "NdbMutex.h" -#include "NdbCondition.h" - - - - - -template <class T> -class MemoryChannel -{ -public: - MemoryChannel( int size= 256); - virtual ~MemoryChannel( ); - - virtual void writeChannel( T *t); - T* readChannel(); - T* tryReadChannel(); - -private: - PROCESS theReceiverPid; -}; - -template <class T> class MemoryChannelMultipleWriter:public MemoryChannel<T> -{ -public: - MemoryChannelMultipleWriter( int size= 256); - ~MemoryChannelMultipleWriter( ); - void writeChannel( T *t); - -private: -}; - - -#define MEMCHANNEL_SIGBASE 5643 - -#define MEMCHANNEL_SIGNAL (MEMCHANNEL_SIGBASE + 1) /* !-SIGNO(struct MemChannelSignal)-! */ - - -struct MemChannelSignal -{ - SIGSELECT sigNo; - void* ptr; -}; - -union SIGNAL -{ - SIGSELECT sigNo; - struct MemChannelSignal memChanSig; -}; - -template <class T> MemoryChannel<T>::MemoryChannel( int size ) -{ - // Default receiver for this channel is the creating process - theReceiverPid = current_process(); -} - -template <class T> MemoryChannel<T>::~MemoryChannel( ) -{ -} - -template <class T> void MemoryChannel<T>::writeChannel( T *t) -{ - union SIGNAL* sig; - - sig = alloc(sizeof(struct MemChannelSignal), MEMCHANNEL_SIGNAL); - ((struct MemChannelSignal*)sig)->ptr = t; - send(&sig, theReceiverPid); -} - - -template <class T> T* MemoryChannel<T>::readChannel() -{ - T* tmp; - - static const SIGSELECT sel_mem[] = {1, MEMCHANNEL_SIGNAL}; - union SIGNAL* sig; - - tmp = NULL; /* Default value */ - - sig = receive((SIGSELECT*)sel_mem); - if (sig != NIL){ - if (sig->sigNo == MEMCHANNEL_SIGNAL){ - tmp = (T*)(((struct MemChannelSignal*)sig)->ptr); - }else{ - assert(1==0); - } - free_buf(&sig); - } - - return tmp; -} - -template <class T> T* MemoryChannel<T>::tryReadChannel() -{ - T* tmp; - - static const SIGSELECT sel_mem[] = {1, MEMCHANNEL_SIGNAL}; - union SIGNAL* sig; - - tmp = NULL; /* Default value */ - - sig = receive_w_tmo(0, (SIGSELECT*)sel_mem); - if (sig != NIL){ - if (sig->sigNo == MEMCHANNEL_SIGNAL){ - tmp = (T*)(((struct MemChannelSignal*)sig)->ptr); - }else{ - assert(1==0); - } - free_buf(&sig); - } - - return tmp; -} - - -#endif // MemoryChannel_H - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp index b20f810d029..27e2d33002d 100644 --- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp +++ b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp @@ -914,54 +914,6 @@ Uint32 Ndbfs::translateErrno(int aErrno) return FsRef::fsErrUnknown; } } -#elif 
defined NDB_OSE || defined NDB_SOFTOSE -Uint32 Ndbfs::translateErrno(int aErrno) -{ - switch (aErrno) - { - //permission denied - case EACCES: - case EROFS: - case ENXIO: - return FsRef::fsErrPermissionDenied; - //temporary not accessible - case EAGAIN: - case ETIMEDOUT: - case ENOLCK: - return FsRef::fsErrTemporaryNotAccessible; - //no space left on device - case ENFILE: - case EDQUOT: - case ENOSPC: - return FsRef::fsErrNoSpaceLeftOnDevice; - //none valid parameters - case EINVAL: - case EFBIG: - case EBADF: - case ENAMETOOLONG: - case EFAULT: - case EISDIR: - return FsRef::fsErrInvalidParameters; - //environment error - case EMLINK: - case ELOOP: - return FsRef::fsErrEnvironmentError; - - //no more process resources - case EMFILE: - case ENOMEM: - return FsRef::fsErrNoMoreResources; - //no file - case ENOENT: - return FsRef::fsErrFileDoesNotExist; - - case ERR_ReadUnderflow: - return FsRef::fsErrReadUnderflow; - - default: - return FsRef::fsErrUnknown; - } -} #else Uint32 Ndbfs::translateErrno(int aErrno) { diff --git a/storage/ndb/src/kernel/main.cpp b/storage/ndb/src/kernel/main.cpp index 2eb6cf4d869..3cdf0dca532 100644 --- a/storage/ndb/src/kernel/main.cpp +++ b/storage/ndb/src/kernel/main.cpp @@ -540,7 +540,7 @@ systemInfo(const Configuration & config, const LogLevel & logLevel){ void catchsigs(bool ignore){ -#if !defined NDB_WIN32 && !defined NDB_SOFTOSE && !defined NDB_OSE +#if !defined NDB_WIN32 static const int signals_shutdown[] = { #ifdef SIGBREAK diff --git a/storage/ndb/src/kernel/vm/Emulator.cpp b/storage/ndb/src/kernel/vm/Emulator.cpp index 7784048dcb3..cddadf0bdcf 100644 --- a/storage/ndb/src/kernel/vm/Emulator.cpp +++ b/storage/ndb/src/kernel/vm/Emulator.cpp @@ -140,15 +140,15 @@ NdbShutdown(NdbShutdownType type, globalData.theRestartFlag = perform_stop; bool restart = false; -#if ! ( defined NDB_OSE || defined NDB_SOFTOSE) + if((type != NST_Normal && globalEmulatorData.theConfiguration->stopOnError() == false) || type == NST_Restart) { restart = true; } -#endif - + + const char * shutting = "shutting down"; if(restart){ shutting = "restarting"; @@ -278,7 +278,7 @@ NdbShutdown(NdbShutdownType type, */ if (type== NST_Watchdog){ g_eventLogger.info("Watchdog is killing system the hard way"); -#if defined VM_TRACE && ( ! ( defined NDB_OSE || defined NDB_SOFTOSE) ) +#if defined VM_TRACE childAbort(-1,g_currentStartPhase); #else childExit(-1,g_currentStartPhase); diff --git a/storage/ndb/src/kernel/vm/ThreadConfig.cpp b/storage/ndb/src/kernel/vm/ThreadConfig.cpp index 76fcc4ba84f..59c5a1feba5 100644 --- a/storage/ndb/src/kernel/vm/ThreadConfig.cpp +++ b/storage/ndb/src/kernel/vm/ThreadConfig.cpp @@ -101,15 +101,6 @@ ThreadConfig::scanTimeQueue() void ThreadConfig::ipControlLoop() { -#if defined NDB_OSE || defined NDB_SOFTOSE -//-------------------------------------------------------------------- -// To let the Cello Watchdog do it's work NDB must sleep a short -// period every 10 minutes. If this is not done, the watchdog will -// reboot the board NDB is running on when the load is high. 
-//-------------------------------------------------------------------- - int loopCounter = 0; -#endif - //-------------------------------------------------------------------- // initialise the counter that keeps track of the current millisecond //-------------------------------------------------------------------- @@ -117,18 +108,6 @@ void ThreadConfig::ipControlLoop() Uint32 i = 0; while (globalData.theRestartFlag != perform_stop) { -#if defined NDB_OSE || defined NDB_SOFTOSE - loopCounter++; - if(loopCounter > 1000){ -//-------------------------------------------------------------------- -// This is done to allow OSE do a context switch to let the watchdog -// do it's stuff. -//-------------------------------------------------------------------- - NdbSleep_MilliSleep(1); - loopCounter = 0; - } -#endif - Uint32 timeOutMillis = 0; if (LEVEL_IDLE == globalData.highestAvailablePrio) { //-------------------------------------------------------------------- diff --git a/storage/ndb/src/mgmapi/mgmapi.cpp b/storage/ndb/src/mgmapi/mgmapi.cpp index b64b24aa3cf..e042a2a569a 100644 --- a/storage/ndb/src/mgmapi/mgmapi.cpp +++ b/storage/ndb/src/mgmapi/mgmapi.cpp @@ -1,4 +1,4 @@ -/* Copyright (C) 2003 MySQL AB + /* Copyright (C) 2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -1258,11 +1258,42 @@ ndb_mgm_get_event_severity_string(enum ndb_mgm_event_severity severity) } extern "C" +int +ndb_mgm_get_clusterlog_severity_filter(NdbMgmHandle handle, + struct ndb_mgm_severity* severity, + unsigned int severity_size) +{ + SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_get_clusterlog_severity_filter"); + const ParserRow<ParserDummy> getinfo_reply[] = { + MGM_CMD("clusterlog", NULL, ""), + MGM_ARG(clusterlog_severity_names[0], Int, Mandatory, ""), + MGM_ARG(clusterlog_severity_names[1], Int, Mandatory, ""), + MGM_ARG(clusterlog_severity_names[2], Int, Mandatory, ""), + MGM_ARG(clusterlog_severity_names[3], Int, Mandatory, ""), + MGM_ARG(clusterlog_severity_names[4], Int, Mandatory, ""), + MGM_ARG(clusterlog_severity_names[5], Int, Mandatory, ""), + MGM_ARG(clusterlog_severity_names[6], Int, Mandatory, ""), + }; + CHECK_HANDLE(handle, NULL); + CHECK_CONNECTED(handle, NULL); + + Properties args; + const Properties *reply; + reply = ndb_mgm_call(handle, getinfo_reply, "get info clusterlog", &args); + CHECK_REPLY(reply, NULL); + + for(unsigned int i=0; i < severity_size; i++) { + reply->get(clusterlog_severity_names[severity[i].category], &severity[i].value); + } + return severity_size; +} + +extern "C" const unsigned int * -ndb_mgm_get_clusterlog_severity_filter(NdbMgmHandle handle) +ndb_mgm_get_clusterlog_severity_filter_old(NdbMgmHandle handle) { SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_get_clusterlog_severity_filter"); - unsigned int enabled[(int)NDB_MGM_EVENT_SEVERITY_ALL]= + static unsigned int enabled[(int)NDB_MGM_EVENT_SEVERITY_ALL]= {0,0,0,0,0,0,0}; const ParserRow<ParserDummy> getinfo_reply[] = { MGM_CMD("clusterlog", NULL, ""), @@ -1378,8 +1409,45 @@ static const char *clusterlog_names[]= { "startup", "shutdown", "statistics", "checkpoint", "noderestart", "connection", "info", "warning", "error", "congestion", "debug", "backup" }; extern "C" +int +ndb_mgm_get_clusterlog_loglevel(NdbMgmHandle handle, + struct ndb_mgm_loglevel* loglevel, + unsigned int loglevel_size) +{ + SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_get_clusterlog_loglevel"); + int loglevel_count = 
loglevel_size; + const ParserRow<ParserDummy> getloglevel_reply[] = { + MGM_CMD("get cluster loglevel", NULL, ""), + MGM_ARG(clusterlog_names[0], Int, Mandatory, ""), + MGM_ARG(clusterlog_names[1], Int, Mandatory, ""), + MGM_ARG(clusterlog_names[2], Int, Mandatory, ""), + MGM_ARG(clusterlog_names[3], Int, Mandatory, ""), + MGM_ARG(clusterlog_names[4], Int, Mandatory, ""), + MGM_ARG(clusterlog_names[5], Int, Mandatory, ""), + MGM_ARG(clusterlog_names[6], Int, Mandatory, ""), + MGM_ARG(clusterlog_names[7], Int, Mandatory, ""), + MGM_ARG(clusterlog_names[8], Int, Mandatory, ""), + MGM_ARG(clusterlog_names[9], Int, Mandatory, ""), + MGM_ARG(clusterlog_names[10], Int, Mandatory, ""), + MGM_ARG(clusterlog_names[11], Int, Mandatory, ""), + }; + CHECK_HANDLE(handle, NULL); + CHECK_CONNECTED(handle, NULL); + + Properties args; + const Properties *reply; + reply = ndb_mgm_call(handle, getloglevel_reply, "get cluster loglevel", &args); + CHECK_REPLY(reply, NULL); + + for(int i=0; i < loglevel_count; i++) { + reply->get(clusterlog_names[loglevel[i].category], &loglevel[i].value); + } + return loglevel_count; +} + +extern "C" const unsigned int * -ndb_mgm_get_clusterlog_loglevel(NdbMgmHandle handle) +ndb_mgm_get_clusterlog_loglevel_old(NdbMgmHandle handle) { SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_get_clusterlog_loglevel"); int loglevel_count = CFG_MAX_LOGLEVEL - CFG_MIN_LOGLEVEL + 1 ; diff --git a/storage/ndb/src/mgmclient/CommandInterpreter.cpp b/storage/ndb/src/mgmclient/CommandInterpreter.cpp index debf5343a90..47204ff6b51 100644 --- a/storage/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp @@ -1634,7 +1634,15 @@ CommandInterpreter::executeClusterLog(char* parameters) char * item = strtok_r(tmpString, " ", &tmpPtr); int enable; - const unsigned int *enabled= ndb_mgm_get_logfilter(m_mgmsrv); + ndb_mgm_severity enabled[NDB_MGM_EVENT_SEVERITY_ALL] = + {{NDB_MGM_EVENT_SEVERITY_ON,0}, + {NDB_MGM_EVENT_SEVERITY_DEBUG,0}, + {NDB_MGM_EVENT_SEVERITY_INFO,0}, + {NDB_MGM_EVENT_SEVERITY_WARNING,0}, + {NDB_MGM_EVENT_SEVERITY_ERROR,0}, + {NDB_MGM_EVENT_SEVERITY_CRITICAL,0}, + {NDB_MGM_EVENT_SEVERITY_ALERT,0}}; + ndb_mgm_get_clusterlog_severity_filter(m_mgmsrv, &enabled[0], NDB_MGM_EVENT_SEVERITY_ALL); if(enabled == NULL) { ndbout << "Couldn't get status" << endl; printError(); @@ -1647,25 +1655,25 @@ CommandInterpreter::executeClusterLog(char* parameters) ********************/ if (strcasecmp(item, "INFO") == 0) { DBUG_PRINT("info",("INFO")); - if(enabled[0] == 0) + if(enabled[0].value == 0) { ndbout << "Cluster logging is disabled." << endl; m_error = 0; DBUG_VOID_RETURN; } #if 0 - for(i = 0; i<7;i++) - printf("enabled[%d] = %d\n", i, enabled[i]); + for(i = 0; i<DB_MGM_EVENT_SEVERITY_ALL;i++) + printf("enabled[%d] = %d\n", i, enabled[i].value); #endif ndbout << "Severities enabled: "; for(i = 1; i < (int)NDB_MGM_EVENT_SEVERITY_ALL; i++) { - const char *str= ndb_mgm_get_event_severity_string((ndb_mgm_event_severity)i); + const char *str= ndb_mgm_get_event_severity_string(enabled[i].category); if (str == 0) { DBUG_ASSERT(false); continue; } - if(enabled[i]) + if(enabled[i].value) ndbout << BaseString(str).ndb_toupper() << " "; } ndbout << endl; diff --git a/storage/ndb/src/mgmsrv/Config.hpp b/storage/ndb/src/mgmsrv/Config.hpp index 8e16ddf1810..05a48ad91f0 100644 --- a/storage/ndb/src/mgmsrv/Config.hpp +++ b/storage/ndb/src/mgmsrv/Config.hpp @@ -39,7 +39,7 @@ class ConfigInfo; * - Connections between nodes and computers the nodes will execute on. 
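Both of the new mgmapi getters above use the same caller-allocated pattern that the CommandInterpreter hunk relies on: the caller passes an array of {category, value} pairs, the function fills in value for each requested category and returns the number of entries handled. A short sketch of a caller, assuming a connected NdbMgmHandle and the struct ndb_mgm_severity declaration that presumably accompanies the new prototypes in mgmapi.h (the show_severities helper name is made up):

#include <stdio.h>
#include <mgmapi.h>

static void show_severities(NdbMgmHandle h)
{
  struct ndb_mgm_severity sev[NDB_MGM_EVENT_SEVERITY_ALL] =
  {
    {NDB_MGM_EVENT_SEVERITY_ON,       0},
    {NDB_MGM_EVENT_SEVERITY_DEBUG,    0},
    {NDB_MGM_EVENT_SEVERITY_INFO,     0},
    {NDB_MGM_EVENT_SEVERITY_WARNING,  0},
    {NDB_MGM_EVENT_SEVERITY_ERROR,    0},
    {NDB_MGM_EVENT_SEVERITY_CRITICAL, 0},
    {NDB_MGM_EVENT_SEVERITY_ALERT,    0}
  };

  /* Fills sev[i].value (non-zero = enabled) for every category passed in. */
  int n = ndb_mgm_get_clusterlog_severity_filter(h, sev,
                                                 NDB_MGM_EVENT_SEVERITY_ALL);
  for (int i = 0; i < n; i++)
  {
    const char *name = ndb_mgm_get_event_severity_string(sev[i].category);
    printf("%s: %d\n", name ? name : "?", (int)sev[i].value);
  }
}

The caller-allocated form also sidesteps the old interface's return of a pointer to a function-local array, which the patch papers over in the _old variants by making their buffers static.
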
 *
 * The following categories (sections) of configuration parameters exists:
- * - COMPUTER, DB, MGM, API, TCP, SCI, SHM, OSE
+ * - COMPUTER, DB, MGM, API, TCP, SCI, SHM
 *
 */
diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp
index 6d36662e516..25b6454b3ad 100644
--- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp
+++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp
@@ -66,8 +66,7 @@ ConfigInfo::m_sectionNames[]={
   "TCP",
   "SCI",
-  "SHM",
-  "OSE"
+  "SHM"
 };
 
 const int ConfigInfo::m_noOfSectionNames = sizeof(m_sectionNames)/sizeof(char*);
@@ -112,12 +111,10 @@ ConfigInfo::m_SectionRules[] = {
   { "TCP", checkConnectionSupport, 0 },
   { "SHM", checkConnectionSupport, 0 },
   { "SCI", checkConnectionSupport, 0 },
-  { "OSE", checkConnectionSupport, 0 },
 
   { "TCP", transformConnection, 0 },
   { "SHM", transformConnection, 0 },
   { "SCI", transformConnection, 0 },
-  { "OSE", transformConnection, 0 },
 
   { DB_TOKEN,  fixNodeHostname, 0 },
   { API_TOKEN, fixNodeHostname, 0 },
@@ -129,9 +126,7 @@ ConfigInfo::m_SectionRules[] = {
   { "SHM", fixNodeId, "NodeId2" },
   { "SCI", fixNodeId, "NodeId1" },
   { "SCI", fixNodeId, "NodeId2" },
-  { "OSE", fixNodeId, "NodeId1" },
-  { "OSE", fixNodeId, "NodeId2" },
-
+
   { "TCP", fixHostname, "HostName1" },
   { "TCP", fixHostname, "HostName2" },
   { "SHM", fixHostname, "HostName1" },
@@ -140,8 +135,6 @@ ConfigInfo::m_SectionRules[] = {
   { "SCI", fixHostname, "HostName2" },
   { "SHM", fixHostname, "HostName1" },
   { "SHM", fixHostname, "HostName2" },
-  { "OSE", fixHostname, "HostName1" },
-  { "OSE", fixHostname, "HostName2" },
 
   { "TCP", fixPortNumber, 0 }, // has to come after fixHostName
   { "SHM", fixPortNumber, 0 }, // has to come after fixHostName
@@ -165,7 +158,6 @@ ConfigInfo::m_SectionRules[] = {
   { "TCP", checkConnectionConstraints, 0 },
   { "SHM", checkConnectionConstraints, 0 },
   { "SCI", checkConnectionConstraints, 0 },
-  { "OSE", checkConnectionConstraints, 0 },
 
   { "TCP", checkTCPConstraints, "HostName1" },
   { "TCP", checkTCPConstraints, "HostName2" },
@@ -182,8 +174,7 @@ ConfigInfo::m_SectionRules[] = {
 
   { "TCP", saveInConfigValues, 0 },
   { "SHM", saveInConfigValues, 0 },
-  { "SCI", saveInConfigValues, 0 },
-  { "OSE", saveInConfigValues, 0 }
+  { "SCI", saveInConfigValues, 0 }
 };
 
 const int ConfigInfo::m_NoOfRules = sizeof(m_SectionRules)/sizeof(SectionRule);
@@ -2147,150 +2138,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     false,
     ConfigInfo::CI_STRING,
     UNDEFINED,
-    0, 0 },
-
-  /****************************************************************************
-   * OSE
-   ***************************************************************************/
-  {
-    CFG_SECTION_CONNECTION,
-    "OSE",
-    "OSE",
-    "Connection section",
-    ConfigInfo::CI_USED,
-    false,
-    ConfigInfo::CI_SECTION,
-    (const char *)CONNECTION_TYPE_OSE,
-    0, 0
-  },
-
-  {
-    CFG_CONNECTION_HOSTNAME_1,
-    "HostName1",
-    "OSE",
-    "Name of computer on one side of the connection",
-    ConfigInfo::CI_USED,
-    false,
-    ConfigInfo::CI_STRING,
-    UNDEFINED,
-    0, 0 },
-
-  {
-    CFG_CONNECTION_HOSTNAME_2,
-    "HostName2",
-    "OSE",
-    "Name of computer on one side of the connection",
-    ConfigInfo::CI_USED,
-    false,
-    ConfigInfo::CI_STRING,
-    UNDEFINED,
-    0, 0 },
-
-  {
-    CFG_CONNECTION_NODE_1,
-    "NodeId1",
-    "OSE",
-    "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
-    ConfigInfo::CI_USED,
-    false,
-    ConfigInfo::CI_INT,
-    MANDATORY,
-    "0",
-    STR_VALUE(MAX_INT_RNIL) },
-
-  {
-    CFG_CONNECTION_NODE_2,
-    "NodeId2",
-    "OSE",
-    "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
-    ConfigInfo::CI_USED,
-    false,
-    ConfigInfo::CI_INT,
-    UNDEFINED,
-    "0",
-    STR_VALUE(MAX_INT_RNIL) },
-
-  {
-    CFG_CONNECTION_SEND_SIGNAL_ID,
-    "SendSignalId",
-    "OSE",
-    "Sends id in each signal. Used in trace files.",
-    ConfigInfo::CI_USED,
-    false,
-    ConfigInfo::CI_BOOL,
-    "true",
-    "false",
-    "true" },
-
-  {
-    CFG_CONNECTION_CHECKSUM,
-    "Checksum",
-    "OSE",
-    "If checksum is enabled, all signals between nodes are checked for errors",
-    ConfigInfo::CI_USED,
-    false,
-    ConfigInfo::CI_BOOL,
-    "false",
-    "false",
-    "true" },
-
-  {
-    CFG_OSE_PRIO_A_SIZE,
-    "PrioASignalSize",
-    "OSE",
-    "Size of priority A signals (in bytes)",
-    ConfigInfo::CI_USED,
-    false,
-    ConfigInfo::CI_INT,
-    "1000",
-    "0",
-    STR_VALUE(MAX_INT_RNIL) },
-
-  {
-    CFG_OSE_PRIO_B_SIZE,
-    "PrioBSignalSize",
-    "OSE",
-    "Size of priority B signals (in bytes)",
-    ConfigInfo::CI_USED,
-    false,
-    ConfigInfo::CI_INT,
-    "1000",
-    "0",
-    STR_VALUE(MAX_INT_RNIL) },
-
-  {
-    CFG_OSE_RECEIVE_ARRAY_SIZE,
-    "ReceiveArraySize",
-    "OSE",
-    "Number of OSE signals checked for correct ordering (in no of OSE signals)",
-    ConfigInfo::CI_USED,
-    false,
-    ConfigInfo::CI_INT,
-    "10",
-    "0",
-    STR_VALUE(MAX_INT_RNIL) },
-
-  {
-    CFG_CONNECTION_NODE_1_SYSTEM,
-    "NodeId1_System",
-    "OSE",
-    "System for node 1 in connection",
-    ConfigInfo::CI_INTERNAL,
-    false,
-    ConfigInfo::CI_STRING,
-    UNDEFINED,
-    0, 0 },
-
-  {
-    CFG_CONNECTION_NODE_2_SYSTEM,
-    "NodeId2_System",
-    "OSE",
-    "System for node 2 in connection",
-    ConfigInfo::CI_INTERNAL,
-    false,
-    ConfigInfo::CI_STRING,
-    UNDEFINED,
-    0, 0 },
+    0, 0 }
 };
 
 const int ConfigInfo::m_NoOfParams = sizeof(m_ParamInfo) / sizeof(ParamInfo);
@@ -2835,12 +2683,7 @@ checkConnectionSupport(InitConfigFileParser::Context & ctx, const char * data)
     error= 1;
 #endif
   }
-  else if (strcasecmp("OSE",ctx.fname) == 0)
-  {
-#ifndef NDB_OSE_TRANSPORTER
-    error= 1;
-#endif
-  }
+
   if (error)
   {
     ctx.reportError("Binary not compiled with this connection support, "
diff --git a/storage/ndb/src/mgmsrv/main.cpp b/storage/ndb/src/mgmsrv/main.cpp
index 9eb4ad9bde3..dd7f4680dff 100644
--- a/storage/ndb/src/mgmsrv/main.cpp
+++ b/storage/ndb/src/mgmsrv/main.cpp
@@ -37,11 +37,7 @@
 
 #include <NdbAutoPtr.hpp>
 
-#if defined NDB_OSE || defined NDB_SOFTOSE
-#include <efs.h>
-#else
 #include <ndb_mgmclient.hpp>
-#endif
 
 #undef DEBUG
 #define DEBUG(x) ndbout << x << endl;
@@ -212,15 +208,6 @@ int main(int argc, char** argv)
 start:
   glob= new MgmGlobals;
 
-  /**
-   * OSE specific. Enable shared ownership of file system resources.
-   * This is needed in order to use the cluster log since the events
-   * from the cluster is written from the 'ndb_receive'(NDBAPI) thread/process.
-   */
-#if defined NDB_OSE || defined NDB_SOFTOSE
-  efs_segment_share();
-#endif
-
   global_mgmt_server_check = 1;
 
   if (opt_interactive ||
@@ -349,7 +336,6 @@ start:
   g_RestartServer= false;
   glob->socketServer->startServer();
 
-#if ! defined NDB_OSE && ! defined NDB_SOFTOSE
   if(opt_interactive) {
     BaseString con_str;
     if(glob->interface_name)
@@ -359,7 +345,6 @@ start:
     Ndb_mgmclient com(con_str.c_str(), 1);
     while(g_StopServer != true && read_and_execute(&com, "ndb_mgm> ", 1));
   } else
-#endif
   {
     while(g_StopServer != true)
       NdbSleep_MilliSleep(500);
diff --git a/storage/ndb/src/ndbapi/ClusterMgr.cpp b/storage/ndb/src/ndbapi/ClusterMgr.cpp
index 4a865a0eb14..ef0bf51cc2b 100644
--- a/storage/ndb/src/ndbapi/ClusterMgr.cpp
+++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp
@@ -46,15 +46,7 @@ void*
 runClusterMgr_C(void * me)
 {
   ((ClusterMgr*) me)->threadMain();
-  /**
-   * Sleep to allow another thread that is not exiting to take control
-   * of signals allocated by this thread
-   *
-   * see Ndb::~Ndb() in Ndbinit.cpp
-   */
-#ifdef NDB_OSE
-  NdbSleep_MilliSleep(50);
-#endif
+
   return NULL;
 }
 
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index dca1432d18a..40eb815d48b 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -2299,7 +2299,7 @@ NdbDictionaryImpl::createTable(NdbTableImpl &t)
   // blob tables - use "t2" to get values set by kernel
   if (t2->m_noOfBlobs != 0 && createBlobTables(*t2) != 0) {
     int save_code = m_error.code;
-    (void)dropTable(*t2);
+    (void)dropTableGlobal(*t2);
     m_error.code = save_code;
     delete t2;
     DBUG_RETURN(-1);
   }
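The NdbDictionaryImpl.cpp hunk above switches the failure-path cleanup to dropTableGlobal() while keeping the surrounding pattern intact: the error code that caused the failure is saved, the partially created table is dropped (which may overwrite that code), and the original code is restored for the caller. A small self-contained sketch of that pattern, using hypothetical names rather than the real NdbDictionary API (the error value 4242 is an arbitrary placeholder):

#include <cstdio>

// Hypothetical stand-ins for the dictionary calls; only the error-code
// bookkeeping is the point of this sketch.
struct Error { int code; };
static Error g_error = { 0 };

static int create_blob_tables() { g_error.code = 4242; return -1; } // pretend failure
static int drop_table_global()  { g_error.code = 0;    return 0;  } // cleanup clobbers the code

static int create_table_with_blobs()
{
  if (create_blob_tables() != 0)
  {
    int save_code = g_error.code;  // remember the real cause of the failure
    (void)drop_table_global();     // best-effort cleanup, result ignored
    g_error.code = save_code;      // restore it so the caller sees the cause
    return -1;
  }
  return 0;
}

int main()
{
  create_table_with_blobs();
  std::printf("error code seen by caller: %d\n", g_error.code); // prints 4242, not 0
  return 0;
}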
diff --git a/storage/ndb/src/ndbapi/Ndbinit.cpp b/storage/ndb/src/ndbapi/Ndbinit.cpp
index de67b99c8d8..3b53c9484d4 100644
--- a/storage/ndb/src/ndbapi/Ndbinit.cpp
+++ b/storage/ndb/src/ndbapi/Ndbinit.cpp
@@ -163,18 +163,6 @@ Ndb::~Ndb()
 
   delete theImpl;
 
-  /**
-   * This sleep is to make sure that the transporter
-   * send thread will come in and send any
-   * signal buffers that this thread may have allocated.
-   * If that doesn't happen an error will occur in OSE
-   * when trying to restore a signal buffer allocated by a thread
-   * that have been killed.
-   */
-#ifdef NDB_OSE
-  NdbSleep_MilliSleep(50);
-#endif
-
 #ifdef POORMANSPURIFY
 #ifdef POORMANSGUI
   ndbout << "cnewSignals=" << cnewSignals << endl;
diff --git a/storage/ndb/src/ndbapi/TransporterFacade.cpp b/storage/ndb/src/ndbapi/TransporterFacade.cpp
index 8d0693f17a7..7bede292f5f 100644
--- a/storage/ndb/src/ndbapi/TransporterFacade.cpp
+++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp
@@ -398,7 +398,7 @@ TransporterFacade::start_instance(int nodeId,
    * This due to the fact that a socket connection might have
    * been closed in between a select and a corresponding send
    */
-#if !defined NDB_OSE && !defined NDB_SOFTOSE && !defined NDB_WIN32
+#if !defined NDB_WIN32
   signal(SIGPIPE, SIG_IGN);
 #endif
 
diff --git a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
index 3bb6b2fe414..cae01877322 100644
--- a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
+++ b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
@@ -459,9 +459,6 @@ Ndb_cluster_connection_impl::init_nodes_vector(Uint32 nodeid,
       group--; // upgrade group value
       break;
     }
-    case CONNECTION_TYPE_OSE:{
-      break;
-    }
     }
     m_impl.m_all_nodes.push_back(Node(group,remoteNodeId));
     DBUG_PRINT("info",("saved %d %d", group,remoteNodeId));
diff --git a/storage/ndb/test/ndbapi/acid.cpp b/storage/ndb/test/ndbapi/acid.cpp
index 3eb1625be26..a93286e0c97 100644
--- a/storage/ndb/test/ndbapi/acid.cpp
+++ b/storage/ndb/test/ndbapi/acid.cpp
@@ -472,25 +472,12 @@ NDB_COMMAND(acid, "acid", "acid", "acid", 65535)
     }
     NdbSchemaOp* pNdbSchemaOp = NULL ;
     VerifyMethodPtr(pNdbSchemaOp, pNdbSchemaCon, getNdbSchemaOp());
-#if defined NDB_OSE || defined NDB_SOFTOSE
-    VerifyMethodInt(pNdbSchemaOp, createTable(
-      c_szWarehouse,
-      (4+4+4+12)*1.02*g_nWarehouseCount/1024+1,
-      TupleKey,
-      (4+14)*g_nWarehouseCount/8/1024+1,
-      All,
-      6,
-      78,
-      80,
-      1,
-      false));
-#else
     VerifyMethodInt(pNdbSchemaOp, createTable(
       c_szWarehouse,
       (4+4+4+12)*1.02*g_nWarehouseCount/1024+1,
       TupleKey,
       (4+14)*g_nWarehouseCount/8/1024+1));
-#endif
+
     VerifyMethodInt(pNdbSchemaOp, createAttribute(c_szWarehouseNumber, TupleKey, 32, 1, UnSigned, MMBased, false));
     VerifyMethodInt(pNdbSchemaOp, createAttribute(c_szWarehouseSum, NoKey, 32, 1, UnSigned, MMBased, false));
     VerifyMethodInt(pNdbSchemaOp, createAttribute(c_szWarehouseCount, NoKey, 32, 1, UnSigned, MMBased, false));
@@ -499,26 +486,13 @@ NDB_COMMAND(acid, "acid", "acid", "acid", 65535)
 
     pNdbSchemaCon= NdbSchemaCon::startSchemaTrans(pNdb);
     VerifyMethodPtr(pNdbSchemaOp, pNdbSchemaCon, getNdbSchemaOp());
-#if defined NDB_OSE || defined NDB_SOFTOSE
-    VerifyMethodInt(pNdbSchemaOp, createTable(
-      c_szDistrict,
-      (4+4+4+4+12)*1.02*g_nWarehouseCount*g_nDistrictPerWarehouse/1024+1,
-      TupleKey,
-      (4+4+14)*g_nWarehouseCount*g_nDistrictPerWarehouse/8/1024+1,
-      All,
-      6,
-      78,
-      80,
-      1,
-      false));
-#else
     VerifyMethodInt(pNdbSchemaOp, createTable(
      c_szDistrict,
      (4+4+4+4+12)*1.02*g_nWarehouseCount*g_nDistrictPerWarehouse/1024+1,
      TupleKey,
      (4+4+14)*g_nWarehouseCount*g_nDistrictPerWarehouse/8/1024+1));
-#endif
+
     VerifyMethodInt(pNdbSchemaOp, createAttribute(c_szDistrictWarehouseNumber, TupleKey, 32, 1, UnSigned, MMBased, false));
     VerifyMethodInt(pNdbSchemaOp, createAttribute(c_szDistrictNumber, TupleKey, 32, 1, UnSigned, MMBased, false));
     VerifyMethodInt(pNdbSchemaOp, createAttribute(c_szDistrictSum, NoKey, 32, 1, UnSigned, MMBased, false));
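With the OSE/SoftOSE branches gone, the TransporterFacade.cpp change above leaves SIGPIPE handling guarded only by NDB_WIN32. A rough standalone sketch of why that guard exists; NDB_WIN32 is the macro the NDB source actually uses, everything else here is illustrative:

#include <csignal>

// On POSIX systems, writing to a socket whose peer has already closed the
// connection raises SIGPIPE, which terminates the process by default.
// Ignoring the signal lets the write fail with EPIPE instead, so the
// transporter can treat it like any other send error. Windows has no
// SIGPIPE, hence the single remaining platform check.
static void ignore_sigpipe()
{
#if !defined NDB_WIN32
  std::signal(SIGPIPE, SIG_IGN);
#endif
}

int main()
{
  ignore_sigpipe();
  return 0;
}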
diff --git a/storage/ndb/test/ndbapi/flexHammer.cpp b/storage/ndb/test/ndbapi/flexHammer.cpp
index f254b1e5ccf..a9c96db3f49 100644
--- a/storage/ndb/test/ndbapi/flexHammer.cpp
+++ b/storage/ndb/test/ndbapi/flexHammer.cpp
@@ -63,9 +63,6 @@ Revision history:
 
 ErrorData * flexHammerErrorData;
 
-#if defined NDB_OSE || defined NDB_SOFTOSE
-#include <outfmt.h>
-#endif
 
 #define MAXSTRLEN 16
 #define MAXATTR 64
@@ -775,24 +772,11 @@ createTables(Ndb* pMyNdb)
       } // if
 
       // Create tables, rest of parameters are default right now
-#if defined NDB_OSE || defined NDB_SOFTOSE
-      check = MySchemaOp->createTable(tableName[i],
-                                      8, // Table Size
-                                      TupleKey, // Key Type
-                                      40, // Nr of Pages
-                                      All,
-                                      6,
-                                      78,
-                                      80,
-                                      1,
-                                      false);
-
-#else
       check = MySchemaOp->createTable(tableName[i],
                                       8, // Table Size
                                       TupleKey, // Key Type
                                       40); // Nr of Pages
-#endif
+
       if (check == -1) {
         // Clean up opened schema transaction
         NdbSchemaCon::closeSchemaTrans(MySchemaTransaction);
diff --git a/storage/ndb/test/ndbapi/flexScan.cpp b/storage/ndb/test/ndbapi/flexScan.cpp
index 1f001bd0210..bc82d06d4f4 100644
--- a/storage/ndb/test/ndbapi/flexScan.cpp
+++ b/storage/ndb/test/ndbapi/flexScan.cpp
@@ -776,23 +776,11 @@ static int createTables(Ndb* pMyNdb)
       return (-1);
     } // if
 
-#if defined NDB_OSE || defined NDB_SOFTOSE
-    check = MySchemaOp->createTable(tableName[i - 1],
-                                    8, // Table Size
-                                    TupleKey, // Key Type
-                                    40, // Nr of Pages
-                                    All,
-                                    6,
-                                    78,
-                                    80,
-                                    1,
-                                    false);
-#else
     check = MySchemaOp->createTable(tableName[i - 1]
                                     ,8 // Table Size
                                     ,TupleKey // Key Type
                                     ,40); // Nr of Pages
-#endif
+
     if (check == -1) {
       NdbSchemaCon::closeSchemaTrans(MySchemaTransaction);
       return -1;
diff --git a/storage/ndb/test/ndbapi/initronja.cpp b/storage/ndb/test/ndbapi/initronja.cpp
index 3ce274e4319..e0fc3b2ad70 100644
--- a/storage/ndb/test/ndbapi/initronja.cpp
+++ b/storage/ndb/test/ndbapi/initronja.cpp
@@ -96,24 +96,11 @@ NDB_COMMAND(initronja, "initronja", "initronja", "initronja", 65535){
 
  if(!MySchemaTransaction) goto error_handler;
  MySchemaOp = MySchemaTransaction->getNdbSchemaOp();
 if(!MySchemaOp) goto error_handler;
-#if defined NDB_OSE || defined NDB_SOFTOSE
-  check = MySchemaOp->createTable( "SHORT_REC"
-                                   ,8 // Table Size
-                                   ,TupleKey // Key Type
-                                   ,40 // Nr of Pages
-                                   ,All
-                                   ,6
-                                   ,78
-                                   ,80
-                                   ,1
-                                   ,false);
-#else
  check = MySchemaOp->createTable( "SHORT_REC"
                                   ,8 // Table Size
                                   ,TupleKey // Key Type
                                   ,40 // Nr of Pages
                                   );
-#endif
 
  if (check == -1) goto error_handler;
 
 ndbout << "Key attribute..." ;
@@ -159,24 +146,11 @@ NDB_COMMAND(initronja, "initronja", "initronja", "initronja", 65535){
 
  MySchemaOp = MySchemaTransaction->getNdbSchemaOp();
 if(!MySchemaOp) goto error_handler;
-#if defined NDB_OSE || defined NDB_SOFTOSE
-  check = MySchemaOp->createTable( "LONG_REC"
-                                   ,8 // Table Size
-                                   ,TupleKey // Key Type
-                                   ,40 // Nr of Pages
-                                   ,All
-                                   ,6
-                                   ,78
-                                   ,80
-                                   ,1
-                                   ,false);
-#else
  check = MySchemaOp->createTable( "LONG_REC"
                                   ,8 // Table Size
                                   ,TupleKey // Key Type
                                   ,40 // Nr of Pages
                                   );
-#endif
 
  if (check == -1) goto error_handler;
diff --git a/storage/ndb/test/ndbapi/testDict.cpp b/storage/ndb/test/ndbapi/testDict.cpp
index 9bc085aa48d..674a88d75d5 100644
--- a/storage/ndb/test/ndbapi/testDict.cpp
+++ b/storage/ndb/test/ndbapi/testDict.cpp
@@ -1988,7 +1988,7 @@ runDictOps(NDBT_Context* ctx, NDBT_Step* step)
     // create indexes
     const char** indlist = NDBT_Tables::getIndexes(tabName);
     uint indnum = 0;
-    while (*indlist != 0) {
+    while (indlist != 0 && *indlist != 0) {
       uint count = 0;
 try_create_index:
       count++;
diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt
index f9551c8d526..786f74cade4 100644
--- a/storage/ndb/test/run-test/daily-basic-tests.txt
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt
@@ -545,7 +545,7 @@ args: -n TemporaryTables T1 T6 T7 T8
 
 max-time: 1500
 cmd: testDict
-args: -n Restart_NR2 T1
+args: -n Restart_NR2 T1 I3
 
 #
 # TEST NDBAPI
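The testDict.cpp fix above guards against NDBT_Tables::getIndexes() returning a null pointer rather than an empty, null-terminated list. A minimal sketch of the same defensive iteration, with a hypothetical get_index_names() helper standing in for the real NDBT call:

#include <cstdio>
#include <cstring>

// Hypothetical lookup: returns a null-terminated list of index names,
// or NULL when no index list is registered for the table at all.
static const char** get_index_names(const char* table)
{
  static const char* t1_indexes[] = { "T1X1", "T1X2", 0 };
  if (std::strcmp(table, "T1") == 0)
    return t1_indexes;
  return 0;
}

static void create_indexes(const char* table)
{
  const char** indlist = get_index_names(table);
  // Check the pointer itself before dereferencing it, then walk to the
  // null terminator; "while (*indlist)" alone would crash on a NULL list.
  while (indlist != 0 && *indlist != 0)
  {
    std::printf("create index %s on %s\n", *indlist, table);
    indlist++;
  }
}

int main()
{
  create_indexes("T1"); // walks both entries
  create_indexes("T2"); // no list registered: loop is skipped, no crash
  return 0;
}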