summaryrefslogtreecommitdiff
path: root/storage
diff options
context:
space:
mode:
Diffstat (limited to 'storage')
-rw-r--r--storage/archive/archive_reader.c1
-rw-r--r--storage/archive/archive_test.c12
-rw-r--r--storage/archive/azio.c10
-rw-r--r--storage/blackhole/ha_blackhole.cc2
-rw-r--r--storage/innobase/dict/dict0crea.c4
-rw-r--r--storage/innobase/dict/dict0dict.c10
-rw-r--r--storage/innobase/dict/dict0load.c6
-rw-r--r--storage/innobase/dict/dict0mem.c22
-rw-r--r--storage/innobase/eval/eval0proc.c2
-rw-r--r--storage/innobase/handler/ha_innodb.cc24
-rw-r--r--storage/innobase/include/ut0byte.ic4
-rw-r--r--storage/innobase/include/ut0ut.ic2
-rw-r--r--storage/innobase/mtr/mtr0log.c2
-rw-r--r--storage/innobase/pars/lexyy.c2
-rw-r--r--storage/innobase/pars/pars0lex.l2
-rw-r--r--storage/innobase/rem/rem0cmp.c2
-rw-r--r--storage/innobase/row/row0mysql.c4
-rw-r--r--storage/innobase/row/row0sel.c2
-rw-r--r--storage/innobase/sync/sync0rw.c8
-rw-r--r--storage/innobase/trx/trx0trx.c2
-rw-r--r--storage/myisam/ha_myisam.cc22
-rw-r--r--storage/myisam/mi_dynrec.c8
-rw-r--r--storage/myisam/mi_extra.c7
-rw-r--r--storage/myisam/mi_log.c2
-rw-r--r--storage/myisam/mi_open.c16
-rw-r--r--storage/myisam/mi_packrec.c2
-rw-r--r--storage/myisam/myisamchk.c1
-rw-r--r--storage/myisammrg/ha_myisammrg.cc3
-rw-r--r--storage/ndb/include/kernel/GlobalSignalNumbers.h10
-rw-r--r--storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp2
-rw-r--r--storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp5
-rw-r--r--storage/ndb/include/mgmapi/mgmapi.h1
-rw-r--r--storage/ndb/include/ndbapi/NdbOperation.hpp21
-rw-r--r--storage/ndb/include/util/OutputStream.hpp4
-rw-r--r--storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp17
-rw-r--r--storage/ndb/src/common/debugger/EventLogger.cpp19
-rw-r--r--storage/ndb/src/common/debugger/signaldata/SignalNames.cpp7
-rw-r--r--storage/ndb/src/common/util/ConfigValues.cpp8
-rw-r--r--storage/ndb/src/common/util/NdbSqlUtil.cpp2
-rw-r--r--storage/ndb/src/common/util/OutputStream.cpp8
-rw-r--r--storage/ndb/src/cw/cpcd/CPCD.hpp1
-rw-r--r--storage/ndb/src/cw/cpcd/Makefile.am2
-rw-r--r--storage/ndb/src/kernel/blocks/ERROR_codes.txt4
-rw-r--r--storage/ndb/src/kernel/blocks/backup/Backup.cpp3
-rw-r--r--storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp196
-rw-r--r--storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp6
-rw-r--r--storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp2
-rw-r--r--storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp1
-rw-r--r--storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp29
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp11
-rw-r--r--storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp1
-rw-r--r--storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp2
-rw-r--r--storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp2
-rw-r--r--storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp204
-rw-r--r--storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp1
-rw-r--r--storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp1
-rw-r--r--storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp51
-rw-r--r--storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp1
-rw-r--r--storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp1
-rw-r--r--storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp34
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp4
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp4
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp2
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp7
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp11
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp1
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp28
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp9
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp7
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp1
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp1
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/tuppage.cpp3
-rw-r--r--storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp2
-rw-r--r--storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp1
-rw-r--r--storage/ndb/src/kernel/blocks/diskpage.hpp3
-rw-r--r--storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp1
-rw-r--r--storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp1
-rw-r--r--storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp19
-rw-r--r--storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp1
-rw-r--r--storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp1
-rw-r--r--storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp28
-rw-r--r--storage/ndb/src/kernel/vm/WatchDog.cpp7
-rw-r--r--storage/ndb/src/kernel/vm/ndbd_malloc.cpp2
-rw-r--r--storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp1
-rw-r--r--storage/ndb/src/mgmapi/mgmapi.cpp37
-rw-r--r--storage/ndb/src/mgmclient/main.cpp2
-rw-r--r--storage/ndb/src/mgmsrv/InitConfigFileParser.cpp3
-rw-r--r--storage/ndb/src/mgmsrv/MgmtSrvr.cpp94
-rw-r--r--storage/ndb/src/mgmsrv/MgmtSrvr.hpp1
-rw-r--r--storage/ndb/src/mgmsrv/Services.cpp94
-rw-r--r--storage/ndb/src/mgmsrv/Services.hpp9
-rw-r--r--storage/ndb/src/ndbapi/ClusterMgr.cpp5
-rw-r--r--storage/ndb/src/ndbapi/ClusterMgr.hpp1
-rw-r--r--storage/ndb/src/ndbapi/Makefile.am9
-rw-r--r--storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp5
-rw-r--r--storage/ndb/src/ndbapi/NdbOperationDefine.cpp20
-rw-r--r--storage/ndb/src/ndbapi/NdbOperationExec.cpp4
-rw-r--r--storage/ndb/src/ndbapi/ObjectMap.hpp13
-rw-r--r--storage/ndb/src/ndbapi/SignalSender.cpp9
-rw-r--r--storage/ndb/src/ndbapi/SignalSender.hpp2
-rw-r--r--storage/ndb/src/ndbapi/TransporterFacade.cpp3
-rw-r--r--storage/ndb/src/ndbapi/ndberror.c2
-rw-r--r--storage/ndb/src/ndbapi/ndberror_check.c38
-rw-r--r--storage/ndb/test/include/NdbRestarter.hpp2
-rw-r--r--storage/ndb/test/ndbapi/testBitfield.cpp16
-rw-r--r--storage/ndb/test/ndbapi/testDict.cpp6
-rw-r--r--storage/ndb/test/ndbapi/testNodeRestart.cpp85
-rw-r--r--storage/ndb/test/run-test/Makefile.am22
-rwxr-xr-xstorage/ndb/test/run-test/atrt-gather-result.sh2
-rw-r--r--storage/ndb/test/run-test/atrt.hpp161
-rw-r--r--storage/ndb/test/run-test/autotest-boot.sh165
-rw-r--r--storage/ndb/test/run-test/autotest-run.sh270
-rw-r--r--storage/ndb/test/run-test/conf-daily-basic-ndb08.txt19
-rw-r--r--storage/ndb/test/run-test/conf-daily-devel-ndb08.txt19
-rw-r--r--storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt22
-rw-r--r--storage/ndb/test/run-test/conf-daily-sql-ndb08.txt20
-rw-r--r--storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt20
-rw-r--r--storage/ndb/test/run-test/conf-dl145a.cnf23
-rw-r--r--storage/ndb/test/run-test/conf-dl145a.txt22
-rw-r--r--storage/ndb/test/run-test/conf-ndbmaster.cnf23
-rw-r--r--storage/ndb/test/run-test/conf-ndbmaster.txt22
-rw-r--r--storage/ndb/test/run-test/conf-repl.cnf28
-rw-r--r--storage/ndb/test/run-test/conf-shark.txt22
-rw-r--r--storage/ndb/test/run-test/daily-basic-tests.txt10
-rw-r--r--storage/ndb/test/run-test/example-my.cnf116
-rw-r--r--storage/ndb/test/run-test/files.cpp383
-rw-r--r--storage/ndb/test/run-test/main.cpp1101
-rw-r--r--storage/ndb/test/run-test/run-test.hpp95
-rw-r--r--storage/ndb/test/run-test/setup.cpp965
-rw-r--r--storage/ndb/test/run-test/test-tests.txt24
-rw-r--r--storage/ndb/test/src/HugoOperations.cpp17
-rw-r--r--storage/ndb/test/src/NdbRestarter.cpp62
-rw-r--r--storage/ndb/test/tools/Makefile.am1
-rw-r--r--storage/ndb/tools/Makefile.am2
-rw-r--r--storage/ndb/tools/ndb_config.cpp (renamed from storage/ndb/tools/ndb_condig.cpp)0
-rw-r--r--storage/ndb/tools/restore/consumer_restore.cpp6
136 files changed, 3583 insertions, 1505 deletions
diff --git a/storage/archive/archive_reader.c b/storage/archive/archive_reader.c
index 66b90130882..14018217dea 100644
--- a/storage/archive/archive_reader.c
+++ b/storage/archive/archive_reader.c
@@ -4,6 +4,7 @@
#include <stdio.h>
#include <stdarg.h>
#include <m_ctype.h>
+#include <m_string.h>
#include <my_getopt.h>
#include <mysql_version.h>
diff --git a/storage/archive/archive_test.c b/storage/archive/archive_test.c
index 3c46534e44c..9ac043330fc 100644
--- a/storage/archive/archive_test.c
+++ b/storage/archive/archive_test.c
@@ -36,9 +36,9 @@
char test_string[BUFFER_LEN];
-#define TWOGIG 2147483648
-#define FOURGIG 4294967296
-#define EIGHTGIG 8589934592
+#define TWOGIG LL(2147483648)
+#define FOURGIG LL(4294967296)
+#define EIGHTGIG LL(8589934592)
/* prototypes */
int size_test(unsigned long long length, unsigned long long rows_to_test_for);
@@ -222,9 +222,9 @@ int main(int argc, char *argv[])
/* Start size tests */
printf("About to run 2/4/8 gig tests now, you may want to hit CTRL-C\n");
- size_test(TWOGIG, 2097152);
- size_test(FOURGIG, 4194304);
- size_test(EIGHTGIG, 8388608);
+ size_test(TWOGIG, 2097152L);
+ size_test(FOURGIG, 4194304L);
+ size_test(EIGHTGIG, 8388608L);
return 0;
}
diff --git a/storage/archive/azio.c b/storage/archive/azio.c
index f3084d03ecc..7876dd69cab 100644
--- a/storage/archive/azio.c
+++ b/storage/archive/azio.c
@@ -140,7 +140,8 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd)
else if (s->mode == 'w')
{
unsigned char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
- my_pread(s->file, buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0, MYF(0));
+ my_pread(s->file, (byte*) buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0,
+ MYF(0));
read_header(s, buffer); /* skip the .az header */
my_seek(s->file, 0, MY_SEEK_END, MYF(0));
}
@@ -609,7 +610,8 @@ int ZEXPORT azflush (s, flush)
if (s->mode == 'r')
{
unsigned char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
- my_pread(s->file, buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0, MYF(0));
+ my_pread(s->file, (byte*) buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0,
+ MYF(0));
read_header(s, buffer); /* skip the .az header */
return Z_OK;
@@ -810,7 +812,7 @@ int azwrite_frm(azio_stream *s, char *blob, unsigned int length)
if (s->rows > 0)
return 1;
- s->frm_start_pos= s->start;
+ s->frm_start_pos= (uint) s->start;
s->frm_length= length;
s->start+= length;
@@ -841,7 +843,7 @@ int azwrite_comment(azio_stream *s, char *blob, unsigned int length)
if (s->rows > 0)
return 1;
- s->comment_start_pos= s->start;
+ s->comment_start_pos= (uint) s->start;
s->comment_length= length;
s->start+= length;
diff --git a/storage/blackhole/ha_blackhole.cc b/storage/blackhole/ha_blackhole.cc
index 7bdb4e40b3d..3fbb4e9ef93 100644
--- a/storage/blackhole/ha_blackhole.cc
+++ b/storage/blackhole/ha_blackhole.cc
@@ -155,7 +155,7 @@ int ha_blackhole::index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{
DBUG_ENTER("ha_blackhole::index_read");
- DBUG_RETURN(0);
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
}
diff --git a/storage/innobase/dict/dict0crea.c b/storage/innobase/dict/dict0crea.c
index 33e328d1e0b..76474c72c43 100644
--- a/storage/innobase/dict/dict0crea.c
+++ b/storage/innobase/dict/dict0crea.c
@@ -255,7 +255,7 @@ dict_build_table_def_step(
error = fil_create_new_single_table_tablespace(
&space, path_or_name, is_path,
FIL_IBD_FILE_INITIAL_SIZE);
- table->space = space;
+ table->space = (unsigned int) space;
if (error != DB_SUCCESS) {
@@ -806,7 +806,7 @@ dict_truncate_index_tree(
root_page_no = btr_create(type, space, index_id, comp, mtr);
if (index) {
- index->page = root_page_no;
+ index->page = (unsigned int) root_page_no;
} else {
ut_print_timestamp(stderr);
fprintf(stderr,
diff --git a/storage/innobase/dict/dict0dict.c b/storage/innobase/dict/dict0dict.c
index 2b3cfdba27d..6ae02c0b81a 100644
--- a/storage/innobase/dict/dict0dict.c
+++ b/storage/innobase/dict/dict0dict.c
@@ -1415,7 +1415,7 @@ dict_index_add_to_cache(
dict_index_get_nth_field(new_index, i)->col->ord_part = 1;
}
- new_index->page = page_no;
+ new_index->page = (unsigned int) page_no;
rw_lock_create(&new_index->lock, SYNC_INDEX_TREE);
if (!UNIV_UNLIKELY(new_index->type & DICT_UNIVERSAL)) {
@@ -1531,10 +1531,10 @@ dict_index_add_col(
field = dict_index_get_nth_field(index, index->n_def - 1);
field->col = col;
- field->fixed_len = dict_col_get_fixed_size(col);
+ field->fixed_len = (unsigned int) dict_col_get_fixed_size(col);
if (prefix_len && field->fixed_len > prefix_len) {
- field->fixed_len = prefix_len;
+ field->fixed_len = (unsigned int) prefix_len;
}
/* Long fixed-length fields that need external storage are treated as
@@ -1736,7 +1736,7 @@ dict_index_build_internal_clust(
break;
}
- new_index->trx_id_offset += fixed_size;
+ new_index->trx_id_offset += (unsigned int) fixed_size;
}
}
@@ -3045,7 +3045,7 @@ col_loop1:
foreign->foreign_table_name = mem_heap_strdup(foreign->heap,
table->name);
foreign->foreign_index = index;
- foreign->n_fields = i;
+ foreign->n_fields = (unsigned int) i;
foreign->foreign_col_names = mem_heap_alloc(foreign->heap,
i * sizeof(void*));
for (i = 0; i < foreign->n_fields; i++) {
diff --git a/storage/innobase/dict/dict0load.c b/storage/innobase/dict/dict0load.c
index f4f8c2d5d23..e23795f9898 100644
--- a/storage/innobase/dict/dict0load.c
+++ b/storage/innobase/dict/dict0load.c
@@ -843,7 +843,7 @@ err_exit:
table = dict_mem_table_create(name, space, n_cols & ~0x80000000UL,
flags);
- table->ibd_file_missing = ibd_file_missing;
+ table->ibd_file_missing = (unsigned int) ibd_file_missing;
ut_a(name_of_col_is(sys_tables, sys_index, 3, "ID"));
@@ -1180,8 +1180,8 @@ dict_load_foreign(
/* We store the type in the bits 24..29 of n_fields_and_type. */
- foreign->type = n_fields_and_type >> 24;
- foreign->n_fields = n_fields_and_type & 0x3FFUL;
+ foreign->type = (unsigned int) (n_fields_and_type >> 24);
+ foreign->n_fields = (unsigned int) (n_fields_and_type & 0x3FFUL);
foreign->id = mem_heap_strdup(foreign->heap, id);
diff --git a/storage/innobase/dict/dict0mem.c b/storage/innobase/dict/dict0mem.c
index cee0ffec20b..9aa49dee745 100644
--- a/storage/innobase/dict/dict0mem.c
+++ b/storage/innobase/dict/dict0mem.c
@@ -50,14 +50,14 @@ dict_mem_table_create(
table->heap = heap;
- table->flags = flags;
+ table->flags = (unsigned int) flags;
table->name = mem_heap_strdup(heap, name);
table->dir_path_of_temp_table = NULL;
- table->space = space;
+ table->space = (unsigned int) space;
table->ibd_file_missing = FALSE;
table->tablespace_discarded = FALSE;
table->n_def = 0;
- table->n_cols = n_cols + DATA_N_SYS_COLS;
+ table->n_cols = (unsigned int) (n_cols + DATA_N_SYS_COLS);
table->n_mysql_handles_opened = 0;
table->n_foreign_key_checks_running = 0;
@@ -208,14 +208,14 @@ dict_mem_table_add_col(
col->ind = table->n_def - 1;
col->ord_part = 0;
- col->mtype = mtype;
- col->prtype = prtype;
- col->len = len;
+ col->mtype = (unsigned int) mtype;
+ col->prtype = (unsigned int) prtype;
+ col->len = (unsigned int) len;
dtype_get_mblen(mtype, prtype, &mbminlen, &mbmaxlen);
- col->mbminlen = mbminlen;
- col->mbmaxlen = mbmaxlen;
+ col->mbminlen = (unsigned int) mbminlen;
+ col->mbmaxlen = (unsigned int) mbmaxlen;
}
/**************************************************************************
@@ -245,13 +245,13 @@ dict_mem_index_create(
index->heap = heap;
index->type = type;
- index->space = space;
+ index->space = (unsigned int) space;
index->page = 0;
index->name = mem_heap_strdup(heap, index_name);
index->table_name = table_name;
index->table = NULL;
index->n_def = index->n_nullable = 0;
- index->n_fields = n_fields;
+ index->n_fields = (unsigned int) n_fields;
index->fields = mem_heap_alloc(heap, 1 + n_fields
* sizeof(dict_field_t));
/* The '1 +' above prevents allocation
@@ -326,7 +326,7 @@ dict_mem_index_add_field(
field = dict_index_get_nth_field(index, index->n_def - 1);
field->name = name;
- field->prefix_len = prefix_len;
+ field->prefix_len = (unsigned int) prefix_len;
}
/**************************************************************************
diff --git a/storage/innobase/eval/eval0proc.c b/storage/innobase/eval/eval0proc.c
index f5a9d9dc2a8..a513e8e4024 100644
--- a/storage/innobase/eval/eval0proc.c
+++ b/storage/innobase/eval/eval0proc.c
@@ -194,7 +194,7 @@ for_step(
loop_var_value = eval_node_get_int_val(node->loop_start_limit);
node->loop_end_value
- = eval_node_get_int_val(node->loop_end_limit);
+ = (int) eval_node_get_int_val(node->loop_end_limit);
}
/* Check if we should do another loop */
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index b5b354d4b39..14e93cca66f 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -695,7 +695,7 @@ innobase_convert_from_table_id(
uint errors;
strconvert(current_thd->charset(), from,
- &my_charset_filename, to, len, &errors);
+ &my_charset_filename, to, (uint) len, &errors);
}
/**********************************************************************
@@ -714,7 +714,7 @@ innobase_convert_from_id(
uint errors;
strconvert(current_thd->charset(), from,
- system_charset_info, to, len, &errors);
+ system_charset_info, to, (uint) len, &errors);
}
/**********************************************************************
@@ -839,8 +839,9 @@ innobase_convert_string(
CHARSET_INFO* from_cs,
uint* errors)
{
- return(copy_and_convert((char*)to, to_length, to_cs,
- (const char*)from, from_length, from_cs, errors));
+ return(copy_and_convert((char*)to, (uint32) to_length, to_cs,
+ (const char*)from, (uint32) from_length, from_cs,
+ errors));
}
/*************************************************************************
@@ -1203,9 +1204,9 @@ innobase_print_identifier(
output strings buffers must not be shared. The function
only produces more output when the name contains other
characters than [0-9A-Z_a-z]. */
- char* temp_name = my_malloc(namelen + 1, MYF(MY_WME));
- uint qnamelen = namelen
- + (1 + sizeof srv_mysql50_table_name_prefix);
+ char* temp_name = my_malloc((uint) namelen + 1, MYF(MY_WME));
+ uint qnamelen = (uint) (namelen
+ + (1 + sizeof srv_mysql50_table_name_prefix));
if (temp_name) {
qname = my_malloc(qnamelen, MYF(MY_WME));
@@ -2866,7 +2867,8 @@ ha_innobase::store_key_val_for_row(
true_len = (ulint) cs->cset->well_formed_len(cs,
(const char *) data,
(const char *) data + len,
- key_len / cs->mbmaxlen,
+ (uint) (key_len /
+ cs->mbmaxlen),
&error);
}
@@ -2935,7 +2937,8 @@ ha_innobase::store_key_val_for_row(
(const char *) blob_data,
(const char *) blob_data
+ blob_len,
- key_len / cs->mbmaxlen,
+ (uint) (key_len /
+ cs->mbmaxlen),
&error);
}
@@ -3007,7 +3010,8 @@ ha_innobase::store_key_val_for_row(
(const char *)src_start,
(const char *)src_start
+ key_len,
- key_len / cs->mbmaxlen,
+ (uint) (key_len /
+ cs->mbmaxlen),
&error);
}
}
diff --git a/storage/innobase/include/ut0byte.ic b/storage/innobase/include/ut0byte.ic
index 020cf9cedd9..01b6c29d08f 100644
--- a/storage/innobase/include/ut0byte.ic
+++ b/storage/innobase/include/ut0byte.ic
@@ -390,8 +390,8 @@ ut_bit_set_nth(
# error "TRUE != 1"
#endif
if (val) {
- return((1 << n) | a);
+ return(((ulint) 1 << n) | a);
} else {
- return(~(1 << n) & a);
+ return(~((ulint) 1 << n) & a);
}
}
diff --git a/storage/innobase/include/ut0ut.ic b/storage/innobase/include/ut0ut.ic
index 7b080216117..412717a094e 100644
--- a/storage/innobase/include/ut0ut.ic
+++ b/storage/innobase/include/ut0ut.ic
@@ -170,5 +170,5 @@ ut_2_exp(
/* out: 2 to power n */
ulint n) /* in: number */
{
- return(1 << n);
+ return((ulint) 1 << n);
}
diff --git a/storage/innobase/mtr/mtr0log.c b/storage/innobase/mtr/mtr0log.c
index cb03f207a56..f9704dc2d20 100644
--- a/storage/innobase/mtr/mtr0log.c
+++ b/storage/innobase/mtr/mtr0log.c
@@ -529,7 +529,7 @@ mlog_parse_index(
ind = dict_mem_index_create("LOG_DUMMY", "LOG_DUMMY",
DICT_HDR_SPACE, 0, n);
ind->table = table;
- ind->n_uniq = n_uniq;
+ ind->n_uniq = (unsigned int) n_uniq;
if (n_uniq != n) {
ind->type = DICT_CLUSTERED;
}
diff --git a/storage/innobase/pars/lexyy.c b/storage/innobase/pars/lexyy.c
index 70daf261186..b65de138573 100644
--- a/storage/innobase/pars/lexyy.c
+++ b/storage/innobase/pars/lexyy.c
@@ -1017,7 +1017,7 @@ YY_RULE_SETUP
yylval = sym_tab_add_bound_lit(pars_sym_tab_global,
yytext + 1, &type);
- return(type);
+ return((int) type);
}
YY_BREAK
case 4:
diff --git a/storage/innobase/pars/pars0lex.l b/storage/innobase/pars/pars0lex.l
index 71ac4c98267..ad65034fab0 100644
--- a/storage/innobase/pars/pars0lex.l
+++ b/storage/innobase/pars/pars0lex.l
@@ -109,7 +109,7 @@ BOUND_ID \$[a-z_A-Z0-9]+
yylval = sym_tab_add_bound_lit(pars_sym_tab_global,
yytext + 1, &type);
- return(type);
+ return((int) type);
}
{BOUND_ID} {
diff --git a/storage/innobase/rem/rem0cmp.c b/storage/innobase/rem/rem0cmp.c
index 07e5b64c157..ca0ec663548 100644
--- a/storage/innobase/rem/rem0cmp.c
+++ b/storage/innobase/rem/rem0cmp.c
@@ -597,7 +597,7 @@ cmp_dtuple_rec_with_match(
dtuple_byte = cmp_collate(dtuple_byte);
}
- ret = dtuple_byte - rec_byte;
+ ret = (int) (dtuple_byte - rec_byte);
if (UNIV_UNLIKELY(ret)) {
if (ret < 0) {
ret = -1;
diff --git a/storage/innobase/row/row0mysql.c b/storage/innobase/row/row0mysql.c
index 6779f536daa..78851c7b4f9 100644
--- a/storage/innobase/row/row0mysql.c
+++ b/storage/innobase/row/row0mysql.c
@@ -3423,7 +3423,7 @@ row_delete_constraint_low(
pars_info_add_str_literal(info, "id", id);
- return(que_eval_sql(info,
+ return((int) que_eval_sql(info,
"PROCEDURE DELETE_CONSTRAINT () IS\n"
"BEGIN\n"
"DELETE FROM SYS_FOREIGN_COLS WHERE ID = :id;\n"
@@ -3462,7 +3462,7 @@ row_delete_constraint(
err = row_delete_constraint_low(id, trx);
}
- return(err);
+ return((int) err);
}
/*************************************************************************
diff --git a/storage/innobase/row/row0sel.c b/storage/innobase/row/row0sel.c
index bee9f1472ce..a3199055b54 100644
--- a/storage/innobase/row/row0sel.c
+++ b/storage/innobase/row/row0sel.c
@@ -2116,7 +2116,7 @@ row_fetch_store_uint4(
ut_a(len == 4);
tmp = mach_read_from_4(dfield_get_data(dfield));
- *val = tmp;
+ *val = (ib_uint32_t) tmp;
return(NULL);
}
diff --git a/storage/innobase/sync/sync0rw.c b/storage/innobase/sync/sync0rw.c
index 549ad36271b..f06db577bad 100644
--- a/storage/innobase/sync/sync0rw.c
+++ b/storage/innobase/sync/sync0rw.c
@@ -127,7 +127,7 @@ rw_lock_create_func(
lock->magic_n = RW_LOCK_MAGIC_N;
lock->cfile_name = cfile_name;
- lock->cline = cline;
+ lock->cline = (unsigned int) cline;
lock->last_s_file_name = "not yet reserved";
lock->last_x_file_name = "not yet reserved";
@@ -356,7 +356,7 @@ rw_lock_x_lock_low(
file_name, line);
#endif
lock->last_x_file_name = file_name;
- lock->last_x_line = line;
+ lock->last_x_line = (unsigned int) line;
/* Locking succeeded, we may return */
return(RW_LOCK_EX);
@@ -393,7 +393,7 @@ rw_lock_x_lock_low(
#endif
lock->last_x_file_name = file_name;
- lock->last_x_line = line;
+ lock->last_x_line = (unsigned int) line;
/* Locking succeeded, we may return */
return(RW_LOCK_EX);
@@ -415,7 +415,7 @@ rw_lock_x_lock_low(
#endif
lock->last_x_file_name = file_name;
- lock->last_x_line = line;
+ lock->last_x_line = (unsigned int) line;
/* Locking succeeded, we may return */
return(RW_LOCK_EX);
diff --git a/storage/innobase/trx/trx0trx.c b/storage/innobase/trx/trx0trx.c
index cfa2b01f406..6f59d2659ec 100644
--- a/storage/innobase/trx/trx0trx.c
+++ b/storage/innobase/trx/trx0trx.c
@@ -2023,7 +2023,7 @@ trx_recover_for_mysql(
(ulong) count);
}
- return (count);
+ return ((int) count);
}
/***********************************************************************
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index 764c53d2f75..06ec5c4b44e 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -598,15 +598,31 @@ bool ha_myisam::check_if_locking_is_allowed(uint sql_command,
int ha_myisam::open(const char *name, int mode, uint test_if_locked)
{
uint i;
+
+ /*
+ If the user wants to have memory mapped data files, add an
+ open_flag. Do not memory map temporary tables because they are
+ expected to be inserted and thus extended a lot. Memory mapping is
+ efficient for files that keep their size, but very inefficient for
+ growing files. Using an open_flag instead of calling mi_extra(...
+ HA_EXTRA_MMAP ...) after mi_open() has the advantage that the
+ mapping is not repeated for every open, but just done on the initial
+ open, when the MyISAM share is created. Everytime the server
+ requires to open a new instance of a table it calls this method. We
+ will always supply HA_OPEN_MMAP for a permanent table. However, the
+ MyISAM storage engine will ignore this flag if this is a secondary
+ open of a table that is in use by other threads already (if the
+ MyISAM share exists already).
+ */
+ if (!(test_if_locked & HA_OPEN_TMP_TABLE) && opt_myisam_use_mmap)
+ test_if_locked|= HA_OPEN_MMAP;
+
if (!(file=mi_open(name, mode, test_if_locked | HA_OPEN_FROM_SQL_LAYER)))
return (my_errno ? my_errno : -1);
if (test_if_locked & (HA_OPEN_IGNORE_IF_LOCKED | HA_OPEN_TMP_TABLE))
VOID(mi_extra(file, HA_EXTRA_NO_WAIT_LOCK, 0));
- if (!(test_if_locked & HA_OPEN_TMP_TABLE) && opt_myisam_use_mmap)
- VOID(mi_extra(file, HA_EXTRA_MMAP, 0));
-
info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED))
VOID(mi_extra(file, HA_EXTRA_WAIT_LOCK, 0));
diff --git a/storage/myisam/mi_dynrec.c b/storage/myisam/mi_dynrec.c
index 642efbd4389..5342619c79b 100644
--- a/storage/myisam/mi_dynrec.c
+++ b/storage/myisam/mi_dynrec.c
@@ -71,6 +71,14 @@ my_bool mi_dynmap_file(MI_INFO *info, my_off_t size)
DBUG_PRINT("warning", ("File is too large for mmap"));
DBUG_RETURN(1);
}
+ /*
+ I wonder if it is good to use MAP_NORESERVE. From the Linux man page:
+ MAP_NORESERVE
+ Do not reserve swap space for this mapping. When swap space is
+ reserved, one has the guarantee that it is possible to modify the
+ mapping. When swap space is not reserved one might get SIGSEGV
+ upon a write if no physical memory is available.
+ */
info->s->file_map= (byte*)
my_mmap(0, (size_t)(size + MEMMAP_EXTRA_MARGIN),
info->s->mode==O_RDONLY ? PROT_READ :
diff --git a/storage/myisam/mi_extra.c b/storage/myisam/mi_extra.c
index e1288fa6624..ae584b06173 100644
--- a/storage/myisam/mi_extra.c
+++ b/storage/myisam/mi_extra.c
@@ -349,7 +349,12 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
case HA_EXTRA_MMAP:
#ifdef HAVE_MMAP
pthread_mutex_lock(&share->intern_lock);
- if (!share->file_map)
+ /*
+ Memory map the data file if it is not already mapped and if there
+ are no other threads using this table. intern_lock prevents other
+ threads from starting to use the table while we are mapping it.
+ */
+ if (!share->file_map && (share->tot_locks == 1))
{
if (mi_dynmap_file(info, share->state.state.data_file_length))
{
diff --git a/storage/myisam/mi_log.c b/storage/myisam/mi_log.c
index 2672a9dacd6..f720f752a06 100644
--- a/storage/myisam/mi_log.c
+++ b/storage/myisam/mi_log.c
@@ -31,7 +31,7 @@
#undef GETPID /* For HPUX */
#ifdef THREAD
-#define GETPID() (log_type == 1 ? (long) myisam_pid : (long) my_thread_id());
+#define GETPID() (log_type == 1 ? (long) myisam_pid : (long) my_thread_dbug_id())
#else
#define GETPID() myisam_pid
#endif
diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c
index 5e783bf7890..830332fe0c1 100644
--- a/storage/myisam/mi_open.c
+++ b/storage/myisam/mi_open.c
@@ -506,6 +506,22 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
share->data_file_type = DYNAMIC_RECORD;
my_afree((gptr) disk_cache);
mi_setup_functions(share);
+ if (open_flags & HA_OPEN_MMAP)
+ {
+ info.s= share;
+ if (mi_dynmap_file(&info, share->state.state.data_file_length))
+ {
+ /* purecov: begin inspected */
+ /* Ignore if mmap fails. Use file I/O instead. */
+ DBUG_PRINT("warning", ("mmap failed: errno: %d", errno));
+ /* purecov: end */
+ }
+ else
+ {
+ share->file_read= mi_mmap_pread;
+ share->file_write= mi_mmap_pwrite;
+ }
+ }
share->is_log_table= FALSE;
#ifdef THREAD
thr_lock_init(&share->lock);
diff --git a/storage/myisam/mi_packrec.c b/storage/myisam/mi_packrec.c
index 30c95dcb0bd..a5a9aaededd 100644
--- a/storage/myisam/mi_packrec.c
+++ b/storage/myisam/mi_packrec.c
@@ -564,7 +564,7 @@ static void fill_quick_table(uint16 *table, uint bits, uint max_bits,
*/
value|= (max_bits - bits) << 8 | IS_CHAR;
- for (end= table + (1 << bits); table < end; table++)
+ for (end= table + (uint) (((uint) 1 << bits)); table < end; table++)
{
*table= (uint16) value;
}
diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c
index bb4ae9a97ec..066e6cdb81b 100644
--- a/storage/myisam/myisamchk.c
+++ b/storage/myisam/myisamchk.c
@@ -713,6 +713,7 @@ get_one_option(int optid,
case 2:
method_conv= MI_STATS_METHOD_IGNORE_NULLS;
break;
+ default: assert(0); /* Impossible */
}
check_param.stats_method= method_conv;
break;
diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc
index 4392a456f60..d9e7e1d5700 100644
--- a/storage/myisammrg/ha_myisammrg.cc
+++ b/storage/myisammrg/ha_myisammrg.cc
@@ -460,6 +460,7 @@ void ha_myisammrg::update_create_info(HA_CREATE_INFO *create_info)
{
TABLE_LIST *ptr;
LEX_STRING db, name;
+ LINT_INIT(db.str);
if (!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST))))
goto err;
@@ -570,6 +571,8 @@ void ha_myisammrg::append_create_info(String *packet)
open_table++)
{
LEX_STRING db, name;
+ LINT_INIT(db.str);
+
split_file_name(open_table->table->filename, &db, &name);
if (open_table != first)
packet->append(',');
diff --git a/storage/ndb/include/kernel/GlobalSignalNumbers.h b/storage/ndb/include/kernel/GlobalSignalNumbers.h
index fcbdedc44cc..aa0596f102a 100644
--- a/storage/ndb/include/kernel/GlobalSignalNumbers.h
+++ b/storage/ndb/include/kernel/GlobalSignalNumbers.h
@@ -551,13 +551,13 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_ABORT_ALL_REF 446
#define GSN_ABORT_ALL_CONF 447
-#define GSN_STATISTICS_REQ 448
+/* 448 unused - formerly GSN_STATISTICS_REQ */
#define GSN_STOP_ORD 449
#define GSN_TAMPER_ORD 450
-#define GSN_SET_VAR_REQ 451
-#define GSN_SET_VAR_CONF 452
-#define GSN_SET_VAR_REF 453
-#define GSN_STATISTICS_CONF 454
+/* 451 unused - formerly GSN_SET_VAR_REQ */
+/* 452 unused - formerly GSN_SET_VAR_CONF */
+/* 453 unused - formerly GSN_SET_VAR_REF */
+/* 454 unused - formerly GSN_STATISTICS_CONF */
#define GSN_START_ORD 455
/* 457 unused */
diff --git a/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp b/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp
index d40f3f7d8cb..b1261431a4e 100644
--- a/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp
+++ b/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp
@@ -64,7 +64,7 @@ struct CreateFilegroupRef {
InvalidFormat = 740,
OutOfFilegroupRecords = 765,
InvalidExtentSize = 764,
- InvalidUndoBufferSize = 763,
+ InvalidUndoBufferSize = 779,
NoSuchLogfileGroup = 767,
InvalidFilegroupVersion = 768
};
diff --git a/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp b/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp
index 27bb9af03c0..46c5ef3751b 100644
--- a/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp
+++ b/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp
@@ -107,7 +107,10 @@ public:
CmvmiDumpLongSignalMemory = 2601,
CmvmiSetRestartOnErrorInsert = 2602,
CmvmiTestLongSigWithDelay = 2603,
-
+ CmvmiDumpSubscriptions = 2604, /* note: done to respective outfile
+ to be able to debug if events
+ for some reason does not end up
+ in clusterlog */
LCPContinue = 5900,
// 7000 DIH
// 7001 DIH
diff --git a/storage/ndb/include/mgmapi/mgmapi.h b/storage/ndb/include/mgmapi/mgmapi.h
index 42a6b53098f..883d3c43699 100644
--- a/storage/ndb/include/mgmapi/mgmapi.h
+++ b/storage/ndb/include/mgmapi/mgmapi.h
@@ -20,6 +20,7 @@
#include "ndb_logevent.h"
#define MGM_LOGLEVELS CFG_MAX_LOGLEVEL - CFG_MIN_LOGLEVEL + 1
+#define NDB_MGM_MAX_LOGLEVEL 15
/**
* @mainpage MySQL Cluster Management API
diff --git a/storage/ndb/include/ndbapi/NdbOperation.hpp b/storage/ndb/include/ndbapi/NdbOperation.hpp
index 90b90c7e481..380926c6a41 100644
--- a/storage/ndb/include/ndbapi/NdbOperation.hpp
+++ b/storage/ndb/include/ndbapi/NdbOperation.hpp
@@ -98,7 +98,20 @@ public:
};
/**
- * How should transaction be handled if operation fails
+ * How should transaction be handled if operation fails.
+ *
+ * If AO_IgnoreError, a failure in one operation will not abort the
+ * transaction, and NdbTransaction::execute() will return 0 (success). Use
+ * NdbOperation::getNdbError() to check for errors from individual
+ * operations.
+ *
+ * If AbortOnError, a failure in one operation will abort the transaction
+ * and cause NdbTransaction::execute() to return -1.
+ *
+ * Abort option can be set on execute(), or in the individual operation.
+ * Setting AO_IgnoreError or AbortOnError in execute() overrides the settings
+ * on individual operations. Setting DefaultAbortOption in execute() (the
+ * default) causes individual operation settings to be used.
*
* For READ, default is AO_IgnoreError
* DML, default is AbortOnError
@@ -1019,10 +1032,8 @@ protected:
NdbBlob* theBlobList;
/*
- * Abort option per operation, used by blobs. Default -1. If set,
- * overrides abort option on connection level. If set to IgnoreError,
- * does not cause execute() to return failure. This is different from
- * IgnoreError on connection level.
+ * Abort option per operation, used by blobs.
+ * See also comments on enum AbortOption.
*/
Int8 m_abortOption;
diff --git a/storage/ndb/include/util/OutputStream.hpp b/storage/ndb/include/util/OutputStream.hpp
index cbc00fb286a..d56d04adc50 100644
--- a/storage/ndb/include/util/OutputStream.hpp
+++ b/storage/ndb/include/util/OutputStream.hpp
@@ -44,9 +44,9 @@ public:
class SocketOutputStream : public OutputStream {
NDB_SOCKET_TYPE m_socket;
- unsigned m_timeout;
+ unsigned m_timeout_ms;
public:
- SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned writeTimeout = 1000);
+ SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned write_timeout_ms = 1000);
virtual ~SocketOutputStream() {}
int print(const char * fmt, ...);
diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp
index dae99642a24..440face79ae 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp
@@ -19,6 +19,17 @@
// Correct output from this program is:
//
// ATTR1 ATTR2
+// 0 0
+// 1 1
+// 2 2
+// 3 3
+// 4 4
+// 5 5
+// 6 6
+// 7 7
+// 8 8
+// 9 9
+// ATTR1 ATTR2
// 0 10
// 1 1
// 2 12
@@ -166,7 +177,8 @@ int main(int argc, char** argv)
NdbRecAttr *myRecAttr= myIndexOperation->getValue("ATTR1", NULL);
if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError());
- if(myTransaction->execute( NdbTransaction::Commit ) != -1)
+ if(myTransaction->execute( NdbTransaction::Commit,
+ NdbOperation::AbortOnError ) != -1)
printf(" %2d %2d\n", myRecAttr->u_32_value(), i);
myNdb->closeTransaction(myTransaction);
@@ -232,7 +244,8 @@ int main(int argc, char** argv)
NdbRecAttr *myRecAttr= myOperation->getValue("ATTR2", NULL);
if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError());
- if(myTransaction->execute( NdbTransaction::Commit ) == -1)
+ if(myTransaction->execute( NdbTransaction::Commit,
+ NdbOperation::AbortOnError ) == -1)
if (i == 3) {
std::cout << "Detected that deleted tuple doesn't exist!\n";
} else {
diff --git a/storage/ndb/src/common/debugger/EventLogger.cpp b/storage/ndb/src/common/debugger/EventLogger.cpp
index 4c0c4c44344..4e9ba906645 100644
--- a/storage/ndb/src/common/debugger/EventLogger.cpp
+++ b/storage/ndb/src/common/debugger/EventLogger.cpp
@@ -530,8 +530,8 @@ void getTextUndoLogBlocked(QQQQ) {
void getTextTransporterError(QQQQ) {
struct myTransporterError{
- int errorNum;
- char errorString[256];
+ Uint32 errorNum;
+ char errorString[256];
};
int i = 0;
int lenth = 0;
@@ -608,19 +608,21 @@ void getTextTransporterError(QQQQ) {
lenth = sizeof(TransporterErrorString)/sizeof(struct myTransporterError);
for(i=0; i<lenth; i++)
{
- if(theData[2] == TransporterErrorString[i].errorNum)
+ if(theData[2] == (Uint32) TransporterErrorString[i].errorNum)
{
BaseString::snprintf(m_text, m_text_len,
- "Transporter to node %d reported error: %s",
- theData[1],
- TransporterErrorString[i].errorString);
+ "Transporter to node %d reported error 0x%x: %s",
+ theData[1],
+ theData[2],
+ TransporterErrorString[i].errorString);
break;
}
}
if(i == lenth)
BaseString::snprintf(m_text, m_text_len,
- "Transporter to node %d reported error: no such error",
- theData[1]);
+ "Transporter to node %d reported error 0x%x: unknown error",
+ theData[1],
+ theData[2]);
}
void getTextTransporterWarning(QQQQ) {
getTextTransporterError(m_text, m_text_len, theData);
@@ -1041,6 +1043,7 @@ EventLogger::close()
}
#ifdef NOT_USED
+
static NdbOut&
operator<<(NdbOut& out, const LogLevel & ll)
{
diff --git a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
index 8530187963d..884a49b3a94 100644
--- a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
@@ -350,15 +350,10 @@ const GsnName SignalNames [] = {
,{ GSN_TUP_WRITELOG_REQ, "TUP_WRITELOG_REQ" }
,{ GSN_LQH_WRITELOG_REQ, "LQH_WRITELOG_REQ" }
- ,{ GSN_STATISTICS_REQ, "STATISTICS_REQ" }
,{ GSN_START_ORD, "START_ORD" }
,{ GSN_STOP_ORD, "STOP_ORD" }
,{ GSN_TAMPER_ORD, "TAMPER_ORD" }
- ,{ GSN_SET_VAR_REQ, "SET_VAR_REQ" }
- ,{ GSN_SET_VAR_CONF, "SET_VAR_CONF" }
- ,{ GSN_SET_VAR_REF, "SET_VAR_REF" }
- ,{ GSN_STATISTICS_CONF, "STATISTICS_CONF" }
-
+
,{ GSN_EVENT_SUBSCRIBE_REQ, "EVENT_SUBSCRIBE_REQ" }
,{ GSN_EVENT_SUBSCRIBE_CONF, "EVENT_SUBSCRIBE_CONF" }
,{ GSN_EVENT_SUBSCRIBE_REF, "EVENT_SUBSCRIBE_REF" }
diff --git a/storage/ndb/src/common/util/ConfigValues.cpp b/storage/ndb/src/common/util/ConfigValues.cpp
index 6652fd5753b..9309fe3fbd6 100644
--- a/storage/ndb/src/common/util/ConfigValues.cpp
+++ b/storage/ndb/src/common/util/ConfigValues.cpp
@@ -88,18 +88,18 @@ bool
ConfigValues::getByPos(Uint32 pos, Entry * result) const {
assert(pos < (2 * m_size));
Uint32 keypart = m_values[pos];
- Uint32 val = m_values[pos+1];
+ Uint32 val2 = m_values[pos+1];
switch(::getTypeOf(keypart)){
case IntType:
case SectionType:
- result->m_int = val;
+ result->m_int = val2;
break;
case StringType:
- result->m_string = * getString(val);
+ result->m_string = * getString(val2);
break;
case Int64Type:
- result->m_int64 = * get64(val);
+ result->m_int64 = * get64(val2);
break;
case InvalidType:
default:
diff --git a/storage/ndb/src/common/util/NdbSqlUtil.cpp b/storage/ndb/src/common/util/NdbSqlUtil.cpp
index 1234e4ece6b..0f62d66c149 100644
--- a/storage/ndb/src/common/util/NdbSqlUtil.cpp
+++ b/storage/ndb/src/common/util/NdbSqlUtil.cpp
@@ -681,8 +681,6 @@ int
NdbSqlUtil::cmpBit(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
Uint32 n = (n1 < n2) ? n1 : n2;
- char* c1 = (char*)p1;
- char* c2 = (char*)p2;
int ret = memcmp(p1, p2, n);
return ret;
}
diff --git a/storage/ndb/src/common/util/OutputStream.cpp b/storage/ndb/src/common/util/OutputStream.cpp
index 322b270d1cf..99216ba5a28 100644
--- a/storage/ndb/src/common/util/OutputStream.cpp
+++ b/storage/ndb/src/common/util/OutputStream.cpp
@@ -42,16 +42,16 @@ FileOutputStream::println(const char * fmt, ...){
}
SocketOutputStream::SocketOutputStream(NDB_SOCKET_TYPE socket,
- unsigned timeout){
+ unsigned write_timeout_ms){
m_socket = socket;
- m_timeout = timeout;
+ m_timeout_ms = write_timeout_ms;
}
int
SocketOutputStream::print(const char * fmt, ...){
va_list ap;
va_start(ap, fmt);
- const int ret = vprint_socket(m_socket, m_timeout, fmt, ap);
+ const int ret = vprint_socket(m_socket, m_timeout_ms, fmt, ap);
va_end(ap);
return ret;
}
@@ -59,7 +59,7 @@ int
SocketOutputStream::println(const char * fmt, ...){
va_list ap;
va_start(ap, fmt);
- const int ret = vprintln_socket(m_socket, m_timeout, fmt, ap);
+ const int ret = vprintln_socket(m_socket, m_timeout_ms, fmt, ap);
va_end(ap);
return ret;
}
diff --git a/storage/ndb/src/cw/cpcd/CPCD.hpp b/storage/ndb/src/cw/cpcd/CPCD.hpp
index 2cada43b609..4d48bba096f 100644
--- a/storage/ndb/src/cw/cpcd/CPCD.hpp
+++ b/storage/ndb/src/cw/cpcd/CPCD.hpp
@@ -62,6 +62,7 @@ struct CPCEvent {
struct EventSubscriber {
virtual void report(const CPCEvent &) = 0;
+ EventSubscriber() {}
virtual ~EventSubscriber() {}
};
diff --git a/storage/ndb/src/cw/cpcd/Makefile.am b/storage/ndb/src/cw/cpcd/Makefile.am
index dfd2e8c270b..efc828e21a9 100644
--- a/storage/ndb/src/cw/cpcd/Makefile.am
+++ b/storage/ndb/src/cw/cpcd/Makefile.am
@@ -26,7 +26,7 @@ LDADD_LOC = \
include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_util.mk.am
-ndb_cpcd_LDFLAGS = @ndb_bin_am_ldflags@
+ndb_cpcd_LDFLAGS = -static @ndb_bin_am_ldflags@
# Don't update the files from bitkeeper
%::SCCS/s.%
diff --git a/storage/ndb/src/kernel/blocks/ERROR_codes.txt b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
index 2fc28c8ac07..c91a2da15d1 100644
--- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -5,7 +5,7 @@ Next DBACC 3002
Next DBTUP 4024
Next DBLQH 5045
Next DBDICT 6007
-Next DBDIH 7178
+Next DBDIH 7181
Next DBTC 8039
Next CMVMI 9000
Next BACKUP 10038
@@ -73,6 +73,8 @@ Delay GCP_SAVEREQ by 10 secs
7177: Delay copying of sysfileData in execCOPY_GCIREQ
+7180: Crash master during master-take-over in execMASTER_LCPCONF
+
ERROR CODES FOR TESTING NODE FAILURE, LOCAL CHECKPOINT HANDLING:
-----------------------------------------------------------------
diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp
index cb85c2c5e7e..fc698d161e0 100644
--- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -1488,7 +1488,6 @@ Backup::execCREATE_TRIG_CONF(Signal* signal)
const Uint32 ptrI = conf->getConnectionPtr();
const Uint32 tableId = conf->getTableId();
const TriggerEvent::Value type = conf->getTriggerEvent();
- const Uint32 triggerId = conf->getTriggerId();
BackupRecordPtr ptr LINT_SET_PTR;
c_backupPool.getPtr(ptr, ptrI);
@@ -2152,7 +2151,6 @@ Backup::execDROP_TRIG_CONF(Signal* signal)
DropTrigConf* conf = (DropTrigConf*)signal->getDataPtr();
const Uint32 ptrI = conf->getConnectionPtr();
- const Uint32 triggerId= conf->getTriggerId();
BackupRecordPtr ptr LINT_SET_PTR;
c_backupPool.getPtr(ptr, ptrI);
@@ -4658,7 +4656,6 @@ Backup::execABORT_BACKUP_ORD(Signal* signal)
}
ndbrequire(ok);
- Uint32 ref= ptr.p->masterRef;
ptr.p->masterRef = reference();
ptr.p->nodes.clear();
ptr.p->nodes.set(getOwnNodeId());
diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
index ddf0dc95098..3fe85de73e6 100644
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
@@ -78,11 +78,7 @@ Cmvmi::Cmvmi(Block_context& ctx) :
addRecSignal(GSN_OPEN_COMREQ, &Cmvmi::execOPEN_COMREQ);
addRecSignal(GSN_TEST_ORD, &Cmvmi::execTEST_ORD);
- addRecSignal(GSN_STATISTICS_REQ, &Cmvmi::execSTATISTICS_REQ);
addRecSignal(GSN_TAMPER_ORD, &Cmvmi::execTAMPER_ORD);
- addRecSignal(GSN_SET_VAR_REQ, &Cmvmi::execSET_VAR_REQ);
- addRecSignal(GSN_SET_VAR_CONF, &Cmvmi::execSET_VAR_CONF);
- addRecSignal(GSN_SET_VAR_REF, &Cmvmi::execSET_VAR_REF);
addRecSignal(GSN_STOP_ORD, &Cmvmi::execSTOP_ORD);
addRecSignal(GSN_START_ORD, &Cmvmi::execSTART_ORD);
addRecSignal(GSN_EVENT_SUBSCRIBE_REQ,
@@ -727,24 +723,6 @@ Cmvmi::execTEST_ORD(Signal * signal){
#endif
}
-void Cmvmi::execSTATISTICS_REQ(Signal* signal)
-{
- // TODO Note ! This is only a test implementation...
-
- static int stat1 = 0;
- jamEntry();
-
- //ndbout << "data 1: " << signal->theData[1];
-
- int x = signal->theData[0];
- stat1++;
- signal->theData[0] = stat1;
- sendSignal(x, GSN_STATISTICS_CONF, signal, 7, JBB);
-
-}//execSTATISTICS_REQ()
-
-
-
void Cmvmi::execSTOP_ORD(Signal* signal)
{
jamEntry();
@@ -863,7 +841,7 @@ void Cmvmi::execTAMPER_ORD(Signal* signal)
// to be able to indicate if we really introduced an error.
#ifdef ERROR_INSERT
TamperOrd* const tamperOrd = (TamperOrd*)&signal->theData[0];
-
+ signal->theData[2] = 0;
signal->theData[1] = tamperOrd->errorNo;
signal->theData[0] = 5;
sendSignal(DBDIH_REF, GSN_DIHNDBTAMPER, signal, 3,JBB);
@@ -871,160 +849,6 @@ void Cmvmi::execTAMPER_ORD(Signal* signal)
}//execTAMPER_ORD()
-
-
-void Cmvmi::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
-
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- jamEntry();
- switch (var) {
-
- // NDBCNTR_REF
-
- // DBTC
- case TransactionDeadlockDetectionTimeout:
- case TransactionInactiveTime:
- case NoOfConcurrentProcessesHandleTakeover:
- sendSignal(DBTC_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // DBDIH
- case TimeBetweenLocalCheckpoints:
- case TimeBetweenGlobalCheckpoints:
- sendSignal(DBDIH_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // DBLQH
- case NoOfConcurrentCheckpointsDuringRestart:
- case NoOfConcurrentCheckpointsAfterRestart:
- sendSignal(DBLQH_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // DBACC
- case NoOfDiskPagesToDiskDuringRestartACC:
- case NoOfDiskPagesToDiskAfterRestartACC:
- sendSignal(DBACC_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // DBTUP
- case NoOfDiskPagesToDiskDuringRestartTUP:
- case NoOfDiskPagesToDiskAfterRestartTUP:
- sendSignal(DBTUP_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // DBDICT
-
- // NDBCNTR
- case TimeToWaitAlive:
-
- // QMGR
- case HeartbeatIntervalDbDb: // TODO ev till Ndbcnt också
- case HeartbeatIntervalDbApi:
- case ArbitTimeout:
- sendSignal(QMGR_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // NDBFS
-
- // CMVMI
- case MaxNoOfSavedMessages:
- case LockPagesInMainMemory:
- case TimeBetweenWatchDogCheck:
- case StopOnError:
- handleSET_VAR_REQ(signal);
- break;
-
-
- // Not possible to update (this could of course be handled by each block
- // instead but I havn't investigated where they belong)
- case Id:
- case ExecuteOnComputer:
- case ShmKey:
- case MaxNoOfConcurrentOperations:
- case MaxNoOfConcurrentTransactions:
- case MemorySpaceIndexes:
- case MemorySpaceTuples:
- case MemoryDiskPages:
- case NoOfFreeDiskClusters:
- case NoOfDiskClusters:
- case NoOfFragmentLogFiles:
- case NoOfDiskClustersPerDiskFile:
- case NoOfDiskFiles:
- case MaxNoOfSavedEvents:
- default:
-
- int mgmtSrvr = setVarReq->mgmtSrvrBlockRef();
- sendSignal(mgmtSrvr, GSN_SET_VAR_REF, signal, 0, JBB);
- } // switch
-
-#endif
-}//execSET_VAR_REQ()
-
-
-void Cmvmi::execSET_VAR_CONF(Signal* signal)
-{
- int mgmtSrvr = signal->theData[0];
- sendSignal(mgmtSrvr, GSN_SET_VAR_CONF, signal, 0, JBB);
-
-}//execSET_VAR_CONF()
-
-
-void Cmvmi::execSET_VAR_REF(Signal* signal)
-{
- int mgmtSrvr = signal->theData[0];
- sendSignal(mgmtSrvr, GSN_SET_VAR_REF, signal, 0, JBB);
-
-}//execSET_VAR_REF()
-
-
-void Cmvmi::handleSET_VAR_REQ(Signal* signal) {
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
- switch (var) {
- case MaxNoOfSavedMessages:
- m_ctx.m_config.maxNoOfErrorLogs(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case LockPagesInMainMemory:
- int result;
- if (val == 0) {
- result = NdbMem_MemUnlockAll();
- }
- else {
- result = NdbMem_MemLockAll();
- }
- if (result == 0) {
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- }
- else {
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- }
- break;
-
- case TimeBetweenWatchDogCheck:
- m_ctx.m_config.timeBetweenWatchDogCheck(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case StopOnError:
- m_ctx.m_config.stopOnError(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- return;
- } // switch
-#endif
-}
-
#ifdef VM_TRACE
class RefSignalTest {
public:
@@ -1129,6 +953,24 @@ Cmvmi::execDUMP_STATE_ORD(Signal* signal)
}
}
+ if (arg == DumpStateOrd::CmvmiDumpSubscriptions)
+ {
+ SubscriberPtr ptr;
+ subscribers.first(ptr);
+ g_eventLogger.info("List subscriptions:");
+ while(ptr.i != RNIL)
+ {
+ g_eventLogger.info("Subscription: %u, nodeId: %u, ref: 0x%x",
+ ptr.i, refToNode(ptr.p->blockRef), ptr.p->blockRef);
+ for(Uint32 i = 0; i < LogLevel::LOGLEVEL_CATEGORIES; i++)
+ {
+ Uint32 level = ptr.p->logLevel.getLogLevel((LogLevel::EventCategory)i);
+ g_eventLogger.info("Category %u Level %u", i, level);
+ }
+ subscribers.next(ptr);
+ }
+ }
+
if (arg == DumpStateOrd::CmvmiDumpLongSignalMemory){
infoEvent("Cmvmi: g_sectionSegmentPool size: %d free: %d",
g_sectionSegmentPool.getSize(),
diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
index 208f2511c6d..bc88f1a0c63 100644
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
@@ -55,20 +55,14 @@ private:
void execSIZEALT_ACK(Signal* signal);
void execTEST_ORD(Signal* signal);
- void execSTATISTICS_REQ(Signal* signal);
void execSTOP_ORD(Signal* signal);
void execSTART_ORD(Signal* signal);
void execTAMPER_ORD(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
- void execSET_VAR_CONF(Signal* signal);
- void execSET_VAR_REF(Signal* signal);
void execDUMP_STATE_ORD(Signal* signal);
void execEVENT_SUBSCRIBE_REQ(Signal *);
void cancelSubscription(NodeId nodeId);
-
- void handleSET_VAR_REQ(Signal* signal);
void execTESTSIG(Signal* signal);
void execNODE_START_REP(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
index 6337e252c0b..ca348b23e6a 100644
--- a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
+++ b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
@@ -537,6 +537,7 @@ struct Operationrec {
,OP_INITIAL = ~(Uint32)0
};
+ Operationrec() {}
bool is_same_trans(const Operationrec* op) const {
return
transId1 == op->transId1 && transId2 == op->transId2;
@@ -660,7 +661,6 @@ private:
void execNDB_STTOR(Signal* signal);
void execDROP_TAB_REQ(Signal* signal);
void execREAD_CONFIG_REQ(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execDUMP_STATE_ORD(Signal* signal);
// Statement blocks
diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
index 87db12cea51..9ba164d264c 100644
--- a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
@@ -113,7 +113,6 @@ Dbacc::Dbacc(Block_context& ctx):
addRecSignal(GSN_NDB_STTOR, &Dbacc::execNDB_STTOR);
addRecSignal(GSN_DROP_TAB_REQ, &Dbacc::execDROP_TAB_REQ);
addRecSignal(GSN_READ_CONFIG_REQ, &Dbacc::execREAD_CONFIG_REQ, true);
- addRecSignal(GSN_SET_VAR_REQ, &Dbacc::execSET_VAR_REQ);
initData();
diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index bf2fa5b7584..b90a000d55b 100644
--- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -698,7 +698,6 @@ Dbacc::execDROP_TAB_REQ(Signal* signal){
void Dbacc::releaseRootFragResources(Signal* signal, Uint32 tableId)
{
- FragmentrecPtr rootPtr;
TabrecPtr tabPtr;
tabPtr.i = tableId;
ptrCheckGuard(tabPtr, ctablesize, tabrec);
@@ -2266,7 +2265,6 @@ void Dbacc::execACCMINUPDATE(Signal* signal)
Page8Ptr ulkPageidptr;
Uint32 tulkLocalPtr;
Uint32 tlocalkey1, tlocalkey2;
- Uint32 TlogStart;
jamEntry();
operationRecPtr.i = signal->theData[0];
@@ -8511,33 +8509,6 @@ Dbacc::execDUMP_STATE_ORD(Signal* signal)
#endif
}//Dbacc::execDUMP_STATE_ORD()
-void Dbacc::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
-
- switch (var) {
-
- case NoOfDiskPagesToDiskAfterRestartACC:
- clblPagesPerTick = val;
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case NoOfDiskPagesToDiskDuringRestartACC:
- // Valid only during start so value not set.
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-
-}//execSET_VAR_REQ()
-
void
Dbacc::execREAD_PSEUDO_REQ(Signal* signal){
jamEntry();
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index 00a984e591b..15362a7c34e 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -291,7 +291,6 @@ Dbdict::execDUMP_STATE_ORD(Signal* signal)
for(; ok; ok = c_obj_hash.next(iter))
{
Rope name(c_rope_pool, iter.curr.p->m_name);
- const Uint32 size = name.size();
char buf[1024];
name.copy(buf);
ndbout_c("%s m_ref_count: %d", buf, iter.curr.p->m_ref_count);
@@ -3793,7 +3792,7 @@ Dbdict::execCREATE_TABLE_REQ(Signal* signal){
createTabPtr.p->m_dihAddFragPtr = RNIL;
Uint32 key = c_opRecordSequence + 1;
- Uint32 *theData = signal->getDataPtrSend(), i;
+ Uint32 *theData = signal->getDataPtrSend();
Uint16 *frag_data= (Uint16*)&signal->theData[25];
CreateFragmentationReq * const req = (CreateFragmentationReq*)theData;
req->senderRef = reference();
@@ -4940,7 +4939,6 @@ Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){
packTableIntoPages(w, tabPtr);
SegmentedSectionPtr spDataPtr;
- Ptr<SectionSegment> tmpTsPtr;
w.getPtr(spDataPtr);
signal->setSection(spDataPtr, CreateTabReq::DICT_TAB_INFO);
@@ -5425,7 +5423,6 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
Uint32 fragCount = req->totalFragments;
Uint32 requestInfo = req->requestInfo;
Uint32 startGci = req->startGci;
- Uint32 tablespace_id= req->tablespaceId;
Uint32 logPart = req->logPartId;
ndbrequire(node == getOwnNodeId());
@@ -7538,7 +7535,6 @@ void
Dbdict::execLIST_TABLES_REQ(Signal* signal)
{
jamEntry();
- Uint32 i;
ListTablesReq * req = (ListTablesReq*)signal->getDataPtr();
Uint32 senderRef = req->senderRef;
Uint32 senderData = req->senderData;
@@ -9415,7 +9411,6 @@ Dbdict::createEventUTIL_PREPARE(Signal* signal,
evntRecPtr.i = ref->getSenderData();
ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
- Uint32 err;
interpretUtilPrepareErrorCode(errorCode, evntRecPtr.p->m_errorCode,
evntRecPtr.p->m_errorLine);
evntRecPtr.p->m_errorNode = reference();
@@ -15184,7 +15179,6 @@ Dbdict::execDROP_OBJ_REQ(Signal* signal){
const Uint32 objId = req->objId;
const Uint32 objVersion = req->objVersion;
const Uint32 objType = req->objType;
- const Uint32 requestInfo = req->requestInfo;
DropObjRecordPtr dropObjPtr;
ndbrequire(c_opDropObj.seize(dropObjPtr));
@@ -15683,8 +15677,7 @@ Dbdict::execCREATE_FILEGROUP_CONF(Signal* signal){
void
Dbdict::create_fg_abort_start(Signal* signal, SchemaOp* op){
- CreateFilegroupImplReq* req =
- (CreateFilegroupImplReq*)signal->getDataPtrSend();
+ (void) signal->getDataPtrSend();
if (op->m_obj_ptr_i != RNIL)
{
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index d9fd604036e..2473a83f383 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -1975,6 +1975,7 @@ public:
NodeBitmask m_nodes;
Uint32 m_errorCode;
+ SchemaTransaction() {}
void setErrorCode(Uint32 c){ if(m_errorCode == 0) m_errorCode = c;}
/**
diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
index eb81672fef5..f7e27359261 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
@@ -700,7 +700,6 @@ private:
void execFSREADREF(Signal *);
void execFSWRITECONF(Signal *);
void execFSWRITEREF(Signal *);
- void execSET_VAR_REQ(Signal *);
void execCHECKNODEGROUPSREQ(Signal *);
void execSTART_INFOREQ(Signal*);
void execSTART_INFOREF(Signal*);
@@ -1382,6 +1381,7 @@ private:
Uint32 csystemnodes;
Uint32 currentgcp;
Uint32 c_newest_restorable_gci;
+ Uint32 c_set_initial_start_flag;
enum GcpMasterTakeOverState {
GMTOS_IDLE = 0,
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
index cf46f6124f2..aff31d625f4 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
@@ -61,6 +61,7 @@ void Dbdih::initData()
c_blockCommit = false;
c_blockCommitNo = 1;
cntrlblockref = RNIL;
+ c_set_initial_start_flag = FALSE;
}//Dbdih::initData()
void Dbdih::initRecords()
@@ -203,7 +204,6 @@ Dbdih::Dbdih(Block_context& ctx):
addRecSignal(GSN_FSREADREF, &Dbdih::execFSREADREF, true);
addRecSignal(GSN_FSWRITECONF, &Dbdih::execFSWRITECONF);
addRecSignal(GSN_FSWRITEREF, &Dbdih::execFSWRITEREF, true);
- addRecSignal(GSN_SET_VAR_REQ, &Dbdih::execSET_VAR_REQ);
addRecSignal(GSN_START_INFOREQ,
&Dbdih::execSTART_INFOREQ);
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 9cd7dbfc59b..4934fca68c5 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -677,6 +677,12 @@ done:
Uint32 tmp= SYSFILE->m_restart_seq;
memcpy(sysfileData, cdata, sizeof(sysfileData));
SYSFILE->m_restart_seq = tmp;
+
+ if (c_set_initial_start_flag)
+ {
+ jam();
+ Sysfile::setInitialStartOngoing(SYSFILE->systemRestartBits);
+ }
}
c_copyGCISlave.m_copyReason = reason;
@@ -1337,6 +1343,11 @@ void Dbdih::execNDB_STTOR(Signal* signal)
// The permission is given by the master node in the alive set.
/*-----------------------------------------------------------------------*/
createMutexes(signal, 0);
+ if (cstarttype == NodeState::ST_INITIAL_NODE_RESTART)
+ {
+ jam();
+ c_set_initial_start_flag = TRUE; // In sysfile...
+ }
break;
case ZNDB_SPH3:
@@ -1883,8 +1894,8 @@ void Dbdih::execSTART_PERMREQ(Signal* signal)
return;
}//if
if (getNodeStatus(nodeId) != NodeRecord::DEAD){
- ndbout << "nodeStatus in START_PERMREQ = "
- << (Uint32) getNodeStatus(nodeId) << endl;
+ g_eventLogger.error("nodeStatus in START_PERMREQ = %u",
+ (Uint32) getNodeStatus(nodeId));
ndbrequire(false);
}//if
@@ -2954,7 +2965,6 @@ Dbdih::nr_start_fragment(Signal* signal,
if (replicaPtr.p->lcpStatus[idx] == ZVALID)
{
ndbrequire(replicaPtr.p->lcpId[idx] > maxLcpId);
- Uint32 startGci = replicaPtr.p->maxGciCompleted[idx];
Uint32 stopGci = replicaPtr.p->maxGciStarted[idx];
for (;j < replicaPtr.p->noCrashedReplicas; j++)
{
@@ -4297,9 +4307,9 @@ void Dbdih::checkCopyTab(NodeRecordPtr failedNodePtr)
jam();
break;
default:
- ndbout_c("outstanding gsn: %s(%d)",
- getSignalName(c_nodeStartMaster.m_outstandingGsn),
- c_nodeStartMaster.m_outstandingGsn);
+ g_eventLogger.error("outstanding gsn: %s(%d)",
+ getSignalName(c_nodeStartMaster.m_outstandingGsn),
+ c_nodeStartMaster.m_outstandingGsn);
ndbrequire(false);
}
@@ -4752,9 +4762,10 @@ void Dbdih::failedNodeLcpHandling(Signal* signal, NodeRecordPtr failedNodePtr)
failedNodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
break;
default:
- ndbout << "activeStatus = " << (Uint32) failedNodePtr.p->activeStatus;
- ndbout << " at failure after NODE_FAILREP of node = ";
- ndbout << failedNodePtr.i << endl;
+ g_eventLogger.error("activeStatus = %u "
+ "at failure after NODE_FAILREP of node = %u",
+ (Uint32) failedNodePtr.p->activeStatus,
+ failedNodePtr.i);
ndbrequire(false);
break;
}//switch
@@ -4891,6 +4902,8 @@ void
Dbdih::startLcpMasterTakeOver(Signal* signal, Uint32 nodeId){
jam();
+ Uint32 oldNode = c_lcpMasterTakeOverState.failedNodeId;
+
c_lcpMasterTakeOverState.minTableId = ~0;
c_lcpMasterTakeOverState.minFragId = ~0;
c_lcpMasterTakeOverState.failedNodeId = nodeId;
@@ -4909,7 +4922,20 @@ Dbdih::startLcpMasterTakeOver(Signal* signal, Uint32 nodeId){
/**
* Node failure during master take over...
*/
- ndbout_c("Nodefail during master take over");
+ g_eventLogger.info("Nodefail during master take over (old: %d)", oldNode);
+ }
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = oldNode;
+ if (oldNode > 0 && oldNode < MAX_NDB_NODES)
+ {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->m_nodefailSteps.get(NF_LCP_TAKE_OVER))
+ {
+ jam();
+ checkLocalNodefailComplete(signal, oldNode, NF_LCP_TAKE_OVER);
+ }
}
setLocalNodefailHandling(signal, nodeId, NF_LCP_TAKE_OVER);
@@ -5149,7 +5175,8 @@ void Dbdih::execMASTER_GCPCONF(Signal* signal)
if (latestLcpId > SYSFILE->latestLCP_ID) {
jam();
#if 0
- ndbout_c("Dbdih: Setting SYSFILE->latestLCP_ID to %d", latestLcpId);
+ g_eventLogger.info("Dbdih: Setting SYSFILE->latestLCP_ID to %d",
+ latestLcpId);
SYSFILE->latestLCP_ID = latestLcpId;
#endif
SYSFILE->keepGCI = oldestKeepGci;
@@ -5808,7 +5835,7 @@ Dbdih::checkLocalNodefailComplete(Signal* signal, Uint32 failedNodeId,
if (ERROR_INSERTED(7030))
{
- ndbout_c("Reenable GCP_PREPARE");
+ g_eventLogger.info("Reenable GCP_PREPARE");
CLEAR_ERROR_INSERT_VALUE;
}
@@ -5925,6 +5952,14 @@ void Dbdih::execMASTER_LCPREQ(Signal* signal)
jamEntry();
const BlockReference newMasterBlockref = req->masterRef;
+ if (newMasterBlockref != cmasterdihref)
+ {
+ jam();
+ ndbout_c("resending GSN_MASTER_LCPREQ");
+ sendSignalWithDelay(reference(), GSN_MASTER_LCPREQ, signal,
+ signal->getLength(), 50);
+ return;
+ }
Uint32 failedNodeId = req->failedNodeId;
/**
@@ -5981,7 +6016,7 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
#if 0
if(c_copyGCISlave.m_copyReason == CopyGCIReq::LOCAL_CHECKPOINT){
- ndbout_c("Dbdih: Also resetting c_copyGCISlave");
+ g_eventLogger.info("Dbdih: Also resetting c_copyGCISlave");
c_copyGCISlave.m_copyReason = CopyGCIReq::IDLE;
c_copyGCISlave.m_expectedNextWord = 0;
}
@@ -6066,7 +6101,7 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
if(c_lcpState.lcpStatus == LCP_TAB_SAVED){
#ifdef VM_TRACE
- ndbout_c("Sending extra GSN_LCP_COMPLETE_REP to new master");
+ g_eventLogger.info("Sending extra GSN_LCP_COMPLETE_REP to new master");
#endif
sendLCP_COMPLETE_REP(signal);
}
@@ -6221,8 +6256,10 @@ void Dbdih::execMASTER_LCPCONF(Signal* signal)
ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
nodePtr.p->lcpStateAtTakeOver = lcpState;
+ CRASH_INSERTION(7180);
+
#ifdef VM_TRACE
- ndbout_c("MASTER_LCPCONF");
+ g_eventLogger.info("MASTER_LCPCONF");
printMASTER_LCP_CONF(stdout, &signal->theData[0], 0, 0);
#endif
@@ -6299,7 +6336,7 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
// protocol.
/* --------------------------------------------------------------------- */
#ifdef VM_TRACE
- ndbout_c("MASTER_LCPhandling:: LMTOS_ALL_IDLE -> checkLcpStart");
+ g_eventLogger.info("MASTER_LCPhandling:: LMTOS_ALL_IDLE -> checkLcpStart");
#endif
checkLcpStart(signal, __LINE__);
break;
@@ -6310,7 +6347,7 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
// protocol by calculating the keep gci and storing the new lcp id.
/* --------------------------------------------------------------------- */
#ifdef VM_TRACE
- ndbout_c("MASTER_LCPhandling:: LMTOS_COPY_ONGOING -> storeNewLcpId");
+ g_eventLogger.info("MASTER_LCPhandling:: LMTOS_COPY_ONGOING -> storeNewLcpId");
#endif
if (c_lcpState.lcpStatus == LCP_STATUS_ACTIVE) {
jam();
@@ -6321,7 +6358,7 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
/*---------------------------------------------------------------------*/
Uint32 lcpId = SYSFILE->latestLCP_ID;
#ifdef VM_TRACE
- ndbout_c("Decreasing latestLCP_ID from %d to %d", lcpId, lcpId - 1);
+ g_eventLogger.info("Decreasing latestLCP_ID from %d to %d", lcpId, lcpId - 1);
#endif
SYSFILE->latestLCP_ID--;
}//if
@@ -6338,10 +6375,10 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
* complete before finalising the LCP process.
* ------------------------------------------------------------------ */
#ifdef VM_TRACE
- ndbout_c("MASTER_LCPhandling:: LMTOS_ALL_ACTIVE -> "
- "startLcpRoundLoopLab(table=%u, fragment=%u)",
- c_lcpMasterTakeOverState.minTableId,
- c_lcpMasterTakeOverState.minFragId);
+ g_eventLogger.info("MASTER_LCPhandling:: LMTOS_ALL_ACTIVE -> "
+ "startLcpRoundLoopLab(table=%u, fragment=%u)",
+ c_lcpMasterTakeOverState.minTableId,
+ c_lcpMasterTakeOverState.minFragId);
#endif
c_lcpState.keepGci = SYSFILE->keepGCI;
@@ -7745,8 +7782,8 @@ void Dbdih::checkGcpStopLab(Signal* signal)
if (cgcpSameCounter == 1200) {
jam();
#ifdef VM_TRACE
- ndbout << "System crash due to GCP Stop in state = ";
- ndbout << (Uint32) cgcpStatus << endl;
+ g_eventLogger.error("System crash due to GCP Stop in state = %u",
+ (Uint32) cgcpStatus);
#endif
crashSystemAtGcpStop(signal);
return;
@@ -7759,8 +7796,8 @@ void Dbdih::checkGcpStopLab(Signal* signal)
if (cgcpSameCounter == 1200) {
jam();
#ifdef VM_TRACE
- ndbout << "System crash due to GCP Stop in state = ";
- ndbout << (Uint32) cgcpStatus << endl;
+ g_eventLogger.error("System crash due to GCP Stop in state = %u",
+ (Uint32) cgcpStatus);
#endif
crashSystemAtGcpStop(signal);
return;
@@ -7951,7 +7988,7 @@ void Dbdih::GCP_SAVEhandling(Signal* signal, Uint32 nodeId)
getNodeState().startLevel == NodeState::SL_STARTED){
jam();
#if 0
- ndbout_c("Dbdih: Clearing initial start ongoing");
+ g_eventLogger.info("Dbdih: Clearing initial start ongoing");
#endif
Sysfile::clearInitialStartOngoing(SYSFILE->systemRestartBits);
}
@@ -7970,7 +8007,7 @@ void Dbdih::execGCP_PREPARE(Signal* signal)
if (ERROR_INSERTED(7030))
{
cgckptflag = true;
- ndbout_c("Delayed GCP_PREPARE 5s");
+ g_eventLogger.info("Delayed GCP_PREPARE 5s");
sendSignalWithDelay(reference(), GSN_GCP_PREPARE, signal, 5000,
signal->getLength());
return;
@@ -7990,7 +8027,7 @@ void Dbdih::execGCP_PREPARE(Signal* signal)
if (ERROR_INSERTED(7031))
{
- ndbout_c("Crashing delayed in GCP_PREPARE 3s");
+ g_eventLogger.info("Crashing delayed in GCP_PREPARE 3s");
signal->theData[0] = 9999;
sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 3000, 1);
return;
@@ -8514,7 +8551,7 @@ void Dbdih::initLcpLab(Signal* signal, Uint32 senderRef, Uint32 tableId)
* This is LCP master takeover
*/
#ifdef VM_TRACE
- ndbout_c("initLcpLab aborted due to LCP master takeover - 1");
+ g_eventLogger.info("initLcpLab aborted due to LCP master takeover - 1");
#endif
c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
sendMASTER_LCPCONF(signal);
@@ -8527,7 +8564,7 @@ void Dbdih::initLcpLab(Signal* signal, Uint32 senderRef, Uint32 tableId)
* Master take over but has not yet received MASTER_LCPREQ
*/
#ifdef VM_TRACE
- ndbout_c("initLcpLab aborted due to LCP master takeover - 2");
+ g_eventLogger.info("initLcpLab aborted due to LCP master takeover - 2");
#endif
return;
}
@@ -9836,9 +9873,10 @@ void Dbdih::checkTcCounterLab(Signal* signal)
{
CRASH_INSERTION(7009);
if (c_lcpState.lcpStatus != LCP_STATUS_IDLE) {
- ndbout << "lcpStatus = " << (Uint32) c_lcpState.lcpStatus;
- ndbout << "lcpStatusUpdatedPlace = " <<
- c_lcpState.lcpStatusUpdatedPlace << endl;
+ g_eventLogger.error("lcpStatus = %u "
+ "lcpStatusUpdatedPlace = %d",
+ (Uint32) c_lcpState.lcpStatus,
+ c_lcpState.lcpStatusUpdatedPlace);
ndbrequire(false);
return;
}//if
@@ -10421,9 +10459,8 @@ void Dbdih::execLCP_FRAG_REP(Signal* signal)
if(tabPtr.p->tabStatus == TabRecord::TS_DROPPING){
jam();
- ndbout_c("TS_DROPPING - Neglecting to save Table: %d Frag: %d - ",
- tableId,
- fragId);
+ g_eventLogger.info("TS_DROPPING - Neglecting to save Table: %d Frag: %d - ",
+ tableId, fragId);
} else {
jam();
/**
@@ -10553,7 +10590,7 @@ void Dbdih::findReplica(ReplicaRecordPtr& replicaPtr,
};
#ifdef VM_TRACE
- ndbout_c("Fragment Replica(node=%d) not found", nodeId);
+ g_eventLogger.info("Fragment Replica(node=%d) not found", nodeId);
replicaPtr.i = fragPtrP->oldStoredReplicas;
while(replicaPtr.i != RNIL){
ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
@@ -10566,9 +10603,9 @@ void Dbdih::findReplica(ReplicaRecordPtr& replicaPtr,
}//if
};
if(replicaPtr.i != RNIL){
- ndbout_c("...But was found in oldStoredReplicas");
+ g_eventLogger.info("...But was found in oldStoredReplicas");
} else {
- ndbout_c("...And wasn't found in oldStoredReplicas");
+ g_eventLogger.info("...And wasn't found in oldStoredReplicas");
}
#endif
ndbrequire(false);
@@ -10582,8 +10619,6 @@ Dbdih::handle_invalid_lcp_no(const LcpFragRep* rep,
ndbrequire(!isMaster());
Uint32 lcpNo = rep->lcpNo;
Uint32 lcpId = rep->lcpId;
- Uint32 replicaLcpNo = replicaPtr.p->nextLcp;
- Uint32 prevReplicaLcpNo = prevLcpNo(replicaLcpNo);
warningEvent("Detected previous node failure of %d during lcp",
rep->nodeId);
@@ -10635,8 +10670,8 @@ Dbdih::reportLcpCompletion(const LcpFragRep* lcpReport)
if(lcpNo != replicaPtr.p->nextLcp){
if (handle_invalid_lcp_no(lcpReport, replicaPtr))
{
- ndbout_c("lcpNo = %d replicaPtr.p->nextLcp = %d",
- lcpNo, replicaPtr.p->nextLcp);
+ g_eventLogger.error("lcpNo = %d replicaPtr.p->nextLcp = %d",
+ lcpNo, replicaPtr.p->nextLcp);
ndbrequire(false);
}
}
@@ -10672,7 +10707,7 @@ Dbdih::reportLcpCompletion(const LcpFragRep* lcpReport)
// Not all fragments in table have been checkpointed.
/* ----------------------------------------------------------------- */
if(0)
- ndbout_c("reportLcpCompletion: fragment %d not ready", fid);
+ g_eventLogger.info("reportLcpCompletion: fragment %d not ready", fid);
return false;
}//if
}//for
@@ -10779,6 +10814,17 @@ Dbdih::sendLCP_COMPLETE_REP(Signal* signal){
sendSignal(c_lcpState.m_masterLcpDihRef, GSN_LCP_COMPLETE_REP, signal,
LcpCompleteRep::SignalLength, JBB);
+
+ /**
+ * Say that an initial node restart does not need to be redone
+ * once node has been part of first LCP
+ */
+ if (c_set_initial_start_flag &&
+ c_lcpState.m_participatingLQH.get(getOwnNodeId()))
+ {
+ jam();
+ c_set_initial_start_flag = FALSE;
+ }
}
/*-------------------------------------------------------------------------- */
@@ -10789,7 +10835,7 @@ void Dbdih::execLCP_COMPLETE_REP(Signal* signal)
jamEntry();
#if 0
- ndbout_c("LCP_COMPLETE_REP");
+ g_eventLogger.info("LCP_COMPLETE_REP");
printLCP_COMPLETE_REP(stdout,
signal->getDataPtr(),
signal->length(), number());
@@ -10875,7 +10921,7 @@ void Dbdih::allNodesLcpCompletedLab(Signal* signal)
if(c_lcpMasterTakeOverState.state != LMTOS_IDLE){
jam();
#ifdef VM_TRACE
- ndbout_c("Exiting from allNodesLcpCompletedLab");
+ g_eventLogger.info("Exiting from allNodesLcpCompletedLab");
#endif
return;
}
@@ -11112,14 +11158,14 @@ void Dbdih::crashSystemAtGcpStop(Signal* signal)
infoEvent("Detected GCP stop...sending kill to %s",
c_GCP_SAVEREQ_Counter.getText());
- ndbout_c("Detected GCP stop...sending kill to %s",
- c_GCP_SAVEREQ_Counter.getText());
+ g_eventLogger.error("Detected GCP stop...sending kill to %s",
+ c_GCP_SAVEREQ_Counter.getText());
return;
}
case GCP_SAVE_LQH_FINISHED:
- ndbout_c("m_copyReason: %d m_waiting: %d",
- c_copyGCIMaster.m_copyReason,
- c_copyGCIMaster.m_waiting);
+ g_eventLogger.error("m_copyReason: %d m_waiting: %d",
+ c_copyGCIMaster.m_copyReason,
+ c_copyGCIMaster.m_waiting);
break;
case GCP_READY: // shut up lint
case GCP_PREPARE_SENT:
@@ -11127,11 +11173,11 @@ void Dbdih::crashSystemAtGcpStop(Signal* signal)
break;
}
- ndbout_c("c_copyGCISlave: sender{Data, Ref} %d %x reason: %d nextWord: %d",
- c_copyGCISlave.m_senderData,
- c_copyGCISlave.m_senderRef,
- c_copyGCISlave.m_copyReason,
- c_copyGCISlave.m_expectedNextWord);
+ g_eventLogger.error("c_copyGCISlave: sender{Data, Ref} %d %x reason: %d nextWord: %d",
+ c_copyGCISlave.m_senderData,
+ c_copyGCISlave.m_senderRef,
+ c_copyGCISlave.m_copyReason,
+ c_copyGCISlave.m_expectedNextWord);
FileRecordPtr file0Ptr;
file0Ptr.i = crestartInfoFile[0];
@@ -13350,9 +13396,9 @@ void Dbdih::setLcpActiveStatusEnd()
nodePtr.i = getOwnNodeId();
ptrAss(nodePtr, nodeRecord);
ndbrequire(nodePtr.p->activeStatus == Sysfile::NS_Active);
- ndbout_c("NR: setLcpActiveStatusEnd - m_participatingLQH");
+ g_eventLogger.info("NR: setLcpActiveStatusEnd - m_participatingLQH");
} else {
- ndbout_c("NR: setLcpActiveStatusEnd - !m_participatingLQH");
+ g_eventLogger.info("NR: setLcpActiveStatusEnd - !m_participatingLQH");
}
}
@@ -14184,8 +14230,8 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
}
if(arg == DumpStateOrd::EnableUndoDelayDataWrite){
- ndbout << "Dbdih:: delay write of datapages for table = "
- << dumpState->args[1]<< endl;
+ g_eventLogger.info("Dbdih:: delay write of datapages for table = %u",
+ dumpState->args[1]);
// Send this dump to ACC and TUP
EXECUTE_DIRECT(DBACC, GSN_DUMP_STATE_ORD, signal, 2);
EXECUTE_DIRECT(DBTUP, GSN_DUMP_STATE_ORD, signal, 2);
@@ -14202,13 +14248,13 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
}//if
if (signal->theData[0] == DumpStateOrd::DihMinTimeBetweenLCP) {
// Set time between LCP to min value
- ndbout << "Set time between LCP to min value" << endl;
+ g_eventLogger.info("Set time between LCP to min value");
c_lcpState.clcpDelay = 0; // TimeBetweenLocalCheckpoints.min
return;
}
if (signal->theData[0] == DumpStateOrd::DihMaxTimeBetweenLCP) {
// Set time between LCP to max value
- ndbout << "Set time between LCP to max value" << endl;
+ g_eventLogger.info("Set time between LCP to max value");
c_lcpState.clcpDelay = 31; // TimeBetweenLocalCheckpoints.max
return;
}
@@ -14244,7 +14290,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
{
cgcpDelay = signal->theData[1];
}
- ndbout_c("Setting time between gcp : %d", cgcpDelay);
+ g_eventLogger.info("Setting time between gcp : %d", cgcpDelay);
}
if (arg == 7021 && signal->getLength() == 2)
@@ -14367,7 +14413,7 @@ Dbdih::execPREP_DROP_TAB_REQ(Signal* signal){
while(index < count){
if(nodePtr.p->queuedChkpt[index].tableId == tabPtr.i){
jam();
- // ndbout_c("Unqueuing %d", index);
+ // g_eventLogger.info("Unqueuing %d", index);
count--;
for(Uint32 i = index; i<count; i++){
@@ -14407,7 +14453,7 @@ Dbdih::execPREP_DROP_TAB_REQ(Signal* signal){
if(checkLcpAllTablesDoneInLqh()){
jam();
- ndbout_c("This is the last table");
+ g_eventLogger.info("This is the last table");
/**
* Then check if saving of tab info is done for all tables
@@ -14416,7 +14462,7 @@ Dbdih::execPREP_DROP_TAB_REQ(Signal* signal){
checkLcpCompletedLab(signal);
if(a != c_lcpState.lcpStatus){
- ndbout_c("And all tables are written to already written disk");
+ g_eventLogger.info("And all tables are written to already written disk");
}
}
break;
@@ -14573,30 +14619,6 @@ Dbdih::execNDB_TAMPER(Signal* signal)
return;
}//Dbdih::execNDB_TAMPER()
-void Dbdih::execSET_VAR_REQ(Signal* signal) {
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
-
- switch (var) {
- case TimeBetweenLocalCheckpoints:
- c_lcpState.clcpDelay = val;
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case TimeBetweenGlobalCheckpoints:
- cgcpDelay = val;
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-}
-
void Dbdih::execBLOCK_COMMIT_ORD(Signal* signal){
BlockCommitOrd* const block = (BlockCommitOrd *)&signal->theData[0];
diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index 3951b53184c..8d7290469ca 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -2169,7 +2169,6 @@ private:
void execFSREADCONF(Signal* signal);
void execFSREADREF(Signal* signal);
void execSCAN_HBREP(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execTIME_SIGNAL(Signal* signal);
void execFSSYNCCONF(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
index 8ddb96f9111..c054c227c8e 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
@@ -267,7 +267,6 @@ Dblqh::Dblqh(Block_context& ctx):
addRecSignal(GSN_FSREADCONF, &Dblqh::execFSREADCONF);
addRecSignal(GSN_FSREADREF, &Dblqh::execFSREADREF, true);
addRecSignal(GSN_ACC_ABORTCONF, &Dblqh::execACC_ABORTCONF);
- addRecSignal(GSN_SET_VAR_REQ, &Dblqh::execSET_VAR_REQ);
addRecSignal(GSN_TIME_SIGNAL, &Dblqh::execTIME_SIGNAL);
addRecSignal(GSN_FSSYNCCONF, &Dblqh::execFSSYNCCONF);
addRecSignal(GSN_REMOVE_MARKER_ORD, &Dblqh::execREMOVE_MARKER_ORD);
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index f2eef543833..db28daea336 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -62,6 +62,7 @@
#include <signaldata/AttrInfo.hpp>
#include <KeyDescriptor.hpp>
#include <signaldata/RouteOrd.hpp>
+#include <signaldata/FsRef.hpp>
// Use DEBUG to print messages that should be
// seen only when we debug the product
@@ -598,7 +599,7 @@ Dblqh::execDEFINE_BACKUP_REF(Signal* signal)
case DefineBackupRef::FailedInsertTableList:
jam();
err_code = NDBD_EXIT_INVALID_CONFIG;
- extra_msg = "Probably Backup parameters configuration error, Please consult the manual";
+ extra_msg = (char*) "Probably Backup parameters configuration error, Please consult the manual";
progError(__LINE__, err_code, extra_msg);
}
@@ -3962,7 +3963,6 @@ void
Dblqh::handle_nr_copy(Signal* signal, Ptr<TcConnectionrec> regTcPtr)
{
jam();
- Uint32 tableId = regTcPtr.p->tableref;
Uint32 fragPtr = fragptr.p->tupFragptr;
Uint32 op = regTcPtr.p->operation;
@@ -4256,7 +4256,7 @@ Dblqh::nr_copy_delete_row(Signal* signal,
signal->theData, sizeof(Local_key));
regTcPtr.p->m_nr_delete.m_page_id[pos] = RNIL;
regTcPtr.p->m_nr_delete.m_cnt = pos + 2;
- ndbout << "PENDING DISK DELETE: " <<
+ if (0) ndbout << "PENDING DISK DELETE: " <<
regTcPtr.p->m_nr_delete.m_disk_ref[pos] << endl;
}
@@ -6727,7 +6727,6 @@ void Dblqh::execABORT(Signal* signal)
}//if
TcConnectionrec * const regTcPtr = tcConnectptr.p;
- Uint32 activeCreat = regTcPtr->activeCreat;
if (ERROR_INSERTED(5100))
{
SET_ERROR_INSERT_VALUE(5101);
@@ -6807,7 +6806,6 @@ void Dblqh::execABORTREQ(Signal* signal)
return;
}//if
TcConnectionrec * const regTcPtr = tcConnectptr.p;
- Uint32 activeCreat = regTcPtr->activeCreat;
if (regTcPtr->transactionState != TcConnectionrec::PREPARED) {
warningReport(signal, 10);
return;
@@ -7572,7 +7570,7 @@ void Dblqh::lqhTransNextLab(Signal* signal)
* THE RECEIVER OF THE COPY HAVE FAILED.
* WE HAVE TO CLOSE THE COPY PROCESS.
* ----------------------------------------------------------- */
- ndbout_c("close copy");
+ if (0) ndbout_c("close copy");
tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i;
tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC;
closeCopyRequestLab(signal);
@@ -10833,7 +10831,7 @@ void Dblqh::tupCopyCloseConfLab(Signal* signal)
void Dblqh::closeCopyRequestLab(Signal* signal)
{
scanptr.p->scanErrorCounter++;
- ndbout_c("closeCopyRequestLab: scanState: %d", scanptr.p->scanState);
+ if (0) ndbout_c("closeCopyRequestLab: scanState: %d", scanptr.p->scanState);
switch (scanptr.p->scanState) {
case ScanRecord::WAIT_TUPKEY_COPY:
case ScanRecord::WAIT_NEXT_SCAN_COPY:
@@ -11434,7 +11432,17 @@ void Dblqh::execLCP_PREPARE_CONF(Signal* signal)
void Dblqh::execBACKUP_FRAGMENT_REF(Signal* signal)
{
- ndbrequire(false);
+ BackupFragmentRef *ref= (BackupFragmentRef*)signal->getDataPtr();
+ char buf[100];
+ BaseString::snprintf(buf,sizeof(buf),
+ "Unable to store fragment during LCP. NDBFS Error: %u",
+ ref->errorCode);
+
+ progError(__LINE__,
+ (ref->errorCode & FsRef::FS_ERR_BIT)?
+ NDBD_EXIT_AFS_UNKNOWN
+ : ref->errorCode,
+ buf);
}
void Dblqh::execBACKUP_FRAGMENT_CONF(Signal* signal)
@@ -11928,7 +11936,7 @@ void Dblqh::execGCP_SAVEREQ(Signal* signal)
return;
}
- if (getNodeState().getNodeRestartInProgress())
+ if (getNodeState().getNodeRestartInProgress() && cstartRecReq == ZFALSE)
{
GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
saveRef->dihPtr = dihPtr;
@@ -11975,7 +11983,6 @@ void Dblqh::execGCP_SAVEREQ(Signal* signal)
}//if
ndbrequire(ccurrentGcprec == RNIL);
-
ccurrentGcprec = 0;
gcpPtr.i = ccurrentGcprec;
ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord);
@@ -18855,30 +18862,6 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal)
}//Dblqh::execDUMP_STATE_ORD()
-void Dblqh::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
-
- switch (var) {
-
- case NoOfConcurrentCheckpointsAfterRestart:
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case NoOfConcurrentCheckpointsDuringRestart:
- // Valid only during start so value not set.
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-}//execSET_VAR_REQ()
-
-
/* **************************************************************** */
/* ---------------------------------------------------------------- */
/* ---------------------- TRIGGER HANDLING ------------------------ */
diff --git a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index 39b7c00e3a1..dae8ee7e73b 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -1327,7 +1327,6 @@ private:
void execTIME_SIGNAL(Signal* signal);
void execAPI_FAILREQ(Signal* signal);
void execSCAN_HBREP(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execABORT_ALL_REQ(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
index 452ae6d8d70..3bba771f3f0 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
@@ -245,7 +245,6 @@ Dbtc::Dbtc(Block_context& ctx):
addRecSignal(GSN_INCL_NODEREQ, &Dbtc::execINCL_NODEREQ);
addRecSignal(GSN_TIME_SIGNAL, &Dbtc::execTIME_SIGNAL);
addRecSignal(GSN_API_FAILREQ, &Dbtc::execAPI_FAILREQ);
- addRecSignal(GSN_SET_VAR_REQ, &Dbtc::execSET_VAR_REQ);
addRecSignal(GSN_TC_COMMIT_ACK, &Dbtc::execTC_COMMIT_ACK);
addRecSignal(GSN_ABORT_ALL_REQ, &Dbtc::execABORT_ALL_REQ);
diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index c7ca8048354..7ea6d3936b0 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -6937,7 +6937,6 @@ void Dbtc::execGCP_NOMORETRANS(Signal* signal)
/*****************************************************************************/
void Dbtc::execNODE_FAILREP(Signal* signal)
{
- HostRecordPtr tmpHostptr;
jamEntry();
NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
@@ -11004,36 +11003,6 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
}
}//Dbtc::execDUMP_STATE_ORD()
-void Dbtc::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
-
- switch (var) {
-
- case TransactionInactiveTime:
- jam();
- set_appl_timeout_value(val);
- break;
- case TransactionDeadlockDetectionTimeout:
- set_timeout_value(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case NoOfConcurrentProcessesHandleTakeover:
- set_no_parallel_takeover(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-}
-
void Dbtc::execABORT_ALL_REQ(Signal* signal)
{
jamEntry();
@@ -11871,8 +11840,6 @@ void Dbtc::execTCKEYREF(Signal* signal)
}
const UintR TconnectIndex = indexOp->connectionIndex;
ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
- Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
- Uint32 commitFlg = TcKeyReq::getCommitFlag(tcKeyRequestInfo);
switch(indexOp->indexOpState) {
case(IOS_NOOP): {
@@ -13335,7 +13302,6 @@ Dbtc::execROUTE_ORD(Signal* signal)
Uint32 dstRef = ord->dstRef;
Uint32 srcRef = ord->srcRef;
Uint32 gsn = ord->gsn;
- Uint32 cnt = ord->cnt;
if (likely(getNodeInfo(refToNode(dstRef)).m_connected))
{
diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index 230895c942a..fecc4649fe9 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -516,6 +516,7 @@ typedef Ptr<Fragoperrec> FragoperrecPtr;
return (m_key.m_file_no << 16) ^ m_key.m_page_idx;
}
+ Extent_info() {}
bool equal(const Extent_info & rec) const {
return m_key.m_file_no == rec.m_key.m_file_no &&
m_key.m_page_idx == rec.m_key.m_page_idx;
@@ -667,6 +668,7 @@ struct Operationrec {
Uint32 currentAttrinbufLen; //Used until copyAttrinfo
};
+ Operationrec() {}
bool is_first_operation() const { return prevActiveOp == RNIL;}
bool is_last_operation() const { return nextActiveOp == RNIL;}
@@ -1241,6 +1243,7 @@ typedef Ptr<HostBuffer> HostBufferPtr;
STATIC_CONST( LCP_KEEP = 0x02000000 ); // Should be returned in LCP
STATIC_CONST( FREE = 0x02800000 ); // Is free
+ Tuple_header() {}
Uint32 get_tuple_version() const {
return m_header_bits & TUP_VERSION_MASK;
}
@@ -1474,7 +1477,6 @@ private:
void execTUP_ABORTREQ(Signal* signal);
void execNDB_STTOR(Signal* signal);
void execREAD_CONFIG_REQ(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execDROP_TAB_REQ(Signal* signal);
void execALTER_TAB_REQ(Signal* signal);
void execTUP_DEALLOCREQ(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
index 904629fff77..2414e8a10bf 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
@@ -145,7 +145,7 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags)
{
if(copy_bits & Tuple_header::MM_GROWN)
{
- ndbout_c("abort grow");
+ if (0) ndbout_c("abort grow");
Ptr<Page> vpage;
Uint32 idx= regOperPtr.p->m_tuple_location.m_page_idx;
Uint32 mm_vars= regTabPtr.p->m_attributes[MM].m_no_of_varsize;
@@ -168,7 +168,7 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags)
}
else if(bits & Tuple_header::MM_SHRINK)
{
- ndbout_c("abort shrink");
+ if (0) ndbout_c("abort shrink");
}
}
else if (regOperPtr.p->is_first_operation() &&
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
index 1f703599cf5..91d2ca97744 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
@@ -514,7 +514,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
c_lgman->free_log_space(regFragPtr.p->m_logfile_group_id,
regOperPtr.p->m_undo_buffer_space);
- ndbout_c("insert+delete");
+ if (0) ndbout_c("insert+delete");
goto skip_disk;
}
}
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
index 54abbf18664..f865904b413 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
@@ -153,12 +153,10 @@ Dbtup::Disk_alloc_info::Disk_alloc_info(const Tablerec* tabPtrP,
return;
Uint32 min_size= 4*tabPtrP->m_offsets[DD].m_fix_header_size;
- Uint32 var_size= tabPtrP->m_offsets[DD].m_max_var_offset;
if (tabPtrP->m_attributes[DD].m_no_of_varsize == 0)
{
Uint32 recs_per_page= (4*Tup_fixsize_page::DATA_WORDS)/min_size;
- Uint32 pct_free= 0;
m_page_free_bits_map[0] = recs_per_page; // 100% free
m_page_free_bits_map[1] = 1;
m_page_free_bits_map[2] = 0;
@@ -317,7 +315,7 @@ Dbtup::restart_setup_page(Disk_alloc_info& alloc, PagePtr pagePtr)
0, 0, 0);
unsigned uncommitted, committed;
uncommitted = committed = ~(unsigned)0;
- int ret = tsman.get_page_free_bits(&page, &uncommitted, &committed);
+ (void) tsman.get_page_free_bits(&page, &uncommitted, &committed);
idx = alloc.calc_page_free_bits(real_free);
ddassert(idx == committed);
@@ -859,9 +857,6 @@ Dbtup::disk_page_set_dirty(PagePtr pagePtr)
if (DBG_DISK)
ndbout << " disk_page_set_dirty " << key << endl;
- Uint32 tableId = pagePtr.p->m_table_id;
- Uint32 fragId = pagePtr.p->m_fragment_id;
-
Ptr<Tablerec> tabPtr;
tabPtr.i= pagePtr.p->m_table_id;
ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
index c394812ad1a..fe0f570f484 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
@@ -451,7 +451,6 @@ Dbtup::load_diskpage(Signal* signal,
Tuple_header* ptr= (Tuple_header*)tmp;
int res= 1;
- Uint32 opPtr= ptr->m_operation_ptr_i;
if(ptr->m_header_bits & Tuple_header::DISK_PART)
{
Page_cache_client::Request req;
@@ -536,7 +535,6 @@ Dbtup::load_diskpage_scan(Signal* signal,
Tuple_header* ptr= (Tuple_header*)tmp;
int res= 1;
- Uint32 opPtr= ptr->m_operation_ptr_i;
if(ptr->m_header_bits & Tuple_header::DISK_PART)
{
Page_cache_client::Request req;
@@ -2857,7 +2855,7 @@ Dbtup::handle_size_change_after_update(KeyReqStruct* req_struct,
if(needed <= alloc)
{
//ndbassert(!regOperPtr->is_first_operation());
- ndbout_c(" no grow");
+ if (0) ndbout_c(" no grow");
return 0;
}
copy_bits |= Tuple_header::MM_GROWN;
@@ -2923,7 +2921,6 @@ Dbtup::nr_read_pk(Uint32 fragPtrI,
ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
Local_key tmp = *key;
- Uint32 pages = fragPtr.p->noOfPages;
int ret;
PagePtr page_ptr;
@@ -3143,7 +3140,7 @@ Dbtup::nr_delete(Signal* signal, Uint32 senderData,
break;
}
- ndbout << "DIRECT DISK DELETE: " << disk << endl;
+ if (0) ndbout << "DIRECT DISK DELETE: " << disk << endl;
disk_page_free(signal, tablePtr.p, fragPtr.p,
&disk, *(PagePtr*)&disk_page, gci);
return 0;
@@ -3195,7 +3192,7 @@ Dbtup::nr_delete_page_callback(Signal* signal,
break;
}
- ndbout << "PAGE CALLBACK DISK DELETE: " << op.m_disk_ref << endl;
+ if (0) ndbout << "PAGE CALLBACK DISK DELETE: " << op.m_disk_ref << endl;
disk_page_free(signal, tablePtr.p, fragPtr.p,
&op.m_disk_ref, pagePtr, op.m_gci);
@@ -3227,7 +3224,7 @@ Dbtup::nr_delete_log_buffer_callback(Signal* signal,
/**
* reset page no
*/
- ndbout << "LOGBUFFER CALLBACK DISK DELETE: " << op.m_disk_ref << endl;
+ if (0) ndbout << "LOGBUFFER CALLBACK DISK DELETE: " << op.m_disk_ref << endl;
disk_page_free(signal, tablePtr.p, fragPtr.p,
&op.m_disk_ref, pagePtr, op.m_gci);
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp
index 88a818e6fd7..50500b96134 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp
@@ -123,7 +123,6 @@ void Dbtup::convertThPage(Fix_page* regPagePtr,
Uint32 mm)
{
Uint32 nextTuple = regTabPtr->m_offsets[mm].m_fix_header_size;
- Uint32 endOfList;
/*
ASSUMES AT LEAST ONE TUPLE HEADER FITS AND THEREFORE NO HANDLING
OF ZERO AS EXTREME CASE
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
index 3e9469c4edf..67fc5a4ceb0 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
@@ -80,7 +80,6 @@ Dbtup::Dbtup(Block_context& ctx, Pgman* pgman)
addRecSignal(GSN_TUP_ABORTREQ, &Dbtup::execTUP_ABORTREQ);
addRecSignal(GSN_NDB_STTOR, &Dbtup::execNDB_STTOR);
addRecSignal(GSN_READ_CONFIG_REQ, &Dbtup::execREAD_CONFIG_REQ, true);
- addRecSignal(GSN_SET_VAR_REQ, &Dbtup::execSET_VAR_REQ);
// Trigger Signals
addRecSignal(GSN_CREATE_TRIG_REQ, &Dbtup::execCREATE_TRIG_REQ);
@@ -724,32 +723,5 @@ void Dbtup::releaseFragrec(FragrecordPtr regFragPtr)
cfirstfreefrag = regFragPtr.i;
}//Dbtup::releaseFragrec()
-void Dbtup::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)signal->getDataPtrSend();
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
- switch (var) {
-
- case NoOfDiskPagesToDiskAfterRestartTUP:
- clblPagesPerTick = val;
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case NoOfDiskPagesToDiskDuringRestartTUP:
- // Valid only during start so value not set.
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-
-}//execSET_VAR_REQ()
-
-
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index b5010205880..e51638b8a20 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -53,7 +53,6 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
regTabPtr.i = tupFragReq->tableId;
Uint32 noOfAttributes = tupFragReq->noOfAttr;
Uint32 fragId = tupFragReq->fragId;
- Uint32 noOfNullAttr = tupFragReq->noOfNullAttr;
/* Uint32 schemaVersion = tupFragReq->schemaVersion;*/
Uint32 noOfKeyAttr = tupFragReq->noOfKeyAttr;
Uint32 noOfCharsets = tupFragReq->noOfCharsets;
@@ -594,8 +593,8 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
Uint32 sz= sizeof(Disk_undo::Create) >> 2;
Logfile_client lgman(this, c_lgman, regFragPtr.p->m_logfile_group_id);
- int r0 = c_lgman->alloc_log_space(regFragPtr.p->m_logfile_group_id,
- sz);
+ (void) c_lgman->alloc_log_space(regFragPtr.p->m_logfile_group_id,
+ sz);
int res= lgman.get_log_buffer(signal, sz, &cb);
switch(res){
@@ -951,7 +950,7 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId,
cb.m_callbackFunction =
safe_cast(&Dbtup::drop_table_log_buffer_callback);
Uint32 sz= sizeof(Disk_undo::Drop) >> 2;
- int r0 = c_lgman->alloc_log_space(logfile_group_id, sz);
+ (void) c_lgman->alloc_log_space(logfile_group_id, sz);
Logfile_client lgman(this, c_lgman, logfile_group_id);
int res= lgman.get_log_buffer(signal, sz, &cb);
@@ -1081,7 +1080,7 @@ Dbtup::drop_fragment_free_extent(Signal *signal,
safe_cast(&Dbtup::drop_fragment_free_extent_log_buffer_callback);
#if NOT_YET_UNDO_FREE_EXTENT
Uint32 sz= sizeof(Disk_undo::FreeExtent) >> 2;
- int r0 = c_lgman->alloc_log_space(fragPtr.p->m_logfile_group_id, sz);
+ (void) c_lgman->alloc_log_space(fragPtr.p->m_logfile_group_id, sz);
Logfile_client lgman(this, c_lgman, fragPtr.p->m_logfile_group_id);
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
index c8546209f94..28f66c5620a 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
@@ -341,7 +341,6 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer,
Uint32 maxIndexBuf = indexBuf + (dstLen >> 2);
if (maxIndexBuf <= maxRead && ok) {
ljam();
- const char* ssrcPtr = (const char*)srcPtr;
int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
ndbrequire(n != -1);
int m = n;
@@ -510,7 +509,6 @@ Dbtup::readVarSizeNotNULL(Uint32* out_buffer,
Uint32 maxIndexBuf = index_buf + (dstLen >> 2);
if (maxIndexBuf <= max_read && ok) {
ljam();
- const char* ssrcPtr = (const char*)srcPtr;
int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
ndbrequire(n != -1);
int m = n;
@@ -618,7 +616,6 @@ Dbtup::readDiskFixedSizeNotNULL(Uint32* outBuffer,
Uint32 maxIndexBuf = indexBuf + (dstLen >> 2);
if (maxIndexBuf <= maxRead && ok) {
ljam();
- const char* ssrcPtr = (const char*)srcPtr;
int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
ndbrequire(n != -1);
int m = n;
@@ -1025,7 +1022,7 @@ Dbtup::updateVarSizeNotNULL(Uint32* in_buffer,
Uint32 attr_des2)
{
Uint32 attr_descriptor, index_buf, in_buf_len, var_index, null_ind;
- Uint32 vsize_in_bytes, vsize_in_words, new_index, max_var_size;
+ Uint32 vsize_in_words, new_index, max_var_size;
Uint32 var_attr_pos;
char *var_data_start;
Uint16 *vpos_array;
@@ -1445,7 +1442,7 @@ Dbtup::updateDiskVarSizeNotNULL(Uint32* in_buffer,
Uint32 attr_des2)
{
Uint32 attr_descriptor, index_buf, in_buf_len, var_index, null_ind;
- Uint32 vsize_in_bytes, vsize_in_words, new_index, max_var_size;
+ Uint32 vsize_in_words, new_index, max_var_size;
Uint32 var_attr_pos;
char *var_data_start;
Uint16 *vpos_array;
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
index 653a24ba6a1..aad68c5ed17 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
@@ -831,7 +831,6 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
{
ndbassert(bits & ScanOp::SCAN_NR);
Local_key& key_mm = pos.m_key_mm;
- Fix_page* page = (Fix_page*)pos.m_page;
if (! (bits & ScanOp::SCAN_DD)) {
key_mm = pos.m_key;
// caller has already set pos.m_get to next tuple
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp
index 28543882255..072bd69da97 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp
@@ -163,7 +163,6 @@ void Dbtup::free_var_rec(Fragrecord* fragPtr,
/**
* TODO free fix + var part
*/
- Uint32 page_idx= key->m_page_idx;
Uint32 *ptr = ((Fix_page*)pagePtr.p)->get_ptr(key->m_page_idx, 0);
Tuple_header* tuple = (Tuple_header*)ptr;
diff --git a/storage/ndb/src/kernel/blocks/dbtup/tuppage.cpp b/storage/ndb/src/kernel/blocks/dbtup/tuppage.cpp
index 8955faff99e..7ebbde93ac7 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/tuppage.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/tuppage.cpp
@@ -461,9 +461,8 @@ operator<< (NdbOut& out, const Tup_fixsize_page& page)
<< " free: " << page.free_space;
out << " free list: " << hex << page.next_free_index << " " << flush;
- Uint32 startTuple = page.next_free_index >> 16;
-
#if 0
+ Uint32 startTuple = page.next_free_index >> 16;
Uint32 cnt = 0;
Uint32 next= startTuple;
while((next & 0xFFFF) != 0xFFFF)
diff --git a/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp b/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp
index 407dfae5865..44aa6182b54 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp
@@ -94,6 +94,7 @@ struct Tup_fixsize_page
* Alloc record from page
* return page_idx
**/
+ Tup_fixsize_page() {}
Uint32 alloc_record();
Uint32 alloc_record(Uint32 page_idx);
Uint32 free_record(Uint32 page_idx);
@@ -148,6 +149,7 @@ struct Tup_varsize_page
Uint32 m_data[DATA_WORDS];
+ Tup_varsize_page() {}
void init();
Uint32* get_free_space_ptr() {
diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp
index a0e1cbef61c..fe59b8bba2c 100644
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp
@@ -51,7 +51,6 @@ Dbtux::statRecordsInRange(ScanOpPtr scanPtr, Uint32* out)
TreePos pos1 = scan.m_scanPos;
TreePos pos2;
{ // as in scanFirst()
- TreeHead& tree = frag.m_tree;
setKeyAttrs(frag);
const unsigned idir = 1;
const ScanBound& bound = *scan.m_bound[idir];
diff --git a/storage/ndb/src/kernel/blocks/diskpage.hpp b/storage/ndb/src/kernel/blocks/diskpage.hpp
index 579b538c910..4119c328e35 100644
--- a/storage/ndb/src/kernel/blocks/diskpage.hpp
+++ b/storage/ndb/src/kernel/blocks/diskpage.hpp
@@ -54,6 +54,7 @@ struct File_formats
Uint32 m_node_id;
Uint32 m_file_type;
Uint32 m_time; // time(0)
+ Zero_page_header() {}
void init(File_type ft, Uint32 node_id, Uint32 version, Uint32 now);
int validate(File_type ft, Uint32 node_id, Uint32 version, Uint32 now);
};
@@ -86,6 +87,7 @@ struct File_formats
Uint32 m_fragment_id;
Uint32 m_next_free_extent;
};
+ Extent_header() {}
Uint32 m_page_bitmask[1]; // (BitsPerPage*ExtentSize)/(32*PageSize)
Uint32 get_free_bits(Uint32 page) const;
Uint32 get_free_word_offset(Uint32 page) const;
@@ -102,6 +104,7 @@ struct File_formats
struct Page_header m_page_header;
Extent_header m_extents[1];
+ Extent_page() {}
Extent_header* get_header(Uint32 extent_no, Uint32 extent_size);
};
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
index 84a0ada2d01..4db07591b60 100644
--- a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
@@ -192,7 +192,6 @@ private:
void execNDB_STARTCONF(Signal* signal);
void execREAD_NODESREQ(Signal* signal);
void execNDB_STARTREF(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execSTOP_PERM_REF(Signal* signal);
void execSTOP_PERM_CONF(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
index a925eb4beaf..ae5afa7a57b 100644
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
@@ -81,7 +81,6 @@ Ndbcntr::Ndbcntr(Block_context& ctx):
addRecSignal(GSN_NDB_STARTCONF, &Ndbcntr::execNDB_STARTCONF);
addRecSignal(GSN_READ_NODESREQ, &Ndbcntr::execREAD_NODESREQ);
addRecSignal(GSN_NDB_STARTREF, &Ndbcntr::execNDB_STARTREF);
- addRecSignal(GSN_SET_VAR_REQ, &Ndbcntr::execSET_VAR_REQ);
addRecSignal(GSN_STOP_PERM_REF, &Ndbcntr::execSTOP_PERM_REF);
addRecSignal(GSN_STOP_PERM_CONF, &Ndbcntr::execSTOP_PERM_CONF);
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
index e2d402ca76a..8fc9e870b80 100644
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
@@ -75,8 +75,8 @@ static BlockInfo ALL_BLOCKS[] = {
{ DBTUP_REF, 1 , 4000, 4007 },
{ DBDICT_REF, 1 , 6000, 6003 },
{ NDBCNTR_REF, 0 , 1000, 1999 },
+ { CMVMI_REF, 1 , 9000, 9999 }, // before QMGR
{ QMGR_REF, 1 , 1, 999 },
- { CMVMI_REF, 1 , 9000, 9999 },
{ TRIX_REF, 1 , 0, 0 },
{ BACKUP_REF, 1 , 10000, 10999 },
{ DBUTIL_REF, 1 , 11000, 11999 },
@@ -2067,23 +2067,6 @@ Ndbcntr::execDUMP_STATE_ORD(Signal* signal)
}//Ndbcntr::execDUMP_STATE_ORD()
-void Ndbcntr::execSET_VAR_REQ(Signal* signal) {
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
-
- switch (var) {
- case TimeToWaitAlive:
- // Valid only during start so value not set.
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- }// switch
-#endif
-}//Ndbcntr::execSET_VAR_REQ()
-
void Ndbcntr::updateNodeState(Signal* signal, const NodeState& newState) const{
NodeStateRep * const stateRep = (NodeStateRep *)&signal->theData[0];
diff --git a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
index 9b7b6b7f41c..8d51b24ec6a 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
@@ -249,7 +249,6 @@ private:
void execAPI_REGREQ(Signal* signal);
void execAPI_FAILCONF(Signal* signal);
void execREAD_NODESREQ(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execAPI_FAILREQ(Signal* signal);
void execREAD_NODESREF(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
index 23bbe94f020..f9950072ab4 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
@@ -90,7 +90,6 @@ Qmgr::Qmgr(Block_context& ctx)
addRecSignal(GSN_API_FAILREQ, &Qmgr::execAPI_FAILREQ);
addRecSignal(GSN_API_FAILCONF, &Qmgr::execAPI_FAILCONF);
addRecSignal(GSN_READ_NODESREQ, &Qmgr::execREAD_NODESREQ);
- addRecSignal(GSN_SET_VAR_REQ, &Qmgr::execSET_VAR_REQ);
addRecSignal(GSN_API_BROADCAST_REP, &Qmgr::execAPI_BROADCAST_REP);
addRecSignal(GSN_NODE_FAILREP, &Qmgr::execNODE_FAILREP);
diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index c8ba7b5aad0..4b4fba01889 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -5010,34 +5010,6 @@ Qmgr::execDUMP_STATE_ORD(Signal* signal)
#endif
}//Qmgr::execDUMP_STATE_ORD()
-void Qmgr::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- UintR val = setVarReq->value();
-
- switch (var) {
- case HeartbeatIntervalDbDb:
- setHbDelay(val/10);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case HeartbeatIntervalDbApi:
- setHbApiDelay(val/10);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case ArbitTimeout:
- setArbitTimeout(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- }// switch
-#endif
-}//execSET_VAR_REQ()
void
Qmgr::execAPI_BROADCAST_REP(Signal* signal)
diff --git a/storage/ndb/src/kernel/vm/WatchDog.cpp b/storage/ndb/src/kernel/vm/WatchDog.cpp
index d8311ec5d35..d1abb709b1e 100644
--- a/storage/ndb/src/kernel/vm/WatchDog.cpp
+++ b/storage/ndb/src/kernel/vm/WatchDog.cpp
@@ -22,7 +22,10 @@
#include <NdbOut.hpp>
#include <NdbSleep.h>
#include <ErrorHandlingMacros.hpp>
-
+#include <EventLogger.hpp>
+
+extern EventLogger g_eventLogger;
+
extern "C"
void*
runWatchDog(void* w){
@@ -125,7 +128,7 @@ WatchDog::run(){
last_stuck_action = "Unknown place";
break;
}//switch
- ndbout << "Ndb kernel is stuck in: " << last_stuck_action << endl;
+ g_eventLogger.warning("Ndb kernel is stuck in: %s", last_stuck_action);
if(alerts == 3){
shutdownSystem(last_stuck_action);
}
diff --git a/storage/ndb/src/kernel/vm/ndbd_malloc.cpp b/storage/ndb/src/kernel/vm/ndbd_malloc.cpp
index 9386e3c7cd3..21a26ff11d8 100644
--- a/storage/ndb/src/kernel/vm/ndbd_malloc.cpp
+++ b/storage/ndb/src/kernel/vm/ndbd_malloc.cpp
@@ -22,12 +22,14 @@
#include <stdio.h>
#endif
+#ifdef TRACE_MALLOC
static void xxx(size_t size, size_t *s_m, size_t *s_k, size_t *s_b)
{
*s_m = size/1024/1024;
*s_k = (size - *s_m*1024*1024)/1024;
*s_b = size - *s_m*1024*1024-*s_k*1024;
}
+#endif
static Uint64 g_allocated_memory;
void *ndbd_malloc(size_t size)
diff --git a/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp b/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp
index 2c2d66d1334..70637a362d0 100644
--- a/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp
+++ b/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp
@@ -220,6 +220,7 @@ Ndbd_mem_manager::init(bool alloc_less_memory)
while (cnt < MAX_CHUNKS && allocated < pages)
{
InitChunk chunk;
+ LINT_INIT(chunk.m_start);
#if defined(_lint) || defined(FORCE_INIT_OF_VARS)
memset((char*) &chunk, 0 , sizeof(chunk));
diff --git a/storage/ndb/src/mgmapi/mgmapi.cpp b/storage/ndb/src/mgmapi/mgmapi.cpp
index 28dbf573bdf..f13e5880e22 100644
--- a/storage/ndb/src/mgmapi/mgmapi.cpp
+++ b/storage/ndb/src/mgmapi/mgmapi.cpp
@@ -2233,43 +2233,6 @@ ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, int nodetype,
return nodeid;
}
-/*****************************************************************************
- * Global Replication
- ******************************************************************************/
-extern "C"
-int
-ndb_mgm_rep_command(NdbMgmHandle handle, unsigned int request,
- unsigned int* replication_id,
- struct ndb_mgm_reply* /*reply*/)
-{
- SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_rep_command");
- const ParserRow<ParserDummy> replication_reply[] = {
- MGM_CMD("global replication reply", NULL, ""),
- MGM_ARG("result", String, Mandatory, "Error message"),
- MGM_ARG("id", Int, Optional, "Id of global replication"),
- MGM_END()
- };
- CHECK_HANDLE(handle, -1);
- CHECK_CONNECTED(handle, -1);
-
- Properties args;
- args.put("request", request);
- const Properties *reply;
- reply = ndb_mgm_call(handle, replication_reply, "rep", &args);
- CHECK_REPLY(reply, -1);
-
- const char * result;
- reply->get("result", &result);
- reply->get("id", replication_id);
- if(strcmp(result,"Ok")!=0) {
- delete reply;
- return -1;
- }
-
- delete reply;
- return 0;
-}
-
extern "C"
int
ndb_mgm_set_int_parameter(NdbMgmHandle handle,
diff --git a/storage/ndb/src/mgmclient/main.cpp b/storage/ndb/src/mgmclient/main.cpp
index f9b093f132a..44408362f09 100644
--- a/storage/ndb/src/mgmclient/main.cpp
+++ b/storage/ndb/src/mgmclient/main.cpp
@@ -128,8 +128,6 @@ read_and_execute(int _try_reconnect)
int main(int argc, char** argv){
NDB_INIT(argv[0]);
- const char *_host = 0;
- int _port = 0;
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
diff --git a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp
index b159c90605e..b63d4d8bc17 100644
--- a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp
+++ b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp
@@ -837,7 +837,7 @@ InitConfigFileParser::parse_mycnf()
opt.arg_type = REQUIRED_ARG;
options.push_back(opt);
- opt.name = "api";
+ opt.name = "ndbapi";
opt.id = 256;
opt.value = (gptr*)malloc(sizeof(char*));
opt.var_type = GET_STR;
@@ -852,7 +852,6 @@ InitConfigFileParser::parse_mycnf()
mysqld = &options[idx+2];
api = &options[idx+3];
}
-
Context ctx(m_info, m_errstream);
const char *groups[]= { "cluster_config", 0 };
diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
index 5560259a957..38223502175 100644
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -701,7 +701,7 @@ int MgmtSrvr::okToSendTo(NodeId nodeId, bool unCond)
return WRONG_PROCESS_TYPE;
// Check if we have contact with it
if(unCond){
- if(theFacade->theClusterMgr->getNodeInfo(nodeId).connected)
+ if(theFacade->theClusterMgr->getNodeInfo(nodeId).m_api_reg_conf)
return 0;
}
else if (theFacade->get_node_alive(nodeId) == true)
@@ -1577,32 +1577,85 @@ MgmtSrvr::status(int nodeId,
}
int
-MgmtSrvr::setEventReportingLevelImpl(int nodeId,
+MgmtSrvr::setEventReportingLevelImpl(int nodeId_arg,
const EventSubscribeReq& ll)
{
SignalSender ss(theFacade);
- ss.lock();
-
- SimpleSignal ssig;
- EventSubscribeReq * dst =
- CAST_PTR(EventSubscribeReq, ssig.getDataPtrSend());
- ssig.set(ss,TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ,
- EventSubscribeReq::SignalLength);
- *dst = ll;
-
- NodeBitmask nodes;
+ NdbNodeBitmask nodes;
+ int retries = 30;
nodes.clear();
- Uint32 max = (nodeId == 0) ? (nodeId = 1, MAX_NDB_NODES) : nodeId;
- for(; (Uint32) nodeId <= max; nodeId++)
+ while (1)
{
- if (nodeTypes[nodeId] != NODE_TYPE_DB)
- continue;
- if (okToSendTo(nodeId, true))
- continue;
- if (ss.sendSignal(nodeId, &ssig) == SEND_OK)
+ Uint32 nodeId, max;
+ ss.lock();
+ SimpleSignal ssig;
+ EventSubscribeReq * dst =
+ CAST_PTR(EventSubscribeReq, ssig.getDataPtrSend());
+ ssig.set(ss,TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ,
+ EventSubscribeReq::SignalLength);
+ *dst = ll;
+
+ if (nodeId_arg == 0)
{
- nodes.set(nodeId);
+ // all nodes
+ nodeId = 1;
+ max = MAX_NDB_NODES;
+ }
+ else
+ {
+ // only one node
+ max = nodeId = nodeId_arg;
+ }
+ // first make sure nodes are sendable
+ for(; nodeId <= max; nodeId++)
+ {
+ if (nodeTypes[nodeId] != NODE_TYPE_DB)
+ continue;
+ if (okToSendTo(nodeId, true))
+ {
+ if (theFacade->theClusterMgr->getNodeInfo(nodeId).connected == false)
+ {
+ // node not connected we can safely skip this one
+ continue;
+ }
+ // api_reg_conf not recevied yet, need to retry
+ break;
+ }
+ }
+ if (nodeId <= max)
+ {
+ if (--retries)
+ {
+ ss.unlock();
+ NdbSleep_MilliSleep(100);
+ continue;
+ }
+ return SEND_OR_RECEIVE_FAILED;
+ }
+
+ if (nodeId_arg == 0)
+ {
+ // all nodes
+ nodeId = 1;
+ max = MAX_NDB_NODES;
+ }
+ else
+ {
+ // only one node
+ max = nodeId = nodeId_arg;
}
+ // now send to all sendable nodes nodes
+ // note, lock is held, so states have not changed
+ for(; (Uint32) nodeId <= max; nodeId++)
+ {
+ if (nodeTypes[nodeId] != NODE_TYPE_DB)
+ continue;
+ if (theFacade->theClusterMgr->getNodeInfo(nodeId).connected == false)
+ continue; // node is not connected, skip
+ if (ss.sendSignal(nodeId, &ssig) == SEND_OK)
+ nodes.set(nodeId);
+ }
+ break;
}
if (nodes.isclear())
@@ -1613,6 +1666,7 @@ MgmtSrvr::setEventReportingLevelImpl(int nodeId,
int error = 0;
while (!nodes.isclear())
{
+ Uint32 nodeId;
SimpleSignal *signal = ss.waitFor();
int gsn = signal->readSignalNumber();
nodeId = refToNode(signal->header.theSendersBlockRef);
diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
index 66e2fde0d40..19804f735b4 100644
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -594,7 +594,6 @@ private:
*/
enum WaitSignalType {
NO_WAIT, // We don't expect to receive any signal
- WAIT_SET_VAR, // Accept SET_VAR_CONF and SET_VAR_REF
WAIT_SUBSCRIBE_CONF // Accept event subscription confirmation
};
diff --git a/storage/ndb/src/mgmsrv/Services.cpp b/storage/ndb/src/mgmsrv/Services.cpp
index 1cde5242a1d..dc865c594c0 100644
--- a/storage/ndb/src/mgmsrv/Services.cpp
+++ b/storage/ndb/src/mgmsrv/Services.cpp
@@ -349,19 +349,6 @@ MgmApiSession::runSession()
switch(ctx.m_status) {
case Parser_t::UnknownCommand:
-#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT
- /* Backwards compatibility for old NDBs that still use
- * the old "GET CONFIG" command.
- */
- size_t i;
- for(i=0; i<strlen(ctx.m_currentToken); i++)
- ctx.m_currentToken[i] = toupper(ctx.m_currentToken[i]);
-
- if(strncmp("GET CONFIG ",
- ctx.m_currentToken,
- strlen("GET CONFIG ")) == 0)
- getConfig_old(ctx);
-#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */
break;
default:
break;
@@ -382,32 +369,6 @@ MgmApiSession::runSession()
DBUG_VOID_RETURN;
}
-#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT
-void
-MgmApiSession::getConfig_old(Parser_t::Context &ctx) {
- Properties args;
-
- Uint32 version, node;
-
- if(sscanf(ctx.m_currentToken, "GET CONFIG %d %d",
- (int *)&version, (int *)&node) != 2) {
- m_output->println("Expected 2 arguments for GET CONFIG");
- return;
- }
-
- /* Put arguments in properties object so we can call the real function */
- args.put("version", version);
- args.put("node", node);
- getConfig_common(ctx, args, true);
-}
-#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */
-
-void
-MgmApiSession::getConfig(Parser_t::Context &ctx,
- const class Properties &args) {
- getConfig_common(ctx, args);
-}
-
static Properties *
backward(const char * base, const Properties* reply){
Properties * ret = new Properties();
@@ -584,9 +545,9 @@ MgmApiSession::get_nodeid(Parser_t::Context &,
}
void
-MgmApiSession::getConfig_common(Parser_t::Context &,
- const class Properties &args,
- bool compat) {
+MgmApiSession::getConfig(Parser_t::Context &,
+ const class Properties &args)
+{
Uint32 version, node = 0;
args.get("version", &version);
@@ -600,47 +561,6 @@ MgmApiSession::getConfig_common(Parser_t::Context &,
return;
}
- if(version > 0 && version < makeVersion(3, 5, 0) && compat){
- Properties *reply = backward("", conf->m_oldConfig);
- reply->put("Version", version);
- reply->put("LocalNodeId", node);
-
- backward("", reply);
- //reply->print();
-
- const Uint32 size = reply->getPackedSize();
- Uint32 *buffer = new Uint32[size/4+1];
-
- reply->pack(buffer);
- delete reply;
-
- const int uurows = (size + 44)/45;
- char * uubuf = new char[uurows * 62+5];
-
- const int uusz = uuencode_mem(uubuf, (char *)buffer, size);
- delete[] buffer;
-
- m_output->println("GET CONFIG %d %d %d %d %d",
- 0, version, node, size, uusz);
-
- m_output->println("begin 664 Ndb_cfg.bin");
-
- /* XXX Need to write directly to the socket, because the uubuf is not
- * NUL-terminated. This could/should probably be done in a nicer way.
- */
- write_socket(m_socket, MAX_WRITE_TIMEOUT, uubuf, uusz);
- delete[] uubuf;
-
- m_output->println("end");
- m_output->println("");
- return;
- }
-
- if(compat){
- m_output->println("GET CONFIG %d %d %d %d %d",1, version, 0, 0, 0);
- return;
- }
-
if(node != 0){
bool compatible;
switch (m_mgmsrv.getNodeType(node)) {
@@ -856,8 +776,7 @@ MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &,
DBUG_PRINT("enter",("node=%d, category=%d, level=%d", node, cat, level));
- /* XXX should use constants for this value */
- if(level > 15) {
+ if(level > NDB_MGM_MAX_LOGLEVEL) {
m_output->println(reply);
m_output->println("result: Invalid loglevel %d", level);
m_output->println("");
@@ -900,8 +819,7 @@ MgmApiSession::setLogLevel(Parser<MgmApiSession>::Context &,
args.get("category", &cat);
args.get("level", &level);
- /* XXX should use constants for this value */
- if(level > 15) {
+ if(level > NDB_MGM_MAX_LOGLEVEL) {
m_output->println("set loglevel reply");
m_output->println("result: Invalid loglevel", errorString.c_str());
m_output->println("");
@@ -1604,7 +1522,7 @@ MgmApiSession::listen_event(Parser<MgmApiSession>::Context & ctx,
}
int level = atoi(spec[1].c_str());
- if(level < 0 || level > 15){
+ if(level < 0 || level > NDB_MGM_MAX_LOGLEVEL){
msg.appfmt("Invalid level: >%s<", spec[1].c_str());
result = -1;
goto done;
diff --git a/storage/ndb/src/mgmsrv/Services.hpp b/storage/ndb/src/mgmsrv/Services.hpp
index f6af16d58ba..c112c66da36 100644
--- a/storage/ndb/src/mgmsrv/Services.hpp
+++ b/storage/ndb/src/mgmsrv/Services.hpp
@@ -24,9 +24,6 @@
#include "MgmtSrvr.hpp"
-/** Undefine this to remove backwards compatibility for "GET CONFIG". */
-#define MGM_GET_CONFIG_BACKWARDS_COMPAT
-
class MgmApiSession : public SocketServer::Session
{
static void stop_session_if_timed_out(SocketServer::Session *_s, void *data);
@@ -49,9 +46,6 @@ private:
Parser_t::Context *m_ctx;
Uint64 m_session_id;
- void getConfig_common(Parser_t::Context &ctx,
- const class Properties &args,
- bool compat = false);
const char *get_error_text(int err_no)
{ return m_mgmsrv.getErrorText(err_no, m_err_str, sizeof(m_err_str)); }
@@ -61,9 +55,6 @@ public:
void runSession();
void getConfig(Parser_t::Context &ctx, const class Properties &args);
-#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT
- void getConfig_old(Parser_t::Context &ctx);
-#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */
void get_nodeid(Parser_t::Context &ctx, const class Properties &args);
void getVersion(Parser_t::Context &ctx, const class Properties &args);
diff --git a/storage/ndb/src/ndbapi/ClusterMgr.cpp b/storage/ndb/src/ndbapi/ClusterMgr.cpp
index b162b85d61e..2a794f69ecb 100644
--- a/storage/ndb/src/ndbapi/ClusterMgr.cpp
+++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp
@@ -313,7 +313,7 @@ ClusterMgr::showState(NodeId nodeId){
ClusterMgr::Node::Node()
: m_state(NodeState::SL_NOTHING) {
compatible = nfCompleteRep = true;
- connected = defined = m_alive = false;
+ connected = defined = m_alive = m_api_reg_conf = false;
m_state.m_connected_nodes.clear();
}
@@ -385,6 +385,8 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
node.m_info.m_version);
}
+ node.m_api_reg_conf = true;
+
node.m_state = apiRegConf->nodeState;
if (node.compatible && (node.m_state.startLevel == NodeState::SL_STARTED ||
node.m_state.startLevel == NodeState::SL_SINGLEUSER)){
@@ -501,6 +503,7 @@ ClusterMgr::reportDisconnected(NodeId nodeId){
noOfConnectedNodes--;
theNodes[nodeId].connected = false;
+ theNodes[nodeId].m_api_reg_conf = false;
theNodes[nodeId].m_state.m_connected_nodes.clear();
reportNodeFailed(nodeId, true);
diff --git a/storage/ndb/src/ndbapi/ClusterMgr.hpp b/storage/ndb/src/ndbapi/ClusterMgr.hpp
index bb20d447c0c..6e74620dd4f 100644
--- a/storage/ndb/src/ndbapi/ClusterMgr.hpp
+++ b/storage/ndb/src/ndbapi/ClusterMgr.hpp
@@ -70,6 +70,7 @@ public:
bool compatible; // Version is compatible
bool nfCompleteRep; // NF Complete Rep has arrived
bool m_alive; // Node is alive
+ bool m_api_reg_conf;// API_REGCONF has arrived
NodeInfo m_info;
NodeState m_state;
diff --git a/storage/ndb/src/ndbapi/Makefile.am b/storage/ndb/src/ndbapi/Makefile.am
index 90e61b5b188..8469110fddb 100644
--- a/storage/ndb/src/ndbapi/Makefile.am
+++ b/storage/ndb/src/ndbapi/Makefile.am
@@ -15,6 +15,10 @@
#SUBDIRS = signal-sender
+noinst_PROGRAMS = ndberror_check
+
+ndberror_check_SOURCES = ndberror_check.c
+
noinst_LTLIBRARIES = libndbapi.la
libndbapi_la_SOURCES = \
@@ -61,6 +65,11 @@ NDB_CXXFLAGS_RELEASE_LOC = -O2
include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_ndbapi.mk.am
+ndberror_check_LDFLAGS = \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/strings/libmystrings.a
+
# Don't update the files from bitkeeper
%::SCCS/s.%
diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
index 6490ec91300..52760dbbd36 100644
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
@@ -1338,6 +1338,7 @@ operator<<(NdbOut& out, const Gci_container& gci)
return out;
}
+#ifdef VM_TRACE
static
NdbOut&
operator<<(NdbOut& out, const Gci_container_pod& gci)
@@ -1346,7 +1347,7 @@ operator<<(NdbOut& out, const Gci_container_pod& gci)
out << *ptr;
return out;
}
-
+#endif
static
Gci_container*
@@ -1586,7 +1587,7 @@ NdbEventBuffer::complete_outof_order_gcis()
ndbout_c(" moved %ld rows -> %ld", (long) bucket->m_data.m_count,
(long) m_complete_data.m_data.m_count);
#else
- ndbout_c("");
+ ndbout_c(" ");
#endif
}
bzero(bucket, sizeof(Gci_container));
diff --git a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
index 8958b6ec596..b5019cf7386 100644
--- a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
@@ -715,6 +715,22 @@ insertATTRINFO_error1:
}//NdbOperation::insertATTRINFOloop()
+NdbOperation::AbortOption
+NdbOperation::getAbortOption() const
+{
+ return (AbortOption)m_abortOption;
+}
-
-
+int
+NdbOperation::setAbortOption(AbortOption ao)
+{
+ switch(ao)
+ {
+ case AO_IgnoreError:
+ case AbortOnError:
+ m_abortOption= ao;
+ return 0;
+ default:
+ return -1;
+ }
+}
diff --git a/storage/ndb/src/ndbapi/NdbOperationExec.cpp b/storage/ndb/src/ndbapi/NdbOperationExec.cpp
index ba1905760c3..9fe85265a0c 100644
--- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp
@@ -200,14 +200,14 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr,
OperationType tOperationType = theOperationType;
Uint32 tTupKeyLen = theTupKeyLen;
- Uint8 abortOption = (ao == DefaultAbortOption) ? m_abortOption : ao;
+ Uint8 abortOption = (ao == DefaultAbortOption) ? (Uint8) m_abortOption : (Uint8) ao;
tcKeyReq->setDirtyFlag(tReqInfo, tDirtyIndicator);
tcKeyReq->setOperationType(tReqInfo, tOperationType);
tcKeyReq->setKeyLength(tReqInfo, tTupKeyLen);
// A simple read is always ignore error
- abortOption = tSimpleState ? AO_IgnoreError : abortOption;
+ abortOption = tSimpleState ? (Uint8) AO_IgnoreError : (Uint8) abortOption;
tcKeyReq->setAbortOption(tReqInfo, abortOption);
m_abortOption = abortOption;
diff --git a/storage/ndb/src/ndbapi/ObjectMap.hpp b/storage/ndb/src/ndbapi/ObjectMap.hpp
index 13f9be66c24..9113a70798a 100644
--- a/storage/ndb/src/ndbapi/ObjectMap.hpp
+++ b/storage/ndb/src/ndbapi/ObjectMap.hpp
@@ -46,7 +46,7 @@ private:
} * m_map;
NdbMutex * m_mutex;
- void expand(Uint32 newSize);
+ int expand(Uint32 newSize);
};
inline
@@ -73,9 +73,8 @@ NdbObjectIdMap::map(void * object){
// lock();
- if(m_firstFree == InvalidId){
- expand(m_expandSize);
- }
+ if(m_firstFree == InvalidId && expand(m_expandSize))
+ return InvalidId;
Uint32 ff = m_firstFree;
m_firstFree = m_map[ff].m_next;
@@ -127,7 +126,7 @@ NdbObjectIdMap::getObject(Uint32 id){
return 0;
}
-inline void
+inline int
NdbObjectIdMap::expand(Uint32 incSize){
NdbMutex_Lock(m_mutex);
Uint32 newSize = m_size + incSize;
@@ -146,9 +145,11 @@ NdbObjectIdMap::expand(Uint32 incSize){
}
else
{
- ndbout_c("NdbObjectIdMap::expand unable to expand!!");
+ NdbMutex_Unlock(m_mutex);
+ return -1;
}
NdbMutex_Unlock(m_mutex);
+ return 0;
}
#endif
diff --git a/storage/ndb/src/ndbapi/SignalSender.cpp b/storage/ndb/src/ndbapi/SignalSender.cpp
index 0c0a9bd0e1f..d6d9f4446ce 100644
--- a/storage/ndb/src/ndbapi/SignalSender.cpp
+++ b/storage/ndb/src/ndbapi/SignalSender.cpp
@@ -19,15 +19,6 @@
#include <signaldata/NFCompleteRep.hpp>
#include <signaldata/NodeFailRep.hpp>
-#ifdef NOT_USED
-static
-void
-require(bool x)
-{
- if (!x)
- abort();
-}
-#endif
SimpleSignal::SimpleSignal(bool dealloc){
memset(this, 0, sizeof(* this));
diff --git a/storage/ndb/src/ndbapi/SignalSender.hpp b/storage/ndb/src/ndbapi/SignalSender.hpp
index ec874e63c52..4cad759a334 100644
--- a/storage/ndb/src/ndbapi/SignalSender.hpp
+++ b/storage/ndb/src/ndbapi/SignalSender.hpp
@@ -32,7 +32,7 @@ public:
Uint32 theData[25];
LinearSectionPtr ptr[3];
- int readSignalNumber() {return header.theVerId_signalNumber; }
+ int readSignalNumber() const {return header.theVerId_signalNumber; }
Uint32 *getDataPtrSend() { return theData; }
const Uint32 *getDataPtr() const { return theData; }
diff --git a/storage/ndb/src/ndbapi/TransporterFacade.cpp b/storage/ndb/src/ndbapi/TransporterFacade.cpp
index 2402c979620..eabfc6bc371 100644
--- a/storage/ndb/src/ndbapi/TransporterFacade.cpp
+++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp
@@ -1527,7 +1527,8 @@ SignalSender::sendSignal(Uint16 nodeId, const SimpleSignal * s){
signalLogger.flushSignalLog();
}
#endif
-
+ assert(getNodeInfo(nodeId).m_api_reg_conf == true ||
+ s->readSignalNumber() == GSN_API_REGREQ);
return theFacade->theTransporterRegistry->prepareSend(&s->header,
1, // JBB
&s->theData[0],
diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c
index 24c79ce1e2c..8e70f5ee250 100644
--- a/storage/ndb/src/ndbapi/ndberror.c
+++ b/storage/ndb/src/ndbapi/ndberror.c
@@ -367,7 +367,7 @@ ErrorBundle ErrorCodes[] = {
{ 771, HA_WRONG_CREATE_OPTION, AE, "Given NODEGROUP doesn't exist in this cluster" },
{ 772, HA_WRONG_CREATE_OPTION, IE, "Given fragmentType doesn't exist" },
{ 749, HA_WRONG_CREATE_OPTION, IE, "Primary Table in wrong state" },
- { 763, HA_WRONG_CREATE_OPTION, SE, "Invalid undo buffer size" },
+ { 779, HA_WRONG_CREATE_OPTION, SE, "Invalid undo buffer size" },
{ 764, HA_WRONG_CREATE_OPTION, SE, "Invalid extent size" },
{ 765, DMEC, SE, "Out of filegroup records" },
{ 750, IE, SE, "Invalid file type" },
diff --git a/storage/ndb/src/ndbapi/ndberror_check.c b/storage/ndb/src/ndbapi/ndberror_check.c
new file mode 100644
index 00000000000..6986d99f3d4
--- /dev/null
+++ b/storage/ndb/src/ndbapi/ndberror_check.c
@@ -0,0 +1,38 @@
+/* Copyright (C) 2007 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <stdio.h>
+#include "ndberror.c"
+
+int main()
+{
+ int i, j, error = 0;
+
+ /* check for duplicate error codes */
+ for(i = 0; i < NbErrorCodes; i++)
+ {
+ for(j = i + 1; j < NbErrorCodes; j++)
+ {
+ if (ErrorCodes[i].code == ErrorCodes[j].code)
+ {
+ fprintf(stderr, "Duplicate error code %u\n", ErrorCodes[i].code);
+ error = 1;
+ }
+ }
+ }
+ if (error)
+ return -1;
+ return 0;
+}
diff --git a/storage/ndb/test/include/NdbRestarter.hpp b/storage/ndb/test/include/NdbRestarter.hpp
index 3f7783be6e0..916848adf45 100644
--- a/storage/ndb/test/include/NdbRestarter.hpp
+++ b/storage/ndb/test/include/NdbRestarter.hpp
@@ -61,6 +61,8 @@ public:
int dumpStateAllNodes(const int * _args, int _num_args);
int getMasterNodeId();
+ int getNextMasterNodeId(int nodeId);
+ int getNodeGroup(int nodeId);
int getRandomNodeSameNodeGroup(int nodeId, int randomNumber);
int getRandomNodeOtherNodeGroup(int nodeId, int randomNumber);
int getRandomNotMasterNodeId(int randomNumber);
diff --git a/storage/ndb/test/ndbapi/testBitfield.cpp b/storage/ndb/test/ndbapi/testBitfield.cpp
index e26f495f5a4..8ba8f3d92ef 100644
--- a/storage/ndb/test/ndbapi/testBitfield.cpp
+++ b/storage/ndb/test/ndbapi/testBitfield.cpp
@@ -8,6 +8,15 @@
static const char* _dbname = "TEST_DB";
static int g_loops = 7;
+
+NDB_STD_OPTS_VARS;
+
+static struct my_option my_long_options[] =
+{
+ NDB_STD_OPTS("ndb_desc"),
+ { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
+};
+
static void usage()
{
ndb_std_print_version();
@@ -36,9 +45,10 @@ main(int argc, char** argv){
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
- argc--;
- argv++;
-
+ if ((ho_error=handle_options(&argc, &argv, my_long_options,
+ ndb_std_get_one_option)))
+ return NDBT_ProgramExit(NDBT_WRONGARGS);
+
Ndb_cluster_connection con(opt_connect_str);
if(con.connect(12, 5, 1))
{
diff --git a/storage/ndb/test/ndbapi/testDict.cpp b/storage/ndb/test/ndbapi/testDict.cpp
index f7de43aea20..f72b9dee80b 100644
--- a/storage/ndb/test/ndbapi/testDict.cpp
+++ b/storage/ndb/test/ndbapi/testDict.cpp
@@ -321,7 +321,11 @@ int runCreateAndDropAtRandom(NDBT_Context* ctx, NDBT_Step* step)
}
i++;
}
-
+
+ for (Uint32 i = 0; i<numTables; i++)
+ if (tabList[i])
+ pDic->dropTable(NDBT_Tables::getTable(i)->getName());
+
delete [] tabList;
return result;
}
diff --git a/storage/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp
index 04e77f70c38..34cb356236c 100644
--- a/storage/ndb/test/ndbapi/testNodeRestart.cpp
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp
@@ -1273,6 +1273,85 @@ int runBug25984(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
+int
+runBug26457(NDBT_Context* ctx, NDBT_Step* step)
+{
+ NdbRestarter res;
+ if (res.getNumDbNodes() < 4)
+ return NDBT_OK;
+
+ int loops = ctx->getNumLoops();
+ while (loops --)
+ {
+retry:
+ int master = res.getMasterNodeId();
+ int next = res.getNextMasterNodeId(master);
+
+ ndbout_c("master: %d next: %d", master, next);
+
+ if (res.getNodeGroup(master) == res.getNodeGroup(next))
+ {
+ res.restartOneDbNode(next, false, false, true);
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+ goto retry;
+ }
+
+ int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 2 };
+
+ if (res.dumpStateOneNode(next, val2, 2))
+ return NDBT_FAILED;
+
+ if (res.insertErrorInNode(next, 7180))
+ return NDBT_FAILED;
+
+ res.restartOneDbNode(master, false, false, true);
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+ }
+
+ return NDBT_OK;
+}
+
+int
+runBug26481(NDBT_Context* ctx, NDBT_Step* step)
+{
+
+ int result = NDBT_OK;
+ int loops = ctx->getNumLoops();
+ int records = ctx->getNumRecords();
+ NdbRestarter res;
+
+ int node = res.getRandomNotMasterNodeId(rand());
+ ndbout_c("node: %d", node);
+ if (res.restartOneDbNode(node, true, true, true))
+ return NDBT_FAILED;
+
+ if (res.waitNodesNoStart(&node, 1))
+ return NDBT_FAILED;
+
+ int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+ if (res.dumpStateOneNode(node, val2, 2))
+ return NDBT_FAILED;
+
+ if (res.insertErrorInNode(node, 7018))
+ return NDBT_FAILED;
+
+ if (res.startNodes(&node, 1))
+ return NDBT_FAILED;
+
+ res.waitNodesStartPhase(&node, 1, 3);
+
+ if (res.waitNodesNoStart(&node, 1))
+ return NDBT_FAILED;
+
+ res.startNodes(&node, 1);
+
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+
+ return NDBT_OK;
+}
NDBT_TESTSUITE(testNodeRestart);
TESTCASE("NoLoad",
@@ -1612,6 +1691,12 @@ TESTCASE("Bug25554", ""){
TESTCASE("Bug25984", ""){
INITIALIZER(runBug25984);
}
+TESTCASE("Bug26457", ""){
+ INITIALIZER(runBug26457);
+}
+TESTCASE("Bug26481", ""){
+ INITIALIZER(runBug26481);
+}
NDBT_TESTSUITE_END(testNodeRestart);
int main(int argc, const char** argv){
diff --git a/storage/ndb/test/run-test/Makefile.am b/storage/ndb/test/run-test/Makefile.am
index b5cb69d266e..d6c6536cfc8 100644
--- a/storage/ndb/test/run-test/Makefile.am
+++ b/storage/ndb/test/run-test/Makefile.am
@@ -18,20 +18,18 @@ testdir=$(prefix)/mysql-test/ndb
include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_util.mk.am
include $(top_srcdir)/storage/ndb/config/type_mgmapiclient.mk.am
+include $(top_srcdir)/storage/ndb/config/type_ndbapitools.mk.am
test_PROGRAMS = atrt
test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \
- conf-daily-basic-ndb08.txt \
- conf-daily-devel-ndb08.txt \
- conf-daily-sql-ndb08.txt \
- conf-ndbmaster.txt \
- conf-shark.txt \
- conf-dl145a.txt
+ conf-ndbmaster.cnf \
+ conf-dl145a.cnf test-tests.txt
test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
- atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh
+ atrt-clear-result.sh autotest-run.sh
+
+atrt_SOURCES = main.cpp setup.cpp files.cpp
-atrt_SOURCES = main.cpp run-test.hpp
INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/test/include
LDADD_LOC = $(top_builddir)/storage/ndb/test/src/libNDBT.a \
$(top_builddir)/storage/ndb/src/libndbclient.la \
@@ -39,6 +37,14 @@ LDADD_LOC = $(top_builddir)/storage/ndb/test/src/libNDBT.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
+atrt_CXXFLAGS = -I$(top_srcdir)/ndb/src/mgmapi \
+ -I$(top_srcdir)/ndb/src/mgmsrv \
+ -I$(top_srcdir)/ndb/include/mgmcommon \
+ -DMYSQLCLUSTERDIR="\"\"" \
+ -DDEFAULT_PREFIX="\"$(prefix)\""
+
+atrt_LDFLAGS = -static @ndb_bin_am_ldflags@
+
wrappersdir=$(prefix)/bin
wrappers_SCRIPTS=atrt-testBackup atrt-mysql-test-run
diff --git a/storage/ndb/test/run-test/atrt-gather-result.sh b/storage/ndb/test/run-test/atrt-gather-result.sh
index 93d4ae428d0..f2473578b41 100755
--- a/storage/ndb/test/run-test/atrt-gather-result.sh
+++ b/storage/ndb/test/run-test/atrt-gather-result.sh
@@ -8,7 +8,7 @@ rm -rf *
while [ $# -gt 0 ]
do
- rsync -a "$1" .
+ rsync -a --exclude='BACKUP' --exclude='ndb_*_fs' "$1" .
shift
done
diff --git a/storage/ndb/test/run-test/atrt.hpp b/storage/ndb/test/run-test/atrt.hpp
new file mode 100644
index 00000000000..14d2dccd245
--- /dev/null
+++ b/storage/ndb/test/run-test/atrt.hpp
@@ -0,0 +1,161 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef atrt_config_hpp
+#define atrt_config_hpp
+
+#include <ndb_global.h>
+#include <Vector.hpp>
+#include <BaseString.hpp>
+#include <Logger.hpp>
+#include <mgmapi.h>
+#include <CpcClient.hpp>
+#include <Properties.hpp>
+
+enum ErrorCodes
+{
+ ERR_OK = 0,
+ ERR_NDB_FAILED = 101,
+ ERR_SERVERS_FAILED = 102,
+ ERR_MAX_TIME_ELAPSED = 103
+};
+
+struct atrt_host
+{
+ size_t m_index;
+ BaseString m_user;
+ BaseString m_basedir;
+ BaseString m_hostname;
+ SimpleCpcClient * m_cpcd;
+ Vector<struct atrt_process*> m_processes;
+};
+
+struct atrt_options
+{
+ enum Feature {
+ AO_REPLICATION = 1,
+ AO_NDBCLUSTER = 2
+ };
+
+ int m_features;
+ Properties m_loaded;
+ Properties m_generated;
+};
+
+struct atrt_process
+{
+ size_t m_index;
+ struct atrt_host * m_host;
+ struct atrt_cluster * m_cluster;
+
+ enum Type {
+ AP_ALL = 255
+ ,AP_NDBD = 1
+ ,AP_NDB_API = 2
+ ,AP_NDB_MGMD = 4
+ ,AP_MYSQLD = 16
+ ,AP_CLIENT = 32
+ ,AP_CLUSTER = 256 // Used for options parsing for "cluster" options
+ } m_type;
+
+ SimpleCpcClient::Process m_proc;
+
+ NdbMgmHandle m_ndb_mgm_handle; // if type == ndb_mgm
+ atrt_process * m_mysqld; // if type == client
+ atrt_process * m_rep_src; // if type == mysqld
+ Vector<atrt_process*> m_rep_dst; // if type == mysqld
+
+ atrt_options m_options;
+};
+
+struct atrt_cluster
+{
+ BaseString m_name;
+ BaseString m_dir;
+ Vector<atrt_process*> m_processes;
+ atrt_options m_options;
+};
+
+struct atrt_config
+{
+ bool m_generated;
+ BaseString m_key;
+ BaseString m_replication;
+ Vector<atrt_host*> m_hosts;
+ Vector<atrt_cluster*> m_clusters;
+ Vector<atrt_process*> m_processes;
+};
+
+struct atrt_testcase
+{
+ bool m_report;
+ bool m_run_all;
+ time_t m_max_time;
+ BaseString m_command;
+ BaseString m_args;
+ BaseString m_name;
+};
+
+extern Logger g_logger;
+
+void require(bool x);
+bool parse_args(int argc, char** argv);
+bool setup_config(atrt_config&);
+bool configure(atrt_config&, int setup);
+bool setup_directories(atrt_config&, int setup);
+bool setup_files(atrt_config&, int setup, int sshx);
+
+bool deploy(atrt_config&);
+bool sshx(atrt_config&, unsigned procmask);
+bool start(atrt_config&, unsigned procmask);
+
+bool remove_dir(const char *, bool incl = true);
+bool connect_hosts(atrt_config&);
+bool connect_ndb_mgm(atrt_config&);
+bool wait_ndb(atrt_config&, int ndb_mgm_node_status);
+bool start_processes(atrt_config&, int);
+bool stop_processes(atrt_config&, int);
+bool update_status(atrt_config&, int);
+int is_running(atrt_config&, int);
+bool gather_result(atrt_config&, int * result);
+
+bool read_test_case(FILE *, atrt_testcase&, int& line);
+bool setup_test_case(atrt_config&, const atrt_testcase&);
+
+bool setup_hosts(atrt_config&);
+
+/**
+ * Global variables...
+ */
+extern Logger g_logger;
+extern atrt_config g_config;
+
+extern const char * g_cwd;
+extern const char * g_my_cnf;
+extern const char * g_user;
+extern const char * g_basedir;
+extern const char * g_prefix;
+extern int g_baseport;
+extern int g_fqpn;
+extern int g_default_ports;
+
+extern const char * g_clusters;
+
+extern const char *save_file;
+extern const char *save_group_suffix;
+extern char *save_extra_file;
+
+#endif
diff --git a/storage/ndb/test/run-test/autotest-boot.sh b/storage/ndb/test/run-test/autotest-boot.sh
new file mode 100644
index 00000000000..31f611460ec
--- /dev/null
+++ b/storage/ndb/test/run-test/autotest-boot.sh
@@ -0,0 +1,165 @@
+#!/bin/sh
+#############################################################
+# This script created by Jonas does the following #
+# Cleans up clones and previous builds, pulls new clones, #
+# builds, deploys, configures the tests and launches ATRT #
+#############################################################
+
+###############
+#Script setup #
+##############
+
+save_args=$*
+VERSION="autotest-boot.sh version 1.00"
+
+DATE=`date '+%Y-%m-%d'`
+HOST=`hostname -s`
+export DATE HOST
+
+set -e
+
+echo "`date` starting: $*"
+
+verbose=0
+do_clone=yes
+build=yes
+
+conf=
+LOCK=$HOME/.autotest-lock
+
+############################
+# Read command line entries#
+############################
+
+while [ "$1" ]
+do
+ case "$1" in
+ --no-clone) do_clone="";;
+ --no-build) build="";;
+ --verbose) verbose=`expr $verbose + 1`;;
+ --clone=*) clone=`echo $1 | sed s/--clone=//`;;
+ --version) echo $VERSION; exit;;
+ --conf=*) conf=`echo $1 | sed s/--conf=//`;;
+ *) RUN=$*;;
+ esac
+ shift
+done
+
+#################################
+# Make sure the config file     #
+# exists; if it does not, exit. #
+# If it does, source (.) it     #
+#################################
+if [ -z "$conf" ]
+then
+ conf=`pwd`/autotest.conf
+fi
+
+if [ -f $conf ]
+then
+ . $conf
+else
+ echo "Can't find config file: $conf"
+ exit
+fi
+
+###############################
+# Validate that all interesting
+# variables were set in conf
+###############################
+vars="src_clone_base install_dir build_dir"
+for i in $vars
+do
+ t=`echo echo \\$$i`
+ if [ -z "`eval $t`" ]
+ then
+ echo "Invalid config: $conf, variable $i is not set"
+ exit
+ fi
+done
+
+###############################
+#Print out the environment vars#
+###############################
+
+if [ $verbose -gt 0 ]
+then
+ env
+fi
+
+####################################
+# Setup the lock file name and path#
+# Setup the clone source location #
+####################################
+
+src_clone=$src_clone_base-$clone
+
+#######################################
+# Check to see if the lock file exists#
+# If it does exit. #
+#######################################
+
+if [ -f $LOCK ]
+then
+ echo "Lock file exists: $LOCK"
+ exit 1
+fi
+
+#######################################
+# If the lock file does not exist then#
+# create it with date and run info #
+#######################################
+
+echo "$DATE $RUN" > $LOCK
+
+#############################
+#If any errors here down, we#
+# trap them, and remove the #
+# Lock file before exit #
+#############################
+if [ `uname -s` != "SunOS" ]
+then
+ trap "rm -f $LOCK" ERR
+fi
+
+# You can add more to this path#
+################################
+
+dst_place=${build_dir}/clone-mysql-$clone-$DATE.$$
+
+#########################################
+# Delete source and pull down the latest#
+#########################################
+
+if [ "$do_clone" ]
+then
+ rm -rf $dst_place
+ bk clone $src_clone $dst_place
+fi
+
+##########################################
+# Build the source, make installs, and #
+# create the database to be rsynced #
+##########################################
+
+if [ "$build" ]
+then
+ cd $dst_place
+ rm -rf $install_dir
+ BUILD/compile-ndb-autotest --prefix=$install_dir
+ make install
+fi
+
+
+################################
+# Start run script #
+################################
+
+script=$install_dir/mysql-test/ndb/autotest-run.sh
+$script $save_args --conf=$conf --install-dir=$install_dir --suite=$RUN --nolock
+
+if [ "$build" ]
+then
+ rm -rf $dst_place
+fi
+rm -f $LOCK
diff --git a/storage/ndb/test/run-test/autotest-run.sh b/storage/ndb/test/run-test/autotest-run.sh
new file mode 100644
index 00000000000..34c3fe53949
--- /dev/null
+++ b/storage/ndb/test/run-test/autotest-run.sh
@@ -0,0 +1,270 @@
+#!/bin/sh
+#############################################################
+# This script created by Jonas does the following #
+# Cleans up clones and previous builds, pulls new clones, #
+# builds, deploys, configures the tests and launches ATRT #
+#############################################################
+
+###############
+#Script setup #
+##############
+
+save_args=$*
+VERSION="autotest-run.sh version 1.00"
+
+DATE=`date '+%Y-%m-%d'`
+HOST=`hostname -s`
+export DATE HOST
+
+set -e
+ulimit -Sc unlimited
+
+echo "`date` starting: $*"
+
+RSYNC_RSH=ssh
+export RSYNC_RSH
+
+verbose=0
+report=yes
+nolock=
+RUN="daily-basic"
+conf=autotest.conf
+LOCK=$HOME/.autotest-lock
+
+############################
+# Read command line entries#
+############################
+
+while [ "$1" ]
+do
+ case "$1" in
+ --verbose) verbose=`expr $verbose + 1`;;
+ --conf=*) conf=`echo $1 | sed s/--conf=//`;;
+ --version) echo $VERSION; exit;;
+ --suite=*) RUN=`echo $1 | sed s/--suite=//`;;
+ --install-dir=*) install_dir=`echo $1 | sed s/--install-dir=//`;;
+ --clone=*) clone=`echo $1 | sed s/--clone=//`;;
+ --nolock) nolock=true;;
+ esac
+ shift
+done
+
+#################################
+# Make sure the config file     #
+# exists; if it does not, exit. #
+# If it does, source (.) it     #
+#################################
+
+install_dir_save=$install_dir
+if [ -f $conf ]
+then
+ . $conf
+else
+ echo "Can't find config file: $conf"
+ exit
+fi
+install_dir=$install_dir_save
+
+###############################
+# Validate that all interesting
+# variables were set in conf
+###############################
+vars="target base_dir install_dir hosts"
+if [ "$report" ]
+then
+ vars="$vars result_host result_path"
+fi
+for i in $vars
+do
+ t=`echo echo \\$$i`
+ if [ -z "`eval $t`" ]
+ then
+ echo "Invalid config: $conf, variable $i is not set"
+ exit
+ fi
+done
+
+###############################
+#Print out the environment vars#
+###############################
+
+if [ $verbose -gt 0 ]
+then
+ env
+fi
+
+#######################################
+# Check to see if the lock file exists#
+# If it does exit. #
+#######################################
+
+if [ -z "$nolock" ]
+then
+ if [ -f $LOCK ]
+ then
+ echo "Lock file exists: $LOCK"
+ exit 1
+ fi
+ echo "$DATE $RUN" > $LOCK
+fi
+
+#############################
+#If any errors here down, we#
+# trap them, and remove the #
+# Lock file before exit #
+#############################
+if [ `uname -s` != "SunOS" ]
+then
+ trap "rm -f $LOCK" ERR
+fi
+
+
+###############################################
+# Check that all interesting files are present#
+###############################################
+
+test_dir=$install_dir/mysql-test/ndb
+atrt=$test_dir/atrt
+test_file=$test_dir/$RUN-tests.txt
+
+if [ ! -f "$test_file" ]
+then
+ echo "Cant find testfile: $test_file"
+ exit 1
+fi
+
+if [ ! -x "$atrt" ]
+then
+ echo "Cant find atrt binary at $atrt"
+ exit 1
+fi
+
+############################
+# check ndb_cpcc fail hosts#
+############################
+failed=`ndb_cpcc $hosts | awk '{ if($1=="Failed"){ print;}}'`
+if [ "$failed" ]
+then
+ echo "Cant contact cpcd on $failed, exiting"
+ exit 1
+fi
+
+#############################
+# Function for replacing the#
+# choose host with real host#
+# names. Note $$ = PID #
+#############################
+choose(){
+ SRC=$1
+ TMP1=/tmp/choose.$$
+ TMP2=/tmp/choose.$$.$$
+ shift
+
+ cp $SRC $TMP1
+ i=1
+ while [ $# -gt 0 ]
+ do
+ sed -e s,"CHOOSE_host$i",$1,g < $TMP1 > $TMP2
+ mv $TMP2 $TMP1
+ shift
+ i=`expr $i + 1`
+ done
+ cat $TMP1
+ rm -f $TMP1
+}
+
+choose_conf(){
+ if [ -f $test_dir/conf-$1-$HOST.cnf ]
+ then
+ echo "$test_dir/conf-$1-$HOST.cnf"
+ elif [ -f $test_dir/conf-$1.cnf ]
+ then
+ echo "$test_dir/conf-$1.cnf"
+ elif [ -f $test_dir/conf-$HOST.cnf ]
+ then
+ echo "$test_dir/conf-$HOST.cnf"
+ else
+ echo "Unable to find conf file looked for" 1>&2
+ echo "$test_dir/conf-$1-$HOST.cnf and" 1>&2
+ echo "$test_dir/conf-$HOST.cnf" 1>&2
+ echo "$test_dir/conf-$1.cnf" 1>&2
+ exit
+ fi
+}
+
+#########################################
+# Count how many computers we have ready#
+#########################################
+
+count_hosts(){
+ cnt=`grep "CHOOSE_host" $1 | awk '{for(i=1; i<=NF;i++) \
+ if(index($i, "CHOOSE_host") > 0) print $i;}' | sort | uniq | wc -l`
+ echo $cnt
+}
+
+conf=`choose_conf $RUN`
+count=`count_hosts $conf`
+avail=`echo $hosts | wc -w`
+if [ $count -gt $avail ]
+ then
+ echo "Not enough hosts"
+ echo "Needs: $count available: $avail ($avail_hosts)"
+ exit 1
+fi
+
+###
+# Make directories needed
+
+p=`pwd`
+run_dir=$install_dir/run-$RUN-mysql-$clone-$target
+res_dir=$base_dir/result-$RUN-mysql-$clone-$target/$DATE
+tar_dir=$base_dir/saved-results
+
+mkdir -p $run_dir $res_dir $tar_dir
+rm -rf $res_dir/* $run_dir/*
+
+
+###
+#
+# Do sed substitutions
+#
+cd $run_dir
+choose $conf $hosts > d.tmp.$$
+sed -e s,CHOOSE_dir,"$run_dir/run",g < d.tmp.$$ > my.cnf
+
+# Setup configuration
+$atrt Cdq my.cnf
+
+# Start...
+$atrt --report-file=report.txt --log-file=log.txt --testcase-file=$test_dir/$RUN-tests.txt my.cnf
+
+# Make tar-ball
+[ -f log.txt ] && mv log.txt $res_dir
+[ -f report.txt ] && mv report.txt $res_dir
+[ "`find . -name 'result*'`" ] && mv result* $res_dir
+cd $res_dir
+
+echo "date=$DATE" > info.txt
+echo "suite=$RUN" >> info.txt
+echo "clone=mysql-$clone" >> info.txt
+echo "arch=$target" >> info.txt
+find . | xargs chmod ugo+r
+
+cd ..
+p2=`pwd`
+cd ..
+tarfile=res.$RUN.$clone.$target.$DATE.$HOST.$$.tgz
+tar cfz $tar_dir/$tarfile `basename $p2`/$DATE
+
+if [ "$report" ]
+then
+ scp $tar_dir/$tarfile $result_host:$result_path/
+fi
+
+cd $p
+rm -rf $res_dir $run_dir
+
+if [ -z "$nolock" ]
+then
+ rm -f $LOCK
+fi
diff --git a/storage/ndb/test/run-test/conf-daily-basic-ndb08.txt b/storage/ndb/test/run-test/conf-daily-basic-ndb08.txt
deleted file mode 100644
index bcd809593f3..00000000000
--- a/storage/ndb/test/run-test/conf-daily-basic-ndb08.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-baseport: 14000
-basedir: /space/autotest
-mgm: CHOOSE_host1
-ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
-api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: /space/autotest/run
-
-[MGM DEFAULT]
-PortNumber: 14000
-ArbitrationRank: 1
-DataDir: .
diff --git a/storage/ndb/test/run-test/conf-daily-devel-ndb08.txt b/storage/ndb/test/run-test/conf-daily-devel-ndb08.txt
deleted file mode 100644
index 8b340e6a39d..00000000000
--- a/storage/ndb/test/run-test/conf-daily-devel-ndb08.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-baseport: 16000
-basedir: /space/autotest
-mgm: CHOOSE_host1
-ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
-api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: /space/autotest/run
-
-[MGM DEFAULT]
-PortNumber: 16000
-ArbitrationRank: 1
-DataDir: .
diff --git a/storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt b/storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt
deleted file mode 100644
index 45e6e25f030..00000000000
--- a/storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-baseport: 16000
-basedir: CHOOSE_dir
-mgm: CHOOSE_host1
-ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
-api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: CHOOSE_dir/run
-
-[MGM DEFAULT]
-PortNumber: 16000
-ArbitrationRank: 1
-DataDir: .
-
-[TCP DEFAULT]
-SendBufferMemory: 2M
diff --git a/storage/ndb/test/run-test/conf-daily-sql-ndb08.txt b/storage/ndb/test/run-test/conf-daily-sql-ndb08.txt
deleted file mode 100644
index 0d6a99f8d48..00000000000
--- a/storage/ndb/test/run-test/conf-daily-sql-ndb08.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-baseport: 16000
-basedir: /space/autotest
-mgm: CHOOSE_host1
-ndb: CHOOSE_host2 CHOOSE_host3
-mysqld: CHOOSE_host1 CHOOSE_host4
-mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: /space/autotest/run
-
-[MGM DEFAULT]
-PortNumber: 16000
-ArbitrationRank: 1
-DataDir: .
diff --git a/storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt b/storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt
deleted file mode 100644
index 0d6a99f8d48..00000000000
--- a/storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-baseport: 16000
-basedir: /space/autotest
-mgm: CHOOSE_host1
-ndb: CHOOSE_host2 CHOOSE_host3
-mysqld: CHOOSE_host1 CHOOSE_host4
-mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: /space/autotest/run
-
-[MGM DEFAULT]
-PortNumber: 16000
-ArbitrationRank: 1
-DataDir: .
diff --git a/storage/ndb/test/run-test/conf-dl145a.cnf b/storage/ndb/test/run-test/conf-dl145a.cnf
new file mode 100644
index 00000000000..ea344f1a62a
--- /dev/null
+++ b/storage/ndb/test/run-test/conf-dl145a.cnf
@@ -0,0 +1,23 @@
+[atrt]
+basedir = CHOOSE_dir
+baseport = 14000
+clusters = .2node
+
+[ndb_mgmd]
+
+[mysqld]
+skip-innodb
+skip-bdb
+
+[cluster_config.2node]
+ndb_mgmd = CHOOSE_host1
+ndbd = CHOOSE_host2,CHOOSE_host3
+ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1
+
+NoOfReplicas = 2
+IndexMemory = 100M
+DataMemory = 300M
+BackupMemory = 64M
+MaxNoOfConcurrentScans = 100
+MaxNoOfSavedMessages= 1000
+SendBufferMemory = 2M
diff --git a/storage/ndb/test/run-test/conf-dl145a.txt b/storage/ndb/test/run-test/conf-dl145a.txt
deleted file mode 100644
index d0a240f09d1..00000000000
--- a/storage/ndb/test/run-test/conf-dl145a.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-baseport: 14000
-basedir: /home/ndbdev/autotest/run
-mgm: CHOOSE_host1
-ndb: CHOOSE_host2 CHOOSE_host3
-api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: /home/ndbdev/autotest/run
-
-[MGM DEFAULT]
-PortNumber: 14000
-ArbitrationRank: 1
-DataDir: .
-
-[TCP DEFAULT]
-SendBufferMemory: 2M
diff --git a/storage/ndb/test/run-test/conf-ndbmaster.cnf b/storage/ndb/test/run-test/conf-ndbmaster.cnf
new file mode 100644
index 00000000000..417e2988d0d
--- /dev/null
+++ b/storage/ndb/test/run-test/conf-ndbmaster.cnf
@@ -0,0 +1,23 @@
+[atrt]
+basedir = CHOOSE_dir
+baseport = 14000
+clusters = .4node
+
+[ndb_mgmd]
+
+[mysqld]
+skip-innodb
+skip-bdb
+
+[cluster_config.4node]
+ndb_mgmd = CHOOSE_host1
+ndbd = CHOOSE_host2,CHOOSE_host3,CHOOSE_host2,CHOOSE_host3
+ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1
+
+NoOfReplicas = 2
+IndexMemory = 100M
+DataMemory = 300M
+BackupMemory = 64M
+MaxNoOfConcurrentScans = 100
+MaxNoOfSavedMessages= 1000
+SendBufferMemory = 2M
diff --git a/storage/ndb/test/run-test/conf-ndbmaster.txt b/storage/ndb/test/run-test/conf-ndbmaster.txt
deleted file mode 100644
index 9f50432f5e3..00000000000
--- a/storage/ndb/test/run-test/conf-ndbmaster.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-baseport: 14000
-basedir: CHOOSE_dir
-mgm: CHOOSE_host1
-ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
-api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: CHOOSE_dir/run
-
-[MGM DEFAULT]
-PortNumber: 14000
-ArbitrationRank: 1
-DataDir: .
-
-[TCP DEFAULT]
-SendBufferMemory: 2M
diff --git a/storage/ndb/test/run-test/conf-repl.cnf b/storage/ndb/test/run-test/conf-repl.cnf
new file mode 100644
index 00000000000..57eb2ee413e
--- /dev/null
+++ b/storage/ndb/test/run-test/conf-repl.cnf
@@ -0,0 +1,28 @@
+[atrt]
+basedir=CHOOSE_dir
+baseport=15000
+clusters= .master,.slave
+replicate= 1.master:1.slave
+
+[ndb_mgmd]
+
+[mysqld]
+skip-innodb
+skip-bdb
+
+[cluster_config]
+MaxNoOfSavedMessages= 1000
+DataMemory = 100M
+
+[cluster_config.master]
+NoOfReplicas = 2
+ndb_mgmd = CHOOSE_host1
+ndbd = CHOOSE_host2,CHOOSE_host3
+mysqld = CHOOSE_host1
+ndbapi= CHOOSE_host1
+
+[cluster_config.slave]
+NoOfReplicas = 1
+ndb_mgmd = CHOOSE_host4
+ndbd = CHOOSE_host4
+mysqld = CHOOSE_host4
diff --git a/storage/ndb/test/run-test/conf-shark.txt b/storage/ndb/test/run-test/conf-shark.txt
deleted file mode 100644
index d66d0280d8a..00000000000
--- a/storage/ndb/test/run-test/conf-shark.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-baseport: 14000
-basedir: /space/autotest
-mgm: CHOOSE_host1
-ndb: CHOOSE_host1 CHOOSE_host1
-api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: /space/autotest/run
-
-[MGM DEFAULT]
-PortNumber: 14000
-ArbitrationRank: 1
-DataDir: .
-
-[TCP DEFAULT]
-SendBufferMemory: 2M
diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt
index c972d432375..4022dffa258 100644
--- a/storage/ndb/test/run-test/daily-basic-tests.txt
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt
@@ -525,10 +525,18 @@ max-time: 1000
cmd: testNodeRestart
args: -n Bug25554 T1
-max-time: 1000
+max-time: 3000
cmd: testNodeRestart
args: -n Bug25984
+max-time: 1000
+cmd: testNodeRestart
+args: -n Bug26457 T1
+
+max-time: 1000
+cmd: testNodeRestart
+args: -n Bug26481 T1
+
#
# DICT TESTS
max-time: 1500
diff --git a/storage/ndb/test/run-test/example-my.cnf b/storage/ndb/test/run-test/example-my.cnf
new file mode 100644
index 00000000000..99e1ce9f75b
--- /dev/null
+++ b/storage/ndb/test/run-test/example-my.cnf
@@ -0,0 +1,116 @@
+[atrt]
+basedir=/home/jonas/atrt
+baseport=10000
+clusters = .master
+clusters= .master,.slave
+replicate = 1.master:1.slave
+replicate = 2.master:2.slave
+
+[cluster_config]
+NoOfReplicas= 2
+IndexMemory= 10M
+DataMemory= 50M
+MaxNoOfConcurrentScans= 100
+Diskless = 1
+
+[cluster_config.master]
+ndb_mgmd = local1
+ndbd = local1,local1
+mysqld = local1,local1
+ndbapi= local1
+NoOfReplicas= 2
+
+[cluster_config.slave]
+ndb_mgmd = local1
+ndbd = local1
+ndbapi= local1
+mysqld = local1,local1
+NoOfReplicas= 1
+
+[mysqld]
+skip-innodb
+skip-bdb
+
+#
+# Generated by atrt
+# Mon May 29 23:27:49 2006
+
+[mysql_cluster.master]
+ndb-connectstring= local1:10000
+
+[cluster_config.ndb_mgmd.1.master]
+PortNumber= 10000
+
+[cluster_config.ndbd.1.master]
+FileSystemPath= /home/jonas/atrt/cluster.master/ndbd.1
+
+[cluster_config.ndbd.2.master]
+FileSystemPath= /home/jonas/atrt/cluster.master/ndbd.2
+
+[mysqld.1.master]
+datadir= /home/jonas/atrt/cluster.master/mysqld.1
+socket= /home/jonas/atrt/cluster.master/mysqld.1/mysql.sock
+port= 10001
+server-id= 1
+log-bin
+ndb-connectstring= local1:10000
+ndbcluster
+
+[client.1.master]
+socket= /home/jonas/atrt/cluster.master/mysqld.1/mysql.sock
+port= 10001
+
+[mysqld.2.master]
+datadir= /home/jonas/atrt/cluster.master/mysqld.2
+socket= /home/jonas/atrt/cluster.master/mysqld.2/mysql.sock
+port= 10002
+server-id= 2
+log-bin
+ndb-connectstring= local1:10000
+ndbcluster
+
+[client.2.master]
+socket= /home/jonas/atrt/cluster.master/mysqld.2/mysql.sock
+port= 10002
+
+[mysql_cluster.slave]
+ndb-connectstring= local1:10003
+
+[cluster_config.ndb_mgmd.1.slave]
+PortNumber= 10003
+
+[cluster_config.ndbd.1.slave]
+FileSystemPath= /home/jonas/atrt/cluster.slave/ndbd.1
+
+[mysqld.1.slave]
+datadir= /home/jonas/atrt/cluster.slave/mysqld.1
+socket= /home/jonas/atrt/cluster.slave/mysqld.1/mysql.sock
+port= 10004
+server-id= 3
+master-host= local1
+master-port= 10001
+master-user= root
+master-password= ""
+ndb-connectstring= local1:10003
+ndbcluster
+
+[client.1.slave]
+socket= /home/jonas/atrt/cluster.slave/mysqld.1/mysql.sock
+port= 10004
+
+[mysqld.2.slave]
+datadir= /home/jonas/atrt/cluster.slave/mysqld.2
+socket= /home/jonas/atrt/cluster.slave/mysqld.2/mysql.sock
+port= 10005
+server-id= 4
+master-host= local1
+master-port= 10002
+master-user= root
+master-password= ""
+ndb-connectstring= local1:10003
+ndbcluster
+
+[client.2.slave]
+socket= /home/jonas/atrt/cluster.slave/mysqld.2/mysql.sock
+port= 10005
+
diff --git a/storage/ndb/test/run-test/files.cpp b/storage/ndb/test/run-test/files.cpp
new file mode 100644
index 00000000000..231f7c88abc
--- /dev/null
+++ b/storage/ndb/test/run-test/files.cpp
@@ -0,0 +1,383 @@
+#include "atrt.hpp"
+#include <sys/types.h>
+#include <dirent.h>
+
+static bool create_directory(const char * path);
+
+bool
+setup_directories(atrt_config& config, int setup)
+{
+ /**
+ * 0 = validate
+ * 1 = setup
+ * 2 = setup+clean
+ */
+ for (size_t i = 0; i < config.m_clusters.size(); i++)
+ {
+ atrt_cluster& cluster = *config.m_clusters[i];
+ for (size_t j = 0; j<cluster.m_processes.size(); j++)
+ {
+ atrt_process& proc = *cluster.m_processes[j];
+ const char * dir = proc.m_proc.m_cwd.c_str();
+ struct stat sbuf;
+ int exists = 0;
+ if (lstat(dir, &sbuf) == 0)
+ {
+ if (S_ISDIR(sbuf.st_mode))
+ exists = 1;
+ else
+ exists = -1;
+ }
+
+ switch(setup){
+ case 0:
+ switch(exists){
+ case 0:
+ g_logger.error("Could not find directory: %s", dir);
+ return false;
+ case -1:
+ g_logger.error("%s is not a directory!", dir);
+ return false;
+ }
+ break;
+ case 1:
+ if (exists == -1)
+ {
+ g_logger.error("%s is not a directory!", dir);
+ return false;
+ }
+ break;
+ case 2:
+ if (exists == 1)
+ {
+ if (!remove_dir(dir))
+ {
+ g_logger.error("Failed to remove %s!", dir);
+ return false;
+ }
+ exists = 0;
+ break;
+ }
+ else if (exists == -1)
+ {
+ if (!unlink(dir))
+ {
+ g_logger.error("Failed to remove %s!", dir);
+ return false;
+ }
+ exists = 0;
+ }
+ }
+ if (exists != 1)
+ {
+ if (!create_directory(dir))
+ {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+static
+void
+printfile(FILE* out, Properties& props, const char * section, ...)
+{
+ Properties::Iterator it (&props);
+ const char * name = it.first();
+ if (name)
+ {
+ va_list ap;
+ va_start(ap, section);
+ /* const int ret = */ vfprintf(out, section, ap);
+ va_end(ap);
+ fprintf(out, "\n");
+
+ for (; name; name = it.next())
+ {
+ const char* val;
+ props.get(name, &val);
+ fprintf(out, "%s %s\n", name + 2, val);
+ }
+ fprintf(out, "\n");
+ }
+ fflush(out);
+}
+
+bool
+setup_files(atrt_config& config, int setup, int sshx)
+{
+ /**
+ * 0 = validate
+ * 1 = setup
+ * 2 = setup+clean
+ */
+ BaseString mycnf;
+ mycnf.assfmt("%s/my.cnf", g_basedir);
+
+ if (mycnf != g_my_cnf)
+ {
+ struct stat sbuf;
+ int ret = lstat(mycnf.c_str(), &sbuf);
+
+ if (ret == 0)
+ {
+ if (unlink(mycnf.c_str()) != 0)
+ {
+ g_logger.error("Failed to remove %s", mycnf.c_str());
+ return false;
+ }
+ }
+
+ BaseString cp = "cp ";
+ cp.appfmt("%s %s", g_my_cnf, mycnf.c_str());
+ if (system(cp.c_str()) != 0)
+ {
+ g_logger.error("Failed to '%s'", cp.c_str());
+ return false;
+ }
+ }
+
+ if (setup == 2 || config.m_generated)
+ {
+ /**
+ * Do mysql_install_db
+ */
+ for (size_t i = 0; i < config.m_clusters.size(); i++)
+ {
+ atrt_cluster& cluster = *config.m_clusters[i];
+ for (size_t j = 0; j<cluster.m_processes.size(); j++)
+ {
+ atrt_process& proc = *cluster.m_processes[j];
+ if (proc.m_type == atrt_process::AP_MYSQLD)
+ {
+ const char * val;
+ require(proc.m_options.m_loaded.get("--datadir=", &val));
+ BaseString tmp;
+ tmp.assfmt("%s/bin/mysql_install_db --datadir=%s > /dev/null 2>&1",
+ g_prefix, val);
+ if (system(tmp.c_str()) != 0)
+ {
+ g_logger.error("Failed to mysql_install_db for %s",
+ proc.m_proc.m_cwd.c_str());
+ }
+ else
+ {
+ g_logger.info("mysql_install_db for %s",
+ proc.m_proc.m_cwd.c_str());
+ }
+ }
+ }
+ }
+ }
+
+ FILE * out = NULL;
+ if (config.m_generated == false)
+ {
+ g_logger.info("Nothing configured...");
+ }
+ else
+ {
+ out = fopen(mycnf.c_str(), "a+");
+ if (out == 0)
+ {
+ g_logger.error("Failed to open %s for append", mycnf.c_str());
+ return false;
+ }
+ time_t now = time(0);
+ fprintf(out, "#\n# Generated by atrt\n");
+ fprintf(out, "# %s\n", ctime(&now));
+ }
+
+ for (size_t i = 0; i < config.m_clusters.size(); i++)
+ {
+ atrt_cluster& cluster = *config.m_clusters[i];
+ if (out)
+ {
+ Properties::Iterator it(&cluster.m_options.m_generated);
+ printfile(out, cluster.m_options.m_generated,
+ "[mysql_cluster%s]", cluster.m_name.c_str());
+ }
+
+ for (size_t j = 0; j<cluster.m_processes.size(); j++)
+ {
+ atrt_process& proc = *cluster.m_processes[j];
+
+ if (out)
+ {
+ switch(proc.m_type){
+ case atrt_process::AP_NDB_MGMD:
+ printfile(out, proc.m_options.m_generated,
+ "[cluster_config.ndb_mgmd.%d%s]",
+ proc.m_index, proc.m_cluster->m_name.c_str());
+ break;
+ case atrt_process::AP_NDBD:
+ printfile(out, proc.m_options.m_generated,
+ "[cluster_config.ndbd.%d%s]",
+ proc.m_index, proc.m_cluster->m_name.c_str());
+ break;
+ case atrt_process::AP_MYSQLD:
+ printfile(out, proc.m_options.m_generated,
+ "[mysqld.%d%s]",
+ proc.m_index, proc.m_cluster->m_name.c_str());
+ break;
+ case atrt_process::AP_NDB_API:
+ break;
+ case atrt_process::AP_CLIENT:
+ printfile(out, proc.m_options.m_generated,
+ "[client.%d%s]",
+ proc.m_index, proc.m_cluster->m_name.c_str());
+ break;
+ case atrt_process::AP_ALL:
+ case atrt_process::AP_CLUSTER:
+ abort();
+ }
+ }
+
+ /**
+ * Create env.sh
+ */
+ BaseString tmp;
+ tmp.assfmt("%s/env.sh", proc.m_proc.m_cwd.c_str());
+ char **env = BaseString::argify(0, proc.m_proc.m_env.c_str());
+ if (env[0])
+ {
+ Vector<BaseString> keys;
+ FILE *fenv = fopen(tmp.c_str(), "w+");
+ if (fenv == 0)
+ {
+ g_logger.error("Failed to open %s for writing", tmp.c_str());
+ return false;
+ }
+ for (size_t k = 0; env[k]; k++)
+ {
+ tmp = env[k];
+ int pos = tmp.indexOf('=');
+ require(pos > 0);
+ env[k][pos] = 0;
+ fprintf(fenv, "%s=\"%s\"\n", env[k], env[k]+pos+1);
+ keys.push_back(env[k]);
+ free(env[k]);
+ }
+ fprintf(fenv, "PATH=%s/bin:%s/libexec:$PATH\n", g_prefix, g_prefix);
+ keys.push_back("PATH");
+ for (size_t k = 0; k<keys.size(); k++)
+ fprintf(fenv, "export %s\n", keys[k].c_str());
+ fflush(fenv);
+ fclose(fenv);
+ }
+ free(env);
+
+ tmp.assfmt("%s/ssh-login.sh", proc.m_proc.m_cwd.c_str());
+ FILE* fenv = fopen(tmp.c_str(), "w+");
+ if (fenv == 0)
+ {
+ g_logger.error("Failed to open %s for writing", tmp.c_str());
+ return false;
+ }
+ fprintf(fenv, "#!/bin/sh\n");
+ fprintf(fenv, "cd %s\n", proc.m_proc.m_cwd.c_str());
+ fprintf(fenv, "[ -f /etc/profile ] && . /etc/profile\n");
+ fprintf(fenv, ". env.sh\n");
+ fprintf(fenv, "ulimit -Sc unlimited\n");
+ fprintf(fenv, "bash -i");
+ fflush(fenv);
+ fclose(fenv);
+ }
+ }
+
+ if (out)
+ {
+ fflush(out);
+ fclose(out);
+ }
+
+ return true;
+}
+
+static
+bool
+create_directory(const char * path)
+{
+ BaseString tmp(path);
+ Vector<BaseString> list;
+ if (tmp.split(list, "/") == 0)
+ {
+ g_logger.error("Failed to create directory: %s", tmp.c_str());
+ return false;
+ }
+
+ BaseString cwd = "/";
+ for (size_t i = 0; i < list.size(); i++)
+ {
+ cwd.append(list[i].c_str());
+ cwd.append("/");
+ mkdir(cwd.c_str(), S_IRUSR | S_IWUSR | S_IXUSR | S_IXGRP | S_IRGRP);
+ }
+
+ struct stat sbuf;
+ if (lstat(path, &sbuf) != 0 ||
+ !S_ISDIR(sbuf.st_mode))
+ {
+ g_logger.error("Failed to create directory: %s (%s)",
+ tmp.c_str(),
+ cwd.c_str());
+ return false;
+ }
+
+ return true;
+}
+
+bool
+remove_dir(const char * path, bool inclusive)
+{
+ DIR* dirp = opendir(path);
+
+ if (dirp == 0)
+ {
+ if(errno != ENOENT)
+ {
+ g_logger.error("Failed to remove >%s< errno: %d %s",
+ path, errno, strerror(errno));
+ return false;
+ }
+ return true;
+ }
+
+ struct dirent * dp;
+ BaseString name = path;
+ name.append("/");
+ while ((dp = readdir(dirp)) != NULL)
+ {
+ if ((strcmp(".", dp->d_name) != 0) && (strcmp("..", dp->d_name) != 0))
+ {
+ BaseString tmp = name;
+ tmp.append(dp->d_name);
+
+ if (remove(tmp.c_str()) == 0)
+ {
+ continue;
+ }
+
+ if (!remove_dir(tmp.c_str()))
+ {
+ closedir(dirp);
+ return false;
+ }
+ }
+ }
+
+ closedir(dirp);
+ if (inclusive)
+ {
+ if (rmdir(path) != 0)
+ {
+ g_logger.error("Failed to remove >%s< errno: %d %s",
+ path, errno, strerror(errno));
+ return false;
+ }
+ }
+ return true;
+}
+
diff --git a/storage/ndb/test/run-test/main.cpp b/storage/ndb/test/run-test/main.cpp
index aef041d24d6..2e8d6bfde6d 100644
--- a/storage/ndb/test/run-test/main.cpp
+++ b/storage/ndb/test/run-test/main.cpp
@@ -14,20 +14,19 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#include <ndb_global.h>
-#include <getarg.h>
-#include <BaseString.hpp>
-#include <Parser.hpp>
+#include "atrt.hpp"
+#include <my_sys.h>
+#include <my_getopt.h>
+
#include <NdbOut.hpp>
-#include <Properties.hpp>
#include <NdbAutoPtr.hpp>
-#include "run-test.hpp"
#include <SysLogHandler.hpp>
#include <FileLogHandler.hpp>
-#include <mgmapi.h>
-#include "CpcClient.hpp"
+#include <NdbSleep.h>
+
+#define PATH_SEPARATOR "/"
/** Global variables */
static const char progname[] = "ndb_atrt";
@@ -36,76 +35,198 @@ static const char * g_analyze_progname = "atrt-analyze-result.sh";
static const char * g_clear_progname = "atrt-clear-result.sh";
static const char * g_setup_progname = "atrt-setup.sh";
-static const char * g_setup_path = 0;
-static const char * g_process_config_filename = "d.txt";
static const char * g_log_filename = 0;
static const char * g_test_case_filename = 0;
static const char * g_report_filename = 0;
-static const char * g_default_user = 0;
-static const char * g_default_base_dir = 0;
-static int g_default_base_port = 0;
-static int g_mysqld_use_base = 1;
+static int g_do_setup = 0;
+static int g_do_deploy = 0;
+static int g_do_sshx = 0;
+static int g_do_start = 0;
+static int g_do_quit = 0;
-static int g_report = 0;
-static int g_verbosity = 0;
+static int g_help = 0;
+static int g_verbosity = 1;
static FILE * g_report_file = 0;
static FILE * g_test_case_file = stdin;
+static int g_mode = 0;
Logger g_logger;
atrt_config g_config;
-
-static int g_mode_bench = 0;
-static int g_mode_regression = 0;
-static int g_mode_interactive = 0;
-static int g_mode = 0;
-
-static
-struct getargs args[] = {
- { "process-config", 0, arg_string, &g_process_config_filename, 0, 0 },
- { "setup-path", 0, arg_string, &g_setup_path, 0, 0 },
- { 0, 'v', arg_counter, &g_verbosity, 0, 0 },
- { "log-file", 0, arg_string, &g_log_filename, 0, 0 },
- { "testcase-file", 'f', arg_string, &g_test_case_filename, 0, 0 },
- { 0, 'R', arg_flag, &g_report, 0, 0 },
- { "report-file", 0, arg_string, &g_report_filename, 0, 0 },
- { "interactive", 'i', arg_flag, &g_mode_interactive, 0, 0 },
- { "regression", 'r', arg_flag, &g_mode_regression, 0, 0 },
- { "bench", 'b', arg_flag, &g_mode_bench, 0, 0 },
+const char * g_user = 0;
+int g_baseport = 10000;
+int g_fqpn = 0;
+int g_default_ports = 0;
+
+const char * g_cwd = 0;
+const char * g_basedir = 0;
+const char * g_my_cnf = 0;
+const char * g_prefix = 0;
+const char * g_clusters = 0;
+BaseString g_replicate;
+const char *save_file = 0;
+char *save_extra_file = 0;
+const char *save_group_suffix = 0;
+const char * g_dummy;
+char * g_env_path = 0;
+
+/** Dummy, extern declared in ndb_opts.h */
+int g_print_full_config = 0, opt_ndb_shm;
+my_bool opt_core;
+
+static struct my_option g_options[] =
+{
+ { "help", '?', "Display this help and exit.",
+ (gptr*) &g_help, (gptr*) &g_help,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "version", 'V', "Output version information and exit.", 0, 0, 0,
+ GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "clusters", 256, "Cluster",
+ (gptr*) &g_clusters, (gptr*) &g_clusters,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "replicate", 1024, "replicate",
+ (gptr*) &g_dummy, (gptr*) &g_dummy,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "log-file", 256, "log-file",
+ (gptr*) &g_log_filename, (gptr*) &g_log_filename,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "testcase-file", 'f', "testcase-file",
+ (gptr*) &g_test_case_filename, (gptr*) &g_test_case_filename,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "report-file", 'r', "report-file",
+ (gptr*) &g_report_filename, (gptr*) &g_report_filename,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "basedir", 256, "Base path",
+ (gptr*) &g_basedir, (gptr*) &g_basedir,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "baseport", 256, "Base port",
+ (gptr*) &g_baseport, (gptr*) &g_baseport,
+ 0, GET_INT, REQUIRED_ARG, g_baseport, 0, 0, 0, 0, 0},
+ { "prefix", 256, "mysql install dir",
+ (gptr*) &g_prefix, (gptr*) &g_prefix,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "verbose", 'v', "Verbosity",
+ (gptr*) &g_verbosity, (gptr*) &g_verbosity,
+ 0, GET_INT, REQUIRED_ARG, g_verbosity, 0, 0, 0, 0, 0},
+ { "configure", 256, "configure",
+ (gptr*) &g_do_setup, (gptr*) &g_do_setup,
+ 0, GET_INT, REQUIRED_ARG, g_do_setup, 0, 0, 0, 0, 0 },
+ { "deploy", 256, "deploy",
+ (gptr*) &g_do_deploy, (gptr*) &g_do_deploy,
+ 0, GET_INT, REQUIRED_ARG, g_do_deploy, 0, 0, 0, 0, 0 },
+ { "sshx", 256, "sshx",
+ (gptr*) &g_do_sshx, (gptr*) &g_do_sshx,
+ 0, GET_INT, REQUIRED_ARG, g_do_sshx, 0, 0, 0, 0, 0 },
+ { "start", 256, "start",
+ (gptr*) &g_do_start, (gptr*) &g_do_start,
+ 0, GET_INT, REQUIRED_ARG, g_do_start, 0, 0, 0, 0, 0 },
+ { "fqpn", 256, "Fully qualified path-names ",
+ (gptr*) &g_fqpn, (gptr*) &g_fqpn,
+ 0, GET_INT, REQUIRED_ARG, g_fqpn, 0, 0, 0, 0, 0 },
+ { "default-ports", 256, "Use default ports when possible",
+ (gptr*) &g_default_ports, (gptr*) &g_default_ports,
+ 0, GET_INT, REQUIRED_ARG, g_default_ports, 0, 0, 0, 0, 0 },
+ { "mode", 256, "Mode 0=interactive 1=regression 2=bench",
+ (gptr*) &g_mode, (gptr*) &g_mode,
+ 0, GET_INT, REQUIRED_ARG, g_mode, 0, 0, 0, 0, 0 },
+ { "quit", 256, "Quit before starting tests",
+ (gptr*) &g_mode, (gptr*) &g_do_quit,
+ 0, GET_BOOL, NO_ARG, g_do_quit, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
-const int arg_count = 10;
+const int p_ndb = atrt_process::AP_NDB_MGMD | atrt_process::AP_NDBD;
+const int p_servers = atrt_process::AP_MYSQLD;
+const int p_clients = atrt_process::AP_CLIENT | atrt_process::AP_NDB_API;
int
-main(int argc, const char ** argv){
+main(int argc, char ** argv)
+{
ndb_init();
bool restart = true;
int lineno = 1;
int test_no = 1;
+ int return_code = 1;
- const int p_ndb = atrt_process::NDB_MGM | atrt_process::NDB_DB;
- const int p_servers = atrt_process::MYSQL_SERVER | atrt_process::NDB_REP;
- const int p_clients = atrt_process::MYSQL_CLIENT | atrt_process::NDB_API;
-
g_logger.setCategory(progname);
g_logger.enable(Logger::LL_ALL);
g_logger.createConsoleHandler();
if(!parse_args(argc, argv))
goto end;
-
+
g_logger.info("Starting...");
- if(!setup_config(g_config))
+ g_config.m_generated = false;
+ g_config.m_replication = g_replicate;
+ if (!setup_config(g_config))
+ goto end;
+
+ if (!configure(g_config, g_do_setup))
goto end;
+ g_logger.info("Setting up directories");
+ if (!setup_directories(g_config, g_do_setup))
+ goto end;
+
+ if (g_do_setup)
+ {
+ g_logger.info("Setting up files");
+ if (!setup_files(g_config, g_do_setup, g_do_sshx))
+ goto end;
+ }
+
+ if (g_do_deploy)
+ {
+ if (!deploy(g_config))
+ goto end;
+ }
+
+ if (g_do_quit)
+ {
+ return_code = 0;
+ goto end;
+ }
+
+ if(!setup_hosts(g_config))
+ goto end;
+
+ if (g_do_sshx)
+ {
+ g_logger.info("Starting xterm-ssh");
+ if (!sshx(g_config, g_do_sshx))
+ goto end;
+
+ g_logger.info("Done...sleeping");
+ while(true)
+ {
+ NdbSleep_SecSleep(1);
+ }
+ return_code = 0;
+ goto end;
+ }
+
g_logger.info("Connecting to hosts");
if(!connect_hosts(g_config))
goto end;
- if(!setup_hosts(g_config))
+ if (g_do_start && !g_test_case_filename)
+ {
+ g_logger.info("Starting server processes: %x", g_do_start);
+ if (!start(g_config, g_do_start))
+ goto end;
+
+ g_logger.info("Done...sleeping");
+ while(true)
+ {
+ NdbSleep_SecSleep(1);
+ }
+ return_code = 0;
goto end;
+ }
+ return_code = 0;
+
/**
* Main loop
*/
@@ -114,37 +235,25 @@ main(int argc, const char ** argv){
* Do we need to restart ndb
*/
if(restart){
- g_logger.info("(Re)starting ndb processes");
+ g_logger.info("(Re)starting server processes processes");
if(!stop_processes(g_config, ~0))
goto end;
- if(!start_processes(g_config, atrt_process::NDB_MGM))
+ if (!setup_directories(g_config, 2))
goto end;
- if(!connect_ndb_mgm(g_config)){
- goto end;
- }
-
- if(!start_processes(g_config, atrt_process::NDB_DB))
+ if (!setup_files(g_config, 2, 1))
goto end;
- if(!wait_ndb(g_config, NDB_MGM_NODE_STATUS_NOT_STARTED))
+ if(!setup_hosts(g_config))
goto end;
- for(Uint32 i = 0; i<3; i++)
- if(wait_ndb(g_config, NDB_MGM_NODE_STATUS_STARTED))
- goto started;
-
- goto end;
-
- started:
- if(!start_processes(g_config, p_servers))
- goto end;
-
- g_logger.info("Ndb start completed");
+ if (!start(g_config, p_ndb | p_servers))
+ goto end;
+ g_logger.info("All servers start completed");
}
- const int start_line = lineno;
+ // const int start_line = lineno;
atrt_testcase test_case;
if(!read_test_case(g_test_case_file, test_case, lineno))
goto end;
@@ -165,7 +274,7 @@ main(int argc, const char ** argv){
const time_t start = time(0);
time_t now = start;
do {
- if(!update_status(g_config, atrt_process::ALL))
+ if(!update_status(g_config, atrt_process::AP_ALL))
goto end;
int count = 0;
@@ -189,7 +298,7 @@ main(int argc, const char ** argv){
result = ERR_MAX_TIME_ELAPSED;
break;
}
- sleep(1);
+ NdbSleep_SecSleep(1);
} while(true);
const time_t elapsed = time(0) - start;
@@ -197,7 +306,8 @@ main(int argc, const char ** argv){
if(!stop_processes(g_config, p_clients))
goto end;
- if(!gather_result(g_config, &result))
+ int tmp, *rp = result ? &tmp : &result;
+ if(!gather_result(g_config, rp))
goto end;
g_logger.info("#%d %s(%d)",
@@ -205,29 +315,35 @@ main(int argc, const char ** argv){
(result == 0 ? "OK" : "FAILED"), result);
if(g_report_file != 0){
- fprintf(g_report_file, "%s %s ; %d ; %d ; %ld\n",
- test_case.m_command.c_str(),
- test_case.m_args.c_str(),
- test_no, result, elapsed);
+ fprintf(g_report_file, "%s ; %d ; %d ; %ld\n",
+ test_case.m_name.c_str(), test_no, result, elapsed);
fflush(g_report_file);
}
- if(test_case.m_report || g_mode_bench || (g_mode_regression && result)){
- BaseString tmp;
- tmp.assfmt("result.%d", test_no);
- if(rename("result", tmp.c_str()) != 0){
- g_logger.critical("Failed to rename %s as %s",
- "result", tmp.c_str());
- goto end;
- }
- }
-
- if(g_mode_interactive && result){
+ if(g_mode == 0 && result){
g_logger.info
("Encountered failed test in interactive mode - terminating");
break;
}
+ BaseString resdir;
+ resdir.assfmt("result.%d", test_no);
+ remove_dir(resdir.c_str(), true);
+
+ if(test_case.m_report || g_mode == 2 || (g_mode && result))
+ {
+ if(rename("result", resdir.c_str()) != 0)
+ {
+ g_logger.critical("Failed to rename %s as %s",
+ "result", resdir.c_str());
+ goto end;
+ }
+ }
+ else
+ {
+ remove_dir("result", true);
+ }
+
if(result != 0){
restart = true;
} else {
@@ -247,276 +363,254 @@ main(int argc, const char ** argv){
g_test_case_file = 0;
}
- stop_processes(g_config, atrt_process::ALL);
+ stop_processes(g_config, atrt_process::AP_ALL);
+ return return_code;
+}
+
+static
+my_bool
+get_one_option(int arg, const struct my_option * opt, char * value)
+{
+ if (arg == 1024)
+ {
+ if (g_replicate.length())
+ g_replicate.append(";");
+ g_replicate.append(value);
+ return 1;
+ }
return 0;
}
bool
-parse_args(int argc, const char** argv){
- int optind = 0;
- if(getarg(args, arg_count, argc, argv, &optind)) {
- arg_printusage(args, arg_count, progname, "");
+parse_args(int argc, char** argv)
+{
+ char buf[2048];
+ if (getcwd(buf, sizeof(buf)) == 0)
+ {
+ g_logger.error("Unable to get current working directory");
return false;
}
-
- if(g_log_filename != 0){
- g_logger.removeConsoleHandler();
- g_logger.addHandler(new FileLogHandler(g_log_filename));
+ g_cwd = strdup(buf);
+
+ struct stat sbuf;
+ BaseString mycnf;
+ if (argc > 1 && lstat(argv[argc-1], &sbuf) == 0)
+ {
+ mycnf.append(g_cwd);
+ mycnf.append(PATH_SEPARATOR);
+ mycnf.append(argv[argc-1]);
}
-
+ else
{
- int tmp = Logger::LL_WARNING - g_verbosity;
- tmp = (tmp < Logger::LL_DEBUG ? Logger::LL_DEBUG : tmp);
- g_logger.disable(Logger::LL_ALL);
- g_logger.enable(Logger::LL_ON);
- g_logger.enable((Logger::LoggerLevel)tmp, Logger::LL_ALERT);
+ mycnf.append(g_cwd);
+ mycnf.append(PATH_SEPARATOR);
+ mycnf.append("my.cnf");
+ if (lstat(mycnf.c_str(), &sbuf) != 0)
+ {
+ g_logger.error("Unable to stat %s", mycnf.c_str());
+ return false;
+ }
}
+ g_logger.info("Bootstrapping using %s", mycnf.c_str());
+
+ const char *groups[] = { "atrt", 0 };
+ int ret = load_defaults(mycnf.c_str(), groups, &argc, &argv);
+
+ save_file = my_defaults_file;
+ save_extra_file = my_defaults_extra_file;
+ save_group_suffix = my_defaults_group_suffix;
-
- if(!g_process_config_filename){
- g_logger.critical("Process config not specified!");
+ if (save_extra_file)
+ {
+ g_logger.error("--defaults-extra-file(%s) is not supported...",
+ save_extra_file);
return false;
}
- if(!g_setup_path){
- char buf[1024];
- if(getcwd(buf, sizeof(buf))){
- g_setup_path = strdup(buf);
- g_logger.info("Setup path not specified, using %s", buf);
- } else {
- g_logger.critical("Setup path not specified!\n");
- return false;
- }
- }
-
- if(g_report & !g_report_filename){
- g_report_filename = "report.txt";
+ if (ret || handle_options(&argc, &argv, g_options, get_one_option))
+ {
+ g_logger.error("Failed to load defaults/handle_options");
+ return false;
}
- if(g_report_filename){
- g_report_file = fopen(g_report_filename, "w");
- if(g_report_file == 0){
- g_logger.critical("Unable to create report file: %s", g_report_filename);
- return false;
+ if (argc >= 2)
+ {
+ const char * arg = argv[argc-2];
+ while(* arg)
+ {
+ switch(* arg){
+ case 'c':
+ g_do_setup = (g_do_setup == 0) ? 1 : g_do_setup;
+ break;
+ case 'C':
+ g_do_setup = 2;
+ break;
+ case 'd':
+ g_do_deploy = 1;
+ break;
+ case 'x':
+ g_do_sshx = atrt_process::AP_CLIENT | atrt_process::AP_NDB_API;
+ break;
+ case 'X':
+ g_do_sshx = atrt_process::AP_ALL;
+ break;
+ case 's':
+ g_do_start = p_ndb;
+ break;
+ case 'S':
+ g_do_start = p_ndb | p_servers;
+ break;
+ case 'f':
+ g_fqpn = 1;
+ break;
+ case 'q':
+ g_do_quit = 1;
+ break;
+ default:
+ g_logger.error("Unknown switch '%c'", *arg);
+ return false;
+ }
+ arg++;
}
}
- if(g_test_case_filename){
- g_test_case_file = fopen(g_test_case_filename, "r");
- if(g_test_case_file == 0){
- g_logger.critical("Unable to open file: %s", g_test_case_filename);
- return false;
- }
+ if(g_log_filename != 0)
+ {
+ g_logger.removeConsoleHandler();
+ g_logger.addHandler(new FileLogHandler(g_log_filename));
}
- int sum = g_mode_interactive + g_mode_regression + g_mode_bench;
- if(sum == 0){
- g_mode_interactive = 1;
- }
-
- if(sum > 1){
- g_logger.critical
- ("Only one of bench/regression/interactive can be specified");
- return false;
+ {
+ int tmp = Logger::LL_WARNING - g_verbosity;
+ tmp = (tmp < Logger::LL_DEBUG ? Logger::LL_DEBUG : tmp);
+ g_logger.disable(Logger::LL_ALL);
+ g_logger.enable(Logger::LL_ON);
+ g_logger.enable((Logger::LoggerLevel)tmp, Logger::LL_ALERT);
}
- g_default_user = strdup(getenv("LOGNAME"));
-
- return true;
-}
-
-
-static
-atrt_host *
-find(const BaseString& host, Vector<atrt_host> & hosts){
- for(size_t i = 0; i<hosts.size(); i++){
- if(hosts[i].m_hostname == host){
- return &hosts[i];
- }
+ if(!g_basedir)
+ {
+ g_basedir = g_cwd;
+ g_logger.info("basedir not specified, using %s", g_basedir);
}
- return 0;
-}
-bool
-setup_config(atrt_config& config){
-
- FILE * f = fopen(g_process_config_filename, "r");
- if(!f){
- g_logger.critical("Failed to open process config file: %s",
- g_process_config_filename);
- return false;
+ if (!g_prefix)
+ {
+ g_prefix = DEFAULT_PREFIX;
}
- bool result = true;
-
- int lineno = 0;
- char buf[2048];
- BaseString connect_string;
- int mysql_port_offset = 0;
- while(fgets(buf, 2048, f)){
- lineno++;
-
- BaseString tmp(buf);
- tmp.trim(" \t\n\r");
-
- if(tmp.length() == 0 || tmp == "" || tmp.c_str()[0] == '#')
- continue;
-
- Vector<BaseString> split1;
- if(tmp.split(split1, ":", 2) != 2){
- g_logger.warning("Invalid line %d in %s - ignoring",
- lineno, g_process_config_filename);
- continue;
+
+ /**
+ * Add path to atrt-*.sh
+ */
+ {
+ BaseString tmp;
+ const char* env = getenv("PATH");
+ if (env && strlen(env))
+ {
+ tmp.assfmt("PATH=%s:%s/mysql-test/ndb",
+ env, g_prefix);
}
-
- if(split1[0].trim() == "basedir"){
- g_default_base_dir = strdup(split1[1].trim().c_str());
- continue;
+ else
+ {
+ tmp.assfmt("PATH=%s/mysql-test/ndb", g_prefix);
}
+ g_env_path = strdup(tmp.c_str());
+ putenv(g_env_path);
+ }
+
+ if (g_help)
+ {
+ my_print_help(g_options);
+ my_print_variables(g_options);
+ return 0;
+ }
- if(split1[0].trim() == "baseport"){
- g_default_base_port = atoi(split1[1].trim().c_str());
- continue;
+ if(g_test_case_filename)
+ {
+ g_test_case_file = fopen(g_test_case_filename, "r");
+ if(g_test_case_file == 0)
+ {
+ g_logger.critical("Unable to open file: %s", g_test_case_filename);
+ return false;
}
+ if (g_do_setup == 0)
+ g_do_setup = 2;
+
+ if (g_do_start == 0)
+ g_do_start = p_ndb | p_servers;
+
+ if (g_mode == 0)
+ g_mode = 1;
- if(split1[0].trim() == "user"){
- g_default_user = strdup(split1[1].trim().c_str());
- continue;
+ if (g_do_sshx)
+ {
+ g_logger.critical("ssx specified...not possible with testfile");
+ return false;
}
-
- if(split1[0].trim() == "mysqld-use-base" && split1[1].trim() == "no"){
- g_mysqld_use_base = 0;
- continue;
+ }
+
+ if (g_do_setup == 0)
+ {
+ BaseString tmp;
+ tmp.append(g_basedir);
+ tmp.append(PATH_SEPARATOR);
+ tmp.append("my.cnf");
+ if (lstat(tmp.c_str(), &sbuf) != 0)
+ {
+ g_logger.error("Unable to stat %s", tmp.c_str());
+ return false;
}
- Vector<BaseString> hosts;
- if(split1[1].trim().split(hosts) <= 0){
- g_logger.warning("Invalid line %d in %s - ignoring",
- lineno, g_process_config_filename);
+ if (!S_ISREG(sbuf.st_mode))
+ {
+ g_logger.error("%s is not a regular file", tmp.c_str());
+ return false;
}
- // 1 - Check hosts
- for(size_t i = 0; i<hosts.size(); i++){
- Vector<BaseString> tmp;
- hosts[i].split(tmp, ":");
- BaseString hostname = tmp[0].trim();
- BaseString base_dir;
- if(tmp.size() >= 2)
- base_dir = tmp[1];
- else if(g_default_base_dir == 0){
- g_logger.critical("Basedir not specified...");
- return false;
- }
-
- atrt_host * host_ptr;
- if((host_ptr = find(hostname, config.m_hosts)) == 0){
- atrt_host host;
- host.m_index = config.m_hosts.size();
- host.m_cpcd = new SimpleCpcClient(hostname.c_str(), 1234);
- host.m_base_dir = (base_dir.empty() ? g_default_base_dir : base_dir);
- host.m_user = g_default_user;
- host.m_hostname = hostname.c_str();
- config.m_hosts.push_back(host);
- } else {
- if(!base_dir.empty() && (base_dir == host_ptr->m_base_dir)){
- g_logger.critical("Inconsistent base dir definition for host %s"
- ", \"%s\" != \"%s\"", hostname.c_str(),
- base_dir.c_str(), host_ptr->m_base_dir.c_str());
- return false;
- }
- }
- }
-
- for(size_t i = 0; i<hosts.size(); i++){
- BaseString & tmp = hosts[i];
- atrt_host * host = find(tmp, config.m_hosts);
- BaseString & dir = host->m_base_dir;
-
- const int index = config.m_processes.size() + 1;
-
- atrt_process proc;
- proc.m_index = index;
- proc.m_host = host;
- proc.m_proc.m_id = -1;
- proc.m_proc.m_type = "temporary";
- proc.m_proc.m_owner = "atrt";
- proc.m_proc.m_group = "group";
- proc.m_proc.m_cwd.assign(dir).append("/run/");
- proc.m_proc.m_stdout = "log.out";
- proc.m_proc.m_stderr = "2>&1";
- proc.m_proc.m_runas = proc.m_host->m_user;
- proc.m_proc.m_ulimit = "c:unlimited";
- proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", dir.c_str());
- proc.m_proc.m_shutdown_options = "";
- proc.m_hostname = proc.m_host->m_hostname;
- proc.m_ndb_mgm_port = g_default_base_port;
- if(split1[0] == "mgm"){
- proc.m_type = atrt_process::NDB_MGM;
- proc.m_proc.m_name.assfmt("%d-%s", index, "ndb_mgmd");
- proc.m_proc.m_path.assign(dir).append("/libexec/ndb_mgmd");
- proc.m_proc.m_args = "--nodaemon -f config.ini";
- proc.m_proc.m_cwd.appfmt("%d.ndb_mgmd", index);
- connect_string.appfmt("host=%s:%d;",
- proc.m_hostname.c_str(), proc.m_ndb_mgm_port);
- } else if(split1[0] == "ndb"){
- proc.m_type = atrt_process::NDB_DB;
- proc.m_proc.m_name.assfmt("%d-%s", index, "ndbd");
- proc.m_proc.m_path.assign(dir).append("/libexec/ndbd");
- proc.m_proc.m_args = "--initial --nodaemon -n";
- proc.m_proc.m_cwd.appfmt("%d.ndbd", index);
- } else if(split1[0] == "mysqld"){
- proc.m_type = atrt_process::MYSQL_SERVER;
- proc.m_proc.m_name.assfmt("%d-%s", index, "mysqld");
- proc.m_proc.m_path.assign(dir).append("/libexec/mysqld");
- proc.m_proc.m_args = "--core-file --ndbcluster";
- proc.m_proc.m_cwd.appfmt("%d.mysqld", index);
- proc.m_proc.m_shutdown_options = "SIGKILL"; // not nice
- } else if(split1[0] == "api"){
- proc.m_type = atrt_process::NDB_API;
- proc.m_proc.m_name.assfmt("%d-%s", index, "ndb_api");
- proc.m_proc.m_path = "";
- proc.m_proc.m_args = "";
- proc.m_proc.m_cwd.appfmt("%d.ndb_api", index);
- } else if(split1[0] == "mysql"){
- proc.m_type = atrt_process::MYSQL_CLIENT;
- proc.m_proc.m_name.assfmt("%d-%s", index, "mysql");
- proc.m_proc.m_path = "";
- proc.m_proc.m_args = "";
- proc.m_proc.m_cwd.appfmt("%d.mysql", index);
- } else {
- g_logger.critical("%s:%d: Unhandled process type: %s",
- g_process_config_filename, lineno,
- split1[0].c_str());
- result = false;
- goto end;
- }
- config.m_processes.push_back(proc);
+ g_my_cnf = strdup(tmp.c_str());
+ g_logger.info("Using %s", tmp.c_str());
+ }
+ else
+ {
+ g_my_cnf = strdup(mycnf.c_str());
+ }
+
+ g_logger.info("Using --prefix=\"%s\"", g_prefix);
+
+ if(g_report_filename)
+ {
+ g_report_file = fopen(g_report_filename, "w");
+ if(g_report_file == 0)
+ {
+ g_logger.critical("Unable to create report file: %s", g_report_filename);
+ return false;
}
}
-
- // Setup connect string
- for(size_t i = 0; i<config.m_processes.size(); i++){
- config.m_processes[i].m_proc.m_env.appfmt(" NDB_CONNECTSTRING=%s",
- connect_string.c_str());
+
+ if (g_clusters == 0)
+ {
+ g_logger.critical("No clusters specified");
+ return false;
}
- end:
- fclose(f);
- return result;
+ g_user = strdup(getenv("LOGNAME"));
+
+ return true;
}
bool
connect_hosts(atrt_config& config){
for(size_t i = 0; i<config.m_hosts.size(); i++){
- if(config.m_hosts[i].m_cpcd->connect() != 0){
+ if(config.m_hosts[i]->m_cpcd->connect() != 0){
g_logger.error("Unable to connect to cpc %s:%d",
- config.m_hosts[i].m_cpcd->getHost(),
- config.m_hosts[i].m_cpcd->getPort());
+ config.m_hosts[i]->m_cpcd->getHost(),
+ config.m_hosts[i]->m_cpcd->getPort());
return false;
}
g_logger.debug("Connected to %s:%d",
- config.m_hosts[i].m_cpcd->getHost(),
- config.m_hosts[i].m_cpcd->getPort());
+ config.m_hosts[i]->m_cpcd->getHost(),
+ config.m_hosts[i]->m_cpcd->getPort());
}
return true;
@@ -529,8 +623,10 @@ connect_ndb_mgm(atrt_process & proc){
g_logger.critical("Unable to create mgm handle");
return false;
}
- BaseString tmp = proc.m_hostname;
- tmp.appfmt(":%d", proc.m_ndb_mgm_port);
+ BaseString tmp = proc.m_host->m_hostname;
+ const char * val;
+ proc.m_options.m_loaded.get("--PortNumber=", &val);
+ tmp.appfmt(":%s", val);
if (ndb_mgm_set_connectstring(handle,tmp.c_str()))
{
@@ -551,8 +647,8 @@ connect_ndb_mgm(atrt_process & proc){
bool
connect_ndb_mgm(atrt_config& config){
for(size_t i = 0; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
- if((proc.m_type & atrt_process::NDB_MGM) != 0){
+ atrt_process & proc = *config.m_processes[i];
+ if((proc.m_type & atrt_process::AP_NDB_MGMD) != 0){
if(!connect_ndb_mgm(proc)){
return false;
}
@@ -573,100 +669,110 @@ wait_ndb(atrt_config& config, int goal){
goal = remap(goal);
-
- /**
- * Get mgm handle for cluster
- */
- NdbMgmHandle handle = 0;
- for(size_t i = 0; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
- if((proc.m_type & atrt_process::NDB_MGM) != 0){
- handle = proc.m_ndb_mgm_handle;
- break;
- }
- }
- if(handle == 0){
- g_logger.critical("Unable to find mgm handle");
- return false;
- }
-
- if(goal == NDB_MGM_NODE_STATUS_STARTED){
+ size_t cnt = 0;
+ for (size_t i = 0; i<config.m_clusters.size(); i++)
+ {
+ atrt_cluster* cluster = config.m_clusters[i];
/**
- * 1) wait NOT_STARTED
- * 2) send start
- * 3) wait STARTED
+ * Get mgm handle for cluster
*/
- if(!wait_ndb(config, NDB_MGM_NODE_STATUS_NOT_STARTED))
+ NdbMgmHandle handle = 0;
+ for(size_t j = 0; j<cluster->m_processes.size(); j++){
+ atrt_process & proc = *cluster->m_processes[j];
+ if((proc.m_type & atrt_process::AP_NDB_MGMD) != 0){
+ handle = proc.m_ndb_mgm_handle;
+ break;
+ }
+ }
+
+ if(handle == 0){
+ g_logger.critical("Unable to find mgm handle");
return false;
+ }
- ndb_mgm_start(handle, 0, 0);
- }
-
- struct ndb_mgm_cluster_state * state;
-
- time_t now = time(0);
- time_t end = now + 360;
- int min = remap(NDB_MGM_NODE_STATUS_NO_CONTACT);
- int min2 = goal;
-
- while(now < end){
- /**
- * 1) retreive current state
- */
- state = 0;
- do {
- state = ndb_mgm_get_status(handle);
- if(state == 0){
- const int err = ndb_mgm_get_latest_error(handle);
- g_logger.error("Unable to poll db state: %d %s %s",
- ndb_mgm_get_latest_error(handle),
- ndb_mgm_get_latest_error_msg(handle),
- ndb_mgm_get_latest_error_desc(handle));
- if(err == NDB_MGM_SERVER_NOT_CONNECTED && connect_ndb_mgm(config)){
- g_logger.error("Reconnected...");
- continue;
- }
+ if(goal == NDB_MGM_NODE_STATUS_STARTED){
+ /**
+ * 1) wait NOT_STARTED
+ * 2) send start
+ * 3) wait STARTED
+ */
+ if(!wait_ndb(config, NDB_MGM_NODE_STATUS_NOT_STARTED))
return false;
- }
- } while(state == 0);
- NdbAutoPtr<void> tmp(state);
+
+ ndb_mgm_start(handle, 0, 0);
+ }
+
+ struct ndb_mgm_cluster_state * state;
+
+ time_t now = time(0);
+ time_t end = now + 360;
+ int min = remap(NDB_MGM_NODE_STATUS_NO_CONTACT);
+ int min2 = goal;
- min2 = goal;
- for(int i = 0; i<state->no_of_nodes; i++){
- if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_NDB){
- const int s = remap(state->node_states[i].node_status);
- min2 = (min2 < s ? min2 : s );
-
- if(s < remap(NDB_MGM_NODE_STATUS_NO_CONTACT) ||
- s > NDB_MGM_NODE_STATUS_STARTED){
- g_logger.critical("Strange DB status during start: %d %d", i, min2);
+ while(now < end){
+ /**
+ * 1) retreive current state
+ */
+ state = 0;
+ do {
+ state = ndb_mgm_get_status(handle);
+ if(state == 0){
+ const int err = ndb_mgm_get_latest_error(handle);
+ g_logger.error("Unable to poll db state: %d %s %s",
+ ndb_mgm_get_latest_error(handle),
+ ndb_mgm_get_latest_error_msg(handle),
+ ndb_mgm_get_latest_error_desc(handle));
+ if(err == NDB_MGM_SERVER_NOT_CONNECTED && connect_ndb_mgm(config)){
+ g_logger.error("Reconnected...");
+ continue;
+ }
return false;
}
-
- if(min2 < min){
- g_logger.critical("wait ndb failed node: %d %d %d %d",
- state->node_states[i].node_id, min, min2, goal);
+ } while(state == 0);
+ NdbAutoPtr<void> tmp(state);
+
+ min2 = goal;
+ for(int j = 0; j<state->no_of_nodes; j++){
+ if(state->node_states[j].node_type == NDB_MGM_NODE_TYPE_NDB){
+ const int s = remap(state->node_states[j].node_status);
+ min2 = (min2 < s ? min2 : s );
+
+ if(s < remap(NDB_MGM_NODE_STATUS_NO_CONTACT) ||
+ s > NDB_MGM_NODE_STATUS_STARTED){
+ g_logger.critical("Strange DB status during start: %d %d",
+ j, min2);
+ return false;
+ }
+
+ if(min2 < min){
+ g_logger.critical("wait ndb failed node: %d %d %d %d",
+ state->node_states[j].node_id, min, min2, goal);
+ }
}
}
+
+ if(min2 < min){
+ g_logger.critical("wait ndb failed %d %d %d", min, min2, goal);
+ return false;
+ }
+
+ if(min2 == goal){
+ cnt++;
+ goto next;
+ }
+
+ min = min2;
+ now = time(0);
}
- if(min2 < min){
- g_logger.critical("wait ndb failed %d %d %d", min, min2, goal);
- return false;
- }
-
- if(min2 == goal){
- return true;
- break;
- }
-
- min = min2;
- now = time(0);
+ g_logger.critical("wait ndb timed out %d %d %d", min, min2, goal);
+ break;
+
+next:
+ ;
}
-
- g_logger.critical("wait ndb timed out %d %d %d", min, min2, goal);
-
- return false;
+
+ return cnt == config.m_clusters.size();
}
bool
@@ -676,21 +782,19 @@ start_process(atrt_process & proc){
return false;
}
- BaseString path = proc.m_proc.m_cwd.substr(proc.m_host->m_base_dir.length()+BaseString("/run").length());
-
BaseString tmp = g_setup_progname;
- tmp.appfmt(" %s %s/%s/ %s",
+ tmp.appfmt(" %s %s/ %s",
proc.m_host->m_hostname.c_str(),
- g_setup_path,
- path.c_str(),
+ proc.m_proc.m_cwd.c_str(),
proc.m_proc.m_cwd.c_str());
-
+
+ g_logger.debug("system(%s)", tmp.c_str());
const int r1 = system(tmp.c_str());
if(r1 != 0){
g_logger.critical("Failed to setup process");
return false;
}
-
+
{
Properties reply;
if(proc.m_host->m_cpcd->define_process(proc.m_proc, reply) != 0){
@@ -715,7 +819,7 @@ start_process(atrt_process & proc){
bool
start_processes(atrt_config& config, int types){
for(size_t i = 0; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
+ atrt_process & proc = *config.m_processes[i];
if((types & proc.m_type) != 0 && proc.m_proc.m_path != ""){
if(!start_process(proc)){
return false;
@@ -760,7 +864,7 @@ stop_process(atrt_process & proc){
bool
stop_processes(atrt_config& config, int types){
for(size_t i = 0; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
+ atrt_process & proc = *config.m_processes[i];
if((types & proc.m_type) != 0){
if(!stop_process(proc)){
return false;
@@ -779,11 +883,11 @@ update_status(atrt_config& config, int){
m_procs.fill(config.m_hosts.size(), dummy);
for(size_t i = 0; i<config.m_hosts.size(); i++){
Properties p;
- config.m_hosts[i].m_cpcd->list_processes(m_procs[i], p);
+ config.m_hosts[i]->m_cpcd->list_processes(m_procs[i], p);
}
for(size_t i = 0; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
+ atrt_process & proc = *config.m_processes[i];
if(proc.m_proc.m_id != -1){
Vector<SimpleCpcClient::Process> &h_procs= m_procs[proc.m_host->m_index];
bool found = false;
@@ -798,7 +902,7 @@ update_status(atrt_config& config, int){
g_logger.error("update_status: not found");
g_logger.error("id: %d host: %s cmd: %s",
proc.m_proc.m_id,
- proc.m_hostname.c_str(),
+ proc.m_host->m_hostname.c_str(),
proc.m_proc.m_path.c_str());
for(size_t j = 0; j<h_procs.size(); j++){
g_logger.error("found: %d %s", h_procs[j].m_id,
@@ -815,7 +919,7 @@ int
is_running(atrt_config& config, int types){
int found = 0, running = 0;
for(size_t i = 0; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
+ atrt_process & proc = *config.m_processes[i];
if((types & proc.m_type) != 0){
found++;
if(proc.m_proc.m_status == "running")
@@ -910,12 +1014,24 @@ read_test_case(FILE * file, atrt_testcase& tc, int& line){
tc.m_run_all= true;
else
tc.m_run_all= false;
+
+ if (!p.get("name", &mt))
+ {
+ tc.m_name.assfmt("%s %s",
+ tc.m_command.c_str(),
+ tc.m_args.c_str());
+ }
+ else
+ {
+ tc.m_name.assign(mt);
+ }
return true;
}
bool
setup_test_case(atrt_config& config, const atrt_testcase& tc){
+ g_logger.debug("system(%s)", g_clear_progname);
const int r1 = system(g_clear_progname);
if(r1 != 0){
g_logger.critical("Failed to clear result");
@@ -923,19 +1039,24 @@ setup_test_case(atrt_config& config, const atrt_testcase& tc){
}
size_t i = 0;
- for(; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
- if(proc.m_type == atrt_process::NDB_API || proc.m_type == atrt_process::MYSQL_CLIENT){
- proc.m_proc.m_path.assfmt("%s/bin/%s", proc.m_host->m_base_dir.c_str(),
- tc.m_command.c_str());
+ for(; i<config.m_processes.size(); i++)
+ {
+ atrt_process & proc = *config.m_processes[i];
+ if(proc.m_type == atrt_process::AP_NDB_API || proc.m_type == atrt_process::AP_CLIENT){
+ proc.m_proc.m_path = "";
+ if (tc.m_command.c_str()[0] != '/')
+ {
+ proc.m_proc.m_path.appfmt("%s/bin/", g_prefix);
+ }
+ proc.m_proc.m_path.append(tc.m_command.c_str());
proc.m_proc.m_args.assign(tc.m_args);
if(!tc.m_run_all)
break;
}
}
for(i++; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
- if(proc.m_type == atrt_process::NDB_API || proc.m_type == atrt_process::MYSQL_CLIENT){
+ atrt_process & proc = *config.m_processes[i];
+ if(proc.m_type == atrt_process::AP_NDB_API || proc.m_type == atrt_process::AP_CLIENT){
proc.m_proc.m_path.assign("");
proc.m_proc.m_args.assign("");
}
@@ -946,24 +1067,27 @@ setup_test_case(atrt_config& config, const atrt_testcase& tc){
bool
gather_result(atrt_config& config, int * result){
BaseString tmp = g_gather_progname;
- for(size_t i = 0; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
- if(proc.m_proc.m_path != ""){
- tmp.appfmt(" %s:%s",
- proc.m_hostname.c_str(),
- proc.m_proc.m_cwd.c_str());
- }
+
+ for(size_t i = 0; i<config.m_hosts.size(); i++)
+ {
+ tmp.appfmt(" %s:%s/*",
+ config.m_hosts[i]->m_hostname.c_str(),
+ config.m_hosts[i]->m_basedir.c_str());
}
-
+
+ g_logger.debug("system(%s)", tmp.c_str());
const int r1 = system(tmp.c_str());
- if(r1 != 0){
- g_logger.critical("Failed to gather result");
+ if(r1 != 0)
+ {
+ g_logger.critical("Failed to gather result!");
return false;
}
-
+
+ g_logger.debug("system(%s)", g_analyze_progname);
const int r2 = system(g_analyze_progname);
-
- if(r2 == -1 || r2 == (127 << 8)){
+
+ if(r2 == -1 || r2 == (127 << 8))
+ {
g_logger.critical("Failed to analyze results");
return false;
}
@@ -974,6 +1098,7 @@ gather_result(atrt_config& config, int * result){
bool
setup_hosts(atrt_config& config){
+ g_logger.debug("system(%s)", g_clear_progname);
const int r1 = system(g_clear_progname);
if(r1 != 0){
g_logger.critical("Failed to clear result");
@@ -982,21 +1107,143 @@ setup_hosts(atrt_config& config){
for(size_t i = 0; i<config.m_hosts.size(); i++){
BaseString tmp = g_setup_progname;
- tmp.appfmt(" %s %s/ %s/run",
- config.m_hosts[i].m_hostname.c_str(),
- g_setup_path,
- config.m_hosts[i].m_base_dir.c_str());
+ tmp.appfmt(" %s %s/ %s/",
+ config.m_hosts[i]->m_hostname.c_str(),
+ g_basedir,
+ config.m_hosts[i]->m_basedir.c_str());
+ g_logger.debug("system(%s)", tmp.c_str());
const int r1 = system(tmp.c_str());
if(r1 != 0){
g_logger.critical("Failed to setup %s",
- config.m_hosts[i].m_hostname.c_str());
+ config.m_hosts[i]->m_hostname.c_str());
+ return false;
+ }
+ }
+ return true;
+}
+
+bool
+deploy(atrt_config & config)
+{
+ for (size_t i = 0; i<config.m_hosts.size(); i++)
+ {
+ BaseString tmp = g_setup_progname;
+ tmp.appfmt(" %s %s/ %s",
+ config.m_hosts[i]->m_hostname.c_str(),
+ g_prefix,
+ g_prefix);
+
+ g_logger.info("rsyncing %s to %s", g_prefix,
+ config.m_hosts[i]->m_hostname.c_str());
+ g_logger.debug("system(%s)", tmp.c_str());
+ const int r1 = system(tmp.c_str());
+ if(r1 != 0)
+ {
+ g_logger.critical("Failed to rsync %s to %s",
+ g_prefix,
+ config.m_hosts[i]->m_hostname.c_str());
return false;
}
}
+
return true;
}
+bool
+sshx(atrt_config & config, unsigned mask)
+{
+ for (size_t i = 0; i<config.m_processes.size(); i++)
+ {
+ atrt_process & proc = *config.m_processes[i];
+
+ BaseString tmp;
+ const char * type = 0;
+ switch(proc.m_type){
+ case atrt_process::AP_NDB_MGMD:
+ type = (mask & proc.m_type) ? "ndb_mgmd" : 0;
+ break;
+ case atrt_process::AP_NDBD:
+ type = (mask & proc.m_type) ? "ndbd" : 0;
+ break;
+ case atrt_process::AP_MYSQLD:
+ type = (mask & proc.m_type) ? "mysqld" : 0;
+ break;
+ case atrt_process::AP_NDB_API:
+ type = (mask & proc.m_type) ? "ndbapi" : 0;
+ break;
+ case atrt_process::AP_CLIENT:
+ type = (mask & proc.m_type) ? "client" : 0;
+ break;
+ default:
+ type = "<unknown>";
+ }
+
+ if (type == 0)
+ continue;
+
+ tmp.appfmt("xterm -fg black -title \"%s(%s) on %s\""
+ " -e 'ssh -t -X %s sh %s/ssh-login.sh' &",
+ type,
+ proc.m_cluster->m_name.c_str(),
+ proc.m_host->m_hostname.c_str(),
+ proc.m_host->m_hostname.c_str(),
+ proc.m_proc.m_cwd.c_str());
+
+ g_logger.debug("system(%s)", tmp.c_str());
+ const int r1 = system(tmp.c_str());
+ if(r1 != 0)
+ {
+ g_logger.critical("Failed sshx (%s)",
+ tmp.c_str());
+ return false;
+ }
+ NdbSleep_MilliSleep(300); // To prevent xlock problem
+ }
+
+ return true;
+}
+
+bool
+start(atrt_config & config, unsigned proc_mask)
+{
+ if (proc_mask & atrt_process::AP_NDB_MGMD)
+ if(!start_processes(g_config, atrt_process::AP_NDB_MGMD))
+ return false;
+
+ if (proc_mask & atrt_process::AP_NDBD)
+ {
+ if(!connect_ndb_mgm(g_config)){
+ return false;
+ }
+
+ if(!start_processes(g_config, atrt_process::AP_NDBD))
+ return false;
+
+ if(!wait_ndb(g_config, NDB_MGM_NODE_STATUS_NOT_STARTED))
+ return false;
+
+ for(Uint32 i = 0; i<3; i++)
+ if(wait_ndb(g_config, NDB_MGM_NODE_STATUS_STARTED))
+ goto started;
+ return false;
+ }
+
+started:
+ if(!start_processes(g_config, p_servers & proc_mask))
+ return false;
+
+ return true;
+}
+
+void
+require(bool x)
+{
+ if (!x)
+ abort();
+}
+
template class Vector<Vector<SimpleCpcClient::Process> >;
-template class Vector<atrt_host>;
-template class Vector<atrt_process>;
+template class Vector<atrt_host*>;
+template class Vector<atrt_cluster*>;
+template class Vector<atrt_process*>;
diff --git a/storage/ndb/test/run-test/run-test.hpp b/storage/ndb/test/run-test/run-test.hpp
deleted file mode 100644
index 2b259e83a60..00000000000
--- a/storage/ndb/test/run-test/run-test.hpp
+++ /dev/null
@@ -1,95 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef atrt_config_hpp
-#define atrt_config_hpp
-
-#include <getarg.h>
-#include <Vector.hpp>
-#include <BaseString.hpp>
-#include <Logger.hpp>
-#include <mgmapi.h>
-#include <CpcClient.hpp>
-
-#undef MYSQL_CLIENT
-
-enum ErrorCodes {
- ERR_OK = 0,
- ERR_NDB_FAILED = 101,
- ERR_SERVERS_FAILED = 102,
- ERR_MAX_TIME_ELAPSED = 103
-};
-
-struct atrt_host {
- size_t m_index;
- BaseString m_user;
- BaseString m_base_dir;
- BaseString m_hostname;
- SimpleCpcClient * m_cpcd;
-};
-
-struct atrt_process {
- size_t m_index;
- BaseString m_hostname;
- struct atrt_host * m_host;
-
- enum Type {
- ALL = 255,
- NDB_DB = 1,
- NDB_API = 2,
- NDB_MGM = 4,
- NDB_REP = 8,
- MYSQL_SERVER = 16,
- MYSQL_CLIENT = 32
- } m_type;
-
- SimpleCpcClient::Process m_proc;
- short m_ndb_mgm_port;
- NdbMgmHandle m_ndb_mgm_handle; // if type == ndb_mgm
-};
-
-struct atrt_config {
- BaseString m_key;
- Vector<atrt_host> m_hosts;
- Vector<atrt_process> m_processes;
-};
-
-struct atrt_testcase {
- bool m_report;
- bool m_run_all;
- time_t m_max_time;
- BaseString m_command;
- BaseString m_args;
-};
-
-extern Logger g_logger;
-
-bool parse_args(int argc, const char** argv);
-bool setup_config(atrt_config&);
-bool connect_hosts(atrt_config&);
-bool connect_ndb_mgm(atrt_config&);
-bool wait_ndb(atrt_config&, int ndb_mgm_node_status);
-bool start_processes(atrt_config&, int);
-bool stop_processes(atrt_config&, int);
-bool update_status(atrt_config&, int);
-int is_running(atrt_config&, int);
-bool gather_result(atrt_config&, int * result);
-
-bool read_test_case(FILE *, atrt_testcase&, int& line);
-bool setup_test_case(atrt_config&, const atrt_testcase&);
-
-bool setup_hosts(atrt_config&);
-
-#endif
diff --git a/storage/ndb/test/run-test/setup.cpp b/storage/ndb/test/run-test/setup.cpp
new file mode 100644
index 00000000000..cbb7a34f171
--- /dev/null
+++ b/storage/ndb/test/run-test/setup.cpp
@@ -0,0 +1,965 @@
+#include "atrt.hpp"
+#include <ndb_global.h>
+#include <my_sys.h>
+#include <my_getopt.h>
+#include <NdbOut.hpp>
+
+static NdbOut& operator<<(NdbOut& out, const atrt_process& proc);
+static atrt_host * find(const char * hostname, Vector<atrt_host*>&);
+static bool load_process(atrt_config&, atrt_cluster&, atrt_process::Type,
+ size_t idx, const char * hostname);
+static bool load_options(int argc, char** argv, int type, atrt_options&);
+
+enum {
+ PO_NDB = atrt_options::AO_NDBCLUSTER
+
+ ,PO_REP_SLAVE = 256
+ ,PO_REP_MASTER = 512
+ ,PO_REP = (atrt_options::AO_REPLICATION | PO_REP_SLAVE | PO_REP_MASTER)
+};
+
+struct proc_option
+{
+ const char * name;
+ int type;
+ int options;
+};
+
+static
+struct proc_option f_options[] = {
+ { "--FileSystemPath=", atrt_process::AP_NDBD, 0 }
+ ,{ "--PortNumber=", atrt_process::AP_NDB_MGMD, 0 }
+ ,{ "--datadir=", atrt_process::AP_MYSQLD, 0 }
+ ,{ "--socket=", atrt_process::AP_MYSQLD | atrt_process::AP_CLIENT, 0 }
+ ,{ "--port=", atrt_process::AP_MYSQLD | atrt_process::AP_CLIENT, 0 }
+ ,{ "--server-id=", atrt_process::AP_MYSQLD, PO_REP }
+ ,{ "--log-bin", atrt_process::AP_MYSQLD, PO_REP_MASTER }
+ ,{ "--master-host=", atrt_process::AP_MYSQLD, PO_REP_SLAVE }
+ ,{ "--master-port=", atrt_process::AP_MYSQLD, PO_REP_SLAVE }
+ ,{ "--master-user=", atrt_process::AP_MYSQLD, PO_REP_SLAVE }
+ ,{ "--master-password=", atrt_process::AP_MYSQLD, PO_REP_SLAVE }
+ ,{ "--ndb-connectstring=", atrt_process::AP_MYSQLD | atrt_process::AP_CLUSTER
+ ,PO_NDB }
+ ,{ "--ndbcluster", atrt_process::AP_MYSQLD, PO_NDB }
+ ,{ 0, 0, 0 }
+};
+const char * ndbcs = "--ndb-connectstring=";
+
+bool
+setup_config(atrt_config& config)
+{
+ BaseString tmp(g_clusters);
+ Vector<BaseString> clusters;
+ tmp.split(clusters, ",");
+
+ bool fqpn = clusters.size() > 1 || g_fqpn;
+
+ size_t j,k;
+ for (size_t i = 0; i<clusters.size(); i++)
+ {
+ struct atrt_cluster *cluster = new atrt_cluster;
+ config.m_clusters.push_back(cluster);
+
+ cluster->m_name = clusters[i];
+ if (fqpn)
+ {
+ cluster->m_dir.assfmt("cluster%s/", cluster->m_name.c_str());
+ }
+ else
+ {
+ cluster->m_dir = "";
+ }
+
+ int argc = 1;
+ const char * argv[] = { "atrt", 0, 0 };
+
+ BaseString buf;
+ buf.assfmt("--defaults-group-suffix=%s", clusters[i].c_str());
+ argv[argc++] = buf.c_str();
+ char ** tmp = (char**)argv;
+ const char *groups[] = { "cluster_config", 0 };
+ int ret = load_defaults(g_my_cnf, groups, &argc, &tmp);
+ if (ret)
+ {
+ g_logger.error("Unable to load defaults for cluster: %s",
+ clusters[i].c_str());
+ return false;
+ }
+
+ struct
+ {
+ atrt_process::Type type;
+ const char * name;
+ const char * value;
+ } proc_args[] = {
+ { atrt_process::AP_NDB_MGMD, "--ndb_mgmd=", 0 },
+ { atrt_process::AP_NDBD, "--ndbd=", 0 },
+ { atrt_process::AP_NDB_API, "--ndbapi=", 0 },
+ { atrt_process::AP_NDB_API, "--api=", 0 },
+ { atrt_process::AP_MYSQLD, "--mysqld=", 0 },
+ { atrt_process::AP_ALL, 0, 0}
+ };
+
+ /**
+ * Find all processes...
+ */
+ for (j = 0; j<(size_t)argc; j++)
+ {
+ for (k = 0; proc_args[k].name; k++)
+ {
+ if (!strncmp(tmp[j], proc_args[k].name, strlen(proc_args[k].name)))
+ {
+ proc_args[k].value = tmp[j] + strlen(proc_args[k].name);
+ break;
+ }
+ }
+ }
+
+ /**
+ * Load each process
+ */
+ for (j = 0; proc_args[j].name; j++)
+ {
+ if (proc_args[j].value)
+ {
+ BaseString tmp(proc_args[j].value);
+ Vector<BaseString> list;
+ tmp.split(list, ",");
+ for (k = 0; k<list.size(); k++)
+ if (!load_process(config, *cluster, proc_args[j].type,
+ k + 1, list[k].c_str()))
+ return false;
+ }
+ }
+
+ {
+ /**
+ * Load cluster options
+ */
+
+ argc = 1;
+ argv[argc++] = buf.c_str();
+ const char *groups[] = { "mysql_cluster", 0 };
+ ret = load_defaults(g_my_cnf, groups, &argc, &tmp);
+
+ if (ret)
+ {
+ g_logger.error("Unable to load defaults for cluster: %s",
+ clusters[i].c_str());
+ return false;
+ }
+
+ load_options(argc, tmp, atrt_process::AP_CLUSTER, cluster->m_options);
+ }
+ }
+ return true;
+}
+
+static
+atrt_host *
+find(const char * hostname, Vector<atrt_host*> & hosts){
+ for (size_t i = 0; i<hosts.size(); i++){
+ if (hosts[i]->m_hostname == hostname){
+ return hosts[i];
+ }
+ }
+
+ atrt_host* host = new atrt_host;
+ host->m_index = hosts.size();
+ host->m_cpcd = new SimpleCpcClient(hostname, 1234);
+ host->m_basedir = g_basedir;
+ host->m_user = g_user;
+ host->m_hostname = hostname;
+ hosts.push_back(host);
+ return host;
+}
+
+static
+bool
+load_process(atrt_config& config, atrt_cluster& cluster,
+ atrt_process::Type type,
+ size_t idx,
+ const char * hostname)
+{
+ atrt_host * host_ptr = find(hostname, config.m_hosts);
+ atrt_process *proc_ptr = new atrt_process;
+
+ config.m_processes.push_back(proc_ptr);
+ host_ptr->m_processes.push_back(proc_ptr);
+ cluster.m_processes.push_back(proc_ptr);
+
+ atrt_process& proc = *proc_ptr;
+
+ const size_t proc_no = config.m_processes.size();
+ proc.m_index = idx;
+ proc.m_type = type;
+ proc.m_host = host_ptr;
+ proc.m_cluster = &cluster;
+ proc.m_options.m_features = 0;
+ proc.m_rep_src = 0;
+ proc.m_proc.m_id = -1;
+ proc.m_proc.m_type = "temporary";
+ proc.m_proc.m_owner = "atrt";
+ proc.m_proc.m_group = cluster.m_name.c_str();
+ proc.m_proc.m_stdout = "log.out";
+ proc.m_proc.m_stderr = "2>&1";
+ proc.m_proc.m_runas = proc.m_host->m_user;
+ proc.m_proc.m_ulimit = "c:unlimited";
+ proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", g_prefix);
+ proc.m_proc.m_env.appfmt(" MYSQL_HOME=%s", g_basedir);
+ proc.m_proc.m_shutdown_options = "";
+
+ int argc = 1;
+ const char * argv[] = { "atrt", 0, 0 };
+
+ BaseString buf[10];
+ char ** tmp = (char**)argv;
+ const char *groups[] = { 0, 0, 0, 0 };
+ switch(type){
+ case atrt_process::AP_NDB_MGMD:
+ groups[0] = "cluster_config";
+ buf[1].assfmt("cluster_config.ndb_mgmd.%d", idx);
+ groups[1] = buf[1].c_str();
+ buf[0].assfmt("--defaults-group-suffix=%s", cluster.m_name.c_str());
+ argv[argc++] = buf[0].c_str();
+ break;
+ case atrt_process::AP_NDBD:
+ groups[0] = "cluster_config";
+ buf[1].assfmt("cluster_config.ndbd.%d", idx);
+ groups[1] = buf[1].c_str();
+ buf[0].assfmt("--defaults-group-suffix=%s", cluster.m_name.c_str());
+ argv[argc++] = buf[0].c_str();
+ break;
+ case atrt_process::AP_MYSQLD:
+ groups[0] = "mysqld";
+ groups[1] = "mysql_cluster";
+ buf[0].assfmt("--defaults-group-suffix=.%d%s",idx,cluster.m_name.c_str());
+ argv[argc++] = buf[0].c_str();
+ break;
+ case atrt_process::AP_CLIENT:
+ buf[0].assfmt("client.%d%s", idx, cluster.m_name.c_str());
+ groups[0] = buf[0].c_str();
+ break;
+ case atrt_process::AP_NDB_API:
+ break;
+ default:
+ g_logger.critical("Unhandled process type: %d", type);
+ return false;
+ }
+
+ int ret = load_defaults(g_my_cnf, groups, &argc, &tmp);
+ if (ret)
+ {
+ g_logger.error("Unable to load defaults for cluster: %s",
+ cluster.m_name.c_str());
+ return false;
+ }
+
+ load_options(argc, tmp, type, proc.m_options);
+
+ BaseString dir;
+ dir.assfmt("%s/%s",
+ proc.m_host->m_basedir.c_str(),
+ cluster.m_dir.c_str());
+
+ switch(type){
+ case atrt_process::AP_NDB_MGMD:
+ {
+ proc.m_proc.m_name.assfmt("%d-%s", proc_no, "ndb_mgmd");
+ proc.m_proc.m_path.assign(g_prefix).append("/libexec/ndb_mgmd");
+ proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf",
+ proc.m_host->m_basedir.c_str());
+ proc.m_proc.m_args.appfmt(" --defaults-group-suffix=%s",
+ cluster.m_name.c_str());
+ proc.m_proc.m_args.append(" --nodaemon --mycnf");
+ proc.m_proc.m_cwd.assfmt("%sndb_mgmd.%d", dir.c_str(), proc.m_index);
+ proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=%s",
+ cluster.m_name.c_str());
+ break;
+ }
+ case atrt_process::AP_NDBD:
+ {
+ proc.m_proc.m_name.assfmt("%d-%s", proc_no, "ndbd");
+ proc.m_proc.m_path.assign(g_prefix).append("/libexec/ndbd");
+ proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf",
+ proc.m_host->m_basedir.c_str());
+ proc.m_proc.m_args.appfmt(" --defaults-group-suffix=%s",
+ cluster.m_name.c_str());
+ proc.m_proc.m_args.append(" --nodaemon -n");
+ proc.m_proc.m_cwd.assfmt("%sndbd.%d", dir.c_str(), proc.m_index);
+ proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=%s",
+ cluster.m_name.c_str());
+ break;
+ }
+ case atrt_process::AP_MYSQLD:
+ {
+ proc.m_proc.m_name.assfmt("%d-%s", proc_no, "mysqld");
+ proc.m_proc.m_path.assign(g_prefix).append("/libexec/mysqld");
+ proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf",
+ proc.m_host->m_basedir.c_str());
+ proc.m_proc.m_args.appfmt(" --defaults-group-suffix=.%d%s",
+ proc.m_index,
+ cluster.m_name.c_str());
+ proc.m_proc.m_args.append(" --core-file");
+ proc.m_proc.m_cwd.appfmt("%smysqld.%d", dir.c_str(), proc.m_index);
+ proc.m_proc.m_shutdown_options = "SIGKILL"; // not nice
+ proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=.%d%s",
+ proc.m_index,
+ cluster.m_name.c_str());
+ break;
+ }
+ case atrt_process::AP_NDB_API:
+ {
+ proc.m_proc.m_name.assfmt("%d-%s", proc_no, "ndb_api");
+ proc.m_proc.m_path = "";
+ proc.m_proc.m_args = "";
+ proc.m_proc.m_cwd.appfmt("%sndb_api.%d", dir.c_str(), proc.m_index);
+ proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=%s",
+ cluster.m_name.c_str());
+ break;
+ }
+ case atrt_process::AP_CLIENT:
+ {
+ proc.m_proc.m_name.assfmt("%d-%s", proc_no, "mysql");
+ proc.m_proc.m_path = "";
+ proc.m_proc.m_args = "";
+ proc.m_proc.m_cwd.appfmt("%s/client.%d", dir.c_str(), proc.m_index);
+ proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=.%d%s",
+ proc.m_index,
+ cluster.m_name.c_str());
+ break;
+ }
+ case atrt_process::AP_ALL:
+ case atrt_process::AP_CLUSTER:
+ g_logger.critical("Unhandled process type: %d", proc.m_type);
+ return false;
+ }
+
+ if (proc.m_proc.m_path.length())
+ {
+ proc.m_proc.m_env.appfmt(" CMD=\"%s", proc.m_proc.m_path.c_str());
+ if (proc.m_proc.m_args.length())
+ proc.m_proc.m_env.append(" ");
+ proc.m_proc.m_env.append(proc.m_proc.m_args);
+ proc.m_proc.m_env.append("\" ");
+ }
+
+ if (type == atrt_process::AP_MYSQLD)
+ {
+ /**
+ * Add a client for each mysqld
+ */
+ if (!load_process(config, cluster, atrt_process::AP_CLIENT, idx, hostname))
+ {
+ return false;
+ }
+ }
+
+ if (type == atrt_process::AP_CLIENT)
+ {
+ proc.m_mysqld = cluster.m_processes[cluster.m_processes.size()-2];
+ }
+
+ return true;
+}
+
+static
+bool
+load_options(int argc, char** argv, int type, atrt_options& opts)
+{
+ for (size_t i = 0; i<(size_t)argc; i++)
+ {
+ for (size_t j = 0; f_options[j].name; j++)
+ {
+ const char * name = f_options[j].name;
+ const size_t len = strlen(name);
+
+ if ((f_options[j].type & type) && strncmp(argv[i], name, len) == 0)
+ {
+ opts.m_loaded.put(name, argv[i]+len, true);
+ break;
+ }
+ }
+ }
+ return true;
+}
+
+struct proc_rule_ctx
+{
+ int m_setup;
+ atrt_config* m_config;
+ atrt_host * m_host;
+ atrt_cluster* m_cluster;
+ atrt_process* m_process;
+};
+
+struct proc_rule
+{
+ int type;
+ bool (* func)(Properties& prop, proc_rule_ctx&, int extra);
+ int extra;
+};
+
+static bool pr_check_replication(Properties&, proc_rule_ctx&, int);
+static bool pr_check_features(Properties&, proc_rule_ctx&, int);
+static bool pr_fix_client(Properties&, proc_rule_ctx&, int);
+static bool pr_proc_options(Properties&, proc_rule_ctx&, int);
+static bool pr_fix_ndb_connectstring(Properties&, proc_rule_ctx&, int);
+static bool pr_set_ndb_connectstring(Properties&, proc_rule_ctx&, int);
+static bool pr_check_proc(Properties&, proc_rule_ctx&, int);
+
+static
+proc_rule f_rules[] =
+{
+ { atrt_process::AP_CLUSTER, pr_check_features, 0 }
+ ,{ atrt_process::AP_MYSQLD, pr_check_replication, 0 }
+ ,{ (atrt_process::AP_ALL & ~atrt_process::AP_CLIENT), pr_proc_options,
+ ~(PO_REP | PO_NDB) }
+ ,{ (atrt_process::AP_ALL & ~atrt_process::AP_CLIENT), pr_proc_options, PO_REP }
+ ,{ atrt_process::AP_CLIENT, pr_fix_client, 0 }
+ ,{ atrt_process::AP_CLUSTER, pr_fix_ndb_connectstring, 0 }
+ ,{ atrt_process::AP_MYSQLD, pr_set_ndb_connectstring, 0 }
+ ,{ atrt_process::AP_ALL, pr_check_proc, 0 }
+ ,{ 0, 0, 0 }
+};
+
+bool
+configure(atrt_config& config, int setup)
+{
+ Properties props;
+
+ for (size_t i = 0; f_rules[i].func; i++)
+ {
+ bool ok = true;
+ proc_rule_ctx ctx;
+ bzero(&ctx, sizeof(ctx));
+ ctx.m_setup = setup;
+ ctx.m_config = &config;
+
+ for (size_t j = 0; j < config.m_clusters.size(); j++)
+ {
+ ctx.m_cluster = config.m_clusters[j];
+
+ if (f_rules[i].type & atrt_process::AP_CLUSTER)
+ {
+ g_logger.debug("applying rule %d to cluster %s", i,
+ ctx.m_cluster->m_name.c_str());
+ if (! (* f_rules[i].func)(props, ctx, f_rules[i].extra))
+ ok = false;
+ }
+ else
+ {
+ atrt_cluster& cluster = *config.m_clusters[j];
+ for (size_t k = 0; k<cluster.m_processes.size(); k++)
+ {
+ atrt_process& proc = *cluster.m_processes[k];
+ ctx.m_process = cluster.m_processes[k];
+ if (proc.m_type & f_rules[i].type)
+ {
+ g_logger.debug("applying rule %d to %s", i,
+ proc.m_proc.m_cwd.c_str());
+ if (! (* f_rules[i].func)(props, ctx, f_rules[i].extra))
+ ok = false;
+ }
+ }
+ }
+ }
+
+ if (!ok)
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static
+atrt_process*
+find(atrt_config& config, int type, const char * name)
+{
+ BaseString tmp(name);
+ Vector<BaseString> src;
+ Vector<BaseString> dst;
+ tmp.split(src, ".");
+
+ if (src.size() != 2)
+ {
+ return 0;
+ }
+ atrt_cluster* cluster = 0;
+ BaseString cl;
+ cl.appfmt(".%s", src[1].c_str());
+ for (size_t i = 0; i<config.m_clusters.size(); i++)
+ {
+ if (config.m_clusters[i]->m_name == cl)
+ {
+ cluster = config.m_clusters[i];
+ break;
+ }
+ }
+
+ if (cluster == 0)
+ {
+ return 0;
+ }
+
+ int idx = atoi(src[0].c_str()) - 1;
+ for (size_t i = 0; i<cluster->m_processes.size(); i++)
+ {
+ if (cluster->m_processes[i]->m_type & type)
+ {
+ if (idx == 0)
+ return cluster->m_processes[i];
+ else
+ idx --;
+ }
+ }
+
+ return 0;
+}
+
+static
+bool
+pr_check_replication(Properties& props, proc_rule_ctx& ctx, int)
+{
+ if (! (ctx.m_config->m_replication == ""))
+ {
+ Vector<BaseString> list;
+ ctx.m_config->m_replication.split(list, ";");
+ atrt_config& config = *ctx.m_config;
+
+ ctx.m_config->m_replication = "";
+
+ const char * msg = "Invalid replication specification";
+ for (size_t i = 0; i<list.size(); i++)
+ {
+ Vector<BaseString> rep;
+ list[i].split(rep, ":");
+ if (rep.size() != 2)
+ {
+ g_logger.error("%s: %s (split: %d)", msg, list[i].c_str(), rep.size());
+ return false;
+ }
+
+ atrt_process* src = find(config, atrt_process::AP_MYSQLD,rep[0].c_str());
+ atrt_process* dst = find(config, atrt_process::AP_MYSQLD,rep[1].c_str());
+
+ if (src == 0 || dst == 0)
+ {
+ g_logger.error("%s: %s (%d %d)",
+ msg, list[i].c_str(), src != 0, dst != 0);
+ return false;
+ }
+
+
+ if (dst->m_rep_src != 0)
+ {
+ g_logger.error("%s: %s : %s already has replication src (%s)",
+ msg,
+ list[i].c_str(),
+ dst->m_proc.m_cwd.c_str(),
+ dst->m_rep_src->m_proc.m_cwd.c_str());
+ return false;
+ }
+
+ dst->m_rep_src = src;
+ src->m_rep_dst.push_back(dst);
+
+ src->m_options.m_features |= PO_REP_MASTER;
+ dst->m_options.m_features |= PO_REP_SLAVE;
+ }
+ }
+ return true;
+}
+
+static
+bool
+pr_check_features(Properties& props, proc_rule_ctx& ctx, int)
+{
+ int features = 0;
+ atrt_cluster& cluster = *ctx.m_cluster;
+ for (size_t i = 0; i<cluster.m_processes.size(); i++)
+ {
+ if (cluster.m_processes[i]->m_type == atrt_process::AP_NDB_MGMD ||
+ cluster.m_processes[i]->m_type == atrt_process::AP_NDB_API ||
+ cluster.m_processes[i]->m_type == atrt_process::AP_NDBD)
+ {
+ features |= atrt_options::AO_NDBCLUSTER;
+ break;
+ }
+ }
+
+ if (features)
+ {
+ cluster.m_options.m_features |= features;
+ for (size_t i = 0; i<cluster.m_processes.size(); i++)
+ {
+ cluster.m_processes[i]->m_options.m_features |= features;
+ }
+ }
+ return true;
+}
+
+static
+bool
+pr_fix_client(Properties& props, proc_rule_ctx& ctx, int)
+{
+ for (size_t i = 0; f_options[i].name; i++)
+ {
+ proc_option& opt = f_options[i];
+ const char * name = opt.name;
+ if (opt.type & atrt_process::AP_CLIENT)
+ {
+ const char * val;
+ atrt_process& proc = *ctx.m_process;
+ if (!proc.m_options.m_loaded.get(name, &val))
+ {
+ require(proc.m_mysqld->m_options.m_loaded.get(name, &val));
+ proc.m_options.m_loaded.put(name, val);
+ proc.m_options.m_generated.put(name, val);
+ }
+ }
+ }
+
+ return true;
+}
+
+static
+Uint32
+try_default_port(atrt_process& proc, const char * name)
+{
+ Uint32 port =
+ strcmp(name, "--port=") == 0 ? 3306 :
+ strcmp(name, "--PortNumber=") == 0 ? 1186 :
+ 0;
+
+ atrt_host * host = proc.m_host;
+ for (size_t i = 0; i<host->m_processes.size(); i++)
+ {
+ const char * val;
+ if (host->m_processes[i]->m_options.m_loaded.get(name, &val))
+ {
+ if ((Uint32)atoi(val) == port)
+ return 0;
+ }
+ }
+ return port;
+}
+
+static
+bool
+generate(atrt_process& proc, const char * name, Properties& props)
+{
+ atrt_options& opts = proc.m_options;
+ if (strcmp(name, "--port=") == 0 ||
+ strcmp(name, "--PortNumber=") == 0)
+ {
+ Uint32 val;
+ if (g_default_ports == 0 || (val = try_default_port(proc, name)) == 0)
+ {
+ val = g_baseport;
+ props.get("--PortNumber=", &val);
+ props.put("--PortNumber=", (val + 1), true);
+ }
+
+ char buf[255];
+ snprintf(buf, sizeof(buf), "%u", val);
+ opts.m_loaded.put(name, buf);
+ opts.m_generated.put(name, buf);
+ return true;
+ }
+ else if (strcmp(name, "--datadir=") == 0)
+ {
+ opts.m_loaded.put(name, proc.m_proc.m_cwd.c_str());
+ opts.m_generated.put(name, proc.m_proc.m_cwd.c_str());
+ return true;
+ }
+ else if (strcmp(name, "--FileSystemPath=") == 0)
+ {
+ BaseString dir;
+ dir.append(proc.m_host->m_basedir);
+ dir.append("/");
+ dir.append(proc.m_cluster->m_dir);
+ opts.m_loaded.put(name, dir.c_str());
+ opts.m_generated.put(name, dir.c_str());
+ return true;
+ }
+ else if (strcmp(name, "--socket=") == 0)
+ {
+ const char * sock = 0;
+ if (g_default_ports)
+ {
+ sock = "/tmp/mysql.sock";
+ atrt_host * host = proc.m_host;
+ for (size_t i = 0; i<host->m_processes.size(); i++)
+ {
+ const char * val;
+ if (host->m_processes[i]->m_options.m_loaded.get(name, &val))
+ {
+ if (strcmp(sock, val) == 0)
+ {
+ sock = 0;
+ break;
+ }
+ }
+ }
+ }
+
+ BaseString tmp;
+ if (sock == 0)
+ {
+ tmp.assfmt("%s/mysql.sock", proc.m_proc.m_cwd.c_str());
+ sock = tmp.c_str();
+ }
+
+ opts.m_loaded.put(name, sock);
+ opts.m_generated.put(name, sock);
+ return true;
+ }
+ else if (strcmp(name, "--server-id=") == 0)
+ {
+ Uint32 val = 1;
+ props.get(name, &val);
+ char buf[255];
+ snprintf(buf, sizeof(buf), "%u", val);
+ opts.m_loaded.put(name, buf);
+ opts.m_generated.put(name, buf);
+ props.put(name, (val + 1), true);
+ return true;
+ }
+ else if (strcmp(name, "--log-bin") == 0)
+ {
+ opts.m_loaded.put(name, "");
+ opts.m_generated.put(name, "");
+ return true;
+ }
+ else if (strcmp(name, "--master-host=") == 0)
+ {
+ require(proc.m_rep_src != 0);
+ opts.m_loaded.put(name, proc.m_rep_src->m_host->m_hostname.c_str());
+ opts.m_generated.put(name, proc.m_rep_src->m_host->m_hostname.c_str());
+ return true;
+ }
+ else if (strcmp(name, "--master-port=") == 0)
+ {
+ const char* val;
+ require(proc.m_rep_src->m_options.m_loaded.get("--port=", &val));
+ opts.m_loaded.put(name, val);
+ opts.m_generated.put(name, val);
+ return true;
+ }
+ else if (strcmp(name, "--master-user=") == 0)
+ {
+ opts.m_loaded.put(name, "root");
+ opts.m_generated.put(name, "root");
+ return true;
+ }
+ else if (strcmp(name, "--master-password=") == 0)
+ {
+ opts.m_loaded.put(name, "\"\"");
+ opts.m_generated.put(name, "\"\"");
+ return true;
+ }
+
+ g_logger.warning("Unknown parameter: %s", name);
+ return true;
+}
+
+static
+bool
+pr_proc_options(Properties& props, proc_rule_ctx& ctx, int extra)
+{
+ for (size_t i = 0; f_options[i].name; i++)
+ {
+ proc_option& opt = f_options[i];
+ atrt_process& proc = *ctx.m_process;
+ const char * name = opt.name;
+ if (opt.type & proc.m_type)
+ {
+ if (opt.options == 0 ||
+ (opt.options & extra & proc.m_options.m_features))
+ {
+ const char * val;
+ if (!proc.m_options.m_loaded.get(name, &val))
+ {
+ generate(proc, name, props);
+ }
+ }
+ }
+ }
+ return true;
+}
+
+static
+bool
+pr_fix_ndb_connectstring(Properties& props, proc_rule_ctx& ctx, int)
+{
+ const char * val;
+ atrt_cluster& cluster = *ctx.m_cluster;
+
+ if (cluster.m_options.m_features & atrt_options::AO_NDBCLUSTER)
+ {
+ if (!cluster.m_options.m_loaded.get(ndbcs, &val))
+ {
+ /**
+ * Construct connect string for this cluster
+ */
+ BaseString str;
+ for (size_t i = 0; i<cluster.m_processes.size(); i++)
+ {
+ atrt_process* tmp = cluster.m_processes[i];
+ if (tmp->m_type == atrt_process::AP_NDB_MGMD)
+ {
+ if (str.length())
+ {
+ str.append(";");
+ }
+ const char * port;
+ require(tmp->m_options.m_loaded.get("--PortNumber=", &port));
+ str.appfmt("%s:%s", tmp->m_host->m_hostname.c_str(), port);
+ }
+ }
+ cluster.m_options.m_loaded.put(ndbcs, str.c_str());
+ cluster.m_options.m_generated.put(ndbcs, str.c_str());
+ cluster.m_options.m_loaded.get(ndbcs, &val);
+ }
+
+ for (size_t i = 0; i<cluster.m_processes.size(); i++)
+ {
+ cluster.m_processes[i]->m_proc.m_env.appfmt(" NDB_CONNECTSTRING=%s",
+ val);
+ }
+ }
+ return true;
+}
+
+static
+bool
+pr_set_ndb_connectstring(Properties& props, proc_rule_ctx& ctx, int)
+{
+ const char * val;
+
+ atrt_process& proc = *ctx.m_process;
+ if (proc.m_options.m_features & atrt_options::AO_NDBCLUSTER)
+ {
+ if (!proc.m_options.m_loaded.get(ndbcs, &val))
+ {
+ require(proc.m_cluster->m_options.m_loaded.get(ndbcs, &val));
+ proc.m_options.m_loaded.put(ndbcs, val);
+ proc.m_options.m_generated.put(ndbcs, val);
+ }
+
+ if (!proc.m_options.m_loaded.get("--ndbcluster", &val))
+ {
+ proc.m_options.m_loaded.put("--ndbcluster", "");
+ proc.m_options.m_generated.put("--ndbcluster", "");
+ }
+ }
+ return true;
+}
+
+static
+bool
+pr_check_proc(Properties& props, proc_rule_ctx& ctx, int)
+{
+ bool ok = true;
+ bool generated = false;
+ const int setup = ctx.m_setup;
+ atrt_process& proc = *ctx.m_process;
+ for (size_t i = 0; f_options[i].name; i++)
+ {
+ proc_option& opt = f_options[i];
+ const char * name = opt.name;
+ if ((ctx.m_process->m_type & opt.type) &&
+ (opt.options == 0 || (ctx.m_process->m_options.m_features & opt.options)))
+ {
+ const char * val;
+ if (!proc.m_options.m_loaded.get(name, &val))
+ {
+ ok = false;
+ g_logger.warning("Missing parameter: %s for %s",
+ name, proc.m_proc.m_cwd.c_str());
+ }
+ else if (proc.m_options.m_generated.get(name, &val))
+ {
+ if (setup == 0)
+ {
+ ok = false;
+ g_logger.warning("Missing parameter: %s for %s",
+ name, proc.m_proc.m_cwd.c_str());
+ }
+ else
+ {
+ generated = true;
+ }
+ }
+ }
+ }
+
+ if (generated)
+ {
+ ctx.m_config->m_generated = true;
+ }
+
+ //ndbout << proc << endl;
+
+ return ok;
+}
+
+
+NdbOut&
+operator<<(NdbOut& out, const atrt_process& proc)
+{
+ out << "[ atrt_process: ";
+ switch(proc.m_type){
+ case atrt_process::AP_NDB_MGMD:
+ out << "ndb_mgmd";
+ break;
+ case atrt_process::AP_NDBD:
+ out << "ndbd";
+ break;
+ case atrt_process::AP_MYSQLD:
+ out << "mysqld";
+ break;
+ case atrt_process::AP_NDB_API:
+ out << "ndbapi";
+ break;
+ case atrt_process::AP_CLIENT:
+ out << "client";
+ break;
+ default:
+ out << "<unknown: " << (int)proc.m_type << " >";
+ }
+
+ out << " cluster: " << proc.m_cluster->m_name.c_str()
+ << " host: " << proc.m_host->m_hostname.c_str()
+ << endl << " cwd: " << proc.m_proc.m_cwd.c_str()
+ << endl << " path: " << proc.m_proc.m_path.c_str()
+ << endl << " args: " << proc.m_proc.m_args.c_str()
+ << endl << " env: " << proc.m_proc.m_env.c_str() << endl;
+
+ proc.m_options.m_generated.print(stdout, "generated: ");
+
+ out << " ]";
+
+#if 0
+ proc.m_index = 0; //idx;
+ proc.m_host = host_ptr;
+ proc.m_cluster = cluster;
+ proc.m_proc.m_id = -1;
+ proc.m_proc.m_type = "temporary";
+ proc.m_proc.m_owner = "atrt";
+ proc.m_proc.m_group = cluster->m_name.c_str();
+ proc.m_proc.m_cwd.assign(dir).append("/atrt/").append(cluster->m_dir);
+ proc.m_proc.m_stdout = "log.out";
+ proc.m_proc.m_stderr = "2>&1";
+ proc.m_proc.m_runas = proc.m_host->m_user;
+ proc.m_proc.m_ulimit = "c:unlimited";
+ proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", dir);
+ proc.m_proc.m_shutdown_options = "";
+#endif
+
+ return out;
+}
+
diff --git a/storage/ndb/test/run-test/test-tests.txt b/storage/ndb/test/run-test/test-tests.txt
new file mode 100644
index 00000000000..b57023fc0c1
--- /dev/null
+++ b/storage/ndb/test/run-test/test-tests.txt
@@ -0,0 +1,24 @@
+max-time: 600
+cmd: testBasic
+args: -n PkRead T1
+
+max-time: 1800
+cmd: testMgm
+args: -n SingleUserMode T1
+
+#
+#
+# SYSTEM RESTARTS
+#
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR3 T6
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR4 T6
+
+max-time: 600
+cmd: testBasic
+args: -n PkRead T1
+
diff --git a/storage/ndb/test/src/HugoOperations.cpp b/storage/ndb/test/src/HugoOperations.cpp
index 188e7a9288e..9a286a71b91 100644
--- a/storage/ndb/test/src/HugoOperations.cpp
+++ b/storage/ndb/test/src/HugoOperations.cpp
@@ -330,8 +330,8 @@ int HugoOperations::execute_Commit(Ndb* pNdb,
int check = 0;
check = pTrans->execute(Commit, eao);
- if( check == -1 ) {
- const NdbError err = pTrans->getNdbError();
+ const NdbError err = pTrans->getNdbError();
+ if( check == -1 || err.code) {
ERR(err);
NdbOperation* pOp = pTrans->getNdbErrorOperation();
if (pOp != NULL){
@@ -379,13 +379,16 @@ int HugoOperations::execute_NoCommit(Ndb* pNdb, AbortOption eao){
int check;
check = pTrans->execute(NoCommit, eao);
- if( check == -1 ) {
- const NdbError err = pTrans->getNdbError();
+ const NdbError err = pTrans->getNdbError();
+ if( check == -1 || err.code) {
ERR(err);
- NdbOperation* pOp;
- while ((pOp = pTrans->getNdbErrorOperation()) != NULL){
+ const NdbOperation* pOp = pTrans->getNdbErrorOperation();
+ while (pOp != NULL)
+ {
const NdbError err2 = pOp->getNdbError();
- ERR(err2);
+ if (err2.code)
+ ERR(err2);
+ pOp = pTrans->getNextCompletedOperation(pOp);
}
if (err.code == 0)
return NDBT_FAILED;
diff --git a/storage/ndb/test/src/NdbRestarter.cpp b/storage/ndb/test/src/NdbRestarter.cpp
index 1cfbb56f84d..299517b32d3 100644
--- a/storage/ndb/test/src/NdbRestarter.cpp
+++ b/storage/ndb/test/src/NdbRestarter.cpp
@@ -128,6 +128,68 @@ NdbRestarter::getMasterNodeId(){
}
int
+NdbRestarter::getNodeGroup(int nodeId){
+ if (!isConnected())
+ return -1;
+
+ if (getStatus() != 0)
+ return -1;
+
+ for(size_t i = 0; i < ndbNodes.size(); i++)
+ {
+ if(ndbNodes[i].node_id == nodeId)
+ {
+ return ndbNodes[i].node_group;
+ }
+ }
+
+ return -1;
+}
+
+int
+NdbRestarter::getNextMasterNodeId(int nodeId){
+ if (!isConnected())
+ return -1;
+
+ if (getStatus() != 0)
+ return -1;
+
+ size_t i;
+ for(i = 0; i < ndbNodes.size(); i++)
+ {
+ if(ndbNodes[i].node_id == nodeId)
+ {
+ break;
+ }
+ }
+ assert(i < ndbNodes.size());
+ if (i == ndbNodes.size())
+ return -1;
+
+ int dynid = ndbNodes[i].dynamic_id;
+ int minid = dynid;
+ for (i = 0; i<ndbNodes.size(); i++)
+ if (ndbNodes[i].dynamic_id > minid)
+ minid = ndbNodes[i].dynamic_id;
+
+ for (i = 0; i<ndbNodes.size(); i++)
+ if (ndbNodes[i].dynamic_id > dynid &&
+ ndbNodes[i].dynamic_id < minid)
+ {
+ minid = ndbNodes[i].dynamic_id;
+ }
+
+ if (minid != ~0)
+ {
+ for (i = 0; i<ndbNodes.size(); i++)
+ if (ndbNodes[i].dynamic_id == minid)
+ return ndbNodes[i].node_id;
+ }
+
+ return getMasterNodeId();
+}
+
+int
NdbRestarter::getRandomNotMasterNodeId(int rand){
int master = getMasterNodeId();
if(master == -1)
diff --git a/storage/ndb/test/tools/Makefile.am b/storage/ndb/test/tools/Makefile.am
index 8c451c0b6a1..386a59f723f 100644
--- a/storage/ndb/test/tools/Makefile.am
+++ b/storage/ndb/test/tools/Makefile.am
@@ -38,6 +38,7 @@ include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am
ndb_cpcc_LDADD = $(LDADD)
+ndb_cpcc_LDFLAGS = -static
# Don't update the files from bitkeeper
%::SCCS/s.%
diff --git a/storage/ndb/tools/Makefile.am b/storage/ndb/tools/Makefile.am
index 3d0c6f79146..7480b9a2ae9 100644
--- a/storage/ndb/tools/Makefile.am
+++ b/storage/ndb/tools/Makefile.am
@@ -50,7 +50,7 @@ ndb_restore_SOURCES = restore/restore_main.cpp \
restore/Restore.cpp \
../test/src/NDBT_ResultRow.cpp $(tools_common_sources)
-ndb_config_SOURCES = ndb_condig.cpp \
+ndb_config_SOURCES = ndb_config.cpp \
../src/mgmsrv/Config.cpp \
../src/mgmsrv/ConfigInfo.cpp \
../src/mgmsrv/InitConfigFileParser.cpp
diff --git a/storage/ndb/tools/ndb_condig.cpp b/storage/ndb/tools/ndb_config.cpp
index 31fc59a8b83..31fc59a8b83 100644
--- a/storage/ndb/tools/ndb_condig.cpp
+++ b/storage/ndb/tools/ndb_config.cpp
diff --git a/storage/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp
index e276eb7d6f9..0e1c3f506f8 100644
--- a/storage/ndb/tools/restore/consumer_restore.cpp
+++ b/storage/ndb/tools/restore/consumer_restore.cpp
@@ -182,6 +182,7 @@ BackupRestore::finalize_table(const TableS & table){
}
+#ifdef NOT_USED
static bool default_nodegroups(NdbDictionary::Table *table)
{
Uint16 *node_groups = (Uint16*)table->getFragmentData();
@@ -197,6 +198,7 @@ static bool default_nodegroups(NdbDictionary::Table *table)
}
return true;
}
+#endif
static Uint32 get_no_fragments(Uint64 max_rows, Uint32 no_nodes)
@@ -424,7 +426,7 @@ bool BackupRestore::translate_frm(NdbDictionary::Table *table)
{
const void *pack_data, *data, *new_pack_data;
char *new_data;
- uint data_len, pack_len, new_data_len, new_pack_len;
+ uint data_len, new_data_len, new_pack_len;
uint no_parts, extra_growth;
DBUG_ENTER("translate_frm");
@@ -1181,6 +1183,7 @@ BackupRestore::endOfTuples()
tuple_free();
}
+#ifdef NOT_USED
static bool use_part_id(const NdbDictionary::Table *table)
{
if (table->getDefaultNoPartitionsFlag() &&
@@ -1189,6 +1192,7 @@ static bool use_part_id(const NdbDictionary::Table *table)
else
return true;
}
+#endif
static Uint32 get_part_id(const NdbDictionary::Table *table,
Uint32 hash_value)