-rw-r--r--  mysql-test/ndb/ndbcluster.sh | 11
-rw-r--r--  mysql-test/r/ndb_basic.result | 2
-rw-r--r--  mysql-test/r/ndb_blob.result | 4
-rw-r--r--  sql/ha_ndbcluster.cc | 13
-rw-r--r--  sql/item_func.cc | 2
-rw-r--r--  sql/mysqld.cc | 2
-rw-r--r--  storage/archive/archive_reader.c | 1
-rw-r--r--  storage/ndb/include/kernel/GlobalSignalNumbers.h | 10
-rw-r--r--  storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp | 2
-rw-r--r--  storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp | 5
-rw-r--r--  storage/ndb/include/mgmapi/mgmapi.h | 1
-rw-r--r--  storage/ndb/include/ndbapi/NdbOperation.hpp | 21
-rw-r--r--  storage/ndb/include/util/OutputStream.hpp | 4
-rw-r--r--  storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp | 17
-rw-r--r--  storage/ndb/src/common/debugger/EventLogger.cpp | 12
-rw-r--r--  storage/ndb/src/common/debugger/signaldata/SignalNames.cpp | 7
-rw-r--r--  storage/ndb/src/common/util/OutputStream.cpp | 8
-rw-r--r--  storage/ndb/src/cw/cpcd/Makefile.am | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/ERROR_codes.txt | 4
-rw-r--r--  storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 196
-rw-r--r--  storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp | 6
-rw-r--r--  storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp | 1
-rw-r--r--  storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp | 1
-rw-r--r--  storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 27
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 201
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 1
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp | 1
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 46
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp | 1
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp | 1
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 30
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 1
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp | 4
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp | 8
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp | 28
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp | 1
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp | 1
-rw-r--r--  storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 19
-rw-r--r--  storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp | 1
-rw-r--r--  storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp | 1
-rw-r--r--  storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 28
-rw-r--r--  storage/ndb/src/kernel/vm/WatchDog.cpp | 7
-rw-r--r--  storage/ndb/src/mgmapi/mgmapi.cpp | 37
-rw-r--r--  storage/ndb/src/mgmsrv/InitConfigFileParser.cpp | 3
-rw-r--r--  storage/ndb/src/mgmsrv/MgmtSrvr.cpp | 94
-rw-r--r--  storage/ndb/src/mgmsrv/MgmtSrvr.hpp | 1
-rw-r--r--  storage/ndb/src/mgmsrv/Services.cpp | 94
-rw-r--r--  storage/ndb/src/mgmsrv/Services.hpp | 9
-rw-r--r--  storage/ndb/src/ndbapi/ClusterMgr.cpp | 5
-rw-r--r--  storage/ndb/src/ndbapi/ClusterMgr.hpp | 1
-rw-r--r--  storage/ndb/src/ndbapi/Makefile.am | 9
-rw-r--r--  storage/ndb/src/ndbapi/NdbOperationDefine.cpp | 20
-rw-r--r--  storage/ndb/src/ndbapi/ObjectMap.hpp | 13
-rw-r--r--  storage/ndb/src/ndbapi/SignalSender.hpp | 2
-rw-r--r--  storage/ndb/src/ndbapi/TransporterFacade.cpp | 3
-rw-r--r--  storage/ndb/src/ndbapi/ndberror.c | 2
-rw-r--r--  storage/ndb/src/ndbapi/ndberror_check.c | 38
-rw-r--r--  storage/ndb/test/include/NdbRestarter.hpp | 2
-rw-r--r--  storage/ndb/test/ndbapi/testBitfield.cpp | 16
-rw-r--r--  storage/ndb/test/ndbapi/testDict.cpp | 6
-rw-r--r--  storage/ndb/test/ndbapi/testNodeRestart.cpp | 85
-rw-r--r--  storage/ndb/test/run-test/Makefile.am | 22
-rwxr-xr-x  storage/ndb/test/run-test/atrt-gather-result.sh | 2
-rw-r--r--  storage/ndb/test/run-test/atrt.hpp | 161
-rw-r--r--  storage/ndb/test/run-test/autotest-boot.sh | 165
-rw-r--r--  storage/ndb/test/run-test/autotest-run.sh | 270
-rw-r--r--  storage/ndb/test/run-test/conf-daily-basic-ndb08.txt | 19
-rw-r--r--  storage/ndb/test/run-test/conf-daily-devel-ndb08.txt | 19
-rw-r--r--  storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt | 22
-rw-r--r--  storage/ndb/test/run-test/conf-daily-sql-ndb08.txt | 20
-rw-r--r--  storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt | 20
-rw-r--r--  storage/ndb/test/run-test/conf-dl145a.cnf | 23
-rw-r--r--  storage/ndb/test/run-test/conf-dl145a.txt | 22
-rw-r--r--  storage/ndb/test/run-test/conf-ndbmaster.cnf | 23
-rw-r--r--  storage/ndb/test/run-test/conf-ndbmaster.txt | 22
-rw-r--r--  storage/ndb/test/run-test/conf-repl.cnf | 28
-rw-r--r--  storage/ndb/test/run-test/conf-shark.txt | 22
-rw-r--r--  storage/ndb/test/run-test/daily-basic-tests.txt | 10
-rw-r--r--  storage/ndb/test/run-test/example-my.cnf | 116
-rw-r--r--  storage/ndb/test/run-test/files.cpp | 383
-rw-r--r--  storage/ndb/test/run-test/main.cpp | 1101
-rw-r--r--  storage/ndb/test/run-test/run-test.hpp | 95
-rw-r--r--  storage/ndb/test/run-test/setup.cpp | 965
-rw-r--r--  storage/ndb/test/run-test/test-tests.txt | 24
-rw-r--r--  storage/ndb/test/src/HugoOperations.cpp | 17
-rw-r--r--  storage/ndb/test/src/NdbRestarter.cpp | 62
-rw-r--r--  storage/ndb/test/tools/Makefile.am | 1
-rw-r--r--  storage/ndb/tools/Makefile.am | 2
-rw-r--r--  storage/ndb/tools/ndb_config.cpp (renamed from storage/ndb/tools/ndb_condig.cpp) | 0
92 files changed, 3443 insertions, 1378 deletions
diff --git a/mysql-test/ndb/ndbcluster.sh b/mysql-test/ndb/ndbcluster.sh
index e7fbf1ca8c2..1e25cd8047e 100644
--- a/mysql-test/ndb/ndbcluster.sh
+++ b/mysql-test/ndb/ndbcluster.sh
@@ -35,7 +35,8 @@ if [ -d ../sql ] ; then
exec_mgmtsrvr=$ndbtop/src/mgmsrv/ndb_mgmd
exec_waiter=$ndbtop/tools/ndb_waiter
exec_test=$ndbtop/tools/ndb_test_platform
- exec_mgmtclient=$ndbtop/src/mgmclient/ndb_mgm
+ exec_test_ndberror=
+ exec_test_ndberror=$ndbtop/src/ndbapi/ndberror_check
else
BINARY_DIST=1
if test -x "$BASEDIR/libexec/ndbd"
@@ -48,6 +49,7 @@ else
fi
exec_waiter=$BASEDIR/bin/ndb_waiter
exec_test=$BASEDIR/bin/ndb_test_platform
+ exec_test_ndberror=
exec_mgmtclient=$BASEDIR/bin/ndb_mgm
fi
@@ -56,6 +58,13 @@ if $exec_test ; then :; else
exit 1
fi
+if [ $exec_test_ndberror ] ; then
+if $exec_test_ndberror ; then :; else
+ echo "please fix in ndberror.c"
+ exit 1
+fi
+fi
+
pidfile=ndbcluster.pid
cfgfile=Ndb.cfg
test_ndb=
diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result
index b4a55641e80..7717e9fe72a 100644
--- a/mysql-test/r/ndb_basic.result
+++ b/mysql-test/r/ndb_basic.result
@@ -568,7 +568,7 @@ t1
insert into t1 values (1,1),(2,1),(3,1),(4,1),(5,2),(6,1),(7,1);
explain select * from t1 where a12345678901234567890123456789a1234567890=2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref a12345678901234567890123456789a1234567890 a12345678901234567890123456789a1234567890 5 const # Using where
+1 SIMPLE t1 ref a12345678901234567890123456789a1234567890 a12345678901234567890123456789a1234567890 5 const # Using where with pushed condition
select * from t1 where a12345678901234567890123456789a1234567890=2;
a1234567890123456789012345678901234567890 a12345678901234567890123456789a1234567890
5 2
diff --git a/mysql-test/r/ndb_blob.result b/mysql-test/r/ndb_blob.result
index 04f2cea6250..ad33c7994d1 100644
--- a/mysql-test/r/ndb_blob.result
+++ b/mysql-test/r/ndb_blob.result
@@ -242,7 +242,7 @@ insert into t1 values(9,'b9',999,'dd9');
commit;
explain select * from t1 where c >= 100 order by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 4 NULL # Using where; Using filesort
+1 SIMPLE t1 range c c 4 NULL # Using where with pushed condition; Using filesort
select * from t1 where c >= 100 order by a;
a b c d
1 b1 111 dd1
@@ -278,7 +278,7 @@ insert into t1 values(2,@b2,222,@d2);
commit;
explain select * from t1 where c >= 100 order by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 4 NULL # Using where; Using filesort
+1 SIMPLE t1 range c c 4 NULL # Using where with pushed condition; Using filesort
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where c >= 100 order by a;
a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index a65ca1c6736..f9351358df8 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -259,17 +259,16 @@ static int ndb_to_mysql_error(const NdbError *ndberr)
int execute_no_commit_ignore_no_key(ha_ndbcluster *h, NdbTransaction *trans)
{
- int res= trans->execute(NdbTransaction::NoCommit,
- NdbOperation::AO_IgnoreError,
- h->m_force_send);
- if (res == -1)
+ if (trans->execute(NdbTransaction::NoCommit,
+ NdbOperation::AO_IgnoreError,
+ h->m_force_send) == -1)
return -1;
const NdbError &err= trans->getNdbError();
if (err.classification != NdbError::NoError &&
err.classification != NdbError::ConstraintViolation &&
err.classification != NdbError::NoDataFound)
- return res;
+ return -1;
return 0;
}
@@ -3811,7 +3810,7 @@ int ha_ndbcluster::info(uint flag)
if (flag & HA_STATUS_AUTO)
{
DBUG_PRINT("info", ("HA_STATUS_AUTO"));
- if (m_table)
+ if (m_table && table->found_next_number_field)
{
Ndb *ndb= get_ndb();
Ndb_tuple_id_range_guard g(m_share);
@@ -9052,7 +9051,7 @@ void ndb_serialize_cond(const Item *item, void *arg)
Check that the field is part of the table of the handler
instance and that we expect a field with of this result type.
*/
- if (context->table == field->table)
+ if (context->table->s == field->table->s)
{
const NDBTAB *tab= (const NDBTAB *) context->ndb_table;
DBUG_PRINT("info", ("FIELD_ITEM"));
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 82e6196183b..cd0229e0548 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -232,6 +232,8 @@ void Item_func::traverse_cond(Cond_traverser traverser,
(*traverser)(this, argument);
}
}
+ else
+ (*traverser)(this, argument);
}
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index d9fab73a23c..172891b7c8e 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -5072,7 +5072,7 @@ struct my_option my_long_options[] =
"Push supported query conditions to the storage engine.",
(gptr*) &global_system_variables.engine_condition_pushdown,
(gptr*) &global_system_variables.engine_condition_pushdown,
- 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
/* See how it's handled in get_one_option() */
{"event-scheduler", OPT_EVENT_SCHEDULER, "Enable/disable the event scheduler.",
NULL, NULL, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
diff --git a/storage/archive/archive_reader.c b/storage/archive/archive_reader.c
index 66b90130882..14018217dea 100644
--- a/storage/archive/archive_reader.c
+++ b/storage/archive/archive_reader.c
@@ -4,6 +4,7 @@
#include <stdio.h>
#include <stdarg.h>
#include <m_ctype.h>
+#include <m_string.h>
#include <my_getopt.h>
#include <mysql_version.h>
diff --git a/storage/ndb/include/kernel/GlobalSignalNumbers.h b/storage/ndb/include/kernel/GlobalSignalNumbers.h
index fcbdedc44cc..aa0596f102a 100644
--- a/storage/ndb/include/kernel/GlobalSignalNumbers.h
+++ b/storage/ndb/include/kernel/GlobalSignalNumbers.h
@@ -551,13 +551,13 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_ABORT_ALL_REF 446
#define GSN_ABORT_ALL_CONF 447
-#define GSN_STATISTICS_REQ 448
+/* 448 unused - formerly GSN_STATISTICS_REQ */
#define GSN_STOP_ORD 449
#define GSN_TAMPER_ORD 450
-#define GSN_SET_VAR_REQ 451
-#define GSN_SET_VAR_CONF 452
-#define GSN_SET_VAR_REF 453
-#define GSN_STATISTICS_CONF 454
+/* 451 unused - formerly GSN_SET_VAR_REQ */
+/* 452 unused - formerly GSN_SET_VAR_CONF */
+/* 453 unused - formerly GSN_SET_VAR_REF */
+/* 454 unused - formerly GSN_STATISTICS_CONF */
#define GSN_START_ORD 455
/* 457 unused */
diff --git a/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp b/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp
index d40f3f7d8cb..b1261431a4e 100644
--- a/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp
+++ b/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp
@@ -64,7 +64,7 @@ struct CreateFilegroupRef {
InvalidFormat = 740,
OutOfFilegroupRecords = 765,
InvalidExtentSize = 764,
- InvalidUndoBufferSize = 763,
+ InvalidUndoBufferSize = 779,
NoSuchLogfileGroup = 767,
InvalidFilegroupVersion = 768
};
diff --git a/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp b/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp
index 27bb9af03c0..46c5ef3751b 100644
--- a/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp
+++ b/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp
@@ -107,7 +107,10 @@ public:
CmvmiDumpLongSignalMemory = 2601,
CmvmiSetRestartOnErrorInsert = 2602,
CmvmiTestLongSigWithDelay = 2603,
-
+ CmvmiDumpSubscriptions = 2604, /* note: done to respective outfile
+ to be able to debug if events
+ for some reason does not end up
+ in clusterlog */
LCPContinue = 5900,
// 7000 DIH
// 7001 DIH
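
The new dump code is issued like any other DUMP code; a hedged sketch using the existing MGM API entry point ndb_mgm_dump_state() (the connected handle and node id 2 are assumptions for illustration):

/* Ask data node 2 to list its event-log subscriptions in its out file. */
int args[1] = { 2604 };              /* DumpStateOrd::CmvmiDumpSubscriptions */
struct ndb_mgm_reply reply;
ndb_mgm_dump_state(handle, 2 /* nodeId */, args, 1, &reply);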
diff --git a/storage/ndb/include/mgmapi/mgmapi.h b/storage/ndb/include/mgmapi/mgmapi.h
index 42a6b53098f..883d3c43699 100644
--- a/storage/ndb/include/mgmapi/mgmapi.h
+++ b/storage/ndb/include/mgmapi/mgmapi.h
@@ -20,6 +20,7 @@
#include "ndb_logevent.h"
#define MGM_LOGLEVELS CFG_MAX_LOGLEVEL - CFG_MIN_LOGLEVEL + 1
+#define NDB_MGM_MAX_LOGLEVEL 15
/**
* @mainpage MySQL Cluster Management API
diff --git a/storage/ndb/include/ndbapi/NdbOperation.hpp b/storage/ndb/include/ndbapi/NdbOperation.hpp
index 90b90c7e481..380926c6a41 100644
--- a/storage/ndb/include/ndbapi/NdbOperation.hpp
+++ b/storage/ndb/include/ndbapi/NdbOperation.hpp
@@ -98,7 +98,20 @@ public:
};
/**
- * How should transaction be handled if operation fails
+ * How should transaction be handled if operation fails.
+ *
+ * If AO_IgnoreError, a failure in one operation will not abort the
+ * transaction, and NdbTransaction::execute() will return 0 (success). Use
+ * NdbOperation::getNdbError() to check for errors from individual
+ * operations.
+ *
+ * If AbortOnError, a failure in one operation will abort the transaction
+ * and cause NdbTransaction::execute() to return -1.
+ *
+ * Abort option can be set on execute(), or in the individual operation.
+ * Setting AO_IgnoreError or AbortOnError in execute() overrides the settings
+ * on individual operations. Setting DefaultAbortOption in execute() (the
+ * default) causes individual operation settings to be used.
*
* For READ, default is AO_IgnoreError
* DML, default is AbortOnError
@@ -1019,10 +1032,8 @@ protected:
NdbBlob* theBlobList;
/*
- * Abort option per operation, used by blobs. Default -1. If set,
- * overrides abort option on connection level. If set to IgnoreError,
- * does not cause execute() to return failure. This is different from
- * IgnoreError on connection level.
+ * Abort option per operation, used by blobs.
+ * See also comments on enum AbortOption.
*/
Int8 m_abortOption;
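
The ndbapi_simple_index example further down is updated to pass AbortOnError explicitly; for the opposite case, a minimal usage sketch of AO_IgnoreError (assuming a started Ndb object, the example's APIERROR macro, and a table MYTABLENAME with an ATTR1 key column):

// Sketch: with AO_IgnoreError a missing row does not abort the transaction
// and execute() returns 0; the error is reported on the operation instead.
void delete_if_present(Ndb *myNdb)
{
  const NdbDictionary::Table *myTable=
    myNdb->getDictionary()->getTable("MYTABLENAME");
  NdbTransaction *myTransaction= myNdb->startTransaction();
  if (myTransaction == NULL) APIERROR(myNdb->getNdbError());

  NdbOperation *myOperation= myTransaction->getNdbOperation(myTable);
  if (myOperation == NULL) APIERROR(myTransaction->getNdbError());
  myOperation->deleteTuple();
  myOperation->equal("ATTR1", 3);

  if (myTransaction->execute(NdbTransaction::Commit,
                             NdbOperation::AO_IgnoreError) == -1)
    APIERROR(myTransaction->getNdbError());   // only "real" failures end up here

  if (myOperation->getNdbError().classification == NdbError::NoDataFound)
    std::cout << "Row was already deleted - ignored\n";

  myNdb->closeTransaction(myTransaction);
}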
diff --git a/storage/ndb/include/util/OutputStream.hpp b/storage/ndb/include/util/OutputStream.hpp
index cbc00fb286a..d56d04adc50 100644
--- a/storage/ndb/include/util/OutputStream.hpp
+++ b/storage/ndb/include/util/OutputStream.hpp
@@ -44,9 +44,9 @@ public:
class SocketOutputStream : public OutputStream {
NDB_SOCKET_TYPE m_socket;
- unsigned m_timeout;
+ unsigned m_timeout_ms;
public:
- SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned writeTimeout = 1000);
+ SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned write_timeout_ms = 1000);
virtual ~SocketOutputStream() {}
int print(const char * fmt, ...);
diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp
index dae99642a24..440face79ae 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp
@@ -19,6 +19,17 @@
// Correct output from this program is:
//
// ATTR1 ATTR2
+// 0 0
+// 1 1
+// 2 2
+// 3 3
+// 4 4
+// 5 5
+// 6 6
+// 7 7
+// 8 8
+// 9 9
+// ATTR1 ATTR2
// 0 10
// 1 1
// 2 12
@@ -166,7 +177,8 @@ int main(int argc, char** argv)
NdbRecAttr *myRecAttr= myIndexOperation->getValue("ATTR1", NULL);
if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError());
- if(myTransaction->execute( NdbTransaction::Commit ) != -1)
+ if(myTransaction->execute( NdbTransaction::Commit,
+ NdbOperation::AbortOnError ) != -1)
printf(" %2d %2d\n", myRecAttr->u_32_value(), i);
myNdb->closeTransaction(myTransaction);
@@ -232,7 +244,8 @@ int main(int argc, char** argv)
NdbRecAttr *myRecAttr= myOperation->getValue("ATTR2", NULL);
if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError());
- if(myTransaction->execute( NdbTransaction::Commit ) == -1)
+ if(myTransaction->execute( NdbTransaction::Commit,
+ NdbOperation::AbortOnError ) == -1)
if (i == 3) {
std::cout << "Detected that deleted tuple doesn't exist!\n";
} else {
diff --git a/storage/ndb/src/common/debugger/EventLogger.cpp b/storage/ndb/src/common/debugger/EventLogger.cpp
index 4c0c4c44344..e9eb106ac2e 100644
--- a/storage/ndb/src/common/debugger/EventLogger.cpp
+++ b/storage/ndb/src/common/debugger/EventLogger.cpp
@@ -530,8 +530,8 @@ void getTextUndoLogBlocked(QQQQ) {
void getTextTransporterError(QQQQ) {
struct myTransporterError{
- int errorNum;
- char errorString[256];
+ Uint32 errorNum;
+ char errorString[256];
};
int i = 0;
int lenth = 0;
@@ -611,16 +611,18 @@ void getTextTransporterError(QQQQ) {
if(theData[2] == TransporterErrorString[i].errorNum)
{
BaseString::snprintf(m_text, m_text_len,
- "Transporter to node %d reported error: %s",
+ "Transporter to node %d reported error 0x%x: %s",
theData[1],
+ theData[2],
TransporterErrorString[i].errorString);
break;
}
}
if(i == lenth)
BaseString::snprintf(m_text, m_text_len,
- "Transporter to node %d reported error: no such error",
- theData[1]);
+ "Transporter to node %d reported error 0x%x: unknown error",
+ theData[1],
+ theData[2]);
}
void getTextTransporterWarning(QQQQ) {
getTextTransporterError(m_text, m_text_len, theData);
diff --git a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
index 8530187963d..884a49b3a94 100644
--- a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
@@ -350,15 +350,10 @@ const GsnName SignalNames [] = {
,{ GSN_TUP_WRITELOG_REQ, "TUP_WRITELOG_REQ" }
,{ GSN_LQH_WRITELOG_REQ, "LQH_WRITELOG_REQ" }
- ,{ GSN_STATISTICS_REQ, "STATISTICS_REQ" }
,{ GSN_START_ORD, "START_ORD" }
,{ GSN_STOP_ORD, "STOP_ORD" }
,{ GSN_TAMPER_ORD, "TAMPER_ORD" }
- ,{ GSN_SET_VAR_REQ, "SET_VAR_REQ" }
- ,{ GSN_SET_VAR_CONF, "SET_VAR_CONF" }
- ,{ GSN_SET_VAR_REF, "SET_VAR_REF" }
- ,{ GSN_STATISTICS_CONF, "STATISTICS_CONF" }
-
+
,{ GSN_EVENT_SUBSCRIBE_REQ, "EVENT_SUBSCRIBE_REQ" }
,{ GSN_EVENT_SUBSCRIBE_CONF, "EVENT_SUBSCRIBE_CONF" }
,{ GSN_EVENT_SUBSCRIBE_REF, "EVENT_SUBSCRIBE_REF" }
diff --git a/storage/ndb/src/common/util/OutputStream.cpp b/storage/ndb/src/common/util/OutputStream.cpp
index 322b270d1cf..99216ba5a28 100644
--- a/storage/ndb/src/common/util/OutputStream.cpp
+++ b/storage/ndb/src/common/util/OutputStream.cpp
@@ -42,16 +42,16 @@ FileOutputStream::println(const char * fmt, ...){
}
SocketOutputStream::SocketOutputStream(NDB_SOCKET_TYPE socket,
- unsigned timeout){
+ unsigned write_timeout_ms){
m_socket = socket;
- m_timeout = timeout;
+ m_timeout_ms = write_timeout_ms;
}
int
SocketOutputStream::print(const char * fmt, ...){
va_list ap;
va_start(ap, fmt);
- const int ret = vprint_socket(m_socket, m_timeout, fmt, ap);
+ const int ret = vprint_socket(m_socket, m_timeout_ms, fmt, ap);
va_end(ap);
return ret;
}
@@ -59,7 +59,7 @@ int
SocketOutputStream::println(const char * fmt, ...){
va_list ap;
va_start(ap, fmt);
- const int ret = vprintln_socket(m_socket, m_timeout, fmt, ap);
+ const int ret = vprintln_socket(m_socket, m_timeout_ms, fmt, ap);
va_end(ap);
return ret;
}
diff --git a/storage/ndb/src/cw/cpcd/Makefile.am b/storage/ndb/src/cw/cpcd/Makefile.am
index dfd2e8c270b..efc828e21a9 100644
--- a/storage/ndb/src/cw/cpcd/Makefile.am
+++ b/storage/ndb/src/cw/cpcd/Makefile.am
@@ -26,7 +26,7 @@ LDADD_LOC = \
include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_util.mk.am
-ndb_cpcd_LDFLAGS = @ndb_bin_am_ldflags@
+ndb_cpcd_LDFLAGS = -static @ndb_bin_am_ldflags@
# Don't update the files from bitkeeper
%::SCCS/s.%
diff --git a/storage/ndb/src/kernel/blocks/ERROR_codes.txt b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
index 2fc28c8ac07..c91a2da15d1 100644
--- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -5,7 +5,7 @@ Next DBACC 3002
Next DBTUP 4024
Next DBLQH 5045
Next DBDICT 6007
-Next DBDIH 7178
+Next DBDIH 7181
Next DBTC 8039
Next CMVMI 9000
Next BACKUP 10038
@@ -73,6 +73,8 @@ Delay GCP_SAVEREQ by 10 secs
7177: Delay copying of sysfileData in execCOPY_GCIREQ
+7180: Crash master during master-take-over in execMASTER_LCPCONF
+
ERROR CODES FOR TESTING NODE FAILURE, LOCAL CHECKPOINT HANDLING:
-----------------------------------------------------------------
diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
index ddf0dc95098..3fe85de73e6 100644
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
@@ -78,11 +78,7 @@ Cmvmi::Cmvmi(Block_context& ctx) :
addRecSignal(GSN_OPEN_COMREQ, &Cmvmi::execOPEN_COMREQ);
addRecSignal(GSN_TEST_ORD, &Cmvmi::execTEST_ORD);
- addRecSignal(GSN_STATISTICS_REQ, &Cmvmi::execSTATISTICS_REQ);
addRecSignal(GSN_TAMPER_ORD, &Cmvmi::execTAMPER_ORD);
- addRecSignal(GSN_SET_VAR_REQ, &Cmvmi::execSET_VAR_REQ);
- addRecSignal(GSN_SET_VAR_CONF, &Cmvmi::execSET_VAR_CONF);
- addRecSignal(GSN_SET_VAR_REF, &Cmvmi::execSET_VAR_REF);
addRecSignal(GSN_STOP_ORD, &Cmvmi::execSTOP_ORD);
addRecSignal(GSN_START_ORD, &Cmvmi::execSTART_ORD);
addRecSignal(GSN_EVENT_SUBSCRIBE_REQ,
@@ -727,24 +723,6 @@ Cmvmi::execTEST_ORD(Signal * signal){
#endif
}
-void Cmvmi::execSTATISTICS_REQ(Signal* signal)
-{
- // TODO Note ! This is only a test implementation...
-
- static int stat1 = 0;
- jamEntry();
-
- //ndbout << "data 1: " << signal->theData[1];
-
- int x = signal->theData[0];
- stat1++;
- signal->theData[0] = stat1;
- sendSignal(x, GSN_STATISTICS_CONF, signal, 7, JBB);
-
-}//execSTATISTICS_REQ()
-
-
-
void Cmvmi::execSTOP_ORD(Signal* signal)
{
jamEntry();
@@ -863,7 +841,7 @@ void Cmvmi::execTAMPER_ORD(Signal* signal)
// to be able to indicate if we really introduced an error.
#ifdef ERROR_INSERT
TamperOrd* const tamperOrd = (TamperOrd*)&signal->theData[0];
-
+ signal->theData[2] = 0;
signal->theData[1] = tamperOrd->errorNo;
signal->theData[0] = 5;
sendSignal(DBDIH_REF, GSN_DIHNDBTAMPER, signal, 3,JBB);
@@ -871,160 +849,6 @@ void Cmvmi::execTAMPER_ORD(Signal* signal)
}//execTAMPER_ORD()
-
-
-void Cmvmi::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
-
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- jamEntry();
- switch (var) {
-
- // NDBCNTR_REF
-
- // DBTC
- case TransactionDeadlockDetectionTimeout:
- case TransactionInactiveTime:
- case NoOfConcurrentProcessesHandleTakeover:
- sendSignal(DBTC_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // DBDIH
- case TimeBetweenLocalCheckpoints:
- case TimeBetweenGlobalCheckpoints:
- sendSignal(DBDIH_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // DBLQH
- case NoOfConcurrentCheckpointsDuringRestart:
- case NoOfConcurrentCheckpointsAfterRestart:
- sendSignal(DBLQH_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // DBACC
- case NoOfDiskPagesToDiskDuringRestartACC:
- case NoOfDiskPagesToDiskAfterRestartACC:
- sendSignal(DBACC_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // DBTUP
- case NoOfDiskPagesToDiskDuringRestartTUP:
- case NoOfDiskPagesToDiskAfterRestartTUP:
- sendSignal(DBTUP_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // DBDICT
-
- // NDBCNTR
- case TimeToWaitAlive:
-
- // QMGR
- case HeartbeatIntervalDbDb: // TODO ev till Ndbcnt också
- case HeartbeatIntervalDbApi:
- case ArbitTimeout:
- sendSignal(QMGR_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // NDBFS
-
- // CMVMI
- case MaxNoOfSavedMessages:
- case LockPagesInMainMemory:
- case TimeBetweenWatchDogCheck:
- case StopOnError:
- handleSET_VAR_REQ(signal);
- break;
-
-
- // Not possible to update (this could of course be handled by each block
- // instead but I havn't investigated where they belong)
- case Id:
- case ExecuteOnComputer:
- case ShmKey:
- case MaxNoOfConcurrentOperations:
- case MaxNoOfConcurrentTransactions:
- case MemorySpaceIndexes:
- case MemorySpaceTuples:
- case MemoryDiskPages:
- case NoOfFreeDiskClusters:
- case NoOfDiskClusters:
- case NoOfFragmentLogFiles:
- case NoOfDiskClustersPerDiskFile:
- case NoOfDiskFiles:
- case MaxNoOfSavedEvents:
- default:
-
- int mgmtSrvr = setVarReq->mgmtSrvrBlockRef();
- sendSignal(mgmtSrvr, GSN_SET_VAR_REF, signal, 0, JBB);
- } // switch
-
-#endif
-}//execSET_VAR_REQ()
-
-
-void Cmvmi::execSET_VAR_CONF(Signal* signal)
-{
- int mgmtSrvr = signal->theData[0];
- sendSignal(mgmtSrvr, GSN_SET_VAR_CONF, signal, 0, JBB);
-
-}//execSET_VAR_CONF()
-
-
-void Cmvmi::execSET_VAR_REF(Signal* signal)
-{
- int mgmtSrvr = signal->theData[0];
- sendSignal(mgmtSrvr, GSN_SET_VAR_REF, signal, 0, JBB);
-
-}//execSET_VAR_REF()
-
-
-void Cmvmi::handleSET_VAR_REQ(Signal* signal) {
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
- switch (var) {
- case MaxNoOfSavedMessages:
- m_ctx.m_config.maxNoOfErrorLogs(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case LockPagesInMainMemory:
- int result;
- if (val == 0) {
- result = NdbMem_MemUnlockAll();
- }
- else {
- result = NdbMem_MemLockAll();
- }
- if (result == 0) {
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- }
- else {
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- }
- break;
-
- case TimeBetweenWatchDogCheck:
- m_ctx.m_config.timeBetweenWatchDogCheck(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case StopOnError:
- m_ctx.m_config.stopOnError(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- return;
- } // switch
-#endif
-}
-
#ifdef VM_TRACE
class RefSignalTest {
public:
@@ -1129,6 +953,24 @@ Cmvmi::execDUMP_STATE_ORD(Signal* signal)
}
}
+ if (arg == DumpStateOrd::CmvmiDumpSubscriptions)
+ {
+ SubscriberPtr ptr;
+ subscribers.first(ptr);
+ g_eventLogger.info("List subscriptions:");
+ while(ptr.i != RNIL)
+ {
+ g_eventLogger.info("Subscription: %u, nodeId: %u, ref: 0x%x",
+ ptr.i, refToNode(ptr.p->blockRef), ptr.p->blockRef);
+ for(Uint32 i = 0; i < LogLevel::LOGLEVEL_CATEGORIES; i++)
+ {
+ Uint32 level = ptr.p->logLevel.getLogLevel((LogLevel::EventCategory)i);
+ g_eventLogger.info("Category %u Level %u", i, level);
+ }
+ subscribers.next(ptr);
+ }
+ }
+
if (arg == DumpStateOrd::CmvmiDumpLongSignalMemory){
infoEvent("Cmvmi: g_sectionSegmentPool size: %d free: %d",
g_sectionSegmentPool.getSize(),
diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
index 208f2511c6d..bc88f1a0c63 100644
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
@@ -55,20 +55,14 @@ private:
void execSIZEALT_ACK(Signal* signal);
void execTEST_ORD(Signal* signal);
- void execSTATISTICS_REQ(Signal* signal);
void execSTOP_ORD(Signal* signal);
void execSTART_ORD(Signal* signal);
void execTAMPER_ORD(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
- void execSET_VAR_CONF(Signal* signal);
- void execSET_VAR_REF(Signal* signal);
void execDUMP_STATE_ORD(Signal* signal);
void execEVENT_SUBSCRIBE_REQ(Signal *);
void cancelSubscription(NodeId nodeId);
-
- void handleSET_VAR_REQ(Signal* signal);
void execTESTSIG(Signal* signal);
void execNODE_START_REP(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
index 6337e252c0b..a44620b16ed 100644
--- a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
+++ b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
@@ -660,7 +660,6 @@ private:
void execNDB_STTOR(Signal* signal);
void execDROP_TAB_REQ(Signal* signal);
void execREAD_CONFIG_REQ(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execDUMP_STATE_ORD(Signal* signal);
// Statement blocks
diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
index 87db12cea51..9ba164d264c 100644
--- a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
@@ -113,7 +113,6 @@ Dbacc::Dbacc(Block_context& ctx):
addRecSignal(GSN_NDB_STTOR, &Dbacc::execNDB_STTOR);
addRecSignal(GSN_DROP_TAB_REQ, &Dbacc::execDROP_TAB_REQ);
addRecSignal(GSN_READ_CONFIG_REQ, &Dbacc::execREAD_CONFIG_REQ, true);
- addRecSignal(GSN_SET_VAR_REQ, &Dbacc::execSET_VAR_REQ);
initData();
diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index bf2fa5b7584..70bb8368a57 100644
--- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -8511,33 +8511,6 @@ Dbacc::execDUMP_STATE_ORD(Signal* signal)
#endif
}//Dbacc::execDUMP_STATE_ORD()
-void Dbacc::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
-
- switch (var) {
-
- case NoOfDiskPagesToDiskAfterRestartACC:
- clblPagesPerTick = val;
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case NoOfDiskPagesToDiskDuringRestartACC:
- // Valid only during start so value not set.
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-
-}//execSET_VAR_REQ()
-
void
Dbacc::execREAD_PSEUDO_REQ(Signal* signal){
jamEntry();
diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
index eb81672fef5..f7e27359261 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
@@ -700,7 +700,6 @@ private:
void execFSREADREF(Signal *);
void execFSWRITECONF(Signal *);
void execFSWRITEREF(Signal *);
- void execSET_VAR_REQ(Signal *);
void execCHECKNODEGROUPSREQ(Signal *);
void execSTART_INFOREQ(Signal*);
void execSTART_INFOREF(Signal*);
@@ -1382,6 +1381,7 @@ private:
Uint32 csystemnodes;
Uint32 currentgcp;
Uint32 c_newest_restorable_gci;
+ Uint32 c_set_initial_start_flag;
enum GcpMasterTakeOverState {
GMTOS_IDLE = 0,
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
index cf46f6124f2..aff31d625f4 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
@@ -61,6 +61,7 @@ void Dbdih::initData()
c_blockCommit = false;
c_blockCommitNo = 1;
cntrlblockref = RNIL;
+ c_set_initial_start_flag = FALSE;
}//Dbdih::initData()
void Dbdih::initRecords()
@@ -203,7 +204,6 @@ Dbdih::Dbdih(Block_context& ctx):
addRecSignal(GSN_FSREADREF, &Dbdih::execFSREADREF, true);
addRecSignal(GSN_FSWRITECONF, &Dbdih::execFSWRITECONF);
addRecSignal(GSN_FSWRITEREF, &Dbdih::execFSWRITEREF, true);
- addRecSignal(GSN_SET_VAR_REQ, &Dbdih::execSET_VAR_REQ);
addRecSignal(GSN_START_INFOREQ,
&Dbdih::execSTART_INFOREQ);
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 9cd7dbfc59b..e103db2e605 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -677,6 +677,12 @@ done:
Uint32 tmp= SYSFILE->m_restart_seq;
memcpy(sysfileData, cdata, sizeof(sysfileData));
SYSFILE->m_restart_seq = tmp;
+
+ if (c_set_initial_start_flag)
+ {
+ jam();
+ Sysfile::setInitialStartOngoing(SYSFILE->systemRestartBits);
+ }
}
c_copyGCISlave.m_copyReason = reason;
@@ -1337,6 +1343,11 @@ void Dbdih::execNDB_STTOR(Signal* signal)
// The permission is given by the master node in the alive set.
/*-----------------------------------------------------------------------*/
createMutexes(signal, 0);
+ if (cstarttype == NodeState::ST_INITIAL_NODE_RESTART)
+ {
+ jam();
+ c_set_initial_start_flag = TRUE; // In sysfile...
+ }
break;
case ZNDB_SPH3:
@@ -1883,8 +1894,8 @@ void Dbdih::execSTART_PERMREQ(Signal* signal)
return;
}//if
if (getNodeStatus(nodeId) != NodeRecord::DEAD){
- ndbout << "nodeStatus in START_PERMREQ = "
- << (Uint32) getNodeStatus(nodeId) << endl;
+ g_eventLogger.error("nodeStatus in START_PERMREQ = %u",
+ (Uint32) getNodeStatus(nodeId));
ndbrequire(false);
}//if
@@ -4297,9 +4308,9 @@ void Dbdih::checkCopyTab(NodeRecordPtr failedNodePtr)
jam();
break;
default:
- ndbout_c("outstanding gsn: %s(%d)",
- getSignalName(c_nodeStartMaster.m_outstandingGsn),
- c_nodeStartMaster.m_outstandingGsn);
+ g_eventLogger.error("outstanding gsn: %s(%d)",
+ getSignalName(c_nodeStartMaster.m_outstandingGsn),
+ c_nodeStartMaster.m_outstandingGsn);
ndbrequire(false);
}
@@ -4752,9 +4763,10 @@ void Dbdih::failedNodeLcpHandling(Signal* signal, NodeRecordPtr failedNodePtr)
failedNodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
break;
default:
- ndbout << "activeStatus = " << (Uint32) failedNodePtr.p->activeStatus;
- ndbout << " at failure after NODE_FAILREP of node = ";
- ndbout << failedNodePtr.i << endl;
+ g_eventLogger.error("activeStatus = %u "
+ "at failure after NODE_FAILREP of node = %u",
+ (Uint32) failedNodePtr.p->activeStatus,
+ failedNodePtr.i);
ndbrequire(false);
break;
}//switch
@@ -4891,6 +4903,8 @@ void
Dbdih::startLcpMasterTakeOver(Signal* signal, Uint32 nodeId){
jam();
+ Uint32 oldNode = c_lcpMasterTakeOverState.failedNodeId;
+
c_lcpMasterTakeOverState.minTableId = ~0;
c_lcpMasterTakeOverState.minFragId = ~0;
c_lcpMasterTakeOverState.failedNodeId = nodeId;
@@ -4909,7 +4923,20 @@ Dbdih::startLcpMasterTakeOver(Signal* signal, Uint32 nodeId){
/**
* Node failure during master take over...
*/
- ndbout_c("Nodefail during master take over");
+ g_eventLogger.info("Nodefail during master take over (old: %d)", oldNode);
+ }
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = oldNode;
+ if (oldNode > 0 && oldNode < MAX_NDB_NODES)
+ {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->m_nodefailSteps.get(NF_LCP_TAKE_OVER))
+ {
+ jam();
+ checkLocalNodefailComplete(signal, oldNode, NF_LCP_TAKE_OVER);
+ }
}
setLocalNodefailHandling(signal, nodeId, NF_LCP_TAKE_OVER);
@@ -5149,7 +5176,8 @@ void Dbdih::execMASTER_GCPCONF(Signal* signal)
if (latestLcpId > SYSFILE->latestLCP_ID) {
jam();
#if 0
- ndbout_c("Dbdih: Setting SYSFILE->latestLCP_ID to %d", latestLcpId);
+ g_eventLogger.info("Dbdih: Setting SYSFILE->latestLCP_ID to %d",
+ latestLcpId);
SYSFILE->latestLCP_ID = latestLcpId;
#endif
SYSFILE->keepGCI = oldestKeepGci;
@@ -5808,7 +5836,7 @@ Dbdih::checkLocalNodefailComplete(Signal* signal, Uint32 failedNodeId,
if (ERROR_INSERTED(7030))
{
- ndbout_c("Reenable GCP_PREPARE");
+ g_eventLogger.info("Reenable GCP_PREPARE");
CLEAR_ERROR_INSERT_VALUE;
}
@@ -5925,6 +5953,14 @@ void Dbdih::execMASTER_LCPREQ(Signal* signal)
jamEntry();
const BlockReference newMasterBlockref = req->masterRef;
+ if (newMasterBlockref != cmasterdihref)
+ {
+ jam();
+ ndbout_c("resending GSN_MASTER_LCPREQ");
+ sendSignalWithDelay(reference(), GSN_MASTER_LCPREQ, signal,
+ signal->getLength(), 50);
+ return;
+ }
Uint32 failedNodeId = req->failedNodeId;
/**
@@ -5981,7 +6017,7 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
#if 0
if(c_copyGCISlave.m_copyReason == CopyGCIReq::LOCAL_CHECKPOINT){
- ndbout_c("Dbdih: Also resetting c_copyGCISlave");
+ g_eventLogger.info("Dbdih: Also resetting c_copyGCISlave");
c_copyGCISlave.m_copyReason = CopyGCIReq::IDLE;
c_copyGCISlave.m_expectedNextWord = 0;
}
@@ -6066,7 +6102,7 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
if(c_lcpState.lcpStatus == LCP_TAB_SAVED){
#ifdef VM_TRACE
- ndbout_c("Sending extra GSN_LCP_COMPLETE_REP to new master");
+ g_eventLogger.info("Sending extra GSN_LCP_COMPLETE_REP to new master");
#endif
sendLCP_COMPLETE_REP(signal);
}
@@ -6221,8 +6257,10 @@ void Dbdih::execMASTER_LCPCONF(Signal* signal)
ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
nodePtr.p->lcpStateAtTakeOver = lcpState;
+ CRASH_INSERTION(7180);
+
#ifdef VM_TRACE
- ndbout_c("MASTER_LCPCONF");
+ g_eventLogger.info("MASTER_LCPCONF");
printMASTER_LCP_CONF(stdout, &signal->theData[0], 0, 0);
#endif
@@ -6299,7 +6337,7 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
// protocol.
/* --------------------------------------------------------------------- */
#ifdef VM_TRACE
- ndbout_c("MASTER_LCPhandling:: LMTOS_ALL_IDLE -> checkLcpStart");
+ g_eventLogger.info("MASTER_LCPhandling:: LMTOS_ALL_IDLE -> checkLcpStart");
#endif
checkLcpStart(signal, __LINE__);
break;
@@ -6310,7 +6348,7 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
// protocol by calculating the keep gci and storing the new lcp id.
/* --------------------------------------------------------------------- */
#ifdef VM_TRACE
- ndbout_c("MASTER_LCPhandling:: LMTOS_COPY_ONGOING -> storeNewLcpId");
+ g_eventLogger.info("MASTER_LCPhandling:: LMTOS_COPY_ONGOING -> storeNewLcpId");
#endif
if (c_lcpState.lcpStatus == LCP_STATUS_ACTIVE) {
jam();
@@ -6321,7 +6359,7 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
/*---------------------------------------------------------------------*/
Uint32 lcpId = SYSFILE->latestLCP_ID;
#ifdef VM_TRACE
- ndbout_c("Decreasing latestLCP_ID from %d to %d", lcpId, lcpId - 1);
+ g_eventLogger.info("Decreasing latestLCP_ID from %d to %d", lcpId, lcpId - 1);
#endif
SYSFILE->latestLCP_ID--;
}//if
@@ -6338,10 +6376,10 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
* complete before finalising the LCP process.
* ------------------------------------------------------------------ */
#ifdef VM_TRACE
- ndbout_c("MASTER_LCPhandling:: LMTOS_ALL_ACTIVE -> "
- "startLcpRoundLoopLab(table=%u, fragment=%u)",
- c_lcpMasterTakeOverState.minTableId,
- c_lcpMasterTakeOverState.minFragId);
+ g_eventLogger.info("MASTER_LCPhandling:: LMTOS_ALL_ACTIVE -> "
+ "startLcpRoundLoopLab(table=%u, fragment=%u)",
+ c_lcpMasterTakeOverState.minTableId,
+ c_lcpMasterTakeOverState.minFragId);
#endif
c_lcpState.keepGci = SYSFILE->keepGCI;
@@ -7745,8 +7783,8 @@ void Dbdih::checkGcpStopLab(Signal* signal)
if (cgcpSameCounter == 1200) {
jam();
#ifdef VM_TRACE
- ndbout << "System crash due to GCP Stop in state = ";
- ndbout << (Uint32) cgcpStatus << endl;
+ g_eventLogger.error("System crash due to GCP Stop in state = %u",
+ (Uint32) cgcpStatus);
#endif
crashSystemAtGcpStop(signal);
return;
@@ -7759,8 +7797,8 @@ void Dbdih::checkGcpStopLab(Signal* signal)
if (cgcpSameCounter == 1200) {
jam();
#ifdef VM_TRACE
- ndbout << "System crash due to GCP Stop in state = ";
- ndbout << (Uint32) cgcpStatus << endl;
+ g_eventLogger.error("System crash due to GCP Stop in state = %u",
+ (Uint32) cgcpStatus);
#endif
crashSystemAtGcpStop(signal);
return;
@@ -7951,7 +7989,7 @@ void Dbdih::GCP_SAVEhandling(Signal* signal, Uint32 nodeId)
getNodeState().startLevel == NodeState::SL_STARTED){
jam();
#if 0
- ndbout_c("Dbdih: Clearing initial start ongoing");
+ g_eventLogger.info("Dbdih: Clearing initial start ongoing");
#endif
Sysfile::clearInitialStartOngoing(SYSFILE->systemRestartBits);
}
@@ -7970,7 +8008,7 @@ void Dbdih::execGCP_PREPARE(Signal* signal)
if (ERROR_INSERTED(7030))
{
cgckptflag = true;
- ndbout_c("Delayed GCP_PREPARE 5s");
+ g_eventLogger.info("Delayed GCP_PREPARE 5s");
sendSignalWithDelay(reference(), GSN_GCP_PREPARE, signal, 5000,
signal->getLength());
return;
@@ -7990,7 +8028,7 @@ void Dbdih::execGCP_PREPARE(Signal* signal)
if (ERROR_INSERTED(7031))
{
- ndbout_c("Crashing delayed in GCP_PREPARE 3s");
+ g_eventLogger.info("Crashing delayed in GCP_PREPARE 3s");
signal->theData[0] = 9999;
sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 3000, 1);
return;
@@ -8514,7 +8552,7 @@ void Dbdih::initLcpLab(Signal* signal, Uint32 senderRef, Uint32 tableId)
* This is LCP master takeover
*/
#ifdef VM_TRACE
- ndbout_c("initLcpLab aborted due to LCP master takeover - 1");
+ g_eventLogger.info("initLcpLab aborted due to LCP master takeover - 1");
#endif
c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
sendMASTER_LCPCONF(signal);
@@ -8527,7 +8565,7 @@ void Dbdih::initLcpLab(Signal* signal, Uint32 senderRef, Uint32 tableId)
* Master take over but has not yet received MASTER_LCPREQ
*/
#ifdef VM_TRACE
- ndbout_c("initLcpLab aborted due to LCP master takeover - 2");
+ g_eventLogger.info("initLcpLab aborted due to LCP master takeover - 2");
#endif
return;
}
@@ -9836,9 +9874,10 @@ void Dbdih::checkTcCounterLab(Signal* signal)
{
CRASH_INSERTION(7009);
if (c_lcpState.lcpStatus != LCP_STATUS_IDLE) {
- ndbout << "lcpStatus = " << (Uint32) c_lcpState.lcpStatus;
- ndbout << "lcpStatusUpdatedPlace = " <<
- c_lcpState.lcpStatusUpdatedPlace << endl;
+ g_eventLogger.error("lcpStatus = %u"
+ "lcpStatusUpdatedPlace = %d",
+ (Uint32) c_lcpState.lcpStatus,
+ c_lcpState.lcpStatusUpdatedPlace);
ndbrequire(false);
return;
}//if
@@ -10421,9 +10460,8 @@ void Dbdih::execLCP_FRAG_REP(Signal* signal)
if(tabPtr.p->tabStatus == TabRecord::TS_DROPPING){
jam();
- ndbout_c("TS_DROPPING - Neglecting to save Table: %d Frag: %d - ",
- tableId,
- fragId);
+ g_eventLogger.info("TS_DROPPING - Neglecting to save Table: %d Frag: %d - ",
+ tableId, fragId);
} else {
jam();
/**
@@ -10553,7 +10591,7 @@ void Dbdih::findReplica(ReplicaRecordPtr& replicaPtr,
};
#ifdef VM_TRACE
- ndbout_c("Fragment Replica(node=%d) not found", nodeId);
+ g_eventLogger.info("Fragment Replica(node=%d) not found", nodeId);
replicaPtr.i = fragPtrP->oldStoredReplicas;
while(replicaPtr.i != RNIL){
ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
@@ -10566,9 +10604,9 @@ void Dbdih::findReplica(ReplicaRecordPtr& replicaPtr,
}//if
};
if(replicaPtr.i != RNIL){
- ndbout_c("...But was found in oldStoredReplicas");
+ g_eventLogger.info("...But was found in oldStoredReplicas");
} else {
- ndbout_c("...And wasn't found in oldStoredReplicas");
+ g_eventLogger.info("...And wasn't found in oldStoredReplicas");
}
#endif
ndbrequire(false);
@@ -10635,8 +10673,8 @@ Dbdih::reportLcpCompletion(const LcpFragRep* lcpReport)
if(lcpNo != replicaPtr.p->nextLcp){
if (handle_invalid_lcp_no(lcpReport, replicaPtr))
{
- ndbout_c("lcpNo = %d replicaPtr.p->nextLcp = %d",
- lcpNo, replicaPtr.p->nextLcp);
+ g_eventLogger.error("lcpNo = %d replicaPtr.p->nextLcp = %d",
+ lcpNo, replicaPtr.p->nextLcp);
ndbrequire(false);
}
}
@@ -10672,7 +10710,7 @@ Dbdih::reportLcpCompletion(const LcpFragRep* lcpReport)
// Not all fragments in table have been checkpointed.
/* ----------------------------------------------------------------- */
if(0)
- ndbout_c("reportLcpCompletion: fragment %d not ready", fid);
+ g_eventLogger.info("reportLcpCompletion: fragment %d not ready", fid);
return false;
}//if
}//for
@@ -10779,6 +10817,17 @@ Dbdih::sendLCP_COMPLETE_REP(Signal* signal){
sendSignal(c_lcpState.m_masterLcpDihRef, GSN_LCP_COMPLETE_REP, signal,
LcpCompleteRep::SignalLength, JBB);
+
+ /**
+ * Say that an initial node restart does not need to be redone
+ * once node has been part of first LCP
+ */
+ if (c_set_initial_start_flag &&
+ c_lcpState.m_participatingLQH.get(getOwnNodeId()))
+ {
+ jam();
+ c_set_initial_start_flag = FALSE;
+ }
}
/*-------------------------------------------------------------------------- */
@@ -10789,7 +10838,7 @@ void Dbdih::execLCP_COMPLETE_REP(Signal* signal)
jamEntry();
#if 0
- ndbout_c("LCP_COMPLETE_REP");
+ g_eventLogger.info("LCP_COMPLETE_REP");
printLCP_COMPLETE_REP(stdout,
signal->getDataPtr(),
signal->length(), number());
@@ -10875,7 +10924,7 @@ void Dbdih::allNodesLcpCompletedLab(Signal* signal)
if(c_lcpMasterTakeOverState.state != LMTOS_IDLE){
jam();
#ifdef VM_TRACE
- ndbout_c("Exiting from allNodesLcpCompletedLab");
+ g_eventLogger.info("Exiting from allNodesLcpCompletedLab");
#endif
return;
}
@@ -11112,14 +11161,14 @@ void Dbdih::crashSystemAtGcpStop(Signal* signal)
infoEvent("Detected GCP stop...sending kill to %s",
c_GCP_SAVEREQ_Counter.getText());
- ndbout_c("Detected GCP stop...sending kill to %s",
- c_GCP_SAVEREQ_Counter.getText());
+ g_eventLogger.error("Detected GCP stop...sending kill to %s",
+ c_GCP_SAVEREQ_Counter.getText());
return;
}
case GCP_SAVE_LQH_FINISHED:
- ndbout_c("m_copyReason: %d m_waiting: %d",
- c_copyGCIMaster.m_copyReason,
- c_copyGCIMaster.m_waiting);
+ g_eventLogger.error("m_copyReason: %d m_waiting: %d",
+ c_copyGCIMaster.m_copyReason,
+ c_copyGCIMaster.m_waiting);
break;
case GCP_READY: // shut up lint
case GCP_PREPARE_SENT:
@@ -11127,11 +11176,11 @@ void Dbdih::crashSystemAtGcpStop(Signal* signal)
break;
}
- ndbout_c("c_copyGCISlave: sender{Data, Ref} %d %x reason: %d nextWord: %d",
- c_copyGCISlave.m_senderData,
- c_copyGCISlave.m_senderRef,
- c_copyGCISlave.m_copyReason,
- c_copyGCISlave.m_expectedNextWord);
+ g_eventLogger.error("c_copyGCISlave: sender{Data, Ref} %d %x reason: %d nextWord: %d",
+ c_copyGCISlave.m_senderData,
+ c_copyGCISlave.m_senderRef,
+ c_copyGCISlave.m_copyReason,
+ c_copyGCISlave.m_expectedNextWord);
FileRecordPtr file0Ptr;
file0Ptr.i = crestartInfoFile[0];
@@ -13350,9 +13399,9 @@ void Dbdih::setLcpActiveStatusEnd()
nodePtr.i = getOwnNodeId();
ptrAss(nodePtr, nodeRecord);
ndbrequire(nodePtr.p->activeStatus == Sysfile::NS_Active);
- ndbout_c("NR: setLcpActiveStatusEnd - m_participatingLQH");
+ g_eventLogger.info("NR: setLcpActiveStatusEnd - m_participatingLQH");
} else {
- ndbout_c("NR: setLcpActiveStatusEnd - !m_participatingLQH");
+ g_eventLogger.info("NR: setLcpActiveStatusEnd - !m_participatingLQH");
}
}
@@ -14184,8 +14233,8 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
}
if(arg == DumpStateOrd::EnableUndoDelayDataWrite){
- ndbout << "Dbdih:: delay write of datapages for table = "
- << dumpState->args[1]<< endl;
+ g_eventLogger.info("Dbdih:: delay write of datapages for table = %s",
+ dumpState->args[1]);
// Send this dump to ACC and TUP
EXECUTE_DIRECT(DBACC, GSN_DUMP_STATE_ORD, signal, 2);
EXECUTE_DIRECT(DBTUP, GSN_DUMP_STATE_ORD, signal, 2);
@@ -14202,13 +14251,13 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
}//if
if (signal->theData[0] == DumpStateOrd::DihMinTimeBetweenLCP) {
// Set time between LCP to min value
- ndbout << "Set time between LCP to min value" << endl;
+ g_eventLogger.info("Set time between LCP to min value");
c_lcpState.clcpDelay = 0; // TimeBetweenLocalCheckpoints.min
return;
}
if (signal->theData[0] == DumpStateOrd::DihMaxTimeBetweenLCP) {
// Set time between LCP to max value
- ndbout << "Set time between LCP to max value" << endl;
+ g_eventLogger.info("Set time between LCP to max value");
c_lcpState.clcpDelay = 31; // TimeBetweenLocalCheckpoints.max
return;
}
@@ -14244,7 +14293,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
{
cgcpDelay = signal->theData[1];
}
- ndbout_c("Setting time between gcp : %d", cgcpDelay);
+ g_eventLogger.info("Setting time between gcp : %d", cgcpDelay);
}
if (arg == 7021 && signal->getLength() == 2)
@@ -14367,7 +14416,7 @@ Dbdih::execPREP_DROP_TAB_REQ(Signal* signal){
while(index < count){
if(nodePtr.p->queuedChkpt[index].tableId == tabPtr.i){
jam();
- // ndbout_c("Unqueuing %d", index);
+ // g_eventLogger.info("Unqueuing %d", index);
count--;
for(Uint32 i = index; i<count; i++){
@@ -14407,7 +14456,7 @@ Dbdih::execPREP_DROP_TAB_REQ(Signal* signal){
if(checkLcpAllTablesDoneInLqh()){
jam();
- ndbout_c("This is the last table");
+ g_eventLogger.info("This is the last table");
/**
* Then check if saving of tab info is done for all tables
@@ -14416,7 +14465,7 @@ Dbdih::execPREP_DROP_TAB_REQ(Signal* signal){
checkLcpCompletedLab(signal);
if(a != c_lcpState.lcpStatus){
- ndbout_c("And all tables are written to already written disk");
+ g_eventLogger.info("And all tables are written to already written disk");
}
}
break;
@@ -14573,30 +14622,6 @@ Dbdih::execNDB_TAMPER(Signal* signal)
return;
}//Dbdih::execNDB_TAMPER()
-void Dbdih::execSET_VAR_REQ(Signal* signal) {
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
-
- switch (var) {
- case TimeBetweenLocalCheckpoints:
- c_lcpState.clcpDelay = val;
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case TimeBetweenGlobalCheckpoints:
- cgcpDelay = val;
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-}
-
void Dbdih::execBLOCK_COMMIT_ORD(Signal* signal){
BlockCommitOrd* const block = (BlockCommitOrd *)&signal->theData[0];
diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index 3951b53184c..8d7290469ca 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -2169,7 +2169,6 @@ private:
void execFSREADCONF(Signal* signal);
void execFSREADREF(Signal* signal);
void execSCAN_HBREP(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execTIME_SIGNAL(Signal* signal);
void execFSSYNCCONF(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
index 8ddb96f9111..c054c227c8e 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
@@ -267,7 +267,6 @@ Dblqh::Dblqh(Block_context& ctx):
addRecSignal(GSN_FSREADCONF, &Dblqh::execFSREADCONF);
addRecSignal(GSN_FSREADREF, &Dblqh::execFSREADREF, true);
addRecSignal(GSN_ACC_ABORTCONF, &Dblqh::execACC_ABORTCONF);
- addRecSignal(GSN_SET_VAR_REQ, &Dblqh::execSET_VAR_REQ);
addRecSignal(GSN_TIME_SIGNAL, &Dblqh::execTIME_SIGNAL);
addRecSignal(GSN_FSSYNCCONF, &Dblqh::execFSSYNCCONF);
addRecSignal(GSN_REMOVE_MARKER_ORD, &Dblqh::execREMOVE_MARKER_ORD);
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index f2eef543833..0205f1db0de 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -62,6 +62,7 @@
#include <signaldata/AttrInfo.hpp>
#include <KeyDescriptor.hpp>
#include <signaldata/RouteOrd.hpp>
+#include <signaldata/FsRef.hpp>
// Use DEBUG to print messages that should be
// seen only when we debug the product
@@ -4256,7 +4257,7 @@ Dblqh::nr_copy_delete_row(Signal* signal,
signal->theData, sizeof(Local_key));
regTcPtr.p->m_nr_delete.m_page_id[pos] = RNIL;
regTcPtr.p->m_nr_delete.m_cnt = pos + 2;
- ndbout << "PENDING DISK DELETE: " <<
+ if (0) ndbout << "PENDING DISK DELETE: " <<
regTcPtr.p->m_nr_delete.m_disk_ref[pos] << endl;
}
@@ -7572,7 +7573,7 @@ void Dblqh::lqhTransNextLab(Signal* signal)
* THE RECEIVER OF THE COPY HAVE FAILED.
* WE HAVE TO CLOSE THE COPY PROCESS.
* ----------------------------------------------------------- */
- ndbout_c("close copy");
+ if (0) ndbout_c("close copy");
tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i;
tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC;
closeCopyRequestLab(signal);
@@ -10833,7 +10834,7 @@ void Dblqh::tupCopyCloseConfLab(Signal* signal)
void Dblqh::closeCopyRequestLab(Signal* signal)
{
scanptr.p->scanErrorCounter++;
- ndbout_c("closeCopyRequestLab: scanState: %d", scanptr.p->scanState);
+ if (0) ndbout_c("closeCopyRequestLab: scanState: %d", scanptr.p->scanState);
switch (scanptr.p->scanState) {
case ScanRecord::WAIT_TUPKEY_COPY:
case ScanRecord::WAIT_NEXT_SCAN_COPY:
@@ -11434,7 +11435,17 @@ void Dblqh::execLCP_PREPARE_CONF(Signal* signal)
void Dblqh::execBACKUP_FRAGMENT_REF(Signal* signal)
{
- ndbrequire(false);
+ BackupFragmentRef *ref= (BackupFragmentRef*)signal->getDataPtr();
+ char buf[100];
+ BaseString::snprintf(buf,sizeof(buf),
+ "Unable to store fragment during LCP. NDBFS Error: %u",
+ ref->errorCode);
+
+ progError(__LINE__,
+ (ref->errorCode & FsRef::FS_ERR_BIT)?
+ NDBD_EXIT_AFS_UNKNOWN
+ : ref->errorCode,
+ buf);
}
void Dblqh::execBACKUP_FRAGMENT_CONF(Signal* signal)
@@ -11928,7 +11939,7 @@ void Dblqh::execGCP_SAVEREQ(Signal* signal)
return;
}
- if (getNodeState().getNodeRestartInProgress())
+ if (getNodeState().getNodeRestartInProgress() && cstartRecReq == ZFALSE)
{
GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
saveRef->dihPtr = dihPtr;
@@ -11975,7 +11986,6 @@ void Dblqh::execGCP_SAVEREQ(Signal* signal)
}//if
ndbrequire(ccurrentGcprec == RNIL);
-
ccurrentGcprec = 0;
gcpPtr.i = ccurrentGcprec;
ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord);
@@ -18855,30 +18865,6 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal)
}//Dblqh::execDUMP_STATE_ORD()
-void Dblqh::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
-
- switch (var) {
-
- case NoOfConcurrentCheckpointsAfterRestart:
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case NoOfConcurrentCheckpointsDuringRestart:
- // Valid only during start so value not set.
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-}//execSET_VAR_REQ()
-
-
/* **************************************************************** */
/* ---------------------------------------------------------------- */
/* ---------------------- TRIGGER HANDLING ------------------------ */
diff --git a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index 39b7c00e3a1..dae8ee7e73b 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -1327,7 +1327,6 @@ private:
void execTIME_SIGNAL(Signal* signal);
void execAPI_FAILREQ(Signal* signal);
void execSCAN_HBREP(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execABORT_ALL_REQ(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
index 452ae6d8d70..3bba771f3f0 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
@@ -245,7 +245,6 @@ Dbtc::Dbtc(Block_context& ctx):
addRecSignal(GSN_INCL_NODEREQ, &Dbtc::execINCL_NODEREQ);
addRecSignal(GSN_TIME_SIGNAL, &Dbtc::execTIME_SIGNAL);
addRecSignal(GSN_API_FAILREQ, &Dbtc::execAPI_FAILREQ);
- addRecSignal(GSN_SET_VAR_REQ, &Dbtc::execSET_VAR_REQ);
addRecSignal(GSN_TC_COMMIT_ACK, &Dbtc::execTC_COMMIT_ACK);
addRecSignal(GSN_ABORT_ALL_REQ, &Dbtc::execABORT_ALL_REQ);
diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index c7ca8048354..70dca820d73 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -11004,36 +11004,6 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
}
}//Dbtc::execDUMP_STATE_ORD()
-void Dbtc::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
-
- switch (var) {
-
- case TransactionInactiveTime:
- jam();
- set_appl_timeout_value(val);
- break;
- case TransactionDeadlockDetectionTimeout:
- set_timeout_value(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case NoOfConcurrentProcessesHandleTakeover:
- set_no_parallel_takeover(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-}
-
void Dbtc::execABORT_ALL_REQ(Signal* signal)
{
jamEntry();
diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index 230895c942a..a9f0905ab4c 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -1474,7 +1474,6 @@ private:
void execTUP_ABORTREQ(Signal* signal);
void execNDB_STTOR(Signal* signal);
void execREAD_CONFIG_REQ(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execDROP_TAB_REQ(Signal* signal);
void execALTER_TAB_REQ(Signal* signal);
void execTUP_DEALLOCREQ(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
index 904629fff77..2414e8a10bf 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
@@ -145,7 +145,7 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags)
{
if(copy_bits & Tuple_header::MM_GROWN)
{
- ndbout_c("abort grow");
+ if (0) ndbout_c("abort grow");
Ptr<Page> vpage;
Uint32 idx= regOperPtr.p->m_tuple_location.m_page_idx;
Uint32 mm_vars= regTabPtr.p->m_attributes[MM].m_no_of_varsize;
@@ -168,7 +168,7 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags)
}
else if(bits & Tuple_header::MM_SHRINK)
{
- ndbout_c("abort shrink");
+ if (0) ndbout_c("abort shrink");
}
}
else if (regOperPtr.p->is_first_operation() &&
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
index 1f703599cf5..91d2ca97744 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
@@ -514,7 +514,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
c_lgman->free_log_space(regFragPtr.p->m_logfile_group_id,
regOperPtr.p->m_undo_buffer_space);
- ndbout_c("insert+delete");
+ if (0) ndbout_c("insert+delete");
goto skip_disk;
}
}
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
index c394812ad1a..a292be5e304 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
@@ -2857,7 +2857,7 @@ Dbtup::handle_size_change_after_update(KeyReqStruct* req_struct,
if(needed <= alloc)
{
//ndbassert(!regOperPtr->is_first_operation());
- ndbout_c(" no grow");
+ if (0) ndbout_c(" no grow");
return 0;
}
copy_bits |= Tuple_header::MM_GROWN;
@@ -3143,7 +3143,7 @@ Dbtup::nr_delete(Signal* signal, Uint32 senderData,
break;
}
- ndbout << "DIRECT DISK DELETE: " << disk << endl;
+ if (0) ndbout << "DIRECT DISK DELETE: " << disk << endl;
disk_page_free(signal, tablePtr.p, fragPtr.p,
&disk, *(PagePtr*)&disk_page, gci);
return 0;
@@ -3195,7 +3195,7 @@ Dbtup::nr_delete_page_callback(Signal* signal,
break;
}
- ndbout << "PAGE CALLBACK DISK DELETE: " << op.m_disk_ref << endl;
+ if (0) ndbout << "PAGE CALLBACK DISK DELETE: " << op.m_disk_ref << endl;
disk_page_free(signal, tablePtr.p, fragPtr.p,
&op.m_disk_ref, pagePtr, op.m_gci);
@@ -3227,7 +3227,7 @@ Dbtup::nr_delete_log_buffer_callback(Signal* signal,
/**
* reset page no
*/
- ndbout << "LOGBUFFER CALLBACK DISK DELETE: " << op.m_disk_ref << endl;
+ if (0) ndbout << "LOGBUFFER CALLBACK DISK DELETE: " << op.m_disk_ref << endl;
disk_page_free(signal, tablePtr.p, fragPtr.p,
&op.m_disk_ref, pagePtr, op.m_gci);
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
index 3e9469c4edf..67fc5a4ceb0 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
@@ -80,7 +80,6 @@ Dbtup::Dbtup(Block_context& ctx, Pgman* pgman)
addRecSignal(GSN_TUP_ABORTREQ, &Dbtup::execTUP_ABORTREQ);
addRecSignal(GSN_NDB_STTOR, &Dbtup::execNDB_STTOR);
addRecSignal(GSN_READ_CONFIG_REQ, &Dbtup::execREAD_CONFIG_REQ, true);
- addRecSignal(GSN_SET_VAR_REQ, &Dbtup::execSET_VAR_REQ);
// Trigger Signals
addRecSignal(GSN_CREATE_TRIG_REQ, &Dbtup::execCREATE_TRIG_REQ);
@@ -724,32 +723,5 @@ void Dbtup::releaseFragrec(FragrecordPtr regFragPtr)
cfirstfreefrag = regFragPtr.i;
}//Dbtup::releaseFragrec()
-void Dbtup::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)signal->getDataPtrSend();
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
- switch (var) {
-
- case NoOfDiskPagesToDiskAfterRestartTUP:
- clblPagesPerTick = val;
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case NoOfDiskPagesToDiskDuringRestartTUP:
- // Valid only during start so value not set.
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-
-}//execSET_VAR_REQ()
-
-
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
index 84a0ada2d01..4db07591b60 100644
--- a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
@@ -192,7 +192,6 @@ private:
void execNDB_STARTCONF(Signal* signal);
void execREAD_NODESREQ(Signal* signal);
void execNDB_STARTREF(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execSTOP_PERM_REF(Signal* signal);
void execSTOP_PERM_CONF(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
index a925eb4beaf..ae5afa7a57b 100644
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
@@ -81,7 +81,6 @@ Ndbcntr::Ndbcntr(Block_context& ctx):
addRecSignal(GSN_NDB_STARTCONF, &Ndbcntr::execNDB_STARTCONF);
addRecSignal(GSN_READ_NODESREQ, &Ndbcntr::execREAD_NODESREQ);
addRecSignal(GSN_NDB_STARTREF, &Ndbcntr::execNDB_STARTREF);
- addRecSignal(GSN_SET_VAR_REQ, &Ndbcntr::execSET_VAR_REQ);
addRecSignal(GSN_STOP_PERM_REF, &Ndbcntr::execSTOP_PERM_REF);
addRecSignal(GSN_STOP_PERM_CONF, &Ndbcntr::execSTOP_PERM_CONF);
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
index e2d402ca76a..8fc9e870b80 100644
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
@@ -75,8 +75,8 @@ static BlockInfo ALL_BLOCKS[] = {
{ DBTUP_REF, 1 , 4000, 4007 },
{ DBDICT_REF, 1 , 6000, 6003 },
{ NDBCNTR_REF, 0 , 1000, 1999 },
+ { CMVMI_REF, 1 , 9000, 9999 }, // before QMGR
{ QMGR_REF, 1 , 1, 999 },
- { CMVMI_REF, 1 , 9000, 9999 },
{ TRIX_REF, 1 , 0, 0 },
{ BACKUP_REF, 1 , 10000, 10999 },
{ DBUTIL_REF, 1 , 11000, 11999 },
@@ -2067,23 +2067,6 @@ Ndbcntr::execDUMP_STATE_ORD(Signal* signal)
}//Ndbcntr::execDUMP_STATE_ORD()
-void Ndbcntr::execSET_VAR_REQ(Signal* signal) {
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
-
- switch (var) {
- case TimeToWaitAlive:
- // Valid only during start so value not set.
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- }// switch
-#endif
-}//Ndbcntr::execSET_VAR_REQ()
-
void Ndbcntr::updateNodeState(Signal* signal, const NodeState& newState) const{
NodeStateRep * const stateRep = (NodeStateRep *)&signal->theData[0];
diff --git a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
index 9b7b6b7f41c..8d51b24ec6a 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
@@ -249,7 +249,6 @@ private:
void execAPI_REGREQ(Signal* signal);
void execAPI_FAILCONF(Signal* signal);
void execREAD_NODESREQ(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execAPI_FAILREQ(Signal* signal);
void execREAD_NODESREF(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
index 23bbe94f020..f9950072ab4 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
@@ -90,7 +90,6 @@ Qmgr::Qmgr(Block_context& ctx)
addRecSignal(GSN_API_FAILREQ, &Qmgr::execAPI_FAILREQ);
addRecSignal(GSN_API_FAILCONF, &Qmgr::execAPI_FAILCONF);
addRecSignal(GSN_READ_NODESREQ, &Qmgr::execREAD_NODESREQ);
- addRecSignal(GSN_SET_VAR_REQ, &Qmgr::execSET_VAR_REQ);
addRecSignal(GSN_API_BROADCAST_REP, &Qmgr::execAPI_BROADCAST_REP);
addRecSignal(GSN_NODE_FAILREP, &Qmgr::execNODE_FAILREP);
diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index c8ba7b5aad0..4b4fba01889 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -5010,34 +5010,6 @@ Qmgr::execDUMP_STATE_ORD(Signal* signal)
#endif
}//Qmgr::execDUMP_STATE_ORD()
-void Qmgr::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- UintR val = setVarReq->value();
-
- switch (var) {
- case HeartbeatIntervalDbDb:
- setHbDelay(val/10);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case HeartbeatIntervalDbApi:
- setHbApiDelay(val/10);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case ArbitTimeout:
- setArbitTimeout(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- }// switch
-#endif
-}//execSET_VAR_REQ()
void
Qmgr::execAPI_BROADCAST_REP(Signal* signal)
diff --git a/storage/ndb/src/kernel/vm/WatchDog.cpp b/storage/ndb/src/kernel/vm/WatchDog.cpp
index d8311ec5d35..d1abb709b1e 100644
--- a/storage/ndb/src/kernel/vm/WatchDog.cpp
+++ b/storage/ndb/src/kernel/vm/WatchDog.cpp
@@ -22,7 +22,10 @@
#include <NdbOut.hpp>
#include <NdbSleep.h>
#include <ErrorHandlingMacros.hpp>
-
+#include <EventLogger.hpp>
+
+extern EventLogger g_eventLogger;
+
extern "C"
void*
runWatchDog(void* w){
@@ -125,7 +128,7 @@ WatchDog::run(){
last_stuck_action = "Unknown place";
break;
}//switch
- ndbout << "Ndb kernel is stuck in: " << last_stuck_action << endl;
+ g_eventLogger.warning("Ndb kernel is stuck in: %s", last_stuck_action);
if(alerts == 3){
shutdownSystem(last_stuck_action);
}
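The WatchDog hunk above swaps a raw ndbout print for the shared EventLogger instance, so stuck-kernel reports go through the normal cluster event log. A minimal sketch of the same pattern, assuming only the printf-style warning() call already used in the hunk (report_stuck() itself is illustrative, not part of the patch):

    #include <EventLogger.hpp>

    extern EventLogger g_eventLogger;   // defined once per ndbd process

    // Illustrative helper: route watchdog complaints through the event log
    // instead of stdout, exactly as the hunk above does inline.
    static void report_stuck(const char* last_stuck_action)
    {
      g_eventLogger.warning("Ndb kernel is stuck in: %s", last_stuck_action);
    }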
diff --git a/storage/ndb/src/mgmapi/mgmapi.cpp b/storage/ndb/src/mgmapi/mgmapi.cpp
index 28dbf573bdf..f13e5880e22 100644
--- a/storage/ndb/src/mgmapi/mgmapi.cpp
+++ b/storage/ndb/src/mgmapi/mgmapi.cpp
@@ -2233,43 +2233,6 @@ ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, int nodetype,
return nodeid;
}
-/*****************************************************************************
- * Global Replication
- ******************************************************************************/
-extern "C"
-int
-ndb_mgm_rep_command(NdbMgmHandle handle, unsigned int request,
- unsigned int* replication_id,
- struct ndb_mgm_reply* /*reply*/)
-{
- SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_rep_command");
- const ParserRow<ParserDummy> replication_reply[] = {
- MGM_CMD("global replication reply", NULL, ""),
- MGM_ARG("result", String, Mandatory, "Error message"),
- MGM_ARG("id", Int, Optional, "Id of global replication"),
- MGM_END()
- };
- CHECK_HANDLE(handle, -1);
- CHECK_CONNECTED(handle, -1);
-
- Properties args;
- args.put("request", request);
- const Properties *reply;
- reply = ndb_mgm_call(handle, replication_reply, "rep", &args);
- CHECK_REPLY(reply, -1);
-
- const char * result;
- reply->get("result", &result);
- reply->get("id", replication_id);
- if(strcmp(result,"Ok")!=0) {
- delete reply;
- return -1;
- }
-
- delete reply;
- return 0;
-}
-
extern "C"
int
ndb_mgm_set_int_parameter(NdbMgmHandle handle,
diff --git a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp
index b159c90605e..b63d4d8bc17 100644
--- a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp
+++ b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp
@@ -837,7 +837,7 @@ InitConfigFileParser::parse_mycnf()
opt.arg_type = REQUIRED_ARG;
options.push_back(opt);
- opt.name = "api";
+ opt.name = "ndbapi";
opt.id = 256;
opt.value = (gptr*)malloc(sizeof(char*));
opt.var_type = GET_STR;
@@ -852,7 +852,6 @@ InitConfigFileParser::parse_mycnf()
mysqld = &options[idx+2];
api = &options[idx+3];
}
-
Context ctx(m_info, m_errstream);
const char *groups[]= { "cluster_config", 0 };
diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
index 5560259a957..38223502175 100644
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -701,7 +701,7 @@ int MgmtSrvr::okToSendTo(NodeId nodeId, bool unCond)
return WRONG_PROCESS_TYPE;
// Check if we have contact with it
if(unCond){
- if(theFacade->theClusterMgr->getNodeInfo(nodeId).connected)
+ if(theFacade->theClusterMgr->getNodeInfo(nodeId).m_api_reg_conf)
return 0;
}
else if (theFacade->get_node_alive(nodeId) == true)
@@ -1577,32 +1577,85 @@ MgmtSrvr::status(int nodeId,
}
int
-MgmtSrvr::setEventReportingLevelImpl(int nodeId,
+MgmtSrvr::setEventReportingLevelImpl(int nodeId_arg,
const EventSubscribeReq& ll)
{
SignalSender ss(theFacade);
- ss.lock();
-
- SimpleSignal ssig;
- EventSubscribeReq * dst =
- CAST_PTR(EventSubscribeReq, ssig.getDataPtrSend());
- ssig.set(ss,TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ,
- EventSubscribeReq::SignalLength);
- *dst = ll;
-
- NodeBitmask nodes;
+ NdbNodeBitmask nodes;
+ int retries = 30;
nodes.clear();
- Uint32 max = (nodeId == 0) ? (nodeId = 1, MAX_NDB_NODES) : nodeId;
- for(; (Uint32) nodeId <= max; nodeId++)
+ while (1)
{
- if (nodeTypes[nodeId] != NODE_TYPE_DB)
- continue;
- if (okToSendTo(nodeId, true))
- continue;
- if (ss.sendSignal(nodeId, &ssig) == SEND_OK)
+ Uint32 nodeId, max;
+ ss.lock();
+ SimpleSignal ssig;
+ EventSubscribeReq * dst =
+ CAST_PTR(EventSubscribeReq, ssig.getDataPtrSend());
+ ssig.set(ss,TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ,
+ EventSubscribeReq::SignalLength);
+ *dst = ll;
+
+ if (nodeId_arg == 0)
{
- nodes.set(nodeId);
+ // all nodes
+ nodeId = 1;
+ max = MAX_NDB_NODES;
+ }
+ else
+ {
+ // only one node
+ max = nodeId = nodeId_arg;
+ }
+ // first make sure nodes are sendable
+ for(; nodeId <= max; nodeId++)
+ {
+ if (nodeTypes[nodeId] != NODE_TYPE_DB)
+ continue;
+ if (okToSendTo(nodeId, true))
+ {
+ if (theFacade->theClusterMgr->getNodeInfo(nodeId).connected == false)
+ {
+ // node not connected, we can safely skip this one
+ continue;
+ }
+ // api_reg_conf not received yet, need to retry
+ break;
+ }
+ }
+ if (nodeId <= max)
+ {
+ if (--retries)
+ {
+ ss.unlock();
+ NdbSleep_MilliSleep(100);
+ continue;
+ }
+ return SEND_OR_RECEIVE_FAILED;
+ }
+
+ if (nodeId_arg == 0)
+ {
+ // all nodes
+ nodeId = 1;
+ max = MAX_NDB_NODES;
+ }
+ else
+ {
+ // only one node
+ max = nodeId = nodeId_arg;
}
+ // now send to all sendable nodes
+ // note, lock is held, so states have not changed
+ for(; (Uint32) nodeId <= max; nodeId++)
+ {
+ if (nodeTypes[nodeId] != NODE_TYPE_DB)
+ continue;
+ if (theFacade->theClusterMgr->getNodeInfo(nodeId).connected == false)
+ continue; // node is not connected, skip
+ if (ss.sendSignal(nodeId, &ssig) == SEND_OK)
+ nodes.set(nodeId);
+ }
+ break;
}
if (nodes.isclear())
@@ -1613,6 +1666,7 @@ MgmtSrvr::setEventReportingLevelImpl(int nodeId,
int error = 0;
while (!nodes.isclear())
{
+ Uint32 nodeId;
SimpleSignal *signal = ss.waitFor();
int gsn = signal->readSignalNumber();
nodeId = refToNode(signal->header.theSendersBlockRef);
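The rewritten setEventReportingLevelImpl() above no longer fires EVENT_SUBSCRIBE_REQ at whatever nodes happen to be marked connected; it retries for up to 30 attempts, 100 ms apart, until every connected data node has also delivered API_REGCONF, dropping the facade lock between attempts so ClusterMgr can keep processing. A condensed sketch of that retry shape; ready() and send() are stand-ins for the okToSendTo()/m_api_reg_conf checks and the signal broadcast, not real functions:

    #include <NdbSleep.h>

    // Hypothetical condensation of the loop above: hold the lock only while
    // checking and sending, sleep unlocked between attempts.
    template <class Ready, class Send>
    bool send_when_all_ready(SignalSender& ss, Ready ready, Send send,
                             int retries = 30)
    {
      while (true)
      {
        ss.lock();
        if (ready())
        {
          send();                 // lock still held, node states cannot change
          ss.unlock();
          return true;
        }
        ss.unlock();              // let ClusterMgr deliver API_REGCONF
        if (--retries == 0)
          return false;
        NdbSleep_MilliSleep(100);
      }
    }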
diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
index 66e2fde0d40..19804f735b4 100644
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -594,7 +594,6 @@ private:
*/
enum WaitSignalType {
NO_WAIT, // We don't expect to receive any signal
- WAIT_SET_VAR, // Accept SET_VAR_CONF and SET_VAR_REF
WAIT_SUBSCRIBE_CONF // Accept event subscription confirmation
};
diff --git a/storage/ndb/src/mgmsrv/Services.cpp b/storage/ndb/src/mgmsrv/Services.cpp
index 1cde5242a1d..dc865c594c0 100644
--- a/storage/ndb/src/mgmsrv/Services.cpp
+++ b/storage/ndb/src/mgmsrv/Services.cpp
@@ -349,19 +349,6 @@ MgmApiSession::runSession()
switch(ctx.m_status) {
case Parser_t::UnknownCommand:
-#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT
- /* Backwards compatibility for old NDBs that still use
- * the old "GET CONFIG" command.
- */
- size_t i;
- for(i=0; i<strlen(ctx.m_currentToken); i++)
- ctx.m_currentToken[i] = toupper(ctx.m_currentToken[i]);
-
- if(strncmp("GET CONFIG ",
- ctx.m_currentToken,
- strlen("GET CONFIG ")) == 0)
- getConfig_old(ctx);
-#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */
break;
default:
break;
@@ -382,32 +369,6 @@ MgmApiSession::runSession()
DBUG_VOID_RETURN;
}
-#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT
-void
-MgmApiSession::getConfig_old(Parser_t::Context &ctx) {
- Properties args;
-
- Uint32 version, node;
-
- if(sscanf(ctx.m_currentToken, "GET CONFIG %d %d",
- (int *)&version, (int *)&node) != 2) {
- m_output->println("Expected 2 arguments for GET CONFIG");
- return;
- }
-
- /* Put arguments in properties object so we can call the real function */
- args.put("version", version);
- args.put("node", node);
- getConfig_common(ctx, args, true);
-}
-#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */
-
-void
-MgmApiSession::getConfig(Parser_t::Context &ctx,
- const class Properties &args) {
- getConfig_common(ctx, args);
-}
-
static Properties *
backward(const char * base, const Properties* reply){
Properties * ret = new Properties();
@@ -584,9 +545,9 @@ MgmApiSession::get_nodeid(Parser_t::Context &,
}
void
-MgmApiSession::getConfig_common(Parser_t::Context &,
- const class Properties &args,
- bool compat) {
+MgmApiSession::getConfig(Parser_t::Context &,
+ const class Properties &args)
+{
Uint32 version, node = 0;
args.get("version", &version);
@@ -600,47 +561,6 @@ MgmApiSession::getConfig_common(Parser_t::Context &,
return;
}
- if(version > 0 && version < makeVersion(3, 5, 0) && compat){
- Properties *reply = backward("", conf->m_oldConfig);
- reply->put("Version", version);
- reply->put("LocalNodeId", node);
-
- backward("", reply);
- //reply->print();
-
- const Uint32 size = reply->getPackedSize();
- Uint32 *buffer = new Uint32[size/4+1];
-
- reply->pack(buffer);
- delete reply;
-
- const int uurows = (size + 44)/45;
- char * uubuf = new char[uurows * 62+5];
-
- const int uusz = uuencode_mem(uubuf, (char *)buffer, size);
- delete[] buffer;
-
- m_output->println("GET CONFIG %d %d %d %d %d",
- 0, version, node, size, uusz);
-
- m_output->println("begin 664 Ndb_cfg.bin");
-
- /* XXX Need to write directly to the socket, because the uubuf is not
- * NUL-terminated. This could/should probably be done in a nicer way.
- */
- write_socket(m_socket, MAX_WRITE_TIMEOUT, uubuf, uusz);
- delete[] uubuf;
-
- m_output->println("end");
- m_output->println("");
- return;
- }
-
- if(compat){
- m_output->println("GET CONFIG %d %d %d %d %d",1, version, 0, 0, 0);
- return;
- }
-
if(node != 0){
bool compatible;
switch (m_mgmsrv.getNodeType(node)) {
@@ -856,8 +776,7 @@ MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &,
DBUG_PRINT("enter",("node=%d, category=%d, level=%d", node, cat, level));
- /* XXX should use constants for this value */
- if(level > 15) {
+ if(level > NDB_MGM_MAX_LOGLEVEL) {
m_output->println(reply);
m_output->println("result: Invalid loglevel %d", level);
m_output->println("");
@@ -900,8 +819,7 @@ MgmApiSession::setLogLevel(Parser<MgmApiSession>::Context &,
args.get("category", &cat);
args.get("level", &level);
- /* XXX should use constants for this value */
- if(level > 15) {
+ if(level > NDB_MGM_MAX_LOGLEVEL) {
m_output->println("set loglevel reply");
m_output->println("result: Invalid loglevel", errorString.c_str());
m_output->println("");
@@ -1604,7 +1522,7 @@ MgmApiSession::listen_event(Parser<MgmApiSession>::Context & ctx,
}
int level = atoi(spec[1].c_str());
- if(level < 0 || level > 15){
+ if(level < 0 || level > NDB_MGM_MAX_LOGLEVEL){
msg.appfmt("Invalid level: >%s<", spec[1].c_str());
result = -1;
goto done;
diff --git a/storage/ndb/src/mgmsrv/Services.hpp b/storage/ndb/src/mgmsrv/Services.hpp
index f6af16d58ba..c112c66da36 100644
--- a/storage/ndb/src/mgmsrv/Services.hpp
+++ b/storage/ndb/src/mgmsrv/Services.hpp
@@ -24,9 +24,6 @@
#include "MgmtSrvr.hpp"
-/** Undefine this to remove backwards compatibility for "GET CONFIG". */
-#define MGM_GET_CONFIG_BACKWARDS_COMPAT
-
class MgmApiSession : public SocketServer::Session
{
static void stop_session_if_timed_out(SocketServer::Session *_s, void *data);
@@ -49,9 +46,6 @@ private:
Parser_t::Context *m_ctx;
Uint64 m_session_id;
- void getConfig_common(Parser_t::Context &ctx,
- const class Properties &args,
- bool compat = false);
const char *get_error_text(int err_no)
{ return m_mgmsrv.getErrorText(err_no, m_err_str, sizeof(m_err_str)); }
@@ -61,9 +55,6 @@ public:
void runSession();
void getConfig(Parser_t::Context &ctx, const class Properties &args);
-#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT
- void getConfig_old(Parser_t::Context &ctx);
-#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */
void get_nodeid(Parser_t::Context &ctx, const class Properties &args);
void getVersion(Parser_t::Context &ctx, const class Properties &args);
diff --git a/storage/ndb/src/ndbapi/ClusterMgr.cpp b/storage/ndb/src/ndbapi/ClusterMgr.cpp
index b162b85d61e..2a794f69ecb 100644
--- a/storage/ndb/src/ndbapi/ClusterMgr.cpp
+++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp
@@ -313,7 +313,7 @@ ClusterMgr::showState(NodeId nodeId){
ClusterMgr::Node::Node()
: m_state(NodeState::SL_NOTHING) {
compatible = nfCompleteRep = true;
- connected = defined = m_alive = false;
+ connected = defined = m_alive = m_api_reg_conf = false;
m_state.m_connected_nodes.clear();
}
@@ -385,6 +385,8 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
node.m_info.m_version);
}
+ node.m_api_reg_conf = true;
+
node.m_state = apiRegConf->nodeState;
if (node.compatible && (node.m_state.startLevel == NodeState::SL_STARTED ||
node.m_state.startLevel == NodeState::SL_SINGLEUSER)){
@@ -501,6 +503,7 @@ ClusterMgr::reportDisconnected(NodeId nodeId){
noOfConnectedNodes--;
theNodes[nodeId].connected = false;
+ theNodes[nodeId].m_api_reg_conf = false;
theNodes[nodeId].m_state.m_connected_nodes.clear();
reportNodeFailed(nodeId, true);
diff --git a/storage/ndb/src/ndbapi/ClusterMgr.hpp b/storage/ndb/src/ndbapi/ClusterMgr.hpp
index bb20d447c0c..6e74620dd4f 100644
--- a/storage/ndb/src/ndbapi/ClusterMgr.hpp
+++ b/storage/ndb/src/ndbapi/ClusterMgr.hpp
@@ -70,6 +70,7 @@ public:
bool compatible; // Version is compatible
bool nfCompleteRep; // NF Complete Rep has arrived
bool m_alive; // Node is alive
+ bool m_api_reg_conf;// API_REGCONF has arrived
NodeInfo m_info;
NodeState m_state;
diff --git a/storage/ndb/src/ndbapi/Makefile.am b/storage/ndb/src/ndbapi/Makefile.am
index 90e61b5b188..8469110fddb 100644
--- a/storage/ndb/src/ndbapi/Makefile.am
+++ b/storage/ndb/src/ndbapi/Makefile.am
@@ -15,6 +15,10 @@
#SUBDIRS = signal-sender
+noinst_PROGRAMS = ndberror_check
+
+ndberror_check_SOURCES = ndberror_check.c
+
noinst_LTLIBRARIES = libndbapi.la
libndbapi_la_SOURCES = \
@@ -61,6 +65,11 @@ NDB_CXXFLAGS_RELEASE_LOC = -O2
include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_ndbapi.mk.am
+ndberror_check_LDFLAGS = \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/strings/libmystrings.a
+
# Don't update the files from bitkeeper
%::SCCS/s.%
diff --git a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
index 8958b6ec596..b5019cf7386 100644
--- a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
@@ -715,6 +715,22 @@ insertATTRINFO_error1:
}//NdbOperation::insertATTRINFOloop()
+NdbOperation::AbortOption
+NdbOperation::getAbortOption() const
+{
+ return (AbortOption)m_abortOption;
+}
-
-
+int
+NdbOperation::setAbortOption(AbortOption ao)
+{
+ switch(ao)
+ {
+ case AO_IgnoreError:
+ case AbortOnError:
+ m_abortOption= ao;
+ return 0;
+ default:
+ return -1;
+ }
+}
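The two methods added above give applications per-operation control over abort behaviour. A small usage sketch, hedged: the table name, Ndb handle, and error handling are placeholders, and the enum values are assumed to be reachable as NdbOperation::AO_IgnoreError / NdbOperation::AbortOnError as in the switch above:

    // Sketch only: "t1" and myNdb are placeholders.
    NdbTransaction* trans = myNdb->startTransaction();
    NdbOperation* op = trans->getNdbOperation("t1");
    op->insertTuple();

    // Ignore row-level errors for this operation only; setAbortOption()
    // returns -1 for any value other than AO_IgnoreError / AbortOnError.
    if (op->setAbortOption(NdbOperation::AO_IgnoreError) == -1)
      ; // invalid option, handle error

    assert(op->getAbortOption() == NdbOperation::AO_IgnoreError);
    trans->execute(NdbTransaction::Commit);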
diff --git a/storage/ndb/src/ndbapi/ObjectMap.hpp b/storage/ndb/src/ndbapi/ObjectMap.hpp
index 13f9be66c24..9113a70798a 100644
--- a/storage/ndb/src/ndbapi/ObjectMap.hpp
+++ b/storage/ndb/src/ndbapi/ObjectMap.hpp
@@ -46,7 +46,7 @@ private:
} * m_map;
NdbMutex * m_mutex;
- void expand(Uint32 newSize);
+ int expand(Uint32 newSize);
};
inline
@@ -73,9 +73,8 @@ NdbObjectIdMap::map(void * object){
// lock();
- if(m_firstFree == InvalidId){
- expand(m_expandSize);
- }
+ if(m_firstFree == InvalidId && expand(m_expandSize))
+ return InvalidId;
Uint32 ff = m_firstFree;
m_firstFree = m_map[ff].m_next;
@@ -127,7 +126,7 @@ NdbObjectIdMap::getObject(Uint32 id){
return 0;
}
-inline void
+inline int
NdbObjectIdMap::expand(Uint32 incSize){
NdbMutex_Lock(m_mutex);
Uint32 newSize = m_size + incSize;
@@ -146,9 +145,11 @@ NdbObjectIdMap::expand(Uint32 incSize){
}
else
{
- ndbout_c("NdbObjectIdMap::expand unable to expand!!");
+ NdbMutex_Unlock(m_mutex);
+ return -1;
}
NdbMutex_Unlock(m_mutex);
+ return 0;
}
#endif
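Since expand() can now fail instead of only logging, NdbObjectIdMap::map() hands back InvalidId when it cannot grow the table, and callers must treat that value as an out-of-memory signal rather than a usable handle. A hedged sketch of the calling pattern (the surrounding error handling is illustrative, and InvalidId is assumed to be visible to the caller, as it is used for the failure value in the header above):

    Uint32 id = theNdbObjectIdMap.map(myObject);
    if (id == NdbObjectIdMap::InvalidId)
    {
      // expand() failed; bail out instead of storing a bogus handle
      return -1;
    }
    // ... id can now be passed around and later resolved via getObject(id)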
diff --git a/storage/ndb/src/ndbapi/SignalSender.hpp b/storage/ndb/src/ndbapi/SignalSender.hpp
index ec874e63c52..4cad759a334 100644
--- a/storage/ndb/src/ndbapi/SignalSender.hpp
+++ b/storage/ndb/src/ndbapi/SignalSender.hpp
@@ -32,7 +32,7 @@ public:
Uint32 theData[25];
LinearSectionPtr ptr[3];
- int readSignalNumber() {return header.theVerId_signalNumber; }
+ int readSignalNumber() const {return header.theVerId_signalNumber; }
Uint32 *getDataPtrSend() { return theData; }
const Uint32 *getDataPtr() const { return theData; }
diff --git a/storage/ndb/src/ndbapi/TransporterFacade.cpp b/storage/ndb/src/ndbapi/TransporterFacade.cpp
index 2402c979620..eabfc6bc371 100644
--- a/storage/ndb/src/ndbapi/TransporterFacade.cpp
+++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp
@@ -1527,7 +1527,8 @@ SignalSender::sendSignal(Uint16 nodeId, const SimpleSignal * s){
signalLogger.flushSignalLog();
}
#endif
-
+ assert(getNodeInfo(nodeId).m_api_reg_conf == true ||
+ s->readSignalNumber() == GSN_API_REGREQ);
return theFacade->theTransporterRegistry->prepareSend(&s->header,
1, // JBB
&s->theData[0],
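Together with the new ClusterMgr::Node::m_api_reg_conf flag above, this assert encodes the invariant that nothing except API_REGREQ may be sent to a node before that node's API_REGCONF has arrived. The same guard written as an explicit check rather than an assert, as a sketch (safe_to_send() is a hypothetical helper, not part of the patch):

    // Mirrors the assert in SignalSender::sendSignal() above.
    bool SignalSender::safe_to_send(Uint16 nodeId, const SimpleSignal* s)
    {
      return getNodeInfo(nodeId).m_api_reg_conf ||
             s->readSignalNumber() == GSN_API_REGREQ;
    }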
diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c
index 24c79ce1e2c..8e70f5ee250 100644
--- a/storage/ndb/src/ndbapi/ndberror.c
+++ b/storage/ndb/src/ndbapi/ndberror.c
@@ -367,7 +367,7 @@ ErrorBundle ErrorCodes[] = {
{ 771, HA_WRONG_CREATE_OPTION, AE, "Given NODEGROUP doesn't exist in this cluster" },
{ 772, HA_WRONG_CREATE_OPTION, IE, "Given fragmentType doesn't exist" },
{ 749, HA_WRONG_CREATE_OPTION, IE, "Primary Table in wrong state" },
- { 763, HA_WRONG_CREATE_OPTION, SE, "Invalid undo buffer size" },
+ { 779, HA_WRONG_CREATE_OPTION, SE, "Invalid undo buffer size" },
{ 764, HA_WRONG_CREATE_OPTION, SE, "Invalid extent size" },
{ 765, DMEC, SE, "Out of filegroup records" },
{ 750, IE, SE, "Invalid file type" },
diff --git a/storage/ndb/src/ndbapi/ndberror_check.c b/storage/ndb/src/ndbapi/ndberror_check.c
new file mode 100644
index 00000000000..6986d99f3d4
--- /dev/null
+++ b/storage/ndb/src/ndbapi/ndberror_check.c
@@ -0,0 +1,38 @@
+/* Copyright (C) 2007 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <stdio.h>
+#include "ndberror.c"
+
+int main()
+{
+ int i, j, error = 0;
+
+ /* check for duplicate error codes */
+ for(i = 0; i < NbErrorCodes; i++)
+ {
+ for(j = i + 1; j < NbErrorCodes; j++)
+ {
+ if (ErrorCodes[i].code == ErrorCodes[j].code)
+ {
+ fprintf(stderr, "Duplicate error code %u\n", ErrorCodes[i].code);
+ error = 1;
+ }
+ }
+ }
+ if (error)
+ return -1;
+ return 0;
+}
diff --git a/storage/ndb/test/include/NdbRestarter.hpp b/storage/ndb/test/include/NdbRestarter.hpp
index 3f7783be6e0..916848adf45 100644
--- a/storage/ndb/test/include/NdbRestarter.hpp
+++ b/storage/ndb/test/include/NdbRestarter.hpp
@@ -61,6 +61,8 @@ public:
int dumpStateAllNodes(const int * _args, int _num_args);
int getMasterNodeId();
+ int getNextMasterNodeId(int nodeId);
+ int getNodeGroup(int nodeId);
int getRandomNodeSameNodeGroup(int nodeId, int randomNumber);
int getRandomNodeOtherNodeGroup(int nodeId, int randomNumber);
int getRandomNotMasterNodeId(int randomNumber);
diff --git a/storage/ndb/test/ndbapi/testBitfield.cpp b/storage/ndb/test/ndbapi/testBitfield.cpp
index e26f495f5a4..8ba8f3d92ef 100644
--- a/storage/ndb/test/ndbapi/testBitfield.cpp
+++ b/storage/ndb/test/ndbapi/testBitfield.cpp
@@ -8,6 +8,15 @@
static const char* _dbname = "TEST_DB";
static int g_loops = 7;
+
+NDB_STD_OPTS_VARS;
+
+static struct my_option my_long_options[] =
+{
+ NDB_STD_OPTS("ndb_desc"),
+ { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
+};
+
static void usage()
{
ndb_std_print_version();
@@ -36,9 +45,10 @@ main(int argc, char** argv){
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
- argc--;
- argv++;
-
+ if ((ho_error=handle_options(&argc, &argv, my_long_options,
+ ndb_std_get_one_option)))
+ return NDBT_ProgramExit(NDBT_WRONGARGS);
+
Ndb_cluster_connection con(opt_connect_str);
if(con.connect(12, 5, 1))
{
diff --git a/storage/ndb/test/ndbapi/testDict.cpp b/storage/ndb/test/ndbapi/testDict.cpp
index f7de43aea20..f72b9dee80b 100644
--- a/storage/ndb/test/ndbapi/testDict.cpp
+++ b/storage/ndb/test/ndbapi/testDict.cpp
@@ -321,7 +321,11 @@ int runCreateAndDropAtRandom(NDBT_Context* ctx, NDBT_Step* step)
}
i++;
}
-
+
+ for (Uint32 i = 0; i<numTables; i++)
+ if (tabList[i])
+ pDic->dropTable(NDBT_Tables::getTable(i)->getName());
+
delete [] tabList;
return result;
}
diff --git a/storage/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp
index 04e77f70c38..34cb356236c 100644
--- a/storage/ndb/test/ndbapi/testNodeRestart.cpp
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp
@@ -1273,6 +1273,85 @@ int runBug25984(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
+int
+runBug26457(NDBT_Context* ctx, NDBT_Step* step)
+{
+ NdbRestarter res;
+ if (res.getNumDbNodes() < 4)
+ return NDBT_OK;
+
+ int loops = ctx->getNumLoops();
+ while (loops --)
+ {
+retry:
+ int master = res.getMasterNodeId();
+ int next = res.getNextMasterNodeId(master);
+
+ ndbout_c("master: %d next: %d", master, next);
+
+ if (res.getNodeGroup(master) == res.getNodeGroup(next))
+ {
+ res.restartOneDbNode(next, false, false, true);
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+ goto retry;
+ }
+
+ int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 2 };
+
+ if (res.dumpStateOneNode(next, val2, 2))
+ return NDBT_FAILED;
+
+ if (res.insertErrorInNode(next, 7180))
+ return NDBT_FAILED;
+
+ res.restartOneDbNode(master, false, false, true);
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+ }
+
+ return NDBT_OK;
+}
+
+int
+runBug26481(NDBT_Context* ctx, NDBT_Step* step)
+{
+
+ int result = NDBT_OK;
+ int loops = ctx->getNumLoops();
+ int records = ctx->getNumRecords();
+ NdbRestarter res;
+
+ int node = res.getRandomNotMasterNodeId(rand());
+ ndbout_c("node: %d", node);
+ if (res.restartOneDbNode(node, true, true, true))
+ return NDBT_FAILED;
+
+ if (res.waitNodesNoStart(&node, 1))
+ return NDBT_FAILED;
+
+ int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+ if (res.dumpStateOneNode(node, val2, 2))
+ return NDBT_FAILED;
+
+ if (res.insertErrorInNode(node, 7018))
+ return NDBT_FAILED;
+
+ if (res.startNodes(&node, 1))
+ return NDBT_FAILED;
+
+ res.waitNodesStartPhase(&node, 1, 3);
+
+ if (res.waitNodesNoStart(&node, 1))
+ return NDBT_FAILED;
+
+ res.startNodes(&node, 1);
+
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+
+ return NDBT_OK;
+}
NDBT_TESTSUITE(testNodeRestart);
TESTCASE("NoLoad",
@@ -1612,6 +1691,12 @@ TESTCASE("Bug25554", ""){
TESTCASE("Bug25984", ""){
INITIALIZER(runBug25984);
}
+TESTCASE("Bug26457", ""){
+ INITIALIZER(runBug26457);
+}
+TESTCASE("Bug26481", ""){
+ INITIALIZER(runBug26481);
+}
NDBT_TESTSUITE_END(testNodeRestart);
int main(int argc, const char** argv){
diff --git a/storage/ndb/test/run-test/Makefile.am b/storage/ndb/test/run-test/Makefile.am
index b5cb69d266e..d6c6536cfc8 100644
--- a/storage/ndb/test/run-test/Makefile.am
+++ b/storage/ndb/test/run-test/Makefile.am
@@ -18,20 +18,18 @@ testdir=$(prefix)/mysql-test/ndb
include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_util.mk.am
include $(top_srcdir)/storage/ndb/config/type_mgmapiclient.mk.am
+include $(top_srcdir)/storage/ndb/config/type_ndbapitools.mk.am
test_PROGRAMS = atrt
test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \
- conf-daily-basic-ndb08.txt \
- conf-daily-devel-ndb08.txt \
- conf-daily-sql-ndb08.txt \
- conf-ndbmaster.txt \
- conf-shark.txt \
- conf-dl145a.txt
+ conf-ndbmaster.cnf \
+ conf-dl145a.cnf test-tests.txt
test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
- atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh
+ atrt-clear-result.sh autotest-run.sh
+
+atrt_SOURCES = main.cpp setup.cpp files.cpp
-atrt_SOURCES = main.cpp run-test.hpp
INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/test/include
LDADD_LOC = $(top_builddir)/storage/ndb/test/src/libNDBT.a \
$(top_builddir)/storage/ndb/src/libndbclient.la \
@@ -39,6 +37,14 @@ LDADD_LOC = $(top_builddir)/storage/ndb/test/src/libNDBT.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
+atrt_CXXFLAGS = -I$(top_srcdir)/ndb/src/mgmapi \
+ -I$(top_srcdir)/ndb/src/mgmsrv \
+ -I$(top_srcdir)/ndb/include/mgmcommon \
+ -DMYSQLCLUSTERDIR="\"\"" \
+ -DDEFAULT_PREFIX="\"$(prefix)\""
+
+atrt_LDFLAGS = -static @ndb_bin_am_ldflags@
+
wrappersdir=$(prefix)/bin
wrappers_SCRIPTS=atrt-testBackup atrt-mysql-test-run
diff --git a/storage/ndb/test/run-test/atrt-gather-result.sh b/storage/ndb/test/run-test/atrt-gather-result.sh
index 93d4ae428d0..f2473578b41 100755
--- a/storage/ndb/test/run-test/atrt-gather-result.sh
+++ b/storage/ndb/test/run-test/atrt-gather-result.sh
@@ -8,7 +8,7 @@ rm -rf *
while [ $# -gt 0 ]
do
- rsync -a "$1" .
+ rsync -a --exclude='BACKUP' --exclude='ndb_*_fs' "$1" .
shift
done
diff --git a/storage/ndb/test/run-test/atrt.hpp b/storage/ndb/test/run-test/atrt.hpp
new file mode 100644
index 00000000000..14d2dccd245
--- /dev/null
+++ b/storage/ndb/test/run-test/atrt.hpp
@@ -0,0 +1,161 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef atrt_config_hpp
+#define atrt_config_hpp
+
+#include <ndb_global.h>
+#include <Vector.hpp>
+#include <BaseString.hpp>
+#include <Logger.hpp>
+#include <mgmapi.h>
+#include <CpcClient.hpp>
+#include <Properties.hpp>
+
+enum ErrorCodes
+{
+ ERR_OK = 0,
+ ERR_NDB_FAILED = 101,
+ ERR_SERVERS_FAILED = 102,
+ ERR_MAX_TIME_ELAPSED = 103
+};
+
+struct atrt_host
+{
+ size_t m_index;
+ BaseString m_user;
+ BaseString m_basedir;
+ BaseString m_hostname;
+ SimpleCpcClient * m_cpcd;
+ Vector<struct atrt_process*> m_processes;
+};
+
+struct atrt_options
+{
+ enum Feature {
+ AO_REPLICATION = 1,
+ AO_NDBCLUSTER = 2
+ };
+
+ int m_features;
+ Properties m_loaded;
+ Properties m_generated;
+};
+
+struct atrt_process
+{
+ size_t m_index;
+ struct atrt_host * m_host;
+ struct atrt_cluster * m_cluster;
+
+ enum Type {
+ AP_ALL = 255
+ ,AP_NDBD = 1
+ ,AP_NDB_API = 2
+ ,AP_NDB_MGMD = 4
+ ,AP_MYSQLD = 16
+ ,AP_CLIENT = 32
+ ,AP_CLUSTER = 256 // Used for options parsing for "cluster" options
+ } m_type;
+
+ SimpleCpcClient::Process m_proc;
+
+ NdbMgmHandle m_ndb_mgm_handle; // if type == ndb_mgm
+ atrt_process * m_mysqld; // if type == client
+ atrt_process * m_rep_src; // if type == mysqld
+ Vector<atrt_process*> m_rep_dst; // if type == mysqld
+
+ atrt_options m_options;
+};
+
+struct atrt_cluster
+{
+ BaseString m_name;
+ BaseString m_dir;
+ Vector<atrt_process*> m_processes;
+ atrt_options m_options;
+};
+
+struct atrt_config
+{
+ bool m_generated;
+ BaseString m_key;
+ BaseString m_replication;
+ Vector<atrt_host*> m_hosts;
+ Vector<atrt_cluster*> m_clusters;
+ Vector<atrt_process*> m_processes;
+};
+
+struct atrt_testcase
+{
+ bool m_report;
+ bool m_run_all;
+ time_t m_max_time;
+ BaseString m_command;
+ BaseString m_args;
+ BaseString m_name;
+};
+
+extern Logger g_logger;
+
+void require(bool x);
+bool parse_args(int argc, char** argv);
+bool setup_config(atrt_config&);
+bool configure(atrt_config&, int setup);
+bool setup_directories(atrt_config&, int setup);
+bool setup_files(atrt_config&, int setup, int sshx);
+
+bool deploy(atrt_config&);
+bool sshx(atrt_config&, unsigned procmask);
+bool start(atrt_config&, unsigned procmask);
+
+bool remove_dir(const char *, bool incl = true);
+bool connect_hosts(atrt_config&);
+bool connect_ndb_mgm(atrt_config&);
+bool wait_ndb(atrt_config&, int ndb_mgm_node_status);
+bool start_processes(atrt_config&, int);
+bool stop_processes(atrt_config&, int);
+bool update_status(atrt_config&, int);
+int is_running(atrt_config&, int);
+bool gather_result(atrt_config&, int * result);
+
+bool read_test_case(FILE *, atrt_testcase&, int& line);
+bool setup_test_case(atrt_config&, const atrt_testcase&);
+
+bool setup_hosts(atrt_config&);
+
+/**
+ * Global variables...
+ */
+extern Logger g_logger;
+extern atrt_config g_config;
+
+extern const char * g_cwd;
+extern const char * g_my_cnf;
+extern const char * g_user;
+extern const char * g_basedir;
+extern const char * g_prefix;
+extern int g_baseport;
+extern int g_fqpn;
+extern int g_default_ports;
+
+extern const char * g_clusters;
+
+extern const char *save_file;
+extern const char *save_group_suffix;
+extern char *save_extra_file;
+
+#endif
diff --git a/storage/ndb/test/run-test/autotest-boot.sh b/storage/ndb/test/run-test/autotest-boot.sh
new file mode 100644
index 00000000000..31f611460ec
--- /dev/null
+++ b/storage/ndb/test/run-test/autotest-boot.sh
@@ -0,0 +1,165 @@
+#!/bin/sh
+#############################################################
+# This script created by Jonas does the following #
+# Cleans up clones and previous builds, pulls new clones, #
+# builds, deploys, configures the tests and launches ATRT #
+#############################################################
+
+###############
+#Script setup #
+##############
+
+save_args=$*
+VERSION="autotest-boot.sh version 1.00"
+
+DATE=`date '+%Y-%m-%d'`
+HOST=`hostname -s`
+export DATE HOST
+
+set -e
+
+echo "`date` starting: $*"
+
+verbose=0
+do_clone=yes
+build=yes
+
+conf=
+LOCK=$HOME/.autotest-lock
+
+############################
+# Read command line entries#
+############################
+
+while [ "$1" ]
+do
+ case "$1" in
+ --no-clone) do_clone="";;
+ --no-build) build="";;
+ --verbose) verbose=`expr $verbose + 1`;;
+ --clone=*) clone=`echo $1 | sed s/--clone=//`;;
+ --version) echo $VERSION; exit;;
+ --conf=*) conf=`echo $1 | sed s/--conf=//`;;
+ *) RUN=$*;;
+ esac
+ shift
+done
+
+#################################
+#Make sure the configfile exists#
+#if it does not, exit; if it does#
+# (.) load it #
+#################################
+if [ -z "$conf" ]
+then
+ conf=`pwd`/autotest.conf
+fi
+
+if [ -f $conf ]
+then
+ . $conf
+else
+ echo "Can't find config file: $conf"
+ exit
+fi
+
+###############################
+# Validate that all interesting
+# variables were set in conf
+###############################
+vars="src_clone_base install_dir build_dir"
+for i in $vars
+do
+ t=`echo echo \\$$i`
+ if [ -z "`eval $t`" ]
+ then
+ echo "Invalid config: $conf, variable $i is not set"
+ exit
+ fi
+done
+
+###############################
+#Print out the environment vars#
+###############################
+
+if [ $verbose -gt 0 ]
+then
+ env
+fi
+
+####################################
+# Setup the lock file name and path#
+# Setup the clone source location #
+####################################
+
+src_clone=$src_clone_base-$clone
+
+#######################################
+# Check to see if the lock file exists#
+# If it does exit. #
+#######################################
+
+if [ -f $LOCK ]
+then
+ echo "Lock file exists: $LOCK"
+ exit 1
+fi
+
+#######################################
+# If the lock file does not exist then#
+# create it with date and run info #
+#######################################
+
+echo "$DATE $RUN" > $LOCK
+
+#############################
+#If any errors here down, we#
+# trap them, and remove the #
+# Lock file before exit #
+#############################
+if [ `uname -s` != "SunOS" ]
+then
+ trap "rm -f $LOCK" ERR
+fi
+
+# You can add more to this path#
+################################
+
+dst_place=${build_dir}/clone-mysql-$clone-$DATE.$$
+
+#########################################
+# Delete source and pull down the latest#
+#########################################
+
+if [ "$do_clone" ]
+then
+ rm -rf $dst_place
+ bk clone $src_clone $dst_place
+fi
+
+##########################################
+# Build the source, make installs, and #
+# create the database to be rsynced #
+##########################################
+
+if [ "$build" ]
+then
+ cd $dst_place
+ rm -rf $install_dir
+ BUILD/compile-ndb-autotest --prefix=$install_dir
+ make install
+fi
+
+
+################################
+# Start run script #
+################################
+
+script=$install_dir/mysql-test/ndb/autotest-run.sh
+$script $save_args --conf=$conf --install-dir=$install_dir --suite=$RUN --nolock
+
+if [ "$build" ]
+then
+ rm -rf $dst_place
+fi
+rm -f $LOCK
diff --git a/storage/ndb/test/run-test/autotest-run.sh b/storage/ndb/test/run-test/autotest-run.sh
new file mode 100644
index 00000000000..34c3fe53949
--- /dev/null
+++ b/storage/ndb/test/run-test/autotest-run.sh
@@ -0,0 +1,270 @@
+#!/bin/sh
+#############################################################
+# This script created by Jonas does the following #
+# Cleans up clones and previous builds, pulls new clones, #
+# builds, deploys, configures the tests and launches ATRT #
+#############################################################
+
+###############
+#Script setup #
+##############
+
+save_args=$*
+VERSION="autotest-run.sh version 1.00"
+
+DATE=`date '+%Y-%m-%d'`
+HOST=`hostname -s`
+export DATE HOST
+
+set -e
+ulimit -Sc unlimited
+
+echo "`date` starting: $*"
+
+RSYNC_RSH=ssh
+export RSYNC_RSH
+
+verbose=0
+report=yes
+nolock=
+RUN="daily-basic"
+conf=autotest.conf
+LOCK=$HOME/.autotest-lock
+
+############################
+# Read command line entries#
+############################
+
+while [ "$1" ]
+do
+ case "$1" in
+ --verbose) verbose=`expr $verbose + 1`;;
+ --conf=*) conf=`echo $1 | sed s/--conf=//`;;
+ --version) echo $VERSION; exit;;
+ --suite=*) RUN=`echo $1 | sed s/--suite=//`;;
+ --install-dir=*) install_dir=`echo $1 | sed s/--install-dir=//`;;
+ --clone=*) clone=`echo $1 | sed s/--clone=//`;;
+ --nolock) nolock=true;;
+ esac
+ shift
+done
+
+#################################
+#Make sure the configfile exists#
+#if it does not, exit; if it does#
+# (.) load it #
+#################################
+
+install_dir_save=$install_dir
+if [ -f $conf ]
+then
+ . $conf
+else
+ echo "Can't find config file: $conf"
+ exit
+fi
+install_dir=$install_dir_save
+
+###############################
+# Validate that all interesting
+# variables were set in conf
+###############################
+vars="target base_dir install_dir hosts"
+if [ "$report" ]
+then
+ vars="$vars result_host result_path"
+fi
+for i in $vars
+do
+ t=`echo echo \\$$i`
+ if [ -z "`eval $t`" ]
+ then
+ echo "Invalid config: $conf, variable $i is not set"
+ exit
+ fi
+done
+
+###############################
+#Print out the environment vars#
+###############################
+
+if [ $verbose -gt 0 ]
+then
+ env
+fi
+
+#######################################
+# Check to see if the lock file exists#
+# If it does exit. #
+#######################################
+
+if [ -z "$nolock" ]
+then
+ if [ -f $LOCK ]
+ then
+ echo "Lock file exists: $LOCK"
+ exit 1
+ fi
+ echo "$DATE $RUN" > $LOCK
+fi
+
+#############################
+#If any errors here down, we#
+# trap them, and remove the #
+# Lock file before exit #
+#############################
+if [ `uname -s` != "SunOS" ]
+then
+ trap "rm -f $LOCK" ERR
+fi
+
+
+###############################################
+# Check that all interesting files are present#
+###############################################
+
+test_dir=$install_dir/mysql-test/ndb
+atrt=$test_dir/atrt
+test_file=$test_dir/$RUN-tests.txt
+
+if [ ! -f "$test_file" ]
+then
+ echo "Cant find testfile: $test_file"
+ exit 1
+fi
+
+if [ ! -x "$atrt" ]
+then
+ echo "Cant find atrt binary at $atrt"
+ exit 1
+fi
+
+############################
+# check ndb_cpcc fail hosts#
+############################
+failed=`ndb_cpcc $hosts | awk '{ if($1=="Failed"){ print;}}'`
+if [ "$failed" ]
+then
+ echo "Cant contact cpcd on $failed, exiting"
+ exit 1
+fi
+
+#############################
+# Function for replacing the#
+# CHOOSE_host placeholders #
+# with real host names. #
+# Note: $$ = PID #
+#############################
+choose(){
+ SRC=$1
+ TMP1=/tmp/choose.$$
+ TMP2=/tmp/choose.$$.$$
+ shift
+
+ cp $SRC $TMP1
+ i=1
+ while [ $# -gt 0 ]
+ do
+ sed -e s,"CHOOSE_host$i",$1,g < $TMP1 > $TMP2
+ mv $TMP2 $TMP1
+ shift
+ i=`expr $i + 1`
+ done
+ cat $TMP1
+ rm -f $TMP1
+}
+
+choose_conf(){
+ if [ -f $test_dir/conf-$1-$HOST.cnf ]
+ then
+ echo "$test_dir/conf-$1-$HOST.cnf"
+ elif [ -f $test_dir/conf-$1.cnf ]
+ then
+ echo "$test_dir/conf-$1.cnf"
+ elif [ -f $test_dir/conf-$HOST.cnf ]
+ then
+ echo "$test_dir/conf-$HOST.cnf"
+ else
+ echo "Unable to find conf file looked for" 1>&2
+ echo "$test_dir/conf-$1-$HOST.cnf and" 1>&2
+ echo "$test_dir/conf-$HOST.cnf" 1>&2
+ echo "$test_dir/conf-$1.cnf" 1>&2
+ exit
+ fi
+}
+
+#########################################
+# Count how many computers we have ready#
+#########################################
+
+count_hosts(){
+ cnt=`grep "CHOOSE_host" $1 | awk '{for(i=1; i<=NF;i++) \
+ if(index($i, "CHOOSE_host") > 0) print $i;}' | sort | uniq | wc -l`
+ echo $cnt
+}
+
+conf=`choose_conf $RUN`
+count=`count_hosts $conf`
+avail=`echo $hosts | wc -w`
+if [ $count -gt $avail ]
+ then
+ echo "Not enough hosts"
+ echo "Needs: $count available: $avail ($avail_hosts)"
+ exit 1
+fi
+
+###
+# Make directories needed
+
+p=`pwd`
+run_dir=$install_dir/run-$RUN-mysql-$clone-$target
+res_dir=$base_dir/result-$RUN-mysql-$clone-$target/$DATE
+tar_dir=$base_dir/saved-results
+
+mkdir -p $run_dir $res_dir $tar_dir
+rm -rf $res_dir/* $run_dir/*
+
+
+###
+#
+# Do sed substitutions
+#
+cd $run_dir
+choose $conf $hosts > d.tmp.$$
+sed -e s,CHOOSE_dir,"$run_dir/run",g < d.tmp.$$ > my.cnf
+
+# Setup configuration
+$atrt Cdq my.cnf
+
+# Start...
+$atrt --report-file=report.txt --log-file=log.txt --testcase-file=$test_dir/$RUN-tests.txt my.cnf
+
+# Make tar-ball
+[ -f log.txt ] && mv log.txt $res_dir
+[ -f report.txt ] && mv report.txt $res_dir
+[ "`find . -name 'result*'`" ] && mv result* $res_dir
+cd $res_dir
+
+echo "date=$DATE" > info.txt
+echo "suite=$RUN" >> info.txt
+echo "clone=mysql-$clone" >> info.txt
+echo "arch=$target" >> info.txt
+find . | xargs chmod ugo+r
+
+cd ..
+p2=`pwd`
+cd ..
+tarfile=res.$RUN.$clone.$target.$DATE.$HOST.$$.tgz
+tar cfz $tar_dir/$tarfile `basename $p2`/$DATE
+
+if [ "$report" ]
+then
+ scp $tar_dir/$tarfile $result_host:$result_path/
+fi
+
+cd $p
+rm -rf $res_dir $run_dir
+
+if [ -z "$nolock" ]
+then
+ rm -f $LOCK
+fi
diff --git a/storage/ndb/test/run-test/conf-daily-basic-ndb08.txt b/storage/ndb/test/run-test/conf-daily-basic-ndb08.txt
deleted file mode 100644
index bcd809593f3..00000000000
--- a/storage/ndb/test/run-test/conf-daily-basic-ndb08.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-baseport: 14000
-basedir: /space/autotest
-mgm: CHOOSE_host1
-ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
-api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: /space/autotest/run
-
-[MGM DEFAULT]
-PortNumber: 14000
-ArbitrationRank: 1
-DataDir: .
diff --git a/storage/ndb/test/run-test/conf-daily-devel-ndb08.txt b/storage/ndb/test/run-test/conf-daily-devel-ndb08.txt
deleted file mode 100644
index 8b340e6a39d..00000000000
--- a/storage/ndb/test/run-test/conf-daily-devel-ndb08.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-baseport: 16000
-basedir: /space/autotest
-mgm: CHOOSE_host1
-ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
-api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: /space/autotest/run
-
-[MGM DEFAULT]
-PortNumber: 16000
-ArbitrationRank: 1
-DataDir: .
diff --git a/storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt b/storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt
deleted file mode 100644
index 45e6e25f030..00000000000
--- a/storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-baseport: 16000
-basedir: CHOOSE_dir
-mgm: CHOOSE_host1
-ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
-api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: CHOOSE_dir/run
-
-[MGM DEFAULT]
-PortNumber: 16000
-ArbitrationRank: 1
-DataDir: .
-
-[TCP DEFAULT]
-SendBufferMemory: 2M
diff --git a/storage/ndb/test/run-test/conf-daily-sql-ndb08.txt b/storage/ndb/test/run-test/conf-daily-sql-ndb08.txt
deleted file mode 100644
index 0d6a99f8d48..00000000000
--- a/storage/ndb/test/run-test/conf-daily-sql-ndb08.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-baseport: 16000
-basedir: /space/autotest
-mgm: CHOOSE_host1
-ndb: CHOOSE_host2 CHOOSE_host3
-mysqld: CHOOSE_host1 CHOOSE_host4
-mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: /space/autotest/run
-
-[MGM DEFAULT]
-PortNumber: 16000
-ArbitrationRank: 1
-DataDir: .
diff --git a/storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt b/storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt
deleted file mode 100644
index 0d6a99f8d48..00000000000
--- a/storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-baseport: 16000
-basedir: /space/autotest
-mgm: CHOOSE_host1
-ndb: CHOOSE_host2 CHOOSE_host3
-mysqld: CHOOSE_host1 CHOOSE_host4
-mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: /space/autotest/run
-
-[MGM DEFAULT]
-PortNumber: 16000
-ArbitrationRank: 1
-DataDir: .
diff --git a/storage/ndb/test/run-test/conf-dl145a.cnf b/storage/ndb/test/run-test/conf-dl145a.cnf
new file mode 100644
index 00000000000..ea344f1a62a
--- /dev/null
+++ b/storage/ndb/test/run-test/conf-dl145a.cnf
@@ -0,0 +1,23 @@
+[atrt]
+basedir = CHOOSE_dir
+baseport = 14000
+clusters = .2node
+
+[ndb_mgmd]
+
+[mysqld]
+skip-innodb
+skip-bdb
+
+[cluster_config.2node]
+ndb_mgmd = CHOOSE_host1
+ndbd = CHOOSE_host2,CHOOSE_host3
+ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1
+
+NoOfReplicas = 2
+IndexMemory = 100M
+DataMemory = 300M
+BackupMemory = 64M
+MaxNoOfConcurrentScans = 100
+MaxNoOfSavedMessages= 1000
+SendBufferMemory = 2M
diff --git a/storage/ndb/test/run-test/conf-dl145a.txt b/storage/ndb/test/run-test/conf-dl145a.txt
deleted file mode 100644
index d0a240f09d1..00000000000
--- a/storage/ndb/test/run-test/conf-dl145a.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-baseport: 14000
-basedir: /home/ndbdev/autotest/run
-mgm: CHOOSE_host1
-ndb: CHOOSE_host2 CHOOSE_host3
-api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: /home/ndbdev/autotest/run
-
-[MGM DEFAULT]
-PortNumber: 14000
-ArbitrationRank: 1
-DataDir: .
-
-[TCP DEFAULT]
-SendBufferMemory: 2M
diff --git a/storage/ndb/test/run-test/conf-ndbmaster.cnf b/storage/ndb/test/run-test/conf-ndbmaster.cnf
new file mode 100644
index 00000000000..417e2988d0d
--- /dev/null
+++ b/storage/ndb/test/run-test/conf-ndbmaster.cnf
@@ -0,0 +1,23 @@
+[atrt]
+basedir = CHOOSE_dir
+baseport = 14000
+clusters = .4node
+
+[ndb_mgmd]
+
+[mysqld]
+skip-innodb
+skip-bdb
+
+[cluster_config.4node]
+ndb_mgmd = CHOOSE_host1
+ndbd = CHOOSE_host2,CHOOSE_host3,CHOOSE_host2,CHOOSE_host3
+ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1
+
+NoOfReplicas = 2
+IndexMemory = 100M
+DataMemory = 300M
+BackupMemory = 64M
+MaxNoOfConcurrentScans = 100
+MaxNoOfSavedMessages= 1000
+SendBufferMemory = 2M
diff --git a/storage/ndb/test/run-test/conf-ndbmaster.txt b/storage/ndb/test/run-test/conf-ndbmaster.txt
deleted file mode 100644
index 9f50432f5e3..00000000000
--- a/storage/ndb/test/run-test/conf-ndbmaster.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-baseport: 14000
-basedir: CHOOSE_dir
-mgm: CHOOSE_host1
-ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
-api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: CHOOSE_dir/run
-
-[MGM DEFAULT]
-PortNumber: 14000
-ArbitrationRank: 1
-DataDir: .
-
-[TCP DEFAULT]
-SendBufferMemory: 2M
diff --git a/storage/ndb/test/run-test/conf-repl.cnf b/storage/ndb/test/run-test/conf-repl.cnf
new file mode 100644
index 00000000000..57eb2ee413e
--- /dev/null
+++ b/storage/ndb/test/run-test/conf-repl.cnf
@@ -0,0 +1,28 @@
+[atrt]
+basedir=CHOOSE_dir
+baseport=15000
+clusters= .master,.slave
+replicate= 1.master:1.slave
+
+[ndb_mgmd]
+
+[mysqld]
+skip-innodb
+skip-bdb
+
+[cluster_config]
+MaxNoOfSavedMessages= 1000
+DataMemory = 100M
+
+[cluster_config.master]
+NoOfReplicas = 2
+ndb_mgmd = CHOOSE_host1
+ndbd = CHOOSE_host2,CHOOSE_host3
+mysqld = CHOOSE_host1
+ndbapi= CHOOSE_host1
+
+[cluster_config.slave]
+NoOfReplicas = 1
+ndb_mgmd = CHOOSE_host4
+ndbd = CHOOSE_host4
+mysqld = CHOOSE_host4
diff --git a/storage/ndb/test/run-test/conf-shark.txt b/storage/ndb/test/run-test/conf-shark.txt
deleted file mode 100644
index d66d0280d8a..00000000000
--- a/storage/ndb/test/run-test/conf-shark.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-baseport: 14000
-basedir: /space/autotest
-mgm: CHOOSE_host1
-ndb: CHOOSE_host1 CHOOSE_host1
-api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
--- cluster config
-[DB DEFAULT]
-NoOfReplicas: 2
-IndexMemory: 100M
-DataMemory: 300M
-BackupMemory: 64M
-MaxNoOfConcurrentScans: 100
-DataDir: .
-FileSystemPath: /space/autotest/run
-
-[MGM DEFAULT]
-PortNumber: 14000
-ArbitrationRank: 1
-DataDir: .
-
-[TCP DEFAULT]
-SendBufferMemory: 2M
diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt
index c972d432375..4022dffa258 100644
--- a/storage/ndb/test/run-test/daily-basic-tests.txt
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt
@@ -525,10 +525,18 @@ max-time: 1000
cmd: testNodeRestart
args: -n Bug25554 T1
-max-time: 1000
+max-time: 3000
cmd: testNodeRestart
args: -n Bug25984
+max-time: 1000
+cmd: testNodeRestart
+args: -n Bug26457 T1
+
+max-time: 1000
+cmd: testNodeRestart
+args: -n Bug26481 T1
+
#
# DICT TESTS
max-time: 1500
diff --git a/storage/ndb/test/run-test/example-my.cnf b/storage/ndb/test/run-test/example-my.cnf
new file mode 100644
index 00000000000..99e1ce9f75b
--- /dev/null
+++ b/storage/ndb/test/run-test/example-my.cnf
@@ -0,0 +1,116 @@
+[atrt]
+basedir=/home/jonas/atrt
+baseport=10000
+clusters = .master
+clusters= .master,.slave
+replicate = 1.master:1.slave
+replicate = 2.master:2.slave
+
+[cluster_config]
+NoOfReplicas= 2
+IndexMemory= 10M
+DataMemory= 50M
+MaxNoOfConcurrentScans= 100
+Diskless = 1
+
+[cluster_config.master]
+ndb_mgmd = local1
+ndbd = local1,local1
+mysqld = local1,local1
+ndbapi= local1
+NoOfReplicas= 2
+
+[cluster_config.slave]
+ndb_mgmd = local1
+ndbd = local1
+ndbapi= local1
+mysqld = local1,local1
+NoOfReplicas= 1
+
+[mysqld]
+skip-innodb
+skip-bdb
+
+#
+# Generated by atrt
+# Mon May 29 23:27:49 2006
+
+[mysql_cluster.master]
+ndb-connectstring= local1:10000
+
+[cluster_config.ndb_mgmd.1.master]
+PortNumber= 10000
+
+[cluster_config.ndbd.1.master]
+FileSystemPath= /home/jonas/atrt/cluster.master/ndbd.1
+
+[cluster_config.ndbd.2.master]
+FileSystemPath= /home/jonas/atrt/cluster.master/ndbd.2
+
+[mysqld.1.master]
+datadir= /home/jonas/atrt/cluster.master/mysqld.1
+socket= /home/jonas/atrt/cluster.master/mysqld.1/mysql.sock
+port= 10001
+server-id= 1
+log-bin
+ndb-connectstring= local1:10000
+ndbcluster
+
+[client.1.master]
+socket= /home/jonas/atrt/cluster.master/mysqld.1/mysql.sock
+port= 10001
+
+[mysqld.2.master]
+datadir= /home/jonas/atrt/cluster.master/mysqld.2
+socket= /home/jonas/atrt/cluster.master/mysqld.2/mysql.sock
+port= 10002
+server-id= 2
+log-bin
+ndb-connectstring= local1:10000
+ndbcluster
+
+[client.2.master]
+socket= /home/jonas/atrt/cluster.master/mysqld.2/mysql.sock
+port= 10002
+
+[mysql_cluster.slave]
+ndb-connectstring= local1:10003
+
+[cluster_config.ndb_mgmd.1.slave]
+PortNumber= 10003
+
+[cluster_config.ndbd.1.slave]
+FileSystemPath= /home/jonas/atrt/cluster.slave/ndbd.1
+
+[mysqld.1.slave]
+datadir= /home/jonas/atrt/cluster.slave/mysqld.1
+socket= /home/jonas/atrt/cluster.slave/mysqld.1/mysql.sock
+port= 10004
+server-id= 3
+master-host= local1
+master-port= 10001
+master-user= root
+master-password= ""
+ndb-connectstring= local1:10003
+ndbcluster
+
+[client.1.slave]
+socket= /home/jonas/atrt/cluster.slave/mysqld.1/mysql.sock
+port= 10004
+
+[mysqld.2.slave]
+datadir= /home/jonas/atrt/cluster.slave/mysqld.2
+socket= /home/jonas/atrt/cluster.slave/mysqld.2/mysql.sock
+port= 10005
+server-id= 4
+master-host= local1
+master-port= 10002
+master-user= root
+master-password= ""
+ndb-connectstring= local1:10003
+ndbcluster
+
+[client.2.slave]
+socket= /home/jonas/atrt/cluster.slave/mysqld.2/mysql.sock
+port= 10005
+
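
Note: this example shows both halves of the file, the hand-written [atrt], [cluster_config*] and [mysqld] sections at the top and the sections atrt appends below the "# Generated by atrt" marker (connect strings, data directories, sockets and ports). The generated ports appear to be handed out consecutively from baseport; a toy allocator under that assumption (not the atrt code) reproduces the numbers above:

// Toy port allocator, assuming consecutive assignment from baseport in the
// order the processes appear. Hypothetical sketch; the real assignment is
// done by atrt when it generates the sections shown above.
#include <cstdio>

int main()
{
  const int baseport = 10000;              // matches baseport= in [atrt] above
  const char* procs[] = { "ndb_mgmd.1.master", "mysqld.1.master",
                          "mysqld.2.master",  "ndb_mgmd.1.slave",
                          "mysqld.1.slave",   "mysqld.2.slave" };
  int next = baseport;
  for (unsigned i = 0; i < sizeof(procs) / sizeof(procs[0]); i++)
    printf("%-18s -> %d\n", procs[i], next++);
  return 0;
}
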
diff --git a/storage/ndb/test/run-test/files.cpp b/storage/ndb/test/run-test/files.cpp
new file mode 100644
index 00000000000..231f7c88abc
--- /dev/null
+++ b/storage/ndb/test/run-test/files.cpp
@@ -0,0 +1,383 @@
+#include "atrt.hpp"
+#include <sys/types.h>
+#include <dirent.h>
+
+static bool create_directory(const char * path);
+
+bool
+setup_directories(atrt_config& config, int setup)
+{
+ /**
+ * 0 = validate
+ * 1 = setup
+ * 2 = setup+clean
+ */
+ for (size_t i = 0; i < config.m_clusters.size(); i++)
+ {
+ atrt_cluster& cluster = *config.m_clusters[i];
+ for (size_t j = 0; j<cluster.m_processes.size(); j++)
+ {
+ atrt_process& proc = *cluster.m_processes[j];
+ const char * dir = proc.m_proc.m_cwd.c_str();
+ struct stat sbuf;
+ int exists = 0;
+ if (lstat(dir, &sbuf) == 0)
+ {
+ if (S_ISDIR(sbuf.st_mode))
+ exists = 1;
+ else
+ exists = -1;
+ }
+
+ switch(setup){
+ case 0:
+ switch(exists){
+ case 0:
+ g_logger.error("Could not find directory: %s", dir);
+ return false;
+ case -1:
+ g_logger.error("%s is not a directory!", dir);
+ return false;
+ }
+ break;
+ case 1:
+ if (exists == -1)
+ {
+ g_logger.error("%s is not a directory!", dir);
+ return false;
+ }
+ break;
+ case 2:
+ if (exists == 1)
+ {
+ if (!remove_dir(dir))
+ {
+ g_logger.error("Failed to remove %s!", dir);
+ return false;
+ }
+ exists = 0;
+ break;
+ }
+ else if (exists == -1)
+ {
+ if (!unlink(dir))
+ {
+ g_logger.error("Failed to remove %s!", dir);
+ return false;
+ }
+ exists = 0;
+ }
+ }
+ if (exists != 1)
+ {
+ if (!create_directory(dir))
+ {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+static
+void
+printfile(FILE* out, Properties& props, const char * section, ...)
+{
+ Properties::Iterator it (&props);
+ const char * name = it.first();
+ if (name)
+ {
+ va_list ap;
+ va_start(ap, section);
+ /* const int ret = */ vfprintf(out, section, ap);
+ va_end(ap);
+ fprintf(out, "\n");
+
+ for (; name; name = it.next())
+ {
+ const char* val;
+ props.get(name, &val);
+ fprintf(out, "%s %s\n", name + 2, val);
+ }
+ fprintf(out, "\n");
+ }
+ fflush(out);
+}
+
+bool
+setup_files(atrt_config& config, int setup, int sshx)
+{
+ /**
+ * 0 = validate
+ * 1 = setup
+ * 2 = setup+clean
+ */
+ BaseString mycnf;
+ mycnf.assfmt("%s/my.cnf", g_basedir);
+
+ if (mycnf != g_my_cnf)
+ {
+ struct stat sbuf;
+ int ret = lstat(mycnf.c_str(), &sbuf);
+
+ if (ret == 0)
+ {
+ if (unlink(mycnf.c_str()) != 0)
+ {
+ g_logger.error("Failed to remove %s", mycnf.c_str());
+ return false;
+ }
+ }
+
+ BaseString cp = "cp ";
+ cp.appfmt("%s %s", g_my_cnf, mycnf.c_str());
+ if (system(cp.c_str()) != 0)
+ {
+ g_logger.error("Failed to '%s'", cp.c_str());
+ return false;
+ }
+ }
+
+ if (setup == 2 || config.m_generated)
+ {
+ /**
+ * Do mysql_install_db
+ */
+ for (size_t i = 0; i < config.m_clusters.size(); i++)
+ {
+ atrt_cluster& cluster = *config.m_clusters[i];
+ for (size_t j = 0; j<cluster.m_processes.size(); j++)
+ {
+ atrt_process& proc = *cluster.m_processes[j];
+ if (proc.m_type == atrt_process::AP_MYSQLD)
+ {
+ const char * val;
+ require(proc.m_options.m_loaded.get("--datadir=", &val));
+ BaseString tmp;
+ tmp.assfmt("%s/bin/mysql_install_db --datadir=%s > /dev/null 2>&1",
+ g_prefix, val);
+ if (system(tmp.c_str()) != 0)
+ {
+ g_logger.error("Failed to mysql_install_db for %s",
+ proc.m_proc.m_cwd.c_str());
+ }
+ else
+ {
+ g_logger.info("mysql_install_db for %s",
+ proc.m_proc.m_cwd.c_str());
+ }
+ }
+ }
+ }
+ }
+
+ FILE * out = NULL;
+ if (config.m_generated == false)
+ {
+ g_logger.info("Nothing configured...");
+ }
+ else
+ {
+ out = fopen(mycnf.c_str(), "a+");
+ if (out == 0)
+ {
+ g_logger.error("Failed to open %s for append", mycnf.c_str());
+ return false;
+ }
+ time_t now = time(0);
+ fprintf(out, "#\n# Generated by atrt\n");
+ fprintf(out, "# %s\n", ctime(&now));
+ }
+
+ for (size_t i = 0; i < config.m_clusters.size(); i++)
+ {
+ atrt_cluster& cluster = *config.m_clusters[i];
+ if (out)
+ {
+ Properties::Iterator it(&cluster.m_options.m_generated);
+ printfile(out, cluster.m_options.m_generated,
+ "[mysql_cluster%s]", cluster.m_name.c_str());
+ }
+
+ for (size_t j = 0; j<cluster.m_processes.size(); j++)
+ {
+ atrt_process& proc = *cluster.m_processes[j];
+
+ if (out)
+ {
+ switch(proc.m_type){
+ case atrt_process::AP_NDB_MGMD:
+ printfile(out, proc.m_options.m_generated,
+ "[cluster_config.ndb_mgmd.%d%s]",
+ proc.m_index, proc.m_cluster->m_name.c_str());
+ break;
+ case atrt_process::AP_NDBD:
+ printfile(out, proc.m_options.m_generated,
+ "[cluster_config.ndbd.%d%s]",
+ proc.m_index, proc.m_cluster->m_name.c_str());
+ break;
+ case atrt_process::AP_MYSQLD:
+ printfile(out, proc.m_options.m_generated,
+ "[mysqld.%d%s]",
+ proc.m_index, proc.m_cluster->m_name.c_str());
+ break;
+ case atrt_process::AP_NDB_API:
+ break;
+ case atrt_process::AP_CLIENT:
+ printfile(out, proc.m_options.m_generated,
+ "[client.%d%s]",
+ proc.m_index, proc.m_cluster->m_name.c_str());
+ break;
+ case atrt_process::AP_ALL:
+ case atrt_process::AP_CLUSTER:
+ abort();
+ }
+ }
+
+ /**
+ * Create env.sh
+ */
+ BaseString tmp;
+ tmp.assfmt("%s/env.sh", proc.m_proc.m_cwd.c_str());
+ char **env = BaseString::argify(0, proc.m_proc.m_env.c_str());
+ if (env[0])
+ {
+ Vector<BaseString> keys;
+ FILE *fenv = fopen(tmp.c_str(), "w+");
+ if (fenv == 0)
+ {
+ g_logger.error("Failed to open %s for writing", tmp.c_str());
+ return false;
+ }
+ for (size_t k = 0; env[k]; k++)
+ {
+ tmp = env[k];
+ int pos = tmp.indexOf('=');
+ require(pos > 0);
+ env[k][pos] = 0;
+ fprintf(fenv, "%s=\"%s\"\n", env[k], env[k]+pos+1);
+ keys.push_back(env[k]);
+ free(env[k]);
+ }
+ fprintf(fenv, "PATH=%s/bin:%s/libexec:$PATH\n", g_prefix, g_prefix);
+ keys.push_back("PATH");
+ for (size_t k = 0; k<keys.size(); k++)
+ fprintf(fenv, "export %s\n", keys[k].c_str());
+ fflush(fenv);
+ fclose(fenv);
+ }
+ free(env);
+
+ tmp.assfmt("%s/ssh-login.sh", proc.m_proc.m_cwd.c_str());
+ FILE* fenv = fopen(tmp.c_str(), "w+");
+ if (fenv == 0)
+ {
+ g_logger.error("Failed to open %s for writing", tmp.c_str());
+ return false;
+ }
+ fprintf(fenv, "#!/bin/sh\n");
+ fprintf(fenv, "cd %s\n", proc.m_proc.m_cwd.c_str());
+ fprintf(fenv, "[ -f /etc/profile ] && . /etc/profile\n");
+ fprintf(fenv, ". env.sh\n");
+ fprintf(fenv, "ulimit -Sc unlimited\n");
+ fprintf(fenv, "bash -i");
+ fflush(fenv);
+ fclose(fenv);
+ }
+ }
+
+ if (out)
+ {
+ fflush(out);
+ fclose(out);
+ }
+
+ return true;
+}
+
+static
+bool
+create_directory(const char * path)
+{
+ BaseString tmp(path);
+ Vector<BaseString> list;
+ if (tmp.split(list, "/") == 0)
+ {
+ g_logger.error("Failed to create directory: %s", tmp.c_str());
+ return false;
+ }
+
+ BaseString cwd = "/";
+ for (size_t i = 0; i < list.size(); i++)
+ {
+ cwd.append(list[i].c_str());
+ cwd.append("/");
+ mkdir(cwd.c_str(), S_IRUSR | S_IWUSR | S_IXUSR | S_IXGRP | S_IRGRP);
+ }
+
+ struct stat sbuf;
+ if (lstat(path, &sbuf) != 0 ||
+ !S_ISDIR(sbuf.st_mode))
+ {
+ g_logger.error("Failed to create directory: %s (%s)",
+ tmp.c_str(),
+ cwd.c_str());
+ return false;
+ }
+
+ return true;
+}
+
+bool
+remove_dir(const char * path, bool inclusive)
+{
+ DIR* dirp = opendir(path);
+
+ if (dirp == 0)
+ {
+ if(errno != ENOENT)
+ {
+ g_logger.error("Failed to remove >%s< errno: %d %s",
+ path, errno, strerror(errno));
+ return false;
+ }
+ return true;
+ }
+
+ struct dirent * dp;
+ BaseString name = path;
+ name.append("/");
+ while ((dp = readdir(dirp)) != NULL)
+ {
+ if ((strcmp(".", dp->d_name) != 0) && (strcmp("..", dp->d_name) != 0))
+ {
+ BaseString tmp = name;
+ tmp.append(dp->d_name);
+
+ if (remove(tmp.c_str()) == 0)
+ {
+ continue;
+ }
+
+ if (!remove_dir(tmp.c_str()))
+ {
+ closedir(dirp);
+ return false;
+ }
+ }
+ }
+
+ closedir(dirp);
+ if (inclusive)
+ {
+ if (rmdir(path) != 0)
+ {
+ g_logger.error("Failed to remove >%s< errno: %d %s",
+ path, errno, strerror(errno));
+ return false;
+ }
+ }
+ return true;
+}
+
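
Note: create_directory above builds the target path one component at a time with mkdir(2), the equivalent of mkdir -p, and then verifies the result with lstat. A self-contained sketch of the same technique (illustrative only; create_path and the demo path are hypothetical, not the atrt version):

// mkdir -p style directory creation: create each prefix of the path,
// ignoring "already exists" failures, then verify the final result.
#include <string>
#include <sys/stat.h>
#include <sys/types.h>

static bool create_path(const std::string& path)
{
  std::string cur;
  for (std::string::size_type pos = 0; pos != std::string::npos; )
  {
    std::string::size_type next = path.find('/', pos + 1);
    cur = path.substr(0, next == std::string::npos ? path.size() : next);
    if (!cur.empty())
      mkdir(cur.c_str(), 0755);            // EEXIST is fine, checked below
    if (next == std::string::npos)
      break;
    pos = next;
  }
  struct stat sbuf;
  return lstat(path.c_str(), &sbuf) == 0 && S_ISDIR(sbuf.st_mode);
}

int main()
{
  // Example path only; any writable location would do.
  return create_path("/tmp/atrt-demo/a/b") ? 0 : 1;
}
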
diff --git a/storage/ndb/test/run-test/main.cpp b/storage/ndb/test/run-test/main.cpp
index aef041d24d6..2e8d6bfde6d 100644
--- a/storage/ndb/test/run-test/main.cpp
+++ b/storage/ndb/test/run-test/main.cpp
@@ -14,20 +14,19 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#include <ndb_global.h>
-#include <getarg.h>
-#include <BaseString.hpp>
-#include <Parser.hpp>
+#include "atrt.hpp"
+#include <my_sys.h>
+#include <my_getopt.h>
+
#include <NdbOut.hpp>
-#include <Properties.hpp>
#include <NdbAutoPtr.hpp>
-#include "run-test.hpp"
#include <SysLogHandler.hpp>
#include <FileLogHandler.hpp>
-#include <mgmapi.h>
-#include "CpcClient.hpp"
+#include <NdbSleep.h>
+
+#define PATH_SEPARATOR "/"
/** Global variables */
static const char progname[] = "ndb_atrt";
@@ -36,76 +35,198 @@ static const char * g_analyze_progname = "atrt-analyze-result.sh";
static const char * g_clear_progname = "atrt-clear-result.sh";
static const char * g_setup_progname = "atrt-setup.sh";
-static const char * g_setup_path = 0;
-static const char * g_process_config_filename = "d.txt";
static const char * g_log_filename = 0;
static const char * g_test_case_filename = 0;
static const char * g_report_filename = 0;
-static const char * g_default_user = 0;
-static const char * g_default_base_dir = 0;
-static int g_default_base_port = 0;
-static int g_mysqld_use_base = 1;
+static int g_do_setup = 0;
+static int g_do_deploy = 0;
+static int g_do_sshx = 0;
+static int g_do_start = 0;
+static int g_do_quit = 0;
-static int g_report = 0;
-static int g_verbosity = 0;
+static int g_help = 0;
+static int g_verbosity = 1;
static FILE * g_report_file = 0;
static FILE * g_test_case_file = stdin;
+static int g_mode = 0;
Logger g_logger;
atrt_config g_config;
-
-static int g_mode_bench = 0;
-static int g_mode_regression = 0;
-static int g_mode_interactive = 0;
-static int g_mode = 0;
-
-static
-struct getargs args[] = {
- { "process-config", 0, arg_string, &g_process_config_filename, 0, 0 },
- { "setup-path", 0, arg_string, &g_setup_path, 0, 0 },
- { 0, 'v', arg_counter, &g_verbosity, 0, 0 },
- { "log-file", 0, arg_string, &g_log_filename, 0, 0 },
- { "testcase-file", 'f', arg_string, &g_test_case_filename, 0, 0 },
- { 0, 'R', arg_flag, &g_report, 0, 0 },
- { "report-file", 0, arg_string, &g_report_filename, 0, 0 },
- { "interactive", 'i', arg_flag, &g_mode_interactive, 0, 0 },
- { "regression", 'r', arg_flag, &g_mode_regression, 0, 0 },
- { "bench", 'b', arg_flag, &g_mode_bench, 0, 0 },
+const char * g_user = 0;
+int g_baseport = 10000;
+int g_fqpn = 0;
+int g_default_ports = 0;
+
+const char * g_cwd = 0;
+const char * g_basedir = 0;
+const char * g_my_cnf = 0;
+const char * g_prefix = 0;
+const char * g_clusters = 0;
+BaseString g_replicate;
+const char *save_file = 0;
+char *save_extra_file = 0;
+const char *save_group_suffix = 0;
+const char * g_dummy;
+char * g_env_path = 0;
+
+/** Dummy, extern declared in ndb_opts.h */
+int g_print_full_config = 0, opt_ndb_shm;
+my_bool opt_core;
+
+static struct my_option g_options[] =
+{
+ { "help", '?', "Display this help and exit.",
+ (gptr*) &g_help, (gptr*) &g_help,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "version", 'V', "Output version information and exit.", 0, 0, 0,
+ GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "clusters", 256, "Cluster",
+ (gptr*) &g_clusters, (gptr*) &g_clusters,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "replicate", 1024, "replicate",
+ (gptr*) &g_dummy, (gptr*) &g_dummy,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "log-file", 256, "log-file",
+ (gptr*) &g_log_filename, (gptr*) &g_log_filename,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "testcase-file", 'f', "testcase-file",
+ (gptr*) &g_test_case_filename, (gptr*) &g_test_case_filename,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "report-file", 'r', "report-file",
+ (gptr*) &g_report_filename, (gptr*) &g_report_filename,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "basedir", 256, "Base path",
+ (gptr*) &g_basedir, (gptr*) &g_basedir,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "baseport", 256, "Base port",
+ (gptr*) &g_baseport, (gptr*) &g_baseport,
+ 0, GET_INT, REQUIRED_ARG, g_baseport, 0, 0, 0, 0, 0},
+ { "prefix", 256, "mysql install dir",
+ (gptr*) &g_prefix, (gptr*) &g_prefix,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "verbose", 'v', "Verbosity",
+ (gptr*) &g_verbosity, (gptr*) &g_verbosity,
+ 0, GET_INT, REQUIRED_ARG, g_verbosity, 0, 0, 0, 0, 0},
+ { "configure", 256, "configure",
+ (gptr*) &g_do_setup, (gptr*) &g_do_setup,
+ 0, GET_INT, REQUIRED_ARG, g_do_setup, 0, 0, 0, 0, 0 },
+ { "deploy", 256, "deploy",
+ (gptr*) &g_do_deploy, (gptr*) &g_do_deploy,
+ 0, GET_INT, REQUIRED_ARG, g_do_deploy, 0, 0, 0, 0, 0 },
+ { "sshx", 256, "sshx",
+ (gptr*) &g_do_sshx, (gptr*) &g_do_sshx,
+ 0, GET_INT, REQUIRED_ARG, g_do_sshx, 0, 0, 0, 0, 0 },
+ { "start", 256, "start",
+ (gptr*) &g_do_start, (gptr*) &g_do_start,
+ 0, GET_INT, REQUIRED_ARG, g_do_start, 0, 0, 0, 0, 0 },
+ { "fqpn", 256, "Fully qualified path-names ",
+ (gptr*) &g_fqpn, (gptr*) &g_fqpn,
+ 0, GET_INT, REQUIRED_ARG, g_fqpn, 0, 0, 0, 0, 0 },
+ { "default-ports", 256, "Use default ports when possible",
+ (gptr*) &g_default_ports, (gptr*) &g_default_ports,
+ 0, GET_INT, REQUIRED_ARG, g_default_ports, 0, 0, 0, 0, 0 },
+ { "mode", 256, "Mode 0=interactive 1=regression 2=bench",
+ (gptr*) &g_mode, (gptr*) &g_mode,
+ 0, GET_INT, REQUIRED_ARG, g_mode, 0, 0, 0, 0, 0 },
+ { "quit", 256, "Quit before starting tests",
+    (gptr*) &g_do_quit, (gptr*) &g_do_quit,
+ 0, GET_BOOL, NO_ARG, g_do_quit, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
-const int arg_count = 10;
+const int p_ndb = atrt_process::AP_NDB_MGMD | atrt_process::AP_NDBD;
+const int p_servers = atrt_process::AP_MYSQLD;
+const int p_clients = atrt_process::AP_CLIENT | atrt_process::AP_NDB_API;
int
-main(int argc, const char ** argv){
+main(int argc, char ** argv)
+{
ndb_init();
bool restart = true;
int lineno = 1;
int test_no = 1;
+ int return_code = 1;
- const int p_ndb = atrt_process::NDB_MGM | atrt_process::NDB_DB;
- const int p_servers = atrt_process::MYSQL_SERVER | atrt_process::NDB_REP;
- const int p_clients = atrt_process::MYSQL_CLIENT | atrt_process::NDB_API;
-
g_logger.setCategory(progname);
g_logger.enable(Logger::LL_ALL);
g_logger.createConsoleHandler();
if(!parse_args(argc, argv))
goto end;
-
+
g_logger.info("Starting...");
- if(!setup_config(g_config))
+ g_config.m_generated = false;
+ g_config.m_replication = g_replicate;
+ if (!setup_config(g_config))
+ goto end;
+
+ if (!configure(g_config, g_do_setup))
goto end;
+ g_logger.info("Setting up directories");
+ if (!setup_directories(g_config, g_do_setup))
+ goto end;
+
+ if (g_do_setup)
+ {
+ g_logger.info("Setting up files");
+ if (!setup_files(g_config, g_do_setup, g_do_sshx))
+ goto end;
+ }
+
+ if (g_do_deploy)
+ {
+ if (!deploy(g_config))
+ goto end;
+ }
+
+ if (g_do_quit)
+ {
+ return_code = 0;
+ goto end;
+ }
+
+ if(!setup_hosts(g_config))
+ goto end;
+
+ if (g_do_sshx)
+ {
+ g_logger.info("Starting xterm-ssh");
+ if (!sshx(g_config, g_do_sshx))
+ goto end;
+
+ g_logger.info("Done...sleeping");
+ while(true)
+ {
+ NdbSleep_SecSleep(1);
+ }
+ return_code = 0;
+ goto end;
+ }
+
g_logger.info("Connecting to hosts");
if(!connect_hosts(g_config))
goto end;
- if(!setup_hosts(g_config))
+ if (g_do_start && !g_test_case_filename)
+ {
+ g_logger.info("Starting server processes: %x", g_do_start);
+ if (!start(g_config, g_do_start))
+ goto end;
+
+ g_logger.info("Done...sleeping");
+ while(true)
+ {
+ NdbSleep_SecSleep(1);
+ }
+ return_code = 0;
goto end;
+ }
+ return_code = 0;
+
/**
* Main loop
*/
@@ -114,37 +235,25 @@ main(int argc, const char ** argv){
* Do we need to restart ndb
*/
if(restart){
- g_logger.info("(Re)starting ndb processes");
+      g_logger.info("(Re)starting server processes");
if(!stop_processes(g_config, ~0))
goto end;
- if(!start_processes(g_config, atrt_process::NDB_MGM))
+ if (!setup_directories(g_config, 2))
goto end;
- if(!connect_ndb_mgm(g_config)){
- goto end;
- }
-
- if(!start_processes(g_config, atrt_process::NDB_DB))
+ if (!setup_files(g_config, 2, 1))
goto end;
- if(!wait_ndb(g_config, NDB_MGM_NODE_STATUS_NOT_STARTED))
+ if(!setup_hosts(g_config))
goto end;
- for(Uint32 i = 0; i<3; i++)
- if(wait_ndb(g_config, NDB_MGM_NODE_STATUS_STARTED))
- goto started;
-
- goto end;
-
- started:
- if(!start_processes(g_config, p_servers))
- goto end;
-
- g_logger.info("Ndb start completed");
+ if (!start(g_config, p_ndb | p_servers))
+ goto end;
+ g_logger.info("All servers start completed");
}
- const int start_line = lineno;
+ // const int start_line = lineno;
atrt_testcase test_case;
if(!read_test_case(g_test_case_file, test_case, lineno))
goto end;
@@ -165,7 +274,7 @@ main(int argc, const char ** argv){
const time_t start = time(0);
time_t now = start;
do {
- if(!update_status(g_config, atrt_process::ALL))
+ if(!update_status(g_config, atrt_process::AP_ALL))
goto end;
int count = 0;
@@ -189,7 +298,7 @@ main(int argc, const char ** argv){
result = ERR_MAX_TIME_ELAPSED;
break;
}
- sleep(1);
+ NdbSleep_SecSleep(1);
} while(true);
const time_t elapsed = time(0) - start;
@@ -197,7 +306,8 @@ main(int argc, const char ** argv){
if(!stop_processes(g_config, p_clients))
goto end;
- if(!gather_result(g_config, &result))
+ int tmp, *rp = result ? &tmp : &result;
+ if(!gather_result(g_config, rp))
goto end;
g_logger.info("#%d %s(%d)",
@@ -205,29 +315,35 @@ main(int argc, const char ** argv){
(result == 0 ? "OK" : "FAILED"), result);
if(g_report_file != 0){
- fprintf(g_report_file, "%s %s ; %d ; %d ; %ld\n",
- test_case.m_command.c_str(),
- test_case.m_args.c_str(),
- test_no, result, elapsed);
+ fprintf(g_report_file, "%s ; %d ; %d ; %ld\n",
+ test_case.m_name.c_str(), test_no, result, elapsed);
fflush(g_report_file);
}
- if(test_case.m_report || g_mode_bench || (g_mode_regression && result)){
- BaseString tmp;
- tmp.assfmt("result.%d", test_no);
- if(rename("result", tmp.c_str()) != 0){
- g_logger.critical("Failed to rename %s as %s",
- "result", tmp.c_str());
- goto end;
- }
- }
-
- if(g_mode_interactive && result){
+ if(g_mode == 0 && result){
g_logger.info
("Encountered failed test in interactive mode - terminating");
break;
}
+ BaseString resdir;
+ resdir.assfmt("result.%d", test_no);
+ remove_dir(resdir.c_str(), true);
+
+ if(test_case.m_report || g_mode == 2 || (g_mode && result))
+ {
+ if(rename("result", resdir.c_str()) != 0)
+ {
+ g_logger.critical("Failed to rename %s as %s",
+ "result", resdir.c_str());
+ goto end;
+ }
+ }
+ else
+ {
+ remove_dir("result", true);
+ }
+
if(result != 0){
restart = true;
} else {
@@ -247,276 +363,254 @@ main(int argc, const char ** argv){
g_test_case_file = 0;
}
- stop_processes(g_config, atrt_process::ALL);
+ stop_processes(g_config, atrt_process::AP_ALL);
+ return return_code;
+}
+
+static
+my_bool
+get_one_option(int arg, const struct my_option * opt, char * value)
+{
+ if (arg == 1024)
+ {
+ if (g_replicate.length())
+ g_replicate.append(";");
+ g_replicate.append(value);
+ return 1;
+ }
return 0;
}
bool
-parse_args(int argc, const char** argv){
- int optind = 0;
- if(getarg(args, arg_count, argc, argv, &optind)) {
- arg_printusage(args, arg_count, progname, "");
+parse_args(int argc, char** argv)
+{
+ char buf[2048];
+ if (getcwd(buf, sizeof(buf)) == 0)
+ {
+ g_logger.error("Unable to get current working directory");
return false;
}
-
- if(g_log_filename != 0){
- g_logger.removeConsoleHandler();
- g_logger.addHandler(new FileLogHandler(g_log_filename));
+ g_cwd = strdup(buf);
+
+ struct stat sbuf;
+ BaseString mycnf;
+ if (argc > 1 && lstat(argv[argc-1], &sbuf) == 0)
+ {
+ mycnf.append(g_cwd);
+ mycnf.append(PATH_SEPARATOR);
+ mycnf.append(argv[argc-1]);
}
-
+ else
{
- int tmp = Logger::LL_WARNING - g_verbosity;
- tmp = (tmp < Logger::LL_DEBUG ? Logger::LL_DEBUG : tmp);
- g_logger.disable(Logger::LL_ALL);
- g_logger.enable(Logger::LL_ON);
- g_logger.enable((Logger::LoggerLevel)tmp, Logger::LL_ALERT);
+ mycnf.append(g_cwd);
+ mycnf.append(PATH_SEPARATOR);
+ mycnf.append("my.cnf");
+ if (lstat(mycnf.c_str(), &sbuf) != 0)
+ {
+ g_logger.error("Unable to stat %s", mycnf.c_str());
+ return false;
+ }
}
+ g_logger.info("Bootstrapping using %s", mycnf.c_str());
+
+ const char *groups[] = { "atrt", 0 };
+ int ret = load_defaults(mycnf.c_str(), groups, &argc, &argv);
+
+ save_file = my_defaults_file;
+ save_extra_file = my_defaults_extra_file;
+ save_group_suffix = my_defaults_group_suffix;
-
- if(!g_process_config_filename){
- g_logger.critical("Process config not specified!");
+ if (save_extra_file)
+ {
+ g_logger.error("--defaults-extra-file(%s) is not supported...",
+ save_extra_file);
return false;
}
- if(!g_setup_path){
- char buf[1024];
- if(getcwd(buf, sizeof(buf))){
- g_setup_path = strdup(buf);
- g_logger.info("Setup path not specified, using %s", buf);
- } else {
- g_logger.critical("Setup path not specified!\n");
- return false;
- }
- }
-
- if(g_report & !g_report_filename){
- g_report_filename = "report.txt";
+ if (ret || handle_options(&argc, &argv, g_options, get_one_option))
+ {
+ g_logger.error("Failed to load defaults/handle_options");
+ return false;
}
- if(g_report_filename){
- g_report_file = fopen(g_report_filename, "w");
- if(g_report_file == 0){
- g_logger.critical("Unable to create report file: %s", g_report_filename);
- return false;
+ if (argc >= 2)
+ {
+ const char * arg = argv[argc-2];
+ while(* arg)
+ {
+ switch(* arg){
+ case 'c':
+ g_do_setup = (g_do_setup == 0) ? 1 : g_do_setup;
+ break;
+ case 'C':
+ g_do_setup = 2;
+ break;
+ case 'd':
+ g_do_deploy = 1;
+ break;
+ case 'x':
+ g_do_sshx = atrt_process::AP_CLIENT | atrt_process::AP_NDB_API;
+ break;
+ case 'X':
+ g_do_sshx = atrt_process::AP_ALL;
+ break;
+ case 's':
+ g_do_start = p_ndb;
+ break;
+ case 'S':
+ g_do_start = p_ndb | p_servers;
+ break;
+ case 'f':
+ g_fqpn = 1;
+ break;
+ case 'q':
+ g_do_quit = 1;
+ break;
+ default:
+ g_logger.error("Unknown switch '%c'", *arg);
+ return false;
+ }
+ arg++;
}
}
- if(g_test_case_filename){
- g_test_case_file = fopen(g_test_case_filename, "r");
- if(g_test_case_file == 0){
- g_logger.critical("Unable to open file: %s", g_test_case_filename);
- return false;
- }
+ if(g_log_filename != 0)
+ {
+ g_logger.removeConsoleHandler();
+ g_logger.addHandler(new FileLogHandler(g_log_filename));
}
- int sum = g_mode_interactive + g_mode_regression + g_mode_bench;
- if(sum == 0){
- g_mode_interactive = 1;
- }
-
- if(sum > 1){
- g_logger.critical
- ("Only one of bench/regression/interactive can be specified");
- return false;
+ {
+ int tmp = Logger::LL_WARNING - g_verbosity;
+ tmp = (tmp < Logger::LL_DEBUG ? Logger::LL_DEBUG : tmp);
+ g_logger.disable(Logger::LL_ALL);
+ g_logger.enable(Logger::LL_ON);
+ g_logger.enable((Logger::LoggerLevel)tmp, Logger::LL_ALERT);
}
- g_default_user = strdup(getenv("LOGNAME"));
-
- return true;
-}
-
-
-static
-atrt_host *
-find(const BaseString& host, Vector<atrt_host> & hosts){
- for(size_t i = 0; i<hosts.size(); i++){
- if(hosts[i].m_hostname == host){
- return &hosts[i];
- }
+ if(!g_basedir)
+ {
+ g_basedir = g_cwd;
+ g_logger.info("basedir not specified, using %s", g_basedir);
}
- return 0;
-}
-bool
-setup_config(atrt_config& config){
-
- FILE * f = fopen(g_process_config_filename, "r");
- if(!f){
- g_logger.critical("Failed to open process config file: %s",
- g_process_config_filename);
- return false;
+ if (!g_prefix)
+ {
+ g_prefix = DEFAULT_PREFIX;
}
- bool result = true;
-
- int lineno = 0;
- char buf[2048];
- BaseString connect_string;
- int mysql_port_offset = 0;
- while(fgets(buf, 2048, f)){
- lineno++;
-
- BaseString tmp(buf);
- tmp.trim(" \t\n\r");
-
- if(tmp.length() == 0 || tmp == "" || tmp.c_str()[0] == '#')
- continue;
-
- Vector<BaseString> split1;
- if(tmp.split(split1, ":", 2) != 2){
- g_logger.warning("Invalid line %d in %s - ignoring",
- lineno, g_process_config_filename);
- continue;
+
+ /**
+ * Add path to atrt-*.sh
+ */
+ {
+ BaseString tmp;
+ const char* env = getenv("PATH");
+ if (env && strlen(env))
+ {
+ tmp.assfmt("PATH=%s:%s/mysql-test/ndb",
+ env, g_prefix);
}
-
- if(split1[0].trim() == "basedir"){
- g_default_base_dir = strdup(split1[1].trim().c_str());
- continue;
+ else
+ {
+ tmp.assfmt("PATH=%s/mysql-test/ndb", g_prefix);
}
+ g_env_path = strdup(tmp.c_str());
+ putenv(g_env_path);
+ }
+
+ if (g_help)
+ {
+ my_print_help(g_options);
+ my_print_variables(g_options);
+ return 0;
+ }
- if(split1[0].trim() == "baseport"){
- g_default_base_port = atoi(split1[1].trim().c_str());
- continue;
+ if(g_test_case_filename)
+ {
+ g_test_case_file = fopen(g_test_case_filename, "r");
+ if(g_test_case_file == 0)
+ {
+ g_logger.critical("Unable to open file: %s", g_test_case_filename);
+ return false;
}
+ if (g_do_setup == 0)
+ g_do_setup = 2;
+
+ if (g_do_start == 0)
+ g_do_start = p_ndb | p_servers;
+
+ if (g_mode == 0)
+ g_mode = 1;
- if(split1[0].trim() == "user"){
- g_default_user = strdup(split1[1].trim().c_str());
- continue;
+ if (g_do_sshx)
+ {
+      g_logger.critical("sshx specified...not possible with testfile");
+ return false;
}
-
- if(split1[0].trim() == "mysqld-use-base" && split1[1].trim() == "no"){
- g_mysqld_use_base = 0;
- continue;
+ }
+
+ if (g_do_setup == 0)
+ {
+ BaseString tmp;
+ tmp.append(g_basedir);
+ tmp.append(PATH_SEPARATOR);
+ tmp.append("my.cnf");
+ if (lstat(tmp.c_str(), &sbuf) != 0)
+ {
+ g_logger.error("Unable to stat %s", tmp.c_str());
+ return false;
}
- Vector<BaseString> hosts;
- if(split1[1].trim().split(hosts) <= 0){
- g_logger.warning("Invalid line %d in %s - ignoring",
- lineno, g_process_config_filename);
+ if (!S_ISREG(sbuf.st_mode))
+ {
+ g_logger.error("%s is not a regular file", tmp.c_str());
+ return false;
}
- // 1 - Check hosts
- for(size_t i = 0; i<hosts.size(); i++){
- Vector<BaseString> tmp;
- hosts[i].split(tmp, ":");
- BaseString hostname = tmp[0].trim();
- BaseString base_dir;
- if(tmp.size() >= 2)
- base_dir = tmp[1];
- else if(g_default_base_dir == 0){
- g_logger.critical("Basedir not specified...");
- return false;
- }
-
- atrt_host * host_ptr;
- if((host_ptr = find(hostname, config.m_hosts)) == 0){
- atrt_host host;
- host.m_index = config.m_hosts.size();
- host.m_cpcd = new SimpleCpcClient(hostname.c_str(), 1234);
- host.m_base_dir = (base_dir.empty() ? g_default_base_dir : base_dir);
- host.m_user = g_default_user;
- host.m_hostname = hostname.c_str();
- config.m_hosts.push_back(host);
- } else {
- if(!base_dir.empty() && (base_dir == host_ptr->m_base_dir)){
- g_logger.critical("Inconsistent base dir definition for host %s"
- ", \"%s\" != \"%s\"", hostname.c_str(),
- base_dir.c_str(), host_ptr->m_base_dir.c_str());
- return false;
- }
- }
- }
-
- for(size_t i = 0; i<hosts.size(); i++){
- BaseString & tmp = hosts[i];
- atrt_host * host = find(tmp, config.m_hosts);
- BaseString & dir = host->m_base_dir;
-
- const int index = config.m_processes.size() + 1;
-
- atrt_process proc;
- proc.m_index = index;
- proc.m_host = host;
- proc.m_proc.m_id = -1;
- proc.m_proc.m_type = "temporary";
- proc.m_proc.m_owner = "atrt";
- proc.m_proc.m_group = "group";
- proc.m_proc.m_cwd.assign(dir).append("/run/");
- proc.m_proc.m_stdout = "log.out";
- proc.m_proc.m_stderr = "2>&1";
- proc.m_proc.m_runas = proc.m_host->m_user;
- proc.m_proc.m_ulimit = "c:unlimited";
- proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", dir.c_str());
- proc.m_proc.m_shutdown_options = "";
- proc.m_hostname = proc.m_host->m_hostname;
- proc.m_ndb_mgm_port = g_default_base_port;
- if(split1[0] == "mgm"){
- proc.m_type = atrt_process::NDB_MGM;
- proc.m_proc.m_name.assfmt("%d-%s", index, "ndb_mgmd");
- proc.m_proc.m_path.assign(dir).append("/libexec/ndb_mgmd");
- proc.m_proc.m_args = "--nodaemon -f config.ini";
- proc.m_proc.m_cwd.appfmt("%d.ndb_mgmd", index);
- connect_string.appfmt("host=%s:%d;",
- proc.m_hostname.c_str(), proc.m_ndb_mgm_port);
- } else if(split1[0] == "ndb"){
- proc.m_type = atrt_process::NDB_DB;
- proc.m_proc.m_name.assfmt("%d-%s", index, "ndbd");
- proc.m_proc.m_path.assign(dir).append("/libexec/ndbd");
- proc.m_proc.m_args = "--initial --nodaemon -n";
- proc.m_proc.m_cwd.appfmt("%d.ndbd", index);
- } else if(split1[0] == "mysqld"){
- proc.m_type = atrt_process::MYSQL_SERVER;
- proc.m_proc.m_name.assfmt("%d-%s", index, "mysqld");
- proc.m_proc.m_path.assign(dir).append("/libexec/mysqld");
- proc.m_proc.m_args = "--core-file --ndbcluster";
- proc.m_proc.m_cwd.appfmt("%d.mysqld", index);
- proc.m_proc.m_shutdown_options = "SIGKILL"; // not nice
- } else if(split1[0] == "api"){
- proc.m_type = atrt_process::NDB_API;
- proc.m_proc.m_name.assfmt("%d-%s", index, "ndb_api");
- proc.m_proc.m_path = "";
- proc.m_proc.m_args = "";
- proc.m_proc.m_cwd.appfmt("%d.ndb_api", index);
- } else if(split1[0] == "mysql"){
- proc.m_type = atrt_process::MYSQL_CLIENT;
- proc.m_proc.m_name.assfmt("%d-%s", index, "mysql");
- proc.m_proc.m_path = "";
- proc.m_proc.m_args = "";
- proc.m_proc.m_cwd.appfmt("%d.mysql", index);
- } else {
- g_logger.critical("%s:%d: Unhandled process type: %s",
- g_process_config_filename, lineno,
- split1[0].c_str());
- result = false;
- goto end;
- }
- config.m_processes.push_back(proc);
+ g_my_cnf = strdup(tmp.c_str());
+ g_logger.info("Using %s", tmp.c_str());
+ }
+ else
+ {
+ g_my_cnf = strdup(mycnf.c_str());
+ }
+
+ g_logger.info("Using --prefix=\"%s\"", g_prefix);
+
+ if(g_report_filename)
+ {
+ g_report_file = fopen(g_report_filename, "w");
+ if(g_report_file == 0)
+ {
+ g_logger.critical("Unable to create report file: %s", g_report_filename);
+ return false;
}
}
-
- // Setup connect string
- for(size_t i = 0; i<config.m_processes.size(); i++){
- config.m_processes[i].m_proc.m_env.appfmt(" NDB_CONNECTSTRING=%s",
- connect_string.c_str());
+
+ if (g_clusters == 0)
+ {
+ g_logger.critical("No clusters specified");
+ return false;
}
- end:
- fclose(f);
- return result;
+ g_user = strdup(getenv("LOGNAME"));
+
+ return true;
}
bool
connect_hosts(atrt_config& config){
for(size_t i = 0; i<config.m_hosts.size(); i++){
- if(config.m_hosts[i].m_cpcd->connect() != 0){
+ if(config.m_hosts[i]->m_cpcd->connect() != 0){
g_logger.error("Unable to connect to cpc %s:%d",
- config.m_hosts[i].m_cpcd->getHost(),
- config.m_hosts[i].m_cpcd->getPort());
+ config.m_hosts[i]->m_cpcd->getHost(),
+ config.m_hosts[i]->m_cpcd->getPort());
return false;
}
g_logger.debug("Connected to %s:%d",
- config.m_hosts[i].m_cpcd->getHost(),
- config.m_hosts[i].m_cpcd->getPort());
+ config.m_hosts[i]->m_cpcd->getHost(),
+ config.m_hosts[i]->m_cpcd->getPort());
}
return true;
@@ -529,8 +623,10 @@ connect_ndb_mgm(atrt_process & proc){
g_logger.critical("Unable to create mgm handle");
return false;
}
- BaseString tmp = proc.m_hostname;
- tmp.appfmt(":%d", proc.m_ndb_mgm_port);
+ BaseString tmp = proc.m_host->m_hostname;
+ const char * val;
+ proc.m_options.m_loaded.get("--PortNumber=", &val);
+ tmp.appfmt(":%s", val);
if (ndb_mgm_set_connectstring(handle,tmp.c_str()))
{
@@ -551,8 +647,8 @@ connect_ndb_mgm(atrt_process & proc){
bool
connect_ndb_mgm(atrt_config& config){
for(size_t i = 0; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
- if((proc.m_type & atrt_process::NDB_MGM) != 0){
+ atrt_process & proc = *config.m_processes[i];
+ if((proc.m_type & atrt_process::AP_NDB_MGMD) != 0){
if(!connect_ndb_mgm(proc)){
return false;
}
@@ -573,100 +669,110 @@ wait_ndb(atrt_config& config, int goal){
goal = remap(goal);
-
- /**
- * Get mgm handle for cluster
- */
- NdbMgmHandle handle = 0;
- for(size_t i = 0; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
- if((proc.m_type & atrt_process::NDB_MGM) != 0){
- handle = proc.m_ndb_mgm_handle;
- break;
- }
- }
- if(handle == 0){
- g_logger.critical("Unable to find mgm handle");
- return false;
- }
-
- if(goal == NDB_MGM_NODE_STATUS_STARTED){
+ size_t cnt = 0;
+ for (size_t i = 0; i<config.m_clusters.size(); i++)
+ {
+ atrt_cluster* cluster = config.m_clusters[i];
/**
- * 1) wait NOT_STARTED
- * 2) send start
- * 3) wait STARTED
+ * Get mgm handle for cluster
*/
- if(!wait_ndb(config, NDB_MGM_NODE_STATUS_NOT_STARTED))
+ NdbMgmHandle handle = 0;
+ for(size_t j = 0; j<cluster->m_processes.size(); j++){
+ atrt_process & proc = *cluster->m_processes[j];
+ if((proc.m_type & atrt_process::AP_NDB_MGMD) != 0){
+ handle = proc.m_ndb_mgm_handle;
+ break;
+ }
+ }
+
+ if(handle == 0){
+ g_logger.critical("Unable to find mgm handle");
return false;
+ }
- ndb_mgm_start(handle, 0, 0);
- }
-
- struct ndb_mgm_cluster_state * state;
-
- time_t now = time(0);
- time_t end = now + 360;
- int min = remap(NDB_MGM_NODE_STATUS_NO_CONTACT);
- int min2 = goal;
-
- while(now < end){
- /**
- * 1) retreive current state
- */
- state = 0;
- do {
- state = ndb_mgm_get_status(handle);
- if(state == 0){
- const int err = ndb_mgm_get_latest_error(handle);
- g_logger.error("Unable to poll db state: %d %s %s",
- ndb_mgm_get_latest_error(handle),
- ndb_mgm_get_latest_error_msg(handle),
- ndb_mgm_get_latest_error_desc(handle));
- if(err == NDB_MGM_SERVER_NOT_CONNECTED && connect_ndb_mgm(config)){
- g_logger.error("Reconnected...");
- continue;
- }
+ if(goal == NDB_MGM_NODE_STATUS_STARTED){
+ /**
+ * 1) wait NOT_STARTED
+ * 2) send start
+ * 3) wait STARTED
+ */
+ if(!wait_ndb(config, NDB_MGM_NODE_STATUS_NOT_STARTED))
return false;
- }
- } while(state == 0);
- NdbAutoPtr<void> tmp(state);
+
+ ndb_mgm_start(handle, 0, 0);
+ }
+
+ struct ndb_mgm_cluster_state * state;
+
+ time_t now = time(0);
+ time_t end = now + 360;
+ int min = remap(NDB_MGM_NODE_STATUS_NO_CONTACT);
+ int min2 = goal;
- min2 = goal;
- for(int i = 0; i<state->no_of_nodes; i++){
- if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_NDB){
- const int s = remap(state->node_states[i].node_status);
- min2 = (min2 < s ? min2 : s );
-
- if(s < remap(NDB_MGM_NODE_STATUS_NO_CONTACT) ||
- s > NDB_MGM_NODE_STATUS_STARTED){
- g_logger.critical("Strange DB status during start: %d %d", i, min2);
+ while(now < end){
+ /**
+ * 1) retreive current state
+       * 1) retrieve current state
+ state = 0;
+ do {
+ state = ndb_mgm_get_status(handle);
+ if(state == 0){
+ const int err = ndb_mgm_get_latest_error(handle);
+ g_logger.error("Unable to poll db state: %d %s %s",
+ ndb_mgm_get_latest_error(handle),
+ ndb_mgm_get_latest_error_msg(handle),
+ ndb_mgm_get_latest_error_desc(handle));
+ if(err == NDB_MGM_SERVER_NOT_CONNECTED && connect_ndb_mgm(config)){
+ g_logger.error("Reconnected...");
+ continue;
+ }
return false;
}
-
- if(min2 < min){
- g_logger.critical("wait ndb failed node: %d %d %d %d",
- state->node_states[i].node_id, min, min2, goal);
+ } while(state == 0);
+ NdbAutoPtr<void> tmp(state);
+
+ min2 = goal;
+ for(int j = 0; j<state->no_of_nodes; j++){
+ if(state->node_states[j].node_type == NDB_MGM_NODE_TYPE_NDB){
+ const int s = remap(state->node_states[j].node_status);
+ min2 = (min2 < s ? min2 : s );
+
+ if(s < remap(NDB_MGM_NODE_STATUS_NO_CONTACT) ||
+ s > NDB_MGM_NODE_STATUS_STARTED){
+ g_logger.critical("Strange DB status during start: %d %d",
+ j, min2);
+ return false;
+ }
+
+ if(min2 < min){
+ g_logger.critical("wait ndb failed node: %d %d %d %d",
+ state->node_states[j].node_id, min, min2, goal);
+ }
}
}
+
+ if(min2 < min){
+ g_logger.critical("wait ndb failed %d %d %d", min, min2, goal);
+ return false;
+ }
+
+ if(min2 == goal){
+ cnt++;
+ goto next;
+ }
+
+ min = min2;
+ now = time(0);
}
- if(min2 < min){
- g_logger.critical("wait ndb failed %d %d %d", min, min2, goal);
- return false;
- }
-
- if(min2 == goal){
- return true;
- break;
- }
-
- min = min2;
- now = time(0);
+ g_logger.critical("wait ndb timed out %d %d %d", min, min2, goal);
+ break;
+
+next:
+ ;
}
-
- g_logger.critical("wait ndb timed out %d %d %d", min, min2, goal);
-
- return false;
+
+ return cnt == config.m_clusters.size();
}
bool
@@ -676,21 +782,19 @@ start_process(atrt_process & proc){
return false;
}
- BaseString path = proc.m_proc.m_cwd.substr(proc.m_host->m_base_dir.length()+BaseString("/run").length());
-
BaseString tmp = g_setup_progname;
- tmp.appfmt(" %s %s/%s/ %s",
+ tmp.appfmt(" %s %s/ %s",
proc.m_host->m_hostname.c_str(),
- g_setup_path,
- path.c_str(),
+ proc.m_proc.m_cwd.c_str(),
proc.m_proc.m_cwd.c_str());
-
+
+ g_logger.debug("system(%s)", tmp.c_str());
const int r1 = system(tmp.c_str());
if(r1 != 0){
g_logger.critical("Failed to setup process");
return false;
}
-
+
{
Properties reply;
if(proc.m_host->m_cpcd->define_process(proc.m_proc, reply) != 0){
@@ -715,7 +819,7 @@ start_process(atrt_process & proc){
bool
start_processes(atrt_config& config, int types){
for(size_t i = 0; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
+ atrt_process & proc = *config.m_processes[i];
if((types & proc.m_type) != 0 && proc.m_proc.m_path != ""){
if(!start_process(proc)){
return false;
@@ -760,7 +864,7 @@ stop_process(atrt_process & proc){
bool
stop_processes(atrt_config& config, int types){
for(size_t i = 0; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
+ atrt_process & proc = *config.m_processes[i];
if((types & proc.m_type) != 0){
if(!stop_process(proc)){
return false;
@@ -779,11 +883,11 @@ update_status(atrt_config& config, int){
m_procs.fill(config.m_hosts.size(), dummy);
for(size_t i = 0; i<config.m_hosts.size(); i++){
Properties p;
- config.m_hosts[i].m_cpcd->list_processes(m_procs[i], p);
+ config.m_hosts[i]->m_cpcd->list_processes(m_procs[i], p);
}
for(size_t i = 0; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
+ atrt_process & proc = *config.m_processes[i];
if(proc.m_proc.m_id != -1){
Vector<SimpleCpcClient::Process> &h_procs= m_procs[proc.m_host->m_index];
bool found = false;
@@ -798,7 +902,7 @@ update_status(atrt_config& config, int){
g_logger.error("update_status: not found");
g_logger.error("id: %d host: %s cmd: %s",
proc.m_proc.m_id,
- proc.m_hostname.c_str(),
+ proc.m_host->m_hostname.c_str(),
proc.m_proc.m_path.c_str());
for(size_t j = 0; j<h_procs.size(); j++){
g_logger.error("found: %d %s", h_procs[j].m_id,
@@ -815,7 +919,7 @@ int
is_running(atrt_config& config, int types){
int found = 0, running = 0;
for(size_t i = 0; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
+ atrt_process & proc = *config.m_processes[i];
if((types & proc.m_type) != 0){
found++;
if(proc.m_proc.m_status == "running")
@@ -910,12 +1014,24 @@ read_test_case(FILE * file, atrt_testcase& tc, int& line){
tc.m_run_all= true;
else
tc.m_run_all= false;
+
+ if (!p.get("name", &mt))
+ {
+ tc.m_name.assfmt("%s %s",
+ tc.m_command.c_str(),
+ tc.m_args.c_str());
+ }
+ else
+ {
+ tc.m_name.assign(mt);
+ }
return true;
}
bool
setup_test_case(atrt_config& config, const atrt_testcase& tc){
+ g_logger.debug("system(%s)", g_clear_progname);
const int r1 = system(g_clear_progname);
if(r1 != 0){
g_logger.critical("Failed to clear result");
@@ -923,19 +1039,24 @@ setup_test_case(atrt_config& config, const atrt_testcase& tc){
}
size_t i = 0;
- for(; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
- if(proc.m_type == atrt_process::NDB_API || proc.m_type == atrt_process::MYSQL_CLIENT){
- proc.m_proc.m_path.assfmt("%s/bin/%s", proc.m_host->m_base_dir.c_str(),
- tc.m_command.c_str());
+ for(; i<config.m_processes.size(); i++)
+ {
+ atrt_process & proc = *config.m_processes[i];
+ if(proc.m_type == atrt_process::AP_NDB_API || proc.m_type == atrt_process::AP_CLIENT){
+ proc.m_proc.m_path = "";
+ if (tc.m_command.c_str()[0] != '/')
+ {
+ proc.m_proc.m_path.appfmt("%s/bin/", g_prefix);
+ }
+ proc.m_proc.m_path.append(tc.m_command.c_str());
proc.m_proc.m_args.assign(tc.m_args);
if(!tc.m_run_all)
break;
}
}
for(i++; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
- if(proc.m_type == atrt_process::NDB_API || proc.m_type == atrt_process::MYSQL_CLIENT){
+ atrt_process & proc = *config.m_processes[i];
+ if(proc.m_type == atrt_process::AP_NDB_API || proc.m_type == atrt_process::AP_CLIENT){
proc.m_proc.m_path.assign("");
proc.m_proc.m_args.assign("");
}
@@ -946,24 +1067,27 @@ setup_test_case(atrt_config& config, const atrt_testcase& tc){
bool
gather_result(atrt_config& config, int * result){
BaseString tmp = g_gather_progname;
- for(size_t i = 0; i<config.m_processes.size(); i++){
- atrt_process & proc = config.m_processes[i];
- if(proc.m_proc.m_path != ""){
- tmp.appfmt(" %s:%s",
- proc.m_hostname.c_str(),
- proc.m_proc.m_cwd.c_str());
- }
+
+ for(size_t i = 0; i<config.m_hosts.size(); i++)
+ {
+ tmp.appfmt(" %s:%s/*",
+ config.m_hosts[i]->m_hostname.c_str(),
+ config.m_hosts[i]->m_basedir.c_str());
}
-
+
+ g_logger.debug("system(%s)", tmp.c_str());
const int r1 = system(tmp.c_str());
- if(r1 != 0){
- g_logger.critical("Failed to gather result");
+ if(r1 != 0)
+ {
+ g_logger.critical("Failed to gather result!");
return false;
}
-
+
+ g_logger.debug("system(%s)", g_analyze_progname);
const int r2 = system(g_analyze_progname);
-
- if(r2 == -1 || r2 == (127 << 8)){
+
+ if(r2 == -1 || r2 == (127 << 8))
+ {
g_logger.critical("Failed to analyze results");
return false;
}
@@ -974,6 +1098,7 @@ gather_result(atrt_config& config, int * result){
bool
setup_hosts(atrt_config& config){
+ g_logger.debug("system(%s)", g_clear_progname);
const int r1 = system(g_clear_progname);
if(r1 != 0){
g_logger.critical("Failed to clear result");
@@ -982,21 +1107,143 @@ setup_hosts(atrt_config& config){
for(size_t i = 0; i<config.m_hosts.size(); i++){
BaseString tmp = g_setup_progname;
- tmp.appfmt(" %s %s/ %s/run",
- config.m_hosts[i].m_hostname.c_str(),
- g_setup_path,
- config.m_hosts[i].m_base_dir.c_str());
+ tmp.appfmt(" %s %s/ %s/",
+ config.m_hosts[i]->m_hostname.c_str(),
+ g_basedir,
+ config.m_hosts[i]->m_basedir.c_str());
+ g_logger.debug("system(%s)", tmp.c_str());
const int r1 = system(tmp.c_str());
if(r1 != 0){
g_logger.critical("Failed to setup %s",
- config.m_hosts[i].m_hostname.c_str());
+ config.m_hosts[i]->m_hostname.c_str());
+ return false;
+ }
+ }
+ return true;
+}
+
+bool
+deploy(atrt_config & config)
+{
+ for (size_t i = 0; i<config.m_hosts.size(); i++)
+ {
+ BaseString tmp = g_setup_progname;
+ tmp.appfmt(" %s %s/ %s",
+ config.m_hosts[i]->m_hostname.c_str(),
+ g_prefix,
+ g_prefix);
+
+ g_logger.info("rsyncing %s to %s", g_prefix,
+ config.m_hosts[i]->m_hostname.c_str());
+ g_logger.debug("system(%s)", tmp.c_str());
+ const int r1 = system(tmp.c_str());
+ if(r1 != 0)
+ {
+ g_logger.critical("Failed to rsync %s to %s",
+ g_prefix,
+ config.m_hosts[i]->m_hostname.c_str());
return false;
}
}
+
return true;
}
+bool
+sshx(atrt_config & config, unsigned mask)
+{
+ for (size_t i = 0; i<config.m_processes.size(); i++)
+ {
+ atrt_process & proc = *config.m_processes[i];
+
+ BaseString tmp;
+ const char * type = 0;
+ switch(proc.m_type){
+ case atrt_process::AP_NDB_MGMD:
+ type = (mask & proc.m_type) ? "ndb_mgmd" : 0;
+ break;
+ case atrt_process::AP_NDBD:
+ type = (mask & proc.m_type) ? "ndbd" : 0;
+ break;
+ case atrt_process::AP_MYSQLD:
+ type = (mask & proc.m_type) ? "mysqld" : 0;
+ break;
+ case atrt_process::AP_NDB_API:
+ type = (mask & proc.m_type) ? "ndbapi" : 0;
+ break;
+ case atrt_process::AP_CLIENT:
+ type = (mask & proc.m_type) ? "client" : 0;
+ break;
+ default:
+ type = "<unknown>";
+ }
+
+ if (type == 0)
+ continue;
+
+ tmp.appfmt("xterm -fg black -title \"%s(%s) on %s\""
+ " -e 'ssh -t -X %s sh %s/ssh-login.sh' &",
+ type,
+ proc.m_cluster->m_name.c_str(),
+ proc.m_host->m_hostname.c_str(),
+ proc.m_host->m_hostname.c_str(),
+ proc.m_proc.m_cwd.c_str());
+
+ g_logger.debug("system(%s)", tmp.c_str());
+ const int r1 = system(tmp.c_str());
+ if(r1 != 0)
+ {
+ g_logger.critical("Failed sshx (%s)",
+ tmp.c_str());
+ return false;
+ }
+ NdbSleep_MilliSleep(300); // To prevent xlock problem
+ }
+
+ return true;
+}
+
+bool
+start(atrt_config & config, unsigned proc_mask)
+{
+ if (proc_mask & atrt_process::AP_NDB_MGMD)
+ if(!start_processes(g_config, atrt_process::AP_NDB_MGMD))
+ return false;
+
+ if (proc_mask & atrt_process::AP_NDBD)
+ {
+ if(!connect_ndb_mgm(g_config)){
+ return false;
+ }
+
+ if(!start_processes(g_config, atrt_process::AP_NDBD))
+ return false;
+
+ if(!wait_ndb(g_config, NDB_MGM_NODE_STATUS_NOT_STARTED))
+ return false;
+
+ for(Uint32 i = 0; i<3; i++)
+ if(wait_ndb(g_config, NDB_MGM_NODE_STATUS_STARTED))
+ goto started;
+ return false;
+ }
+
+started:
+ if(!start_processes(g_config, p_servers & proc_mask))
+ return false;
+
+ return true;
+}
+
+void
+require(bool x)
+{
+ if (!x)
+ abort();
+}
+
template class Vector<Vector<SimpleCpcClient::Process> >;
-template class Vector<atrt_host>;
-template class Vector<atrt_process>;
+template class Vector<atrt_host*>;
+template class Vector<atrt_cluster*>;
+template class Vector<atrt_process*>;
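
Note: main.cpp now bootstraps from a trailing my.cnf path (argv[argc-1]) plus an optional action string in argv[argc-2]: c/C select --configure, d --deploy, x/X --sshx, s/S --start, f --fqpn and q --quit, so a command line ending in "Cd my.cnf" would clean-configure and deploy before any tests run (example invocation, not taken from the source). A standalone sketch of that decoding, mirroring the switch in parse_args with local stand-ins for the real globals:

// Decode an atrt action string such as "Cd" into individual flags.
// Illustration only; atrt stores these in g_do_setup, g_do_deploy, etc.
#include <cstdio>

static bool decode_actions(const char* arg,
                           int& do_setup, int& do_deploy,
                           int& do_sshx, int& do_start,
                           int& fqpn, int& do_quit)
{
  for (; *arg; arg++)
  {
    switch (*arg) {
    case 'c': if (do_setup == 0) do_setup = 1; break;
    case 'C': do_setup = 2;  break;
    case 'd': do_deploy = 1; break;
    case 'x': do_sshx = 1;   break;  // atrt uses AP_CLIENT | AP_NDB_API here
    case 'X': do_sshx = 2;   break;  // atrt uses AP_ALL here
    case 's': do_start = 1;  break;  // atrt uses p_ndb here
    case 'S': do_start = 2;  break;  // atrt uses p_ndb | p_servers here
    case 'f': fqpn = 1;      break;
    case 'q': do_quit = 1;   break;
    default:
      fprintf(stderr, "Unknown switch '%c'\n", *arg);
      return false;
    }
  }
  return true;
}

int main()
{
  int setup = 0, deploy = 0, sshx = 0, start = 0, fqpn = 0, quit = 0;
  if (!decode_actions("Cd", setup, deploy, sshx, start, fqpn, quit))
    return 1;
  printf("setup=%d deploy=%d\n", setup, deploy);
  return 0;
}
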
diff --git a/storage/ndb/test/run-test/run-test.hpp b/storage/ndb/test/run-test/run-test.hpp
deleted file mode 100644
index 2b259e83a60..00000000000
--- a/storage/ndb/test/run-test/run-test.hpp
+++ /dev/null
@@ -1,95 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef atrt_config_hpp
-#define atrt_config_hpp
-
-#include <getarg.h>
-#include <Vector.hpp>
-#include <BaseString.hpp>
-#include <Logger.hpp>
-#include <mgmapi.h>
-#include <CpcClient.hpp>
-
-#undef MYSQL_CLIENT
-
-enum ErrorCodes {
- ERR_OK = 0,
- ERR_NDB_FAILED = 101,
- ERR_SERVERS_FAILED = 102,
- ERR_MAX_TIME_ELAPSED = 103
-};
-
-struct atrt_host {
- size_t m_index;
- BaseString m_user;
- BaseString m_base_dir;
- BaseString m_hostname;
- SimpleCpcClient * m_cpcd;
-};
-
-struct atrt_process {
- size_t m_index;
- BaseString m_hostname;
- struct atrt_host * m_host;
-
- enum Type {
- ALL = 255,
- NDB_DB = 1,
- NDB_API = 2,
- NDB_MGM = 4,
- NDB_REP = 8,
- MYSQL_SERVER = 16,
- MYSQL_CLIENT = 32
- } m_type;
-
- SimpleCpcClient::Process m_proc;
- short m_ndb_mgm_port;
- NdbMgmHandle m_ndb_mgm_handle; // if type == ndb_mgm
-};
-
-struct atrt_config {
- BaseString m_key;
- Vector<atrt_host> m_hosts;
- Vector<atrt_process> m_processes;
-};
-
-struct atrt_testcase {
- bool m_report;
- bool m_run_all;
- time_t m_max_time;
- BaseString m_command;
- BaseString m_args;
-};
-
-extern Logger g_logger;
-
-bool parse_args(int argc, const char** argv);
-bool setup_config(atrt_config&);
-bool connect_hosts(atrt_config&);
-bool connect_ndb_mgm(atrt_config&);
-bool wait_ndb(atrt_config&, int ndb_mgm_node_status);
-bool start_processes(atrt_config&, int);
-bool stop_processes(atrt_config&, int);
-bool update_status(atrt_config&, int);
-int is_running(atrt_config&, int);
-bool gather_result(atrt_config&, int * result);
-
-bool read_test_case(FILE *, atrt_testcase&, int& line);
-bool setup_test_case(atrt_config&, const atrt_testcase&);
-
-bool setup_hosts(atrt_config&);
-
-#endif
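
Note: run-test.hpp and its NDB_DB/NDB_MGM/MYSQL_SERVER type enum are gone; the new atrt.hpp (not part of this section) supplies the AP_* flags used throughout. The bitmask usage above (p_ndb = AP_NDB_MGMD | AP_NDBD, AP_ALL covering every type, AP_CLUSTER reserved for option loading) suggests power-of-two values mirroring the old enum with NDB_REP dropped, roughly:

// Assumed shape only; atrt.hpp is not shown in this diff, so the exact
// values are a guess based on how the flags are combined above.
enum {
  AP_NDBD     = 1,
  AP_NDB_API  = 2,
  AP_NDB_MGMD = 4,
  AP_MYSQLD   = 16,
  AP_CLIENT   = 32,
  AP_ALL      = 255,
  AP_CLUSTER  = 256   // pseudo-type used only when loading cluster options
};
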
diff --git a/storage/ndb/test/run-test/setup.cpp b/storage/ndb/test/run-test/setup.cpp
new file mode 100644
index 00000000000..cbb7a34f171
--- /dev/null
+++ b/storage/ndb/test/run-test/setup.cpp
@@ -0,0 +1,965 @@
+#include "atrt.hpp"
+#include <ndb_global.h>
+#include <my_sys.h>
+#include <my_getopt.h>
+#include <NdbOut.hpp>
+
+static NdbOut& operator<<(NdbOut& out, const atrt_process& proc);
+static atrt_host * find(const char * hostname, Vector<atrt_host*>&);
+static bool load_process(atrt_config&, atrt_cluster&, atrt_process::Type,
+ size_t idx, const char * hostname);
+static bool load_options(int argc, char** argv, int type, atrt_options&);
+
+enum {
+ PO_NDB = atrt_options::AO_NDBCLUSTER
+
+ ,PO_REP_SLAVE = 256
+ ,PO_REP_MASTER = 512
+ ,PO_REP = (atrt_options::AO_REPLICATION | PO_REP_SLAVE | PO_REP_MASTER)
+};
+
+struct proc_option
+{
+ const char * name;
+ int type;
+ int options;
+};
+
+static
+struct proc_option f_options[] = {
+ { "--FileSystemPath=", atrt_process::AP_NDBD, 0 }
+ ,{ "--PortNumber=", atrt_process::AP_NDB_MGMD, 0 }
+ ,{ "--datadir=", atrt_process::AP_MYSQLD, 0 }
+ ,{ "--socket=", atrt_process::AP_MYSQLD | atrt_process::AP_CLIENT, 0 }
+ ,{ "--port=", atrt_process::AP_MYSQLD | atrt_process::AP_CLIENT, 0 }
+ ,{ "--server-id=", atrt_process::AP_MYSQLD, PO_REP }
+ ,{ "--log-bin", atrt_process::AP_MYSQLD, PO_REP_MASTER }
+ ,{ "--master-host=", atrt_process::AP_MYSQLD, PO_REP_SLAVE }
+ ,{ "--master-port=", atrt_process::AP_MYSQLD, PO_REP_SLAVE }
+ ,{ "--master-user=", atrt_process::AP_MYSQLD, PO_REP_SLAVE }
+ ,{ "--master-password=", atrt_process::AP_MYSQLD, PO_REP_SLAVE }
+ ,{ "--ndb-connectstring=", atrt_process::AP_MYSQLD | atrt_process::AP_CLUSTER
+ ,PO_NDB }
+ ,{ "--ndbcluster", atrt_process::AP_MYSQLD, PO_NDB }
+ ,{ 0, 0, 0 }
+};
+const char * ndbcs = "--ndb-connectstring=";
+
+bool
+setup_config(atrt_config& config)
+{
+ BaseString tmp(g_clusters);
+ Vector<BaseString> clusters;
+ tmp.split(clusters, ",");
+
+ bool fqpn = clusters.size() > 1 || g_fqpn;
+
+ size_t j,k;
+ for (size_t i = 0; i<clusters.size(); i++)
+ {
+ struct atrt_cluster *cluster = new atrt_cluster;
+ config.m_clusters.push_back(cluster);
+
+ cluster->m_name = clusters[i];
+ if (fqpn)
+ {
+ cluster->m_dir.assfmt("cluster%s/", cluster->m_name.c_str());
+ }
+ else
+ {
+ cluster->m_dir = "";
+ }
+
+ int argc = 1;
+ const char * argv[] = { "atrt", 0, 0 };
+
+ BaseString buf;
+ buf.assfmt("--defaults-group-suffix=%s", clusters[i].c_str());
+ argv[argc++] = buf.c_str();
+ char ** tmp = (char**)argv;
+ const char *groups[] = { "cluster_config", 0 };
+ int ret = load_defaults(g_my_cnf, groups, &argc, &tmp);
+ if (ret)
+ {
+ g_logger.error("Unable to load defaults for cluster: %s",
+ clusters[i].c_str());
+ return false;
+ }
+
+ struct
+ {
+ atrt_process::Type type;
+ const char * name;
+ const char * value;
+ } proc_args[] = {
+ { atrt_process::AP_NDB_MGMD, "--ndb_mgmd=", 0 },
+ { atrt_process::AP_NDBD, "--ndbd=", 0 },
+ { atrt_process::AP_NDB_API, "--ndbapi=", 0 },
+ { atrt_process::AP_NDB_API, "--api=", 0 },
+ { atrt_process::AP_MYSQLD, "--mysqld=", 0 },
+ { atrt_process::AP_ALL, 0, 0}
+ };
+
+ /**
+ * Find all processes...
+ */
+ for (j = 0; j<(size_t)argc; j++)
+ {
+ for (k = 0; proc_args[k].name; k++)
+ {
+ if (!strncmp(tmp[j], proc_args[k].name, strlen(proc_args[k].name)))
+ {
+ proc_args[k].value = tmp[j] + strlen(proc_args[k].name);
+ break;
+ }
+ }
+ }
+
+ /**
+ * Load each process
+ */
+ for (j = 0; proc_args[j].name; j++)
+ {
+ if (proc_args[j].value)
+ {
+ BaseString tmp(proc_args[j].value);
+ Vector<BaseString> list;
+ tmp.split(list, ",");
+ for (k = 0; k<list.size(); k++)
+ if (!load_process(config, *cluster, proc_args[j].type,
+ k + 1, list[k].c_str()))
+ return false;
+ }
+ }
+
+ {
+ /**
+ * Load cluster options
+ */
+
+ argc = 1;
+ argv[argc++] = buf.c_str();
+ const char *groups[] = { "mysql_cluster", 0 };
+ ret = load_defaults(g_my_cnf, groups, &argc, &tmp);
+
+ if (ret)
+ {
+ g_logger.error("Unable to load defaults for cluster: %s",
+ clusters[i].c_str());
+ return false;
+ }
+
+ load_options(argc, tmp, atrt_process::AP_CLUSTER, cluster->m_options);
+ }
+ }
+ return true;
+}
+
+static
+atrt_host *
+find(const char * hostname, Vector<atrt_host*> & hosts){
+ for (size_t i = 0; i<hosts.size(); i++){
+ if (hosts[i]->m_hostname == hostname){
+ return hosts[i];
+ }
+ }
+
+ atrt_host* host = new atrt_host;
+ host->m_index = hosts.size();
+ host->m_cpcd = new SimpleCpcClient(hostname, 1234);
+ host->m_basedir = g_basedir;
+ host->m_user = g_user;
+ host->m_hostname = hostname;
+ hosts.push_back(host);
+ return host;
+}
+
+static
+bool
+load_process(atrt_config& config, atrt_cluster& cluster,
+ atrt_process::Type type,
+ size_t idx,
+ const char * hostname)
+{
+ atrt_host * host_ptr = find(hostname, config.m_hosts);
+ atrt_process *proc_ptr = new atrt_process;
+
+ config.m_processes.push_back(proc_ptr);
+ host_ptr->m_processes.push_back(proc_ptr);
+ cluster.m_processes.push_back(proc_ptr);
+
+ atrt_process& proc = *proc_ptr;
+
+ const size_t proc_no = config.m_processes.size();
+ proc.m_index = idx;
+ proc.m_type = type;
+ proc.m_host = host_ptr;
+ proc.m_cluster = &cluster;
+ proc.m_options.m_features = 0;
+ proc.m_rep_src = 0;
+ proc.m_proc.m_id = -1;
+ proc.m_proc.m_type = "temporary";
+ proc.m_proc.m_owner = "atrt";
+ proc.m_proc.m_group = cluster.m_name.c_str();
+ proc.m_proc.m_stdout = "log.out";
+ proc.m_proc.m_stderr = "2>&1";
+ proc.m_proc.m_runas = proc.m_host->m_user;
+ proc.m_proc.m_ulimit = "c:unlimited";
+ proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", g_prefix);
+ proc.m_proc.m_env.appfmt(" MYSQL_HOME=%s", g_basedir);
+ proc.m_proc.m_shutdown_options = "";
+
+ int argc = 1;
+ const char * argv[] = { "atrt", 0, 0 };
+
+ BaseString buf[10];
+ char ** tmp = (char**)argv;
+ const char *groups[] = { 0, 0, 0, 0 };
+ switch(type){
+ case atrt_process::AP_NDB_MGMD:
+ groups[0] = "cluster_config";
+ buf[1].assfmt("cluster_config.ndb_mgmd.%d", idx);
+ groups[1] = buf[1].c_str();
+ buf[0].assfmt("--defaults-group-suffix=%s", cluster.m_name.c_str());
+ argv[argc++] = buf[0].c_str();
+ break;
+ case atrt_process::AP_NDBD:
+ groups[0] = "cluster_config";
+ buf[1].assfmt("cluster_config.ndbd.%d", idx);
+ groups[1] = buf[1].c_str();
+ buf[0].assfmt("--defaults-group-suffix=%s", cluster.m_name.c_str());
+ argv[argc++] = buf[0].c_str();
+ break;
+ case atrt_process::AP_MYSQLD:
+ groups[0] = "mysqld";
+ groups[1] = "mysql_cluster";
+ buf[0].assfmt("--defaults-group-suffix=.%d%s",idx,cluster.m_name.c_str());
+ argv[argc++] = buf[0].c_str();
+ break;
+ case atrt_process::AP_CLIENT:
+ buf[0].assfmt("client.%d%s", idx, cluster.m_name.c_str());
+ groups[0] = buf[0].c_str();
+ break;
+ case atrt_process::AP_NDB_API:
+ break;
+ default:
+ g_logger.critical("Unhandled process type: %d", type);
+ return false;
+ }
+
+ int ret = load_defaults(g_my_cnf, groups, &argc, &tmp);
+ if (ret)
+ {
+ g_logger.error("Unable to load defaults for cluster: %s",
+ cluster.m_name.c_str());
+ return false;
+ }
+
+ load_options(argc, tmp, type, proc.m_options);
+
+ BaseString dir;
+ dir.assfmt("%s/%s",
+ proc.m_host->m_basedir.c_str(),
+ cluster.m_dir.c_str());
+
+ switch(type){
+ case atrt_process::AP_NDB_MGMD:
+ {
+ proc.m_proc.m_name.assfmt("%d-%s", proc_no, "ndb_mgmd");
+ proc.m_proc.m_path.assign(g_prefix).append("/libexec/ndb_mgmd");
+ proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf",
+ proc.m_host->m_basedir.c_str());
+ proc.m_proc.m_args.appfmt(" --defaults-group-suffix=%s",
+ cluster.m_name.c_str());
+ proc.m_proc.m_args.append(" --nodaemon --mycnf");
+ proc.m_proc.m_cwd.assfmt("%sndb_mgmd.%d", dir.c_str(), proc.m_index);
+ proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=%s",
+ cluster.m_name.c_str());
+ break;
+ }
+ case atrt_process::AP_NDBD:
+ {
+ proc.m_proc.m_name.assfmt("%d-%s", proc_no, "ndbd");
+ proc.m_proc.m_path.assign(g_prefix).append("/libexec/ndbd");
+ proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf",
+ proc.m_host->m_basedir.c_str());
+ proc.m_proc.m_args.appfmt(" --defaults-group-suffix=%s",
+ cluster.m_name.c_str());
+ proc.m_proc.m_args.append(" --nodaemon -n");
+ proc.m_proc.m_cwd.assfmt("%sndbd.%d", dir.c_str(), proc.m_index);
+ proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=%s",
+ cluster.m_name.c_str());
+ break;
+ }
+ case atrt_process::AP_MYSQLD:
+ {
+ proc.m_proc.m_name.assfmt("%d-%s", proc_no, "mysqld");
+ proc.m_proc.m_path.assign(g_prefix).append("/libexec/mysqld");
+ proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf",
+ proc.m_host->m_basedir.c_str());
+ proc.m_proc.m_args.appfmt(" --defaults-group-suffix=.%d%s",
+ proc.m_index,
+ cluster.m_name.c_str());
+ proc.m_proc.m_args.append(" --core-file");
+ proc.m_proc.m_cwd.appfmt("%smysqld.%d", dir.c_str(), proc.m_index);
+ proc.m_proc.m_shutdown_options = "SIGKILL"; // not nice
+ proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=.%d%s",
+ proc.m_index,
+ cluster.m_name.c_str());
+ break;
+ }
+ case atrt_process::AP_NDB_API:
+ {
+ proc.m_proc.m_name.assfmt("%d-%s", proc_no, "ndb_api");
+ proc.m_proc.m_path = "";
+ proc.m_proc.m_args = "";
+ proc.m_proc.m_cwd.appfmt("%sndb_api.%d", dir.c_str(), proc.m_index);
+ proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=%s",
+ cluster.m_name.c_str());
+ break;
+ }
+ case atrt_process::AP_CLIENT:
+ {
+ proc.m_proc.m_name.assfmt("%d-%s", proc_no, "mysql");
+ proc.m_proc.m_path = "";
+ proc.m_proc.m_args = "";
+ proc.m_proc.m_cwd.appfmt("%s/client.%d", dir.c_str(), proc.m_index);
+ proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=.%d%s",
+ proc.m_index,
+ cluster.m_name.c_str());
+ break;
+ }
+ case atrt_process::AP_ALL:
+ case atrt_process::AP_CLUSTER:
+ g_logger.critical("Unhandled process type: %d", proc.m_type);
+ return false;
+ }
+
+ if (proc.m_proc.m_path.length())
+ {
+ proc.m_proc.m_env.appfmt(" CMD=\"%s", proc.m_proc.m_path.c_str());
+ if (proc.m_proc.m_args.length())
+ proc.m_proc.m_env.append(" ");
+ proc.m_proc.m_env.append(proc.m_proc.m_args);
+ proc.m_proc.m_env.append("\" ");
+ }
+
+ if (type == atrt_process::AP_MYSQLD)
+ {
+ /**
+ * Add a client for each mysqld
+ */
+ if (!load_process(config, cluster, atrt_process::AP_CLIENT, idx, hostname))
+ {
+ return false;
+ }
+ }
+
+ if (type == atrt_process::AP_CLIENT)
+ {
+ proc.m_mysqld = cluster.m_processes[cluster.m_processes.size()-2];
+ }
+
+ return true;
+}
+
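+/**
+ * Record each argument that prefix-matches an entry in f_options
+ * (and whose type mask includes this process type) in opts.m_loaded.
+ */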
+static
+bool
+load_options(int argc, char** argv, int type, atrt_options& opts)
+{
+ for (size_t i = 0; i<(size_t)argc; i++)
+ {
+ for (size_t j = 0; f_options[j].name; j++)
+ {
+ const char * name = f_options[j].name;
+ const size_t len = strlen(name);
+
+ if ((f_options[j].type & type) && strncmp(argv[i], name, len) == 0)
+ {
+ opts.m_loaded.put(name, argv[i]+len, true);
+ break;
+ }
+ }
+ }
+ return true;
+}
+
+struct proc_rule_ctx
+{
+ int m_setup;
+ atrt_config* m_config;
+ atrt_host * m_host;
+ atrt_cluster* m_cluster;
+ atrt_process* m_process;
+};
+
+struct proc_rule
+{
+ int type;
+ bool (* func)(Properties& prop, proc_rule_ctx&, int extra);
+ int extra;
+};
+
+static bool pr_check_replication(Properties&, proc_rule_ctx&, int);
+static bool pr_check_features(Properties&, proc_rule_ctx&, int);
+static bool pr_fix_client(Properties&, proc_rule_ctx&, int);
+static bool pr_proc_options(Properties&, proc_rule_ctx&, int);
+static bool pr_fix_ndb_connectstring(Properties&, proc_rule_ctx&, int);
+static bool pr_set_ndb_connectstring(Properties&, proc_rule_ctx&, int);
+static bool pr_check_proc(Properties&, proc_rule_ctx&, int);
+
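+/**
+ * Configuration rules, applied in order by configure(): rules whose type
+ * includes AP_CLUSTER run once per cluster, the others once per matching
+ * process.
+ */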
+static
+proc_rule f_rules[] =
+{
+ { atrt_process::AP_CLUSTER, pr_check_features, 0 }
+ ,{ atrt_process::AP_MYSQLD, pr_check_replication, 0 }
+ ,{ (atrt_process::AP_ALL & ~atrt_process::AP_CLIENT), pr_proc_options,
+ ~(PO_REP | PO_NDB) }
+ ,{ (atrt_process::AP_ALL & ~atrt_process::AP_CLIENT), pr_proc_options, PO_REP }
+ ,{ atrt_process::AP_CLIENT, pr_fix_client, 0 }
+ ,{ atrt_process::AP_CLUSTER, pr_fix_ndb_connectstring, 0 }
+ ,{ atrt_process::AP_MYSQLD, pr_set_ndb_connectstring, 0 }
+ ,{ atrt_process::AP_ALL, pr_check_proc, 0 }
+ ,{ 0, 0, 0 }
+};
+
+bool
+configure(atrt_config& config, int setup)
+{
+ Properties props;
+
+ for (size_t i = 0; f_rules[i].func; i++)
+ {
+ bool ok = true;
+ proc_rule_ctx ctx;
+ bzero(&ctx, sizeof(ctx));
+ ctx.m_setup = setup;
+ ctx.m_config = &config;
+
+ for (size_t j = 0; j < config.m_clusters.size(); j++)
+ {
+ ctx.m_cluster = config.m_clusters[j];
+
+ if (f_rules[i].type & atrt_process::AP_CLUSTER)
+ {
+ g_logger.debug("applying rule %d to cluster %s", i,
+ ctx.m_cluster->m_name.c_str());
+ if (! (* f_rules[i].func)(props, ctx, f_rules[i].extra))
+ ok = false;
+ }
+ else
+ {
+ atrt_cluster& cluster = *config.m_clusters[j];
+ for (size_t k = 0; k<cluster.m_processes.size(); k++)
+ {
+ atrt_process& proc = *cluster.m_processes[k];
+ ctx.m_process = cluster.m_processes[k];
+ if (proc.m_type & f_rules[i].type)
+ {
+ g_logger.debug("applying rule %d to %s", i,
+ proc.m_proc.m_cwd.c_str());
+ if (! (* f_rules[i].func)(props, ctx, f_rules[i].extra))
+ ok = false;
+ }
+ }
+ }
+ }
+
+ if (!ok)
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
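+/**
+ * Look up the n'th process of the given type from a "<n>.<cluster suffix>"
+ * specification (1-based), as used by the replication option.
+ */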
+static
+atrt_process*
+find(atrt_config& config, int type, const char * name)
+{
+ BaseString tmp(name);
+ Vector<BaseString> src;
+ Vector<BaseString> dst;
+ tmp.split(src, ".");
+
+ if (src.size() != 2)
+ {
+ return 0;
+ }
+ atrt_cluster* cluster = 0;
+ BaseString cl;
+ cl.appfmt(".%s", src[1].c_str());
+ for (size_t i = 0; i<config.m_clusters.size(); i++)
+ {
+ if (config.m_clusters[i]->m_name == cl)
+ {
+ cluster = config.m_clusters[i];
+ break;
+ }
+ }
+
+ if (cluster == 0)
+ {
+ return 0;
+ }
+
+ int idx = atoi(src[0].c_str()) - 1;
+ for (size_t i = 0; i<cluster->m_processes.size(); i++)
+ {
+ if (cluster->m_processes[i]->m_type & type)
+ {
+ if (idx == 0)
+ return cluster->m_processes[i];
+ else
+ idx --;
+ }
+ }
+
+ return 0;
+}
+
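+/**
+ * m_replication is a ';'-separated list of "master:slave" pairs,
+ * each side written as "<n>.<cluster suffix>" (e.g. "1.myclust1:1.myclust2")
+ * and resolved to a mysqld.
+ */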
+static
+bool
+pr_check_replication(Properties& props, proc_rule_ctx& ctx, int)
+{
+ if (! (ctx.m_config->m_replication == ""))
+ {
+ Vector<BaseString> list;
+ ctx.m_config->m_replication.split(list, ";");
+ atrt_config& config = *ctx.m_config;
+
+ ctx.m_config->m_replication = "";
+
+ const char * msg = "Invalid replication specification";
+ for (size_t i = 0; i<list.size(); i++)
+ {
+ Vector<BaseString> rep;
+ list[i].split(rep, ":");
+ if (rep.size() != 2)
+ {
+ g_logger.error("%s: %s (split: %d)", msg, list[i].c_str(), rep.size());
+ return false;
+ }
+
+ atrt_process* src = find(config, atrt_process::AP_MYSQLD,rep[0].c_str());
+ atrt_process* dst = find(config, atrt_process::AP_MYSQLD,rep[1].c_str());
+
+ if (src == 0 || dst == 0)
+ {
+ g_logger.error("%s: %s (%d %d)",
+ msg, list[i].c_str(), src != 0, dst != 0);
+ return false;
+ }
+
+ if (dst->m_rep_src != 0)
+ {
+ g_logger.error("%s: %s : %s already has replication src (%s)",
+ msg,
+ list[i].c_str(),
+ dst->m_proc.m_cwd.c_str(),
+ dst->m_rep_src->m_proc.m_cwd.c_str());
+ return false;
+ }
+
+ dst->m_rep_src = src;
+ src->m_rep_dst.push_back(dst);
+
+ src->m_options.m_features |= PO_REP_MASTER;
+ dst->m_options.m_features |= PO_REP_SLAVE;
+ }
+ }
+ return true;
+}
+
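+/**
+ * Flag the cluster as an NDB cluster if it contains any ndb_mgmd, ndbd
+ * or ndb api process, and propagate the feature to all its processes.
+ */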
+static
+bool
+pr_check_features(Properties& props, proc_rule_ctx& ctx, int)
+{
+ int features = 0;
+ atrt_cluster& cluster = *ctx.m_cluster;
+ for (size_t i = 0; i<cluster.m_processes.size(); i++)
+ {
+ if (cluster.m_processes[i]->m_type == atrt_process::AP_NDB_MGMD ||
+ cluster.m_processes[i]->m_type == atrt_process::AP_NDB_API ||
+ cluster.m_processes[i]->m_type == atrt_process::AP_NDBD)
+ {
+ features |= atrt_options::AO_NDBCLUSTER;
+ break;
+ }
+ }
+
+ if (features)
+ {
+ cluster.m_options.m_features |= features;
+ for (size_t i = 0; i<cluster.m_processes.size(); i++)
+ {
+ cluster.m_processes[i]->m_options.m_features |= features;
+ }
+ }
+ return true;
+}
+
+static
+bool
+pr_fix_client(Properties& props, proc_rule_ctx& ctx, int)
+{
+ for (size_t i = 0; f_options[i].name; i++)
+ {
+ proc_option& opt = f_options[i];
+ const char * name = opt.name;
+ if (opt.type & atrt_process::AP_CLIENT)
+ {
+ const char * val;
+ atrt_process& proc = *ctx.m_process;
+ if (!proc.m_options.m_loaded.get(name, &val))
+ {
+ require(proc.m_mysqld->m_options.m_loaded.get(name, &val));
+ proc.m_options.m_loaded.put(name, val);
+ proc.m_options.m_generated.put(name, val);
+ }
+ }
+ }
+
+ return true;
+}
+
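+/**
+ * Return the well known default port (3306 for --port=, 1186 for
+ * --PortNumber=) unless another process on the same host already uses
+ * it, in which case return 0.
+ */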
+static
+Uint32
+try_default_port(atrt_process& proc, const char * name)
+{
+ Uint32 port =
+ strcmp(name, "--port=") == 0 ? 3306 :
+ strcmp(name, "--PortNumber=") == 0 ? 1186 :
+ 0;
+
+ atrt_host * host = proc.m_host;
+ for (size_t i = 0; i<host->m_processes.size(); i++)
+ {
+ const char * val;
+ if (host->m_processes[i]->m_options.m_loaded.get(name, &val))
+ {
+ if ((Uint32)atoi(val) == port)
+ return 0;
+ }
+ }
+ return port;
+}
+
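+/**
+ * Generate a value for an option that was not given in my.cnf
+ * (port, datadir, socket, server-id, log-bin and the replication
+ * master settings) and record it as both loaded and generated.
+ */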
+static
+bool
+generate(atrt_process& proc, const char * name, Properties& props)
+{
+ atrt_options& opts = proc.m_options;
+ if (strcmp(name, "--port=") == 0 ||
+ strcmp(name, "--PortNumber=") == 0)
+ {
+ Uint32 val;
+ if (g_default_ports == 0 || (val = try_default_port(proc, name)) == 0)
+ {
+ val = g_baseport;
+ props.get("--PortNumber=", &val);
+ props.put("--PortNumber=", (val + 1), true);
+ }
+
+ char buf[255];
+ snprintf(buf, sizeof(buf), "%u", val);
+ opts.m_loaded.put(name, buf);
+ opts.m_generated.put(name, buf);
+ return true;
+ }
+ else if (strcmp(name, "--datadir=") == 0)
+ {
+ opts.m_loaded.put(name, proc.m_proc.m_cwd.c_str());
+ opts.m_generated.put(name, proc.m_proc.m_cwd.c_str());
+ return true;
+ }
+ else if (strcmp(name, "--FileSystemPath=") == 0)
+ {
+ BaseString dir;
+ dir.append(proc.m_host->m_basedir);
+ dir.append("/");
+ dir.append(proc.m_cluster->m_dir);
+ opts.m_loaded.put(name, dir.c_str());
+ opts.m_generated.put(name, dir.c_str());
+ return true;
+ }
+ else if (strcmp(name, "--socket=") == 0)
+ {
+ const char * sock = 0;
+ if (g_default_ports)
+ {
+ sock = "/tmp/mysql.sock";
+ atrt_host * host = proc.m_host;
+ for (size_t i = 0; i<host->m_processes.size(); i++)
+ {
+ const char * val;
+ if (host->m_processes[i]->m_options.m_loaded.get(name, &val))
+ {
+ if (strcmp(sock, val) == 0)
+ {
+ sock = 0;
+ break;
+ }
+ }
+ }
+ }
+
+ BaseString tmp;
+ if (sock == 0)
+ {
+ tmp.assfmt("%s/mysql.sock", proc.m_proc.m_cwd.c_str());
+ sock = tmp.c_str();
+ }
+
+ opts.m_loaded.put(name, sock);
+ opts.m_generated.put(name, sock);
+ return true;
+ }
+ else if (strcmp(name, "--server-id=") == 0)
+ {
+ Uint32 val = 1;
+ props.get(name, &val);
+ char buf[255];
+ snprintf(buf, sizeof(buf), "%u", val);
+ opts.m_loaded.put(name, buf);
+ opts.m_generated.put(name, buf);
+ props.put(name, (val + 1), true);
+ return true;
+ }
+ else if (strcmp(name, "--log-bin") == 0)
+ {
+ opts.m_loaded.put(name, "");
+ opts.m_generated.put(name, "");
+ return true;
+ }
+ else if (strcmp(name, "--master-host=") == 0)
+ {
+ require(proc.m_rep_src != 0);
+ opts.m_loaded.put(name, proc.m_rep_src->m_host->m_hostname.c_str());
+ opts.m_generated.put(name, proc.m_rep_src->m_host->m_hostname.c_str());
+ return true;
+ }
+ else if (strcmp(name, "--master-port=") == 0)
+ {
+ const char* val;
+ require(proc.m_rep_src->m_options.m_loaded.get("--port=", &val));
+ opts.m_loaded.put(name, val);
+ opts.m_generated.put(name, val);
+ return true;
+ }
+ else if (strcmp(name, "--master-user=") == 0)
+ {
+ opts.m_loaded.put(name, "root");
+ opts.m_generated.put(name, "root");
+ return true;
+ }
+ else if (strcmp(name, "--master-password=") == 0)
+ {
+ opts.m_loaded.put(name, "\"\"");
+ opts.m_generated.put(name, "\"\"");
+ return true;
+ }
+
+ g_logger.warning("Unknown parameter: %s", name);
+ return true;
+}
+
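+/**
+ * For every option that applies to this process type and matches the
+ * feature mask in 'extra', generate a value if none was loaded from my.cnf.
+ */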
+static
+bool
+pr_proc_options(Properties& props, proc_rule_ctx& ctx, int extra)
+{
+ for (size_t i = 0; f_options[i].name; i++)
+ {
+ proc_option& opt = f_options[i];
+ atrt_process& proc = *ctx.m_process;
+ const char * name = opt.name;
+ if (opt.type & proc.m_type)
+ {
+ if (opt.options == 0 ||
+ (opt.options & extra & proc.m_options.m_features))
+ {
+ const char * val;
+ if (!proc.m_options.m_loaded.get(name, &val))
+ {
+ generate(proc, name, props);
+ }
+ }
+ }
+ }
+ return true;
+}
+
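+/**
+ * Build the connect string from the cluster's ndb_mgmd host:port pairs
+ * (unless one was given in my.cnf) and export it as NDB_CONNECTSTRING
+ * to every process in the cluster.
+ */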
+static
+bool
+pr_fix_ndb_connectstring(Properties& props, proc_rule_ctx& ctx, int)
+{
+ const char * val;
+ atrt_cluster& cluster = *ctx.m_cluster;
+
+ if (cluster.m_options.m_features & atrt_options::AO_NDBCLUSTER)
+ {
+ if (!cluster.m_options.m_loaded.get(ndbcs, &val))
+ {
+ /**
+ * Construct connect string for this cluster
+ */
+ BaseString str;
+ for (size_t i = 0; i<cluster.m_processes.size(); i++)
+ {
+ atrt_process* tmp = cluster.m_processes[i];
+ if (tmp->m_type == atrt_process::AP_NDB_MGMD)
+ {
+ if (str.length())
+ {
+ str.append(";");
+ }
+ const char * port;
+ require(tmp->m_options.m_loaded.get("--PortNumber=", &port));
+ str.appfmt("%s:%s", tmp->m_host->m_hostname.c_str(), port);
+ }
+ }
+ cluster.m_options.m_loaded.put(ndbcs, str.c_str());
+ cluster.m_options.m_generated.put(ndbcs, str.c_str());
+ cluster.m_options.m_loaded.get(ndbcs, &val);
+ }
+
+ for (size_t i = 0; i<cluster.m_processes.size(); i++)
+ {
+ cluster.m_processes[i]->m_proc.m_env.appfmt(" NDB_CONNECTSTRING=%s",
+ val);
+ }
+ }
+ return true;
+}
+
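+/**
+ * Copy the cluster's connect string onto each NDB-enabled mysqld and
+ * make sure --ndbcluster is set.
+ */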
+static
+bool
+pr_set_ndb_connectstring(Properties& props, proc_rule_ctx& ctx, int)
+{
+ const char * val;
+
+ atrt_process& proc = *ctx.m_process;
+ if (proc.m_options.m_features & atrt_options::AO_NDBCLUSTER)
+ {
+ if (!proc.m_options.m_loaded.get(ndbcs, &val))
+ {
+ require(proc.m_cluster->m_options.m_loaded.get(ndbcs, &val));
+ proc.m_options.m_loaded.put(ndbcs, val);
+ proc.m_options.m_generated.put(ndbcs, val);
+ }
+
+ if (!proc.m_options.m_loaded.get("--ndbcluster", &val))
+ {
+ proc.m_options.m_loaded.put("--ndbcluster", "");
+ proc.m_options.m_generated.put("--ndbcluster", "");
+ }
+ }
+ return true;
+}
+
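+/**
+ * Final check: every option required for this process type must have a
+ * value; auto-generated values are only accepted when a new setup is
+ * being created (setup != 0), in which case the config is marked generated.
+ */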
+static
+bool
+pr_check_proc(Properties& props, proc_rule_ctx& ctx, int)
+{
+ bool ok = true;
+ bool generated = false;
+ const int setup = ctx.m_setup;
+ atrt_process& proc = *ctx.m_process;
+ for (size_t i = 0; f_options[i].name; i++)
+ {
+ proc_option& opt = f_options[i];
+ const char * name = opt.name;
+ if ((ctx.m_process->m_type & opt.type) &&
+ (opt.options == 0 || (ctx.m_process->m_options.m_features & opt.options)))
+ {
+ const char * val;
+ if (!proc.m_options.m_loaded.get(name, &val))
+ {
+ ok = false;
+ g_logger.warning("Missing paramter: %s for %s",
+ name, proc.m_proc.m_cwd.c_str());
+ }
+ else if (proc.m_options.m_generated.get(name, &val))
+ {
+ if (setup == 0)
+ {
+ ok = false;
+ g_logger.warning("Missing paramter: %s for %s",
+ name, proc.m_proc.m_cwd.c_str());
+ }
+ else
+ {
+ generated = true;
+ }
+ }
+ }
+ }
+
+ if (generated)
+ {
+ ctx.m_config->m_generated = true;
+ }
+
+ //ndbout << proc << endl;
+
+ return ok;
+}
+
+
+NdbOut&
+operator<<(NdbOut& out, const atrt_process& proc)
+{
+ out << "[ atrt_process: ";
+ switch(proc.m_type){
+ case atrt_process::AP_NDB_MGMD:
+ out << "ndb_mgmd";
+ break;
+ case atrt_process::AP_NDBD:
+ out << "ndbd";
+ break;
+ case atrt_process::AP_MYSQLD:
+ out << "mysqld";
+ break;
+ case atrt_process::AP_NDB_API:
+ out << "ndbapi";
+ break;
+ case atrt_process::AP_CLIENT:
+ out << "client";
+ break;
+ default:
+ out << "<unknown: " << (int)proc.m_type << " >";
+ }
+
+ out << " cluster: " << proc.m_cluster->m_name.c_str()
+ << " host: " << proc.m_host->m_hostname.c_str()
+ << endl << " cwd: " << proc.m_proc.m_cwd.c_str()
+ << endl << " path: " << proc.m_proc.m_path.c_str()
+ << endl << " args: " << proc.m_proc.m_args.c_str()
+ << endl << " env: " << proc.m_proc.m_env.c_str() << endl;
+
+ proc.m_options.m_generated.print(stdout, "generated: ");
+
+ out << " ]";
+
+#if 0
+ proc.m_index = 0; //idx;
+ proc.m_host = host_ptr;
+ proc.m_cluster = cluster;
+ proc.m_proc.m_id = -1;
+ proc.m_proc.m_type = "temporary";
+ proc.m_proc.m_owner = "atrt";
+ proc.m_proc.m_group = cluster->m_name.c_str();
+ proc.m_proc.m_cwd.assign(dir).append("/atrt/").append(cluster->m_dir);
+ proc.m_proc.m_stdout = "log.out";
+ proc.m_proc.m_stderr = "2>&1";
+ proc.m_proc.m_runas = proc.m_host->m_user;
+ proc.m_proc.m_ulimit = "c:unlimited";
+ proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", dir);
+ proc.m_proc.m_shutdown_options = "";
+#endif
+
+ return out;
+}
+
diff --git a/storage/ndb/test/run-test/test-tests.txt b/storage/ndb/test/run-test/test-tests.txt
new file mode 100644
index 00000000000..b57023fc0c1
--- /dev/null
+++ b/storage/ndb/test/run-test/test-tests.txt
@@ -0,0 +1,24 @@
+max-time: 600
+cmd: testBasic
+args: -n PkRead T1
+
+max-time: 1800
+cmd: testMgm
+args: -n SingleUserMode T1
+
+#
+#
+# SYSTEM RESTARTS
+#
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR3 T6
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR4 T6
+
+max-time: 600
+cmd: testBasic
+args: -n PkRead T1
+
diff --git a/storage/ndb/test/src/HugoOperations.cpp b/storage/ndb/test/src/HugoOperations.cpp
index 188e7a9288e..9a286a71b91 100644
--- a/storage/ndb/test/src/HugoOperations.cpp
+++ b/storage/ndb/test/src/HugoOperations.cpp
@@ -330,8 +330,8 @@ int HugoOperations::execute_Commit(Ndb* pNdb,
int check = 0;
check = pTrans->execute(Commit, eao);
- if( check == -1 ) {
- const NdbError err = pTrans->getNdbError();
+ const NdbError err = pTrans->getNdbError();
+ if( check == -1 || err.code) {
ERR(err);
NdbOperation* pOp = pTrans->getNdbErrorOperation();
if (pOp != NULL){
@@ -379,13 +379,16 @@ int HugoOperations::execute_NoCommit(Ndb* pNdb, AbortOption eao){
int check;
check = pTrans->execute(NoCommit, eao);
- if( check == -1 ) {
- const NdbError err = pTrans->getNdbError();
+ const NdbError err = pTrans->getNdbError();
+ if( check == -1 || err.code) {
ERR(err);
- NdbOperation* pOp;
- while ((pOp = pTrans->getNdbErrorOperation()) != NULL){
+ const NdbOperation* pOp = pTrans->getNdbErrorOperation();
+ while (pOp != NULL)
+ {
const NdbError err2 = pOp->getNdbError();
- ERR(err2);
+ if (err2.code)
+ ERR(err2);
+ pOp = pTrans->getNextCompletedOperation(pOp);
}
if (err.code == 0)
return NDBT_FAILED;
diff --git a/storage/ndb/test/src/NdbRestarter.cpp b/storage/ndb/test/src/NdbRestarter.cpp
index 1cfbb56f84d..299517b32d3 100644
--- a/storage/ndb/test/src/NdbRestarter.cpp
+++ b/storage/ndb/test/src/NdbRestarter.cpp
@@ -128,6 +128,68 @@ NdbRestarter::getMasterNodeId(){
}
int
+NdbRestarter::getNodeGroup(int nodeId){
+ if (!isConnected())
+ return -1;
+
+ if (getStatus() != 0)
+ return -1;
+
+ for(size_t i = 0; i < ndbNodes.size(); i++)
+ {
+ if(ndbNodes[i].node_id == nodeId)
+ {
+ return ndbNodes[i].node_group;
+ }
+ }
+
+ return -1;
+}
+
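+/**
+ * Return the node that follows the given node in dynamic id order,
+ * i.e. the node that would take over as master after it.
+ */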
+int
+NdbRestarter::getNextMasterNodeId(int nodeId){
+ if (!isConnected())
+ return -1;
+
+ if (getStatus() != 0)
+ return -1;
+
+ size_t i;
+ for(i = 0; i < ndbNodes.size(); i++)
+ {
+ if(ndbNodes[i].node_id == nodeId)
+ {
+ break;
+ }
+ }
+ assert(i < ndbNodes.size());
+ if (i == ndbNodes.size())
+ return -1;
+
+ int dynid = ndbNodes[i].dynamic_id;
+ int minid = dynid;
+ for (i = 0; i<ndbNodes.size(); i++)
+ if (ndbNodes[i].dynamic_id > minid)
+ minid = ndbNodes[i].dynamic_id;
+
+ for (i = 0; i<ndbNodes.size(); i++)
+ if (ndbNodes[i].dynamic_id > dynid &&
+ ndbNodes[i].dynamic_id < minid)
+ {
+ minid = ndbNodes[i].dynamic_id;
+ }
+
+ if (minid != ~0)
+ {
+ for (i = 0; i<ndbNodes.size(); i++)
+ if (ndbNodes[i].dynamic_id == minid)
+ return ndbNodes[i].node_id;
+ }
+
+ return getMasterNodeId();
+}
+
+int
NdbRestarter::getRandomNotMasterNodeId(int rand){
int master = getMasterNodeId();
if(master == -1)
diff --git a/storage/ndb/test/tools/Makefile.am b/storage/ndb/test/tools/Makefile.am
index 8c451c0b6a1..386a59f723f 100644
--- a/storage/ndb/test/tools/Makefile.am
+++ b/storage/ndb/test/tools/Makefile.am
@@ -38,6 +38,7 @@ include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am
ndb_cpcc_LDADD = $(LDADD)
+ndb_cpcc_LDFLAGS = -static
# Don't update the files from bitkeeper
%::SCCS/s.%
diff --git a/storage/ndb/tools/Makefile.am b/storage/ndb/tools/Makefile.am
index 3d0c6f79146..7480b9a2ae9 100644
--- a/storage/ndb/tools/Makefile.am
+++ b/storage/ndb/tools/Makefile.am
@@ -50,7 +50,7 @@ ndb_restore_SOURCES = restore/restore_main.cpp \
restore/Restore.cpp \
../test/src/NDBT_ResultRow.cpp $(tools_common_sources)
-ndb_config_SOURCES = ndb_condig.cpp \
+ndb_config_SOURCES = ndb_config.cpp \
../src/mgmsrv/Config.cpp \
../src/mgmsrv/ConfigInfo.cpp \
../src/mgmsrv/InitConfigFileParser.cpp
diff --git a/storage/ndb/tools/ndb_condig.cpp b/storage/ndb/tools/ndb_config.cpp
index 31fc59a8b83..31fc59a8b83 100644
--- a/storage/ndb/tools/ndb_condig.cpp
+++ b/storage/ndb/tools/ndb_config.cpp